
github.com/windirstat/windirstat.git
author     Oliver Schneider <oliver@assarbad.net>  2015-11-29 17:15:36 +0300
committer  Oliver Schneider <oliver@assarbad.net>  2015-11-29 17:15:36 +0300
commit     8d9602a7550bdc6e12bf2b246e185cc866ee18ec (patch)
tree       fd6936907e1d6dc9543808bc3009a0ca6f1aed84 /3rdparty
parent     54e628301b96624b9f0848f581d6c1c032e62681 (diff)

Backed out changeset: 8527d65ba0cf
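
"Backed out changeset" is the message format written by Mercurial's hg backout command, which suggests this history was converted from a Mercurial repository; in Git terms the commit is simply a revert of an earlier change. A minimal sketch for inspecting it locally, assuming an ordinary clone of this mirror (the commands below are illustrative and not part of the commit):

    # Show the commit header plus a per-file stat, matching the Diffstat below.
    git show --stat 8d9602a7550bdc6e12bf2b246e185cc866ee18ec

    # Restrict the textual diff to the 3rdparty tree shown on this page.
    git show 8d9602a7550bdc6e12bf2b246e185cc866ee18ec -- 3rdparty
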
Diffstat (limited to '3rdparty')
-rw-r--r--  3rdparty/lua/COPYRIGHT  112
-rw-r--r--  3rdparty/lua/Makefile  300
-rw-r--r--  3rdparty/lua/README  32
-rw-r--r--  3rdparty/lua/doc/bluequad-print.css  332
-rw-r--r--  3rdparty/lua/doc/bluequad.css  650
-rw-r--r--  3rdparty/lua/doc/changes.html  1870
-rw-r--r--  3rdparty/lua/doc/contact.html  204
-rw-r--r--  3rdparty/lua/doc/ext_c_api.html  374
-rw-r--r--  3rdparty/lua/doc/ext_ffi.html  660
-rw-r--r--  3rdparty/lua/doc/ext_ffi_api.html  1132
-rw-r--r--  3rdparty/lua/doc/ext_ffi_semantics.html  2488
-rw-r--r--  3rdparty/lua/doc/ext_ffi_tutorial.html  1202
-rw-r--r--  3rdparty/lua/doc/ext_jit.html  398
-rw-r--r--  3rdparty/lua/doc/extensions.html  816
-rw-r--r--  3rdparty/lua/doc/faq.html  368
-rw-r--r--  3rdparty/lua/doc/install.html  67
-rw-r--r--  3rdparty/lua/doc/luajit.html  462
-rw-r--r--  3rdparty/lua/doc/running.html  612
-rw-r--r--  3rdparty/lua/doc/status.html  241
-rw-r--r--  3rdparty/lua/dynasm/dasm_arm.h  912
-rw-r--r--  3rdparty/lua/dynasm/dasm_arm.lua  2247
-rw-r--r--  3rdparty/lua/dynasm/dasm_mips.h  832
-rw-r--r--  3rdparty/lua/dynasm/dasm_mips.lua  1906
-rw-r--r--  3rdparty/lua/dynasm/dasm_ppc.h  824
-rw-r--r--  3rdparty/lua/dynasm/dasm_ppc.lua  2498
-rw-r--r--  3rdparty/lua/dynasm/dasm_proto.h  166
-rw-r--r--  3rdparty/lua/dynasm/dasm_x64.lua  24
-rw-r--r--  3rdparty/lua/dynasm/dasm_x86.h  942
-rw-r--r--  3rdparty/lua/dynasm/dasm_x86.lua  22
-rw-r--r--  3rdparty/lua/dynasm/dynasm.lua  2189
-rw-r--r--  3rdparty/lua/etc/luajit.1  176
-rw-r--r--  3rdparty/lua/etc/luajit.pc  49
-rw-r--r--  3rdparty/lua/src/Makefile  31
-rw-r--r--  3rdparty/lua/src/Makefile.dep  452
-rw-r--r--  3rdparty/lua/src/host/README  8
-rw-r--r--  3rdparty/lua/src/host/buildvm.c  1032
-rw-r--r--  3rdparty/lua/src/host/buildvm.h  208
-rw-r--r--  3rdparty/lua/src/host/buildvm_asm.c  626
-rw-r--r--  3rdparty/lua/src/host/buildvm_fold.c  458
-rw-r--r--  3rdparty/lua/src/host/buildvm_lib.c  796
-rw-r--r--  3rdparty/lua/src/host/buildvm_peobj.c  736
-rw-r--r--  3rdparty/lua/src/host/genminilua.lua  855
-rw-r--r--  3rdparty/lua/src/host/minilua.c  15539
-rw-r--r--  3rdparty/lua/src/jit/bc.lua  382
-rw-r--r--  3rdparty/lua/src/jit/bcsave.lua  1318
-rw-r--r--  3rdparty/lua/src/jit/dis_arm.lua  1378
-rw-r--r--  3rdparty/lua/src/jit/dis_mips.lua  856
-rw-r--r--  3rdparty/lua/src/jit/dis_mipsel.lua  40
-rw-r--r--  3rdparty/lua/src/jit/dis_ppc.lua  1182
-rw-r--r--  3rdparty/lua/src/jit/dis_x64.lua  40
-rw-r--r--  3rdparty/lua/src/jit/dis_x86.lua  1672
-rw-r--r--  3rdparty/lua/src/jit/dump.lua  7
-rw-r--r--  3rdparty/lua/src/jit/v.lua  334
-rw-r--r--  3rdparty/lua/src/lauxlib.h  334
-rw-r--r--  3rdparty/lua/src/lib_aux.c  712
-rw-r--r--  3rdparty/lua/src/lib_base.c  1366
-rw-r--r--  3rdparty/lua/src/lib_bit.c  148
-rw-r--r--  3rdparty/lua/src/lib_debug.c  810
-rw-r--r--  3rdparty/lua/src/lib_ffi.c  1701
-rw-r--r--  3rdparty/lua/src/lib_init.c  110
-rw-r--r--  3rdparty/lua/src/lib_io.c  4
-rw-r--r--  3rdparty/lua/src/lib_jit.c  1326
-rw-r--r--  3rdparty/lua/src/lib_math.c  466
-rw-r--r--  3rdparty/lua/src/lib_os.c  13
-rw-r--r--  3rdparty/lua/src/lib_package.c  1207
-rw-r--r--  3rdparty/lua/src/lib_string.c  1880
-rw-r--r--  3rdparty/lua/src/lib_table.c  600
-rw-r--r--  3rdparty/lua/src/lj_alloc.c  22
-rw-r--r--  3rdparty/lua/src/lj_alloc.h  34
-rw-r--r--  3rdparty/lua/src/lj_api.c  2400
-rw-r--r--  3rdparty/lua/src/lj_arch.h  26
-rw-r--r--  3rdparty/lua/src/lj_asm.c  3832
-rw-r--r--  3rdparty/lua/src/lj_asm.h  34
-rw-r--r--  3rdparty/lua/src/lj_asm_arm.h  4719
-rw-r--r--  3rdparty/lua/src/lj_asm_mips.h  3953
-rw-r--r--  3rdparty/lua/src/lj_asm_ppc.h  4335
-rw-r--r--  3rdparty/lua/src/lj_asm_x86.h  5599
-rw-r--r--  3rdparty/lua/src/lj_bc.c  28
-rw-r--r--  3rdparty/lua/src/lj_bc.h  522
-rw-r--r--  3rdparty/lua/src/lj_bcdump.h  132
-rw-r--r--  3rdparty/lua/src/lj_bcread.c  952
-rw-r--r--  3rdparty/lua/src/lj_bcwrite.c  792
-rw-r--r--  3rdparty/lua/src/lj_carith.c  704
-rw-r--r--  3rdparty/lua/src/lj_carith.h  54
-rw-r--r--  3rdparty/lua/src/lj_ccall.c  1799
-rw-r--r--  3rdparty/lua/src/lj_ccall.h  342
-rw-r--r--  3rdparty/lua/src/lj_ccallback.c  1285
-rw-r--r--  3rdparty/lua/src/lj_ccallback.h  50
-rw-r--r--  3rdparty/lua/src/lj_cconv.c  1503
-rw-r--r--  3rdparty/lua/src/lj_cconv.h  140
-rw-r--r--  3rdparty/lua/src/lj_cdata.c  570
-rw-r--r--  3rdparty/lua/src/lj_cdata.h  150
-rw-r--r--  3rdparty/lua/src/lj_char.c  86
-rw-r--r--  3rdparty/lua/src/lj_char.h  84
-rw-r--r--  3rdparty/lua/src/lj_clib.c  821
-rw-r--r--  3rdparty/lua/src/lj_clib.h  58
-rw-r--r--  3rdparty/lua/src/lj_cparse.c  24
-rw-r--r--  3rdparty/lua/src/lj_cparse.h  130
-rw-r--r--  3rdparty/lua/src/lj_crecord.c  3324
-rw-r--r--  3rdparty/lua/src/lj_crecord.h  62
-rw-r--r--  3rdparty/lua/src/lj_ctype.c  1268
-rw-r--r--  3rdparty/lua/src/lj_ctype.h  922
-rw-r--r--  3rdparty/lua/src/lj_debug.c  1201
-rw-r--r--  3rdparty/lua/src/lj_debug.h  122
-rw-r--r--  3rdparty/lua/src/lj_def.h  702
-rw-r--r--  3rdparty/lua/src/lj_dispatch.c  988
-rw-r--r--  3rdparty/lua/src/lj_dispatch.h  262
-rw-r--r--  3rdparty/lua/src/lj_emit_arm.h  712
-rw-r--r--  3rdparty/lua/src/lj_emit_mips.h  422
-rw-r--r--  3rdparty/lua/src/lj_emit_ppc.h  476
-rw-r--r--  3rdparty/lua/src/lj_emit_x86.h  932
-rw-r--r--  3rdparty/lua/src/lj_err.c  33
-rw-r--r--  3rdparty/lua/src/lj_err.h  82
-rw-r--r--  3rdparty/lua/src/lj_errmsg.h  385
-rw-r--r--  3rdparty/lua/src/lj_ff.h  36
-rw-r--r--  3rdparty/lua/src/lj_ffrecord.c  23
-rw-r--r--  3rdparty/lua/src/lj_ffrecord.h  48
-rw-r--r--  3rdparty/lua/src/lj_frame.h  2
-rw-r--r--  3rdparty/lua/src/lj_func.c  370
-rw-r--r--  3rdparty/lua/src/lj_func.h  48
-rw-r--r--  3rdparty/lua/src/lj_gc.c  1688
-rw-r--r--  3rdparty/lua/src/lj_gc.h  268
-rw-r--r--  3rdparty/lua/src/lj_gdbjit.c  1588
-rw-r--r--  3rdparty/lua/src/lj_gdbjit.h  44
-rw-r--r--  3rdparty/lua/src/lj_ir.c  1002
-rw-r--r--  3rdparty/lua/src/lj_ir.h  1102
-rw-r--r--  3rdparty/lua/src/lj_ircall.h  548
-rw-r--r--  3rdparty/lua/src/lj_iropt.h  322
-rw-r--r--  3rdparty/lua/src/lj_jit.h  833
-rw-r--r--  3rdparty/lua/src/lj_lex.c  963
-rw-r--r--  3rdparty/lua/src/lj_lex.h  170
-rw-r--r--  3rdparty/lua/src/lj_lib.c  516
-rw-r--r--  3rdparty/lua/src/lj_lib.h  224
-rw-r--r--  3rdparty/lua/src/lj_load.c  336
-rw-r--r--  3rdparty/lua/src/lj_mcode.c  746
-rw-r--r--  3rdparty/lua/src/lj_mcode.h  60
-rw-r--r--  3rdparty/lua/src/lj_meta.c  932
-rw-r--r--  3rdparty/lua/src/lj_meta.h  74
-rw-r--r--  3rdparty/lua/src/lj_obj.c  70
-rw-r--r--  3rdparty/lua/src/lj_obj.h  1712
-rw-r--r--  3rdparty/lua/src/lj_opt_dce.c  155
-rw-r--r--  3rdparty/lua/src/lj_opt_fold.c  4599
-rw-r--r--  3rdparty/lua/src/lj_opt_loop.c  873
-rw-r--r--  3rdparty/lua/src/lj_opt_mem.c  1823
-rw-r--r--  3rdparty/lua/src/lj_opt_narrow.c  16
-rw-r--r--  3rdparty/lua/src/lj_opt_sink.c  490
-rw-r--r--  3rdparty/lua/src/lj_opt_split.c  1462
-rw-r--r--  3rdparty/lua/src/lj_parse.c  5504
-rw-r--r--  3rdparty/lua/src/lj_parse.h  36
-rw-r--r--  3rdparty/lua/src/lj_record.c  4499
-rw-r--r--  3rdparty/lua/src/lj_record.h  88
-rw-r--r--  3rdparty/lua/src/lj_snap.c  18
-rw-r--r--  3rdparty/lua/src/lj_snap.h  68
-rw-r--r--  3rdparty/lua/src/lj_state.c  574
-rw-r--r--  3rdparty/lua/src/lj_state.h  70
-rw-r--r--  3rdparty/lua/src/lj_str.c  678
-rw-r--r--  3rdparty/lua/src/lj_str.h  100
-rw-r--r--  3rdparty/lua/src/lj_strscan.c  995
-rw-r--r--  3rdparty/lua/src/lj_strscan.h  78
-rw-r--r--  3rdparty/lua/src/lj_tab.c  1255
-rw-r--r--  3rdparty/lua/src/lj_tab.h  137
-rw-r--r--  3rdparty/lua/src/lj_target.h  324
-rw-r--r--  3rdparty/lua/src/lj_target_arm.h  548
-rw-r--r--  3rdparty/lua/src/lj_target_mips.h  514
-rw-r--r--  3rdparty/lua/src/lj_target_ppc.h  560
-rw-r--r--  3rdparty/lua/src/lj_target_x86.h  684
-rw-r--r--  3rdparty/lua/src/lj_trace.c  3
-rw-r--r--  3rdparty/lua/src/lj_trace.h  106
-rw-r--r--  3rdparty/lua/src/lj_traceerr.h  4
-rw-r--r--  3rdparty/lua/src/lj_udata.c  68
-rw-r--r--  3rdparty/lua/src/lj_udata.h  28
-rw-r--r--  3rdparty/lua/src/lj_vm.h  232
-rw-r--r--  3rdparty/lua/src/lj_vmevent.c  114
-rw-r--r--  3rdparty/lua/src/lj_vmevent.h  118
-rw-r--r--  3rdparty/lua/src/lj_vmmath.c  280
-rw-r--r--  3rdparty/lua/src/ljamalg.c  186
-rw-r--r--  3rdparty/lua/src/lua.h  786
-rw-r--r--  3rdparty/lua/src/lua.hpp  18
-rw-r--r--  3rdparty/lua/src/luaconf.h  295
-rw-r--r--  3rdparty/lua/src/luajit.c  1142
-rw-r--r--  3rdparty/lua/src/luajit.h  140
-rw-r--r--  3rdparty/lua/src/lualib.h  86
-rw-r--r--  3rdparty/lua/src/msvcbuild.bat  2
-rw-r--r--  3rdparty/lua/src/vm_arm.dasc  8973
-rw-r--r--  3rdparty/lua/src/vm_mips.dasc  8482
-rw-r--r--  3rdparty/lua/src/vm_ppc.dasc  10297
-rw-r--r--  3rdparty/lua/src/vm_ppcspe.dasc  7382
-rw-r--r--  3rdparty/lua/src/vm_x86.dasc  25
-rw-r--r--  3rdparty/lua/src/xedkbuild.bat  4
-rw-r--r--  3rdparty/sqlite3/shell.c  2274
-rw-r--r--  3rdparty/sqlite3/sqlite3.c  76636
-rw-r--r--  3rdparty/sqlite3/sqlite3.h  2571
-rw-r--r--  3rdparty/sqlite3/sqlite3ext.h  57
193 files changed, 114598 insertions, 156391 deletions
diff --git a/3rdparty/lua/COPYRIGHT b/3rdparty/lua/COPYRIGHT
index b13dfe2..83ce94d 100644
--- a/3rdparty/lua/COPYRIGHT
+++ b/3rdparty/lua/COPYRIGHT
@@ -1,56 +1,56 @@
-===============================================================================
-LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
-
-Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-[ MIT license: http://www.opensource.org/licenses/mit-license.php ]
-
-===============================================================================
-[ LuaJIT includes code from Lua 5.1/5.2, which has this license statement: ]
-
-Copyright (C) 1994-2012 Lua.org, PUC-Rio.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-===============================================================================
-[ LuaJIT includes code from dlmalloc, which has this license statement: ]
-
-This is a version (aka dlmalloc) of malloc/free/realloc written by
-Doug Lea and released to the public domain, as explained at
-http://creativecommons.org/licenses/publicdomain
-
-===============================================================================
+===============================================================================
+LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
+
+Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+[ MIT license: http://www.opensource.org/licenses/mit-license.php ]
+
+===============================================================================
+[ LuaJIT includes code from Lua 5.1/5.2, which has this license statement: ]
+
+Copyright (C) 1994-2012 Lua.org, PUC-Rio.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+===============================================================================
+[ LuaJIT includes code from dlmalloc, which has this license statement: ]
+
+This is a version (aka dlmalloc) of malloc/free/realloc written by
+Doug Lea and released to the public domain, as explained at
+http://creativecommons.org/licenses/publicdomain
+
+===============================================================================
diff --git a/3rdparty/lua/Makefile b/3rdparty/lua/Makefile
index 2fe528a..8883503 100644
--- a/3rdparty/lua/Makefile
+++ b/3rdparty/lua/Makefile
@@ -1,151 +1,149 @@
-##############################################################################
-# LuaJIT top level Makefile for installation. Requires GNU Make.
-#
-# Please read doc/install.html before changing any variables!
-#
-# Suitable for POSIX platforms (Linux, *BSD, OSX etc.).
-# Note: src/Makefile has many more configurable options.
-#
-# ##### This Makefile is NOT useful for Windows! #####
-# For MSVC, please follow the instructions given in src/msvcbuild.bat.
-# For MinGW and Cygwin, cd to src and run make with the Makefile there.
-#
-# Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-##############################################################################
-
-MAJVER= 2
-MINVER= 0
-RELVER= 4
-VERSION= $(MAJVER).$(MINVER).$(RELVER)
-ABIVER= 5.1
-
-##############################################################################
-#
-# Change the installation path as needed. This automatically adjusts
-# the paths in src/luaconf.h, too. Note: PREFIX must be an absolute path!
-#
-export PREFIX= /usr/local
-export MULTILIB= lib
-##############################################################################
-
-DPREFIX= $(DESTDIR)$(PREFIX)
-INSTALL_BIN= $(DPREFIX)/bin
-INSTALL_LIB= $(DPREFIX)/$(MULTILIB)
-INSTALL_SHARE= $(DPREFIX)/share
-INSTALL_INC= $(DPREFIX)/include/luajit-$(MAJVER).$(MINVER)
-
-INSTALL_LJLIBD= $(INSTALL_SHARE)/luajit-$(VERSION)
-INSTALL_JITLIB= $(INSTALL_LJLIBD)/jit
-INSTALL_LMODD= $(INSTALL_SHARE)/lua
-INSTALL_LMOD= $(INSTALL_LMODD)/$(ABIVER)
-INSTALL_CMODD= $(INSTALL_LIB)/lua
-INSTALL_CMOD= $(INSTALL_CMODD)/$(ABIVER)
-INSTALL_MAN= $(INSTALL_SHARE)/man/man1
-INSTALL_PKGCONFIG= $(INSTALL_LIB)/pkgconfig
-
-INSTALL_TNAME= luajit-$(VERSION)
-INSTALL_TSYMNAME= luajit
-INSTALL_ANAME= libluajit-$(ABIVER).a
-INSTALL_SONAME= libluajit-$(ABIVER).so.$(MAJVER).$(MINVER).$(RELVER)
-INSTALL_SOSHORT= libluajit-$(ABIVER).so
-INSTALL_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).$(MINVER).$(RELVER).dylib
-INSTALL_DYLIBSHORT1= libluajit-$(ABIVER).dylib
-INSTALL_DYLIBSHORT2= libluajit-$(ABIVER).$(MAJVER).dylib
-INSTALL_PCNAME= luajit.pc
-
-INSTALL_STATIC= $(INSTALL_LIB)/$(INSTALL_ANAME)
-INSTALL_DYN= $(INSTALL_LIB)/$(INSTALL_SONAME)
-INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_SOSHORT)
-INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_SOSHORT)
-INSTALL_T= $(INSTALL_BIN)/$(INSTALL_TNAME)
-INSTALL_TSYM= $(INSTALL_BIN)/$(INSTALL_TSYMNAME)
-INSTALL_PC= $(INSTALL_PKGCONFIG)/$(INSTALL_PCNAME)
-
-INSTALL_DIRS= $(INSTALL_BIN) $(INSTALL_LIB) $(INSTALL_INC) $(INSTALL_MAN) \
- $(INSTALL_PKGCONFIG) $(INSTALL_JITLIB) $(INSTALL_LMOD) $(INSTALL_CMOD)
-UNINSTALL_DIRS= $(INSTALL_JITLIB) $(INSTALL_LJLIBD) $(INSTALL_INC) \
- $(INSTALL_LMOD) $(INSTALL_LMODD) $(INSTALL_CMOD) $(INSTALL_CMODD)
-
-RM= rm -f
-MKDIR= mkdir -p
-RMDIR= rmdir 2>/dev/null
-SYMLINK= ln -sf
-INSTALL_X= install -m 0755
-INSTALL_F= install -m 0644
-UNINSTALL= $(RM)
-LDCONFIG= ldconfig -n
-SED_PC= sed -e "s|^prefix=.*|prefix=$(PREFIX)|" \
- -e "s|^multilib=.*|multilib=$(MULTILIB)|"
-
-FILE_T= luajit
-FILE_A= libluajit.a
-FILE_SO= libluajit.so
-FILE_MAN= luajit.1
-FILE_PC= luajit.pc
-FILES_INC= lua.h lualib.h lauxlib.h luaconf.h lua.hpp luajit.h
-FILES_JITLIB= bc.lua v.lua dump.lua dis_x86.lua dis_x64.lua dis_arm.lua \
- dis_ppc.lua dis_mips.lua dis_mipsel.lua bcsave.lua vmdef.lua
-
-ifeq (,$(findstring Windows,$(OS)))
- ifeq (Darwin,$(shell uname -s))
- INSTALL_SONAME= $(INSTALL_DYLIBNAME)
- INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT1)
- INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT2)
- LDCONFIG= :
- endif
-endif
-
-##############################################################################
-
-INSTALL_DEP= src/luajit
-
-default all $(INSTALL_DEP):
- @echo "==== Building LuaJIT $(VERSION) ===="
- $(MAKE) -C src
- @echo "==== Successfully built LuaJIT $(VERSION) ===="
-
-install: $(INSTALL_DEP)
- @echo "==== Installing LuaJIT $(VERSION) to $(PREFIX) ===="
- $(MKDIR) $(INSTALL_DIRS)
- cd src && $(INSTALL_X) $(FILE_T) $(INSTALL_T)
- cd src && test -f $(FILE_A) && $(INSTALL_F) $(FILE_A) $(INSTALL_STATIC) || :
- $(RM) $(INSTALL_TSYM) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2)
- cd src && test -f $(FILE_SO) && \
- $(INSTALL_X) $(FILE_SO) $(INSTALL_DYN) && \
- $(LDCONFIG) $(INSTALL_LIB) && \
- $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT1) && \
- $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT2) || :
- cd etc && $(INSTALL_F) $(FILE_MAN) $(INSTALL_MAN)
- cd etc && $(SED_PC) $(FILE_PC) > $(FILE_PC).tmp && \
- $(INSTALL_F) $(FILE_PC).tmp $(INSTALL_PC) && \
- $(RM) $(FILE_PC).tmp
- cd src && $(INSTALL_F) $(FILES_INC) $(INSTALL_INC)
- cd src/jit && $(INSTALL_F) $(FILES_JITLIB) $(INSTALL_JITLIB)
- $(SYMLINK) $(INSTALL_TNAME) $(INSTALL_TSYM)
- @echo "==== Successfully installed LuaJIT $(VERSION) to $(PREFIX) ===="
-
-uninstall:
- @echo "==== Uninstalling LuaJIT $(VERSION) from $(PREFIX) ===="
- $(UNINSTALL) $(INSTALL_TSYM) $(INSTALL_T) $(INSTALL_STATIC) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2) $(INSTALL_MAN)/$(FILE_MAN) $(INSTALL_PC)
- for file in $(FILES_JITLIB); do \
- $(UNINSTALL) $(INSTALL_JITLIB)/$$file; \
- done
- for file in $(FILES_INC); do \
- $(UNINSTALL) $(INSTALL_INC)/$$file; \
- done
- $(LDCONFIG) $(INSTALL_LIB)
- $(RMDIR) $(UNINSTALL_DIRS) || :
- @echo "==== Successfully uninstalled LuaJIT $(VERSION) from $(PREFIX) ===="
-
-##############################################################################
-
-amalg:
- @echo "Building LuaJIT $(VERSION)"
- $(MAKE) -C src amalg
-
-clean:
- $(MAKE) -C src clean
-
-.PHONY: all install amalg clean
-
-##############################################################################
+##############################################################################
+# LuaJIT top level Makefile for installation. Requires GNU Make.
+#
+# Please read doc/install.html before changing any variables!
+#
+# Suitable for POSIX platforms (Linux, *BSD, OSX etc.).
+# Note: src/Makefile has many more configurable options.
+#
+# ##### This Makefile is NOT useful for Windows! #####
+# For MSVC, please follow the instructions given in src/msvcbuild.bat.
+# For MinGW and Cygwin, cd to src and run make with the Makefile there.
+#
+# Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+##############################################################################
+
+MAJVER= 2
+MINVER= 0
+RELVER= 2
+VERSION= $(MAJVER).$(MINVER).$(RELVER)
+ABIVER= 5.1
+
+##############################################################################
+#
+# Change the installation path as needed. This automatically adjusts
+# the paths in src/luaconf.h, too. Note: PREFIX must be an absolute path!
+#
+export PREFIX= /usr/local
+##############################################################################
+
+DPREFIX= $(DESTDIR)$(PREFIX)
+INSTALL_BIN= $(DPREFIX)/bin
+INSTALL_LIB= $(DPREFIX)/lib
+INSTALL_SHARE= $(DPREFIX)/share
+INSTALL_INC= $(DPREFIX)/include/luajit-$(MAJVER).$(MINVER)
+
+INSTALL_LJLIBD= $(INSTALL_SHARE)/luajit-$(VERSION)
+INSTALL_JITLIB= $(INSTALL_LJLIBD)/jit
+INSTALL_LMODD= $(INSTALL_SHARE)/lua
+INSTALL_LMOD= $(INSTALL_LMODD)/$(ABIVER)
+INSTALL_CMODD= $(INSTALL_LIB)/lua
+INSTALL_CMOD= $(INSTALL_CMODD)/$(ABIVER)
+INSTALL_MAN= $(INSTALL_SHARE)/man/man1
+INSTALL_PKGCONFIG= $(INSTALL_LIB)/pkgconfig
+
+INSTALL_TNAME= luajit-$(VERSION)
+INSTALL_TSYMNAME= luajit
+INSTALL_ANAME= libluajit-$(ABIVER).a
+INSTALL_SONAME= libluajit-$(ABIVER).so.$(MAJVER).$(MINVER).$(RELVER)
+INSTALL_SOSHORT= libluajit-$(ABIVER).so
+INSTALL_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).$(MINVER).$(RELVER).dylib
+INSTALL_DYLIBSHORT1= libluajit-$(ABIVER).dylib
+INSTALL_DYLIBSHORT2= libluajit-$(ABIVER).$(MAJVER).dylib
+INSTALL_PCNAME= luajit.pc
+
+INSTALL_STATIC= $(INSTALL_LIB)/$(INSTALL_ANAME)
+INSTALL_DYN= $(INSTALL_LIB)/$(INSTALL_SONAME)
+INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_SOSHORT)
+INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_SOSHORT)
+INSTALL_T= $(INSTALL_BIN)/$(INSTALL_TNAME)
+INSTALL_TSYM= $(INSTALL_BIN)/$(INSTALL_TSYMNAME)
+INSTALL_PC= $(INSTALL_PKGCONFIG)/$(INSTALL_PCNAME)
+
+INSTALL_DIRS= $(INSTALL_BIN) $(INSTALL_LIB) $(INSTALL_INC) $(INSTALL_MAN) \
+ $(INSTALL_PKGCONFIG) $(INSTALL_JITLIB) $(INSTALL_LMOD) $(INSTALL_CMOD)
+UNINSTALL_DIRS= $(INSTALL_JITLIB) $(INSTALL_LJLIBD) $(INSTALL_INC) \
+ $(INSTALL_LMOD) $(INSTALL_LMODD) $(INSTALL_CMOD) $(INSTALL_CMODD)
+
+RM= rm -f
+MKDIR= mkdir -p
+RMDIR= rmdir 2>/dev/null
+SYMLINK= ln -sf
+INSTALL_X= install -m 0755
+INSTALL_F= install -m 0644
+UNINSTALL= $(RM)
+LDCONFIG= ldconfig -n
+SED_PC= sed -e "s|^prefix=.*|prefix=$(PREFIX)|"
+
+FILE_T= luajit
+FILE_A= libluajit.a
+FILE_SO= libluajit.so
+FILE_MAN= luajit.1
+FILE_PC= luajit.pc
+FILES_INC= lua.h lualib.h lauxlib.h luaconf.h lua.hpp luajit.h
+FILES_JITLIB= bc.lua v.lua dump.lua dis_x86.lua dis_x64.lua dis_arm.lua \
+ dis_ppc.lua dis_mips.lua dis_mipsel.lua bcsave.lua vmdef.lua
+
+ifeq (,$(findstring Windows,$(OS)))
+ ifeq (Darwin,$(shell uname -s))
+ INSTALL_SONAME= $(INSTALL_DYLIBNAME)
+ INSTALL_SHORT1= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT1)
+ INSTALL_SHORT2= $(INSTALL_LIB)/$(INSTALL_DYLIBSHORT2)
+ LDCONFIG= :
+ endif
+endif
+
+##############################################################################
+
+INSTALL_DEP= src/luajit
+
+default all $(INSTALL_DEP):
+ @echo "==== Building LuaJIT $(VERSION) ===="
+ $(MAKE) -C src
+ @echo "==== Successfully built LuaJIT $(VERSION) ===="
+
+install: $(INSTALL_DEP)
+ @echo "==== Installing LuaJIT $(VERSION) to $(PREFIX) ===="
+ $(MKDIR) $(INSTALL_DIRS)
+ cd src && $(INSTALL_X) $(FILE_T) $(INSTALL_T)
+ cd src && test -f $(FILE_A) && $(INSTALL_F) $(FILE_A) $(INSTALL_STATIC) || :
+ $(RM) $(INSTALL_TSYM) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2)
+ cd src && test -f $(FILE_SO) && \
+ $(INSTALL_X) $(FILE_SO) $(INSTALL_DYN) && \
+ $(LDCONFIG) $(INSTALL_LIB) && \
+ $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT1) && \
+ $(SYMLINK) $(INSTALL_SONAME) $(INSTALL_SHORT2) || :
+ cd etc && $(INSTALL_F) $(FILE_MAN) $(INSTALL_MAN)
+ cd etc && $(SED_PC) $(FILE_PC) > $(FILE_PC).tmp && \
+ $(INSTALL_F) $(FILE_PC).tmp $(INSTALL_PC) && \
+ $(RM) $(FILE_PC).tmp
+ cd src && $(INSTALL_F) $(FILES_INC) $(INSTALL_INC)
+ cd src/jit && $(INSTALL_F) $(FILES_JITLIB) $(INSTALL_JITLIB)
+ $(SYMLINK) $(INSTALL_TNAME) $(INSTALL_TSYM)
+ @echo "==== Successfully installed LuaJIT $(VERSION) to $(PREFIX) ===="
+
+uninstall:
+ @echo "==== Uninstalling LuaJIT $(VERSION) from $(PREFIX) ===="
+ $(UNINSTALL) $(INSTALL_TSYM) $(INSTALL_T) $(INSTALL_STATIC) $(INSTALL_DYN) $(INSTALL_SHORT1) $(INSTALL_SHORT2) $(INSTALL_MAN)/$(FILE_MAN) $(INSTALL_PC)
+ for file in $(FILES_JITLIB); do \
+ $(UNINSTALL) $(INSTALL_JITLIB)/$$file; \
+ done
+ for file in $(FILES_INC); do \
+ $(UNINSTALL) $(INSTALL_INC)/$$file; \
+ done
+ $(LDCONFIG) $(INSTALL_LIB)
+ $(RMDIR) $(UNINSTALL_DIRS) || :
+ @echo "==== Successfully uninstalled LuaJIT $(VERSION) from $(PREFIX) ===="
+
+##############################################################################
+
+amalg:
+ @echo "Building LuaJIT $(VERSION)"
+ $(MAKE) -C src amalg
+
+clean:
+ $(MAKE) -C src clean
+
+.PHONY: all install amalg clean
+
+##############################################################################
diff --git a/3rdparty/lua/README b/3rdparty/lua/README
index be2f790..d837fd2 100644
--- a/3rdparty/lua/README
+++ b/3rdparty/lua/README
@@ -1,16 +1,16 @@
-README for LuaJIT 2.0.4
------------------------
-
-LuaJIT is a Just-In-Time (JIT) compiler for the Lua programming language.
-
-Project Homepage: http://luajit.org/
-
-LuaJIT is Copyright (C) 2005-2015 Mike Pall.
-LuaJIT is free software, released under the MIT license.
-See full Copyright Notice in the COPYRIGHT file or in luajit.h.
-
-Documentation for LuaJIT is available in HTML format.
-Please point your favorite browser to:
-
- doc/luajit.html
-
+README for LuaJIT 2.0.2
+-----------------------
+
+LuaJIT is a Just-In-Time (JIT) compiler for the Lua programming language.
+
+Project Homepage: http://luajit.org/
+
+LuaJIT is Copyright (C) 2005-2013 Mike Pall.
+LuaJIT is free software, released under the MIT license.
+See full Copyright Notice in the COPYRIGHT file or in luajit.h.
+
+Documentation for LuaJIT is available in HTML format.
+Please point your favorite browser to:
+
+ doc/luajit.html
+
diff --git a/3rdparty/lua/doc/bluequad-print.css b/3rdparty/lua/doc/bluequad-print.css
index dc62ce5..41ae757 100644
--- a/3rdparty/lua/doc/bluequad-print.css
+++ b/3rdparty/lua/doc/bluequad-print.css
@@ -1,166 +1,166 @@
-/* Copyright (C) 2004-2015 Mike Pall.
- *
- * You are welcome to use the general ideas of this design for your own sites.
- * But please do not steal the stylesheet, the layout or the color scheme.
- */
-body {
- font-family: serif;
- font-size: 11pt;
- margin: 0 3em;
- padding: 0;
- border: none;
-}
-a:link, a:visited, a:hover, a:active {
- text-decoration: none;
- background: transparent;
- color: #0000ff;
-}
-h1, h2, h3 {
- font-family: sans-serif;
- font-weight: bold;
- text-align: left;
- margin: 0.5em 0;
- padding: 0;
-}
-h1 {
- font-size: 200%;
-}
-h2 {
- font-size: 150%;
-}
-h3 {
- font-size: 125%;
-}
-p {
- margin: 0 0 0.5em 0;
- padding: 0;
-}
-ul, ol {
- margin: 0.5em 0;
- padding: 0 0 0 2em;
-}
-ul {
- list-style: outside square;
-}
-ol {
- list-style: outside decimal;
-}
-li {
- margin: 0;
- padding: 0;
-}
-dl {
- margin: 1em 0;
- padding: 1em;
- border: 1px solid black;
-}
-dt {
- font-weight: bold;
- margin: 0;
- padding: 0;
-}
-dt sup {
- float: right;
- margin-left: 1em;
-}
-dd {
- margin: 0.5em 0 0 2em;
- padding: 0;
-}
-table {
- table-layout: fixed;
- width: 100%;
- margin: 1em 0;
- padding: 0;
- border: 1px solid black;
- border-spacing: 0;
- border-collapse: collapse;
-}
-tr {
- margin: 0;
- padding: 0;
- border: none;
-}
-td {
- text-align: left;
- margin: 0;
- padding: 0.2em 0.5em;
- border-top: 1px solid black;
- border-bottom: 1px solid black;
-}
-tr.separate td {
- border-top: double;
-}
-tt, pre, code, kbd, samp {
- font-family: monospace;
- font-size: 75%;
-}
-kbd {
- font-weight: bolder;
-}
-blockquote, pre {
- margin: 1em 2em;
- padding: 0;
-}
-img {
- border: none;
- vertical-align: baseline;
- margin: 0;
- padding: 0;
-}
-img.left {
- float: left;
- margin: 0.5em 1em 0.5em 0;
-}
-img.right {
- float: right;
- margin: 0.5em 0 0.5em 1em;
-}
-.flush {
- clear: both;
- visibility: hidden;
-}
-.hide, .noprint, #nav {
- display: none !important;
-}
-.pagebreak {
- page-break-before: always;
-}
-#site {
- text-align: right;
- font-family: sans-serif;
- font-weight: bold;
- margin: 0 1em;
- border-bottom: 1pt solid black;
-}
-#site a {
- font-size: 1.2em;
-}
-#site a:link, #site a:visited {
- text-decoration: none;
- font-weight: bold;
- background: transparent;
- color: #ffffff;
-}
-#logo {
- color: #ff8000;
-}
-#head {
- clear: both;
- margin: 0 1em;
-}
-#main {
- line-height: 1.3;
- text-align: justify;
- margin: 1em;
-}
-#foot {
- clear: both;
- font-size: 80%;
- text-align: center;
- margin: 0 1.25em;
- padding: 0.5em 0 0 0;
- border-top: 1pt solid black;
- page-break-before: avoid;
- page-break-after: avoid;
-}
+/* Copyright (C) 2004-2013 Mike Pall.
+ *
+ * You are welcome to use the general ideas of this design for your own sites.
+ * But please do not steal the stylesheet, the layout or the color scheme.
+ */
+body {
+ font-family: serif;
+ font-size: 11pt;
+ margin: 0 3em;
+ padding: 0;
+ border: none;
+}
+a:link, a:visited, a:hover, a:active {
+ text-decoration: none;
+ background: transparent;
+ color: #0000ff;
+}
+h1, h2, h3 {
+ font-family: sans-serif;
+ font-weight: bold;
+ text-align: left;
+ margin: 0.5em 0;
+ padding: 0;
+}
+h1 {
+ font-size: 200%;
+}
+h2 {
+ font-size: 150%;
+}
+h3 {
+ font-size: 125%;
+}
+p {
+ margin: 0 0 0.5em 0;
+ padding: 0;
+}
+ul, ol {
+ margin: 0.5em 0;
+ padding: 0 0 0 2em;
+}
+ul {
+ list-style: outside square;
+}
+ol {
+ list-style: outside decimal;
+}
+li {
+ margin: 0;
+ padding: 0;
+}
+dl {
+ margin: 1em 0;
+ padding: 1em;
+ border: 1px solid black;
+}
+dt {
+ font-weight: bold;
+ margin: 0;
+ padding: 0;
+}
+dt sup {
+ float: right;
+ margin-left: 1em;
+}
+dd {
+ margin: 0.5em 0 0 2em;
+ padding: 0;
+}
+table {
+ table-layout: fixed;
+ width: 100%;
+ margin: 1em 0;
+ padding: 0;
+ border: 1px solid black;
+ border-spacing: 0;
+ border-collapse: collapse;
+}
+tr {
+ margin: 0;
+ padding: 0;
+ border: none;
+}
+td {
+ text-align: left;
+ margin: 0;
+ padding: 0.2em 0.5em;
+ border-top: 1px solid black;
+ border-bottom: 1px solid black;
+}
+tr.separate td {
+ border-top: double;
+}
+tt, pre, code, kbd, samp {
+ font-family: monospace;
+ font-size: 75%;
+}
+kbd {
+ font-weight: bolder;
+}
+blockquote, pre {
+ margin: 1em 2em;
+ padding: 0;
+}
+img {
+ border: none;
+ vertical-align: baseline;
+ margin: 0;
+ padding: 0;
+}
+img.left {
+ float: left;
+ margin: 0.5em 1em 0.5em 0;
+}
+img.right {
+ float: right;
+ margin: 0.5em 0 0.5em 1em;
+}
+.flush {
+ clear: both;
+ visibility: hidden;
+}
+.hide, .noprint, #nav {
+ display: none !important;
+}
+.pagebreak {
+ page-break-before: always;
+}
+#site {
+ text-align: right;
+ font-family: sans-serif;
+ font-weight: bold;
+ margin: 0 1em;
+ border-bottom: 1pt solid black;
+}
+#site a {
+ font-size: 1.2em;
+}
+#site a:link, #site a:visited {
+ text-decoration: none;
+ font-weight: bold;
+ background: transparent;
+ color: #ffffff;
+}
+#logo {
+ color: #ff8000;
+}
+#head {
+ clear: both;
+ margin: 0 1em;
+}
+#main {
+ line-height: 1.3;
+ text-align: justify;
+ margin: 1em;
+}
+#foot {
+ clear: both;
+ font-size: 80%;
+ text-align: center;
+ margin: 0 1.25em;
+ padding: 0.5em 0 0 0;
+ border-top: 1pt solid black;
+ page-break-before: avoid;
+ page-break-after: avoid;
+}
diff --git a/3rdparty/lua/doc/bluequad.css b/3rdparty/lua/doc/bluequad.css
index 6839e0a..5e8d5ce 100644
--- a/3rdparty/lua/doc/bluequad.css
+++ b/3rdparty/lua/doc/bluequad.css
@@ -1,325 +1,325 @@
-/* Copyright (C) 2004-2015 Mike Pall.
- *
- * You are welcome to use the general ideas of this design for your own sites.
- * But please do not steal the stylesheet, the layout or the color scheme.
- */
-/* colorscheme:
- *
- * site | head #4162bf/white | #6078bf/#e6ecff
- * ------+------ ----------------+-------------------
- * nav | main #bfcfff | #e6ecff/black
- *
- * nav: hiback loback #c5d5ff #b9c9f9
- * hiborder loborder #e6ecff #97a7d7
- * link hover #2142bf #ff0000
- *
- * link: link visited hover #2142bf #8122bf #ff0000
- *
- * main: boxback boxborder #f0f4ff #bfcfff
- */
-body {
- font-family: Verdana, Arial, Helvetica, sans-serif;
- font-size: 10pt;
- margin: 0;
- padding: 0;
- border: none;
- background: #e0e0e0;
- color: #000000;
-}
-a:link {
- text-decoration: none;
- background: transparent;
- color: #2142bf;
-}
-a:visited {
- text-decoration: none;
- background: transparent;
- color: #8122bf;
-}
-a:hover, a:active {
- text-decoration: underline;
- background: transparent;
- color: #ff0000;
-}
-h1, h2, h3 {
- font-weight: bold;
- text-align: left;
- margin: 0.5em 0;
- padding: 0;
- background: transparent;
-}
-h1 {
- font-size: 200%;
- line-height: 3em; /* really 6em relative to body, match #site span */
- margin: 0;
-}
-h2 {
- font-size: 150%;
- color: #606060;
-}
-h3 {
- font-size: 125%;
- color: #404040;
-}
-p {
- max-width: 600px;
- margin: 0 0 0.5em 0;
- padding: 0;
-}
-b {
- color: #404040;
-}
-ul, ol {
- max-width: 600px;
- margin: 0.5em 0;
- padding: 0 0 0 2em;
-}
-ul {
- list-style: outside square;
-}
-ol {
- list-style: outside decimal;
-}
-li {
- margin: 0;
- padding: 0;
-}
-dl {
- max-width: 600px;
- margin: 1em 0;
- padding: 1em;
- border: 1px solid #bfcfff;
- background: #f0f4ff;
-}
-dt {
- font-weight: bold;
- margin: 0;
- padding: 0;
-}
-dt sup {
- float: right;
- margin-left: 1em;
- color: #808080;
-}
-dt a:visited {
- text-decoration: none;
- color: #2142bf;
-}
-dt a:hover, dt a:active {
- text-decoration: none;
- color: #ff0000;
-}
-dd {
- margin: 0.5em 0 0 2em;
- padding: 0;
-}
-div.tablewrap { /* for IE *sigh* */
- max-width: 600px;
-}
-table {
- table-layout: fixed;
- border-spacing: 0;
- border-collapse: collapse;
- max-width: 600px;
- width: 100%;
- margin: 1em 0;
- padding: 0;
- border: 1px solid #bfcfff;
-}
-tr {
- margin: 0;
- padding: 0;
- border: none;
-}
-tr.odd {
- background: #f0f4ff;
-}
-tr.separate td {
- border-top: 1px solid #bfcfff;
-}
-td {
- text-align: left;
- margin: 0;
- padding: 0.2em 0.5em;
- border: none;
-}
-tt, code, kbd, samp {
- font-family: Courier New, Courier, monospace;
- line-height: 1.2;
- font-size: 110%;
-}
-kbd {
- font-weight: bolder;
-}
-blockquote, pre {
- max-width: 600px;
- margin: 1em 2em;
- padding: 0;
-}
-pre {
- line-height: 1.1;
-}
-pre.code {
- line-height: 1.4;
- margin: 0.5em 0 1em 0.5em;
- padding: 0.5em 1em;
- border: 1px solid #bfcfff;
- background: #f0f4ff;
-}
-pre.mark {
- padding-left: 2em;
-}
-span.codemark {
- position:absolute;
- left: 16em;
- color: #4040c0;
-}
-span.mark {
- color: #4040c0;
- font-family: Courier New, Courier, monospace;
- line-height: 1.1;
-}
-img {
- border: none;
- vertical-align: baseline;
- margin: 0;
- padding: 0;
-}
-img.left {
- float: left;
- margin: 0.5em 1em 0.5em 0;
-}
-img.right {
- float: right;
- margin: 0.5em 0 0.5em 1em;
-}
-.indent {
- padding-left: 1em;
-}
-.flush {
- clear: both;
- visibility: hidden;
-}
-.hide, .noscreen {
- display: none !important;
-}
-.ext {
- color: #ff8000;
-}
-.new {
- font-size: 6pt;
- vertical-align: middle;
- background: #ff8000;
- color: #ffffff;
-}
-#site {
- clear: both;
- float: left;
- width: 13em;
- text-align: center;
- font-weight: bold;
- margin: 0;
- padding: 0;
- background: transparent;
- color: #ffffff;
-}
-#site a {
- font-size: 200%;
-}
-#site a:link, #site a:visited {
- text-decoration: none;
- font-weight: bold;
- background: transparent;
- color: #ffffff;
-}
-#site span {
- line-height: 3em; /* really 6em relative to body, match h1 */
-}
-#logo {
- color: #ffb380;
-}
-#head {
- margin: 0;
- padding: 0 0 0 2em;
- border-left: solid 13em #4162bf;
- border-right: solid 3em #6078bf;
- background: #6078bf;
- color: #e6ecff;
-}
-#nav {
- clear: both;
- float: left;
- overflow: hidden;
- text-align: left;
- line-height: 1.5;
- width: 13em;
- padding-top: 1em;
- background: transparent;
-}
-#nav ul {
- list-style: none outside;
- margin: 0;
- padding: 0;
-}
-#nav li {
- margin: 0;
- padding: 0;
-}
-#nav a {
- display: block;
- text-decoration: none;
- font-weight: bold;
- margin: 0;
- padding: 2px 1em;
- border-top: 1px solid transparent;
- border-bottom: 1px solid transparent;
- background: transparent;
- color: #2142bf;
-}
-#nav a:hover, #nav a:active {
- text-decoration: none;
- border-top: 1px solid #97a7d7;
- border-bottom: 1px solid #e6ecff;
- background: #b9c9f9;
- color: #ff0000;
-}
-#nav a.current, #nav a.current:hover, #nav a.current:active {
- border-top: 1px solid #e6ecff;
- border-bottom: 1px solid #97a7d7;
- background: #c5d5ff;
- color: #2142bf;
-}
-#nav ul ul a {
- padding: 0 1em 0 1.7em;
-}
-#nav ul ul ul a {
- padding: 0 0.5em 0 2.4em;
-}
-#main {
- line-height: 1.5;
- text-align: left;
- margin: 0;
- padding: 1em 2em;
- border-left: solid 13em #bfcfff;
- border-right: solid 3em #e6ecff;
- background: #e6ecff;
-}
-#foot {
- clear: both;
- font-size: 80%;
- text-align: center;
- margin: 0;
- padding: 0.5em;
- background: #6078bf;
- color: #ffffff;
-}
-#foot a:link, #foot a:visited {
- text-decoration: underline;
- background: transparent;
- color: #ffffff;
-}
-#foot a:hover, #foot a:active {
- text-decoration: underline;
- background: transparent;
- color: #bfcfff;
-}
+/* Copyright (C) 2004-2013 Mike Pall.
+ *
+ * You are welcome to use the general ideas of this design for your own sites.
+ * But please do not steal the stylesheet, the layout or the color scheme.
+ */
+/* colorscheme:
+ *
+ * site | head #4162bf/white | #6078bf/#e6ecff
+ * ------+------ ----------------+-------------------
+ * nav | main #bfcfff | #e6ecff/black
+ *
+ * nav: hiback loback #c5d5ff #b9c9f9
+ * hiborder loborder #e6ecff #97a7d7
+ * link hover #2142bf #ff0000
+ *
+ * link: link visited hover #2142bf #8122bf #ff0000
+ *
+ * main: boxback boxborder #f0f4ff #bfcfff
+ */
+body {
+ font-family: Verdana, Arial, Helvetica, sans-serif;
+ font-size: 10pt;
+ margin: 0;
+ padding: 0;
+ border: none;
+ background: #e0e0e0;
+ color: #000000;
+}
+a:link {
+ text-decoration: none;
+ background: transparent;
+ color: #2142bf;
+}
+a:visited {
+ text-decoration: none;
+ background: transparent;
+ color: #8122bf;
+}
+a:hover, a:active {
+ text-decoration: underline;
+ background: transparent;
+ color: #ff0000;
+}
+h1, h2, h3 {
+ font-weight: bold;
+ text-align: left;
+ margin: 0.5em 0;
+ padding: 0;
+ background: transparent;
+}
+h1 {
+ font-size: 200%;
+ line-height: 3em; /* really 6em relative to body, match #site span */
+ margin: 0;
+}
+h2 {
+ font-size: 150%;
+ color: #606060;
+}
+h3 {
+ font-size: 125%;
+ color: #404040;
+}
+p {
+ max-width: 600px;
+ margin: 0 0 0.5em 0;
+ padding: 0;
+}
+b {
+ color: #404040;
+}
+ul, ol {
+ max-width: 600px;
+ margin: 0.5em 0;
+ padding: 0 0 0 2em;
+}
+ul {
+ list-style: outside square;
+}
+ol {
+ list-style: outside decimal;
+}
+li {
+ margin: 0;
+ padding: 0;
+}
+dl {
+ max-width: 600px;
+ margin: 1em 0;
+ padding: 1em;
+ border: 1px solid #bfcfff;
+ background: #f0f4ff;
+}
+dt {
+ font-weight: bold;
+ margin: 0;
+ padding: 0;
+}
+dt sup {
+ float: right;
+ margin-left: 1em;
+ color: #808080;
+}
+dt a:visited {
+ text-decoration: none;
+ color: #2142bf;
+}
+dt a:hover, dt a:active {
+ text-decoration: none;
+ color: #ff0000;
+}
+dd {
+ margin: 0.5em 0 0 2em;
+ padding: 0;
+}
+div.tablewrap { /* for IE *sigh* */
+ max-width: 600px;
+}
+table {
+ table-layout: fixed;
+ border-spacing: 0;
+ border-collapse: collapse;
+ max-width: 600px;
+ width: 100%;
+ margin: 1em 0;
+ padding: 0;
+ border: 1px solid #bfcfff;
+}
+tr {
+ margin: 0;
+ padding: 0;
+ border: none;
+}
+tr.odd {
+ background: #f0f4ff;
+}
+tr.separate td {
+ border-top: 1px solid #bfcfff;
+}
+td {
+ text-align: left;
+ margin: 0;
+ padding: 0.2em 0.5em;
+ border: none;
+}
+tt, code, kbd, samp {
+ font-family: Courier New, Courier, monospace;
+ line-height: 1.2;
+ font-size: 110%;
+}
+kbd {
+ font-weight: bolder;
+}
+blockquote, pre {
+ max-width: 600px;
+ margin: 1em 2em;
+ padding: 0;
+}
+pre {
+ line-height: 1.1;
+}
+pre.code {
+ line-height: 1.4;
+ margin: 0.5em 0 1em 0.5em;
+ padding: 0.5em 1em;
+ border: 1px solid #bfcfff;
+ background: #f0f4ff;
+}
+pre.mark {
+ padding-left: 2em;
+}
+span.codemark {
+ position:absolute;
+ left: 16em;
+ color: #4040c0;
+}
+span.mark {
+ color: #4040c0;
+ font-family: Courier New, Courier, monospace;
+ line-height: 1.1;
+}
+img {
+ border: none;
+ vertical-align: baseline;
+ margin: 0;
+ padding: 0;
+}
+img.left {
+ float: left;
+ margin: 0.5em 1em 0.5em 0;
+}
+img.right {
+ float: right;
+ margin: 0.5em 0 0.5em 1em;
+}
+.indent {
+ padding-left: 1em;
+}
+.flush {
+ clear: both;
+ visibility: hidden;
+}
+.hide, .noscreen {
+ display: none !important;
+}
+.ext {
+ color: #ff8000;
+}
+.new {
+ font-size: 6pt;
+ vertical-align: middle;
+ background: #ff8000;
+ color: #ffffff;
+}
+#site {
+ clear: both;
+ float: left;
+ width: 13em;
+ text-align: center;
+ font-weight: bold;
+ margin: 0;
+ padding: 0;
+ background: transparent;
+ color: #ffffff;
+}
+#site a {
+ font-size: 200%;
+}
+#site a:link, #site a:visited {
+ text-decoration: none;
+ font-weight: bold;
+ background: transparent;
+ color: #ffffff;
+}
+#site span {
+ line-height: 3em; /* really 6em relative to body, match h1 */
+}
+#logo {
+ color: #ffb380;
+}
+#head {
+ margin: 0;
+ padding: 0 0 0 2em;
+ border-left: solid 13em #4162bf;
+ border-right: solid 3em #6078bf;
+ background: #6078bf;
+ color: #e6ecff;
+}
+#nav {
+ clear: both;
+ float: left;
+ overflow: hidden;
+ text-align: left;
+ line-height: 1.5;
+ width: 13em;
+ padding-top: 1em;
+ background: transparent;
+}
+#nav ul {
+ list-style: none outside;
+ margin: 0;
+ padding: 0;
+}
+#nav li {
+ margin: 0;
+ padding: 0;
+}
+#nav a {
+ display: block;
+ text-decoration: none;
+ font-weight: bold;
+ margin: 0;
+ padding: 2px 1em;
+ border-top: 1px solid transparent;
+ border-bottom: 1px solid transparent;
+ background: transparent;
+ color: #2142bf;
+}
+#nav a:hover, #nav a:active {
+ text-decoration: none;
+ border-top: 1px solid #97a7d7;
+ border-bottom: 1px solid #e6ecff;
+ background: #b9c9f9;
+ color: #ff0000;
+}
+#nav a.current, #nav a.current:hover, #nav a.current:active {
+ border-top: 1px solid #e6ecff;
+ border-bottom: 1px solid #97a7d7;
+ background: #c5d5ff;
+ color: #2142bf;
+}
+#nav ul ul a {
+ padding: 0 1em 0 1.7em;
+}
+#nav ul ul ul a {
+ padding: 0 0.5em 0 2.4em;
+}
+#main {
+ line-height: 1.5;
+ text-align: left;
+ margin: 0;
+ padding: 1em 2em;
+ border-left: solid 13em #bfcfff;
+ border-right: solid 3em #e6ecff;
+ background: #e6ecff;
+}
+#foot {
+ clear: both;
+ font-size: 80%;
+ text-align: center;
+ margin: 0;
+ padding: 0.5em;
+ background: #6078bf;
+ color: #ffffff;
+}
+#foot a:link, #foot a:visited {
+ text-decoration: underline;
+ background: transparent;
+ color: #ffffff;
+}
+#foot a:hover, #foot a:active {
+ text-decoration: underline;
+ background: transparent;
+ color: #bfcfff;
+}
diff --git a/3rdparty/lua/doc/changes.html b/3rdparty/lua/doc/changes.html
index 5bc2931..b3deeaf 100644
--- a/3rdparty/lua/doc/changes.html
+++ b/3rdparty/lua/doc/changes.html
@@ -1,978 +1,892 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>LuaJIT Change History</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-div.major { max-width: 600px; padding: 1em; margin: 1em 0 1em 0; }
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>LuaJIT Change History</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a class="current" href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-This is a list of changes between the released versions of LuaJIT.<br>
-The current <span style="color: #0000c0;">stable version</span> is <strong>LuaJIT&nbsp;2.0.4</strong>.<br>
-</p>
-<p>
-Please check the
-<a href="http://luajit.org/changes.html"><span class="ext">&raquo;</span>&nbsp;Online Change History</a>
-to see whether newer versions are available.
-</p>
-
-<div class="major" style="background: #d0d0ff;">
-<h2 id="LuaJIT-2.0.4">LuaJIT 2.0.4 &mdash; 2015-05-14</h2>
-<ul>
-<li>Fix stack check in narrowing optimization.</li>
-<li>Fix Lua/C API typecheck error for special indexes.</li>
-<li>Fix string to number conversion.</li>
-<li>Fix lexer error for chunks without tokens.</li>
-<li>Don't compile <tt>IR_RETF</tt> after <tt>CALLT</tt> to ff with-side effects.</li>
-<li>Fix <tt>BC_UCLO</tt>/<tt>BC_JMP</tt> join optimization in Lua parser.</li>
-<li>Fix corner case in string to number conversion.</li>
-<li>Gracefully handle <tt>lua_error()</tt> for a suspended coroutine.</li>
-<li>Avoid error messages when building with Clang.</li>
-<li>Fix snapshot #0 handling for traces with a stack check on entry.</li>
-<li>Fix fused constant loads under high register pressure.</li>
-<li>Invalidate backpropagation cache after DCE.</li>
-<li>Fix ABC elimination.</li>
-<li>Fix debug info for main chunk of stripped bytecode.</li>
-<li>Fix FOLD rule for <tt>string.sub(s, ...) == k</tt>.</li>
-<li>Fix FOLD rule for <tt>STRREF</tt> of <tt>SNEW</tt>.</li>
-<li>Fix frame traversal while searching for error function.</li>
-<li>Prevent GC estimate miscalculation due to buffer growth.</li>
-<li>Prevent adding side traces for stack checks.</li>
-<li>Fix top slot calculation for snapshots with continuations.</li>
-<li>Fix check for reuse of SCEV results in <tt>FORL</tt>.</li>
-<li>Add PS Vita port.</li>
-<li>Fix compatibility issues with Illumos.</li>
-<li>Fix DragonFly build (unsupported).</li>
-<li>OpenBSD/x86: Better executable memory allocation for W^X mode.</li>
-<li>x86: Fix argument checks for <tt>ipairs()</tt> iterator.</li>
-<li>x86: <tt>lj_math_random_step()</tt> clobbers XMM regs on OSX Clang.</li>
-<li>x86: Fix code generation for unused result of <tt>math.random()</tt>.</li>
-<li>x64: Allow building with <tt>LUAJIT_USE_SYSMALLOC</tt> and <tt>LUAJIT_USE_VALGRIND</tt>.</li>
-<li>x86/x64: Fix argument check for bit shifts.</li>
-<li>x86/x64: Fix code generation for fused test/arith ops.</li>
-<li>ARM: Fix write barrier check in <tt>BC_USETS</tt>.</li>
-<li>PPC: Fix red zone overflow in machine code generation.</li>
-<li>PPC: Don't use <tt>mcrxr</tt> on PPE.</li>
-<li>Various archs: Fix excess stack growth in interpreter.</li>
-<li>FFI: Fix FOLD rule for <tt>TOBIT</tt> + <tt>CONV num.u32</tt>.</li>
-<li>FFI: Prevent DSE across <tt>ffi.string()</tt>.</li>
-<li>FFI: No meta fallback when indexing pointer to incomplete struct.</li>
-<li>FFI: Fix initialization of unions of subtypes.</li>
-<li>FFI: Fix cdata vs. non-cdata arithmetic and comparisons.</li>
-<li>FFI: Fix <tt>__index</tt>/<tt>__newindex</tt> metamethod resolution for ctypes.</li>
-<li>FFI: Fix compilation of reference field access.</li>
-<li>FFI: Fix frame traversal for backtraces with FFI callbacks.</li>
-<li>FFI: Fix recording of indexing a struct pointer ctype object itself.</li>
-<li>FFI: Allow non-scalar cdata to be compared for equality by address.</li>
-<li>FFI: Fix pseudo type conversions for type punning.</li>
-</ul>
-
-<h2 id="LuaJIT-2.0.3">LuaJIT 2.0.3 &mdash; 2014-03-12</h2>
-<ul>
-<li>Add PS4 port.</li>
-<li>Add support for multilib distro builds.</li>
-<li>Fix OSX build.</li>
-<li>Fix MinGW build.</li>
-<li>Fix Xbox 360 build.</li>
-<li>Improve ULOAD forwarding for open upvalues.</li>
-<li>Fix GC steps threshold handling when called by JIT-compiled code.</li>
-<li>Fix argument checks for <tt>math.deg()</tt> and <tt>math.rad()</tt>.</li>
-<li>Fix <tt>jit.flush(func|true)</tt>.</li>
-<li>Respect <tt>jit.off(func)</tt> when returning to a function, too.</li>
-<li>Fix compilation of <tt>string.byte(s, nil, n)</tt>.</li>
-<li>Fix line number for relocated bytecode after closure fixup</li>
-<li>Fix frame traversal for backtraces.</li>
-<li>Fix ABC elimination.</li>
-<li>Fix handling of redundant PHIs.</li>
-<li>Fix snapshot restore for exit to function header.</li>
-<li>Fix type punning alias analysis for constified pointers</li>
-<li>Fix call unroll checks in the presence of metamethod frames.</li>
-<li>Fix initial maxslot for down-recursive traces.</li>
-<li>Prevent BASE register coalescing if parent uses <tt>IR_RETF</tt>.</li>
-<li>Don't purge modified function from stack slots in <tt>BC_RET</tt>.</li>
-<li>Fix recording of <tt>BC_VARG</tt>.</li>
-<li>Don't access dangling reference to reallocated IR.</li>
-<li>Fix frame depth display for bytecode dump in <tt>-jdump</tt>.</li>
-<li>ARM: Fix register allocation when rematerializing FPRs.</li>
-<li>x64: Fix store to upvalue for lightuserdata values.</li>
-<li>FFI: Add missing GC steps for callback argument conversions.</li>
-<li>FFI: Properly unload loaded DLLs.</li>
-<li>FFI: Fix argument checks for <tt>ffi.string()</tt>.</li>
-<li>FFI/x64: Fix passing of vector arguments to calls.</li>
-<li>FFI: Rehash finalizer table after GC cycle, if needed.</li>
-<li>FFI: Fix <tt>cts-&gt;L</tt> for cdata unsinking in snapshot restore.</li>
-</ul>
-
-<h2 id="LuaJIT-2.0.2">LuaJIT 2.0.2 &mdash; 2013-06-03</h2>
-<ul>
-<li>Fix memory access check for fast string interning.</li>
-<li>Fix MSVC intrinsics for older versions.</li>
-<li>Add missing GC steps for <tt>io.*</tt> functions.</li>
-<li>Fix spurious red zone overflows in machine code generation.</li>
-<li>Fix jump-range constrained mcode allocation.</li>
-<li>Inhibit DSE for implicit loads via calls.</li>
-<li>Fix builtin string to number conversion for overflow digits.</li>
-<li>Fix optional argument handling while recording builtins.</li>
-<li>Fix optional argument handling in <tt>table.concat()</tt>.</li>
-<li>Add partial support for building with MingW64 GCC 4.8-SEH.</li>
-<li>Add missing PHI barrier to <tt>string.sub(str, a, b) == kstr</tt> FOLD rule.</li>
-<li>Fix compatibility issues with Illumos.</li>
-<li>ARM: Fix cache flush/sync for exit stubs of JIT-compiled code.</li>
-<li>MIPS: Fix cache flush/sync for JIT-compiled code jump area.</li>
-<li>PPC: Add <tt>plt</tt> suffix for external calls from assembler code.</li>
-<li>FFI: Fix snapshot substitution in SPLIT pass.</li>
-<li>FFI/x86: Fix register allocation for 64 bit comparisons.</li>
-<li>FFI: Fix tailcall in lowest frame to C&nbsp;function with bool result.</li>
-<li>FFI: Ignore <tt>long</tt> type specifier in <tt>ffi.istype()</tt>.</li>
-<li>FFI: Fix calling conventions for 32 bit OSX and iOS simulator (struct returns).</li>
-<li>FFI: Fix calling conventions for ARM hard-float EABI (nested structs).</li>
-<li>FFI: Improve error messages for arithmetic and comparison operators.</li>
-<li>FFI: Insert no-op type conversion for pointer to integer cast.</li>
-<li>FFI: Fix unroll limit for <tt>ffi.fill()</tt>.</li>
-<li>FFI: Must sink <tt>XBAR</tt> together with <tt>XSTORE</tt>s.</li>
-<li>FFI: Preserve intermediate string for <tt>const&nbsp;char&nbsp;*</tt> conversion.</li>
-</ul>
-
-<h2 id="LuaJIT-2.0.1">LuaJIT 2.0.1 &mdash; 2013-02-19</h2>
-<ul>
-<li>Don't clear frame for out-of-memory error.</li>
-<li>Leave hook when resume catches error thrown from hook.</li>
-<li>Add missing GC steps for template table creation.</li>
-<li>Fix discharge order of comparisons in Lua parser.</li>
-<li>Improve buffer handling for <tt>io.read()</tt>.</li>
-<li>OSX: Add support for Mach-O object files to <tt>-b</tt> option.</li>
-<li>Fix PS3 port.</li>
-<li>Fix/enable Xbox 360 port.</li>
-<li>x86/x64: Always mark ref for shift count as non-weak.</li>
-<li>x64: Don't fuse implicitly 32-to-64 extended operands.</li>
-<li>ARM: Fix armhf call argument handling.</li>
-<li>ARM: Fix code generation for integer math.min/math.max.</li>
-<li>PPC/e500: Fix <tt>lj_vm_floor()</tt> for Inf/NaN.</li>
-<li>FFI: Change priority of table initializer variants for structs.</li>
-<li>FFI: Fix code generation for bool call result check on x86/x64.</li>
-<li>FFI: Load FFI library on-demand for bytecode with cdata literals.</li>
-<li>FFI: Fix handling of qualified transparent structs/unions.</li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0">LuaJIT 2.0.0 &mdash; 2012-11-08</h2>
-<ul>
-<li>Correctness and completeness:
-<ul>
- <li>Fix Android/x86 build.</li>
- <li>Fix recording of equality comparisons with <tt>__eq</tt> metamethods.</li>
- <li>Fix detection of immutable upvalues.</li>
- <li>Replace error with PANIC for callbacks from JIT-compiled code.</li>
- <li>Fix builtin string to number conversion for <tt>INT_MIN</tt>.</li>
- <li>Don't create unneeded array part for template tables.</li>
- <li>Fix <tt>CONV.num.int</tt> sinking.</li>
- <li>Don't propagate implicitly widened number to index metamethods.</li>
- <li>ARM: Fix ordered comparisons of number vs. non-number.</li>
- <li>FFI: Fix code generation for replay of sunk float fields.</li>
- <li>FFI: Fix signedness of bool.</li>
- <li>FFI: Fix recording of bool call result check on x86/x64.</li>
- <li>FFI: Fix stack-adjustment for <tt>__thiscall</tt> callbacks.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta11">LuaJIT 2.0.0-beta11 &mdash; 2012-10-16</h2>
-<ul>
-<li>New features:
-<ul>
- <li>Use ARM VFP instructions, if available (build-time detection).</li>
- <li>Add support for ARM hard-float EABI (<tt>armhf</tt>).</li>
- <li>Add PS3 port.</li>
- <li>Add many features from Lua&nbsp;5.2, e.g. <tt>goto</tt>/labels.
- Refer to <a href="extensions.html#lua52">this list</a>.</li>
- <li>FFI: Add parameterized C types.</li>
- <li>FFI: Add support for copy constructors.</li>
- <li>FFI: Equality comparisons never raise an error (treat as unequal instead).</li>
- <li>FFI: Box all accessed or returned enums.</li>
- <li>FFI: Check for <tt>__new</tt> metamethod when calling a constructor.</li>
- <li>FFI: Handle <tt>__pairs</tt>/<tt>__ipairs</tt> metamethods for cdata objects.</li>
- <li>FFI: Convert <tt>io.*</tt> file handle to <tt>FILE *</tt> pointer (but as a <tt>void *</tt>).</li>
- <li>FFI: Detect and support type punning through unions.</li>
- <li>FFI: Improve various error messages.</li>
-</ul></li>
-<li>Build-system reorganization:
-<ul>
- <li>Reorganize directory layout:<br>
- <tt>lib/*</tt> &rarr; <tt>src/jit/*</tt><br>
- <tt>src/buildvm_*.dasc</tt> &rarr; <tt>src/vm_*.dasc</tt><br>
- <tt>src/buildvm_*.h</tt> &rarr; removed<br>
- <tt>src/buildvm*</tt> &rarr; <tt>src/host/*</tt></li>
- <li>Add minified Lua interpreter plus Lua BitOp (<tt>minilua</tt>) to run DynASM.</li>
- <li>Change DynASM bit operations to use Lua BitOp.</li>
- <li>Translate only <tt>vm_*.dasc</tt> for detected target architecture.</li>
- <li>Improve target detection for <tt>msvcbuild.bat</tt>.</li>
- <li>Fix build issues on Cygwin and MinGW with optional MSys.</li>
- <li>Handle cross-compiles with FPU/no-FPU or hard-fp/soft-fp ABI mismatch.</li>
- <li>Remove some library functions for no-JIT/no-FFI builds.</li>
- <li>Add uninstall target to top-level Makefile.</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
- <li>Preserve snapshot #0 PC for all traces.</li>
- <li>Fix argument checks for <tt>coroutine.create()</tt>.</li>
- <li>Command line prints version and JIT status to <tt>stdout</tt>, not <tt>stderr</tt>.</li>
- <li>Fix userdata <tt>__gc</tt> separations at Lua state close.</li>
- <li>Fix <tt>TDUP</tt> to <tt>HLOAD</tt> forwarding for <tt>LJ_DUALNUM</tt> builds.</li>
- <li>Fix buffer check in bytecode writer.</li>
- <li>Make <tt>os.date()</tt> thread-safe.</li>
- <li>Add missing declarations for MSVC intrinsics.</li>
- <li>Fix dispatch table modifications for return hooks.</li>
- <li>Workaround for MSVC conversion bug (<tt>double</tt> &rarr; <tt>uint32_t</tt> &rarr; <tt>int32_t</tt>).</li>
- <li>Fix FOLD rule <tt>(i-j)-i => 0-j</tt>.</li>
- <li>Never use DWARF unwinder on Windows.</li>
- <li>Fix shrinking of direct mapped blocks in builtin allocator.</li>
- <li>Limit recursion depth in <tt>string.match()</tt> et al.</li>
- <li>Fix late despecialization of <tt>ITERN</tt> after loop has been entered.</li>
- <li>Fix <tt>'f'</tt> and <tt>'L'</tt> options for <tt>debug.getinfo()</tt> and <tt>lua_getinfo()</tt>.</li>
- <li>Fix <tt>package.searchpath()</tt>.</li>
- <li>OSX: Change dylib names to be consistent with other platforms.</li>
- <li>Android: Workaround for broken <tt>sprintf("%g",&nbsp;-0.0)</tt>.</li>
- <li>x86: Remove support for ancient CPUs without <tt>CMOV</tt> (before Pentium Pro).</li>
- <li>x86: Fix register allocation for calls returning register pair.</li>
- <li>x86/x64: Fix fusion of unsigned byte comparisons with swapped operands.</li>
- <li>ARM: Fix <tt>tonumber()</tt> argument check.</li>
- <li>ARM: Fix modulo operator and <tt>math.floor()</tt>/<tt>math.ceil()</tt> for <tt>inf</tt>/<tt>nan</tt>.</li>
- <li>ARM: Invoke SPLIT pass for leftover <tt>IR_TOBIT</tt>.</li>
- <li>ARM: Fix BASE register coalescing.</li>
- <li>PPC: Fix interpreter state setup in callbacks.</li>
- <li>PPC: Fix <tt>string.sub()</tt> range check.</li>
- <li>MIPS: Support generation of MIPS/MIPSEL bytecode object files.</li>
- <li>MIPS: Fix calls to <tt>floor()</tt>/<tt>ceil()</tt>/<tt>trunc()</tt>.</li>
- <li>ARM/PPC: Detect more target architecture variants.</li>
- <li>ARM/PPC/e500/MIPS: Fix tailcalls from fast functions, esp. <tt>tostring()</tt>.</li>
- <li>ARM/PPC/MIPS: Fix rematerialization of FP constants.</li>
- <li>FFI: Don't call <tt>FreeLibrary()</tt> on our own EXE/DLL.</li>
- <li>FFI: Resolve metamethods for constructors, too.</li>
- <li>FFI: Properly disable callbacks on iOS (would require executable memory).</li>
- <li>FFI: Fix cdecl string parsing during recording.</li>
- <li>FFI: Show address pointed to for <tt>tostring(ref)</tt>, too.</li>
- <li>FFI: Fix alignment of C call argument/return structure.</li>
- <li>FFI: Initialize all fields of standard types.</li>
- <li>FFI: Fix callback handling when new C&nbsp;types are declared in callback.</li>
- <li>FFI: Fix recording of constructors for pointers.</li>
- <li>FFI: Always resolve metamethods for pointers to structs.</li>
- <li>FFI: Correctly propagate alignment when interning nested types.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
- <li>Add allocation sinking and store sinking optimization.</li>
- <li>Constify immutable upvalues.</li>
- <li>Add builtin string to integer or FP number conversion. Improves cross-platform consistency and correctness.</li>
- <li>Create string hash slots in template tables for non-const values, too. Avoids later table resizes.</li>
- <li>Eliminate <tt>HREFK</tt> guard for template table references.</li>
- <li>Add various new FOLD rules.</li>
- <li>Don't use stack unwinding for <tt>lua_yield()</tt> (slow on x64).</li>
- <li>ARM, PPC, MIPS: Improve <tt>XLOAD</tt> operand fusion and register hinting.</li>
- <li>PPC, MIPS: Compile <tt>math.sqrt()</tt> to sqrt instruction, if available.</li>
- <li>FFI: Fold <tt>KPTR</tt> + constant offset in SPLIT pass.</li>
- <li>FFI: Optimize/inline <tt>ffi.copy()</tt> and <tt>ffi.fill()</tt>.</li>
- <li>FFI: Compile and optimize array/struct copies.</li>
- <li>FFI: Compile <tt>ffi.typeof(cdata|ctype)</tt>, <tt>ffi.sizeof()</tt>, <tt>ffi.alignof()</tt>, <tt>ffi.offsetof()</tt> and <tt>ffi.gc()</tt>.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta10">LuaJIT 2.0.0-beta10 &mdash; 2012-05-09</h2>
-<ul>
-<li>New features:
-<ul>
-<li>The MIPS port of LuaJIT is complete. It requires a CPU conforming to the
-MIPS32&nbsp;R1 architecture with hardware FPU. O32 hard-fp ABI,
-little-endian or big-endian.</li>
-<li>Auto-detect target arch via cross-compiler. No need for
-<tt>TARGET=arch</tt> anymore.</li>
-<li>Make DynASM compatible with Lua 5.2.</li>
-<li>From Lua 5.2: Try <tt>__tostring</tt> metamethod on non-string error
-messages.</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
-<li>Fix parsing of hex literals with exponents.</li>
-<li>Fix bytecode dump for certain number constants.</li>
-<li>Fix argument type in error message for relative arguments.</li>
-<li>Fix argument error handling on Lua stacks without a frame.</li>
-<li>Add missing mcode limit check in assembler backend.</li>
-<li>Fix compilation on OpenBSD.</li>
-<li>Avoid recursive GC steps after GC-triggered trace exit.</li>
-<li>Replace <tt>&lt;unwind.h&gt;</tt> definitions with our own.</li>
-<li>Fix OSX build issues. Bump minimum required OSX version to 10.4.</li>
-<li>Fix discharge order of comparisons in Lua parser.</li>
-<li>Ensure running <tt>__gc</tt> of userdata created in <tt>__gc</tt>
-at state close.</li>
-<li>Limit number of userdata <tt>__gc</tt> separations at state close.</li>
-<li>Fix bytecode <tt>JMP</tt> slot range when optimizing
-<tt>and</tt>/<tt>or</tt> with constant LHS.</li>
-<li>Fix DSE of <tt>USTORE</tt>.</li>
-<li>Make <tt>lua_concat()</tt> work from C&nbsp;hook with partial frame.</li>
-<li>Add required PHIs for implicit conversions, e.g. via <tt>XREF</tt>
-forwarding.</li>
-<li>Add more comparison variants to Valgrind suppressions file.</li>
-<li>Disable loading bytecode with an extra header (BOM or <tt>#!</tt>).</li>
-<li>Fix PHI stack slot syncing.</li>
-<li>ARM: Reorder type/value tests to silence Valgrind.</li>
-<li>ARM: Fix register allocation for <tt>ldrd</tt>-optimized
-<tt>HREFK</tt>.</li>
-<li>ARM: Fix conditional branch fixup for <tt>OBAR</tt>.</li>
-<li>ARM: Invoke SPLIT pass for <tt>double</tt> args in FFI call.</li>
-<li>ARM: Handle all <tt>CALL*</tt> ops with <tt>double</tt> results in
-SPLIT pass.</li>
-<li>ARM: Fix rejoin of <tt>POW</tt> in SPLIT pass.</li>
-<li>ARM: Fix compilation of <tt>math.sinh</tt>, <tt>math.cosh</tt>,
-<tt>math.tanh</tt>.</li>
-<li>ARM, PPC: Avoid pointless arg clearing in <tt>BC_IFUNCF</tt>.</li>
-<li>PPC: Fix resume after yield from hook.</li>
-<li>PPC: Fix argument checking for <tt>rawget()</tt>.</li>
-<li>PPC: Fix fusion of floating-point <tt>XLOAD</tt>/<tt>XSTORE</tt>.</li>
-<li>PPC: Fix <tt>HREFK</tt> code generation for huge tables.</li>
-<li>PPC: Use builtin D-Cache/I-Cache sync code.</li>
-</ul></li>
-<li>FFI library:
-<ul>
-<li>Ignore empty statements in <tt>ffi.cdef()</tt>.</li>
-<li>Ignore number parsing errors while skipping definitions.</li>
-<li>Don't touch frame in callbacks with tailcalls to fast functions.</li>
-<li>Fix library unloading on POSIX systems.</li>
-<li>Finalize cdata before userdata when closing the state.</li>
-<li>Change <tt>ffi.load()</tt> library name resolution for Cygwin.</li>
-<li>Fix resolving of function name redirects on Windows/x86.</li>
-<li>Fix symbol resolving error messages on Windows.</li>
-<li>Fix blacklisting of C functions calling callbacks.</li>
-<li>Fix result type of pointer difference.</li>
-<li>Use correct PC in FFI metamethod error message.</li>
-<li>Allow <tt>'typedef _Bool int BOOL;'</tt> for the Windows API.</li>
-<li>Don't record test for bool result of call, if ignored.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta9">LuaJIT 2.0.0-beta9 &mdash; 2011-12-14</h2>
-<ul>
-<li>New features:
-<ul>
-<li>PPC port of LuaJIT is complete. Default is the dual-number port
-(usually faster). Single-number port selectable via <tt>src/Makefile</tt>
-at build time.</li>
-<li>Add FFI callback support.</li>
-<li>Extend <tt>-b</tt> to generate <tt>.c</tt>, <tt>.h</tt> or <tt>.obj/.o</tt>
-files with embedded bytecode.</li>
-<li>Allow loading embedded bytecode with <tt>require()</tt>.</li>
-<li>From Lua 5.2: Change to <tt>'\z'</tt> escape. Reject undefined escape
-sequences.</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
-<li>Fix OSX 10.7 build. Fix <tt>install_name</tt> and versioning on OSX.</li>
-<li>Fix iOS build.</li>
-<li>Install <tt>dis_arm.lua</tt>, too.</li>
-<li>Mark installed shared library as executable.</li>
-<li>Add debug option to <tt>msvcbuild.bat</tt> and improve error handling.</li>
-<li>Fix data-flow analysis for iterators.</li>
-<li>Fix forced unwinding triggered by external unwinder.</li>
-<li>Record missing <tt>for</tt> loop slot loads (return to lower frame).</li>
-<li>Always use ANSI variants of Windows system functions.</li>
-<li>Fix GC barrier for multi-result table constructor (<tt>TSETM</tt>).</li>
-<li>Fix/add various FOLD rules.</li>
-<li>Add potential PHI for number conversions due to type instability.</li>
-<li>Do not eliminate PHIs only referenced from other PHIs.</li>
-<li>Correctly anchor implicit number to string conversions in Lua/C API.</li>
-<li>Fix various stack limit checks.</li>
-<li>x64: Use thread-safe exceptions for external unwinding (GCC platforms).</li>
-<li>x64: Fix result type of cdata index conversions.</li>
-<li>x64: Fix <tt>math.random()</tt> and <tt>bit.bswap()</tt> code generation.</li>
-<li>x64: Fix <tt>lightuserdata</tt> comparisons.</li>
-<li>x64: Always extend stack-passed arguments to pointer size.</li>
-<li>ARM: Many fixes to code generation backend.</li>
-<li>PPC/e500: Fix dispatch for binop metamethods.</li>
-<li>PPC/e500: Save/restore condition registers when entering/leaving the VM.</li>
-<li>PPC/e500: Fix write barrier in stores of strings to upvalues.</li>
-</ul></li>
-<li>FFI library:
-<ul>
-<li>Fix C comment parsing.</li>
-<li>Fix snapshot optimization for cdata comparisons.</li>
-<li>Fix recording of const/enum lookups in namespaces.</li>
-<li>Fix call argument and return handling for <tt>I8/U8/I16/U16</tt> types.</li>
-<li>Fix unfused loads of float fields.</li>
-<li>Fix <tt>ffi.string()</tt> recording.</li>
-<li>Save <tt>GetLastError()</tt> around <tt>ffi.load()</tt> and symbol
-resolving, too.</li>
-<li>Improve ld script detection in <tt>ffi.load()</tt>.</li>
-<li>Record loads/stores to external variables in namespaces.</li>
-<li>Compile calls to stdcall, fastcall and vararg functions.</li>
-<li>Treat function ctypes like pointers in comparisons.</li>
-<li>Resolve <tt>__call</tt> metamethod for pointers, too.</li>
-<li>Record C function calls with bool return values.</li>
-<li>Record <tt>ffi.errno()</tt>.</li>
-<li>x86: Fix number to <tt>uint32_t</tt> conversion rounding.</li>
-<li>x86: Fix 64 bit arithmetic in assembler backend.</li>
-<li>x64: Fix struct-by-value calling conventions.</li>
-<li>ARM: Ensure invocation of SPLIT pass for float conversions.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>Display trace types with <tt>-jv</tt> and <tt>-jdump</tt>.</li>
-<li>Record isolated calls. But prefer recording loops over calls.</li>
-<li>Specialize to prototype for non-monomorphic functions. Solves the
-trace-explosion problem for closure-heavy programming styles.</li>
-<li>Always generate a portable <tt>vmdef.lua</tt>. Easier for distros.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta8">LuaJIT 2.0.0-beta8 &mdash; 2011-06-23</h2>
-<ul>
-<li>New features:
-<ul>
-<li>Soft-float ARM port of LuaJIT is complete.</li>
-<li>Add support for bytecode loading/saving and <tt>-b</tt> command line
-option.</li>
-<li>From Lua 5.2: <tt>__len</tt> metamethod for tables
-(disabled by default).</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
-<li>ARM: Misc. fixes for interpreter.</li>
-<li>x86/x64: Fix <tt>bit.*</tt> argument checking in interpreter.</li>
-<li>Catch early out-of-memory in memory allocator initialization.</li>
-<li>Fix data-flow analysis for paths leading to an upvalue close.</li>
-<li>Fix check for missing arguments in <tt>string.format()</tt>.</li>
-<li>Fix Solaris/x86 build (note: not a supported target).</li>
-<li>Fix recording of loops with unstable directions in side traces.</li>
-<li>x86/x64: Fix fusion of comparisons with <tt>u8</tt>/<tt>u16</tt>
-<tt>XLOAD</tt>.</li>
-<li>x86/x64: Fix register allocation for variable shifts.</li>
-</ul></li>
-<li>FFI library:
-<ul>
-<li>Add <tt>ffi.errno()</tt>. Save <tt>errno</tt>/<tt>GetLastError()</tt>
-around allocations etc.</li>
-<li>Fix <tt>__gc</tt> for VLA/VLS cdata objects.</li>
-<li>Fix recording of casts from 32 bit cdata pointers to integers.</li>
-<li><tt>tonumber(cdata)</tt> returns <tt>nil</tt> for non-numbers.</li>
-<li>Show address pointed to for <tt>tostring(pointer)</tt>.</li>
-<li>Print <tt>NULL</tt> pointers as <tt>"cdata&lt;... *&gt;: NULL"</tt>.</li>
-<li>Support <tt>__tostring</tt> metamethod for pointers to structs, too.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>More tuning for loop unrolling heuristics.</li>
-<li>Flatten and compress in-memory debug info (saves ~70%).</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta7">LuaJIT 2.0.0-beta7 &mdash; 2011-05-05</h2>
-<ul>
-<li>New features:
-<ul>
-<li>ARM port of the LuaJIT interpreter is complete.</li>
-<li>FFI library: Add <tt>ffi.gc()</tt>, <tt>ffi.metatype()</tt>,
-<tt>ffi.istype()</tt>.</li>
-<li>FFI library: Resolve ld script redirection in <tt>ffi.load()</tt>.</li>
-<li>From Lua 5.2: <tt>package.searchpath()</tt>, <tt>fp:read("*L")</tt>,
-<tt>load(string)</tt>.</li>
-<li>From Lua 5.2, disabled by default: empty statement,
-<tt>table.unpack()</tt>, modified <tt>coroutine.running()</tt>.</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
-<li>FFI library: numerous fixes.</li>
-<li>Fix type mismatches in store-to-load forwarding.</li>
-<li>Fix error handling within metamethods.</li>
-<li>Fix <tt>table.maxn()</tt>.</li>
-<li>Improve accuracy of <tt>x^-k</tt> on x64.</li>
-<li>Fix code generation for Intel Atom in x64 mode.</li>
-<li>Fix narrowing of POW.</li>
-<li>Fix recording of retried fast functions.</li>
-<li>Fix code generation for <tt>bit.bnot()</tt> and multiplies.</li>
-<li>Fix error location within cpcall frames.</li>
-<li>Add workaround for old libgcc unwind bug.</li>
-<li>Fix <tt>lua_yield()</tt> and <tt>getmetatable(lightuserdata)</tt> on x64.</li>
-<li>Misc. fixes for PPC/e500 interpreter.</li>
-<li>Fix stack slot updates for down-recursion.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>Add dual-number mode (int/double) for the VM. Enabled for ARM.</li>
-<li>Improve narrowing of arithmetic operators and <tt>for</tt> loops.</li>
-<li>Tune loop unrolling heuristics and increase trace recorder limits.</li>
-<li>Eliminate dead slots in snapshots using bytecode data-flow analysis.</li>
-<li>Avoid phantom stores to proxy tables.</li>
-<li>Optimize lookups in empty proxy tables.</li>
-<li>Improve bytecode optimization of <tt>and</tt>/<tt>or</tt> operators.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta6">LuaJIT 2.0.0-beta6 &mdash; 2011-02-11</h2>
-<ul>
-<li>New features:
-<ul>
-<li>PowerPC/e500v2 port of the LuaJIT interpreter is complete.</li>
-<li>Various minor features from Lua 5.2: Hex escapes in literals,
-<tt>'\*'</tt> escape, reversible <tt>string.format("%q",s)</tt>,
-<tt>"%g"</tt> pattern, <tt>table.sort</tt> checks callbacks,
-<tt>os.exit(status|true|false[,close])</tt>.</li>
-<li>Lua 5.2 <tt>__pairs</tt> and <tt>__ipairs</tt> metamethods
-(disabled by default).</li>
-<li>Initial release of the FFI library.</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
-<li>Fix <tt>string.format()</tt> for non-finite numbers.</li>
-<li>Fix memory leak when compiled to use the built-in allocator.</li>
-<li>x86/x64: Fix unnecessary resize in <tt>TSETM</tt> bytecode.</li>
-<li>Fix various GC issues with traces and <tt>jit.flush()</tt>.</li>
-<li>x64: Fix fusion of indexes for array references.</li>
-<li>x86/x64: Fix stack overflow handling for coroutine results.</li>
-<li>Enable low-2GB memory allocation on FreeBSD/x64.</li>
-<li>Fix <tt>collectgarbage("count")</tt> result if more than 2GB is in use.</li>
-<li>Fix parsing of hex floats.</li>
-<li>x86/x64: Fix loop branch inversion with trailing
-<tt>HREF+NE/EQ</tt>.</li>
-<li>Add <tt>jit.os</tt> string.</li>
-<li><tt>coroutine.create()</tt> permits running C functions, too.</li>
-<li>Fix OSX build to work with newer ld64 versions.</li>
-<li>Fix bytecode optimization of <tt>and</tt>/<tt>or</tt> operators.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>Emit specialized bytecode for <tt>pairs()</tt>/<tt>next()</tt>.</li>
-<li>Improve bytecode coalescing of <tt>nil</tt> constants.</li>
-<li>Compile calls to vararg functions.</li>
-<li>Compile <tt>select()</tt>.</li>
-<li>Improve alias analysis, esp. for loads from allocations.</li>
-<li>Tuning of various compiler heuristics.</li>
-<li>Refactor and extend IR conversion instructions.</li>
-<li>x86/x64: Various backend enhancements related to the FFI.</li>
-<li>Add SPLIT pass to split 64 bit IR instructions for 32 bit CPUs.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta5">LuaJIT 2.0.0-beta5 &mdash; 2010-08-24</h2>
-<ul>
-<li>Correctness and completeness:
-<ul>
-<li>Fix trace exit dispatch to function headers.</li>
-<li>Fix Windows and OSX builds with LUAJIT_DISABLE_JIT.</li>
-<li>Reorganize and fix placement of generated machine code on x64.</li>
-<li>Fix TNEW in x64 interpreter.</li>
-<li>Do not eliminate PHIs for values only referenced from side exits.</li>
-<li>OS-independent canonicalization of strings for non-finite numbers.</li>
-<li>Fix <tt>string.char()</tt> range check on x64.</li>
-<li>Fix <tt>tostring()</tt> resolving within <tt>print()</tt>.</li>
-<li>Fix error handling for <tt>next()</tt>.</li>
-<li>Fix passing of constant arguments to external calls on x64.</li>
-<li>Fix interpreter argument check for two-argument SSE math functions.</li>
-<li>Fix C frame chain corruption caused by <tt>lua_cpcall()</tt>.</li>
-<li>Fix return from <tt>pcall()</tt> within active hook.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>Replace on-trace GC frame syncing with interpreter exit.</li>
-<li>Improve hash lookup specialization by not removing dead keys during GC.</li>
-<li>Turn traces into true GC objects.</li>
-<li>Avoid starting a GC cycle immediately after library init.</li>
-<li>Add weak guards to improve dead-code elimination.</li>
-<li>Speed up string interning.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta4">LuaJIT 2.0.0-beta4 &mdash; 2010-03-28</h2>
-<ul>
-<li>Correctness and completeness:
-<ul>
-<li>Fix precondition for on-trace creation of table keys.</li>
-<li>Fix <tt>{f()}</tt> on x64 when table is resized.</li>
-<li>Fix folding of ordered comparisons with same references.</li>
-<li>Fix snapshot restores for multi-result bytecodes.</li>
-<li>Fix potential hang when recording bytecode with nested closures.</li>
-<li>Fix recording of <tt>getmetatable()</tt>, <tt>tonumber()</tt> and bad argument types.</li>
-<li>Fix SLOAD fusion across returns to lower frames.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>Add array bounds check elimination. <tt>-Oabc</tt> is enabled by default.</li>
-<li>More tuning for x64, e.g. smaller table objects.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta3">LuaJIT 2.0.0-beta3 &mdash; 2010-03-07</h2>
-<ul>
-<li>LuaJIT x64 port:
-<ul>
-<li>Port integrated memory allocator to Linux/x64, Windows/x64 and OSX/x64.</li>
-<li>Port interpreter and JIT compiler to x64.</li>
-<li>Port DynASM to x64.</li>
-<li>Many 32/64 bit cleanups in the VM.</li>
-<li>Allow building the interpreter with either x87 or SSE2 arithmetics.</li>
-<li>Add external unwinding and C++ exception interop (default on x64).</li>
-</ul></li>
-<li>Correctness and completeness:
-<ul>
-<li>Fix constructor bytecode generation for certain conditional values.</li>
-<li>Fix some cases of ordered string comparisons.</li>
-<li>Fix <tt>lua_tocfunction()</tt>.</li>
-<li>Fix cutoff register in JMP bytecode for some conditional expressions.</li>
-<li>Fix PHI marking algorithm for references from variant slots.</li>
-<li>Fix <tt>package.cpath</tt> for non-default PREFIX.</li>
-<li>Fix DWARF2 frame unwind information for interpreter on OSX.</li>
-<li>Drive the GC forward on string allocations in the parser.</li>
-<li>Implement call/return hooks (zero-cost if disabled).</li>
-<li>Implement yield from C hooks.</li>
-<li>Disable JIT compiler on older non-SSE2 CPUs instead of aborting.</li>
-</ul></li>
-<li>Structural and performance enhancements:
-<ul>
-<li>Compile recursive code (tail-, up- and down-recursion).</li>
-<li>Improve heuristics for bytecode penalties and blacklisting.</li>
-<li>Split CALL/FUNC recording and clean up fast function call semantics.</li>
-<li>Major redesign of internal function call handling.</li>
-<li>Improve FOR loop const specialization and integerness checks.</li>
-<li>Switch to pre-initialized stacks. Avoid frame-clearing.</li>
-<li>Colocation of prototypes and related data: bytecode, constants, debug info.</li>
-<li>Cleanup parser and streamline bytecode generation.</li>
-<li>Add support for weak IR references to register allocator.</li>
-<li>Switch to compressed, extensible snapshots.</li>
-<li>Compile returns to frames below the start frame.</li>
-<li>Improve alias analysis of upvalues using a disambiguation hash value.</li>
-<li>Compile floor/ceil/trunc to SSE2 helper calls or SSE4.1 instructions.</li>
-<li>Add generic C call handling to IR and backend.</li>
-<li>Improve KNUM fuse vs. load heuristics.</li>
-<li>Compile various <tt>io.*()</tt> functions.</li>
-<li>Compile <tt>math.sinh()</tt>, <tt>math.cosh()</tt>, <tt>math.tanh()</tt>
-and <tt>math.random()</tt>.</li>
-</ul></li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta2">LuaJIT 2.0.0-beta2 &mdash; 2009-11-09</h2>
-<ul>
-<li>Reorganize build system. Build static+shared library on POSIX.</li>
-<li>Allow C++ exception conversion on all platforms
-using a wrapper function.</li>
-<li>Automatically catch C++ exceptions and rethrow Lua error
-(DWARF2 only).</li>
-<li>Check for the correct x87 FPU precision at strategic points.</li>
-<li>Always use wrappers for libm functions.</li>
-<li>Resurrect metamethod name strings before copying them.</li>
-<li>Mark current trace, even if compiler is idle.</li>
-<li>Ensure FILE metatable is created only once.</li>
-<li>Fix type comparisons when different integer types are involved.</li>
-<li>Fix <tt>getmetatable()</tt> recording.</li>
-<li>Fix TDUP with dead keys in template table.</li>
-<li><tt>jit.flush(tr)</tt> returns status.
-Prevent manual flush of a trace that's still linked.</li>
-<li>Improve register allocation heuristics for invariant references.</li>
-<li>Compile the push/pop variants of <tt>table.insert()</tt> and
-<tt>table.remove()</tt>.</li>
-<li>Compatibility with MSVC <tt>link&nbsp;/debug</tt>.</li>
-<li>Fix <tt>lua_iscfunction()</tt>.</li>
-<li>Fix <tt>math.random()</tt> when compiled with <tt>-fpic</tt> (OSX).</li>
-<li>Fix <tt>table.maxn()</tt>.</li>
-<li>Bump <tt>MACOSX_DEPLOYMENT_TARGET</tt> to <tt>10.4</tt>.</li>
-<li><tt>luaL_check*()</tt> and <tt>luaL_opt*()</tt> now support
-negative arguments, too.<br>
-This matches the behavior of Lua 5.1, but not the specification.</li>
-</ul>
-
-<h2 id="LuaJIT-2.0.0-beta1">LuaJIT 2.0.0-beta1 &mdash; 2009-10-31</h2>
-<ul>
-<li>This is the first public release of LuaJIT 2.0.</li>
-<li>The whole VM has been rewritten from the ground up, so there's
-no point in listing differences over earlier versions.</li>
-</ul>
-</div>
-
-<div class="major" style="background: #ffff80;">
-<h2 id="LuaJIT-1.1.8">LuaJIT 1.1.8 &mdash; 2012-04-16</h2>
-<ul>
-<li>Merged with Lua 5.1.5. Also integrated fixes for all
-<a href="http://www.lua.org/bugs.html#5.1.5"><span class="ext">&raquo;</span>&nbsp;<span class="ext">&raquo;</span>&nbsp;currently known bugs in Lua 5.1.5</a>.</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.7">LuaJIT 1.1.7 &mdash; 2011-05-05</h2>
-<ul>
-<li>Added fixes for the
-<a href="http://www.lua.org/bugs.html#5.1.4"><span class="ext">&raquo;</span>&nbsp;currently known bugs in Lua 5.1.4</a>.</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.6">LuaJIT 1.1.6 &mdash; 2010-03-28</h2>
-<ul>
-<li>Added fixes for the
-<a href="http://www.lua.org/bugs.html#5.1.4"><span class="ext">&raquo;</span>&nbsp;currently known bugs in Lua 5.1.4</a>.</li>
-<li>Removed wrong GC check in <tt>jit_createstate()</tt>.
-Thanks to Tim Mensch.</li>
-<li>Fixed bad assertions while compiling <tt>table.insert()</tt> and
-<tt>table.remove()</tt>.</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.5">LuaJIT 1.1.5 &mdash; 2008-10-25</h2>
-<ul>
-<li>Merged with Lua 5.1.4. Fixes all
-<a href="http://www.lua.org/bugs.html#5.1.3"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1.3</a>.</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.4">LuaJIT 1.1.4 &mdash; 2008-02-05</h2>
-<ul>
-<li>Merged with Lua 5.1.3. Fixes all
-<a href="http://www.lua.org/bugs.html#5.1.2"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1.2</a>.</li>
-<li>Fixed possible (but unlikely) stack corruption while compiling
-<tt>k^x</tt> expressions.</li>
-<li>Fixed DynASM template for cmpss instruction.</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.3">LuaJIT 1.1.3 &mdash; 2007-05-24</h2>
-<ul>
-<li>Merged with Lua 5.1.2. Fixes all
-<a href="http://www.lua.org/bugs.html#5.1.1"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1.1</a>.</li>
-<li>Merged pending Lua 5.1.x fixes: "return -nil" bug, spurious count hook call.</li>
-<li>Remove a (sometimes) wrong assertion in <tt>luaJIT_findpc()</tt>.</li>
-<li>DynASM now allows labels for displacements and <tt>.aword</tt>.</li>
-<li>Fix some compiler warnings for DynASM glue (internal API change).</li>
-<li>Correct naming for SSSE3 (temporarily known as SSE4) in DynASM and x86 disassembler.</li>
-<li>The loadable debug modules now handle redirection to stdout
-(e.g. <tt>-j&nbsp;trace=-</tt>).</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.2">LuaJIT 1.1.2 &mdash; 2006-06-24</h2>
-<ul>
-<li>Fix MSVC inline assembly: use only local variables with
-<tt>lua_number2int()</tt>.</li>
-<li>Fix "attempt to call a thread value" bug on Mac OS X:
-make values of consts used as lightuserdata keys unique
-to avoid joining by the compiler/linker.</li>
-</ul>
-
-<h2 id="LuaJIT-1.1.1">LuaJIT 1.1.1 &mdash; 2006-06-20</h2>
-<ul>
-<li>Merged with Lua 5.1.1. Fixes all
-<a href="http://www.lua.org/bugs.html#5.1"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1</a>.</li>
-<li>Enforce (dynamic) linker error for EXE/DLL version mismatches.</li>
-<li>Minor changes to DynASM: faster pre-processing, smaller encoding
-for some immediates.</li>
-</ul>
-<p>
-This release is in sync with Coco 1.1.1 (see the
-<a href="http://coco.luajit.org/changes.html"><span class="ext">&raquo;</span>&nbsp;Coco Change History</a>).
-</p>
-
-<h2 id="LuaJIT-1.1.0">LuaJIT 1.1.0 &mdash; 2006-03-13</h2>
-<ul>
-<li>Merged with Lua 5.1 (final).</li>
-
-<li>New JIT call frame setup:
-<ul>
-<li>The C stack is kept 16 byte aligned (faster).
-Mandatory for Mac OS X on Intel, too.</li>
-<li>Faster calling conventions for internal C helper functions.</li>
-<li>Better instruction scheduling for function prologue, OP_CALL and
-OP_RETURN.</li>
-</ul></li>
-
-<li>Miscellaneous optimizations:
-<ul>
-<li>Faster loads of FP constants. Remove narrow-to-wide store-to-load
-forwarding stalls.</li>
-<li>Use (scalar) SSE2 ops (if the CPU supports it) to speed up slot moves
-and FP to integer conversions.</li>
-<li>Optimized the two-argument form of <tt>OP_CONCAT</tt> (<tt>a..b</tt>).</li>
-<li>Inlined <tt>OP_MOD</tt> (<tt>a%b</tt>).
-With better accuracy than the C variant, too.</li>
-<li>Inlined <tt>OP_POW</tt> (<tt>a^b</tt>). Unroll <tt>x^k</tt> or
-use <tt>k^x = 2^(log2(k)*x)</tt> or call <tt>pow()</tt>.</li>
-</ul></li>
-
-<li>Changes in the optimizer:
-<ul>
-<li>Improved hinting for table keys derived from table values
-(<tt>t1[t2[x]]</tt>).</li>
-<li>Lookup hinting now works with arbitrary object types and
-supports index chains, too.</li>
-<li>Generate type hints for arithmetic and comparison operators,
-OP_LEN, OP_CONCAT and OP_FORPREP.</li>
-<li>Remove several hint definitions in favour of a generic COMBINE hint.</li>
-<li>Complete rewrite of <tt>jit.opt_inline</tt> module
-(ex <tt>jit.opt_lib</tt>).</li>
-</ul></li>
-
-<li>Use adaptive deoptimization:
-<ul>
-<li>If runtime verification of a contract fails, the affected
-instruction is recompiled and patched on-the-fly.
-Regular programs will trigger deoptimization only occasionally.</li>
-<li>This avoids generating code for uncommon fallback cases
-most of the time. Generated code is up to 30% smaller compared to
-LuaJIT&nbsp;1.0.3.</li>
-<li>Deoptimization is used for many opcodes and contracts:
-<ul>
-<li>OP_CALL, OP_TAILCALL: type mismatch for callable.</li>
-<li>Inlined calls: closure mismatch, parameter number and type mismatches.</li>
-<li>OP_GETTABLE, OP_SETTABLE: table or key type and range mismatches.</li>
-<li>All arithmetic and comparison operators, OP_LEN, OP_CONCAT,
-OP_FORPREP: operand type and range mismatches.</li>
-</ul></li>
-<li>Complete redesign of the debug and traceback info
-(bytecode &harr; mcode) to support deoptimization.
-Much more flexible and needs only 50% of the space.</li>
-<li>The modules <tt>jit.trace</tt>, <tt>jit.dumphints</tt> and
-<tt>jit.dump</tt> handle deoptimization.</li>
-</ul></li>
-
-<li>Inlined many popular library functions
-(for commonly used arguments only):
-<ul>
-<li>Most <tt>math.*</tt> functions (the 18 most used ones)
-[2x-10x faster].</li>
-<li><tt>string.len</tt>, <tt>string.sub</tt> and <tt>string.char</tt>
-[2x-10x faster].</li>
-<li><tt>table.insert</tt>, <tt>table.remove</tt> and <tt>table.getn</tt>
-[3x-5x faster].</li>
-<li><tt>coroutine.yield</tt> and <tt>coroutine.resume</tt>
-[3x-5x faster].</li>
-<li><tt>pairs</tt>, <tt>ipairs</tt> and the corresponding iterators
-[8x-15x faster].</li>
-</ul></li>
-
-<li>Changes in the core and loadable modules and the stand-alone executable:
-<ul>
-<li>Added <tt>jit.version</tt>, <tt>jit.version_num</tt>
-and <tt>jit.arch</tt>.</li>
-<li>Reorganized some internal API functions (<tt>jit.util.*mcode*</tt>).</li>
-<li>The <tt>-j dump</tt> output now shows JSUB names, too.</li>
-<li>New x86 disassembler module written in pure Lua. No dependency
-on ndisasm anymore. Flexible API, very compact (500 lines)
-and complete (x87, MMX, SSE, SSE2, SSE3, SSSE3, privileged instructions).</li>
-<li><tt>luajit -v</tt> prints the LuaJIT version and copyright
-on a separate line.</li>
-</ul></li>
-
-<li>Added SSE, SSE2, SSE3 and SSSE3 support to DynASM.</li>
-<li>Miscellaneous doc changes. Added a section about
-<a href="install.html#embedding">embedding LuaJIT</a>.</li>
-</ul>
-<p>
-This release is in sync with Coco 1.1.0 (see the
-<a href="http://coco.luajit.org/changes.html"><span class="ext">&raquo;</span>&nbsp;Coco Change History</a>).
-</p>
-</div>
-
-<div class="major" style="background: #ffffd0;">
-<h2 id="LuaJIT-1.0.3">LuaJIT 1.0.3 &mdash; 2005-09-08</h2>
-<ul>
-<li>Even more docs.</li>
-<li>Unified closure checks in <tt>jit.*</tt>.</li>
-<li>Fixed some range checks in <tt>jit.util.*</tt>.</li>
-<li>Fixed __newindex call originating from <tt>jit_settable_str()</tt>.</li>
-<li>Merged with Lua 5.1 alpha (including early bug fixes).</li>
-</ul>
-<p>
-This is the first public release of LuaJIT.
-</p>
-
-<h2 id="LuaJIT-1.0.2">LuaJIT 1.0.2 &mdash; 2005-09-02</h2>
-<ul>
-<li>Add support for flushing the Valgrind translation cache <br>
-(<tt>MYCFLAGS= -DUSE_VALGRIND</tt>).</li>
-<li>Add support for freeing executable mcode memory to the <tt>mmap()</tt>-based
-variant for POSIX systems.</li>
-<li>Reorganized the C&nbsp;function signature handling in
-<tt>jit.opt_lib</tt>.</li>
-<li>Changed to index-based hints for inlining C&nbsp;functions.
-Still no support in the backend for inlining.</li>
-<li>Hardcode <tt>HEAP_CREATE_ENABLE_EXECUTE</tt> value if undefined.</li>
-<li>Misc. changes to the <tt>jit.*</tt> modules.</li>
-<li>Misc. changes to the Makefiles.</li>
-<li>Lots of new docs.</li>
-<li>Complete doc reorg.</li>
-</ul>
-<p>
-Not released because Lua 5.1 alpha came out today.
-</p>
-
-<h2 id="LuaJIT-1.0.1">LuaJIT 1.0.1 &mdash; 2005-08-31</h2>
-<ul>
-<li>Missing GC step in <tt>OP_CONCAT</tt>.</li>
-<li>Fix result handling for C &ndash;> JIT calls.</li>
-<li>Detect CPU feature bits.</li>
-<li>Encode conditional moves (<tt>fucomip</tt>) only when supported.</li>
-<li>Add fallback instructions for FP compares.</li>
-<li>Add support for <tt>LUA_COMPAT_VARARG</tt>. Still disabled by default.</li>
-<li>MSVC needs a specific place for the <tt>CALLBACK</tt> attribute
-(David Burgess).</li>
-<li>Misc. doc updates.</li>
-</ul>
-<p>
-Interim non-public release.
-Special thanks to Adam D. Moss for reporting most of the bugs.
-</p>
-
-<h2 id="LuaJIT-1.0.0">LuaJIT 1.0.0 &mdash; 2005-08-29</h2>
-<p>
-This is the initial non-public release of LuaJIT.
-</p>
-</div>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>LuaJIT Change History</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+div.major { max-width: 600px; padding: 1em; margin: 1em 0 1em 0; }
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>LuaJIT Change History</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a class="current" href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+This is a list of changes between the released versions of LuaJIT.<br>
+The current <span style="color: #0000c0;">stable version</span> is <strong>LuaJIT&nbsp;2.0.2</strong>.<br>
+</p>
+<p>
+Please check the
+<a href="http://luajit.org/changes.html"><span class="ext">&raquo;</span>&nbsp;Online Change History</a>
+to see whether newer versions are available.
+</p>
+
+<div class="major" style="background: #d0d0ff;">
+<h2 id="LuaJIT-2.0.2">LuaJIT 2.0.2 &mdash; 2013-06-03</h2>
+<ul>
+<li>Fix memory access check for fast string interning.</li>
+<li>Fix MSVC intrinsics for older versions.</li>
+<li>Add missing GC steps for <tt>io.*</tt> functions.</li>
+<li>Fix spurious red zone overflows in machine code generation.</li>
+<li>Fix jump-range constrained mcode allocation.</li>
+<li>Inhibit DSE for implicit loads via calls.</li>
+<li>Fix builtin string to number conversion for overflow digits.</li>
+<li>Fix optional argument handling while recording builtins.</li>
+<li>Fix optional argument handling in <tt>table.concat()</tt>.</li>
+<li>Add partial support for building with MingW64 GCC 4.8-SEH.</li>
+<li>Add missing PHI barrier to <tt>string.sub(str, a, b) == kstr</tt> FOLD rule.</li>
+<li>Fix compatibility issues with Illumos.</li>
+<li>ARM: Fix cache flush/sync for exit stubs of JIT-compiled code.</li>
+<li>MIPS: Fix cache flush/sync for JIT-compiled code jump area.</li>
+<li>PPC: Add <tt>plt</tt> suffix for external calls from assembler code.</li>
+<li>FFI: Fix snapshot substitution in SPLIT pass.</li>
+<li>FFI/x86: Fix register allocation for 64 bit comparisons.</li>
+<li>FFI: Fix tailcall in lowest frame to C&nbsp;function with bool result.</li>
+<li>FFI: Ignore <tt>long</tt> type specifier in <tt>ffi.istype()</tt>.</li>
+<li>FFI: Fix calling conventions for 32 bit OSX and iOS simulator (struct returns).</li>
+<li>FFI: Fix calling conventions for ARM hard-float EABI (nested structs).</li>
+<li>FFI: Improve error messages for arithmetic and comparison operators.</li>
+<li>FFI: Insert no-op type conversion for pointer to integer cast.</li>
+<li>FFI: Fix unroll limit for <tt>ffi.fill()</tt>.</li>
+<li>FFI: Must sink <tt>XBAR</tt> together with <tt>XSTORE</tt>s.</li>
+<li>FFI: Preserve intermediate string for <tt>const&nbsp;char&nbsp;*</tt> conversion.</li>
+</ul>
+
+<h2 id="LuaJIT-2.0.1">LuaJIT 2.0.1 &mdash; 2013-02-19</h2>
+<ul>
+<li>Don't clear frame for out-of-memory error.</li>
+<li>Leave hook when resume catches error thrown from hook.</li>
+<li>Add missing GC steps for template table creation.</li>
+<li>Fix discharge order of comparisons in Lua parser.</li>
+<li>Improve buffer handling for <tt>io.read()</tt>.</li>
+<li>OSX: Add support for Mach-O object files to <tt>-b</tt> option.</li>
+<li>Fix PS3 port.</li>
+<li>Fix/enable Xbox 360 port.</li>
+<li>x86/x64: Always mark ref for shift count as non-weak.</li>
+<li>x64: Don't fuse implicitly 32-to-64 extended operands.</li>
+<li>ARM: Fix armhf call argument handling.</li>
+<li>ARM: Fix code generation for integer math.min/math.max.</li>
+<li>PPC/e500: Fix <tt>lj_vm_floor()</tt> for Inf/NaN.</li>
+<li>FFI: Change priority of table initializer variants for structs.</li>
+<li>FFI: Fix code generation for bool call result check on x86/x64.</li>
+<li>FFI: Load FFI library on-demand for bytecode with cdata literals.</li>
+<li>FFI: Fix handling of qualified transparent structs/unions.</li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0">LuaJIT 2.0.0 &mdash; 2012-11-08</h2>
+<ul>
+<li>Correctness and completeness:
+<ul>
+ <li>Fix Android/x86 build.</li>
+ <li>Fix recording of equality comparisons with <tt>__eq</tt> metamethods.</li>
+ <li>Fix detection of immutable upvalues.</li>
+ <li>Replace error with PANIC for callbacks from JIT-compiled code.</li>
+ <li>Fix builtin string to number conversion for <tt>INT_MIN</tt>.</li>
+ <li>Don't create unneeded array part for template tables.</li>
+ <li>Fix <tt>CONV.num.int</tt> sinking.</li>
+ <li>Don't propagate implicitly widened number to index metamethods.</li>
+ <li>ARM: Fix ordered comparisons of number vs. non-number.</li>
+ <li>FFI: Fix code generation for replay of sunk float fields.</li>
+ <li>FFI: Fix signedness of bool.</li>
+ <li>FFI: Fix recording of bool call result check on x86/x64.</li>
+ <li>FFI: Fix stack-adjustment for <tt>__thiscall</tt> callbacks.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta11">LuaJIT 2.0.0-beta11 &mdash; 2012-10-16</h2>
+<ul>
+<li>New features:
+<ul>
+ <li>Use ARM VFP instructions, if available (build-time detection).</li>
+ <li>Add support for ARM hard-float EABI (<tt>armhf</tt>).</li>
+ <li>Add PS3 port.</li>
+ <li>Add many features from Lua&nbsp;5.2, e.g. <tt>goto</tt>/labels.
+ Refer to <a href="extensions.html#lua52">this list</a>.</li>
+ <li>FFI: Add parameterized C types.</li>
+ <li>FFI: Add support for copy constructors.</li>
+ <li>FFI: Equality comparisons never raise an error (treat as unequal instead).</li>
+ <li>FFI: Box all accessed or returned enums.</li>
+ <li>FFI: Check for <tt>__new</tt> metamethod when calling a constructor.</li>
+ <li>FFI: Handle <tt>__pairs</tt>/<tt>__ipairs</tt> metamethods for cdata objects.</li>
+ <li>FFI: Convert <tt>io.*</tt> file handle to <tt>FILE *</tt> pointer (but as a <tt>void *</tt>).</li>
+ <li>FFI: Detect and support type punning through unions.</li>
+ <li>FFI: Improve various error messages.</li>
+</ul></li>
+<li>Build-system reorganization:
+<ul>
+ <li>Reorganize directory layout:<br>
+ <tt>lib/*</tt> &rarr; <tt>src/jit/*</tt><br>
+ <tt>src/buildvm_*.dasc</tt> &rarr; <tt>src/vm_*.dasc</tt><br>
+ <tt>src/buildvm_*.h</tt> &rarr; removed<br>
+ <tt>src/buildvm*</tt> &rarr; <tt>src/host/*</tt></li>
+ <li>Add minified Lua interpreter plus Lua BitOp (<tt>minilua</tt>) to run DynASM.</li>
+ <li>Change DynASM bit operations to use Lua BitOp.</li>
+ <li>Translate only <tt>vm_*.dasc</tt> for detected target architecture.</li>
+ <li>Improve target detection for <tt>msvcbuild.bat</tt>.</li>
+ <li>Fix build issues on Cygwin and MinGW with optional MSys.</li>
+ <li>Handle cross-compiles with FPU/no-FPU or hard-fp/soft-fp ABI mismatch.</li>
+ <li>Remove some library functions for no-JIT/no-FFI builds.</li>
+ <li>Add uninstall target to top-level Makefile.</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+ <li>Preserve snapshot #0 PC for all traces.</li>
+ <li>Fix argument checks for <tt>coroutine.create()</tt>.</li>
+ <li>Command line prints version and JIT status to <tt>stdout</tt>, not <tt>stderr</tt>.</li>
+ <li>Fix userdata <tt>__gc</tt> separations at Lua state close.</li>
+ <li>Fix <tt>TDUP</tt> to <tt>HLOAD</tt> forwarding for <tt>LJ_DUALNUM</tt> builds.</li>
+ <li>Fix buffer check in bytecode writer.</li>
+ <li>Make <tt>os.date()</tt> thread-safe.</li>
+ <li>Add missing declarations for MSVC intrinsics.</li>
+ <li>Fix dispatch table modifications for return hooks.</li>
+ <li>Workaround for MSVC conversion bug (<tt>double</tt> &rarr; <tt>uint32_t</tt> &rarr; <tt>int32_t</tt>).</li>
+ <li>Fix FOLD rule <tt>(i-j)-i => 0-j</tt>.</li>
+ <li>Never use DWARF unwinder on Windows.</li>
+ <li>Fix shrinking of direct mapped blocks in builtin allocator.</li>
+ <li>Limit recursion depth in <tt>string.match()</tt> et al.</li>
+ <li>Fix late despecialization of <tt>ITERN</tt> after loop has been entered.</li>
+ <li>Fix <tt>'f'</tt> and <tt>'L'</tt> options for <tt>debug.getinfo()</tt> and <tt>lua_getinfo()</tt>.</li>
+ <li>Fix <tt>package.searchpath()</tt>.</li>
+ <li>OSX: Change dylib names to be consistent with other platforms.</li>
+ <li>Android: Workaround for broken <tt>sprintf("%g",&nbsp;-0.0)</tt>.</li>
+ <li>x86: Remove support for ancient CPUs without <tt>CMOV</tt> (before Pentium Pro).</li>
+ <li>x86: Fix register allocation for calls returning register pair.</li>
+ <li>x86/x64: Fix fusion of unsigned byte comparisons with swapped operands.</li>
+ <li>ARM: Fix <tt>tonumber()</tt> argument check.</li>
+ <li>ARM: Fix modulo operator and <tt>math.floor()</tt>/<tt>math.ceil()</tt> for <tt>inf</tt>/<tt>nan</tt>.</li>
+ <li>ARM: Invoke SPLIT pass for leftover <tt>IR_TOBIT</tt>.</li>
+ <li>ARM: Fix BASE register coalescing.</li>
+ <li>PPC: Fix interpreter state setup in callbacks.</li>
+ <li>PPC: Fix <tt>string.sub()</tt> range check.</li>
+ <li>MIPS: Support generation of MIPS/MIPSEL bytecode object files.</li>
+ <li>MIPS: Fix calls to <tt>floor()</tt>/<tt>ceil()</tt>/<tt>trunc()</tt>.</li>
+ <li>ARM/PPC: Detect more target architecture variants.</li>
+ <li>ARM/PPC/e500/MIPS: Fix tailcalls from fast functions, esp. <tt>tostring()</tt>.</li>
+ <li>ARM/PPC/MIPS: Fix rematerialization of FP constants.</li>
+ <li>FFI: Don't call <tt>FreeLibrary()</tt> on our own EXE/DLL.</li>
+ <li>FFI: Resolve metamethods for constructors, too.</li>
+ <li>FFI: Properly disable callbacks on iOS (would require executable memory).</li>
+ <li>FFI: Fix cdecl string parsing during recording.</li>
+ <li>FFI: Show address pointed to for <tt>tostring(ref)</tt>, too.</li>
+ <li>FFI: Fix alignment of C call argument/return structure.</li>
+ <li>FFI: Initialize all fields of standard types.</li>
+ <li>FFI: Fix callback handling when new C&nbsp;types are declared in callback.</li>
+ <li>FFI: Fix recording of constructors for pointers.</li>
+ <li>FFI: Always resolve metamethods for pointers to structs.</li>
+ <li>FFI: Correctly propagate alignment when interning nested types.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+ <li>Add allocation sinking and store sinking optimization.</li>
+ <li>Constify immutable upvalues.</li>
+ <li>Add builtin string to integer or FP number conversion. Improves cross-platform consistency and correctness.</li>
+ <li>Create string hash slots in template tables for non-const values, too. Avoids later table resizes.</li>
+ <li>Eliminate <tt>HREFK</tt> guard for template table references.</li>
+ <li>Add various new FOLD rules.</li>
+ <li>Don't use stack unwinding for <tt>lua_yield()</tt> (slow on x64).</li>
+ <li>ARM, PPC, MIPS: Improve <tt>XLOAD</tt> operand fusion and register hinting.</li>
+ <li>PPC, MIPS: Compile <tt>math.sqrt()</tt> to sqrt instruction, if available.</li>
+ <li>FFI: Fold <tt>KPTR</tt> + constant offset in SPLIT pass.</li>
+ <li>FFI: Optimize/inline <tt>ffi.copy()</tt> and <tt>ffi.fill()</tt>.</li>
+ <li>FFI: Compile and optimize array/struct copies.</li>
+ <li>FFI: Compile <tt>ffi.typeof(cdata|ctype)</tt>, <tt>ffi.sizeof()</tt>, <tt>ffi.alignof()</tt>, <tt>ffi.offsetof()</tt> and <tt>ffi.gc()</tt>.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta10">LuaJIT 2.0.0-beta10 &mdash; 2012-05-09</h2>
+<ul>
+<li>New features:
+<ul>
+<li>The MIPS port of LuaJIT is complete. It requires a CPU conforming to the
+MIPS32&nbsp;R1 architecture with hardware FPU. O32 hard-fp ABI,
+little-endian or big-endian.</li>
+<li>Auto-detect target arch via cross-compiler. No need for
+<tt>TARGET=arch</tt> anymore.</li>
+<li>Make DynASM compatible with Lua 5.2.</li>
+<li>From Lua 5.2: Try <tt>__tostring</tt> metamethod on non-string error
+messages.</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+<li>Fix parsing of hex literals with exponents.</li>
+<li>Fix bytecode dump for certain number constants.</li>
+<li>Fix argument type in error message for relative arguments.</li>
+<li>Fix argument error handling on Lua stacks without a frame.</li>
+<li>Add missing mcode limit check in assembler backend.</li>
+<li>Fix compilation on OpenBSD.</li>
+<li>Avoid recursive GC steps after GC-triggered trace exit.</li>
+<li>Replace <tt>&lt;unwind.h&gt;</tt> definitions with our own.</li>
+<li>Fix OSX build issues. Bump minimum required OSX version to 10.4.</li>
+<li>Fix discharge order of comparisons in Lua parser.</li>
+<li>Ensure running <tt>__gc</tt> of userdata created in <tt>__gc</tt>
+at state close.</li>
+<li>Limit number of userdata <tt>__gc</tt> separations at state close.</li>
+<li>Fix bytecode <tt>JMP</tt> slot range when optimizing
+<tt>and</tt>/<tt>or</tt> with constant LHS.</li>
+<li>Fix DSE of <tt>USTORE</tt>.</li>
+<li>Make <tt>lua_concat()</tt> work from C&nbsp;hook with partial frame.</li>
+<li>Add required PHIs for implicit conversions, e.g. via <tt>XREF</tt>
+forwarding.</li>
+<li>Add more comparison variants to Valgrind suppressions file.</li>
+<li>Disable loading bytecode with an extra header (BOM or <tt>#!</tt>).</li>
+<li>Fix PHI stack slot syncing.</li>
+<li>ARM: Reorder type/value tests to silence Valgrind.</li>
+<li>ARM: Fix register allocation for <tt>ldrd</tt>-optimized
+<tt>HREFK</tt>.</li>
+<li>ARM: Fix conditional branch fixup for <tt>OBAR</tt>.</li>
+<li>ARM: Invoke SPLIT pass for <tt>double</tt> args in FFI call.</li>
+<li>ARM: Handle all <tt>CALL*</tt> ops with <tt>double</tt> results in
+SPLIT pass.</li>
+<li>ARM: Fix rejoin of <tt>POW</tt> in SPLIT pass.</li>
+<li>ARM: Fix compilation of <tt>math.sinh</tt>, <tt>math.cosh</tt>,
+<tt>math.tanh</tt>.</li>
+<li>ARM, PPC: Avoid pointless arg clearing in <tt>BC_IFUNCF</tt>.</li>
+<li>PPC: Fix resume after yield from hook.</li>
+<li>PPC: Fix argument checking for <tt>rawget()</tt>.</li>
+<li>PPC: Fix fusion of floating-point <tt>XLOAD</tt>/<tt>XSTORE</tt>.</li>
+<li>PPC: Fix <tt>HREFK</tt> code generation for huge tables.</li>
+<li>PPC: Use builtin D-Cache/I-Cache sync code.</li>
+</ul></li>
+<li>FFI library:
+<ul>
+<li>Ignore empty statements in <tt>ffi.cdef()</tt>.</li>
+<li>Ignore number parsing errors while skipping definitions.</li>
+<li>Don't touch frame in callbacks with tailcalls to fast functions.</li>
+<li>Fix library unloading on POSIX systems.</li>
+<li>Finalize cdata before userdata when closing the state.</li>
+<li>Change <tt>ffi.load()</tt> library name resolution for Cygwin.</li>
+<li>Fix resolving of function name redirects on Windows/x86.</li>
+<li>Fix symbol resolving error messages on Windows.</li>
+<li>Fix blacklisting of C functions calling callbacks.</li>
+<li>Fix result type of pointer difference.</li>
+<li>Use correct PC in FFI metamethod error message.</li>
+<li>Allow <tt>'typedef _Bool int BOOL;'</tt> for the Windows API.</li>
+<li>Don't record test for bool result of call, if ignored.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta9">LuaJIT 2.0.0-beta9 &mdash; 2011-12-14</h2>
+<ul>
+<li>New features:
+<ul>
+<li>PPC port of LuaJIT is complete. Default is the dual-number port
+(usually faster). Single-number port selectable via <tt>src/Makefile</tt>
+at build time.</li>
+<li>Add FFI callback support.</li>
+<li>Extend <tt>-b</tt> to generate <tt>.c</tt>, <tt>.h</tt> or <tt>.obj/.o</tt>
+files with embedded bytecode.</li>
+<li>Allow loading embedded bytecode with <tt>require()</tt>.</li>
+<li>From Lua 5.2: Change to <tt>'\z'</tt> escape. Reject undefined escape
+sequences.</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+<li>Fix OSX 10.7 build. Fix <tt>install_name</tt> and versioning on OSX.</li>
+<li>Fix iOS build.</li>
+<li>Install <tt>dis_arm.lua</tt>, too.</li>
+<li>Mark installed shared library as executable.</li>
+<li>Add debug option to <tt>msvcbuild.bat</tt> and improve error handling.</li>
+<li>Fix data-flow analysis for iterators.</li>
+<li>Fix forced unwinding triggered by external unwinder.</li>
+<li>Record missing <tt>for</tt> loop slot loads (return to lower frame).</li>
+<li>Always use ANSI variants of Windows system functions.</li>
+<li>Fix GC barrier for multi-result table constructor (<tt>TSETM</tt>).</li>
+<li>Fix/add various FOLD rules.</li>
+<li>Add potential PHI for number conversions due to type instability.</li>
+<li>Do not eliminate PHIs only referenced from other PHIs.</li>
+<li>Correctly anchor implicit number to string conversions in Lua/C API.</li>
+<li>Fix various stack limit checks.</li>
+<li>x64: Use thread-safe exceptions for external unwinding (GCC platforms).</li>
+<li>x64: Fix result type of cdata index conversions.</li>
+<li>x64: Fix <tt>math.random()</tt> and <tt>bit.bswap()</tt> code generation.</li>
+<li>x64: Fix <tt>lightuserdata</tt> comparisons.</li>
+<li>x64: Always extend stack-passed arguments to pointer size.</li>
+<li>ARM: Many fixes to code generation backend.</li>
+<li>PPC/e500: Fix dispatch for binop metamethods.</li>
+<li>PPC/e500: Save/restore condition registers when entering/leaving the VM.</li>
+<li>PPC/e500: Fix write barrier in stores of strings to upvalues.</li>
+</ul></li>
+<li>FFI library:
+<ul>
+<li>Fix C comment parsing.</li>
+<li>Fix snapshot optimization for cdata comparisons.</li>
+<li>Fix recording of const/enum lookups in namespaces.</li>
+<li>Fix call argument and return handling for <tt>I8/U8/I16/U16</tt> types.</li>
+<li>Fix unfused loads of float fields.</li>
+<li>Fix <tt>ffi.string()</tt> recording.</li>
+<li>Save <tt>GetLastError()</tt> around <tt>ffi.load()</tt> and symbol
+resolving, too.</li>
+<li>Improve ld script detection in <tt>ffi.load()</tt>.</li>
+<li>Record loads/stores to external variables in namespaces.</li>
+<li>Compile calls to stdcall, fastcall and vararg functions.</li>
+<li>Treat function ctypes like pointers in comparisons.</li>
+<li>Resolve <tt>__call</tt> metamethod for pointers, too.</li>
+<li>Record C function calls with bool return values.</li>
+<li>Record <tt>ffi.errno()</tt>.</li>
+<li>x86: Fix number to <tt>uint32_t</tt> conversion rounding.</li>
+<li>x86: Fix 64 bit arithmetic in assembler backend.</li>
+<li>x64: Fix struct-by-value calling conventions.</li>
+<li>ARM: Ensure invocation of SPLIT pass for float conversions.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>Display trace types with <tt>-jv</tt> and <tt>-jdump</tt>.</li>
+<li>Record isolated calls. But prefer recording loops over calls.</li>
+<li>Specialize to prototype for non-monomorphic functions. Solves the
+trace-explosion problem for closure-heavy programming styles.</li>
+<li>Always generate a portable <tt>vmdef.lua</tt>. Easier for distros.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta8">LuaJIT 2.0.0-beta8 &mdash; 2011-06-23</h2>
+<ul>
+<li>New features:
+<ul>
+<li>Soft-float ARM port of LuaJIT is complete.</li>
+<li>Add support for bytecode loading/saving and <tt>-b</tt> command line
+option.</li>
+<li>From Lua 5.2: <tt>__len</tt> metamethod for tables
+(disabled by default).</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+<li>ARM: Misc. fixes for interpreter.</li>
+<li>x86/x64: Fix <tt>bit.*</tt> argument checking in interpreter.</li>
+<li>Catch early out-of-memory in memory allocator initialization.</li>
+<li>Fix data-flow analysis for paths leading to an upvalue close.</li>
+<li>Fix check for missing arguments in <tt>string.format()</tt>.</li>
+<li>Fix Solaris/x86 build (note: not a supported target).</li>
+<li>Fix recording of loops with unstable directions in side traces.</li>
+<li>x86/x64: Fix fusion of comparisons with <tt>u8</tt>/<tt>u16</tt>
+<tt>XLOAD</tt>.</li>
+<li>x86/x64: Fix register allocation for variable shifts.</li>
+</ul></li>
+<li>FFI library:
+<ul>
+<li>Add <tt>ffi.errno()</tt>. Save <tt>errno</tt>/<tt>GetLastError()</tt>
+around allocations etc.</li>
+<li>Fix <tt>__gc</tt> for VLA/VLS cdata objects.</li>
+<li>Fix recording of casts from 32 bit cdata pointers to integers.</li>
+<li><tt>tonumber(cdata)</tt> returns <tt>nil</tt> for non-numbers.</li>
+<li>Show address pointed to for <tt>tostring(pointer)</tt>.</li>
+<li>Print <tt>NULL</tt> pointers as <tt>"cdata&lt;... *&gt;: NULL"</tt>.</li>
+<li>Support <tt>__tostring</tt> metamethod for pointers to structs, too.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>More tuning for loop unrolling heuristics.</li>
+<li>Flatten and compress in-memory debug info (saves ~70%).</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta7">LuaJIT 2.0.0-beta7 &mdash; 2011-05-05</h2>
+<ul>
+<li>New features:
+<ul>
+<li>ARM port of the LuaJIT interpreter is complete.</li>
+<li>FFI library: Add <tt>ffi.gc()</tt>, <tt>ffi.metatype()</tt>,
+<tt>ffi.istype()</tt>.</li>
+<li>FFI library: Resolve ld script redirection in <tt>ffi.load()</tt>.</li>
+<li>From Lua 5.2: <tt>package.searchpath()</tt>, <tt>fp:read("*L")</tt>,
+<tt>load(string)</tt>.</li>
+<li>From Lua 5.2, disabled by default: empty statement,
+<tt>table.unpack()</tt>, modified <tt>coroutine.running()</tt>.</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+<li>FFI library: numerous fixes.</li>
+<li>Fix type mismatches in store-to-load forwarding.</li>
+<li>Fix error handling within metamethods.</li>
+<li>Fix <tt>table.maxn()</tt>.</li>
+<li>Improve accuracy of <tt>x^-k</tt> on x64.</li>
+<li>Fix code generation for Intel Atom in x64 mode.</li>
+<li>Fix narrowing of POW.</li>
+<li>Fix recording of retried fast functions.</li>
+<li>Fix code generation for <tt>bit.bnot()</tt> and multiplies.</li>
+<li>Fix error location within cpcall frames.</li>
+<li>Add workaround for old libgcc unwind bug.</li>
+<li>Fix <tt>lua_yield()</tt> and <tt>getmetatable(lightuserdata)</tt> on x64.</li>
+<li>Misc. fixes for PPC/e500 interpreter.</li>
+<li>Fix stack slot updates for down-recursion.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>Add dual-number mode (int/double) for the VM. Enabled for ARM.</li>
+<li>Improve narrowing of arithmetic operators and <tt>for</tt> loops.</li>
+<li>Tune loop unrolling heuristics and increase trace recorder limits.</li>
+<li>Eliminate dead slots in snapshots using bytecode data-flow analysis.</li>
+<li>Avoid phantom stores to proxy tables.</li>
+<li>Optimize lookups in empty proxy tables.</li>
+<li>Improve bytecode optimization of <tt>and</tt>/<tt>or</tt> operators.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta6">LuaJIT 2.0.0-beta6 &mdash; 2011-02-11</h2>
+<ul>
+<li>New features:
+<ul>
+<li>PowerPC/e500v2 port of the LuaJIT interpreter is complete.</li>
+<li>Various minor features from Lua 5.2: Hex escapes in literals,
+<tt>'\*'</tt> escape, reversible <tt>string.format("%q",s)</tt>,
+<tt>"%g"</tt> pattern, <tt>table.sort</tt> checks callbacks,
+<tt>os.exit(status|true|false[,close])</tt>.</li>
+<li>Lua 5.2 <tt>__pairs</tt> and <tt>__ipairs</tt> metamethods
+(disabled by default).</li>
+<li>Initial release of the FFI library.</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+<li>Fix <tt>string.format()</tt> for non-finite numbers.</li>
+<li>Fix memory leak when compiled to use the built-in allocator.</li>
+<li>x86/x64: Fix unnecessary resize in <tt>TSETM</tt> bytecode.</li>
+<li>Fix various GC issues with traces and <tt>jit.flush()</tt>.</li>
+<li>x64: Fix fusion of indexes for array references.</li>
+<li>x86/x64: Fix stack overflow handling for coroutine results.</li>
+<li>Enable low-2GB memory allocation on FreeBSD/x64.</li>
+<li>Fix <tt>collectgarbage("count")</tt> result if more than 2GB is in use.</li>
+<li>Fix parsing of hex floats.</li>
+<li>x86/x64: Fix loop branch inversion with trailing
+<tt>HREF+NE/EQ</tt>.</li>
+<li>Add <tt>jit.os</tt> string.</li>
+<li><tt>coroutine.create()</tt> permits running C functions, too.</li>
+<li>Fix OSX build to work with newer ld64 versions.</li>
+<li>Fix bytecode optimization of <tt>and</tt>/<tt>or</tt> operators.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>Emit specialized bytecode for <tt>pairs()</tt>/<tt>next()</tt>.</li>
+<li>Improve bytecode coalescing of <tt>nil</tt> constants.</li>
+<li>Compile calls to vararg functions.</li>
+<li>Compile <tt>select()</tt>.</li>
+<li>Improve alias analysis, esp. for loads from allocations.</li>
+<li>Tuning of various compiler heuristics.</li>
+<li>Refactor and extend IR conversion instructions.</li>
+<li>x86/x64: Various backend enhancements related to the FFI.</li>
+<li>Add SPLIT pass to split 64 bit IR instructions for 32 bit CPUs.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta5">LuaJIT 2.0.0-beta5 &mdash; 2010-08-24</h2>
+<ul>
+<li>Correctness and completeness:
+<ul>
+<li>Fix trace exit dispatch to function headers.</li>
+<li>Fix Windows and OSX builds with LUAJIT_DISABLE_JIT.</li>
+<li>Reorganize and fix placement of generated machine code on x64.</li>
+<li>Fix TNEW in x64 interpreter.</li>
+<li>Do not eliminate PHIs for values only referenced from side exits.</li>
+<li>OS-independent canonicalization of strings for non-finite numbers.</li>
+<li>Fix <tt>string.char()</tt> range check on x64.</li>
+<li>Fix <tt>tostring()</tt> resolving within <tt>print()</tt>.</li>
+<li>Fix error handling for <tt>next()</tt>.</li>
+<li>Fix passing of constant arguments to external calls on x64.</li>
+<li>Fix interpreter argument check for two-argument SSE math functions.</li>
+<li>Fix C frame chain corruption caused by <tt>lua_cpcall()</tt>.</li>
+<li>Fix return from <tt>pcall()</tt> within active hook.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>Replace on-trace GC frame syncing with interpreter exit.</li>
+<li>Improve hash lookup specialization by not removing dead keys during GC.</li>
+<li>Turn traces into true GC objects.</li>
+<li>Avoid starting a GC cycle immediately after library init.</li>
+<li>Add weak guards to improve dead-code elimination.</li>
+<li>Speed up string interning.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta4">LuaJIT 2.0.0-beta4 &mdash; 2010-03-28</h2>
+<ul>
+<li>Correctness and completeness:
+<ul>
+<li>Fix precondition for on-trace creation of table keys.</li>
+<li>Fix <tt>{f()}</tt> on x64 when table is resized.</li>
+<li>Fix folding of ordered comparisons with same references.</li>
+<li>Fix snapshot restores for multi-result bytecodes.</li>
+<li>Fix potential hang when recording bytecode with nested closures.</li>
+<li>Fix recording of <tt>getmetatable()</tt>, <tt>tonumber()</tt> and bad argument types.</li>
+<li>Fix SLOAD fusion across returns to lower frames.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>Add array bounds check elimination. <tt>-Oabc</tt> is enabled by default.</li>
+<li>More tuning for x64, e.g. smaller table objects.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta3">LuaJIT 2.0.0-beta3 &mdash; 2010-03-07</h2>
+<ul>
+<li>LuaJIT x64 port:
+<ul>
+<li>Port integrated memory allocator to Linux/x64, Windows/x64 and OSX/x64.</li>
+<li>Port interpreter and JIT compiler to x64.</li>
+<li>Port DynASM to x64.</li>
+<li>Many 32/64 bit cleanups in the VM.</li>
+<li>Allow building the interpreter with either x87 or SSE2 arithmetics.</li>
+<li>Add external unwinding and C++ exception interop (default on x64).</li>
+</ul></li>
+<li>Correctness and completeness:
+<ul>
+<li>Fix constructor bytecode generation for certain conditional values.</li>
+<li>Fix some cases of ordered string comparisons.</li>
+<li>Fix <tt>lua_tocfunction()</tt>.</li>
+<li>Fix cutoff register in JMP bytecode for some conditional expressions.</li>
+<li>Fix PHI marking algorithm for references from variant slots.</li>
+<li>Fix <tt>package.cpath</tt> for non-default PREFIX.</li>
+<li>Fix DWARF2 frame unwind information for interpreter on OSX.</li>
+<li>Drive the GC forward on string allocations in the parser.</li>
+<li>Implement call/return hooks (zero-cost if disabled).</li>
+<li>Implement yield from C hooks.</li>
+<li>Disable JIT compiler on older non-SSE2 CPUs instead of aborting.</li>
+</ul></li>
+<li>Structural and performance enhancements:
+<ul>
+<li>Compile recursive code (tail-, up- and down-recursion).</li>
+<li>Improve heuristics for bytecode penalties and blacklisting.</li>
+<li>Split CALL/FUNC recording and clean up fast function call semantics.</li>
+<li>Major redesign of internal function call handling.</li>
+<li>Improve FOR loop const specialization and integerness checks.</li>
+<li>Switch to pre-initialized stacks. Avoid frame-clearing.</li>
+<li>Colocation of prototypes and related data: bytecode, constants, debug info.</li>
+<li>Cleanup parser and streamline bytecode generation.</li>
+<li>Add support for weak IR references to register allocator.</li>
+<li>Switch to compressed, extensible snapshots.</li>
+<li>Compile returns to frames below the start frame.</li>
+<li>Improve alias analysis of upvalues using a disambiguation hash value.</li>
+<li>Compile floor/ceil/trunc to SSE2 helper calls or SSE4.1 instructions.</li>
+<li>Add generic C call handling to IR and backend.</li>
+<li>Improve KNUM fuse vs. load heuristics.</li>
+<li>Compile various <tt>io.*()</tt> functions.</li>
+<li>Compile <tt>math.sinh()</tt>, <tt>math.cosh()</tt>, <tt>math.tanh()</tt>
+and <tt>math.random()</tt>.</li>
+</ul></li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta2">LuaJIT 2.0.0-beta2 &mdash; 2009-11-09</h2>
+<ul>
+<li>Reorganize build system. Build static+shared library on POSIX.</li>
+<li>Allow C++ exception conversion on all platforms
+using a wrapper function.</li>
+<li>Automatically catch C++ exceptions and rethrow Lua error
+(DWARF2 only).</li>
+<li>Check for the correct x87 FPU precision at strategic points.</li>
+<li>Always use wrappers for libm functions.</li>
+<li>Resurrect metamethod name strings before copying them.</li>
+<li>Mark current trace, even if compiler is idle.</li>
+<li>Ensure FILE metatable is created only once.</li>
+<li>Fix type comparisons when different integer types are involved.</li>
+<li>Fix <tt>getmetatable()</tt> recording.</li>
+<li>Fix TDUP with dead keys in template table.</li>
+<li><tt>jit.flush(tr)</tt> returns status.
+Prevent manual flush of a trace that's still linked.</li>
+<li>Improve register allocation heuristics for invariant references.</li>
+<li>Compile the push/pop variants of <tt>table.insert()</tt> and
+<tt>table.remove()</tt>.</li>
+<li>Compatibility with MSVC <tt>link&nbsp;/debug</tt>.</li>
+<li>Fix <tt>lua_iscfunction()</tt>.</li>
+<li>Fix <tt>math.random()</tt> when compiled with <tt>-fpic</tt> (OSX).</li>
+<li>Fix <tt>table.maxn()</tt>.</li>
+<li>Bump <tt>MACOSX_DEPLOYMENT_TARGET</tt> to <tt>10.4</tt>.</li>
+<li><tt>luaL_check*()</tt> and <tt>luaL_opt*()</tt> now support
+negative arguments, too.<br>
+This matches the behavior of Lua 5.1, but not the specification.</li>
+</ul>
+
+<h2 id="LuaJIT-2.0.0-beta1">LuaJIT 2.0.0-beta1 &mdash; 2009-10-31</h2>
+<ul>
+<li>This is the first public release of LuaJIT 2.0.</li>
+<li>The whole VM has been rewritten from the ground up, so there's
+no point in listing differences over earlier versions.</li>
+</ul>
+</div>
+
+<div class="major" style="background: #ffff80;">
+<h2 id="LuaJIT-1.1.8">LuaJIT 1.1.8 &mdash; 2012-04-16</h2>
+<ul>
+<li>Merged with Lua 5.1.5. Also integrated fixes for all
+<a href="http://www.lua.org/bugs.html#5.1.5"><span class="ext">&raquo;</span>&nbsp;<span class="ext">&raquo;</span>&nbsp;currently known bugs in Lua 5.1.5</a>.</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.7">LuaJIT 1.1.7 &mdash; 2011-05-05</h2>
+<ul>
+<li>Added fixes for the
+<a href="http://www.lua.org/bugs.html#5.1.4"><span class="ext">&raquo;</span>&nbsp;currently known bugs in Lua 5.1.4</a>.</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.6">LuaJIT 1.1.6 &mdash; 2010-03-28</h2>
+<ul>
+<li>Added fixes for the
+<a href="http://www.lua.org/bugs.html#5.1.4"><span class="ext">&raquo;</span>&nbsp;currently known bugs in Lua 5.1.4</a>.</li>
+<li>Removed wrong GC check in <tt>jit_createstate()</tt>.
+Thanks to Tim Mensch.</li>
+<li>Fixed bad assertions while compiling <tt>table.insert()</tt> and
+<tt>table.remove()</tt>.</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.5">LuaJIT 1.1.5 &mdash; 2008-10-25</h2>
+<ul>
+<li>Merged with Lua 5.1.4. Fixes all
+<a href="http://www.lua.org/bugs.html#5.1.3"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1.3</a>.</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.4">LuaJIT 1.1.4 &mdash; 2008-02-05</h2>
+<ul>
+<li>Merged with Lua 5.1.3. Fixes all
+<a href="http://www.lua.org/bugs.html#5.1.2"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1.2</a>.</li>
+<li>Fixed possible (but unlikely) stack corruption while compiling
+<tt>k^x</tt> expressions.</li>
+<li>Fixed DynASM template for cmpss instruction.</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.3">LuaJIT 1.1.3 &mdash; 2007-05-24</h2>
+<ul>
+<li>Merged with Lua 5.1.2. Fixes all
+<a href="http://www.lua.org/bugs.html#5.1.1"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1.1</a>.</li>
+<li>Merged pending Lua 5.1.x fixes: "return -nil" bug, spurious count hook call.</li>
+<li>Remove a (sometimes) wrong assertion in <tt>luaJIT_findpc()</tt>.</li>
+<li>DynASM now allows labels for displacements and <tt>.aword</tt>.</li>
+<li>Fix some compiler warnings for DynASM glue (internal API change).</li>
+<li>Correct naming for SSSE3 (temporarily known as SSE4) in DynASM and x86 disassembler.</li>
+<li>The loadable debug modules now handle redirection to stdout
+(e.g. <tt>-j&nbsp;trace=-</tt>).</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.2">LuaJIT 1.1.2 &mdash; 2006-06-24</h2>
+<ul>
+<li>Fix MSVC inline assembly: use only local variables with
+<tt>lua_number2int()</tt>.</li>
+<li>Fix "attempt to call a thread value" bug on Mac OS X:
+make values of consts used as lightuserdata keys unique
+to avoid joining by the compiler/linker.</li>
+</ul>
+
+<h2 id="LuaJIT-1.1.1">LuaJIT 1.1.1 &mdash; 2006-06-20</h2>
+<ul>
+<li>Merged with Lua 5.1.1. Fixes all
+<a href="http://www.lua.org/bugs.html#5.1"><span class="ext">&raquo;</span>&nbsp;known bugs in Lua 5.1</a>.</li>
+<li>Enforce (dynamic) linker error for EXE/DLL version mismatches.</li>
+<li>Minor changes to DynASM: faster pre-processing, smaller encoding
+for some immediates.</li>
+</ul>
+<p>
+This release is in sync with Coco 1.1.1 (see the
+<a href="http://coco.luajit.org/changes.html"><span class="ext">&raquo;</span>&nbsp;Coco Change History</a>).
+</p>
+
+<h2 id="LuaJIT-1.1.0">LuaJIT 1.1.0 &mdash; 2006-03-13</h2>
+<ul>
+<li>Merged with Lua 5.1 (final).</li>
+
+<li>New JIT call frame setup:
+<ul>
+<li>The C stack is kept 16 byte aligned (faster).
+Mandatory for Mac OS X on Intel, too.</li>
+<li>Faster calling conventions for internal C helper functions.</li>
+<li>Better instruction scheduling for function prologue, OP_CALL and
+OP_RETURN.</li>
+</ul></li>
+
+<li>Miscellaneous optimizations:
+<ul>
+<li>Faster loads of FP constants. Remove narrow-to-wide store-to-load
+forwarding stalls.</li>
+<li>Use (scalar) SSE2 ops (if the CPU supports it) to speed up slot moves
+and FP to integer conversions.</li>
+<li>Optimized the two-argument form of <tt>OP_CONCAT</tt> (<tt>a..b</tt>).</li>
+<li>Inlined <tt>OP_MOD</tt> (<tt>a%b</tt>).
+With better accuracy than the C variant, too.</li>
+<li>Inlined <tt>OP_POW</tt> (<tt>a^b</tt>). Unroll <tt>x^k</tt> or
+use <tt>k^x = 2^(log2(k)*x)</tt> or call <tt>pow()</tt>.</li>
+</ul></li>
+
+<li>Changes in the optimizer:
+<ul>
+<li>Improved hinting for table keys derived from table values
+(<tt>t1[t2[x]]</tt>).</li>
+<li>Lookup hinting now works with arbitrary object types and
+supports index chains, too.</li>
+<li>Generate type hints for arithmetic and comparison operators,
+OP_LEN, OP_CONCAT and OP_FORPREP.</li>
+<li>Remove several hint definitions in favour of a generic COMBINE hint.</li>
+<li>Complete rewrite of <tt>jit.opt_inline</tt> module
+(ex <tt>jit.opt_lib</tt>).</li>
+</ul></li>
+
+<li>Use adaptive deoptimization:
+<ul>
+<li>If runtime verification of a contract fails, the affected
+instruction is recompiled and patched on-the-fly.
+Regular programs will trigger deoptimization only occasionally.</li>
+<li>This avoids generating code for uncommon fallback cases
+most of the time. Generated code is up to 30% smaller compared to
+LuaJIT&nbsp;1.0.3.</li>
+<li>Deoptimization is used for many opcodes and contracts:
+<ul>
+<li>OP_CALL, OP_TAILCALL: type mismatch for callable.</li>
+<li>Inlined calls: closure mismatch, parameter number and type mismatches.</li>
+<li>OP_GETTABLE, OP_SETTABLE: table or key type and range mismatches.</li>
+<li>All arithmetic and comparison operators, OP_LEN, OP_CONCAT,
+OP_FORPREP: operand type and range mismatches.</li>
+</ul></li>
+<li>Complete redesign of the debug and traceback info
+(bytecode &harr; mcode) to support deoptimization.
+Much more flexible and needs only 50% of the space.</li>
+<li>The modules <tt>jit.trace</tt>, <tt>jit.dumphints</tt> and
+<tt>jit.dump</tt> handle deoptimization.</li>
+</ul></li>
+
+<li>Inlined many popular library functions
+(for commonly used arguments only):
+<ul>
+<li>Most <tt>math.*</tt> functions (the 18 most used ones)
+[2x-10x faster].</li>
+<li><tt>string.len</tt>, <tt>string.sub</tt> and <tt>string.char</tt>
+[2x-10x faster].</li>
+<li><tt>table.insert</tt>, <tt>table.remove</tt> and <tt>table.getn</tt>
+[3x-5x faster].</li>
+<li><tt>coroutine.yield</tt> and <tt>coroutine.resume</tt>
+[3x-5x faster].</li>
+<li><tt>pairs</tt>, <tt>ipairs</tt> and the corresponding iterators
+[8x-15x faster].</li>
+</ul></li>
+
+<li>Changes in the core and loadable modules and the stand-alone executable:
+<ul>
+<li>Added <tt>jit.version</tt>, <tt>jit.version_num</tt>
+and <tt>jit.arch</tt>.</li>
+<li>Reorganized some internal API functions (<tt>jit.util.*mcode*</tt>).</li>
+<li>The <tt>-j dump</tt> output now shows JSUB names, too.</li>
+<li>New x86 disassembler module written in pure Lua. No dependency
+on ndisasm anymore. Flexible API, very compact (500 lines)
+and complete (x87, MMX, SSE, SSE2, SSE3, SSSE3, privileged instructions).</li>
+<li><tt>luajit -v</tt> prints the LuaJIT version and copyright
+on a separate line.</li>
+</ul></li>
+
+<li>Added SSE, SSE2, SSE3 and SSSE3 support to DynASM.</li>
+<li>Miscellaneous doc changes. Added a section about
+<a href="install.html#embedding">embedding LuaJIT</a>.</li>
+</ul>
+<p>
+This release is in sync with Coco 1.1.0 (see the
+<a href="http://coco.luajit.org/changes.html"><span class="ext">&raquo;</span>&nbsp;Coco Change History</a>).
+</p>
+</div>
+
+<div class="major" style="background: #ffffd0;">
+<h2 id="LuaJIT-1.0.3">LuaJIT 1.0.3 &mdash; 2005-09-08</h2>
+<ul>
+<li>Even more docs.</li>
+<li>Unified closure checks in <tt>jit.*</tt>.</li>
+<li>Fixed some range checks in <tt>jit.util.*</tt>.</li>
+<li>Fixed __newindex call originating from <tt>jit_settable_str()</tt>.</li>
+<li>Merged with Lua 5.1 alpha (including early bug fixes).</li>
+</ul>
+<p>
+This is the first public release of LuaJIT.
+</p>
+
+<h2 id="LuaJIT-1.0.2">LuaJIT 1.0.2 &mdash; 2005-09-02</h2>
+<ul>
+<li>Add support for flushing the Valgrind translation cache <br>
+(<tt>MYCFLAGS= -DUSE_VALGRIND</tt>).</li>
+<li>Add support for freeing executable mcode memory to the <tt>mmap()</tt>-based
+variant for POSIX systems.</li>
+<li>Reorganized the C&nbsp;function signature handling in
+<tt>jit.opt_lib</tt>.</li>
+<li>Changed to index-based hints for inlining C&nbsp;functions.
+Still no support in the backend for inlining.</li>
+<li>Hardcode <tt>HEAP_CREATE_ENABLE_EXECUTE</tt> value if undefined.</li>
+<li>Misc. changes to the <tt>jit.*</tt> modules.</li>
+<li>Misc. changes to the Makefiles.</li>
+<li>Lots of new docs.</li>
+<li>Complete doc reorg.</li>
+</ul>
+<p>
+Not released because Lua 5.1 alpha came out today.
+</p>
+
+<h2 id="LuaJIT-1.0.1">LuaJIT 1.0.1 &mdash; 2005-08-31</h2>
+<ul>
+<li>Missing GC step in <tt>OP_CONCAT</tt>.</li>
+<li>Fix result handling for C &ndash;> JIT calls.</li>
+<li>Detect CPU feature bits.</li>
+<li>Encode conditional moves (<tt>fucomip</tt>) only when supported.</li>
+<li>Add fallback instructions for FP compares.</li>
+<li>Add support for <tt>LUA_COMPAT_VARARG</tt>. Still disabled by default.</li>
+<li>MSVC needs a specific place for the <tt>CALLBACK</tt> attribute
+(David Burgess).</li>
+<li>Misc. doc updates.</li>
+</ul>
+<p>
+Interim non-public release.
+Special thanks to Adam D. Moss for reporting most of the bugs.
+</p>
+
+<h2 id="LuaJIT-1.0.0">LuaJIT 1.0.0 &mdash; 2005-08-29</h2>
+<p>
+This is the initial non-public release of LuaJIT.
+</p>
+</div>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/contact.html b/3rdparty/lua/doc/contact.html
index d791e12..4735faf 100644
--- a/3rdparty/lua/doc/contact.html
+++ b/3rdparty/lua/doc/contact.html
@@ -1,102 +1,102 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>Contact</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>Contact</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-Please send general questions to the
-<a href="http://luajit.org/list.html"><span class="ext">&raquo;</span>&nbsp;LuaJIT mailing list</a>.
-You can also send any questions you have directly to me:
-</p>
-
-<script type="text/javascript">
-<!--
-var xS="@-:\" .0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ<abc>defghijklmnopqrstuvwxyz";function xD(s)
-{var len=s.length;var r="";for(var i=0;i<len;i++)
-{var c=s.charAt(i);var n=xS.indexOf(c);if(n!=-1)c=xS.charAt(69-n);r+=c;}
-document.write("<"+"p>"+r+"<"+"/p>\n");}
-//-->
-</script>
-<script type="text/javascript">
-<!--
-xD("fyZKB8xv\"FJytmz8.KAB0u52D")
-//--></script>
-<noscript>
-<p><img src="img/contact.png" alt="Contact info in image" width="170" height="13">
-</p>
-</noscript>
-
-<h2>Copyright</h2>
-<p>
-All documentation is
-Copyright &copy; 2005-2015 Mike Pall.
-</p>
-
-
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>Contact</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>Contact</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+Please send general questions to the
+<a href="http://luajit.org/list.html"><span class="ext">&raquo;</span>&nbsp;LuaJIT mailing list</a>.
+You can also send any questions you have directly to me:
+</p>
+
+<script type="text/javascript">
+<!--
+var xS="@-:\" .0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ<abc>defghijklmnopqrstuvwxyz";function xD(s)
+{var len=s.length;var r="";for(var i=0;i<len;i++)
+{var c=s.charAt(i);var n=xS.indexOf(c);if(n!=-1)c=xS.charAt(69-n);r+=c;}
+document.write("<"+"p>"+r+"<"+"/p>\n");}
+//-->
+</script>
+<script type="text/javascript">
+<!--
+xD("fyZKB8xv\"FJytmz8.KAB0u52D")
+//--></script>
+<noscript>
+<p><img src="img/contact.png" alt="Contact info in image" width="170" height="13">
+</p>
+</noscript>
+
+<h2>Copyright</h2>
+<p>
+All documentation is
+Copyright &copy; 2005-2013 Mike Pall.
+</p>
+
+
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/ext_c_api.html b/3rdparty/lua/doc/ext_c_api.html
index 3b82d44..c6feb8e 100644
--- a/3rdparty/lua/doc/ext_c_api.html
+++ b/3rdparty/lua/doc/ext_c_api.html
@@ -1,187 +1,187 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>Lua/C API Extensions</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>Lua/C API Extensions</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a class="current" href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-LuaJIT adds some extensions to the standard Lua/C API. The LuaJIT include
-directory must be in the compiler search path (<tt>-I<i>path</i></tt>)
-to be able to include the required header for C code:
-</p>
-<pre class="code">
-#include "luajit.h"
-</pre>
-<p>
-Or for C++ code:
-</p>
-<pre class="code">
-#include "lua.hpp"
-</pre>
-
-<h2 id="luaJIT_setmode"><tt>luaJIT_setmode(L, idx, mode)</tt>
-&mdash; Control VM</h2>
-<p>
-This is a C API extension to allow control of the VM from C code. The
-full prototype of <tt>LuaJIT_setmode</tt> is:
-</p>
-<pre class="code">
-LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
-</pre>
-<p>
-The returned status is either success (<tt>1</tt>) or failure (<tt>0</tt>).
-The second argument is either <tt>0</tt> or a stack index (similar to the
-other Lua/C API functions).
-</p>
-<p>
-The third argument specifies the mode, which is 'or'ed with a flag.
-The flag can be <tt>LUAJIT_MODE_OFF</tt> to turn a feature on,
-<tt>LUAJIT_MODE_ON</tt> to turn a feature off, or
-<tt>LUAJIT_MODE_FLUSH</tt> to flush cached code.
-</p>
-<p>
-The following modes are defined:
-</p>
-
-<h3 id="mode_engine"><tt>luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|flag)</tt></h3>
-<p>
-Turn the whole JIT compiler on or off or flush the whole cache of compiled code.
-</p>
-
-<h3 id="mode_func"><tt>luaJIT_setmode(L, idx, LUAJIT_MODE_FUNC|flag)</tt><br>
-<tt>luaJIT_setmode(L, idx, LUAJIT_MODE_ALLFUNC|flag)</tt><br>
-<tt>luaJIT_setmode(L, idx, LUAJIT_MODE_ALLSUBFUNC|flag)</tt></h3>
-<p>
-This sets the mode for the function at the stack index <tt>idx</tt> or
-the parent of the calling function (<tt>idx = 0</tt>). It either
-enables JIT compilation for a function, disables it and flushes any
-already compiled code or only flushes already compiled code. This
-applies recursively to all sub-functions of the function with
-<tt>LUAJIT_MODE_ALLFUNC</tt> or only to the sub-functions with
-<tt>LUAJIT_MODE_ALLSUBFUNC</tt>.
-</p>
-
-<h3 id="mode_trace"><tt>luaJIT_setmode(L, trace,<br>
-&nbsp;&nbsp;LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH)</tt></h3>
-<p>
-Flushes the specified root trace and all of its side traces from the cache.
-The code for the trace will be retained as long as there are any other
-traces which link to it.
-</p>
-
-<h3 id="mode_wrapcfunc"><tt>luaJIT_setmode(L, idx, LUAJIT_MODE_WRAPCFUNC|flag)</tt></h3>
-<p>
-This mode defines a wrapper function for calls to C functions. If
-called with <tt>LUAJIT_MODE_ON</tt>, the stack index at <tt>idx</tt>
-must be a <tt>lightuserdata</tt> object holding a pointer to the wrapper
-function. From now on all C functions are called through the wrapper
-function. If called with <tt>LUAJIT_MODE_OFF</tt> this mode is turned
-off and all C functions are directly called.
-</p>
-<p>
-The wrapper function can be used for debugging purposes or to catch
-and convert foreign exceptions. But please read the section on
-<a href="extensions.html#exceptions">C++&nbsp;exception interoperability</a>
-first. Recommended usage can be seen in this C++ code excerpt:
-</p>
-<pre class="code">
-#include &lt;exception&gt;
-#include "lua.hpp"
-
-// Catch C++ exceptions and convert them to Lua error messages.
-// Customize as needed for your own exception classes.
-static int wrap_exceptions(lua_State *L, lua_CFunction f)
-{
- try {
- return f(L); // Call wrapped function and return result.
- } catch (const char *s) { // Catch and convert exceptions.
- lua_pushstring(L, s);
- } catch (std::exception& e) {
- lua_pushstring(L, e.what());
- } catch (...) {
- lua_pushliteral(L, "caught (...)");
- }
- return lua_error(L); // Rethrow as a Lua error.
-}
-
-static int myinit(lua_State *L)
-{
- ...
- // Define wrapper function and enable it.
- lua_pushlightuserdata(L, (void *)wrap_exceptions);
- luaJIT_setmode(L, -1, LUAJIT_MODE_WRAPCFUNC|LUAJIT_MODE_ON);
- lua_pop(L, 1);
- ...
-}
-</pre>
-<p>
-Note that you can only define <b>a single global wrapper function</b>,
-so be careful when using this mechanism from multiple C++ modules.
-Also note that this mechanism is not without overhead.
-</p>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>Lua/C API Extensions</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>Lua/C API Extensions</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a class="current" href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+LuaJIT adds some extensions to the standard Lua/C API. The LuaJIT include
+directory must be in the compiler search path (<tt>-I<i>path</i></tt>)
+to be able to include the required header for C code:
+</p>
+<pre class="code">
+#include "luajit.h"
+</pre>
+<p>
+Or for C++ code:
+</p>
+<pre class="code">
+#include "lua.hpp"
+</pre>
+
+<h2 id="luaJIT_setmode"><tt>luaJIT_setmode(L, idx, mode)</tt>
+&mdash; Control VM</h2>
+<p>
+This is a C API extension to allow control of the VM from C code. The
+full prototype of <tt>luaJIT_setmode</tt> is:
+</p>
+<pre class="code">
+LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
+</pre>
+<p>
+The returned status is either success (<tt>1</tt>) or failure (<tt>0</tt>).
+The second argument is either <tt>0</tt> or a stack index (similar to the
+other Lua/C API functions).
+</p>
+<p>
+The third argument specifies the mode, which is 'or'ed with a flag.
+The flag can be <tt>LUAJIT_MODE_OFF</tt> to turn a feature off,
+<tt>LUAJIT_MODE_ON</tt> to turn a feature on, or
+<tt>LUAJIT_MODE_FLUSH</tt> to flush cached code.
+</p>
+<p>
+The following modes are defined:
+</p>
+
+<h3 id="mode_engine"><tt>luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|flag)</tt></h3>
+<p>
+Turn the whole JIT compiler on or off or flush the whole cache of compiled code.
+</p>
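+<p>
+The following is a minimal, hypothetical sketch (it is not part of the
+original documentation) of how the engine-wide modes could be driven from
+C. It assumes <tt>luajit.h</tt> is included as shown above; the helper name
+is made up for illustration.
+</p>
+<pre class="code">
+#include &lt;stdio.h&gt;
+#include "luajit.h"
+
+/* Toggle the JIT compiler and flush its code cache; report failures. */
+static void jit_engine_demo(lua_State *L)
+{
+  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_OFF))
+    fprintf(stderr, "could not turn the JIT compiler off\n");
+  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_FLUSH))
+    fprintf(stderr, "could not flush the compiled code cache\n");
+  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_ON))
+    fprintf(stderr, "could not turn the JIT compiler on\n");
+}
+</pre>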
+
+<h3 id="mode_func"><tt>luaJIT_setmode(L, idx, LUAJIT_MODE_FUNC|flag)</tt><br>
+<tt>luaJIT_setmode(L, idx, LUAJIT_MODE_ALLFUNC|flag)</tt><br>
+<tt>luaJIT_setmode(L, idx, LUAJIT_MODE_ALLSUBFUNC|flag)</tt></h3>
+<p>
+This sets the mode for the function at the stack index <tt>idx</tt> or
+the parent of the calling function (<tt>idx = 0</tt>). It either
+enables JIT compilation for a function, disables it and flushes any
+already compiled code or only flushes already compiled code. This
+applies recursively to all sub-functions of the function with
+<tt>LUAJIT_MODE_ALLFUNC</tt> or only to the sub-functions with
+<tt>LUAJIT_MODE_ALLSUBFUNC</tt>.
+</p>
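+<p>
+As a hedged example (not from the original text), JIT compilation can be
+disabled for one specific function by pushing it onto the stack first. The
+global name used here is made up for illustration.
+</p>
+<pre class="code">
+/* Disable compilation for this function and flush its compiled code. */
+lua_getglobal(L, "hot_but_misbehaving");  /* hypothetical Lua function */
+luaJIT_setmode(L, -1, LUAJIT_MODE_FUNC|LUAJIT_MODE_OFF);
+lua_pop(L, 1);
+</pre>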
+
+<h3 id="mode_trace"><tt>luaJIT_setmode(L, trace,<br>
+&nbsp;&nbsp;LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH)</tt></h3>
+<p>
+Flushes the specified root trace and all of its side traces from the cache.
+The code for the trace will be retained as long as there are any other
+traces which link to it.
+</p>
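+<p>
+An assumption-laden sketch only: <tt>traceno</tt> stands for a root trace
+number obtained elsewhere (e.g. via the <tt>jit.util</tt> introspection
+functions); it is not defined by this page.
+</p>
+<pre class="code">
+/* Flush one root trace and all of its side traces from the cache. */
+luaJIT_setmode(L, traceno, LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH);
+</pre>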
+
+<h3 id="mode_wrapcfunc"><tt>luaJIT_setmode(L, idx, LUAJIT_MODE_WRAPCFUNC|flag)</tt></h3>
+<p>
+This mode defines a wrapper function for calls to C functions. If
+called with <tt>LUAJIT_MODE_ON</tt>, the stack index at <tt>idx</tt>
+must be a <tt>lightuserdata</tt> object holding a pointer to the wrapper
+function. From now on all C functions are called through the wrapper
+function. If called with <tt>LUAJIT_MODE_OFF</tt> this mode is turned
+off and all C functions are directly called.
+</p>
+<p>
+The wrapper function can be used for debugging purposes or to catch
+and convert foreign exceptions. But please read the section on
+<a href="extensions.html#exceptions">C++&nbsp;exception interoperability</a>
+first. Recommended usage can be seen in this C++ code excerpt:
+</p>
+<pre class="code">
+#include &lt;exception&gt;
+#include "lua.hpp"
+
+// Catch C++ exceptions and convert them to Lua error messages.
+// Customize as needed for your own exception classes.
+static int wrap_exceptions(lua_State *L, lua_CFunction f)
+{
+ try {
+ return f(L); // Call wrapped function and return result.
+ } catch (const char *s) { // Catch and convert exceptions.
+ lua_pushstring(L, s);
+ } catch (std::exception& e) {
+ lua_pushstring(L, e.what());
+ } catch (...) {
+ lua_pushliteral(L, "caught (...)");
+ }
+ return lua_error(L); // Rethrow as a Lua error.
+}
+
+static int myinit(lua_State *L)
+{
+ ...
+ // Define wrapper function and enable it.
+ lua_pushlightuserdata(L, (void *)wrap_exceptions);
+ luaJIT_setmode(L, -1, LUAJIT_MODE_WRAPCFUNC|LUAJIT_MODE_ON);
+ lua_pop(L, 1);
+ ...
+}
+</pre>
+<p>
+Note that you can only define <b>a single global wrapper function</b>,
+so be careful when using this mechanism from multiple C++ modules.
+Also note that this mechanism is not without overhead.
+</p>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/ext_ffi.html b/3rdparty/lua/doc/ext_ffi.html
index a2078ee..a146b05 100644
--- a/3rdparty/lua/doc/ext_ffi.html
+++ b/3rdparty/lua/doc/ext_ffi.html
@@ -1,330 +1,330 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>FFI Library</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>FFI Library</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a class="current" href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-
-The FFI library allows <b>calling external C&nbsp;functions</b> and
-<b>using C&nbsp;data structures</b> from pure Lua code.
-
-</p>
-<p>
-
-The FFI library largely obviates the need to write tedious manual
-Lua/C bindings in C. No need to learn a separate binding language
-&mdash; <b>it parses plain C&nbsp;declarations!</b> These can be
-cut-n-pasted from C&nbsp;header files or reference manuals. It's up to
-the task of binding large libraries without the need for dealing with
-fragile binding generators.
-
-</p>
-<p>
-The FFI library is tightly integrated into LuaJIT (it's not available
-as a separate module). The code generated by the JIT-compiler for
-accesses to C&nbsp;data structures from Lua code is on par with the
-code a C&nbsp;compiler would generate. Calls to C&nbsp;functions can
-be inlined in JIT-compiled code, unlike calls to functions bound via
-the classic Lua/C API.
-</p>
-<p>
-This page gives a short introduction to the usage of the FFI library.
-<em>Please use the FFI sub-topics in the navigation bar to learn more.</em>
-</p>
-
-<h2 id="call">Motivating Example: Calling External C Functions</h2>
-<p>
-It's really easy to call an external C&nbsp;library function:
-</p>
-<pre class="code mark">
-<span class="codemark">&#9312;
-&#9313;
-
-
-&#9314;</span>local ffi = require("ffi")
-ffi.cdef[[
-<span style="color:#00a000;">int printf(const char *fmt, ...);</span>
-]]
-ffi.C.printf("Hello %s!", "world")
-</pre>
-<p>
-So, let's pick that apart:
-</p>
-<p>
-<span class="mark">&#9312;</span> Load the FFI library.
-</p>
-<p>
-<span class="mark">&#9313;</span> Add a C&nbsp;declaration
-for the function. The part inside the double-brackets (in green) is
-just standard C&nbsp;syntax.
-</p>
-<p>
-<span class="mark">&#9314;</span> Call the named
-C&nbsp;function &mdash; Yes, it's that simple!
-</p>
-<p style="font-size: 8pt;">
-Actually, what goes on behind the scenes is far from simple: <span
-style="color:#4040c0;">&#9314;</span> makes use of the standard
-C&nbsp;library namespace <tt>ffi.C</tt>. Indexing this namespace with
-a symbol name (<tt>"printf"</tt>) automatically binds it to the
-standard C&nbsp;library. The result is a special kind of object which,
-when called, runs the <tt>printf</tt> function. The arguments passed
-to this function are automatically converted from Lua objects to the
-corresponding C&nbsp;types.
-</p>
-<p>
-Ok, so maybe the use of <tt>printf()</tt> wasn't such a spectacular
-example. You could have done that with <tt>io.write()</tt> and
-<tt>string.format()</tt>, too. But you get the idea ...
-</p>
-<p>
-So here's something to pop up a message box on Windows:
-</p>
-<pre class="code">
-local ffi = require("ffi")
-ffi.cdef[[
-<span style="color:#00a000;">int MessageBoxA(void *w, const char *txt, const char *cap, int type);</span>
-]]
-ffi.C.MessageBoxA(nil, "Hello world!", "Test", 0)
-</pre>
-<p>
-Bing! Again, that was far too easy, no?
-</p>
-<p style="font-size: 8pt;">
-Compare this with the effort required to bind that function using the
-classic Lua/C API: create an extra C&nbsp;file, add a C&nbsp;function
-that retrieves and checks the argument types passed from Lua and calls
-the actual C&nbsp;function, add a list of module functions and their
-names, add a <tt>luaopen_*</tt> function and register all module
-functions, compile and link it into a shared library (DLL), move it to
-the proper path, add Lua code that loads the module aaaand ... finally
-call the binding function. Phew!
-</p>
-
-<h2 id="cdata">Motivating Example: Using C Data Structures</h2>
-<p>
-The FFI library allows you to create and access C&nbsp;data
-structures. Of course the main use for this is for interfacing with
-C&nbsp;functions. But they can be used stand-alone, too.
-</p>
-<p>
-Lua is built upon high-level data types. They are flexible, extensible
-and dynamic. That's why we all love Lua so much. Alas, this can be
-inefficient for certain tasks, where you'd really want a low-level
-data type. E.g. a large array of a fixed structure needs to be
-implemented with a big table holding lots of tiny tables. This imposes
-both a substantial memory overhead as well as a performance overhead.
-</p>
-<p>
-Here's a sketch of a library that operates on color images plus a
-simple benchmark. First, the plain Lua version:
-</p>
-<pre class="code">
-local floor = math.floor
-
-local function image_ramp_green(n)
- local img = {}
- local f = 255/(n-1)
- for i=1,n do
- img[i] = { red = 0, green = floor((i-1)*f), blue = 0, alpha = 255 }
- end
- return img
-end
-
-local function image_to_grey(img, n)
- for i=1,n do
- local y = floor(0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue)
- img[i].red = y; img[i].green = y; img[i].blue = y
- end
-end
-
-local N = 400*400
-local img = image_ramp_green(N)
-for i=1,1000 do
- image_to_grey(img, N)
-end
-</pre>
-<p>
-This creates a table with 160.000 pixels, each of which is a table
-holding four number values in the range of 0-255. First an image with
-a green ramp is created (1D for simplicity), then the image is
-converted to greyscale 1000 times. Yes, that's silly, but I was in
-need of a simple example ...
-</p>
-<p>
-And here's the FFI version. The modified parts have been marked in
-bold:
-</p>
-<pre class="code mark">
-<span class="codemark">&#9312;
-
-
-
-
-
-&#9313;
-
-&#9314;
-&#9315;
-
-
-
-
-
-
-&#9314;
-&#9316;</span><b>local ffi = require("ffi")
-ffi.cdef[[
-</b><span style="color:#00a000;">typedef struct { uint8_t red, green, blue, alpha; } rgba_pixel;</span><b>
-]]</b>
-
-local function image_ramp_green(n)
- <b>local img = ffi.new("rgba_pixel[?]", n)</b>
- local f = 255/(n-1)
- for i=<b>0,n-1</b> do
- <b>img[i].green = i*f</b>
- <b>img[i].alpha = 255</b>
- end
- return img
-end
-
-local function image_to_grey(img, n)
- for i=<b>0,n-1</b> do
- local y = <b>0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue</b>
- img[i].red = y; img[i].green = y; img[i].blue = y
- end
-end
-
-local N = 400*400
-local img = image_ramp_green(N)
-for i=1,1000 do
- image_to_grey(img, N)
-end
-</pre>
-<p>
-Ok, so that wasn't too difficult:
-</p>
-<p>
-<span class="mark">&#9312;</span> First, load the FFI
-library and declare the low-level data type. Here we choose a
-<tt>struct</tt> which holds four byte fields, one for each component
-of a 4x8&nbsp;bit RGBA pixel.
-</p>
-<p>
-<span class="mark">&#9313;</span> Creating the data
-structure with <tt>ffi.new()</tt> is straightforward &mdash; the
-<tt>'?'</tt> is a placeholder for the number of elements of a
-variable-length array.
-</p>
-<p>
-<span class="mark">&#9314;</span> C&nbsp;arrays are
-zero-based, so the indexes have to run from <tt>0</tt> to
-<tt>n-1</tt>. One might want to allocate one more element instead to
-simplify converting legacy code.
-</p>
-<p>
-<span class="mark">&#9315;</span> Since <tt>ffi.new()</tt>
-zero-fills the array by default, we only need to set the green and the
-alpha fields.
-</p>
-<p>
-<span class="mark">&#9316;</span> The calls to
-<tt>math.floor()</tt> can be omitted here, because floating-point
-numbers are already truncated towards zero when converting them to an
-integer. This happens implicitly when the number is stored in the
-fields of each pixel.
-</p>
-<p>
-Now let's have a look at the impact of the changes: first, memory
-consumption for the image is down from 22&nbsp;Megabytes to
-640&nbsp;Kilobytes (400*400*4 bytes). That's a factor of 35x less! So,
-yes, tables do have a noticeable overhead. BTW: The original program
-would consume 40&nbsp;Megabytes in plain Lua (on x64).
-</p>
-<p>
-Next, performance: the pure Lua version runs in 9.57 seconds (52.9
-seconds with the Lua interpreter) and the FFI version runs in 0.48
-seconds on my machine (YMMV). That's a factor of 20x faster (110x
-faster than the Lua interpreter).
-</p>
-<p style="font-size: 8pt;">
-The avid reader may notice that converting the pure Lua version over
-to use array indexes for the colors (<tt>[1]</tt> instead of
-<tt>.red</tt>, <tt>[2]</tt> instead of <tt>.green</tt> etc.) ought to
-be more compact and faster. This is certainly true (by a factor of
-~1.7x). Switching to a struct-of-arrays would help, too.
-</p>
-<p style="font-size: 8pt;">
-However the resulting code would be less idiomatic and rather
-error-prone. And it still doesn't get even close to the performance of
-the FFI version of the code. Also, high-level data structures cannot
-be easily passed to other C&nbsp;functions, especially I/O functions,
-without undue conversion penalties.
-</p>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>FFI Library</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>FFI Library</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a class="current" href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+
+The FFI library allows <b>calling external C&nbsp;functions</b> and
+<b>using C&nbsp;data structures</b> from pure Lua code.
+
+</p>
+<p>
+
+The FFI library largely obviates the need to write tedious manual
+Lua/C bindings in C. No need to learn a separate binding language
+&mdash; <b>it parses plain C&nbsp;declarations!</b> These can be
+cut-n-pasted from C&nbsp;header files or reference manuals. It's up to
+the task of binding large libraries without the need for dealing with
+fragile binding generators.
+
+</p>
+<p>
+The FFI library is tightly integrated into LuaJIT (it's not available
+as a separate module). The code generated by the JIT-compiler for
+accesses to C&nbsp;data structures from Lua code is on par with the
+code a C&nbsp;compiler would generate. Calls to C&nbsp;functions can
+be inlined in JIT-compiled code, unlike calls to functions bound via
+the classic Lua/C API.
+</p>
+<p>
+This page gives a short introduction to the usage of the FFI library.
+<em>Please use the FFI sub-topics in the navigation bar to learn more.</em>
+</p>
+
+<h2 id="call">Motivating Example: Calling External C Functions</h2>
+<p>
+It's really easy to call an external C&nbsp;library function:
+</p>
+<pre class="code mark">
+<span class="codemark">&#9312;
+&#9313;
+
+
+&#9314;</span>local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">int printf(const char *fmt, ...);</span>
+]]
+ffi.C.printf("Hello %s!", "world")
+</pre>
+<p>
+So, let's pick that apart:
+</p>
+<p>
+<span class="mark">&#9312;</span> Load the FFI library.
+</p>
+<p>
+<span class="mark">&#9313;</span> Add a C&nbsp;declaration
+for the function. The part inside the double-brackets (in green) is
+just standard C&nbsp;syntax.
+</p>
+<p>
+<span class="mark">&#9314;</span> Call the named
+C&nbsp;function &mdash; Yes, it's that simple!
+</p>
+<p style="font-size: 8pt;">
+Actually, what goes on behind the scenes is far from simple: <span
+style="color:#4040c0;">&#9314;</span> makes use of the standard
+C&nbsp;library namespace <tt>ffi.C</tt>. Indexing this namespace with
+a symbol name (<tt>"printf"</tt>) automatically binds it to the
+standard C&nbsp;library. The result is a special kind of object which,
+when called, runs the <tt>printf</tt> function. The arguments passed
+to this function are automatically converted from Lua objects to the
+corresponding C&nbsp;types.
+</p>
+<p>
+Ok, so maybe the use of <tt>printf()</tt> wasn't such a spectacular
+example. You could have done that with <tt>io.write()</tt> and
+<tt>string.format()</tt>, too. But you get the idea ...
+</p>
+<p>
+So here's something to pop up a message box on Windows:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">int MessageBoxA(void *w, const char *txt, const char *cap, int type);</span>
+]]
+ffi.C.MessageBoxA(nil, "Hello world!", "Test", 0)
+</pre>
+<p>
+Bing! Again, that was far too easy, no?
+</p>
+<p style="font-size: 8pt;">
+Compare this with the effort required to bind that function using the
+classic Lua/C API: create an extra C&nbsp;file, add a C&nbsp;function
+that retrieves and checks the argument types passed from Lua and calls
+the actual C&nbsp;function, add a list of module functions and their
+names, add a <tt>luaopen_*</tt> function and register all module
+functions, compile and link it into a shared library (DLL), move it to
+the proper path, add Lua code that loads the module aaaand ... finally
+call the binding function. Phew!
+</p>
+
+<h2 id="cdata">Motivating Example: Using C Data Structures</h2>
+<p>
+The FFI library allows you to create and access C&nbsp;data
+structures. Of course the main use for this is for interfacing with
+C&nbsp;functions. But they can be used stand-alone, too.
+</p>
+<p>
+Lua is built upon high-level data types. They are flexible, extensible
+and dynamic. That's why we all love Lua so much. Alas, this can be
+inefficient for certain tasks, where you'd really want a low-level
+data type. E.g. a large array of a fixed structure needs to be
+implemented with a big table holding lots of tiny tables. This imposes
+both a substantial memory overhead as well as a performance overhead.
+</p>
+<p>
+Here's a sketch of a library that operates on color images plus a
+simple benchmark. First, the plain Lua version:
+</p>
+<pre class="code">
+local floor = math.floor
+
+local function image_ramp_green(n)
+ local img = {}
+ local f = 255/(n-1)
+ for i=1,n do
+ img[i] = { red = 0, green = floor((i-1)*f), blue = 0, alpha = 255 }
+ end
+ return img
+end
+
+local function image_to_grey(img, n)
+ for i=1,n do
+ local y = floor(0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue)
+ img[i].red = y; img[i].green = y; img[i].blue = y
+ end
+end
+
+local N = 400*400
+local img = image_ramp_green(N)
+for i=1,1000 do
+ image_to_grey(img, N)
+end
+</pre>
+<p>
+This creates a table with 160,000 pixels, each of which is a table
+holding four number values in the range of 0-255. First an image with
+a green ramp is created (1D for simplicity), then the image is
+converted to greyscale 1000 times. Yes, that's silly, but I was in
+need of a simple example ...
+</p>
+<p>
+And here's the FFI version. The modified parts have been marked in
+bold:
+</p>
+<pre class="code mark">
+<span class="codemark">&#9312;
+
+
+
+
+
+&#9313;
+
+&#9314;
+&#9315;
+
+
+
+
+
+
+&#9314;
+&#9316;</span><b>local ffi = require("ffi")
+ffi.cdef[[
+</b><span style="color:#00a000;">typedef struct { uint8_t red, green, blue, alpha; } rgba_pixel;</span><b>
+]]</b>
+
+local function image_ramp_green(n)
+ <b>local img = ffi.new("rgba_pixel[?]", n)</b>
+ local f = 255/(n-1)
+ for i=<b>0,n-1</b> do
+ <b>img[i].green = i*f</b>
+ <b>img[i].alpha = 255</b>
+ end
+ return img
+end
+
+local function image_to_grey(img, n)
+ for i=<b>0,n-1</b> do
+ local y = <b>0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue</b>
+ img[i].red = y; img[i].green = y; img[i].blue = y
+ end
+end
+
+local N = 400*400
+local img = image_ramp_green(N)
+for i=1,1000 do
+ image_to_grey(img, N)
+end
+</pre>
+<p>
+Ok, so that wasn't too difficult:
+</p>
+<p>
+<span class="mark">&#9312;</span> First, load the FFI
+library and declare the low-level data type. Here we choose a
+<tt>struct</tt> which holds four byte fields, one for each component
+of a 4x8&nbsp;bit RGBA pixel.
+</p>
+<p>
+<span class="mark">&#9313;</span> Creating the data
+structure with <tt>ffi.new()</tt> is straightforward &mdash; the
+<tt>'?'</tt> is a placeholder for the number of elements of a
+variable-length array.
+</p>
+<p>
+<span class="mark">&#9314;</span> C&nbsp;arrays are
+zero-based, so the indexes have to run from <tt>0</tt> to
+<tt>n-1</tt>. One might want to allocate one more element instead to
+simplify converting legacy code.
+</p>
+<p>
+<span class="mark">&#9315;</span> Since <tt>ffi.new()</tt>
+zero-fills the array by default, we only need to set the green and the
+alpha fields.
+</p>
+<p>
+<span class="mark">&#9316;</span> The calls to
+<tt>math.floor()</tt> can be omitted here, because floating-point
+numbers are already truncated towards zero when converting them to an
+integer. This happens implicitly when the number is stored in the
+fields of each pixel.
+</p>
+<p>
+Now let's have a look at the impact of the changes: first, memory
+consumption for the image is down from 22&nbsp;Megabytes to
+640&nbsp;Kilobytes (400*400*4 bytes). That's a factor of 35x less! So,
+yes, tables do have a noticeable overhead. BTW: The original program
+would consume 40&nbsp;Megabytes in plain Lua (on x64).
+</p>
+<p>
+Next, performance: the pure Lua version runs in 9.57 seconds (52.9
+seconds with the Lua interpreter) and the FFI version runs in 0.48
+seconds on my machine (YMMV). That's a factor of 20x faster (110x
+faster than the Lua interpreter).
+</p>
+<p style="font-size: 8pt;">
+The avid reader may notice that converting the pure Lua version over
+to use array indexes for the colors (<tt>[1]</tt> instead of
+<tt>.red</tt>, <tt>[2]</tt> instead of <tt>.green</tt> etc.) ought to
+be more compact and faster. This is certainly true (by a factor of
+~1.7x). Switching to a struct-of-arrays would help, too.
+</p>
+<p style="font-size: 8pt;">
+However, the resulting code would be less idiomatic and rather
+error-prone. And it still doesn't get even close to the performance of
+the FFI version of the code. Also, high-level data structures cannot
+be easily passed to other C&nbsp;functions, especially I/O functions,
+without undue conversion penalties.
+</p>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/ext_ffi_api.html b/3rdparty/lua/doc/ext_ffi_api.html
index 2afd3de..8b2555b 100644
--- a/3rdparty/lua/doc/ext_ffi_api.html
+++ b/3rdparty/lua/doc/ext_ffi_api.html
@@ -1,566 +1,566 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>ffi.* API Functions</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-table.abitable { width: 30em; line-height: 1.2; }
-tr.abihead td { font-weight: bold; }
-td.abiparam { font-weight: bold; width: 6em; }
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1><tt>ffi.*</tt> API Functions</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a class="current" href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-This page describes the API functions provided by the FFI library in
-detail. It's recommended to read through the
-<a href="ext_ffi.html">introduction</a> and the
-<a href="ext_ffi_tutorial.html">FFI tutorial</a> first.
-</p>
-
-<h2 id="glossary">Glossary</h2>
-<ul>
-<li><b>cdecl</b> &mdash; An abstract C&nbsp;type declaration (a Lua
-string).</li>
-<li><b>ctype</b> &mdash; A C&nbsp;type object. This is a special kind of
-<b>cdata</b> returned by <tt>ffi.typeof()</tt>. It serves as a
-<b>cdata</b> <a href="#ffi_new">constructor</a> when called.</li>
-<li><b>cdata</b> &mdash; A C&nbsp;data object. It holds a value of the
-corresponding <b>ctype</b>.</li>
-<li><b>ct</b> &mdash; A C&nbsp;type specification which can be used for
-most of the API functions. Either a <b>cdecl</b>, a <b>ctype</b> or a
-<b>cdata</b> serving as a template type.</li>
-<li><b>cb</b> &mdash; A callback object. This is a C&nbsp;data object
-holding a special function pointer. Calling this function from
-C&nbsp;code runs an associated Lua function.</li>
-<li><b>VLA</b> &mdash; A variable-length array is declared with a
-<tt>?</tt> instead of the number of elements, e.g. <tt>"int[?]"</tt>.
-The number of elements (<tt>nelem</tt>) must be given when it's
-<a href="#ffi_new">created</a>.</li>
-<li><b>VLS</b> &mdash; A variable-length struct is a <tt>struct</tt> C
-type where the last element is a <b>VLA</b>. The same rules for
-declaration and creation apply.</li>
-</ul>
-
-<h2 id="decl">Declaring and Accessing External Symbols</h2>
-<p>
-External symbols must be declared first and can then be accessed by
-indexing a <a href="ext_ffi_semantics.html#clib">C&nbsp;library
-namespace</a>, which automatically binds the symbol to a specific
-library.
-</p>
-
-<h3 id="ffi_cdef"><tt>ffi.cdef(def)</tt></h3>
-<p>
-Adds multiple C&nbsp;declarations for types or external symbols (named
-variables or functions). <tt>def</tt> must be a Lua string. It's
-recommended to use the syntactic sugar for string arguments as
-follows:
-</p>
-<pre class="code">
-ffi.cdef[[
-<span style="color:#00a000;">typedef struct foo { int a, b; } foo_t; // Declare a struct and typedef.
-int dofoo(foo_t *f, int n); /* Declare an external C function. */</span>
-]]
-</pre>
-<p>
-The contents of the string (the part in green above) must be a
-sequence of
-<a href="ext_ffi_semantics.html#clang">C&nbsp;declarations</a>,
-separated by semicolons. The trailing semicolon for a single
-declaration may be omitted.
-</p>
-<p>
-Please note that external symbols are only <em>declared</em>, but they
-are <em>not bound</em> to any specific address, yet. Binding is
-achieved with C&nbsp;library namespaces (see below).
-</p>
-<p style="color: #c00000;">
-C&nbsp;declarations are not passed through a C&nbsp;pre-processor,
-yet. No pre-processor tokens are allowed, except for
-<tt>#pragma&nbsp;pack</tt>. Replace <tt>#define</tt> in existing
-C&nbsp;header files with <tt>enum</tt>, <tt>static&nbsp;const</tt>
-or <tt>typedef</tt> and/or pass the files through an external
-C&nbsp;pre-processor (once). Be careful not to include unneeded or
-redundant declarations from unrelated header files.
-</p>
-
-<h3 id="ffi_C"><tt>ffi.C</tt></h3>
-<p>
-This is the default C&nbsp;library namespace &mdash; note the
-uppercase <tt>'C'</tt>. It binds to the default set of symbols or
-libraries on the target system. These are more or less the same as a
-C&nbsp;compiler would offer by default, without specifying extra link
-libraries.
-</p>
-<p>
-On POSIX systems, this binds to symbols in the default or global
-namespace. This includes all exported symbols from the executable and
-any libraries loaded into the global namespace. This includes at least
-<tt>libc</tt>, <tt>libm</tt>, <tt>libdl</tt> (on Linux),
-<tt>libgcc</tt> (if compiled with GCC), as well as any exported
-symbols from the Lua/C&nbsp;API provided by LuaJIT itself.
-</p>
-<p>
-On Windows systems, this binds to symbols exported from the
-<tt>*.exe</tt>, the <tt>lua51.dll</tt> (i.e. the Lua/C&nbsp;API
-provided by LuaJIT itself), the C&nbsp;runtime library LuaJIT was linked
-with (<tt>msvcrt*.dll</tt>), <tt>kernel32.dll</tt>,
-<tt>user32.dll</tt> and <tt>gdi32.dll</tt>.
-</p>
-
-<h3 id="ffi_load"><tt>clib = ffi.load(name [,global])</tt></h3>
-<p>
-This loads the dynamic library given by <tt>name</tt> and returns
-a new C&nbsp;library namespace which binds to its symbols. On POSIX
-systems, if <tt>global</tt> is <tt>true</tt>, the library symbols are
-loaded into the global namespace, too.
-</p>
-<p>
-If <tt>name</tt> is a path, the library is loaded from this path.
-Otherwise <tt>name</tt> is canonicalized in a system-dependent way and
-searched in the default search path for dynamic libraries:
-</p>
-<p>
-On POSIX systems, if the name contains no dot, the extension
-<tt>.so</tt> is appended. Also, the <tt>lib</tt> prefix is prepended
-if necessary. So <tt>ffi.load("z")</tt> looks for <tt>"libz.so"</tt>
-in the default shared library search path.
-</p>
-<p>
-On Windows systems, if the name contains no dot, the extension
-<tt>.dll</tt> is appended. So <tt>ffi.load("ws2_32")</tt> looks for
-<tt>"ws2_32.dll"</tt> in the default DLL search path.
-</p>
-
-<h2 id="create">Creating cdata Objects</h2>
-<p>
-The following API functions create cdata objects (<tt>type()</tt>
-returns <tt>"cdata"</tt>). All created cdata objects are
-<a href="ext_ffi_semantics.html#gc">garbage collected</a>.
-</p>
-
-<h3 id="ffi_new"><tt>cdata = ffi.new(ct [,nelem] [,init...])<br>
-cdata = <em>ctype</em>([nelem,] [init...])</tt></h3>
-<p>
-Creates a cdata object for the given <tt>ct</tt>. VLA/VLS types
-require the <tt>nelem</tt> argument. The second syntax uses a ctype as
-a constructor and is otherwise fully equivalent.
-</p>
-<p>
-The cdata object is initialized according to the
-<a href="ext_ffi_semantics.html#init">rules for initializers</a>,
-using the optional <tt>init</tt> arguments. Excess initializers cause
-an error.
-</p>
-<p>
-Performance notice: if you want to create many objects of one kind,
-parse the cdecl only once and get its ctype with
-<tt>ffi.typeof()</tt>. Then use the ctype as a constructor repeatedly.
-</p>
-<p style="font-size: 8pt;">
-Please note that an anonymous <tt>struct</tt> declaration implicitly
-creates a new and distinguished ctype every time you use it for
-<tt>ffi.new()</tt>. This is probably <b>not</b> what you want,
-especially if you create more than one cdata object. Different anonymous
-<tt>structs</tt> are not considered assignment-compatible by the
-C&nbsp;standard, even though they may have the same fields! Also, they
-are considered different types by the JIT-compiler, which may cause an
-excessive number of traces. It's strongly suggested to either declare
-a named <tt>struct</tt> or <tt>typedef</tt> with <tt>ffi.cdef()</tt>
-or to create a single ctype object for an anonymous <tt>struct</tt>
-with <tt>ffi.typeof()</tt>.
-</p>
-
-<h3 id="ffi_typeof"><tt>ctype = ffi.typeof(ct)</tt></h3>
-<p>
-Creates a ctype object for the given <tt>ct</tt>.
-</p>
-<p>
-This function is especially useful to parse a cdecl only once and then
-use the resulting ctype object as a <a href="#ffi_new">constructor</a>.
-</p>
-
-<h3 id="ffi_cast"><tt>cdata = ffi.cast(ct, init)</tt></h3>
-<p>
-Creates a scalar cdata object for the given <tt>ct</tt>. The cdata
-object is initialized with <tt>init</tt> using the "cast" variant of
-the <a href="ext_ffi_semantics.html#convert">C&nbsp;type conversion
-rules</a>.
-</p>
-<p>
-This function is mainly useful to override the pointer compatibility
-checks or to convert pointers to addresses or vice versa.
-</p>
-
-<h3 id="ffi_metatype"><tt>ctype = ffi.metatype(ct, metatable)</tt></h3>
-<p>
-Creates a ctype object for the given <tt>ct</tt> and associates it with
-a metatable. Only <tt>struct</tt>/<tt>union</tt> types, complex numbers
-and vectors are allowed. Other types may be wrapped in a
-<tt>struct</tt>, if needed.
-</p>
-<p>
-The association with a metatable is permanent and cannot be changed
-afterwards. Neither the contents of the <tt>metatable</tt> nor the
-contents of an <tt>__index</tt> table (if any) may be modified
-afterwards. The associated metatable automatically applies to all uses
-of this type, no matter how the objects are created or where they
-originate from. Note that pre-defined operations on types have
-precedence (e.g. declared field names cannot be overridden).
-</p>
-<p>
-All standard Lua metamethods are implemented. These are called directly,
-without shortcuts and on any mix of types. For binary operations, the
-left operand is checked first for a valid ctype metamethod. The
-<tt>__gc</tt> metamethod only applies to <tt>struct</tt>/<tt>union</tt>
-types and performs an implicit <a href="#ffi_gc"><tt>ffi.gc()</tt></a>
-call during creation of an instance.
-</p>
-
-<h3 id="ffi_gc"><tt>cdata = ffi.gc(cdata, finalizer)</tt></h3>
-<p>
-Associates a finalizer with a pointer or aggregate cdata object. The
-cdata object is returned unchanged.
-</p>
-<p>
-This function allows safe integration of unmanaged resources into the
-automatic memory management of the LuaJIT garbage collector. Typical
-usage:
-</p>
-<pre class="code">
-local p = ffi.gc(ffi.C.malloc(n), ffi.C.free)
-...
-p = nil -- Last reference to p is gone.
--- GC will eventually run finalizer: ffi.C.free(p)
-</pre>
-<p>
-A cdata finalizer works like the <tt>__gc</tt> metamethod for userdata
-objects: when the last reference to a cdata object is gone, the
-associated finalizer is called with the cdata object as an argument. The
-finalizer can be a Lua function or a cdata function or cdata function
-pointer. An existing finalizer can be removed by setting a <tt>nil</tt>
-finalizer, e.g. right before explicitly deleting a resource:
-</p>
-<pre class="code">
-ffi.C.free(ffi.gc(p, nil)) -- Manually free the memory.
-</pre>
-
-<h2 id="info">C&nbsp;Type Information</h2>
-<p>
-The following API functions return information about C&nbsp;types.
-They are most useful for inspecting cdata objects.
-</p>
-
-<h3 id="ffi_sizeof"><tt>size = ffi.sizeof(ct [,nelem])</tt></h3>
-<p>
-Returns the size of <tt>ct</tt> in bytes. Returns <tt>nil</tt> if
-the size is not known (e.g. for <tt>"void"</tt> or function types).
-Requires <tt>nelem</tt> for VLA/VLS types, except for cdata objects.
-</p>
-
-<h3 id="ffi_alignof"><tt>align = ffi.alignof(ct)</tt></h3>
-<p>
-Returns the minimum required alignment for <tt>ct</tt> in bytes.
-</p>
-
-<h3 id="ffi_offsetof"><tt>ofs [,bpos,bsize] = ffi.offsetof(ct, field)</tt></h3>
-<p>
-Returns the offset (in bytes) of <tt>field</tt> relative to the start
-of <tt>ct</tt>, which must be a <tt>struct</tt>. Additionally returns
-the position and the field size (in bits) for bit fields.
-</p>
-
-<h3 id="ffi_istype"><tt>status = ffi.istype(ct, obj)</tt></h3>
-<p>
-Returns <tt>true</tt> if <tt>obj</tt> has the C&nbsp;type given by
-<tt>ct</tt>. Returns <tt>false</tt> otherwise.
-</p>
-<p>
-C&nbsp;type qualifiers (<tt>const</tt> etc.) are ignored. Pointers are
-checked with the standard pointer compatibility rules, but without any
-special treatment for <tt>void&nbsp;*</tt>. If <tt>ct</tt> specifies a
-<tt>struct</tt>/<tt>union</tt>, then a pointer to this type is accepted,
-too. Otherwise the types must match exactly.
-</p>
-<p>
-Note: this function accepts all kinds of Lua objects for the
-<tt>obj</tt> argument, but always returns <tt>false</tt> for non-cdata
-objects.
-</p>
-
-<h2 id="util">Utility Functions</h2>
-
-<h3 id="ffi_errno"><tt>err = ffi.errno([newerr])</tt></h3>
-<p>
-Returns the error number set by the last C&nbsp;function call which
-indicated an error condition. If the optional <tt>newerr</tt> argument
-is present, the error number is set to the new value and the previous
-value is returned.
-</p>
-<p>
-This function offers a portable and OS-independent way to get and set the
-error number. Note that only <em>some</em> C&nbsp;functions set the error
-number. And it's only significant if the function actually indicated an
-error condition (e.g. with a return value of <tt>-1</tt> or
-<tt>NULL</tt>). Otherwise, it may or may not contain any previously set
-value.
-</p>
-<p>
-You're advised to call this function only when needed and as close as
-possible after the return of the related C&nbsp;function. The
-<tt>errno</tt> value is preserved across hooks, memory allocations,
-invocations of the JIT compiler and other internal VM activity. The same
-applies to the value returned by <tt>GetLastError()</tt> on Windows, but
-you need to declare and call it yourself.
-</p>
-
-<h3 id="ffi_string"><tt>str = ffi.string(ptr [,len])</tt></h3>
-<p>
-Creates an interned Lua string from the data pointed to by
-<tt>ptr</tt>.
-</p>
-<p>
-If the optional argument <tt>len</tt> is missing, <tt>ptr</tt> is
-converted to a <tt>"char&nbsp;*"</tt> and the data is assumed to be
-zero-terminated. The length of the string is computed with
-<tt>strlen()</tt>.
-</p>
-<p>
-Otherwise <tt>ptr</tt> is converted to a <tt>"void&nbsp;*"</tt> and
-<tt>len</tt> gives the length of the data. The data may contain
-embedded zeros and need not be byte-oriented (though this may cause
-endianness issues).
-</p>
-<p>
-This function is mainly useful to convert (temporary)
-<tt>"const&nbsp;char&nbsp;*"</tt> pointers returned by
-C&nbsp;functions to Lua strings and store them or pass them to other
-functions expecting a Lua string. The Lua string is an (interned) copy
-of the data and bears no relation to the original data area anymore.
-Lua strings are 8&nbsp;bit clean and may be used to hold arbitrary,
-non-character data.
-</p>
-<p>
-Performance notice: it's faster to pass the length of the string, if
-it's known. E.g. when the length is returned by a C&nbsp;call like
-<tt>sprintf()</tt>.
-</p>
-
-<h3 id="ffi_copy"><tt>ffi.copy(dst, src, len)<br>
-ffi.copy(dst, str)</tt></h3>
-<p>
-Copies the data pointed to by <tt>src</tt> to <tt>dst</tt>.
-<tt>dst</tt> is converted to a <tt>"void&nbsp;*"</tt> and <tt>src</tt>
-is converted to a <tt>"const void&nbsp;*"</tt>.
-</p>
-<p>
-In the first syntax, <tt>len</tt> gives the number of bytes to copy.
-Caveat: if <tt>src</tt> is a Lua string, then <tt>len</tt> must not
-exceed <tt>#src+1</tt>.
-</p>
-<p>
-In the second syntax, the source of the copy must be a Lua string. All
-bytes of the string <em>plus a zero-terminator</em> are copied to
-<tt>dst</tt> (i.e. <tt>#src+1</tt> bytes).
-</p>
-<p>
-Performance notice: <tt>ffi.copy()</tt> may be used as a faster
-(inlinable) replacement for the C&nbsp;library functions
-<tt>memcpy()</tt>, <tt>strcpy()</tt> and <tt>strncpy()</tt>.
-</p>
-
-<h3 id="ffi_fill"><tt>ffi.fill(dst, len [,c])</tt></h3>
-<p>
-Fills the data pointed to by <tt>dst</tt> with <tt>len</tt> constant
-bytes, given by <tt>c</tt>. If <tt>c</tt> is omitted, the data is
-zero-filled.
-</p>
-<p>
-Performance notice: <tt>ffi.fill()</tt> may be used as a faster
-(inlinable) replacement for the C&nbsp;library function
-<tt>memset(dst,&nbsp;c,&nbsp;len)</tt>. Please note the different
-order of arguments!
-</p>
-
-<h2 id="target">Target-specific Information</h2>
-
-<h3 id="ffi_abi"><tt>status = ffi.abi(param)</tt></h3>
-<p>
-Returns <tt>true</tt> if <tt>param</tt> (a Lua string) applies for the
-target ABI (Application Binary Interface). Returns <tt>false</tt>
-otherwise. The following parameters are currently defined:
-</p>
-<table class="abitable">
-<tr class="abihead">
-<td class="abiparam">Parameter</td>
-<td class="abidesc">Description</td>
-</tr>
-<tr class="odd separate">
-<td class="abiparam">32bit</td><td class="abidesc">32 bit architecture</td></tr>
-<tr class="even">
-<td class="abiparam">64bit</td><td class="abidesc">64 bit architecture</td></tr>
-<tr class="odd separate">
-<td class="abiparam">le</td><td class="abidesc">Little-endian architecture</td></tr>
-<tr class="even">
-<td class="abiparam">be</td><td class="abidesc">Big-endian architecture</td></tr>
-<tr class="odd separate">
-<td class="abiparam">fpu</td><td class="abidesc">Target has a hardware FPU</td></tr>
-<tr class="even">
-<td class="abiparam">softfp</td><td class="abidesc">softfp calling conventions</td></tr>
-<tr class="odd">
-<td class="abiparam">hardfp</td><td class="abidesc">hardfp calling conventions</td></tr>
-<tr class="even separate">
-<td class="abiparam">eabi</td><td class="abidesc">EABI variant of the standard ABI</td></tr>
-<tr class="odd">
-<td class="abiparam">win</td><td class="abidesc">Windows variant of the standard ABI</td></tr>
-</table>
-
-<h3 id="ffi_os"><tt>ffi.os</tt></h3>
-<p>
-Contains the target OS name. Same contents as
-<a href="ext_jit.html#jit_os"><tt>jit.os</tt></a>.
-</p>
-
-<h3 id="ffi_arch"><tt>ffi.arch</tt></h3>
-<p>
-Contains the target architecture name. Same contents as
-<a href="ext_jit.html#jit_arch"><tt>jit.arch</tt></a>.
-</p>
-
-<h2 id="callback">Methods for Callbacks</h2>
-<p>
-The C&nbsp;types for <a href="ext_ffi_semantics.html#callback">callbacks</a>
-have some extra methods:
-</p>
-
-<h3 id="callback_free"><tt>cb:free()</tt></h3>
-<p>
-Free the resources associated with a callback. The associated Lua
-function is unanchored and may be garbage collected. The callback
-function pointer is no longer valid and must not be called anymore
-(it may be reused by a subsequently created callback).
-</p>
-
-<h3 id="callback_set"><tt>cb:set(func)</tt></h3>
-<p>
-Associate a new Lua function with a callback. The C&nbsp;type of the
-callback and the callback function pointer are unchanged.
-</p>
-<p>
-This method is useful to dynamically switch the receiver of callbacks
-without creating a new callback each time and registering it again (e.g.
-with a GUI library).
-</p>
-
-<h2 id="extended">Extended Standard Library Functions</h2>
-<p>
-The following standard library functions have been extended to work
-with cdata objects:
-</p>
-
-<h3 id="tonumber"><tt>n = tonumber(cdata)</tt></h3>
-<p>
-Converts a number cdata object to a <tt>double</tt> and returns it as
-a Lua number. This is particularly useful for boxed 64&nbsp;bit
-integer values. Caveat: this conversion may incur a precision loss.
-</p>
-
-<h3 id="tostring"><tt>s = tostring(cdata)</tt></h3>
-<p>
-Returns a string representation of the value of 64&nbsp;bit integers
-(<tt><b>"</b>nnn<b>LL"</b></tt> or <tt><b>"</b>nnn<b>ULL"</b></tt>) or
-complex numbers (<tt><b>"</b>re&plusmn;im<b>i"</b></tt>). Otherwise
-returns a string representation of the C&nbsp;type of a ctype object
-(<tt><b>"ctype&lt;</b>type<b>&gt;"</b></tt>) or a cdata object
-(<tt><b>"cdata&lt;</b>type<b>&gt;:&nbsp;</b>address"</tt>), unless you
-override it with a <tt>__tostring</tt> metamethod (see
-<a href="#ffi_metatype"><tt>ffi.metatype()</tt></a>).
-</p>
-
-<h3 id="pairs"><tt>iter, obj, start = pairs(cdata)<br>
-iter, obj, start = ipairs(cdata)<br></tt></h3>
-<p>
-Calls the <tt>__pairs</tt> or <tt>__ipairs</tt> metamethod of the
-corresponding ctype.
-</p>
-
-<h2 id="literals">Extensions to the Lua Parser</h2>
-<p>
-The parser for Lua source code treats numeric literals with the
-suffixes <tt>LL</tt> or <tt>ULL</tt> as signed or unsigned 64&nbsp;bit
-integers. Case doesn't matter, but uppercase is recommended for
-readability. It handles both decimal (<tt>42LL</tt>) and hexadecimal
-(<tt>0x2aLL</tt>) literals.
-</p>
-<p>
-The imaginary part of complex numbers can be specified by suffixing
-number literals with <tt>i</tt> or <tt>I</tt>, e.g. <tt>12.5i</tt>.
-Caveat: you'll need to use <tt>1i</tt> to get an imaginary part with
-the value one, since <tt>i</tt> itself still refers to a variable
-named <tt>i</tt>.
-</p>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>ffi.* API Functions</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+table.abitable { width: 30em; line-height: 1.2; }
+tr.abihead td { font-weight: bold; }
+td.abiparam { font-weight: bold; width: 6em; }
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1><tt>ffi.*</tt> API Functions</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a class="current" href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+This page describes the API functions provided by the FFI library in
+detail. It's recommended to read through the
+<a href="ext_ffi.html">introduction</a> and the
+<a href="ext_ffi_tutorial.html">FFI tutorial</a> first.
+</p>
+
+<h2 id="glossary">Glossary</h2>
+<ul>
+<li><b>cdecl</b> &mdash; An abstract C&nbsp;type declaration (a Lua
+string).</li>
+<li><b>ctype</b> &mdash; A C&nbsp;type object. This is a special kind of
+<b>cdata</b> returned by <tt>ffi.typeof()</tt>. It serves as a
+<b>cdata</b> <a href="#ffi_new">constructor</a> when called.</li>
+<li><b>cdata</b> &mdash; A C&nbsp;data object. It holds a value of the
+corresponding <b>ctype</b>.</li>
+<li><b>ct</b> &mdash; A C&nbsp;type specification which can be used for
+most of the API functions. Either a <b>cdecl</b>, a <b>ctype</b> or a
+<b>cdata</b> serving as a template type.</li>
+<li><b>cb</b> &mdash; A callback object. This is a C&nbsp;data object
+holding a special function pointer. Calling this function from
+C&nbsp;code runs an associated Lua function.</li>
+<li><b>VLA</b> &mdash; A variable-length array is declared with a
+<tt>?</tt> instead of the number of elements, e.g. <tt>"int[?]"</tt>.
+The number of elements (<tt>nelem</tt>) must be given when it's
+<a href="#ffi_new">created</a>.</li>
+<li><b>VLS</b> &mdash; A variable-length struct is a <tt>struct</tt> C
+type where the last element is a <b>VLA</b>. The same rules for
+declaration and creation apply.</li>
+</ul>
+
+<h2 id="decl">Declaring and Accessing External Symbols</h2>
+<p>
+External symbols must be declared first and can then be accessed by
+indexing a <a href="ext_ffi_semantics.html#clib">C&nbsp;library
+namespace</a>, which automatically binds the symbol to a specific
+library.
+</p>
+
+<h3 id="ffi_cdef"><tt>ffi.cdef(def)</tt></h3>
+<p>
+Adds multiple C&nbsp;declarations for types or external symbols (named
+variables or functions). <tt>def</tt> must be a Lua string. It's
+recommended to use the syntactic sugar for string arguments as
+follows:
+</p>
+<pre class="code">
+ffi.cdef[[
+<span style="color:#00a000;">typedef struct foo { int a, b; } foo_t; // Declare a struct and typedef.
+int dofoo(foo_t *f, int n); /* Declare an external C function. */</span>
+]]
+</pre>
+<p>
+The contents of the string (the part in green above) must be a
+sequence of
+<a href="ext_ffi_semantics.html#clang">C&nbsp;declarations</a>,
+separated by semicolons. The trailing semicolon for a single
+declaration may be omitted.
+</p>
+<p>
+Please note that external symbols are only <em>declared</em>, but they
+are <em>not bound</em> to any specific address, yet. Binding is
+achieved with C&nbsp;library namespaces (see below).
+</p>
+<p style="color: #c00000;">
+C&nbsp;declarations are not passed through a C&nbsp;pre-processor,
+yet. No pre-processor tokens are allowed, except for
+<tt>#pragma&nbsp;pack</tt>. Replace <tt>#define</tt> in existing
+C&nbsp;header files with <tt>enum</tt>, <tt>static&nbsp;const</tt>
+or <tt>typedef</tt> and/or pass the files through an external
+C&nbsp;pre-processor (once). Be careful not to include unneeded or
+redundant declarations from unrelated header files.
+</p>
+
+<h3 id="ffi_C"><tt>ffi.C</tt></h3>
+<p>
+This is the default C&nbsp;library namespace &mdash; note the
+uppercase <tt>'C'</tt>. It binds to the default set of symbols or
+libraries on the target system. These are more or less the same as a
+C&nbsp;compiler would offer by default, without specifying extra link
+libraries.
+</p>
+<p>
+On POSIX systems, this binds to symbols in the default or global
+namespace. This includes all exported symbols from the executable and
+any libraries loaded into the global namespace. This includes at least
+<tt>libc</tt>, <tt>libm</tt>, <tt>libdl</tt> (on Linux),
+<tt>libgcc</tt> (if compiled with GCC), as well as any exported
+symbols from the Lua/C&nbsp;API provided by LuaJIT itself.
+</p>
+<p>
+On Windows systems, this binds to symbols exported from the
+<tt>*.exe</tt>, the <tt>lua51.dll</tt> (i.e. the Lua/C&nbsp;API
+provided by LuaJIT itself), the C&nbsp;runtime library LuaJIT was linked
+with (<tt>msvcrt*.dll</tt>), <tt>kernel32.dll</tt>,
+<tt>user32.dll</tt> and <tt>gdi32.dll</tt>.
+</p>
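+<p>
+For illustration, here's a minimal sketch which calls the standard
+<tt>printf()</tt> through the default namespace. It assumes a
+C&nbsp;runtime is reachable there, as described above; the declaration
+itself still has to be supplied with <tt>ffi.cdef()</tt>:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">int printf(const char *fmt, ...);</span>
+]]
+ffi.C.printf("Hello %s!\n", "world") -- Resolved in the default namespace.
+</pre>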
+
+<h3 id="ffi_load"><tt>clib = ffi.load(name [,global])</tt></h3>
+<p>
+This loads the dynamic library given by <tt>name</tt> and returns
+a new C&nbsp;library namespace which binds to its symbols. On POSIX
+systems, if <tt>global</tt> is <tt>true</tt>, the library symbols are
+loaded into the global namespace, too.
+</p>
+<p>
+If <tt>name</tt> is a path, the library is loaded from this path.
+Otherwise <tt>name</tt> is canonicalized in a system-dependent way and
+searched in the default search path for dynamic libraries:
+</p>
+<p>
+On POSIX systems, if the name contains no dot, the extension
+<tt>.so</tt> is appended. Also, the <tt>lib</tt> prefix is prepended
+if necessary. So <tt>ffi.load("z")</tt> looks for <tt>"libz.so"</tt>
+in the default shared library search path.
+</p>
+<p>
+On Windows systems, if the name contains no dot, the extension
+<tt>.dll</tt> is appended. So <tt>ffi.load("ws2_32")</tt> looks for
+<tt>"ws2_32.dll"</tt> in the default DLL search path.
+</p>
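+<p>
+A short sketch, assuming the zlib shared library is installed on the
+system (on POSIX it's found as <tt>"libz.so"</tt>, as explained above):
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">unsigned long compressBound(unsigned long sourceLen);</span>
+]]
+local zlib = ffi.load("z")      -- Returns a namespace bound to the library.
+print(zlib.compressBound(1000)) -- Call the symbol through that namespace.
+</pre>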
+
+<h2 id="create">Creating cdata Objects</h2>
+<p>
+The following API functions create cdata objects (<tt>type()</tt>
+returns <tt>"cdata"</tt>). All created cdata objects are
+<a href="ext_ffi_semantics.html#gc">garbage collected</a>.
+</p>
+
+<h3 id="ffi_new"><tt>cdata = ffi.new(ct [,nelem] [,init...])<br>
+cdata = <em>ctype</em>([nelem,] [init...])</tt></h3>
+<p>
+Creates a cdata object for the given <tt>ct</tt>. VLA/VLS types
+require the <tt>nelem</tt> argument. The second syntax uses a ctype as
+a constructor and is otherwise fully equivalent.
+</p>
+<p>
+The cdata object is initialized according to the
+<a href="ext_ffi_semantics.html#init">rules for initializers</a>,
+using the optional <tt>init</tt> arguments. Excess initializers cause
+an error.
+</p>
+<p>
+Performance notice: if you want to create many objects of one kind,
+parse the cdecl only once and get its ctype with
+<tt>ffi.typeof()</tt>. Then use the ctype as a constructor repeatedly.
+</p>
+<p style="font-size: 8pt;">
+Please note that an anonymous <tt>struct</tt> declaration implicitly
+creates a new and distinguished ctype every time you use it for
+<tt>ffi.new()</tt>. This is probably <b>not</b> what you want,
+especially if you create more than one cdata object. Different anonymous
+<tt>structs</tt> are not considered assignment-compatible by the
+C&nbsp;standard, even though they may have the same fields! Also, they
+are considered different types by the JIT-compiler, which may cause an
+excessive number of traces. It's strongly suggested to either declare
+a named <tt>struct</tt> or <tt>typedef</tt> with <tt>ffi.cdef()</tt>
+or to create a single ctype object for an anonymous <tt>struct</tt>
+with <tt>ffi.typeof()</tt>.
+</p>
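+<p>
+A few hedged examples of typical creations (the <tt>point_t</tt> typedef
+is made up for this sketch):
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">typedef struct { int x, y; } point_t;</span>
+]]
+local vec = ffi.new("double[3]", 1, 2, 3) -- Fixed-size array with initializers.
+local buf = ffi.new("uint8_t[?]", 256)    -- VLA: the nelem argument is required.
+local pt  = ffi.new("point_t", { x = 10, y = 20 })
+print(vec[2], buf[0], pt.x + pt.y)        --> 3    0    30
+</pre>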
+
+<h3 id="ffi_typeof"><tt>ctype = ffi.typeof(ct)</tt></h3>
+<p>
+Creates a ctype object for the given <tt>ct</tt>.
+</p>
+<p>
+This function is especially useful to parse a cdecl only once and then
+use the resulting ctype object as a <a href="#ffi_new">constructor</a>.
+</p>
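+<p>
+A sketch of the constructor pattern mentioned above:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+local int_array_t = ffi.typeof("int[?]") -- Parse the cdecl once ...
+local a = int_array_t(100)               -- ... then construct repeatedly.
+local b = int_array_t(4, 1, 2, 3)        -- nelem first, then initializers.
+print(ffi.sizeof(a), b[1])               --> 400  2 (assuming a 4 byte int)
+</pre>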
+
+<h3 id="ffi_cast"><tt>cdata = ffi.cast(ct, init)</tt></h3>
+<p>
+Creates a scalar cdata object for the given <tt>ct</tt>. The cdata
+object is initialized with <tt>init</tt> using the "cast" variant of
+the <a href="ext_ffi_semantics.html#convert">C&nbsp;type conversion
+rules</a>.
+</p>
+<p>
+This function is mainly useful to override the pointer compatibility
+checks or to convert pointers to addresses or vice versa.
+</p>
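+<p>
+A hedged sketch; the printed byte value depends on the endianness of the
+target:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+local buf = ffi.new("int32_t[1]", 0x11223344)
+local p = ffi.cast("uint8_t *", buf)  -- Reinterpret the same memory.
+print(p[0])                           -- 0x44 on little-endian targets.
+local addr = ffi.cast("uintptr_t", p) -- Pointer converted to an address.
+</pre>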
+
+<h3 id="ffi_metatype"><tt>ctype = ffi.metatype(ct, metatable)</tt></h3>
+<p>
+Creates a ctype object for the given <tt>ct</tt> and associates it with
+a metatable. Only <tt>struct</tt>/<tt>union</tt> types, complex numbers
+and vectors are allowed. Other types may be wrapped in a
+<tt>struct</tt>, if needed.
+</p>
+<p>
+The association with a metatable is permanent and cannot be changed
+afterwards. Neither the contents of the <tt>metatable</tt> nor the
+contents of an <tt>__index</tt> table (if any) may be modified
+afterwards. The associated metatable automatically applies to all uses
+of this type, no matter how the objects are created or where they
+originate from. Note that pre-defined operations on types have
+precedence (e.g. declared field names cannot be overridden).
+</p>
+<p>
+All standard Lua metamethods are implemented. These are called directly,
+without shortcuts and on any mix of types. For binary operations, the
+left operand is checked first for a valid ctype metamethod. The
+<tt>__gc</tt> metamethod only applies to <tt>struct</tt>/<tt>union</tt>
+types and performs an implicit <a href="#ffi_gc"><tt>ffi.gc()</tt></a>
+call during creation of an instance.
+</p>
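+<p>
+A small sketch of a 2D point type with a method and an overloaded
+operator (the <tt>vec2_t</tt> typedef is made up for this example):
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">typedef struct { double x, y; } vec2_t;</span>
+]]
+local vec2 -- Forward declaration, used inside the metamethods below.
+local mt = {
+  __add = function(a, b) return vec2(a.x + b.x, a.y + b.y) end,
+  __index = {
+    len = function(a) return math.sqrt(a.x * a.x + a.y * a.y) end,
+  },
+}
+vec2 = ffi.metatype("vec2_t", mt) -- The returned ctype doubles as constructor.
+local p = vec2(3, 4) + vec2(0, 0)
+print(p:len())                    --> 5
+</pre>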
+
+<h3 id="ffi_gc"><tt>cdata = ffi.gc(cdata, finalizer)</tt></h3>
+<p>
+Associates a finalizer with a pointer or aggregate cdata object. The
+cdata object is returned unchanged.
+</p>
+<p>
+This function allows safe integration of unmanaged resources into the
+automatic memory management of the LuaJIT garbage collector. Typical
+usage:
+</p>
+<pre class="code">
+local p = ffi.gc(ffi.C.malloc(n), ffi.C.free)
+...
+p = nil -- Last reference to p is gone.
+-- GC will eventually run finalizer: ffi.C.free(p)
+</pre>
+<p>
+A cdata finalizer works like the <tt>__gc</tt> metamethod for userdata
+objects: when the last reference to a cdata object is gone, the
+associated finalizer is called with the cdata object as an argument. The
+finalizer can be a Lua function or a cdata function or cdata function
+pointer. An existing finalizer can be removed by setting a <tt>nil</tt>
+finalizer, e.g. right before explicitly deleting a resource:
+</p>
+<pre class="code">
+ffi.C.free(ffi.gc(p, nil)) -- Manually free the memory.
+</pre>
+
+<h2 id="info">C&nbsp;Type Information</h2>
+<p>
+The following API functions return information about C&nbsp;types.
+They are most useful for inspecting cdata objects.
+</p>
+
+<h3 id="ffi_sizeof"><tt>size = ffi.sizeof(ct [,nelem])</tt></h3>
+<p>
+Returns the size of <tt>ct</tt> in bytes. Returns <tt>nil</tt> if
+the size is not known (e.g. for <tt>"void"</tt> or function types).
+Requires <tt>nelem</tt> for VLA/VLS types, except for cdata objects.
+</p>
+
+<h3 id="ffi_alignof"><tt>align = ffi.alignof(ct)</tt></h3>
+<p>
+Returns the minimum required alignment for <tt>ct</tt> in bytes.
+</p>
+
+<h3 id="ffi_offsetof"><tt>ofs [,bpos,bsize] = ffi.offsetof(ct, field)</tt></h3>
+<p>
+Returns the offset (in bytes) of <tt>field</tt> relative to the start
+of <tt>ct</tt>, which must be a <tt>struct</tt>. Additionally returns
+the position and the field size (in bits) for bit fields.
+</p>
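+<p>
+A sketch; the printed numbers assume a typical ABI with a 4&nbsp;byte
+<tt>int</tt> and 8&nbsp;byte <tt>double</tt> alignment:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">typedef struct { char tag; double value; } item_t;</span>
+]]
+print(ffi.sizeof("item_t"))            --> 16 (includes alignment padding)
+print(ffi.alignof("item_t"))           --> 8
+print(ffi.offsetof("item_t", "value")) --> 8
+print(ffi.sizeof("int[?]", 10))        --> 40 (nelem needed for the VLA)
+</pre>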
+
+<h3 id="ffi_istype"><tt>status = ffi.istype(ct, obj)</tt></h3>
+<p>
+Returns <tt>true</tt> if <tt>obj</tt> has the C&nbsp;type given by
+<tt>ct</tt>. Returns <tt>false</tt> otherwise.
+</p>
+<p>
+C&nbsp;type qualifiers (<tt>const</tt> etc.) are ignored. Pointers are
+checked with the standard pointer compatibility rules, but without any
+special treatment for <tt>void&nbsp;*</tt>. If <tt>ct</tt> specifies a
+<tt>struct</tt>/<tt>union</tt>, then a pointer to this type is accepted,
+too. Otherwise the types must match exactly.
+</p>
+<p>
+Note: this function accepts all kinds of Lua objects for the
+<tt>obj</tt> argument, but always returns <tt>false</tt> for non-cdata
+objects.
+</p>
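+<p>
+A short sketch:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+local int_t = ffi.typeof("int")
+local x = ffi.new("int")
+print(ffi.istype(int_t, x))       --> true
+print(ffi.istype("const int", x)) --> true  (qualifiers are ignored)
+print(ffi.istype("double", x))    --> false
+print(ffi.istype(int_t, 42))      --> false (plain Lua number, not cdata)
+</pre>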
+
+<h2 id="util">Utility Functions</h2>
+
+<h3 id="ffi_errno"><tt>err = ffi.errno([newerr])</tt></h3>
+<p>
+Returns the error number set by the last C&nbsp;function call which
+indicated an error condition. If the optional <tt>newerr</tt> argument
+is present, the error number is set to the new value and the previous
+value is returned.
+</p>
+<p>
+This function offers a portable and OS-independent way to get and set the
+error number. Note that only <em>some</em> C&nbsp;functions set the error
+number. And it's only significant if the function actually indicated an
+error condition (e.g. with a return value of <tt>-1</tt> or
+<tt>NULL</tt>). Otherwise, it may or may not contain any previously set
+value.
+</p>
+<p>
+You're advised to call this function only when needed and as close as
+possible after the return of the related C&nbsp;function. The
+<tt>errno</tt> value is preserved across hooks, memory allocations,
+invocations of the JIT compiler and other internal VM activity. The same
+applies to the value returned by <tt>GetLastError()</tt> on Windows, but
+you need to declare and call it yourself.
+</p>
+
+<h3 id="ffi_string"><tt>str = ffi.string(ptr [,len])</tt></h3>
+<p>
+Creates an interned Lua string from the data pointed to by
+<tt>ptr</tt>.
+</p>
+<p>
+If the optional argument <tt>len</tt> is missing, <tt>ptr</tt> is
+converted to a <tt>"char&nbsp;*"</tt> and the data is assumed to be
+zero-terminated. The length of the string is computed with
+<tt>strlen()</tt>.
+</p>
+<p>
+Otherwise <tt>ptr</tt> is converted to a <tt>"void&nbsp;*"</tt> and
+<tt>len</tt> gives the length of the data. The data may contain
+embedded zeros and need not be byte-oriented (though this may cause
+endianness issues).
+</p>
+<p>
+This function is mainly useful to convert (temporary)
+<tt>"const&nbsp;char&nbsp;*"</tt> pointers returned by
+C&nbsp;functions to Lua strings and store them or pass them to other
+functions expecting a Lua string. The Lua string is an (interned) copy
+of the data and bears no relation to the original data area anymore.
+Lua strings are 8&nbsp;bit clean and may be used to hold arbitrary,
+non-character data.
+</p>
+<p>
+Performance notice: it's faster to pass the length of the string, if
+it's known. E.g. when the length is returned by a C&nbsp;call like
+<tt>sprintf()</tt>.
+</p>
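+<p>
+A hedged sketch; it assumes <tt>getenv()</tt> is available in the default
+C&nbsp;library namespace:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">char *getenv(const char *name);</span>
+]]
+local p = ffi.C.getenv("PATH")
+if p ~= nil then            -- NULL check before converting.
+  print(#ffi.string(p))     -- Zero-terminated: length via strlen().
+end
+local buf = ffi.new("uint8_t[4]", 65, 0, 66, 67)
+print(#ffi.string(buf, 4))  --> 4 (embedded zero is kept)
+</pre>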
+
+<h3 id="ffi_copy"><tt>ffi.copy(dst, src, len)<br>
+ffi.copy(dst, str)</tt></h3>
+<p>
+Copies the data pointed to by <tt>src</tt> to <tt>dst</tt>.
+<tt>dst</tt> is converted to a <tt>"void&nbsp;*"</tt> and <tt>src</tt>
+is converted to a <tt>"const void&nbsp;*"</tt>.
+</p>
+<p>
+In the first syntax, <tt>len</tt> gives the number of bytes to copy.
+Caveat: if <tt>src</tt> is a Lua string, then <tt>len</tt> must not
+exceed <tt>#src+1</tt>.
+</p>
+<p>
+In the second syntax, the source of the copy must be a Lua string. All
+bytes of the string <em>plus a zero-terminator</em> are copied to
+<tt>dst</tt> (i.e. <tt>#src+1</tt> bytes).
+</p>
+<p>
+Performance notice: <tt>ffi.copy()</tt> may be used as a faster
+(inlinable) replacement for the C&nbsp;library functions
+<tt>memcpy()</tt>, <tt>strcpy()</tt> and <tt>strncpy()</tt>.
+</p>
+
+<h3 id="ffi_fill"><tt>ffi.fill(dst, len [,c])</tt></h3>
+<p>
+Fills the data pointed to by <tt>dst</tt> with <tt>len</tt> constant
+bytes, given by <tt>c</tt>. If <tt>c</tt> is omitted, the data is
+zero-filled.
+</p>
+<p>
+Performance notice: <tt>ffi.fill()</tt> may be used as a faster
+(inlinable) replacement for the C&nbsp;library function
+<tt>memset(dst,&nbsp;c,&nbsp;len)</tt>. Please note the different
+order of arguments!
+</p>
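+<p>
+A combined sketch of <tt>ffi.copy()</tt> and <tt>ffi.fill()</tt>:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+local buf = ffi.new("char[16]")
+ffi.fill(buf, 16)          -- Zero-fill; c defaults to 0.
+ffi.copy(buf, "Hello")     -- Copies the string plus its zero-terminator.
+print(ffi.string(buf))     --> Hello
+ffi.fill(buf, 5, 42)       -- Overwrite the first 5 bytes with byte 42 ('*').
+print(ffi.string(buf, 5))  --> *****
+</pre>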
+
+<h2 id="target">Target-specific Information</h2>
+
+<h3 id="ffi_abi"><tt>status = ffi.abi(param)</tt></h3>
+<p>
+Returns <tt>true</tt> if <tt>param</tt> (a Lua string) applies for the
+target ABI (Application Binary Interface). Returns <tt>false</tt>
+otherwise. The following parameters are currently defined:
+</p>
+<table class="abitable">
+<tr class="abihead">
+<td class="abiparam">Parameter</td>
+<td class="abidesc">Description</td>
+</tr>
+<tr class="odd separate">
+<td class="abiparam">32bit</td><td class="abidesc">32 bit architecture</td></tr>
+<tr class="even">
+<td class="abiparam">64bit</td><td class="abidesc">64 bit architecture</td></tr>
+<tr class="odd separate">
+<td class="abiparam">le</td><td class="abidesc">Little-endian architecture</td></tr>
+<tr class="even">
+<td class="abiparam">be</td><td class="abidesc">Big-endian architecture</td></tr>
+<tr class="odd separate">
+<td class="abiparam">fpu</td><td class="abidesc">Target has a hardware FPU</td></tr>
+<tr class="even">
+<td class="abiparam">softfp</td><td class="abidesc">softfp calling conventions</td></tr>
+<tr class="odd">
+<td class="abiparam">hardfp</td><td class="abidesc">hardfp calling conventions</td></tr>
+<tr class="even separate">
+<td class="abiparam">eabi</td><td class="abidesc">EABI variant of the standard ABI</td></tr>
+<tr class="odd">
+<td class="abiparam">win</td><td class="abidesc">Windows variant of the standard ABI</td></tr>
+</table>
+
+<h3 id="ffi_os"><tt>ffi.os</tt></h3>
+<p>
+Contains the target OS name. Same contents as
+<a href="ext_jit.html#jit_os"><tt>jit.os</tt></a>.
+</p>
+
+<h3 id="ffi_arch"><tt>ffi.arch</tt></h3>
+<p>
+Contains the target architecture name. Same contents as
+<a href="ext_jit.html#jit_arch"><tt>jit.arch</tt></a>.
+</p>
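+<p>
+A sketch combining <tt>ffi.abi()</tt>, <tt>ffi.os</tt> and
+<tt>ffi.arch</tt>; the values in the comments are just one possible
+target:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+print(ffi.os, ffi.arch)       -- e.g. Linux   x64
+if ffi.abi("64bit") then
+  -- Use 64 bit aware declarations or offsets here.
+end
+local dll_ext = ffi.os == "Windows" and ".dll" or ".so" -- Simplified; OSX uses ".dylib".
+</pre>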
+
+<h2 id="callback">Methods for Callbacks</h2>
+<p>
+The C&nbsp;types for <a href="ext_ffi_semantics.html#callback">callbacks</a>
+have some extra methods:
+</p>
+
+<h3 id="callback_free"><tt>cb:free()</tt></h3>
+<p>
+Free the resources associated with a callback. The associated Lua
+function is unanchored and may be garbage collected. The callback
+function pointer is no longer valid and must not be called anymore
+(it may be reused by a subsequently created callback).
+</p>
+
+<h3 id="callback_set"><tt>cb:set(func)</tt></h3>
+<p>
+Associate a new Lua function with a callback. The C&nbsp;type of the
+callback and the callback function pointer are unchanged.
+</p>
+<p>
+This method is useful to dynamically switch the receiver of callbacks
+without creating a new callback each time and registering it again (e.g.
+with a GUI library).
+</p>
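+<p>
+A sketch of the callback life cycle, using a callback created with
+<tt>ffi.cast()</tt> (normally the function pointer would be handed to
+C&nbsp;code):
+</p>
+<pre class="code">
+local ffi = require("ffi")
+local cb = ffi.cast("int (*)(int, int)", function(a, b) return a + b end)
+print(cb(2, 3))                         --> 5
+cb:set(function(a, b) return a * b end) -- Same pointer, new Lua receiver.
+print(cb(2, 3))                         --> 6
+cb:free()                               -- Release the callback slot.
+</pre>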
+
+<h2 id="extended">Extended Standard Library Functions</h2>
+<p>
+The following standard library functions have been extended to work
+with cdata objects:
+</p>
+
+<h3 id="tonumber"><tt>n = tonumber(cdata)</tt></h3>
+<p>
+Converts a number cdata object to a <tt>double</tt> and returns it as
+a Lua number. This is particularly useful for boxed 64&nbsp;bit
+integer values. Caveat: this conversion may incur a precision loss.
+</p>
+
+<h3 id="tostring"><tt>s = tostring(cdata)</tt></h3>
+<p>
+Returns a string representation of the value of 64&nbsp;bit integers
+(<tt><b>"</b>nnn<b>LL"</b></tt> or <tt><b>"</b>nnn<b>ULL"</b></tt>) or
+complex numbers (<tt><b>"</b>re&plusmn;im<b>i"</b></tt>). Otherwise
+returns a string representation of the C&nbsp;type of a ctype object
+(<tt><b>"ctype&lt;</b>type<b>&gt;"</b></tt>) or a cdata object
+(<tt><b>"cdata&lt;</b>type<b>&gt;:&nbsp;</b>address"</tt>), unless you
+override it with a <tt>__tostring</tt> metamethod (see
+<a href="#ffi_metatype"><tt>ffi.metatype()</tt></a>).
+</p>
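+<p>
+A sketch illustrating the precision caveat:
+</p>
+<pre class="code">
+local x = 9007199254740993LL -- 2^53+1: not exactly representable as a double.
+print(tostring(x))           --> 9007199254740993LL (exact)
+print(tonumber(x))           --> 9.007199254741e+15 (rounded to a double)
+</pre>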
+
+<h3 id="pairs"><tt>iter, obj, start = pairs(cdata)<br>
+iter, obj, start = ipairs(cdata)<br></tt></h3>
+<p>
+Calls the <tt>__pairs</tt> or <tt>__ipairs</tt> metamethod of the
+corresponding ctype.
+</p>
+
+<h2 id="literals">Extensions to the Lua Parser</h2>
+<p>
+The parser for Lua source code treats numeric literals with the
+suffixes <tt>LL</tt> or <tt>ULL</tt> as signed or unsigned 64&nbsp;bit
+integers. Case doesn't matter, but uppercase is recommended for
+readability. It handles both decimal (<tt>42LL</tt>) and hexadecimal
+(<tt>0x2aLL</tt>) literals.
+</p>
+<p>
+The imaginary part of complex numbers can be specified by suffixing
+number literals with <tt>i</tt> or <tt>I</tt>, e.g. <tt>12.5i</tt>.
+Caveat: you'll need to use <tt>1i</tt> to get an imaginary part with
+the value one, since <tt>i</tt> itself still refers to a variable
+named <tt>i</tt>.
+</p>
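+<p>
+A few literal examples (a sketch):
+</p>
+<pre class="code">
+local a = 42LL    -- Signed 64 bit integer cdata.
+local b = 0x2aULL -- Unsigned 64 bit integer cdata, hexadecimal form.
+print(a + 1)      --> 43LL (arithmetic stays in 64 bits)
+local one_i = 1i  -- Imaginary unit; plain i would be a Lua variable.
+</pre>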
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/ext_ffi_semantics.html b/3rdparty/lua/doc/ext_ffi_semantics.html
index 6c7e637..0322901 100644
--- a/3rdparty/lua/doc/ext_ffi_semantics.html
+++ b/3rdparty/lua/doc/ext_ffi_semantics.html
@@ -1,1245 +1,1243 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>FFI Semantics</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-table.convtable { line-height: 1.2; }
-tr.convhead td { font-weight: bold; }
-td.convop { font-style: italic; width: 40%; }
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>FFI Semantics</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a class="current" href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-This page describes the detailed semantics underlying the FFI library
-and its interaction with both Lua and C&nbsp;code.
-</p>
-<p>
-Given that the FFI library is designed to interface with C&nbsp;code
-and that declarations can be written in plain C&nbsp;syntax, <b>it
-closely follows the C&nbsp;language semantics</b>, wherever possible.
-Some minor concessions are needed for smoother interoperation with Lua
-language semantics.
-</p>
-<p>
-Please don't be overwhelmed by the contents of this page &mdash; this
-is a reference and you may need to consult it, if in doubt. It doesn't
-hurt to skim this page, but most of the semantics "just work" as you'd
-expect them to work. It should be straightforward to write
-applications using the LuaJIT FFI for developers with a C or C++
-background.
-</p>
-
-<h2 id="clang">C Language Support</h2>
-<p>
-The FFI library has a built-in C&nbsp;parser with a minimal memory
-footprint. It's used by the <a href="ext_ffi_api.html">ffi.* library
-functions</a> to declare C&nbsp;types or external symbols.
-</p>
-<p>
-Its only purpose is to parse C&nbsp;declarations, as found e.g. in
-C&nbsp;header files. Although it does evaluate constant expressions,
-it's <em>not</em> a C&nbsp;compiler. The body of <tt>inline</tt>
-C&nbsp;function definitions is simply ignored.
-</p>
-<p>
-Also, this is <em>not</em> a validating C&nbsp;parser. It expects and
-accepts correctly formed C&nbsp;declarations, but it may choose to
-ignore bad declarations or show rather generic error messages. If in
-doubt, please check the input against your favorite C&nbsp;compiler.
-</p>
-<p>
-The C&nbsp;parser complies with the <b>C99 language standard</b> plus
-the following extensions:
-</p>
-<ul>
-
-<li>The <tt>'\e'</tt> escape in character and string literals.</li>
-
-<li>The C99/C++ boolean type, declared with the keywords <tt>bool</tt>
-or <tt>_Bool</tt>.</li>
-
-<li>Complex numbers, declared with the keywords <tt>complex</tt> or
-<tt>_Complex</tt>.</li>
-
-<li>Two complex number types: <tt>complex</tt> (aka
-<tt>complex&nbsp;double</tt>) and <tt>complex&nbsp;float</tt>.</li>
-
-<li>Vector types, declared with the GCC <tt>mode</tt> or
-<tt>vector_size</tt> attribute.</li>
-
-<li>Unnamed ('transparent') <tt>struct</tt>/<tt>union</tt> fields
-inside a <tt>struct</tt>/<tt>union</tt>.</li>
-
-<li>Incomplete <tt>enum</tt> declarations, handled like incomplete
-<tt>struct</tt> declarations.</li>
-
-<li>Unnamed <tt>enum</tt> fields inside a
-<tt>struct</tt>/<tt>union</tt>. This is similar to a scoped C++
-<tt>enum</tt>, except that declared constants are visible in the
-global namespace, too.</li>
-
-<li>Scoped <tt>static&nbsp;const</tt> declarations inside a
-<tt>struct</tt>/<tt>union</tt> (from C++).</li>
-
-<li>Zero-length arrays (<tt>[0]</tt>), empty
-<tt>struct</tt>/<tt>union</tt>, variable-length arrays (VLA,
-<tt>[?]</tt>) and variable-length structs (VLS, with a trailing
-VLA).</li>
-
-<li>C++ reference types (<tt>int&nbsp;&amp;x</tt>).</li>
-
-<li>Alternate GCC keywords with '<tt>__</tt>', e.g.
-<tt>__const__</tt>.</li>
-
-<li>GCC <tt>__attribute__</tt> with the following attributes:
-<tt>aligned</tt>, <tt>packed</tt>, <tt>mode</tt>,
-<tt>vector_size</tt>, <tt>cdecl</tt>, <tt>fastcall</tt>,
-<tt>stdcall</tt>, <tt>thiscall</tt>.</li>
-
-<li>The GCC <tt>__extension__</tt> keyword and the GCC
-<tt>__alignof__</tt> operator.</li>
-
-<li>GCC <tt>__asm__("symname")</tt> symbol name redirection for
-function declarations.</li>
-
-<li>MSVC keywords for fixed-length types: <tt>__int8</tt>,
-<tt>__int16</tt>, <tt>__int32</tt> and <tt>__int64</tt>.</li>
-
-<li>MSVC <tt>__cdecl</tt>, <tt>__fastcall</tt>, <tt>__stdcall</tt>,
-<tt>__thiscall</tt>, <tt>__ptr32</tt>, <tt>__ptr64</tt>,
-<tt>__declspec(align(n))</tt> and <tt>#pragma&nbsp;pack</tt>.</li>
-
-<li>All other GCC/MSVC-specific attributes are ignored.</li>
-
-</ul>
-<p>
-The following C&nbsp;types are pre-defined by the C&nbsp;parser (like
-a <tt>typedef</tt>, except re-declarations will be ignored):
-</p>
-<ul>
-
-<li>Vararg handling: <tt>va_list</tt>, <tt>__builtin_va_list</tt>,
-<tt>__gnuc_va_list</tt>.</li>
-
-<li>From <tt>&lt;stddef.h&gt;</tt>: <tt>ptrdiff_t</tt>,
-<tt>size_t</tt>, <tt>wchar_t</tt>.</li>
-
-<li>From <tt>&lt;stdint.h&gt;</tt>: <tt>int8_t</tt>, <tt>int16_t</tt>,
-<tt>int32_t</tt>, <tt>int64_t</tt>, <tt>uint8_t</tt>,
-<tt>uint16_t</tt>, <tt>uint32_t</tt>, <tt>uint64_t</tt>,
-<tt>intptr_t</tt>, <tt>uintptr_t</tt>.</li>
-
-</ul>
-<p>
-You're encouraged to use these types in preference to
-compiler-specific extensions or target-dependent standard types.
-E.g. <tt>char</tt> differs in signedness and <tt>long</tt> differs in
-size, depending on the target architecture and platform ABI.
-</p>
-<p>
-The following C&nbsp;features are <b>not</b> supported:
-</p>
-<ul>
-
-<li>A declaration must always have a type specifier; it doesn't
-default to an <tt>int</tt> type.</li>
-
-<li>Old-style empty function declarations (K&amp;R) are not allowed.
-All C&nbsp;functions must have a proper prototype declaration. A
-function declared without parameters (<tt>int&nbsp;foo();</tt>) is
-treated as a function taking zero arguments, like in C++.</li>
-
-<li>The <tt>long double</tt> C&nbsp;type is parsed correctly, but
-there's no support for the related conversions, accesses or arithmetic
-operations.</li>
-
-<li>Wide character strings and character literals are not
-supported.</li>
-
-<li><a href="#status">See below</a> for features that are currently
-not implemented.</li>
-
-</ul>
-
-<h2 id="convert">C Type Conversion Rules</h2>
-
-<h3 id="convert_tolua">Conversions from C&nbsp;types to Lua objects</h3>
-<p>
-These conversion rules apply for <em>read accesses</em> to
-C&nbsp;types: indexing pointers, arrays or
-<tt>struct</tt>/<tt>union</tt> types; reading external variables or
-constant values; retrieving return values from C&nbsp;calls:
-</p>
-<table class="convtable">
-<tr class="convhead">
-<td class="convin">Input</td>
-<td class="convop">Conversion</td>
-<td class="convout">Output</td>
-</tr>
-<tr class="odd separate">
-<td class="convin"><tt>int8_t</tt>, <tt>int16_t</tt></td><td class="convop">&rarr;<sup>sign-ext</sup> <tt>int32_t</tt> &rarr; <tt>double</tt></td><td class="convout">number</td></tr>
-<tr class="even">
-<td class="convin"><tt>uint8_t</tt>, <tt>uint16_t</tt></td><td class="convop">&rarr;<sup>zero-ext</sup> <tt>int32_t</tt> &rarr; <tt>double</tt></td><td class="convout">number</td></tr>
-<tr class="odd">
-<td class="convin"><tt>int32_t</tt>, <tt>uint32_t</tt></td><td class="convop">&rarr; <tt>double</tt></td><td class="convout">number</td></tr>
-<tr class="even">
-<td class="convin"><tt>int64_t</tt>, <tt>uint64_t</tt></td><td class="convop">boxed value</td><td class="convout">64 bit int cdata</td></tr>
-<tr class="odd separate">
-<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr; <tt>double</tt></td><td class="convout">number</td></tr>
-<tr class="even separate">
-<td class="convin"><tt>bool</tt></td><td class="convop">0 &rarr; <tt>false</tt>, otherwise <tt>true</tt></td><td class="convout">boolean</td></tr>
-<tr class="odd separate">
-<td class="convin"><tt>enum</tt></td><td class="convop">boxed value</td><td class="convout">enum cdata</td></tr>
-<tr class="even">
-<td class="convin">Complex number</td><td class="convop">boxed value</td><td class="convout">complex cdata</td></tr>
-<tr class="odd">
-<td class="convin">Vector</td><td class="convop">boxed value</td><td class="convout">vector cdata</td></tr>
-<tr class="even">
-<td class="convin">Pointer</td><td class="convop">boxed value</td><td class="convout">pointer cdata</td></tr>
-<tr class="odd separate">
-<td class="convin">Array</td><td class="convop">boxed reference</td><td class="convout">reference cdata</td></tr>
-<tr class="even">
-<td class="convin"><tt>struct</tt>/<tt>union</tt></td><td class="convop">boxed reference</td><td class="convout">reference cdata</td></tr>
-</table>
-<p>
-Bitfields are treated like their underlying type.
-</p>
-<p>
-Reference types are dereferenced <em>before</em> a conversion can take
-place &mdash; the conversion is applied to the C&nbsp;type pointed to
-by the reference.
-</p>
-
-<h3 id="convert_fromlua">Conversions from Lua objects to C&nbsp;types</h3>
-<p>
-These conversion rules apply for <em>write accesses</em> to
-C&nbsp;types: indexing pointers, arrays or
-<tt>struct</tt>/<tt>union</tt> types; initializing cdata objects;
-casts to C&nbsp;types; writing to external variables; passing
-arguments to C&nbsp;calls:
-</p>
-<table class="convtable">
-<tr class="convhead">
-<td class="convin">Input</td>
-<td class="convop">Conversion</td>
-<td class="convout">Output</td>
-</tr>
-<tr class="odd separate">
-<td class="convin">number</td><td class="convop">&rarr;</td><td class="convout"><tt>double</tt></td></tr>
-<tr class="even">
-<td class="convin">boolean</td><td class="convop"><tt>false</tt> &rarr; 0, <tt>true</tt> &rarr; 1</td><td class="convout"><tt>bool</tt></td></tr>
-<tr class="odd separate">
-<td class="convin">nil</td><td class="convop"><tt>NULL</tt> &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="even">
-<td class="convin">lightuserdata</td><td class="convop">lightuserdata address &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="odd">
-<td class="convin">userdata</td><td class="convop">userdata payload &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="even">
-<td class="convin">io.* file</td><td class="convop">get FILE * handle &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="odd separate">
-<td class="convin">string</td><td class="convop">match against <tt>enum</tt> constant</td><td class="convout"><tt>enum</tt></td></tr>
-<tr class="even">
-<td class="convin">string</td><td class="convop">copy string data + zero-byte</td><td class="convout"><tt>int8_t[]</tt>, <tt>uint8_t[]</tt></td></tr>
-<tr class="odd">
-<td class="convin">string</td><td class="convop">string data &rarr;</td><td class="convout"><tt>const char[]</tt></td></tr>
-<tr class="even separate">
-<td class="convin">function</td><td class="convop"><a href="#callback">create callback</a> &rarr;</td><td class="convout">C function type</td></tr>
-<tr class="odd separate">
-<td class="convin">table</td><td class="convop"><a href="#init_table">table initializer</a></td><td class="convout">Array</td></tr>
-<tr class="even">
-<td class="convin">table</td><td class="convop"><a href="#init_table">table initializer</a></td><td class="convout"><tt>struct</tt>/<tt>union</tt></td></tr>
-<tr class="odd separate">
-<td class="convin">cdata</td><td class="convop">cdata payload &rarr;</td><td class="convout">C type</td></tr>
-</table>
-<p>
-If the result type of this conversion doesn't match the
-C&nbsp;type of the destination, the
-<a href="#convert_between">conversion rules between C&nbsp;types</a>
-are applied.
-</p>
-<p>
-Reference types are immutable after initialization ("no re-seating of
-references"). For initialization purposes or when passing values to
-reference parameters, they are treated like pointers. Note that unlike
-in C++, there's no way to implement automatic reference generation of
-variables under the Lua language semantics. If you want to call a
-function with a reference parameter, you need to explicitly pass a
-one-element array.
-</p>
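-<p>
-A sketch of the one-element array idiom, assuming a hypothetical
-C&nbsp;function <tt>void inc(int &amp;x)</tt> bound through a library
-handle <tt>lib</tt>:
-</p>
-<pre class="code">
-ffi.cdef[[
-void inc(int &amp;x);
-]]
-local box = ffi.new("int[1]", 41)  -- the one-element array stands in for the reference
-lib.inc(box)                       -- passed like a pointer to the reference parameter
-print(box[0])                      --> 42, if inc() increments its argument
-</pre>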
-
-<h3 id="convert_between">Conversions between C&nbsp;types</h3>
-<p>
-These conversion rules are more or less the same as the standard
-C&nbsp;conversion rules. Some rules only apply to casts, or require
-pointer or type compatibility:
-</p>
-<table class="convtable">
-<tr class="convhead">
-<td class="convin">Input</td>
-<td class="convop">Conversion</td>
-<td class="convout">Output</td>
-</tr>
-<tr class="odd separate">
-<td class="convin">Signed integer</td><td class="convop">&rarr;<sup>narrow or sign-extend</sup></td><td class="convout">Integer</td></tr>
-<tr class="even">
-<td class="convin">Unsigned integer</td><td class="convop">&rarr;<sup>narrow or zero-extend</sup></td><td class="convout">Integer</td></tr>
-<tr class="odd">
-<td class="convin">Integer</td><td class="convop">&rarr;<sup>round</sup></td><td class="convout"><tt>double</tt>, <tt>float</tt></td></tr>
-<tr class="even">
-<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr;<sup>trunc</sup> <tt>int32_t</tt> &rarr;<sup>narrow</sup></td><td class="convout"><tt>(u)int8_t</tt>, <tt>(u)int16_t</tt></td></tr>
-<tr class="odd">
-<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr;<sup>trunc</sup></td><td class="convout"><tt>(u)int32_t</tt>, <tt>(u)int64_t</tt></td></tr>
-<tr class="even">
-<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr;<sup>round</sup></td><td class="convout"><tt>float</tt>, <tt>double</tt></td></tr>
-<tr class="odd separate">
-<td class="convin">Number</td><td class="convop">n == 0 &rarr; 0, otherwise 1</td><td class="convout"><tt>bool</tt></td></tr>
-<tr class="even">
-<td class="convin"><tt>bool</tt></td><td class="convop"><tt>false</tt> &rarr; 0, <tt>true</tt> &rarr; 1</td><td class="convout">Number</td></tr>
-<tr class="odd separate">
-<td class="convin">Complex number</td><td class="convop">convert real part</td><td class="convout">Number</td></tr>
-<tr class="even">
-<td class="convin">Number</td><td class="convop">convert real part, imag = 0</td><td class="convout">Complex number</td></tr>
-<tr class="odd">
-<td class="convin">Complex number</td><td class="convop">convert real and imag part</td><td class="convout">Complex number</td></tr>
-<tr class="even separate">
-<td class="convin">Number</td><td class="convop">convert scalar and replicate</td><td class="convout">Vector</td></tr>
-<tr class="odd">
-<td class="convin">Vector</td><td class="convop">copy (same size)</td><td class="convout">Vector</td></tr>
-<tr class="even separate">
-<td class="convin"><tt>struct</tt>/<tt>union</tt></td><td class="convop">take base address (compat)</td><td class="convout">Pointer</td></tr>
-<tr class="odd">
-<td class="convin">Array</td><td class="convop">take base address (compat)</td><td class="convout">Pointer</td></tr>
-<tr class="even">
-<td class="convin">Function</td><td class="convop">take function address</td><td class="convout">Function pointer</td></tr>
-<tr class="odd separate">
-<td class="convin">Number</td><td class="convop">convert via <tt>uintptr_t</tt> (cast)</td><td class="convout">Pointer</td></tr>
-<tr class="even">
-<td class="convin">Pointer</td><td class="convop">convert address (compat/cast)</td><td class="convout">Pointer</td></tr>
-<tr class="odd">
-<td class="convin">Pointer</td><td class="convop">convert address (cast)</td><td class="convout">Integer</td></tr>
-<tr class="even">
-<td class="convin">Array</td><td class="convop">convert base address (cast)</td><td class="convout">Integer</td></tr>
-<tr class="odd separate">
-<td class="convin">Array</td><td class="convop">copy (compat)</td><td class="convout">Array</td></tr>
-<tr class="even">
-<td class="convin"><tt>struct</tt>/<tt>union</tt></td><td class="convop">copy (identical type)</td><td class="convout"><tt>struct</tt>/<tt>union</tt></td></tr>
-</table>
-<p>
-Bitfields or <tt>enum</tt> types are treated like their underlying
-type.
-</p>
-<p>
-Conversions not listed above will raise an error. E.g. it's not
-possible to convert a pointer to a complex number or vice versa.
-</p>
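-<p>
-An illustrative sketch of a few of these rules:
-</p>
-<pre class="code">
-local i = ffi.new("int32_t", 3.7)   -- double -> int32_t: truncates to 3
-local b = ffi.new("bool", 42)       -- number -> bool: non-zero becomes true
-local a = ffi.new("int[4]")
-local p = ffi.cast("void *", a)     -- array -> pointer: takes the base address
-local u = ffi.cast("uintptr_t", p)  -- pointer -> integer: allowed for casts
-</pre>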
-
-<h3 id="convert_vararg">Conversions for vararg C&nbsp;function arguments</h3>
-<p>
-The following default conversion rules apply when passing Lua objects
-to the variable argument part of vararg C&nbsp;functions:
-</p>
-<table class="convtable">
-<tr class="convhead">
-<td class="convin">Input</td>
-<td class="convop">Conversion</td>
-<td class="convout">Output</td>
-</tr>
-<tr class="odd separate">
-<td class="convin">number</td><td class="convop">&rarr;</td><td class="convout"><tt>double</tt></td></tr>
-<tr class="even">
-<td class="convin">boolean</td><td class="convop"><tt>false</tt> &rarr; 0, <tt>true</tt> &rarr; 1</td><td class="convout"><tt>bool</tt></td></tr>
-<tr class="odd separate">
-<td class="convin">nil</td><td class="convop"><tt>NULL</tt> &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="even">
-<td class="convin">userdata</td><td class="convop">userdata payload &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="odd">
-<td class="convin">lightuserdata</td><td class="convop">lightuserdata address &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
-<tr class="even separate">
-<td class="convin">string</td><td class="convop">string data &rarr;</td><td class="convout"><tt>const char *</tt></td></tr>
-<tr class="odd separate">
-<td class="convin"><tt>float</tt> cdata</td><td class="convop">&rarr;</td><td class="convout"><tt>double</tt></td></tr>
-<tr class="even">
-<td class="convin">Array cdata</td><td class="convop">take base address</td><td class="convout">Element pointer</td></tr>
-<tr class="odd">
-<td class="convin"><tt>struct</tt>/<tt>union</tt> cdata</td><td class="convop">take base address</td><td class="convout"><tt>struct</tt>/<tt>union</tt> pointer</td></tr>
-<tr class="even">
-<td class="convin">Function cdata</td><td class="convop">take function address</td><td class="convout">Function pointer</td></tr>
-<tr class="odd">
-<td class="convin">Any other cdata</td><td class="convop">no conversion</td><td class="convout">C type</td></tr>
-</table>
-<p>
-To pass a Lua object, other than a cdata object, as a specific type,
-you need to override the conversion rules: create a temporary cdata
-object with a constructor or a cast and initialize it with the value
-to pass.
-</p>
-<p>
-Assuming <tt>x</tt> is a Lua number, here's how to pass it as an
-integer to a vararg function:
-</p>
-<pre class="code">
-ffi.cdef[[
-int printf(const char *fmt, ...);
-]]
-ffi.C.printf("integer value: %d\n", ffi.new("int", x))
-</pre>
-<p>
-If you don't do this, the default Lua number &rarr; <tt>double</tt>
-conversion rule applies. A vararg C&nbsp;function expecting an integer
-will see a garbled or uninitialized value.
-</p>
-
-<h2 id="init">Initializers</h2>
-<p>
-Creating a cdata object with
-<a href="ext_ffi_api.html#ffi_new"><tt>ffi.new()</tt></a> or the
-equivalent constructor syntax always initializes its contents, too.
-Different rules apply, depending on the number of optional
-initializers and the C&nbsp;types involved:
-</p>
-<ul>
-<li>If no initializers are given, the object is filled with zero bytes.</li>
-
-<li>Scalar types (numbers and pointers) accept a single initializer.
-The Lua object is <a href="#convert_fromlua">converted to the scalar
-C&nbsp;type</a>.</li>
-
-<li>Valarrays (complex numbers and vectors) are treated like scalars
-when a single initializer is given. Otherwise they are treated like
-regular arrays.</li>
-
-<li>Aggregate types (arrays and structs) accept either a single cdata
-initializer of the same type (copy constructor), a single
-<a href="#init_table">table initializer</a>, or a flat list of
-initializers.</li>
-
-<li>The elements of an array are initialized, starting at index zero.
-If a single initializer is given for an array, it's repeated for all
-remaining elements. This doesn't happen if two or more initializers
-are given: all remaining uninitialized elements are filled with zero
-bytes (see the examples after this list).</li>
-
-<li>Byte arrays may also be initialized with a Lua string. This copies
-the whole string plus a terminating zero-byte. The copy stops early only
-if the array has a known, fixed size.</li>
-
-<li>The fields of a <tt>struct</tt> are initialized in the order of
-their declaration. Uninitialized fields are filled with zero
-bytes.</li>
-
-<li>Only the first field of a <tt>union</tt> can be initialized with a
-flat initializer.</li>
-
-<li>Elements or fields which are aggregates themselves are initialized
-with a <em>single</em> initializer, but this may be a table
-initializer or a compatible aggregate.</li>
-
-<li>Excess initializers cause an error.</li>
-
-</ul>
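-<p>
-A few illustrative cases for the flat-initializer rules above (the
-table-based variants follow in the next section):
-</p>
-<pre class="code">
-ffi.new("int[3]", 1)       --> 1, 1, 1  (a single initializer is repeated)
-ffi.new("int[3]", 1, 2)    --> 1, 2, 0  (two or more: the rest is zero-filled)
-ffi.new("char[8]", "abc")  --  copies "abc" plus a terminating zero-byte
-</pre>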
-
-<h2 id="init_table">Table Initializers</h2>
-<p>
-The following rules apply if a Lua table is used to initialize an
-Array or a <tt>struct</tt>/<tt>union</tt>:
-</p>
-<ul>
-
-<li>If the table index <tt>[0]</tt> is non-<tt>nil</tt>, then the
-table is assumed to be zero-based. Otherwise it's assumed to be
-one-based.</li>
-
-<li>Array elements, starting at index zero, are initialized one-by-one
-with the consecutive table elements, starting at either index
-<tt>[0]</tt> or <tt>[1]</tt>. This process stops at the first
-<tt>nil</tt> table element.</li>
-
-<li>If exactly one array element was initialized, it's repeated for
-all the remaining elements. Otherwise all remaining uninitialized
-elements are filled with zero bytes.</li>
-
-<li>The above logic only applies to arrays with a known fixed size.
-A VLA is only initialized with the element(s) given in the table.
-Depending on the use case, you may need to explicitly add a
-<tt>NULL</tt> or <tt>0</tt> terminator to a VLA.</li>
-
-<li>A <tt>struct</tt>/<tt>union</tt> can be initialized in the
-order of the declaration of its fields. Each field is initialized with
-consecutive table elements, starting at either index <tt>[0]</tt>
-or <tt>[1]</tt>. This process stops at the first <tt>nil</tt> table
-element.</li>
-
-<li>Otherwise, if neither index <tt>[0]</tt> nor <tt>[1]</tt> is present,
-a <tt>struct</tt>/<tt>union</tt> is initialized by looking up each field
-name (as a string key) in the table. Each non-<tt>nil</tt> value is
-used to initialize the corresponding field.</li>
-
-<li>Uninitialized fields of a <tt>struct</tt> are filled with zero
-bytes, except for the trailing VLA of a VLS.</li>
-
-<li>Initialization of a <tt>union</tt> stops after one field has been
-initialized. If no field has been initialized, the <tt>union</tt> is
-filled with zero bytes.</li>
-
-<li>Elements or fields which are aggregates themselves are initialized
-with a <em>single</em> initializer, but this may be a nested table
-initializer (or a compatible aggregate).</li>
-
-<li>Excess initializers for an array cause an error. Excess
-initializers for a <tt>struct</tt>/<tt>union</tt> are ignored.
-Unrelated table entries are ignored, too.</li>
-
-</ul>
-<p>
-Example:
-</p>
-<pre class="code">
-local ffi = require("ffi")
-
-ffi.cdef[[
-struct foo { int a, b; };
-union bar { int i; double d; };
-struct nested { int x; struct foo y; };
-]]
-
-ffi.new("int[3]", {}) --> 0, 0, 0
-ffi.new("int[3]", {1}) --> 1, 1, 1
-ffi.new("int[3]", {1,2}) --> 1, 2, 0
-ffi.new("int[3]", {1,2,3}) --> 1, 2, 3
-ffi.new("int[3]", {[0]=1}) --> 1, 1, 1
-ffi.new("int[3]", {[0]=1,2}) --> 1, 2, 0
-ffi.new("int[3]", {[0]=1,2,3}) --> 1, 2, 3
-ffi.new("int[3]", {[0]=1,2,3,4}) --> error: too many initializers
-
-ffi.new("struct foo", {}) --> a = 0, b = 0
-ffi.new("struct foo", {1}) --> a = 1, b = 0
-ffi.new("struct foo", {1,2}) --> a = 1, b = 2
-ffi.new("struct foo", {[0]=1,2}) --> a = 1, b = 2
-ffi.new("struct foo", {b=2}) --> a = 0, b = 2
-ffi.new("struct foo", {a=1,b=2,c=3}) --> a = 1, b = 2 'c' is ignored
-
-ffi.new("union bar", {}) --> i = 0, d = 0.0
-ffi.new("union bar", {1}) --> i = 1, d = ?
-ffi.new("union bar", {[0]=1,2}) --> i = 1, d = ? '2' is ignored
-ffi.new("union bar", {d=2}) --> i = ?, d = 2.0
-
-ffi.new("struct nested", {1,{2,3}}) --> x = 1, y.a = 2, y.b = 3
-ffi.new("struct nested", {x=1,y={2,3}}) --> x = 1, y.a = 2, y.b = 3
-</pre>
-
-<h2 id="cdata_ops">Operations on cdata Objects</h2>
-<p>
-All of the standard Lua operators can be applied to cdata objects or a
-mix of a cdata object and another Lua object. The following list shows
-the pre-defined operations.
-</p>
-<p>
-Reference types are dereferenced <em>before</em> performing each of
-the operations below &mdash; the operation is applied to the
-C&nbsp;type pointed to by the reference.
-</p>
-<p>
-The pre-defined operations are always tried first before deferring to a
-metamethod or index table (if any) for the corresponding ctype (except
-for <tt>__new</tt>). An error is raised if the metamethod lookup or
-index table lookup fails.
-</p>
-
-<h3 id="cdata_array">Indexing a cdata object</h3>
-<ul>
-
-<li><b>Indexing a pointer/array</b>: a cdata pointer/array can be
-indexed by a cdata number or a Lua number. The element address is
-computed as the base address plus the number value multiplied by the
-element size in bytes. A read access loads the element value and
-<a href="#convert_tolua">converts it to a Lua object</a>. A write
-access <a href="#convert_fromlua">converts a Lua object to the element
-type</a> and stores the converted value to the element. An error is
-raised if the element size is undefined or a write access to a
-constant element is attempted.</li>
-
-<li><b>Dereferencing a <tt>struct</tt>/<tt>union</tt> field</b>: a
-cdata <tt>struct</tt>/<tt>union</tt> or a pointer to a
-<tt>struct</tt>/<tt>union</tt> can be dereferenced by a string key,
-giving the field name. The field address is computed as the base
-address plus the relative offset of the field. A read access loads the
-field value and <a href="#convert_tolua">converts it to a Lua
-object</a>. A write access <a href="#convert_fromlua">converts a Lua
-object to the field type</a> and stores the converted value to the
-field. An error is raised if a write access to a constant
-<tt>struct</tt>/<tt>union</tt> or a constant field is attempted.
-Scoped enum constants or static constants are treated like a constant
-field.</li>
-
-<li><b>Indexing a complex number</b>: a complex number can be indexed
-either by a cdata number or a Lua number with the values 0 or 1, or by
-the strings <tt>"re"</tt> or <tt>"im"</tt>. A read access loads the
-real part (<tt>[0]</tt>, <tt>.re</tt>) or the imaginary part
-(<tt>[1]</tt>, <tt>.im</tt>) part of a complex number and
-<a href="#convert_tolua">converts it to a Lua number</a>. The
-sub-parts of a complex number are immutable &mdash; assigning to an
-index of a complex number raises an error. Accessing out-of-bound
-indexes returns unspecified results, but is guaranteed not to trigger
-memory access violations.</li>
-
-<li><b>Indexing a vector</b>: a vector is treated like an array for
-indexing purposes, except the vector elements are immutable &mdash;
-assigning to an index of a vector raises an error.</li>
-
-</ul>
-<p>
-A ctype object can be indexed with a string key, too. The only
-pre-defined operation is reading scoped constants of
-<tt>struct</tt>/<tt>union</tt> types. All other accesses defer
-to the corresponding metamethods or index tables (if any).
-</p>
-<p>
-Note: since there's (deliberately) no address-of operator, a cdata
-object holding a value type is effectively immutable after
-initialization. The JIT compiler benefits from this fact when applying
-certain optimizations.
-</p>
-<p>
-As a consequence, the <em>elements</em> of complex numbers and
-vectors are immutable. But the elements of an aggregate holding these
-types <em>may</em>, of course, be modified. That is, you cannot assign to
-<tt>foo.c.im</tt>, but you can assign a (newly created) complex number
-to <tt>foo.c</tt>.
-</p>
-<p>
-The JIT compiler implements strict aliasing rules: accesses to different
-types do <b>not</b> alias, except for differences in signedness (this
-applies even to <tt>char</tt> pointers, unlike C99). Type punning
-through unions is explicitly detected and allowed.
-</p>
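-<p>
-A small sketch of the sanctioned way to reinterpret bits, i.e. type
-punning through a union:
-</p>
-<pre class="code">
-local conv = ffi.new("union { double d; uint64_t u; }")
-conv.d = 1.0
-print(conv.u)  --> 4607182418800017408ULL, the raw IEEE-754 bits of 1.0
-</pre>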
-
-<h3 id="cdata_call">Calling a cdata object</h3>
-<ul>
-
-<li><b>Constructor</b>: a ctype object can be called and used as a
-<a href="ext_ffi_api.html#ffi_new">constructor</a>. This is equivalent
-to <tt>ffi.new(ct, ...)</tt>, unless a <tt>__new</tt> metamethod is
-defined. The <tt>__new</tt> metamethod is called with the ctype object
-plus any other arguments passed to the constructor. Note that you have to
-use <tt>ffi.new</tt> inside it, since calling <tt>ct(...)</tt> would
-cause infinite recursion (see the sketch after this list).</li>
-
-<li><b>C&nbsp;function call</b>: a cdata function or cdata function
-pointer can be called. The passed arguments are
-<a href="#convert_fromlua">converted to the C&nbsp;types</a> of the
-parameters given by the function declaration. Arguments passed to the
-variable argument part of vararg C&nbsp;function use
-<a href="#convert_vararg">special conversion rules</a>. This
-C&nbsp;function is called and the return value (if any) is
-<a href="#convert_tolua">converted to a Lua object</a>.<br>
-On Windows/x86 systems, <tt>__stdcall</tt> functions are automatically
-detected and a function declared as <tt>__cdecl</tt> (the default) is
-silently fixed up after the first call.</li>
-
-</ul>
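-<p>
-A brief sketch of a <tt>__new</tt> metamethod, attached with
-<tt>ffi.metatype()</tt> to a made-up <tt>point_t</tt> type:
-</p>
-<pre class="code">
-ffi.cdef[[
-typedef struct { double x, y; } point_t;
-]]
-local point = ffi.metatype("point_t", {
-  __new = function(ct, x, y)
-    return ffi.new(ct, x or 0, y or 0)  -- calling ct(...) here would recurse forever
-  end,
-})
-local p = point(1, 2)  --> p.x == 1, p.y == 2
-</pre>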
-
-<h3 id="cdata_arith">Arithmetic on cdata objects</h3>
-<ul>
-
-<li><b>Pointer arithmetic</b>: a cdata pointer/array and a cdata
-number or a Lua number can be added or subtracted. The number must be
-on the right hand side for a subtraction. The result is a pointer of
-the same type with an address plus or minus the number value
-multiplied by the element size in bytes. An error is raised if the
-element size is undefined.</li>
-
-<li><b>Pointer difference</b>: two compatible cdata pointers/arrays
-can be subtracted. The result is the difference between their
-addresses, divided by the element size in bytes. An error is raised if
-the element size is undefined or zero.</li>
-
-<li><b>64&nbsp;bit integer arithmetic</b>: the standard arithmetic
-operators (<tt>+&nbsp;-&nbsp;*&nbsp;/&nbsp;%&nbsp;^</tt> and unary
-minus) can be applied to two cdata numbers, or a cdata number and a
-Lua number. If one of them is an <tt>uint64_t</tt>, the other side is
-converted to an <tt>uint64_t</tt> and an unsigned arithmetic operation
-is performed. Otherwise both sides are converted to an
-<tt>int64_t</tt> and a signed arithmetic operation is performed. The
-result is a boxed 64&nbsp;bit cdata object.<br>
-
-If one of the operands is an <tt>enum</tt> and the other operand is a
-string, the string is converted to the value of a matching <tt>enum</tt>
-constant before the above conversion.<br>
-
-These rules ensure that 64&nbsp;bit integers are "sticky". Any
-expression involving at least one 64&nbsp;bit integer operand results
-in another one. The undefined cases for the division, modulo and power
-operators return <tt>2LL&nbsp;^&nbsp;63</tt> or
-<tt>2ULL&nbsp;^&nbsp;63</tt>.<br>
-
-You'll have to explicitly convert a 64&nbsp;bit integer to a Lua
-number (e.g. for regular floating-point calculations) with
-<tt>tonumber()</tt>. But note this may incur a precision loss (see the
-sketch after this list).</li>
-
-</ul>
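-<p>
-A short sketch of the "sticky" 64&nbsp;bit rules:
-</p>
-<pre class="code">
-local x = 10LL
-print(x + 1)            --> 11LL   (the Lua number is converted to int64_t)
-print(x + 1ULL)         --> 11ULL  (uint64_t wins, unsigned arithmetic)
-print(tonumber(x) + 1)  --> 11     (back to a plain Lua number)
-</pre>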
-
-<h3 id="cdata_comp">Comparisons of cdata objects</h3>
-<ul>
-
-<li><b>Pointer comparison</b>: two compatible cdata pointers/arrays
-can be compared. The result is the same as an unsigned comparison of
-their addresses. <tt>nil</tt> is treated like a <tt>NULL</tt> pointer,
-which is compatible with any other pointer type.</li>
-
-<li><b>64&nbsp;bit integer comparison</b>: two cdata numbers, or a
-cdata number and a Lua number can be compared with each other. If one
-of them is an <tt>uint64_t</tt>, the other side is converted to an
-<tt>uint64_t</tt> and an unsigned comparison is performed. Otherwise
-both sides are converted to an <tt>int64_t</tt> and a signed
-comparison is performed.<br>
-
-If one of the operands is an <tt>enum</tt> and the other operand is a
-string, the string is converted to the value of a matching <tt>enum</tt>
-constant before the above conversion.</li>
-
-<li><b>Comparisons for equality/inequality</b> never raise an error.
-Even incompatible pointers can be compared for equality by address. Any
-other incompatible comparison (also with non-cdata objects) treats the
-two sides as unequal.</li>
-
-</ul>
-
-<h3 id="cdata_key">cdata objects as table keys</h3>
-<p>
-Lua tables may be indexed by cdata objects, but this doesn't provide
-any useful semantics &mdash; <b>cdata objects are unsuitable as table
-keys!</b>
-</p>
-<p>
-A cdata object is treated like any other garbage-collected object and
-is hashed and compared by its address for table indexing. Since
-there's no interning for cdata value types, the same value may be
-boxed in different cdata objects with different addresses. Thus
-<tt>t[1LL+1LL]</tt> and <tt>t[2LL]</tt> usually <b>do not</b> point to
-the same hash slot and they certainly <b>do not</b> point to the same
-hash slot as <tt>t[2]</tt>.
-</p>
-<p>
-It would seriously drive up implementation complexity and slow down
-the common case, if one were to add extra handling for by-value
-hashing and comparisons to Lua tables. Given the ubiquity of their use
-inside the VM, this is not acceptable.
-</p>
-<p>
-There are three viable alternatives, if you really need to use cdata
-objects as keys:
-</p>
-<ul>
-
-<li>If you can get by with the precision of Lua numbers
-(52&nbsp;bits), then use <tt>tonumber()</tt> on a cdata number or
-combine multiple fields of a cdata aggregate to a Lua number. Then use
-the resulting Lua number as a key when indexing tables.<br>
-One obvious benefit: <tt>t[tonumber(2LL)]</tt> <b>does</b> point to
-the same slot as <tt>t[2]</tt>.</li>
-
-<li>Otherwise use either <tt>tostring()</tt> on 64&nbsp;bit integers
-or complex numbers or combine multiple fields of a cdata aggregate to
-a Lua string (e.g. with
-<a href="ext_ffi_api.html#ffi_string"><tt>ffi.string()</tt></a>). Then
-use the resulting Lua string as a key when indexing tables.</li>
-
-<li>Create your own specialized hash table implementation using the
-C&nbsp;types provided by the FFI library, just like you would in
-C&nbsp;code. Ultimately this may give much better performance than the
-other alternatives or what a generic by-value hash table could
-possibly provide.</li>
-
-</ul>
-
-<h2 id="param">Parameterized Types</h2>
-<p>
-To facilitate some abstractions, the two functions
-<a href="ext_ffi_api.html#ffi_typeof"><tt>ffi.typeof</tt></a> and
-<a href="ext_ffi_api.html#ffi_cdef"><tt>ffi.cdef</tt></a> support
-parameterized types in C&nbsp;declarations. Note: none of the other API
-functions taking a cdecl allow this.
-</p>
-<p>
-Any place you can write a <b><tt>typedef</tt> name</b>, an
-<b>identifier</b> or a <b>number</b> in a declaration, you can write
-<tt>$</tt> (the dollar sign) instead. These placeholders are replaced in
-order of appearance with the arguments following the cdecl string:
-</p>
-<pre class="code">
--- Declare a struct with a parameterized field type and name:
-ffi.cdef([[
-typedef struct { $ $; } foo_t;
-]], type1, name1)
-
--- Anonymous struct with dynamic names:
-local bar_t = ffi.typeof("struct { int $, $; }", name1, name2)
--- Derived pointer type:
-local bar_ptr_t = ffi.typeof("$ *", bar_t)
-
--- Parameterized dimensions work even where a VLA won't work:
-local matrix_t = ffi.typeof("uint8_t[$][$]", width, height)
-</pre>
-<p>
-Caveat: this is <em>not</em> simple text substitution! A passed ctype or
-cdata object is treated like the underlying type, a passed string is
-considered an identifier and a number is considered a number. You must
-not mix this up: e.g. passing <tt>"int"</tt> as a string doesn't work in
-place of a type, you'd need to use <tt>ffi.typeof("int")</tt> instead.
-</p>
-<p>
-The main use for parameterized types is in libraries implementing abstract
-data types
-(<a href="http://www.freelists.org/post/luajit/ffi-type-of-pointer-to,8"><span class="ext">&raquo;</span>&nbsp;example</a>),
-similar to what can be achieved with C++ template metaprogramming.
-Another use case are derived types of anonymous structs, which avoids
-pollution of the global struct namespace.
-</p>
-<p>
-Please note that parameterized types are a nice tool and indispensable
-for certain use cases. But you'll want to use them sparingly in regular
-code; when all types are actually fixed, there's no need for them.
-</p>
-
-<h2 id="gc">Garbage Collection of cdata Objects</h2>
-<p>
-All explicitly (<tt>ffi.new()</tt>, <tt>ffi.cast()</tt> etc.) or
-implicitly (accessors) created cdata objects are garbage collected.
-You need to make sure that valid references to cdata objects are retained
-somewhere (on a Lua stack, in an upvalue or in a Lua table) while they are
-still in use. Once the last reference to a cdata object is gone, the
-garbage collector will automatically free the memory used by it (at
-the end of the next GC cycle).
-</p>
-<p>
-Please note that pointers themselves are cdata objects, however they
-are <b>not</b> followed by the garbage collector. So e.g. if you
-assign a cdata array to a pointer, you must keep the cdata object
-holding the array alive as long as the pointer is still in use:
-</p>
-<pre class="code">
-ffi.cdef[[
-typedef struct { int *a; } foo_t;
-]]
-
-local s = ffi.new("foo_t", ffi.new("int[10]")) -- <span style="color:#c00000;">WRONG!</span>
-
-local a = ffi.new("int[10]") -- <span style="color:#00a000;">OK</span>
-local s = ffi.new("foo_t", a)
--- Now do something with 's', but keep 'a' alive until you're done.
-</pre>
-<p>
-Similar rules apply for Lua strings which are implicitly converted to
-<tt>"const&nbsp;char&nbsp;*"</tt>: the string object itself must be
-referenced somewhere or it'll be garbage collected eventually. The
-pointer will then point to stale data, which may have already been
-overwritten. Note that <em>string literals</em> are automatically kept
-alive as long as the function containing them (actually its prototype)
-is not garbage collected.
-</p>
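-<p>
-A sketch of this pitfall with a dynamically created (non-literal)
-string:
-</p>
-<pre class="code">
-local p = ffi.cast("const char *", tostring(os.time()))  -- <span style="color:#c00000;">WRONG!</span>
--- Nothing keeps the string alive; 'p' may soon point to stale data.
-
-local str = tostring(os.time())                           -- <span style="color:#00a000;">OK</span>
-local p = ffi.cast("const char *", str)
--- Keep 'str' referenced for as long as 'p' is in use.
-</pre>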
-<p>
-Objects which are passed as an argument to an external C&nbsp;function
-are kept alive until the call returns. So it's generally safe to
-create temporary cdata objects in argument lists. This is a common
-idiom for <a href="#convert_vararg">passing specific C&nbsp;types to
-vararg functions</a>.
-</p>
-<p>
-Memory areas returned by C functions (e.g. from <tt>malloc()</tt>)
-must be manually managed, of course (or use
-<a href="ext_ffi_api.html#ffi_gc"><tt>ffi.gc()</tt></a>). Pointers to
-cdata objects are indistinguishable from pointers returned by C
-functions (which is one of the reasons why the GC cannot follow them).
-</p>
-
-<h2 id="callback">Callbacks</h2>
-<p>
-The LuaJIT FFI automatically generates special callback functions
-whenever a Lua function is converted to a C&nbsp;function pointer. This
-associates the generated callback function pointer with the C&nbsp;type
-of the function pointer and the Lua function object (closure).
-</p>
-<p>
-This can happen implicitly due to the usual conversions, e.g. when
-passing a Lua function to a function pointer argument. Or you can use
-<tt>ffi.cast()</tt> to explicitly cast a Lua function to a
-C&nbsp;function pointer.
-</p>
-<p>
-Currently only certain C&nbsp;function types can be used as callback
-functions. Neither C&nbsp;vararg functions nor functions with
-pass-by-value aggregate argument or result types are supported. There
-are no restrictions for the kind of Lua functions that can be called
-from the callback &mdash; no checks for the proper number of arguments
-are made. The return value of the Lua function will be converted to the
-result type and an error will be thrown for invalid conversions.
-</p>
-<p>
-It's allowed to throw errors across a callback invocation, but it's not
-advisable in general. Do this only if you know that the C&nbsp;function
-which called the callback copes with the forced stack unwinding and doesn't
-leak resources.
-</p>
-<p>
-One thing that's not allowed is to let an FFI call into a C&nbsp;function
-get JIT-compiled, which in turn calls a callback, calling into Lua again.
-Usually this attempt is caught by the interpreter first and the
-C&nbsp;function is blacklisted for compilation.
-</p>
-<p>
-However, this heuristic may fail under specific circumstances: e.g. a
-message polling function might not run Lua callbacks right away and the call
-gets JIT-compiled. If it later happens to call back into Lua (e.g. a rarely
-invoked error callback), you'll get a VM PANIC with the message
-<tt>"bad callback"</tt>. Then you'll need to manually turn off
-JIT-compilation with
-<a href="ext_jit.html#jit_onoff_func"><tt>jit.off()</tt></a> for the
-surrounding Lua function that invokes such a message polling function (or
-similar).
-</p>
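-<p>
-A sketch of that workaround, assuming a hypothetical polling function
-<tt>lib.poll()</tt> and a callback <tt>on_event</tt>:
-</p>
-<pre class="code">
-local function pump()
-  lib.poll(on_event)  -- only rarely calls back into Lua
-end
-jit.off(pump)         -- keep this function interpreted to avoid "bad callback"
-</pre>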
-
-<h3 id="callback_resources">Callback resource handling</h3>
-<p>
-Callbacks take up resources &mdash; you can only have a limited number
-of them at the same time (500&nbsp;-&nbsp;1000, depending on the
-architecture). The associated Lua functions are anchored to prevent
-garbage collection, too.
-</p>
-<p>
-<b>Callbacks due to implicit conversions are permanent!</b> There is no
-way to guess their lifetime, since the C&nbsp;side might store the
-function pointer for later use (typical for GUI toolkits). The associated
-resources cannot be reclaimed until termination:
-</p>
-<pre class="code">
-ffi.cdef[[
-typedef int (__stdcall *WNDENUMPROC)(void *hwnd, intptr_t l);
-int EnumWindows(WNDENUMPROC func, intptr_t l);
-]]
-
--- Implicit conversion to a callback via function pointer argument.
-local count = 0
-ffi.C.EnumWindows(function(hwnd, l)
- count = count + 1
- return true
-end, 0)
--- The callback is permanent and its resources cannot be reclaimed!
--- Ok, so this may not be a problem, if you do this only once.
-</pre>
-<p>
-Note: this example shows that you <em>must</em> properly declare
-<tt>__stdcall</tt> callbacks on Windows/x86 systems. The calling
-convention cannot be automatically detected, unlike for
-<tt>__stdcall</tt> calls <em>to</em> Windows functions.
-</p>
-<p>
-For some use cases it's necessary to free up the resources or to
-dynamically redirect callbacks. Use an explicit cast to a
-C&nbsp;function pointer and keep the resulting cdata object. Then use
-the <a href="ext_ffi_api.html#callback_free"><tt>cb:free()</tt></a>
-or <a href="ext_ffi_api.html#callback_set"><tt>cb:set()</tt></a> methods
-on the cdata object:
-</p>
-<pre class="code">
--- Explicitly convert to a callback via cast.
-local count = 0
-local cb = ffi.cast("WNDENUMPROC", function(hwnd, l)
- count = count + 1
- return true
-end)
-
--- Pass it to a C function.
-ffi.C.EnumWindows(cb, 0)
--- EnumWindows doesn't need the callback after it returns, so free it.
-
-cb:free()
--- The callback function pointer is no longer valid and its resources
--- will be reclaimed. The created Lua closure will be garbage collected.
-</pre>
-
-<h3 id="callback_performance">Callback performance</h3>
-<p>
-<b>Callbacks are slow!</b> First, the C&nbsp;to Lua transition itself
-has an unavoidable cost, similar to a <tt>lua_call()</tt> or
-<tt>lua_pcall()</tt>. Argument and result marshalling add to that cost.
-And finally, neither the C&nbsp;compiler nor LuaJIT can inline or
-optimize across the language barrier and hoist repeated computations out
-of a callback function.
-</p>
-<p>
-Do not use callbacks for performance-sensitive work: e.g. consider a
-numerical integration routine which takes a user-defined function to
-integrate over. It's a bad idea to call a user-defined Lua function from
-C&nbsp;code millions of times. The callback overhead will be absolutely
-detrimental for performance.
-</p>
-<p>
-It's considerably faster to write the numerical integration routine
-itself in Lua &mdash; the JIT compiler will be able to inline the
-user-defined function and optimize it together with its calling context,
-with very competitive performance.
-</p>
-<p>
-As a general guideline: <b>use callbacks only when you must</b>, because
-of existing C&nbsp;APIs. E.g. callback performance is irrelevant for a
-GUI application, which waits for user input most of the time, anyway.
-</p>
-<p>
-For new designs <b>avoid push-style APIs</b>: a C&nbsp;function repeatedly
-calling a callback for each result. Instead <b>use pull-style APIs</b>:
-call a C&nbsp;function repeatedly to get a new result. Calls from Lua
-to C via the FFI are much faster than the other way round. Most well-designed
-libraries already use pull-style APIs (read/write, get/put).
-</p>
-
-<h2 id="clib">C Library Namespaces</h2>
-<p>
-A C&nbsp;library namespace is a special kind of object which allows
-access to the symbols contained in shared libraries or the default
-symbol namespace. The default
-<a href="ext_ffi_api.html#ffi_C"><tt>ffi.C</tt></a> namespace is
-automatically created when the FFI library is loaded. C&nbsp;library
-namespaces for specific shared libraries may be created with the
-<a href="ext_ffi_api.html#ffi_load"><tt>ffi.load()</tt></a> API
-function.
-</p>
-<p>
-Indexing a C&nbsp;library namespace object with a symbol name (a Lua
-string) automatically binds it to the library. First the symbol type
-is resolved &mdash; it must have been declared with
-<a href="ext_ffi_api.html#ffi_cdef"><tt>ffi.cdef</tt></a>. Then the
-symbol address is resolved by searching for the symbol name in the
-associated shared libraries or the default symbol namespace. Finally,
-the resulting binding between the symbol name, the symbol type and its
-address is cached. Missing symbol declarations or nonexistent symbol
-names cause an error.
-</p>
-<p>
-This is what happens on a <b>read access</b> for the different kinds of
-symbols:
-</p>
-<ul>
-
-<li>External functions: a cdata object with the type of the function
-and its address is returned.</li>
-
-<li>External variables: the symbol address is dereferenced and the
-loaded value is <a href="#convert_tolua">converted to a Lua object</a>
-and returned.</li>
-
-<li>Constant values (<tt>static&nbsp;const</tt> or <tt>enum</tt>
-constants): the constant is <a href="#convert_tolua">converted to a
-Lua object</a> and returned.</li>
-
-</ul>
-<p>
-This is what happens on a <b>write access</b>:
-</p>
-<ul>
-
-<li>External variables: the value to be written is
-<a href="#convert_fromlua">converted to the C&nbsp;type</a> of the
-variable and then stored at the symbol address.</li>
-
-<li>Writing to constant variables or to any other symbol type causes
-an error, like any other attempted write to a constant location.</li>
-
-</ul>
-<p>
-C&nbsp;library namespaces themselves are garbage collected objects. If
-the last reference to the namespace object is gone, the garbage
-collector will eventually release the shared library reference and
-remove all memory associated with the namespace. Since this may
-trigger the removal of the shared library from the memory of the
-running process, it's generally <em>not safe</em> to use function
-cdata objects obtained from a library if the namespace object may be
-unreferenced.
-</p>
-<p>
-Performance notice: the JIT compiler specializes to the identity of
-namespace objects and to the strings used to index them. This
-effectively turns function cdata objects into constants. It's not
-useful and actually counter-productive to explicitly cache these
-function objects, e.g. <tt>local strlen = ffi.C.strlen</tt>. OTOH it
-<em>is</em> useful to cache the namespace itself, e.g. <tt>local C =
-ffi.C</tt>.
-</p>
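-<p>
-A minimal sketch, assuming the default symbol namespace exports
-<tt>strlen()</tt> (true for the standard C&nbsp;library on common
-targets):
-</p>
-<pre class="code">
-local ffi = require("ffi")
-ffi.cdef[[
-size_t strlen(const char *s);
-]]
-local C = ffi.C                   -- caching the namespace itself is useful
-print(tonumber(C.strlen("abc")))  --> 3 (size_t converted to a Lua number)
--- A cached 'local strlen = C.strlen' is unnecessary; the JIT already
--- treats C.strlen as a constant.
-</pre>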
-
-<h2 id="policy">No Hand-holding!</h2>
-<p>
-The FFI library has been designed as <b>a low-level library</b>. The
-goal is to interface with C&nbsp;code and C&nbsp;data types with a
-minimum of overhead. This means <b>you can do anything you can do
-from&nbsp;C</b>: access all memory, overwrite anything in memory, call
-machine code at any memory address and so on.
-</p>
-<p>
-The FFI library provides <b>no memory safety</b>, unlike regular Lua
-code. It will happily allow you to dereference a <tt>NULL</tt>
-pointer, to access arrays out of bounds or to misdeclare
-C&nbsp;functions. If you make a mistake, your application might crash,
-just like equivalent C&nbsp;code would.
-</p>
-<p>
-This behavior is inevitable, since the goal is to provide full
-interoperability with C&nbsp;code. Adding extra safety measures, like
-bounds checks, would be futile. There's no way to detect
-misdeclarations of C&nbsp;functions, since shared libraries only
-provide symbol names, but no type information. Likewise there's no way
-to infer the valid range of indexes for a returned pointer.
-</p>
-<p>
-Again: the FFI library is a low-level library. This implies it needs
-to be used with care, but its flexibility and performance often
-outweigh this concern. If you're a C or C++ developer, it'll be easy
-to apply your existing knowledge. OTOH writing code for the FFI
-library is not for the faint of heart and probably shouldn't be the
-first exercise for someone with little experience in Lua, C or C++.
-</p>
-<p>
-As a corollary of the above, the FFI library is <b>not safe for use by
-untrusted Lua code</b>. If you're sandboxing untrusted Lua code, you
-definitely don't want to give this code access to the FFI library or
-to <em>any</em> cdata object (except 64&nbsp;bit integers or complex
-numbers). Any properly engineered Lua sandbox needs to provide safety
-wrappers for many of the standard Lua library functions &mdash;
-similar wrappers need to be written for high-level operations on FFI
-data types, too.
-</p>
-
-<h2 id="status">Current Status</h2>
-<p>
-The initial release of the FFI library has some limitations and is
-missing some features. Most of these will be fixed in future releases.
-</p>
-<p>
-<a href="#clang">C language support</a> is
-currently incomplete:
-</p>
-<ul>
-<li>C&nbsp;declarations are not passed through a C&nbsp;pre-processor,
-yet.</li>
-<li>The C&nbsp;parser is able to evaluate most constant expressions
-commonly found in C&nbsp;header files. However it doesn't handle the
-full range of C&nbsp;expression semantics and may fail for some
-obscure constructs.</li>
-<li><tt>static const</tt> declarations only work for integer types
-up to 32&nbsp;bits. Neither declaring string constants nor
-floating-point constants is supported.</li>
-<li>Packed <tt>struct</tt> bitfields that cross container boundaries
-are not implemented.</li>
-<li>Native vector types may be defined with the GCC <tt>mode</tt> or
-<tt>vector_size</tt> attribute. But no operations other than loading,
-storing and initializing them are supported, yet.</li>
-<li>The <tt>volatile</tt> type qualifier is currently ignored by
-compiled code.</li>
-<li><a href="ext_ffi_api.html#ffi_cdef"><tt>ffi.cdef</tt></a> silently
-ignores most re-declarations. Note: avoid re-declarations which do not
-conform to C99. The implementation will eventually be changed to
-perform strict checks.</li>
-</ul>
-<p>
-The JIT compiler already handles a large subset of all FFI operations.
-It automatically falls back to the interpreter for unimplemented
-operations (you can check for this with the
-<a href="running.html#opt_j"><tt>-jv</tt></a> command line option).
-The following operations are currently not compiled and may exhibit
-suboptimal performance, especially when used in inner loops:
-</p>
-<ul>
-<li>Bitfield accesses and initializations.</li>
-<li>Vector operations.</li>
-<li>Table initializers.</li>
-<li>Initialization of nested <tt>struct</tt>/<tt>union</tt> types.</li>
-<li>Allocations of variable-length arrays or structs.</li>
-<li>Allocations of C&nbsp;types with a size &gt; 128&nbsp;bytes or an
-alignment &gt; 8&nbsp;bytes.</li>
-<li>Conversions from lightuserdata to <tt>void&nbsp;*</tt>.</li>
-<li>Pointer differences for element sizes that are not a power of
-two.</li>
-<li>Calls to C&nbsp;functions with aggregates passed or returned by
-value.</li>
-<li>Calls to ctype metamethods which are not plain functions.</li>
-<li>ctype <tt>__newindex</tt> tables and non-string lookups in ctype
-<tt>__index</tt> tables.</li>
-<li><tt>tostring()</tt> for cdata types.</li>
-<li>Calls to <tt>ffi.cdef()</tt>, <tt>ffi.load()</tt> and
-<tt>ffi.metatype()</tt>.</li>
-</ul>
-<p>
-Other missing features:
-</p>
-<ul>
-<li>Bit operations for 64&nbsp;bit types.</li>
-<li>Arithmetic for <tt>complex</tt> numbers.</li>
-<li>Passing structs by value to vararg C&nbsp;functions.</li>
-<li><a href="extensions.html#exceptions">C++ exception interoperability</a>
-does not extend to C&nbsp;functions called via the FFI, if the call is
-compiled.</li>
-</ul>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>FFI Semantics</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+table.convtable { line-height: 1.2; }
+tr.convhead td { font-weight: bold; }
+td.convop { font-style: italic; width: 40%; }
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>FFI Semantics</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a class="current" href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+This page describes the detailed semantics underlying the FFI library
+and its interaction with both Lua and C&nbsp;code.
+</p>
+<p>
+Given that the FFI library is designed to interface with C&nbsp;code
+and that declarations can be written in plain C&nbsp;syntax, <b>it
+closely follows the C&nbsp;language semantics</b>, wherever possible.
+Some minor concessions are needed for smoother interoperation with Lua
+language semantics.
+</p>
+<p>
+Please don't be overwhelmed by the contents of this page &mdash; this
+is a reference and you may need to consult it, if in doubt. It doesn't
+hurt to skim this page, but most of the semantics "just work" as you'd
+expect them to work. It should be straightforward to write
+applications using the LuaJIT FFI for developers with a C or C++
+background.
+</p>
+
+<h2 id="clang">C Language Support</h2>
+<p>
+The FFI library has a built-in C&nbsp;parser with a minimal memory
+footprint. It's used by the <a href="ext_ffi_api.html">ffi.* library
+functions</a> to declare C&nbsp;types or external symbols.
+</p>
+<p>
+Its only purpose is to parse C&nbsp;declarations, as found e.g. in
+C&nbsp;header files. Although it does evaluate constant expressions,
+it's <em>not</em> a C&nbsp;compiler. The body of <tt>inline</tt>
+C&nbsp;function definitions is simply ignored.
+</p>
+<p>
+Also, this is <em>not</em> a validating C&nbsp;parser. It expects and
+accepts correctly formed C&nbsp;declarations, but it may choose to
+ignore bad declarations or show rather generic error messages. If in
+doubt, please check the input against your favorite C&nbsp;compiler.
+</p>
+<p>
+The C&nbsp;parser complies with the <b>C99 language standard</b> plus
+the following extensions:
+</p>
+<ul>
+
+<li>The <tt>'\e'</tt> escape in character and string literals.</li>
+
+<li>The C99/C++ boolean type, declared with the keywords <tt>bool</tt>
+or <tt>_Bool</tt>.</li>
+
+<li>Complex numbers, declared with the keywords <tt>complex</tt> or
+<tt>_Complex</tt>.</li>
+
+<li>Two complex number types: <tt>complex</tt> (aka
+<tt>complex&nbsp;double</tt>) and <tt>complex&nbsp;float</tt>.</li>
+
+<li>Vector types, declared with the GCC <tt>mode</tt> or
+<tt>vector_size</tt> attribute.</li>
+
+<li>Unnamed ('transparent') <tt>struct</tt>/<tt>union</tt> fields
+inside a <tt>struct</tt>/<tt>union</tt>.</li>
+
+<li>Incomplete <tt>enum</tt> declarations, handled like incomplete
+<tt>struct</tt> declarations.</li>
+
+<li>Unnamed <tt>enum</tt> fields inside a
+<tt>struct</tt>/<tt>union</tt>. This is similar to a scoped C++
+<tt>enum</tt>, except that declared constants are visible in the
+global namespace, too.</li>
+
+<li>Scoped <tt>static&nbsp;const</tt> declarations inside a
+<tt>struct</tt>/<tt>union</tt> (from C++).</li>
+
+<li>Zero-length arrays (<tt>[0]</tt>), empty
+<tt>struct</tt>/<tt>union</tt>, variable-length arrays (VLA,
+<tt>[?]</tt>) and variable-length structs (VLS, with a trailing
+VLA).</li>
+
+<li>C++ reference types (<tt>int&nbsp;&amp;x</tt>).</li>
+
+<li>Alternate GCC keywords with '<tt>__</tt>', e.g.
+<tt>__const__</tt>.</li>
+
+<li>GCC <tt>__attribute__</tt> with the following attributes:
+<tt>aligned</tt>, <tt>packed</tt>, <tt>mode</tt>,
+<tt>vector_size</tt>, <tt>cdecl</tt>, <tt>fastcall</tt>,
+<tt>stdcall</tt>, <tt>thiscall</tt>.</li>
+
+<li>The GCC <tt>__extension__</tt> keyword and the GCC
+<tt>__alignof__</tt> operator.</li>
+
+<li>GCC <tt>__asm__("symname")</tt> symbol name redirection for
+function declarations.</li>
+
+<li>MSVC keywords for fixed-length types: <tt>__int8</tt>,
+<tt>__int16</tt>, <tt>__int32</tt> and <tt>__int64</tt>.</li>
+
+<li>MSVC <tt>__cdecl</tt>, <tt>__fastcall</tt>, <tt>__stdcall</tt>,
+<tt>__thiscall</tt>, <tt>__ptr32</tt>, <tt>__ptr64</tt>,
+<tt>__declspec(align(n))</tt> and <tt>#pragma&nbsp;pack</tt>.</li>
+
+<li>All other GCC/MSVC-specific attributes are ignored.</li>
+
+</ul>
+<p>
+The following C&nbsp;types are pre-defined by the C&nbsp;parser (like
+a <tt>typedef</tt>, except re-declarations will be ignored):
+</p>
+<ul>
+
+<li>Vararg handling: <tt>va_list</tt>, <tt>__builtin_va_list</tt>,
+<tt>__gnuc_va_list</tt>.</li>
+
+<li>From <tt>&lt;stddef.h&gt;</tt>: <tt>ptrdiff_t</tt>,
+<tt>size_t</tt>, <tt>wchar_t</tt>.</li>
+
+<li>From <tt>&lt;stdint.h&gt;</tt>: <tt>int8_t</tt>, <tt>int16_t</tt>,
+<tt>int32_t</tt>, <tt>int64_t</tt>, <tt>uint8_t</tt>,
+<tt>uint16_t</tt>, <tt>uint32_t</tt>, <tt>uint64_t</tt>,
+<tt>intptr_t</tt>, <tt>uintptr_t</tt>.</li>
+
+</ul>
+<p>
+You're encouraged to use these types in preference to
+compiler-specific extensions or target-dependent standard types.
+E.g. <tt>char</tt> differs in signedness and <tt>long</tt> differs in
+size, depending on the target architecture and platform ABI.
+</p>
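+<p>
+For example (a sketch with a made-up <tt>record_t</tt>), prefer the
+pre-defined fixed-size types in declarations:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+typedef struct {
+  uint32_t id;     /* same size on every supported target */
+  int64_t stamp;   /* prefer this over 'long', whose size varies */
+} record_t;
+]]
+</pre>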
+<p>
+The following C&nbsp;features are <b>not</b> supported:
+</p>
+<ul>
+
+<li>A declaration must always have a type specifier; it doesn't
+default to an <tt>int</tt> type.</li>
+
+<li>Old-style empty function declarations (K&amp;R) are not allowed.
+All C&nbsp;functions must have a proper prototype declaration. A
+function declared without parameters (<tt>int&nbsp;foo();</tt>) is
+treated as a function taking zero arguments, like in C++.</li>
+
+<li>The <tt>long double</tt> C&nbsp;type is parsed correctly, but
+there's no support for the related conversions, accesses or arithmetic
+operations.</li>
+
+<li>Wide character strings and character literals are not
+supported.</li>
+
+<li><a href="#status">See below</a> for features that are currently
+not implemented.</li>
+
+</ul>
+
+<h2 id="convert">C Type Conversion Rules</h2>
+
+<h3 id="convert_tolua">Conversions from C&nbsp;types to Lua objects</h3>
+<p>
+These conversion rules apply for <em>read accesses</em> to
+C&nbsp;types: indexing pointers, arrays or
+<tt>struct</tt>/<tt>union</tt> types; reading external variables or
+constant values; retrieving return values from C&nbsp;calls:
+</p>
+<table class="convtable">
+<tr class="convhead">
+<td class="convin">Input</td>
+<td class="convop">Conversion</td>
+<td class="convout">Output</td>
+</tr>
+<tr class="odd separate">
+<td class="convin"><tt>int8_t</tt>, <tt>int16_t</tt></td><td class="convop">&rarr;<sup>sign-ext</sup> <tt>int32_t</tt> &rarr; <tt>double</tt></td><td class="convout">number</td></tr>
+<tr class="even">
+<td class="convin"><tt>uint8_t</tt>, <tt>uint16_t</tt></td><td class="convop">&rarr;<sup>zero-ext</sup> <tt>int32_t</tt> &rarr; <tt>double</tt></td><td class="convout">number</td></tr>
+<tr class="odd">
+<td class="convin"><tt>int32_t</tt>, <tt>uint32_t</tt></td><td class="convop">&rarr; <tt>double</tt></td><td class="convout">number</td></tr>
+<tr class="even">
+<td class="convin"><tt>int64_t</tt>, <tt>uint64_t</tt></td><td class="convop">boxed value</td><td class="convout">64 bit int cdata</td></tr>
+<tr class="odd separate">
+<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr; <tt>double</tt></td><td class="convout">number</td></tr>
+<tr class="even separate">
+<td class="convin"><tt>bool</tt></td><td class="convop">0 &rarr; <tt>false</tt>, otherwise <tt>true</tt></td><td class="convout">boolean</td></tr>
+<tr class="odd separate">
+<td class="convin"><tt>enum</tt></td><td class="convop">boxed value</td><td class="convout">enum cdata</td></tr>
+<tr class="even">
+<td class="convin">Complex number</td><td class="convop">boxed value</td><td class="convout">complex cdata</td></tr>
+<tr class="odd">
+<td class="convin">Vector</td><td class="convop">boxed value</td><td class="convout">vector cdata</td></tr>
+<tr class="even">
+<td class="convin">Pointer</td><td class="convop">boxed value</td><td class="convout">pointer cdata</td></tr>
+<tr class="odd separate">
+<td class="convin">Array</td><td class="convop">boxed reference</td><td class="convout">reference cdata</td></tr>
+<tr class="even">
+<td class="convin"><tt>struct</tt>/<tt>union</tt></td><td class="convop">boxed reference</td><td class="convout">reference cdata</td></tr>
+</table>
+<p>
+Bitfields are treated like their underlying type.
+</p>
+<p>
+Reference types are dereferenced <em>before</em> a conversion can take
+place &mdash; the conversion is applied to the C&nbsp;type pointed to
+by the reference.
+</p>
+
+<h3 id="convert_fromlua">Conversions from Lua objects to C&nbsp;types</h3>
+<p>
+These conversion rules apply for <em>write accesses</em> to
+C&nbsp;types: indexing pointers, arrays or
+<tt>struct</tt>/<tt>union</tt> types; initializing cdata objects;
+casts to C&nbsp;types; writing to external variables; passing
+arguments to C&nbsp;calls:
+</p>
+<table class="convtable">
+<tr class="convhead">
+<td class="convin">Input</td>
+<td class="convop">Conversion</td>
+<td class="convout">Output</td>
+</tr>
+<tr class="odd separate">
+<td class="convin">number</td><td class="convop">&rarr;</td><td class="convout"><tt>double</tt></td></tr>
+<tr class="even">
+<td class="convin">boolean</td><td class="convop"><tt>false</tt> &rarr; 0, <tt>true</tt> &rarr; 1</td><td class="convout"><tt>bool</tt></td></tr>
+<tr class="odd separate">
+<td class="convin">nil</td><td class="convop"><tt>NULL</tt> &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="even">
+<td class="convin">lightuserdata</td><td class="convop">lightuserdata address &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="odd">
+<td class="convin">userdata</td><td class="convop">userdata payload &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="even">
+<td class="convin">io.* file</td><td class="convop">get FILE * handle &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="odd separate">
+<td class="convin">string</td><td class="convop">match against <tt>enum</tt> constant</td><td class="convout"><tt>enum</tt></td></tr>
+<tr class="even">
+<td class="convin">string</td><td class="convop">copy string data + zero-byte</td><td class="convout"><tt>int8_t[]</tt>, <tt>uint8_t[]</tt></td></tr>
+<tr class="odd">
+<td class="convin">string</td><td class="convop">string data &rarr;</td><td class="convout"><tt>const char[]</tt></td></tr>
+<tr class="even separate">
+<td class="convin">function</td><td class="convop"><a href="#callback">create callback</a> &rarr;</td><td class="convout">C function type</td></tr>
+<tr class="odd separate">
+<td class="convin">table</td><td class="convop"><a href="#init_table">table initializer</a></td><td class="convout">Array</td></tr>
+<tr class="even">
+<td class="convin">table</td><td class="convop"><a href="#init_table">table initializer</a></td><td class="convout"><tt>struct</tt>/<tt>union</tt></td></tr>
+<tr class="odd separate">
+<td class="convin">cdata</td><td class="convop">cdata payload &rarr;</td><td class="convout">C type</td></tr>
+</table>
+<p>
+If the result type of this conversion doesn't match the
+C&nbsp;type of the destination, the
+<a href="#convert_between">conversion rules between C&nbsp;types</a>
+are applied.
+</p>
+<p>
+Reference types are immutable after initialization ("no re-seating of
+references"). For initialization purposes or when passing values to
+reference parameters, they are treated like pointers. Note that unlike
+in C++, there's no way to implement automatic reference generation of
+variables under the Lua language semantics. If you want to call a
+function with a reference parameter, you need to explicitly pass a
+one-element array.
+</p>
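+<p>
+A minimal sketch of this idiom, assuming a hypothetical library
+<tt>mylib</tt> that exports a function with an <tt>int &amp;</tt>
+parameter:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+ffi.cdef[[
+void incr(int &amp;x);  /* hypothetical function with a reference parameter */
+]]
+local mylib = ffi.load("mylib")    -- hypothetical shared library
+local box = ffi.new("int[1]", 41)  -- one-element array stands in for the variable
+mylib.incr(box)                    -- passed like a pointer to box[0]
+print(box[0])                      --> 42, if incr() adds one to its argument
+</pre>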
+
+<h3 id="convert_between">Conversions between C&nbsp;types</h3>
+<p>
+These conversion rules are more or less the same as the standard
+C&nbsp;conversion rules. Some rules only apply to casts, or require
+pointer or type compatibility:
+</p>
+<table class="convtable">
+<tr class="convhead">
+<td class="convin">Input</td>
+<td class="convop">Conversion</td>
+<td class="convout">Output</td>
+</tr>
+<tr class="odd separate">
+<td class="convin">Signed integer</td><td class="convop">&rarr;<sup>narrow or sign-extend</sup></td><td class="convout">Integer</td></tr>
+<tr class="even">
+<td class="convin">Unsigned integer</td><td class="convop">&rarr;<sup>narrow or zero-extend</sup></td><td class="convout">Integer</td></tr>
+<tr class="odd">
+<td class="convin">Integer</td><td class="convop">&rarr;<sup>round</sup></td><td class="convout"><tt>double</tt>, <tt>float</tt></td></tr>
+<tr class="even">
+<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr;<sup>trunc</sup> <tt>int32_t</tt> &rarr;<sup>narrow</sup></td><td class="convout"><tt>(u)int8_t</tt>, <tt>(u)int16_t</tt></td></tr>
+<tr class="odd">
+<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr;<sup>trunc</sup></td><td class="convout"><tt>(u)int32_t</tt>, <tt>(u)int64_t</tt></td></tr>
+<tr class="even">
+<td class="convin"><tt>double</tt>, <tt>float</tt></td><td class="convop">&rarr;<sup>round</sup></td><td class="convout"><tt>float</tt>, <tt>double</tt></td></tr>
+<tr class="odd separate">
+<td class="convin">Number</td><td class="convop">n == 0 &rarr; 0, otherwise 1</td><td class="convout"><tt>bool</tt></td></tr>
+<tr class="even">
+<td class="convin"><tt>bool</tt></td><td class="convop"><tt>false</tt> &rarr; 0, <tt>true</tt> &rarr; 1</td><td class="convout">Number</td></tr>
+<tr class="odd separate">
+<td class="convin">Complex number</td><td class="convop">convert real part</td><td class="convout">Number</td></tr>
+<tr class="even">
+<td class="convin">Number</td><td class="convop">convert real part, imag = 0</td><td class="convout">Complex number</td></tr>
+<tr class="odd">
+<td class="convin">Complex number</td><td class="convop">convert real and imag part</td><td class="convout">Complex number</td></tr>
+<tr class="even separate">
+<td class="convin">Number</td><td class="convop">convert scalar and replicate</td><td class="convout">Vector</td></tr>
+<tr class="odd">
+<td class="convin">Vector</td><td class="convop">copy (same size)</td><td class="convout">Vector</td></tr>
+<tr class="even separate">
+<td class="convin"><tt>struct</tt>/<tt>union</tt></td><td class="convop">take base address (compat)</td><td class="convout">Pointer</td></tr>
+<tr class="odd">
+<td class="convin">Array</td><td class="convop">take base address (compat)</td><td class="convout">Pointer</td></tr>
+<tr class="even">
+<td class="convin">Function</td><td class="convop">take function address</td><td class="convout">Function pointer</td></tr>
+<tr class="odd separate">
+<td class="convin">Number</td><td class="convop">convert via <tt>uintptr_t</tt> (cast)</td><td class="convout">Pointer</td></tr>
+<tr class="even">
+<td class="convin">Pointer</td><td class="convop">convert address (compat/cast)</td><td class="convout">Pointer</td></tr>
+<tr class="odd">
+<td class="convin">Pointer</td><td class="convop">convert address (cast)</td><td class="convout">Integer</td></tr>
+<tr class="even">
+<td class="convin">Array</td><td class="convop">convert base address (cast)</td><td class="convout">Integer</td></tr>
+<tr class="odd separate">
+<td class="convin">Array</td><td class="convop">copy (compat)</td><td class="convout">Array</td></tr>
+<tr class="even">
+<td class="convin"><tt>struct</tt>/<tt>union</tt></td><td class="convop">copy (identical type)</td><td class="convout"><tt>struct</tt>/<tt>union</tt></td></tr>
+</table>
+<p>
+Bitfields or <tt>enum</tt> types are treated like their underlying
+type.
+</p>
+<p>
+Conversions not listed above will raise an error. E.g. it's not
+possible to convert a pointer to a complex number or vice versa.
+</p>
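+<p>
+A minimal sketch illustrating a few of these rules:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+local d  = ffi.new("double", 3.7)
+local i  = ffi.new("int8_t", d)      -- trunc to int32_t, then narrow: 3
+local ip = ffi.new("int[1]", 42)
+local bp = ffi.cast("uint8_t *", ip) -- array decays to a pointer, converted by the cast
+local a  = ffi.cast("uintptr_t", bp) -- pointer to integer (cast only)
+print(tonumber(i), tonumber(a) ~= 0) --> 3     true
+</pre>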
+
+<h3 id="convert_vararg">Conversions for vararg C&nbsp;function arguments</h3>
+<p>
+The following default conversion rules apply when passing Lua objects
+to the variable argument part of vararg C&nbsp;functions:
+</p>
+<table class="convtable">
+<tr class="convhead">
+<td class="convin">Input</td>
+<td class="convop">Conversion</td>
+<td class="convout">Output</td>
+</tr>
+<tr class="odd separate">
+<td class="convin">number</td><td class="convop">&rarr;</td><td class="convout"><tt>double</tt></td></tr>
+<tr class="even">
+<td class="convin">boolean</td><td class="convop"><tt>false</tt> &rarr; 0, <tt>true</tt> &rarr; 1</td><td class="convout"><tt>bool</tt></td></tr>
+<tr class="odd separate">
+<td class="convin">nil</td><td class="convop"><tt>NULL</tt> &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="even">
+<td class="convin">userdata</td><td class="convop">userdata payload &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="odd">
+<td class="convin">lightuserdata</td><td class="convop">lightuserdata address &rarr;</td><td class="convout"><tt>(void *)</tt></td></tr>
+<tr class="even separate">
+<td class="convin">string</td><td class="convop">string data &rarr;</td><td class="convout"><tt>const char *</tt></td></tr>
+<tr class="odd separate">
+<td class="convin"><tt>float</tt> cdata</td><td class="convop">&rarr;</td><td class="convout"><tt>double</tt></td></tr>
+<tr class="even">
+<td class="convin">Array cdata</td><td class="convop">take base address</td><td class="convout">Element pointer</td></tr>
+<tr class="odd">
+<td class="convin"><tt>struct</tt>/<tt>union</tt> cdata</td><td class="convop">take base address</td><td class="convout"><tt>struct</tt>/<tt>union</tt> pointer</td></tr>
+<tr class="even">
+<td class="convin">Function cdata</td><td class="convop">take function address</td><td class="convout">Function pointer</td></tr>
+<tr class="odd">
+<td class="convin">Any other cdata</td><td class="convop">no conversion</td><td class="convout">C type</td></tr>
+</table>
+<p>
+To pass a Lua object, other than a cdata object, as a specific type,
+you need to override the conversion rules: create a temporary cdata
+object with a constructor or a cast and initialize it with the value
+to pass:
+</p>
+<p>
+Assuming <tt>x</tt> is a Lua number, here's how to pass it as an
+integer to a vararg function:
+</p>
+<pre class="code">
+ffi.cdef[[
+int printf(const char *fmt, ...);
+]]
+ffi.C.printf("integer value: %d\n", ffi.new("int", x))
+</pre>
+<p>
+If you don't do this, the default Lua number &rarr; <tt>double</tt>
+conversion rule applies. A vararg C&nbsp;function expecting an integer
+will see a garbled or uninitialized value.
+</p>
+
+<h2 id="init">Initializers</h2>
+<p>
+Creating a cdata object with
+<a href="ext_ffi_api.html#ffi_new"><tt>ffi.new()</tt></a> or the
+equivalent constructor syntax always initializes its contents, too.
+Different rules apply, depending on the number of optional
+initializers and the C&nbsp;types involved:
+</p>
+<ul>
+<li>If no initializers are given, the object is filled with zero bytes.</li>
+
+<li>Scalar types (numbers and pointers) accept a single initializer.
+The Lua object is <a href="#convert_fromlua">converted to the scalar
+C&nbsp;type</a>.</li>
+
+<li>Valarrays (complex numbers and vectors) are treated like scalars
+when a single initializer is given. Otherwise they are treated like
+regular arrays.</li>
+
+<li>Aggregate types (arrays and structs) accept either a single cdata
+initializer of the same type (copy constructor), a single
+<a href="#init_table">table initializer</a>, or a flat list of
+initializers.</li>
+
+<li>The elements of an array are initialized, starting at index zero.
+If a single initializer is given for an array, it's repeated for all
+remaining elements. This doesn't happen if two or more initializers
+are given: all remaining uninitialized elements are filled with zero
+bytes.</li>
+
+<li>Byte arrays may also be initialized with a Lua string. This copies
+the whole string plus a terminating zero-byte. The copy stops early only
+if the array has a known, fixed size.</li>
+
+<li>The fields of a <tt>struct</tt> are initialized in the order of
+their declaration. Uninitialized fields are filled with zero
+bytes.</li>
+
+<li>Only the first field of a <tt>union</tt> can be initialized with a
+flat initializer.</li>
+
+<li>Elements or fields which are aggregates themselves are initialized
+with a <em>single</em> initializer, but this may be a table
+initializer or a compatible aggregate.</li>
+
+<li>Excess initializers cause an error.</li>
+
+</ul>
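+<p>
+A minimal sketch of some of these rules (table initializers are
+covered in the next section):
+</p>
+<pre class="code">
+local ffi = require("ffi")
+
+ffi.new("int", 42)          --> scalar initialized with 42
+ffi.new("double[4]", 1)     --> 1, 1, 1, 1  (single initializer is repeated)
+ffi.new("double[4]", 1, 2)  --> 1, 2, 0, 0  (rest is filled with zero bytes)
+ffi.new("char[8]", "abc")   --> the string plus a terminating zero-byte
+ffi.new("complex", 1, 2)    --> 1+2i  (valarray with two initializers)
+</pre>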
+
+<h2 id="init_table">Table Initializers</h2>
+<p>
+The following rules apply if a Lua table is used to initialize an
+Array or a <tt>struct</tt>/<tt>union</tt>:
+</p>
+<ul>
+
+<li>If the table index <tt>[0]</tt> is non-<tt>nil</tt>, then the
+table is assumed to be zero-based. Otherwise it's assumed to be
+one-based.</li>
+
+<li>Array elements, starting at index zero, are initialized one-by-one
+with the consecutive table elements, starting at either index
+<tt>[0]</tt> or <tt>[1]</tt>. This process stops at the first
+<tt>nil</tt> table element.</li>
+
+<li>If exactly one array element was initialized, it's repeated for
+all the remaining elements. Otherwise all remaining uninitialized
+elements are filled with zero bytes.</li>
+
+<li>The above logic only applies to arrays with a known fixed size.
+A VLA is only initialized with the element(s) given in the table.
+Depending on the use case, you may need to explicitly add a
+<tt>NULL</tt> or <tt>0</tt> terminator to a VLA.</li>
+
+<li>A <tt>struct</tt>/<tt>union</tt> can be initialized in the
+order of the declaration of its fields. Each field is initialized with
+consecutive table elements, starting at either index <tt>[0]</tt>
+or <tt>[1]</tt>. This process stops at the first <tt>nil</tt> table
+element.</li>
+
+<li>Otherwise, if neither index <tt>[0]</tt> nor <tt>[1]</tt> is present,
+a <tt>struct</tt>/<tt>union</tt> is initialized by looking up each field
+name (as a string key) in the table. Each non-<tt>nil</tt> value is
+used to initialize the corresponding field.</li>
+
+<li>Uninitialized fields of a <tt>struct</tt> are filled with zero
+bytes, except for the trailing VLA of a VLS.</li>
+
+<li>Initialization of a <tt>union</tt> stops after one field has been
+initialized. If no field has been initialized, the <tt>union</tt> is
+filled with zero bytes.</li>
+
+<li>Elements or fields which are aggregates themselves are initialized
+with a <em>single</em> initializer, but this may be a nested table
+initializer (or a compatible aggregate).</li>
+
+<li>Excess initializers for an array cause an error. Excess
+initializers for a <tt>struct</tt>/<tt>union</tt> are ignored.
+Unrelated table entries are ignored, too.</li>
+
+</ul>
+<p>
+Example:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+
+ffi.cdef[[
+struct foo { int a, b; };
+union bar { int i; double d; };
+struct nested { int x; struct foo y; };
+]]
+
+ffi.new("int[3]", {}) --> 0, 0, 0
+ffi.new("int[3]", {1}) --> 1, 1, 1
+ffi.new("int[3]", {1,2}) --> 1, 2, 0
+ffi.new("int[3]", {1,2,3}) --> 1, 2, 3
+ffi.new("int[3]", {[0]=1}) --> 1, 1, 1
+ffi.new("int[3]", {[0]=1,2}) --> 1, 2, 0
+ffi.new("int[3]", {[0]=1,2,3}) --> 1, 2, 3
+ffi.new("int[3]", {[0]=1,2,3,4}) --> error: too many initializers
+
+ffi.new("struct foo", {}) --> a = 0, b = 0
+ffi.new("struct foo", {1}) --> a = 1, b = 0
+ffi.new("struct foo", {1,2}) --> a = 1, b = 2
+ffi.new("struct foo", {[0]=1,2}) --> a = 1, b = 2
+ffi.new("struct foo", {b=2}) --> a = 0, b = 2
+ffi.new("struct foo", {a=1,b=2,c=3}) --> a = 1, b = 2 'c' is ignored
+
+ffi.new("union bar", {}) --> i = 0, d = 0.0
+ffi.new("union bar", {1}) --> i = 1, d = ?
+ffi.new("union bar", {[0]=1,2}) --> i = 1, d = ? '2' is ignored
+ffi.new("union bar", {d=2}) --> i = ?, d = 2.0
+
+ffi.new("struct nested", {1,{2,3}}) --> x = 1, y.a = 2, y.b = 3
+ffi.new("struct nested", {x=1,y={2,3}}) --> x = 1, y.a = 2, y.b = 3
+</pre>
+
+<h2 id="cdata_ops">Operations on cdata Objects</h2>
+<p>
+All of the standard Lua operators can be applied to cdata objects or a
+mix of a cdata object and another Lua object. The following list shows
+the pre-defined operations.
+</p>
+<p>
+Reference types are dereferenced <em>before</em> performing each of
+the operations below &mdash; the operation is applied to the
+C&nbsp;type pointed to by the reference.
+</p>
+<p>
+The pre-defined operations are always tried first before deferring to a
+metamethod or index table (if any) for the corresponding ctype (except
+for <tt>__new</tt>). An error is raised if the metamethod lookup or
+index table lookup fails.
+</p>
+
+<h3 id="cdata_array">Indexing a cdata object</h3>
+<ul>
+
+<li><b>Indexing a pointer/array</b>: a cdata pointer/array can be
+indexed by a cdata number or a Lua number. The element address is
+computed as the base address plus the number value multiplied by the
+element size in bytes. A read access loads the element value and
+<a href="#convert_tolua">converts it to a Lua object</a>. A write
+access <a href="#convert_fromlua">converts a Lua object to the element
+type</a> and stores the converted value to the element. An error is
+raised if the element size is undefined or a write access to a
+constant element is attempted.</li>
+
+<li><b>Dereferencing a <tt>struct</tt>/<tt>union</tt> field</b>: a
+cdata <tt>struct</tt>/<tt>union</tt> or a pointer to a
+<tt>struct</tt>/<tt>union</tt> can be dereferenced by a string key,
+giving the field name. The field address is computed as the base
+address plus the relative offset of the field. A read access loads the
+field value and <a href="#convert_tolua">converts it to a Lua
+object</a>. A write access <a href="#convert_fromlua">converts a Lua
+object to the field type</a> and stores the converted value to the
+field. An error is raised if a write access to a constant
+<tt>struct</tt>/<tt>union</tt> or a constant field is attempted.
+Scoped enum constants or static constants are treated like a constant
+field.</li>
+
+<li><b>Indexing a complex number</b>: a complex number can be indexed
+either by a cdata number or a Lua number with the values 0 or 1, or by
+the strings <tt>"re"</tt> or <tt>"im"</tt>. A read access loads the
+real part (<tt>[0]</tt>, <tt>.re</tt>) or the imaginary part
+(<tt>[1]</tt>, <tt>.im</tt>) part of a complex number and
+<a href="#convert_tolua">converts it to a Lua number</a>. The
+sub-parts of a complex number are immutable &mdash; assigning to an
+index of a complex number raises an error. Accessing out-of-bound
+indexes returns unspecified results, but is guaranteed not to trigger
+memory access violations.</li>
+
+<li><b>Indexing a vector</b>: a vector is treated like an array for
+indexing purposes, except the vector elements are immutable &mdash;
+assigning to an index of a vector raises an error.</li>
+
+</ul>
+<p>
+A ctype object can be indexed with a string key, too. The only
+pre-defined operation is reading scoped constants of
+<tt>struct</tt>/<tt>union</tt> types. All other accesses defer
+to the corresponding metamethods or index tables (if any).
+</p>
+<p>
+Note: since there's (deliberately) no address-of operator, a cdata
+object holding a value type is effectively immutable after
+initialization. The JIT compiler benefits from this fact when applying
+certain optimizations.
+</p>
+<p>
+As a consequence, the <em>elements</em> of complex numbers and
+vectors are immutable. But the elements of an aggregate holding these
+types <em>may</em> be modified of course. I.e. you cannot assign to
+<tt>foo.c.im</tt>, but you can assign a (newly created) complex number
+to <tt>foo.c</tt>.
+</p>
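+<p>
+For illustration, assuming a hypothetical struct type with a complex
+field, similar to <tt>foo.c</tt> above:
+</p>
+<pre class="code">
+ffi.cdef[[
+typedef struct { complex c; } cbox_t;  /* hypothetical type for this example */
+]]
+local foo = ffi.new("cbox_t")
+-- foo.c.im = 5                   --> error: sub-parts of a complex number are immutable
+foo.c = ffi.new("complex", 1, 2)  -- OK: assign a newly created complex number
+print(foo.c.re, foo.c.im)         --> 1     2
+</pre>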
+<p>
+The JIT compiler implements strict aliasing rules: accesses to different
+types do <b>not</b> alias, except for differences in signedness (this
+applies even to <tt>char</tt> pointers, unlike C99). Type punning
+through unions is explicitly detected and allowed.
+</p>
+
+<h3 id="cdata_call">Calling a cdata object</h3>
+<ul>
+
+<li><b>Constructor</b>: a ctype object can be called and used as a
+<a href="ext_ffi_api.html#ffi_new">constructor</a>. This is equivalent
+to <tt>ffi.new(ct, ...)</tt>, unless a <tt>__new</tt> metamethod is
+defined. The <tt>__new</tt> metamethod is called with the ctype object
+plus any other arguments passed to the constructor. Note that you have to
+use <tt>ffi.new</tt> inside of it, since calling <tt>ct(...)</tt> would
+cause infinite recursion (see the sketch after this list).</li>
+
+<li><b>C&nbsp;function call</b>: a cdata function or cdata function
+pointer can be called. The passed arguments are
+<a href="#convert_fromlua">converted to the C&nbsp;types</a> of the
+parameters given by the function declaration. Arguments passed to the
+variable argument part of vararg C&nbsp;function use
+<a href="#convert_vararg">special conversion rules</a>. This
+C&nbsp;function is called and the return value (if any) is
+<a href="#convert_tolua">converted to a Lua object</a>.<br>
+On Windows/x86 systems, <tt>__stdcall</tt> functions are automatically
+detected and a function declared as <tt>__cdecl</tt> (the default) is
+silently fixed up after the first call.</li>
+
+</ul>
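+<p>
+A minimal sketch of a <tt>__new</tt> metamethod, assuming a hypothetical
+<tt>vec2_t</tt> type declared just for this example:
+</p>
+<pre class="code">
+ffi.cdef[[
+typedef struct { double x, y; } vec2_t;  /* hypothetical type for this example */
+]]
+local vec2 = ffi.metatype("vec2_t", {
+  __new = function(ct, x, y)
+    -- Must use ffi.new() here; calling ct(...) would recurse into __new.
+    return ffi.new(ct, x or 0, y or 0)
+  end,
+})
+local v = vec2(3, 4)  --> v.x = 3, v.y = 4
+</pre>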
+
+<h3 id="cdata_arith">Arithmetic on cdata objects</h3>
+<ul>
+
+<li><b>Pointer arithmetic</b>: a cdata pointer/array and a cdata
+number or a Lua number can be added or subtracted. The number must be
+on the right hand side for a subtraction. The result is a pointer of
+the same type with an address plus or minus the number value
+multiplied by the element size in bytes. An error is raised if the
+element size is undefined.</li>
+
+<li><b>Pointer difference</b>: two compatible cdata pointers/arrays
+can be subtracted. The result is the difference between their
+addresses, divided by the element size in bytes. An error is raised if
+the element size is undefined or zero.</li>
+
+<li><b>64&nbsp;bit integer arithmetic</b>: the standard arithmetic
+operators (<tt>+&nbsp;-&nbsp;*&nbsp;/&nbsp;%&nbsp;^</tt> and unary
+minus) can be applied to two cdata numbers, or a cdata number and a
+Lua number. If one of them is an <tt>uint64_t</tt>, the other side is
+converted to an <tt>uint64_t</tt> and an unsigned arithmetic operation
+is performed. Otherwise both sides are converted to an
+<tt>int64_t</tt> and a signed arithmetic operation is performed. The
+result is a boxed 64&nbsp;bit cdata object.<br>
+
+If one of the operands is an <tt>enum</tt> and the other operand is a
+string, the string is converted to the value of a matching <tt>enum</tt>
+constant before the above conversion.<br>
+
+These rules ensure that 64&nbsp;bit integers are "sticky". Any
+expression involving at least one 64&nbsp;bit integer operand results
+in another one. The undefined cases for the division, modulo and power
+operators return <tt>2LL&nbsp;^&nbsp;63</tt> or
+<tt>2ULL&nbsp;^&nbsp;63</tt>.<br>
+
+You'll have to explicitly convert a 64&nbsp;bit integer to a Lua
+number (e.g. for regular floating-point calculations) with
+<tt>tonumber()</tt>. But note this may incur a precision loss.</li>
+
+</ul>
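+<p>
+The "stickiness" described above, as a minimal sketch:
+</p>
+<pre class="code">
+local x = 10LL            -- boxed int64_t cdata
+local y = x + 1           -- the Lua number is converted to int64_t
+print(y)                  --> 11LL  (still a 64 bit integer)
+print(tonumber(y) * 0.5)  --> 5.5   (explicit conversion to a Lua number)
+</pre>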
+
+<h3 id="cdata_comp">Comparisons of cdata objects</h3>
+<ul>
+
+<li><b>Pointer comparison</b>: two compatible cdata pointers/arrays
+can be compared. The result is the same as an unsigned comparison of
+their addresses. <tt>nil</tt> is treated like a <tt>NULL</tt> pointer,
+which is compatible with any other pointer type.</li>
+
+<li><b>64&nbsp;bit integer comparison</b>: two cdata numbers, or a
+cdata number and a Lua number can be compared with each other. If one
+of them is an <tt>uint64_t</tt>, the other side is converted to an
+<tt>uint64_t</tt> and an unsigned comparison is performed. Otherwise
+both sides are converted to an <tt>int64_t</tt> and a signed
+comparison is performed.<br>
+
+If one of the operands is an <tt>enum</tt> and the other operand is a
+string, the string is converted to the value of a matching <tt>enum</tt>
+constant before the above conversion.</li>
+
+<li><b>Comparisons for equality/inequality</b> never raise an error.
+Even incompatible pointers can be compared for equality by address. Any
+other incompatible comparison (also with non-cdata objects) treats the
+two sides as unequal.</li>
+
+</ul>
+
+<h3 id="cdata_key">cdata objects as table keys</h3>
+<p>
+Lua tables may be indexed by cdata objects, but this doesn't provide
+any useful semantics &mdash; <b>cdata objects are unsuitable as table
+keys!</b>
+</p>
+<p>
+A cdata object is treated like any other garbage-collected object and
+is hashed and compared by its address for table indexing. Since
+there's no interning for cdata value types, the same value may be
+boxed in different cdata objects with different addresses. Thus
+<tt>t[1LL+1LL]</tt> and <tt>t[2LL]</tt> usually <b>do not</b> point to
+the same hash slot and they certainly <b>do not</b> point to the same
+hash slot as <tt>t[2]</tt>.
+</p>
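+<p>
+A minimal sketch of this pitfall:
+</p>
+<pre class="code">
+local t = {}
+t[2LL] = "x"
+print(t[1LL+1LL])  --> usually nil: a different boxed object is a different key
+print(t[2])        --> nil: the Lua number 2 is yet another key
+</pre>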
+<p>
+It would seriously drive up implementation complexity and slow down
+the common case, if one were to add extra handling for by-value
+hashing and comparisons to Lua tables. Given the ubiquity of their use
+inside the VM, this is not acceptable.
+</p>
+<p>
+There are three viable alternatives, if you really need to use cdata
+objects as keys:
+</p>
+<ul>
+
+<li>If you can get by with the precision of Lua numbers
+(52&nbsp;bits), then use <tt>tonumber()</tt> on a cdata number or
+combine multiple fields of a cdata aggregate to a Lua number. Then use
+the resulting Lua number as a key when indexing tables.<br>
+One obvious benefit: <tt>t[tonumber(2LL)]</tt> <b>does</b> point to
+the same slot as <tt>t[2]</tt>.</li>
+
+<li>Otherwise use either <tt>tostring()</tt> on 64&nbsp;bit integers
+or complex numbers or combine multiple fields of a cdata aggregate to
+a Lua string (e.g. with
+<a href="ext_ffi_api.html#ffi_string"><tt>ffi.string()</tt></a>). Then
+use the resulting Lua string as a key when indexing tables.</li>
+
+<li>Create your own specialized hash table implementation using the
+C&nbsp;types provided by the FFI library, just like you would in
+C&nbsp;code. Ultimately this may give much better performance than the
+other alternatives or what a generic by-value hash table could
+possibly provide.</li>
+
+</ul>
+
+<h2 id="param">Parameterized Types</h2>
+<p>
+To facilitate some abstractions, the two functions
+<a href="ext_ffi_api.html#ffi_typeof"><tt>ffi.typeof</tt></a> and
+<a href="ext_ffi_api.html#ffi_cdef"><tt>ffi.cdef</tt></a> support
+parameterized types in C&nbsp;declarations. Note: none of the other API
+functions taking a cdecl allow this.
+</p>
+<p>
+Any place you can write a <b><tt>typedef</tt> name</b>, an
+<b>identifier</b> or a <b>number</b> in a declaration, you can write
+<tt>$</tt> (the dollar sign) instead. These placeholders are replaced in
+order of appearance with the arguments following the cdecl string:
+</p>
+<pre class="code">
+-- Declare a struct with a parameterized field type and name:
+ffi.cdef([[
+typedef struct { $ $; } foo_t;
+]], type1, name1)
+
+-- Anonymous struct with dynamic names:
+local bar_t = ffi.typeof("struct { int $, $; }", name1, name2)
+-- Derived pointer type:
+local bar_ptr_t = ffi.typeof("$ *", bar_t)
+
+-- Parameterized dimensions work even where a VLA won't work:
+local matrix_t = ffi.typeof("uint8_t[$][$]", width, height)
+</pre>
+<p>
+Caveat: this is <em>not</em> simple text substitution! A passed ctype or
+cdata object is treated like the underlying type, a passed string is
+considered an identifier and a number is considered a number. You must
+not mix this up: e.g. passing <tt>"int"</tt> as a string doesn't work in
+place of a type, you'd need to use <tt>ffi.typeof("int")</tt> instead.
+</p>
+<p>
+The main use for parameterized types are libraries implementing abstract
+data types
+(<a href="http://www.freelists.org/post/luajit/ffi-type-of-pointer-to,8"><span class="ext">&raquo;</span>&nbsp;example</a>),
+similar to what can be achieved with C++ template metaprogramming.
+Another use case are derived types of anonymous structs, which avoids
+pollution of the global struct namespace.
+</p>
+<p>
+Please note that parameterized types are a nice tool and indispensable
+for certain use cases. But you'll want to use them sparingly in regular
+code, e.g. when all types are actually fixed.
+</p>
+
+<h2 id="gc">Garbage Collection of cdata Objects</h2>
+<p>
+All explicitly (<tt>ffi.new()</tt>, <tt>ffi.cast()</tt> etc.) or
+implicitly (accessors) created cdata objects are garbage collected.
+You need to make sure to retain valid references to cdata objects
+somewhere on a Lua stack, an upvalue or in a Lua table while they are
+still in use. Once the last reference to a cdata object is gone, the
+garbage collector will automatically free the memory used by it (at
+the end of the next GC cycle).
+</p>
+<p>
+Please note that pointers themselves are cdata objects, however they
+are <b>not</b> followed by the garbage collector. So e.g. if you
+assign a cdata array to a pointer, you must keep the cdata object
+holding the array alive as long as the pointer is still in use:
+</p>
+<pre class="code">
+ffi.cdef[[
+typedef struct { int *a; } foo_t;
+]]
+
+local s = ffi.new("foo_t", ffi.new("int[10]")) -- <span style="color:#c00000;">WRONG!</span>
+
+local a = ffi.new("int[10]") -- <span style="color:#00a000;">OK</span>
+local s = ffi.new("foo_t", a)
+-- Now do something with 's', but keep 'a' alive until you're done.
+</pre>
+<p>
+Similar rules apply for Lua strings which are implicitly converted to
+<tt>"const&nbsp;char&nbsp;*"</tt>: the string object itself must be
+referenced somewhere or it'll be garbage collected eventually. The
+pointer will then point to stale data, which may have already been
+overwritten. Note that <em>string literals</em> are automatically kept
+alive as long as the function containing them (actually its prototype)
+is not garbage collected.
+</p>
+<p>
+Objects which are passed as an argument to an external C&nbsp;function
+are kept alive until the call returns. So it's generally safe to
+create temporary cdata objects in argument lists. This is a common
+idiom for <a href="#convert_vararg">passing specific C&nbsp;types to
+vararg functions</a>.
+</p>
+<p>
+Memory areas returned by C functions (e.g. from <tt>malloc()</tt>)
+must be manually managed, of course (or use
+<a href="ext_ffi_api.html#ffi_gc"><tt>ffi.gc()</tt></a>). Pointers to
+cdata objects are indistinguishable from pointers returned by C
+functions (which is one of the reasons why the GC cannot follow them).
+</p>
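+<p>
+A minimal sketch of the <tt>ffi.gc()</tt> idiom, using the usual
+C&nbsp;library declarations:
+</p>
+<pre class="code">
+ffi.cdef[[
+void *malloc(size_t size);
+void free(void *ptr);
+]]
+local p = ffi.gc(ffi.C.malloc(100), ffi.C.free)
+-- 'p' is freed automatically once the cdata object becomes garbage.
+</pre>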
+
+<h2 id="callback">Callbacks</h2>
+<p>
+The LuaJIT FFI automatically generates special callback functions
+whenever a Lua function is converted to a C&nbsp;function pointer. This
+associates the generated callback function pointer with the C&nbsp;type
+of the function pointer and the Lua function object (closure).
+</p>
+<p>
+This can happen implicitly due to the usual conversions, e.g. when
+passing a Lua function to a function pointer argument. Or you can use
+<tt>ffi.cast()</tt> to explicitly cast a Lua function to a
+C&nbsp;function pointer.
+</p>
+<p>
+Currently only certain C&nbsp;function types can be used as callback
+functions. Neither C&nbsp;vararg functions nor functions with
+pass-by-value aggregate argument or result types are supported. There
+are no restrictions for the kind of Lua functions that can be called
+from the callback &mdash; no checks for the proper number of arguments
+are made. The return value of the Lua function will be converted to the
+result type and an error will be thrown for invalid conversions.
+</p>
+<p>
+It's allowed to throw errors across a callback invocation, but it's not
+advisable in general. Do this only if you know that the C&nbsp;function
+which called the callback copes with the forced stack unwinding and doesn't
+leak resources.
+</p>
+<p>
+One thing that's not allowed is to let an FFI call into a C&nbsp;function
+get JIT-compiled, which in turn calls a callback, calling into Lua again.
+Usually this attempt is caught by the interpreter first and the
+C&nbsp;function is blacklisted for compilation.
+</p>
+<p>
+However, this heuristic may fail under specific circumstances: e.g. a
+message polling function might not run Lua callbacks right away and the call
+gets JIT-compiled. If it later happens to call back into Lua (e.g. a rarely
+invoked error callback), you'll get a VM PANIC with the message
+<tt>"bad callback"</tt>. Then you'll need to manually turn off
+JIT-compilation with
+<a href="ext_jit.html#jit_onoff_func"><tt>jit.off()</tt></a> for the
+surrounding Lua function that invokes such a message polling function (or
+similar).
+</p>
+
+<h3 id="callback_resources">Callback resource handling</h3>
+<p>
+Callbacks take up resources &mdash; you can only have a limited number
+of them at the same time (500&nbsp;-&nbsp;1000, depending on the
+architecture). The associated Lua functions are anchored to prevent
+garbage collection, too.
+</p>
+<p>
+<b>Callbacks due to implicit conversions are permanent!</b> There is no
+way to guess their lifetime, since the C&nbsp;side might store the
+function pointer for later use (typical for GUI toolkits). The associated
+resources cannot be reclaimed until termination:
+</p>
+<pre class="code">
+ffi.cdef[[
+typedef int (__stdcall *WNDENUMPROC)(void *hwnd, intptr_t l);
+int EnumWindows(WNDENUMPROC func, intptr_t l);
+]]
+
+-- Implicit conversion to a callback via function pointer argument.
+local count = 0
+ffi.C.EnumWindows(function(hwnd, l)
+ count = count + 1
+ return true
+end, 0)
+-- The callback is permanent and its resources cannot be reclaimed!
+-- Ok, so this may not be a problem, if you do this only once.
+</pre>
+<p>
+Note: this example shows that you <em>must</em> properly declare
+<tt>__stdcall</tt> callbacks on Windows/x86 systems. The calling
+convention cannot be automatically detected, unlike for
+<tt>__stdcall</tt> calls <em>to</em> Windows functions.
+</p>
+<p>
+For some use cases it's necessary to free up the resources or to
+dynamically redirect callbacks. Use an explicit cast to a
+C&nbsp;function pointer and keep the resulting cdata object. Then use
+the <a href="ext_ffi_api.html#callback_free"><tt>cb:free()</tt></a>
+or <a href="ext_ffi_api.html#callback_set"><tt>cb:set()</tt></a> methods
+on the cdata object:
+</p>
+<pre class="code">
+-- Explicitly convert to a callback via cast.
+local count = 0
+local cb = ffi.cast("WNDENUMPROC", function(hwnd, l)
+ count = count + 1
+ return true
+end)
+
+-- Pass it to a C function.
+ffi.C.EnumWindows(cb, 0)
+-- EnumWindows doesn't need the callback after it returns, so free it.
+
+cb:free()
+-- The callback function pointer is no longer valid and its resources
+-- will be reclaimed. The created Lua closure will be garbage collected.
+</pre>
+
+<h3 id="callback_performance">Callback performance</h3>
+<p>
+<b>Callbacks are slow!</b> First, the C&nbsp;to Lua transition itself
+has an unavoidable cost, similar to a <tt>lua_call()</tt> or
+<tt>lua_pcall()</tt>. Argument and result marshalling add to that cost.
+And finally, neither the C&nbsp;compiler nor LuaJIT can inline or
+optimize across the language barrier and hoist repeated computations out
+of a callback function.
+</p>
+<p>
+Do not use callbacks for performance-sensitive work: e.g. consider a
+numerical integration routine which takes a user-defined function to
+integrate over. It's a bad idea to call a user-defined Lua function from
+C&nbsp;code millions of times. The callback overhead will be absolutely
+detrimental for performance.
+</p>
+<p>
+It's considerably faster to write the numerical integration routine
+itself in Lua &mdash; the JIT compiler will be able to inline the
+user-defined function and optimize it together with its calling context,
+with very competitive performance.
+</p>
+<p>
+As a general guideline: <b>use callbacks only when you must</b>, because
+of existing C&nbsp;APIs. E.g. callback performance is irrelevant for a
+GUI application, which waits for user input most of the time, anyway.
+</p>
+<p>
+For new designs <b>avoid push-style APIs</b>: a C&nbsp;function repeatedly
+calling a callback for each result. Instead <b>use pull-style APIs</b>:
+call a C&nbsp;function repeatedly to get a new result. Calls from Lua
+to C via the FFI are much faster than the other way round. Most well-designed
+libraries already use pull-style APIs (read/write, get/put).
+</p>
+
+<h2 id="clib">C Library Namespaces</h2>
+<p>
+A C&nbsp;library namespace is a special kind of object which allows
+access to the symbols contained in shared libraries or the default
+symbol namespace. The default
+<a href="ext_ffi_api.html#ffi_C"><tt>ffi.C</tt></a> namespace is
+automatically created when the FFI library is loaded. C&nbsp;library
+namespaces for specific shared libraries may be created with the
+<a href="ext_ffi_api.html#ffi_load"><tt>ffi.load()</tt></a> API
+function.
+</p>
+<p>
+Indexing a C&nbsp;library namespace object with a symbol name (a Lua
+string) automatically binds it to the library. First the symbol type
+is resolved &mdash; it must have been declared with
+<a href="ext_ffi_api.html#ffi_cdef"><tt>ffi.cdef</tt></a>. Then the
+symbol address is resolved by searching for the symbol name in the
+associated shared libraries or the default symbol namespace. Finally,
+the resulting binding between the symbol name, the symbol type and its
+address is cached. Missing symbol declarations or nonexistent symbol
+names cause an error.
+</p>
+<p>
+This is what happens on a <b>read access</b> for the different kinds of
+symbols:
+</p>
+<ul>
+
+<li>External functions: a cdata object with the type of the function
+and its address is returned.</li>
+
+<li>External variables: the symbol address is dereferenced and the
+loaded value is <a href="#convert_tolua">converted to a Lua object</a>
+and returned.</li>
+
+<li>Constant values (<tt>static&nbsp;const</tt> or <tt>enum</tt>
+constants): the constant is <a href="#convert_tolua">converted to a
+Lua object</a> and returned.</li>
+
+</ul>
+<p>
+This is what happens on a <b>write access</b>:
+</p>
+<ul>
+
+<li>External variables: the value to be written is
+<a href="#convert_fromlua">converted to the C&nbsp;type</a> of the
+variable and then stored at the symbol address.</li>
+
+<li>Writing to constant variables or to any other symbol type causes
+an error, like any other attempted write to a constant location.</li>
+
+</ul>
+<p>
+C&nbsp;library namespaces themselves are garbage collected objects. If
+the last reference to the namespace object is gone, the garbage
+collector will eventually release the shared library reference and
+remove all memory associated with the namespace. Since this may
+trigger the removal of the shared library from the memory of the
+running process, it's generally <em>not safe</em> to use function
+cdata objects obtained from a library if the namespace object may be
+unreferenced.
+</p>
+<p>
+Performance notice: the JIT compiler specializes to the identity of
+namespace objects and to the strings used to index them. This
+effectively turns function cdata objects into constants. It's not
+useful and actually counter-productive to explicitly cache these
+function objects, e.g. <tt>local strlen = ffi.C.strlen</tt>. OTOH it
+<em>is</em> useful to cache the namespace itself, e.g. <tt>local C =
+ffi.C</tt>.
+</p>
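+<p>
+For example, a minimal sketch using the C&nbsp;library's <tt>strlen</tt>:
+</p>
+<pre class="code">
+ffi.cdef[[
+size_t strlen(const char *s);
+]]
+local C = ffi.C                   -- caching the namespace itself: useful
+print(tonumber(C.strlen("abc")))  --> 3
+-- local strlen = C.strlen        -- caching the function cdata: not useful
+</pre>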
+
+<h2 id="policy">No Hand-holding!</h2>
+<p>
+The FFI library has been designed as <b>a low-level library</b>. The
+goal is to interface with C&nbsp;code and C&nbsp;data types with a
+minimum of overhead. This means <b>you can do anything you can do
+from&nbsp;C</b>: access all memory, overwrite anything in memory, call
+machine code at any memory address and so on.
+</p>
+<p>
+The FFI library provides <b>no memory safety</b>, unlike regular Lua
+code. It will happily allow you to dereference a <tt>NULL</tt>
+pointer, to access arrays out of bounds or to misdeclare
+C&nbsp;functions. If you make a mistake, your application might crash,
+just like equivalent C&nbsp;code would.
+</p>
+<p>
+This behavior is inevitable, since the goal is to provide full
+interoperability with C&nbsp;code. Adding extra safety measures, like
+bounds checks, would be futile. There's no way to detect
+misdeclarations of C&nbsp;functions, since shared libraries only
+provide symbol names, but no type information. Likewise there's no way
+to infer the valid range of indexes for a returned pointer.
+</p>
+<p>
+Again: the FFI library is a low-level library. This implies it needs
+to be used with care, but its flexibility and performance often
+outweigh this concern. If you're a C or C++ developer, it'll be easy
+to apply your existing knowledge. OTOH writing code for the FFI
+library is not for the faint of heart and probably shouldn't be the
+first exercise for someone with little experience in Lua, C or C++.
+</p>
+<p>
+As a corollary of the above, the FFI library is <b>not safe for use by
+untrusted Lua code</b>. If you're sandboxing untrusted Lua code, you
+definitely don't want to give this code access to the FFI library or
+to <em>any</em> cdata object (except 64&nbsp;bit integers or complex
+numbers). Any properly engineered Lua sandbox needs to provide safety
+wrappers for many of the standard Lua library functions &mdash;
+similar wrappers need to be written for high-level operations on FFI
+data types, too.
+</p>
+
+<h2 id="status">Current Status</h2>
+<p>
+The initial release of the FFI library has some limitations and is
+missing some features. Most of these will be fixed in future releases.
+</p>
+<p>
+<a href="#clang">C language support</a> is
+currently incomplete:
+</p>
+<ul>
+<li>C&nbsp;declarations are not passed through a C&nbsp;pre-processor,
+yet.</li>
+<li>The C&nbsp;parser is able to evaluate most constant expressions
+commonly found in C&nbsp;header files. However it doesn't handle the
+full range of C&nbsp;expression semantics and may fail for some
+obscure constructs.</li>
+<li><tt>static const</tt> declarations only work for integer types
+up to 32&nbsp;bits. Neither declaring string constants nor
+floating-point constants is supported.</li>
+<li>Packed <tt>struct</tt> bitfields that cross container boundaries
+are not implemented.</li>
+<li>Native vector types may be defined with the GCC <tt>mode</tt> or
+<tt>vector_size</tt> attribute. But no operations other than loading,
+storing and initializing them are supported, yet.</li>
+<li>The <tt>volatile</tt> type qualifier is currently ignored by
+compiled code.</li>
+<li><a href="ext_ffi_api.html#ffi_cdef"><tt>ffi.cdef</tt></a> silently
+ignores all re-declarations.</li>
+</ul>
+<p>
+The JIT compiler already handles a large subset of all FFI operations.
+It automatically falls back to the interpreter for unimplemented
+operations (you can check for this with the
+<a href="running.html#opt_j"><tt>-jv</tt></a> command line option).
+The following operations are currently not compiled and may exhibit
+suboptimal performance, especially when used in inner loops:
+</p>
+<ul>
+<li>Bitfield accesses and initializations.</li>
+<li>Vector operations.</li>
+<li>Table initializers.</li>
+<li>Initialization of nested <tt>struct</tt>/<tt>union</tt> types.</li>
+<li>Allocations of variable-length arrays or structs.</li>
+<li>Allocations of C&nbsp;types with a size &gt; 128&nbsp;bytes or an
+alignment &gt; 8&nbsp;bytes.</li>
+<li>Conversions from lightuserdata to <tt>void&nbsp;*</tt>.</li>
+<li>Pointer differences for element sizes that are not a power of
+two.</li>
+<li>Calls to C&nbsp;functions with aggregates passed or returned by
+value.</li>
+<li>Calls to ctype metamethods which are not plain functions.</li>
+<li>ctype <tt>__newindex</tt> tables and non-string lookups in ctype
+<tt>__index</tt> tables.</li>
+<li><tt>tostring()</tt> for cdata types.</li>
+<li>Calls to <tt>ffi.cdef()</tt>, <tt>ffi.load()</tt> and
+<tt>ffi.metatype()</tt>.</li>
+</ul>
+<p>
+Other missing features:
+</p>
+<ul>
+<li>Bit operations for 64&nbsp;bit types.</li>
+<li>Arithmetic for <tt>complex</tt> numbers.</li>
+<li>Passing structs by value to vararg C&nbsp;functions.</li>
+<li><a href="extensions.html#exceptions">C++ exception interoperability</a>
+does not extend to C&nbsp;functions called via the FFI, if the call is
+compiled.</li>
+</ul>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/ext_ffi_tutorial.html b/3rdparty/lua/doc/ext_ffi_tutorial.html
index 9356edc..30213b3 100644
--- a/3rdparty/lua/doc/ext_ffi_tutorial.html
+++ b/3rdparty/lua/doc/ext_ffi_tutorial.html
@@ -1,601 +1,601 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>FFI Tutorial</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-table.idiomtable { font-size: 90%; line-height: 1.2; }
-table.idiomtable tt { font-size: 100%; }
-table.idiomtable td { vertical-align: top; }
-tr.idiomhead td { font-weight: bold; }
-td.idiomlua b { font-weight: normal; color: #2142bf; }
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>FFI Tutorial</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a class="current" href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-This page is intended to give you an overview of the features of the FFI
-library by presenting a few use cases and guidelines.
-</p>
-<p>
-This page makes no attempt to explain all of the FFI library, though.
-You'll want to have a look at the <a href="ext_ffi_api.html">ffi.* API
-function reference</a> and the <a href="ext_ffi_semantics.html">FFI
-semantics</a> to learn more.
-</p>
-
-<h2 id="load">Loading the FFI Library</h2>
-<p>
-The FFI library is built into LuaJIT by default, but it's not loaded
-and initialized by default. The suggested way to use the FFI library
-is to add the following to the start of every Lua file that needs one
-of its functions:
-</p>
-<pre class="code">
-local ffi = require("ffi")
-</pre>
-<p>
-Please note this doesn't define an <tt>ffi</tt> variable in the table
-of globals &mdash; you really need to use the local variable. The
-<tt>require</tt> function ensures the library is only loaded once.
-</p>
-<p style="font-size: 8pt;">
-Note: If you want to experiment with the FFI from the interactive prompt
-of the command line executable, omit the <tt>local</tt>, as it doesn't
-preserve local variables across lines.
-</p>
-
-<h2 id="sleep">Accessing Standard System Functions</h2>
-<p>
-The following code explains how to access standard system functions.
-We slowly print two lines of dots by sleeping for 10&nbsp;milliseconds
-after each dot:
-</p>
-<pre class="code mark">
-<span class="codemark">&nbsp;
-&#9312;
-
-
-
-
-
-&#9313;
-&#9314;
-&#9315;
-
-
-
-&#9316;
-
-
-
-
-
-&#9317;</span>local ffi = require("ffi")
-ffi.cdef[[
-<span style="color:#00a000;">void Sleep(int ms);
-int poll(struct pollfd *fds, unsigned long nfds, int timeout);</span>
-]]
-
-local sleep
-if ffi.os == "Windows" then
- function sleep(s)
- ffi.C.Sleep(s*1000)
- end
-else
- function sleep(s)
- ffi.C.poll(nil, 0, s*1000)
- end
-end
-
-for i=1,160 do
- io.write("."); io.flush()
- sleep(0.01)
-end
-io.write("\n")
-</pre>
-<p>
-Here's the step-by-step explanation:
-</p>
-<p>
-<span class="mark">&#9312;</span> This defines the
-C&nbsp;library functions we're going to use. The part inside the
-double-brackets (in green) is just standard C&nbsp;syntax. You can
-usually get this info from the C&nbsp;header files or the
-documentation provided by each C&nbsp;library or C&nbsp;compiler.
-</p>
-<p>
-<span class="mark">&#9313;</span> The difficulty we're
-facing here, is that there are different standards to choose from.
-Windows has a simple <tt>Sleep()</tt> function. On other systems there
-are a variety of functions available to achieve sub-second sleeps, but
-with no clear consensus. Thankfully <tt>poll()</tt> can be used for
-this task, too, and it's present on most non-Windows systems. The
-check for <tt>ffi.os</tt> makes sure we use the Windows-specific
-function only on Windows systems.
-</p>
-<p>
-<span class="mark">&#9314;</span> Here we're wrapping the
-call to the C&nbsp;function in a Lua function. This isn't strictly
-necessary, but it's helpful to deal with system-specific issues only
-in one part of the code. The way we're wrapping it ensures the check
-for the OS is only done during initialization and not for every call.
-</p>
-<p>
-<span class="mark">&#9315;</span> A more subtle point is
-that we defined our <tt>sleep()</tt> function (for the sake of this
-example) as taking the number of seconds, but accepting fractional
-seconds. Multiplying this by 1000 gets us milliseconds, but that still
-leaves it a Lua number, which is a floating-point value. Alas, the
-<tt>Sleep()</tt> function only accepts an integer value. Luckily for
-us, the FFI library automatically performs the conversion when calling
-the function (truncating the FP value towards zero, like in C).
-</p>
-<p style="font-size: 8pt;">
-Some readers will notice that <tt>Sleep()</tt> is part of
-<tt>KERNEL32.DLL</tt> and is also a <tt>stdcall</tt> function. So how
-can this possibly work? The FFI library provides the <tt>ffi.C</tt>
-default C&nbsp;library namespace, which allows calling functions from
-the default set of libraries, like a C&nbsp;compiler would. Also, the
-FFI library automatically detects <tt>stdcall</tt> functions, so you
-don't need to declare them as such.
-</p>
-<p>
-<span class="mark">&#9316;</span> The <tt>poll()</tt>
-function takes a couple more arguments we're not going to use. You can
-simply use <tt>nil</tt> to pass a <tt>NULL</tt> pointer and <tt>0</tt>
-for the <tt>nfds</tt> parameter. Please note that the
-number&nbsp;<tt>0</tt> <em>does not convert to a pointer value</em>,
-unlike in C++. You really have to pass pointers to pointer arguments
-and numbers to number arguments.
-</p>
-<p style="font-size: 8pt;">
-The page on <a href="ext_ffi_semantics.html">FFI semantics</a> has all
-of the gory details about
-<a href="ext_ffi_semantics.html#convert">conversions between Lua
-objects and C&nbsp;types</a>. For the most part you don't have to deal
-with this, as it's performed automatically and it's carefully designed
-to bridge the semantic differences between Lua and C.
-</p>
-<p>
-<span class="mark">&#9317;</span> Now that we have defined
-our own <tt>sleep()</tt> function, we can just call it from plain Lua
-code. That wasn't so bad, huh? Turning these boring animated dots into
-a fascinating best-selling game is left as an exercise for the reader.
-:-)
-</p>
-
-<h2 id="zlib">Accessing the zlib Compression Library</h2>
-<p>
-The following code shows how to access the <a
-href="http://zlib.net/">zlib</a> compression library from Lua code.
-We'll define two convenience wrapper functions that take a string and
-compress or uncompress it to another string:
-</p>
-<pre class="code mark">
-<span class="codemark">&nbsp;
-&#9312;
-
-
-
-
-
-
-&#9313;
-
-
-&#9314;
-
-&#9315;
-
-
-&#9316;
-
-
-&#9317;
-
-
-
-
-
-
-
-&#9318;</span>local ffi = require("ffi")
-ffi.cdef[[
-<span style="color:#00a000;">unsigned long compressBound(unsigned long sourceLen);
-int compress2(uint8_t *dest, unsigned long *destLen,
- const uint8_t *source, unsigned long sourceLen, int level);
-int uncompress(uint8_t *dest, unsigned long *destLen,
- const uint8_t *source, unsigned long sourceLen);</span>
-]]
-local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")
-
-local function compress(txt)
- local n = zlib.compressBound(#txt)
- local buf = ffi.new("uint8_t[?]", n)
- local buflen = ffi.new("unsigned long[1]", n)
- local res = zlib.compress2(buf, buflen, txt, #txt, 9)
- assert(res == 0)
- return ffi.string(buf, buflen[0])
-end
-
-local function uncompress(comp, n)
- local buf = ffi.new("uint8_t[?]", n)
- local buflen = ffi.new("unsigned long[1]", n)
- local res = zlib.uncompress(buf, buflen, comp, #comp)
- assert(res == 0)
- return ffi.string(buf, buflen[0])
-end
-
--- Simple test code.
-local txt = string.rep("abcd", 1000)
-print("Uncompressed size: ", #txt)
-local c = compress(txt)
-print("Compressed size: ", #c)
-local txt2 = uncompress(c, #txt)
-assert(txt2 == txt)
-</pre>
-<p>
-Here's the step-by-step explanation:
-</p>
-<p>
-<span class="mark">&#9312;</span> This defines some of the
-C&nbsp;functions provided by zlib. For the sake of this example, some
-type indirections have been reduced and it uses the pre-defined
-fixed-size integer types, while still adhering to the zlib API/ABI.
-</p>
-<p>
-<span class="mark">&#9313;</span> This loads the zlib shared
-library. On POSIX systems it's named <tt>libz.so</tt> and usually
-comes pre-installed. Since <tt>ffi.load()</tt> automatically adds any
-missing standard prefixes/suffixes, we can simply load the
-<tt>"z"</tt> library. On Windows it's named <tt>zlib1.dll</tt> and
-you'll have to download it first from the
-<a href="http://zlib.net/"><span class="ext">&raquo;</span>&nbsp;zlib site</a>. The check for
-<tt>ffi.os</tt> makes sure we pass the right name to
-<tt>ffi.load()</tt>.
-</p>
-<p>
-<span class="mark">&#9314;</span> First, the maximum size of
-the compression buffer is obtained by calling the
-<tt>zlib.compressBound</tt> function with the length of the
-uncompressed string. The next line allocates a byte buffer of this
-size. The <tt>[?]</tt> in the type specification indicates a
-variable-length array (VLA). The actual number of elements of this
-array is given as the 2nd argument to <tt>ffi.new()</tt>.
-</p>
-<p>
-<span class="mark">&#9315;</span> This may look strange at
-first, but have a look at the declaration of the <tt>compress2</tt>
-function from zlib: the destination length is defined as a pointer!
-This is because you pass in the maximum buffer size and get back the
-actual length that was used.
-</p>
-<p>
-In C you'd pass in the address of a local variable
-(<tt>&amp;buflen</tt>). But since there's no address-of operator in
-Lua, we'll just pass in a one-element array. Conveniently it can be
-initialized with the maximum buffer size in one step. Calling the
-actual <tt>zlib.compress2</tt> function is then straightforward.
-</p>
-<p>
-<span class="mark">&#9316;</span> We want to return the
-compressed data as a Lua string, so we'll use <tt>ffi.string()</tt>.
-It needs a pointer to the start of the data and the actual length. The
-length has been returned in the <tt>buflen</tt> array, so we'll just
-get it from there.
-</p>
-<p style="font-size: 8pt;">
-Note that since the function returns now, the <tt>buf</tt> and
-<tt>buflen</tt> variables will eventually be garbage collected. This
-is fine, because <tt>ffi.string()</tt> has copied the contents to a
-newly created (interned) Lua string. If you plan to call this function
-lots of times, consider reusing the buffers and/or handing back the
-results in buffers instead of strings. This will reduce the overhead
-for garbage collection and string interning.
-</p>
-<p>
-<span class="mark">&#9317;</span> The <tt>uncompress</tt>
-function does the exact opposite of the <tt>compress</tt> function.
-The compressed data doesn't include the size of the original string,
-so this needs to be passed in. Otherwise no surprises here.
-</p>
-<p>
-<span class="mark">&#9318;</span> The code that makes use
-of the functions we just defined is just plain Lua code. It doesn't
-need to know anything about the LuaJIT FFI &mdash; the convenience
-wrapper functions completely hide it.
-</p>
-<p>
-One major advantage of the LuaJIT FFI is that you are now able to
-write those wrappers <em>in Lua</em>. And at a fraction of the time it
-would cost you to create an extra C&nbsp;module using the Lua/C API.
-Many of the simpler C&nbsp;functions can probably be used directly
-from your Lua code, without any wrappers.
-</p>
-<p style="font-size: 8pt;">
-Side note: the zlib API uses the <tt>long</tt> type for passing
-lengths and sizes around. But all those zlib functions actually only
-deal with 32&nbsp;bit values. This is an unfortunate choice for a
-public API, but may be explained by zlib's history &mdash; we'll just
-have to deal with it.
-</p>
-<p style="font-size: 8pt;">
-First, you should know that a <tt>long</tt> is a 64&nbsp;bit type e.g.
-on POSIX/x64 systems, but a 32&nbsp;bit type on Windows/x64 and on
-32&nbsp;bit systems. Thus a <tt>long</tt> result can be either a plain
-Lua number or a boxed 64&nbsp;bit integer cdata object, depending on
-the target system.
-</p>
-<p style="font-size: 8pt;">
-Ok, so the <tt>ffi.*</tt> functions generally accept cdata objects
-wherever you'd want to use a number. That's why we get away with
-passing <tt>n</tt> to <tt>ffi.string()</tt> above. But other Lua
-library functions or modules don't know how to deal with this. So for
-maximum portability one needs to use <tt>tonumber()</tt> on returned
-<tt>long</tt> results before passing them on. Otherwise the
-application might work on some systems, but would fail in a POSIX/x64
-environment.
-</p>
-
-<h2 id="metatype">Defining Metamethods for a C&nbsp;Type</h2>
-<p>
-The following code explains how to define metamethods for a C type.
-We define a simple point type and add some operations to it:
-</p>
-<pre class="code mark">
-<span class="codemark">&nbsp;
-&#9312;
-
-
-
-&#9313;
-
-&#9314;
-
-&#9315;
-
-
-
-&#9316;
-
-&#9317;</span>local ffi = require("ffi")
-ffi.cdef[[
-<span style="color:#00a000;">typedef struct { double x, y; } point_t;</span>
-]]
-
-local point
-local mt = {
- __add = function(a, b) return point(a.x+b.x, a.y+b.y) end,
- __len = function(a) return math.sqrt(a.x*a.x + a.y*a.y) end,
- __index = {
- area = function(a) return a.x*a.x + a.y*a.y end,
- },
-}
-point = ffi.metatype("point_t", mt)
-
-local a = point(3, 4)
-print(a.x, a.y) --> 3 4
-print(#a) --> 5
-print(a:area()) --> 25
-local b = a + point(0.5, 8)
-print(#b) --> 12.5
-</pre>
-<p>
-Here's the step-by-step explanation:
-</p>
-<p>
-<span class="mark">&#9312;</span> This defines the C&nbsp;type for a
-two-dimensional point object.
-</p>
-<p>
-<span class="mark">&#9313;</span> We have to declare the variable
-holding the point constructor first, because it's used inside of a
-metamethod.
-</p>
-<p>
-<span class="mark">&#9314;</span> Let's define an <tt>__add</tt>
-metamethod which adds the coordinates of two points and creates a new
-point object. For simplicity, this function assumes that both arguments
-are points. But it could be any mix of objects, if at least one operand
-is of the required type (e.g. adding a point plus a number or vice
-versa). Our <tt>__len</tt> metamethod returns the distance of a point to
-the origin.
-</p>
-<p>
-<span class="mark">&#9315;</span> If we run out of operators, we can
-define named methods, too. Here the <tt>__index</tt> table defines an
-<tt>area</tt> function. For custom indexing needs, one might want to
-define <tt>__index</tt> and <tt>__newindex</tt> <em>functions</em> instead.
-</p>
-<p>
-<span class="mark">&#9316;</span> This associates the metamethods with
-our C&nbsp;type. This only needs to be done once. For convenience, a
-constructor is returned by
-<a href="ext_ffi_api.html#ffi_metatype"><tt>ffi.metatype()</tt></a>.
-We're not required to use it, though. The original C&nbsp;type can still
-be used e.g. to create an array of points. The metamethods automatically
-apply to any and all uses of this type.
-</p>
-<p>
-Please note that the association with a metatable is permanent and
-<b>the metatable must not be modified afterwards!</b> Ditto for the
-<tt>__index</tt> table.
-</p>
-<p>
-<span class="mark">&#9317;</span> Here are some simple usage examples
-for the point type and their expected results. The pre-defined
-operations (such as <tt>a.x</tt>) can be freely mixed with the newly
-defined metamethods. Note that <tt>area</tt> is a method and must be
-called with the Lua syntax for methods: <tt>a:area()</tt>, not
-<tt>a.area()</tt>.
-</p>
-<p>
-The C&nbsp;type metamethod mechanism is most useful when used in
-conjunction with C&nbsp;libraries that are written in an object-oriented
-style. Creators return a pointer to a new instance and methods take an
-instance pointer as the first argument. Sometimes you can just point
-<tt>__index</tt> to the library namespace and <tt>__gc</tt> to the
-destructor and you're done. But often enough you'll want to add
-convenience wrappers, e.g. to return actual Lua strings or when
-returning multiple values.
-</p>
-<p>
-Some C libraries only declare instance pointers as an opaque
-<tt>void&nbsp;*</tt> type. In this case you can use a fake type for all
-declarations, e.g. a pointer to a named (incomplete) struct will do:
-<tt>typedef struct foo_type *foo_handle</tt>. The C&nbsp;side doesn't
-know what you declare with the LuaJIT FFI, but as long as the underlying
-types are compatible, everything still works.
-</p>
-
-<h2 id="idioms">Translating C&nbsp;Idioms</h2>
-<p>
-Here's a list of common C&nbsp;idioms and their translation to the
-LuaJIT FFI:
-</p>
-<table class="idiomtable">
-<tr class="idiomhead">
-<td class="idiomdesc">Idiom</td>
-<td class="idiomc">C&nbsp;code</td>
-<td class="idiomlua">Lua code</td>
-</tr>
-<tr class="odd separate">
-<td class="idiomdesc">Pointer dereference<br><tt>int *p;</tt></td><td class="idiomc"><tt>x = *p;<br>*p = y;</tt></td><td class="idiomlua"><tt>x = <b>p[0]</b><br><b>p[0]</b> = y</tt></td></tr>
-<tr class="even">
-<td class="idiomdesc">Pointer indexing<br><tt>int i, *p;</tt></td><td class="idiomc"><tt>x = p[i];<br>p[i+1] = y;</tt></td><td class="idiomlua"><tt>x = p[i]<br>p[i+1] = y</tt></td></tr>
-<tr class="odd">
-<td class="idiomdesc">Array indexing<br><tt>int i, a[];</tt></td><td class="idiomc"><tt>x = a[i];<br>a[i+1] = y;</tt></td><td class="idiomlua"><tt>x = a[i]<br>a[i+1] = y</tt></td></tr>
-<tr class="even separate">
-<td class="idiomdesc"><tt>struct</tt>/<tt>union</tt> dereference<br><tt>struct foo s;</tt></td><td class="idiomc"><tt>x = s.field;<br>s.field = y;</tt></td><td class="idiomlua"><tt>x = s.field<br>s.field = y</tt></td></tr>
-<tr class="odd">
-<td class="idiomdesc"><tt>struct</tt>/<tt>union</tt> pointer deref.<br><tt>struct foo *sp;</tt></td><td class="idiomc"><tt>x = sp->field;<br>sp->field = y;</tt></td><td class="idiomlua"><tt>x = <b>s.field</b><br><b>s.field</b> = y</tt></td></tr>
-<tr class="even separate">
-<td class="idiomdesc">Pointer arithmetic<br><tt>int i, *p;</tt></td><td class="idiomc"><tt>x = p + i;<br>y = p - i;</tt></td><td class="idiomlua"><tt>x = p + i<br>y = p - i</tt></td></tr>
-<tr class="odd">
-<td class="idiomdesc">Pointer difference<br><tt>int *p1, *p2;</tt></td><td class="idiomc"><tt>x = p1 - p2;</tt></td><td class="idiomlua"><tt>x = p1 - p2</tt></td></tr>
-<tr class="even">
-<td class="idiomdesc">Array element pointer<br><tt>int i, a[];</tt></td><td class="idiomc"><tt>x = &amp;a[i];</tt></td><td class="idiomlua"><tt>x = <b>a+i</b></tt></td></tr>
-<tr class="odd">
-<td class="idiomdesc">Cast pointer to address<br><tt>int *p;</tt></td><td class="idiomc"><tt>x = (intptr_t)p;</tt></td><td class="idiomlua"><tt>x = <b>tonumber(<br>&nbsp;ffi.cast("intptr_t",<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;p))</b></tt></td></tr>
-<tr class="even separate">
-<td class="idiomdesc">Functions with outargs<br><tt>void foo(int *inoutlen);</tt></td><td class="idiomc"><tt>int len = x;<br>foo(&amp;len);<br>y = len;</tt></td><td class="idiomlua"><tt><b>local len =<br>&nbsp;&nbsp;ffi.new("int[1]", x)<br>foo(len)<br>y = len[0]</b></tt></td></tr>
-<tr class="odd">
-<td class="idiomdesc"><a href="ext_ffi_semantics.html#convert_vararg">Vararg conversions</a><br><tt>int printf(char *fmt, ...);</tt></td><td class="idiomc"><tt>printf("%g", 1.0);<br>printf("%d", 1);<br>&nbsp;</tt></td><td class="idiomlua"><tt>printf("%g", 1);<br>printf("%d",<br>&nbsp;&nbsp;<b>ffi.new("int", 1)</b>)</tt></td></tr>
-</table>
-
-<h2 id="cache">To Cache or Not to Cache</h2>
-<p>
-It's a common Lua idiom to cache library functions in local variables
-or upvalues, e.g.:
-</p>
-<pre class="code">
-local byte, char = string.byte, string.char
-local function foo(x)
- return char(byte(x)+1)
-end
-</pre>
-<p>
-This replaces several hash-table lookups with a (faster) direct use of
-a local or an upvalue. This is less important with LuaJIT, since the
-JIT compiler optimizes hash-table lookups a lot and is even able to
-hoist most of them out of the inner loops. It can't eliminate
-<em>all</em> of them, though, and it saves some typing for often-used
-functions. So there's still a place for this, even with LuaJIT.
-</p>
-<p>
-The situation is a bit different with C&nbsp;function calls via the
-FFI library. The JIT compiler has special logic to eliminate <em>all
-of the lookup overhead</em> for functions resolved from a
-<a href="ext_ffi_semantics.html#clib">C&nbsp;library namespace</a>!
-Thus it's not helpful and actually counter-productive to cache
-individual C&nbsp;functions like this:
-</p>
-<pre class="code">
-local <b>funca</b>, <b>funcb</b> = ffi.C.funca, ffi.C.funcb -- <span style="color:#c00000;">Not helpful!</span>
-local function foo(x, n)
- for i=1,n do <b>funcb</b>(<b>funca</b>(x, i), 1) end
-end
-</pre>
-<p>
-This turns them into indirect calls and generates bigger and slower
-machine code. Instead you'll want to cache the namespace itself and
-rely on the JIT compiler to eliminate the lookups:
-</p>
-<pre class="code">
-local <b>C</b> = ffi.C -- <span style="color:#00a000;">Instead use this!</span>
-local function foo(x, n)
- for i=1,n do <b>C.funcb</b>(<b>C.funca</b>(x, i), 1) end
-end
-</pre>
-<p>
-This generates both shorter and faster code. So <b>don't cache
-C&nbsp;functions</b>, but <b>do</b> cache namespaces! Most often the
-namespace is already in a local variable at an outer scope, e.g. from
-<tt>local&nbsp;lib&nbsp;=&nbsp;ffi.load(...)</tt>. Note that copying
-it to a local variable in the function scope is unnecessary.
-</p>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>FFI Tutorial</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+table.idiomtable { font-size: 90%; line-height: 1.2; }
+table.idiomtable tt { font-size: 100%; }
+table.idiomtable td { vertical-align: top; }
+tr.idiomhead td { font-weight: bold; }
+td.idiomlua b { font-weight: normal; color: #2142bf; }
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>FFI Tutorial</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a class="current" href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+This page is intended to give you an overview of the features of the FFI
+library by presenting a few use cases and guidelines.
+</p>
+<p>
+This page makes no attempt to explain all of the FFI library, though.
+You'll want to have a look at the <a href="ext_ffi_api.html">ffi.* API
+function reference</a> and the <a href="ext_ffi_semantics.html">FFI
+semantics</a> to learn more.
+</p>
+
+<h2 id="load">Loading the FFI Library</h2>
+<p>
+The FFI library is built into LuaJIT by default, but it's not loaded
+and initialized by default. The suggested way to use the FFI library
+is to add the following to the start of every Lua file that needs one
+of its functions:
+</p>
+<pre class="code">
+local ffi = require("ffi")
+</pre>
+<p>
+Please note this doesn't define an <tt>ffi</tt> variable in the table
+of globals &mdash; you really need to use the local variable. The
+<tt>require</tt> function ensures the library is only loaded once.
+</p>
+<p style="font-size: 8pt;">
+Note: If you want to experiment with the FFI from the interactive prompt
+of the command line executable, omit the <tt>local</tt>, as it doesn't
+preserve local variables across lines.
+</p>
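+<p style="font-size: 8pt;">
+For illustration only, a short interactive session on a POSIX system might
+look like this; the <tt>getpid()</tt> declaration is just an example picked
+for this sketch, not something the tutorial requires:
+</p>
+<pre class="code">
+ffi = require("ffi")            -- no 'local' at the interactive prompt
+ffi.cdef[[int getpid(void);]]   -- declare a standard C function
+print(ffi.C.getpid())           -- call it via the default namespace
+</pre>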
+
+<h2 id="sleep">Accessing Standard System Functions</h2>
+<p>
+The following code explains how to access standard system functions.
+We slowly print two lines of dots by sleeping for 10&nbsp;milliseconds
+after each dot:
+</p>
+<pre class="code mark">
+<span class="codemark">&nbsp;
+&#9312;
+
+
+
+
+
+&#9313;
+&#9314;
+&#9315;
+
+
+
+&#9316;
+
+
+
+
+
+&#9317;</span>local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">void Sleep(int ms);
+int poll(struct pollfd *fds, unsigned long nfds, int timeout);</span>
+]]
+
+local sleep
+if ffi.os == "Windows" then
+ function sleep(s)
+ ffi.C.Sleep(s*1000)
+ end
+else
+ function sleep(s)
+ ffi.C.poll(nil, 0, s*1000)
+ end
+end
+
+for i=1,160 do
+ io.write("."); io.flush()
+ sleep(0.01)
+end
+io.write("\n")
+</pre>
+<p>
+Here's the step-by-step explanation:
+</p>
+<p>
+<span class="mark">&#9312;</span> This defines the
+C&nbsp;library functions we're going to use. The part inside the
+double-brackets (in green) is just standard C&nbsp;syntax. You can
+usually get this info from the C&nbsp;header files or the
+documentation provided by each C&nbsp;library or C&nbsp;compiler.
+</p>
+<p>
+<span class="mark">&#9313;</span> The difficulty we're
+facing here is that there are different standards to choose from.
+Windows has a simple <tt>Sleep()</tt> function. On other systems there
+are a variety of functions available to achieve sub-second sleeps, but
+with no clear consensus. Thankfully <tt>poll()</tt> can be used for
+this task, too, and it's present on most non-Windows systems. The
+check for <tt>ffi.os</tt> makes sure we use the Windows-specific
+function only on Windows systems.
+</p>
+<p>
+<span class="mark">&#9314;</span> Here we're wrapping the
+call to the C&nbsp;function in a Lua function. This isn't strictly
+necessary, but it's helpful to deal with system-specific issues only
+in one part of the code. The way we're wrapping it ensures the check
+for the OS is only done during initialization and not for every call.
+</p>
+<p>
+<span class="mark">&#9315;</span> A more subtle point is
+that we defined our <tt>sleep()</tt> function (for the sake of this
+example) as taking the number of seconds, but accepting fractional
+seconds. Multiplying this by 1000 gets us milliseconds, but that still
+leaves it a Lua number, which is a floating-point value. Alas, the
+<tt>Sleep()</tt> function only accepts an integer value. Luckily for
+us, the FFI library automatically performs the conversion when calling
+the function (truncating the FP value towards zero, like in C).
+</p>
+<p style="font-size: 8pt;">
+Some readers will notice that <tt>Sleep()</tt> is part of
+<tt>KERNEL32.DLL</tt> and is also a <tt>stdcall</tt> function. So how
+can this possibly work? The FFI library provides the <tt>ffi.C</tt>
+default C&nbsp;library namespace, which allows calling functions from
+the default set of libraries, like a C&nbsp;compiler would. Also, the
+FFI library automatically detects <tt>stdcall</tt> functions, so you
+don't need to declare them as such.
+</p>
+<p>
+<span class="mark">&#9316;</span> The <tt>poll()</tt>
+function takes a couple more arguments we're not going to use. You can
+simply use <tt>nil</tt> to pass a <tt>NULL</tt> pointer and <tt>0</tt>
+for the <tt>nfds</tt> parameter. Please note that the
+number&nbsp;<tt>0</tt> <em>does not convert to a pointer value</em>,
+unlike in C++. You really have to pass pointers to pointer arguments
+and numbers to number arguments.
+</p>
+<p style="font-size: 8pt;">
+The page on <a href="ext_ffi_semantics.html">FFI semantics</a> has all
+of the gory details about
+<a href="ext_ffi_semantics.html#convert">conversions between Lua
+objects and C&nbsp;types</a>. For the most part you don't have to deal
+with this, as it's performed automatically and it's carefully designed
+to bridge the semantic differences between Lua and C.
+</p>
+<p>
+<span class="mark">&#9317;</span> Now that we have defined
+our own <tt>sleep()</tt> function, we can just call it from plain Lua
+code. That wasn't so bad, huh? Turning these boring animated dots into
+a fascinating best-selling game is left as an exercise for the reader.
+:-)
+</p>
+
+<h2 id="zlib">Accessing the zlib Compression Library</h2>
+<p>
+The following code shows how to access the <a
+href="http://zlib.net/">zlib</a> compression library from Lua code.
+We'll define two convenience wrapper functions that take a string and
+compress or uncompress it to another string:
+</p>
+<pre class="code mark">
+<span class="codemark">&nbsp;
+&#9312;
+
+
+
+
+
+
+&#9313;
+
+
+&#9314;
+
+&#9315;
+
+
+&#9316;
+
+
+&#9317;
+
+
+
+
+
+
+
+&#9318;</span>local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">unsigned long compressBound(unsigned long sourceLen);
+int compress2(uint8_t *dest, unsigned long *destLen,
+ const uint8_t *source, unsigned long sourceLen, int level);
+int uncompress(uint8_t *dest, unsigned long *destLen,
+ const uint8_t *source, unsigned long sourceLen);</span>
+]]
+local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")
+
+local function compress(txt)
+ local n = zlib.compressBound(#txt)
+ local buf = ffi.new("uint8_t[?]", n)
+ local buflen = ffi.new("unsigned long[1]", n)
+ local res = zlib.compress2(buf, buflen, txt, #txt, 9)
+ assert(res == 0)
+ return ffi.string(buf, buflen[0])
+end
+
+local function uncompress(comp, n)
+ local buf = ffi.new("uint8_t[?]", n)
+ local buflen = ffi.new("unsigned long[1]", n)
+ local res = zlib.uncompress(buf, buflen, comp, #comp)
+ assert(res == 0)
+ return ffi.string(buf, buflen[0])
+end
+
+-- Simple test code.
+local txt = string.rep("abcd", 1000)
+print("Uncompressed size: ", #txt)
+local c = compress(txt)
+print("Compressed size: ", #c)
+local txt2 = uncompress(c, #txt)
+assert(txt2 == txt)
+</pre>
+<p>
+Here's the step-by-step explanation:
+</p>
+<p>
+<span class="mark">&#9312;</span> This defines some of the
+C&nbsp;functions provided by zlib. For the sake of this example, some
+type indirections have been reduced and it uses the pre-defined
+fixed-size integer types, while still adhering to the zlib API/ABI.
+</p>
+<p>
+<span class="mark">&#9313;</span> This loads the zlib shared
+library. On POSIX systems it's named <tt>libz.so</tt> and usually
+comes pre-installed. Since <tt>ffi.load()</tt> automatically adds any
+missing standard prefixes/suffixes, we can simply load the
+<tt>"z"</tt> library. On Windows it's named <tt>zlib1.dll</tt> and
+you'll have to download it first from the
+<a href="http://zlib.net/"><span class="ext">&raquo;</span>&nbsp;zlib site</a>. The check for
+<tt>ffi.os</tt> makes sure we pass the right name to
+<tt>ffi.load()</tt>.
+</p>
+<p>
+<span class="mark">&#9314;</span> First, the maximum size of
+the compression buffer is obtained by calling the
+<tt>zlib.compressBound</tt> function with the length of the
+uncompressed string. The next line allocates a byte buffer of this
+size. The <tt>[?]</tt> in the type specification indicates a
+variable-length array (VLA). The actual number of elements of this
+array is given as the 2nd argument to <tt>ffi.new()</tt>.
+</p>
+<p>
+<span class="mark">&#9315;</span> This may look strange at
+first, but have a look at the declaration of the <tt>compress2</tt>
+function from zlib: the destination length is defined as a pointer!
+This is because you pass in the maximum buffer size and get back the
+actual length that was used.
+</p>
+<p>
+In C you'd pass in the address of a local variable
+(<tt>&amp;buflen</tt>). But since there's no address-of operator in
+Lua, we'll just pass in a one-element array. Conveniently it can be
+initialized with the maximum buffer size in one step. Calling the
+actual <tt>zlib.compress2</tt> function is then straightforward.
+</p>
+<p>
+<span class="mark">&#9316;</span> We want to return the
+compressed data as a Lua string, so we'll use <tt>ffi.string()</tt>.
+It needs a pointer to the start of the data and the actual length. The
+length has been returned in the <tt>buflen</tt> array, so we'll just
+get it from there.
+</p>
+<p style="font-size: 8pt;">
+Note that once the function returns, the <tt>buf</tt> and
+<tt>buflen</tt> variables will eventually be garbage collected. This
+is fine, because <tt>ffi.string()</tt> has copied the contents to a
+newly created (interned) Lua string. If you plan to call this function
+lots of times, consider reusing the buffers and/or handing back the
+results in buffers instead of strings. This will reduce the overhead
+for garbage collection and string interning.
+</p>
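+<p style="font-size: 8pt;">
+A minimal sketch of that buffer-reuse idea; the <tt>scratch</tt> names and the
+grow-on-demand strategy are assumptions made for this illustration, not part
+of the zlib API or of the example above:
+</p>
+<pre class="code">
+local scratch, scratchcap = nil, 0            -- one shared output buffer
+local scratchlen = ffi.new("unsigned long[1]")
+
+local function compress_reuse(txt)
+  local n = tonumber(zlib.compressBound(#txt))
+  if n > scratchcap then                      -- grow the buffer only when needed
+    scratch, scratchcap = ffi.new("uint8_t[?]", n), n
+  end
+  scratchlen[0] = n
+  assert(zlib.compress2(scratch, scratchlen, txt, #txt, 9) == 0)
+  return ffi.string(scratch, scratchlen[0])
+end
+</pre>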
+<p>
+<span class="mark">&#9317;</span> The <tt>uncompress</tt>
+function does the exact opposite of the <tt>compress</tt> function.
+The compressed data doesn't include the size of the original string,
+so this needs to be passed in. Otherwise no surprises here.
+</p>
+<p>
+<span class="mark">&#9318;</span> The code, that makes use
+of the functions we just defined, is just plain Lua code. It doesn't
+need to know anything about the LuaJIT FFI &mdash; the convenience
+wrapper functions completely hide it.
+</p>
+<p>
+One major advantage of the LuaJIT FFI is that you are now able to
+write those wrappers <em>in Lua</em>. And at a fraction of the time it
+would cost you to create an extra C&nbsp;module using the Lua/C API.
+Many of the simpler C&nbsp;functions can probably be used directly
+from your Lua code, without any wrappers.
+</p>
+<p style="font-size: 8pt;">
+Side note: the zlib API uses the <tt>long</tt> type for passing
+lengths and sizes around. But all those zlib functions actually only
+deal with 32&nbsp;bit values. This is an unfortunate choice for a
+public API, but may be explained by zlib's history &mdash; we'll just
+have to deal with it.
+</p>
+<p style="font-size: 8pt;">
+First, you should know that a <tt>long</tt> is a 64&nbsp;bit type e.g.
+on POSIX/x64 systems, but a 32&nbsp;bit type on Windows/x64 and on
+32&nbsp;bit systems. Thus a <tt>long</tt> result can be either a plain
+Lua number or a boxed 64&nbsp;bit integer cdata object, depending on
+the target system.
+</p>
+<p style="font-size: 8pt;">
+Ok, so the <tt>ffi.*</tt> functions generally accept cdata objects
+wherever you'd want to use a number. That's why we get away with
+passing <tt>n</tt> to <tt>ffi.string()</tt> above. But other Lua
+library functions or modules don't know how to deal with this. So for
+maximum portability one needs to use <tt>tonumber()</tt> on returned
+<tt>long</tt> results before passing them on. Otherwise the
+application might work on some systems, but would fail in a POSIX/x64
+environment.
+</p>
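+<p style="font-size: 8pt;">
+A tiny illustration of that advice, assuming the <tt>zlib</tt> namespace and
+the <tt>txt</tt> test string from the example above:
+</p>
+<pre class="code">
+local bound = zlib.compressBound(#txt)
+print(type(bound))        -- "cdata" on POSIX/x64, "number" elsewhere
+local n = tonumber(bound) -- now safe to hand to any Lua function
+</pre>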
+
+<h2 id="metatype">Defining Metamethods for a C&nbsp;Type</h2>
+<p>
+The following code explains how to define metamethods for a C type.
+We define a simple point type and add some operations to it:
+</p>
+<pre class="code mark">
+<span class="codemark">&nbsp;
+&#9312;
+
+
+
+&#9313;
+
+&#9314;
+
+&#9315;
+
+
+
+&#9316;
+
+&#9317;</span>local ffi = require("ffi")
+ffi.cdef[[
+<span style="color:#00a000;">typedef struct { double x, y; } point_t;</span>
+]]
+
+local point
+local mt = {
+ __add = function(a, b) return point(a.x+b.x, a.y+b.y) end,
+ __len = function(a) return math.sqrt(a.x*a.x + a.y*a.y) end,
+ __index = {
+ area = function(a) return a.x*a.x + a.y*a.y end,
+ },
+}
+point = ffi.metatype("point_t", mt)
+
+local a = point(3, 4)
+print(a.x, a.y) --> 3 4
+print(#a) --> 5
+print(a:area()) --> 25
+local b = a + point(0.5, 8)
+print(#b) --> 12.5
+</pre>
+<p>
+Here's the step-by-step explanation:
+</p>
+<p>
+<span class="mark">&#9312;</span> This defines the C&nbsp;type for a
+two-dimensional point object.
+</p>
+<p>
+<span class="mark">&#9313;</span> We have to declare the variable
+holding the point constructor first, because it's used inside of a
+metamethod.
+</p>
+<p>
+<span class="mark">&#9314;</span> Let's define an <tt>__add</tt>
+metamethod which adds the coordinates of two points and creates a new
+point object. For simplicity, this function assumes that both arguments
+are points. But it could be any mix of objects, if at least one operand
+is of the required type (e.g. adding a point plus a number or vice
+versa). Our <tt>__len</tt> metamethod returns the distance of a point to
+the origin.
+</p>
+<p>
+<span class="mark">&#9315;</span> If we run out of operators, we can
+define named methods, too. Here the <tt>__index</tt> table defines an
+<tt>area</tt> function. For custom indexing needs, one might want to
+define <tt>__index</tt> and <tt>__newindex</tt> <em>functions</em> instead.
+</p>
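+<p style="font-size: 8pt;">
+As a sketch of the function-based variant &mdash; the <tt>vec_t</tt> type and
+the <tt>length</tt> method are invented for this illustration and are separate
+from the <tt>point_t</tt> example:
+</p>
+<pre class="code">
+ffi.cdef[[typedef struct { double x, y; } vec_t;]]
+local methods = {
+  length = function(v) return math.sqrt(v.x*v.x + v.y*v.y) end,
+}
+local vec = ffi.metatype("vec_t", {
+  __index = function(v, k) return methods[k] end,  -- resolve non-field keys via a function
+})
+print(vec(3, 4):length()) --> 5
+</pre>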
+<p>
+<span class="mark">&#9316;</span> This associates the metamethods with
+our C&nbsp;type. This only needs to be done once. For convenience, a
+constructor is returned by
+<a href="ext_ffi_api.html#ffi_metatype"><tt>ffi.metatype()</tt></a>.
+We're not required to use it, though. The original C&nbsp;type can still
+be used e.g. to create an array of points. The metamethods automatically
+apply to any and all uses of this type.
+</p>
+<p>
+Please note that the association with a metatable is permanent and
+<b>the metatable must not be modified afterwards!</b> Ditto for the
+<tt>__index</tt> table.
+</p>
+<p>
+<span class="mark">&#9317;</span> Here are some simple usage examples
+for the point type and their expected results. The pre-defined
+operations (such as <tt>a.x</tt>) can be freely mixed with the newly
+defined metamethods. Note that <tt>area</tt> is a method and must be
+called with the Lua syntax for methods: <tt>a:area()</tt>, not
+<tt>a.area()</tt>.
+</p>
+<p>
+The C&nbsp;type metamethod mechanism is most useful when used in
+conjunction with C&nbsp;libraries that are written in an object-oriented
+style. Creators return a pointer to a new instance and methods take an
+instance pointer as the first argument. Sometimes you can just point
+<tt>__index</tt> to the library namespace and <tt>__gc</tt> to the
+destructor and you're done. But often enough you'll want to add
+convenience wrappers, e.g. to return actual Lua strings or when
+returning multiple values.
+</p>
+<p>
+Some C libraries only declare instance pointers as an opaque
+<tt>void&nbsp;*</tt> type. In this case you can use a fake type for all
+declarations, e.g. a pointer to a named (incomplete) struct will do:
+<tt>typedef struct foo_type *foo_handle</tt>. The C&nbsp;side doesn't
+know what you declare with the LuaJIT FFI, but as long as the underlying
+types are compatible, everything still works.
+</p>
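+<p style="font-size: 8pt;">
+A sketch of that pattern with an invented library &mdash; the <tt>"mylib"</tt>
+name and the <tt>foo_*</tt> functions are placeholders, not a real API. Here
+the destructor is attached per handle with <tt>ffi.gc()</tt>, since the
+handles come back from the library as pointers:
+</p>
+<pre class="code">
+ffi.cdef[[
+typedef struct foo_type *foo_handle;
+foo_handle foo_new(int x);
+int foo_bar(foo_handle f, int y);
+void foo_free(foo_handle f);
+]]
+local lib = ffi.load("mylib")
+
+ffi.metatype("struct foo_type", { __index = lib })  -- methods resolve in the namespace
+
+local function new_foo(x)
+  return ffi.gc(lib.foo_new(x), lib.foo_free)       -- free the handle on collection
+end
+
+local f = new_foo(42)
+print(f:foo_bar(7))  -- calls lib.foo_bar(f, 7)
+</pre>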
+
+<h2 id="idioms">Translating C&nbsp;Idioms</h2>
+<p>
+Here's a list of common C&nbsp;idioms and their translation to the
+LuaJIT FFI:
+</p>
+<table class="idiomtable">
+<tr class="idiomhead">
+<td class="idiomdesc">Idiom</td>
+<td class="idiomc">C&nbsp;code</td>
+<td class="idiomlua">Lua code</td>
+</tr>
+<tr class="odd separate">
+<td class="idiomdesc">Pointer dereference<br><tt>int *p;</tt></td><td class="idiomc"><tt>x = *p;<br>*p = y;</tt></td><td class="idiomlua"><tt>x = <b>p[0]</b><br><b>p[0]</b> = y</tt></td></tr>
+<tr class="even">
+<td class="idiomdesc">Pointer indexing<br><tt>int i, *p;</tt></td><td class="idiomc"><tt>x = p[i];<br>p[i+1] = y;</tt></td><td class="idiomlua"><tt>x = p[i]<br>p[i+1] = y</tt></td></tr>
+<tr class="odd">
+<td class="idiomdesc">Array indexing<br><tt>int i, a[];</tt></td><td class="idiomc"><tt>x = a[i];<br>a[i+1] = y;</tt></td><td class="idiomlua"><tt>x = a[i]<br>a[i+1] = y</tt></td></tr>
+<tr class="even separate">
+<td class="idiomdesc"><tt>struct</tt>/<tt>union</tt> dereference<br><tt>struct foo s;</tt></td><td class="idiomc"><tt>x = s.field;<br>s.field = y;</tt></td><td class="idiomlua"><tt>x = s.field<br>s.field = y</tt></td></tr>
+<tr class="odd">
+<td class="idiomdesc"><tt>struct</tt>/<tt>union</tt> pointer deref.<br><tt>struct foo *sp;</tt></td><td class="idiomc"><tt>x = sp->field;<br>sp->field = y;</tt></td><td class="idiomlua"><tt>x = <b>s.field</b><br><b>s.field</b> = y</tt></td></tr>
+<tr class="even separate">
+<td class="idiomdesc">Pointer arithmetic<br><tt>int i, *p;</tt></td><td class="idiomc"><tt>x = p + i;<br>y = p - i;</tt></td><td class="idiomlua"><tt>x = p + i<br>y = p - i</tt></td></tr>
+<tr class="odd">
+<td class="idiomdesc">Pointer difference<br><tt>int *p1, *p2;</tt></td><td class="idiomc"><tt>x = p1 - p2;</tt></td><td class="idiomlua"><tt>x = p1 - p2</tt></td></tr>
+<tr class="even">
+<td class="idiomdesc">Array element pointer<br><tt>int i, a[];</tt></td><td class="idiomc"><tt>x = &amp;a[i];</tt></td><td class="idiomlua"><tt>x = <b>a+i</b></tt></td></tr>
+<tr class="odd">
+<td class="idiomdesc">Cast pointer to address<br><tt>int *p;</tt></td><td class="idiomc"><tt>x = (intptr_t)p;</tt></td><td class="idiomlua"><tt>x = <b>tonumber(<br>&nbsp;ffi.cast("intptr_t",<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;p))</b></tt></td></tr>
+<tr class="even separate">
+<td class="idiomdesc">Functions with outargs<br><tt>void foo(int *inoutlen);</tt></td><td class="idiomc"><tt>int len = x;<br>foo(&amp;len);<br>y = len;</tt></td><td class="idiomlua"><tt><b>local len =<br>&nbsp;&nbsp;ffi.new("int[1]", x)<br>foo(len)<br>y = len[0]</b></tt></td></tr>
+<tr class="odd">
+<td class="idiomdesc"><a href="ext_ffi_semantics.html#convert_vararg">Vararg conversions</a><br><tt>int printf(char *fmt, ...);</tt></td><td class="idiomc"><tt>printf("%g", 1.0);<br>printf("%d", 1);<br>&nbsp;</tt></td><td class="idiomlua"><tt>printf("%g", 1);<br>printf("%d",<br>&nbsp;&nbsp;<b>ffi.new("int", 1)</b>)</tt></td></tr>
+</table>
+
+<h2 id="cache">To Cache or Not to Cache</h2>
+<p>
+It's a common Lua idiom to cache library functions in local variables
+or upvalues, e.g.:
+</p>
+<pre class="code">
+local byte, char = string.byte, string.char
+local function foo(x)
+ return char(byte(x)+1)
+end
+</pre>
+<p>
+This replaces several hash-table lookups with a (faster) direct use of
+a local or an upvalue. This is less important with LuaJIT, since the
+JIT compiler optimizes hash-table lookups a lot and is even able to
+hoist most of them out of the inner loops. It can't eliminate
+<em>all</em> of them, though, and it saves some typing for often-used
+functions. So there's still a place for this, even with LuaJIT.
+</p>
+<p>
+The situation is a bit different with C&nbsp;function calls via the
+FFI library. The JIT compiler has special logic to eliminate <em>all
+of the lookup overhead</em> for functions resolved from a
+<a href="ext_ffi_semantics.html#clib">C&nbsp;library namespace</a>!
+Thus it's not helpful and actually counter-productive to cache
+individual C&nbsp;functions like this:
+</p>
+<pre class="code">
+local <b>funca</b>, <b>funcb</b> = ffi.C.funca, ffi.C.funcb -- <span style="color:#c00000;">Not helpful!</span>
+local function foo(x, n)
+ for i=1,n do <b>funcb</b>(<b>funca</b>(x, i), 1) end
+end
+</pre>
+<p>
+This turns them into indirect calls and generates bigger and slower
+machine code. Instead you'll want to cache the namespace itself and
+rely on the JIT compiler to eliminate the lookups:
+</p>
+<pre class="code">
+local <b>C</b> = ffi.C -- <span style="color:#00a000;">Instead use this!</span>
+local function foo(x, n)
+ for i=1,n do <b>C.funcb</b>(<b>C.funca</b>(x, i), 1) end
+end
+</pre>
+<p>
+This generates both shorter and faster code. So <b>don't cache
+C&nbsp;functions</b>, but <b>do</b> cache namespaces! Most often the
+namespace is already in a local variable at an outer scope, e.g. from
+<tt>local&nbsp;lib&nbsp;=&nbsp;ffi.load(...)</tt>. Note that copying
+it to a local variable in the function scope is unnecessary.
+</p>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/ext_jit.html b/3rdparty/lua/doc/ext_jit.html
index b5d4921..cc00e72 100644
--- a/3rdparty/lua/doc/ext_jit.html
+++ b/3rdparty/lua/doc/ext_jit.html
@@ -1,199 +1,199 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>jit.* Library</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1><tt>jit.*</tt> Library</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a class="current" href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-The functions in this built-in module control the behavior of the JIT
-compiler engine. Note that JIT-compilation is fully automatic &mdash;
-you probably won't need to use any of the following functions unless
-you have special needs.
-</p>
-
-<h3 id="jit_onoff"><tt>jit.on()<br>
-jit.off()</tt></h3>
-<p>
-Turns the whole JIT compiler on (default) or off.
-</p>
-<p>
-These functions are typically used with the command line options
-<tt>-j on</tt> or <tt>-j off</tt>.
-</p>
-
-<h3 id="jit_flush"><tt>jit.flush()</tt></h3>
-<p>
-Flushes the whole cache of compiled code.
-</p>
-
-<h3 id="jit_onoff_func"><tt>jit.on(func|true [,true|false])<br>
-jit.off(func|true [,true|false])<br>
-jit.flush(func|true [,true|false])</tt></h3>
-<p>
-<tt>jit.on</tt> enables JIT compilation for a Lua function (this is
-the default).
-</p>
-<p>
-<tt>jit.off</tt> disables JIT compilation for a Lua function and
-flushes any already compiled code from the code cache.
-</p>
-<p>
-<tt>jit.flush</tt> flushes the code, but doesn't affect the
-enable/disable status.
-</p>
-<p>
-The current function, i.e. the Lua function calling this library
-function, can also be specified by passing <tt>true</tt> as the first
-argument.
-</p>
-<p>
-If the second argument is <tt>true</tt>, JIT compilation is also
-enabled, disabled or flushed recursively for all sub-functions of a
-function. With <tt>false</tt> only the sub-functions are affected.
-</p>
-<p>
-The <tt>jit.on</tt> and <tt>jit.off</tt> functions only set a flag
-which is checked when the function is about to be compiled. They do
-not trigger immediate compilation.
-</p>
-<p>
-Typical usage is <tt>jit.off(true, true)</tt> in the main chunk
-of a module to turn off JIT compilation for the whole module for
-debugging purposes.
-</p>
-
-<h3 id="jit_flush_tr"><tt>jit.flush(tr)</tt></h3>
-<p>
-Flushes the root trace, specified by its number, and all of its side
-traces from the cache. The code for the trace will be retained as long
-as there are any other traces which link to it.
-</p>
-
-<h3 id="jit_status"><tt>status, ... = jit.status()</tt></h3>
-<p>
-Returns the current status of the JIT compiler. The first result is
-either <tt>true</tt> or <tt>false</tt> if the JIT compiler is turned
-on or off. The remaining results are strings for CPU-specific features
-and enabled optimizations.
-</p>
-
-<h3 id="jit_version"><tt>jit.version</tt></h3>
-<p>
-Contains the LuaJIT version string.
-</p>
-
-<h3 id="jit_version_num"><tt>jit.version_num</tt></h3>
-<p>
-Contains the version number of the LuaJIT core. Version xx.yy.zz
-is represented by the decimal number xxyyzz.
-</p>
-
-<h3 id="jit_os"><tt>jit.os</tt></h3>
-<p>
-Contains the target OS name:
-"Windows", "Linux", "OSX", "BSD", "POSIX" or "Other".
-</p>
-
-<h3 id="jit_arch"><tt>jit.arch</tt></h3>
-<p>
-Contains the target architecture name:
-"x86", "x64", "arm", "ppc", "ppcspe", or "mips".
-</p>
-
-<h2 id="jit_opt"><tt>jit.opt.*</tt> &mdash; JIT compiler optimization control</h2>
-<p>
-This sub-module provides the backend for the <tt>-O</tt> command line
-option.
-</p>
-<p>
-You can also use it programmatically, e.g.:
-</p>
-<pre class="code">
-jit.opt.start(2) -- same as -O2
-jit.opt.start("-dce")
-jit.opt.start("hotloop=10", "hotexit=2")
-</pre>
-<p>
-Unlike in LuaJIT 1.x, the module is built-in and
-<b>optimization is turned on by default!</b>
-It's no longer necessary to run <tt>require("jit.opt").start()</tt>,
-which was one of the ways to enable optimization.
-</p>
-
-<h2 id="jit_util"><tt>jit.util.*</tt> &mdash; JIT compiler introspection</h2>
-<p>
-This sub-module holds functions to introspect the bytecode, generated
-traces, the IR and the generated machine code. The functionality
-provided by this module is still in flux and therefore undocumented.
-</p>
-<p>
-The debug modules <tt>-jbc</tt>, <tt>-jv</tt> and <tt>-jdump</tt> make
-extensive use of these functions. Please check out their source code,
-if you want to know more.
-</p>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>jit.* Library</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1><tt>jit.*</tt> Library</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a class="current" href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+The functions in this built-in module control the behavior of the JIT
+compiler engine. Note that JIT-compilation is fully automatic &mdash;
+you probably won't need to use any of the following functions unless
+you have special needs.
+</p>
+
+<h3 id="jit_onoff"><tt>jit.on()<br>
+jit.off()</tt></h3>
+<p>
+Turns the whole JIT compiler on (default) or off.
+</p>
+<p>
+These functions are typically used with the command line options
+<tt>-j on</tt> or <tt>-j off</tt>.
+</p>
+
+<h3 id="jit_flush"><tt>jit.flush()</tt></h3>
+<p>
+Flushes the whole cache of compiled code.
+</p>
+
+<h3 id="jit_onoff_func"><tt>jit.on(func|true [,true|false])<br>
+jit.off(func|true [,true|false])<br>
+jit.flush(func|true [,true|false])</tt></h3>
+<p>
+<tt>jit.on</tt> enables JIT compilation for a Lua function (this is
+the default).
+</p>
+<p>
+<tt>jit.off</tt> disables JIT compilation for a Lua function and
+flushes any already compiled code from the code cache.
+</p>
+<p>
+<tt>jit.flush</tt> flushes the code, but doesn't affect the
+enable/disable status.
+</p>
+<p>
+The current function, i.e. the Lua function calling this library
+function, can also be specified by passing <tt>true</tt> as the first
+argument.
+</p>
+<p>
+If the second argument is <tt>true</tt>, JIT compilation is also
+enabled, disabled or flushed recursively for all sub-functions of a
+function. With <tt>false</tt> only the sub-functions are affected.
+</p>
+<p>
+The <tt>jit.on</tt> and <tt>jit.off</tt> functions only set a flag
+which is checked when the function is about to be compiled. They do
+not trigger immediate compilation.
+</p>
+<p>
+Typical usage is <tt>jit.off(true, true)</tt> in the main chunk
+of a module to turn off JIT compilation for the whole module for
+debugging purposes.
+</p>
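+<p style="font-size: 8pt;">
+For illustration, a module chunk using that pattern might start like this
+(the module itself is a made-up placeholder):
+</p>
+<pre class="code">
+-- mymodule.lua
+jit.off(true, true)  -- disable JIT for this chunk and all of its sub-functions
+
+local M = {}
+function M.work(x) return x * 2 end
+return M
+</pre>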
+
+<h3 id="jit_flush_tr"><tt>jit.flush(tr)</tt></h3>
+<p>
+Flushes the root trace, specified by its number, and all of its side
+traces from the cache. The code for the trace will be retained as long
+as there are any other traces which link to it.
+</p>
+
+<h3 id="jit_status"><tt>status, ... = jit.status()</tt></h3>
+<p>
+Returns the current status of the JIT compiler. The first result is
+either <tt>true</tt> or <tt>false</tt> if the JIT compiler is turned
+on or off. The remaining results are strings for CPU-specific features
+and enabled optimizations.
+</p>
+
+<h3 id="jit_version"><tt>jit.version</tt></h3>
+<p>
+Contains the LuaJIT version string.
+</p>
+
+<h3 id="jit_version_num"><tt>jit.version_num</tt></h3>
+<p>
+Contains the version number of the LuaJIT core. Version xx.yy.zz
+is represented by the decimal number xxyyzz.
+</p>
+
+<h3 id="jit_os"><tt>jit.os</tt></h3>
+<p>
+Contains the target OS name:
+"Windows", "Linux", "OSX", "BSD", "POSIX" or "Other".
+</p>
+
+<h3 id="jit_arch"><tt>jit.arch</tt></h3>
+<p>
+Contains the target architecture name:
+"x86", "x64" or "ppcspe".
+</p>
+
+<h2 id="jit_opt"><tt>jit.opt.*</tt> &mdash; JIT compiler optimization control</h2>
+<p>
+This sub-module provides the backend for the <tt>-O</tt> command line
+option.
+</p>
+<p>
+You can also use it programmatically, e.g.:
+</p>
+<pre class="code">
+jit.opt.start(2) -- same as -O2
+jit.opt.start("-dce")
+jit.opt.start("hotloop=10", "hotexit=2")
+</pre>
+<p>
+Unlike in LuaJIT 1.x, the module is built-in and
+<b>optimization is turned on by default!</b>
+It's no longer necessary to run <tt>require("jit.opt").start()</tt>,
+which was one of the ways to enable optimization.
+</p>
+
+<h2 id="jit_util"><tt>jit.util.*</tt> &mdash; JIT compiler introspection</h2>
+<p>
+This sub-module holds functions to introspect the bytecode, generated
+traces, the IR and the generated machine code. The functionality
+provided by this module is still in flux and therefore undocumented.
+</p>
+<p>
+The debug modules <tt>-jbc</tt>, <tt>-jv</tt> and <tt>-jdump</tt> make
+extensive use of these functions. Please check out their source code,
+if you want to know more.
+</p>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/extensions.html b/3rdparty/lua/doc/extensions.html
index b55d0a1..8684dc3 100644
--- a/3rdparty/lua/doc/extensions.html
+++ b/3rdparty/lua/doc/extensions.html
@@ -1,408 +1,408 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>Extensions</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-table.exc {
- line-height: 1.2;
-}
-tr.exchead td {
- font-weight: bold;
-}
-td.excplatform {
- width: 48%;
-}
-td.exccompiler {
- width: 29%;
-}
-td.excinterop {
- width: 23%;
-}
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>Extensions</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a class="current" href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-LuaJIT is fully upwards-compatible with Lua 5.1. It supports all
-<a href="http://www.lua.org/manual/5.1/manual.html#5"><span class="ext">&raquo;</span>&nbsp;standard Lua
-library functions</a> and the full set of
-<a href="http://www.lua.org/manual/5.1/manual.html#3"><span class="ext">&raquo;</span>&nbsp;Lua/C API
-functions</a>.
-</p>
-<p>
-LuaJIT is also fully ABI-compatible to Lua 5.1 at the linker/dynamic
-loader level. This means you can compile a C&nbsp;module against the
-standard Lua headers and load the same shared library from either Lua
-or LuaJIT.
-</p>
-<p>
-LuaJIT extends the standard Lua VM with new functionality and adds
-several extension modules. Please note this page is only about
-<em>functional</em> enhancements and not about performance enhancements,
-such as the optimized VM, the faster interpreter or the JIT compiler.
-</p>
-
-<h2 id="modules">Extensions Modules</h2>
-<p>
-LuaJIT comes with several built-in extension modules:
-</p>
-
-<h3 id="bit"><tt>bit.*</tt> &mdash; Bitwise operations</h3>
-<p>
-LuaJIT supports all bitwise operations as defined by
-<a href="http://bitop.luajit.org"><span class="ext">&raquo;</span>&nbsp;Lua BitOp</a>:
-</p>
-<pre class="code">
-bit.tobit bit.tohex bit.bnot bit.band bit.bor bit.bxor
-bit.lshift bit.rshift bit.arshift bit.rol bit.ror bit.bswap
-</pre>
-<p>
-This module is a LuaJIT built-in &mdash; you don't need to download or
-install Lua BitOp. The Lua BitOp site has full documentation for all
-<a href="http://bitop.luajit.org/api.html"><span class="ext">&raquo;</span>&nbsp;Lua BitOp API functions</a>.
-</p>
-<p>
-Please make sure to <tt>require</tt> the module before using any of
-its functions:
-</p>
-<pre class="code">
-local bit = require("bit")
-</pre>
-<p>
-An already installed Lua BitOp module is ignored by LuaJIT.
-This way you can use bit operations from both Lua and LuaJIT on a
-shared installation.
-</p>
-
-<h3 id="ffi"><tt>ffi.*</tt> &mdash; FFI library</h3>
-<p>
-The <a href="ext_ffi.html">FFI library</a> allows calling external
-C&nbsp;functions and the use of C&nbsp;data structures from pure Lua
-code.
-</p>
-
-<h3 id="jit"><tt>jit.*</tt> &mdash; JIT compiler control</h3>
-<p>
-The functions in this module
-<a href="ext_jit.html">control the behavior of the JIT compiler engine</a>.
-</p>
-
-<h3 id="c_api">C API extensions</h3>
-<p>
-LuaJIT adds some
-<a href="ext_c_api.html">extra functions to the Lua/C API</a>.
-</p>
-
-<h2 id="library">Enhanced Standard Library Functions</h2>
-
-<h3 id="xpcall"><tt>xpcall(f, err [,args...])</tt> passes arguments</h3>
-<p>
-Unlike the standard implementation in Lua 5.1, <tt>xpcall()</tt>
-passes any arguments after the error function to the function
-which is called in a protected context.
-</p>
-
-<h3 id="load"><tt>loadfile()</tt> etc. handle UTF-8 source code</h3>
-<p>
-Non-ASCII characters are handled transparently by the Lua source code parser.
-This allows the use of UTF-8 characters in identifiers and strings.
-A UTF-8 BOM is skipped at the start of the source code.
-</p>
-
-<h3 id="tostring"><tt>tostring()</tt> etc. canonicalize NaN and &plusmn;Inf</h3>
-<p>
-All number-to-string conversions consistently convert non-finite numbers
-to the same strings on all platforms. NaN results in <tt>"nan"</tt>,
-positive infinity results in <tt>"inf"</tt> and negative infinity results
-in <tt>"-inf"</tt>.
-</p>
-
-<h3 id="tonumber"><tt>tonumber()</tt> etc. use builtin string to number conversion</h3>
-<p>
-All string-to-number conversions consistently convert integer and
-floating-point inputs in decimal and hexadecimal on all platforms.
-<tt>strtod()</tt> is <em>not</em> used anymore, which avoids numerous
-problems with poor C library implementations. The builtin conversion
-function provides full precision according to the IEEE-754 standard, it
-works independently of the current locale and it supports hex floating-point
-numbers (e.g. <tt>0x1.5p-3</tt>).
-</p>
-
-<h3 id="string_dump"><tt>string.dump(f [,strip])</tt> generates portable bytecode</h3>
-<p>
-An extra argument has been added to <tt>string.dump()</tt>. If set to
-<tt>true</tt>, 'stripped' bytecode without debug information is
-generated. This speeds up later bytecode loading and reduces memory
-usage. See also the
-<a href="running.html#opt_b"><tt>-b</tt> command line option</a>.
-</p>
-<p>
-The generated bytecode is portable and can be loaded on any architecture
-that LuaJIT supports, independent of word size or endianness. However the
-bytecode compatibility versions must match. Bytecode stays compatible
-for dot releases (x.y.0 &rarr; x.y.1), but may change with major or
-minor releases (2.0 &rarr; 2.1) or between any beta release. Foreign
-bytecode (e.g. from Lua 5.1) is incompatible and cannot be loaded.
-</p>
-
-<h3 id="math_random">Enhanced PRNG for <tt>math.random()</tt></h3>
-<p>
-LuaJIT uses a Tausworthe PRNG with period 2^223 to implement
-<tt>math.random()</tt> and <tt>math.randomseed()</tt>. The quality of
-the PRNG results is much superior compared to the standard Lua
-implementation which uses the platform-specific ANSI rand().
-</p>
-<p>
-The PRNG generates the same sequences from the same seeds on all
-platforms and makes use of all bits in the seed argument.
-<tt>math.random()</tt> without arguments generates 52 pseudo-random bits
-for every call. The result is uniformly distributed between 0.0 and 1.0.
-It's correctly scaled up and rounded for <tt>math.random(n&nbsp;[,m])</tt> to
-preserve uniformity.
-</p>
-
-<h3 id="io"><tt>io.*</tt> functions handle 64&nbsp;bit file offsets</h3>
-<p>
-The file I/O functions in the standard <tt>io.*</tt> library handle
-64&nbsp;bit file offsets. In particular this means it's possible
-to open files larger than 2&nbsp;Gigabytes and to reposition or obtain
-the current file position for offsets beyond 2&nbsp;GB
-(<tt>fp:seek()</tt> method).
-</p>
-
-<h3 id="debug_meta"><tt>debug.*</tt> functions identify metamethods</h3>
-<p>
-<tt>debug.getinfo()</tt> and <tt>lua_getinfo()</tt> also return information
-about invoked metamethods. The <tt>namewhat</tt> field is set to
-<tt>"metamethod"</tt> and the <tt>name</tt> field has the name of
-the corresponding metamethod (e.g. <tt>"__index"</tt>).
-</p>
-
-<h2 id="resumable">Fully Resumable VM</h2>
-<p>
-The LuaJIT VM is fully resumable. This means you can yield from a
-coroutine even across contexts, where this would not be possible with
-the standard Lua&nbsp;5.1 VM: e.g. you can yield across <tt>pcall()</tt>
-and <tt>xpcall()</tt>, across iterators and across metamethods.
-</p>
-
-<h2 id="lua52">Extensions from Lua 5.2</h2>
-<p>
-LuaJIT supports some language and library extensions from Lua&nbsp;5.2.
-Features that are unlikely to break existing code are unconditionally
-enabled:
-</p>
-<ul>
-<li><tt>goto</tt> and <tt>::labels::</tt>.</li>
-<li>Hex escapes <tt>'\x3F'</tt> and <tt>'\z'</tt> escape in strings.</li>
-<li><tt>load(string|reader [, chunkname [,mode [,env]]])</tt>.</li>
-<li><tt>loadstring()</tt> is an alias for <tt>load()</tt>.</li>
-<li><tt>loadfile(filename [,mode [,env]])</tt>.</li>
-<li><tt>math.log(x [,base])</tt>.</li>
-<li><tt>string.rep(s, n [,sep])</tt>.</li>
-<li><tt>string.format()</tt>: <tt>%q</tt> reversible.
-<tt>%s</tt> checks <tt>__tostring</tt>.
-<tt>%a</tt> and <tt>"%A</tt> added.</li>
-<li>String matching pattern <tt>%g</tt> added.</li>
-<li><tt>io.read("*L")</tt>.</li>
-<li><tt>io.lines()</tt> and <tt>file:lines()</tt> process
-<tt>io.read()</tt> options.</li>
-<li><tt>os.exit(status|true|false [,close])</tt>.</li>
-<li><tt>package.searchpath(name, path [, sep [, rep]])</tt>.</li>
-<li><tt>package.loadlib(name, "*")</tt>.</li>
-<li><tt>debug.getinfo()</tt> returns <tt>nparams</tt> and <tt>isvararg</tt>
-for option <tt>"u"</tt>.</li>
-<li><tt>debug.getlocal()</tt> accepts function instead of level.</li>
-<li><tt>debug.getlocal()</tt> and <tt>debug.setlocal()</tt> accept negative
-indexes for varargs.</li>
-<li><tt>debug.getupvalue()</tt> and <tt>debug.setupvalue()</tt> handle
-C&nbsp;functions.</li>
-<li><tt>debug.upvalueid()</tt> and <tt>debug.upvaluejoin()</tt>.</li>
-<li>Command line option <tt>-E</tt>.</li>
-<li>Command line checks <tt>__tostring</tt> for errors.</li>
-</ul>
-<p>
-Other features are only enabled if LuaJIT is built with
-<tt>-DLUAJIT_ENABLE_LUA52COMPAT</tt>:
-</p>
-<ul>
-<li><tt>goto</tt> is a keyword and not a valid variable name anymore.</li>
-<li><tt>break</tt> can be placed anywhere. Empty statements (<tt>;;</tt>)
-are allowed.</li>
-<li><tt>__lt</tt>, <tt>__le</tt> are invoked for mixed types.</li>
-<li><tt>__len</tt> for tables. <tt>rawlen()</tt> library function.</li>
-<li><tt>pairs()</tt> and <tt>ipairs()</tt> check for <tt>__pairs</tt> and
-<tt>__ipairs</tt>.</li>
-<li><tt>coroutine.running()</tt> returns two results.</li>
-<li><tt>table.pack()</tt> and <tt>table.unpack()</tt>
-(same as <tt>unpack()</tt>).</li>
-<li><tt>io.write()</tt> and <tt>file:write()</tt> return file handle
-instead of <tt>true</tt>.</li>
-<li><tt>os.execute()</tt> and <tt>pipe:close()</tt> return detailed
-exit status.</li>
-<li><tt>debug.setmetatable()</tt> returns object.</li>
-<li><tt>debug.getuservalue()</tt> and <tt>debug.setuservalue()</tt>.</li>
-<li>Remove <tt>math.mod()</tt>, <tt>string.gfind()</tt>.</li>
-</ul>
-<p>
-Note: this provides only partial compatibility with Lua 5.2 at the
-language and Lua library level. LuaJIT is API+ABI-compatible with
-Lua&nbsp;5.1, which prevents implementing features that would otherwise
-break the Lua/C API and ABI (e.g. <tt>_ENV</tt>).
-</p>
-
-<h2 id="exceptions">C++ Exception Interoperability</h2>
-<p>
-LuaJIT has built-in support for interoperating with C++&nbsp;exceptions.
-The available range of features depends on the target platform and
-the toolchain used to compile LuaJIT:
-</p>
-<table class="exc">
-<tr class="exchead">
-<td class="excplatform">Platform</td>
-<td class="exccompiler">Compiler</td>
-<td class="excinterop">Interoperability</td>
-</tr>
-<tr class="odd separate">
-<td class="excplatform">POSIX/x64, DWARF2 unwinding</td>
-<td class="exccompiler">GCC 4.3+</td>
-<td class="excinterop"><b style="color: #00a000;">Full</b></td>
-</tr>
-<tr class="even">
-<td class="excplatform">Other platforms, DWARF2 unwinding</td>
-<td class="exccompiler">GCC</td>
-<td class="excinterop"><b style="color: #c06000;">Limited</b></td>
-</tr>
-<tr class="odd">
-<td class="excplatform">Windows/x64</td>
-<td class="exccompiler">MSVC or WinSDK</td>
-<td class="excinterop"><b style="color: #00a000;">Full</b></td>
-</tr>
-<tr class="even">
-<td class="excplatform">Windows/x86</td>
-<td class="exccompiler">Any</td>
-<td class="excinterop"><b style="color: #a00000;">No</b></td>
-</tr>
-<tr class="odd">
-<td class="excplatform">Other platforms</td>
-<td class="exccompiler">Other compilers</td>
-<td class="excinterop"><b style="color: #a00000;">No</b></td>
-</tr>
-</table>
-<p>
-<b style="color: #00a000;">Full interoperability</b> means:
-</p>
-<ul>
-<li>C++&nbsp;exceptions can be caught on the Lua side with <tt>pcall()</tt>,
-<tt>lua_pcall()</tt> etc.</li>
-<li>C++&nbsp;exceptions will be converted to the generic Lua error
-<tt>"C++&nbsp;exception"</tt>, unless you use the
-<a href="ext_c_api.html#mode_wrapcfunc">C&nbsp;call wrapper</a> feature.</li>
-<li>It's safe to throw C++&nbsp;exceptions across non-protected Lua frames
-on the C&nbsp;stack. The contents of the C++&nbsp;exception object
-pass through unmodified.</li>
-<li>Lua errors can be caught on the C++ side with <tt>catch(...)</tt>.
-The corresponding Lua error message can be retrieved from the Lua stack.</li>
-<li>Throwing Lua errors across C++ frames is safe. C++ destructors
-will be called.</li>
-</ul>
-<p>
-<b style="color: #c06000;">Limited interoperability</b> means:
-</p>
-<ul>
-<li>C++&nbsp;exceptions can be caught on the Lua side with <tt>pcall()</tt>,
-<tt>lua_pcall()</tt> etc.</li>
-<li>C++&nbsp;exceptions will be converted to the generic Lua error
-<tt>"C++&nbsp;exception"</tt>, unless you use the
-<a href="ext_c_api.html#mode_wrapcfunc">C&nbsp;call wrapper</a> feature.</li>
-<li>C++&nbsp;exceptions will be caught by non-protected Lua frames and
-are rethrown as a generic Lua error. The C++&nbsp;exception object will
-be destroyed.</li>
-<li>Lua errors <b>cannot</b> be caught on the C++ side.</li>
-<li>Throwing Lua errors across C++ frames will <b>not</b> call
-C++ destructors.</li>
-</ul>
-
-<p>
-<b style="color: #a00000;">No interoperability</b> means:
-</p>
-<ul>
-<li>It's <b>not</b> safe to throw C++&nbsp;exceptions across Lua frames.</li>
-<li>C++&nbsp;exceptions <b>cannot</b> be caught on the Lua side.</li>
-<li>Lua errors <b>cannot</b> be caught on the C++ side.</li>
-<li>Throwing Lua errors across C++ frames will <b>not</b> call
-C++ destructors.</li>
-<li>Additionally, on Windows/x86 with SEH-based C++&nbsp;exceptions:
-it's <b>not</b> safe to throw a Lua error across any frames containing
-a C++ function with any try/catch construct or using variables with
-(implicit) destructors. This also applies to any functions which may be
-inlined in such a function. It doesn't matter whether <tt>lua_error()</tt>
-is called inside or outside of a try/catch or whether any object actually
-needs to be destroyed: the SEH chain is corrupted and this will eventually
-lead to the termination of the process.</li>
-</ul>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>Extensions</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+table.exc {
+ line-height: 1.2;
+}
+tr.exchead td {
+ font-weight: bold;
+}
+td.excplatform {
+ width: 48%;
+}
+td.exccompiler {
+ width: 29%;
+}
+td.excinterop {
+ width: 23%;
+}
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>Extensions</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a class="current" href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+LuaJIT is fully upwards-compatible with Lua 5.1. It supports all
+<a href="http://www.lua.org/manual/5.1/manual.html#5"><span class="ext">&raquo;</span>&nbsp;standard Lua
+library functions</a> and the full set of
+<a href="http://www.lua.org/manual/5.1/manual.html#3"><span class="ext">&raquo;</span>&nbsp;Lua/C API
+functions</a>.
+</p>
+<p>
+LuaJIT is also fully ABI-compatible with Lua 5.1 at the linker/dynamic
+loader level. This means you can compile a C&nbsp;module against the
+standard Lua headers and load the same shared library from either Lua
+or LuaJIT.
+</p>
+<p>
+LuaJIT extends the standard Lua VM with new functionality and adds
+several extension modules. Please note this page is only about
+<em>functional</em> enhancements and not about performance enhancements,
+such as the optimized VM, the faster interpreter or the JIT compiler.
+</p>
+
+<h2 id="modules">Extensions Modules</h2>
+<p>
+LuaJIT comes with several built-in extension modules:
+</p>
+
+<h3 id="bit"><tt>bit.*</tt> &mdash; Bitwise operations</h3>
+<p>
+LuaJIT supports all bitwise operations as defined by
+<a href="http://bitop.luajit.org"><span class="ext">&raquo;</span>&nbsp;Lua BitOp</a>:
+</p>
+<pre class="code">
+bit.tobit bit.tohex bit.bnot bit.band bit.bor bit.bxor
+bit.lshift bit.rshift bit.arshift bit.rol bit.ror bit.bswap
+</pre>
+<p>
+This module is a LuaJIT built-in &mdash; you don't need to download or
+install Lua BitOp. The Lua BitOp site has full documentation for all
+<a href="http://bitop.luajit.org/api.html"><span class="ext">&raquo;</span>&nbsp;Lua BitOp API functions</a>.
+</p>
+<p>
+Please make sure to <tt>require</tt> the module before using any of
+its functions:
+</p>
+<pre class="code">
+local bit = require("bit")
+</pre>
+<p>
+An already installed Lua BitOp module is ignored by LuaJIT.
+This way you can use bit operations from both Lua and LuaJIT on a
+shared installation.
+</p>
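+<p>
+For illustration, a minimal sketch of using a few of these functions
+(the printed results are noted as comments):
+</p>
+<pre class="code">
+local bit = require("bit")
+print(bit.tohex(bit.band(0xffffffff, 0x0000ff00)))  --> 0000ff00
+print(bit.lshift(1, 4))                             --> 16
+print(bit.bxor(0x12345678, 0xffffffff))             --> -305419897
+</pre>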
+
+<h3 id="ffi"><tt>ffi.*</tt> &mdash; FFI library</h3>
+<p>
+The <a href="ext_ffi.html">FFI library</a> allows calling external
+C&nbsp;functions and the use of C&nbsp;data structures from pure Lua
+code.
+</p>
+
+<h3 id="jit"><tt>jit.*</tt> &mdash; JIT compiler control</h3>
+<p>
+The functions in this module
+<a href="ext_jit.html">control the behavior of the JIT compiler engine</a>.
+</p>
+
+<h3 id="c_api">C API extensions</h3>
+<p>
+LuaJIT adds some
+<a href="ext_c_api.html">extra functions to the Lua/C API</a>.
+</p>
+
+<h2 id="library">Enhanced Standard Library Functions</h2>
+
+<h3 id="xpcall"><tt>xpcall(f, err [,args...])</tt> passes arguments</h3>
+<p>
+Unlike the standard implementation in Lua 5.1, <tt>xpcall()</tt>
+passes any arguments after the error function to the function
+which is called in a protected context.
+</p>
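+<p>
+A minimal sketch of the extended form (function and argument names are
+purely illustrative):
+</p>
+<pre class="code">
+local function add(a, b) return a + b end
+local ok, result = xpcall(add, debug.traceback, 1, 2)
+print(ok, result)  --> true    3
+</pre>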
+
+<h3 id="load"><tt>loadfile()</tt> etc. handle UTF-8 source code</h3>
+<p>
+Non-ASCII characters are handled transparently by the Lua source code parser.
+This allows the use of UTF-8 characters in identifiers and strings.
+A UTF-8 BOM is skipped at the start of the source code.
+</p>
+
+<h3 id="tostring"><tt>tostring()</tt> etc. canonicalize NaN and &plusmn;Inf</h3>
+<p>
+All number-to-string conversions consistently convert non-finite numbers
+to the same strings on all platforms. NaN results in <tt>"nan"</tt>,
+positive infinity results in <tt>"inf"</tt> and negative infinity results
+in <tt>"-inf"</tt>.
+</p>
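+<p>
+For example (the expressions are just one way to produce such values):
+</p>
+<pre class="code">
+print(tostring(1/0))   --> inf
+print(tostring(-1/0))  --> -inf
+print(tostring(0/0))   --> nan
+</pre>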
+
+<h3 id="tonumber"><tt>tonumber()</tt> etc. use builtin string to number conversion</h3>
+<p>
+All string-to-number conversions consistently convert integer and
+floating-point inputs in decimal and hexadecimal on all platforms.
+<tt>strtod()</tt> is <em>not</em> used anymore, which avoids numerous
+problems with poor C library implementations. The builtin conversion
+function provides full precision according to the IEEE-754 standard; it
+works independently of the current locale and supports hex floating-point
+numbers (e.g. <tt>0x1.5p-3</tt>).
+</p>
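+<p>
+A few conversions that behave identically on all platforms
+(results shown as comments):
+</p>
+<pre class="code">
+print(tonumber("0x10"))      --> 16
+print(tonumber("0x1.5p-3"))  --> 0.1640625
+print(tonumber("  3.25  "))  --> 3.25
+</pre>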
+
+<h3 id="string_dump"><tt>string.dump(f [,strip])</tt> generates portable bytecode</h3>
+<p>
+An extra argument has been added to <tt>string.dump()</tt>. If set to
+<tt>true</tt>, 'stripped' bytecode without debug information is
+generated. This speeds up later bytecode loading and reduces memory
+usage. See also the
+<a href="running.html#opt_b"><tt>-b</tt> command line option</a>.
+</p>
+<p>
+The generated bytecode is portable and can be loaded on any architecture
+that LuaJIT supports, independent of word size or endianness. However, the
+bytecode compatibility versions must match. Bytecode stays compatible
+across dot releases (x.y.0 &rarr; x.y.1), but may change with major or
+minor releases (2.0 &rarr; 2.1) or between beta releases. Foreign
+bytecode (e.g. from Lua 5.1) is incompatible and cannot be loaded.
+</p>
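+<p>
+A minimal sketch of dumping and reloading a function (names are just for
+illustration):
+</p>
+<pre class="code">
+local function add(a, b) return a + b end
+local bc = string.dump(add, true)  -- stripped, portable bytecode string
+local f = assert(loadstring(bc))   -- load() works as well
+print(f(2, 3))                     --> 5
+</pre>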
+
+<h3 id="math_random">Enhanced PRNG for <tt>math.random()</tt></h3>
+<p>
+LuaJIT uses a Tausworthe PRNG with period 2^223 to implement
+<tt>math.random()</tt> and <tt>math.randomseed()</tt>. The quality of
+the PRNG results is far superior to that of the standard Lua
+implementation, which uses the platform-specific ANSI rand().
+</p>
+<p>
+The PRNG generates the same sequences from the same seeds on all
+platforms and makes use of all bits in the seed argument.
+<tt>math.random()</tt> without arguments generates 52 pseudo-random bits
+for every call. The result is uniformly distributed between 0.0 and 1.0.
+It's correctly scaled up and rounded for <tt>math.random(n&nbsp;[,m])</tt> to
+preserve uniformity.
+</p>
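+<p>
+For example, seeding and drawing a few numbers (the actual values depend
+only on the seed and are therefore reproducible across platforms):
+</p>
+<pre class="code">
+math.randomseed(42)
+print(math.random())        -- uniformly distributed between 0.0 and 1.0
+print(math.random(6))       -- integer between 1 and 6
+print(math.random(10, 20))  -- integer between 10 and 20
+</pre>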
+
+<h3 id="io"><tt>io.*</tt> functions handle 64&nbsp;bit file offsets</h3>
+<p>
+The file I/O functions in the standard <tt>io.*</tt> library handle
+64&nbsp;bit file offsets. In particular, this means it's possible
+to open files larger than 2&nbsp;Gigabytes and to reposition or obtain
+the current file position for offsets beyond 2&nbsp;GB
+(<tt>fp:seek()</tt> method).
+</p>
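+<p>
+A sketch of repositioning beyond 2&nbsp;GB (the file name is a placeholder):
+</p>
+<pre class="code">
+local fp = assert(io.open("huge.dat", "rb"))
+fp:seek("set", 3 * 2^30)  -- jump to the 3 GB mark
+print(fp:seek())          --> 3221225472
+fp:close()
+</pre>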
+
+<h3 id="debug_meta"><tt>debug.*</tt> functions identify metamethods</h3>
+<p>
+<tt>debug.getinfo()</tt> and <tt>lua_getinfo()</tt> also return information
+about invoked metamethods. The <tt>namewhat</tt> field is set to
+<tt>"metamethod"</tt> and the <tt>name</tt> field has the name of
+the corresponding metamethod (e.g. <tt>"__index"</tt>).
+</p>
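+<p>
+A small sketch that observes this from inside an <tt>__index</tt> metamethod:
+</p>
+<pre class="code">
+local t = setmetatable({}, { __index = function()
+  local info = debug.getinfo(1, "n")
+  print(info.namewhat, info.name)  --> metamethod   __index
+  return 42
+end })
+local _ = t.missing
+</pre>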
+
+<h2 id="resumable">Fully Resumable VM</h2>
+<p>
+The LuaJIT VM is fully resumable. This means you can yield from a
+coroutine even in contexts where this would not be possible with
+the standard Lua&nbsp;5.1 VM: e.g. you can yield across <tt>pcall()</tt>
+and <tt>xpcall()</tt>, across iterators and across metamethods.
+</p>
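+<p>
+A minimal sketch of yielding across <tt>pcall()</tt>, which fails in the
+standard Lua&nbsp;5.1 VM but works in LuaJIT:
+</p>
+<pre class="code">
+local co = coroutine.create(function()
+  local ok = pcall(function()
+    coroutine.yield("yielded inside pcall")
+  end)
+  return ok
+end)
+print(coroutine.resume(co))  --> true    yielded inside pcall
+print(coroutine.resume(co))  --> true    true
+</pre>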
+
+<h2 id="lua52">Extensions from Lua 5.2</h2>
+<p>
+LuaJIT supports some language and library extensions from Lua&nbsp;5.2.
+Features that are unlikely to break existing code are unconditionally
+enabled:
+</p>
+<ul>
+<li><tt>goto</tt> and <tt>::labels::</tt>.</li>
+<li>Hex escapes <tt>'\x3F'</tt> and <tt>'\z'</tt> escape in strings.</li>
+<li><tt>load(string|reader [, chunkname [,mode [,env]]])</tt>.</li>
+<li><tt>loadstring()</tt> is an alias for <tt>load()</tt>.</li>
+<li><tt>loadfile(filename [,mode [,env]])</tt>.</li>
+<li><tt>math.log(x [,base])</tt>.
+<li><tt>string.rep(s, n [,sep])</tt>.
+<li><tt>string.format()</tt>: <tt>%q</tt> reversible.
+<tt>%s</tt> checks <tt>__tostring</tt>.
+<tt>%a</tt> and <tt>%A</tt> added.</li>
+<li>String matching pattern <tt>%g</tt> added.</li>
+<li><tt>io.read("*L")</tt>.</li>
+<li><tt>io.lines()</tt> and <tt>file:lines()</tt> process
+<tt>io.read()</tt> options.</li>
+<li><tt>os.exit(status|true|false [,close])</tt>.</li>
+<li><tt>package.searchpath(name, path [, sep [, rep]])</tt>.</li>
+<li><tt>package.loadlib(name, "*")</tt>.</li>
+<li><tt>debug.getinfo()</tt> returns <tt>nparams</tt> and <tt>isvararg</tt>
+for option <tt>"u"</tt>.</li>
+<li><tt>debug.getlocal()</tt> accepts function instead of level.</li>
+<li><tt>debug.getlocal()</tt> and <tt>debug.setlocal()</tt> accept negative
+indexes for varargs.</li>
+<li><tt>debug.getupvalue()</tt> and <tt>debug.setupvalue()</tt> handle
+C&nbsp;functions.</li>
+<li><tt>debug.upvalueid()</tt> and <tt>debug.upvaluejoin()</tt>.</li>
+<li>Command line option <tt>-E</tt>.</li>
+<li>Command line checks <tt>__tostring</tt> for errors.</li>
+</ul>
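+<p>
+For example, the <tt>goto</tt> and label syntax from the first item above
+enables the usual "continue" idiom:
+</p>
+<pre class="code">
+for i = 1, 10 do
+  if i % 2 == 0 then goto continue end
+  print(i)  -- prints the odd numbers 1, 3, 5, 7, 9
+  ::continue::
+end
+</pre>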
+<p>
+Other features are only enabled if LuaJIT is built with
+<tt>-DLUAJIT_ENABLE_LUA52COMPAT</tt>:
+</p>
+<ul>
+<li><tt>goto</tt> is a keyword and not a valid variable name anymore.</li>
+<li><tt>break</tt> can be placed anywhere. Empty statements (<tt>;;</tt>)
+are allowed.</li>
+<li><tt>__lt</tt>, <tt>__le</tt> are invoked for mixed types.</li>
+<li><tt>__len</tt> for tables. <tt>rawlen()</tt> library function.</li>
+<li><tt>pairs()</tt> and <tt>ipairs()</tt> check for <tt>__pairs</tt> and
+<tt>__ipairs</tt>.</li>
+<li><tt>coroutine.running()</tt> returns two results.</li>
+<li><tt>table.pack()</tt> and <tt>table.unpack()</tt>
+(same as <tt>unpack()</tt>).</li>
+<li><tt>io.write()</tt> and <tt>file:write()</tt> return file handle
+instead of <tt>true</tt>.</li>
+<li><tt>os.execute()</tt> and <tt>pipe:close()</tt> return detailed
+exit status.</li>
+<li><tt>debug.setmetatable()</tt> returns object.</li>
+<li><tt>debug.getuservalue()</tt> and <tt>debug.setuservalue()</tt>.</li>
+<li>Remove <tt>math.mod()</tt>, <tt>string.gfind()</tt>.
+</ul>
+<p>
+Note: this provides only partial compatibility with Lua 5.2 at the
+language and Lua library level. LuaJIT is API+ABI-compatible with
+Lua&nbsp;5.1, which prevents implementing features that would otherwise
+break the Lua/C API and ABI (e.g. <tt>_ENV</tt>).
+</p>
+
+<h2 id="exceptions">C++ Exception Interoperability</h2>
+<p>
+LuaJIT has built-in support for interoperating with C++&nbsp;exceptions.
+The available range of features depends on the target platform and
+the toolchain used to compile LuaJIT:
+</p>
+<table class="exc">
+<tr class="exchead">
+<td class="excplatform">Platform</td>
+<td class="exccompiler">Compiler</td>
+<td class="excinterop">Interoperability</td>
+</tr>
+<tr class="odd separate">
+<td class="excplatform">POSIX/x64, DWARF2 unwinding</td>
+<td class="exccompiler">GCC 4.3+</td>
+<td class="excinterop"><b style="color: #00a000;">Full</b></td>
+</tr>
+<tr class="even">
+<td class="excplatform">Other platforms, DWARF2 unwinding</td>
+<td class="exccompiler">GCC</td>
+<td class="excinterop"><b style="color: #c06000;">Limited</b></td>
+</tr>
+<tr class="odd">
+<td class="excplatform">Windows/x64</td>
+<td class="exccompiler">MSVC or WinSDK</td>
+<td class="excinterop"><b style="color: #00a000;">Full</b></td>
+</tr>
+<tr class="even">
+<td class="excplatform">Windows/x86</td>
+<td class="exccompiler">Any</td>
+<td class="excinterop"><b style="color: #a00000;">No</b></td>
+</tr>
+<tr class="odd">
+<td class="excplatform">Other platforms</td>
+<td class="exccompiler">Other compilers</td>
+<td class="excinterop"><b style="color: #a00000;">No</b></td>
+</tr>
+</table>
+<p>
+<b style="color: #00a000;">Full interoperability</b> means:
+</p>
+<ul>
+<li>C++&nbsp;exceptions can be caught on the Lua side with <tt>pcall()</tt>,
+<tt>lua_pcall()</tt> etc.</li>
+<li>C++&nbsp;exceptions will be converted to the generic Lua error
+<tt>"C++&nbsp;exception"</tt>, unless you use the
+<a href="ext_c_api.html#mode_wrapcfunc">C&nbsp;call wrapper</a> feature.</li>
+<li>It's safe to throw C++&nbsp;exceptions across non-protected Lua frames
+on the C&nbsp;stack. The contents of the C++&nbsp;exception object
+pass through unmodified.</li>
+<li>Lua errors can be caught on the C++ side with <tt>catch(...)</tt>.
+The corresponding Lua error message can be retrieved from the Lua stack.</li>
+<li>Throwing Lua errors across C++ frames is safe. C++ destructors
+will be called.</li>
+</ul>
+<p>
+<b style="color: #c06000;">Limited interoperability</b> means:
+</p>
+<ul>
+<li>C++&nbsp;exceptions can be caught on the Lua side with <tt>pcall()</tt>,
+<tt>lua_pcall()</tt> etc.</li>
+<li>C++&nbsp;exceptions will be converted to the generic Lua error
+<tt>"C++&nbsp;exception"</tt>, unless you use the
+<a href="ext_c_api.html#mode_wrapcfunc">C&nbsp;call wrapper</a> feature.</li>
+<li>C++&nbsp;exceptions will be caught by non-protected Lua frames and
+are rethrown as a generic Lua error. The C++&nbsp;exception object will
+be destroyed.</li>
+<li>Lua errors <b>cannot</b> be caught on the C++ side.</li>
+<li>Throwing Lua errors across C++ frames will <b>not</b> call
+C++ destructors.</li>
+</ul>
+
+<p>
+<b style="color: #a00000;">No interoperability</b> means:
+</p>
+<ul>
+<li>It's <b>not</b> safe to throw C++&nbsp;exceptions across Lua frames.</li>
+<li>C++&nbsp;exceptions <b>cannot</b> be caught on the Lua side.</li>
+<li>Lua errors <b>cannot</b> be caught on the C++ side.</li>
+<li>Throwing Lua errors across C++ frames will <b>not</b> call
+C++ destructors.</li>
+<li>Additionally, on Windows/x86 with SEH-based C++&nbsp;exceptions:
+it's <b>not</b> safe to throw a Lua error across any frames containing
+a C++ function with any try/catch construct or using variables with
+(implicit) destructors. This also applies to any functions which may be
+inlined in such a function. It doesn't matter whether <tt>lua_error()</tt>
+is called inside or outside of a try/catch or whether any object actually
+needs to be destroyed: the SEH chain is corrupted and this will eventually
+lead to the termination of the process.</li>
+</ul>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/faq.html b/3rdparty/lua/doc/faq.html
index 887a5b2..c61b8dc 100644
--- a/3rdparty/lua/doc/faq.html
+++ b/3rdparty/lua/doc/faq.html
@@ -1,184 +1,184 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>Frequently Asked Questions (FAQ)</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-dd { margin-left: 1.5em; }
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>Frequently Asked Questions (FAQ)</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a class="current" href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<dl>
-<dt>Q: Where can I learn more about LuaJIT and Lua?</dt>
-<dd>
-<ul style="padding: 0;">
-<li>The <a href="http://luajit.org/list.html"><span class="ext">&raquo;</span>&nbsp;LuaJIT mailing list</a> focuses on topics
-related to LuaJIT.</li>
-<li>The <a href="http://wiki.luajit.org/"><span class="ext">&raquo;</span>&nbsp;LuaJIT wiki</a> gathers community
-resources about LuaJIT.</li>
-<li>News about Lua itself can be found at the
-<a href="http://www.lua.org/lua-l.html"><span class="ext">&raquo;</span>&nbsp;Lua mailing list</a>.
-The mailing list archives are worth checking out for older postings
-about LuaJIT.</li>
-<li>The <a href="http://lua.org"><span class="ext">&raquo;</span>&nbsp;main Lua.org site</a> has complete
-<a href="http://www.lua.org/docs.html"><span class="ext">&raquo;</span>&nbsp;documentation</a> of the language
-and links to books and papers about Lua.</li>
-<li>The community-managed <a href="http://lua-users.org/wiki/"><span class="ext">&raquo;</span>&nbsp;Lua Wiki</a>
-has information about diverse topics.</li>
-</ul>
-</dl>
-
-<dl>
-<dt>Q: Where can I learn more about the compiler technology used by LuaJIT?</dt>
-<dd>
-I'm planning to write more documentation about the internals of LuaJIT.
-In the meantime, please use the following Google Scholar searches
-to find relevant papers:<br>
-Search for: <a href="http://scholar.google.com/scholar?q=Trace+Compiler"><span class="ext">&raquo;</span>&nbsp;Trace Compiler</a><br>
-Search for: <a href="http://scholar.google.com/scholar?q=JIT+Compiler"><span class="ext">&raquo;</span>&nbsp;JIT Compiler</a><br>
-Search for: <a href="http://scholar.google.com/scholar?q=Dynamic+Language+Optimizations"><span class="ext">&raquo;</span>&nbsp;Dynamic Language Optimizations</a><br>
-Search for: <a href="http://scholar.google.com/scholar?q=SSA+Form"><span class="ext">&raquo;</span>&nbsp;SSA Form</a><br>
-Search for: <a href="http://scholar.google.com/scholar?q=Linear+Scan+Register+Allocation"><span class="ext">&raquo;</span>&nbsp;Linear Scan Register Allocation</a><br>
-Here is a list of the <a href="http://article.gmane.org/gmane.comp.lang.lua.general/58908"><span class="ext">&raquo;</span>&nbsp;innovative features in LuaJIT</a>.<br>
-And, you know, reading the source is of course the only way to enlightenment. :-)
-</dd>
-</dl>
-
-<dl>
-<dt>Q: Why do I get this error: "attempt to index global 'arg' (a nil value)"?<br>
-Q: My vararg functions fail after switching to LuaJIT!</dt>
-<dd>LuaJIT is compatible with the Lua 5.1 language standard. It doesn't
-support the implicit <tt>arg</tt> parameter for old-style vararg
-functions from Lua 5.0.<br>Please convert your code to the
-<a href="http://www.lua.org/manual/5.1/manual.html#2.5.9"><span class="ext">&raquo;</span>&nbsp;Lua 5.1
-vararg syntax</a>.</dd>
-</dl>
-
-<dl>
-<dt>Q: Why do I get this error: "bad FPU precision"?<br>
-<dt>Q: I get weird behavior after initializing Direct3D.<br>
-<dt>Q: Some FPU operations crash after I load a Delphi DLL.<br>
-</dt>
-<dd>
-
-DirectX/Direct3D (up to version 9) sets the x87 FPU to single-precision
-mode by default. This violates the Windows ABI and interferes with the
-operation of many programs &mdash; LuaJIT is affected, too. Please make
-sure you always use the <tt>D3DCREATE_FPU_PRESERVE</tt> flag when
-initializing Direct3D.<br>
-
-Direct3D version 10 or higher does not show this behavior anymore.
-Consider testing your application with older versions, too.<br>
-
-Similarly, the Borland/Delphi runtime modifies the FPU control word and
-enables FP exceptions. Of course this violates the Windows ABI, too.
-Please check the Delphi docs for the Set8087CW method.
-
-</dl>
-
-<dl>
-<dt>Q: Sometimes Ctrl-C fails to stop my Lua program. Why?</dt>
-<dd>The interrupt signal handler sets a Lua debug hook. But this is
-currently ignored by compiled code (this will eventually be fixed). If
-your program is running in a tight loop and never falls back to the
-interpreter, the debug hook never runs and can't throw the
-"interrupted!" error.<br> In the meantime you have to press Ctrl-C
-twice to stop your program. That's similar to when it's stuck
-running inside a C function under the Lua interpreter.</dd>
-</dl>
-
-<dl>
-<dt>Q: Why doesn't my favorite power-patch for Lua apply against LuaJIT?</dt>
-<dd>Because it's a completely redesigned VM and has very little code
-in common with Lua anymore. Also, if the patch introduces changes to
-the Lua semantics, these would need to be reflected everywhere in the
-VM, from the interpreter up to all stages of the compiler.<br> Please
-use only standard Lua language constructs. For many common needs you
-can use source transformations or use wrapper or proxy functions.
-The compiler will happily optimize away such indirections.</dd>
-</dl>
-
-<dl>
-<dt>Q: Lua runs everywhere. Why doesn't LuaJIT support my CPU?</dt>
-<dd>Because it's a compiler &mdash; it needs to generate native
-machine code. This means the code generator must be ported to each
-architecture. And the fast interpreter is written in assembler and
-must be ported, too. This is quite an undertaking.<br>
-The <a href="install.html">install documentation</a> shows the supported
-architectures. Other architectures will follow based on sufficient user
-demand and/or sponsoring.</dd>
-</dl>
-
-<dl>
-<dt>Q: When will feature X be added? When will the next version be released?</dt>
-<dd>When it's ready.<br>
-C'mon, it's open source &mdash; I'm doing it on my own time and you're
-getting it for free. You can either contribute a patch or sponsor
-the development of certain features, if they are important to you.
-</dd>
-</dl>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>Frequently Asked Questions (FAQ)</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+dd { margin-left: 1.5em; }
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>Frequently Asked Questions (FAQ)</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a class="current" href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<dl>
+<dt>Q: Where can I learn more about LuaJIT and Lua?</dt>
+<dd>
+<ul style="padding: 0;">
+<li>The <a href="http://luajit.org/list.html"><span class="ext">&raquo;</span>&nbsp;LuaJIT mailing list</a> focuses on topics
+related to LuaJIT.</li>
+<li>The <a href="http://wiki.luajit.org/"><span class="ext">&raquo;</span>&nbsp;LuaJIT wiki</a> gathers community
+resources about LuaJIT.</li>
+<li>News about Lua itself can be found at the
+<a href="http://www.lua.org/lua-l.html"><span class="ext">&raquo;</span>&nbsp;Lua mailing list</a>.
+The mailing list archives are worth checking out for older postings
+about LuaJIT.</li>
+<li>The <a href="http://lua.org"><span class="ext">&raquo;</span>&nbsp;main Lua.org site</a> has complete
+<a href="http://www.lua.org/docs.html"><span class="ext">&raquo;</span>&nbsp;documentation</a> of the language
+and links to books and papers about Lua.</li>
+<li>The community-managed <a href="http://lua-users.org/wiki/"><span class="ext">&raquo;</span>&nbsp;Lua Wiki</a>
+has information about diverse topics.</li>
+</ul>
+</dl>
+
+<dl>
+<dt>Q: Where can I learn more about the compiler technology used by LuaJIT?</dt>
+<dd>
+I'm planning to write more documentation about the internals of LuaJIT.
+In the meantime, please use the following Google Scholar searches
+to find relevant papers:<br>
+Search for: <a href="http://scholar.google.com/scholar?q=Trace+Compiler"><span class="ext">&raquo;</span>&nbsp;Trace Compiler</a><br>
+Search for: <a href="http://scholar.google.com/scholar?q=JIT+Compiler"><span class="ext">&raquo;</span>&nbsp;JIT Compiler</a><br>
+Search for: <a href="http://scholar.google.com/scholar?q=Dynamic+Language+Optimizations"><span class="ext">&raquo;</span>&nbsp;Dynamic Language Optimizations</a><br>
+Search for: <a href="http://scholar.google.com/scholar?q=SSA+Form"><span class="ext">&raquo;</span>&nbsp;SSA Form</a><br>
+Search for: <a href="http://scholar.google.com/scholar?q=Linear+Scan+Register+Allocation"><span class="ext">&raquo;</span>&nbsp;Linear Scan Register Allocation</a><br>
+Here is a list of the <a href="http://article.gmane.org/gmane.comp.lang.lua.general/58908"><span class="ext">&raquo;</span>&nbsp;innovative features in LuaJIT</a>.<br>
+And, you know, reading the source is of course the only way to enlightenment. :-)
+</dd>
+</dl>
+
+<dl>
+<dt>Q: Why do I get this error: "attempt to index global 'arg' (a nil value)"?<br>
+Q: My vararg functions fail after switching to LuaJIT!</dt>
+<dd>LuaJIT is compatible with the Lua 5.1 language standard. It doesn't
+support the implicit <tt>arg</tt> parameter for old-style vararg
+functions from Lua 5.0.<br>Please convert your code to the
+<a href="http://www.lua.org/manual/5.1/manual.html#2.5.9"><span class="ext">&raquo;</span>&nbsp;Lua 5.1
+vararg syntax</a>.</dd>
+</dl>
+
+<dl>
+<dt>Q: Why do I get this error: "bad FPU precision"?<br>
+<dt>Q: I get weird behavior after initializing Direct3D.<br>
+<dt>Q: Some FPU operations crash after I load a Delphi DLL.<br>
+</dt>
+<dd>
+
+DirectX/Direct3D (up to version 9) sets the x87 FPU to single-precision
+mode by default. This violates the Windows ABI and interferes with the
+operation of many programs &mdash; LuaJIT is affected, too. Please make
+sure you always use the <tt>D3DCREATE_FPU_PRESERVE</tt> flag when
+initializing Direct3D.<br>
+
+Direct3D version 10 or higher does not show this behavior anymore.
+Consider testing your application with older versions, too.<br>
+
+Similarly, the Borland/Delphi runtime modifies the FPU control word and
+enables FP exceptions. Of course this violates the Windows ABI, too.
+Please check the Delphi docs for the Set8087CW method.
+
+</dl>
+
+<dl>
+<dt>Q: Sometimes Ctrl-C fails to stop my Lua program. Why?</dt>
+<dd>The interrupt signal handler sets a Lua debug hook. But this is
+currently ignored by compiled code (this will eventually be fixed). If
+your program is running in a tight loop and never falls back to the
+interpreter, the debug hook never runs and can't throw the
+"interrupted!" error.<br> In the meantime you have to press Ctrl-C
+twice to stop your program. That's similar to when it's stuck
+running inside a C function under the Lua interpreter.</dd>
+</dl>
+
+<dl>
+<dt>Q: Why doesn't my favorite power-patch for Lua apply against LuaJIT?</dt>
+<dd>Because it's a completely redesigned VM and has very little code
+in common with Lua anymore. Also, if the patch introduces changes to
+the Lua semantics, these would need to be reflected everywhere in the
+VM, from the interpreter up to all stages of the compiler.<br> Please
+use only standard Lua language constructs. For many common needs you
+can use source transformations or use wrapper or proxy functions.
+The compiler will happily optimize away such indirections.</dd>
+</dl>
+
+<dl>
+<dt>Q: Lua runs everywhere. Why doesn't LuaJIT support my CPU?</dt>
+<dd>Because it's a compiler &mdash; it needs to generate native
+machine code. This means the code generator must be ported to each
+architecture. And the fast interpreter is written in assembler and
+must be ported, too. This is quite an undertaking.<br>
+The <a href="install.html">install documentation</a> shows the supported
+architectures. Other architectures will follow based on sufficient user
+demand and/or sponsoring.</dd>
+</dl>
+
+<dl>
+<dt>Q: When will feature X be added? When will the next version be released?</dt>
+<dd>When it's ready.<br>
+C'mon, it's open source &mdash; I'm doing it on my own time and you're
+getting it for free. You can either contribute a patch or sponsor
+the development of certain features, if they are important to you.
+</dd>
+</dl>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/install.html b/3rdparty/lua/doc/install.html
index 7a878b1..faf19c4 100644
--- a/3rdparty/lua/doc/install.html
+++ b/3rdparty/lua/doc/install.html
@@ -4,7 +4,7 @@
<title>Installation</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
<meta name="Language" content="en">
<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
@@ -120,14 +120,14 @@ operating systems, CPUs and compilers:
<tr class="even">
<td class="compatcpu">x64 (64 bit)</td>
<td class="compatos">GCC 4.x</td>
-<td class="compatos">ORBIS (<a href="#ps4">PS4</a>)</td>
+<td class="compatos compatno">&nbsp;</td>
<td class="compatos">GCC 4.x</td>
<td class="compatos">MSVC + SDK v7.0<br>WinSDK v7.0</td>
</tr>
<tr class="odd">
<td class="compatcpu"><a href="#cross2">ARMv5+<br>ARM9E+</a></td>
<td class="compatos">GCC 4.2+</td>
-<td class="compatos">GCC 4.2+<br>PSP2 (<a href="#psvita">PS VITA</a>)</td>
+<td class="compatos">GCC 4.2+</td>
<td class="compatos">GCC 4.2+</td>
<td class="compatos compatno">&nbsp;</td>
</tr>
@@ -188,8 +188,8 @@ open a terminal window and change to this directory. Now unpack the archive
and change to the newly created directory:
</p>
<pre class="code">
-tar zxf LuaJIT-2.0.4.tar.gz
-cd LuaJIT-2.0.4</pre>
+tar zxf LuaJIT-2.0.2.tar.gz
+cd LuaJIT-2.0.2</pre>
<h3>Building LuaJIT</h3>
<p>
The supplied Makefiles try to auto-detect the settings needed for your
@@ -458,56 +458,25 @@ ISDKF="-arch armv7 -isysroot $ISDK/SDKs/$ISDKVER"
make HOST_CC="gcc -m32 -arch i386" CROSS=$ISDKP TARGET_FLAGS="$ISDKF" \
TARGET_SYS=iOS
</pre>
-
-<h3 id="consoles">Cross-compiling for consoles</h3>
-<p>
-Building LuaJIT for consoles requires both a supported host compiler
-(x86 or x64) and a cross-compiler (to PPC or ARM) from the official
-console SDK.
-</p>
-<p>
-Due to restrictions on consoles, the JIT compiler is disabled and only
-the fast interpreter is built. This is still faster than plain Lua,
-but much slower than the JIT compiler. The FFI is disabled, too, since
-it's not very useful in such an environment.
-</p>
-<p>
-The following commands build a static library <tt>libluajit.a</tt>,
-which can be linked against your game, just like the Lua library.
-</p>
<p>
-To cross-compile for <b id="ps3">PS3</b> from a Linux host (requires
-32&nbsp;bit GCC, i.e. multilib Linux/x64) or a Windows host (requires
-32&nbsp;bit MinGW), run this command:
+You can cross-compile for <b id="ps3">PS3</b> using the PS3&nbsp;SDK from
+a Linux host or a Windows host (requires 32 bit MinGW (GCC) on the host,
+too). Due to restrictions on consoles, the JIT compiler is disabled and
+only the fast interpreter is built:
</p>
<pre class="code">
make HOST_CC="gcc -m32" CROSS=ppu-lv2-
</pre>
<p>
-To cross-compile for <b id="ps4">PS4</b> from a Windows host,
-open a "Visual Studio .NET Command Prompt" (64&nbsp;bit host compiler),
-<tt>cd</tt> to the directory where you've unpacked the sources and
-run the following commands:
+You can cross-compile for <b id="xbox360">Xbox 360</b> using the
+Xbox&nbsp;360 SDK (MSVC + XEDK). Due to restrictions on consoles, the
+JIT compiler is disabled and only the fast interpreter is built.
</p>
-<pre class="code">
-cd src
-ps4build
-</pre>
<p>
-To cross-compile for <b id="psvita">PS Vita</b> from a Windows host,
-open a "Visual Studio .NET Command Prompt" (32&nbsp;bit host compiler),
-<tt>cd</tt> to the directory where you've unpacked the sources and
-run the following commands:
-</p>
-<pre class="code">
-cd src
-psvitabuild
-</pre>
-<p>
-To cross-compile for <b id="xbox360">Xbox 360</b> from a Windows host,
-open a "Visual Studio .NET Command Prompt" (32&nbsp;bit host compiler),
+Open a "Visual Studio .NET Command Prompt" (32&nbsp;bit host compiler),
<tt>cd</tt> to the directory where you've unpacked the sources and run
-the following commands:
+the following commands. This builds a static library <tt>luajit20.lib</tt>,
+which can be linked against your game, just like the Lua library.
</p>
<pre class="code">
cd src
@@ -596,11 +565,9 @@ for a regular distribution build:
<ul>
<li><tt>PREFIX</tt> overrides the installation path and should usually
be set to <tt>/usr</tt>. Setting this also changes the module paths and
-the paths needed to locate the shared library.</li>
+the <tt>-rpath</tt> of the shared library.</li>
<li><tt>DESTDIR</tt> is an absolute path which allows you to install
to a shadow tree instead of the root tree of the build system.</li>
-<li><tt>MULTILIB</tt> sets the architecture-specific library path component
-for multilib systems. The default is <tt>lib</tt>.</li>
<li>Have a look at the top-level <tt>Makefile</tt> and <tt>src/Makefile</tt>
for additional variables to tweak. The following variables <em>may</em> be
overridden, but it's <em>not</em> recommended, except for special needs
@@ -636,7 +603,7 @@ to me (the upstream) and not you (the package maintainer), anyway.
</div>
<div id="foot">
<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
+Copyright &copy; 2005-2013 Mike Pall
<span class="noprint">
&middot;
<a href="contact.html">Contact</a>
diff --git a/3rdparty/lua/doc/luajit.html b/3rdparty/lua/doc/luajit.html
index 54e5ca6..e8581d3 100644
--- a/3rdparty/lua/doc/luajit.html
+++ b/3rdparty/lua/doc/luajit.html
@@ -1,234 +1,228 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>LuaJIT</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<meta name="description" content="LuaJIT is a Just-In-Time (JIT) compiler for the Lua language.">
-<style type="text/css">
-table.feature {
- width: inherit;
- line-height: 1.2;
- margin: 0;
-}
-table.feature td {
- width: 80px;
- height: 40px;
- vertical-align: middle;
- text-align: center;
- font-weight: bold;
- border: 4px solid #e6ecff;
- border-radius: 12px;
-}
-table.os td {
- background: #7080d0;
- background-image: linear-gradient(#4060c0 10%, #b0b0ff 95%);
- background-image: -moz-linear-gradient(#4060c0 10%, #b0b0ff 95%);
- background-image: -webkit-linear-gradient(#4060c0 10%, #b0b0ff 95%);
- background-image: -o-linear-gradient(#4060c0 10%, #b0b0ff 95%);
- background-image: -ms-linear-gradient(#4060c0 10%, #b0b0ff 95%);
-}
-table.os1 td {
- color: #ffff80;
-}
-table.os2 td {
- color: #ffa040;
-}
-table.os3 td {
- color: #40ffff;
-}
-table.compiler td {
- color: #2080ff;
- background: #62bf41;
- background-image: linear-gradient(#62bf41 10%, #b0ffb0 95%);
- background-image: -moz-linear-gradient(#62bf41 10%, #b0ffb0 95%);
- background-image: -webkit-linear-gradient(#62bf41 10%, #b0ffb0 95%);
- background-image: -o-linear-gradient(#62bf41 10%, #b0ffb0 95%);
- background-image: -ms-linear-gradient(#62bf41 10%, #b0ffb0 95%);
-}
-table.cpu td {
- color: #ffff00;
- background: #cf7251;
- background-image: linear-gradient(#bf6241 10%, #ffb0b0 95%);
- background-image: -moz-linear-gradient(#bf6241 10%, #ffb0b0 95%);
- background-image: -webkit-linear-gradient(#bf6241 10%, #ffb0b0 95%);
- background-image: -o-linear-gradient(#bf6241 10%, #ffb0b0 95%);
- background-image: -ms-linear-gradient(#bf6241 10%, #ffb0b0 95%);
-}
-table.fcompat td {
- color: #2060e0;
- background: #61cfcf;
- background-image: linear-gradient(#41bfbf 10%, #b0ffff 95%);
- background-image: -moz-linear-gradient(#41bfbf 10%, #b0ffff 95%);
- background-image: -webkit-linear-gradient(#41bfbf 10%, #b0ffff 95%);
- background-image: -o-linear-gradient(#41bfbf 10%, #b0ffff 95%);
- background-image: -ms-linear-gradient(#41bfbf 10%, #b0ffff 95%);
-}
-table.stats td {
- color: #ffffff;
- background: #a0a0a0;
- background-image: linear-gradient(#808080 10%, #d0d0d0 95%);
- background-image: -moz-linear-gradient(#808080 10%, #d0d0d0 95%);
- background-image: -webkit-linear-gradient(#808080 10%, #d0d0d0 95%);
- background-image: -o-linear-gradient(#808080 10%, #d0d0d0 95%);
- background-image: -ms-linear-gradient(#808080 10%, #d0d0d0 95%);
-}
-table.stats td.speed {
- color: #ff4020;
-}
-table.stats td.kb {
- color: #ffff80;
- background: #808080;
- background-image: linear-gradient(#606060 10%, #c0c0c0 95%);
- background-image: -moz-linear-gradient(#606060 10%, #c0c0c0 95%);
- background-image: -webkit-linear-gradient(#606060 10%, #c0c0c0 95%);
- background-image: -o-linear-gradient(#606060 10%, #c0c0c0 95%);
- background-image: -ms-linear-gradient(#606060 10%, #c0c0c0 95%);
-}
-table.feature small {
- font-size: 50%;
-}
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>LuaJIT</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a class="current" href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-LuaJIT is a <b>Just-In-Time Compiler</b> (JIT) for the
-<a href="http://www.lua.org/"><span class="ext">&raquo;</span>&nbsp;Lua</a> programming language.
-Lua is a powerful, dynamic and light-weight programming language.
-It may be embedded or used as a general-purpose, stand-alone language.
-</p>
-<p>
-LuaJIT is Copyright &copy; 2005-2015 Mike Pall, released under the
-<a href="http://www.opensource.org/licenses/mit-license.php"><span class="ext">&raquo;</span>&nbsp;MIT open source license</a>.
-</p>
-<p>
-</p>
-
-<h2>Compatibility</h2>
-<table class="feature os os1">
-<tr><td>Windows</td><td>Linux</td><td>BSD</td><td>OSX</td><td>POSIX</td></tr>
-</table>
-<table class="feature os os2">
-<tr><td><span style="font-size:90%;">Embedded</span></td><td>Android</td><td>iOS</td></tr>
-</table>
-<table class="feature os os3">
-<tr><td>PS3</td><td>PS4</td><td>PS Vita</td><td>Xbox 360</td></tr>
-</table>
-<table class="feature compiler">
-<tr><td>GCC</td><td>CLANG<br>LLVM</td><td>MSVC</td></tr>
-</table>
-<table class="feature cpu">
-<tr><td>x86</td><td>x64</td><td>ARM</td><td>PPC</td><td>e500</td><td>MIPS</td></tr>
-</table>
-<table class="feature fcompat">
-<tr><td>Lua&nbsp;5.1<br>API+ABI</td><td>+&nbsp;JIT</td><td>+&nbsp;BitOp</td><td>+&nbsp;FFI</td><td>Drop-in<br>DLL/.so</td></tr>
-</table>
-
-<h2>Overview</h2>
-<table class="feature stats">
-<tr>
-<td class="speed">3x<br>-&nbsp;&nbsp;100x</td>
-<td class="kb">115&nbsp;<small>KB</small><br>VM</td>
-<td class="kb">90&nbsp;<small>KB</small><br>JIT</td>
-<td class="kloc">63&nbsp;<small>KLOC</small><br>C</td>
-<td class="kloc">24&nbsp;<small>KLOC</small><br>ASM</td>
-<td class="kloc">11&nbsp;<small>KLOC</small><br>Lua</td>
-</tr>
-</table>
-<p style="margin-top: 1em;">
-LuaJIT has been successfully used as a <b>scripting middleware</b> in
-games, appliances, network and graphics apps, numerical simulations,
-trading platforms and many other specialty applications. It scales from
-embedded devices, smartphones, desktops up to server farms. It combines
-high flexibility with <a href="http://luajit.org/performance.html"><span class="ext">&raquo;</span>&nbsp;high performance</a>
-and an unmatched <b>low memory footprint</b>.
-</p>
-<p>
-LuaJIT has been in continuous development since 2005. It's widely
-considered to be <b>one of the fastest dynamic language
-implementations</b>. It has outperformed other dynamic languages on many
-cross-language benchmarks since its first release &mdash; often by a
-substantial margin.
-</p>
-<p>
-For <b>LuaJIT 2.0</b>, the whole VM has been rewritten from the ground up
-and relentlessly optimized for performance. It combines a <b>high-speed
-interpreter</b>, written in assembler, with a <b>state-of-the-art JIT
-compiler</b>.
-</p>
-<p>
-An innovative <b>trace compiler</b> is integrated with advanced,
-SSA-based optimizations and highly tuned code generation backends.
-A substantial reduction of the overhead associated with dynamic languages
-allows it to break into the performance range traditionally reserved for
-offline, static language compilers.
-</p>
-
-<h2>More ...</h2>
-<p>
-Please select a sub-topic in the navigation bar to learn more about LuaJIT.
-</p>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>LuaJIT</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<meta name="description" content="LuaJIT is a Just-In-Time (JIT) compiler for the Lua language.">
+<style type="text/css">
+table.feature {
+ width: inherit;
+ line-height: 1.2;
+ margin: 0;
+}
+table.feature td {
+ width: 80px;
+ height: 40px;
+ vertical-align: middle;
+ text-align: center;
+ font-weight: bold;
+ border: 4px solid #e6ecff;
+ border-radius: 12px;
+}
+table.os td {
+ background: #7080d0;
+ background-image: linear-gradient(#4060c0 10%, #b0b0ff 95%);
+ background-image: -moz-linear-gradient(#4060c0 10%, #b0b0ff 95%);
+ background-image: -webkit-linear-gradient(#4060c0 10%, #b0b0ff 95%);
+ background-image: -o-linear-gradient(#4060c0 10%, #b0b0ff 95%);
+ background-image: -ms-linear-gradient(#4060c0 10%, #b0b0ff 95%);
+}
+table.os1 td {
+ color: #ffff80;
+}
+table.os2 td {
+ color: #ffa040;
+}
+table.compiler td {
+ color: #2080ff;
+ background: #62bf41;
+ background-image: linear-gradient(#62bf41 10%, #b0ffb0 95%);
+ background-image: -moz-linear-gradient(#62bf41 10%, #b0ffb0 95%);
+ background-image: -webkit-linear-gradient(#62bf41 10%, #b0ffb0 95%);
+ background-image: -o-linear-gradient(#62bf41 10%, #b0ffb0 95%);
+ background-image: -ms-linear-gradient(#62bf41 10%, #b0ffb0 95%);
+}
+table.cpu td {
+ color: #ffff00;
+ background: #cf7251;
+ background-image: linear-gradient(#bf6241 10%, #ffb0b0 95%);
+ background-image: -moz-linear-gradient(#bf6241 10%, #ffb0b0 95%);
+ background-image: -webkit-linear-gradient(#bf6241 10%, #ffb0b0 95%);
+ background-image: -o-linear-gradient(#bf6241 10%, #ffb0b0 95%);
+ background-image: -ms-linear-gradient(#bf6241 10%, #ffb0b0 95%);
+}
+table.fcompat td {
+ color: #2060e0;
+ background: #61cfcf;
+ background-image: linear-gradient(#41bfbf 10%, #b0ffff 95%);
+ background-image: -moz-linear-gradient(#41bfbf 10%, #b0ffff 95%);
+ background-image: -webkit-linear-gradient(#41bfbf 10%, #b0ffff 95%);
+ background-image: -o-linear-gradient(#41bfbf 10%, #b0ffff 95%);
+ background-image: -ms-linear-gradient(#41bfbf 10%, #b0ffff 95%);
+}
+table.stats td {
+ color: #ffffff;
+ background: #a0a0a0;
+ background-image: linear-gradient(#808080 10%, #d0d0d0 95%);
+ background-image: -moz-linear-gradient(#808080 10%, #d0d0d0 95%);
+ background-image: -webkit-linear-gradient(#808080 10%, #d0d0d0 95%);
+ background-image: -o-linear-gradient(#808080 10%, #d0d0d0 95%);
+ background-image: -ms-linear-gradient(#808080 10%, #d0d0d0 95%);
+}
+table.stats td.speed {
+ color: #ff4020;
+}
+table.stats td.kb {
+ color: #ffff80;
+ background: #808080;
+ background-image: linear-gradient(#606060 10%, #c0c0c0 95%);
+ background-image: -moz-linear-gradient(#606060 10%, #c0c0c0 95%);
+ background-image: -webkit-linear-gradient(#606060 10%, #c0c0c0 95%);
+ background-image: -o-linear-gradient(#606060 10%, #c0c0c0 95%);
+ background-image: -ms-linear-gradient(#606060 10%, #c0c0c0 95%);
+}
+table.feature small {
+ font-size: 50%;
+}
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>LuaJIT</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a class="current" href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+LuaJIT is a <b>Just-In-Time Compiler</b> (JIT) for the
+<a href="http://www.lua.org/"><span class="ext">&raquo;</span>&nbsp;Lua</a> programming language.
+Lua is a powerful, dynamic and light-weight programming language.
+It may be embedded or used as a general-purpose, stand-alone language.
+</p>
+<p>
+LuaJIT is Copyright &copy; 2005-2013 Mike Pall, released under the
+<a href="http://www.opensource.org/licenses/mit-license.php"><span class="ext">&raquo;</span>&nbsp;MIT open source license</a>.
+</p>
+<p>
+</p>
+
+<h2>Compatibility</h2>
+<table class="feature os os1">
+<tr><td>Windows</td><td>Linux</td><td>BSD</td><td>OSX</td><td>POSIX</td></tr>
+</table>
+<table class="feature os os2">
+<tr><td><span style="font-size:90%;">Embedded</span></td><td>Android</td><td>iOS</td><td>PS3</td><td>Xbox 360</td></tr>
+</table>
+<table class="feature compiler">
+<tr><td>GCC</td><td>CLANG<br>LLVM</td><td>MSVC</td></tr>
+</table>
+<table class="feature cpu">
+<tr><td>x86</td><td>x64</td><td>ARM</td><td>PPC</td><td>e500</td><td>MIPS</td></tr>
+</table>
+<table class="feature fcompat">
+<tr><td>Lua&nbsp;5.1<br>API+ABI</td><td>+&nbsp;JIT</td><td>+&nbsp;BitOp</td><td>+&nbsp;FFI</td><td>Drop-in<br>DLL/.so</td></tr>
+</table>
+
+<h2>Overview</h2>
+<table class="feature stats">
+<tr>
+<td class="speed">3x<br>-&nbsp;&nbsp;100x</td>
+<td class="kb">115&nbsp;<small>KB</small><br>VM</td>
+<td class="kb">90&nbsp;<small>KB</small><br>JIT</td>
+<td class="kloc">63&nbsp;<small>KLOC</small><br>C</td>
+<td class="kloc">24&nbsp;<small>KLOC</small><br>ASM</td>
+<td class="kloc">11&nbsp;<small>KLOC</small><br>Lua</td>
+</tr>
+</table>
+<p style="margin-top: 1em;">
+LuaJIT has been successfully used as a <b>scripting middleware</b> in
+games, appliances, network and graphics apps, numerical simulations,
+trading platforms and many other specialty applications. It scales from
+embedded devices, smartphones, desktops up to server farms. It combines
+high flexibility with <a href="http://luajit.org/performance.html"><span class="ext">&raquo;</span>&nbsp;high performance</a>
+and an unmatched <b>low memory footprint</b>.
+</p>
+<p>
+LuaJIT has been in continuous development since 2005. It's widely
+considered to be <b>one of the fastest dynamic language
+implementations</b>. It has outperformed other dynamic languages on many
+cross-language benchmarks since its first release &mdash; often by a
+substantial margin.
+</p>
+<p>
+For <b>LuaJIT 2.0</b>, the whole VM has been rewritten from the ground up
+and relentlessly optimized for performance. It combines a <b>high-speed
+interpreter</b>, written in assembler, with a <b>state-of-the-art JIT
+compiler</b>.
+</p>
+<p>
+An innovative <b>trace compiler</b> is integrated with advanced,
+SSA-based optimizations and highly tuned code generation backends.
+A substantial reduction of the overhead associated with dynamic languages
+allows it to break into the performance range traditionally reserved for
+offline, static language compilers.
+</p>
+
+<h2>More ...</h2>
+<p>
+Please select a sub-topic in the navigation bar to learn more about LuaJIT.
+</p>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/running.html b/3rdparty/lua/doc/running.html
index faffa72..3149b38 100644
--- a/3rdparty/lua/doc/running.html
+++ b/3rdparty/lua/doc/running.html
@@ -1,306 +1,306 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>Running LuaJIT</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-table.opt {
- line-height: 1.2;
-}
-tr.opthead td {
- font-weight: bold;
-}
-td.flag_name {
- width: 4em;
-}
-td.flag_level {
- width: 2em;
- text-align: center;
-}
-td.param_name {
- width: 6em;
-}
-td.param_default {
- width: 4em;
- text-align: right;
-}
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>Running LuaJIT</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a class="current" href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-LuaJIT has only a single stand-alone executable, called <tt>luajit</tt> on
-POSIX systems or <tt>luajit.exe</tt> on Windows. It can be used to run simple
-Lua statements or whole Lua applications from the command line. It has an
-interactive mode, too.
-</p>
-
-<h2 id="options">Command Line Options</h2>
-<p>
-The <tt>luajit</tt> stand-alone executable is just a slightly modified
-version of the regular <tt>lua</tt> stand-alone executable.
-It supports the same basic options, too. <tt>luajit&nbsp;-h</tt>
-prints a short list of the available options. Please have a look at the
-<a href="http://www.lua.org/manual/5.1/manual.html#6"><span class="ext">&raquo;</span>&nbsp;Lua manual</a>
-for details.
-</p>
-<p>
-LuaJIT has some additional options:
-</p>
-
-<h3 id="opt_b"><tt>-b[options] input output</tt></h3>
-<p>
-This option saves or lists bytecode. The following additional options
-are accepted:
-</p>
-<ul>
-<li><tt>-l</tt> &mdash; Only list bytecode.</li>
-<li><tt>-s</tt> &mdash; Strip debug info (this is the default).</li>
-<li><tt>-g</tt> &mdash; Keep debug info.</li>
-<li><tt>-n name</tt> &mdash; Set module name (default: auto-detect from input name).</li>
-<li><tt>-t type</tt> &mdash; Set output file type (default: auto-detect from output name).</li>
-<li><tt>-a arch</tt> &mdash; Override architecture for object files (default: native).</li>
-<li><tt>-o os</tt> &mdash; Override OS for object files (default: native).</li>
-<li><tt>-e chunk</tt> &mdash; Use chunk string as input.</li>
-<li><tt>-</tt> (a single minus sign) &mdash; Use stdin as input and/or stdout as output.</li>
-</ul>
-<p>
-The output file type is auto-detected from the extension of the output
-file name:
-</p>
-<ul>
-<li><tt>c</tt> &mdash; C source file, exported bytecode data.</li>
-<li><tt>h</tt> &mdash; C header file, static bytecode data.</li>
-<li><tt>obj</tt> or <tt>o</tt> &mdash; Object file, exported bytecode data
-(OS- and architecture-specific).</li>
-<li><tt>raw</tt> or any other extension &mdash; Raw bytecode file (portable).</li>
-</ul>
-<p>
-Notes:
-</p>
-<ul>
-<li>See also <a href="extensions.html#string_dump">string.dump()</a>
-for information on bytecode portability and compatibility.</li>
-<li>A file in raw bytecode format is auto-detected and can be loaded like
-any Lua source file. E.g. directly from the command line or with
-<tt>loadfile()</tt>, <tt>dofile()</tt> etc.</li>
-<li>To statically embed the bytecode of a module in your application,
-generate an object file and just link it with your application.</li>
-<li>On most ELF-based systems (e.g. Linux) you need to explicitly export the
-global symbols when linking your application, e.g. with: <tt>-Wl,-E</tt></li>
-<li><tt>require()</tt> tries to load embedded bytecode data from exported
-symbols (in <tt>*.exe</tt> or <tt>lua51.dll</tt> on Windows) and from
-shared libraries in <tt>package.cpath</tt>.</li>
-</ul>
-<p>
-Typical usage examples:
-</p>
-<pre class="code">
-luajit -b test.lua test.out # Save bytecode to test.out
-luajit -bg test.lua test.out # Keep debug info
-luajit -be "print('hello world')" test.out # Save cmdline script
-
-luajit -bl test.lua # List to stdout
-luajit -bl test.lua test.txt # List to test.txt
-luajit -ble "print('hello world')" # List cmdline script
-
-luajit -b test.lua test.obj # Generate object file
-# Link test.obj with your application and load it with require("test")
-</pre>
-
-<h3 id="opt_j"><tt>-j cmd[=arg[,arg...]]</tt></h3>
-<p>
-This option performs a LuaJIT control command or activates one of the
-loadable extension modules. The command is first looked up in the
-<tt>jit.*</tt> library. If no matching function is found, a module
-named <tt>jit.&lt;cmd&gt;</tt> is loaded and the <tt>start()</tt>
-function of the module is called with the specified arguments (if
-any). The space between <tt>-j</tt> and <tt>cmd</tt> is optional.
-</p>
-<p>
-Here are the available LuaJIT control commands:
-</p>
-<ul>
-<li id="j_on"><tt>-jon</tt> &mdash; Turns the JIT compiler on (default).</li>
-<li id="j_off"><tt>-joff</tt> &mdash; Turns the JIT compiler off (only use the interpreter).</li>
-<li id="j_flush"><tt>-jflush</tt> &mdash; Flushes the whole cache of compiled code.</li>
-<li id="j_v"><tt>-jv</tt> &mdash; Shows verbose information about the progress of the JIT compiler.</li>
-<li id="j_dump"><tt>-jdump</tt> &mdash; Dumps the code and structures used in various compiler stages.</li>
-</ul>
-<p>
-The <tt>-jv</tt> and <tt>-jdump</tt> commands are extension modules
-written in Lua. They are mainly used for debugging the JIT compiler
-itself. For a description of their options and output format, please
-read the comment block at the start of their source.
-They can be found in the <tt>lib</tt> directory of the source
-distribution or installed under the <tt>jit</tt> directory. By default
-this is <tt>/usr/local/share/luajit-2.0.4/jit</tt> on POSIX
-systems.
-</p>
-
-<h3 id="opt_O"><tt>-O[level]</tt><br>
-<tt>-O[+]flag</tt>&nbsp;&nbsp;&nbsp;<tt>-O-flag</tt><br>
-<tt>-Oparam=value</tt></h3>
-<p>
-This option allows fine-tuned control of the optimizations used by
-the JIT compiler. This is mainly intended for debugging LuaJIT itself.
-Please note that the JIT compiler is extremely fast (we are talking
-about the microsecond to millisecond range). Disabling optimizations
-doesn't have any visible impact on its overhead, but usually generates
-code that runs slower.
-</p>
-<p>
-The first form sets an optimization level &mdash; this enables a
-specific mix of optimization flags. <tt>-O0</tt> turns off all
-optimizations and higher numbers enable more optimizations. Omitting
-the level (i.e. just <tt>-O</tt>) sets the default optimization level,
-which is <tt>-O3</tt> in the current version.
-</p>
-<p>
-The second form adds or removes individual optimization flags.
-The third form sets a parameter for the VM or the JIT compiler
-to a specific value.
-</p>
-<p>
-You can either use this option multiple times (like <tt>-Ocse
--O-dce -Ohotloop=10</tt>) or separate several settings with a comma
-(like <tt>-O+cse,-dce,hotloop=10</tt>). The settings are applied from
-left to right and later settings override earlier ones. You can freely
-mix the three forms, but note that setting an optimization level
-overrides all earlier flags.
-</p>
-<p>
-Here are the available flags and at what optimization levels they
-are enabled:
-</p>
-<table class="opt">
-<tr class="opthead">
-<td class="flag_name">Flag</td>
-<td class="flag_level">-O1</td>
-<td class="flag_level">-O2</td>
-<td class="flag_level">-O3</td>
-<td class="flag_desc">&nbsp;</td>
-</tr>
-<tr class="odd separate">
-<td class="flag_name">fold</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Constant Folding, Simplifications and Reassociation</td></tr>
-<tr class="even">
-<td class="flag_name">cse</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Common-Subexpression Elimination</td></tr>
-<tr class="odd">
-<td class="flag_name">dce</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Dead-Code Elimination</td></tr>
-<tr class="even">
-<td class="flag_name">narrow</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Narrowing of numbers to integers</td></tr>
-<tr class="odd">
-<td class="flag_name">loop</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Loop Optimizations (code hoisting)</td></tr>
-<tr class="even">
-<td class="flag_name">fwd</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Load Forwarding (L2L) and Store Forwarding (S2L)</td></tr>
-<tr class="odd">
-<td class="flag_name">dse</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Dead-Store Elimination</td></tr>
-<tr class="even">
-<td class="flag_name">abc</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Array Bounds Check Elimination</td></tr>
-<tr class="odd">
-<td class="flag_name">sink</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Allocation/Store Sinking</td></tr>
-<tr class="even">
-<td class="flag_name">fuse</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Fusion of operands into instructions</td></tr>
-</table>
-<p>
-Here are the parameters and their default settings:
-</p>
-<table class="opt">
-<tr class="opthead">
-<td class="param_name">Parameter</td>
-<td class="param_default">Default</td>
-<td class="param_desc">&nbsp;</td>
-</tr>
-<tr class="odd separate">
-<td class="param_name">maxtrace</td><td class="param_default">1000</td><td class="param_desc">Max. number of traces in the cache</td></tr>
-<tr class="even">
-<td class="param_name">maxrecord</td><td class="param_default">4000</td><td class="param_desc">Max. number of recorded IR instructions</td></tr>
-<tr class="odd">
-<td class="param_name">maxirconst</td><td class="param_default">500</td><td class="param_desc">Max. number of IR constants of a trace</td></tr>
-<tr class="even">
-<td class="param_name">maxside</td><td class="param_default">100</td><td class="param_desc">Max. number of side traces of a root trace</td></tr>
-<tr class="odd">
-<td class="param_name">maxsnap</td><td class="param_default">500</td><td class="param_desc">Max. number of snapshots for a trace</td></tr>
-<tr class="even separate">
-<td class="param_name">hotloop</td><td class="param_default">56</td><td class="param_desc">Number of iterations to detect a hot loop or hot call</td></tr>
-<tr class="odd">
-<td class="param_name">hotexit</td><td class="param_default">10</td><td class="param_desc">Number of taken exits to start a side trace</td></tr>
-<tr class="even">
-<td class="param_name">tryside</td><td class="param_default">4</td><td class="param_desc">Number of attempts to compile a side trace</td></tr>
-<tr class="odd separate">
-<td class="param_name">instunroll</td><td class="param_default">4</td><td class="param_desc">Max. unroll factor for instable loops</td></tr>
-<tr class="even">
-<td class="param_name">loopunroll</td><td class="param_default">15</td><td class="param_desc">Max. unroll factor for loop ops in side traces</td></tr>
-<tr class="odd">
-<td class="param_name">callunroll</td><td class="param_default">3</td><td class="param_desc">Max. unroll factor for pseudo-recursive calls</td></tr>
-<tr class="even">
-<td class="param_name">recunroll</td><td class="param_default">2</td><td class="param_desc">Min. unroll factor for true recursion</td></tr>
-<tr class="odd separate">
-<td class="param_name">sizemcode</td><td class="param_default">32</td><td class="param_desc">Size of each machine code area in KBytes (Windows: 64K)</td></tr>
-<tr class="even">
-<td class="param_name">maxmcode</td><td class="param_default">512</td><td class="param_desc">Max. total size of all machine code areas in KBytes</td></tr>
-</table>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>Running LuaJIT</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+table.opt {
+ line-height: 1.2;
+}
+tr.opthead td {
+ font-weight: bold;
+}
+td.flag_name {
+ width: 4em;
+}
+td.flag_level {
+ width: 2em;
+ text-align: center;
+}
+td.param_name {
+ width: 6em;
+}
+td.param_default {
+ width: 4em;
+ text-align: right;
+}
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>Running LuaJIT</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a class="current" href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+LuaJIT has only a single stand-alone executable, called <tt>luajit</tt> on
+POSIX systems or <tt>luajit.exe</tt> on Windows. It can be used to run simple
+Lua statements or whole Lua applications from the command line. It has an
+interactive mode, too.
+</p>
+
+<h2 id="options">Command Line Options</h2>
+<p>
+The <tt>luajit</tt> stand-alone executable is just a slightly modified
+version of the regular <tt>lua</tt> stand-alone executable.
+It supports the same basic options, too. <tt>luajit&nbsp;-h</tt>
+prints a short list of the available options. Please have a look at the
+<a href="http://www.lua.org/manual/5.1/manual.html#6"><span class="ext">&raquo;</span>&nbsp;Lua manual</a>
+for details.
+</p>
+<p>
+LuaJIT has some additional options:
+</p>
+
+<h3 id="opt_b"><tt>-b[options] input output</tt></h3>
+<p>
+This option saves or lists bytecode. The following additional options
+are accepted:
+</p>
+<ul>
+<li><tt>-l</tt> &mdash; Only list bytecode.</li>
+<li><tt>-s</tt> &mdash; Strip debug info (this is the default).</li>
+<li><tt>-g</tt> &mdash; Keep debug info.</li>
+<li><tt>-n name</tt> &mdash; Set module name (default: auto-detect from input name).</li>
+<li><tt>-t type</tt> &mdash; Set output file type (default: auto-detect from output name).</li>
+<li><tt>-a arch</tt> &mdash; Override architecture for object files (default: native).</li>
+<li><tt>-o os</tt> &mdash; Override OS for object files (default: native).</li>
+<li><tt>-e chunk</tt> &mdash; Use chunk string as input.</li>
+<li><tt>-</tt> (a single minus sign) &mdash; Use stdin as input and/or stdout as output.</li>
+</ul>
+<p>
+The output file type is auto-detected from the extension of the output
+file name:
+</p>
+<ul>
+<li><tt>c</tt> &mdash; C source file, exported bytecode data.</li>
+<li><tt>h</tt> &mdash; C header file, static bytecode data.</li>
+<li><tt>obj</tt> or <tt>o</tt> &mdash; Object file, exported bytecode data
+(OS- and architecture-specific).</li>
+<li><tt>raw</tt> or any other extension &mdash; Raw bytecode file (portable).</li>
+</ul>
+<p>
+Notes:
+</p>
+<ul>
+<li>See also <a href="extensions.html#string_dump">string.dump()</a>
+for information on bytecode portability and compatibility.</li>
+<li>A file in raw bytecode format is auto-detected and can be loaded like
+any Lua source file. E.g. directly from the command line or with
+<tt>loadfile()</tt>, <tt>dofile()</tt> etc.</li>
+<li>To statically embed the bytecode of a module in your application,
+generate an object file and just link it with your application.</li>
+<li>On most ELF-based systems (e.g. Linux) you need to explicitly export the
+global symbols when linking your application, e.g. with: <tt>-Wl,-E</tt></li>
+<li><tt>require()</tt> tries to load embedded bytecode data from exported
+symbols (in <tt>*.exe</tt> or <tt>lua51.dll</tt> on Windows) and from
+shared libraries in <tt>package.cpath</tt>.</li>
+</ul>
+<p>
+Typical usage examples:
+</p>
+<pre class="code">
+luajit -b test.lua test.out # Save bytecode to test.out
+luajit -bg test.lua test.out # Keep debug info
+luajit -be "print('hello world')" test.out # Save cmdline script
+
+luajit -bl test.lua # List to stdout
+luajit -bl test.lua test.txt # List to test.txt
+luajit -ble "print('hello world')" # List cmdline script
+
+luajit -b test.lua test.obj # Generate object file
+# Link test.obj with your application and load it with require("test")
+</pre>
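+<p>
+As a minimal sketch, bytecode embedded via a generated C header can be
+loaded with the standard Lua/C API. The names below assume
+<tt>luajit -b test.lua test.h</tt> defines <tt>luaJIT_BC_test</tt> and
+<tt>luaJIT_BC_test_SIZE</tt>; check the generated header for the exact
+identifiers.
+</p>
+<pre class="code">
+#include &lt;lua.h&gt;
+#include &lt;lauxlib.h&gt;
+#include "test.h"  /* Assumed to define luaJIT_BC_test / luaJIT_BC_test_SIZE. */
+
+/* Load and run the embedded bytecode; returns 0 on success. */
+static int run_embedded_test(lua_State *L)
+{
+  if (luaL_loadbuffer(L, (const char *)luaJIT_BC_test,
+                      luaJIT_BC_test_SIZE, "test") ||
+      lua_pcall(L, 0, 0, 0))
+    return -1;  /* Error message is left on the Lua stack. */
+  return 0;
+}
+</pre>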
+
+<h3 id="opt_j"><tt>-j cmd[=arg[,arg...]]</tt></h3>
+<p>
+This option performs a LuaJIT control command or activates one of the
+loadable extension modules. The command is first looked up in the
+<tt>jit.*</tt> library. If no matching function is found, a module
+named <tt>jit.&lt;cmd&gt;</tt> is loaded and the <tt>start()</tt>
+function of the module is called with the specified arguments (if
+any). The space between <tt>-j</tt> and <tt>cmd</tt> is optional.
+</p>
+<p>
+Here are the available LuaJIT control commands:
+</p>
+<ul>
+<li id="j_on"><tt>-jon</tt> &mdash; Turns the JIT compiler on (default).</li>
+<li id="j_off"><tt>-joff</tt> &mdash; Turns the JIT compiler off (only use the interpreter).</li>
+<li id="j_flush"><tt>-jflush</tt> &mdash; Flushes the whole cache of compiled code.</li>
+<li id="j_v"><tt>-jv</tt> &mdash; Shows verbose information about the progress of the JIT compiler.</li>
+<li id="j_dump"><tt>-jdump</tt> &mdash; Dumps the code and structures used in various compiler stages.</li>
+</ul>
+<p>
+The <tt>-jv</tt> and <tt>-jdump</tt> commands are extension modules
+written in Lua. They are mainly used for debugging the JIT compiler
+itself. For a description of their options and output format, please
+read the comment block at the start of their source.
+They can be found in the <tt>lib</tt> directory of the source
+distribution or installed under the <tt>jit</tt> directory. By default
+this is <tt>/usr/local/share/luajit-2.0.2/jit</tt> on POSIX
+systems.
+</p>
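+<p>
+For example, to watch the JIT compiler at work on a script:
+</p>
+<pre class="code">
+luajit -jv test.lua     # Show verbose trace progress while running test.lua
+luajit -jdump test.lua  # Dump compiler stages (bytecode, IR, machine code)
+</pre>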
+
+<h3 id="opt_O"><tt>-O[level]</tt><br>
+<tt>-O[+]flag</tt>&nbsp;&nbsp;&nbsp;<tt>-O-flag</tt><br>
+<tt>-Oparam=value</tt></h3>
+<p>
+This option allows fine-tuned control of the optimizations used by
+the JIT compiler. This is mainly intended for debugging LuaJIT itself.
+Please note that the JIT compiler is extremely fast (we are talking
+about the microsecond to millisecond range). Disabling optimizations
+doesn't have any visible impact on its overhead, but usually generates
+code that runs slower.
+</p>
+<p>
+The first form sets an optimization level &mdash; this enables a
+specific mix of optimization flags. <tt>-O0</tt> turns off all
+optimizations and higher numbers enable more optimizations. Omitting
+the level (i.e. just <tt>-O</tt>) sets the default optimization level,
+which is <tt>-O3</tt> in the current version.
+</p>
+<p>
+The second form adds or removes individual optimization flags.
+The third form sets a parameter for the VM or the JIT compiler
+to a specific value.
+</p>
+<p>
+You can either use this option multiple times (like <tt>-Ocse
+-O-dce -Ohotloop=10</tt>) or separate several settings with a comma
+(like <tt>-O+cse,-dce,hotloop=10</tt>). The settings are applied from
+left to right and later settings override earlier ones. You can freely
+mix the three forms, but note that setting an optimization level
+overrides all earlier flags.
+</p>
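+<p>
+For example, these two invocations apply the same settings:
+</p>
+<pre class="code">
+luajit -Ocse -O-dce -Ohotloop=10 test.lua   # Multiple -O options
+luajit -O+cse,-dce,hotloop=10 test.lua      # Comma-separated equivalent
+</pre>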
+<p>
+Here are the available flags and at what optimization levels they
+are enabled:
+</p>
+<table class="opt">
+<tr class="opthead">
+<td class="flag_name">Flag</td>
+<td class="flag_level">-O1</td>
+<td class="flag_level">-O2</td>
+<td class="flag_level">-O3</td>
+<td class="flag_desc">&nbsp;</td>
+</tr>
+<tr class="odd separate">
+<td class="flag_name">fold</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Constant Folding, Simplifications and Reassociation</td></tr>
+<tr class="even">
+<td class="flag_name">cse</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Common-Subexpression Elimination</td></tr>
+<tr class="odd">
+<td class="flag_name">dce</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Dead-Code Elimination</td></tr>
+<tr class="even">
+<td class="flag_name">narrow</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Narrowing of numbers to integers</td></tr>
+<tr class="odd">
+<td class="flag_name">loop</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_level">&bull;</td><td class="flag_desc">Loop Optimizations (code hoisting)</td></tr>
+<tr class="even">
+<td class="flag_name">fwd</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Load Forwarding (L2L) and Store Forwarding (S2L)</td></tr>
+<tr class="odd">
+<td class="flag_name">dse</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Dead-Store Elimination</td></tr>
+<tr class="even">
+<td class="flag_name">abc</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Array Bounds Check Elimination</td></tr>
+<tr class="odd">
+<td class="flag_name">sink</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Allocation/Store Sinking</td></tr>
+<tr class="even">
+<td class="flag_name">fuse</td><td class="flag_level">&nbsp;</td><td class="flag_level">&nbsp;</td><td class="flag_level">&bull;</td><td class="flag_desc">Fusion of operands into instructions</td></tr>
+</table>
+<p>
+Here are the parameters and their default settings:
+</p>
+<table class="opt">
+<tr class="opthead">
+<td class="param_name">Parameter</td>
+<td class="param_default">Default</td>
+<td class="param_desc">&nbsp;</td>
+</tr>
+<tr class="odd separate">
+<td class="param_name">maxtrace</td><td class="param_default">1000</td><td class="param_desc">Max. number of traces in the cache</td></tr>
+<tr class="even">
+<td class="param_name">maxrecord</td><td class="param_default">4000</td><td class="param_desc">Max. number of recorded IR instructions</td></tr>
+<tr class="odd">
+<td class="param_name">maxirconst</td><td class="param_default">500</td><td class="param_desc">Max. number of IR constants of a trace</td></tr>
+<tr class="even">
+<td class="param_name">maxside</td><td class="param_default">100</td><td class="param_desc">Max. number of side traces of a root trace</td></tr>
+<tr class="odd">
+<td class="param_name">maxsnap</td><td class="param_default">500</td><td class="param_desc">Max. number of snapshots for a trace</td></tr>
+<tr class="even separate">
+<td class="param_name">hotloop</td><td class="param_default">56</td><td class="param_desc">Number of iterations to detect a hot loop or hot call</td></tr>
+<tr class="odd">
+<td class="param_name">hotexit</td><td class="param_default">10</td><td class="param_desc">Number of taken exits to start a side trace</td></tr>
+<tr class="even">
+<td class="param_name">tryside</td><td class="param_default">4</td><td class="param_desc">Number of attempts to compile a side trace</td></tr>
+<tr class="odd separate">
+<td class="param_name">instunroll</td><td class="param_default">4</td><td class="param_desc">Max. unroll factor for instable loops</td></tr>
+<tr class="even">
+<td class="param_name">loopunroll</td><td class="param_default">15</td><td class="param_desc">Max. unroll factor for loop ops in side traces</td></tr>
+<tr class="odd">
+<td class="param_name">callunroll</td><td class="param_default">3</td><td class="param_desc">Max. unroll factor for pseudo-recursive calls</td></tr>
+<tr class="even">
+<td class="param_name">recunroll</td><td class="param_default">2</td><td class="param_desc">Min. unroll factor for true recursion</td></tr>
+<tr class="odd separate">
+<td class="param_name">sizemcode</td><td class="param_default">32</td><td class="param_desc">Size of each machine code area in KBytes (Windows: 64K)</td></tr>
+<tr class="even">
+<td class="param_name">maxmcode</td><td class="param_default">512</td><td class="param_desc">Max. total size of all machine code areas in KBytes</td></tr>
+</table>
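+<p>
+A parameter is set with the third form, e.g. to raise the machine code
+limit for a larger application:
+</p>
+<pre class="code">
+luajit -Omaxmcode=1024 test.lua   # Allow up to 1024 KBytes of machine code
+</pre>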
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/doc/status.html b/3rdparty/lua/doc/status.html
index 2014abd..3d148b0 100644
--- a/3rdparty/lua/doc/status.html
+++ b/3rdparty/lua/doc/status.html
@@ -1,116 +1,125 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<title>Status</title>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
-<meta name="Author" content="Mike Pall">
-<meta name="Copyright" content="Copyright (C) 2005-2015, Mike Pall">
-<meta name="Language" content="en">
-<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
-<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
-<style type="text/css">
-ul li { padding-bottom: 0.3em; }
-</style>
-</head>
-<body>
-<div id="site">
-<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
-</div>
-<div id="head">
-<h1>Status</h1>
-</div>
-<div id="nav">
-<ul><li>
-<a href="luajit.html">LuaJIT</a>
-<ul><li>
-<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="install.html">Installation</a>
-</li><li>
-<a href="running.html">Running</a>
-</li></ul>
-</li><li>
-<a href="extensions.html">Extensions</a>
-<ul><li>
-<a href="ext_ffi.html">FFI Library</a>
-<ul><li>
-<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
-</li><li>
-<a href="ext_ffi_api.html">ffi.* API</a>
-</li><li>
-<a href="ext_ffi_semantics.html">FFI Semantics</a>
-</li></ul>
-</li><li>
-<a href="ext_jit.html">jit.* Library</a>
-</li><li>
-<a href="ext_c_api.html">Lua/C API</a>
-</li></ul>
-</li><li>
-<a class="current" href="status.html">Status</a>
-<ul><li>
-<a href="changes.html">Changes</a>
-</li></ul>
-</li><li>
-<a href="faq.html">FAQ</a>
-</li><li>
-<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
-</li><li>
-<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
-</li></ul>
-</div>
-<div id="main">
-<p>
-<span style="color: #0000c0;">LuaJIT&nbsp;2.0</span> is the current
-<span style="color: #0000c0;">stable branch</span>. This branch is in
-feature-freeze &mdash; new features will only be added to LuaJIT&nbsp;2.1.
-</p>
-
-<h2>Current Status</h2>
-<p>
-LuaJIT ought to run all Lua&nbsp;5.1-compatible source code just fine.
-It's considered a serious bug if the VM crashes or produces unexpected
-results &mdash; please report this.
-</p>
-<p>
-Known incompatibilities and issues in LuaJIT&nbsp;2.0:
-</p>
-<ul>
-<li>
-There are some differences in <b>implementation-defined</b> behavior.
-These either have a good reason, are arbitrary design choices
-or are due to quirks in the VM. The latter cases may get fixed if a
-demonstrable need is shown.
-</li>
-<li>
-The Lua <b>debug API</b> is missing a couple of features (return
-hooks for non-Lua functions) and shows slightly different behavior
-in LuaJIT (no per-coroutine hooks, no tail call counting).
-</li>
-<li>
-Some checks are missing in the JIT-compiled code for obscure situations
-with <b>open upvalues aliasing</b> one of the SSA slots later on (or
-vice versa). Bonus points if you can find a real-world test case for
-this.
-</li>
-<li>
-Currently some <b>out-of-memory</b> errors from <b>on-trace code</b> are not
-handled correctly. The error may fall through an on-trace
-<tt>pcall</tt> or it may be passed on to the function set with
-<tt>lua_atpanic</tt> on x64. This issue will be fixed with the new
-garbage collector.
-</li>
-</ul>
-<br class="flush">
-</div>
-<div id="foot">
-<hr class="hide">
-Copyright &copy; 2005-2015 Mike Pall
-<span class="noprint">
-&middot;
-<a href="contact.html">Contact</a>
-</span>
-</div>
-</body>
-</html>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<title>Status &amp; Roadmap</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+<meta name="Author" content="Mike Pall">
+<meta name="Copyright" content="Copyright (C) 2005-2013, Mike Pall">
+<meta name="Language" content="en">
+<link rel="stylesheet" type="text/css" href="bluequad.css" media="screen">
+<link rel="stylesheet" type="text/css" href="bluequad-print.css" media="print">
+<style type="text/css">
+ul li { padding-bottom: 0.3em; }
+</style>
+</head>
+<body>
+<div id="site">
+<a href="http://luajit.org"><span>Lua<span id="logo">JIT</span></span></a>
+</div>
+<div id="head">
+<h1>Status &amp; Roadmap</h1>
+</div>
+<div id="nav">
+<ul><li>
+<a href="luajit.html">LuaJIT</a>
+<ul><li>
+<a href="http://luajit.org/download.html">Download <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="install.html">Installation</a>
+</li><li>
+<a href="running.html">Running</a>
+</li></ul>
+</li><li>
+<a href="extensions.html">Extensions</a>
+<ul><li>
+<a href="ext_ffi.html">FFI Library</a>
+<ul><li>
+<a href="ext_ffi_tutorial.html">FFI Tutorial</a>
+</li><li>
+<a href="ext_ffi_api.html">ffi.* API</a>
+</li><li>
+<a href="ext_ffi_semantics.html">FFI Semantics</a>
+</li></ul>
+</li><li>
+<a href="ext_jit.html">jit.* Library</a>
+</li><li>
+<a href="ext_c_api.html">Lua/C API</a>
+</li></ul>
+</li><li>
+<a class="current" href="status.html">Status</a>
+<ul><li>
+<a href="changes.html">Changes</a>
+</li></ul>
+</li><li>
+<a href="faq.html">FAQ</a>
+</li><li>
+<a href="http://luajit.org/performance.html">Performance <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://wiki.luajit.org/">Wiki <span class="ext">&raquo;</span></a>
+</li><li>
+<a href="http://luajit.org/list.html">Mailing List <span class="ext">&raquo;</span></a>
+</li></ul>
+</div>
+<div id="main">
+<p>
+<span style="color: #0000c0;">LuaJIT&nbsp;2.0</span> is the current
+<span style="color: #0000c0;">stable branch</span>. This branch is in
+feature-freeze &mdash; new features will only be added to LuaJIT&nbsp;2.1.
+</p>
+
+<h2>Current Status</h2>
+<p>
+LuaJIT ought to run all Lua&nbsp;5.1-compatible source code just fine.
+It's considered a serious bug if the VM crashes or produces unexpected
+results &mdash; please report this.
+</p>
+<p>
+Known incompatibilities and issues in LuaJIT&nbsp;2.0:
+</p>
+<ul>
+<li>
+There are some differences in <b>implementation-defined</b> behavior.
+These either have a good reason, are arbitrary design choices
+or are due to quirks in the VM. The latter cases may get fixed if a
+demonstrable need is shown.
+</li>
+<li>
+The Lua <b>debug API</b> is missing a couple of features (return
+hooks for non-Lua functions) and shows slightly different behavior
+in LuaJIT (no per-coroutine hooks, no tail call counting).
+</li>
+<li>
+Some checks are missing in the JIT-compiled code for obscure situations
+with <b>open upvalues aliasing</b> one of the SSA slots later on (or
+vice versa). Bonus points if you can find a real-world test case for
+this.
+</li>
+<li>
+Currently some <b>out-of-memory</b> errors from <b>on-trace code</b> are not
+handled correctly. The error may fall through an on-trace
+<tt>pcall</tt> or it may be passed on to the function set with
+<tt>lua_atpanic</tt> on x64. This issue will be fixed with the new
+garbage collector.
+</li>
+</ul>
+
+<h2>Roadmap</h2>
+<p>
+Please refer to the
+<a href="http://www.freelists.org/post/luajit/LuaJIT-Roadmap-20122013"><span class="ext">&raquo;</span>&nbsp;LuaJIT Roadmap 2012/2013</a> and an
+<a href="http://www.freelists.org/post/luajit/LuaJIT-Roadmap-20122013-UPDATE"><span class="ext">&raquo;</span>&nbsp;update on release planning</a> for details.
+</p>
+<br class="flush">
+</div>
+<div id="foot">
+<hr class="hide">
+Copyright &copy; 2005-2013 Mike Pall
+<span class="noprint">
+&middot;
+<a href="contact.html">Contact</a>
+</span>
+</div>
+</body>
+</html>
diff --git a/3rdparty/lua/dynasm/dasm_arm.h b/3rdparty/lua/dynasm/dasm_arm.h
index e5c21db..d83cbbb 100644
--- a/3rdparty/lua/dynasm/dasm_arm.h
+++ b/3rdparty/lua/dynasm/dasm_arm.h
@@ -1,456 +1,456 @@
-/*
-** DynASM ARM encoding engine.
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-** Released under the MIT license. See dynasm.lua for full copyright notice.
-*/
-
-#include <stddef.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdlib.h>
-
-#define DASM_ARCH "arm"
-
-#ifndef DASM_EXTERN
-#define DASM_EXTERN(a,b,c,d) 0
-#endif
-
-/* Action definitions. */
-enum {
- DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
- /* The following actions need a buffer position. */
- DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
- /* The following actions also have an argument. */
- DASM_REL_PC, DASM_LABEL_PC,
- DASM_IMM, DASM_IMM12, DASM_IMM16, DASM_IMML8, DASM_IMML12, DASM_IMMV8,
- DASM__MAX
-};
-
-/* Maximum number of section buffer positions for a single dasm_put() call. */
-#define DASM_MAXSECPOS 25
-
-/* DynASM encoder status codes. Action list offset or number are or'ed in. */
-#define DASM_S_OK 0x00000000
-#define DASM_S_NOMEM 0x01000000
-#define DASM_S_PHASE 0x02000000
-#define DASM_S_MATCH_SEC 0x03000000
-#define DASM_S_RANGE_I 0x11000000
-#define DASM_S_RANGE_SEC 0x12000000
-#define DASM_S_RANGE_LG 0x13000000
-#define DASM_S_RANGE_PC 0x14000000
-#define DASM_S_RANGE_REL 0x15000000
-#define DASM_S_UNDEF_LG 0x21000000
-#define DASM_S_UNDEF_PC 0x22000000
-
-/* Macros to convert positions (8 bit section + 24 bit index). */
-#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
-#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
-#define DASM_SEC2POS(sec) ((sec)<<24)
-#define DASM_POS2SEC(pos) ((pos)>>24)
-#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
-
-/* Action list type. */
-typedef const unsigned int *dasm_ActList;
-
-/* Per-section structure. */
-typedef struct dasm_Section {
- int *rbuf; /* Biased buffer pointer (negative section bias). */
- int *buf; /* True buffer pointer. */
- size_t bsize; /* Buffer size in bytes. */
- int pos; /* Biased buffer position. */
- int epos; /* End of biased buffer position - max single put. */
- int ofs; /* Byte offset into section. */
-} dasm_Section;
-
-/* Core structure holding the DynASM encoding state. */
-struct dasm_State {
- size_t psize; /* Allocated size of this structure. */
- dasm_ActList actionlist; /* Current actionlist pointer. */
- int *lglabels; /* Local/global chain/pos ptrs. */
- size_t lgsize;
- int *pclabels; /* PC label chains/pos ptrs. */
- size_t pcsize;
- void **globals; /* Array of globals (bias -10). */
- dasm_Section *section; /* Pointer to active section. */
- size_t codesize; /* Total size of all code sections. */
- int maxsection; /* 0 <= sectionidx < maxsection. */
- int status; /* Status code. */
- dasm_Section sections[1]; /* All sections. Alloc-extended. */
-};
-
-/* The size of the core structure depends on the max. number of sections. */
-#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
-
-
-/* Initialize DynASM state. */
-void dasm_init(Dst_DECL, int maxsection)
-{
- dasm_State *D;
- size_t psz = 0;
- int i;
- Dst_REF = NULL;
- DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
- D = Dst_REF;
- D->psize = psz;
- D->lglabels = NULL;
- D->lgsize = 0;
- D->pclabels = NULL;
- D->pcsize = 0;
- D->globals = NULL;
- D->maxsection = maxsection;
- for (i = 0; i < maxsection; i++) {
- D->sections[i].buf = NULL; /* Need this for pass3. */
- D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
- D->sections[i].bsize = 0;
- D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
- }
-}
-
-/* Free DynASM state. */
-void dasm_free(Dst_DECL)
-{
- dasm_State *D = Dst_REF;
- int i;
- for (i = 0; i < D->maxsection; i++)
- if (D->sections[i].buf)
- DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
- if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
- if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
- DASM_M_FREE(Dst, D, D->psize);
-}
-
-/* Setup global label array. Must be called before dasm_setup(). */
-void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
-{
- dasm_State *D = Dst_REF;
- D->globals = gl - 10; /* Negative bias to compensate for locals. */
- DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
-}
-
-/* Grow PC label array. Can be called after dasm_setup(), too. */
-void dasm_growpc(Dst_DECL, unsigned int maxpc)
-{
- dasm_State *D = Dst_REF;
- size_t osz = D->pcsize;
- DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
- memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
-}
-
-/* Setup encoder. */
-void dasm_setup(Dst_DECL, const void *actionlist)
-{
- dasm_State *D = Dst_REF;
- int i;
- D->actionlist = (dasm_ActList)actionlist;
- D->status = DASM_S_OK;
- D->section = &D->sections[0];
- memset((void *)D->lglabels, 0, D->lgsize);
- if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
- for (i = 0; i < D->maxsection; i++) {
- D->sections[i].pos = DASM_SEC2POS(i);
- D->sections[i].ofs = 0;
- }
-}
-
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) { \
- D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
-#define CKPL(kind, st) \
- do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
- D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
-#else
-#define CK(x, st) ((void)0)
-#define CKPL(kind, st) ((void)0)
-#endif
-
-static int dasm_imm12(unsigned int n)
-{
- int i;
- for (i = 0; i < 16; i++, n = (n << 2) | (n >> 30))
- if (n <= 255) return (int)(n + (i << 8));
- return -1;
-}
-
-/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
-void dasm_put(Dst_DECL, int start, ...)
-{
- va_list ap;
- dasm_State *D = Dst_REF;
- dasm_ActList p = D->actionlist + start;
- dasm_Section *sec = D->section;
- int pos = sec->pos, ofs = sec->ofs;
- int *b;
-
- if (pos >= sec->epos) {
- DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
- sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
- sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
- sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
- }
-
- b = sec->rbuf;
- b[pos++] = start;
-
- va_start(ap, start);
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16);
- if (action >= DASM__MAX) {
- ofs += 4;
- } else {
- int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
- switch (action) {
- case DASM_STOP: goto stop;
- case DASM_SECTION:
- n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
- D->section = &D->sections[n]; goto stop;
- case DASM_ESC: p++; ofs += 4; break;
- case DASM_REL_EXT: break;
- case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
- case DASM_REL_LG:
- n = (ins & 2047) - 10; pl = D->lglabels + n;
- /* Bkwd rel or global. */
- if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
- pl += 10; n = *pl;
- if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
- goto linkrel;
- case DASM_REL_PC:
- pl = D->pclabels + n; CKPL(pc, PC);
- putrel:
- n = *pl;
- if (n < 0) { /* Label exists. Get label pos and store it. */
- b[pos] = -n;
- } else {
- linkrel:
- b[pos] = n; /* Else link to rel chain, anchored at label. */
- *pl = pos;
- }
- pos++;
- break;
- case DASM_LABEL_LG:
- pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
- case DASM_LABEL_PC:
- pl = D->pclabels + n; CKPL(pc, PC);
- putlabel:
- n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
- }
- *pl = -pos; /* Label exists now. */
- b[pos++] = ofs; /* Store pass1 offset estimate. */
- break;
- case DASM_IMM:
- case DASM_IMM16:
-#ifdef DASM_CHECKS
- CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
- if ((ins & 0x8000))
- CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
- else
- CK((n>>((ins>>5)&31)) == 0, RANGE_I);
-#endif
- b[pos++] = n;
- break;
- case DASM_IMMV8:
- CK((n & 3) == 0, RANGE_I);
- n >>= 2;
- case DASM_IMML8:
- case DASM_IMML12:
- CK(n >= 0 ? ((n>>((ins>>5)&31)) == 0) :
- (((-n)>>((ins>>5)&31)) == 0), RANGE_I);
- b[pos++] = n;
- break;
- case DASM_IMM12:
- CK(dasm_imm12((unsigned int)n) != -1, RANGE_I);
- b[pos++] = n;
- break;
- }
- }
- }
-stop:
- va_end(ap);
- sec->pos = pos;
- sec->ofs = ofs;
-}
-#undef CK
-
-/* Pass 2: Link sections, shrink aligns, fix label offsets. */
-int dasm_link(Dst_DECL, size_t *szp)
-{
- dasm_State *D = Dst_REF;
- int secnum;
- int ofs = 0;
-
-#ifdef DASM_CHECKS
- *szp = 0;
- if (D->status != DASM_S_OK) return D->status;
- {
- int pc;
- for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
- if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
- }
-#endif
-
- { /* Handle globals not defined in this translation unit. */
- int idx;
- for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
- int n = D->lglabels[idx];
- /* Undefined label: Collapse rel chain and replace with marker (< 0). */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
- }
- }
-
- /* Combine all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->rbuf;
- int pos = DASM_SEC2POS(secnum);
- int lastpos = sec->pos;
-
- while (pos != lastpos) {
- dasm_ActList p = D->actionlist + b[pos++];
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16);
- switch (action) {
- case DASM_STOP: case DASM_SECTION: goto stop;
- case DASM_ESC: p++; break;
- case DASM_REL_EXT: break;
- case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
- case DASM_REL_LG: case DASM_REL_PC: pos++; break;
- case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
- case DASM_IMM: case DASM_IMM12: case DASM_IMM16:
- case DASM_IMML8: case DASM_IMML12: case DASM_IMMV8: pos++; break;
- }
- }
- stop: (void)0;
- }
- ofs += sec->ofs; /* Next section starts right after current section. */
- }
-
- D->codesize = ofs; /* Total size of all code sections */
- *szp = ofs;
- return DASM_S_OK;
-}
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
-#else
-#define CK(x, st) ((void)0)
-#endif
-
-/* Pass 3: Encode sections. */
-int dasm_encode(Dst_DECL, void *buffer)
-{
- dasm_State *D = Dst_REF;
- char *base = (char *)buffer;
- unsigned int *cp = (unsigned int *)buffer;
- int secnum;
-
- /* Encode all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->buf;
- int *endb = sec->rbuf + sec->pos;
-
- while (b != endb) {
- dasm_ActList p = D->actionlist + *b++;
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16);
- int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
- switch (action) {
- case DASM_STOP: case DASM_SECTION: goto stop;
- case DASM_ESC: *cp++ = *p++; break;
- case DASM_REL_EXT:
- n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048));
- goto patchrel;
- case DASM_ALIGN:
- ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000;
- break;
- case DASM_REL_LG:
- CK(n >= 0, UNDEF_LG);
- case DASM_REL_PC:
- CK(n >= 0, UNDEF_PC);
- n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) - 4;
- patchrel:
- if ((ins & 0x800) == 0) {
- CK((n & 3) == 0 && ((n+0x02000000) >> 26) == 0, RANGE_REL);
- cp[-1] |= ((n >> 2) & 0x00ffffff);
- } else if ((ins & 0x1000)) {
- CK((n & 3) == 0 && -256 <= n && n <= 256, RANGE_REL);
- goto patchimml8;
- } else if ((ins & 0x2000) == 0) {
- CK((n & 3) == 0 && -4096 <= n && n <= 4096, RANGE_REL);
- goto patchimml;
- } else {
- CK((n & 3) == 0 && -1020 <= n && n <= 1020, RANGE_REL);
- n >>= 2;
- goto patchimml;
- }
- break;
- case DASM_LABEL_LG:
- ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
- break;
- case DASM_LABEL_PC: break;
- case DASM_IMM:
- cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31);
- break;
- case DASM_IMM12:
- cp[-1] |= dasm_imm12((unsigned int)n);
- break;
- case DASM_IMM16:
- cp[-1] |= ((n & 0xf000) << 4) | (n & 0x0fff);
- break;
- case DASM_IMML8: patchimml8:
- cp[-1] |= n >= 0 ? (0x00800000 | (n & 0x0f) | ((n & 0xf0) << 4)) :
- ((-n & 0x0f) | ((-n & 0xf0) << 4));
- break;
- case DASM_IMML12: case DASM_IMMV8: patchimml:
- cp[-1] |= n >= 0 ? (0x00800000 | n) : (-n);
- break;
- default: *cp++ = ins; break;
- }
- }
- stop: (void)0;
- }
- }
-
- if (base + D->codesize != (char *)cp) /* Check for phase errors. */
- return DASM_S_PHASE;
- return DASM_S_OK;
-}
-#undef CK
-
-/* Get PC label offset. */
-int dasm_getpclabel(Dst_DECL, unsigned int pc)
-{
- dasm_State *D = Dst_REF;
- if (pc*sizeof(int) < D->pcsize) {
- int pos = D->pclabels[pc];
- if (pos < 0) return *DASM_POS2PTR(D, -pos);
- if (pos > 0) return -1; /* Undefined. */
- }
- return -2; /* Unused or out of range. */
-}
-
-#ifdef DASM_CHECKS
-/* Optional sanity checker to call between isolated encoding steps. */
-int dasm_checkstep(Dst_DECL, int secmatch)
-{
- dasm_State *D = Dst_REF;
- if (D->status == DASM_S_OK) {
- int i;
- for (i = 1; i <= 9; i++) {
- if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
- D->lglabels[i] = 0;
- }
- }
- if (D->status == DASM_S_OK && secmatch >= 0 &&
- D->section != &D->sections[secmatch])
- D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
- return D->status;
-}
-#endif
-
+/*
+** DynASM ARM encoding engine.
+** Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "arm"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. */
+enum {
+ DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
+ /* The following actions need a buffer position. */
+ DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
+ /* The following actions also have an argument. */
+ DASM_REL_PC, DASM_LABEL_PC,
+ DASM_IMM, DASM_IMM12, DASM_IMM16, DASM_IMML8, DASM_IMML12, DASM_IMMV8,
+ DASM__MAX
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_REL 0x15000000
+#define DASM_S_UNDEF_LG 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
+
+/* Action list type. */
+typedef const unsigned int *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+static int dasm_imm12(unsigned int n)
+{
+ int i;
+ for (i = 0; i < 16; i++, n = (n << 2) | (n >> 30))
+ if (n <= 255) return (int)(n + (i << 8));
+ return -1;
+}
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ if (action >= DASM__MAX) {
+ ofs += 4;
+ } else {
+ int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
+ switch (action) {
+ case DASM_STOP: goto stop;
+ case DASM_SECTION:
+ n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
+ D->section = &D->sections[n]; goto stop;
+ case DASM_ESC: p++; ofs += 4; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
+ case DASM_REL_LG:
+ n = (ins & 2047) - 10; pl = D->lglabels + n;
+ /* Bkwd rel or global. */
+ if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
+ pl += 10; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ break;
+ case DASM_LABEL_LG:
+ pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
+ }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_IMM:
+ case DASM_IMM16:
+#ifdef DASM_CHECKS
+ CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
+ if ((ins & 0x8000))
+ CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
+ else
+ CK((n>>((ins>>5)&31)) == 0, RANGE_I);
+#endif
+ b[pos++] = n;
+ break;
+ case DASM_IMMV8:
+ CK((n & 3) == 0, RANGE_I);
+ n >>= 2;
+ case DASM_IMML8:
+ case DASM_IMML12:
+ CK(n >= 0 ? ((n>>((ins>>5)&31)) == 0) :
+ (((-n)>>((ins>>5)&31)) == 0), RANGE_I);
+ b[pos++] = n;
+ break;
+ case DASM_IMM12:
+ CK(dasm_imm12((unsigned int)n) != -1, RANGE_I);
+ b[pos++] = n;
+ break;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: p++; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
+ case DASM_REL_LG: case DASM_REL_PC: pos++; break;
+ case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
+ case DASM_IMM: case DASM_IMM12: case DASM_IMM16:
+ case DASM_IMML8: case DASM_IMML12: case DASM_IMMV8: pos++; break;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
+#else
+#define CK(x, st) ((void)0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ char *base = (char *)buffer;
+ unsigned int *cp = (unsigned int *)buffer;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: *cp++ = *p++; break;
+ case DASM_REL_EXT:
+ n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins&2047), !(ins&2048));
+ goto patchrel;
+ case DASM_ALIGN:
+ ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0xe1a00000;
+ break;
+ case DASM_REL_LG:
+ CK(n >= 0, UNDEF_LG);
+ case DASM_REL_PC:
+ CK(n >= 0, UNDEF_PC);
+ n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base) - 4;
+ patchrel:
+ if ((ins & 0x800) == 0) {
+ CK((n & 3) == 0 && ((n+0x02000000) >> 26) == 0, RANGE_REL);
+ cp[-1] |= ((n >> 2) & 0x00ffffff);
+ } else if ((ins & 0x1000)) {
+ CK((n & 3) == 0 && -256 <= n && n <= 256, RANGE_REL);
+ goto patchimml8;
+ } else if ((ins & 0x2000) == 0) {
+ CK((n & 3) == 0 && -4096 <= n && n <= 4096, RANGE_REL);
+ goto patchimml;
+ } else {
+ CK((n & 3) == 0 && -1020 <= n && n <= 1020, RANGE_REL);
+ n >>= 2;
+ goto patchimml;
+ }
+ break;
+ case DASM_LABEL_LG:
+ ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
+ break;
+ case DASM_LABEL_PC: break;
+ case DASM_IMM:
+ cp[-1] |= ((n>>((ins>>10)&31)) & ((1<<((ins>>5)&31))-1)) << (ins&31);
+ break;
+ case DASM_IMM12:
+ cp[-1] |= dasm_imm12((unsigned int)n);
+ break;
+ case DASM_IMM16:
+ cp[-1] |= ((n & 0xf000) << 4) | (n & 0x0fff);
+ break;
+ case DASM_IMML8: patchimml8:
+ cp[-1] |= n >= 0 ? (0x00800000 | (n & 0x0f) | ((n & 0xf0) << 4)) :
+ ((-n & 0x0f) | ((-n & 0xf0) << 4));
+ break;
+ case DASM_IMML12: case DASM_IMMV8: patchimml:
+ cp[-1] |= n >= 0 ? (0x00800000 | n) : (-n);
+ break;
+ default: *cp++ = ins; break;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != (char *)cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+#undef CK
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
+ return D->status;
+}
+#endif
+
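For orientation, the functions re-added above form the usual three-pass DynASM runtime: dasm_put() records actions and arguments per section (pass 1), dasm_link() resolves label offsets and computes the total code size (pass 2), and dasm_encode() writes the final ARM machine words (pass 3). The sketch below is illustrative only and not part of this changeset; it assumes the standard Dst_DECL/Dst glue macros from dasm_proto.h, a DASM_MAXSECTION constant and an actions[] array produced by the dynasm.lua preprocessor, plus a hypothetical alloc_exec_mem() helper that returns an executable buffer.

#include <stddef.h>
#include "dasm_proto.h"   /* Declares dasm_init/setup/link/encode/free. */
#include "dasm_arm.h"     /* The pass 1-3 implementation shown above. */

/* Hypothetical helper: map a writable+executable buffer of at least sz bytes. */
extern void *alloc_exec_mem(size_t sz);

static void *assemble(dasm_State **Dst, const unsigned int *actions)
{
  size_t sz;
  void *code;
  dasm_init(Dst, DASM_MAXSECTION);   /* Allocate state and section buffers. */
  dasm_setup(Dst, actions);          /* Bind the preprocessor's action list. */
  /* The |-lines of the .dasc source expand to dasm_put(Dst, ...) calls here
  ** (pass 1: buffer positions, label chains, immediate range checks). */
  if (dasm_link(Dst, &sz) != DASM_S_OK) return NULL;       /* Pass 2: sizes, labels. */
  code = alloc_exec_mem(sz);
  if (code == NULL || dasm_encode(Dst, code) != DASM_S_OK)  /* Pass 3: emit words. */
    return NULL;
  dasm_free(Dst);
  return code;   /* Entry point offsets can be queried via dasm_getpclabel(). */
}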
diff --git a/3rdparty/lua/dynasm/dasm_arm.lua b/3rdparty/lua/dynasm/dasm_arm.lua
index f3913a5..9720e57 100644
--- a/3rdparty/lua/dynasm/dasm_arm.lua
+++ b/3rdparty/lua/dynasm/dasm_arm.lua
@@ -1,1125 +1,1122 @@
-------------------------------------------------------------------------------
--- DynASM ARM module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- See dynasm.lua for full copyright notice.
-------------------------------------------------------------------------------
-
--- Module information:
-local _info = {
- arch = "arm",
- description = "DynASM ARM module",
- version = "1.3.0",
- vernum = 10300,
- release = "2011-05-05",
- author = "Mike Pall",
- license = "MIT",
-}
-
--- Exported glue functions for the arch-specific module.
-local _M = { _info = _info }
-
--- Cache library functions.
-local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
-local assert, setmetatable, rawget = assert, setmetatable, rawget
-local _s = string
-local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
-local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub
-local concat, sort, insert = table.concat, table.sort, table.insert
-local bit = bit or require("bit")
-local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift
-local ror, tohex = bit.ror, bit.tohex
-
--- Inherited tables and callbacks.
-local g_opt, g_arch
-local wline, werror, wfatal, wwarn
-
--- Action name list.
--- CHECK: Keep this in sync with the C code!
-local action_names = {
- "STOP", "SECTION", "ESC", "REL_EXT",
- "ALIGN", "REL_LG", "LABEL_LG",
- "REL_PC", "LABEL_PC", "IMM", "IMM12", "IMM16", "IMML8", "IMML12", "IMMV8",
-}
-
--- Maximum number of section buffer positions for dasm_put().
--- CHECK: Keep this in sync with the C code!
-local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
-
--- Action name -> action number.
-local map_action = {}
-for n,name in ipairs(action_names) do
- map_action[name] = n-1
-end
-
--- Action list buffer.
-local actlist = {}
-
--- Argument list for next dasm_put(). Start with offset 0 into action list.
-local actargs = { 0 }
-
--- Current number of section buffer positions for dasm_put().
-local secpos = 1
-
-------------------------------------------------------------------------------
-
--- Dump action names and numbers.
-local function dumpactions(out)
- out:write("DynASM encoding engine action codes:\n")
- for n,name in ipairs(action_names) do
- local num = map_action[name]
- out:write(format(" %-10s %02X %d\n", name, num, num))
- end
- out:write("\n")
-end
-
--- Write action list buffer as a huge static C array.
-local function writeactions(out, name)
- local nn = #actlist
- if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
- out:write("static const unsigned int ", name, "[", nn, "] = {\n")
- for i = 1,nn-1 do
- assert(out:write("0x", tohex(actlist[i]), ",\n"))
- end
- assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
-end
-
-------------------------------------------------------------------------------
-
--- Add word to action list.
-local function wputxw(n)
- assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
- actlist[#actlist+1] = n
-end
-
--- Add action to list with optional arg. Advance buffer pos, too.
-local function waction(action, val, a, num)
- local w = assert(map_action[action], "bad action name `"..action.."'")
- wputxw(w * 0x10000 + (val or 0))
- if a then actargs[#actargs+1] = a end
- if a or num then secpos = secpos + (num or 1) end
-end
-
--- Flush action list (intervening C code or buffer pos overflow).
-local function wflush(term)
- if #actlist == actargs[1] then return end -- Nothing to flush.
- if not term then waction("STOP") end -- Terminate action list.
- wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
- actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
- secpos = 1 -- The actionlist offset occupies a buffer position, too.
-end
-
--- Put escaped word.
-local function wputw(n)
- if n <= 0x000fffff then waction("ESC") end
- wputxw(n)
-end
-
--- Reserve position for word.
-local function wpos()
- local pos = #actlist+1
- actlist[pos] = ""
- return pos
-end
-
--- Store word to reserved position.
-local function wputpos(pos, n)
- assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
- if n <= 0x000fffff then
- insert(actlist, pos+1, n)
- n = map_action.ESC * 0x10000
- end
- actlist[pos] = n
-end
-
-------------------------------------------------------------------------------
-
--- Global label name -> global label number. With auto assignment on 1st use.
-local next_global = 20
-local map_global = setmetatable({}, { __index = function(t, name)
- if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
- local n = next_global
- if n > 2047 then werror("too many global labels") end
- next_global = n + 1
- t[name] = n
- return n
-end})
-
--- Dump global labels.
-local function dumpglobals(out, lvl)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("Global labels:\n")
- for i=20,next_global-1 do
- out:write(format(" %s\n", t[i]))
- end
- out:write("\n")
-end
-
--- Write global label enum.
-local function writeglobals(out, prefix)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("enum {\n")
- for i=20,next_global-1 do
- out:write(" ", prefix, t[i], ",\n")
- end
- out:write(" ", prefix, "_MAX\n};\n")
-end
-
--- Write global label names.
-local function writeglobalnames(out, name)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("static const char *const ", name, "[] = {\n")
- for i=20,next_global-1 do
- out:write(" \"", t[i], "\",\n")
- end
- out:write(" (const char *)0\n};\n")
-end
-
-------------------------------------------------------------------------------
-
--- Extern label name -> extern label number. With auto assignment on 1st use.
-local next_extern = 0
-local map_extern_ = {}
-local map_extern = setmetatable({}, { __index = function(t, name)
- -- No restrictions on the name for now.
- local n = next_extern
- if n > 2047 then werror("too many extern labels") end
- next_extern = n + 1
- t[name] = n
- map_extern_[n] = name
- return n
-end})
-
--- Dump extern labels.
-local function dumpexterns(out, lvl)
- out:write("Extern labels:\n")
- for i=0,next_extern-1 do
- out:write(format(" %s\n", map_extern_[i]))
- end
- out:write("\n")
-end
-
--- Write extern label names.
-local function writeexternnames(out, name)
- out:write("static const char *const ", name, "[] = {\n")
- for i=0,next_extern-1 do
- out:write(" \"", map_extern_[i], "\",\n")
- end
- out:write(" (const char *)0\n};\n")
-end
-
-------------------------------------------------------------------------------
-
--- Arch-specific maps.
-
--- Ext. register name -> int. name.
-local map_archdef = { sp = "r13", lr = "r14", pc = "r15", }
-
--- Int. register name -> ext. name.
-local map_reg_rev = { r13 = "sp", r14 = "lr", r15 = "pc", }
-
-local map_type = {} -- Type name -> { ctype, reg }
-local ctypenum = 0 -- Type number (for Dt... macros).
-
--- Reverse defines for registers.
-function _M.revdef(s)
- return map_reg_rev[s] or s
-end
-
-local map_shift = { lsl = 0, lsr = 1, asr = 2, ror = 3, }
-
-local map_cond = {
- eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7,
- hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14,
- hs = 2, lo = 3,
-}
-
-------------------------------------------------------------------------------
-
--- Template strings for ARM instructions.
-local map_op = {
- -- Basic data processing instructions.
- and_3 = "e0000000DNPs",
- eor_3 = "e0200000DNPs",
- sub_3 = "e0400000DNPs",
- rsb_3 = "e0600000DNPs",
- add_3 = "e0800000DNPs",
- adc_3 = "e0a00000DNPs",
- sbc_3 = "e0c00000DNPs",
- rsc_3 = "e0e00000DNPs",
- tst_2 = "e1100000NP",
- teq_2 = "e1300000NP",
- cmp_2 = "e1500000NP",
- cmn_2 = "e1700000NP",
- orr_3 = "e1800000DNPs",
- mov_2 = "e1a00000DPs",
- bic_3 = "e1c00000DNPs",
- mvn_2 = "e1e00000DPs",
-
- and_4 = "e0000000DNMps",
- eor_4 = "e0200000DNMps",
- sub_4 = "e0400000DNMps",
- rsb_4 = "e0600000DNMps",
- add_4 = "e0800000DNMps",
- adc_4 = "e0a00000DNMps",
- sbc_4 = "e0c00000DNMps",
- rsc_4 = "e0e00000DNMps",
- tst_3 = "e1100000NMp",
- teq_3 = "e1300000NMp",
- cmp_3 = "e1500000NMp",
- cmn_3 = "e1700000NMp",
- orr_4 = "e1800000DNMps",
- mov_3 = "e1a00000DMps",
- bic_4 = "e1c00000DNMps",
- mvn_3 = "e1e00000DMps",
-
- lsl_3 = "e1a00000DMws",
- lsr_3 = "e1a00020DMws",
- asr_3 = "e1a00040DMws",
- ror_3 = "e1a00060DMws",
- rrx_2 = "e1a00060DMs",
-
- -- Multiply and multiply-accumulate.
- mul_3 = "e0000090NMSs",
- mla_4 = "e0200090NMSDs",
- umaal_4 = "e0400090DNMSs", -- v6
- mls_4 = "e0600090DNMSs", -- v6T2
- umull_4 = "e0800090DNMSs",
- umlal_4 = "e0a00090DNMSs",
- smull_4 = "e0c00090DNMSs",
- smlal_4 = "e0e00090DNMSs",
-
- -- Halfword multiply and multiply-accumulate.
- smlabb_4 = "e1000080NMSD", -- v5TE
- smlatb_4 = "e10000a0NMSD", -- v5TE
- smlabt_4 = "e10000c0NMSD", -- v5TE
- smlatt_4 = "e10000e0NMSD", -- v5TE
- smlawb_4 = "e1200080NMSD", -- v5TE
- smulwb_3 = "e12000a0NMS", -- v5TE
- smlawt_4 = "e12000c0NMSD", -- v5TE
- smulwt_3 = "e12000e0NMS", -- v5TE
- smlalbb_4 = "e1400080NMSD", -- v5TE
- smlaltb_4 = "e14000a0NMSD", -- v5TE
- smlalbt_4 = "e14000c0NMSD", -- v5TE
- smlaltt_4 = "e14000e0NMSD", -- v5TE
- smulbb_3 = "e1600080NMS", -- v5TE
- smultb_3 = "e16000a0NMS", -- v5TE
- smulbt_3 = "e16000c0NMS", -- v5TE
- smultt_3 = "e16000e0NMS", -- v5TE
-
- -- Miscellaneous data processing instructions.
- clz_2 = "e16f0f10DM", -- v5T
- rev_2 = "e6bf0f30DM", -- v6
- rev16_2 = "e6bf0fb0DM", -- v6
- revsh_2 = "e6ff0fb0DM", -- v6
- sel_3 = "e6800fb0DNM", -- v6
- usad8_3 = "e780f010NMS", -- v6
- usada8_4 = "e7800010NMSD", -- v6
- rbit_2 = "e6ff0f30DM", -- v6T2
- movw_2 = "e3000000DW", -- v6T2
- movt_2 = "e3400000DW", -- v6T2
- -- Note: the X encodes width-1, not width.
- sbfx_4 = "e7a00050DMvX", -- v6T2
- ubfx_4 = "e7e00050DMvX", -- v6T2
- -- Note: the X encodes the msb field, not the width.
- bfc_3 = "e7c0001fDvX", -- v6T2
- bfi_4 = "e7c00010DMvX", -- v6T2
-
- -- Packing and unpacking instructions.
- pkhbt_3 = "e6800010DNM", pkhbt_4 = "e6800010DNMv", -- v6
- pkhtb_3 = "e6800050DNM", pkhtb_4 = "e6800050DNMv", -- v6
- sxtab_3 = "e6a00070DNM", sxtab_4 = "e6a00070DNMv", -- v6
- sxtab16_3 = "e6800070DNM", sxtab16_4 = "e6800070DNMv", -- v6
- sxtah_3 = "e6b00070DNM", sxtah_4 = "e6b00070DNMv", -- v6
- sxtb_2 = "e6af0070DM", sxtb_3 = "e6af0070DMv", -- v6
- sxtb16_2 = "e68f0070DM", sxtb16_3 = "e68f0070DMv", -- v6
- sxth_2 = "e6bf0070DM", sxth_3 = "e6bf0070DMv", -- v6
- uxtab_3 = "e6e00070DNM", uxtab_4 = "e6e00070DNMv", -- v6
- uxtab16_3 = "e6c00070DNM", uxtab16_4 = "e6c00070DNMv", -- v6
- uxtah_3 = "e6f00070DNM", uxtah_4 = "e6f00070DNMv", -- v6
- uxtb_2 = "e6ef0070DM", uxtb_3 = "e6ef0070DMv", -- v6
- uxtb16_2 = "e6cf0070DM", uxtb16_3 = "e6cf0070DMv", -- v6
- uxth_2 = "e6ff0070DM", uxth_3 = "e6ff0070DMv", -- v6
-
- -- Saturating instructions.
- qadd_3 = "e1000050DMN", -- v5TE
- qsub_3 = "e1200050DMN", -- v5TE
- qdadd_3 = "e1400050DMN", -- v5TE
- qdsub_3 = "e1600050DMN", -- v5TE
- -- Note: the X for ssat* encodes sat_imm-1, not sat_imm.
- ssat_3 = "e6a00010DXM", ssat_4 = "e6a00010DXMp", -- v6
- usat_3 = "e6e00010DXM", usat_4 = "e6e00010DXMp", -- v6
- ssat16_3 = "e6a00f30DXM", -- v6
- usat16_3 = "e6e00f30DXM", -- v6
-
- -- Parallel addition and subtraction.
- sadd16_3 = "e6100f10DNM", -- v6
- sasx_3 = "e6100f30DNM", -- v6
- ssax_3 = "e6100f50DNM", -- v6
- ssub16_3 = "e6100f70DNM", -- v6
- sadd8_3 = "e6100f90DNM", -- v6
- ssub8_3 = "e6100ff0DNM", -- v6
- qadd16_3 = "e6200f10DNM", -- v6
- qasx_3 = "e6200f30DNM", -- v6
- qsax_3 = "e6200f50DNM", -- v6
- qsub16_3 = "e6200f70DNM", -- v6
- qadd8_3 = "e6200f90DNM", -- v6
- qsub8_3 = "e6200ff0DNM", -- v6
- shadd16_3 = "e6300f10DNM", -- v6
- shasx_3 = "e6300f30DNM", -- v6
- shsax_3 = "e6300f50DNM", -- v6
- shsub16_3 = "e6300f70DNM", -- v6
- shadd8_3 = "e6300f90DNM", -- v6
- shsub8_3 = "e6300ff0DNM", -- v6
- uadd16_3 = "e6500f10DNM", -- v6
- uasx_3 = "e6500f30DNM", -- v6
- usax_3 = "e6500f50DNM", -- v6
- usub16_3 = "e6500f70DNM", -- v6
- uadd8_3 = "e6500f90DNM", -- v6
- usub8_3 = "e6500ff0DNM", -- v6
- uqadd16_3 = "e6600f10DNM", -- v6
- uqasx_3 = "e6600f30DNM", -- v6
- uqsax_3 = "e6600f50DNM", -- v6
- uqsub16_3 = "e6600f70DNM", -- v6
- uqadd8_3 = "e6600f90DNM", -- v6
- uqsub8_3 = "e6600ff0DNM", -- v6
- uhadd16_3 = "e6700f10DNM", -- v6
- uhasx_3 = "e6700f30DNM", -- v6
- uhsax_3 = "e6700f50DNM", -- v6
- uhsub16_3 = "e6700f70DNM", -- v6
- uhadd8_3 = "e6700f90DNM", -- v6
- uhsub8_3 = "e6700ff0DNM", -- v6
-
- -- Load/store instructions.
- str_2 = "e4000000DL", str_3 = "e4000000DL", str_4 = "e4000000DL",
- strb_2 = "e4400000DL", strb_3 = "e4400000DL", strb_4 = "e4400000DL",
- ldr_2 = "e4100000DL", ldr_3 = "e4100000DL", ldr_4 = "e4100000DL",
- ldrb_2 = "e4500000DL", ldrb_3 = "e4500000DL", ldrb_4 = "e4500000DL",
- strh_2 = "e00000b0DL", strh_3 = "e00000b0DL",
- ldrh_2 = "e01000b0DL", ldrh_3 = "e01000b0DL",
- ldrd_2 = "e00000d0DL", ldrd_3 = "e00000d0DL", -- v5TE
- ldrsb_2 = "e01000d0DL", ldrsb_3 = "e01000d0DL",
- strd_2 = "e00000f0DL", strd_3 = "e00000f0DL", -- v5TE
- ldrsh_2 = "e01000f0DL", ldrsh_3 = "e01000f0DL",
-
- ldm_2 = "e8900000oR", ldmia_2 = "e8900000oR", ldmfd_2 = "e8900000oR",
- ldmda_2 = "e8100000oR", ldmfa_2 = "e8100000oR",
- ldmdb_2 = "e9100000oR", ldmea_2 = "e9100000oR",
- ldmib_2 = "e9900000oR", ldmed_2 = "e9900000oR",
- stm_2 = "e8800000oR", stmia_2 = "e8800000oR", stmfd_2 = "e8800000oR",
- stmda_2 = "e8000000oR", stmfa_2 = "e8000000oR",
- stmdb_2 = "e9000000oR", stmea_2 = "e9000000oR",
- stmib_2 = "e9800000oR", stmed_2 = "e9800000oR",
- pop_1 = "e8bd0000R", push_1 = "e92d0000R",
-
- -- Branch instructions.
- b_1 = "ea000000B",
- bl_1 = "eb000000B",
- blx_1 = "e12fff30C",
- bx_1 = "e12fff10M",
-
- -- Miscellaneous instructions.
- nop_0 = "e1a00000",
- mrs_1 = "e10f0000D",
- bkpt_1 = "e1200070K", -- v5T
- svc_1 = "ef000000T", swi_1 = "ef000000T",
- ud_0 = "e7f001f0",
-
- -- VFP instructions.
- ["vadd.f32_3"] = "ee300a00dnm",
- ["vadd.f64_3"] = "ee300b00Gdnm",
- ["vsub.f32_3"] = "ee300a40dnm",
- ["vsub.f64_3"] = "ee300b40Gdnm",
- ["vmul.f32_3"] = "ee200a00dnm",
- ["vmul.f64_3"] = "ee200b00Gdnm",
- ["vnmul.f32_3"] = "ee200a40dnm",
- ["vnmul.f64_3"] = "ee200b40Gdnm",
- ["vmla.f32_3"] = "ee000a00dnm",
- ["vmla.f64_3"] = "ee000b00Gdnm",
- ["vmls.f32_3"] = "ee000a40dnm",
- ["vmls.f64_3"] = "ee000b40Gdnm",
- ["vnmla.f32_3"] = "ee100a40dnm",
- ["vnmla.f64_3"] = "ee100b40Gdnm",
- ["vnmls.f32_3"] = "ee100a00dnm",
- ["vnmls.f64_3"] = "ee100b00Gdnm",
- ["vdiv.f32_3"] = "ee800a00dnm",
- ["vdiv.f64_3"] = "ee800b00Gdnm",
-
- ["vabs.f32_2"] = "eeb00ac0dm",
- ["vabs.f64_2"] = "eeb00bc0Gdm",
- ["vneg.f32_2"] = "eeb10a40dm",
- ["vneg.f64_2"] = "eeb10b40Gdm",
- ["vsqrt.f32_2"] = "eeb10ac0dm",
- ["vsqrt.f64_2"] = "eeb10bc0Gdm",
- ["vcmp.f32_2"] = "eeb40a40dm",
- ["vcmp.f64_2"] = "eeb40b40Gdm",
- ["vcmpe.f32_2"] = "eeb40ac0dm",
- ["vcmpe.f64_2"] = "eeb40bc0Gdm",
- ["vcmpz.f32_1"] = "eeb50a40d",
- ["vcmpz.f64_1"] = "eeb50b40Gd",
- ["vcmpze.f32_1"] = "eeb50ac0d",
- ["vcmpze.f64_1"] = "eeb50bc0Gd",
-
- vldr_2 = "ed100a00dl|ed100b00Gdl",
- vstr_2 = "ed000a00dl|ed000b00Gdl",
- vldm_2 = "ec900a00or",
- vldmia_2 = "ec900a00or",
- vldmdb_2 = "ed100a00or",
- vpop_1 = "ecbd0a00r",
- vstm_2 = "ec800a00or",
- vstmia_2 = "ec800a00or",
- vstmdb_2 = "ed000a00or",
- vpush_1 = "ed2d0a00r",
-
- ["vmov.f32_2"] = "eeb00a40dm|eeb00a00dY", -- #imm is VFPv3 only
- ["vmov.f64_2"] = "eeb00b40Gdm|eeb00b00GdY", -- #imm is VFPv3 only
- vmov_2 = "ee100a10Dn|ee000a10nD",
- vmov_3 = "ec500a10DNm|ec400a10mDN|ec500b10GDNm|ec400b10GmDN",
-
- vmrs_0 = "eef1fa10",
- vmrs_1 = "eef10a10D",
- vmsr_1 = "eee10a10D",
-
- ["vcvt.s32.f32_2"] = "eebd0ac0dm",
- ["vcvt.s32.f64_2"] = "eebd0bc0dGm",
- ["vcvt.u32.f32_2"] = "eebc0ac0dm",
- ["vcvt.u32.f64_2"] = "eebc0bc0dGm",
- ["vcvtr.s32.f32_2"] = "eebd0a40dm",
- ["vcvtr.s32.f64_2"] = "eebd0b40dGm",
- ["vcvtr.u32.f32_2"] = "eebc0a40dm",
- ["vcvtr.u32.f64_2"] = "eebc0b40dGm",
- ["vcvt.f32.s32_2"] = "eeb80ac0dm",
- ["vcvt.f64.s32_2"] = "eeb80bc0GdFm",
- ["vcvt.f32.u32_2"] = "eeb80a40dm",
- ["vcvt.f64.u32_2"] = "eeb80b40GdFm",
- ["vcvt.f32.f64_2"] = "eeb70bc0dGm",
- ["vcvt.f64.f32_2"] = "eeb70ac0GdFm",
-
- -- VFPv4 only:
- ["vfma.f32_3"] = "eea00a00dnm",
- ["vfma.f64_3"] = "eea00b00Gdnm",
- ["vfms.f32_3"] = "eea00a40dnm",
- ["vfms.f64_3"] = "eea00b40Gdnm",
- ["vfnma.f32_3"] = "ee900a40dnm",
- ["vfnma.f64_3"] = "ee900b40Gdnm",
- ["vfnms.f32_3"] = "ee900a00dnm",
- ["vfnms.f64_3"] = "ee900b00Gdnm",
-
- -- NYI: Advanced SIMD instructions.
-
- -- NYI: I have no need for these instructions right now:
- -- swp, swpb, strex, ldrex, strexd, ldrexd, strexb, ldrexb, strexh, ldrexh
- -- msr, nopv6, yield, wfe, wfi, sev, dbg, bxj, smc, srs, rfe
- -- cps, setend, pli, pld, pldw, clrex, dsb, dmb, isb
- -- stc, ldc, mcr, mcr2, mrc, mrc2, mcrr, mcrr2, mrrc, mrrc2, cdp, cdp2
-}
-
--- Add mnemonics for "s" variants.
-do
- local t = {}
- for k,v in pairs(map_op) do
- if sub(v, -1) == "s" then
- local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2)
- t[sub(k, 1, -3).."s"..sub(k, -2)] = v2
- end
- end
- for k,v in pairs(t) do
- map_op[k] = v
- end
-end
-
-------------------------------------------------------------------------------
-
-local function parse_gpr(expr)
- local tname, ovreg = match(expr, "^([%w_]+):(r1?[0-9])$")
- local tp = map_type[tname or expr]
- if tp then
- local reg = ovreg or tp.reg
- if not reg then
- werror("type `"..(tname or expr).."' needs a register override")
- end
- expr = reg
- end
- local r = match(expr, "^r(1?[0-9])$")
- if r then
- r = tonumber(r)
- if r <= 15 then return r, tp end
- end
- werror("bad register name `"..expr.."'")
-end
-
-local function parse_gpr_pm(expr)
- local pm, expr2 = match(expr, "^([+-]?)(.*)$")
- return parse_gpr(expr2), (pm == "-")
-end
-
-local function parse_vr(expr, tp)
- local t, r = match(expr, "^([sd])([0-9]+)$")
- if t == tp then
- r = tonumber(r)
- if r <= 31 then
- if t == "s" then return shr(r, 1), band(r, 1) end
- return band(r, 15), shr(r, 4)
- end
- end
- werror("bad register name `"..expr.."'")
-end
-
-local function parse_reglist(reglist)
- reglist = match(reglist, "^{%s*([^}]*)}$")
- if not reglist then werror("register list expected") end
- local rr = 0
- for p in gmatch(reglist..",", "%s*([^,]*),") do
- local rbit = shl(1, parse_gpr(gsub(p, "%s+$", "")))
- if band(rr, rbit) ~= 0 then
- werror("duplicate register `"..p.."'")
- end
- rr = rr + rbit
- end
- return rr
-end
-
-local function parse_vrlist(reglist)
- local ta, ra, tb, rb = match(reglist,
- "^{%s*([sd])([0-9]+)%s*%-%s*([sd])([0-9]+)%s*}$")
- ra, rb = tonumber(ra), tonumber(rb)
- if ta and ta == tb and ra and rb and ra <= 31 and rb <= 31 and ra <= rb then
- local nr = rb+1 - ra
- if ta == "s" then
- return shl(shr(ra,1),12)+shl(band(ra,1),22) + nr
- else
- return shl(band(ra,15),12)+shl(shr(ra,4),22) + nr*2 + 0x100
- end
- end
- werror("register list expected")
-end
-
-local function parse_imm(imm, bits, shift, scale, signed)
- imm = match(imm, "^#(.*)$")
- if not imm then werror("expected immediate operand") end
- local n = tonumber(imm)
- if n then
- local m = sar(n, scale)
- if shl(m, scale) == n then
- if signed then
- local s = sar(m, bits-1)
- if s == 0 then return shl(m, shift)
- elseif s == -1 then return shl(m + shl(1, bits), shift) end
- else
- if sar(m, bits) == 0 then return shl(m, shift) end
- end
- end
- werror("out of range immediate `"..imm.."'")
- else
- waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
- return 0
- end
-end
-
-local function parse_imm12(imm)
- local n = tonumber(imm)
- if n then
- local m = band(n)
- for i=0,-15,-1 do
- if shr(m, 8) == 0 then return m + shl(band(i, 15), 8) end
- m = ror(m, 2)
- end
- werror("out of range immediate `"..imm.."'")
- else
- waction("IMM12", 0, imm)
- return 0
- end
-end
-
-local function parse_imm16(imm)
- imm = match(imm, "^#(.*)$")
- if not imm then werror("expected immediate operand") end
- local n = tonumber(imm)
- if n then
- if shr(n, 16) == 0 then return band(n, 0x0fff) + shl(band(n, 0xf000), 4) end
- werror("out of range immediate `"..imm.."'")
- else
- waction("IMM16", 32*16, imm)
- return 0
- end
-end
-
-local function parse_imm_load(imm, ext)
- local n = tonumber(imm)
- if n then
- if ext then
- if n >= -255 and n <= 255 then
- local up = 0x00800000
- if n < 0 then n = -n; up = 0 end
- return shl(band(n, 0xf0), 4) + band(n, 0x0f) + up
- end
- else
- if n >= -4095 and n <= 4095 then
- if n >= 0 then return n+0x00800000 end
- return -n
- end
- end
- werror("out of range immediate `"..imm.."'")
- else
- waction(ext and "IMML8" or "IMML12", 32768 + shl(ext and 8 or 12, 5), imm)
- return 0
- end
-end
-
-local function parse_shift(shift, gprok)
- if shift == "rrx" then
- return 3 * 32
- else
- local s, s2 = match(shift, "^(%S+)%s*(.*)$")
- s = map_shift[s]
- if not s then werror("expected shift operand") end
- if sub(s2, 1, 1) == "#" then
- return parse_imm(s2, 5, 7, 0, false) + shl(s, 5)
- else
- if not gprok then werror("expected immediate shift operand") end
- return shl(parse_gpr(s2), 8) + shl(s, 5) + 16
- end
- end
-end
-
-local function parse_label(label, def)
- local prefix = sub(label, 1, 2)
- -- =>label (pc label reference)
- if prefix == "=>" then
- return "PC", 0, sub(label, 3)
- end
- -- ->name (global label reference)
- if prefix == "->" then
- return "LG", map_global[sub(label, 3)]
- end
- if def then
- -- [1-9] (local label definition)
- if match(label, "^[1-9]$") then
- return "LG", 10+tonumber(label)
- end
- else
- -- [<>][1-9] (local label reference)
- local dir, lnum = match(label, "^([<>])([1-9])$")
- if dir then -- Fwd: 1-9, Bkwd: 11-19.
- return "LG", lnum + (dir == ">" and 0 or 10)
- end
- -- extern label (extern label reference)
- local extname = match(label, "^extern%s+(%S+)$")
- if extname then
- return "EXT", map_extern[extname]
- end
- end
- werror("bad label `"..label.."'")
-end
-
-local function parse_load(params, nparams, n, op)
- local oplo = band(op, 255)
- local ext, ldrd = (oplo ~= 0), (oplo == 208)
- local d
- if (ldrd or oplo == 240) then
- d = band(shr(op, 12), 15)
- if band(d, 1) ~= 0 then werror("odd destination register") end
- end
- local pn = params[n]
- local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$")
- local p2 = params[n+1]
- if not p1 then
- if not p2 then
- if match(pn, "^[<>=%-]") or match(pn, "^extern%s+") then
- local mode, n, s = parse_label(pn, false)
- waction("REL_"..mode, n + (ext and 0x1800 or 0x0800), s, 1)
- return op + 15 * 65536 + 0x01000000 + (ext and 0x00400000 or 0)
- end
- local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$")
- if reg and tailr ~= "" then
- local d, tp = parse_gpr(reg)
- if tp then
- waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12),
- format(tp.ctypefmt, tailr))
- return op + shl(d, 16) + 0x01000000 + (ext and 0x00400000 or 0)
- end
- end
- end
- werror("expected address operand")
- end
- if wb == "!" then op = op + 0x00200000 end
- if p2 then
- if wb == "!" then werror("bad use of '!'") end
- local p3 = params[n+2]
- op = op + shl(parse_gpr(p1), 16)
- local imm = match(p2, "^#(.*)$")
- if imm then
- local m = parse_imm_load(imm, ext)
- if p3 then werror("too many parameters") end
- op = op + m + (ext and 0x00400000 or 0)
- else
- local m, neg = parse_gpr_pm(p2)
- if ldrd and (m == d or m-1 == d) then werror("register conflict") end
- op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000)
- if p3 then op = op + parse_shift(p3) end
- end
- else
- local p1a, p2 = match(p1, "^([^,%s]*)%s*(.*)$")
- op = op + shl(parse_gpr(p1a), 16) + 0x01000000
- if p2 ~= "" then
- local imm = match(p2, "^,%s*#(.*)$")
- if imm then
- local m = parse_imm_load(imm, ext)
- op = op + m + (ext and 0x00400000 or 0)
- else
- local p2a, p3 = match(p2, "^,%s*([^,%s]*)%s*,?%s*(.*)$")
- local m, neg = parse_gpr_pm(p2a)
- if ldrd and (m == d or m-1 == d) then werror("register conflict") end
- op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000)
- if p3 ~= "" then
- if ext then werror("too many parameters") end
- op = op + parse_shift(p3)
- end
- end
- else
- if wb == "!" then werror("bad use of '!'") end
- op = op + (ext and 0x00c00000 or 0x00800000)
- end
- end
- return op
-end
-
-local function parse_vload(q)
- local reg, imm = match(q, "^%[%s*([^,%s]*)%s*(.*)%]$")
- if reg then
- local d = shl(parse_gpr(reg), 16)
- if imm == "" then return d end
- imm = match(imm, "^,%s*#(.*)$")
- if imm then
- local n = tonumber(imm)
- if n then
- if n >= -1020 and n <= 1020 and n%4 == 0 then
- return d + (n >= 0 and n/4+0x00800000 or -n/4)
- end
- werror("out of range immediate `"..imm.."'")
- else
- waction("IMMV8", 32768 + 32*8, imm)
- return d
- end
- end
- else
- if match(q, "^[<>=%-]") or match(q, "^extern%s+") then
- local mode, n, s = parse_label(q, false)
- waction("REL_"..mode, n + 0x2800, s, 1)
- return 15 * 65536
- end
- local reg, tailr = match(q, "^([%w_:]+)%s*(.*)$")
- if reg and tailr ~= "" then
- local d, tp = parse_gpr(reg)
- if tp then
- waction("IMMV8", 32768 + 32*8, format(tp.ctypefmt, tailr))
- return shl(d, 16)
- end
- end
- end
- werror("expected address operand")
-end
-
-------------------------------------------------------------------------------
-
--- Handle opcodes defined with template strings.
-local function parse_template(params, template, nparams, pos)
- local op = tonumber(sub(template, 1, 8), 16)
- local n = 1
- local vr = "s"
-
- -- Process each character.
- for p in gmatch(sub(template, 9), ".") do
- local q = params[n]
- if p == "D" then
- op = op + shl(parse_gpr(q), 12); n = n + 1
- elseif p == "N" then
- op = op + shl(parse_gpr(q), 16); n = n + 1
- elseif p == "S" then
- op = op + shl(parse_gpr(q), 8); n = n + 1
- elseif p == "M" then
- op = op + parse_gpr(q); n = n + 1
- elseif p == "d" then
- local r,h = parse_vr(q, vr); op = op+shl(r,12)+shl(h,22); n = n + 1
- elseif p == "n" then
- local r,h = parse_vr(q, vr); op = op+shl(r,16)+shl(h,7); n = n + 1
- elseif p == "m" then
- local r,h = parse_vr(q, vr); op = op+r+shl(h,5); n = n + 1
- elseif p == "P" then
- local imm = match(q, "^#(.*)$")
- if imm then
- op = op + parse_imm12(imm) + 0x02000000
- else
- op = op + parse_gpr(q)
- end
- n = n + 1
- elseif p == "p" then
- op = op + parse_shift(q, true); n = n + 1
- elseif p == "L" then
- op = parse_load(params, nparams, n, op)
- elseif p == "l" then
- op = op + parse_vload(q)
- elseif p == "B" then
- local mode, n, s = parse_label(q, false)
- waction("REL_"..mode, n, s, 1)
- elseif p == "C" then -- blx gpr vs. blx label.
- if match(q, "^([%w_]+):(r1?[0-9])$") or match(q, "^r(1?[0-9])$") then
- op = op + parse_gpr(q)
- else
- if op < 0xe0000000 then werror("unconditional instruction") end
- local mode, n, s = parse_label(q, false)
- waction("REL_"..mode, n, s, 1)
- op = 0xfa000000
- end
- elseif p == "F" then
- vr = "s"
- elseif p == "G" then
- vr = "d"
- elseif p == "o" then
- local r, wb = match(q, "^([^!]*)(!?)$")
- op = op + shl(parse_gpr(r), 16) + (wb == "!" and 0x00200000 or 0)
- n = n + 1
- elseif p == "R" then
- op = op + parse_reglist(q); n = n + 1
- elseif p == "r" then
- op = op + parse_vrlist(q); n = n + 1
- elseif p == "W" then
- op = op + parse_imm16(q); n = n + 1
- elseif p == "v" then
- op = op + parse_imm(q, 5, 7, 0, false); n = n + 1
- elseif p == "w" then
- local imm = match(q, "^#(.*)$")
- if imm then
- op = op + parse_imm(q, 5, 7, 0, false); n = n + 1
- else
- op = op + shl(parse_gpr(q), 8) + 16
- end
- elseif p == "X" then
- op = op + parse_imm(q, 5, 16, 0, false); n = n + 1
- elseif p == "Y" then
- local imm = tonumber(match(q, "^#(.*)$")); n = n + 1
- if not imm or shr(imm, 8) ~= 0 then
- werror("bad immediate operand")
- end
- op = op + shl(band(imm, 0xf0), 12) + band(imm, 0x0f)
- elseif p == "K" then
- local imm = tonumber(match(q, "^#(.*)$")); n = n + 1
- if not imm or shr(imm, 16) ~= 0 then
- werror("bad immediate operand")
- end
- op = op + shl(band(imm, 0xfff0), 4) + band(imm, 0x000f)
- elseif p == "T" then
- op = op + parse_imm(q, 24, 0, 0, false); n = n + 1
- elseif p == "s" then
- -- Ignored.
- else
- assert(false)
- end
- end
- wputpos(pos, op)
-end
-
-map_op[".template__"] = function(params, template, nparams)
- if not params then return template:gsub("%x%x%x%x%x%x%x%x", "") end
-
- -- Limit number of section buffer positions used by a single dasm_put().
- -- A single opcode needs a maximum of 3 positions.
- if secpos+3 > maxsecpos then wflush() end
- local pos = wpos()
- local lpos, apos, spos = #actlist, #actargs, secpos
-
- local ok, err
- for t in gmatch(template, "[^|]+") do
- ok, err = pcall(parse_template, params, t, nparams, pos)
- if ok then return end
- secpos = spos
- actlist[lpos+1] = nil
- actlist[lpos+2] = nil
- actlist[lpos+3] = nil
- actargs[apos+1] = nil
- actargs[apos+2] = nil
- actargs[apos+3] = nil
- end
- error(err, 0)
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcode to mark the position where the action list is to be emitted.
-map_op[".actionlist_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeactions(out, name) end)
-end
-
--- Pseudo-opcode to mark the position where the global enum is to be emitted.
-map_op[".globals_1"] = function(params)
- if not params then return "prefix" end
- local prefix = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeglobals(out, prefix) end)
-end
-
--- Pseudo-opcode to mark the position where the global names are to be emitted.
-map_op[".globalnames_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeglobalnames(out, name) end)
-end
-
--- Pseudo-opcode to mark the position where the extern names are to be emitted.
-map_op[".externnames_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeexternnames(out, name) end)
-end
-
-------------------------------------------------------------------------------
-
--- Label pseudo-opcode (converted from trailing colon form).
-map_op[".label_1"] = function(params)
- if not params then return "[1-9] | ->global | =>pcexpr" end
- if secpos+1 > maxsecpos then wflush() end
- local mode, n, s = parse_label(params[1], true)
- if mode == "EXT" then werror("bad label definition") end
- waction("LABEL_"..mode, n, s, 1)
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcodes for data storage.
-map_op[".long_*"] = function(params)
- if not params then return "imm..." end
- for _,p in ipairs(params) do
- local n = tonumber(p)
- if not n then werror("bad immediate `"..p.."'") end
- if n < 0 then n = n + 2^32 end
- wputw(n)
- if secpos+2 > maxsecpos then wflush() end
- end
-end
-
--- Alignment pseudo-opcode.
-map_op[".align_1"] = function(params)
- if not params then return "numpow2" end
- if secpos+1 > maxsecpos then wflush() end
- local align = tonumber(params[1])
- if align then
- local x = align
- -- Must be a power of 2 in the range (2 ... 256).
- for i=1,8 do
- x = x / 2
- if x == 1 then
- waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
- return
- end
- end
- end
- werror("bad alignment")
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcode for (primitive) type definitions (map to C types).
-map_op[".type_3"] = function(params, nparams)
- if not params then
- return nparams == 2 and "name, ctype" or "name, ctype, reg"
- end
- local name, ctype, reg = params[1], params[2], params[3]
- if not match(name, "^[%a_][%w_]*$") then
- werror("bad type name `"..name.."'")
- end
- local tp = map_type[name]
- if tp then
- werror("duplicate type `"..name.."'")
- end
- -- Add #type to defines. A bit unclean to put it in map_archdef.
- map_archdef["#"..name] = "sizeof("..ctype..")"
- -- Add new type and emit shortcut define.
- local num = ctypenum + 1
- map_type[name] = {
- ctype = ctype,
- ctypefmt = format("Dt%X(%%s)", num),
- reg = reg,
- }
- wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
- ctypenum = num
-end
-map_op[".type_2"] = map_op[".type_3"]
-
--- Dump type definitions.
-local function dumptypes(out, lvl)
- local t = {}
- for name in pairs(map_type) do t[#t+1] = name end
- sort(t)
- out:write("Type definitions:\n")
- for _,name in ipairs(t) do
- local tp = map_type[name]
- local reg = tp.reg or ""
- out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
- end
- out:write("\n")
-end
-
-------------------------------------------------------------------------------
-
--- Set the current section.
-function _M.section(num)
- waction("SECTION", num)
- wflush(true) -- SECTION is a terminal action.
-end
-
-------------------------------------------------------------------------------
-
--- Dump architecture description.
-function _M.dumparch(out)
- out:write(format("DynASM %s version %s, released %s\n\n",
- _info.arch, _info.version, _info.release))
- dumpactions(out)
-end
-
--- Dump all user defined elements.
-function _M.dumpdef(out, lvl)
- dumptypes(out, lvl)
- dumpglobals(out, lvl)
- dumpexterns(out, lvl)
-end
-
-------------------------------------------------------------------------------
-
--- Pass callbacks from/to the DynASM core.
-function _M.passcb(wl, we, wf, ww)
- wline, werror, wfatal, wwarn = wl, we, wf, ww
- return wflush
-end
-
--- Setup the arch-specific module.
-function _M.setup(arch, opt)
- g_arch, g_opt = arch, opt
-end
-
--- Merge the core maps and the arch-specific maps.
-function _M.mergemaps(map_coreop, map_def)
- setmetatable(map_op, { __index = function(t, k)
- local v = map_coreop[k]
- if v then return v end
- local k1, cc, k2 = match(k, "^(.-)(..)([._].*)$")
- local cv = map_cond[cc]
- if cv then
- local v = rawget(t, k1..k2)
- if type(v) == "string" then
- local scv = format("%x", cv)
- return gsub(scv..sub(v, 2), "|e", "|"..scv)
- end
- end
- end })
- setmetatable(map_def, { __index = map_archdef })
- return map_op, map_def
-end
-
-return _M
-
-------------------------------------------------------------------------------
-
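The Lua module listed above (and re-added unchanged below, apart from the copyright year) is the preprocessor side: dynasm.lua feeds it each |-line, it looks the mnemonic up in map_op, parses the operands, and either stores a finished ARM word in the action list or records an IMM*/REL_* action for the C passes to patch. Purely as an illustration (assumed, not taken from this changeset), a two-instruction fragment would come out roughly as the generated C below.

/* Hypothetical preprocessor output for a .dasc fragment such as:
**   | add r0, r1, #5
**   | bx  lr
** Both encoded words exceed 0x000fffff, so they are stored directly (no ESC
** action), and wflush() appends a STOP action word (action code 0). */
static const unsigned int actions[3] = {
  0xe2810005,   /* add r0, r1, #5 -- template "e0800000DNPs" + D/N + imm12 */
  0xe12fff1e,   /* bx lr          -- template "e12fff10M" + M = r14 (lr)   */
  0x00000000    /* STOP */
};

/* ...and, in place of the two |-lines, the generated C source contains:
**   dasm_put(Dst, 0);
** i.e. a single call starting at offset 0 into actions[]. */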
+------------------------------------------------------------------------------
+-- DynASM ARM module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Module information:
+local _info = {
+ arch = "arm",
+ description = "DynASM ARM module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, setmetatable, rawget = assert, setmetatable, rawget
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local match, gmatch, gsub = _s.match, _s.gmatch, _s.gsub
+local concat, sort, insert = table.concat, table.sort, table.insert
+local bit = bit or require("bit")
+local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift
+local ror, tohex = bit.ror, bit.tohex
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ "STOP", "SECTION", "ESC", "REL_EXT",
+ "ALIGN", "REL_LG", "LABEL_LG",
+ "REL_PC", "LABEL_PC", "IMM", "IMM12", "IMM16", "IMML8", "IMML12", "IMMV8",
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number.
+local map_action = {}
+for n,name in ipairs(action_names) do
+ map_action[name] = n-1
+end
+
+-- Action list buffer.
+local actlist = {}
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
+ out:write("static const unsigned int ", name, "[", nn, "] = {\n")
+ for i = 1,nn-1 do
+ assert(out:write("0x", tohex(actlist[i]), ",\n"))
+ end
+ assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
+end
+
+------------------------------------------------------------------------------
+
+-- Add word to action list.
+local function wputxw(n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, val, a, num)
+ local w = assert(map_action[action], "bad action name `"..action.."'")
+ wputxw(w * 0x10000 + (val or 0))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ if #actlist == actargs[1] then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped word.
+local function wputw(n)
+ if n <= 0x000fffff then waction("ESC") end
+ wputxw(n)
+end
+
+-- Reserve position for word.
+local function wpos()
+ local pos = #actlist+1
+ actlist[pos] = ""
+ return pos
+end
+
+-- Store word to reserved position.
+local function wputpos(pos, n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ if n <= 0x000fffff then
+ insert(actlist, pos+1, n)
+ n = map_action.ESC * 0x10000
+ end
+ actlist[pos] = n
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 20
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 2047 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=20,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=20,next_global-1 do
+ out:write(" ", prefix, t[i], ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=20,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = 0
+local map_extern_ = {}
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n > 2047 then werror("too many extern labels") end
+ next_extern = n + 1
+ t[name] = n
+ map_extern_[n] = name
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ out:write("Extern labels:\n")
+ for i=0,next_extern-1 do
+ out:write(format(" %s\n", map_extern_[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=0,next_extern-1 do
+ out:write(" \"", map_extern_[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+
+-- Ext. register name -> int. name.
+local map_archdef = { sp = "r13", lr = "r14", pc = "r15", }
+
+-- Int. register name -> ext. name.
+local map_reg_rev = { r13 = "sp", r14 = "lr", r15 = "pc", }
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for Dt... macros).
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ return map_reg_rev[s] or s
+end
+
+local map_shift = { lsl = 0, lsr = 1, asr = 2, ror = 3, }
+
+local map_cond = {
+ eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7,
+ hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14,
+ hs = 2, lo = 3,
+}
+
+------------------------------------------------------------------------------
+
+-- Template strings for ARM instructions.
+local map_op = {
+ -- Basic data processing instructions.
+ and_3 = "e0000000DNPs",
+ eor_3 = "e0200000DNPs",
+ sub_3 = "e0400000DNPs",
+ rsb_3 = "e0600000DNPs",
+ add_3 = "e0800000DNPs",
+ adc_3 = "e0a00000DNPs",
+ sbc_3 = "e0c00000DNPs",
+ rsc_3 = "e0e00000DNPs",
+ tst_2 = "e1100000NP",
+ teq_2 = "e1300000NP",
+ cmp_2 = "e1500000NP",
+ cmn_2 = "e1700000NP",
+ orr_3 = "e1800000DNPs",
+ mov_2 = "e1a00000DPs",
+ bic_3 = "e1c00000DNPs",
+ mvn_2 = "e1e00000DPs",
+
+ and_4 = "e0000000DNMps",
+ eor_4 = "e0200000DNMps",
+ sub_4 = "e0400000DNMps",
+ rsb_4 = "e0600000DNMps",
+ add_4 = "e0800000DNMps",
+ adc_4 = "e0a00000DNMps",
+ sbc_4 = "e0c00000DNMps",
+ rsc_4 = "e0e00000DNMps",
+ tst_3 = "e1100000NMp",
+ teq_3 = "e1300000NMp",
+ cmp_3 = "e1500000NMp",
+ cmn_3 = "e1700000NMp",
+ orr_4 = "e1800000DNMps",
+ mov_3 = "e1a00000DMps",
+ bic_4 = "e1c00000DNMps",
+ mvn_3 = "e1e00000DMps",
+
+ lsl_3 = "e1a00000DMws",
+ lsr_3 = "e1a00020DMws",
+ asr_3 = "e1a00040DMws",
+ ror_3 = "e1a00060DMws",
+ rrx_2 = "e1a00060DMs",
+
+ -- Multiply and multiply-accumulate.
+ mul_3 = "e0000090NMSs",
+ mla_4 = "e0200090NMSDs",
+ umaal_4 = "e0400090DNMSs", -- v6
+ mls_4 = "e0600090DNMSs", -- v6T2
+ umull_4 = "e0800090DNMSs",
+ umlal_4 = "e0a00090DNMSs",
+ smull_4 = "e0c00090DNMSs",
+ smlal_4 = "e0e00090DNMSs",
+
+ -- Halfword multiply and multiply-accumulate.
+ smlabb_4 = "e1000080NMSD", -- v5TE
+ smlatb_4 = "e10000a0NMSD", -- v5TE
+ smlabt_4 = "e10000c0NMSD", -- v5TE
+ smlatt_4 = "e10000e0NMSD", -- v5TE
+ smlawb_4 = "e1200080NMSD", -- v5TE
+ smulwb_3 = "e12000a0NMS", -- v5TE
+ smlawt_4 = "e12000c0NMSD", -- v5TE
+ smulwt_3 = "e12000e0NMS", -- v5TE
+ smlalbb_4 = "e1400080NMSD", -- v5TE
+ smlaltb_4 = "e14000a0NMSD", -- v5TE
+ smlalbt_4 = "e14000c0NMSD", -- v5TE
+ smlaltt_4 = "e14000e0NMSD", -- v5TE
+ smulbb_3 = "e1600080NMS", -- v5TE
+ smultb_3 = "e16000a0NMS", -- v5TE
+ smulbt_3 = "e16000c0NMS", -- v5TE
+ smultt_3 = "e16000e0NMS", -- v5TE
+
+ -- Miscellaneous data processing instructions.
+ clz_2 = "e16f0f10DM", -- v5T
+ rev_2 = "e6bf0f30DM", -- v6
+ rev16_2 = "e6bf0fb0DM", -- v6
+ revsh_2 = "e6ff0fb0DM", -- v6
+ sel_3 = "e6800fb0DNM", -- v6
+ usad8_3 = "e780f010NMS", -- v6
+ usada8_4 = "e7800010NMSD", -- v6
+ rbit_2 = "e6ff0f30DM", -- v6T2
+ movw_2 = "e3000000DW", -- v6T2
+ movt_2 = "e3400000DW", -- v6T2
+ -- Note: the X encodes width-1, not width.
+ sbfx_4 = "e7a00050DMvX", -- v6T2
+ ubfx_4 = "e7e00050DMvX", -- v6T2
+ -- Note: the X encodes the msb field, not the width.
+ bfc_3 = "e7c0001fDvX", -- v6T2
+ bfi_4 = "e7c00010DMvX", -- v6T2
+
+ -- Packing and unpacking instructions.
+ pkhbt_3 = "e6800010DNM", pkhbt_4 = "e6800010DNMv", -- v6
+ pkhtb_3 = "e6800050DNM", pkhtb_4 = "e6800050DNMv", -- v6
+ sxtab_3 = "e6a00070DNM", sxtab_4 = "e6a00070DNMv", -- v6
+ sxtab16_3 = "e6800070DNM", sxtab16_4 = "e6800070DNMv", -- v6
+ sxtah_3 = "e6b00070DNM", sxtah_4 = "e6b00070DNMv", -- v6
+ sxtb_2 = "e6af0070DM", sxtb_3 = "e6af0070DMv", -- v6
+ sxtb16_2 = "e68f0070DM", sxtb16_3 = "e68f0070DMv", -- v6
+ sxth_2 = "e6bf0070DM", sxth_3 = "e6bf0070DMv", -- v6
+ uxtab_3 = "e6e00070DNM", uxtab_4 = "e6e00070DNMv", -- v6
+ uxtab16_3 = "e6c00070DNM", uxtab16_4 = "e6c00070DNMv", -- v6
+ uxtah_3 = "e6f00070DNM", uxtah_4 = "e6f00070DNMv", -- v6
+ uxtb_2 = "e6ef0070DM", uxtb_3 = "e6ef0070DMv", -- v6
+ uxtb16_2 = "e6cf0070DM", uxtb16_3 = "e6cf0070DMv", -- v6
+ uxth_2 = "e6ff0070DM", uxth_3 = "e6ff0070DMv", -- v6
+
+ -- Saturating instructions.
+ qadd_3 = "e1000050DMN", -- v5TE
+ qsub_3 = "e1200050DMN", -- v5TE
+ qdadd_3 = "e1400050DMN", -- v5TE
+ qdsub_3 = "e1600050DMN", -- v5TE
+ -- Note: the X for ssat* encodes sat_imm-1, not sat_imm.
+ ssat_3 = "e6a00010DXM", ssat_4 = "e6a00010DXMp", -- v6
+ usat_3 = "e6e00010DXM", usat_4 = "e6e00010DXMp", -- v6
+ ssat16_3 = "e6a00f30DXM", -- v6
+ usat16_3 = "e6e00f30DXM", -- v6
+
+ -- Parallel addition and subtraction.
+ sadd16_3 = "e6100f10DNM", -- v6
+ sasx_3 = "e6100f30DNM", -- v6
+ ssax_3 = "e6100f50DNM", -- v6
+ ssub16_3 = "e6100f70DNM", -- v6
+ sadd8_3 = "e6100f90DNM", -- v6
+ ssub8_3 = "e6100ff0DNM", -- v6
+ qadd16_3 = "e6200f10DNM", -- v6
+ qasx_3 = "e6200f30DNM", -- v6
+ qsax_3 = "e6200f50DNM", -- v6
+ qsub16_3 = "e6200f70DNM", -- v6
+ qadd8_3 = "e6200f90DNM", -- v6
+ qsub8_3 = "e6200ff0DNM", -- v6
+ shadd16_3 = "e6300f10DNM", -- v6
+ shasx_3 = "e6300f30DNM", -- v6
+ shsax_3 = "e6300f50DNM", -- v6
+ shsub16_3 = "e6300f70DNM", -- v6
+ shadd8_3 = "e6300f90DNM", -- v6
+ shsub8_3 = "e6300ff0DNM", -- v6
+ uadd16_3 = "e6500f10DNM", -- v6
+ uasx_3 = "e6500f30DNM", -- v6
+ usax_3 = "e6500f50DNM", -- v6
+ usub16_3 = "e6500f70DNM", -- v6
+ uadd8_3 = "e6500f90DNM", -- v6
+ usub8_3 = "e6500ff0DNM", -- v6
+ uqadd16_3 = "e6600f10DNM", -- v6
+ uqasx_3 = "e6600f30DNM", -- v6
+ uqsax_3 = "e6600f50DNM", -- v6
+ uqsub16_3 = "e6600f70DNM", -- v6
+ uqadd8_3 = "e6600f90DNM", -- v6
+ uqsub8_3 = "e6600ff0DNM", -- v6
+ uhadd16_3 = "e6700f10DNM", -- v6
+ uhasx_3 = "e6700f30DNM", -- v6
+ uhsax_3 = "e6700f50DNM", -- v6
+ uhsub16_3 = "e6700f70DNM", -- v6
+ uhadd8_3 = "e6700f90DNM", -- v6
+ uhsub8_3 = "e6700ff0DNM", -- v6
+
+ -- Load/store instructions.
+ str_2 = "e4000000DL", str_3 = "e4000000DL", str_4 = "e4000000DL",
+ strb_2 = "e4400000DL", strb_3 = "e4400000DL", strb_4 = "e4400000DL",
+ ldr_2 = "e4100000DL", ldr_3 = "e4100000DL", ldr_4 = "e4100000DL",
+ ldrb_2 = "e4500000DL", ldrb_3 = "e4500000DL", ldrb_4 = "e4500000DL",
+ strh_2 = "e00000b0DL", strh_3 = "e00000b0DL",
+ ldrh_2 = "e01000b0DL", ldrh_3 = "e01000b0DL",
+ ldrd_2 = "e00000d0DL", ldrd_3 = "e00000d0DL", -- v5TE
+ ldrsb_2 = "e01000d0DL", ldrsb_3 = "e01000d0DL",
+ strd_2 = "e00000f0DL", strd_3 = "e00000f0DL", -- v5TE
+ ldrsh_2 = "e01000f0DL", ldrsh_3 = "e01000f0DL",
+
+ ldm_2 = "e8900000oR", ldmia_2 = "e8900000oR", ldmfd_2 = "e8900000oR",
+ ldmda_2 = "e8100000oR", ldmfa_2 = "e8100000oR",
+ ldmdb_2 = "e9100000oR", ldmea_2 = "e9100000oR",
+ ldmib_2 = "e9900000oR", ldmed_2 = "e9900000oR",
+ stm_2 = "e8800000oR", stmia_2 = "e8800000oR", stmfd_2 = "e8800000oR",
+ stmda_2 = "e8000000oR", stmfa_2 = "e8000000oR",
+ stmdb_2 = "e9000000oR", stmea_2 = "e9000000oR",
+ stmib_2 = "e9800000oR", stmed_2 = "e9800000oR",
+ pop_1 = "e8bd0000R", push_1 = "e92d0000R",
+
+ -- Branch instructions.
+ b_1 = "ea000000B",
+ bl_1 = "eb000000B",
+ blx_1 = "e12fff30C",
+ bx_1 = "e12fff10M",
+
+ -- Miscellaneous instructions.
+ nop_0 = "e1a00000",
+ mrs_1 = "e10f0000D",
+ bkpt_1 = "e1200070K", -- v5T
+ svc_1 = "ef000000T", swi_1 = "ef000000T",
+ ud_0 = "e7f001f0",
+
+ -- VFP instructions.
+ ["vadd.f32_3"] = "ee300a00dnm",
+ ["vadd.f64_3"] = "ee300b00Gdnm",
+ ["vsub.f32_3"] = "ee300a40dnm",
+ ["vsub.f64_3"] = "ee300b40Gdnm",
+ ["vmul.f32_3"] = "ee200a00dnm",
+ ["vmul.f64_3"] = "ee200b00Gdnm",
+ ["vnmul.f32_3"] = "ee200a40dnm",
+ ["vnmul.f64_3"] = "ee200b40Gdnm",
+ ["vmla.f32_3"] = "ee000a00dnm",
+ ["vmla.f64_3"] = "ee000b00Gdnm",
+ ["vmls.f32_3"] = "ee000a40dnm",
+ ["vmls.f64_3"] = "ee000b40Gdnm",
+ ["vnmla.f32_3"] = "ee100a40dnm",
+ ["vnmla.f64_3"] = "ee100b40Gdnm",
+ ["vnmls.f32_3"] = "ee100a00dnm",
+ ["vnmls.f64_3"] = "ee100b00Gdnm",
+ ["vdiv.f32_3"] = "ee800a00dnm",
+ ["vdiv.f64_3"] = "ee800b00Gdnm",
+
+ ["vabs.f32_2"] = "eeb00ac0dm",
+ ["vabs.f64_2"] = "eeb00bc0Gdm",
+ ["vneg.f32_2"] = "eeb10a40dm",
+ ["vneg.f64_2"] = "eeb10b40Gdm",
+ ["vsqrt.f32_2"] = "eeb10ac0dm",
+ ["vsqrt.f64_2"] = "eeb10bc0Gdm",
+ ["vcmp.f32_2"] = "eeb40a40dm",
+ ["vcmp.f64_2"] = "eeb40b40Gdm",
+ ["vcmpe.f32_2"] = "eeb40ac0dm",
+ ["vcmpe.f64_2"] = "eeb40bc0Gdm",
+ ["vcmpz.f32_1"] = "eeb50a40d",
+ ["vcmpz.f64_1"] = "eeb50b40Gd",
+ ["vcmpze.f32_1"] = "eeb50ac0d",
+ ["vcmpze.f64_1"] = "eeb50bc0Gd",
+
+ vldr_2 = "ed100a00dl|ed100b00Gdl",
+ vstr_2 = "ed000a00dl|ed000b00Gdl",
+ vldm_2 = "ec900a00or",
+ vldmia_2 = "ec900a00or",
+ vldmdb_2 = "ed100a00or",
+ vpop_1 = "ecbd0a00r",
+ vstm_2 = "ec800a00or",
+ vstmia_2 = "ec800a00or",
+ vstmdb_2 = "ed000a00or",
+ vpush_1 = "ed2d0a00r",
+
+ ["vmov.f32_2"] = "eeb00a40dm|eeb00a00dY", -- #imm is VFPv3 only
+ ["vmov.f64_2"] = "eeb00b40Gdm|eeb00b00GdY", -- #imm is VFPv3 only
+ vmov_2 = "ee100a10Dn|ee000a10nD",
+ vmov_3 = "ec500a10DNm|ec400a10mDN|ec500b10GDNm|ec400b10GmDN",
+
+ vmrs_0 = "eef1fa10",
+ vmrs_1 = "eef10a10D",
+ vmsr_1 = "eee10a10D",
+
+ ["vcvt.s32.f32_2"] = "eebd0ac0dm",
+ ["vcvt.s32.f64_2"] = "eebd0bc0dGm",
+ ["vcvt.u32.f32_2"] = "eebc0ac0dm",
+ ["vcvt.u32.f64_2"] = "eebc0bc0dGm",
+ ["vcvtr.s32.f32_2"] = "eebd0a40dm",
+ ["vcvtr.s32.f64_2"] = "eebd0b40dGm",
+ ["vcvtr.u32.f32_2"] = "eebc0a40dm",
+ ["vcvtr.u32.f64_2"] = "eebc0b40dGm",
+ ["vcvt.f32.s32_2"] = "eeb80ac0dm",
+ ["vcvt.f64.s32_2"] = "eeb80bc0GdFm",
+ ["vcvt.f32.u32_2"] = "eeb80a40dm",
+ ["vcvt.f64.u32_2"] = "eeb80b40GdFm",
+ ["vcvt.f32.f64_2"] = "eeb70bc0dGm",
+ ["vcvt.f64.f32_2"] = "eeb70ac0GdFm",
+
+ -- VFPv4 only:
+ ["vfma.f32_3"] = "eea00a00dnm",
+ ["vfma.f64_3"] = "eea00b00Gdnm",
+ ["vfms.f32_3"] = "eea00a40dnm",
+ ["vfms.f64_3"] = "eea00b40Gdnm",
+ ["vfnma.f32_3"] = "ee900a40dnm",
+ ["vfnma.f64_3"] = "ee900b40Gdnm",
+ ["vfnms.f32_3"] = "ee900a00dnm",
+ ["vfnms.f64_3"] = "ee900b00Gdnm",
+
+ -- NYI: Advanced SIMD instructions.
+
+ -- NYI: I have no need for these instructions right now:
+ -- swp, swpb, strex, ldrex, strexd, ldrexd, strexb, ldrexb, strexh, ldrexh
+ -- msr, nopv6, yield, wfe, wfi, sev, dbg, bxj, smc, srs, rfe
+ -- cps, setend, pli, pld, pldw, clrex, dsb, dmb, isb
+ -- stc, ldc, mcr, mcr2, mrc, mrc2, mcrr, mcrr2, mrrc, mrrc2, cdp, cdp2
+}
+
+-- Add mnemonics for "s" variants.
+do
+ local t = {}
+ for k,v in pairs(map_op) do
+ if sub(v, -1) == "s" then
+ local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2)
+ t[sub(k, 1, -3).."s"..sub(k, -2)] = v2
+ end
+ end
+ for k,v in pairs(t) do
+ map_op[k] = v
+ end
+end
+
+------------------------------------------------------------------------------
+
+local function parse_gpr(expr)
+ local tname, ovreg = match(expr, "^([%w_]+):(r1?[0-9])$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ if not reg then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ expr = reg
+ end
+ local r = match(expr, "^r(1?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 15 then return r, tp end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_gpr_pm(expr)
+ local pm, expr2 = match(expr, "^([+-]?)(.*)$")
+ return parse_gpr(expr2), (pm == "-")
+end
+
+local function parse_vr(expr, tp)
+ local t, r = match(expr, "^([sd])([0-9]+)$")
+ if t == tp then
+ r = tonumber(r)
+ if r <= 31 then
+ if t == "s" then return shr(r, 1), band(r, 1) end
+ return band(r, 15), shr(r, 4)
+ end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_reglist(reglist)
+ reglist = match(reglist, "^{%s*([^}]*)}$")
+ if not reglist then werror("register list expected") end
+ local rr = 0
+ for p in gmatch(reglist..",", "%s*([^,]*),") do
+ local rbit = shl(1, parse_gpr(gsub(p, "%s+$", "")))
+ if band(rr, rbit) ~= 0 then
+ werror("duplicate register `"..p.."'")
+ end
+ rr = rr + rbit
+ end
+ return rr
+end
+
+local function parse_vrlist(reglist)
+ local ta, ra, tb, rb = match(reglist,
+ "^{%s*([sd])([0-9]+)%s*%-%s*([sd])([0-9]+)%s*}$")
+ ra, rb = tonumber(ra), tonumber(rb)
+ if ta and ta == tb and ra and rb and ra <= 31 and rb <= 31 and ra <= rb then
+ local nr = rb+1 - ra
+ if ta == "s" then
+ return shl(shr(ra,1),12)+shl(band(ra,1),22) + nr
+ else
+ return shl(band(ra,15),12)+shl(shr(ra,4),22) + nr*2 + 0x100
+ end
+ end
+ werror("register list expected")
+end
+
+local function parse_imm(imm, bits, shift, scale, signed)
+ imm = match(imm, "^#(.*)$")
+ if not imm then werror("expected immediate operand") end
+ local n = tonumber(imm)
+ if n then
+ local m = sar(n, scale)
+ if shl(m, scale) == n then
+ if signed then
+ local s = sar(m, bits-1)
+ if s == 0 then return shl(m, shift)
+ elseif s == -1 then return shl(m + shl(1, bits), shift) end
+ else
+ if sar(m, bits) == 0 then return shl(m, shift) end
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
+ return 0
+ end
+end
+
+local function parse_imm12(imm)
+ local n = tonumber(imm)
+ if n then
+ local m = band(n)
+ for i=0,-15,-1 do
+ if shr(m, 8) == 0 then return m + shl(band(i, 15), 8) end
+ m = ror(m, 2)
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMM12", 0, imm)
+ return 0
+ end
+end
+
+local function parse_imm16(imm)
+ imm = match(imm, "^#(.*)$")
+ if not imm then werror("expected immediate operand") end
+ local n = tonumber(imm)
+ if n then
+ if shr(n, 16) == 0 then return band(n, 0x0fff) + shl(band(n, 0xf000), 4) end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMM16", 32*16, imm)
+ return 0
+ end
+end
+
+local function parse_imm_load(imm, ext)
+ local n = tonumber(imm)
+ if n then
+ if ext then
+ if n >= -255 and n <= 255 then
+ local up = 0x00800000
+ if n < 0 then n = -n; up = 0 end
+ return shl(band(n, 0xf0), 4) + band(n, 0x0f) + up
+ end
+ else
+ if n >= -4095 and n <= 4095 then
+ if n >= 0 then return n+0x00800000 end
+ return -n
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction(ext and "IMML8" or "IMML12", 32768 + shl(ext and 8 or 12, 5), imm)
+ return 0
+ end
+end
+
+local function parse_shift(shift, gprok)
+ if shift == "rrx" then
+ return 3 * 32
+ else
+ local s, s2 = match(shift, "^(%S+)%s*(.*)$")
+ s = map_shift[s]
+ if not s then werror("expected shift operand") end
+ if sub(s2, 1, 1) == "#" then
+ return parse_imm(s2, 5, 7, 0, false) + shl(s, 5)
+ else
+ if not gprok then werror("expected immediate shift operand") end
+ return shl(parse_gpr(s2), 8) + shl(s, 5) + 16
+ end
+ end
+end
+
+local function parse_label(label, def)
+ local prefix = sub(label, 1, 2)
+ -- =>label (pc label reference)
+ if prefix == "=>" then
+ return "PC", 0, sub(label, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "LG", map_global[sub(label, 3)]
+ end
+ if def then
+ -- [1-9] (local label definition)
+ if match(label, "^[1-9]$") then
+ return "LG", 10+tonumber(label)
+ end
+ else
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(label, "^([<>])([1-9])$")
+ if dir then -- Fwd: 1-9, Bkwd: 11-19.
+ return "LG", lnum + (dir == ">" and 0 or 10)
+ end
+ -- extern label (extern label reference)
+ local extname = match(label, "^extern%s+(%S+)$")
+ if extname then
+ return "EXT", map_extern[extname]
+ end
+ end
+ werror("bad label `"..label.."'")
+end
+
+local function parse_load(params, nparams, n, op)
+ local oplo = band(op, 255)
+ local ext, ldrd = (oplo ~= 0), (oplo == 208)
+ local d
+ if (ldrd or oplo == 240) then
+ d = band(shr(op, 12), 15)
+ if band(d, 1) ~= 0 then werror("odd destination register") end
+ end
+ local pn = params[n]
+ local p1, wb = match(pn, "^%[%s*(.-)%s*%](!?)$")
+ local p2 = params[n+1]
+ if not p1 then
+ if not p2 then
+ if match(pn, "^[<>=%-]") or match(pn, "^extern%s+") then
+ local mode, n, s = parse_label(pn, false)
+ waction("REL_"..mode, n + (ext and 0x1800 or 0x0800), s, 1)
+ return op + 15 * 65536 + 0x01000000 + (ext and 0x00400000 or 0)
+ end
+ local reg, tailr = match(pn, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local d, tp = parse_gpr(reg)
+ if tp then
+ waction(ext and "IMML8" or "IMML12", 32768 + 32*(ext and 8 or 12),
+ format(tp.ctypefmt, tailr))
+ return op + shl(d, 16) + 0x01000000 + (ext and 0x00400000 or 0)
+ end
+ end
+ end
+ werror("expected address operand")
+ end
+ if wb == "!" then op = op + 0x00200000 end
+ if p2 then
+ if wb == "!" then werror("bad use of '!'") end
+ local p3 = params[n+2]
+ op = op + shl(parse_gpr(p1), 16)
+ local imm = match(p2, "^#(.*)$")
+ if imm then
+ local m = parse_imm_load(imm, ext)
+ if p3 then werror("too many parameters") end
+ op = op + m + (ext and 0x00400000 or 0)
+ else
+ local m, neg = parse_gpr_pm(p2)
+ if ldrd and (m == d or m-1 == d) then werror("register conflict") end
+ op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000)
+ if p3 then op = op + parse_shift(p3) end
+ end
+ else
+ local p1a, p2 = match(p1, "^([^,%s]*)%s*(.*)$")
+ op = op + shl(parse_gpr(p1a), 16) + 0x01000000
+ if p2 ~= "" then
+ local imm = match(p2, "^,%s*#(.*)$")
+ if imm then
+ local m = parse_imm_load(imm, ext)
+ op = op + m + (ext and 0x00400000 or 0)
+ else
+ local p2a, p3 = match(p2, "^,%s*([^,%s]*)%s*,?%s*(.*)$")
+ local m, neg = parse_gpr_pm(p2a)
+ if ldrd and (m == d or m-1 == d) then werror("register conflict") end
+ op = op + m + (neg and 0 or 0x00800000) + (ext and 0 or 0x02000000)
+ if p3 ~= "" then
+ if ext then werror("too many parameters") end
+ op = op + parse_shift(p3)
+ end
+ end
+ else
+ if wb == "!" then werror("bad use of '!'") end
+ op = op + (ext and 0x00c00000 or 0x00800000)
+ end
+ end
+ return op
+end
+
+local function parse_vload(q)
+ local reg, imm = match(q, "^%[%s*([^,%s]*)%s*(.*)%]$")
+ if reg then
+ local d = shl(parse_gpr(reg), 16)
+ if imm == "" then return d end
+ imm = match(imm, "^,%s*#(.*)$")
+ if imm then
+ local n = tonumber(imm)
+ if n then
+ if n >= -1020 and n <= 1020 and n%4 == 0 then
+ return d + (n >= 0 and n/4+0x00800000 or -n/4)
+ end
+ werror("out of range immediate `"..imm.."'")
+ else
+ waction("IMMV8", 32768 + 32*8, imm)
+ return d
+ end
+ end
+ else
+ if match(q, "^[<>=%-]") or match(q, "^extern%s+") then
+ local mode, n, s = parse_label(q, false)
+ waction("REL_"..mode, n + 0x2800, s, 1)
+ return 15 * 65536
+ end
+ local reg, tailr = match(q, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local d, tp = parse_gpr(reg)
+ if tp then
+ waction("IMMV8", 32768 + 32*8, format(tp.ctypefmt, tailr))
+ return shl(d, 16)
+ end
+ end
+ end
+ werror("expected address operand")
+end
+
+------------------------------------------------------------------------------
+
+-- Handle opcodes defined with template strings.
+local function parse_template(params, template, nparams, pos)
+ local op = tonumber(sub(template, 1, 8), 16)
+ local n = 1
+ local vr = "s"
+
+ -- Process each character.
+ for p in gmatch(sub(template, 9), ".") do
+ local q = params[n]
+ if p == "D" then
+ op = op + shl(parse_gpr(q), 12); n = n + 1
+ elseif p == "N" then
+ op = op + shl(parse_gpr(q), 16); n = n + 1
+ elseif p == "S" then
+ op = op + shl(parse_gpr(q), 8); n = n + 1
+ elseif p == "M" then
+ op = op + parse_gpr(q); n = n + 1
+ elseif p == "d" then
+ local r,h = parse_vr(q, vr); op = op+shl(r,12)+shl(h,22); n = n + 1
+ elseif p == "n" then
+ local r,h = parse_vr(q, vr); op = op+shl(r,16)+shl(h,7); n = n + 1
+ elseif p == "m" then
+ local r,h = parse_vr(q, vr); op = op+r+shl(h,5); n = n + 1
+ elseif p == "P" then
+ local imm = match(q, "^#(.*)$")
+ if imm then
+ op = op + parse_imm12(imm) + 0x02000000
+ else
+ op = op + parse_gpr(q)
+ end
+ n = n + 1
+ elseif p == "p" then
+ op = op + parse_shift(q, true); n = n + 1
+ elseif p == "L" then
+ op = parse_load(params, nparams, n, op)
+ elseif p == "l" then
+ op = op + parse_vload(q)
+ elseif p == "B" then
+ local mode, n, s = parse_label(q, false)
+ waction("REL_"..mode, n, s, 1)
+ elseif p == "C" then -- blx gpr vs. blx label.
+ if match(q, "^([%w_]+):(r1?[0-9])$") or match(q, "^r(1?[0-9])$") then
+ op = op + parse_gpr(q)
+ else
+ if op < 0xe0000000 then werror("unconditional instruction") end
+ local mode, n, s = parse_label(q, false)
+ waction("REL_"..mode, n, s, 1)
+ op = 0xfa000000
+ end
+ elseif p == "F" then
+ vr = "s"
+ elseif p == "G" then
+ vr = "d"
+ elseif p == "o" then
+ local r, wb = match(q, "^([^!]*)(!?)$")
+ op = op + shl(parse_gpr(r), 16) + (wb == "!" and 0x00200000 or 0)
+ n = n + 1
+ elseif p == "R" then
+ op = op + parse_reglist(q); n = n + 1
+ elseif p == "r" then
+ op = op + parse_vrlist(q); n = n + 1
+ elseif p == "W" then
+ op = op + parse_imm16(q); n = n + 1
+ elseif p == "v" then
+ op = op + parse_imm(q, 5, 7, 0, false); n = n + 1
+ elseif p == "w" then
+ local imm = match(q, "^#(.*)$")
+ if imm then
+ op = op + parse_imm(q, 5, 7, 0, false); n = n + 1
+ else
+ op = op + shl(parse_gpr(q), 8) + 16
+ end
+ elseif p == "X" then
+ op = op + parse_imm(q, 5, 16, 0, false); n = n + 1
+ elseif p == "Y" then
+ local imm = tonumber(match(q, "^#(.*)$")); n = n + 1
+ if not imm or shr(imm, 8) ~= 0 then
+ werror("bad immediate operand")
+ end
+ op = op + shl(band(imm, 0xf0), 12) + band(imm, 0x0f)
+ elseif p == "K" then
+ local imm = tonumber(match(q, "^#(.*)$")); n = n + 1
+ if not imm or shr(imm, 16) ~= 0 then
+ werror("bad immediate operand")
+ end
+ op = op + shl(band(imm, 0xfff0), 4) + band(imm, 0x000f)
+ elseif p == "T" then
+ op = op + parse_imm(q, 24, 0, 0, false); n = n + 1
+ elseif p == "s" then
+ -- Ignored.
+ else
+ assert(false)
+ end
+ end
+ wputpos(pos, op)
+end
+
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return sub(template, 9) end
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 3 positions.
+ if secpos+3 > maxsecpos then wflush() end
+ local pos = wpos()
+ local apos, spos = #actargs, secpos
+
+ local ok, err
+ for t in gmatch(template, "[^|]+") do
+ ok, err = pcall(parse_template, params, t, nparams, pos)
+ if ok then return end
+ secpos = spos
+ actargs[apos+1] = nil
+ actargs[apos+2] = nil
+ actargs[apos+3] = nil
+ end
+ error(err, 0)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_1"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr" end
+ if secpos+1 > maxsecpos then wflush() end
+ local mode, n, s = parse_label(params[1], true)
+ if mode == "EXT" then werror("bad label definition") end
+ waction("LABEL_"..mode, n, s, 1)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+map_op[".long_*"] = function(params)
+ if not params then return "imm..." end
+ for _,p in ipairs(params) do
+ local n = tonumber(p)
+ if not n then werror("bad immediate `"..p.."'") end
+ if n < 0 then n = n + 2^32 end
+ wputw(n)
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1])
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION", num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = function(t, k)
+ local v = map_coreop[k]
+ if v then return v end
+ local k1, cc, k2 = match(k, "^(.-)(..)([._].*)$")
+ local cv = map_cond[cc]
+ if cv then
+ local v = rawget(t, k1..k2)
+ if type(v) == "string" then
+ local scv = format("%x", cv)
+ return gsub(scv..sub(v, 2), "|e", "|"..scv)
+ end
+ end
+ end })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
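A minimal Lua sketch of what the "s"-variant pass in the ARM module above produces. It assumes a base entry shaped like add_3 = "e0800000DNPs" (defined earlier in dasm_arm.lua, outside the lines shown here): the loop drops the trailing "s" marker, which parse_template would otherwise ignore, and bumps the third hex digit of the template so that bit 20 (the ARM flag-update S bit) is set in the encoded opcode.

local sub, byte, char = string.sub, string.byte, string.char

-- Mirrors the "Add mnemonics for 's' variants" block above.
local function make_s_variant(k, v)
  local v2 = sub(v, 1, 2)..char(byte(v, 3)+1)..sub(v, 4, -2)  -- "e0800000DNPs" -> "e0900000DNP"
  local k2 = sub(k, 1, -3).."s"..sub(k, -2)                   -- "add_3"        -> "adds_3"
  return k2, v2
end

print(make_s_variant("add_3", "e0800000DNPs"))  --> adds_3  e0900000DNP
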
diff --git a/3rdparty/lua/dynasm/dasm_mips.h b/3rdparty/lua/dynasm/dasm_mips.h
index 4ef08e7..0866beb 100644
--- a/3rdparty/lua/dynasm/dasm_mips.h
+++ b/3rdparty/lua/dynasm/dasm_mips.h
@@ -1,416 +1,416 @@
-/*
-** DynASM MIPS encoding engine.
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-** Released under the MIT license. See dynasm.lua for full copyright notice.
-*/
-
-#include <stddef.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdlib.h>
-
-#define DASM_ARCH "mips"
-
-#ifndef DASM_EXTERN
-#define DASM_EXTERN(a,b,c,d) 0
-#endif
-
-/* Action definitions. */
-enum {
- DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
- /* The following actions need a buffer position. */
- DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
- /* The following actions also have an argument. */
- DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
- DASM__MAX
-};
-
-/* Maximum number of section buffer positions for a single dasm_put() call. */
-#define DASM_MAXSECPOS 25
-
-/* DynASM encoder status codes. Action list offset or number are or'ed in. */
-#define DASM_S_OK 0x00000000
-#define DASM_S_NOMEM 0x01000000
-#define DASM_S_PHASE 0x02000000
-#define DASM_S_MATCH_SEC 0x03000000
-#define DASM_S_RANGE_I 0x11000000
-#define DASM_S_RANGE_SEC 0x12000000
-#define DASM_S_RANGE_LG 0x13000000
-#define DASM_S_RANGE_PC 0x14000000
-#define DASM_S_RANGE_REL 0x15000000
-#define DASM_S_UNDEF_LG 0x21000000
-#define DASM_S_UNDEF_PC 0x22000000
-
-/* Macros to convert positions (8 bit section + 24 bit index). */
-#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
-#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
-#define DASM_SEC2POS(sec) ((sec)<<24)
-#define DASM_POS2SEC(pos) ((pos)>>24)
-#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
-
-/* Action list type. */
-typedef const unsigned int *dasm_ActList;
-
-/* Per-section structure. */
-typedef struct dasm_Section {
- int *rbuf; /* Biased buffer pointer (negative section bias). */
- int *buf; /* True buffer pointer. */
- size_t bsize; /* Buffer size in bytes. */
- int pos; /* Biased buffer position. */
- int epos; /* End of biased buffer position - max single put. */
- int ofs; /* Byte offset into section. */
-} dasm_Section;
-
-/* Core structure holding the DynASM encoding state. */
-struct dasm_State {
- size_t psize; /* Allocated size of this structure. */
- dasm_ActList actionlist; /* Current actionlist pointer. */
- int *lglabels; /* Local/global chain/pos ptrs. */
- size_t lgsize;
- int *pclabels; /* PC label chains/pos ptrs. */
- size_t pcsize;
- void **globals; /* Array of globals (bias -10). */
- dasm_Section *section; /* Pointer to active section. */
- size_t codesize; /* Total size of all code sections. */
- int maxsection; /* 0 <= sectionidx < maxsection. */
- int status; /* Status code. */
- dasm_Section sections[1]; /* All sections. Alloc-extended. */
-};
-
-/* The size of the core structure depends on the max. number of sections. */
-#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
-
-
-/* Initialize DynASM state. */
-void dasm_init(Dst_DECL, int maxsection)
-{
- dasm_State *D;
- size_t psz = 0;
- int i;
- Dst_REF = NULL;
- DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
- D = Dst_REF;
- D->psize = psz;
- D->lglabels = NULL;
- D->lgsize = 0;
- D->pclabels = NULL;
- D->pcsize = 0;
- D->globals = NULL;
- D->maxsection = maxsection;
- for (i = 0; i < maxsection; i++) {
- D->sections[i].buf = NULL; /* Need this for pass3. */
- D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
- D->sections[i].bsize = 0;
- D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
- }
-}
-
-/* Free DynASM state. */
-void dasm_free(Dst_DECL)
-{
- dasm_State *D = Dst_REF;
- int i;
- for (i = 0; i < D->maxsection; i++)
- if (D->sections[i].buf)
- DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
- if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
- if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
- DASM_M_FREE(Dst, D, D->psize);
-}
-
-/* Setup global label array. Must be called before dasm_setup(). */
-void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
-{
- dasm_State *D = Dst_REF;
- D->globals = gl - 10; /* Negative bias to compensate for locals. */
- DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
-}
-
-/* Grow PC label array. Can be called after dasm_setup(), too. */
-void dasm_growpc(Dst_DECL, unsigned int maxpc)
-{
- dasm_State *D = Dst_REF;
- size_t osz = D->pcsize;
- DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
- memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
-}
-
-/* Setup encoder. */
-void dasm_setup(Dst_DECL, const void *actionlist)
-{
- dasm_State *D = Dst_REF;
- int i;
- D->actionlist = (dasm_ActList)actionlist;
- D->status = DASM_S_OK;
- D->section = &D->sections[0];
- memset((void *)D->lglabels, 0, D->lgsize);
- if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
- for (i = 0; i < D->maxsection; i++) {
- D->sections[i].pos = DASM_SEC2POS(i);
- D->sections[i].ofs = 0;
- }
-}
-
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) { \
- D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
-#define CKPL(kind, st) \
- do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
- D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
-#else
-#define CK(x, st) ((void)0)
-#define CKPL(kind, st) ((void)0)
-#endif
-
-/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
-void dasm_put(Dst_DECL, int start, ...)
-{
- va_list ap;
- dasm_State *D = Dst_REF;
- dasm_ActList p = D->actionlist + start;
- dasm_Section *sec = D->section;
- int pos = sec->pos, ofs = sec->ofs;
- int *b;
-
- if (pos >= sec->epos) {
- DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
- sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
- sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
- sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
- }
-
- b = sec->rbuf;
- b[pos++] = start;
-
- va_start(ap, start);
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16) - 0xff00;
- if (action >= DASM__MAX) {
- ofs += 4;
- } else {
- int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
- switch (action) {
- case DASM_STOP: goto stop;
- case DASM_SECTION:
- n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
- D->section = &D->sections[n]; goto stop;
- case DASM_ESC: p++; ofs += 4; break;
- case DASM_REL_EXT: break;
- case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
- case DASM_REL_LG:
- n = (ins & 2047) - 10; pl = D->lglabels + n;
- /* Bkwd rel or global. */
- if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
- pl += 10; n = *pl;
- if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
- goto linkrel;
- case DASM_REL_PC:
- pl = D->pclabels + n; CKPL(pc, PC);
- putrel:
- n = *pl;
- if (n < 0) { /* Label exists. Get label pos and store it. */
- b[pos] = -n;
- } else {
- linkrel:
- b[pos] = n; /* Else link to rel chain, anchored at label. */
- *pl = pos;
- }
- pos++;
- break;
- case DASM_LABEL_LG:
- pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
- case DASM_LABEL_PC:
- pl = D->pclabels + n; CKPL(pc, PC);
- putlabel:
- n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
- }
- *pl = -pos; /* Label exists now. */
- b[pos++] = ofs; /* Store pass1 offset estimate. */
- break;
- case DASM_IMM:
-#ifdef DASM_CHECKS
- CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
-#endif
- n >>= ((ins>>10)&31);
-#ifdef DASM_CHECKS
- if (ins & 0x8000)
- CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
- else
- CK((n>>((ins>>5)&31)) == 0, RANGE_I);
-#endif
- b[pos++] = n;
- break;
- }
- }
- }
-stop:
- va_end(ap);
- sec->pos = pos;
- sec->ofs = ofs;
-}
-#undef CK
-
-/* Pass 2: Link sections, shrink aligns, fix label offsets. */
-int dasm_link(Dst_DECL, size_t *szp)
-{
- dasm_State *D = Dst_REF;
- int secnum;
- int ofs = 0;
-
-#ifdef DASM_CHECKS
- *szp = 0;
- if (D->status != DASM_S_OK) return D->status;
- {
- int pc;
- for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
- if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
- }
-#endif
-
- { /* Handle globals not defined in this translation unit. */
- int idx;
- for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
- int n = D->lglabels[idx];
- /* Undefined label: Collapse rel chain and replace with marker (< 0). */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
- }
- }
-
- /* Combine all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->rbuf;
- int pos = DASM_SEC2POS(secnum);
- int lastpos = sec->pos;
-
- while (pos != lastpos) {
- dasm_ActList p = D->actionlist + b[pos++];
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16) - 0xff00;
- switch (action) {
- case DASM_STOP: case DASM_SECTION: goto stop;
- case DASM_ESC: p++; break;
- case DASM_REL_EXT: break;
- case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
- case DASM_REL_LG: case DASM_REL_PC: pos++; break;
- case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
- case DASM_IMM: pos++; break;
- }
- }
- stop: (void)0;
- }
- ofs += sec->ofs; /* Next section starts right after current section. */
- }
-
- D->codesize = ofs; /* Total size of all code sections */
- *szp = ofs;
- return DASM_S_OK;
-}
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
-#else
-#define CK(x, st) ((void)0)
-#endif
-
-/* Pass 3: Encode sections. */
-int dasm_encode(Dst_DECL, void *buffer)
-{
- dasm_State *D = Dst_REF;
- char *base = (char *)buffer;
- unsigned int *cp = (unsigned int *)buffer;
- int secnum;
-
- /* Encode all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->buf;
- int *endb = sec->rbuf + sec->pos;
-
- while (b != endb) {
- dasm_ActList p = D->actionlist + *b++;
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16) - 0xff00;
- int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
- switch (action) {
- case DASM_STOP: case DASM_SECTION: goto stop;
- case DASM_ESC: *cp++ = *p++; break;
- case DASM_REL_EXT:
- n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1);
- goto patchrel;
- case DASM_ALIGN:
- ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000;
- break;
- case DASM_REL_LG:
- CK(n >= 0, UNDEF_LG);
- case DASM_REL_PC:
- CK(n >= 0, UNDEF_PC);
- n = *DASM_POS2PTR(D, n);
- if (ins & 2048)
- n = n - (int)((char *)cp - base);
- else
- n = (n + (int)base) & 0x0fffffff;
- patchrel:
- CK((n & 3) == 0 &&
- ((n + ((ins & 2048) ? 0x00020000 : 0)) >>
- ((ins & 2048) ? 18 : 28)) == 0, RANGE_REL);
- cp[-1] |= ((n>>2) & ((ins & 2048) ? 0x0000ffff: 0x03ffffff));
- break;
- case DASM_LABEL_LG:
- ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
- break;
- case DASM_LABEL_PC: break;
- case DASM_IMM:
- cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31);
- break;
- default: *cp++ = ins; break;
- }
- }
- stop: (void)0;
- }
- }
-
- if (base + D->codesize != (char *)cp) /* Check for phase errors. */
- return DASM_S_PHASE;
- return DASM_S_OK;
-}
-#undef CK
-
-/* Get PC label offset. */
-int dasm_getpclabel(Dst_DECL, unsigned int pc)
-{
- dasm_State *D = Dst_REF;
- if (pc*sizeof(int) < D->pcsize) {
- int pos = D->pclabels[pc];
- if (pos < 0) return *DASM_POS2PTR(D, -pos);
- if (pos > 0) return -1; /* Undefined. */
- }
- return -2; /* Unused or out of range. */
-}
-
-#ifdef DASM_CHECKS
-/* Optional sanity checker to call between isolated encoding steps. */
-int dasm_checkstep(Dst_DECL, int secmatch)
-{
- dasm_State *D = Dst_REF;
- if (D->status == DASM_S_OK) {
- int i;
- for (i = 1; i <= 9; i++) {
- if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
- D->lglabels[i] = 0;
- }
- }
- if (D->status == DASM_S_OK && secmatch >= 0 &&
- D->section != &D->sections[secmatch])
- D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
- return D->status;
-}
-#endif
-
+/*
+** DynASM MIPS encoding engine.
+** Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "mips"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. */
+enum {
+ DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
+ /* The following actions need a buffer position. */
+ DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
+ /* The following actions also have an argument. */
+ DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
+ DASM__MAX
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_REL 0x15000000
+#define DASM_S_UNDEF_LG 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
+
+/* Action list type. */
+typedef const unsigned int *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16) - 0xff00;
+ if (action >= DASM__MAX) {
+ ofs += 4;
+ } else {
+ int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
+ switch (action) {
+ case DASM_STOP: goto stop;
+ case DASM_SECTION:
+ n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
+ D->section = &D->sections[n]; goto stop;
+ case DASM_ESC: p++; ofs += 4; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
+ case DASM_REL_LG:
+ n = (ins & 2047) - 10; pl = D->lglabels + n;
+ /* Bkwd rel or global. */
+ if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
+ pl += 10; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ break;
+ case DASM_LABEL_LG:
+ pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
+ }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_IMM:
+#ifdef DASM_CHECKS
+ CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
+#endif
+ n >>= ((ins>>10)&31);
+#ifdef DASM_CHECKS
+ if (ins & 0x8000)
+ CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
+ else
+ CK((n>>((ins>>5)&31)) == 0, RANGE_I);
+#endif
+ b[pos++] = n;
+ break;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16) - 0xff00;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: p++; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
+ case DASM_REL_LG: case DASM_REL_PC: pos++; break;
+ case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
+ case DASM_IMM: pos++; break;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
+#else
+#define CK(x, st) ((void)0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ char *base = (char *)buffer;
+ unsigned int *cp = (unsigned int *)buffer;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16) - 0xff00;
+ int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: *cp++ = *p++; break;
+ case DASM_REL_EXT:
+ n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1);
+ goto patchrel;
+ case DASM_ALIGN:
+ ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000;
+ break;
+ case DASM_REL_LG:
+ CK(n >= 0, UNDEF_LG);
+ case DASM_REL_PC:
+ CK(n >= 0, UNDEF_PC);
+ n = *DASM_POS2PTR(D, n);
+ if (ins & 2048)
+ n = n - (int)((char *)cp - base);
+ else
+ n = (n + (int)base) & 0x0fffffff;
+ patchrel:
+ CK((n & 3) == 0 &&
+ ((n + ((ins & 2048) ? 0x00020000 : 0)) >>
+ ((ins & 2048) ? 18 : 28)) == 0, RANGE_REL);
+ cp[-1] |= ((n>>2) & ((ins & 2048) ? 0x0000ffff: 0x03ffffff));
+ break;
+ case DASM_LABEL_LG:
+ ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
+ break;
+ case DASM_LABEL_PC: break;
+ case DASM_IMM:
+ cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31);
+ break;
+ default: *cp++ = ins; break;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != (char *)cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+#undef CK
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
+ return D->status;
+}
+#endif
+
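The DynASM Lua modules and the C engine in dasm_mips.h above agree on one 32-bit action-word format: waction() packs 0xff000000 + action*0x10000 + payload (raw words that would collide with this range are escaped with ESC by wputw() first), and the dasm_put() loop recovers the action number as (ins >> 16) - 0xff00, treating everything below 0xff000000 as a plain instruction word. A small Lua sketch of that round trip; the action numbers follow the action_names list, and the payload layout varies per action (IMM, for example, packs scale/bits/shift fields).

local bit = bit or require("bit")
local band, shr = bit.band, bit.rshift

local DASM_IMM = 9   -- position of "IMM" in action_names (0-based), same as the C enum

local function pack_action(action, val)   -- what waction()/wputxw() emit
  return 0xff000000 + action * 0x10000 + val
end

local function decode(ins)                -- what the dasm_put() pass-1 loop sees
  local action = shr(ins, 16) - 0xff00
  -- (the C code relies on unsigned wraparound instead of a sign check)
  if action < 0 or action >= 10 then return "plain instruction word" end
  return action, band(ins, 0xffff)
end

print(decode(pack_action(DASM_IMM, 16*32)))  --> 9  512   (bits=16, shift=0, unsigned)
print(decode(0x8c000000))                    --> plain instruction word  (an lw encoding)
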
diff --git a/3rdparty/lua/dynasm/dasm_mips.lua b/3rdparty/lua/dynasm/dasm_mips.lua
index bc05d5a..02ab9d5 100644
--- a/3rdparty/lua/dynasm/dasm_mips.lua
+++ b/3rdparty/lua/dynasm/dasm_mips.lua
@@ -1,953 +1,953 @@
-------------------------------------------------------------------------------
--- DynASM MIPS module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- See dynasm.lua for full copyright notice.
-------------------------------------------------------------------------------
-
--- Module information:
-local _info = {
- arch = "mips",
- description = "DynASM MIPS module",
- version = "1.3.0",
- vernum = 10300,
- release = "2012-01-23",
- author = "Mike Pall",
- license = "MIT",
-}
-
--- Exported glue functions for the arch-specific module.
-local _M = { _info = _info }
-
--- Cache library functions.
-local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
-local assert, setmetatable = assert, setmetatable
-local _s = string
-local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
-local match, gmatch = _s.match, _s.gmatch
-local concat, sort = table.concat, table.sort
-local bit = bit or require("bit")
-local band, shl, sar, tohex = bit.band, bit.lshift, bit.arshift, bit.tohex
-
--- Inherited tables and callbacks.
-local g_opt, g_arch
-local wline, werror, wfatal, wwarn
-
--- Action name list.
--- CHECK: Keep this in sync with the C code!
-local action_names = {
- "STOP", "SECTION", "ESC", "REL_EXT",
- "ALIGN", "REL_LG", "LABEL_LG",
- "REL_PC", "LABEL_PC", "IMM",
-}
-
--- Maximum number of section buffer positions for dasm_put().
--- CHECK: Keep this in sync with the C code!
-local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
-
--- Action name -> action number.
-local map_action = {}
-for n,name in ipairs(action_names) do
- map_action[name] = n-1
-end
-
--- Action list buffer.
-local actlist = {}
-
--- Argument list for next dasm_put(). Start with offset 0 into action list.
-local actargs = { 0 }
-
--- Current number of section buffer positions for dasm_put().
-local secpos = 1
-
-------------------------------------------------------------------------------
-
--- Dump action names and numbers.
-local function dumpactions(out)
- out:write("DynASM encoding engine action codes:\n")
- for n,name in ipairs(action_names) do
- local num = map_action[name]
- out:write(format(" %-10s %02X %d\n", name, num, num))
- end
- out:write("\n")
-end
-
--- Write action list buffer as a huge static C array.
-local function writeactions(out, name)
- local nn = #actlist
- if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
- out:write("static const unsigned int ", name, "[", nn, "] = {\n")
- for i = 1,nn-1 do
- assert(out:write("0x", tohex(actlist[i]), ",\n"))
- end
- assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
-end
-
-------------------------------------------------------------------------------
-
--- Add word to action list.
-local function wputxw(n)
- assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
- actlist[#actlist+1] = n
-end
-
--- Add action to list with optional arg. Advance buffer pos, too.
-local function waction(action, val, a, num)
- local w = assert(map_action[action], "bad action name `"..action.."'")
- wputxw(0xff000000 + w * 0x10000 + (val or 0))
- if a then actargs[#actargs+1] = a end
- if a or num then secpos = secpos + (num or 1) end
-end
-
--- Flush action list (intervening C code or buffer pos overflow).
-local function wflush(term)
- if #actlist == actargs[1] then return end -- Nothing to flush.
- if not term then waction("STOP") end -- Terminate action list.
- wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
- actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
- secpos = 1 -- The actionlist offset occupies a buffer position, too.
-end
-
--- Put escaped word.
-local function wputw(n)
- if n >= 0xff000000 then waction("ESC") end
- wputxw(n)
-end
-
--- Reserve position for word.
-local function wpos()
- local pos = #actlist+1
- actlist[pos] = ""
- return pos
-end
-
--- Store word to reserved position.
-local function wputpos(pos, n)
- assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
- actlist[pos] = n
-end
-
-------------------------------------------------------------------------------
-
--- Global label name -> global label number. With auto assignment on 1st use.
-local next_global = 20
-local map_global = setmetatable({}, { __index = function(t, name)
- if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
- local n = next_global
- if n > 2047 then werror("too many global labels") end
- next_global = n + 1
- t[name] = n
- return n
-end})
-
--- Dump global labels.
-local function dumpglobals(out, lvl)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("Global labels:\n")
- for i=20,next_global-1 do
- out:write(format(" %s\n", t[i]))
- end
- out:write("\n")
-end
-
--- Write global label enum.
-local function writeglobals(out, prefix)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("enum {\n")
- for i=20,next_global-1 do
- out:write(" ", prefix, t[i], ",\n")
- end
- out:write(" ", prefix, "_MAX\n};\n")
-end
-
--- Write global label names.
-local function writeglobalnames(out, name)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("static const char *const ", name, "[] = {\n")
- for i=20,next_global-1 do
- out:write(" \"", t[i], "\",\n")
- end
- out:write(" (const char *)0\n};\n")
-end
-
-------------------------------------------------------------------------------
-
--- Extern label name -> extern label number. With auto assignment on 1st use.
-local next_extern = 0
-local map_extern_ = {}
-local map_extern = setmetatable({}, { __index = function(t, name)
- -- No restrictions on the name for now.
- local n = next_extern
- if n > 2047 then werror("too many extern labels") end
- next_extern = n + 1
- t[name] = n
- map_extern_[n] = name
- return n
-end})
-
--- Dump extern labels.
-local function dumpexterns(out, lvl)
- out:write("Extern labels:\n")
- for i=0,next_extern-1 do
- out:write(format(" %s\n", map_extern_[i]))
- end
- out:write("\n")
-end
-
--- Write extern label names.
-local function writeexternnames(out, name)
- out:write("static const char *const ", name, "[] = {\n")
- for i=0,next_extern-1 do
- out:write(" \"", map_extern_[i], "\",\n")
- end
- out:write(" (const char *)0\n};\n")
-end
-
-------------------------------------------------------------------------------
-
--- Arch-specific maps.
-local map_archdef = { sp="r29", ra="r31" } -- Ext. register name -> int. name.
-
-local map_type = {} -- Type name -> { ctype, reg }
-local ctypenum = 0 -- Type number (for Dt... macros).
-
--- Reverse defines for registers.
-function _M.revdef(s)
- if s == "r29" then return "sp"
- elseif s == "r31" then return "ra" end
- return s
-end
-
-------------------------------------------------------------------------------
-
--- Template strings for MIPS instructions.
-local map_op = {
- -- First-level opcodes.
- j_1 = "08000000J",
- jal_1 = "0c000000J",
- b_1 = "10000000B",
- beqz_2 = "10000000SB",
- beq_3 = "10000000STB",
- bnez_2 = "14000000SB",
- bne_3 = "14000000STB",
- blez_2 = "18000000SB",
- bgtz_2 = "1c000000SB",
- addi_3 = "20000000TSI",
- li_2 = "24000000TI",
- addiu_3 = "24000000TSI",
- slti_3 = "28000000TSI",
- sltiu_3 = "2c000000TSI",
- andi_3 = "30000000TSU",
- lu_2 = "34000000TU",
- ori_3 = "34000000TSU",
- xori_3 = "38000000TSU",
- lui_2 = "3c000000TU",
- beqzl_2 = "50000000SB",
- beql_3 = "50000000STB",
- bnezl_2 = "54000000SB",
- bnel_3 = "54000000STB",
- blezl_2 = "58000000SB",
- bgtzl_2 = "5c000000SB",
- lb_2 = "80000000TO",
- lh_2 = "84000000TO",
- lwl_2 = "88000000TO",
- lw_2 = "8c000000TO",
- lbu_2 = "90000000TO",
- lhu_2 = "94000000TO",
- lwr_2 = "98000000TO",
- sb_2 = "a0000000TO",
- sh_2 = "a4000000TO",
- swl_2 = "a8000000TO",
- sw_2 = "ac000000TO",
- swr_2 = "b8000000TO",
- cache_2 = "bc000000NO",
- ll_2 = "c0000000TO",
- lwc1_2 = "c4000000HO",
- pref_2 = "cc000000NO",
- ldc1_2 = "d4000000HO",
- sc_2 = "e0000000TO",
- swc1_2 = "e4000000HO",
- sdc1_2 = "f4000000HO",
-
- -- Opcode SPECIAL.
- nop_0 = "00000000",
- sll_3 = "00000000DTA",
- movf_2 = "00000001DS",
- movf_3 = "00000001DSC",
- movt_2 = "00010001DS",
- movt_3 = "00010001DSC",
- srl_3 = "00000002DTA",
- rotr_3 = "00200002DTA",
- sra_3 = "00000003DTA",
- sllv_3 = "00000004DTS",
- srlv_3 = "00000006DTS",
- rotrv_3 = "00000046DTS",
- srav_3 = "00000007DTS",
- jr_1 = "00000008S",
- jalr_1 = "0000f809S",
- jalr_2 = "00000009DS",
- movz_3 = "0000000aDST",
- movn_3 = "0000000bDST",
- syscall_0 = "0000000c",
- syscall_1 = "0000000cY",
- break_0 = "0000000d",
- break_1 = "0000000dY",
- sync_0 = "0000000f",
- mfhi_1 = "00000010D",
- mthi_1 = "00000011S",
- mflo_1 = "00000012D",
- mtlo_1 = "00000013S",
- mult_2 = "00000018ST",
- multu_2 = "00000019ST",
- div_2 = "0000001aST",
- divu_2 = "0000001bST",
- add_3 = "00000020DST",
- move_2 = "00000021DS",
- addu_3 = "00000021DST",
- sub_3 = "00000022DST",
- negu_2 = "00000023DT",
- subu_3 = "00000023DST",
- and_3 = "00000024DST",
- or_3 = "00000025DST",
- xor_3 = "00000026DST",
- not_2 = "00000027DS",
- nor_3 = "00000027DST",
- slt_3 = "0000002aDST",
- sltu_3 = "0000002bDST",
- tge_2 = "00000030ST",
- tge_3 = "00000030STZ",
- tgeu_2 = "00000031ST",
- tgeu_3 = "00000031STZ",
- tlt_2 = "00000032ST",
- tlt_3 = "00000032STZ",
- tltu_2 = "00000033ST",
- tltu_3 = "00000033STZ",
- teq_2 = "00000034ST",
- teq_3 = "00000034STZ",
- tne_2 = "00000036ST",
- tne_3 = "00000036STZ",
-
- -- Opcode REGIMM.
- bltz_2 = "04000000SB",
- bgez_2 = "04010000SB",
- bltzl_2 = "04020000SB",
- bgezl_2 = "04030000SB",
- tgei_2 = "04080000SI",
- tgeiu_2 = "04090000SI",
- tlti_2 = "040a0000SI",
- tltiu_2 = "040b0000SI",
- teqi_2 = "040c0000SI",
- tnei_2 = "040e0000SI",
- bltzal_2 = "04100000SB",
- bal_1 = "04110000B",
- bgezal_2 = "04110000SB",
- bltzall_2 = "04120000SB",
- bgezall_2 = "04130000SB",
- synci_1 = "041f0000O",
-
- -- Opcode SPECIAL2.
- madd_2 = "70000000ST",
- maddu_2 = "70000001ST",
- mul_3 = "70000002DST",
- msub_2 = "70000004ST",
- msubu_2 = "70000005ST",
- clz_2 = "70000020DS=",
- clo_2 = "70000021DS=",
- sdbbp_0 = "7000003f",
- sdbbp_1 = "7000003fY",
-
- -- Opcode SPECIAL3.
- ext_4 = "7c000000TSAM", -- Note: last arg is msbd = size-1
- ins_4 = "7c000004TSAM", -- Note: last arg is msb = pos+size-1
- wsbh_2 = "7c0000a0DT",
- seb_2 = "7c000420DT",
- seh_2 = "7c000620DT",
- rdhwr_2 = "7c00003bTD",
-
- -- Opcode COP0.
- mfc0_2 = "40000000TD",
- mfc0_3 = "40000000TDW",
- mtc0_2 = "40800000TD",
- mtc0_3 = "40800000TDW",
- rdpgpr_2 = "41400000DT",
- di_0 = "41606000",
- di_1 = "41606000T",
- ei_0 = "41606020",
- ei_1 = "41606020T",
- wrpgpr_2 = "41c00000DT",
- tlbr_0 = "42000001",
- tlbwi_0 = "42000002",
- tlbwr_0 = "42000006",
- tlbp_0 = "42000008",
- eret_0 = "42000018",
- deret_0 = "4200001f",
- wait_0 = "42000020",
-
- -- Opcode COP1.
- mfc1_2 = "44000000TG",
- cfc1_2 = "44400000TG",
- mfhc1_2 = "44600000TG",
- mtc1_2 = "44800000TG",
- ctc1_2 = "44c00000TG",
- mthc1_2 = "44e00000TG",
-
- bc1f_1 = "45000000B",
- bc1f_2 = "45000000CB",
- bc1t_1 = "45010000B",
- bc1t_2 = "45010000CB",
- bc1fl_1 = "45020000B",
- bc1fl_2 = "45020000CB",
- bc1tl_1 = "45030000B",
- bc1tl_2 = "45030000CB",
-
- ["add.s_3"] = "46000000FGH",
- ["sub.s_3"] = "46000001FGH",
- ["mul.s_3"] = "46000002FGH",
- ["div.s_3"] = "46000003FGH",
- ["sqrt.s_2"] = "46000004FG",
- ["abs.s_2"] = "46000005FG",
- ["mov.s_2"] = "46000006FG",
- ["neg.s_2"] = "46000007FG",
- ["round.l.s_2"] = "46000008FG",
- ["trunc.l.s_2"] = "46000009FG",
- ["ceil.l.s_2"] = "4600000aFG",
- ["floor.l.s_2"] = "4600000bFG",
- ["round.w.s_2"] = "4600000cFG",
- ["trunc.w.s_2"] = "4600000dFG",
- ["ceil.w.s_2"] = "4600000eFG",
- ["floor.w.s_2"] = "4600000fFG",
- ["movf.s_2"] = "46000011FG",
- ["movf.s_3"] = "46000011FGC",
- ["movt.s_2"] = "46010011FG",
- ["movt.s_3"] = "46010011FGC",
- ["movz.s_3"] = "46000012FGT",
- ["movn.s_3"] = "46000013FGT",
- ["recip.s_2"] = "46000015FG",
- ["rsqrt.s_2"] = "46000016FG",
- ["cvt.d.s_2"] = "46000021FG",
- ["cvt.w.s_2"] = "46000024FG",
- ["cvt.l.s_2"] = "46000025FG",
- ["cvt.ps.s_3"] = "46000026FGH",
- ["c.f.s_2"] = "46000030GH",
- ["c.f.s_3"] = "46000030VGH",
- ["c.un.s_2"] = "46000031GH",
- ["c.un.s_3"] = "46000031VGH",
- ["c.eq.s_2"] = "46000032GH",
- ["c.eq.s_3"] = "46000032VGH",
- ["c.ueq.s_2"] = "46000033GH",
- ["c.ueq.s_3"] = "46000033VGH",
- ["c.olt.s_2"] = "46000034GH",
- ["c.olt.s_3"] = "46000034VGH",
- ["c.ult.s_2"] = "46000035GH",
- ["c.ult.s_3"] = "46000035VGH",
- ["c.ole.s_2"] = "46000036GH",
- ["c.ole.s_3"] = "46000036VGH",
- ["c.ule.s_2"] = "46000037GH",
- ["c.ule.s_3"] = "46000037VGH",
- ["c.sf.s_2"] = "46000038GH",
- ["c.sf.s_3"] = "46000038VGH",
- ["c.ngle.s_2"] = "46000039GH",
- ["c.ngle.s_3"] = "46000039VGH",
- ["c.seq.s_2"] = "4600003aGH",
- ["c.seq.s_3"] = "4600003aVGH",
- ["c.ngl.s_2"] = "4600003bGH",
- ["c.ngl.s_3"] = "4600003bVGH",
- ["c.lt.s_2"] = "4600003cGH",
- ["c.lt.s_3"] = "4600003cVGH",
- ["c.nge.s_2"] = "4600003dGH",
- ["c.nge.s_3"] = "4600003dVGH",
- ["c.le.s_2"] = "4600003eGH",
- ["c.le.s_3"] = "4600003eVGH",
- ["c.ngt.s_2"] = "4600003fGH",
- ["c.ngt.s_3"] = "4600003fVGH",
-
- ["add.d_3"] = "46200000FGH",
- ["sub.d_3"] = "46200001FGH",
- ["mul.d_3"] = "46200002FGH",
- ["div.d_3"] = "46200003FGH",
- ["sqrt.d_2"] = "46200004FG",
- ["abs.d_2"] = "46200005FG",
- ["mov.d_2"] = "46200006FG",
- ["neg.d_2"] = "46200007FG",
- ["round.l.d_2"] = "46200008FG",
- ["trunc.l.d_2"] = "46200009FG",
- ["ceil.l.d_2"] = "4620000aFG",
- ["floor.l.d_2"] = "4620000bFG",
- ["round.w.d_2"] = "4620000cFG",
- ["trunc.w.d_2"] = "4620000dFG",
- ["ceil.w.d_2"] = "4620000eFG",
- ["floor.w.d_2"] = "4620000fFG",
- ["movf.d_2"] = "46200011FG",
- ["movf.d_3"] = "46200011FGC",
- ["movt.d_2"] = "46210011FG",
- ["movt.d_3"] = "46210011FGC",
- ["movz.d_3"] = "46200012FGT",
- ["movn.d_3"] = "46200013FGT",
- ["recip.d_2"] = "46200015FG",
- ["rsqrt.d_2"] = "46200016FG",
- ["cvt.s.d_2"] = "46200020FG",
- ["cvt.w.d_2"] = "46200024FG",
- ["cvt.l.d_2"] = "46200025FG",
- ["c.f.d_2"] = "46200030GH",
- ["c.f.d_3"] = "46200030VGH",
- ["c.un.d_2"] = "46200031GH",
- ["c.un.d_3"] = "46200031VGH",
- ["c.eq.d_2"] = "46200032GH",
- ["c.eq.d_3"] = "46200032VGH",
- ["c.ueq.d_2"] = "46200033GH",
- ["c.ueq.d_3"] = "46200033VGH",
- ["c.olt.d_2"] = "46200034GH",
- ["c.olt.d_3"] = "46200034VGH",
- ["c.ult.d_2"] = "46200035GH",
- ["c.ult.d_3"] = "46200035VGH",
- ["c.ole.d_2"] = "46200036GH",
- ["c.ole.d_3"] = "46200036VGH",
- ["c.ule.d_2"] = "46200037GH",
- ["c.ule.d_3"] = "46200037VGH",
- ["c.sf.d_2"] = "46200038GH",
- ["c.sf.d_3"] = "46200038VGH",
- ["c.ngle.d_2"] = "46200039GH",
- ["c.ngle.d_3"] = "46200039VGH",
- ["c.seq.d_2"] = "4620003aGH",
- ["c.seq.d_3"] = "4620003aVGH",
- ["c.ngl.d_2"] = "4620003bGH",
- ["c.ngl.d_3"] = "4620003bVGH",
- ["c.lt.d_2"] = "4620003cGH",
- ["c.lt.d_3"] = "4620003cVGH",
- ["c.nge.d_2"] = "4620003dGH",
- ["c.nge.d_3"] = "4620003dVGH",
- ["c.le.d_2"] = "4620003eGH",
- ["c.le.d_3"] = "4620003eVGH",
- ["c.ngt.d_2"] = "4620003fGH",
- ["c.ngt.d_3"] = "4620003fVGH",
-
- ["add.ps_3"] = "46c00000FGH",
- ["sub.ps_3"] = "46c00001FGH",
- ["mul.ps_3"] = "46c00002FGH",
- ["abs.ps_2"] = "46c00005FG",
- ["mov.ps_2"] = "46c00006FG",
- ["neg.ps_2"] = "46c00007FG",
- ["movf.ps_2"] = "46c00011FG",
- ["movf.ps_3"] = "46c00011FGC",
- ["movt.ps_2"] = "46c10011FG",
- ["movt.ps_3"] = "46c10011FGC",
- ["movz.ps_3"] = "46c00012FGT",
- ["movn.ps_3"] = "46c00013FGT",
- ["cvt.s.pu_2"] = "46c00020FG",
- ["cvt.s.pl_2"] = "46c00028FG",
- ["pll.ps_3"] = "46c0002cFGH",
- ["plu.ps_3"] = "46c0002dFGH",
- ["pul.ps_3"] = "46c0002eFGH",
- ["puu.ps_3"] = "46c0002fFGH",
- ["c.f.ps_2"] = "46c00030GH",
- ["c.f.ps_3"] = "46c00030VGH",
- ["c.un.ps_2"] = "46c00031GH",
- ["c.un.ps_3"] = "46c00031VGH",
- ["c.eq.ps_2"] = "46c00032GH",
- ["c.eq.ps_3"] = "46c00032VGH",
- ["c.ueq.ps_2"] = "46c00033GH",
- ["c.ueq.ps_3"] = "46c00033VGH",
- ["c.olt.ps_2"] = "46c00034GH",
- ["c.olt.ps_3"] = "46c00034VGH",
- ["c.ult.ps_2"] = "46c00035GH",
- ["c.ult.ps_3"] = "46c00035VGH",
- ["c.ole.ps_2"] = "46c00036GH",
- ["c.ole.ps_3"] = "46c00036VGH",
- ["c.ule.ps_2"] = "46c00037GH",
- ["c.ule.ps_3"] = "46c00037VGH",
- ["c.sf.ps_2"] = "46c00038GH",
- ["c.sf.ps_3"] = "46c00038VGH",
- ["c.ngle.ps_2"] = "46c00039GH",
- ["c.ngle.ps_3"] = "46c00039VGH",
- ["c.seq.ps_2"] = "46c0003aGH",
- ["c.seq.ps_3"] = "46c0003aVGH",
- ["c.ngl.ps_2"] = "46c0003bGH",
- ["c.ngl.ps_3"] = "46c0003bVGH",
- ["c.lt.ps_2"] = "46c0003cGH",
- ["c.lt.ps_3"] = "46c0003cVGH",
- ["c.nge.ps_2"] = "46c0003dGH",
- ["c.nge.ps_3"] = "46c0003dVGH",
- ["c.le.ps_2"] = "46c0003eGH",
- ["c.le.ps_3"] = "46c0003eVGH",
- ["c.ngt.ps_2"] = "46c0003fGH",
- ["c.ngt.ps_3"] = "46c0003fVGH",
-
- ["cvt.s.w_2"] = "46800020FG",
- ["cvt.d.w_2"] = "46800021FG",
-
- ["cvt.s.l_2"] = "46a00020FG",
- ["cvt.d.l_2"] = "46a00021FG",
-
- -- Opcode COP1X.
- lwxc1_2 = "4c000000FX",
- ldxc1_2 = "4c000001FX",
- luxc1_2 = "4c000005FX",
- swxc1_2 = "4c000008FX",
- sdxc1_2 = "4c000009FX",
- suxc1_2 = "4c00000dFX",
- prefx_2 = "4c00000fMX",
- ["alnv.ps_4"] = "4c00001eFGHS",
- ["madd.s_4"] = "4c000020FRGH",
- ["madd.d_4"] = "4c000021FRGH",
- ["madd.ps_4"] = "4c000026FRGH",
- ["msub.s_4"] = "4c000028FRGH",
- ["msub.d_4"] = "4c000029FRGH",
- ["msub.ps_4"] = "4c00002eFRGH",
- ["nmadd.s_4"] = "4c000030FRGH",
- ["nmadd.d_4"] = "4c000031FRGH",
- ["nmadd.ps_4"] = "4c000036FRGH",
- ["nmsub.s_4"] = "4c000038FRGH",
- ["nmsub.d_4"] = "4c000039FRGH",
- ["nmsub.ps_4"] = "4c00003eFRGH",
-}
-
-------------------------------------------------------------------------------
-
-local function parse_gpr(expr)
- local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$")
- local tp = map_type[tname or expr]
- if tp then
- local reg = ovreg or tp.reg
- if not reg then
- werror("type `"..(tname or expr).."' needs a register override")
- end
- expr = reg
- end
- local r = match(expr, "^r([1-3]?[0-9])$")
- if r then
- r = tonumber(r)
- if r <= 31 then return r, tp end
- end
- werror("bad register name `"..expr.."'")
-end
-
-local function parse_fpr(expr)
- local r = match(expr, "^f([1-3]?[0-9])$")
- if r then
- r = tonumber(r)
- if r <= 31 then return r end
- end
- werror("bad register name `"..expr.."'")
-end
-
-local function parse_imm(imm, bits, shift, scale, signed)
- local n = tonumber(imm)
- if n then
- local m = sar(n, scale)
- if shl(m, scale) == n then
- if signed then
- local s = sar(m, bits-1)
- if s == 0 then return shl(m, shift)
- elseif s == -1 then return shl(m + shl(1, bits), shift) end
- else
- if sar(m, bits) == 0 then return shl(m, shift) end
- end
- end
- werror("out of range immediate `"..imm.."'")
- elseif match(imm, "^[rf]([1-3]?[0-9])$") or
- match(imm, "^([%w_]+):([rf][1-3]?[0-9])$") then
- werror("expected immediate operand, got register")
- else
- waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
- return 0
- end
-end
-
-local function parse_disp(disp)
- local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
- if imm then
- local r = shl(parse_gpr(reg), 21)
- local extname = match(imm, "^extern%s+(%S+)$")
- if extname then
- waction("REL_EXT", map_extern[extname], nil, 1)
- return r
- else
- return r + parse_imm(imm, 16, 0, 0, true)
- end
- end
- local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
- if reg and tailr ~= "" then
- local r, tp = parse_gpr(reg)
- if tp then
- waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr))
- return shl(r, 21)
- end
- end
- werror("bad displacement `"..disp.."'")
-end
-
-local function parse_index(idx)
- local rt, rs = match(idx, "^(.*)%(([%w_:]+)%)$")
- if rt then
- rt = parse_gpr(rt)
- rs = parse_gpr(rs)
- return shl(rt, 16) + shl(rs, 21)
- end
- werror("bad index `"..idx.."'")
-end
-
-local function parse_label(label, def)
- local prefix = sub(label, 1, 2)
- -- =>label (pc label reference)
- if prefix == "=>" then
- return "PC", 0, sub(label, 3)
- end
- -- ->name (global label reference)
- if prefix == "->" then
- return "LG", map_global[sub(label, 3)]
- end
- if def then
- -- [1-9] (local label definition)
- if match(label, "^[1-9]$") then
- return "LG", 10+tonumber(label)
- end
- else
- -- [<>][1-9] (local label reference)
- local dir, lnum = match(label, "^([<>])([1-9])$")
- if dir then -- Fwd: 1-9, Bkwd: 11-19.
- return "LG", lnum + (dir == ">" and 0 or 10)
- end
- -- extern label (extern label reference)
- local extname = match(label, "^extern%s+(%S+)$")
- if extname then
- return "EXT", map_extern[extname]
- end
- end
- werror("bad label `"..label.."'")
-end
-
-------------------------------------------------------------------------------
-
--- Handle opcodes defined with template strings.
-map_op[".template__"] = function(params, template, nparams)
- if not params then return sub(template, 9) end
- local op = tonumber(sub(template, 1, 8), 16)
- local n = 1
-
- -- Limit number of section buffer positions used by a single dasm_put().
- -- A single opcode needs a maximum of 2 positions (ins/ext).
- if secpos+2 > maxsecpos then wflush() end
- local pos = wpos()
-
- -- Process each character.
- for p in gmatch(sub(template, 9), ".") do
- if p == "D" then
- op = op + shl(parse_gpr(params[n]), 11); n = n + 1
- elseif p == "T" then
- op = op + shl(parse_gpr(params[n]), 16); n = n + 1
- elseif p == "S" then
- op = op + shl(parse_gpr(params[n]), 21); n = n + 1
- elseif p == "F" then
- op = op + shl(parse_fpr(params[n]), 6); n = n + 1
- elseif p == "G" then
- op = op + shl(parse_fpr(params[n]), 11); n = n + 1
- elseif p == "H" then
- op = op + shl(parse_fpr(params[n]), 16); n = n + 1
- elseif p == "R" then
- op = op + shl(parse_fpr(params[n]), 21); n = n + 1
- elseif p == "I" then
- op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1
- elseif p == "U" then
- op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1
- elseif p == "O" then
- op = op + parse_disp(params[n]); n = n + 1
- elseif p == "X" then
- op = op + parse_index(params[n]); n = n + 1
- elseif p == "B" or p == "J" then
- local mode, n, s = parse_label(params[n], false)
- if p == "B" then n = n + 2048 end
- waction("REL_"..mode, n, s, 1)
- n = n + 1
- elseif p == "A" then
- op = op + parse_imm(params[n], 5, 6, 0, false); n = n + 1
- elseif p == "M" then
- op = op + parse_imm(params[n], 5, 11, 0, false); n = n + 1
- elseif p == "N" then
- op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1
- elseif p == "C" then
- op = op + parse_imm(params[n], 3, 18, 0, false); n = n + 1
- elseif p == "V" then
- op = op + parse_imm(params[n], 3, 8, 0, false); n = n + 1
- elseif p == "W" then
- op = op + parse_imm(params[n], 3, 0, 0, false); n = n + 1
- elseif p == "Y" then
- op = op + parse_imm(params[n], 20, 6, 0, false); n = n + 1
- elseif p == "Z" then
- op = op + parse_imm(params[n], 10, 6, 0, false); n = n + 1
- elseif p == "=" then
- op = op + shl(band(op, 0xf800), 5) -- Copy D to T for clz, clo.
- else
- assert(false)
- end
- end
- wputpos(pos, op)
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcode to mark the position where the action list is to be emitted.
-map_op[".actionlist_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeactions(out, name) end)
-end
-
--- Pseudo-opcode to mark the position where the global enum is to be emitted.
-map_op[".globals_1"] = function(params)
- if not params then return "prefix" end
- local prefix = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeglobals(out, prefix) end)
-end
-
--- Pseudo-opcode to mark the position where the global names are to be emitted.
-map_op[".globalnames_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeglobalnames(out, name) end)
-end
-
--- Pseudo-opcode to mark the position where the extern names are to be emitted.
-map_op[".externnames_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeexternnames(out, name) end)
-end
-
-------------------------------------------------------------------------------
-
--- Label pseudo-opcode (converted from trailing colon form).
-map_op[".label_1"] = function(params)
- if not params then return "[1-9] | ->global | =>pcexpr" end
- if secpos+1 > maxsecpos then wflush() end
- local mode, n, s = parse_label(params[1], true)
- if mode == "EXT" then werror("bad label definition") end
- waction("LABEL_"..mode, n, s, 1)
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcodes for data storage.
-map_op[".long_*"] = function(params)
- if not params then return "imm..." end
- for _,p in ipairs(params) do
- local n = tonumber(p)
- if not n then werror("bad immediate `"..p.."'") end
- if n < 0 then n = n + 2^32 end
- wputw(n)
- if secpos+2 > maxsecpos then wflush() end
- end
-end
-
--- Alignment pseudo-opcode.
-map_op[".align_1"] = function(params)
- if not params then return "numpow2" end
- if secpos+1 > maxsecpos then wflush() end
- local align = tonumber(params[1])
- if align then
- local x = align
- -- Must be a power of 2 in the range (2 ... 256).
- for i=1,8 do
- x = x / 2
- if x == 1 then
- waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
- return
- end
- end
- end
- werror("bad alignment")
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcode for (primitive) type definitions (map to C types).
-map_op[".type_3"] = function(params, nparams)
- if not params then
- return nparams == 2 and "name, ctype" or "name, ctype, reg"
- end
- local name, ctype, reg = params[1], params[2], params[3]
- if not match(name, "^[%a_][%w_]*$") then
- werror("bad type name `"..name.."'")
- end
- local tp = map_type[name]
- if tp then
- werror("duplicate type `"..name.."'")
- end
- -- Add #type to defines. A bit unclean to put it in map_archdef.
- map_archdef["#"..name] = "sizeof("..ctype..")"
- -- Add new type and emit shortcut define.
- local num = ctypenum + 1
- map_type[name] = {
- ctype = ctype,
- ctypefmt = format("Dt%X(%%s)", num),
- reg = reg,
- }
- wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
- ctypenum = num
-end
-map_op[".type_2"] = map_op[".type_3"]
-
--- Dump type definitions.
-local function dumptypes(out, lvl)
- local t = {}
- for name in pairs(map_type) do t[#t+1] = name end
- sort(t)
- out:write("Type definitions:\n")
- for _,name in ipairs(t) do
- local tp = map_type[name]
- local reg = tp.reg or ""
- out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
- end
- out:write("\n")
-end
-
-------------------------------------------------------------------------------
-
--- Set the current section.
-function _M.section(num)
- waction("SECTION", num)
- wflush(true) -- SECTION is a terminal action.
-end
-
-------------------------------------------------------------------------------
-
--- Dump architecture description.
-function _M.dumparch(out)
- out:write(format("DynASM %s version %s, released %s\n\n",
- _info.arch, _info.version, _info.release))
- dumpactions(out)
-end
-
--- Dump all user defined elements.
-function _M.dumpdef(out, lvl)
- dumptypes(out, lvl)
- dumpglobals(out, lvl)
- dumpexterns(out, lvl)
-end
-
-------------------------------------------------------------------------------
-
--- Pass callbacks from/to the DynASM core.
-function _M.passcb(wl, we, wf, ww)
- wline, werror, wfatal, wwarn = wl, we, wf, ww
- return wflush
-end
-
--- Setup the arch-specific module.
-function _M.setup(arch, opt)
- g_arch, g_opt = arch, opt
-end
-
--- Merge the core maps and the arch-specific maps.
-function _M.mergemaps(map_coreop, map_def)
- setmetatable(map_op, { __index = map_coreop })
- setmetatable(map_def, { __index = map_archdef })
- return map_op, map_def
-end
-
-return _M
-
-------------------------------------------------------------------------------
-
+------------------------------------------------------------------------------
+-- DynASM MIPS module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Module information:
+local _info = {
+ arch = "mips",
+ description = "DynASM MIPS module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2012-01-23",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, setmetatable = assert, setmetatable
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local match, gmatch = _s.match, _s.gmatch
+local concat, sort = table.concat, table.sort
+local bit = bit or require("bit")
+local band, shl, sar, tohex = bit.band, bit.lshift, bit.arshift, bit.tohex
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ "STOP", "SECTION", "ESC", "REL_EXT",
+ "ALIGN", "REL_LG", "LABEL_LG",
+ "REL_PC", "LABEL_PC", "IMM",
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number.
+local map_action = {}
+for n,name in ipairs(action_names) do
+ map_action[name] = n-1
+end
+
+-- Action list buffer.
+local actlist = {}
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
+ out:write("static const unsigned int ", name, "[", nn, "] = {\n")
+ for i = 1,nn-1 do
+ assert(out:write("0x", tohex(actlist[i]), ",\n"))
+ end
+ assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
+end
+
+------------------------------------------------------------------------------
+
+-- Add word to action list.
+local function wputxw(n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, val, a, num)
+ local w = assert(map_action[action], "bad action name `"..action.."'")
+ wputxw(0xff000000 + w * 0x10000 + (val or 0))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ if #actlist == actargs[1] then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped word.
+local function wputw(n)
+ if n >= 0xff000000 then waction("ESC") end
+ wputxw(n)
+end
+
+-- Reserve position for word.
+local function wpos()
+ local pos = #actlist+1
+ actlist[pos] = ""
+ return pos
+end
+
+-- Store word to reserved position.
+local function wputpos(pos, n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[pos] = n
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 20
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 2047 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=20,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=20,next_global-1 do
+ out:write(" ", prefix, t[i], ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=20,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = 0
+local map_extern_ = {}
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n > 2047 then werror("too many extern labels") end
+ next_extern = n + 1
+ t[name] = n
+ map_extern_[n] = name
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ out:write("Extern labels:\n")
+ for i=0,next_extern-1 do
+ out:write(format(" %s\n", map_extern_[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=0,next_extern-1 do
+ out:write(" \"", map_extern_[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+local map_archdef = { sp="r29", ra="r31" } -- Ext. register name -> int. name.
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for Dt... macros).
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ if s == "r29" then return "sp"
+ elseif s == "r31" then return "ra" end
+ return s
+end
+
+------------------------------------------------------------------------------
+
+-- Template strings for MIPS instructions.
+local map_op = {
+ -- First-level opcodes.
+ j_1 = "08000000J",
+ jal_1 = "0c000000J",
+ b_1 = "10000000B",
+ beqz_2 = "10000000SB",
+ beq_3 = "10000000STB",
+ bnez_2 = "14000000SB",
+ bne_3 = "14000000STB",
+ blez_2 = "18000000SB",
+ bgtz_2 = "1c000000SB",
+ addi_3 = "20000000TSI",
+ li_2 = "24000000TI",
+ addiu_3 = "24000000TSI",
+ slti_3 = "28000000TSI",
+ sltiu_3 = "2c000000TSI",
+ andi_3 = "30000000TSU",
+ lu_2 = "34000000TU",
+ ori_3 = "34000000TSU",
+ xori_3 = "38000000TSU",
+ lui_2 = "3c000000TU",
+ beqzl_2 = "50000000SB",
+ beql_3 = "50000000STB",
+ bnezl_2 = "54000000SB",
+ bnel_3 = "54000000STB",
+ blezl_2 = "58000000SB",
+ bgtzl_2 = "5c000000SB",
+ lb_2 = "80000000TO",
+ lh_2 = "84000000TO",
+ lwl_2 = "88000000TO",
+ lw_2 = "8c000000TO",
+ lbu_2 = "90000000TO",
+ lhu_2 = "94000000TO",
+ lwr_2 = "98000000TO",
+ sb_2 = "a0000000TO",
+ sh_2 = "a4000000TO",
+ swl_2 = "a8000000TO",
+ sw_2 = "ac000000TO",
+ swr_2 = "b8000000TO",
+ cache_2 = "bc000000NO",
+ ll_2 = "c0000000TO",
+ lwc1_2 = "c4000000HO",
+ pref_2 = "cc000000NO",
+ ldc1_2 = "d4000000HO",
+ sc_2 = "e0000000TO",
+ swc1_2 = "e4000000HO",
+ sdc1_2 = "f4000000HO",
+
+ -- Opcode SPECIAL.
+ nop_0 = "00000000",
+ sll_3 = "00000000DTA",
+ movf_2 = "00000001DS",
+ movf_3 = "00000001DSC",
+ movt_2 = "00010001DS",
+ movt_3 = "00010001DSC",
+ srl_3 = "00000002DTA",
+ rotr_3 = "00200002DTA",
+ sra_3 = "00000003DTA",
+ sllv_3 = "00000004DTS",
+ srlv_3 = "00000006DTS",
+ rotrv_3 = "00000046DTS",
+ srav_3 = "00000007DTS",
+ jr_1 = "00000008S",
+ jalr_1 = "0000f809S",
+ jalr_2 = "00000009DS",
+ movz_3 = "0000000aDST",
+ movn_3 = "0000000bDST",
+ syscall_0 = "0000000c",
+ syscall_1 = "0000000cY",
+ break_0 = "0000000d",
+ break_1 = "0000000dY",
+ sync_0 = "0000000f",
+ mfhi_1 = "00000010D",
+ mthi_1 = "00000011S",
+ mflo_1 = "00000012D",
+ mtlo_1 = "00000013S",
+ mult_2 = "00000018ST",
+ multu_2 = "00000019ST",
+ div_2 = "0000001aST",
+ divu_2 = "0000001bST",
+ add_3 = "00000020DST",
+ move_2 = "00000021DS",
+ addu_3 = "00000021DST",
+ sub_3 = "00000022DST",
+ negu_2 = "00000023DT",
+ subu_3 = "00000023DST",
+ and_3 = "00000024DST",
+ or_3 = "00000025DST",
+ xor_3 = "00000026DST",
+ not_2 = "00000027DS",
+ nor_3 = "00000027DST",
+ slt_3 = "0000002aDST",
+ sltu_3 = "0000002bDST",
+ tge_2 = "00000030ST",
+ tge_3 = "00000030STZ",
+ tgeu_2 = "00000031ST",
+ tgeu_3 = "00000031STZ",
+ tlt_2 = "00000032ST",
+ tlt_3 = "00000032STZ",
+ tltu_2 = "00000033ST",
+ tltu_3 = "00000033STZ",
+ teq_2 = "00000034ST",
+ teq_3 = "00000034STZ",
+ tne_2 = "00000036ST",
+ tne_3 = "00000036STZ",
+
+ -- Opcode REGIMM.
+ bltz_2 = "04000000SB",
+ bgez_2 = "04010000SB",
+ bltzl_2 = "04020000SB",
+ bgezl_2 = "04030000SB",
+ tgei_2 = "04080000SI",
+ tgeiu_2 = "04090000SI",
+ tlti_2 = "040a0000SI",
+ tltiu_2 = "040b0000SI",
+ teqi_2 = "040c0000SI",
+ tnei_2 = "040e0000SI",
+ bltzal_2 = "04100000SB",
+ bal_1 = "04110000B",
+ bgezal_2 = "04110000SB",
+ bltzall_2 = "04120000SB",
+ bgezall_2 = "04130000SB",
+ synci_1 = "041f0000O",
+
+ -- Opcode SPECIAL2.
+ madd_2 = "70000000ST",
+ maddu_2 = "70000001ST",
+ mul_3 = "70000002DST",
+ msub_2 = "70000004ST",
+ msubu_2 = "70000005ST",
+ clz_2 = "70000020DS=",
+ clo_2 = "70000021DS=",
+ sdbbp_0 = "7000003f",
+ sdbbp_1 = "7000003fY",
+
+ -- Opcode SPECIAL3.
+ ext_4 = "7c000000TSAM", -- Note: last arg is msbd = size-1
+ ins_4 = "7c000004TSAM", -- Note: last arg is msb = pos+size-1
+ wsbh_2 = "7c0000a0DT",
+ seb_2 = "7c000420DT",
+ seh_2 = "7c000620DT",
+ rdhwr_2 = "7c00003bTD",
+
+ -- Opcode COP0.
+ mfc0_2 = "40000000TD",
+ mfc0_3 = "40000000TDW",
+ mtc0_2 = "40800000TD",
+ mtc0_3 = "40800000TDW",
+ rdpgpr_2 = "41400000DT",
+ di_0 = "41606000",
+ di_1 = "41606000T",
+ ei_0 = "41606020",
+ ei_1 = "41606020T",
+ wrpgpr_2 = "41c00000DT",
+ tlbr_0 = "42000001",
+ tlbwi_0 = "42000002",
+ tlbwr_0 = "42000006",
+ tlbp_0 = "42000008",
+ eret_0 = "42000018",
+ deret_0 = "4200001f",
+ wait_0 = "42000020",
+
+ -- Opcode COP1.
+ mfc1_2 = "44000000TG",
+ cfc1_2 = "44400000TG",
+ mfhc1_2 = "44600000TG",
+ mtc1_2 = "44800000TG",
+ ctc1_2 = "44c00000TG",
+ mthc1_2 = "44e00000TG",
+
+ bc1f_1 = "45000000B",
+ bc1f_2 = "45000000CB",
+ bc1t_1 = "45010000B",
+ bc1t_2 = "45010000CB",
+ bc1fl_1 = "45020000B",
+ bc1fl_2 = "45020000CB",
+ bc1tl_1 = "45030000B",
+ bc1tl_2 = "45030000CB",
+
+ ["add.s_3"] = "46000000FGH",
+ ["sub.s_3"] = "46000001FGH",
+ ["mul.s_3"] = "46000002FGH",
+ ["div.s_3"] = "46000003FGH",
+ ["sqrt.s_2"] = "46000004FG",
+ ["abs.s_2"] = "46000005FG",
+ ["mov.s_2"] = "46000006FG",
+ ["neg.s_2"] = "46000007FG",
+ ["round.l.s_2"] = "46000008FG",
+ ["trunc.l.s_2"] = "46000009FG",
+ ["ceil.l.s_2"] = "4600000aFG",
+ ["floor.l.s_2"] = "4600000bFG",
+ ["round.w.s_2"] = "4600000cFG",
+ ["trunc.w.s_2"] = "4600000dFG",
+ ["ceil.w.s_2"] = "4600000eFG",
+ ["floor.w.s_2"] = "4600000fFG",
+ ["movf.s_2"] = "46000011FG",
+ ["movf.s_3"] = "46000011FGC",
+ ["movt.s_2"] = "46010011FG",
+ ["movt.s_3"] = "46010011FGC",
+ ["movz.s_3"] = "46000012FGT",
+ ["movn.s_3"] = "46000013FGT",
+ ["recip.s_2"] = "46000015FG",
+ ["rsqrt.s_2"] = "46000016FG",
+ ["cvt.d.s_2"] = "46000021FG",
+ ["cvt.w.s_2"] = "46000024FG",
+ ["cvt.l.s_2"] = "46000025FG",
+ ["cvt.ps.s_3"] = "46000026FGH",
+ ["c.f.s_2"] = "46000030GH",
+ ["c.f.s_3"] = "46000030VGH",
+ ["c.un.s_2"] = "46000031GH",
+ ["c.un.s_3"] = "46000031VGH",
+ ["c.eq.s_2"] = "46000032GH",
+ ["c.eq.s_3"] = "46000032VGH",
+ ["c.ueq.s_2"] = "46000033GH",
+ ["c.ueq.s_3"] = "46000033VGH",
+ ["c.olt.s_2"] = "46000034GH",
+ ["c.olt.s_3"] = "46000034VGH",
+ ["c.ult.s_2"] = "46000035GH",
+ ["c.ult.s_3"] = "46000035VGH",
+ ["c.ole.s_2"] = "46000036GH",
+ ["c.ole.s_3"] = "46000036VGH",
+ ["c.ule.s_2"] = "46000037GH",
+ ["c.ule.s_3"] = "46000037VGH",
+ ["c.sf.s_2"] = "46000038GH",
+ ["c.sf.s_3"] = "46000038VGH",
+ ["c.ngle.s_2"] = "46000039GH",
+ ["c.ngle.s_3"] = "46000039VGH",
+ ["c.seq.s_2"] = "4600003aGH",
+ ["c.seq.s_3"] = "4600003aVGH",
+ ["c.ngl.s_2"] = "4600003bGH",
+ ["c.ngl.s_3"] = "4600003bVGH",
+ ["c.lt.s_2"] = "4600003cGH",
+ ["c.lt.s_3"] = "4600003cVGH",
+ ["c.nge.s_2"] = "4600003dGH",
+ ["c.nge.s_3"] = "4600003dVGH",
+ ["c.le.s_2"] = "4600003eGH",
+ ["c.le.s_3"] = "4600003eVGH",
+ ["c.ngt.s_2"] = "4600003fGH",
+ ["c.ngt.s_3"] = "4600003fVGH",
+
+ ["add.d_3"] = "46200000FGH",
+ ["sub.d_3"] = "46200001FGH",
+ ["mul.d_3"] = "46200002FGH",
+ ["div.d_3"] = "46200003FGH",
+ ["sqrt.d_2"] = "46200004FG",
+ ["abs.d_2"] = "46200005FG",
+ ["mov.d_2"] = "46200006FG",
+ ["neg.d_2"] = "46200007FG",
+ ["round.l.d_2"] = "46200008FG",
+ ["trunc.l.d_2"] = "46200009FG",
+ ["ceil.l.d_2"] = "4620000aFG",
+ ["floor.l.d_2"] = "4620000bFG",
+ ["round.w.d_2"] = "4620000cFG",
+ ["trunc.w.d_2"] = "4620000dFG",
+ ["ceil.w.d_2"] = "4620000eFG",
+ ["floor.w.d_2"] = "4620000fFG",
+ ["movf.d_2"] = "46200011FG",
+ ["movf.d_3"] = "46200011FGC",
+ ["movt.d_2"] = "46210011FG",
+ ["movt.d_3"] = "46210011FGC",
+ ["movz.d_3"] = "46200012FGT",
+ ["movn.d_3"] = "46200013FGT",
+ ["recip.d_2"] = "46200015FG",
+ ["rsqrt.d_2"] = "46200016FG",
+ ["cvt.s.d_2"] = "46200020FG",
+ ["cvt.w.d_2"] = "46200024FG",
+ ["cvt.l.d_2"] = "46200025FG",
+ ["c.f.d_2"] = "46200030GH",
+ ["c.f.d_3"] = "46200030VGH",
+ ["c.un.d_2"] = "46200031GH",
+ ["c.un.d_3"] = "46200031VGH",
+ ["c.eq.d_2"] = "46200032GH",
+ ["c.eq.d_3"] = "46200032VGH",
+ ["c.ueq.d_2"] = "46200033GH",
+ ["c.ueq.d_3"] = "46200033VGH",
+ ["c.olt.d_2"] = "46200034GH",
+ ["c.olt.d_3"] = "46200034VGH",
+ ["c.ult.d_2"] = "46200035GH",
+ ["c.ult.d_3"] = "46200035VGH",
+ ["c.ole.d_2"] = "46200036GH",
+ ["c.ole.d_3"] = "46200036VGH",
+ ["c.ule.d_2"] = "46200037GH",
+ ["c.ule.d_3"] = "46200037VGH",
+ ["c.sf.d_2"] = "46200038GH",
+ ["c.sf.d_3"] = "46200038VGH",
+ ["c.ngle.d_2"] = "46200039GH",
+ ["c.ngle.d_3"] = "46200039VGH",
+ ["c.seq.d_2"] = "4620003aGH",
+ ["c.seq.d_3"] = "4620003aVGH",
+ ["c.ngl.d_2"] = "4620003bGH",
+ ["c.ngl.d_3"] = "4620003bVGH",
+ ["c.lt.d_2"] = "4620003cGH",
+ ["c.lt.d_3"] = "4620003cVGH",
+ ["c.nge.d_2"] = "4620003dGH",
+ ["c.nge.d_3"] = "4620003dVGH",
+ ["c.le.d_2"] = "4620003eGH",
+ ["c.le.d_3"] = "4620003eVGH",
+ ["c.ngt.d_2"] = "4620003fGH",
+ ["c.ngt.d_3"] = "4620003fVGH",
+
+ ["add.ps_3"] = "46c00000FGH",
+ ["sub.ps_3"] = "46c00001FGH",
+ ["mul.ps_3"] = "46c00002FGH",
+ ["abs.ps_2"] = "46c00005FG",
+ ["mov.ps_2"] = "46c00006FG",
+ ["neg.ps_2"] = "46c00007FG",
+ ["movf.ps_2"] = "46c00011FG",
+ ["movf.ps_3"] = "46c00011FGC",
+ ["movt.ps_2"] = "46c10011FG",
+ ["movt.ps_3"] = "46c10011FGC",
+ ["movz.ps_3"] = "46c00012FGT",
+ ["movn.ps_3"] = "46c00013FGT",
+ ["cvt.s.pu_2"] = "46c00020FG",
+ ["cvt.s.pl_2"] = "46c00028FG",
+ ["pll.ps_3"] = "46c0002cFGH",
+ ["plu.ps_3"] = "46c0002dFGH",
+ ["pul.ps_3"] = "46c0002eFGH",
+ ["puu.ps_3"] = "46c0002fFGH",
+ ["c.f.ps_2"] = "46c00030GH",
+ ["c.f.ps_3"] = "46c00030VGH",
+ ["c.un.ps_2"] = "46c00031GH",
+ ["c.un.ps_3"] = "46c00031VGH",
+ ["c.eq.ps_2"] = "46c00032GH",
+ ["c.eq.ps_3"] = "46c00032VGH",
+ ["c.ueq.ps_2"] = "46c00033GH",
+ ["c.ueq.ps_3"] = "46c00033VGH",
+ ["c.olt.ps_2"] = "46c00034GH",
+ ["c.olt.ps_3"] = "46c00034VGH",
+ ["c.ult.ps_2"] = "46c00035GH",
+ ["c.ult.ps_3"] = "46c00035VGH",
+ ["c.ole.ps_2"] = "46c00036GH",
+ ["c.ole.ps_3"] = "46c00036VGH",
+ ["c.ule.ps_2"] = "46c00037GH",
+ ["c.ule.ps_3"] = "46c00037VGH",
+ ["c.sf.ps_2"] = "46c00038GH",
+ ["c.sf.ps_3"] = "46c00038VGH",
+ ["c.ngle.ps_2"] = "46c00039GH",
+ ["c.ngle.ps_3"] = "46c00039VGH",
+ ["c.seq.ps_2"] = "46c0003aGH",
+ ["c.seq.ps_3"] = "46c0003aVGH",
+ ["c.ngl.ps_2"] = "46c0003bGH",
+ ["c.ngl.ps_3"] = "46c0003bVGH",
+ ["c.lt.ps_2"] = "46c0003cGH",
+ ["c.lt.ps_3"] = "46c0003cVGH",
+ ["c.nge.ps_2"] = "46c0003dGH",
+ ["c.nge.ps_3"] = "46c0003dVGH",
+ ["c.le.ps_2"] = "46c0003eGH",
+ ["c.le.ps_3"] = "46c0003eVGH",
+ ["c.ngt.ps_2"] = "46c0003fGH",
+ ["c.ngt.ps_3"] = "46c0003fVGH",
+
+ ["cvt.s.w_2"] = "46800020FG",
+ ["cvt.d.w_2"] = "46800021FG",
+
+ ["cvt.s.l_2"] = "46a00020FG",
+ ["cvt.d.l_2"] = "46a00021FG",
+
+ -- Opcode COP1X.
+ lwxc1_2 = "4c000000FX",
+ ldxc1_2 = "4c000001FX",
+ luxc1_2 = "4c000005FX",
+ swxc1_2 = "4c000008FX",
+ sdxc1_2 = "4c000009FX",
+ suxc1_2 = "4c00000dFX",
+ prefx_2 = "4c00000fMX",
+ ["alnv.ps_4"] = "4c00001eFGHS",
+ ["madd.s_4"] = "4c000020FRGH",
+ ["madd.d_4"] = "4c000021FRGH",
+ ["madd.ps_4"] = "4c000026FRGH",
+ ["msub.s_4"] = "4c000028FRGH",
+ ["msub.d_4"] = "4c000029FRGH",
+ ["msub.ps_4"] = "4c00002eFRGH",
+ ["nmadd.s_4"] = "4c000030FRGH",
+ ["nmadd.d_4"] = "4c000031FRGH",
+ ["nmadd.ps_4"] = "4c000036FRGH",
+ ["nmsub.s_4"] = "4c000038FRGH",
+ ["nmsub.d_4"] = "4c000039FRGH",
+ ["nmsub.ps_4"] = "4c00003eFRGH",
+}
+
+------------------------------------------------------------------------------
+
+local function parse_gpr(expr)
+ local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ if not reg then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ expr = reg
+ end
+ local r = match(expr, "^r([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r, tp end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_fpr(expr)
+ local r = match(expr, "^f([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_imm(imm, bits, shift, scale, signed)
+ local n = tonumber(imm)
+ if n then
+ local m = sar(n, scale)
+ if shl(m, scale) == n then
+ if signed then
+ local s = sar(m, bits-1)
+ if s == 0 then return shl(m, shift)
+ elseif s == -1 then return shl(m + shl(1, bits), shift) end
+ else
+ if sar(m, bits) == 0 then return shl(m, shift) end
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ elseif match(imm, "^[rf]([1-3]?[0-9])$") or
+ match(imm, "^([%w_]+):([rf][1-3]?[0-9])$") then
+ werror("expected immediate operand, got register")
+ else
+ waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
+ return 0
+ end
+end
+
+local function parse_disp(disp)
+ local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
+ if imm then
+ local r = shl(parse_gpr(reg), 21)
+ local extname = match(imm, "^extern%s+(%S+)$")
+ if extname then
+ waction("REL_EXT", map_extern[extname], nil, 1)
+ return r
+ else
+ return r + parse_imm(imm, 16, 0, 0, true)
+ end
+ end
+ local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local r, tp = parse_gpr(reg)
+ if tp then
+ waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr))
+ return shl(r, 21)
+ end
+ end
+ werror("bad displacement `"..disp.."'")
+end
+
+local function parse_index(idx)
+ local rt, rs = match(idx, "^(.*)%(([%w_:]+)%)$")
+ if rt then
+ rt = parse_gpr(rt)
+ rs = parse_gpr(rs)
+ return shl(rt, 16) + shl(rs, 21)
+ end
+ werror("bad index `"..idx.."'")
+end
+
+local function parse_label(label, def)
+ local prefix = sub(label, 1, 2)
+ -- =>label (pc label reference)
+ if prefix == "=>" then
+ return "PC", 0, sub(label, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "LG", map_global[sub(label, 3)]
+ end
+ if def then
+ -- [1-9] (local label definition)
+ if match(label, "^[1-9]$") then
+ return "LG", 10+tonumber(label)
+ end
+ else
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(label, "^([<>])([1-9])$")
+ if dir then -- Fwd: 1-9, Bkwd: 11-19.
+ return "LG", lnum + (dir == ">" and 0 or 10)
+ end
+ -- extern label (extern label reference)
+ local extname = match(label, "^extern%s+(%S+)$")
+ if extname then
+ return "EXT", map_extern[extname]
+ end
+ end
+ werror("bad label `"..label.."'")
+end
+
+------------------------------------------------------------------------------
+
+-- Handle opcodes defined with template strings.
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return sub(template, 9) end
+ local op = tonumber(sub(template, 1, 8), 16)
+ local n = 1
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 2 positions (ins/ext).
+ if secpos+2 > maxsecpos then wflush() end
+ local pos = wpos()
+
+ -- Process each character.
+ for p in gmatch(sub(template, 9), ".") do
+ if p == "D" then
+ op = op + shl(parse_gpr(params[n]), 11); n = n + 1
+ elseif p == "T" then
+ op = op + shl(parse_gpr(params[n]), 16); n = n + 1
+ elseif p == "S" then
+ op = op + shl(parse_gpr(params[n]), 21); n = n + 1
+ elseif p == "F" then
+ op = op + shl(parse_fpr(params[n]), 6); n = n + 1
+ elseif p == "G" then
+ op = op + shl(parse_fpr(params[n]), 11); n = n + 1
+ elseif p == "H" then
+ op = op + shl(parse_fpr(params[n]), 16); n = n + 1
+ elseif p == "R" then
+ op = op + shl(parse_fpr(params[n]), 21); n = n + 1
+ elseif p == "I" then
+ op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1
+ elseif p == "U" then
+ op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1
+ elseif p == "O" then
+ op = op + parse_disp(params[n]); n = n + 1
+ elseif p == "X" then
+ op = op + parse_index(params[n]); n = n + 1
+ elseif p == "B" or p == "J" then
+ local mode, n, s = parse_label(params[n], false)
+ if p == "B" then n = n + 2048 end
+ waction("REL_"..mode, n, s, 1)
+ n = n + 1
+ elseif p == "A" then
+ op = op + parse_imm(params[n], 5, 6, 0, false); n = n + 1
+ elseif p == "M" then
+ op = op + parse_imm(params[n], 5, 11, 0, false); n = n + 1
+ elseif p == "N" then
+ op = op + parse_imm(params[n], 5, 16, 0, false); n = n + 1
+ elseif p == "C" then
+ op = op + parse_imm(params[n], 3, 18, 0, false); n = n + 1
+ elseif p == "V" then
+ op = op + parse_imm(params[n], 3, 8, 0, false); n = n + 1
+ elseif p == "W" then
+ op = op + parse_imm(params[n], 3, 0, 0, false); n = n + 1
+ elseif p == "Y" then
+ op = op + parse_imm(params[n], 20, 6, 0, false); n = n + 1
+ elseif p == "Z" then
+ op = op + parse_imm(params[n], 10, 6, 0, false); n = n + 1
+ elseif p == "=" then
+ op = op + shl(band(op, 0xf800), 5) -- Copy D to T for clz, clo.
+ else
+ assert(false)
+ end
+ end
+ wputpos(pos, op)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_1"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr" end
+ if secpos+1 > maxsecpos then wflush() end
+ local mode, n, s = parse_label(params[1], true)
+ if mode == "EXT" then werror("bad label definition") end
+ waction("LABEL_"..mode, n, s, 1)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+map_op[".long_*"] = function(params)
+ if not params then return "imm..." end
+ for _,p in ipairs(params) do
+ local n = tonumber(p)
+ if not n then werror("bad immediate `"..p.."'") end
+ if n < 0 then n = n + 2^32 end
+ wputw(n)
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1])
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION", num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = map_coreop })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
diff --git a/3rdparty/lua/dynasm/dasm_ppc.h b/3rdparty/lua/dynasm/dasm_ppc.h
index 7d9636a..13c4461 100644
--- a/3rdparty/lua/dynasm/dasm_ppc.h
+++ b/3rdparty/lua/dynasm/dasm_ppc.h
@@ -1,412 +1,412 @@
-/*
-** DynASM PPC encoding engine.
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-** Released under the MIT license. See dynasm.lua for full copyright notice.
-*/
-
-#include <stddef.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdlib.h>
-
-#define DASM_ARCH "ppc"
-
-#ifndef DASM_EXTERN
-#define DASM_EXTERN(a,b,c,d) 0
-#endif
-
-/* Action definitions. */
-enum {
- DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
- /* The following actions need a buffer position. */
- DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
- /* The following actions also have an argument. */
- DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
- DASM__MAX
-};
-
-/* Maximum number of section buffer positions for a single dasm_put() call. */
-#define DASM_MAXSECPOS 25
-
-/* DynASM encoder status codes. Action list offset or number are or'ed in. */
-#define DASM_S_OK 0x00000000
-#define DASM_S_NOMEM 0x01000000
-#define DASM_S_PHASE 0x02000000
-#define DASM_S_MATCH_SEC 0x03000000
-#define DASM_S_RANGE_I 0x11000000
-#define DASM_S_RANGE_SEC 0x12000000
-#define DASM_S_RANGE_LG 0x13000000
-#define DASM_S_RANGE_PC 0x14000000
-#define DASM_S_RANGE_REL 0x15000000
-#define DASM_S_UNDEF_LG 0x21000000
-#define DASM_S_UNDEF_PC 0x22000000
-
-/* Macros to convert positions (8 bit section + 24 bit index). */
-#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
-#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
-#define DASM_SEC2POS(sec) ((sec)<<24)
-#define DASM_POS2SEC(pos) ((pos)>>24)
-#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
-
-/* Action list type. */
-typedef const unsigned int *dasm_ActList;
-
-/* Per-section structure. */
-typedef struct dasm_Section {
- int *rbuf; /* Biased buffer pointer (negative section bias). */
- int *buf; /* True buffer pointer. */
- size_t bsize; /* Buffer size in bytes. */
- int pos; /* Biased buffer position. */
- int epos; /* End of biased buffer position - max single put. */
- int ofs; /* Byte offset into section. */
-} dasm_Section;
-
-/* Core structure holding the DynASM encoding state. */
-struct dasm_State {
- size_t psize; /* Allocated size of this structure. */
- dasm_ActList actionlist; /* Current actionlist pointer. */
- int *lglabels; /* Local/global chain/pos ptrs. */
- size_t lgsize;
- int *pclabels; /* PC label chains/pos ptrs. */
- size_t pcsize;
- void **globals; /* Array of globals (bias -10). */
- dasm_Section *section; /* Pointer to active section. */
- size_t codesize; /* Total size of all code sections. */
- int maxsection; /* 0 <= sectionidx < maxsection. */
- int status; /* Status code. */
- dasm_Section sections[1]; /* All sections. Alloc-extended. */
-};
-
-/* The size of the core structure depends on the max. number of sections. */
-#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
-
-
-/* Initialize DynASM state. */
-void dasm_init(Dst_DECL, int maxsection)
-{
- dasm_State *D;
- size_t psz = 0;
- int i;
- Dst_REF = NULL;
- DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
- D = Dst_REF;
- D->psize = psz;
- D->lglabels = NULL;
- D->lgsize = 0;
- D->pclabels = NULL;
- D->pcsize = 0;
- D->globals = NULL;
- D->maxsection = maxsection;
- for (i = 0; i < maxsection; i++) {
- D->sections[i].buf = NULL; /* Need this for pass3. */
- D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
- D->sections[i].bsize = 0;
- D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
- }
-}
-
-/* Free DynASM state. */
-void dasm_free(Dst_DECL)
-{
- dasm_State *D = Dst_REF;
- int i;
- for (i = 0; i < D->maxsection; i++)
- if (D->sections[i].buf)
- DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
- if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
- if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
- DASM_M_FREE(Dst, D, D->psize);
-}
-
-/* Setup global label array. Must be called before dasm_setup(). */
-void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
-{
- dasm_State *D = Dst_REF;
- D->globals = gl - 10; /* Negative bias to compensate for locals. */
- DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
-}
-
-/* Grow PC label array. Can be called after dasm_setup(), too. */
-void dasm_growpc(Dst_DECL, unsigned int maxpc)
-{
- dasm_State *D = Dst_REF;
- size_t osz = D->pcsize;
- DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
- memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
-}
-
-/* Setup encoder. */
-void dasm_setup(Dst_DECL, const void *actionlist)
-{
- dasm_State *D = Dst_REF;
- int i;
- D->actionlist = (dasm_ActList)actionlist;
- D->status = DASM_S_OK;
- D->section = &D->sections[0];
- memset((void *)D->lglabels, 0, D->lgsize);
- if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
- for (i = 0; i < D->maxsection; i++) {
- D->sections[i].pos = DASM_SEC2POS(i);
- D->sections[i].ofs = 0;
- }
-}
-
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) { \
- D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
-#define CKPL(kind, st) \
- do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
- D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
-#else
-#define CK(x, st) ((void)0)
-#define CKPL(kind, st) ((void)0)
-#endif
-
-/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
-void dasm_put(Dst_DECL, int start, ...)
-{
- va_list ap;
- dasm_State *D = Dst_REF;
- dasm_ActList p = D->actionlist + start;
- dasm_Section *sec = D->section;
- int pos = sec->pos, ofs = sec->ofs;
- int *b;
-
- if (pos >= sec->epos) {
- DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
- sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
- sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
- sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
- }
-
- b = sec->rbuf;
- b[pos++] = start;
-
- va_start(ap, start);
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16);
- if (action >= DASM__MAX) {
- ofs += 4;
- } else {
- int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
- switch (action) {
- case DASM_STOP: goto stop;
- case DASM_SECTION:
- n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
- D->section = &D->sections[n]; goto stop;
- case DASM_ESC: p++; ofs += 4; break;
- case DASM_REL_EXT: break;
- case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
- case DASM_REL_LG:
- n = (ins & 2047) - 10; pl = D->lglabels + n;
- /* Bkwd rel or global. */
- if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
- pl += 10; n = *pl;
- if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
- goto linkrel;
- case DASM_REL_PC:
- pl = D->pclabels + n; CKPL(pc, PC);
- putrel:
- n = *pl;
- if (n < 0) { /* Label exists. Get label pos and store it. */
- b[pos] = -n;
- } else {
- linkrel:
- b[pos] = n; /* Else link to rel chain, anchored at label. */
- *pl = pos;
- }
- pos++;
- break;
- case DASM_LABEL_LG:
- pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
- case DASM_LABEL_PC:
- pl = D->pclabels + n; CKPL(pc, PC);
- putlabel:
- n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos;
- }
- *pl = -pos; /* Label exists now. */
- b[pos++] = ofs; /* Store pass1 offset estimate. */
- break;
- case DASM_IMM:
-#ifdef DASM_CHECKS
- CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
-#endif
- n >>= ((ins>>10)&31);
-#ifdef DASM_CHECKS
- if (ins & 0x8000)
- CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
- else
- CK((n>>((ins>>5)&31)) == 0, RANGE_I);
-#endif
- b[pos++] = n;
- break;
- }
- }
- }
-stop:
- va_end(ap);
- sec->pos = pos;
- sec->ofs = ofs;
-}
-#undef CK
-
-/* Pass 2: Link sections, shrink aligns, fix label offsets. */
-int dasm_link(Dst_DECL, size_t *szp)
-{
- dasm_State *D = Dst_REF;
- int secnum;
- int ofs = 0;
-
-#ifdef DASM_CHECKS
- *szp = 0;
- if (D->status != DASM_S_OK) return D->status;
- {
- int pc;
- for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
- if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
- }
-#endif
-
- { /* Handle globals not defined in this translation unit. */
- int idx;
- for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
- int n = D->lglabels[idx];
- /* Undefined label: Collapse rel chain and replace with marker (< 0). */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
- }
- }
-
- /* Combine all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->rbuf;
- int pos = DASM_SEC2POS(secnum);
- int lastpos = sec->pos;
-
- while (pos != lastpos) {
- dasm_ActList p = D->actionlist + b[pos++];
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16);
- switch (action) {
- case DASM_STOP: case DASM_SECTION: goto stop;
- case DASM_ESC: p++; break;
- case DASM_REL_EXT: break;
- case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
- case DASM_REL_LG: case DASM_REL_PC: pos++; break;
- case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
- case DASM_IMM: pos++; break;
- }
- }
- stop: (void)0;
- }
- ofs += sec->ofs; /* Next section starts right after current section. */
- }
-
- D->codesize = ofs; /* Total size of all code sections */
- *szp = ofs;
- return DASM_S_OK;
-}
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
-#else
-#define CK(x, st) ((void)0)
-#endif
-
-/* Pass 3: Encode sections. */
-int dasm_encode(Dst_DECL, void *buffer)
-{
- dasm_State *D = Dst_REF;
- char *base = (char *)buffer;
- unsigned int *cp = (unsigned int *)buffer;
- int secnum;
-
- /* Encode all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->buf;
- int *endb = sec->rbuf + sec->pos;
-
- while (b != endb) {
- dasm_ActList p = D->actionlist + *b++;
- while (1) {
- unsigned int ins = *p++;
- unsigned int action = (ins >> 16);
- int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
- switch (action) {
- case DASM_STOP: case DASM_SECTION: goto stop;
- case DASM_ESC: *cp++ = *p++; break;
- case DASM_REL_EXT:
- n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1) - 4;
- goto patchrel;
- case DASM_ALIGN:
- ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000;
- break;
- case DASM_REL_LG:
- CK(n >= 0, UNDEF_LG);
- case DASM_REL_PC:
- CK(n >= 0, UNDEF_PC);
- n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base);
- patchrel:
- CK((n & 3) == 0 &&
- (((n+4) + ((ins & 2048) ? 0x00008000 : 0x02000000)) >>
- ((ins & 2048) ? 16 : 26)) == 0, RANGE_REL);
- cp[-1] |= ((n+4) & ((ins & 2048) ? 0x0000fffc: 0x03fffffc));
- break;
- case DASM_LABEL_LG:
- ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
- break;
- case DASM_LABEL_PC: break;
- case DASM_IMM:
- cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31);
- break;
- default: *cp++ = ins; break;
- }
- }
- stop: (void)0;
- }
- }
-
- if (base + D->codesize != (char *)cp) /* Check for phase errors. */
- return DASM_S_PHASE;
- return DASM_S_OK;
-}
-#undef CK
-
-/* Get PC label offset. */
-int dasm_getpclabel(Dst_DECL, unsigned int pc)
-{
- dasm_State *D = Dst_REF;
- if (pc*sizeof(int) < D->pcsize) {
- int pos = D->pclabels[pc];
- if (pos < 0) return *DASM_POS2PTR(D, -pos);
- if (pos > 0) return -1; /* Undefined. */
- }
- return -2; /* Unused or out of range. */
-}
-
-#ifdef DASM_CHECKS
-/* Optional sanity checker to call between isolated encoding steps. */
-int dasm_checkstep(Dst_DECL, int secmatch)
-{
- dasm_State *D = Dst_REF;
- if (D->status == DASM_S_OK) {
- int i;
- for (i = 1; i <= 9; i++) {
- if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
- D->lglabels[i] = 0;
- }
- }
- if (D->status == DASM_S_OK && secmatch >= 0 &&
- D->section != &D->sections[secmatch])
- D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
- return D->status;
-}
-#endif
-
+/*
+** DynASM PPC encoding engine.
+** Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "ppc"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. */
+enum {
+ DASM_STOP, DASM_SECTION, DASM_ESC, DASM_REL_EXT,
+ /* The following actions need a buffer position. */
+ DASM_ALIGN, DASM_REL_LG, DASM_LABEL_LG,
+ /* The following actions also have an argument. */
+ DASM_REL_PC, DASM_LABEL_PC, DASM_IMM,
+ DASM__MAX
+};
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_REL 0x15000000
+#define DASM_S_UNDEF_LG 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
+
+/* Action list type. */
+typedef const unsigned int *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status = DASM_S_RANGE_##st|(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ if (action >= DASM__MAX) {
+ ofs += 4;
+ } else {
+ int *pl, n = action >= DASM_REL_PC ? va_arg(ap, int) : 0;
+ switch (action) {
+ case DASM_STOP: goto stop;
+ case DASM_SECTION:
+ n = (ins & 255); CK(n < D->maxsection, RANGE_SEC);
+ D->section = &D->sections[n]; goto stop;
+ case DASM_ESC: p++; ofs += 4; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs += (ins & 255); b[pos++] = ofs; break;
+ case DASM_REL_LG:
+ n = (ins & 2047) - 10; pl = D->lglabels + n;
+ /* Bkwd rel or global. */
+ if (n >= 0) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
+ pl += 10; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ break;
+ case DASM_LABEL_LG:
+ pl = D->lglabels + (ins & 2047) - 10; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC:
+ pl = D->pclabels + n; CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_IMM:
+#ifdef DASM_CHECKS
+ CK((n & ((1<<((ins>>10)&31))-1)) == 0, RANGE_I);
+#endif
+ n >>= ((ins>>10)&31);
+#ifdef DASM_CHECKS
+ if (ins & 0x8000)
+ CK(((n + (1<<(((ins>>5)&31)-1)))>>((ins>>5)&31)) == 0, RANGE_I);
+ else
+ CK((n>>((ins>>5)&31)) == 0, RANGE_I);
+#endif
+ b[pos++] = n;
+ break;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
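
dasm_put() is not written by hand; wflush() in the arch module below writes out one call per flushed run of `|` lines, with the first argument giving the start offset into the action list and any further arguments supplying runtime operands (immediates, type offsets, labels). A hedged illustration of what the preprocessor output looks like; the offset and the action-list contents are whatever the generator produced, not the literal values shown:

    /* DynASM input inside a C function:
    **   | addi r3, r3, imm
    **   | blr
    ** becomes, after preprocessing, something like: */
    dasm_put(Dst, 0, imm);   /* 0 = action-list offset, imm consumed here by a DASM_IMM action */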
+
+/* Pass 2: Link sections, shrink aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 20; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: p++; break;
+ case DASM_REL_EXT: break;
+ case DASM_ALIGN: ofs -= (b[pos++] + ofs) & (ins & 255); break;
+ case DASM_REL_LG: case DASM_REL_PC: pos++; break;
+ case DASM_LABEL_LG: case DASM_LABEL_PC: b[pos++] += ofs; break;
+ case DASM_IMM: pos++; break;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) return DASM_S_##st|(p-D->actionlist-1); } while (0)
+#else
+#define CK(x, st) ((void)0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ char *base = (char *)buffer;
+ unsigned int *cp = (unsigned int *)buffer;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ while (1) {
+ unsigned int ins = *p++;
+ unsigned int action = (ins >> 16);
+ int n = (action >= DASM_ALIGN && action < DASM__MAX) ? *b++ : 0;
+ switch (action) {
+ case DASM_STOP: case DASM_SECTION: goto stop;
+ case DASM_ESC: *cp++ = *p++; break;
+ case DASM_REL_EXT:
+ n = DASM_EXTERN(Dst, (unsigned char *)cp, (ins & 2047), 1) - 4;
+ goto patchrel;
+ case DASM_ALIGN:
+ ins &= 255; while ((((char *)cp - base) & ins)) *cp++ = 0x60000000;
+ break;
+ case DASM_REL_LG:
+ CK(n >= 0, UNDEF_LG);
+ case DASM_REL_PC:
+ CK(n >= 0, UNDEF_PC);
+ n = *DASM_POS2PTR(D, n) - (int)((char *)cp - base);
+ patchrel:
+ CK((n & 3) == 0 &&
+ (((n+4) + ((ins & 2048) ? 0x00008000 : 0x02000000)) >>
+ ((ins & 2048) ? 16 : 26)) == 0, RANGE_REL);
+ cp[-1] |= ((n+4) & ((ins & 2048) ? 0x0000fffc: 0x03fffffc));
+ break;
+ case DASM_LABEL_LG:
+ ins &= 2047; if (ins >= 20) D->globals[ins-10] = (void *)(base + n);
+ break;
+ case DASM_LABEL_PC: break;
+ case DASM_IMM:
+ cp[-1] |= (n & ((1<<((ins>>5)&31))-1)) << (ins&31);
+ break;
+ default: *cp++ = ins; break;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != (char *)cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+#undef CK
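
The typical finishing sequence is: dasm_link() to get the total code size, allocate a buffer, dasm_encode() into it, then make the buffer executable and free the state. A minimal POSIX sketch with error handling reduced to asserts, assuming the default `dasm_State **` calling convention from dasm_proto.h and ignoring that a real PPC target would also have to flush the instruction cache before running the code (`finish` is an illustrative helper, not DynASM API):

    #include <assert.h>
    #include <sys/mman.h>

    static void *finish(dasm_State **Dst, size_t *szp)
    {
      void *buf;
      assert(dasm_link(Dst, szp) == DASM_S_OK);      /* pass 2: compute total code size */
      buf = mmap(NULL, *szp, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(buf != MAP_FAILED);
      assert(dasm_encode(Dst, buf) == DASM_S_OK);    /* pass 3: emit the machine code */
      mprotect(buf, *szp, PROT_READ | PROT_EXEC);    /* write first, then flip to executable */
      dasm_free(Dst);
      return buf;                                    /* cast to a function pointer to call it */
    }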
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
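
Once dasm_link() has fixed up label offsets, dasm_getpclabel() maps a `=>n` label number to a byte offset from the start of the encoded buffer, with negative values signalling an undefined or unused label as noted above. A small hedged helper, assuming `buf` is the pointer that was passed to dasm_encode() (`pclabel_addr` is an illustrative name):

    static void *pclabel_addr(dasm_State **Dst, void *buf, unsigned int pc)
    {
      int ofs = dasm_getpclabel(Dst, pc);
      return ofs < 0 ? NULL : (char *)buf + ofs;   /* -1 = undefined, -2 = unused/out of range */
    }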
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_LG|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(D->section-D->sections);
+ return D->status;
+}
+#endif
+
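When compiled with DASM_CHECKS, the embedder can call dasm_checkstep() between logically independent emission steps: it reports forward local labels (1-9) that were referenced but never defined and, if `secmatch` is non-negative, that emission ended in the expected section. A hedged sketch, where `handle_error` is a hypothetical callback and the assumption that the low bits of the status word identify the offending label or section follows the `DASM_S_UNDEF_LG|i` pattern above:

    int st = dasm_checkstep(&state, -1);   /* -1: don't insist on a particular section */
    if (st != DASM_S_OK)
      handle_error(st);                    /* hypothetical; low bits name the label/section */
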
diff --git a/3rdparty/lua/dynasm/dasm_ppc.lua b/3rdparty/lua/dynasm/dasm_ppc.lua
index a40e7b5..65da317 100644
--- a/3rdparty/lua/dynasm/dasm_ppc.lua
+++ b/3rdparty/lua/dynasm/dasm_ppc.lua
@@ -1,1249 +1,1249 @@
-------------------------------------------------------------------------------
--- DynASM PPC module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- See dynasm.lua for full copyright notice.
-------------------------------------------------------------------------------
-
--- Module information:
-local _info = {
- arch = "ppc",
- description = "DynASM PPC module",
- version = "1.3.0",
- vernum = 10300,
- release = "2011-05-05",
- author = "Mike Pall",
- license = "MIT",
-}
-
--- Exported glue functions for the arch-specific module.
-local _M = { _info = _info }
-
--- Cache library functions.
-local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
-local assert, setmetatable = assert, setmetatable
-local _s = string
-local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
-local match, gmatch = _s.match, _s.gmatch
-local concat, sort = table.concat, table.sort
-local bit = bit or require("bit")
-local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift
-local tohex = bit.tohex
-
--- Inherited tables and callbacks.
-local g_opt, g_arch
-local wline, werror, wfatal, wwarn
-
--- Action name list.
--- CHECK: Keep this in sync with the C code!
-local action_names = {
- "STOP", "SECTION", "ESC", "REL_EXT",
- "ALIGN", "REL_LG", "LABEL_LG",
- "REL_PC", "LABEL_PC", "IMM",
-}
-
--- Maximum number of section buffer positions for dasm_put().
--- CHECK: Keep this in sync with the C code!
-local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
-
--- Action name -> action number.
-local map_action = {}
-for n,name in ipairs(action_names) do
- map_action[name] = n-1
-end
-
--- Action list buffer.
-local actlist = {}
-
--- Argument list for next dasm_put(). Start with offset 0 into action list.
-local actargs = { 0 }
-
--- Current number of section buffer positions for dasm_put().
-local secpos = 1
-
-------------------------------------------------------------------------------
-
--- Dump action names and numbers.
-local function dumpactions(out)
- out:write("DynASM encoding engine action codes:\n")
- for n,name in ipairs(action_names) do
- local num = map_action[name]
- out:write(format(" %-10s %02X %d\n", name, num, num))
- end
- out:write("\n")
-end
-
--- Write action list buffer as a huge static C array.
-local function writeactions(out, name)
- local nn = #actlist
- if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
- out:write("static const unsigned int ", name, "[", nn, "] = {\n")
- for i = 1,nn-1 do
- assert(out:write("0x", tohex(actlist[i]), ",\n"))
- end
- assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
-end
-
-------------------------------------------------------------------------------
-
--- Add word to action list.
-local function wputxw(n)
- assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
- actlist[#actlist+1] = n
-end
-
--- Add action to list with optional arg. Advance buffer pos, too.
-local function waction(action, val, a, num)
- local w = assert(map_action[action], "bad action name `"..action.."'")
- wputxw(w * 0x10000 + (val or 0))
- if a then actargs[#actargs+1] = a end
- if a or num then secpos = secpos + (num or 1) end
-end
-
--- Flush action list (intervening C code or buffer pos overflow).
-local function wflush(term)
- if #actlist == actargs[1] then return end -- Nothing to flush.
- if not term then waction("STOP") end -- Terminate action list.
- wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
- actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
- secpos = 1 -- The actionlist offset occupies a buffer position, too.
-end
-
--- Put escaped word.
-local function wputw(n)
- if n <= 0xffffff then waction("ESC") end
- wputxw(n)
-end
-
--- Reserve position for word.
-local function wpos()
- local pos = #actlist+1
- actlist[pos] = ""
- return pos
-end
-
--- Store word to reserved position.
-local function wputpos(pos, n)
- assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
- actlist[pos] = n
-end
-
-------------------------------------------------------------------------------
-
--- Global label name -> global label number. With auto assignment on 1st use.
-local next_global = 20
-local map_global = setmetatable({}, { __index = function(t, name)
- if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
- local n = next_global
- if n > 2047 then werror("too many global labels") end
- next_global = n + 1
- t[name] = n
- return n
-end})
-
--- Dump global labels.
-local function dumpglobals(out, lvl)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("Global labels:\n")
- for i=20,next_global-1 do
- out:write(format(" %s\n", t[i]))
- end
- out:write("\n")
-end
-
--- Write global label enum.
-local function writeglobals(out, prefix)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("enum {\n")
- for i=20,next_global-1 do
- out:write(" ", prefix, t[i], ",\n")
- end
- out:write(" ", prefix, "_MAX\n};\n")
-end
-
--- Write global label names.
-local function writeglobalnames(out, name)
- local t = {}
- for name, n in pairs(map_global) do t[n] = name end
- out:write("static const char *const ", name, "[] = {\n")
- for i=20,next_global-1 do
- out:write(" \"", t[i], "\",\n")
- end
- out:write(" (const char *)0\n};\n")
-end
-
-------------------------------------------------------------------------------
-
--- Extern label name -> extern label number. With auto assignment on 1st use.
-local next_extern = 0
-local map_extern_ = {}
-local map_extern = setmetatable({}, { __index = function(t, name)
- -- No restrictions on the name for now.
- local n = next_extern
- if n > 2047 then werror("too many extern labels") end
- next_extern = n + 1
- t[name] = n
- map_extern_[n] = name
- return n
-end})
-
--- Dump extern labels.
-local function dumpexterns(out, lvl)
- out:write("Extern labels:\n")
- for i=0,next_extern-1 do
- out:write(format(" %s\n", map_extern_[i]))
- end
- out:write("\n")
-end
-
--- Write extern label names.
-local function writeexternnames(out, name)
- out:write("static const char *const ", name, "[] = {\n")
- for i=0,next_extern-1 do
- out:write(" \"", map_extern_[i], "\",\n")
- end
- out:write(" (const char *)0\n};\n")
-end
-
-------------------------------------------------------------------------------
-
--- Arch-specific maps.
-local map_archdef = { sp = "r1" } -- Ext. register name -> int. name.
-
-local map_type = {} -- Type name -> { ctype, reg }
-local ctypenum = 0 -- Type number (for Dt... macros).
-
--- Reverse defines for registers.
-function _M.revdef(s)
- if s == "r1" then return "sp" end
- return s
-end
-
-local map_cond = {
- lt = 0, gt = 1, eq = 2, so = 3,
- ge = 4, le = 5, ne = 6, ns = 7,
-}
-
-------------------------------------------------------------------------------
-
--- Template strings for PPC instructions.
-local map_op = {
- tdi_3 = "08000000ARI",
- twi_3 = "0c000000ARI",
- mulli_3 = "1c000000RRI",
- subfic_3 = "20000000RRI",
- cmplwi_3 = "28000000XRU",
- cmplwi_2 = "28000000-RU",
- cmpldi_3 = "28200000XRU",
- cmpldi_2 = "28200000-RU",
- cmpwi_3 = "2c000000XRI",
- cmpwi_2 = "2c000000-RI",
- cmpdi_3 = "2c200000XRI",
- cmpdi_2 = "2c200000-RI",
- addic_3 = "30000000RRI",
- ["addic._3"] = "34000000RRI",
- addi_3 = "38000000RR0I",
- li_2 = "38000000RI",
- la_2 = "38000000RD",
- addis_3 = "3c000000RR0I",
- lis_2 = "3c000000RI",
- lus_2 = "3c000000RU",
- bc_3 = "40000000AAK",
- bcl_3 = "40000001AAK",
- bdnz_1 = "42000000K",
- bdz_1 = "42400000K",
- sc_0 = "44000000",
- b_1 = "48000000J",
- bl_1 = "48000001J",
- rlwimi_5 = "50000000RR~AAA.",
- rlwinm_5 = "54000000RR~AAA.",
- rlwnm_5 = "5c000000RR~RAA.",
- ori_3 = "60000000RR~U",
- nop_0 = "60000000",
- oris_3 = "64000000RR~U",
- xori_3 = "68000000RR~U",
- xoris_3 = "6c000000RR~U",
- ["andi._3"] = "70000000RR~U",
- ["andis._3"] = "74000000RR~U",
- lwz_2 = "80000000RD",
- lwzu_2 = "84000000RD",
- lbz_2 = "88000000RD",
- lbzu_2 = "8c000000RD",
- stw_2 = "90000000RD",
- stwu_2 = "94000000RD",
- stb_2 = "98000000RD",
- stbu_2 = "9c000000RD",
- lhz_2 = "a0000000RD",
- lhzu_2 = "a4000000RD",
- lha_2 = "a8000000RD",
- lhau_2 = "ac000000RD",
- sth_2 = "b0000000RD",
- sthu_2 = "b4000000RD",
- lmw_2 = "b8000000RD",
- stmw_2 = "bc000000RD",
- lfs_2 = "c0000000FD",
- lfsu_2 = "c4000000FD",
- lfd_2 = "c8000000FD",
- lfdu_2 = "cc000000FD",
- stfs_2 = "d0000000FD",
- stfsu_2 = "d4000000FD",
- stfd_2 = "d8000000FD",
- stfdu_2 = "dc000000FD",
- ld_2 = "e8000000RD", -- NYI: displacement must be divisible by 4.
- ldu_2 = "e8000001RD",
- lwa_2 = "e8000002RD",
- std_2 = "f8000000RD",
- stdu_2 = "f8000001RD",
-
- -- Primary opcode 19:
- mcrf_2 = "4c000000XX",
- isync_0 = "4c00012c",
- crnor_3 = "4c000042CCC",
- crnot_2 = "4c000042CC=",
- crandc_3 = "4c000102CCC",
- crxor_3 = "4c000182CCC",
- crclr_1 = "4c000182C==",
- crnand_3 = "4c0001c2CCC",
- crand_3 = "4c000202CCC",
- creqv_3 = "4c000242CCC",
- crset_1 = "4c000242C==",
- crorc_3 = "4c000342CCC",
- cror_3 = "4c000382CCC",
- crmove_2 = "4c000382CC=",
- bclr_2 = "4c000020AA",
- bclrl_2 = "4c000021AA",
- bcctr_2 = "4c000420AA",
- bcctrl_2 = "4c000421AA",
- blr_0 = "4e800020",
- blrl_0 = "4e800021",
- bctr_0 = "4e800420",
- bctrl_0 = "4e800421",
-
- -- Primary opcode 31:
- cmpw_3 = "7c000000XRR",
- cmpw_2 = "7c000000-RR",
- cmpd_3 = "7c200000XRR",
- cmpd_2 = "7c200000-RR",
- tw_3 = "7c000008ARR",
- subfc_3 = "7c000010RRR.",
- subc_3 = "7c000010RRR~.",
- mulhdu_3 = "7c000012RRR.",
- addc_3 = "7c000014RRR.",
- mulhwu_3 = "7c000016RRR.",
- isel_4 = "7c00001eRRRC",
- isellt_3 = "7c00001eRRR",
- iselgt_3 = "7c00005eRRR",
- iseleq_3 = "7c00009eRRR",
- mfcr_1 = "7c000026R",
- mfocrf_2 = "7c100026RG",
- mtcrf_2 = "7c000120GR",
- mtocrf_2 = "7c100120GR",
- lwarx_3 = "7c000028RR0R",
- ldx_3 = "7c00002aRR0R",
- lwzx_3 = "7c00002eRR0R",
- slw_3 = "7c000030RR~R.",
- cntlzw_2 = "7c000034RR~",
- sld_3 = "7c000036RR~R.",
- and_3 = "7c000038RR~R.",
- cmplw_3 = "7c000040XRR",
- cmplw_2 = "7c000040-RR",
- cmpld_3 = "7c200040XRR",
- cmpld_2 = "7c200040-RR",
- subf_3 = "7c000050RRR.",
- sub_3 = "7c000050RRR~.",
- ldux_3 = "7c00006aRR0R",
- dcbst_2 = "7c00006c-RR",
- lwzux_3 = "7c00006eRR0R",
- cntlzd_2 = "7c000074RR~",
- andc_3 = "7c000078RR~R.",
- td_3 = "7c000088ARR",
- mulhd_3 = "7c000092RRR.",
- mulhw_3 = "7c000096RRR.",
- ldarx_3 = "7c0000a8RR0R",
- dcbf_2 = "7c0000ac-RR",
- lbzx_3 = "7c0000aeRR0R",
- neg_2 = "7c0000d0RR.",
- lbzux_3 = "7c0000eeRR0R",
- popcntb_2 = "7c0000f4RR~",
- not_2 = "7c0000f8RR~%.",
- nor_3 = "7c0000f8RR~R.",
- subfe_3 = "7c000110RRR.",
- sube_3 = "7c000110RRR~.",
- adde_3 = "7c000114RRR.",
- stdx_3 = "7c00012aRR0R",
- stwcx_3 = "7c00012cRR0R.",
- stwx_3 = "7c00012eRR0R",
- prtyw_2 = "7c000134RR~",
- stdux_3 = "7c00016aRR0R",
- stwux_3 = "7c00016eRR0R",
- prtyd_2 = "7c000174RR~",
- subfze_2 = "7c000190RR.",
- addze_2 = "7c000194RR.",
- stdcx_3 = "7c0001acRR0R.",
- stbx_3 = "7c0001aeRR0R",
- subfme_2 = "7c0001d0RR.",
- mulld_3 = "7c0001d2RRR.",
- addme_2 = "7c0001d4RR.",
- mullw_3 = "7c0001d6RRR.",
- dcbtst_2 = "7c0001ec-RR",
- stbux_3 = "7c0001eeRR0R",
- add_3 = "7c000214RRR.",
- dcbt_2 = "7c00022c-RR",
- lhzx_3 = "7c00022eRR0R",
- eqv_3 = "7c000238RR~R.",
- eciwx_3 = "7c00026cRR0R",
- lhzux_3 = "7c00026eRR0R",
- xor_3 = "7c000278RR~R.",
- mfspefscr_1 = "7c0082a6R",
- mfxer_1 = "7c0102a6R",
- mflr_1 = "7c0802a6R",
- mfctr_1 = "7c0902a6R",
- lwax_3 = "7c0002aaRR0R",
- lhax_3 = "7c0002aeRR0R",
- mftb_1 = "7c0c42e6R",
- mftbu_1 = "7c0d42e6R",
- lwaux_3 = "7c0002eaRR0R",
- lhaux_3 = "7c0002eeRR0R",
- sthx_3 = "7c00032eRR0R",
- orc_3 = "7c000338RR~R.",
- ecowx_3 = "7c00036cRR0R",
- sthux_3 = "7c00036eRR0R",
- or_3 = "7c000378RR~R.",
- mr_2 = "7c000378RR~%.",
- divdu_3 = "7c000392RRR.",
- divwu_3 = "7c000396RRR.",
- mtspefscr_1 = "7c0083a6R",
- mtxer_1 = "7c0103a6R",
- mtlr_1 = "7c0803a6R",
- mtctr_1 = "7c0903a6R",
- dcbi_2 = "7c0003ac-RR",
- nand_3 = "7c0003b8RR~R.",
- divd_3 = "7c0003d2RRR.",
- divw_3 = "7c0003d6RRR.",
- cmpb_3 = "7c0003f8RR~R.",
- mcrxr_1 = "7c000400X",
- subfco_3 = "7c000410RRR.",
- subco_3 = "7c000410RRR~.",
- addco_3 = "7c000414RRR.",
- ldbrx_3 = "7c000428RR0R",
- lswx_3 = "7c00042aRR0R",
- lwbrx_3 = "7c00042cRR0R",
- lfsx_3 = "7c00042eFR0R",
- srw_3 = "7c000430RR~R.",
- srd_3 = "7c000436RR~R.",
- subfo_3 = "7c000450RRR.",
- subo_3 = "7c000450RRR~.",
- lfsux_3 = "7c00046eFR0R",
- lswi_3 = "7c0004aaRR0A",
- sync_0 = "7c0004ac",
- lwsync_0 = "7c2004ac",
- ptesync_0 = "7c4004ac",
- lfdx_3 = "7c0004aeFR0R",
- nego_2 = "7c0004d0RR.",
- lfdux_3 = "7c0004eeFR0R",
- subfeo_3 = "7c000510RRR.",
- subeo_3 = "7c000510RRR~.",
- addeo_3 = "7c000514RRR.",
- stdbrx_3 = "7c000528RR0R",
- stswx_3 = "7c00052aRR0R",
- stwbrx_3 = "7c00052cRR0R",
- stfsx_3 = "7c00052eFR0R",
- stfsux_3 = "7c00056eFR0R",
- subfzeo_2 = "7c000590RR.",
- addzeo_2 = "7c000594RR.",
- stswi_3 = "7c0005aaRR0A",
- stfdx_3 = "7c0005aeFR0R",
- subfmeo_2 = "7c0005d0RR.",
- mulldo_3 = "7c0005d2RRR.",
- addmeo_2 = "7c0005d4RR.",
- mullwo_3 = "7c0005d6RRR.",
- dcba_2 = "7c0005ec-RR",
- stfdux_3 = "7c0005eeFR0R",
- addo_3 = "7c000614RRR.",
- lhbrx_3 = "7c00062cRR0R",
- sraw_3 = "7c000630RR~R.",
- srad_3 = "7c000634RR~R.",
- srawi_3 = "7c000670RR~A.",
- sradi_3 = "7c000674RR~H.",
- eieio_0 = "7c0006ac",
- lfiwax_3 = "7c0006aeFR0R",
- sthbrx_3 = "7c00072cRR0R",
- extsh_2 = "7c000734RR~.",
- extsb_2 = "7c000774RR~.",
- divduo_3 = "7c000792RRR.",
- divwou_3 = "7c000796RRR.",
- icbi_2 = "7c0007ac-RR",
- stfiwx_3 = "7c0007aeFR0R",
- extsw_2 = "7c0007b4RR~.",
- divdo_3 = "7c0007d2RRR.",
- divwo_3 = "7c0007d6RRR.",
- dcbz_2 = "7c0007ec-RR",
-
- -- Primary opcode 30:
- rldicl_4 = "78000000RR~HM.",
- rldicr_4 = "78000004RR~HM.",
- rldic_4 = "78000008RR~HM.",
- rldimi_4 = "7800000cRR~HM.",
- rldcl_4 = "78000010RR~RM.",
- rldcr_4 = "78000012RR~RM.",
-
- -- Primary opcode 59:
- fdivs_3 = "ec000024FFF.",
- fsubs_3 = "ec000028FFF.",
- fadds_3 = "ec00002aFFF.",
- fsqrts_2 = "ec00002cF-F.",
- fres_2 = "ec000030F-F.",
- fmuls_3 = "ec000032FF-F.",
- frsqrtes_2 = "ec000034F-F.",
- fmsubs_4 = "ec000038FFFF~.",
- fmadds_4 = "ec00003aFFFF~.",
- fnmsubs_4 = "ec00003cFFFF~.",
- fnmadds_4 = "ec00003eFFFF~.",
-
- -- Primary opcode 63:
- fdiv_3 = "fc000024FFF.",
- fsub_3 = "fc000028FFF.",
- fadd_3 = "fc00002aFFF.",
- fsqrt_2 = "fc00002cF-F.",
- fsel_4 = "fc00002eFFFF~.",
- fre_2 = "fc000030F-F.",
- fmul_3 = "fc000032FF-F.",
- frsqrte_2 = "fc000034F-F.",
- fmsub_4 = "fc000038FFFF~.",
- fmadd_4 = "fc00003aFFFF~.",
- fnmsub_4 = "fc00003cFFFF~.",
- fnmadd_4 = "fc00003eFFFF~.",
- fcmpu_3 = "fc000000XFF",
- fcpsgn_3 = "fc000010FFF.",
- fcmpo_3 = "fc000040XFF",
- mtfsb1_1 = "fc00004cA",
- fneg_2 = "fc000050F-F.",
- mcrfs_2 = "fc000080XX",
- mtfsb0_1 = "fc00008cA",
- fmr_2 = "fc000090F-F.",
- frsp_2 = "fc000018F-F.",
- fctiw_2 = "fc00001cF-F.",
- fctiwz_2 = "fc00001eF-F.",
- mtfsfi_2 = "fc00010cAA", -- NYI: upshift.
- fnabs_2 = "fc000110F-F.",
- fabs_2 = "fc000210F-F.",
- frin_2 = "fc000310F-F.",
- friz_2 = "fc000350F-F.",
- frip_2 = "fc000390F-F.",
- frim_2 = "fc0003d0F-F.",
- mffs_1 = "fc00048eF.",
- -- NYI: mtfsf, mtfsb0, mtfsb1.
- fctid_2 = "fc00065cF-F.",
- fctidz_2 = "fc00065eF-F.",
- fcfid_2 = "fc00069cF-F.",
-
- -- Primary opcode 4, SPE APU extension:
- evaddw_3 = "10000200RRR",
- evaddiw_3 = "10000202RAR~",
- evsubw_3 = "10000204RRR~",
- evsubiw_3 = "10000206RAR~",
- evabs_2 = "10000208RR",
- evneg_2 = "10000209RR",
- evextsb_2 = "1000020aRR",
- evextsh_2 = "1000020bRR",
- evrndw_2 = "1000020cRR",
- evcntlzw_2 = "1000020dRR",
- evcntlsw_2 = "1000020eRR",
- brinc_3 = "1000020fRRR",
- evand_3 = "10000211RRR",
- evandc_3 = "10000212RRR",
- evxor_3 = "10000216RRR",
- evor_3 = "10000217RRR",
- evmr_2 = "10000217RR=",
- evnor_3 = "10000218RRR",
- evnot_2 = "10000218RR=",
- eveqv_3 = "10000219RRR",
- evorc_3 = "1000021bRRR",
- evnand_3 = "1000021eRRR",
- evsrwu_3 = "10000220RRR",
- evsrws_3 = "10000221RRR",
- evsrwiu_3 = "10000222RRA",
- evsrwis_3 = "10000223RRA",
- evslw_3 = "10000224RRR",
- evslwi_3 = "10000226RRA",
- evrlw_3 = "10000228RRR",
- evsplati_2 = "10000229RS",
- evrlwi_3 = "1000022aRRA",
- evsplatfi_2 = "1000022bRS",
- evmergehi_3 = "1000022cRRR",
- evmergelo_3 = "1000022dRRR",
- evcmpgtu_3 = "10000230XRR",
- evcmpgtu_2 = "10000230-RR",
- evcmpgts_3 = "10000231XRR",
- evcmpgts_2 = "10000231-RR",
- evcmpltu_3 = "10000232XRR",
- evcmpltu_2 = "10000232-RR",
- evcmplts_3 = "10000233XRR",
- evcmplts_2 = "10000233-RR",
- evcmpeq_3 = "10000234XRR",
- evcmpeq_2 = "10000234-RR",
- evsel_4 = "10000278RRRW",
- evsel_3 = "10000278RRR",
- evfsadd_3 = "10000280RRR",
- evfssub_3 = "10000281RRR",
- evfsabs_2 = "10000284RR",
- evfsnabs_2 = "10000285RR",
- evfsneg_2 = "10000286RR",
- evfsmul_3 = "10000288RRR",
- evfsdiv_3 = "10000289RRR",
- evfscmpgt_3 = "1000028cXRR",
- evfscmpgt_2 = "1000028c-RR",
- evfscmplt_3 = "1000028dXRR",
- evfscmplt_2 = "1000028d-RR",
- evfscmpeq_3 = "1000028eXRR",
- evfscmpeq_2 = "1000028e-RR",
- evfscfui_2 = "10000290R-R",
- evfscfsi_2 = "10000291R-R",
- evfscfuf_2 = "10000292R-R",
- evfscfsf_2 = "10000293R-R",
- evfsctui_2 = "10000294R-R",
- evfsctsi_2 = "10000295R-R",
- evfsctuf_2 = "10000296R-R",
- evfsctsf_2 = "10000297R-R",
- evfsctuiz_2 = "10000298R-R",
- evfsctsiz_2 = "1000029aR-R",
- evfststgt_3 = "1000029cXRR",
- evfststgt_2 = "1000029c-RR",
- evfststlt_3 = "1000029dXRR",
- evfststlt_2 = "1000029d-RR",
- evfststeq_3 = "1000029eXRR",
- evfststeq_2 = "1000029e-RR",
- efsadd_3 = "100002c0RRR",
- efssub_3 = "100002c1RRR",
- efsabs_2 = "100002c4RR",
- efsnabs_2 = "100002c5RR",
- efsneg_2 = "100002c6RR",
- efsmul_3 = "100002c8RRR",
- efsdiv_3 = "100002c9RRR",
- efscmpgt_3 = "100002ccXRR",
- efscmpgt_2 = "100002cc-RR",
- efscmplt_3 = "100002cdXRR",
- efscmplt_2 = "100002cd-RR",
- efscmpeq_3 = "100002ceXRR",
- efscmpeq_2 = "100002ce-RR",
- efscfd_2 = "100002cfR-R",
- efscfui_2 = "100002d0R-R",
- efscfsi_2 = "100002d1R-R",
- efscfuf_2 = "100002d2R-R",
- efscfsf_2 = "100002d3R-R",
- efsctui_2 = "100002d4R-R",
- efsctsi_2 = "100002d5R-R",
- efsctuf_2 = "100002d6R-R",
- efsctsf_2 = "100002d7R-R",
- efsctuiz_2 = "100002d8R-R",
- efsctsiz_2 = "100002daR-R",
- efststgt_3 = "100002dcXRR",
- efststgt_2 = "100002dc-RR",
- efststlt_3 = "100002ddXRR",
- efststlt_2 = "100002dd-RR",
- efststeq_3 = "100002deXRR",
- efststeq_2 = "100002de-RR",
- efdadd_3 = "100002e0RRR",
- efdsub_3 = "100002e1RRR",
- efdcfuid_2 = "100002e2R-R",
- efdcfsid_2 = "100002e3R-R",
- efdabs_2 = "100002e4RR",
- efdnabs_2 = "100002e5RR",
- efdneg_2 = "100002e6RR",
- efdmul_3 = "100002e8RRR",
- efddiv_3 = "100002e9RRR",
- efdctuidz_2 = "100002eaR-R",
- efdctsidz_2 = "100002ebR-R",
- efdcmpgt_3 = "100002ecXRR",
- efdcmpgt_2 = "100002ec-RR",
- efdcmplt_3 = "100002edXRR",
- efdcmplt_2 = "100002ed-RR",
- efdcmpeq_3 = "100002eeXRR",
- efdcmpeq_2 = "100002ee-RR",
- efdcfs_2 = "100002efR-R",
- efdcfui_2 = "100002f0R-R",
- efdcfsi_2 = "100002f1R-R",
- efdcfuf_2 = "100002f2R-R",
- efdcfsf_2 = "100002f3R-R",
- efdctui_2 = "100002f4R-R",
- efdctsi_2 = "100002f5R-R",
- efdctuf_2 = "100002f6R-R",
- efdctsf_2 = "100002f7R-R",
- efdctuiz_2 = "100002f8R-R",
- efdctsiz_2 = "100002faR-R",
- efdtstgt_3 = "100002fcXRR",
- efdtstgt_2 = "100002fc-RR",
- efdtstlt_3 = "100002fdXRR",
- efdtstlt_2 = "100002fd-RR",
- efdtsteq_3 = "100002feXRR",
- efdtsteq_2 = "100002fe-RR",
- evlddx_3 = "10000300RR0R",
- evldd_2 = "10000301R8",
- evldwx_3 = "10000302RR0R",
- evldw_2 = "10000303R8",
- evldhx_3 = "10000304RR0R",
- evldh_2 = "10000305R8",
- evlwhex_3 = "10000310RR0R",
- evlwhe_2 = "10000311R4",
- evlwhoux_3 = "10000314RR0R",
- evlwhou_2 = "10000315R4",
- evlwhosx_3 = "10000316RR0R",
- evlwhos_2 = "10000317R4",
- evstddx_3 = "10000320RR0R",
- evstdd_2 = "10000321R8",
- evstdwx_3 = "10000322RR0R",
- evstdw_2 = "10000323R8",
- evstdhx_3 = "10000324RR0R",
- evstdh_2 = "10000325R8",
- evstwhex_3 = "10000330RR0R",
- evstwhe_2 = "10000331R4",
- evstwhox_3 = "10000334RR0R",
- evstwho_2 = "10000335R4",
- evstwwex_3 = "10000338RR0R",
- evstwwe_2 = "10000339R4",
- evstwwox_3 = "1000033cRR0R",
- evstwwo_2 = "1000033dR4",
- evmhessf_3 = "10000403RRR",
- evmhossf_3 = "10000407RRR",
- evmheumi_3 = "10000408RRR",
- evmhesmi_3 = "10000409RRR",
- evmhesmf_3 = "1000040bRRR",
- evmhoumi_3 = "1000040cRRR",
- evmhosmi_3 = "1000040dRRR",
- evmhosmf_3 = "1000040fRRR",
- evmhessfa_3 = "10000423RRR",
- evmhossfa_3 = "10000427RRR",
- evmheumia_3 = "10000428RRR",
- evmhesmia_3 = "10000429RRR",
- evmhesmfa_3 = "1000042bRRR",
- evmhoumia_3 = "1000042cRRR",
- evmhosmia_3 = "1000042dRRR",
- evmhosmfa_3 = "1000042fRRR",
- evmwhssf_3 = "10000447RRR",
- evmwlumi_3 = "10000448RRR",
- evmwhumi_3 = "1000044cRRR",
- evmwhsmi_3 = "1000044dRRR",
- evmwhsmf_3 = "1000044fRRR",
- evmwssf_3 = "10000453RRR",
- evmwumi_3 = "10000458RRR",
- evmwsmi_3 = "10000459RRR",
- evmwsmf_3 = "1000045bRRR",
- evmwhssfa_3 = "10000467RRR",
- evmwlumia_3 = "10000468RRR",
- evmwhumia_3 = "1000046cRRR",
- evmwhsmia_3 = "1000046dRRR",
- evmwhsmfa_3 = "1000046fRRR",
- evmwssfa_3 = "10000473RRR",
- evmwumia_3 = "10000478RRR",
- evmwsmia_3 = "10000479RRR",
- evmwsmfa_3 = "1000047bRRR",
- evmra_2 = "100004c4RR",
- evdivws_3 = "100004c6RRR",
- evdivwu_3 = "100004c7RRR",
- evmwssfaa_3 = "10000553RRR",
- evmwumiaa_3 = "10000558RRR",
- evmwsmiaa_3 = "10000559RRR",
- evmwsmfaa_3 = "1000055bRRR",
- evmwssfan_3 = "100005d3RRR",
- evmwumian_3 = "100005d8RRR",
- evmwsmian_3 = "100005d9RRR",
- evmwsmfan_3 = "100005dbRRR",
- evmergehilo_3 = "1000022eRRR",
- evmergelohi_3 = "1000022fRRR",
- evlhhesplatx_3 = "10000308RR0R",
- evlhhesplat_2 = "10000309R2",
- evlhhousplatx_3 = "1000030cRR0R",
- evlhhousplat_2 = "1000030dR2",
- evlhhossplatx_3 = "1000030eRR0R",
- evlhhossplat_2 = "1000030fR2",
- evlwwsplatx_3 = "10000318RR0R",
- evlwwsplat_2 = "10000319R4",
- evlwhsplatx_3 = "1000031cRR0R",
- evlwhsplat_2 = "1000031dR4",
- evaddusiaaw_2 = "100004c0RR",
- evaddssiaaw_2 = "100004c1RR",
- evsubfusiaaw_2 = "100004c2RR",
- evsubfssiaaw_2 = "100004c3RR",
- evaddumiaaw_2 = "100004c8RR",
- evaddsmiaaw_2 = "100004c9RR",
- evsubfumiaaw_2 = "100004caRR",
- evsubfsmiaaw_2 = "100004cbRR",
- evmheusiaaw_3 = "10000500RRR",
- evmhessiaaw_3 = "10000501RRR",
- evmhessfaaw_3 = "10000503RRR",
- evmhousiaaw_3 = "10000504RRR",
- evmhossiaaw_3 = "10000505RRR",
- evmhossfaaw_3 = "10000507RRR",
- evmheumiaaw_3 = "10000508RRR",
- evmhesmiaaw_3 = "10000509RRR",
- evmhesmfaaw_3 = "1000050bRRR",
- evmhoumiaaw_3 = "1000050cRRR",
- evmhosmiaaw_3 = "1000050dRRR",
- evmhosmfaaw_3 = "1000050fRRR",
- evmhegumiaa_3 = "10000528RRR",
- evmhegsmiaa_3 = "10000529RRR",
- evmhegsmfaa_3 = "1000052bRRR",
- evmhogumiaa_3 = "1000052cRRR",
- evmhogsmiaa_3 = "1000052dRRR",
- evmhogsmfaa_3 = "1000052fRRR",
- evmwlusiaaw_3 = "10000540RRR",
- evmwlssiaaw_3 = "10000541RRR",
- evmwlumiaaw_3 = "10000548RRR",
- evmwlsmiaaw_3 = "10000549RRR",
- evmheusianw_3 = "10000580RRR",
- evmhessianw_3 = "10000581RRR",
- evmhessfanw_3 = "10000583RRR",
- evmhousianw_3 = "10000584RRR",
- evmhossianw_3 = "10000585RRR",
- evmhossfanw_3 = "10000587RRR",
- evmheumianw_3 = "10000588RRR",
- evmhesmianw_3 = "10000589RRR",
- evmhesmfanw_3 = "1000058bRRR",
- evmhoumianw_3 = "1000058cRRR",
- evmhosmianw_3 = "1000058dRRR",
- evmhosmfanw_3 = "1000058fRRR",
- evmhegumian_3 = "100005a8RRR",
- evmhegsmian_3 = "100005a9RRR",
- evmhegsmfan_3 = "100005abRRR",
- evmhogumian_3 = "100005acRRR",
- evmhogsmian_3 = "100005adRRR",
- evmhogsmfan_3 = "100005afRRR",
- evmwlusianw_3 = "100005c0RRR",
- evmwlssianw_3 = "100005c1RRR",
- evmwlumianw_3 = "100005c8RRR",
- evmwlsmianw_3 = "100005c9RRR",
-
- -- NYI: Book E instructions.
-}
-
--- Add mnemonics for "." variants.
-do
- local t = {}
- for k,v in pairs(map_op) do
- if sub(v, -1) == "." then
- local v2 = sub(v, 1, 7)..char(byte(v, 8)+1)..sub(v, 9, -2)
- t[sub(k, 1, -3).."."..sub(k, -2)] = v2
- end
- end
- for k,v in pairs(t) do
- map_op[k] = v
- end
-end
-
--- Add more branch mnemonics.
-for cond,c in pairs(map_cond) do
- local b1 = "b"..cond
- local c1 = shl(band(c, 3), 16) + (c < 4 and 0x01000000 or 0)
- -- bX[l]
- map_op[b1.."_1"] = tohex(0x40800000 + c1).."K"
- map_op[b1.."y_1"] = tohex(0x40a00000 + c1).."K"
- map_op[b1.."l_1"] = tohex(0x40800001 + c1).."K"
- map_op[b1.."_2"] = tohex(0x40800000 + c1).."-XK"
- map_op[b1.."y_2"] = tohex(0x40a00000 + c1).."-XK"
- map_op[b1.."l_2"] = tohex(0x40800001 + c1).."-XK"
- -- bXlr[l]
- map_op[b1.."lr_0"] = tohex(0x4c800020 + c1)
- map_op[b1.."lrl_0"] = tohex(0x4c800021 + c1)
- map_op[b1.."ctr_0"] = tohex(0x4c800420 + c1)
- map_op[b1.."ctrl_0"] = tohex(0x4c800421 + c1)
- -- bXctr[l]
- map_op[b1.."lr_1"] = tohex(0x4c800020 + c1).."-X"
- map_op[b1.."lrl_1"] = tohex(0x4c800021 + c1).."-X"
- map_op[b1.."ctr_1"] = tohex(0x4c800420 + c1).."-X"
- map_op[b1.."ctrl_1"] = tohex(0x4c800421 + c1).."-X"
-end
-
-------------------------------------------------------------------------------
-
-local function parse_gpr(expr)
- local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$")
- local tp = map_type[tname or expr]
- if tp then
- local reg = ovreg or tp.reg
- if not reg then
- werror("type `"..(tname or expr).."' needs a register override")
- end
- expr = reg
- end
- local r = match(expr, "^r([1-3]?[0-9])$")
- if r then
- r = tonumber(r)
- if r <= 31 then return r, tp end
- end
- werror("bad register name `"..expr.."'")
-end
-
-local function parse_fpr(expr)
- local r = match(expr, "^f([1-3]?[0-9])$")
- if r then
- r = tonumber(r)
- if r <= 31 then return r end
- end
- werror("bad register name `"..expr.."'")
-end
-
-local function parse_cr(expr)
- local r = match(expr, "^cr([0-7])$")
- if r then return tonumber(r) end
- werror("bad condition register name `"..expr.."'")
-end
-
-local function parse_cond(expr)
- local r, cond = match(expr, "^4%*cr([0-7])%+(%w%w)$")
- if r then
- r = tonumber(r)
- local c = map_cond[cond]
- if c and c < 4 then return r*4+c end
- end
- werror("bad condition bit name `"..expr.."'")
-end
-
-local function parse_imm(imm, bits, shift, scale, signed)
- local n = tonumber(imm)
- if n then
- local m = sar(n, scale)
- if shl(m, scale) == n then
- if signed then
- local s = sar(m, bits-1)
- if s == 0 then return shl(m, shift)
- elseif s == -1 then return shl(m + shl(1, bits), shift) end
- else
- if sar(m, bits) == 0 then return shl(m, shift) end
- end
- end
- werror("out of range immediate `"..imm.."'")
- elseif match(imm, "^r([1-3]?[0-9])$") or
- match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then
- werror("expected immediate operand, got register")
- else
- waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
- return 0
- end
-end
-
-local function parse_shiftmask(imm, isshift)
- local n = tonumber(imm)
- if n then
- if shr(n, 6) == 0 then
- local lsb = band(imm, 31)
- local msb = imm - lsb
- return isshift and (shl(lsb, 11)+shr(msb, 4)) or (shl(lsb, 6)+msb)
- end
- werror("out of range immediate `"..imm.."'")
- elseif match(imm, "^r([1-3]?[0-9])$") or
- match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then
- werror("expected immediate operand, got register")
- else
- werror("NYI: parameterized 64 bit shift/mask")
- end
-end
-
-local function parse_disp(disp)
- local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
- if imm then
- local r = parse_gpr(reg)
- if r == 0 then werror("cannot use r0 in displacement") end
- return shl(r, 16) + parse_imm(imm, 16, 0, 0, true)
- end
- local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
- if reg and tailr ~= "" then
- local r, tp = parse_gpr(reg)
- if r == 0 then werror("cannot use r0 in displacement") end
- if tp then
- waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr))
- return shl(r, 16)
- end
- end
- werror("bad displacement `"..disp.."'")
-end
-
-local function parse_u5disp(disp, scale)
- local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
- if imm then
- local r = parse_gpr(reg)
- if r == 0 then werror("cannot use r0 in displacement") end
- return shl(r, 16) + parse_imm(imm, 5, 11, scale, false)
- end
- local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
- if reg and tailr ~= "" then
- local r, tp = parse_gpr(reg)
- if r == 0 then werror("cannot use r0 in displacement") end
- if tp then
- waction("IMM", scale*1024+5*32+11, format(tp.ctypefmt, tailr))
- return shl(r, 16)
- end
- end
- werror("bad displacement `"..disp.."'")
-end
-
-local function parse_label(label, def)
- local prefix = sub(label, 1, 2)
- -- =>label (pc label reference)
- if prefix == "=>" then
- return "PC", 0, sub(label, 3)
- end
- -- ->name (global label reference)
- if prefix == "->" then
- return "LG", map_global[sub(label, 3)]
- end
- if def then
- -- [1-9] (local label definition)
- if match(label, "^[1-9]$") then
- return "LG", 10+tonumber(label)
- end
- else
- -- [<>][1-9] (local label reference)
- local dir, lnum = match(label, "^([<>])([1-9])$")
- if dir then -- Fwd: 1-9, Bkwd: 11-19.
- return "LG", lnum + (dir == ">" and 0 or 10)
- end
- -- extern label (extern label reference)
- local extname = match(label, "^extern%s+(%S+)$")
- if extname then
- return "EXT", map_extern[extname]
- end
- end
- werror("bad label `"..label.."'")
-end
-
-------------------------------------------------------------------------------
-
--- Handle opcodes defined with template strings.
-map_op[".template__"] = function(params, template, nparams)
- if not params then return sub(template, 9) end
- local op = tonumber(sub(template, 1, 8), 16)
- local n, rs = 1, 26
-
- -- Limit number of section buffer positions used by a single dasm_put().
- -- A single opcode needs a maximum of 3 positions (rlwinm).
- if secpos+3 > maxsecpos then wflush() end
- local pos = wpos()
-
- -- Process each character.
- for p in gmatch(sub(template, 9), ".") do
- if p == "R" then
- rs = rs - 5; op = op + shl(parse_gpr(params[n]), rs); n = n + 1
- elseif p == "F" then
- rs = rs - 5; op = op + shl(parse_fpr(params[n]), rs); n = n + 1
- elseif p == "A" then
- rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, false); n = n + 1
- elseif p == "S" then
- rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, true); n = n + 1
- elseif p == "I" then
- op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1
- elseif p == "U" then
- op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1
- elseif p == "D" then
- op = op + parse_disp(params[n]); n = n + 1
- elseif p == "2" then
- op = op + parse_u5disp(params[n], 1); n = n + 1
- elseif p == "4" then
- op = op + parse_u5disp(params[n], 2); n = n + 1
- elseif p == "8" then
- op = op + parse_u5disp(params[n], 3); n = n + 1
- elseif p == "C" then
- rs = rs - 5; op = op + shl(parse_cond(params[n]), rs); n = n + 1
- elseif p == "X" then
- rs = rs - 5; op = op + shl(parse_cr(params[n]), rs+2); n = n + 1
- elseif p == "W" then
- op = op + parse_cr(params[n]); n = n + 1
- elseif p == "G" then
- op = op + parse_imm(params[n], 8, 12, 0, false); n = n + 1
- elseif p == "H" then
- op = op + parse_shiftmask(params[n], true); n = n + 1
- elseif p == "M" then
- op = op + parse_shiftmask(params[n], false); n = n + 1
- elseif p == "J" or p == "K" then
- local mode, n, s = parse_label(params[n], false)
- if p == "K" then n = n + 2048 end
- waction("REL_"..mode, n, s, 1)
- n = n + 1
- elseif p == "0" then
- if band(shr(op, rs), 31) == 0 then werror("cannot use r0") end
- elseif p == "=" or p == "%" then
- local t = band(shr(op, p == "%" and rs+5 or rs), 31)
- rs = rs - 5
- op = op + shl(t, rs)
- elseif p == "~" then
- local mm = shl(31, rs)
- local lo = band(op, mm)
- local hi = band(op, shl(mm, 5))
- op = op - lo - hi + shl(lo, 5) + shr(hi, 5)
- elseif p == "-" then
- rs = rs - 5
- elseif p == "." then
- -- Ignored.
- else
- assert(false)
- end
- end
- wputpos(pos, op)
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcode to mark the position where the action list is to be emitted.
-map_op[".actionlist_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeactions(out, name) end)
-end
-
--- Pseudo-opcode to mark the position where the global enum is to be emitted.
-map_op[".globals_1"] = function(params)
- if not params then return "prefix" end
- local prefix = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeglobals(out, prefix) end)
-end
-
--- Pseudo-opcode to mark the position where the global names are to be emitted.
-map_op[".globalnames_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeglobalnames(out, name) end)
-end
-
--- Pseudo-opcode to mark the position where the extern names are to be emitted.
-map_op[".externnames_1"] = function(params)
- if not params then return "cvar" end
- local name = params[1] -- No syntax check. You get to keep the pieces.
- wline(function(out) writeexternnames(out, name) end)
-end
-
-------------------------------------------------------------------------------
-
--- Label pseudo-opcode (converted from trailing colon form).
-map_op[".label_1"] = function(params)
- if not params then return "[1-9] | ->global | =>pcexpr" end
- if secpos+1 > maxsecpos then wflush() end
- local mode, n, s = parse_label(params[1], true)
- if mode == "EXT" then werror("bad label definition") end
- waction("LABEL_"..mode, n, s, 1)
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcodes for data storage.
-map_op[".long_*"] = function(params)
- if not params then return "imm..." end
- for _,p in ipairs(params) do
- local n = tonumber(p)
- if not n then werror("bad immediate `"..p.."'") end
- if n < 0 then n = n + 2^32 end
- wputw(n)
- if secpos+2 > maxsecpos then wflush() end
- end
-end
-
--- Alignment pseudo-opcode.
-map_op[".align_1"] = function(params)
- if not params then return "numpow2" end
- if secpos+1 > maxsecpos then wflush() end
- local align = tonumber(params[1])
- if align then
- local x = align
- -- Must be a power of 2 in the range (2 ... 256).
- for i=1,8 do
- x = x / 2
- if x == 1 then
- waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
- return
- end
- end
- end
- werror("bad alignment")
-end
-
-------------------------------------------------------------------------------
-
--- Pseudo-opcode for (primitive) type definitions (map to C types).
-map_op[".type_3"] = function(params, nparams)
- if not params then
- return nparams == 2 and "name, ctype" or "name, ctype, reg"
- end
- local name, ctype, reg = params[1], params[2], params[3]
- if not match(name, "^[%a_][%w_]*$") then
- werror("bad type name `"..name.."'")
- end
- local tp = map_type[name]
- if tp then
- werror("duplicate type `"..name.."'")
- end
- -- Add #type to defines. A bit unclean to put it in map_archdef.
- map_archdef["#"..name] = "sizeof("..ctype..")"
- -- Add new type and emit shortcut define.
- local num = ctypenum + 1
- map_type[name] = {
- ctype = ctype,
- ctypefmt = format("Dt%X(%%s)", num),
- reg = reg,
- }
- wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
- ctypenum = num
-end
-map_op[".type_2"] = map_op[".type_3"]
-
--- Dump type definitions.
-local function dumptypes(out, lvl)
- local t = {}
- for name in pairs(map_type) do t[#t+1] = name end
- sort(t)
- out:write("Type definitions:\n")
- for _,name in ipairs(t) do
- local tp = map_type[name]
- local reg = tp.reg or ""
- out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
- end
- out:write("\n")
-end
-
-------------------------------------------------------------------------------
-
--- Set the current section.
-function _M.section(num)
- waction("SECTION", num)
- wflush(true) -- SECTION is a terminal action.
-end
-
-------------------------------------------------------------------------------
-
--- Dump architecture description.
-function _M.dumparch(out)
- out:write(format("DynASM %s version %s, released %s\n\n",
- _info.arch, _info.version, _info.release))
- dumpactions(out)
-end
-
--- Dump all user defined elements.
-function _M.dumpdef(out, lvl)
- dumptypes(out, lvl)
- dumpglobals(out, lvl)
- dumpexterns(out, lvl)
-end
-
-------------------------------------------------------------------------------
-
--- Pass callbacks from/to the DynASM core.
-function _M.passcb(wl, we, wf, ww)
- wline, werror, wfatal, wwarn = wl, we, wf, ww
- return wflush
-end
-
--- Setup the arch-specific module.
-function _M.setup(arch, opt)
- g_arch, g_opt = arch, opt
-end
-
--- Merge the core maps and the arch-specific maps.
-function _M.mergemaps(map_coreop, map_def)
- setmetatable(map_op, { __index = map_coreop })
- setmetatable(map_def, { __index = map_archdef })
- return map_op, map_def
-end
-
-return _M
-
-------------------------------------------------------------------------------
-
+------------------------------------------------------------------------------
+-- DynASM PPC module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Module information:
+local _info = {
+ arch = "ppc",
+ description = "DynASM PPC module",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ license = "MIT",
+}
+
+-- Exported glue functions for the arch-specific module.
+local _M = { _info = _info }
+
+-- Cache library functions.
+local type, tonumber, pairs, ipairs = type, tonumber, pairs, ipairs
+local assert, setmetatable = assert, setmetatable
+local _s = string
+local sub, format, byte, char = _s.sub, _s.format, _s.byte, _s.char
+local match, gmatch = _s.match, _s.gmatch
+local concat, sort = table.concat, table.sort
+local bit = bit or require("bit")
+local band, shl, shr, sar = bit.band, bit.lshift, bit.rshift, bit.arshift
+local tohex = bit.tohex
+
+-- Inherited tables and callbacks.
+local g_opt, g_arch
+local wline, werror, wfatal, wwarn
+
+-- Action name list.
+-- CHECK: Keep this in sync with the C code!
+local action_names = {
+ "STOP", "SECTION", "ESC", "REL_EXT",
+ "ALIGN", "REL_LG", "LABEL_LG",
+ "REL_PC", "LABEL_PC", "IMM",
+}
+
+-- Maximum number of section buffer positions for dasm_put().
+-- CHECK: Keep this in sync with the C code!
+local maxsecpos = 25 -- Keep this low, to avoid excessively long C lines.
+
+-- Action name -> action number.
+local map_action = {}
+for n,name in ipairs(action_names) do
+ map_action[name] = n-1
+end
+
+-- Action list buffer.
+local actlist = {}
+
+-- Argument list for next dasm_put(). Start with offset 0 into action list.
+local actargs = { 0 }
+
+-- Current number of section buffer positions for dasm_put().
+local secpos = 1
+
+------------------------------------------------------------------------------
+
+-- Dump action names and numbers.
+local function dumpactions(out)
+ out:write("DynASM encoding engine action codes:\n")
+ for n,name in ipairs(action_names) do
+ local num = map_action[name]
+ out:write(format(" %-10s %02X %d\n", name, num, num))
+ end
+ out:write("\n")
+end
+
+-- Write action list buffer as a huge static C array.
+local function writeactions(out, name)
+ local nn = #actlist
+ if nn == 0 then nn = 1; actlist[0] = map_action.STOP end
+ out:write("static const unsigned int ", name, "[", nn, "] = {\n")
+ for i = 1,nn-1 do
+ assert(out:write("0x", tohex(actlist[i]), ",\n"))
+ end
+ assert(out:write("0x", tohex(actlist[nn]), "\n};\n\n"))
+end
+
+------------------------------------------------------------------------------
+
+-- Add word to action list.
+local function wputxw(n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[#actlist+1] = n
+end
+
+-- Add action to list with optional arg. Advance buffer pos, too.
+local function waction(action, val, a, num)
+ local w = assert(map_action[action], "bad action name `"..action.."'")
+ wputxw(w * 0x10000 + (val or 0))
+ if a then actargs[#actargs+1] = a end
+ if a or num then secpos = secpos + (num or 1) end
+end
+
+-- Flush action list (intervening C code or buffer pos overflow).
+local function wflush(term)
+ if #actlist == actargs[1] then return end -- Nothing to flush.
+ if not term then waction("STOP") end -- Terminate action list.
+ wline(format("dasm_put(Dst, %s);", concat(actargs, ", ")), true)
+ actargs = { #actlist } -- Actionlist offset is 1st arg to next dasm_put().
+ secpos = 1 -- The actionlist offset occupies a buffer position, too.
+end
+
+-- Put escaped word.
+local function wputw(n)
+ if n <= 0xffffff then waction("ESC") end
+ wputxw(n)
+end
+
+-- Reserve position for word.
+local function wpos()
+ local pos = #actlist+1
+ actlist[pos] = ""
+ return pos
+end
+
+-- Store word to reserved position.
+local function wputpos(pos, n)
+ assert(n >= 0 and n <= 0xffffffff and n % 1 == 0, "word out of range")
+ actlist[pos] = n
+end
+
+------------------------------------------------------------------------------
+
+-- Global label name -> global label number. With auto assignment on 1st use.
+local next_global = 20
+local map_global = setmetatable({}, { __index = function(t, name)
+ if not match(name, "^[%a_][%w_]*$") then werror("bad global label") end
+ local n = next_global
+ if n > 2047 then werror("too many global labels") end
+ next_global = n + 1
+ t[name] = n
+ return n
+end})
+
+-- Dump global labels.
+local function dumpglobals(out, lvl)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("Global labels:\n")
+ for i=20,next_global-1 do
+ out:write(format(" %s\n", t[i]))
+ end
+ out:write("\n")
+end
+
+-- Write global label enum.
+local function writeglobals(out, prefix)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("enum {\n")
+ for i=20,next_global-1 do
+ out:write(" ", prefix, t[i], ",\n")
+ end
+ out:write(" ", prefix, "_MAX\n};\n")
+end
+
+-- Write global label names.
+local function writeglobalnames(out, name)
+ local t = {}
+ for name, n in pairs(map_global) do t[n] = name end
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=20,next_global-1 do
+ out:write(" \"", t[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Extern label name -> extern label number. With auto assignment on 1st use.
+local next_extern = 0
+local map_extern_ = {}
+local map_extern = setmetatable({}, { __index = function(t, name)
+ -- No restrictions on the name for now.
+ local n = next_extern
+ if n > 2047 then werror("too many extern labels") end
+ next_extern = n + 1
+ t[name] = n
+ map_extern_[n] = name
+ return n
+end})
+
+-- Dump extern labels.
+local function dumpexterns(out, lvl)
+ out:write("Extern labels:\n")
+ for i=0,next_extern-1 do
+ out:write(format(" %s\n", map_extern_[i]))
+ end
+ out:write("\n")
+end
+
+-- Write extern label names.
+local function writeexternnames(out, name)
+ out:write("static const char *const ", name, "[] = {\n")
+ for i=0,next_extern-1 do
+ out:write(" \"", map_extern_[i], "\",\n")
+ end
+ out:write(" (const char *)0\n};\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Arch-specific maps.
+local map_archdef = { sp = "r1" } -- Ext. register name -> int. name.
+
+local map_type = {} -- Type name -> { ctype, reg }
+local ctypenum = 0 -- Type number (for Dt... macros).
+
+-- Reverse defines for registers.
+function _M.revdef(s)
+ if s == "r1" then return "sp" end
+ return s
+end
+
+local map_cond = {
+ lt = 0, gt = 1, eq = 2, so = 3,
+ ge = 4, le = 5, ne = 6, ns = 7,
+}
+
+------------------------------------------------------------------------------
+
+-- Template strings for PPC instructions.
+local map_op = {
+ tdi_3 = "08000000ARI",
+ twi_3 = "0c000000ARI",
+ mulli_3 = "1c000000RRI",
+ subfic_3 = "20000000RRI",
+ cmplwi_3 = "28000000XRU",
+ cmplwi_2 = "28000000-RU",
+ cmpldi_3 = "28200000XRU",
+ cmpldi_2 = "28200000-RU",
+ cmpwi_3 = "2c000000XRI",
+ cmpwi_2 = "2c000000-RI",
+ cmpdi_3 = "2c200000XRI",
+ cmpdi_2 = "2c200000-RI",
+ addic_3 = "30000000RRI",
+ ["addic._3"] = "34000000RRI",
+ addi_3 = "38000000RR0I",
+ li_2 = "38000000RI",
+ la_2 = "38000000RD",
+ addis_3 = "3c000000RR0I",
+ lis_2 = "3c000000RI",
+ lus_2 = "3c000000RU",
+ bc_3 = "40000000AAK",
+ bcl_3 = "40000001AAK",
+ bdnz_1 = "42000000K",
+ bdz_1 = "42400000K",
+ sc_0 = "44000000",
+ b_1 = "48000000J",
+ bl_1 = "48000001J",
+ rlwimi_5 = "50000000RR~AAA.",
+ rlwinm_5 = "54000000RR~AAA.",
+ rlwnm_5 = "5c000000RR~RAA.",
+ ori_3 = "60000000RR~U",
+ nop_0 = "60000000",
+ oris_3 = "64000000RR~U",
+ xori_3 = "68000000RR~U",
+ xoris_3 = "6c000000RR~U",
+ ["andi._3"] = "70000000RR~U",
+ ["andis._3"] = "74000000RR~U",
+ lwz_2 = "80000000RD",
+ lwzu_2 = "84000000RD",
+ lbz_2 = "88000000RD",
+ lbzu_2 = "8c000000RD",
+ stw_2 = "90000000RD",
+ stwu_2 = "94000000RD",
+ stb_2 = "98000000RD",
+ stbu_2 = "9c000000RD",
+ lhz_2 = "a0000000RD",
+ lhzu_2 = "a4000000RD",
+ lha_2 = "a8000000RD",
+ lhau_2 = "ac000000RD",
+ sth_2 = "b0000000RD",
+ sthu_2 = "b4000000RD",
+ lmw_2 = "b8000000RD",
+ stmw_2 = "bc000000RD",
+ lfs_2 = "c0000000FD",
+ lfsu_2 = "c4000000FD",
+ lfd_2 = "c8000000FD",
+ lfdu_2 = "cc000000FD",
+ stfs_2 = "d0000000FD",
+ stfsu_2 = "d4000000FD",
+ stfd_2 = "d8000000FD",
+ stfdu_2 = "dc000000FD",
+ ld_2 = "e8000000RD", -- NYI: displacement must be divisible by 4.
+ ldu_2 = "e8000001RD",
+ lwa_2 = "e8000002RD",
+ std_2 = "f8000000RD",
+ stdu_2 = "f8000001RD",
+
+ -- Primary opcode 19:
+ mcrf_2 = "4c000000XX",
+ isync_0 = "4c00012c",
+ crnor_3 = "4c000042CCC",
+ crnot_2 = "4c000042CC=",
+ crandc_3 = "4c000102CCC",
+ crxor_3 = "4c000182CCC",
+ crclr_1 = "4c000182C==",
+ crnand_3 = "4c0001c2CCC",
+ crand_3 = "4c000202CCC",
+ creqv_3 = "4c000242CCC",
+ crset_1 = "4c000242C==",
+ crorc_3 = "4c000342CCC",
+ cror_3 = "4c000382CCC",
+ crmove_2 = "4c000382CC=",
+ bclr_2 = "4c000020AA",
+ bclrl_2 = "4c000021AA",
+ bcctr_2 = "4c000420AA",
+ bcctrl_2 = "4c000421AA",
+ blr_0 = "4e800020",
+ blrl_0 = "4e800021",
+ bctr_0 = "4e800420",
+ bctrl_0 = "4e800421",
+
+ -- Primary opcode 31:
+ cmpw_3 = "7c000000XRR",
+ cmpw_2 = "7c000000-RR",
+ cmpd_3 = "7c200000XRR",
+ cmpd_2 = "7c200000-RR",
+ tw_3 = "7c000008ARR",
+ subfc_3 = "7c000010RRR.",
+ subc_3 = "7c000010RRR~.",
+ mulhdu_3 = "7c000012RRR.",
+ addc_3 = "7c000014RRR.",
+ mulhwu_3 = "7c000016RRR.",
+ isel_4 = "7c00001eRRRC",
+ isellt_3 = "7c00001eRRR",
+ iselgt_3 = "7c00005eRRR",
+ iseleq_3 = "7c00009eRRR",
+ mfcr_1 = "7c000026R",
+ mfocrf_2 = "7c100026RG",
+ mtcrf_2 = "7c000120GR",
+ mtocrf_2 = "7c100120GR",
+ lwarx_3 = "7c000028RR0R",
+ ldx_3 = "7c00002aRR0R",
+ lwzx_3 = "7c00002eRR0R",
+ slw_3 = "7c000030RR~R.",
+ cntlzw_2 = "7c000034RR~",
+ sld_3 = "7c000036RR~R.",
+ and_3 = "7c000038RR~R.",
+ cmplw_3 = "7c000040XRR",
+ cmplw_2 = "7c000040-RR",
+ cmpld_3 = "7c200040XRR",
+ cmpld_2 = "7c200040-RR",
+ subf_3 = "7c000050RRR.",
+ sub_3 = "7c000050RRR~.",
+ ldux_3 = "7c00006aRR0R",
+ dcbst_2 = "7c00006c-RR",
+ lwzux_3 = "7c00006eRR0R",
+ cntlzd_2 = "7c000074RR~",
+ andc_3 = "7c000078RR~R.",
+ td_3 = "7c000088ARR",
+ mulhd_3 = "7c000092RRR.",
+ mulhw_3 = "7c000096RRR.",
+ ldarx_3 = "7c0000a8RR0R",
+ dcbf_2 = "7c0000ac-RR",
+ lbzx_3 = "7c0000aeRR0R",
+ neg_2 = "7c0000d0RR.",
+ lbzux_3 = "7c0000eeRR0R",
+ popcntb_2 = "7c0000f4RR~",
+ not_2 = "7c0000f8RR~%.",
+ nor_3 = "7c0000f8RR~R.",
+ subfe_3 = "7c000110RRR.",
+ sube_3 = "7c000110RRR~.",
+ adde_3 = "7c000114RRR.",
+ stdx_3 = "7c00012aRR0R",
+ stwcx_3 = "7c00012cRR0R.",
+ stwx_3 = "7c00012eRR0R",
+ prtyw_2 = "7c000134RR~",
+ stdux_3 = "7c00016aRR0R",
+ stwux_3 = "7c00016eRR0R",
+ prtyd_2 = "7c000174RR~",
+ subfze_2 = "7c000190RR.",
+ addze_2 = "7c000194RR.",
+ stdcx_3 = "7c0001acRR0R.",
+ stbx_3 = "7c0001aeRR0R",
+ subfme_2 = "7c0001d0RR.",
+ mulld_3 = "7c0001d2RRR.",
+ addme_2 = "7c0001d4RR.",
+ mullw_3 = "7c0001d6RRR.",
+ dcbtst_2 = "7c0001ec-RR",
+ stbux_3 = "7c0001eeRR0R",
+ add_3 = "7c000214RRR.",
+ dcbt_2 = "7c00022c-RR",
+ lhzx_3 = "7c00022eRR0R",
+ eqv_3 = "7c000238RR~R.",
+ eciwx_3 = "7c00026cRR0R",
+ lhzux_3 = "7c00026eRR0R",
+ xor_3 = "7c000278RR~R.",
+ mfspefscr_1 = "7c0082a6R",
+ mfxer_1 = "7c0102a6R",
+ mflr_1 = "7c0802a6R",
+ mfctr_1 = "7c0902a6R",
+ lwax_3 = "7c0002aaRR0R",
+ lhax_3 = "7c0002aeRR0R",
+ mftb_1 = "7c0c42e6R",
+ mftbu_1 = "7c0d42e6R",
+ lwaux_3 = "7c0002eaRR0R",
+ lhaux_3 = "7c0002eeRR0R",
+ sthx_3 = "7c00032eRR0R",
+ orc_3 = "7c000338RR~R.",
+ ecowx_3 = "7c00036cRR0R",
+ sthux_3 = "7c00036eRR0R",
+ or_3 = "7c000378RR~R.",
+ mr_2 = "7c000378RR~%.",
+ divdu_3 = "7c000392RRR.",
+ divwu_3 = "7c000396RRR.",
+ mtspefscr_1 = "7c0083a6R",
+ mtxer_1 = "7c0103a6R",
+ mtlr_1 = "7c0803a6R",
+ mtctr_1 = "7c0903a6R",
+ dcbi_2 = "7c0003ac-RR",
+ nand_3 = "7c0003b8RR~R.",
+ divd_3 = "7c0003d2RRR.",
+ divw_3 = "7c0003d6RRR.",
+ cmpb_3 = "7c0003f8RR~R.",
+ mcrxr_1 = "7c000400X",
+ subfco_3 = "7c000410RRR.",
+ subco_3 = "7c000410RRR~.",
+ addco_3 = "7c000414RRR.",
+ ldbrx_3 = "7c000428RR0R",
+ lswx_3 = "7c00042aRR0R",
+ lwbrx_3 = "7c00042cRR0R",
+ lfsx_3 = "7c00042eFR0R",
+ srw_3 = "7c000430RR~R.",
+ srd_3 = "7c000436RR~R.",
+ subfo_3 = "7c000450RRR.",
+ subo_3 = "7c000450RRR~.",
+ lfsux_3 = "7c00046eFR0R",
+ lswi_3 = "7c0004aaRR0A",
+ sync_0 = "7c0004ac",
+ lwsync_0 = "7c2004ac",
+ ptesync_0 = "7c4004ac",
+ lfdx_3 = "7c0004aeFR0R",
+ nego_2 = "7c0004d0RR.",
+ lfdux_3 = "7c0004eeFR0R",
+ subfeo_3 = "7c000510RRR.",
+ subeo_3 = "7c000510RRR~.",
+ addeo_3 = "7c000514RRR.",
+ stdbrx_3 = "7c000528RR0R",
+ stswx_3 = "7c00052aRR0R",
+ stwbrx_3 = "7c00052cRR0R",
+ stfsx_3 = "7c00052eFR0R",
+ stfsux_3 = "7c00056eFR0R",
+ subfzeo_2 = "7c000590RR.",
+ addzeo_2 = "7c000594RR.",
+ stswi_3 = "7c0005aaRR0A",
+ stfdx_3 = "7c0005aeFR0R",
+ subfmeo_2 = "7c0005d0RR.",
+ mulldo_3 = "7c0005d2RRR.",
+ addmeo_2 = "7c0005d4RR.",
+ mullwo_3 = "7c0005d6RRR.",
+ dcba_2 = "7c0005ec-RR",
+ stfdux_3 = "7c0005eeFR0R",
+ addo_3 = "7c000614RRR.",
+ lhbrx_3 = "7c00062cRR0R",
+ sraw_3 = "7c000630RR~R.",
+ srad_3 = "7c000634RR~R.",
+ srawi_3 = "7c000670RR~A.",
+ sradi_3 = "7c000674RR~H.",
+ eieio_0 = "7c0006ac",
+ lfiwax_3 = "7c0006aeFR0R",
+ sthbrx_3 = "7c00072cRR0R",
+ extsh_2 = "7c000734RR~.",
+ extsb_2 = "7c000774RR~.",
+ divduo_3 = "7c000792RRR.",
+ divwou_3 = "7c000796RRR.",
+ icbi_2 = "7c0007ac-RR",
+ stfiwx_3 = "7c0007aeFR0R",
+ extsw_2 = "7c0007b4RR~.",
+ divdo_3 = "7c0007d2RRR.",
+ divwo_3 = "7c0007d6RRR.",
+ dcbz_2 = "7c0007ec-RR",
+
+ -- Primary opcode 30:
+ rldicl_4 = "78000000RR~HM.",
+ rldicr_4 = "78000004RR~HM.",
+ rldic_4 = "78000008RR~HM.",
+ rldimi_4 = "7800000cRR~HM.",
+ rldcl_4 = "78000010RR~RM.",
+ rldcr_4 = "78000012RR~RM.",
+
+ -- Primary opcode 59:
+ fdivs_3 = "ec000024FFF.",
+ fsubs_3 = "ec000028FFF.",
+ fadds_3 = "ec00002aFFF.",
+ fsqrts_2 = "ec00002cF-F.",
+ fres_2 = "ec000030F-F.",
+ fmuls_3 = "ec000032FF-F.",
+ frsqrtes_2 = "ec000034F-F.",
+ fmsubs_4 = "ec000038FFFF~.",
+ fmadds_4 = "ec00003aFFFF~.",
+ fnmsubs_4 = "ec00003cFFFF~.",
+ fnmadds_4 = "ec00003eFFFF~.",
+
+ -- Primary opcode 63:
+ fdiv_3 = "fc000024FFF.",
+ fsub_3 = "fc000028FFF.",
+ fadd_3 = "fc00002aFFF.",
+ fsqrt_2 = "fc00002cF-F.",
+ fsel_4 = "fc00002eFFFF~.",
+ fre_2 = "fc000030F-F.",
+ fmul_3 = "fc000032FF-F.",
+ frsqrte_2 = "fc000034F-F.",
+ fmsub_4 = "fc000038FFFF~.",
+ fmadd_4 = "fc00003aFFFF~.",
+ fnmsub_4 = "fc00003cFFFF~.",
+ fnmadd_4 = "fc00003eFFFF~.",
+ fcmpu_3 = "fc000000XFF",
+ fcpsgn_3 = "fc000010FFF.",
+ fcmpo_3 = "fc000040XFF",
+ mtfsb1_1 = "fc00004cA",
+ fneg_2 = "fc000050F-F.",
+ mcrfs_2 = "fc000080XX",
+ mtfsb0_1 = "fc00008cA",
+ fmr_2 = "fc000090F-F.",
+ frsp_2 = "fc000018F-F.",
+ fctiw_2 = "fc00001cF-F.",
+ fctiwz_2 = "fc00001eF-F.",
+ mtfsfi_2 = "fc00010cAA", -- NYI: upshift.
+ fnabs_2 = "fc000110F-F.",
+ fabs_2 = "fc000210F-F.",
+ frin_2 = "fc000310F-F.",
+ friz_2 = "fc000350F-F.",
+ frip_2 = "fc000390F-F.",
+ frim_2 = "fc0003d0F-F.",
+ mffs_1 = "fc00048eF.",
+ -- NYI: mtfsf.
+ fctid_2 = "fc00065cF-F.",
+ fctidz_2 = "fc00065eF-F.",
+ fcfid_2 = "fc00069cF-F.",
+
+ -- Primary opcode 4, SPE APU extension:
+ evaddw_3 = "10000200RRR",
+ evaddiw_3 = "10000202RAR~",
+ evsubw_3 = "10000204RRR~",
+ evsubiw_3 = "10000206RAR~",
+ evabs_2 = "10000208RR",
+ evneg_2 = "10000209RR",
+ evextsb_2 = "1000020aRR",
+ evextsh_2 = "1000020bRR",
+ evrndw_2 = "1000020cRR",
+ evcntlzw_2 = "1000020dRR",
+ evcntlsw_2 = "1000020eRR",
+ brinc_3 = "1000020fRRR",
+ evand_3 = "10000211RRR",
+ evandc_3 = "10000212RRR",
+ evxor_3 = "10000216RRR",
+ evor_3 = "10000217RRR",
+ evmr_2 = "10000217RR=",
+ evnor_3 = "10000218RRR",
+ evnot_2 = "10000218RR=",
+ eveqv_3 = "10000219RRR",
+ evorc_3 = "1000021bRRR",
+ evnand_3 = "1000021eRRR",
+ evsrwu_3 = "10000220RRR",
+ evsrws_3 = "10000221RRR",
+ evsrwiu_3 = "10000222RRA",
+ evsrwis_3 = "10000223RRA",
+ evslw_3 = "10000224RRR",
+ evslwi_3 = "10000226RRA",
+ evrlw_3 = "10000228RRR",
+ evsplati_2 = "10000229RS",
+ evrlwi_3 = "1000022aRRA",
+ evsplatfi_2 = "1000022bRS",
+ evmergehi_3 = "1000022cRRR",
+ evmergelo_3 = "1000022dRRR",
+ evcmpgtu_3 = "10000230XRR",
+ evcmpgtu_2 = "10000230-RR",
+ evcmpgts_3 = "10000231XRR",
+ evcmpgts_2 = "10000231-RR",
+ evcmpltu_3 = "10000232XRR",
+ evcmpltu_2 = "10000232-RR",
+ evcmplts_3 = "10000233XRR",
+ evcmplts_2 = "10000233-RR",
+ evcmpeq_3 = "10000234XRR",
+ evcmpeq_2 = "10000234-RR",
+ evsel_4 = "10000278RRRW",
+ evsel_3 = "10000278RRR",
+ evfsadd_3 = "10000280RRR",
+ evfssub_3 = "10000281RRR",
+ evfsabs_2 = "10000284RR",
+ evfsnabs_2 = "10000285RR",
+ evfsneg_2 = "10000286RR",
+ evfsmul_3 = "10000288RRR",
+ evfsdiv_3 = "10000289RRR",
+ evfscmpgt_3 = "1000028cXRR",
+ evfscmpgt_2 = "1000028c-RR",
+ evfscmplt_3 = "1000028dXRR",
+ evfscmplt_2 = "1000028d-RR",
+ evfscmpeq_3 = "1000028eXRR",
+ evfscmpeq_2 = "1000028e-RR",
+ evfscfui_2 = "10000290R-R",
+ evfscfsi_2 = "10000291R-R",
+ evfscfuf_2 = "10000292R-R",
+ evfscfsf_2 = "10000293R-R",
+ evfsctui_2 = "10000294R-R",
+ evfsctsi_2 = "10000295R-R",
+ evfsctuf_2 = "10000296R-R",
+ evfsctsf_2 = "10000297R-R",
+ evfsctuiz_2 = "10000298R-R",
+ evfsctsiz_2 = "1000029aR-R",
+ evfststgt_3 = "1000029cXRR",
+ evfststgt_2 = "1000029c-RR",
+ evfststlt_3 = "1000029dXRR",
+ evfststlt_2 = "1000029d-RR",
+ evfststeq_3 = "1000029eXRR",
+ evfststeq_2 = "1000029e-RR",
+ efsadd_3 = "100002c0RRR",
+ efssub_3 = "100002c1RRR",
+ efsabs_2 = "100002c4RR",
+ efsnabs_2 = "100002c5RR",
+ efsneg_2 = "100002c6RR",
+ efsmul_3 = "100002c8RRR",
+ efsdiv_3 = "100002c9RRR",
+ efscmpgt_3 = "100002ccXRR",
+ efscmpgt_2 = "100002cc-RR",
+ efscmplt_3 = "100002cdXRR",
+ efscmplt_2 = "100002cd-RR",
+ efscmpeq_3 = "100002ceXRR",
+ efscmpeq_2 = "100002ce-RR",
+ efscfd_2 = "100002cfR-R",
+ efscfui_2 = "100002d0R-R",
+ efscfsi_2 = "100002d1R-R",
+ efscfuf_2 = "100002d2R-R",
+ efscfsf_2 = "100002d3R-R",
+ efsctui_2 = "100002d4R-R",
+ efsctsi_2 = "100002d5R-R",
+ efsctuf_2 = "100002d6R-R",
+ efsctsf_2 = "100002d7R-R",
+ efsctuiz_2 = "100002d8R-R",
+ efsctsiz_2 = "100002daR-R",
+ efststgt_3 = "100002dcXRR",
+ efststgt_2 = "100002dc-RR",
+ efststlt_3 = "100002ddXRR",
+ efststlt_2 = "100002dd-RR",
+ efststeq_3 = "100002deXRR",
+ efststeq_2 = "100002de-RR",
+ efdadd_3 = "100002e0RRR",
+ efdsub_3 = "100002e1RRR",
+ efdcfuid_2 = "100002e2R-R",
+ efdcfsid_2 = "100002e3R-R",
+ efdabs_2 = "100002e4RR",
+ efdnabs_2 = "100002e5RR",
+ efdneg_2 = "100002e6RR",
+ efdmul_3 = "100002e8RRR",
+ efddiv_3 = "100002e9RRR",
+ efdctuidz_2 = "100002eaR-R",
+ efdctsidz_2 = "100002ebR-R",
+ efdcmpgt_3 = "100002ecXRR",
+ efdcmpgt_2 = "100002ec-RR",
+ efdcmplt_3 = "100002edXRR",
+ efdcmplt_2 = "100002ed-RR",
+ efdcmpeq_3 = "100002eeXRR",
+ efdcmpeq_2 = "100002ee-RR",
+ efdcfs_2 = "100002efR-R",
+ efdcfui_2 = "100002f0R-R",
+ efdcfsi_2 = "100002f1R-R",
+ efdcfuf_2 = "100002f2R-R",
+ efdcfsf_2 = "100002f3R-R",
+ efdctui_2 = "100002f4R-R",
+ efdctsi_2 = "100002f5R-R",
+ efdctuf_2 = "100002f6R-R",
+ efdctsf_2 = "100002f7R-R",
+ efdctuiz_2 = "100002f8R-R",
+ efdctsiz_2 = "100002faR-R",
+ efdtstgt_3 = "100002fcXRR",
+ efdtstgt_2 = "100002fc-RR",
+ efdtstlt_3 = "100002fdXRR",
+ efdtstlt_2 = "100002fd-RR",
+ efdtsteq_3 = "100002feXRR",
+ efdtsteq_2 = "100002fe-RR",
+ evlddx_3 = "10000300RR0R",
+ evldd_2 = "10000301R8",
+ evldwx_3 = "10000302RR0R",
+ evldw_2 = "10000303R8",
+ evldhx_3 = "10000304RR0R",
+ evldh_2 = "10000305R8",
+ evlwhex_3 = "10000310RR0R",
+ evlwhe_2 = "10000311R4",
+ evlwhoux_3 = "10000314RR0R",
+ evlwhou_2 = "10000315R4",
+ evlwhosx_3 = "10000316RR0R",
+ evlwhos_2 = "10000317R4",
+ evstddx_3 = "10000320RR0R",
+ evstdd_2 = "10000321R8",
+ evstdwx_3 = "10000322RR0R",
+ evstdw_2 = "10000323R8",
+ evstdhx_3 = "10000324RR0R",
+ evstdh_2 = "10000325R8",
+ evstwhex_3 = "10000330RR0R",
+ evstwhe_2 = "10000331R4",
+ evstwhox_3 = "10000334RR0R",
+ evstwho_2 = "10000335R4",
+ evstwwex_3 = "10000338RR0R",
+ evstwwe_2 = "10000339R4",
+ evstwwox_3 = "1000033cRR0R",
+ evstwwo_2 = "1000033dR4",
+ evmhessf_3 = "10000403RRR",
+ evmhossf_3 = "10000407RRR",
+ evmheumi_3 = "10000408RRR",
+ evmhesmi_3 = "10000409RRR",
+ evmhesmf_3 = "1000040bRRR",
+ evmhoumi_3 = "1000040cRRR",
+ evmhosmi_3 = "1000040dRRR",
+ evmhosmf_3 = "1000040fRRR",
+ evmhessfa_3 = "10000423RRR",
+ evmhossfa_3 = "10000427RRR",
+ evmheumia_3 = "10000428RRR",
+ evmhesmia_3 = "10000429RRR",
+ evmhesmfa_3 = "1000042bRRR",
+ evmhoumia_3 = "1000042cRRR",
+ evmhosmia_3 = "1000042dRRR",
+ evmhosmfa_3 = "1000042fRRR",
+ evmwhssf_3 = "10000447RRR",
+ evmwlumi_3 = "10000448RRR",
+ evmwhumi_3 = "1000044cRRR",
+ evmwhsmi_3 = "1000044dRRR",
+ evmwhsmf_3 = "1000044fRRR",
+ evmwssf_3 = "10000453RRR",
+ evmwumi_3 = "10000458RRR",
+ evmwsmi_3 = "10000459RRR",
+ evmwsmf_3 = "1000045bRRR",
+ evmwhssfa_3 = "10000467RRR",
+ evmwlumia_3 = "10000468RRR",
+ evmwhumia_3 = "1000046cRRR",
+ evmwhsmia_3 = "1000046dRRR",
+ evmwhsmfa_3 = "1000046fRRR",
+ evmwssfa_3 = "10000473RRR",
+ evmwumia_3 = "10000478RRR",
+ evmwsmia_3 = "10000479RRR",
+ evmwsmfa_3 = "1000047bRRR",
+ evmra_2 = "100004c4RR",
+ evdivws_3 = "100004c6RRR",
+ evdivwu_3 = "100004c7RRR",
+ evmwssfaa_3 = "10000553RRR",
+ evmwumiaa_3 = "10000558RRR",
+ evmwsmiaa_3 = "10000559RRR",
+ evmwsmfaa_3 = "1000055bRRR",
+ evmwssfan_3 = "100005d3RRR",
+ evmwumian_3 = "100005d8RRR",
+ evmwsmian_3 = "100005d9RRR",
+ evmwsmfan_3 = "100005dbRRR",
+ evmergehilo_3 = "1000022eRRR",
+ evmergelohi_3 = "1000022fRRR",
+ evlhhesplatx_3 = "10000308RR0R",
+ evlhhesplat_2 = "10000309R2",
+ evlhhousplatx_3 = "1000030cRR0R",
+ evlhhousplat_2 = "1000030dR2",
+ evlhhossplatx_3 = "1000030eRR0R",
+ evlhhossplat_2 = "1000030fR2",
+ evlwwsplatx_3 = "10000318RR0R",
+ evlwwsplat_2 = "10000319R4",
+ evlwhsplatx_3 = "1000031cRR0R",
+ evlwhsplat_2 = "1000031dR4",
+ evaddusiaaw_2 = "100004c0RR",
+ evaddssiaaw_2 = "100004c1RR",
+ evsubfusiaaw_2 = "100004c2RR",
+ evsubfssiaaw_2 = "100004c3RR",
+ evaddumiaaw_2 = "100004c8RR",
+ evaddsmiaaw_2 = "100004c9RR",
+ evsubfumiaaw_2 = "100004caRR",
+ evsubfsmiaaw_2 = "100004cbRR",
+ evmheusiaaw_3 = "10000500RRR",
+ evmhessiaaw_3 = "10000501RRR",
+ evmhessfaaw_3 = "10000503RRR",
+ evmhousiaaw_3 = "10000504RRR",
+ evmhossiaaw_3 = "10000505RRR",
+ evmhossfaaw_3 = "10000507RRR",
+ evmheumiaaw_3 = "10000508RRR",
+ evmhesmiaaw_3 = "10000509RRR",
+ evmhesmfaaw_3 = "1000050bRRR",
+ evmhoumiaaw_3 = "1000050cRRR",
+ evmhosmiaaw_3 = "1000050dRRR",
+ evmhosmfaaw_3 = "1000050fRRR",
+ evmhegumiaa_3 = "10000528RRR",
+ evmhegsmiaa_3 = "10000529RRR",
+ evmhegsmfaa_3 = "1000052bRRR",
+ evmhogumiaa_3 = "1000052cRRR",
+ evmhogsmiaa_3 = "1000052dRRR",
+ evmhogsmfaa_3 = "1000052fRRR",
+ evmwlusiaaw_3 = "10000540RRR",
+ evmwlssiaaw_3 = "10000541RRR",
+ evmwlumiaaw_3 = "10000548RRR",
+ evmwlsmiaaw_3 = "10000549RRR",
+ evmheusianw_3 = "10000580RRR",
+ evmhessianw_3 = "10000581RRR",
+ evmhessfanw_3 = "10000583RRR",
+ evmhousianw_3 = "10000584RRR",
+ evmhossianw_3 = "10000585RRR",
+ evmhossfanw_3 = "10000587RRR",
+ evmheumianw_3 = "10000588RRR",
+ evmhesmianw_3 = "10000589RRR",
+ evmhesmfanw_3 = "1000058bRRR",
+ evmhoumianw_3 = "1000058cRRR",
+ evmhosmianw_3 = "1000058dRRR",
+ evmhosmfanw_3 = "1000058fRRR",
+ evmhegumian_3 = "100005a8RRR",
+ evmhegsmian_3 = "100005a9RRR",
+ evmhegsmfan_3 = "100005abRRR",
+ evmhogumian_3 = "100005acRRR",
+ evmhogsmian_3 = "100005adRRR",
+ evmhogsmfan_3 = "100005afRRR",
+ evmwlusianw_3 = "100005c0RRR",
+ evmwlssianw_3 = "100005c1RRR",
+ evmwlumianw_3 = "100005c8RRR",
+ evmwlsmianw_3 = "100005c9RRR",
+
+ -- NYI: Book E instructions.
+}
+
+-- Add mnemonics for "." variants.
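+-- For example, add_3 = "7c000214RRR." also yields add._3 = "7c000215RRR",
+-- i.e. the same encoding with the Rc (record) bit set.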
+do
+ local t = {}
+ for k,v in pairs(map_op) do
+ if sub(v, -1) == "." then
+ local v2 = sub(v, 1, 7)..char(byte(v, 8)+1)..sub(v, 9, -2)
+ t[sub(k, 1, -3).."."..sub(k, -2)] = v2
+ end
+ end
+ for k,v in pairs(t) do
+ map_op[k] = v
+ end
+end
+
+-- Add more branch mnemonics.
+for cond,c in pairs(map_cond) do
+ local b1 = "b"..cond
+ local c1 = shl(band(c, 3), 16) + (c < 4 and 0x01000000 or 0)
+ -- bX[l]
+ map_op[b1.."_1"] = tohex(0x40800000 + c1).."K"
+ map_op[b1.."y_1"] = tohex(0x40a00000 + c1).."K"
+ map_op[b1.."l_1"] = tohex(0x40800001 + c1).."K"
+ map_op[b1.."_2"] = tohex(0x40800000 + c1).."-XK"
+ map_op[b1.."y_2"] = tohex(0x40a00000 + c1).."-XK"
+ map_op[b1.."l_2"] = tohex(0x40800001 + c1).."-XK"
+ -- bXlr[l]
+ map_op[b1.."lr_0"] = tohex(0x4c800020 + c1)
+ map_op[b1.."lrl_0"] = tohex(0x4c800021 + c1)
+ map_op[b1.."ctr_0"] = tohex(0x4c800420 + c1)
+ map_op[b1.."ctrl_0"] = tohex(0x4c800421 + c1)
+ -- bXctr[l]
+ map_op[b1.."lr_1"] = tohex(0x4c800020 + c1).."-X"
+ map_op[b1.."lrl_1"] = tohex(0x4c800021 + c1).."-X"
+ map_op[b1.."ctr_1"] = tohex(0x4c800420 + c1).."-X"
+ map_op[b1.."ctrl_1"] = tohex(0x4c800421 + c1).."-X"
+end
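+-- For "eq" this yields beq, beqy, beql (plus _2 forms taking a CR field),
+-- beqlr, beqlrl, beqctr and beqctrl; likewise for every other condition.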
+
+------------------------------------------------------------------------------
+
+local function parse_gpr(expr)
+ local tname, ovreg = match(expr, "^([%w_]+):(r[1-3]?[0-9])$")
+ local tp = map_type[tname or expr]
+ if tp then
+ local reg = ovreg or tp.reg
+ if not reg then
+ werror("type `"..(tname or expr).."' needs a register override")
+ end
+ expr = reg
+ end
+ local r = match(expr, "^r([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r, tp end
+ end
+ werror("bad register name `"..expr.."'")
+end
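+-- Accepts r0-r31, a type name with a register override (e.g. "FRAME:r1"),
+-- or a bare type name whose .type definition carries a default register.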
+
+local function parse_fpr(expr)
+ local r = match(expr, "^f([1-3]?[0-9])$")
+ if r then
+ r = tonumber(r)
+ if r <= 31 then return r end
+ end
+ werror("bad register name `"..expr.."'")
+end
+
+local function parse_cr(expr)
+ local r = match(expr, "^cr([0-7])$")
+ if r then return tonumber(r) end
+ werror("bad condition register name `"..expr.."'")
+end
+
+local function parse_cond(expr)
+ local r, cond = match(expr, "^4%*cr([0-7])%+(%w%w)$")
+ if r then
+ r = tonumber(r)
+ local c = map_cond[cond]
+ if c and c < 4 then return r*4+c end
+ end
+ werror("bad condition bit name `"..expr.."'")
+end
+
+local function parse_imm(imm, bits, shift, scale, signed)
+ local n = tonumber(imm)
+ if n then
+ local m = sar(n, scale)
+ if shl(m, scale) == n then
+ if signed then
+ local s = sar(m, bits-1)
+ if s == 0 then return shl(m, shift)
+ elseif s == -1 then return shl(m + shl(1, bits), shift) end
+ else
+ if sar(m, bits) == 0 then return shl(m, shift) end
+ end
+ end
+ werror("out of range immediate `"..imm.."'")
+ elseif match(imm, "^r([1-3]?[0-9])$") or
+ match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then
+ werror("expected immediate operand, got register")
+ else
+ waction("IMM", (signed and 32768 or 0)+scale*1024+bits*32+shift, imm)
+ return 0
+ end
+end
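+-- Non-constant immediates are deferred to the DynASM core as an IMM action
+-- and substituted when the generated dasm_put() call runs.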
+
+local function parse_shiftmask(imm, isshift)
+ local n = tonumber(imm)
+ if n then
+ if shr(n, 6) == 0 then
+ local lsb = band(imm, 31)
+ local msb = imm - lsb
+ return isshift and (shl(lsb, 11)+shr(msb, 4)) or (shl(lsb, 6)+msb)
+ end
+ werror("out of range immediate `"..imm.."'")
+ elseif match(imm, "^r([1-3]?[0-9])$") or
+ match(imm, "^([%w_]+):(r[1-3]?[0-9])$") then
+ werror("expected immediate operand, got register")
+ else
+ werror("NYI: parameterized 64 bit shift/mask")
+ end
+end
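+-- Encodes a constant 6 bit shift amount or mask boundary into the split
+-- low-5-bit/high-bit fields used by the 64 bit rotate/shift instructions.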
+
+local function parse_disp(disp)
+ local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
+ if imm then
+ local r = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ return shl(r, 16) + parse_imm(imm, 16, 0, 0, true)
+ end
+ local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local r, tp = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ if tp then
+ waction("IMM", 32768+16*32, format(tp.ctypefmt, tailr))
+ return shl(r, 16)
+ end
+ end
+ werror("bad displacement `"..disp.."'")
+end
+
+local function parse_u5disp(disp, scale)
+ local imm, reg = match(disp, "^(.*)%(([%w_:]+)%)$")
+ if imm then
+ local r = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ return shl(r, 16) + parse_imm(imm, 5, 11, scale, false)
+ end
+ local reg, tailr = match(disp, "^([%w_:]+)%s*(.*)$")
+ if reg and tailr ~= "" then
+ local r, tp = parse_gpr(reg)
+ if r == 0 then werror("cannot use r0 in displacement") end
+ if tp then
+ waction("IMM", scale*1024+5*32+11, format(tp.ctypefmt, tailr))
+ return shl(r, 16)
+ end
+ end
+ werror("bad displacement `"..disp.."'")
+end
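+-- Scaled 5 bit displacements for the SPE loads/stores, e.g. "evldd r3, 8(r4)";
+-- the offset must be a multiple of 2^scale and fit in 5 bits after scaling.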
+
+local function parse_label(label, def)
+ local prefix = sub(label, 1, 2)
+ -- =>label (pc label reference)
+ if prefix == "=>" then
+ return "PC", 0, sub(label, 3)
+ end
+ -- ->name (global label reference)
+ if prefix == "->" then
+ return "LG", map_global[sub(label, 3)]
+ end
+ if def then
+ -- [1-9] (local label definition)
+ if match(label, "^[1-9]$") then
+ return "LG", 10+tonumber(label)
+ end
+ else
+ -- [<>][1-9] (local label reference)
+ local dir, lnum = match(label, "^([<>])([1-9])$")
+ if dir then -- Fwd: 1-9, Bkwd: 11-19.
+ return "LG", lnum + (dir == ">" and 0 or 10)
+ end
+ -- extern label (extern label reference)
+ local extname = match(label, "^extern%s+(%S+)$")
+ if extname then
+ return "EXT", map_extern[extname]
+ end
+ end
+ werror("bad label `"..label.."'")
+end
+
+------------------------------------------------------------------------------
+
+-- Handle opcodes defined with template strings.
+map_op[".template__"] = function(params, template, nparams)
+ if not params then return sub(template, 9) end
+ local op = tonumber(sub(template, 1, 8), 16)
+ local n, rs = 1, 26
+
+ -- Limit number of section buffer positions used by a single dasm_put().
+ -- A single opcode needs a maximum of 3 positions (rlwinm).
+ if secpos+3 > maxsecpos then wflush() end
+ local pos = wpos()
+
+ -- Process each character.
+ for p in gmatch(sub(template, 9), ".") do
+ if p == "R" then
+ rs = rs - 5; op = op + shl(parse_gpr(params[n]), rs); n = n + 1
+ elseif p == "F" then
+ rs = rs - 5; op = op + shl(parse_fpr(params[n]), rs); n = n + 1
+ elseif p == "A" then
+ rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, false); n = n + 1
+ elseif p == "S" then
+ rs = rs - 5; op = op + parse_imm(params[n], 5, rs, 0, true); n = n + 1
+ elseif p == "I" then
+ op = op + parse_imm(params[n], 16, 0, 0, true); n = n + 1
+ elseif p == "U" then
+ op = op + parse_imm(params[n], 16, 0, 0, false); n = n + 1
+ elseif p == "D" then
+ op = op + parse_disp(params[n]); n = n + 1
+ elseif p == "2" then
+ op = op + parse_u5disp(params[n], 1); n = n + 1
+ elseif p == "4" then
+ op = op + parse_u5disp(params[n], 2); n = n + 1
+ elseif p == "8" then
+ op = op + parse_u5disp(params[n], 3); n = n + 1
+ elseif p == "C" then
+ rs = rs - 5; op = op + shl(parse_cond(params[n]), rs); n = n + 1
+ elseif p == "X" then
+ rs = rs - 5; op = op + shl(parse_cr(params[n]), rs+2); n = n + 1
+ elseif p == "W" then
+ op = op + parse_cr(params[n]); n = n + 1
+ elseif p == "G" then
+ op = op + parse_imm(params[n], 8, 12, 0, false); n = n + 1
+ elseif p == "H" then
+ op = op + parse_shiftmask(params[n], true); n = n + 1
+ elseif p == "M" then
+ op = op + parse_shiftmask(params[n], false); n = n + 1
+ elseif p == "J" or p == "K" then
+ local mode, n, s = parse_label(params[n], false)
+ if p == "K" then n = n + 2048 end
+ waction("REL_"..mode, n, s, 1)
+ n = n + 1
+ elseif p == "0" then
+ if band(shr(op, rs), 31) == 0 then werror("cannot use r0") end
+ elseif p == "=" or p == "%" then
+ local t = band(shr(op, p == "%" and rs+5 or rs), 31)
+ rs = rs - 5
+ op = op + shl(t, rs)
+ elseif p == "~" then
+ local mm = shl(31, rs)
+ local lo = band(op, mm)
+ local hi = band(op, shl(mm, 5))
+ op = op - lo - hi + shl(lo, 5) + shr(hi, 5)
+ elseif p == "-" then
+ rs = rs - 5
+ elseif p == "." then
+ -- Ignored.
+ else
+ assert(false)
+ end
+ end
+ wputpos(pos, op)
+end
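+-- A template is an 8-digit hex base opcode followed by one character per
+-- operand slot (R=gpr, F=fpr, A/S=5 bit imm, I/U=16 bit imm, D=displacement,
+-- X=cr field, K/J=branch target, ...), consumed by the loop above.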
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode to mark the position where the action list is to be emitted.
+map_op[".actionlist_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeactions(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the global enum is to be emitted.
+map_op[".globals_1"] = function(params)
+ if not params then return "prefix" end
+ local prefix = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobals(out, prefix) end)
+end
+
+-- Pseudo-opcode to mark the position where the global names are to be emitted.
+map_op[".globalnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeglobalnames(out, name) end)
+end
+
+-- Pseudo-opcode to mark the position where the extern names are to be emitted.
+map_op[".externnames_1"] = function(params)
+ if not params then return "cvar" end
+ local name = params[1] -- No syntax check. You get to keep the pieces.
+ wline(function(out) writeexternnames(out, name) end)
+end
+
+------------------------------------------------------------------------------
+
+-- Label pseudo-opcode (converted from trailing colon form).
+map_op[".label_1"] = function(params)
+ if not params then return "[1-9] | ->global | =>pcexpr" end
+ if secpos+1 > maxsecpos then wflush() end
+ local mode, n, s = parse_label(params[1], true)
+ if mode == "EXT" then werror("bad label definition") end
+ waction("LABEL_"..mode, n, s, 1)
+end
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcodes for data storage.
+map_op[".long_*"] = function(params)
+ if not params then return "imm..." end
+ for _,p in ipairs(params) do
+ local n = tonumber(p)
+ if not n then werror("bad immediate `"..p.."'") end
+ if n < 0 then n = n + 2^32 end
+ wputw(n)
+ if secpos+2 > maxsecpos then wflush() end
+ end
+end
+
+-- Alignment pseudo-opcode.
+map_op[".align_1"] = function(params)
+ if not params then return "numpow2" end
+ if secpos+1 > maxsecpos then wflush() end
+ local align = tonumber(params[1])
+ if align then
+ local x = align
+ -- Must be a power of 2 in the range (2 ... 256).
+ for i=1,8 do
+ x = x / 2
+ if x == 1 then
+ waction("ALIGN", align-1, nil, 1) -- Action byte is 2**n-1.
+ return
+ end
+ end
+ end
+ werror("bad alignment")
+end
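+-- e.g. ".align 8" emits an ALIGN action that pads to an 8 byte boundary.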
+
+------------------------------------------------------------------------------
+
+-- Pseudo-opcode for (primitive) type definitions (map to C types).
+map_op[".type_3"] = function(params, nparams)
+ if not params then
+ return nparams == 2 and "name, ctype" or "name, ctype, reg"
+ end
+ local name, ctype, reg = params[1], params[2], params[3]
+ if not match(name, "^[%a_][%w_]*$") then
+ werror("bad type name `"..name.."'")
+ end
+ local tp = map_type[name]
+ if tp then
+ werror("duplicate type `"..name.."'")
+ end
+ -- Add #type to defines. A bit unclean to put it in map_archdef.
+ map_archdef["#"..name] = "sizeof("..ctype..")"
+ -- Add new type and emit shortcut define.
+ local num = ctypenum + 1
+ map_type[name] = {
+ ctype = ctype,
+ ctypefmt = format("Dt%X(%%s)", num),
+ reg = reg,
+ }
+ wline(format("#define Dt%X(_V) (int)(ptrdiff_t)&(((%s *)0)_V)", num, ctype))
+ ctypenum = num
+end
+map_op[".type_2"] = map_op[".type_3"]
+
+-- Dump type definitions.
+local function dumptypes(out, lvl)
+ local t = {}
+ for name in pairs(map_type) do t[#t+1] = name end
+ sort(t)
+ out:write("Type definitions:\n")
+ for _,name in ipairs(t) do
+ local tp = map_type[name]
+ local reg = tp.reg or ""
+ out:write(format(" %-20s %-20s %s\n", name, tp.ctype, reg))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Set the current section.
+function _M.section(num)
+ waction("SECTION", num)
+ wflush(true) -- SECTION is a terminal action.
+end
+
+------------------------------------------------------------------------------
+
+-- Dump architecture description.
+function _M.dumparch(out)
+ out:write(format("DynASM %s version %s, released %s\n\n",
+ _info.arch, _info.version, _info.release))
+ dumpactions(out)
+end
+
+-- Dump all user defined elements.
+function _M.dumpdef(out, lvl)
+ dumptypes(out, lvl)
+ dumpglobals(out, lvl)
+ dumpexterns(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Pass callbacks from/to the DynASM core.
+function _M.passcb(wl, we, wf, ww)
+ wline, werror, wfatal, wwarn = wl, we, wf, ww
+ return wflush
+end
+
+-- Setup the arch-specific module.
+function _M.setup(arch, opt)
+ g_arch, g_opt = arch, opt
+end
+
+-- Merge the core maps and the arch-specific maps.
+function _M.mergemaps(map_coreop, map_def)
+ setmetatable(map_op, { __index = map_coreop })
+ setmetatable(map_def, { __index = map_archdef })
+ return map_op, map_def
+end
+
+return _M
+
+------------------------------------------------------------------------------
+
diff --git a/3rdparty/lua/dynasm/dasm_proto.h b/3rdparty/lua/dynasm/dasm_proto.h
index 52ae6ba..960ddb7 100644
--- a/3rdparty/lua/dynasm/dasm_proto.h
+++ b/3rdparty/lua/dynasm/dasm_proto.h
@@ -1,83 +1,83 @@
-/*
-** DynASM encoding engine prototypes.
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-** Released under the MIT license. See dynasm.lua for full copyright notice.
-*/
-
-#ifndef _DASM_PROTO_H
-#define _DASM_PROTO_H
-
-#include <stddef.h>
-#include <stdarg.h>
-
-#define DASM_IDENT "DynASM 1.3.0"
-#define DASM_VERSION 10300 /* 1.3.0 */
-
-#ifndef Dst_DECL
-#define Dst_DECL dasm_State **Dst
-#endif
-
-#ifndef Dst_REF
-#define Dst_REF (*Dst)
-#endif
-
-#ifndef DASM_FDEF
-#define DASM_FDEF extern
-#endif
-
-#ifndef DASM_M_GROW
-#define DASM_M_GROW(ctx, t, p, sz, need) \
- do { \
- size_t _sz = (sz), _need = (need); \
- if (_sz < _need) { \
- if (_sz < 16) _sz = 16; \
- while (_sz < _need) _sz += _sz; \
- (p) = (t *)realloc((p), _sz); \
- if ((p) == NULL) exit(1); \
- (sz) = _sz; \
- } \
- } while(0)
-#endif
-
-#ifndef DASM_M_FREE
-#define DASM_M_FREE(ctx, p, sz) free(p)
-#endif
-
-/* Internal DynASM encoder state. */
-typedef struct dasm_State dasm_State;
-
-
-/* Initialize and free DynASM state. */
-DASM_FDEF void dasm_init(Dst_DECL, int maxsection);
-DASM_FDEF void dasm_free(Dst_DECL);
-
-/* Setup global array. Must be called before dasm_setup(). */
-DASM_FDEF void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl);
-
-/* Grow PC label array. Can be called after dasm_setup(), too. */
-DASM_FDEF void dasm_growpc(Dst_DECL, unsigned int maxpc);
-
-/* Setup encoder. */
-DASM_FDEF void dasm_setup(Dst_DECL, const void *actionlist);
-
-/* Feed encoder with actions. Calls are generated by pre-processor. */
-DASM_FDEF void dasm_put(Dst_DECL, int start, ...);
-
-/* Link sections and return the resulting size. */
-DASM_FDEF int dasm_link(Dst_DECL, size_t *szp);
-
-/* Encode sections into buffer. */
-DASM_FDEF int dasm_encode(Dst_DECL, void *buffer);
-
-/* Get PC label offset. */
-DASM_FDEF int dasm_getpclabel(Dst_DECL, unsigned int pc);
-
-#ifdef DASM_CHECKS
-/* Optional sanity checker to call between isolated encoding steps. */
-DASM_FDEF int dasm_checkstep(Dst_DECL, int secmatch);
-#else
-#define dasm_checkstep(a, b) 0
-#endif
-
-
-#endif /* _DASM_PROTO_H */
+/*
+** DynASM encoding engine prototypes.
+** Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#ifndef _DASM_PROTO_H
+#define _DASM_PROTO_H
+
+#include <stddef.h>
+#include <stdarg.h>
+
+#define DASM_IDENT "DynASM 1.3.0"
+#define DASM_VERSION 10300 /* 1.3.0 */
+
+#ifndef Dst_DECL
+#define Dst_DECL dasm_State **Dst
+#endif
+
+#ifndef Dst_REF
+#define Dst_REF (*Dst)
+#endif
+
+#ifndef DASM_FDEF
+#define DASM_FDEF extern
+#endif
+
+#ifndef DASM_M_GROW
+#define DASM_M_GROW(ctx, t, p, sz, need) \
+ do { \
+ size_t _sz = (sz), _need = (need); \
+ if (_sz < _need) { \
+ if (_sz < 16) _sz = 16; \
+ while (_sz < _need) _sz += _sz; \
+ (p) = (t *)realloc((p), _sz); \
+ if ((p) == NULL) exit(1); \
+ (sz) = _sz; \
+ } \
+ } while(0)
+#endif
+
+#ifndef DASM_M_FREE
+#define DASM_M_FREE(ctx, p, sz) free(p)
+#endif
+
+/* Internal DynASM encoder state. */
+typedef struct dasm_State dasm_State;
+
+
+/* Initialize and free DynASM state. */
+DASM_FDEF void dasm_init(Dst_DECL, int maxsection);
+DASM_FDEF void dasm_free(Dst_DECL);
+
+/* Setup global array. Must be called before dasm_setup(). */
+DASM_FDEF void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl);
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+DASM_FDEF void dasm_growpc(Dst_DECL, unsigned int maxpc);
+
+/* Setup encoder. */
+DASM_FDEF void dasm_setup(Dst_DECL, const void *actionlist);
+
+/* Feed encoder with actions. Calls are generated by pre-processor. */
+DASM_FDEF void dasm_put(Dst_DECL, int start, ...);
+
+/* Link sections and return the resulting size. */
+DASM_FDEF int dasm_link(Dst_DECL, size_t *szp);
+
+/* Encode sections into buffer. */
+DASM_FDEF int dasm_encode(Dst_DECL, void *buffer);
+
+/* Get PC label offset. */
+DASM_FDEF int dasm_getpclabel(Dst_DECL, unsigned int pc);
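+
+/* Typical embedding sequence (sketch): dasm_init(), dasm_setupglobal(),
+** dasm_setup(), any number of generated dasm_put() calls, dasm_link() to
+** obtain the total code size, dasm_encode() into a buffer of that size,
+** then dasm_free(). */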
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+DASM_FDEF int dasm_checkstep(Dst_DECL, int secmatch);
+#else
+#define dasm_checkstep(a, b) 0
+#endif
+
+
+#endif /* _DASM_PROTO_H */
diff --git a/3rdparty/lua/dynasm/dasm_x64.lua b/3rdparty/lua/dynasm/dasm_x64.lua
index 9a584ea..ca25823 100644
--- a/3rdparty/lua/dynasm/dasm_x64.lua
+++ b/3rdparty/lua/dynasm/dasm_x64.lua
@@ -1,12 +1,12 @@
-------------------------------------------------------------------------------
--- DynASM x64 module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- See dynasm.lua for full copyright notice.
-------------------------------------------------------------------------------
--- This module just sets 64 bit mode for the combined x86/x64 module.
--- All the interesting stuff is there.
-------------------------------------------------------------------------------
-
-x64 = true -- Using a global is an ugly, but effective solution.
-return require("dasm_x86")
+------------------------------------------------------------------------------
+-- DynASM x64 module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- See dynasm.lua for full copyright notice.
+------------------------------------------------------------------------------
+-- This module just sets 64 bit mode for the combined x86/x64 module.
+-- All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+x64 = true -- Using a global is an ugly, but effective solution.
+return require("dasm_x86")
diff --git a/3rdparty/lua/dynasm/dasm_x86.h b/3rdparty/lua/dynasm/dasm_x86.h
index 5e1e79b..c91bfdd 100644
--- a/3rdparty/lua/dynasm/dasm_x86.h
+++ b/3rdparty/lua/dynasm/dasm_x86.h
@@ -1,471 +1,471 @@
-/*
-** DynASM x86 encoding engine.
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-** Released under the MIT license. See dynasm.lua for full copyright notice.
-*/
-
-#include <stddef.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdlib.h>
-
-#define DASM_ARCH "x86"
-
-#ifndef DASM_EXTERN
-#define DASM_EXTERN(a,b,c,d) 0
-#endif
-
-/* Action definitions. DASM_STOP must be 255. */
-enum {
- DASM_DISP = 233,
- DASM_IMM_S, DASM_IMM_B, DASM_IMM_W, DASM_IMM_D, DASM_IMM_WB, DASM_IMM_DB,
- DASM_VREG, DASM_SPACE, DASM_SETLABEL, DASM_REL_A, DASM_REL_LG, DASM_REL_PC,
- DASM_IMM_LG, DASM_IMM_PC, DASM_LABEL_LG, DASM_LABEL_PC, DASM_ALIGN,
- DASM_EXTERN, DASM_ESC, DASM_MARK, DASM_SECTION, DASM_STOP
-};
-
-/* Maximum number of section buffer positions for a single dasm_put() call. */
-#define DASM_MAXSECPOS 25
-
-/* DynASM encoder status codes. Action list offset or number are or'ed in. */
-#define DASM_S_OK 0x00000000
-#define DASM_S_NOMEM 0x01000000
-#define DASM_S_PHASE 0x02000000
-#define DASM_S_MATCH_SEC 0x03000000
-#define DASM_S_RANGE_I 0x11000000
-#define DASM_S_RANGE_SEC 0x12000000
-#define DASM_S_RANGE_LG 0x13000000
-#define DASM_S_RANGE_PC 0x14000000
-#define DASM_S_RANGE_VREG 0x15000000
-#define DASM_S_UNDEF_L 0x21000000
-#define DASM_S_UNDEF_PC 0x22000000
-
-/* Macros to convert positions (8 bit section + 24 bit index). */
-#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
-#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
-#define DASM_SEC2POS(sec) ((sec)<<24)
-#define DASM_POS2SEC(pos) ((pos)>>24)
-#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
-
-/* Action list type. */
-typedef const unsigned char *dasm_ActList;
-
-/* Per-section structure. */
-typedef struct dasm_Section {
- int *rbuf; /* Biased buffer pointer (negative section bias). */
- int *buf; /* True buffer pointer. */
- size_t bsize; /* Buffer size in bytes. */
- int pos; /* Biased buffer position. */
- int epos; /* End of biased buffer position - max single put. */
- int ofs; /* Byte offset into section. */
-} dasm_Section;
-
-/* Core structure holding the DynASM encoding state. */
-struct dasm_State {
- size_t psize; /* Allocated size of this structure. */
- dasm_ActList actionlist; /* Current actionlist pointer. */
- int *lglabels; /* Local/global chain/pos ptrs. */
- size_t lgsize;
- int *pclabels; /* PC label chains/pos ptrs. */
- size_t pcsize;
- void **globals; /* Array of globals (bias -10). */
- dasm_Section *section; /* Pointer to active section. */
- size_t codesize; /* Total size of all code sections. */
- int maxsection; /* 0 <= sectionidx < maxsection. */
- int status; /* Status code. */
- dasm_Section sections[1]; /* All sections. Alloc-extended. */
-};
-
-/* The size of the core structure depends on the max. number of sections. */
-#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
-
-
-/* Initialize DynASM state. */
-void dasm_init(Dst_DECL, int maxsection)
-{
- dasm_State *D;
- size_t psz = 0;
- int i;
- Dst_REF = NULL;
- DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
- D = Dst_REF;
- D->psize = psz;
- D->lglabels = NULL;
- D->lgsize = 0;
- D->pclabels = NULL;
- D->pcsize = 0;
- D->globals = NULL;
- D->maxsection = maxsection;
- for (i = 0; i < maxsection; i++) {
- D->sections[i].buf = NULL; /* Need this for pass3. */
- D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
- D->sections[i].bsize = 0;
- D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
- }
-}
-
-/* Free DynASM state. */
-void dasm_free(Dst_DECL)
-{
- dasm_State *D = Dst_REF;
- int i;
- for (i = 0; i < D->maxsection; i++)
- if (D->sections[i].buf)
- DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
- if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
- if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
- DASM_M_FREE(Dst, D, D->psize);
-}
-
-/* Setup global label array. Must be called before dasm_setup(). */
-void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
-{
- dasm_State *D = Dst_REF;
- D->globals = gl - 10; /* Negative bias to compensate for locals. */
- DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
-}
-
-/* Grow PC label array. Can be called after dasm_setup(), too. */
-void dasm_growpc(Dst_DECL, unsigned int maxpc)
-{
- dasm_State *D = Dst_REF;
- size_t osz = D->pcsize;
- DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
- memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
-}
-
-/* Setup encoder. */
-void dasm_setup(Dst_DECL, const void *actionlist)
-{
- dasm_State *D = Dst_REF;
- int i;
- D->actionlist = (dasm_ActList)actionlist;
- D->status = DASM_S_OK;
- D->section = &D->sections[0];
- memset((void *)D->lglabels, 0, D->lgsize);
- if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
- for (i = 0; i < D->maxsection; i++) {
- D->sections[i].pos = DASM_SEC2POS(i);
- D->sections[i].ofs = 0;
- }
-}
-
-
-#ifdef DASM_CHECKS
-#define CK(x, st) \
- do { if (!(x)) { \
- D->status = DASM_S_##st|(int)(p-D->actionlist-1); return; } } while (0)
-#define CKPL(kind, st) \
- do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
- D->status=DASM_S_RANGE_##st|(int)(p-D->actionlist-1); return; } } while (0)
-#else
-#define CK(x, st) ((void)0)
-#define CKPL(kind, st) ((void)0)
-#endif
-
-/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
-void dasm_put(Dst_DECL, int start, ...)
-{
- va_list ap;
- dasm_State *D = Dst_REF;
- dasm_ActList p = D->actionlist + start;
- dasm_Section *sec = D->section;
- int pos = sec->pos, ofs = sec->ofs, mrm = 4;
- int *b;
-
- if (pos >= sec->epos) {
- DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
- sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
- sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
- sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
- }
-
- b = sec->rbuf;
- b[pos++] = start;
-
- va_start(ap, start);
- while (1) {
- int action = *p++;
- if (action < DASM_DISP) {
- ofs++;
- } else if (action <= DASM_REL_A) {
- int n = va_arg(ap, int);
- b[pos++] = n;
- switch (action) {
- case DASM_DISP:
- if (n == 0) { if ((mrm&7) == 4) mrm = p[-2]; if ((mrm&7) != 5) break; }
- case DASM_IMM_DB: if (((n+128)&-256) == 0) goto ob;
- case DASM_REL_A: /* Assumes ptrdiff_t is int. !x64 */
- case DASM_IMM_D: ofs += 4; break;
- case DASM_IMM_S: CK(((n+128)&-256) == 0, RANGE_I); goto ob;
- case DASM_IMM_B: CK((n&-256) == 0, RANGE_I); ob: ofs++; break;
- case DASM_IMM_WB: if (((n+128)&-256) == 0) goto ob;
- case DASM_IMM_W: CK((n&-65536) == 0, RANGE_I); ofs += 2; break;
- case DASM_SPACE: p++; ofs += n; break;
- case DASM_SETLABEL: b[pos-2] = -0x40000000; break; /* Neg. label ofs. */
- case DASM_VREG: CK((n&-8) == 0 && (n != 4 || (*p&1) == 0), RANGE_VREG);
- if (*p++ == 1 && *p == DASM_DISP) mrm = n; continue;
- }
- mrm = 4;
- } else {
- int *pl, n;
- switch (action) {
- case DASM_REL_LG:
- case DASM_IMM_LG:
- n = *p++; pl = D->lglabels + n;
- /* Bkwd rel or global. */
- if (n <= 246) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
- pl -= 246; n = *pl;
- if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
- goto linkrel;
- case DASM_REL_PC:
- case DASM_IMM_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC);
- putrel:
- n = *pl;
- if (n < 0) { /* Label exists. Get label pos and store it. */
- b[pos] = -n;
- } else {
- linkrel:
- b[pos] = n; /* Else link to rel chain, anchored at label. */
- *pl = pos;
- }
- pos++;
- ofs += 4; /* Maximum offset needed. */
- if (action == DASM_REL_LG || action == DASM_REL_PC)
- b[pos++] = ofs; /* Store pass1 offset estimate. */
- break;
- case DASM_LABEL_LG: pl = D->lglabels + *p++; CKPL(lg, LG); goto putlabel;
- case DASM_LABEL_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC);
- putlabel:
- n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; }
- *pl = -pos; /* Label exists now. */
- b[pos++] = ofs; /* Store pass1 offset estimate. */
- break;
- case DASM_ALIGN:
- ofs += *p++; /* Maximum alignment needed (arg is 2**n-1). */
- b[pos++] = ofs; /* Store pass1 offset estimate. */
- break;
- case DASM_EXTERN: p += 2; ofs += 4; break;
- case DASM_ESC: p++; ofs++; break;
- case DASM_MARK: mrm = p[-2]; break;
- case DASM_SECTION:
- n = *p; CK(n < D->maxsection, RANGE_SEC); D->section = &D->sections[n];
- case DASM_STOP: goto stop;
- }
- }
- }
-stop:
- va_end(ap);
- sec->pos = pos;
- sec->ofs = ofs;
-}
-#undef CK
-
-/* Pass 2: Link sections, shrink branches/aligns, fix label offsets. */
-int dasm_link(Dst_DECL, size_t *szp)
-{
- dasm_State *D = Dst_REF;
- int secnum;
- int ofs = 0;
-
-#ifdef DASM_CHECKS
- *szp = 0;
- if (D->status != DASM_S_OK) return D->status;
- {
- int pc;
- for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
- if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
- }
-#endif
-
- { /* Handle globals not defined in this translation unit. */
- int idx;
- for (idx = 10; idx*sizeof(int) < D->lgsize; idx++) {
- int n = D->lglabels[idx];
- /* Undefined label: Collapse rel chain and replace with marker (< 0). */
- while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
- }
- }
-
- /* Combine all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->rbuf;
- int pos = DASM_SEC2POS(secnum);
- int lastpos = sec->pos;
-
- while (pos != lastpos) {
- dasm_ActList p = D->actionlist + b[pos++];
- while (1) {
- int op, action = *p++;
- switch (action) {
- case DASM_REL_LG: p++; op = p[-3]; goto rel_pc;
- case DASM_REL_PC: op = p[-2]; rel_pc: {
- int shrink = op == 0xe9 ? 3 : ((op&0xf0) == 0x80 ? 4 : 0);
- if (shrink) { /* Shrinkable branch opcode? */
- int lofs, lpos = b[pos];
- if (lpos < 0) goto noshrink; /* Ext global? */
- lofs = *DASM_POS2PTR(D, lpos);
- if (lpos > pos) { /* Fwd label: add cumulative section offsets. */
- int i;
- for (i = secnum; i < DASM_POS2SEC(lpos); i++)
- lofs += D->sections[i].ofs;
- } else {
- lofs -= ofs; /* Bkwd label: unfix offset. */
- }
- lofs -= b[pos+1]; /* Short branch ok? */
- if (lofs >= -128-shrink && lofs <= 127) ofs -= shrink; /* Yes. */
- else { noshrink: shrink = 0; } /* No, cannot shrink op. */
- }
- b[pos+1] = shrink;
- pos += 2;
- break;
- }
- case DASM_SPACE: case DASM_IMM_LG: case DASM_VREG: p++;
- case DASM_DISP: case DASM_IMM_S: case DASM_IMM_B: case DASM_IMM_W:
- case DASM_IMM_D: case DASM_IMM_WB: case DASM_IMM_DB:
- case DASM_SETLABEL: case DASM_REL_A: case DASM_IMM_PC: pos++; break;
- case DASM_LABEL_LG: p++;
- case DASM_LABEL_PC: b[pos++] += ofs; break; /* Fix label offset. */
- case DASM_ALIGN: ofs -= (b[pos++]+ofs)&*p++; break; /* Adjust ofs. */
- case DASM_EXTERN: p += 2; break;
- case DASM_ESC: p++; break;
- case DASM_MARK: break;
- case DASM_SECTION: case DASM_STOP: goto stop;
- }
- }
- stop: (void)0;
- }
- ofs += sec->ofs; /* Next section starts right after current section. */
- }
-
- D->codesize = ofs; /* Total size of all code sections */
- *szp = ofs;
- return DASM_S_OK;
-}
-
-#define dasmb(x) *cp++ = (unsigned char)(x)
-#ifndef DASM_ALIGNED_WRITES
-#define dasmw(x) \
- do { *((unsigned short *)cp) = (unsigned short)(x); cp+=2; } while (0)
-#define dasmd(x) \
- do { *((unsigned int *)cp) = (unsigned int)(x); cp+=4; } while (0)
-#else
-#define dasmw(x) do { dasmb(x); dasmb((x)>>8); } while (0)
-#define dasmd(x) do { dasmw(x); dasmw((x)>>16); } while (0)
-#endif
-
-/* Pass 3: Encode sections. */
-int dasm_encode(Dst_DECL, void *buffer)
-{
- dasm_State *D = Dst_REF;
- unsigned char *base = (unsigned char *)buffer;
- unsigned char *cp = base;
- int secnum;
-
- /* Encode all code sections. No support for data sections (yet). */
- for (secnum = 0; secnum < D->maxsection; secnum++) {
- dasm_Section *sec = D->sections + secnum;
- int *b = sec->buf;
- int *endb = sec->rbuf + sec->pos;
-
- while (b != endb) {
- dasm_ActList p = D->actionlist + *b++;
- unsigned char *mark = NULL;
- while (1) {
- int action = *p++;
- int n = (action >= DASM_DISP && action <= DASM_ALIGN) ? *b++ : 0;
- switch (action) {
- case DASM_DISP: if (!mark) mark = cp; {
- unsigned char *mm = mark;
- if (*p != DASM_IMM_DB && *p != DASM_IMM_WB) mark = NULL;
- if (n == 0) { int mrm = mm[-1]&7; if (mrm == 4) mrm = mm[0]&7;
- if (mrm != 5) { mm[-1] -= 0x80; break; } }
- if (((n+128) & -256) != 0) goto wd; else mm[-1] -= 0x40;
- }
- case DASM_IMM_S: case DASM_IMM_B: wb: dasmb(n); break;
- case DASM_IMM_DB: if (((n+128)&-256) == 0) {
- db: if (!mark) mark = cp; mark[-2] += 2; mark = NULL; goto wb;
- } else mark = NULL;
- case DASM_IMM_D: wd: dasmd(n); break;
- case DASM_IMM_WB: if (((n+128)&-256) == 0) goto db; else mark = NULL;
- case DASM_IMM_W: dasmw(n); break;
- case DASM_VREG: { int t = *p++; if (t >= 2) n<<=3; cp[-1] |= n; break; }
- case DASM_REL_LG: p++; if (n >= 0) goto rel_pc;
- b++; n = (int)(ptrdiff_t)D->globals[-n];
- case DASM_REL_A: rel_a: n -= (int)(ptrdiff_t)(cp+4); goto wd; /* !x64 */
- case DASM_REL_PC: rel_pc: {
- int shrink = *b++;
- int *pb = DASM_POS2PTR(D, n); if (*pb < 0) { n = pb[1]; goto rel_a; }
- n = *pb - ((int)(cp-base) + 4-shrink);
- if (shrink == 0) goto wd;
- if (shrink == 4) { cp--; cp[-1] = *cp-0x10; } else cp[-1] = 0xeb;
- goto wb;
- }
- case DASM_IMM_LG:
- p++; if (n < 0) { n = (int)(ptrdiff_t)D->globals[-n]; goto wd; }
- case DASM_IMM_PC: {
- int *pb = DASM_POS2PTR(D, n);
- n = *pb < 0 ? pb[1] : (*pb + (int)(ptrdiff_t)base);
- goto wd;
- }
- case DASM_LABEL_LG: {
- int idx = *p++;
- if (idx >= 10)
- D->globals[idx] = (void *)(base + (*p == DASM_SETLABEL ? *b : n));
- break;
- }
- case DASM_LABEL_PC: case DASM_SETLABEL: break;
- case DASM_SPACE: { int fill = *p++; while (n--) *cp++ = fill; break; }
- case DASM_ALIGN:
- n = *p++;
- while (((cp-base) & n)) *cp++ = 0x90; /* nop */
- break;
- case DASM_EXTERN: n = DASM_EXTERN(Dst, cp, p[1], *p); p += 2; goto wd;
- case DASM_MARK: mark = cp; break;
- case DASM_ESC: action = *p++;
- default: *cp++ = action; break;
- case DASM_SECTION: case DASM_STOP: goto stop;
- }
- }
- stop: (void)0;
- }
- }
-
- if (base + D->codesize != cp) /* Check for phase errors. */
- return DASM_S_PHASE;
- return DASM_S_OK;
-}
-
-/* Get PC label offset. */
-int dasm_getpclabel(Dst_DECL, unsigned int pc)
-{
- dasm_State *D = Dst_REF;
- if (pc*sizeof(int) < D->pcsize) {
- int pos = D->pclabels[pc];
- if (pos < 0) return *DASM_POS2PTR(D, -pos);
- if (pos > 0) return -1; /* Undefined. */
- }
- return -2; /* Unused or out of range. */
-}
-
-#ifdef DASM_CHECKS
-/* Optional sanity checker to call between isolated encoding steps. */
-int dasm_checkstep(Dst_DECL, int secmatch)
-{
- dasm_State *D = Dst_REF;
- if (D->status == DASM_S_OK) {
- int i;
- for (i = 1; i <= 9; i++) {
- if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_L|i; break; }
- D->lglabels[i] = 0;
- }
- }
- if (D->status == DASM_S_OK && secmatch >= 0 &&
- D->section != &D->sections[secmatch])
- D->status = DASM_S_MATCH_SEC|(int)(D->section-D->sections);
- return D->status;
-}
-#endif
-
+/*
+** DynASM x86 encoding engine.
+** Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+** Released under the MIT license. See dynasm.lua for full copyright notice.
+*/
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define DASM_ARCH "x86"
+
+#ifndef DASM_EXTERN
+#define DASM_EXTERN(a,b,c,d) 0
+#endif
+
+/* Action definitions. DASM_STOP must be 255. */
+enum {
+ DASM_DISP = 233,
+ DASM_IMM_S, DASM_IMM_B, DASM_IMM_W, DASM_IMM_D, DASM_IMM_WB, DASM_IMM_DB,
+ DASM_VREG, DASM_SPACE, DASM_SETLABEL, DASM_REL_A, DASM_REL_LG, DASM_REL_PC,
+ DASM_IMM_LG, DASM_IMM_PC, DASM_LABEL_LG, DASM_LABEL_PC, DASM_ALIGN,
+ DASM_EXTERN, DASM_ESC, DASM_MARK, DASM_SECTION, DASM_STOP
+};
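+/* Action values below DASM_DISP are copied verbatim to the output as literal
+** instruction bytes; the values above are interpreted by the passes below. */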
+
+/* Maximum number of section buffer positions for a single dasm_put() call. */
+#define DASM_MAXSECPOS 25
+
+/* DynASM encoder status codes. Action list offset or number are or'ed in. */
+#define DASM_S_OK 0x00000000
+#define DASM_S_NOMEM 0x01000000
+#define DASM_S_PHASE 0x02000000
+#define DASM_S_MATCH_SEC 0x03000000
+#define DASM_S_RANGE_I 0x11000000
+#define DASM_S_RANGE_SEC 0x12000000
+#define DASM_S_RANGE_LG 0x13000000
+#define DASM_S_RANGE_PC 0x14000000
+#define DASM_S_RANGE_VREG 0x15000000
+#define DASM_S_UNDEF_L 0x21000000
+#define DASM_S_UNDEF_PC 0x22000000
+
+/* Macros to convert positions (8 bit section + 24 bit index). */
+#define DASM_POS2IDX(pos) ((pos)&0x00ffffff)
+#define DASM_POS2BIAS(pos) ((pos)&0xff000000)
+#define DASM_SEC2POS(sec) ((sec)<<24)
+#define DASM_POS2SEC(pos) ((pos)>>24)
+#define DASM_POS2PTR(D, pos) (D->sections[DASM_POS2SEC(pos)].rbuf + (pos))
+
+/* Action list type. */
+typedef const unsigned char *dasm_ActList;
+
+/* Per-section structure. */
+typedef struct dasm_Section {
+ int *rbuf; /* Biased buffer pointer (negative section bias). */
+ int *buf; /* True buffer pointer. */
+ size_t bsize; /* Buffer size in bytes. */
+ int pos; /* Biased buffer position. */
+ int epos; /* End of biased buffer position - max single put. */
+ int ofs; /* Byte offset into section. */
+} dasm_Section;
+
+/* Core structure holding the DynASM encoding state. */
+struct dasm_State {
+ size_t psize; /* Allocated size of this structure. */
+ dasm_ActList actionlist; /* Current actionlist pointer. */
+ int *lglabels; /* Local/global chain/pos ptrs. */
+ size_t lgsize;
+ int *pclabels; /* PC label chains/pos ptrs. */
+ size_t pcsize;
+ void **globals; /* Array of globals (bias -10). */
+ dasm_Section *section; /* Pointer to active section. */
+ size_t codesize; /* Total size of all code sections. */
+ int maxsection; /* 0 <= sectionidx < maxsection. */
+ int status; /* Status code. */
+ dasm_Section sections[1]; /* All sections. Alloc-extended. */
+};
+
+/* The size of the core structure depends on the max. number of sections. */
+#define DASM_PSZ(ms) (sizeof(dasm_State)+(ms-1)*sizeof(dasm_Section))
+
+
+/* Initialize DynASM state. */
+void dasm_init(Dst_DECL, int maxsection)
+{
+ dasm_State *D;
+ size_t psz = 0;
+ int i;
+ Dst_REF = NULL;
+ DASM_M_GROW(Dst, struct dasm_State, Dst_REF, psz, DASM_PSZ(maxsection));
+ D = Dst_REF;
+ D->psize = psz;
+ D->lglabels = NULL;
+ D->lgsize = 0;
+ D->pclabels = NULL;
+ D->pcsize = 0;
+ D->globals = NULL;
+ D->maxsection = maxsection;
+ for (i = 0; i < maxsection; i++) {
+ D->sections[i].buf = NULL; /* Need this for pass3. */
+ D->sections[i].rbuf = D->sections[i].buf - DASM_SEC2POS(i);
+ D->sections[i].bsize = 0;
+ D->sections[i].epos = 0; /* Wrong, but is recalculated after resize. */
+ }
+}
+
+/* Free DynASM state. */
+void dasm_free(Dst_DECL)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ for (i = 0; i < D->maxsection; i++)
+ if (D->sections[i].buf)
+ DASM_M_FREE(Dst, D->sections[i].buf, D->sections[i].bsize);
+ if (D->pclabels) DASM_M_FREE(Dst, D->pclabels, D->pcsize);
+ if (D->lglabels) DASM_M_FREE(Dst, D->lglabels, D->lgsize);
+ DASM_M_FREE(Dst, D, D->psize);
+}
+
+/* Setup global label array. Must be called before dasm_setup(). */
+void dasm_setupglobal(Dst_DECL, void **gl, unsigned int maxgl)
+{
+ dasm_State *D = Dst_REF;
+ D->globals = gl - 10; /* Negative bias to compensate for locals. */
+ DASM_M_GROW(Dst, int, D->lglabels, D->lgsize, (10+maxgl)*sizeof(int));
+}
+
+/* Grow PC label array. Can be called after dasm_setup(), too. */
+void dasm_growpc(Dst_DECL, unsigned int maxpc)
+{
+ dasm_State *D = Dst_REF;
+ size_t osz = D->pcsize;
+ DASM_M_GROW(Dst, int, D->pclabels, D->pcsize, maxpc*sizeof(int));
+ memset((void *)(((unsigned char *)D->pclabels)+osz), 0, D->pcsize-osz);
+}
+
+/* Setup encoder. */
+void dasm_setup(Dst_DECL, const void *actionlist)
+{
+ dasm_State *D = Dst_REF;
+ int i;
+ D->actionlist = (dasm_ActList)actionlist;
+ D->status = DASM_S_OK;
+ D->section = &D->sections[0];
+ memset((void *)D->lglabels, 0, D->lgsize);
+ if (D->pclabels) memset((void *)D->pclabels, 0, D->pcsize);
+ for (i = 0; i < D->maxsection; i++) {
+ D->sections[i].pos = DASM_SEC2POS(i);
+ D->sections[i].ofs = 0;
+ }
+}
+
+
+#ifdef DASM_CHECKS
+#define CK(x, st) \
+ do { if (!(x)) { \
+ D->status = DASM_S_##st|(int)(p-D->actionlist-1); return; } } while (0)
+#define CKPL(kind, st) \
+ do { if ((size_t)((char *)pl-(char *)D->kind##labels) >= D->kind##size) { \
+ D->status=DASM_S_RANGE_##st|(int)(p-D->actionlist-1); return; } } while (0)
+#else
+#define CK(x, st) ((void)0)
+#define CKPL(kind, st) ((void)0)
+#endif
+
+/* Pass 1: Store actions and args, link branches/labels, estimate offsets. */
+void dasm_put(Dst_DECL, int start, ...)
+{
+ va_list ap;
+ dasm_State *D = Dst_REF;
+ dasm_ActList p = D->actionlist + start;
+ dasm_Section *sec = D->section;
+ int pos = sec->pos, ofs = sec->ofs, mrm = 4;
+ int *b;
+
+ if (pos >= sec->epos) {
+ DASM_M_GROW(Dst, int, sec->buf, sec->bsize,
+ sec->bsize + 2*DASM_MAXSECPOS*sizeof(int));
+ sec->rbuf = sec->buf - DASM_POS2BIAS(pos);
+ sec->epos = (int)sec->bsize/sizeof(int) - DASM_MAXSECPOS+DASM_POS2BIAS(pos);
+ }
+
+ b = sec->rbuf;
+ b[pos++] = start;
+
+ va_start(ap, start);
+ while (1) {
+ int action = *p++;
+ if (action < DASM_DISP) {
+ ofs++;
+ } else if (action <= DASM_REL_A) {
+ int n = va_arg(ap, int);
+ b[pos++] = n;
+ switch (action) {
+ case DASM_DISP:
+ if (n == 0) { if ((mrm&7) == 4) mrm = p[-2]; if ((mrm&7) != 5) break; }
+ case DASM_IMM_DB: if (((n+128)&-256) == 0) goto ob;
+ case DASM_REL_A: /* Assumes ptrdiff_t is int. !x64 */
+ case DASM_IMM_D: ofs += 4; break;
+ case DASM_IMM_S: CK(((n+128)&-256) == 0, RANGE_I); goto ob;
+ case DASM_IMM_B: CK((n&-256) == 0, RANGE_I); ob: ofs++; break;
+ case DASM_IMM_WB: if (((n+128)&-256) == 0) goto ob;
+ case DASM_IMM_W: CK((n&-65536) == 0, RANGE_I); ofs += 2; break;
+ case DASM_SPACE: p++; ofs += n; break;
+ case DASM_SETLABEL: b[pos-2] = -0x40000000; break; /* Neg. label ofs. */
+ case DASM_VREG: CK((n&-8) == 0 && (n != 4 || (*p&1) == 0), RANGE_VREG);
+ if (*p++ == 1 && *p == DASM_DISP) mrm = n; continue;
+ }
+ mrm = 4;
+ } else {
+ int *pl, n;
+ switch (action) {
+ case DASM_REL_LG:
+ case DASM_IMM_LG:
+ n = *p++; pl = D->lglabels + n;
+ /* Bkwd rel or global. */
+ if (n <= 246) { CK(n>=10||*pl<0, RANGE_LG); CKPL(lg, LG); goto putrel; }
+ pl -= 246; n = *pl;
+ if (n < 0) n = 0; /* Start new chain for fwd rel if label exists. */
+ goto linkrel;
+ case DASM_REL_PC:
+ case DASM_IMM_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC);
+ putrel:
+ n = *pl;
+ if (n < 0) { /* Label exists. Get label pos and store it. */
+ b[pos] = -n;
+ } else {
+ linkrel:
+ b[pos] = n; /* Else link to rel chain, anchored at label. */
+ *pl = pos;
+ }
+ pos++;
+ ofs += 4; /* Maximum offset needed. */
+ if (action == DASM_REL_LG || action == DASM_REL_PC)
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_LABEL_LG: pl = D->lglabels + *p++; CKPL(lg, LG); goto putlabel;
+ case DASM_LABEL_PC: pl = D->pclabels + va_arg(ap, int); CKPL(pc, PC);
+ putlabel:
+ n = *pl; /* n > 0: Collapse rel chain and replace with label pos. */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = pos; }
+ *pl = -pos; /* Label exists now. */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_ALIGN:
+ ofs += *p++; /* Maximum alignment needed (arg is 2**n-1). */
+ b[pos++] = ofs; /* Store pass1 offset estimate. */
+ break;
+ case DASM_EXTERN: p += 2; ofs += 4; break;
+ case DASM_ESC: p++; ofs++; break;
+ case DASM_MARK: mrm = p[-2]; break;
+ case DASM_SECTION:
+ n = *p; CK(n < D->maxsection, RANGE_SEC); D->section = &D->sections[n];
+ case DASM_STOP: goto stop;
+ }
+ }
+ }
+stop:
+ va_end(ap);
+ sec->pos = pos;
+ sec->ofs = ofs;
+}
+#undef CK
+
+/* Pass 2: Link sections, shrink branches/aligns, fix label offsets. */
+int dasm_link(Dst_DECL, size_t *szp)
+{
+ dasm_State *D = Dst_REF;
+ int secnum;
+ int ofs = 0;
+
+#ifdef DASM_CHECKS
+ *szp = 0;
+ if (D->status != DASM_S_OK) return D->status;
+ {
+ int pc;
+ for (pc = 0; pc*sizeof(int) < D->pcsize; pc++)
+ if (D->pclabels[pc] > 0) return DASM_S_UNDEF_PC|pc;
+ }
+#endif
+
+ { /* Handle globals not defined in this translation unit. */
+ int idx;
+ for (idx = 10; idx*sizeof(int) < D->lgsize; idx++) {
+ int n = D->lglabels[idx];
+ /* Undefined label: Collapse rel chain and replace with marker (< 0). */
+ while (n > 0) { int *pb = DASM_POS2PTR(D, n); n = *pb; *pb = -idx; }
+ }
+ }
+
+ /* Combine all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->rbuf;
+ int pos = DASM_SEC2POS(secnum);
+ int lastpos = sec->pos;
+
+ while (pos != lastpos) {
+ dasm_ActList p = D->actionlist + b[pos++];
+ while (1) {
+ int op, action = *p++;
+ switch (action) {
+ case DASM_REL_LG: p++; op = p[-3]; goto rel_pc;
+ case DASM_REL_PC: op = p[-2]; rel_pc: {
+ int shrink = op == 0xe9 ? 3 : ((op&0xf0) == 0x80 ? 4 : 0);
+ if (shrink) { /* Shrinkable branch opcode? */
+ int lofs, lpos = b[pos];
+ if (lpos < 0) goto noshrink; /* Ext global? */
+ lofs = *DASM_POS2PTR(D, lpos);
+ if (lpos > pos) { /* Fwd label: add cumulative section offsets. */
+ int i;
+ for (i = secnum; i < DASM_POS2SEC(lpos); i++)
+ lofs += D->sections[i].ofs;
+ } else {
+ lofs -= ofs; /* Bkwd label: unfix offset. */
+ }
+ lofs -= b[pos+1]; /* Short branch ok? */
+ if (lofs >= -128-shrink && lofs <= 127) ofs -= shrink; /* Yes. */
+ else { noshrink: shrink = 0; } /* No, cannot shrink op. */
+ }
+ b[pos+1] = shrink;
+ pos += 2;
+ break;
+ }
+ case DASM_SPACE: case DASM_IMM_LG: case DASM_VREG: p++;
+ case DASM_DISP: case DASM_IMM_S: case DASM_IMM_B: case DASM_IMM_W:
+ case DASM_IMM_D: case DASM_IMM_WB: case DASM_IMM_DB:
+ case DASM_SETLABEL: case DASM_REL_A: case DASM_IMM_PC: pos++; break;
+ case DASM_LABEL_LG: p++;
+ case DASM_LABEL_PC: b[pos++] += ofs; break; /* Fix label offset. */
+ case DASM_ALIGN: ofs -= (b[pos++]+ofs)&*p++; break; /* Adjust ofs. */
+ case DASM_EXTERN: p += 2; break;
+ case DASM_ESC: p++; break;
+ case DASM_MARK: break;
+ case DASM_SECTION: case DASM_STOP: goto stop;
+ }
+ }
+ stop: (void)0;
+ }
+ ofs += sec->ofs; /* Next section starts right after current section. */
+ }
+
+ D->codesize = ofs; /* Total size of all code sections */
+ *szp = ofs;
+ return DASM_S_OK;
+}
+
+#define dasmb(x) *cp++ = (unsigned char)(x)
+#ifndef DASM_ALIGNED_WRITES
+#define dasmw(x) \
+ do { *((unsigned short *)cp) = (unsigned short)(x); cp+=2; } while (0)
+#define dasmd(x) \
+ do { *((unsigned int *)cp) = (unsigned int)(x); cp+=4; } while (0)
+#else
+#define dasmw(x) do { dasmb(x); dasmb((x)>>8); } while (0)
+#define dasmd(x) do { dasmw(x); dasmw((x)>>16); } while (0)
+#endif
+
+/* Pass 3: Encode sections. */
+int dasm_encode(Dst_DECL, void *buffer)
+{
+ dasm_State *D = Dst_REF;
+ unsigned char *base = (unsigned char *)buffer;
+ unsigned char *cp = base;
+ int secnum;
+
+ /* Encode all code sections. No support for data sections (yet). */
+ for (secnum = 0; secnum < D->maxsection; secnum++) {
+ dasm_Section *sec = D->sections + secnum;
+ int *b = sec->buf;
+ int *endb = sec->rbuf + sec->pos;
+
+ while (b != endb) {
+ dasm_ActList p = D->actionlist + *b++;
+ unsigned char *mark = NULL;
+ while (1) {
+ int action = *p++;
+ int n = (action >= DASM_DISP && action <= DASM_ALIGN) ? *b++ : 0;
+ switch (action) {
+ case DASM_DISP: if (!mark) mark = cp; {
+ unsigned char *mm = mark;
+ if (*p != DASM_IMM_DB && *p != DASM_IMM_WB) mark = NULL;
+ if (n == 0) { int mrm = mm[-1]&7; if (mrm == 4) mrm = mm[0]&7;
+ if (mrm != 5) { mm[-1] -= 0x80; break; } }
+ if (((n+128) & -256) != 0) goto wd; else mm[-1] -= 0x40;
+ }
+ case DASM_IMM_S: case DASM_IMM_B: wb: dasmb(n); break;
+ case DASM_IMM_DB: if (((n+128)&-256) == 0) {
+ db: if (!mark) mark = cp; mark[-2] += 2; mark = NULL; goto wb;
+ } else mark = NULL;
+ case DASM_IMM_D: wd: dasmd(n); break;
+ case DASM_IMM_WB: if (((n+128)&-256) == 0) goto db; else mark = NULL;
+ case DASM_IMM_W: dasmw(n); break;
+ case DASM_VREG: { int t = *p++; if (t >= 2) n<<=3; cp[-1] |= n; break; }
+ case DASM_REL_LG: p++; if (n >= 0) goto rel_pc;
+ b++; n = (int)(ptrdiff_t)D->globals[-n];
+ case DASM_REL_A: rel_a: n -= (int)(ptrdiff_t)(cp+4); goto wd; /* !x64 */
+ case DASM_REL_PC: rel_pc: {
+ int shrink = *b++;
+ int *pb = DASM_POS2PTR(D, n); if (*pb < 0) { n = pb[1]; goto rel_a; }
+ n = *pb - ((int)(cp-base) + 4-shrink);
+ if (shrink == 0) goto wd;
+ if (shrink == 4) { cp--; cp[-1] = *cp-0x10; } else cp[-1] = 0xeb;
+ goto wb;
+ }
+ case DASM_IMM_LG:
+ p++; if (n < 0) { n = (int)(ptrdiff_t)D->globals[-n]; goto wd; }
+ case DASM_IMM_PC: {
+ int *pb = DASM_POS2PTR(D, n);
+ n = *pb < 0 ? pb[1] : (*pb + (int)(ptrdiff_t)base);
+ goto wd;
+ }
+ case DASM_LABEL_LG: {
+ int idx = *p++;
+ if (idx >= 10)
+ D->globals[idx] = (void *)(base + (*p == DASM_SETLABEL ? *b : n));
+ break;
+ }
+ case DASM_LABEL_PC: case DASM_SETLABEL: break;
+ case DASM_SPACE: { int fill = *p++; while (n--) *cp++ = fill; break; }
+ case DASM_ALIGN:
+ n = *p++;
+ while (((cp-base) & n)) *cp++ = 0x90; /* nop */
+ break;
+ case DASM_EXTERN: n = DASM_EXTERN(Dst, cp, p[1], *p); p += 2; goto wd;
+ case DASM_MARK: mark = cp; break;
+ case DASM_ESC: action = *p++;
+ default: *cp++ = action; break;
+ case DASM_SECTION: case DASM_STOP: goto stop;
+ }
+ }
+ stop: (void)0;
+ }
+ }
+
+ if (base + D->codesize != cp) /* Check for phase errors. */
+ return DASM_S_PHASE;
+ return DASM_S_OK;
+}
+
+/* Get PC label offset. */
+int dasm_getpclabel(Dst_DECL, unsigned int pc)
+{
+ dasm_State *D = Dst_REF;
+ if (pc*sizeof(int) < D->pcsize) {
+ int pos = D->pclabels[pc];
+ if (pos < 0) return *DASM_POS2PTR(D, -pos);
+ if (pos > 0) return -1; /* Undefined. */
+ }
+ return -2; /* Unused or out of range. */
+}
+
+#ifdef DASM_CHECKS
+/* Optional sanity checker to call between isolated encoding steps. */
+int dasm_checkstep(Dst_DECL, int secmatch)
+{
+ dasm_State *D = Dst_REF;
+ if (D->status == DASM_S_OK) {
+ int i;
+ for (i = 1; i <= 9; i++) {
+ if (D->lglabels[i] > 0) { D->status = DASM_S_UNDEF_L|i; break; }
+ D->lglabels[i] = 0;
+ }
+ }
+ if (D->status == DASM_S_OK && secmatch >= 0 &&
+ D->section != &D->sections[secmatch])
+ D->status = DASM_S_MATCH_SEC|(int)(D->section-D->sections);
+ return D->status;
+}
+#endif
+
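For orientation, dasm_link() and dasm_encode() above are the second and third passes of the DynASM runtime: the embedding C code first emits actions (via the dasm_put() calls that the preprocessed .dasc source expands to), then links to learn the total code size, then encodes into an executable buffer. Below is a minimal sketch of that driver sequence; the link_and_encode() helper name and the mmap/mprotect buffer handling are illustrative assumptions, not code from this tree.

  #include <stddef.h>
  #include <sys/mman.h>
  #include "dasm_proto.h"   /* declares dasm_link(), dasm_encode(), DASM_S_OK, ... */

  /* Drive passes 2 and 3 after all instructions have been emitted.      */
  /* Dst_DECL defaults to "dasm_State **Dst" in dasm_proto.h.            */
  static void *link_and_encode(dasm_State **Dst)
  {
    size_t sz;
    void *buf;
    if (dasm_link(Dst, &sz) != DASM_S_OK) return NULL;    /* Pass 2: resolve labels, size code. */
    buf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);       /* Platform-specific; illustrative only. */
    if (buf == MAP_FAILED) return NULL;
    if (dasm_encode(Dst, buf) != DASM_S_OK) return NULL;  /* Pass 3: write machine code into buf. */
    mprotect(buf, sz, PROT_READ | PROT_EXEC);             /* Flip the buffer to executable. */
    return buf;
  }

The .dasc input itself is turned into such C code beforehand by the dynasm.lua preprocessor shown further down, typically via something like "lua dynasm/dynasm.lua -o out.c in.dasc" (the exact host interpreter and -D/-I flags depend on the build; the available options are listed in the help text of dynasm.lua).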
diff --git a/3rdparty/lua/dynasm/dasm_x86.lua b/3rdparty/lua/dynasm/dasm_x86.lua
index 7ca061d..1f0981f 100644
--- a/3rdparty/lua/dynasm/dasm_x86.lua
+++ b/3rdparty/lua/dynasm/dasm_x86.lua
@@ -1,7 +1,7 @@
------------------------------------------------------------------------------
-- DynASM x86/x64 module.
--
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
-- See dynasm.lua for full copyright notice.
------------------------------------------------------------------------------
@@ -1040,7 +1040,7 @@ local map_op = {
-- ED: *in Rdw,dx
-- EE: *out dx,Rb
-- EF: *out dx,Rdw
- lock_0 = "F0",
+ -- F0: *lock
int1_0 = "F1",
repne_0 = "F2",
repnz_0 = "F2",
@@ -1081,9 +1081,6 @@ local map_op = {
btr_2 = "mrqdw:0FB3Rm|miqdw:0FBA6mU",
bts_2 = "mrqdw:0FABRm|miqdw:0FBA5mU",
- shld_3 = "mriqdw:0FA4RmU|mrCqdw:0FA5Rm",
- shrd_3 = "mriqdw:0FACRmU|mrCqdw:0FADRm",
-
rdtsc_0 = "0F31", -- P1+
cpuid_0 = "0FA2", -- P1+
@@ -1117,9 +1114,6 @@ local map_op = {
fucompp_0 = "DAE9",
fcompp_0 = "DED9",
- fldenv_1 = "x.:D94m",
- fnstenv_1 = "x.:D96m",
- fstenv_1 = "x.:9BD96m",
fldcw_1 = "xw:nD95m",
fstcw_1 = "xw:n9BD97m",
fnstcw_1 = "xw:nD97m",
@@ -1195,8 +1189,6 @@ local map_op = {
cvttps2dq_2 = "rmo:F30F5BrM",
cvttsd2si_2 = "rr/do:F20F2CrM|rr/qo:|rx/dq:|rxq:",
cvttss2si_2 = "rr/do:F30F2CrM|rr/qo:|rxd:|rx/qd:",
- fxsave_1 = "x.:0FAE0m",
- fxrstor_1 = "x.:0FAE1m",
ldmxcsr_1 = "xd:0FAE2m",
lfence_0 = "0FAEE8",
maskmovdqu_2 = "rro:660FF7rM",
@@ -1686,7 +1678,7 @@ if x64 then
function map_op.mov64_2(params)
if not params then return { "reg, imm", "reg, [disp]", "[disp], reg" } end
if secpos+2 > maxsecpos then wflush() end
- local opcode, op64, sz, rex, vreg
+ local opcode, op64, sz, rex
local op64 = match(params[1], "^%[%s*(.-)%s*%]$")
if op64 then
local a = parseoperand(params[2])
@@ -1707,17 +1699,11 @@ if x64 then
werror("bad operand mode")
end
op64 = params[2]
- if a.reg == -1 then
- vreg = a.vreg
- opcode = 0xb8
- else
- opcode = 0xb8 + band(a.reg, 7)
- end
+ opcode = 0xb8 + band(a.reg, 7) -- !x64: no VREG support.
rex = a.reg > 7 and 9 or 8
end
end
wputop(sz, opcode, rex)
- if vreg then waction("VREG", vreg); wputxb(0) end
waction("IMM_D", format("(unsigned int)(%s)", op64))
waction("IMM_D", format("(unsigned int)((%s)>>32)", op64))
end
diff --git a/3rdparty/lua/dynasm/dynasm.lua b/3rdparty/lua/dynasm/dynasm.lua
index c070d07..d670f8f 100644
--- a/3rdparty/lua/dynasm/dynasm.lua
+++ b/3rdparty/lua/dynasm/dynasm.lua
@@ -1,1094 +1,1095 @@
-------------------------------------------------------------------------------
--- DynASM. A dynamic assembler for code generation engines.
--- Originally designed and implemented for LuaJIT.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- See below for full copyright notice.
-------------------------------------------------------------------------------
-
--- Application information.
-local _info = {
- name = "DynASM",
- description = "A dynamic assembler for code generation engines",
- version = "1.3.0",
- vernum = 10300,
- release = "2011-05-05",
- author = "Mike Pall",
- url = "http://luajit.org/dynasm.html",
- license = "MIT",
- copyright = [[
-Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-[ MIT license: http://www.opensource.org/licenses/mit-license.php ]
-]],
-}
-
--- Cache library functions.
-local type, pairs, ipairs = type, pairs, ipairs
-local pcall, error, assert = pcall, error, assert
-local _s = string
-local sub, match, gmatch, gsub = _s.sub, _s.match, _s.gmatch, _s.gsub
-local format, rep, upper = _s.format, _s.rep, _s.upper
-local _t = table
-local insert, remove, concat, sort = _t.insert, _t.remove, _t.concat, _t.sort
-local exit = os.exit
-local io = io
-local stdin, stdout, stderr = io.stdin, io.stdout, io.stderr
-
-------------------------------------------------------------------------------
-
--- Program options.
-local g_opt = {}
-
--- Global state for current file.
-local g_fname, g_curline, g_indent, g_lineno, g_synclineno, g_arch
-local g_errcount = 0
-
--- Write buffer for output file.
-local g_wbuffer, g_capbuffer
-
-------------------------------------------------------------------------------
-
--- Write an output line (or callback function) to the buffer.
-local function wline(line, needindent)
- local buf = g_capbuffer or g_wbuffer
- buf[#buf+1] = needindent and g_indent..line or line
- g_synclineno = g_synclineno + 1
-end
-
--- Write assembler line as a comment, if requested.
-local function wcomment(aline)
- if g_opt.comment then
- wline(g_opt.comment..aline..g_opt.endcomment, true)
- end
-end
-
--- Resync CPP line numbers.
-local function wsync()
- if g_synclineno ~= g_lineno and g_opt.cpp then
- wline("#line "..g_lineno..' "'..g_fname..'"')
- g_synclineno = g_lineno
- end
-end
-
--- Dummy action flush function. Replaced with arch-specific function later.
-local function wflush(term)
-end
-
--- Dump all buffered output lines.
-local function wdumplines(out, buf)
- for _,line in ipairs(buf) do
- if type(line) == "string" then
- assert(out:write(line, "\n"))
- else
- -- Special callback to dynamically insert lines after end of processing.
- line(out)
- end
- end
-end
-
-------------------------------------------------------------------------------
-
--- Emit an error. Processing continues with next statement.
-local function werror(msg)
- error(format("%s:%s: error: %s:\n%s", g_fname, g_lineno, msg, g_curline), 0)
-end
-
--- Emit a fatal error. Processing stops.
-local function wfatal(msg)
- g_errcount = "fatal"
- werror(msg)
-end
-
--- Print a warning. Processing continues.
-local function wwarn(msg)
- stderr:write(format("%s:%s: warning: %s:\n%s\n",
- g_fname, g_lineno, msg, g_curline))
-end
-
--- Print caught error message. But suppress excessive errors.
-local function wprinterr(...)
- if type(g_errcount) == "number" then
- -- Regular error.
- g_errcount = g_errcount + 1
- if g_errcount < 21 then -- Seems to be a reasonable limit.
- stderr:write(...)
- elseif g_errcount == 21 then
- stderr:write(g_fname,
- ":*: warning: too many errors (suppressed further messages).\n")
- end
- else
- -- Fatal error.
- stderr:write(...)
- return true -- Stop processing.
- end
-end
-
-------------------------------------------------------------------------------
-
--- Map holding all option handlers.
-local opt_map = {}
-local opt_current
-
--- Print error and exit with error status.
-local function opterror(...)
- stderr:write("dynasm.lua: ERROR: ", ...)
- stderr:write("\n")
- exit(1)
-end
-
--- Get option parameter.
-local function optparam(args)
- local argn = args.argn
- local p = args[argn]
- if not p then
- opterror("missing parameter for option `", opt_current, "'.")
- end
- args.argn = argn + 1
- return p
-end
-
-------------------------------------------------------------------------------
-
--- Core pseudo-opcodes.
-local map_coreop = {}
--- Dummy opcode map. Replaced by arch-specific map.
-local map_op = {}
-
--- Forward declarations.
-local dostmt
-local readfile
-
-------------------------------------------------------------------------------
-
--- Map for defines (initially empty, chains to arch-specific map).
-local map_def = {}
-
--- Pseudo-opcode to define a substitution.
-map_coreop[".define_2"] = function(params, nparams)
- if not params then return nparams == 1 and "name" or "name, subst" end
- local name, def = params[1], params[2] or "1"
- if not match(name, "^[%a_][%w_]*$") then werror("bad or duplicate define") end
- map_def[name] = def
-end
-map_coreop[".define_1"] = map_coreop[".define_2"]
-
--- Define a substitution on the command line.
-function opt_map.D(args)
- local namesubst = optparam(args)
- local name, subst = match(namesubst, "^([%a_][%w_]*)=(.*)$")
- if name then
- map_def[name] = subst
- elseif match(namesubst, "^[%a_][%w_]*$") then
- map_def[namesubst] = "1"
- else
- opterror("bad define")
- end
-end
-
--- Undefine a substitution on the command line.
-function opt_map.U(args)
- local name = optparam(args)
- if match(name, "^[%a_][%w_]*$") then
- map_def[name] = nil
- else
- opterror("bad define")
- end
-end
-
--- Helper for definesubst.
-local gotsubst
-
-local function definesubst_one(word)
- local subst = map_def[word]
- if subst then gotsubst = word; return subst else return word end
-end
-
--- Iteratively substitute defines.
-local function definesubst(stmt)
- -- Limit number of iterations.
- for i=1,100 do
- gotsubst = false
- stmt = gsub(stmt, "#?[%w_]+", definesubst_one)
- if not gotsubst then break end
- end
- if gotsubst then wfatal("recursive define involving `"..gotsubst.."'") end
- return stmt
-end
-
--- Dump all defines.
-local function dumpdefines(out, lvl)
- local t = {}
- for name in pairs(map_def) do
- t[#t+1] = name
- end
- sort(t)
- out:write("Defines:\n")
- for _,name in ipairs(t) do
- local subst = map_def[name]
- if g_arch then subst = g_arch.revdef(subst) end
- out:write(format(" %-20s %s\n", name, subst))
- end
- out:write("\n")
-end
-
-------------------------------------------------------------------------------
-
--- Support variables for conditional assembly.
-local condlevel = 0
-local condstack = {}
-
--- Evaluate condition with a Lua expression. Substitutions already performed.
-local function cond_eval(cond)
- local func, err
- if setfenv then
- func, err = loadstring("return "..cond, "=expr")
- else
- -- No globals. All unknown identifiers evaluate to nil.
- func, err = load("return "..cond, "=expr", "t", {})
- end
- if func then
- if setfenv then
- setfenv(func, {}) -- No globals. All unknown identifiers evaluate to nil.
- end
- local ok, res = pcall(func)
- if ok then
- if res == 0 then return false end -- Oh well.
- return not not res
- end
- err = res
- end
- wfatal("bad condition: "..err)
-end
-
--- Skip statements until next conditional pseudo-opcode at the same level.
-local function stmtskip()
- local dostmt_save = dostmt
- local lvl = 0
- dostmt = function(stmt)
- local op = match(stmt, "^%s*(%S+)")
- if op == ".if" then
- lvl = lvl + 1
- elseif lvl ~= 0 then
- if op == ".endif" then lvl = lvl - 1 end
- elseif op == ".elif" or op == ".else" or op == ".endif" then
- dostmt = dostmt_save
- dostmt(stmt)
- end
- end
-end
-
--- Pseudo-opcodes for conditional assembly.
-map_coreop[".if_1"] = function(params)
- if not params then return "condition" end
- local lvl = condlevel + 1
- local res = cond_eval(params[1])
- condlevel = lvl
- condstack[lvl] = res
- if not res then stmtskip() end
-end
-
-map_coreop[".elif_1"] = function(params)
- if not params then return "condition" end
- if condlevel == 0 then wfatal(".elif without .if") end
- local lvl = condlevel
- local res = condstack[lvl]
- if res then
- if res == "else" then wfatal(".elif after .else") end
- else
- res = cond_eval(params[1])
- if res then
- condstack[lvl] = res
- return
- end
- end
- stmtskip()
-end
-
-map_coreop[".else_0"] = function(params)
- if condlevel == 0 then wfatal(".else without .if") end
- local lvl = condlevel
- local res = condstack[lvl]
- condstack[lvl] = "else"
- if res then
- if res == "else" then wfatal(".else after .else") end
- stmtskip()
- end
-end
-
-map_coreop[".endif_0"] = function(params)
- local lvl = condlevel
- if lvl == 0 then wfatal(".endif without .if") end
- condlevel = lvl - 1
-end
-
--- Check for unfinished conditionals.
-local function checkconds()
- if g_errcount ~= "fatal" and condlevel ~= 0 then
- wprinterr(g_fname, ":*: error: unbalanced conditional\n")
- end
-end
-
-------------------------------------------------------------------------------
-
--- Search for a file in the given path and open it for reading.
-local function pathopen(path, name)
- local dirsep = package and match(package.path, "\\") and "\\" or "/"
- for _,p in ipairs(path) do
- local fullname = p == "" and name or p..dirsep..name
- local fin = io.open(fullname, "r")
- if fin then
- g_fname = fullname
- return fin
- end
- end
-end
-
--- Include a file.
-map_coreop[".include_1"] = function(params)
- if not params then return "filename" end
- local name = params[1]
- -- Save state. Ugly, I know, but upvalues are fast.
- local gf, gl, gcl, gi = g_fname, g_lineno, g_curline, g_indent
- -- Read the included file.
- local fatal = readfile(pathopen(g_opt.include, name) or
- wfatal("include file `"..name.."' not found"))
- -- Restore state.
- g_synclineno = -1
- g_fname, g_lineno, g_curline, g_indent = gf, gl, gcl, gi
- if fatal then wfatal("in include file") end
-end
-
--- Make .include and conditionals initially available, too.
-map_op[".include_1"] = map_coreop[".include_1"]
-map_op[".if_1"] = map_coreop[".if_1"]
-map_op[".elif_1"] = map_coreop[".elif_1"]
-map_op[".else_0"] = map_coreop[".else_0"]
-map_op[".endif_0"] = map_coreop[".endif_0"]
-
-------------------------------------------------------------------------------
-
--- Support variables for macros.
-local mac_capture, mac_lineno, mac_name
-local mac_active = {}
-local mac_list = {}
-
--- Pseudo-opcode to define a macro.
-map_coreop[".macro_*"] = function(mparams)
- if not mparams then return "name [, params...]" end
- -- Split off and validate macro name.
- local name = remove(mparams, 1)
- if not name then werror("missing macro name") end
- if not (match(name, "^[%a_][%w_%.]*$") or match(name, "^%.[%w_%.]*$")) then
- wfatal("bad macro name `"..name.."'")
- end
- -- Validate macro parameter names.
- local mdup = {}
- for _,mp in ipairs(mparams) do
- if not match(mp, "^[%a_][%w_]*$") then
- wfatal("bad macro parameter name `"..mp.."'")
- end
- if mdup[mp] then wfatal("duplicate macro parameter name `"..mp.."'") end
- mdup[mp] = true
- end
- -- Check for duplicate or recursive macro definitions.
- local opname = name.."_"..#mparams
- if map_op[opname] or map_op[name.."_*"] then
- wfatal("duplicate macro `"..name.."' ("..#mparams.." parameters)")
- end
- if mac_capture then wfatal("recursive macro definition") end
-
- -- Enable statement capture.
- local lines = {}
- mac_lineno = g_lineno
- mac_name = name
- mac_capture = function(stmt) -- Statement capture function.
- -- Stop macro definition with .endmacro pseudo-opcode.
- if not match(stmt, "^%s*.endmacro%s*$") then
- lines[#lines+1] = stmt
- return
- end
- mac_capture = nil
- mac_lineno = nil
- mac_name = nil
- mac_list[#mac_list+1] = opname
- -- Add macro-op definition.
- map_op[opname] = function(params)
- if not params then return mparams, lines end
- -- Protect against recursive macro invocation.
- if mac_active[opname] then wfatal("recursive macro invocation") end
- mac_active[opname] = true
- -- Setup substitution map.
- local subst = {}
- for i,mp in ipairs(mparams) do subst[mp] = params[i] end
- local mcom
- if g_opt.maccomment and g_opt.comment then
- mcom = " MACRO "..name.." ("..#mparams..")"
- wcomment("{"..mcom)
- end
- -- Loop through all captured statements
- for _,stmt in ipairs(lines) do
- -- Substitute macro parameters.
- local st = gsub(stmt, "[%w_]+", subst)
- st = definesubst(st)
- st = gsub(st, "%s*%.%.%s*", "") -- Token paste a..b.
- if mcom and sub(st, 1, 1) ~= "|" then wcomment(st) end
- -- Emit statement. Use a protected call for better diagnostics.
- local ok, err = pcall(dostmt, st)
- if not ok then
- -- Add the captured statement to the error.
- wprinterr(err, "\n", g_indent, "| ", stmt,
- "\t[MACRO ", name, " (", #mparams, ")]\n")
- end
- end
- if mcom then wcomment("}"..mcom) end
- mac_active[opname] = nil
- end
- end
-end
-
--- An .endmacro pseudo-opcode outside of a macro definition is an error.
-map_coreop[".endmacro_0"] = function(params)
- wfatal(".endmacro without .macro")
-end
-
--- Dump all macros and their contents (with -PP only).
-local function dumpmacros(out, lvl)
- sort(mac_list)
- out:write("Macros:\n")
- for _,opname in ipairs(mac_list) do
- local name = sub(opname, 1, -3)
- local params, lines = map_op[opname]()
- out:write(format(" %-20s %s\n", name, concat(params, ", ")))
- if lvl > 1 then
- for _,line in ipairs(lines) do
- out:write(" |", line, "\n")
- end
- out:write("\n")
- end
- end
- out:write("\n")
-end
-
--- Check for unfinished macro definitions.
-local function checkmacros()
- if mac_capture then
- wprinterr(g_fname, ":", mac_lineno,
- ": error: unfinished .macro `", mac_name ,"'\n")
- end
-end
-
-------------------------------------------------------------------------------
-
--- Support variables for captures.
-local cap_lineno, cap_name
-local cap_buffers = {}
-local cap_used = {}
-
--- Start a capture.
-map_coreop[".capture_1"] = function(params)
- if not params then return "name" end
- wflush()
- local name = params[1]
- if not match(name, "^[%a_][%w_]*$") then
- wfatal("bad capture name `"..name.."'")
- end
- if cap_name then
- wfatal("already capturing to `"..cap_name.."' since line "..cap_lineno)
- end
- cap_name = name
- cap_lineno = g_lineno
- -- Create or continue a capture buffer and start the output line capture.
- local buf = cap_buffers[name]
- if not buf then buf = {}; cap_buffers[name] = buf end
- g_capbuffer = buf
- g_synclineno = 0
-end
-
--- Stop a capture.
-map_coreop[".endcapture_0"] = function(params)
- wflush()
- if not cap_name then wfatal(".endcapture without a valid .capture") end
- cap_name = nil
- cap_lineno = nil
- g_capbuffer = nil
- g_synclineno = 0
-end
-
--- Dump a capture buffer.
-map_coreop[".dumpcapture_1"] = function(params)
- if not params then return "name" end
- wflush()
- local name = params[1]
- if not match(name, "^[%a_][%w_]*$") then
- wfatal("bad capture name `"..name.."'")
- end
- cap_used[name] = true
- wline(function(out)
- local buf = cap_buffers[name]
- if buf then wdumplines(out, buf) end
- end)
- g_synclineno = 0
-end
-
--- Dump all captures and their buffers (with -PP only).
-local function dumpcaptures(out, lvl)
- out:write("Captures:\n")
- for name,buf in pairs(cap_buffers) do
- out:write(format(" %-20s %4s)\n", name, "("..#buf))
- if lvl > 1 then
- local bar = rep("=", 76)
- out:write(" ", bar, "\n")
- for _,line in ipairs(buf) do
- out:write(" ", line, "\n")
- end
- out:write(" ", bar, "\n\n")
- end
- end
- out:write("\n")
-end
-
--- Check for unfinished or unused captures.
-local function checkcaptures()
- if cap_name then
- wprinterr(g_fname, ":", cap_lineno,
- ": error: unfinished .capture `", cap_name,"'\n")
- return
- end
- for name in pairs(cap_buffers) do
- if not cap_used[name] then
- wprinterr(g_fname, ":*: error: missing .dumpcapture ", name ,"\n")
- end
- end
-end
-
-------------------------------------------------------------------------------
-
--- Section names.
-local map_sections = {}
-
--- Pseudo-opcode to define code sections.
--- TODO: Data sections, BSS sections. Needs extra C code and API.
-map_coreop[".section_*"] = function(params)
- if not params then return "name..." end
- if #map_sections > 0 then werror("duplicate section definition") end
- wflush()
- for sn,name in ipairs(params) do
- local opname = "."..name.."_0"
- if not match(name, "^[%a][%w_]*$") or
- map_op[opname] or map_op["."..name.."_*"] then
- werror("bad section name `"..name.."'")
- end
- map_sections[#map_sections+1] = name
- wline(format("#define DASM_SECTION_%s\t%d", upper(name), sn-1))
- map_op[opname] = function(params) g_arch.section(sn-1) end
- end
- wline(format("#define DASM_MAXSECTION\t\t%d", #map_sections))
-end
-
--- Dump all sections.
-local function dumpsections(out, lvl)
- out:write("Sections:\n")
- for _,name in ipairs(map_sections) do
- out:write(format(" %s\n", name))
- end
- out:write("\n")
-end
-
-------------------------------------------------------------------------------
-
--- Replacement for customized Lua, which lacks the package library.
-local prefix = ""
-if not require then
- function require(name)
- local fp = assert(io.open(prefix..name..".lua"))
- local s = fp:read("*a")
- assert(fp:close())
- return assert(loadstring(s, "@"..name..".lua"))()
- end
-end
-
--- Load architecture-specific module.
-local function loadarch(arch)
- if not match(arch, "^[%w_]+$") then return "bad arch name" end
- local ok, m_arch = pcall(require, "dasm_"..arch)
- if not ok then return "cannot load module: "..m_arch end
- g_arch = m_arch
- wflush = m_arch.passcb(wline, werror, wfatal, wwarn)
- m_arch.setup(arch, g_opt)
- map_op, map_def = m_arch.mergemaps(map_coreop, map_def)
-end
-
--- Dump architecture description.
-function opt_map.dumparch(args)
- local name = optparam(args)
- if not g_arch then
- local err = loadarch(name)
- if err then opterror(err) end
- end
-
- local t = {}
- for name in pairs(map_coreop) do t[#t+1] = name end
- for name in pairs(map_op) do t[#t+1] = name end
- sort(t)
-
- local out = stdout
- local _arch = g_arch._info
- out:write(format("%s version %s, released %s, %s\n",
- _info.name, _info.version, _info.release, _info.url))
- g_arch.dumparch(out)
-
- local pseudo = true
- out:write("Pseudo-Opcodes:\n")
- for _,sname in ipairs(t) do
- local name, nparam = match(sname, "^(.+)_([0-9%*])$")
- if name then
- if pseudo and sub(name, 1, 1) ~= "." then
- out:write("\nOpcodes:\n")
- pseudo = false
- end
- local f = map_op[sname]
- local s
- if nparam ~= "*" then nparam = nparam + 0 end
- if nparam == 0 then
- s = ""
- elseif type(f) == "string" then
- s = map_op[".template__"](nil, f, nparam)
- else
- s = f(nil, nparam)
- end
- if type(s) == "table" then
- for _,s2 in ipairs(s) do
- out:write(format(" %-12s %s\n", name, s2))
- end
- else
- out:write(format(" %-12s %s\n", name, s))
- end
- end
- end
- out:write("\n")
- exit(0)
-end
-
--- Pseudo-opcode to set the architecture.
--- Only initially available (map_op is replaced when called).
-map_op[".arch_1"] = function(params)
- if not params then return "name" end
- local err = loadarch(params[1])
- if err then wfatal(err) end
- wline(format("#if DASM_VERSION != %d", _info.vernum))
- wline('#error "Version mismatch between DynASM and included encoding engine"')
- wline("#endif")
-end
-
--- Dummy .arch pseudo-opcode to improve the error report.
-map_coreop[".arch_1"] = function(params)
- if not params then return "name" end
- wfatal("duplicate .arch statement")
-end
-
-------------------------------------------------------------------------------
-
--- Dummy pseudo-opcode. Don't confuse '.nop' with 'nop'.
-map_coreop[".nop_*"] = function(params)
- if not params then return "[ignored...]" end
-end
-
--- Pseudo-opcodes to raise errors.
-map_coreop[".error_1"] = function(params)
- if not params then return "message" end
- werror(params[1])
-end
-
-map_coreop[".fatal_1"] = function(params)
- if not params then return "message" end
- wfatal(params[1])
-end
-
--- Dump all user defined elements.
-local function dumpdef(out)
- local lvl = g_opt.dumpdef
- if lvl == 0 then return end
- dumpsections(out, lvl)
- dumpdefines(out, lvl)
- if g_arch then g_arch.dumpdef(out, lvl) end
- dumpmacros(out, lvl)
- dumpcaptures(out, lvl)
-end
-
-------------------------------------------------------------------------------
-
--- Helper for splitstmt.
-local splitlvl
-
-local function splitstmt_one(c)
- if c == "(" then
- splitlvl = ")"..splitlvl
- elseif c == "[" then
- splitlvl = "]"..splitlvl
- elseif c == "{" then
- splitlvl = "}"..splitlvl
- elseif c == ")" or c == "]" or c == "}" then
- if sub(splitlvl, 1, 1) ~= c then werror("unbalanced (), [] or {}") end
- splitlvl = sub(splitlvl, 2)
- elseif splitlvl == "" then
- return " \0 "
- end
- return c
-end
-
--- Split statement into (pseudo-)opcode and params.
-local function splitstmt(stmt)
- -- Convert label with trailing-colon into .label statement.
- local label = match(stmt, "^%s*(.+):%s*$")
- if label then return ".label", {label} end
-
- -- Split at commas and equal signs, but obey parentheses and brackets.
- splitlvl = ""
- stmt = gsub(stmt, "[,%(%)%[%]{}]", splitstmt_one)
- if splitlvl ~= "" then werror("unbalanced () or []") end
-
- -- Split off opcode.
- local op, other = match(stmt, "^%s*([^%s%z]+)%s*(.*)$")
- if not op then werror("bad statement syntax") end
-
- -- Split parameters.
- local params = {}
- for p in gmatch(other, "%s*(%Z+)%z?") do
- params[#params+1] = gsub(p, "%s+$", "")
- end
- if #params > 16 then werror("too many parameters") end
-
- params.op = op
- return op, params
-end
-
--- Process a single statement.
-dostmt = function(stmt)
- -- Ignore empty statements.
- if match(stmt, "^%s*$") then return end
-
- -- Capture macro defs before substitution.
- if mac_capture then return mac_capture(stmt) end
- stmt = definesubst(stmt)
-
- -- Emit C code without parsing the line.
- if sub(stmt, 1, 1) == "|" then
- local tail = sub(stmt, 2)
- wflush()
- if sub(tail, 1, 2) == "//" then wcomment(tail) else wline(tail, true) end
- return
- end
-
- -- Split into (pseudo-)opcode and params.
- local op, params = splitstmt(stmt)
-
- -- Get opcode handler (matching # of parameters or generic handler).
- local f = map_op[op.."_"..#params] or map_op[op.."_*"]
- if not f then
- if not g_arch then wfatal("first statement must be .arch") end
- -- Improve error report.
- for i=0,9 do
- if map_op[op.."_"..i] then
- werror("wrong number of parameters for `"..op.."'")
- end
- end
- werror("unknown statement `"..op.."'")
- end
-
- -- Call opcode handler or special handler for template strings.
- if type(f) == "string" then
- map_op[".template__"](params, f)
- else
- f(params)
- end
-end
-
--- Process a single line.
-local function doline(line)
- if g_opt.flushline then wflush() end
-
- -- Assembler line?
- local indent, aline = match(line, "^(%s*)%|(.*)$")
- if not aline then
- -- No, plain C code line, need to flush first.
- wflush()
- wsync()
- wline(line, false)
- return
- end
-
- g_indent = indent -- Remember current line indentation.
-
- -- Emit C code (even from macros). Avoids echo and line parsing.
- if sub(aline, 1, 1) == "|" then
- if not mac_capture then
- wsync()
- elseif g_opt.comment then
- wsync()
- wcomment(aline)
- end
- dostmt(aline)
- return
- end
-
- -- Echo assembler line as a comment.
- if g_opt.comment then
- wsync()
- wcomment(aline)
- end
-
- -- Strip assembler comments.
- aline = gsub(aline, "//.*$", "")
-
- -- Split line into statements at semicolons.
- if match(aline, ";") then
- for stmt in gmatch(aline, "[^;]+") do dostmt(stmt) end
- else
- dostmt(aline)
- end
-end
-
-------------------------------------------------------------------------------
-
--- Write DynASM header.
-local function dasmhead(out)
- out:write(format([[
-/*
-** This file has been pre-processed with DynASM.
-** %s
-** DynASM version %s, DynASM %s version %s
-** DO NOT EDIT! The original file is in "%s".
-*/
-
-]], _info.url,
- _info.version, g_arch._info.arch, g_arch._info.version,
- g_fname))
-end
-
--- Read input file.
-readfile = function(fin)
- g_indent = ""
- g_lineno = 0
- g_synclineno = -1
-
- -- Process all lines.
- for line in fin:lines() do
- g_lineno = g_lineno + 1
- g_curline = line
- local ok, err = pcall(doline, line)
- if not ok and wprinterr(err, "\n") then return true end
- end
- wflush()
-
- -- Close input file.
- assert(fin == stdin or fin:close())
-end
-
--- Write output file.
-local function writefile(outfile)
- local fout
-
- -- Open output file.
- if outfile == nil or outfile == "-" then
- fout = stdout
- else
- fout = assert(io.open(outfile, "w"))
- end
-
- -- Write all buffered lines
- wdumplines(fout, g_wbuffer)
-
- -- Close output file.
- assert(fout == stdout or fout:close())
-
- -- Optionally dump definitions.
- dumpdef(fout == stdout and stderr or stdout)
-end
-
--- Translate an input file to an output file.
-local function translate(infile, outfile)
- g_wbuffer = {}
- g_indent = ""
- g_lineno = 0
- g_synclineno = -1
-
- -- Put header.
- wline(dasmhead)
-
- -- Read input file.
- local fin
- if infile == "-" then
- g_fname = "(stdin)"
- fin = stdin
- else
- g_fname = infile
- fin = assert(io.open(infile, "r"))
- end
- readfile(fin)
-
- -- Check for errors.
- if not g_arch then
- wprinterr(g_fname, ":*: error: missing .arch directive\n")
- end
- checkconds()
- checkmacros()
- checkcaptures()
-
- if g_errcount ~= 0 then
- stderr:write(g_fname, ":*: info: ", g_errcount, " error",
- (type(g_errcount) == "number" and g_errcount > 1) and "s" or "",
- " in input file -- no output file generated.\n")
- dumpdef(stderr)
- exit(1)
- end
-
- -- Write output file.
- writefile(outfile)
-end
-
-------------------------------------------------------------------------------
-
--- Print help text.
-function opt_map.help()
- stdout:write("DynASM -- ", _info.description, ".\n")
- stdout:write("DynASM ", _info.version, " ", _info.release, " ", _info.url, "\n")
- stdout:write[[
-
-Usage: dynasm [OPTION]... INFILE.dasc|-
-
- -h, --help Display this help text.
- -V, --version Display version and copyright information.
-
- -o, --outfile FILE Output file name (default is stdout).
- -I, --include DIR Add directory to the include search path.
-
- -c, --ccomment Use /* */ comments for assembler lines.
- -C, --cppcomment Use // comments for assembler lines (default).
- -N, --nocomment Suppress assembler lines in output.
- -M, --maccomment Show macro expansions as comments (default off).
-
- -L, --nolineno Suppress CPP line number information in output.
- -F, --flushline Flush action list for every line.
-
- -D NAME[=SUBST] Define a substitution.
- -U NAME Undefine a substitution.
-
- -P, --dumpdef Dump defines, macros, etc. Repeat for more output.
- -A, --dumparch ARCH Load architecture ARCH and dump description.
-]]
- exit(0)
-end
-
--- Print version information.
-function opt_map.version()
- stdout:write(format("%s version %s, released %s\n%s\n\n%s",
- _info.name, _info.version, _info.release, _info.url, _info.copyright))
- exit(0)
-end
-
--- Misc. options.
-function opt_map.outfile(args) g_opt.outfile = optparam(args) end
-function opt_map.include(args) insert(g_opt.include, 1, optparam(args)) end
-function opt_map.ccomment() g_opt.comment = "/*|"; g_opt.endcomment = " */" end
-function opt_map.cppcomment() g_opt.comment = "//|"; g_opt.endcomment = "" end
-function opt_map.nocomment() g_opt.comment = false end
-function opt_map.maccomment() g_opt.maccomment = true end
-function opt_map.nolineno() g_opt.cpp = false end
-function opt_map.flushline() g_opt.flushline = true end
-function opt_map.dumpdef() g_opt.dumpdef = g_opt.dumpdef + 1 end
-
-------------------------------------------------------------------------------
-
--- Short aliases for long options.
-local opt_alias = {
- h = "help", ["?"] = "help", V = "version",
- o = "outfile", I = "include",
- c = "ccomment", C = "cppcomment", N = "nocomment", M = "maccomment",
- L = "nolineno", F = "flushline",
- P = "dumpdef", A = "dumparch",
-}
-
--- Parse single option.
-local function parseopt(opt, args)
- opt_current = #opt == 1 and "-"..opt or "--"..opt
- local f = opt_map[opt] or opt_map[opt_alias[opt]]
- if not f then
- opterror("unrecognized option `", opt_current, "'. Try `--help'.\n")
- end
- f(args)
-end
-
--- Parse arguments.
-local function parseargs(args)
- -- Default options.
- g_opt.comment = "//|"
- g_opt.endcomment = ""
- g_opt.cpp = true
- g_opt.dumpdef = 0
- g_opt.include = { "" }
-
- -- Process all option arguments.
- args.argn = 1
- repeat
- local a = args[args.argn]
- if not a then break end
- local lopt, opt = match(a, "^%-(%-?)(.+)")
- if not opt then break end
- args.argn = args.argn + 1
- if lopt == "" then
- -- Loop through short options.
- for o in gmatch(opt, ".") do parseopt(o, args) end
- else
- -- Long option.
- parseopt(opt, args)
- end
- until false
-
- -- Check for proper number of arguments.
- local nargs = #args - args.argn + 1
- if nargs ~= 1 then
- if nargs == 0 then
- if g_opt.dumpdef > 0 then return dumpdef(stdout) end
- end
- opt_map.help()
- end
-
- -- Translate a single input file to a single output file
- -- TODO: Handle multiple files?
- translate(args[args.argn], g_opt.outfile)
-end
-
-------------------------------------------------------------------------------
-
--- Add the directory dynasm.lua resides in to the Lua module search path.
-local arg = arg
-if arg and arg[0] then
- prefix = match(arg[0], "^(.*[/\\])")
- if package and prefix then package.path = prefix.."?.lua;"..package.path end
-end
-
--- Start DynASM.
-parseargs{...}
-
-------------------------------------------------------------------------------
-
+------------------------------------------------------------------------------
+-- DynASM. A dynamic assembler for code generation engines.
+-- Originally designed and implemented for LuaJIT.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- See below for full copyright notice.
+------------------------------------------------------------------------------
+
+-- Application information.
+local _info = {
+ name = "DynASM",
+ description = "A dynamic assembler for code generation engines",
+ version = "1.3.0",
+ vernum = 10300,
+ release = "2011-05-05",
+ author = "Mike Pall",
+ url = "http://luajit.org/dynasm.html",
+ license = "MIT",
+ copyright = [[
+Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+[ MIT license: http://www.opensource.org/licenses/mit-license.php ]
+]],
+}
+
+-- Cache library functions.
+local type, pairs, ipairs = type, pairs, ipairs
+local pcall, error, assert = pcall, error, assert
+local _s = string
+local sub, match, gmatch, gsub = _s.sub, _s.match, _s.gmatch, _s.gsub
+local format, rep, upper = _s.format, _s.rep, _s.upper
+local _t = table
+local insert, remove, concat, sort = _t.insert, _t.remove, _t.concat, _t.sort
+local exit = os.exit
+local io = io
+local stdin, stdout, stderr = io.stdin, io.stdout, io.stderr
+
+------------------------------------------------------------------------------
+
+-- Program options.
+local g_opt = {}
+
+-- Global state for current file.
+local g_fname, g_curline, g_indent, g_lineno, g_synclineno, g_arch
+local g_errcount = 0
+
+-- Write buffer for output file.
+local g_wbuffer, g_capbuffer
+
+------------------------------------------------------------------------------
+
+-- Write an output line (or callback function) to the buffer.
+local function wline(line, needindent)
+ local buf = g_capbuffer or g_wbuffer
+ buf[#buf+1] = needindent and g_indent..line or line
+ g_synclineno = g_synclineno + 1
+end
+
+-- Write assembler line as a comment, if requested.
+local function wcomment(aline)
+ if g_opt.comment then
+ wline(g_opt.comment..aline..g_opt.endcomment, true)
+ end
+end
+
+-- Resync CPP line numbers.
+local function wsync()
+ if g_synclineno ~= g_lineno and g_opt.cpp then
+ wline("# "..g_lineno..' "'..g_fname..'"')
+ g_synclineno = g_lineno
+ end
+end
+
+-- Dummy action flush function. Replaced with arch-specific function later.
+local function wflush(term)
+end
+
+-- Dump all buffered output lines.
+local function wdumplines(out, buf)
+ for _,line in ipairs(buf) do
+ if type(line) == "string" then
+ assert(out:write(line, "\n"))
+ else
+ -- Special callback to dynamically insert lines after end of processing.
+ line(out)
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Emit an error. Processing continues with next statement.
+local function werror(msg)
+ error(format("%s:%s: error: %s:\n%s", g_fname, g_lineno, msg, g_curline), 0)
+end
+
+-- Emit a fatal error. Processing stops.
+local function wfatal(msg)
+ g_errcount = "fatal"
+ werror(msg)
+end
+
+-- Print a warning. Processing continues.
+local function wwarn(msg)
+ stderr:write(format("%s:%s: warning: %s:\n%s\n",
+ g_fname, g_lineno, msg, g_curline))
+end
+
+-- Print caught error message. But suppress excessive errors.
+local function wprinterr(...)
+ if type(g_errcount) == "number" then
+ -- Regular error.
+ g_errcount = g_errcount + 1
+ if g_errcount < 21 then -- Seems to be a reasonable limit.
+ stderr:write(...)
+ elseif g_errcount == 21 then
+ stderr:write(g_fname,
+ ":*: warning: too many errors (suppressed further messages).\n")
+ end
+ else
+ -- Fatal error.
+ stderr:write(...)
+ return true -- Stop processing.
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Map holding all option handlers.
+local opt_map = {}
+local opt_current
+
+-- Print error and exit with error status.
+local function opterror(...)
+ stderr:write("dynasm.lua: ERROR: ", ...)
+ stderr:write("\n")
+ exit(1)
+end
+
+-- Get option parameter.
+local function optparam(args)
+ local argn = args.argn
+ local p = args[argn]
+ if not p then
+ opterror("missing parameter for option `", opt_current, "'.")
+ end
+ args.argn = argn + 1
+ return p
+end
+
+------------------------------------------------------------------------------
+
+-- Core pseudo-opcodes.
+local map_coreop = {}
+-- Dummy opcode map. Replaced by arch-specific map.
+local map_op = {}
+
+-- Forward declarations.
+local dostmt
+local readfile
+
+------------------------------------------------------------------------------
+
+-- Map for defines (initially empty, chains to arch-specific map).
+local map_def = {}
+
+-- Pseudo-opcode to define a substitution.
+map_coreop[".define_2"] = function(params, nparams)
+ if not params then return nparams == 1 and "name" or "name, subst" end
+ local name, def = params[1], params[2] or "1"
+ if not match(name, "^[%a_][%w_]*$") then werror("bad or duplicate define") end
+ map_def[name] = def
+end
+map_coreop[".define_1"] = map_coreop[".define_2"]
+
+-- Define a substitution on the command line.
+function opt_map.D(args)
+ local namesubst = optparam(args)
+ local name, subst = match(namesubst, "^([%a_][%w_]*)=(.*)$")
+ if name then
+ map_def[name] = subst
+ elseif match(namesubst, "^[%a_][%w_]*$") then
+ map_def[namesubst] = "1"
+ else
+ opterror("bad define")
+ end
+end
+
+-- Undefine a substitution on the command line.
+function opt_map.U(args)
+ local name = optparam(args)
+ if match(name, "^[%a_][%w_]*$") then
+ map_def[name] = nil
+ else
+ opterror("bad define")
+ end
+end
+
+-- Helper for definesubst.
+local gotsubst
+
+local function definesubst_one(word)
+ local subst = map_def[word]
+ if subst then gotsubst = word; return subst else return word end
+end
+
+-- Iteratively substitute defines.
+local function definesubst(stmt)
+ -- Limit number of iterations.
+ for i=1,100 do
+ gotsubst = false
+ stmt = gsub(stmt, "#?[%w_]+", definesubst_one)
+ if not gotsubst then break end
+ end
+ if gotsubst then wfatal("recursive define involving `"..gotsubst.."'") end
+ return stmt
+end
+
+-- Dump all defines.
+local function dumpdefines(out, lvl)
+ local t = {}
+ for name in pairs(map_def) do
+ t[#t+1] = name
+ end
+ sort(t)
+ out:write("Defines:\n")
+ for _,name in ipairs(t) do
+ local subst = map_def[name]
+ if g_arch then subst = g_arch.revdef(subst) end
+ out:write(format(" %-20s %s\n", name, subst))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Support variables for conditional assembly.
+local condlevel = 0
+local condstack = {}
+
+-- Evaluate condition with a Lua expression. Substitutions already performed.
+local function cond_eval(cond)
+ local func, err
+ if setfenv then
+ func, err = loadstring("return "..cond, "=expr")
+ else
+ -- No globals. All unknown identifiers evaluate to nil.
+ func, err = load("return "..cond, "=expr", "t", {})
+ end
+ if func then
+ if setfenv then
+ setfenv(func, {}) -- No globals. All unknown identifiers evaluate to nil.
+ end
+ local ok, res = pcall(func)
+ if ok then
+ if res == 0 then return false end -- Oh well.
+ return not not res
+ end
+ err = res
+ end
+ wfatal("bad condition: "..err)
+end
+
+-- Skip statements until next conditional pseudo-opcode at the same level.
+local function stmtskip()
+ local dostmt_save = dostmt
+ local lvl = 0
+ dostmt = function(stmt)
+ local op = match(stmt, "^%s*(%S+)")
+ if op == ".if" then
+ lvl = lvl + 1
+ elseif lvl ~= 0 then
+ if op == ".endif" then lvl = lvl - 1 end
+ elseif op == ".elif" or op == ".else" or op == ".endif" then
+ dostmt = dostmt_save
+ dostmt(stmt)
+ end
+ end
+end
+
+-- Pseudo-opcodes for conditional assembly.
+map_coreop[".if_1"] = function(params)
+ if not params then return "condition" end
+ local lvl = condlevel + 1
+ local res = cond_eval(params[1])
+ condlevel = lvl
+ condstack[lvl] = res
+ if not res then stmtskip() end
+end
+
+map_coreop[".elif_1"] = function(params)
+ if not params then return "condition" end
+ if condlevel == 0 then wfatal(".elif without .if") end
+ local lvl = condlevel
+ local res = condstack[lvl]
+ if res then
+ if res == "else" then wfatal(".elif after .else") end
+ else
+ res = cond_eval(params[1])
+ if res then
+ condstack[lvl] = res
+ return
+ end
+ end
+ stmtskip()
+end
+
+map_coreop[".else_0"] = function(params)
+ if condlevel == 0 then wfatal(".else without .if") end
+ local lvl = condlevel
+ local res = condstack[lvl]
+ condstack[lvl] = "else"
+ if res then
+ if res == "else" then wfatal(".else after .else") end
+ stmtskip()
+ end
+end
+
+map_coreop[".endif_0"] = function(params)
+ local lvl = condlevel
+ if lvl == 0 then wfatal(".endif without .if") end
+ condlevel = lvl - 1
+end
+
+-- Check for unfinished conditionals.
+local function checkconds()
+ if g_errcount ~= "fatal" and condlevel ~= 0 then
+ wprinterr(g_fname, ":*: error: unbalanced conditional\n")
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Search for a file in the given path and open it for reading.
+local function pathopen(path, name)
+ local dirsep = package and match(package.path, "\\") and "\\" or "/"
+ for _,p in ipairs(path) do
+ local fullname = p == "" and name or p..dirsep..name
+ local fin = io.open(fullname, "r")
+ if fin then
+ g_fname = fullname
+ return fin
+ end
+ end
+end
+
+-- Include a file.
+map_coreop[".include_1"] = function(params)
+ if not params then return "filename" end
+ local name = params[1]
+ -- Save state. Ugly, I know, but upvalues are fast.
+ local gf, gl, gcl, gi = g_fname, g_lineno, g_curline, g_indent
+ -- Read the included file.
+ local fatal = readfile(pathopen(g_opt.include, name) or
+ wfatal("include file `"..name.."' not found"))
+ -- Restore state.
+ g_synclineno = -1
+ g_fname, g_lineno, g_curline, g_indent = gf, gl, gcl, gi
+ if fatal then wfatal("in include file") end
+end
+
+-- Make .include and conditionals initially available, too.
+map_op[".include_1"] = map_coreop[".include_1"]
+map_op[".if_1"] = map_coreop[".if_1"]
+map_op[".elif_1"] = map_coreop[".elif_1"]
+map_op[".else_0"] = map_coreop[".else_0"]
+map_op[".endif_0"] = map_coreop[".endif_0"]
+
+------------------------------------------------------------------------------
+
+-- Support variables for macros.
+local mac_capture, mac_lineno, mac_name
+local mac_active = {}
+local mac_list = {}
+
+-- Pseudo-opcode to define a macro.
+map_coreop[".macro_*"] = function(mparams)
+ if not mparams then return "name [, params...]" end
+ -- Split off and validate macro name.
+ local name = remove(mparams, 1)
+ if not name then werror("missing macro name") end
+ if not (match(name, "^[%a_][%w_%.]*$") or match(name, "^%.[%w_%.]*$")) then
+ wfatal("bad macro name `"..name.."'")
+ end
+ -- Validate macro parameter names.
+ local mdup = {}
+ for _,mp in ipairs(mparams) do
+ if not match(mp, "^[%a_][%w_]*$") then
+ wfatal("bad macro parameter name `"..mp.."'")
+ end
+ if mdup[mp] then wfatal("duplicate macro parameter name `"..mp.."'") end
+ mdup[mp] = true
+ end
+ -- Check for duplicate or recursive macro definitions.
+ local opname = name.."_"..#mparams
+ if map_op[opname] or map_op[name.."_*"] then
+ wfatal("duplicate macro `"..name.."' ("..#mparams.." parameters)")
+ end
+ if mac_capture then wfatal("recursive macro definition") end
+
+ -- Enable statement capture.
+ local lines = {}
+ mac_lineno = g_lineno
+ mac_name = name
+ mac_capture = function(stmt) -- Statement capture function.
+ -- Stop macro definition with .endmacro pseudo-opcode.
+ if not match(stmt, "^%s*.endmacro%s*$") then
+ lines[#lines+1] = stmt
+ return
+ end
+ mac_capture = nil
+ mac_lineno = nil
+ mac_name = nil
+ mac_list[#mac_list+1] = opname
+ -- Add macro-op definition.
+ map_op[opname] = function(params)
+ if not params then return mparams, lines end
+ -- Protect against recursive macro invocation.
+ if mac_active[opname] then wfatal("recursive macro invocation") end
+ mac_active[opname] = true
+ -- Setup substitution map.
+ local subst = {}
+ for i,mp in ipairs(mparams) do subst[mp] = params[i] end
+ local mcom
+ if g_opt.maccomment and g_opt.comment then
+ mcom = " MACRO "..name.." ("..#mparams..")"
+ wcomment("{"..mcom)
+ end
+ -- Loop through all captured statements
+ for _,stmt in ipairs(lines) do
+ -- Substitute macro parameters.
+ local st = gsub(stmt, "[%w_]+", subst)
+ st = definesubst(st)
+ st = gsub(st, "%s*%.%.%s*", "") -- Token paste a..b.
+ if mcom and sub(st, 1, 1) ~= "|" then wcomment(st) end
+ -- Emit statement. Use a protected call for better diagnostics.
+ local ok, err = pcall(dostmt, st)
+ if not ok then
+ -- Add the captured statement to the error.
+ wprinterr(err, "\n", g_indent, "| ", stmt,
+ "\t[MACRO ", name, " (", #mparams, ")]\n")
+ end
+ end
+ if mcom then wcomment("}"..mcom) end
+ mac_active[opname] = nil
+ end
+ end
+end
+
+-- An .endmacro pseudo-opcode outside of a macro definition is an error.
+map_coreop[".endmacro_0"] = function(params)
+ wfatal(".endmacro without .macro")
+end
+
+-- Dump all macros and their contents (with -PP only).
+local function dumpmacros(out, lvl)
+ sort(mac_list)
+ out:write("Macros:\n")
+ for _,opname in ipairs(mac_list) do
+ local name = sub(opname, 1, -3)
+ local params, lines = map_op[opname]()
+ out:write(format(" %-20s %s\n", name, concat(params, ", ")))
+ if lvl > 1 then
+ for _,line in ipairs(lines) do
+ out:write(" |", line, "\n")
+ end
+ out:write("\n")
+ end
+ end
+ out:write("\n")
+end
+
+-- Check for unfinished macro definitions.
+local function checkmacros()
+ if mac_capture then
+ wprinterr(g_fname, ":", mac_lineno,
+ ": error: unfinished .macro `", mac_name ,"'\n")
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Support variables for captures.
+local cap_lineno, cap_name
+local cap_buffers = {}
+local cap_used = {}
+
+-- Start a capture.
+map_coreop[".capture_1"] = function(params)
+ if not params then return "name" end
+ wflush()
+ local name = params[1]
+ if not match(name, "^[%a_][%w_]*$") then
+ wfatal("bad capture name `"..name.."'")
+ end
+ if cap_name then
+ wfatal("already capturing to `"..cap_name.."' since line "..cap_lineno)
+ end
+ cap_name = name
+ cap_lineno = g_lineno
+ -- Create or continue a capture buffer and start the output line capture.
+ local buf = cap_buffers[name]
+ if not buf then buf = {}; cap_buffers[name] = buf end
+ g_capbuffer = buf
+ g_synclineno = 0
+end
+
+-- Stop a capture.
+map_coreop[".endcapture_0"] = function(params)
+ wflush()
+ if not cap_name then wfatal(".endcapture without a valid .capture") end
+ cap_name = nil
+ cap_lineno = nil
+ g_capbuffer = nil
+ g_synclineno = 0
+end
+
+-- Dump a capture buffer.
+map_coreop[".dumpcapture_1"] = function(params)
+ if not params then return "name" end
+ wflush()
+ local name = params[1]
+ if not match(name, "^[%a_][%w_]*$") then
+ wfatal("bad capture name `"..name.."'")
+ end
+ cap_used[name] = true
+ wline(function(out)
+ local buf = cap_buffers[name]
+ if buf then wdumplines(out, buf) end
+ end)
+ g_synclineno = 0
+end
+
+-- Dump all captures and their buffers (with -PP only).
+local function dumpcaptures(out, lvl)
+ out:write("Captures:\n")
+ for name,buf in pairs(cap_buffers) do
+ out:write(format(" %-20s %4s)\n", name, "("..#buf))
+ if lvl > 1 then
+ local bar = rep("=", 76)
+ out:write(" ", bar, "\n")
+ for _,line in ipairs(buf) do
+ out:write(" ", line, "\n")
+ end
+ out:write(" ", bar, "\n\n")
+ end
+ end
+ out:write("\n")
+end
+
+-- Check for unfinished or unused captures.
+local function checkcaptures()
+ if cap_name then
+ wprinterr(g_fname, ":", cap_lineno,
+ ": error: unfinished .capture `", cap_name,"'\n")
+ return
+ end
+ for name in pairs(cap_buffers) do
+ if not cap_used[name] then
+ wprinterr(g_fname, ":*: error: missing .dumpcapture ", name ,"\n")
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Section names.
+local map_sections = {}
+
+-- Pseudo-opcode to define code sections.
+-- TODO: Data sections, BSS sections. Needs extra C code and API.
+map_coreop[".section_*"] = function(params)
+ if not params then return "name..." end
+ if #map_sections > 0 then werror("duplicate section definition") end
+ wflush()
+ for sn,name in ipairs(params) do
+ local opname = "."..name.."_0"
+ if not match(name, "^[%a][%w_]*$") or
+ map_op[opname] or map_op["."..name.."_*"] then
+ werror("bad section name `"..name.."'")
+ end
+ map_sections[#map_sections+1] = name
+ wline(format("#define DASM_SECTION_%s\t%d", upper(name), sn-1))
+ map_op[opname] = function(params) g_arch.section(sn-1) end
+ end
+ wline(format("#define DASM_MAXSECTION\t\t%d", #map_sections))
+end
+
+-- Dump all sections.
+local function dumpsections(out, lvl)
+ out:write("Sections:\n")
+ for _,name in ipairs(map_sections) do
+ out:write(format(" %s\n", name))
+ end
+ out:write("\n")
+end
+
+------------------------------------------------------------------------------
+
+-- Replacement for customized Lua, which lacks the package library.
+local prefix = ""
+if not require then
+ function require(name)
+ local fp = assert(io.open(prefix..name..".lua"))
+ local s = fp:read("*a")
+ assert(fp:close())
+ return assert(loadstring(s, "@"..name..".lua"))()
+ end
+end
+
+-- Load architecture-specific module.
+local function loadarch(arch)
+ if not match(arch, "^[%w_]+$") then return "bad arch name" end
+ local ok, m_arch = pcall(require, "dasm_"..arch)
+ if not ok then return "cannot load module: "..m_arch end
+ g_arch = m_arch
+ wflush = m_arch.passcb(wline, werror, wfatal, wwarn)
+ m_arch.setup(arch, g_opt)
+ map_op, map_def = m_arch.mergemaps(map_coreop, map_def)
+end
+
+-- Dump architecture description.
+function opt_map.dumparch(args)
+ local name = optparam(args)
+ if not g_arch then
+ local err = loadarch(name)
+ if err then opterror(err) end
+ end
+
+ local t = {}
+ for name in pairs(map_coreop) do t[#t+1] = name end
+ for name in pairs(map_op) do t[#t+1] = name end
+ sort(t)
+
+ local out = stdout
+ local _arch = g_arch._info
+ out:write(format("%s version %s, released %s, %s\n",
+ _info.name, _info.version, _info.release, _info.url))
+ g_arch.dumparch(out)
+
+ local pseudo = true
+ out:write("Pseudo-Opcodes:\n")
+ for _,sname in ipairs(t) do
+ local name, nparam = match(sname, "^(.+)_([0-9%*])$")
+ if name then
+ if pseudo and sub(name, 1, 1) ~= "." then
+ out:write("\nOpcodes:\n")
+ pseudo = false
+ end
+ local f = map_op[sname]
+ local s
+ if nparam ~= "*" then nparam = nparam + 0 end
+ if nparam == 0 then
+ s = ""
+ elseif type(f) == "string" then
+ s = map_op[".template__"](nil, f, nparam)
+ else
+ s = f(nil, nparam)
+ end
+ if type(s) == "table" then
+ for _,s2 in ipairs(s) do
+ out:write(format(" %-12s %s\n", name, s2))
+ end
+ else
+ out:write(format(" %-12s %s\n", name, s))
+ end
+ end
+ end
+ out:write("\n")
+ exit(0)
+end
+
+-- Pseudo-opcode to set the architecture.
+-- Only initially available (map_op is replaced when called).
+map_op[".arch_1"] = function(params)
+ if not params then return "name" end
+ local err = loadarch(params[1])
+ if err then wfatal(err) end
+end
+
+-- Dummy .arch pseudo-opcode to improve the error report.
+map_coreop[".arch_1"] = function(params)
+ if not params then return "name" end
+ wfatal("duplicate .arch statement")
+end
+
+------------------------------------------------------------------------------
+
+-- Dummy pseudo-opcode. Don't confuse '.nop' with 'nop'.
+map_coreop[".nop_*"] = function(params)
+ if not params then return "[ignored...]" end
+end
+
+-- Pseudo-opcodes to raise errors.
+map_coreop[".error_1"] = function(params)
+ if not params then return "message" end
+ werror(params[1])
+end
+
+map_coreop[".fatal_1"] = function(params)
+ if not params then return "message" end
+ wfatal(params[1])
+end
+
+-- Dump all user defined elements.
+local function dumpdef(out)
+ local lvl = g_opt.dumpdef
+ if lvl == 0 then return end
+ dumpsections(out, lvl)
+ dumpdefines(out, lvl)
+ if g_arch then g_arch.dumpdef(out, lvl) end
+ dumpmacros(out, lvl)
+ dumpcaptures(out, lvl)
+end
+
+------------------------------------------------------------------------------
+
+-- Helper for splitstmt.
+local splitlvl
+
+local function splitstmt_one(c)
+ if c == "(" then
+ splitlvl = ")"..splitlvl
+ elseif c == "[" then
+ splitlvl = "]"..splitlvl
+ elseif c == "{" then
+ splitlvl = "}"..splitlvl
+ elseif c == ")" or c == "]" or c == "}" then
+ if sub(splitlvl, 1, 1) ~= c then werror("unbalanced (), [] or {}") end
+ splitlvl = sub(splitlvl, 2)
+ elseif splitlvl == "" then
+ return " \0 "
+ end
+ return c
+end
+
+-- Split statement into (pseudo-)opcode and params.
+local function splitstmt(stmt)
+ -- Convert label with trailing-colon into .label statement.
+ local label = match(stmt, "^%s*(.+):%s*$")
+ if label then return ".label", {label} end
+
+ -- Split at commas and equal signs, but obey parentheses and brackets.
+ splitlvl = ""
+ stmt = gsub(stmt, "[,%(%)%[%]{}]", splitstmt_one)
+ if splitlvl ~= "" then werror("unbalanced () or []") end
+
+ -- Split off opcode.
+ local op, other = match(stmt, "^%s*([^%s%z]+)%s*(.*)$")
+ if not op then werror("bad statement syntax") end
+
+ -- Split parameters.
+ local params = {}
+ for p in gmatch(other, "%s*(%Z+)%z?") do
+ params[#params+1] = gsub(p, "%s+$", "")
+ end
+ if #params > 16 then werror("too many parameters") end
+
+ params.op = op
+ return op, params
+end
+
+-- Process a single statement.
+dostmt = function(stmt)
+ -- Ignore empty statements.
+ if match(stmt, "^%s*$") then return end
+
+ -- Capture macro defs before substitution.
+ if mac_capture then return mac_capture(stmt) end
+ stmt = definesubst(stmt)
+
+ -- Emit C code without parsing the line.
+ if sub(stmt, 1, 1) == "|" then
+ local tail = sub(stmt, 2)
+ wflush()
+ if sub(tail, 1, 2) == "//" then wcomment(tail) else wline(tail, true) end
+ return
+ end
+
+ -- Split into (pseudo-)opcode and params.
+ local op, params = splitstmt(stmt)
+
+ -- Get opcode handler (matching # of parameters or generic handler).
+ local f = map_op[op.."_"..#params] or map_op[op.."_*"]
+ if not f then
+ if not g_arch then wfatal("first statement must be .arch") end
+ -- Improve error report.
+ for i=0,9 do
+ if map_op[op.."_"..i] then
+ werror("wrong number of parameters for `"..op.."'")
+ end
+ end
+ werror("unknown statement `"..op.."'")
+ end
+
+ -- Call opcode handler or special handler for template strings.
+ if type(f) == "string" then
+ map_op[".template__"](params, f)
+ else
+ f(params)
+ end
+end
+
+-- Process a single line.
+local function doline(line)
+ if g_opt.flushline then wflush() end
+
+ -- Assembler line?
+ local indent, aline = match(line, "^(%s*)%|(.*)$")
+ if not aline then
+ -- No, plain C code line, need to flush first.
+ wflush()
+ wsync()
+ wline(line, false)
+ return
+ end
+
+ g_indent = indent -- Remember current line indentation.
+
+ -- Emit C code (even from macros). Avoids echo and line parsing.
+ if sub(aline, 1, 1) == "|" then
+ if not mac_capture then
+ wsync()
+ elseif g_opt.comment then
+ wsync()
+ wcomment(aline)
+ end
+ dostmt(aline)
+ return
+ end
+
+ -- Echo assembler line as a comment.
+ if g_opt.comment then
+ wsync()
+ wcomment(aline)
+ end
+
+ -- Strip assembler comments.
+ aline = gsub(aline, "//.*$", "")
+
+ -- Split line into statements at semicolons.
+ if match(aline, ";") then
+ for stmt in gmatch(aline, "[^;]+") do dostmt(stmt) end
+ else
+ dostmt(aline)
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Write DynASM header.
+local function dasmhead(out)
+ out:write(format([[
+/*
+** This file has been pre-processed with DynASM.
+** %s
+** DynASM version %s, DynASM %s version %s
+** DO NOT EDIT! The original file is in "%s".
+*/
+
+#if DASM_VERSION != %d
+#error "Version mismatch between DynASM and included encoding engine"
+#endif
+
+]], _info.url,
+ _info.version, g_arch._info.arch, g_arch._info.version,
+ g_fname, _info.vernum))
+end
+
+-- Read input file.
+readfile = function(fin)
+ g_indent = ""
+ g_lineno = 0
+ g_synclineno = -1
+
+ -- Process all lines.
+ for line in fin:lines() do
+ g_lineno = g_lineno + 1
+ g_curline = line
+ local ok, err = pcall(doline, line)
+ if not ok and wprinterr(err, "\n") then return true end
+ end
+ wflush()
+
+ -- Close input file.
+ assert(fin == stdin or fin:close())
+end
+
+-- Write output file.
+local function writefile(outfile)
+ local fout
+
+ -- Open output file.
+ if outfile == nil or outfile == "-" then
+ fout = stdout
+ else
+ fout = assert(io.open(outfile, "w"))
+ end
+
+ -- Write all buffered lines
+ wdumplines(fout, g_wbuffer)
+
+ -- Close output file.
+ assert(fout == stdout or fout:close())
+
+ -- Optionally dump definitions.
+ dumpdef(fout == stdout and stderr or stdout)
+end
+
+-- Translate an input file to an output file.
+local function translate(infile, outfile)
+ g_wbuffer = {}
+ g_indent = ""
+ g_lineno = 0
+ g_synclineno = -1
+
+ -- Put header.
+ wline(dasmhead)
+
+ -- Read input file.
+ local fin
+ if infile == "-" then
+ g_fname = "(stdin)"
+ fin = stdin
+ else
+ g_fname = infile
+ fin = assert(io.open(infile, "r"))
+ end
+ readfile(fin)
+
+ -- Check for errors.
+ if not g_arch then
+ wprinterr(g_fname, ":*: error: missing .arch directive\n")
+ end
+ checkconds()
+ checkmacros()
+ checkcaptures()
+
+ if g_errcount ~= 0 then
+ stderr:write(g_fname, ":*: info: ", g_errcount, " error",
+ (type(g_errcount) == "number" and g_errcount > 1) and "s" or "",
+ " in input file -- no output file generated.\n")
+ dumpdef(stderr)
+ exit(1)
+ end
+
+ -- Write output file.
+ writefile(outfile)
+end
+
+------------------------------------------------------------------------------
+
+-- Print help text.
+function opt_map.help()
+ stdout:write("DynASM -- ", _info.description, ".\n")
+ stdout:write("DynASM ", _info.version, " ", _info.release, " ", _info.url, "\n")
+ stdout:write[[
+
+Usage: dynasm [OPTION]... INFILE.dasc|-
+
+ -h, --help Display this help text.
+ -V, --version Display version and copyright information.
+
+ -o, --outfile FILE Output file name (default is stdout).
+ -I, --include DIR Add directory to the include search path.
+
+ -c, --ccomment Use /* */ comments for assembler lines.
+ -C, --cppcomment Use // comments for assembler lines (default).
+ -N, --nocomment Suppress assembler lines in output.
+ -M, --maccomment Show macro expansions as comments (default off).
+
+ -L, --nolineno Suppress CPP line number information in output.
+ -F, --flushline Flush action list for every line.
+
+ -D NAME[=SUBST] Define a substitution.
+ -U NAME Undefine a substitution.
+
+ -P, --dumpdef Dump defines, macros, etc. Repeat for more output.
+ -A, --dumparch ARCH Load architecture ARCH and dump description.
+]]
+ exit(0)
+end
+
+-- Print version information.
+function opt_map.version()
+ stdout:write(format("%s version %s, released %s\n%s\n\n%s",
+ _info.name, _info.version, _info.release, _info.url, _info.copyright))
+ exit(0)
+end
+
+-- Misc. options.
+function opt_map.outfile(args) g_opt.outfile = optparam(args) end
+function opt_map.include(args) insert(g_opt.include, 1, optparam(args)) end
+function opt_map.ccomment() g_opt.comment = "/*|"; g_opt.endcomment = " */" end
+function opt_map.cppcomment() g_opt.comment = "//|"; g_opt.endcomment = "" end
+function opt_map.nocomment() g_opt.comment = false end
+function opt_map.maccomment() g_opt.maccomment = true end
+function opt_map.nolineno() g_opt.cpp = false end
+function opt_map.flushline() g_opt.flushline = true end
+function opt_map.dumpdef() g_opt.dumpdef = g_opt.dumpdef + 1 end
+
+------------------------------------------------------------------------------
+
+-- Short aliases for long options.
+local opt_alias = {
+ h = "help", ["?"] = "help", V = "version",
+ o = "outfile", I = "include",
+ c = "ccomment", C = "cppcomment", N = "nocomment", M = "maccomment",
+ L = "nolineno", F = "flushline",
+ P = "dumpdef", A = "dumparch",
+}
+
+-- Parse single option.
+local function parseopt(opt, args)
+ opt_current = #opt == 1 and "-"..opt or "--"..opt
+ local f = opt_map[opt] or opt_map[opt_alias[opt]]
+ if not f then
+ opterror("unrecognized option `", opt_current, "'. Try `--help'.\n")
+ end
+ f(args)
+end
+
+-- Parse arguments.
+local function parseargs(args)
+ -- Default options.
+ g_opt.comment = "//|"
+ g_opt.endcomment = ""
+ g_opt.cpp = true
+ g_opt.dumpdef = 0
+ g_opt.include = { "" }
+
+ -- Process all option arguments.
+ args.argn = 1
+ repeat
+ local a = args[args.argn]
+ if not a then break end
+ local lopt, opt = match(a, "^%-(%-?)(.+)")
+ if not opt then break end
+ args.argn = args.argn + 1
+ if lopt == "" then
+ -- Loop through short options.
+ for o in gmatch(opt, ".") do parseopt(o, args) end
+ else
+ -- Long option.
+ parseopt(opt, args)
+ end
+ until false
+
+ -- Check for proper number of arguments.
+ local nargs = #args - args.argn + 1
+ if nargs ~= 1 then
+ if nargs == 0 then
+ if g_opt.dumpdef > 0 then return dumpdef(stdout) end
+ end
+ opt_map.help()
+ end
+
+ -- Translate a single input file to a single output file
+ -- TODO: Handle multiple files?
+ translate(args[args.argn], g_opt.outfile)
+end
+
+------------------------------------------------------------------------------
+
+-- Add the directory dynasm.lua resides in to the Lua module search path.
+local arg = arg
+if arg and arg[0] then
+ prefix = match(arg[0], "^(.*[/\\])")
+ if package and prefix then package.path = prefix.."?.lua;"..package.path end
+end
+
+-- Start DynASM.
+parseargs{...}
+
+------------------------------------------------------------------------------
+
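The dynasm.lua front end restored above centers on splitstmt()/dostmt(): each assembler line (the ones starting with `|`) is split into a (pseudo-)opcode and its parameters at top-level commas, while nested (), [] and {} are left intact. The standalone Lua sketch below mirrors that splitting logic outside of DynASM so the behavior is easier to see; the function name `split` and the sample statement are illustrative only and are not part of this patch.

  -- Minimal standalone sketch (not part of DynASM): split a statement into an
  -- opcode and comma-separated parameters, honoring (), [] and {} nesting,
  -- in the same spirit as splitstmt() above. Lua 5.1 patterns (%z) assumed.
  local function split(stmt)
    local label = stmt:match("^%s*(.+):%s*$")
    if label then return ".label", { label } end   -- trailing colon => .label
    local lvl = ""
    stmt = stmt:gsub("[,%(%)%[%]{}]", function(c)
      if c == "(" then lvl = ")"..lvl
      elseif c == "[" then lvl = "]"..lvl
      elseif c == "{" then lvl = "}"..lvl
      elseif c == ")" or c == "]" or c == "}" then lvl = lvl:sub(2)
      elseif lvl == "" then return " \0 " end      -- top-level comma => separator
      return c
    end)
    local op, rest = stmt:match("^%s*([^%s%z]+)%s*(.*)$")
    local params = {}
    for p in rest:gmatch("%s*(%Z+)%z?") do
      params[#params+1] = p:gsub("%s+$", "")
    end
    return op, params
  end

  local op, params = split("mov dword [ebx+4], eax")
  print(op, params[1], params[2])   --> mov   dword [ebx+4]   eax

Note how the bracketed operand `dword [ebx+4]` survives as a single parameter because the comma splitting only happens at nesting level zero, exactly as in splitstmt_one() above.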
diff --git a/3rdparty/lua/etc/luajit.1 b/3rdparty/lua/etc/luajit.1
index 223a028..6489bd3 100644
--- a/3rdparty/lua/etc/luajit.1
+++ b/3rdparty/lua/etc/luajit.1
@@ -1,88 +1,88 @@
-.TH luajit 1 "" "" "LuaJIT documentation"
-.SH NAME
-luajit \- Just-In-Time Compiler for the Lua Language
-\fB
-.SH SYNOPSIS
-.B luajit
-[\fIoptions\fR]... [\fIscript\fR [\fIargs\fR]...]
-.SH "WEB SITE"
-.IR http://luajit.org
-.SH DESCRIPTION
-.PP
-This is the command-line program to run Lua programs with \fBLuaJIT\fR.
-.PP
-\fBLuaJIT\fR is a just-in-time (JIT) compiler for the Lua language.
-The virtual machine (VM) is based on a fast interpreter combined with
-a trace compiler. It can significantly improve the performance of Lua programs.
-.PP
-\fBLuaJIT\fR is API\- and ABI-compatible with the VM of the standard
-Lua\ 5.1 interpreter. When embedding the VM into an application,
-the built library can be used as a drop-in replacement.
-.SH OPTIONS
-.TP
-.BI "\-e " chunk
-Run the given chunk of Lua code.
-.TP
-.BI "\-l " library
-Load the named library, just like \fBrequire("\fR\fIlibrary\fR\fB")\fR.
-.TP
-.BI "\-b " ...
-Save or list bytecode. Run without arguments to get help on options.
-.TP
-.BI "\-j " command
-Perform LuaJIT control command (optional space after \fB\-j\fR).
-.TP
-.BI "\-O" [opt]
-Control LuaJIT optimizations.
-.TP
-.B "\-i"
-Run in interactive mode.
-.TP
-.B "\-v"
-Show \fBLuaJIT\fR version.
-.TP
-.B "\-E"
-Ignore environment variables.
-.TP
-.B "\-\-"
-Stop processing options.
-.TP
-.B "\-"
-Read script from stdin instead.
-.PP
-After all options are processed, the given \fIscript\fR is run.
-The arguments are passed in the global \fIarg\fR table.
-.PP
-Interactive mode is only entered, if no \fIscript\fR and no \fB\-e\fR
-option is given. Interactive mode can be left with EOF (\fICtrl\-Z\fB).
-.SH EXAMPLES
-.TP
-luajit hello.lua world
-
-Prints "Hello world", assuming \fIhello.lua\fR contains:
-.br
- print("Hello", arg[1])
-.TP
-luajit \-e "local x=0; for i=1,1e9 do x=x+i end; print(x)"
-
-Calculates the sum of the numbers from 1 to 1000000000.
-.br
-And finishes in a reasonable amount of time, too.
-.TP
-luajit \-jv \-e "for i=1,10 do for j=1,10 do for k=1,100 do end end end"
-
-Runs some nested loops and shows the resulting traces.
-.SH COPYRIGHT
-.PP
-\fBLuaJIT\fR is Copyright \(co 2005-2015 Mike Pall.
-.br
-\fBLuaJIT\fR is open source software, released under the MIT license.
-.SH SEE ALSO
-.PP
-More details in the provided HTML docs or at:
-.IR http://luajit.org
-.br
-More about the Lua language can be found at:
-.IR http://lua.org/docs.html
-.PP
-lua(1)
+.TH luajit 1 "" "" "LuaJIT documentation"
+.SH NAME
+luajit \- Just-In-Time Compiler for the Lua Language
+\fB
+.SH SYNOPSIS
+.B luajit
+[\fIoptions\fR]... [\fIscript\fR [\fIargs\fR]...]
+.SH "WEB SITE"
+.IR http://luajit.org
+.SH DESCRIPTION
+.PP
+This is the command-line program to run Lua programs with \fBLuaJIT\fR.
+.PP
+\fBLuaJIT\fR is a just-in-time (JIT) compiler for the Lua language.
+The virtual machine (VM) is based on a fast interpreter combined with
+a trace compiler. It can significantly improve the performance of Lua programs.
+.PP
+\fBLuaJIT\fR is API\- and ABI-compatible with the VM of the standard
+Lua\ 5.1 interpreter. When embedding the VM into an application,
+the built library can be used as a drop-in replacement.
+.SH OPTIONS
+.TP
+.BI "\-e " chunk
+Run the given chunk of Lua code.
+.TP
+.BI "\-l " library
+Load the named library, just like \fBrequire("\fR\fIlibrary\fR\fB")\fR.
+.TP
+.BI "\-b " ...
+Save or list bytecode. Run without arguments to get help on options.
+.TP
+.BI "\-j " command
+Perform LuaJIT control command (optional space after \fB\-j\fR).
+.TP
+.BI "\-O" [opt]
+Control LuaJIT optimizations.
+.TP
+.B "\-i"
+Run in interactive mode.
+.TP
+.B "\-v"
+Show \fBLuaJIT\fR version.
+.TP
+.B "\-E"
+Ignore environment variables.
+.TP
+.B "\-\-"
+Stop processing options.
+.TP
+.B "\-"
+Read script from stdin instead.
+.PP
+After all options are processed, the given \fIscript\fR is run.
+The arguments are passed in the global \fIarg\fR table.
+.PP
+Interactive mode is only entered, if no \fIscript\fR and no \fB\-e\fR
+option is given. Interactive mode can be left with EOF (\fICtrl\-Z\fB).
+.SH EXAMPLES
+.TP
+luajit hello.lua world
+
+Prints "Hello world", assuming \fIhello.lua\fR contains:
+.br
+ print("Hello", arg[1])
+.TP
+luajit \-e "local x=0; for i=1,1e9 do x=x+i end; print(x)"
+
+Calculates the sum of the numbers from 1 to 1000000000.
+.br
+And finishes in a reasonable amount of time, too.
+.TP
+luajit \-jv \-e "for i=1,10 do for j=1,10 do for k=1,100 do end end end"
+
+Runs some nested loops and shows the resulting traces.
+.SH COPYRIGHT
+.PP
+\fBLuaJIT\fR is Copyright \(co 2005-2013 Mike Pall.
+.br
+\fBLuaJIT\fR is open source software, released under the MIT license.
+.SH SEE ALSO
+.PP
+More details in the provided HTML docs or at:
+.IR http://luajit.org
+.br
+More about the Lua language can be found at:
+.IR http://lua.org/docs.html
+.PP
+lua(1)
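The man page above describes the -j control commands and -O optimization flags only from the command line; the same control is available at run time through the bundled "jit" module. A minimal sketch, limited to calls documented in the shipped ext_jit.html (jit.version, jit.arch, jit.os, jit.status, jit.on, jit.off); the printed values are examples, not guaranteed output:

  -- Run-time counterpart to the -j options described in the man page above.
  local jit = require("jit")
  print(jit.version)          -- e.g. "LuaJIT 2.0.4"
  print(jit.arch, jit.os)     -- target architecture and OS, e.g. "x86", "Windows"
  print(jit.status())         -- true/false plus the active optimization flags
  jit.off()                   -- like -joff: disable the JIT compiler
  jit.on()                    -- like -jon: re-enable it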
diff --git a/3rdparty/lua/etc/luajit.pc b/3rdparty/lua/etc/luajit.pc
index 7292997..5a982a6 100644
--- a/3rdparty/lua/etc/luajit.pc
+++ b/3rdparty/lua/etc/luajit.pc
@@ -1,25 +1,24 @@
-# Package information for LuaJIT to be used by pkg-config.
-majver=2
-minver=0
-relver=4
-version=${majver}.${minver}.${relver}
-abiver=5.1
-
-prefix=/usr/local
-multilib=lib
-exec_prefix=${prefix}
-libdir=${exec_prefix}/${multilib}
-libname=luajit-${abiver}
-includedir=${prefix}/include/luajit-${majver}.${minver}
-
-INSTALL_LMOD=${prefix}/share/lua/${abiver}
-INSTALL_CMOD=${prefix}/${multilib}/lua/${abiver}
-
-Name: LuaJIT
-Description: Just-in-time compiler for Lua
-URL: http://luajit.org
-Version: ${version}
-Requires:
-Libs: -L${libdir} -l${libname}
-Libs.private: -Wl,-E -lm -ldl
-Cflags: -I${includedir}
+# Package information for LuaJIT to be used by pkg-config.
+majver=2
+minver=0
+relver=2
+version=${majver}.${minver}.${relver}
+abiver=5.1
+
+prefix=/usr/local
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+libname=luajit-${abiver}
+includedir=${prefix}/include/luajit-${majver}.${minver}
+
+INSTALL_LMOD=${prefix}/share/lua/${abiver}
+INSTALL_CMOD=${prefix}/lib/lua/${abiver}
+
+Name: LuaJIT
+Description: Just-in-time compiler for Lua
+URL: http://luajit.org
+Version: ${version}
+Requires:
+Libs: -L${libdir} -l${libname}
+Libs.private: -Wl,-E -lm -ldl
+Cflags: -I${includedir}
diff --git a/3rdparty/lua/src/Makefile b/3rdparty/lua/src/Makefile
index 1d38fa2..999e280 100644
--- a/3rdparty/lua/src/Makefile
+++ b/3rdparty/lua/src/Makefile
@@ -7,12 +7,12 @@
# Also works with MinGW and Cygwin on Windows.
# Please check msvcbuild.bat for building with MSVC on Windows.
#
-# Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+# Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
##############################################################################
MAJVER= 2
MINVER= 0
-RELVER= 4
+RELVER= 2
ABIVER= 5.1
NODOTABIVER= 51
@@ -122,10 +122,8 @@ XCFLAGS=
#
# Use the system provided memory allocator (realloc) instead of the
# bundled memory allocator. This is slower, but sometimes helpful for
-# debugging. This option cannot be enabled on x64, since realloc usually
-# doesn't return addresses in the right address range.
-# OTOH this option is mandatory for Valgrind's memcheck tool on x64 and
-# the only way to get useful results from it for all other architectures.
+# debugging. It's helpful for Valgrind's memcheck tool, too. This option
+# cannot be enabled on x64, since the built-in allocator is mandatory.
#XCFLAGS+= -DLUAJIT_USE_SYSMALLOC
#
# This define is required to run LuaJIT under Valgrind. The Valgrind
@@ -190,10 +188,9 @@ TARGET_LD= $(CROSS)$(CC)
TARGET_AR= $(CROSS)ar rcus
TARGET_STRIP= $(CROSS)strip
-TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib)
TARGET_SONAME= libluajit-$(ABIVER).so.$(MAJVER)
TARGET_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).dylib
-TARGET_DYLIBPATH= $(TARGET_LIBPATH)/$(TARGET_DYLIBNAME)
+TARGET_DYLIBPATH= $(or $(PREFIX),/usr/local)/lib/$(TARGET_DYLIBNAME)
TARGET_DLLNAME= lua$(NODOTABIVER).dll
TARGET_XSHLDFLAGS= -shared -fPIC -Wl,-soname,$(TARGET_SONAME)
TARGET_DYNXLDOPTS=
@@ -252,18 +249,12 @@ TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH))
ifneq (,$(PREFIX))
ifneq (/usr/local,$(PREFIX))
- TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\"
+ TARGET_XCFLAGS+= -DLUA_XROOT=\"$(PREFIX)/\"
ifneq (/usr,$(PREFIX))
- TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH)
+ TARGET_DYNXLDOPTS= -Wl,-rpath,$(PREFIX)/lib
endif
endif
endif
-ifneq (,$(MULTILIB))
- TARGET_XCFLAGS+= -DLUA_MULTILIB=\"$(MULTILIB)\"
-endif
-ifneq (,$(LMULTILIB))
- TARGET_XCFLAGS+= -DLUA_LMULTILIB=\"$(LMULTILIB)\"
-endif
##############################################################################
# System detection.
@@ -290,15 +281,13 @@ ifeq (Windows,$(TARGET_SYS))
TARGET_XSHLDFLAGS= -shared
TARGET_DYNXLDOPTS=
else
-ifeq (,$(shell $(TARGET_CC) -o /dev/null -c -x c /dev/null -fno-stack-protector 2>/dev/null || echo 1))
- TARGET_XCFLAGS+= -fno-stack-protector
-endif
ifeq (Darwin,$(TARGET_SYS))
ifeq (,$(MACOSX_DEPLOYMENT_TARGET))
export MACOSX_DEPLOYMENT_TARGET=10.4
endif
TARGET_STRIP+= -x
TARGET_AR+= 2>/dev/null
+ TARGET_XCFLAGS+= -fno-stack-protector
TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC
TARGET_DYNXLDOPTS=
TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER)
@@ -310,10 +299,14 @@ else
ifeq (iOS,$(TARGET_SYS))
TARGET_STRIP+= -x
TARGET_AR+= 2>/dev/null
+ TARGET_XCFLAGS+= -fno-stack-protector
TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC
TARGET_DYNXLDOPTS=
TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER)
else
+ ifneq (,$(findstring stack-protector,$(shell $(TARGET_CC) -dumpspecs)))
+ TARGET_XCFLAGS+= -fno-stack-protector
+ endif
ifneq (SunOS,$(TARGET_SYS))
ifneq (PS3,$(TARGET_SYS))
TARGET_XLDFLAGS+= -Wl,-E
diff --git a/3rdparty/lua/src/Makefile.dep b/3rdparty/lua/src/Makefile.dep
index f790aee..5d91723 100644
--- a/3rdparty/lua/src/Makefile.dep
+++ b/3rdparty/lua/src/Makefile.dep
@@ -1,226 +1,226 @@
-lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
- lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \
- lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h lj_alloc.h
-lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
- lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \
- lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \
- lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \
- lj_lib.h lj_libdef.h
-lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
- lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_lib.h lj_libdef.h
-lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
- lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \
- lj_libdef.h
-lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
- lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \
- lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \
- lj_ccallback.h lj_clib.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
-lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h
-lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
- lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_ff.h \
- lj_ffdef.h lj_lib.h lj_libdef.h
-lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h \
- lj_obj.h lj_def.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \
- lj_bc.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_target.h \
- lj_target_*.h lj_dispatch.h lj_vm.h lj_vmevent.h lj_lib.h luajit.h \
- lj_libdef.h
-lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
- lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_libdef.h
-lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
- lj_arch.h lj_err.h lj_errmsg.h lj_lib.h lj_libdef.h
-lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
- lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h
-lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
- lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h \
- lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h lj_char.h \
- lj_lib.h lj_libdef.h
-lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
- lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_lib.h \
- lj_libdef.h
-lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h
-lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
- lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \
- lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h
-lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \
- lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \
- lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \
- lj_asm_*.h
-lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \
- lj_bcdef.h
-lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_bc.h lj_ctype.h \
- lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h
-lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h lj_ir.h \
- lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h
-lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ctype.h lj_cconv.h \
- lj_cdata.h lj_carith.h
-lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \
- lj_cdata.h lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
- lj_traceerr.h
-lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \
- lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \
- lj_bc.h lj_ctype.h lj_cconv.h lj_ccall.h lj_ccallback.h lj_target.h \
- lj_target_*.h lj_mcode.h lj_jit.h lj_ir.h lj_trace.h lj_dispatch.h \
- lj_traceerr.h lj_vm.h
-lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \
- lj_ccallback.h
-lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \
- lj_cdata.h
-lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h
-lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \
- lj_cdata.h lj_clib.h
-lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_ctype.h lj_cparse.h lj_frame.h \
- lj_bc.h lj_vm.h lj_char.h lj_strscan.h
-lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h \
- lj_gc.h lj_cdata.h lj_cparse.h lj_cconv.h lj_clib.h lj_ccall.h lj_ff.h \
- lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
- lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \
- lj_crecord.h
-lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_ccallback.h
-lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_state.h lj_frame.h \
- lj_bc.h lj_vm.h lj_jit.h lj_ir.h
-lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_func.h lj_str.h lj_tab.h lj_meta.h lj_debug.h \
- lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h lj_jit.h lj_ir.h \
- lj_ccallback.h lj_ctype.h lj_gc.h lj_trace.h lj_dispatch.h lj_traceerr.h \
- lj_vm.h luajit.h
-lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \
- lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \
- lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
- lj_traceerr.h lj_vm.h
-lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \
- lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
- lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \
- lj_vm.h lj_strscan.h lj_recdef.h
-lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
- lj_traceerr.h lj_vm.h
-lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h \
- lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h \
- lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h
-lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_jit.h \
- lj_ir.h lj_dispatch.h
-lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
- lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h lj_carith.h \
- lj_vm.h lj_strscan.h lj_lib.h
-lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h lualib.h \
- lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h
-lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \
- lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_strscan.h lj_lib.h
-lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
- lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_func.h lj_frame.h \
- lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h
-lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h \
- lj_dispatch.h lj_bc.h lj_traceerr.h lj_vm.h
-lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
- lj_vm.h lj_strscan.h
-lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
-lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_ir.h lj_jit.h lj_iropt.h
-lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
- lj_bc.h lj_traceerr.h lj_ctype.h lj_gc.h lj_carith.h lj_vm.h \
- lj_strscan.h lj_folddef.h
-lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h \
- lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h lj_vm.h
-lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_tab.h lj_ir.h lj_jit.h lj_iropt.h
-lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \
- lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
- lj_traceerr.h lj_vm.h lj_strscan.h
-lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h
-lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \
- lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_ircall.h \
- lj_iropt.h lj_vm.h
-lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h \
- lj_state.h lj_bc.h lj_ctype.h lj_lex.h lj_parse.h lj_vm.h lj_vmevent.h
-lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
- lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h \
- lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h lj_record.h \
- lj_ffrecord.h lj_snap.h lj_vm.h
-lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \
- lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \
- lj_target_*.h lj_ctype.h lj_cdata.h
-lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_meta.h \
- lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h lj_ir.h \
- lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_alloc.h
-lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_char.h
-lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_char.h lj_strscan.h
-lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
- lj_err.h lj_errmsg.h lj_tab.h
-lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \
- lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \
- lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \
- lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h
-lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_gc.h lj_udata.h
-lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_str.h lj_tab.h lj_state.h lj_dispatch.h lj_bc.h lj_jit.h lj_ir.h \
- lj_vm.h lj_vmevent.h
-lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
- lj_ir.h lj_vm.h
-ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \
- lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h \
- lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h \
- lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_err.c \
- lj_debug.h lj_ff.h lj_ffdef.h lj_char.c lj_char.h lj_bc.c lj_bcdef.h \
- lj_obj.c lj_str.c lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_strscan.h \
- lj_debug.c lj_state.c lj_lex.h lj_alloc.h lj_dispatch.c lj_ccallback.h \
- luajit.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c lj_api.c \
- lj_lex.c lualib.h lj_parse.h lj_parse.c lj_bcread.c lj_bcdump.h \
- lj_bcwrite.c lj_load.c lj_ctype.c lj_cdata.c lj_cconv.h lj_cconv.c \
- lj_ccall.c lj_ccall.h lj_ccallback.c lj_target.h lj_target_*.h \
- lj_mcode.h lj_carith.c lj_carith.h lj_clib.c lj_clib.h lj_cparse.c \
- lj_cparse.h lj_lib.c lj_lib.h lj_ir.c lj_ircall.h lj_iropt.h \
- lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c lj_opt_dce.c \
- lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c lj_mcode.c \
- lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \
- lj_crecord.h lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h lj_emit_*.h \
- lj_asm_*.h lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c \
- lib_base.c lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c \
- lib_os.c lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c \
- lib_init.c
-luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h
-host/buildvm.o: host/buildvm.c host/buildvm.h lj_def.h lua.h luaconf.h \
- lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_gc.h lj_obj.h lj_bc.h lj_ir.h \
- lj_ircall.h lj_ir.h lj_jit.h lj_frame.h lj_bc.h lj_dispatch.h lj_ctype.h \
- lj_gc.h lj_ccall.h lj_ctype.h luajit.h \
- host/buildvm_arch.h lj_traceerr.h
-host/buildvm_asm.o: host/buildvm_asm.c host/buildvm.h lj_def.h lua.h luaconf.h \
- lj_arch.h lj_bc.h lj_def.h lj_arch.h
-host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \
- luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h
-host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \
- lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_lib.h lj_obj.h
-host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \
- luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h
-host/minilua.o: host/minilua.c
+lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h lj_alloc.h
+lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h \
+ lj_tab.h lj_meta.h lj_state.h lj_ctype.h lj_cconv.h lj_bc.h lj_ff.h \
+ lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h lj_strscan.h \
+ lj_lib.h lj_libdef.h
+lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_lib.h lj_libdef.h
+lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \
+ lj_libdef.h
+lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \
+ lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \
+ lj_ccallback.h lj_clib.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
+lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h
+lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_ff.h lj_ffdef.h \
+ lj_lib.h lj_libdef.h
+lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h \
+ lj_obj.h lj_def.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \
+ lj_bc.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_target.h \
+ lj_target_*.h lj_dispatch.h lj_vm.h lj_vmevent.h lj_lib.h luajit.h \
+ lj_libdef.h
+lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_libdef.h
+lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_lib.h lj_libdef.h
+lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h
+lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h \
+ lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h lj_char.h \
+ lj_lib.h lj_libdef.h
+lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_lib.h \
+ lj_libdef.h
+lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h
+lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
+ lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h
+lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h \
+ lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h lj_emit_*.h \
+ lj_asm_*.h
+lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \
+ lj_bcdef.h
+lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_bc.h lj_ctype.h \
+ lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h
+lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h lj_ir.h \
+ lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h
+lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_carith.h
+lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h
+lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \
+ lj_bc.h lj_ctype.h lj_cconv.h lj_ccall.h lj_ccallback.h lj_target.h \
+ lj_target_*.h lj_mcode.h lj_jit.h lj_ir.h lj_trace.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h
+lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_gc.h lj_cdata.h lj_cconv.h \
+ lj_ccallback.h
+lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h
+lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h
+lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_clib.h
+lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_ctype.h lj_cparse.h lj_frame.h \
+ lj_bc.h lj_vm.h lj_char.h lj_strscan.h
+lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h \
+ lj_gc.h lj_cdata.h lj_cparse.h lj_cconv.h lj_clib.h lj_ccall.h lj_ff.h \
+ lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \
+ lj_crecord.h
+lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_ccallback.h
+lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_state.h lj_frame.h \
+ lj_bc.h lj_jit.h lj_ir.h
+lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_func.h lj_str.h lj_tab.h lj_meta.h lj_debug.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h lj_jit.h lj_ir.h \
+ lj_ccallback.h lj_ctype.h lj_gc.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_vm.h luajit.h
+lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \
+ lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \
+ lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h
+lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ff.h \
+ lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_crecord.h \
+ lj_vm.h lj_strscan.h lj_recdef.h
+lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h lj_vm.h
+lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h \
+ lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h
+lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_jit.h \
+ lj_ir.h lj_dispatch.h
+lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h lj_carith.h \
+ lj_vm.h lj_strscan.h lj_lib.h
+lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h lualib.h \
+ lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h
+lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \
+ lj_dispatch.h lj_jit.h lj_ir.h lj_vm.h lj_strscan.h lj_lib.h
+lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_func.h lj_frame.h \
+ lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h
+lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h lj_vm.h
+lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
+ lj_vm.h lj_strscan.h
+lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
+lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_jit.h lj_iropt.h
+lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
+ lj_bc.h lj_traceerr.h lj_ctype.h lj_gc.h lj_carith.h lj_vm.h \
+ lj_strscan.h lj_folddef.h
+lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h lj_vm.h
+lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_tab.h lj_ir.h lj_jit.h lj_iropt.h
+lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h lj_strscan.h
+lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h
+lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_str.h lj_ir.h lj_jit.h lj_ircall.h \
+ lj_iropt.h lj_vm.h
+lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h \
+ lj_state.h lj_bc.h lj_ctype.h lj_lex.h lj_parse.h lj_vm.h lj_vmevent.h
+lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
+ lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h \
+ lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h lj_record.h \
+ lj_ffrecord.h lj_snap.h lj_vm.h
+lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \
+ lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \
+ lj_target_*.h lj_ctype.h lj_cdata.h
+lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_meta.h \
+ lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_traceerr.h lj_vm.h lj_lex.h lj_alloc.h
+lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_state.h lj_char.h
+lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_char.h lj_strscan.h
+lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_tab.h
+lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \
+ lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \
+ lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h
+lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_udata.h
+lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_str.h lj_tab.h lj_state.h lj_dispatch.h lj_bc.h lj_jit.h lj_ir.h \
+ lj_vm.h lj_vmevent.h
+lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_vm.h
+ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_gc.c lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h \
+ lj_udata.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h \
+ lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_err.c \
+ lj_debug.h lj_ff.h lj_ffdef.h lj_char.c lj_char.h lj_bc.c lj_bcdef.h \
+ lj_obj.c lj_str.c lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_strscan.h \
+ lj_debug.c lj_state.c lj_lex.h lj_alloc.h lj_dispatch.c lj_ccallback.h \
+ luajit.h lj_vmevent.c lj_vmevent.h lj_vmmath.c lj_strscan.c lj_api.c \
+ lj_lex.c lualib.h lj_parse.h lj_parse.c lj_bcread.c lj_bcdump.h \
+ lj_bcwrite.c lj_load.c lj_ctype.c lj_cdata.c lj_cconv.h lj_cconv.c \
+ lj_ccall.c lj_ccall.h lj_ccallback.c lj_target.h lj_target_*.h \
+ lj_mcode.h lj_carith.c lj_carith.h lj_clib.c lj_clib.h lj_cparse.c \
+ lj_cparse.h lj_lib.c lj_lib.h lj_ir.c lj_ircall.h lj_iropt.h \
+ lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c lj_opt_dce.c \
+ lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c lj_mcode.c \
+ lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \
+ lj_crecord.h lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h lj_emit_*.h \
+ lj_asm_*.h lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c \
+ lib_base.c lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c \
+ lib_os.c lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c \
+ lib_init.c
+luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h
+host/buildvm.o: host/buildvm.c host/buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_gc.h lj_obj.h lj_bc.h lj_ir.h \
+ lj_ircall.h lj_ir.h lj_jit.h lj_frame.h lj_bc.h lj_dispatch.h lj_ctype.h \
+ lj_gc.h lj_ccall.h lj_ctype.h luajit.h \
+ host/buildvm_arch.h lj_traceerr.h
+host/buildvm_asm.o: host/buildvm_asm.c host/buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_bc.h lj_def.h lj_arch.h
+host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \
+ luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h
+host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_lib.h lj_obj.h
+host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \
+ luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h
+host/minilua.o: host/minilua.c
diff --git a/3rdparty/lua/src/host/README b/3rdparty/lua/src/host/README
index 78f51a5..abfcdaa 100644
--- a/3rdparty/lua/src/host/README
+++ b/3rdparty/lua/src/host/README
@@ -1,4 +1,4 @@
-The files in this directory are only used during the build process of LuaJIT.
-For cross-compilation, they must be executed on the host, not on the target.
-
-These files should NOT be installed!
+The files in this directory are only used during the build process of LuaJIT.
+For cross-compilation, they must be executed on the host, not on the target.
+
+These files should NOT be installed!
diff --git a/3rdparty/lua/src/host/buildvm.c b/3rdparty/lua/src/host/buildvm.c
index 8ebec24..2ce3b63 100644
--- a/3rdparty/lua/src/host/buildvm.c
+++ b/3rdparty/lua/src/host/buildvm.c
@@ -1,516 +1,516 @@
-/*
-** LuaJIT VM builder.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** This is a tool to build the hand-tuned assembler code required for
-** LuaJIT's bytecode interpreter. It supports a variety of output formats
-** to feed different toolchains (see usage() below).
-**
-** This tool is not particularly optimized because it's only used while
-** _building_ LuaJIT. There's no point in distributing or installing it.
-** Only the object code generated by this tool is linked into LuaJIT.
-**
-** Caveat: some memory is not free'd, error handling is lazy.
-** It's a one-shot tool -- any effort fixing this would be wasted.
-*/
-
-#include "buildvm.h"
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_bc.h"
-#include "lj_ir.h"
-#include "lj_ircall.h"
-#include "lj_frame.h"
-#include "lj_dispatch.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#include "lj_ccall.h"
-#endif
-#include "luajit.h"
-
-#if defined(_WIN32)
-#include <fcntl.h>
-#include <io.h>
-#endif
-
-/* ------------------------------------------------------------------------ */
-
-/* DynASM glue definitions. */
-#define Dst ctx
-#define Dst_DECL BuildCtx *ctx
-#define Dst_REF (ctx->D)
-#define DASM_CHECKS 1
-
-#include "../dynasm/dasm_proto.h"
-
-/* Glue macros for DynASM. */
-static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type);
-
-#define DASM_EXTERN(ctx, addr, idx, type) \
- collect_reloc(ctx, addr, idx, type)
-
-/* ------------------------------------------------------------------------ */
-
-/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */
-#define DASM_ALIGNED_WRITES 1
-
-/* Embed architecture-specific DynASM encoder. */
-#if LJ_TARGET_X86ORX64
-#include "../dynasm/dasm_x86.h"
-#elif LJ_TARGET_ARM
-#include "../dynasm/dasm_arm.h"
-#elif LJ_TARGET_PPC
-#include "../dynasm/dasm_ppc.h"
-#elif LJ_TARGET_PPCSPE
-#include "../dynasm/dasm_ppc.h"
-#elif LJ_TARGET_MIPS
-#include "../dynasm/dasm_mips.h"
-#else
-#error "No support for this architecture (yet)"
-#endif
-
-/* Embed generated architecture-specific backend. */
-#include "buildvm_arch.h"
-
-/* ------------------------------------------------------------------------ */
-
-void owrite(BuildCtx *ctx, const void *ptr, size_t sz)
-{
- if (fwrite(ptr, 1, sz, ctx->fp) != sz) {
- fprintf(stderr, "Error: cannot write to output file: %s\n",
- strerror(errno));
- exit(1);
- }
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* Emit code as raw bytes. Only used for DynASM debugging. */
-static void emit_raw(BuildCtx *ctx)
-{
- owrite(ctx, ctx->code, ctx->codesz);
-}
-
-/* -- Build machine code -------------------------------------------------- */
-
-static const char *sym_decorate(BuildCtx *ctx,
- const char *prefix, const char *suffix)
-{
- char name[256];
- char *p;
-#if LJ_64
- const char *symprefix = ctx->mode == BUILD_machasm ? "_" : "";
-#elif LJ_TARGET_XBOX360
- const char *symprefix = "";
-#else
- const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : "";
-#endif
- sprintf(name, "%s%s%s", symprefix, prefix, suffix);
- p = strchr(name, '@');
- if (p) {
-#if LJ_TARGET_X86ORX64
- if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj))
- name[0] = '@';
- else
- *p = '\0';
-#elif (LJ_TARGET_PPC || LJ_TARGET_PPCSPE) && !LJ_TARGET_CONSOLE
- /* Keep @plt. */
-#else
- *p = '\0';
-#endif
- }
- p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */
- strcpy(p, name);
- return p;
-}
-
-#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1)
-
-static int relocmap[NRELOCSYM];
-
-/* Collect external relocations. */
-static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type)
-{
- if (ctx->nreloc >= BUILD_MAX_RELOC) {
- fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n");
- exit(1);
- }
- if (relocmap[idx] < 0) {
- relocmap[idx] = ctx->nrelocsym;
- ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]);
- ctx->nrelocsym++;
- }
- ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code);
- ctx->reloc[ctx->nreloc].sym = relocmap[idx];
- ctx->reloc[ctx->nreloc].type = type;
- ctx->nreloc++;
-#if LJ_TARGET_XBOX360
- return (int)(ctx->code - addr) + 4; /* Encode symbol offset of .text. */
-#else
- return 0; /* Encode symbol offset of 0. */
-#endif
-}
-
-/* Naive insertion sort. Performance doesn't matter here. */
-static void sym_insert(BuildCtx *ctx, int32_t ofs,
- const char *prefix, const char *suffix)
-{
- ptrdiff_t i = ctx->nsym++;
- while (i > 0) {
- if (ctx->sym[i-1].ofs <= ofs)
- break;
- ctx->sym[i] = ctx->sym[i-1];
- i--;
- }
- ctx->sym[i].ofs = ofs;
- ctx->sym[i].name = sym_decorate(ctx, prefix, suffix);
-}
-
-/* Build the machine code. */
-static int build_code(BuildCtx *ctx)
-{
- int status;
- int i;
-
- /* Initialize DynASM structures. */
- ctx->nglob = GLOB__MAX;
- ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *));
- memset(ctx->glob, 0, ctx->nglob*sizeof(void *));
- ctx->nreloc = 0;
-
- ctx->globnames = globnames;
- ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *));
- ctx->nrelocsym = 0;
- for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1;
-
- ctx->dasm_ident = DASM_IDENT;
- ctx->dasm_arch = DASM_ARCH;
-
- dasm_init(Dst, DASM_MAXSECTION);
- dasm_setupglobal(Dst, ctx->glob, ctx->nglob);
- dasm_setup(Dst, build_actionlist);
-
- /* Call arch-specific backend to emit the code. */
- ctx->npc = build_backend(ctx);
-
- /* Finalize the code. */
- (void)dasm_checkstep(Dst, -1);
- if ((status = dasm_link(Dst, &ctx->codesz))) return status;
- ctx->code = (uint8_t *)malloc(ctx->codesz);
- if ((status = dasm_encode(Dst, (void *)ctx->code))) return status;
-
- /* Allocate symbol table and bytecode offsets. */
- ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin");
- ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym));
- ctx->nsym = 0;
- ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t));
-
- /* Collect the opcodes (PC labels). */
- for (i = 0; i < ctx->npc; i++) {
- int32_t ofs = dasm_getpclabel(Dst, i);
- if (ofs < 0) return 0x22000000|i;
- ctx->bc_ofs[i] = ofs;
- if ((LJ_HASJIT ||
- !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP ||
- i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) &&
- (LJ_HASFFI || i != BC_KCDATA))
- sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]);
- }
-
- /* Collect the globals (named labels). */
- for (i = 0; i < ctx->nglob; i++) {
- const char *gl = globnames[i];
- int len = (int)strlen(gl);
- if (!ctx->glob[i]) {
- fprintf(stderr, "Error: undefined global %s\n", gl);
- exit(2);
- }
- /* Skip the _Z symbols. */
- if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z'))
- sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code),
- LABEL_PREFIX, globnames[i]);
- }
-
- /* Close the address range. */
- sym_insert(ctx, (int32_t)ctx->codesz, "", "");
- ctx->nsym--;
-
- dasm_free(Dst);
-
- return 0;
-}
-
-/* -- Generate VM enums --------------------------------------------------- */
-
-const char *const bc_names[] = {
-#define BCNAME(name, ma, mb, mc, mt) #name,
-BCDEF(BCNAME)
-#undef BCNAME
- NULL
-};
-
-const char *const ir_names[] = {
-#define IRNAME(name, m, m1, m2) #name,
-IRDEF(IRNAME)
-#undef IRNAME
- NULL
-};
-
-const char *const irt_names[] = {
-#define IRTNAME(name, size) #name,
-IRTDEF(IRTNAME)
-#undef IRTNAME
- NULL
-};
-
-const char *const irfpm_names[] = {
-#define FPMNAME(name) #name,
-IRFPMDEF(FPMNAME)
-#undef FPMNAME
- NULL
-};
-
-const char *const irfield_names[] = {
-#define FLNAME(name, ofs) #name,
-IRFLDEF(FLNAME)
-#undef FLNAME
- NULL
-};
-
-const char *const ircall_names[] = {
-#define IRCALLNAME(cond, name, nargs, kind, type, flags) #name,
-IRCALLDEF(IRCALLNAME)
-#undef IRCALLNAME
- NULL
-};
-
-static const char *const trace_errors[] = {
-#define TREDEF(name, msg) msg,
-#include "lj_traceerr.h"
- NULL
-};
-
-static const char *lower(char *buf, const char *s)
-{
- char *p = buf;
- while (*s) {
- *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s;
- s++;
- }
- *p = '\0';
- return buf;
-}
-
-/* Emit C source code for bytecode-related definitions. */
-static void emit_bcdef(BuildCtx *ctx)
-{
- int i;
- fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
- fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n");
- for (i = 0; i < ctx->npc; i++) {
- if (i != 0)
- fprintf(ctx->fp, ",\n");
- fprintf(ctx->fp, "%d", ctx->bc_ofs[i]);
- }
-}
-
-/* Emit VM definitions as Lua code for debug modules. */
-static void emit_vmdef(BuildCtx *ctx)
-{
- char buf[80];
- int i;
- fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n");
- fprintf(ctx->fp, "module(...)\n\n");
-
- fprintf(ctx->fp, "bcnames = \"");
- for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]);
- fprintf(ctx->fp, "\"\n\n");
-
- fprintf(ctx->fp, "irnames = \"");
- for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]);
- fprintf(ctx->fp, "\"\n\n");
-
- fprintf(ctx->fp, "irfpm = { [0]=");
- for (i = 0; irfpm_names[i]; i++)
- fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i]));
- fprintf(ctx->fp, "}\n\n");
-
- fprintf(ctx->fp, "irfield = { [0]=");
- for (i = 0; irfield_names[i]; i++) {
- char *p;
- lower(buf, irfield_names[i]);
- p = strchr(buf, '_');
- if (p) *p = '.';
- fprintf(ctx->fp, "\"%s\", ", buf);
- }
- fprintf(ctx->fp, "}\n\n");
-
- fprintf(ctx->fp, "ircall = {\n[0]=");
- for (i = 0; ircall_names[i]; i++)
- fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]);
- fprintf(ctx->fp, "}\n\n");
-
- fprintf(ctx->fp, "traceerr = {\n[0]=");
- for (i = 0; trace_errors[i]; i++)
- fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]);
- fprintf(ctx->fp, "}\n\n");
-}
-
-/* -- Argument parsing ---------------------------------------------------- */
-
-/* Build mode names. */
-static const char *const modenames[] = {
-#define BUILDNAME(name) #name,
-BUILDDEF(BUILDNAME)
-#undef BUILDNAME
- NULL
-};
-
-/* Print usage information and exit. */
-static void usage(void)
-{
- int i;
- fprintf(stderr, LUAJIT_VERSION " VM builder.\n");
- fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n");
- fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n");
- fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n");
- fprintf(stderr, "Available modes:\n");
- for (i = 0; i < BUILD__MAX; i++)
- fprintf(stderr, " %s\n", modenames[i]);
- exit(1);
-}
-
-/* Parse the output mode name. */
-static BuildMode parsemode(const char *mode)
-{
- int i;
- for (i = 0; modenames[i]; i++)
- if (!strcmp(mode, modenames[i]))
- return (BuildMode)i;
- usage();
- return (BuildMode)-1;
-}
-
-/* Parse arguments. */
-static void parseargs(BuildCtx *ctx, char **argv)
-{
- const char *a;
- int i;
- ctx->mode = (BuildMode)-1;
- ctx->outname = "-";
- for (i = 1; (a = argv[i]) != NULL; i++) {
- if (a[0] != '-')
- break;
- switch (a[1]) {
- case '-':
- if (a[2]) goto err;
- i++;
- goto ok;
- case '\0':
- goto ok;
- case 'm':
- i++;
- if (a[2] || argv[i] == NULL) goto err;
- ctx->mode = parsemode(argv[i]);
- break;
- case 'o':
- i++;
- if (a[2] || argv[i] == NULL) goto err;
- ctx->outname = argv[i];
- break;
- default: err:
- usage();
- break;
- }
- }
-ok:
- ctx->args = argv+i;
- if (ctx->mode == (BuildMode)-1) goto err;
-}
-
-int main(int argc, char **argv)
-{
- BuildCtx ctx_;
- BuildCtx *ctx = &ctx_;
- int status, binmode;
-
- if (sizeof(void *) != 4*LJ_32+8*LJ_64) {
- fprintf(stderr,"Error: pointer size mismatch in cross-build.\n");
- fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=...\n\n");
- return 1;
- }
-
- UNUSED(argc);
- parseargs(ctx, argv);
-
- if ((status = build_code(ctx))) {
- fprintf(stderr,"Error: DASM error %08x\n", status);
- return 1;
- }
-
- switch (ctx->mode) {
- case BUILD_peobj:
- case BUILD_raw:
- binmode = 1;
- break;
- default:
- binmode = 0;
- break;
- }
-
- if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') {
- ctx->fp = stdout;
-#if defined(_WIN32)
- if (binmode)
- _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */
-#endif
- } else if (!(ctx->fp = fopen(ctx->outname, binmode ? "wb" : "w"))) {
- fprintf(stderr, "Error: cannot open output file '%s': %s\n",
- ctx->outname, strerror(errno));
- exit(1);
- }
-
- switch (ctx->mode) {
- case BUILD_elfasm:
- case BUILD_coffasm:
- case BUILD_machasm:
- emit_asm(ctx);
- emit_asm_debug(ctx);
- break;
- case BUILD_peobj:
- emit_peobj(ctx);
- break;
- case BUILD_raw:
- emit_raw(ctx);
- break;
- case BUILD_bcdef:
- emit_bcdef(ctx);
- emit_lib(ctx);
- break;
- case BUILD_vmdef:
- emit_vmdef(ctx);
- emit_lib(ctx);
- break;
- case BUILD_ffdef:
- case BUILD_libdef:
- case BUILD_recdef:
- emit_lib(ctx);
- break;
- case BUILD_folddef:
- emit_fold(ctx);
- break;
- default:
- break;
- }
-
- fflush(ctx->fp);
- if (ferror(ctx->fp)) {
- fprintf(stderr, "Error: cannot write to output file: %s\n",
- strerror(errno));
- exit(1);
- }
- fclose(ctx->fp);
-
- return 0;
-}
-
+/*
+** LuaJIT VM builder.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** This is a tool to build the hand-tuned assembler code required for
+** LuaJIT's bytecode interpreter. It supports a variety of output formats
+** to feed different toolchains (see usage() below).
+**
+** This tool is not particularly optimized because it's only used while
+** _building_ LuaJIT. There's no point in distributing or installing it.
+** Only the object code generated by this tool is linked into LuaJIT.
+**
+** Caveat: some memory is not free'd, error handling is lazy.
+** It's a one-shot tool -- any effort fixing this would be wasted.
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_ircall.h"
+#include "lj_frame.h"
+#include "lj_dispatch.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_ccall.h"
+#endif
+#include "luajit.h"
+
+#if defined(_WIN32)
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+/* DynASM glue definitions. */
+#define Dst ctx
+#define Dst_DECL BuildCtx *ctx
+#define Dst_REF (ctx->D)
+#define DASM_CHECKS 1
+
+#include "../dynasm/dasm_proto.h"
+
+/* Glue macros for DynASM. */
+static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type);
+
+#define DASM_EXTERN(ctx, addr, idx, type) \
+ collect_reloc(ctx, addr, idx, type)
+
+/* ------------------------------------------------------------------------ */
+
+/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */
+#define DASM_ALIGNED_WRITES 1
+
+/* Embed architecture-specific DynASM encoder. */
+#if LJ_TARGET_X86ORX64
+#include "../dynasm/dasm_x86.h"
+#elif LJ_TARGET_ARM
+#include "../dynasm/dasm_arm.h"
+#elif LJ_TARGET_PPC
+#include "../dynasm/dasm_ppc.h"
+#elif LJ_TARGET_PPCSPE
+#include "../dynasm/dasm_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "../dynasm/dasm_mips.h"
+#else
+#error "No support for this architecture (yet)"
+#endif
+
+/* Embed generated architecture-specific backend. */
+#include "buildvm_arch.h"
+
+/* ------------------------------------------------------------------------ */
+
+void owrite(BuildCtx *ctx, const void *ptr, size_t sz)
+{
+ if (fwrite(ptr, 1, sz, ctx->fp) != sz) {
+ fprintf(stderr, "Error: cannot write to output file: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit code as raw bytes. Only used for DynASM debugging. */
+static void emit_raw(BuildCtx *ctx)
+{
+ owrite(ctx, ctx->code, ctx->codesz);
+}
+
+/* -- Build machine code -------------------------------------------------- */
+
+static const char *sym_decorate(BuildCtx *ctx,
+ const char *prefix, const char *suffix)
+{
+ char name[256];
+ char *p;
+#if LJ_64
+ const char *symprefix = ctx->mode == BUILD_machasm ? "_" : "";
+#elif LJ_TARGET_XBOX360
+ const char *symprefix = "";
+#else
+ const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : "";
+#endif
+ sprintf(name, "%s%s%s", symprefix, prefix, suffix);
+ p = strchr(name, '@');
+ if (p) {
+#if LJ_TARGET_X86ORX64
+ if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj))
+ name[0] = '@';
+ else
+ *p = '\0';
+#elif (LJ_TARGET_PPC || LJ_TARGET_PPCSPE) && !LJ_TARGET_CONSOLE
+ /* Keep @plt. */
+#else
+ *p = '\0';
+#endif
+ }
+ p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */
+ strcpy(p, name);
+ return p;
+}
+
+#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1)
+
+static int relocmap[NRELOCSYM];
+
+/* Collect external relocations. */
+static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type)
+{
+ if (ctx->nreloc >= BUILD_MAX_RELOC) {
+ fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n");
+ exit(1);
+ }
+ if (relocmap[idx] < 0) {
+ relocmap[idx] = ctx->nrelocsym;
+ ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]);
+ ctx->nrelocsym++;
+ }
+ ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code);
+ ctx->reloc[ctx->nreloc].sym = relocmap[idx];
+ ctx->reloc[ctx->nreloc].type = type;
+ ctx->nreloc++;
+#if LJ_TARGET_XBOX360
+ return (int)(ctx->code - addr) + 4; /* Encode symbol offset of .text. */
+#else
+ return 0; /* Encode symbol offset of 0. */
+#endif
+}
+
+/* Naive insertion sort. Performance doesn't matter here. */
+static void sym_insert(BuildCtx *ctx, int32_t ofs,
+ const char *prefix, const char *suffix)
+{
+ ptrdiff_t i = ctx->nsym++;
+ while (i > 0) {
+ if (ctx->sym[i-1].ofs <= ofs)
+ break;
+ ctx->sym[i] = ctx->sym[i-1];
+ i--;
+ }
+ ctx->sym[i].ofs = ofs;
+ ctx->sym[i].name = sym_decorate(ctx, prefix, suffix);
+}
+
+/* Build the machine code. */
+static int build_code(BuildCtx *ctx)
+{
+ int status;
+ int i;
+
+ /* Initialize DynASM structures. */
+ ctx->nglob = GLOB__MAX;
+ ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *));
+ memset(ctx->glob, 0, ctx->nglob*sizeof(void *));
+ ctx->nreloc = 0;
+
+ ctx->globnames = globnames;
+ ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *));
+ ctx->nrelocsym = 0;
+ for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1;
+
+ ctx->dasm_ident = DASM_IDENT;
+ ctx->dasm_arch = DASM_ARCH;
+
+ dasm_init(Dst, DASM_MAXSECTION);
+ dasm_setupglobal(Dst, ctx->glob, ctx->nglob);
+ dasm_setup(Dst, build_actionlist);
+
+ /* Call arch-specific backend to emit the code. */
+ ctx->npc = build_backend(ctx);
+
+ /* Finalize the code. */
+ (void)dasm_checkstep(Dst, -1);
+ if ((status = dasm_link(Dst, &ctx->codesz))) return status;
+ ctx->code = (uint8_t *)malloc(ctx->codesz);
+ if ((status = dasm_encode(Dst, (void *)ctx->code))) return status;
+
+ /* Allocate symbol table and bytecode offsets. */
+ ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin");
+ ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym));
+ ctx->nsym = 0;
+ ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t));
+
+ /* Collect the opcodes (PC labels). */
+ for (i = 0; i < ctx->npc; i++) {
+ int32_t ofs = dasm_getpclabel(Dst, i);
+ if (ofs < 0) return 0x22000000|i;
+ ctx->bc_ofs[i] = ofs;
+ if ((LJ_HASJIT ||
+ !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP ||
+ i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) &&
+ (LJ_HASFFI || i != BC_KCDATA))
+ sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]);
+ }
+
+ /* Collect the globals (named labels). */
+ for (i = 0; i < ctx->nglob; i++) {
+ const char *gl = globnames[i];
+ int len = (int)strlen(gl);
+ if (!ctx->glob[i]) {
+ fprintf(stderr, "Error: undefined global %s\n", gl);
+ exit(2);
+ }
+ /* Skip the _Z symbols. */
+ if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z'))
+ sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code),
+ LABEL_PREFIX, globnames[i]);
+ }
+
+ /* Close the address range. */
+ sym_insert(ctx, (int32_t)ctx->codesz, "", "");
+ ctx->nsym--;
+
+ dasm_free(Dst);
+
+ return 0;
+}
+
+/* -- Generate VM enums --------------------------------------------------- */
+
+const char *const bc_names[] = {
+#define BCNAME(name, ma, mb, mc, mt) #name,
+BCDEF(BCNAME)
+#undef BCNAME
+ NULL
+};
+
+const char *const ir_names[] = {
+#define IRNAME(name, m, m1, m2) #name,
+IRDEF(IRNAME)
+#undef IRNAME
+ NULL
+};
+
+const char *const irt_names[] = {
+#define IRTNAME(name, size) #name,
+IRTDEF(IRTNAME)
+#undef IRTNAME
+ NULL
+};
+
+const char *const irfpm_names[] = {
+#define FPMNAME(name) #name,
+IRFPMDEF(FPMNAME)
+#undef FPMNAME
+ NULL
+};
+
+const char *const irfield_names[] = {
+#define FLNAME(name, ofs) #name,
+IRFLDEF(FLNAME)
+#undef FLNAME
+ NULL
+};
+
+const char *const ircall_names[] = {
+#define IRCALLNAME(cond, name, nargs, kind, type, flags) #name,
+IRCALLDEF(IRCALLNAME)
+#undef IRCALLNAME
+ NULL
+};
+
+static const char *const trace_errors[] = {
+#define TREDEF(name, msg) msg,
+#include "lj_traceerr.h"
+ NULL
+};
+
+static const char *lower(char *buf, const char *s)
+{
+ char *p = buf;
+ while (*s) {
+ *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s;
+ s++;
+ }
+ *p = '\0';
+ return buf;
+}
+
+/* Emit C source code for bytecode-related definitions. */
+static void emit_bcdef(BuildCtx *ctx)
+{
+ int i;
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n");
+ for (i = 0; i < ctx->npc; i++) {
+ if (i != 0)
+ fprintf(ctx->fp, ",\n");
+ fprintf(ctx->fp, "%d", ctx->bc_ofs[i]);
+ }
+}
+
+/* Emit VM definitions as Lua code for debug modules. */
+static void emit_vmdef(BuildCtx *ctx)
+{
+ char buf[80];
+ int i;
+ fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n");
+ fprintf(ctx->fp, "module(...)\n\n");
+
+ fprintf(ctx->fp, "bcnames = \"");
+ for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]);
+ fprintf(ctx->fp, "\"\n\n");
+
+ fprintf(ctx->fp, "irnames = \"");
+ for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]);
+ fprintf(ctx->fp, "\"\n\n");
+
+ fprintf(ctx->fp, "irfpm = { [0]=");
+ for (i = 0; irfpm_names[i]; i++)
+ fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i]));
+ fprintf(ctx->fp, "}\n\n");
+
+ fprintf(ctx->fp, "irfield = { [0]=");
+ for (i = 0; irfield_names[i]; i++) {
+ char *p;
+ lower(buf, irfield_names[i]);
+ p = strchr(buf, '_');
+ if (p) *p = '.';
+ fprintf(ctx->fp, "\"%s\", ", buf);
+ }
+ fprintf(ctx->fp, "}\n\n");
+
+ fprintf(ctx->fp, "ircall = {\n[0]=");
+ for (i = 0; ircall_names[i]; i++)
+ fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]);
+ fprintf(ctx->fp, "}\n\n");
+
+ fprintf(ctx->fp, "traceerr = {\n[0]=");
+ for (i = 0; trace_errors[i]; i++)
+ fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]);
+ fprintf(ctx->fp, "}\n\n");
+}
+
+/* -- Argument parsing ---------------------------------------------------- */
+
+/* Build mode names. */
+static const char *const modenames[] = {
+#define BUILDNAME(name) #name,
+BUILDDEF(BUILDNAME)
+#undef BUILDNAME
+ NULL
+};
+
+/* Print usage information and exit. */
+static void usage(void)
+{
+ int i;
+ fprintf(stderr, LUAJIT_VERSION " VM builder.\n");
+ fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n");
+ fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n");
+ fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n");
+ fprintf(stderr, "Available modes:\n");
+ for (i = 0; i < BUILD__MAX; i++)
+ fprintf(stderr, " %s\n", modenames[i]);
+ exit(1);
+}
+
+/* Parse the output mode name. */
+static BuildMode parsemode(const char *mode)
+{
+ int i;
+ for (i = 0; modenames[i]; i++)
+ if (!strcmp(mode, modenames[i]))
+ return (BuildMode)i;
+ usage();
+ return (BuildMode)-1;
+}
+
+/* Parse arguments. */
+static void parseargs(BuildCtx *ctx, char **argv)
+{
+ const char *a;
+ int i;
+ ctx->mode = (BuildMode)-1;
+ ctx->outname = "-";
+ for (i = 1; (a = argv[i]) != NULL; i++) {
+ if (a[0] != '-')
+ break;
+ switch (a[1]) {
+ case '-':
+ if (a[2]) goto err;
+ i++;
+ goto ok;
+ case '\0':
+ goto ok;
+ case 'm':
+ i++;
+ if (a[2] || argv[i] == NULL) goto err;
+ ctx->mode = parsemode(argv[i]);
+ break;
+ case 'o':
+ i++;
+ if (a[2] || argv[i] == NULL) goto err;
+ ctx->outname = argv[i];
+ break;
+ default: err:
+ usage();
+ break;
+ }
+ }
+ok:
+ ctx->args = argv+i;
+ if (ctx->mode == (BuildMode)-1) goto err;
+}
+
+int main(int argc, char **argv)
+{
+ BuildCtx ctx_;
+ BuildCtx *ctx = &ctx_;
+ int status, binmode;
+
+ if (sizeof(void *) != 4*LJ_32+8*LJ_64) {
+ fprintf(stderr,"Error: pointer size mismatch in cross-build.\n");
+ fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=...\n\n");
+ return 1;
+ }
+
+ UNUSED(argc);
+ parseargs(ctx, argv);
+
+ if ((status = build_code(ctx))) {
+ fprintf(stderr,"Error: DASM error %08x\n", status);
+ return 1;
+ }
+
+ switch (ctx->mode) {
+ case BUILD_peobj:
+ case BUILD_raw:
+ binmode = 1;
+ break;
+ default:
+ binmode = 0;
+ break;
+ }
+
+ if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') {
+ ctx->fp = stdout;
+#if defined(_WIN32)
+ if (binmode)
+ _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */
+#endif
+ } else if (!(ctx->fp = fopen(ctx->outname, binmode ? "wb" : "w"))) {
+ fprintf(stderr, "Error: cannot open output file '%s': %s\n",
+ ctx->outname, strerror(errno));
+ exit(1);
+ }
+
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ case BUILD_coffasm:
+ case BUILD_machasm:
+ emit_asm(ctx);
+ emit_asm_debug(ctx);
+ break;
+ case BUILD_peobj:
+ emit_peobj(ctx);
+ break;
+ case BUILD_raw:
+ emit_raw(ctx);
+ break;
+ case BUILD_bcdef:
+ emit_bcdef(ctx);
+ emit_lib(ctx);
+ break;
+ case BUILD_vmdef:
+ emit_vmdef(ctx);
+ emit_lib(ctx);
+ break;
+ case BUILD_ffdef:
+ case BUILD_libdef:
+ case BUILD_recdef:
+ emit_lib(ctx);
+ break;
+ case BUILD_folddef:
+ emit_fold(ctx);
+ break;
+ default:
+ break;
+ }
+
+ fflush(ctx->fp);
+ if (ferror(ctx->fp)) {
+ fprintf(stderr, "Error: cannot write to output file: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+ fclose(ctx->fp);
+
+ return 0;
+}
+
diff --git a/3rdparty/lua/src/host/buildvm.h b/3rdparty/lua/src/host/buildvm.h
index bad439f..1a037e1 100644
--- a/3rdparty/lua/src/host/buildvm.h
+++ b/3rdparty/lua/src/host/buildvm.h
@@ -1,104 +1,104 @@
-/*
-** LuaJIT VM builder.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _BUILDVM_H
-#define _BUILDVM_H
-
-#include <sys/types.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* Hardcoded limits. Increase as needed. */
-#define BUILD_MAX_RELOC 200 /* Max. number of relocations. */
-#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */
-
-/* Prefix for scanned library definitions. */
-#define LIBDEF_PREFIX "LJLIB_"
-
-/* Prefix for scanned fold definitions. */
-#define FOLDDEF_PREFIX "LJFOLD"
-
-/* Prefixes for generated labels. */
-#define LABEL_PREFIX "lj_"
-#define LABEL_PREFIX_BC LABEL_PREFIX "BC_"
-#define LABEL_PREFIX_FF LABEL_PREFIX "ff_"
-#define LABEL_PREFIX_CF LABEL_PREFIX "cf_"
-#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_"
-#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_"
-#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_"
-
-/* Forward declaration. */
-struct dasm_State;
-
-/* Build modes. */
-#define BUILDDEF(_) \
- _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \
- _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \
- _(folddef)
-
-typedef enum {
-#define BUILDENUM(name) BUILD_##name,
-BUILDDEF(BUILDENUM)
-#undef BUILDENUM
- BUILD__MAX
-} BuildMode;
-
-/* Code relocation. */
-typedef struct BuildReloc {
- int32_t ofs;
- int sym;
- int type;
-} BuildReloc;
-
-typedef struct BuildSym {
- const char *name;
- int32_t ofs;
-} BuildSym;
-
-/* Build context structure. */
-typedef struct BuildCtx {
- /* DynASM state pointer. Should be first member. */
- struct dasm_State *D;
- /* Parsed command line. */
- BuildMode mode;
- FILE *fp;
- const char *outname;
- char **args;
- /* Code and symbols generated by DynASM. */
- uint8_t *code;
- size_t codesz;
- int npc, nglob, nsym, nreloc, nrelocsym;
- void **glob;
- BuildSym *sym;
- const char **relocsym;
- int32_t *bc_ofs;
- const char *beginsym;
- /* Strings generated by DynASM. */
- const char *const *globnames;
- const char *dasm_ident;
- const char *dasm_arch;
- /* Relocations. */
- BuildReloc reloc[BUILD_MAX_RELOC];
-} BuildCtx;
-
-extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz);
-extern void emit_asm(BuildCtx *ctx);
-extern void emit_peobj(BuildCtx *ctx);
-extern void emit_lib(BuildCtx *ctx);
-extern void emit_fold(BuildCtx *ctx);
-
-extern const char *const bc_names[];
-extern const char *const ir_names[];
-extern const char *const irt_names[];
-extern const char *const irfpm_names[];
-extern const char *const irfield_names[];
-extern const char *const ircall_names[];
-
-#endif
+/*
+** LuaJIT VM builder.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _BUILDVM_H
+#define _BUILDVM_H
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* Hardcoded limits. Increase as needed. */
+#define BUILD_MAX_RELOC 200 /* Max. number of relocations. */
+#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */
+
+/* Prefix for scanned library definitions. */
+#define LIBDEF_PREFIX "LJLIB_"
+
+/* Prefix for scanned fold definitions. */
+#define FOLDDEF_PREFIX "LJFOLD"
+
+/* Prefixes for generated labels. */
+#define LABEL_PREFIX "lj_"
+#define LABEL_PREFIX_BC LABEL_PREFIX "BC_"
+#define LABEL_PREFIX_FF LABEL_PREFIX "ff_"
+#define LABEL_PREFIX_CF LABEL_PREFIX "cf_"
+#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_"
+#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_"
+#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_"
+
+/* Forward declaration. */
+struct dasm_State;
+
+/* Build modes. */
+#define BUILDDEF(_) \
+ _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \
+ _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \
+ _(folddef)
+
+typedef enum {
+#define BUILDENUM(name) BUILD_##name,
+BUILDDEF(BUILDENUM)
+#undef BUILDENUM
+ BUILD__MAX
+} BuildMode;
+
+/* Code relocation. */
+typedef struct BuildReloc {
+ int32_t ofs;
+ int sym;
+ int type;
+} BuildReloc;
+
+typedef struct BuildSym {
+ const char *name;
+ int32_t ofs;
+} BuildSym;
+
+/* Build context structure. */
+typedef struct BuildCtx {
+ /* DynASM state pointer. Should be first member. */
+ struct dasm_State *D;
+ /* Parsed command line. */
+ BuildMode mode;
+ FILE *fp;
+ const char *outname;
+ char **args;
+ /* Code and symbols generated by DynASM. */
+ uint8_t *code;
+ size_t codesz;
+ int npc, nglob, nsym, nreloc, nrelocsym;
+ void **glob;
+ BuildSym *sym;
+ const char **relocsym;
+ int32_t *bc_ofs;
+ const char *beginsym;
+ /* Strings generated by DynASM. */
+ const char *const *globnames;
+ const char *dasm_ident;
+ const char *dasm_arch;
+ /* Relocations. */
+ BuildReloc reloc[BUILD_MAX_RELOC];
+} BuildCtx;
+
+extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz);
+extern void emit_asm(BuildCtx *ctx);
+extern void emit_peobj(BuildCtx *ctx);
+extern void emit_lib(BuildCtx *ctx);
+extern void emit_fold(BuildCtx *ctx);
+
+extern const char *const bc_names[];
+extern const char *const ir_names[];
+extern const char *const irt_names[];
+extern const char *const irfpm_names[];
+extern const char *const irfield_names[];
+extern const char *const ircall_names[];
+
+#endif
diff --git a/3rdparty/lua/src/host/buildvm_asm.c b/3rdparty/lua/src/host/buildvm_asm.c
index 4fe1f03..f18d149 100644
--- a/3rdparty/lua/src/host/buildvm_asm.c
+++ b/3rdparty/lua/src/host/buildvm_asm.c
@@ -1,313 +1,313 @@
-/*
-** LuaJIT VM builder: Assembler source code emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "buildvm.h"
-#include "lj_bc.h"
-
-/* ------------------------------------------------------------------------ */
-
-#if LJ_TARGET_X86ORX64
-/* Emit bytes piecewise as assembler text. */
-static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n)
-{
- int i;
- for (i = 0; i < n; i++) {
- if ((i & 15) == 0)
- fprintf(ctx->fp, "\t.byte %d", p[i]);
- else
- fprintf(ctx->fp, ",%d", p[i]);
- if ((i & 15) == 15) putc('\n', ctx->fp);
- }
- if ((n & 15) != 0) putc('\n', ctx->fp);
-}
-
-/* Emit relocation */
-static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym)
-{
- switch (ctx->mode) {
- case BUILD_elfasm:
- if (type)
- fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
- else
- fprintf(ctx->fp, "\t.long %s\n", sym);
- break;
- case BUILD_coffasm:
- fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym);
- if (type)
- fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
- else
- fprintf(ctx->fp, "\t.long %s\n", sym);
- break;
- default: /* BUILD_machasm for relative relocations handled below. */
- fprintf(ctx->fp, "\t.long %s\n", sym);
- break;
- }
-}
-
-static const char *const jccnames[] = {
- "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja",
- "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg"
-};
-
-/* Emit relocation for the incredibly stupid OSX assembler. */
-static void emit_asm_reloc_mach(BuildCtx *ctx, uint8_t *cp, int n,
- const char *sym)
-{
- const char *opname = NULL;
- if (--n < 0) goto err;
- if (cp[n] == 0xe8) {
- opname = "call";
- } else if (cp[n] == 0xe9) {
- opname = "jmp";
- } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) {
- opname = jccnames[cp[n]-0x80];
- n--;
- } else {
-err:
- fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n",
- sym);
- exit(1);
- }
- emit_asm_bytes(ctx, cp, n);
- fprintf(ctx->fp, "\t%s %s\n", opname, sym);
-}
-#else
-/* Emit words piecewise as assembler text. */
-static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n)
-{
- int i;
- for (i = 0; i < n; i += 4) {
- if ((i & 15) == 0)
- fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i));
- else
- fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i));
- if ((i & 15) == 12) putc('\n', ctx->fp);
- }
- if ((n & 15) != 0) putc('\n', ctx->fp);
-}
-
-/* Emit relocation as part of an instruction. */
-static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n,
- const char *sym)
-{
- uint32_t ins;
- emit_asm_words(ctx, p, n-4);
- ins = *(uint32_t *)(p+n-4);
-#if LJ_TARGET_ARM
- if ((ins & 0xff000000u) == 0xfa000000u) {
- fprintf(ctx->fp, "\tblx %s\n", sym);
- } else if ((ins & 0x0e000000u) == 0x0a000000u) {
- fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b",
- &"eqnecsccmiplvsvchilsgeltgtle"[2*(ins >> 28)], sym);
- } else {
- fprintf(stderr,
- "Error: unsupported opcode %08x for %s symbol relocation.\n",
- ins, sym);
- exit(1);
- }
-#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE
-#if LJ_TARGET_PS3
-#define TOCPREFIX "."
-#else
-#define TOCPREFIX ""
-#endif
- if ((ins >> 26) == 16) {
- fprintf(ctx->fp, "\t%s %d, %d, " TOCPREFIX "%s\n",
- (ins & 1) ? "bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym);
- } else if ((ins >> 26) == 18) {
- fprintf(ctx->fp, "\t%s " TOCPREFIX "%s\n", (ins & 1) ? "bl" : "b", sym);
- } else {
- fprintf(stderr,
- "Error: unsupported opcode %08x for %s symbol relocation.\n",
- ins, sym);
- exit(1);
- }
-#elif LJ_TARGET_MIPS
- fprintf(stderr,
- "Error: unsupported opcode %08x for %s symbol relocation.\n",
- ins, sym);
- exit(1);
-#else
-#error "missing relocation support for this architecture"
-#endif
-}
-#endif
-
-#if LJ_TARGET_ARM
-#define ELFASM_PX "%%"
-#else
-#define ELFASM_PX "@"
-#endif
-
-/* Emit an assembler label. */
-static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc)
-{
- switch (ctx->mode) {
- case BUILD_elfasm:
-#if LJ_TARGET_PS3
- if (!strncmp(name, "lj_vm_", 6) &&
- strcmp(name, ctx->beginsym) &&
- !strstr(name, "hook")) {
- fprintf(ctx->fp,
- "\n\t.globl %s\n"
- "\t.section \".opd\",\"aw\"\n"
- "%s:\n"
- "\t.long .%s,.TOC.@tocbase32\n"
- "\t.size %s,8\n"
- "\t.previous\n"
- "\t.globl .%s\n"
- "\t.hidden .%s\n"
- "\t.type .%s, " ELFASM_PX "function\n"
- "\t.size .%s, %d\n"
- ".%s:\n",
- name, name, name, name, name, name, name, name, size, name);
- break;
- }
-#endif
- fprintf(ctx->fp,
- "\n\t.globl %s\n"
- "\t.hidden %s\n"
- "\t.type %s, " ELFASM_PX "%s\n"
- "\t.size %s, %d\n"
- "%s:\n",
- name, name, name, isfunc ? "function" : "object", name, size, name);
- break;
- case BUILD_coffasm:
- fprintf(ctx->fp, "\n\t.globl %s\n", name);
- if (isfunc)
- fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name);
- fprintf(ctx->fp, "%s:\n", name);
- break;
- case BUILD_machasm:
- fprintf(ctx->fp,
- "\n\t.private_extern %s\n"
- "%s:\n", name, name);
- break;
- default:
- break;
- }
-}
-
-/* Emit alignment. */
-static void emit_asm_align(BuildCtx *ctx, int bits)
-{
- switch (ctx->mode) {
- case BUILD_elfasm:
- case BUILD_coffasm:
- fprintf(ctx->fp, "\t.p2align %d\n", bits);
- break;
- case BUILD_machasm:
- fprintf(ctx->fp, "\t.align %d\n", bits);
- break;
- default:
- break;
- }
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* Emit assembler source code. */
-void emit_asm(BuildCtx *ctx)
-{
- int i, rel;
-
- fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch);
- fprintf(ctx->fp, "\t.text\n");
- emit_asm_align(ctx, 4);
-
-#if LJ_TARGET_PS3
- emit_asm_label(ctx, ctx->beginsym, ctx->codesz, 0);
-#else
- emit_asm_label(ctx, ctx->beginsym, 0, 0);
-#endif
- if (ctx->mode != BUILD_machasm)
- fprintf(ctx->fp, ".Lbegin:\n");
-
-#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
- /* This should really be moved into buildvm_arm.dasc. */
- fprintf(ctx->fp,
- ".fnstart\n"
- ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
- ".pad #28\n");
-#endif
-#if LJ_TARGET_MIPS
- fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n");
-#endif
-
- for (i = rel = 0; i < ctx->nsym; i++) {
- int32_t ofs = ctx->sym[i].ofs;
- int32_t next = ctx->sym[i+1].ofs;
-#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND && LJ_HASFFI
- if (!strcmp(ctx->sym[i].name, "lj_vm_ffi_call"))
- fprintf(ctx->fp,
- ".globl lj_err_unwind_arm\n"
- ".personality lj_err_unwind_arm\n"
- ".fnend\n"
- ".fnstart\n"
- ".save {r4, r5, r11, lr}\n"
- ".setfp r11, sp\n");
-#endif
- emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1);
- while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) {
- BuildReloc *r = &ctx->reloc[rel];
- int n = r->ofs - ofs;
-#if LJ_TARGET_X86ORX64
- if (ctx->mode == BUILD_machasm && r->type != 0) {
- emit_asm_reloc_mach(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
- } else {
- emit_asm_bytes(ctx, ctx->code+ofs, n);
- emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]);
- }
- ofs += n+4;
-#else
- emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
- ofs += n;
-#endif
- rel++;
- }
-#if LJ_TARGET_X86ORX64
- emit_asm_bytes(ctx, ctx->code+ofs, next-ofs);
-#else
- emit_asm_words(ctx, ctx->code+ofs, next-ofs);
-#endif
- }
-
-#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
- fprintf(ctx->fp,
-#if !LJ_HASFFI
- ".globl lj_err_unwind_arm\n"
- ".personality lj_err_unwind_arm\n"
-#endif
- ".fnend\n");
-#endif
-
- fprintf(ctx->fp, "\n");
- switch (ctx->mode) {
- case BUILD_elfasm:
-#if !(LJ_TARGET_PS3 || LJ_TARGET_PSVITA)
- fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n");
-#endif
-#if LJ_TARGET_PPCSPE
- /* Soft-float ABI + SPE. */
- fprintf(ctx->fp, "\t.gnu_attribute 4, 2\n\t.gnu_attribute 8, 3\n");
-#elif LJ_TARGET_PPC && !LJ_TARGET_PS3
- /* Hard-float ABI. */
- fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n");
-#endif
- /* fallthrough */
- case BUILD_coffasm:
- fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident);
- break;
- case BUILD_machasm:
- fprintf(ctx->fp,
- "\t.cstring\n"
- "\t.ascii \"%s\\0\"\n", ctx->dasm_ident);
- break;
- default:
- break;
- }
- fprintf(ctx->fp, "\n");
-}
-
+/*
+** LuaJIT VM builder: Assembler source code emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_bc.h"
+
+/* ------------------------------------------------------------------------ */
+
+#if LJ_TARGET_X86ORX64
+/* Emit bytes piecewise as assembler text. */
+static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if ((i & 15) == 0)
+ fprintf(ctx->fp, "\t.byte %d", p[i]);
+ else
+ fprintf(ctx->fp, ",%d", p[i]);
+ if ((i & 15) == 15) putc('\n', ctx->fp);
+ }
+ if ((n & 15) != 0) putc('\n', ctx->fp);
+}
+
+/* Emit relocation */
+static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ if (type)
+ fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
+ else
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym);
+ if (type)
+ fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
+ else
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ default: /* BUILD_machasm for relative relocations handled below. */
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ }
+}
+
+static const char *const jccnames[] = {
+ "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja",
+ "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg"
+};
+
+/* Emit relocation for the incredibly stupid OSX assembler. */
+static void emit_asm_reloc_mach(BuildCtx *ctx, uint8_t *cp, int n,
+ const char *sym)
+{
+ const char *opname = NULL;
+ if (--n < 0) goto err;
+ if (cp[n] == 0xe8) {
+ opname = "call";
+ } else if (cp[n] == 0xe9) {
+ opname = "jmp";
+ } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) {
+ opname = jccnames[cp[n]-0x80];
+ n--;
+ } else {
+err:
+ fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n",
+ sym);
+ exit(1);
+ }
+ emit_asm_bytes(ctx, cp, n);
+ fprintf(ctx->fp, "\t%s %s\n", opname, sym);
+}
+#else
+/* Emit words piecewise as assembler text. */
+static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i += 4) {
+ if ((i & 15) == 0)
+ fprintf(ctx->fp, "\t.long 0x%08x", *(uint32_t *)(p+i));
+ else
+ fprintf(ctx->fp, ",0x%08x", *(uint32_t *)(p+i));
+ if ((i & 15) == 12) putc('\n', ctx->fp);
+ }
+ if ((n & 15) != 0) putc('\n', ctx->fp);
+}
+
+/* Emit relocation as part of an instruction. */
+static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n,
+ const char *sym)
+{
+ uint32_t ins;
+ emit_asm_words(ctx, p, n-4);
+ ins = *(uint32_t *)(p+n-4);
+#if LJ_TARGET_ARM
+ if ((ins & 0xff000000u) == 0xfa000000u) {
+ fprintf(ctx->fp, "\tblx %s\n", sym);
+ } else if ((ins & 0x0e000000u) == 0x0a000000u) {
+ fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b",
+ "eqnecsccmiplvsvchilsgeltgtle" + 2*(ins >> 28), sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE
+#if LJ_TARGET_PS3
+#define TOCPREFIX "."
+#else
+#define TOCPREFIX ""
+#endif
+ if ((ins >> 26) == 16) {
+ fprintf(ctx->fp, "\t%s %d, %d, " TOCPREFIX "%s\n",
+ (ins & 1) ? "bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym);
+ } else if ((ins >> 26) == 18) {
+ fprintf(ctx->fp, "\t%s " TOCPREFIX "%s\n", (ins & 1) ? "bl" : "b", sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_MIPS
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+#else
+#error "missing relocation support for this architecture"
+#endif
+}
+#endif
+
+#if LJ_TARGET_ARM
+#define ELFASM_PX "%%"
+#else
+#define ELFASM_PX "@"
+#endif
+
+/* Emit an assembler label. */
+static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+#if LJ_TARGET_PS3
+ if (!strncmp(name, "lj_vm_", 6) &&
+ strcmp(name, ctx->beginsym) &&
+ !strstr(name, "hook")) {
+ fprintf(ctx->fp,
+ "\n\t.globl %s\n"
+ "\t.section \".opd\",\"aw\"\n"
+ "%s:\n"
+ "\t.long .%s,.TOC.@tocbase32\n"
+ "\t.size %s,8\n"
+ "\t.previous\n"
+ "\t.globl .%s\n"
+ "\t.hidden .%s\n"
+ "\t.type .%s, " ELFASM_PX "function\n"
+ "\t.size .%s, %d\n"
+ ".%s:\n",
+ name, name, name, name, name, name, name, name, size, name);
+ break;
+ }
+#endif
+ fprintf(ctx->fp,
+ "\n\t.globl %s\n"
+ "\t.hidden %s\n"
+ "\t.type %s, " ELFASM_PX "%s\n"
+ "\t.size %s, %d\n"
+ "%s:\n",
+ name, name, name, isfunc ? "function" : "object", name, size, name);
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\n\t.globl %s\n", name);
+ if (isfunc)
+ fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name);
+ fprintf(ctx->fp, "%s:\n", name);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp,
+ "\n\t.private_extern %s\n"
+ "%s:\n", name, name);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Emit alignment. */
+static void emit_asm_align(BuildCtx *ctx, int bits)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.p2align %d\n", bits);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp, "\t.align %d\n", bits);
+ break;
+ default:
+ break;
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit assembler source code. */
+void emit_asm(BuildCtx *ctx)
+{
+ int i, rel;
+
+ fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch);
+ fprintf(ctx->fp, "\t.text\n");
+ emit_asm_align(ctx, 4);
+
+#if LJ_TARGET_PS3
+ emit_asm_label(ctx, ctx->beginsym, ctx->codesz, 0);
+#else
+ emit_asm_label(ctx, ctx->beginsym, 0, 0);
+#endif
+ if (ctx->mode != BUILD_machasm)
+ fprintf(ctx->fp, ".Lbegin:\n");
+
+#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
+ /* This should really be moved into buildvm_arm.dasc. */
+ fprintf(ctx->fp,
+ ".fnstart\n"
+ ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
+ ".pad #28\n");
+#endif
+#if LJ_TARGET_MIPS
+ fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n");
+#endif
+
+ for (i = rel = 0; i < ctx->nsym; i++) {
+ int32_t ofs = ctx->sym[i].ofs;
+ int32_t next = ctx->sym[i+1].ofs;
+#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND && LJ_HASFFI
+ if (!strcmp(ctx->sym[i].name, "lj_vm_ffi_call"))
+ fprintf(ctx->fp,
+ ".globl lj_err_unwind_arm\n"
+ ".personality lj_err_unwind_arm\n"
+ ".fnend\n"
+ ".fnstart\n"
+ ".save {r4, r5, r11, lr}\n"
+ ".setfp r11, sp\n");
+#endif
+ emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1);
+ while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) {
+ BuildReloc *r = &ctx->reloc[rel];
+ int n = r->ofs - ofs;
+#if LJ_TARGET_X86ORX64
+ if (ctx->mode == BUILD_machasm && r->type != 0) {
+ emit_asm_reloc_mach(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+ } else {
+ emit_asm_bytes(ctx, ctx->code+ofs, n);
+ emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]);
+ }
+ ofs += n+4;
+#else
+ emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+ ofs += n;
+#endif
+ rel++;
+ }
+#if LJ_TARGET_X86ORX64
+ emit_asm_bytes(ctx, ctx->code+ofs, next-ofs);
+#else
+ emit_asm_words(ctx, ctx->code+ofs, next-ofs);
+#endif
+ }
+
+#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
+ fprintf(ctx->fp,
+#if !LJ_HASFFI
+ ".globl lj_err_unwind_arm\n"
+ ".personality lj_err_unwind_arm\n"
+#endif
+ ".fnend\n");
+#endif
+
+ fprintf(ctx->fp, "\n");
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+#if !LJ_TARGET_PS3
+ fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n");
+#endif
+#if LJ_TARGET_PPCSPE
+ /* Soft-float ABI + SPE. */
+ fprintf(ctx->fp, "\t.gnu_attribute 4, 2\n\t.gnu_attribute 8, 3\n");
+#elif LJ_TARGET_PPC && !LJ_TARGET_PS3
+ /* Hard-float ABI. */
+ fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n");
+#endif
+ /* fallthrough */
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp,
+ "\t.cstring\n"
+ "\t.ascii \"%s\\0\"\n", ctx->dasm_ident);
+ break;
+ default:
+ break;
+ }
+ fprintf(ctx->fp, "\n");
+}
+
diff --git a/3rdparty/lua/src/host/buildvm_fold.c b/3rdparty/lua/src/host/buildvm_fold.c
index 22385e3..085a4c3 100644
--- a/3rdparty/lua/src/host/buildvm_fold.c
+++ b/3rdparty/lua/src/host/buildvm_fold.c
@@ -1,229 +1,229 @@
-/*
-** LuaJIT VM builder: IR folding hash table generator.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "buildvm.h"
-#include "lj_obj.h"
-#include "lj_ir.h"
-
-/* Context for the folding hash table generator. */
-static int lineno;
-static int funcidx;
-static uint32_t foldkeys[BUILD_MAX_FOLD];
-static uint32_t nkeys;
-
-/* Try to fill the hash table with keys using the hash parameters. */
-static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol)
-{
- uint32_t i;
- if (dorol && ((r & 31) == 0 || (r>>5) == 0))
- return 0; /* Avoid zero rotates. */
- memset(htab, 0xff, (sz+1)*sizeof(uint32_t));
- for (i = 0; i < nkeys; i++) {
- uint32_t key = foldkeys[i];
- uint32_t k = key & 0xffffff;
- uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) :
- (((k << (r>>5)) - k) << (r&31))) % sz;
- if (htab[h] != 0xffffffff) { /* Collision on primary slot. */
- if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */
- /* Try to move the colliding key, if possible. */
- if (h < sz-1 && htab[h+2] == 0xffffffff) {
- uint32_t k2 = htab[h+1] & 0xffffff;
- uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) :
- (((k2 << (r>>5)) - k2) << (r&31))) % sz;
- if (h2 != h+1) return 0; /* Cannot resolve collision. */
- htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */
- } else {
- return 0; /* Collision. */
- }
- }
- htab[h+1] = key;
- } else {
- htab[h] = key;
- }
- }
- return 1; /* Success, all keys could be stored. */
-}
-
-/* Print the generated hash table. */
-static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz)
-{
- uint32_t i;
- fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x",
- sz+1, htab[0]);
- for (i = 1; i < sz+1; i++)
- fprintf(ctx->fp, ",\n0x%08x", htab[i]);
- fprintf(ctx->fp, "\n};\n\n");
-}
-
-/* Exhaustive search for the shortest semi-perfect hash table. */
-static void makehash(BuildCtx *ctx)
-{
- uint32_t htab[BUILD_MAX_FOLD*2+1];
- uint32_t sz, r;
- /* Search for the smallest hash table with an odd size. */
- for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) {
- /* First try all shift hash combinations. */
- for (r = 0; r < 32*32; r++) {
- if (tryhash(htab, sz, r, 0)) {
- printhash(ctx, htab, sz);
- fprintf(ctx->fp,
- "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n",
- r>>5, r&31, sz);
- return;
- }
- }
- /* Then try all rotate hash combinations. */
- for (r = 0; r < 32*32; r++) {
- if (tryhash(htab, sz, r, 1)) {
- printhash(ctx, htab, sz);
- fprintf(ctx->fp,
- "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n",
- r>>5, r&31, sz);
- return;
- }
- }
- }
- fprintf(stderr, "Error: search for perfect hash failed\n");
- exit(1);
-}
-
-/* Parse one token of a fold rule. */
-static uint32_t nexttoken(char **pp, int allowlit, int allowany)
-{
- char *p = *pp;
- if (p) {
- uint32_t i;
- char *q = strchr(p, ' ');
- if (q) *q++ = '\0';
- *pp = q;
- if (allowlit && !strncmp(p, "IRFPM_", 6)) {
- for (i = 0; irfpm_names[i]; i++)
- if (!strcmp(irfpm_names[i], p+6))
- return i;
- } else if (allowlit && !strncmp(p, "IRFL_", 5)) {
- for (i = 0; irfield_names[i]; i++)
- if (!strcmp(irfield_names[i], p+5))
- return i;
- } else if (allowlit && !strncmp(p, "IRCALL_", 7)) {
- for (i = 0; ircall_names[i]; i++)
- if (!strcmp(ircall_names[i], p+7))
- return i;
- } else if (allowlit && !strncmp(p, "IRCONV_", 7)) {
- for (i = 0; irt_names[i]; i++) {
- const char *r = strchr(p+7, '_');
- if (r && !strncmp(irt_names[i], p+7, r-(p+7))) {
- uint32_t j;
- for (j = 0; irt_names[j]; j++)
- if (!strcmp(irt_names[j], r+1))
- return (i << 5) + j;
- }
- }
- } else if (allowlit && *p >= '0' && *p <= '9') {
- for (i = 0; *p >= '0' && *p <= '9'; p++)
- i = i*10 + (*p - '0');
- if (*p == '\0')
- return i;
- } else if (allowany && !strcmp("any", p)) {
- return allowany;
- } else {
- for (i = 0; ir_names[i]; i++)
- if (!strcmp(ir_names[i], p))
- return i;
- }
- fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno);
- exit(1);
- }
- return 0;
-}
-
-/* Parse a fold rule. */
-static void foldrule(char *p)
-{
- uint32_t op = nexttoken(&p, 0, 0);
- uint32_t left = nexttoken(&p, 0, 0x7f);
- uint32_t right = nexttoken(&p, 1, 0x3ff);
- uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right;
- uint32_t i;
- if (nkeys >= BUILD_MAX_FOLD) {
- fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n");
- exit(1);
- }
- /* Simple insertion sort to detect duplicates. */
- for (i = nkeys; i > 0; i--) {
- if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff))
- break;
- if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) {
- fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno);
- exit(1);
- }
- foldkeys[i] = foldkeys[i-1];
- }
- foldkeys[i] = key;
- nkeys++;
-}
-
-/* Emit C source code for IR folding hash table. */
-void emit_fold(BuildCtx *ctx)
-{
- char buf[256]; /* We don't care about analyzing lines longer than that. */
- const char *fname = ctx->args[0];
- FILE *fp;
-
- if (fname == NULL) {
- fprintf(stderr, "Error: missing input filename\n");
- exit(1);
- }
-
- if (fname[0] == '-' && fname[1] == '\0') {
- fp = stdin;
- } else {
- fp = fopen(fname, "r");
- if (!fp) {
- fprintf(stderr, "Error: cannot open input file '%s': %s\n",
- fname, strerror(errno));
- exit(1);
- }
- }
-
- fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
- fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n");
-
- lineno = 0;
- funcidx = 0;
- nkeys = 0;
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- lineno++;
- /* The prefix must be at the start of a line, otherwise it's ignored. */
- if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) {
- char *p = buf+sizeof(FOLDDEF_PREFIX)-1;
- char *q = strchr(p, ')');
- if (p[0] == '(' && q) {
- p++;
- *q = '\0';
- foldrule(p);
- } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) {
- p += 2;
- *q = '\0';
- if (funcidx)
- fprintf(ctx->fp, ",\n");
- if (p[-2] == 'X')
- fprintf(ctx->fp, " %s", p);
- else
- fprintf(ctx->fp, " fold_%s", p);
- funcidx++;
- } else {
- buf[strlen(buf)-1] = '\0';
- fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n",
- FOLDDEF_PREFIX, p, lineno);
- exit(1);
- }
- }
- }
- fclose(fp);
- fprintf(ctx->fp, "\n};\n\n");
-
- makehash(ctx);
-}
-
+/*
+** LuaJIT VM builder: IR folding hash table generator.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_ir.h"
+
+/* Context for the folding hash table generator. */
+static int lineno;
+static int funcidx;
+static uint32_t foldkeys[BUILD_MAX_FOLD];
+static uint32_t nkeys;
+
+/* Try to fill the hash table with keys using the hash parameters. */
+static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol)
+{
+ uint32_t i;
+ if (dorol && ((r & 31) == 0 || (r>>5) == 0))
+ return 0; /* Avoid zero rotates. */
+ memset(htab, 0xff, (sz+1)*sizeof(uint32_t));
+ for (i = 0; i < nkeys; i++) {
+ uint32_t key = foldkeys[i];
+ uint32_t k = key & 0xffffff;
+ uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) :
+ (((k << (r>>5)) - k) << (r&31))) % sz;
+ if (htab[h] != 0xffffffff) { /* Collision on primary slot. */
+ if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */
+ /* Try to move the colliding key, if possible. */
+ if (h < sz-1 && htab[h+2] == 0xffffffff) {
+ uint32_t k2 = htab[h+1] & 0xffffff;
+ uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) :
+ (((k2 << (r>>5)) - k2) << (r&31))) % sz;
+ if (h2 != h+1) return 0; /* Cannot resolve collision. */
+ htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */
+ } else {
+ return 0; /* Collision. */
+ }
+ }
+ htab[h+1] = key;
+ } else {
+ htab[h] = key;
+ }
+ }
+ return 1; /* Success, all keys could be stored. */
+}
+
+/* Print the generated hash table. */
+static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz)
+{
+ uint32_t i;
+ fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x",
+ sz+1, htab[0]);
+ for (i = 1; i < sz+1; i++)
+ fprintf(ctx->fp, ",\n0x%08x", htab[i]);
+ fprintf(ctx->fp, "\n};\n\n");
+}
+
+/* Exhaustive search for the shortest semi-perfect hash table. */
+static void makehash(BuildCtx *ctx)
+{
+ uint32_t htab[BUILD_MAX_FOLD*2+1];
+ uint32_t sz, r;
+ /* Search for the smallest hash table with an odd size. */
+ for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) {
+ /* First try all shift hash combinations. */
+ for (r = 0; r < 32*32; r++) {
+ if (tryhash(htab, sz, r, 0)) {
+ printhash(ctx, htab, sz);
+ fprintf(ctx->fp,
+ "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n",
+ r>>5, r&31, sz);
+ return;
+ }
+ }
+ /* Then try all rotate hash combinations. */
+ for (r = 0; r < 32*32; r++) {
+ if (tryhash(htab, sz, r, 1)) {
+ printhash(ctx, htab, sz);
+ fprintf(ctx->fp,
+ "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n",
+ r>>5, r&31, sz);
+ return;
+ }
+ }
+ }
+ fprintf(stderr, "Error: search for perfect hash failed\n");
+ exit(1);
+}
+
+/* Parse one token of a fold rule. */
+static uint32_t nexttoken(char **pp, int allowlit, int allowany)
+{
+ char *p = *pp;
+ if (p) {
+ uint32_t i;
+ char *q = strchr(p, ' ');
+ if (q) *q++ = '\0';
+ *pp = q;
+ if (allowlit && !strncmp(p, "IRFPM_", 6)) {
+ for (i = 0; irfpm_names[i]; i++)
+ if (!strcmp(irfpm_names[i], p+6))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRFL_", 5)) {
+ for (i = 0; irfield_names[i]; i++)
+ if (!strcmp(irfield_names[i], p+5))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRCALL_", 7)) {
+ for (i = 0; ircall_names[i]; i++)
+ if (!strcmp(ircall_names[i], p+7))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRCONV_", 7)) {
+ for (i = 0; irt_names[i]; i++) {
+ const char *r = strchr(p+7, '_');
+ if (r && !strncmp(irt_names[i], p+7, r-(p+7))) {
+ uint32_t j;
+ for (j = 0; irt_names[j]; j++)
+ if (!strcmp(irt_names[j], r+1))
+ return (i << 5) + j;
+ }
+ }
+ } else if (allowlit && *p >= '0' && *p <= '9') {
+ for (i = 0; *p >= '0' && *p <= '9'; p++)
+ i = i*10 + (*p - '0');
+ if (*p == '\0')
+ return i;
+ } else if (allowany && !strcmp("any", p)) {
+ return allowany;
+ } else {
+ for (i = 0; ir_names[i]; i++)
+ if (!strcmp(ir_names[i], p))
+ return i;
+ }
+ fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno);
+ exit(1);
+ }
+ return 0;
+}
+
+/* Parse a fold rule. */
+static void foldrule(char *p)
+{
+ uint32_t op = nexttoken(&p, 0, 0);
+ uint32_t left = nexttoken(&p, 0, 0x7f);
+ uint32_t right = nexttoken(&p, 1, 0x3ff);
+ uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right;
+ uint32_t i;
+ if (nkeys >= BUILD_MAX_FOLD) {
+ fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n");
+ exit(1);
+ }
+ /* Simple insertion sort to detect duplicates. */
+ for (i = nkeys; i > 0; i--) {
+ if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff))
+ break;
+ if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) {
+ fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno);
+ exit(1);
+ }
+ foldkeys[i] = foldkeys[i-1];
+ }
+ foldkeys[i] = key;
+ nkeys++;
+}
+
+/* Emit C source code for IR folding hash table. */
+void emit_fold(BuildCtx *ctx)
+{
+ char buf[256]; /* We don't care about analyzing lines longer than that. */
+ const char *fname = ctx->args[0];
+ FILE *fp;
+
+ if (fname == NULL) {
+ fprintf(stderr, "Error: missing input filename\n");
+ exit(1);
+ }
+
+ if (fname[0] == '-' && fname[1] == '\0') {
+ fp = stdin;
+ } else {
+ fp = fopen(fname, "r");
+ if (!fp) {
+ fprintf(stderr, "Error: cannot open input file '%s': %s\n",
+ fname, strerror(errno));
+ exit(1);
+ }
+ }
+
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n");
+
+ lineno = 0;
+ funcidx = 0;
+ nkeys = 0;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ lineno++;
+ /* The prefix must be at the start of a line, otherwise it's ignored. */
+ if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) {
+ char *p = buf+sizeof(FOLDDEF_PREFIX)-1;
+ char *q = strchr(p, ')');
+ if (p[0] == '(' && q) {
+ p++;
+ *q = '\0';
+ foldrule(p);
+ } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) {
+ p += 2;
+ *q = '\0';
+ if (funcidx)
+ fprintf(ctx->fp, ",\n");
+ if (p[-2] == 'X')
+ fprintf(ctx->fp, " %s", p);
+ else
+ fprintf(ctx->fp, " fold_%s", p);
+ funcidx++;
+ } else {
+ buf[strlen(buf)-1] = '\0';
+ fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n",
+ FOLDDEF_PREFIX, p, lineno);
+ exit(1);
+ }
+ }
+ }
+ fclose(fp);
+ fprintf(ctx->fp, "\n};\n\n");
+
+ makehash(ctx);
+}
+
diff --git a/3rdparty/lua/src/host/buildvm_lib.c b/3rdparty/lua/src/host/buildvm_lib.c
index 71f2e15..40141df 100644
--- a/3rdparty/lua/src/host/buildvm_lib.c
+++ b/3rdparty/lua/src/host/buildvm_lib.c
@@ -1,398 +1,398 @@
-/*
-** LuaJIT VM builder: library definition compiler.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "buildvm.h"
-#include "lj_obj.h"
-#include "lj_lib.h"
-
-/* Context for library definitions. */
-static uint8_t obuf[8192];
-static uint8_t *optr;
-static char modname[80];
-static size_t modnamelen;
-static char funcname[80];
-static int modstate, regfunc;
-static int ffid, recffid, ffasmfunc;
-
-enum {
- REGFUNC_OK,
- REGFUNC_NOREG,
- REGFUNC_NOREGUV
-};
-
-static void libdef_name(const char *p, int kind)
-{
- size_t n = strlen(p);
- if (kind != LIBINIT_STRING) {
- if (n > modnamelen && p[modnamelen] == '_' &&
- !strncmp(p, modname, modnamelen)) {
- p += modnamelen+1;
- n -= modnamelen+1;
- }
- }
- if (n > LIBINIT_MAXSTR) {
- fprintf(stderr, "Error: string too long: '%s'\n", p);
- exit(1);
- }
- if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. */
- fprintf(stderr, "Error: output buffer overflow\n");
- exit(1);
- }
- *optr++ = (uint8_t)(n | kind);
- memcpy(optr, p, n);
- optr += n;
-}
-
-static void libdef_endmodule(BuildCtx *ctx)
-{
- if (modstate != 0) {
- char line[80];
- const uint8_t *p;
- int n;
- if (modstate == 1)
- fprintf(ctx->fp, " (lua_CFunction)0");
- fprintf(ctx->fp, "\n};\n");
- fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n",
- LABEL_PREFIX_LIBINIT, modname);
- line[0] = '\0';
- for (n = 0, p = obuf; p < optr; p++) {
- n += sprintf(line+n, "%d,", *p);
- if (n >= 75) {
- fprintf(ctx->fp, "%s\n", line);
- n = 0;
- line[0] = '\0';
- }
- }
- fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END);
- }
-}
-
-static void libdef_module(BuildCtx *ctx, char *p, int arg)
-{
- UNUSED(arg);
- if (ctx->mode == BUILD_libdef) {
- libdef_endmodule(ctx);
- optr = obuf;
- *optr++ = (uint8_t)ffid;
- *optr++ = (uint8_t)ffasmfunc;
- *optr++ = 0; /* Hash table size. */
- modstate = 1;
- fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p);
- fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p);
- fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n",
- LABEL_PREFIX_LIBCF, p);
- }
- modnamelen = strlen(p);
- if (modnamelen > sizeof(modname)-1) {
- fprintf(stderr, "Error: module name too long: '%s'\n", p);
- exit(1);
- }
- strcpy(modname, p);
-}
-
-static int find_ffofs(BuildCtx *ctx, const char *name)
-{
- int i;
- for (i = 0; i < ctx->nglob; i++) {
- const char *gl = ctx->globnames[i];
- if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) {
- return (int)((uint8_t *)ctx->glob[i] - ctx->code);
- }
- }
- fprintf(stderr, "Error: undefined fast function %s%s\n",
- LABEL_PREFIX_FF, name);
- exit(1);
-}
-
-static void libdef_func(BuildCtx *ctx, char *p, int arg)
-{
- if (arg != LIBINIT_CF)
- ffasmfunc++;
- if (ctx->mode == BUILD_libdef) {
- if (modstate == 0) {
- fprintf(stderr, "Error: no module for function definition %s\n", p);
- exit(1);
- }
- if (regfunc == REGFUNC_NOREG) {
- if (optr+1 > obuf+sizeof(obuf)) {
- fprintf(stderr, "Error: output buffer overflow\n");
- exit(1);
- }
- *optr++ = LIBINIT_FFID;
- } else {
- if (arg != LIBINIT_ASM_) {
- if (modstate != 1) fprintf(ctx->fp, ",\n");
- modstate = 2;
- fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p);
- }
- if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */
- libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg);
- }
- } else if (ctx->mode == BUILD_ffdef) {
- fprintf(ctx->fp, "FFDEF(%s)\n", p);
- } else if (ctx->mode == BUILD_recdef) {
- if (strlen(p) > sizeof(funcname)-1) {
- fprintf(stderr, "Error: function name too long: '%s'\n", p);
- exit(1);
- }
- strcpy(funcname, p);
- } else if (ctx->mode == BUILD_vmdef) {
- int i;
- for (i = 1; p[i] && modname[i-1]; i++)
- if (p[i] == '_') p[i] = '.';
- fprintf(ctx->fp, "\"%s\",\n", p);
- } else if (ctx->mode == BUILD_bcdef) {
- if (arg != LIBINIT_CF)
- fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p));
- }
- ffid++;
- regfunc = REGFUNC_OK;
-}
-
-static uint32_t find_rec(char *name)
-{
- char *p = (char *)obuf;
- uint32_t n;
- for (n = 2; *p; n++) {
- if (strcmp(p, name) == 0)
- return n;
- p += strlen(p)+1;
- }
- if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) {
- fprintf(stderr, "Error: output buffer overflow\n");
- exit(1);
- }
- strcpy(p, name);
- return n;
-}
-
-static void libdef_rec(BuildCtx *ctx, char *p, int arg)
-{
- UNUSED(arg);
- if (ctx->mode == BUILD_recdef) {
- char *q;
- uint32_t n;
- for (; recffid+1 < ffid; recffid++)
- fprintf(ctx->fp, ",\n0");
- recffid = ffid;
- if (*p == '.') p = funcname;
- q = strchr(p, ' ');
- if (q) *q++ = '\0';
- n = find_rec(p);
- if (q)
- fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q);
- else
- fprintf(ctx->fp, ",\n0x%02x00", n);
- }
-}
-
-static void memcpy_endian(void *dst, void *src, size_t n)
-{
- union { uint8_t b; uint32_t u; } host_endian;
- host_endian.u = 1;
- if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) {
- memcpy(dst, src, n);
- } else {
- size_t i;
- for (i = 0; i < n; i++)
- ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1];
- }
-}
-
-static void libdef_push(BuildCtx *ctx, char *p, int arg)
-{
- UNUSED(arg);
- if (ctx->mode == BUILD_libdef) {
- int len = (int)strlen(p);
- if (*p == '"') {
- if (len > 1 && p[len-1] == '"') {
- p[len-1] = '\0';
- libdef_name(p+1, LIBINIT_STRING);
- return;
- }
- } else if (*p >= '0' && *p <= '9') {
- char *ep;
- double d = strtod(p, &ep);
- if (*ep == '\0') {
- if (optr+1+sizeof(double) > obuf+sizeof(obuf)) {
- fprintf(stderr, "Error: output buffer overflow\n");
- exit(1);
- }
- *optr++ = LIBINIT_NUMBER;
- memcpy_endian(optr, &d, sizeof(double));
- optr += sizeof(double);
- return;
- }
- } else if (!strcmp(p, "lastcl")) {
- if (optr+1 > obuf+sizeof(obuf)) {
- fprintf(stderr, "Error: output buffer overflow\n");
- exit(1);
- }
- *optr++ = LIBINIT_LASTCL;
- return;
- } else if (len > 4 && !strncmp(p, "top-", 4)) {
- if (optr+2 > obuf+sizeof(obuf)) {
- fprintf(stderr, "Error: output buffer overflow\n");
- exit(1);
- }
- *optr++ = LIBINIT_COPY;
- *optr++ = (uint8_t)atoi(p+4);
- return;
- }
- fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p);
- exit(1);
- }
-}
-
-static void libdef_set(BuildCtx *ctx, char *p, int arg)
-{
- UNUSED(arg);
- if (ctx->mode == BUILD_libdef) {
- if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */
- libdef_name(p, LIBINIT_STRING);
- *optr++ = LIBINIT_SET;
- obuf[2]++; /* Bump hash table size. */
- }
-}
-
-static void libdef_regfunc(BuildCtx *ctx, char *p, int arg)
-{
- UNUSED(ctx); UNUSED(p);
- regfunc = arg;
-}
-
-typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg);
-
-typedef struct LibDefHandler {
- const char *suffix;
- const char *stop;
- const LibDefFunc func;
- const int arg;
-} LibDefHandler;
-
-static const LibDefHandler libdef_handlers[] = {
- { "MODULE_", " \t\r\n", libdef_module, 0 },
- { "CF(", ")", libdef_func, LIBINIT_CF },
- { "ASM(", ")", libdef_func, LIBINIT_ASM },
- { "ASM_(", ")", libdef_func, LIBINIT_ASM_ },
- { "REC(", ")", libdef_rec, 0 },
- { "PUSH(", ")", libdef_push, 0 },
- { "SET(", ")", libdef_set, 0 },
- { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV },
- { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG },
- { NULL, NULL, (LibDefFunc)0, 0 }
-};
-
-/* Emit C source code for library function definitions. */
-void emit_lib(BuildCtx *ctx)
-{
- const char *fname;
-
- if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef ||
- ctx->mode == BUILD_recdef)
- fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
- else if (ctx->mode == BUILD_vmdef)
- fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n");
- if (ctx->mode == BUILD_recdef)
- fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100");
- recffid = ffid = FF_C+1;
- ffasmfunc = 0;
-
- while ((fname = *ctx->args++)) {
- char buf[256]; /* We don't care about analyzing lines longer than that. */
- FILE *fp;
- if (fname[0] == '-' && fname[1] == '\0') {
- fp = stdin;
- } else {
- fp = fopen(fname, "r");
- if (!fp) {
- fprintf(stderr, "Error: cannot open input file '%s': %s\n",
- fname, strerror(errno));
- exit(1);
- }
- }
- modstate = 0;
- regfunc = REGFUNC_OK;
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- char *p;
- /* Simplistic pre-processor. Only handles top-level #if/#endif. */
- if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') {
- int ok = 1;
- if (!strcmp(buf, "#if LJ_52\n"))
- ok = LJ_52;
- else if (!strcmp(buf, "#if LJ_HASJIT\n"))
- ok = LJ_HASJIT;
- else if (!strcmp(buf, "#if LJ_HASFFI\n"))
- ok = LJ_HASFFI;
- if (!ok) {
- int lvl = 1;
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- if (buf[0] == '#' && buf[1] == 'e' && buf[2] == 'n') {
- if (--lvl == 0) break;
- } else if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') {
- lvl++;
- }
- }
- continue;
- }
- }
- for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) {
- const LibDefHandler *ldh;
- p += sizeof(LIBDEF_PREFIX)-1;
- for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) {
- size_t n, len = strlen(ldh->suffix);
- if (!strncmp(p, ldh->suffix, len)) {
- p += len;
- n = ldh->stop ? strcspn(p, ldh->stop) : 0;
- if (!p[n]) break;
- p[n] = '\0';
- ldh->func(ctx, p, ldh->arg);
- p += n+1;
- break;
- }
- }
- if (ldh->suffix == NULL) {
- buf[strlen(buf)-1] = '\0';
- fprintf(stderr, "Error: unknown library definition tag %s%s\n",
- LIBDEF_PREFIX, p);
- exit(1);
- }
- }
- }
- fclose(fp);
- if (ctx->mode == BUILD_libdef) {
- libdef_endmodule(ctx);
- }
- }
-
- if (ctx->mode == BUILD_ffdef) {
- fprintf(ctx->fp, "\n#undef FFDEF\n\n");
- fprintf(ctx->fp,
- "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n",
- ffasmfunc);
- } else if (ctx->mode == BUILD_vmdef) {
- fprintf(ctx->fp, "}\n\n");
- } else if (ctx->mode == BUILD_bcdef) {
- int i;
- fprintf(ctx->fp, "\n};\n\n");
- fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n");
- fprintf(ctx->fp, "BCDEF(BCMODE)\n");
- for (i = ffasmfunc-1; i > 0; i--)
- fprintf(ctx->fp, "BCMODE_FF,\n");
- fprintf(ctx->fp, "BCMODE_FF\n};\n\n");
- } else if (ctx->mode == BUILD_recdef) {
- char *p = (char *)obuf;
- fprintf(ctx->fp, "\n};\n\n");
- fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n"
- "recff_nyi,\n"
- "recff_c");
- while (*p) {
- fprintf(ctx->fp, ",\nrecff_%s", p);
- p += strlen(p)+1;
- }
- fprintf(ctx->fp, "\n};\n\n");
- }
-}
-
+/*
+** LuaJIT VM builder: library definition compiler.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_lib.h"
+
+/* Context for library definitions. */
+static uint8_t obuf[8192];
+static uint8_t *optr;
+static char modname[80];
+static size_t modnamelen;
+static char funcname[80];
+static int modstate, regfunc;
+static int ffid, recffid, ffasmfunc;
+
+enum {
+ REGFUNC_OK,
+ REGFUNC_NOREG,
+ REGFUNC_NOREGUV
+};
+
+static void libdef_name(const char *p, int kind)
+{
+ size_t n = strlen(p);
+ if (kind != LIBINIT_STRING) {
+ if (n > modnamelen && p[modnamelen] == '_' &&
+ !strncmp(p, modname, modnamelen)) {
+ p += modnamelen+1;
+ n -= modnamelen+1;
+ }
+ }
+ if (n > LIBINIT_MAXSTR) {
+ fprintf(stderr, "Error: string too long: '%s'\n", p);
+ exit(1);
+ }
+ if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. */
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = (uint8_t)(n | kind);
+ memcpy(optr, p, n);
+ optr += n;
+}
+
+static void libdef_endmodule(BuildCtx *ctx)
+{
+ if (modstate != 0) {
+ char line[80];
+ const uint8_t *p;
+ int n;
+ if (modstate == 1)
+ fprintf(ctx->fp, " (lua_CFunction)0");
+ fprintf(ctx->fp, "\n};\n");
+ fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n",
+ LABEL_PREFIX_LIBINIT, modname);
+ line[0] = '\0';
+ for (n = 0, p = obuf; p < optr; p++) {
+ n += sprintf(line+n, "%d,", *p);
+ if (n >= 75) {
+ fprintf(ctx->fp, "%s\n", line);
+ n = 0;
+ line[0] = '\0';
+ }
+ }
+ fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END);
+ }
+}
+
+static void libdef_module(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ libdef_endmodule(ctx);
+ optr = obuf;
+ *optr++ = (uint8_t)ffid;
+ *optr++ = (uint8_t)ffasmfunc;
+ *optr++ = 0; /* Hash table size. */
+ modstate = 1;
+ fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p);
+ fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p);
+ fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n",
+ LABEL_PREFIX_LIBCF, p);
+ }
+ modnamelen = strlen(p);
+ if (modnamelen > sizeof(modname)-1) {
+ fprintf(stderr, "Error: module name too long: '%s'\n", p);
+ exit(1);
+ }
+ strcpy(modname, p);
+}
+
+static int find_ffofs(BuildCtx *ctx, const char *name)
+{
+ int i;
+ for (i = 0; i < ctx->nglob; i++) {
+ const char *gl = ctx->globnames[i];
+ if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) {
+ return (int)((uint8_t *)ctx->glob[i] - ctx->code);
+ }
+ }
+ fprintf(stderr, "Error: undefined fast function %s%s\n",
+ LABEL_PREFIX_FF, name);
+ exit(1);
+}
+
+static void libdef_func(BuildCtx *ctx, char *p, int arg)
+{
+ if (arg != LIBINIT_CF)
+ ffasmfunc++;
+ if (ctx->mode == BUILD_libdef) {
+ if (modstate == 0) {
+ fprintf(stderr, "Error: no module for function definition %s\n", p);
+ exit(1);
+ }
+ if (regfunc == REGFUNC_NOREG) {
+ if (optr+1 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_FFID;
+ } else {
+ if (arg != LIBINIT_ASM_) {
+ if (modstate != 1) fprintf(ctx->fp, ",\n");
+ modstate = 2;
+ fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p);
+ }
+ if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */
+ libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg);
+ }
+ } else if (ctx->mode == BUILD_ffdef) {
+ fprintf(ctx->fp, "FFDEF(%s)\n", p);
+ } else if (ctx->mode == BUILD_recdef) {
+ if (strlen(p) > sizeof(funcname)-1) {
+ fprintf(stderr, "Error: function name too long: '%s'\n", p);
+ exit(1);
+ }
+ strcpy(funcname, p);
+ } else if (ctx->mode == BUILD_vmdef) {
+ int i;
+ for (i = 1; p[i] && modname[i-1]; i++)
+ if (p[i] == '_') p[i] = '.';
+ fprintf(ctx->fp, "\"%s\",\n", p);
+ } else if (ctx->mode == BUILD_bcdef) {
+ if (arg != LIBINIT_CF)
+ fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p));
+ }
+ ffid++;
+ regfunc = REGFUNC_OK;
+}
+
+static uint32_t find_rec(char *name)
+{
+ char *p = (char *)obuf;
+ uint32_t n;
+ for (n = 2; *p; n++) {
+ if (strcmp(p, name) == 0)
+ return n;
+ p += strlen(p)+1;
+ }
+ if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ strcpy(p, name);
+ return n;
+}
+
+static void libdef_rec(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_recdef) {
+ char *q;
+ uint32_t n;
+ for (; recffid+1 < ffid; recffid++)
+ fprintf(ctx->fp, ",\n0");
+ recffid = ffid;
+ if (*p == '.') p = funcname;
+ q = strchr(p, ' ');
+ if (q) *q++ = '\0';
+ n = find_rec(p);
+ if (q)
+ fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q);
+ else
+ fprintf(ctx->fp, ",\n0x%02x00", n);
+ }
+}
+
+static void memcpy_endian(void *dst, void *src, size_t n)
+{
+ union { uint8_t b; uint32_t u; } host_endian;
+ host_endian.u = 1;
+ if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) {
+ memcpy(dst, src, n);
+ } else {
+ size_t i;
+ for (i = 0; i < n; i++)
+ ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1];
+ }
+}
+
+static void libdef_push(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ int len = (int)strlen(p);
+ if (*p == '"') {
+ if (len > 1 && p[len-1] == '"') {
+ p[len-1] = '\0';
+ libdef_name(p+1, LIBINIT_STRING);
+ return;
+ }
+ } else if (*p >= '0' && *p <= '9') {
+ char *ep;
+ double d = strtod(p, &ep);
+ if (*ep == '\0') {
+ if (optr+1+sizeof(double) > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_NUMBER;
+ memcpy_endian(optr, &d, sizeof(double));
+ optr += sizeof(double);
+ return;
+ }
+ } else if (!strcmp(p, "lastcl")) {
+ if (optr+1 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_LASTCL;
+ return;
+ } else if (len > 4 && !strncmp(p, "top-", 4)) {
+ if (optr+2 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_COPY;
+ *optr++ = (uint8_t)atoi(p+4);
+ return;
+ }
+ fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p);
+ exit(1);
+ }
+}
+
+static void libdef_set(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */
+ libdef_name(p, LIBINIT_STRING);
+ *optr++ = LIBINIT_SET;
+ obuf[2]++; /* Bump hash table size. */
+ }
+}
+
+static void libdef_regfunc(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(ctx); UNUSED(p);
+ regfunc = arg;
+}
+
+typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg);
+
+typedef struct LibDefHandler {
+ const char *suffix;
+ const char *stop;
+ const LibDefFunc func;
+ const int arg;
+} LibDefHandler;
+
+static const LibDefHandler libdef_handlers[] = {
+ { "MODULE_", " \t\r\n", libdef_module, 0 },
+ { "CF(", ")", libdef_func, LIBINIT_CF },
+ { "ASM(", ")", libdef_func, LIBINIT_ASM },
+ { "ASM_(", ")", libdef_func, LIBINIT_ASM_ },
+ { "REC(", ")", libdef_rec, 0 },
+ { "PUSH(", ")", libdef_push, 0 },
+ { "SET(", ")", libdef_set, 0 },
+ { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV },
+ { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG },
+ { NULL, NULL, (LibDefFunc)0, 0 }
+};
+
+/* Emit C source code for library function definitions. */
+void emit_lib(BuildCtx *ctx)
+{
+ const char *fname;
+
+ if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef ||
+ ctx->mode == BUILD_recdef)
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ else if (ctx->mode == BUILD_vmdef)
+ fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n");
+ if (ctx->mode == BUILD_recdef)
+ fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100");
+ recffid = ffid = FF_C+1;
+ ffasmfunc = 0;
+
+ while ((fname = *ctx->args++)) {
+ char buf[256]; /* We don't care about analyzing lines longer than that. */
+ FILE *fp;
+ if (fname[0] == '-' && fname[1] == '\0') {
+ fp = stdin;
+ } else {
+ fp = fopen(fname, "r");
+ if (!fp) {
+ fprintf(stderr, "Error: cannot open input file '%s': %s\n",
+ fname, strerror(errno));
+ exit(1);
+ }
+ }
+ modstate = 0;
+ regfunc = REGFUNC_OK;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ char *p;
+ /* Simplistic pre-processor. Only handles top-level #if/#endif. */
+ if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') {
+ int ok = 1;
+ if (!strcmp(buf, "#if LJ_52\n"))
+ ok = LJ_52;
+ else if (!strcmp(buf, "#if LJ_HASJIT\n"))
+ ok = LJ_HASJIT;
+ else if (!strcmp(buf, "#if LJ_HASFFI\n"))
+ ok = LJ_HASFFI;
+ if (!ok) {
+ int lvl = 1;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if (buf[0] == '#' && buf[1] == 'e' && buf[2] == 'n') {
+ if (--lvl == 0) break;
+ } else if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') {
+ lvl++;
+ }
+ }
+ continue;
+ }
+ }
+ for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) {
+ const LibDefHandler *ldh;
+ p += sizeof(LIBDEF_PREFIX)-1;
+ for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) {
+ size_t n, len = strlen(ldh->suffix);
+ if (!strncmp(p, ldh->suffix, len)) {
+ p += len;
+ n = ldh->stop ? strcspn(p, ldh->stop) : 0;
+ if (!p[n]) break;
+ p[n] = '\0';
+ ldh->func(ctx, p, ldh->arg);
+ p += n+1;
+ break;
+ }
+ }
+ if (ldh->suffix == NULL) {
+ buf[strlen(buf)-1] = '\0';
+ fprintf(stderr, "Error: unknown library definition tag %s%s\n",
+ LIBDEF_PREFIX, p);
+ exit(1);
+ }
+ }
+ }
+ fclose(fp);
+ if (ctx->mode == BUILD_libdef) {
+ libdef_endmodule(ctx);
+ }
+ }
+
+ if (ctx->mode == BUILD_ffdef) {
+ fprintf(ctx->fp, "\n#undef FFDEF\n\n");
+ fprintf(ctx->fp,
+ "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n",
+ ffasmfunc);
+ } else if (ctx->mode == BUILD_vmdef) {
+ fprintf(ctx->fp, "}\n\n");
+ } else if (ctx->mode == BUILD_bcdef) {
+ int i;
+ fprintf(ctx->fp, "\n};\n\n");
+ fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n");
+ fprintf(ctx->fp, "BCDEF(BCMODE)\n");
+ for (i = ffasmfunc-1; i > 0; i--)
+ fprintf(ctx->fp, "BCMODE_FF,\n");
+ fprintf(ctx->fp, "BCMODE_FF\n};\n\n");
+ } else if (ctx->mode == BUILD_recdef) {
+ char *p = (char *)obuf;
+ fprintf(ctx->fp, "\n};\n\n");
+ fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n"
+ "recff_nyi,\n"
+ "recff_c");
+ while (*p) {
+ fprintf(ctx->fp, ",\nrecff_%s", p);
+ p += strlen(p)+1;
+ }
+ fprintf(ctx->fp, "\n};\n\n");
+ }
+}
+
diff --git a/3rdparty/lua/src/host/buildvm_peobj.c b/3rdparty/lua/src/host/buildvm_peobj.c
index b891817..1249445 100644
--- a/3rdparty/lua/src/host/buildvm_peobj.c
+++ b/3rdparty/lua/src/host/buildvm_peobj.c
@@ -1,368 +1,368 @@
-/*
-** LuaJIT VM builder: PE object emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Only used for building on Windows, since we cannot assume the presence
-** of a suitable assembler. The host and target byte order must match.
-*/
-
-#include "buildvm.h"
-#include "lj_bc.h"
-
-#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC
-
-/* Context for PE object emitter. */
-static char *strtab;
-static size_t strtabofs;
-
-/* -- PE object definitions ----------------------------------------------- */
-
-/* PE header. */
-typedef struct PEheader {
- uint16_t arch;
- uint16_t nsects;
- uint32_t time;
- uint32_t symtabofs;
- uint32_t nsyms;
- uint16_t opthdrsz;
- uint16_t flags;
-} PEheader;
-
-/* PE section. */
-typedef struct PEsection {
- char name[8];
- uint32_t vsize;
- uint32_t vaddr;
- uint32_t size;
- uint32_t ofs;
- uint32_t relocofs;
- uint32_t lineofs;
- uint16_t nreloc;
- uint16_t nline;
- uint32_t flags;
-} PEsection;
-
-/* PE relocation. */
-typedef struct PEreloc {
- uint32_t vaddr;
- uint32_t symidx;
- uint16_t type;
-} PEreloc;
-
-/* Cannot use sizeof, because it pads up to the max. alignment. */
-#define PEOBJ_RELOC_SIZE (4+4+2)
-
-/* PE symbol table entry. */
-typedef struct PEsym {
- union {
- char name[8];
- uint32_t nameref[2];
- } n;
- uint32_t value;
- int16_t sect;
- uint16_t type;
- uint8_t scl;
- uint8_t naux;
-} PEsym;
-
-/* PE symbol table auxiliary entry for a section. */
-typedef struct PEsymaux {
- uint32_t size;
- uint16_t nreloc;
- uint16_t nline;
- uint32_t cksum;
- uint16_t assoc;
- uint8_t comdatsel;
- uint8_t unused[3];
-} PEsymaux;
-
-/* Cannot use sizeof, because it pads up to the max. alignment. */
-#define PEOBJ_SYM_SIZE (8+4+2+2+1+1)
-
-/* PE object CPU specific defines. */
-#if LJ_TARGET_X86
-#define PEOBJ_ARCH_TARGET 0x014c
-#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */
-#define PEOBJ_RELOC_DIR32 0x06
-#define PEOBJ_RELOC_OFS 0
-#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
-#elif LJ_TARGET_X64
-#define PEOBJ_ARCH_TARGET 0x8664
-#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. */
-#define PEOBJ_RELOC_DIR32 0x02
-#define PEOBJ_RELOC_ADDR32NB 0x03
-#define PEOBJ_RELOC_OFS 0
-#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
-#elif LJ_TARGET_PPC
-#define PEOBJ_ARCH_TARGET 0x01f2
-#define PEOBJ_RELOC_REL32 0x06
-#define PEOBJ_RELOC_DIR32 0x02
-#define PEOBJ_RELOC_OFS (-4)
-#define PEOBJ_TEXT_FLAGS 0x60400020 /* 60=r+x, 40=align8, 20=code. */
-#endif
-
-/* Section numbers (0-based). */
-enum {
- PEOBJ_SECT_ABS = -2,
- PEOBJ_SECT_UNDEF = -1,
- PEOBJ_SECT_TEXT,
-#if LJ_TARGET_X64
- PEOBJ_SECT_PDATA,
- PEOBJ_SECT_XDATA,
-#endif
- PEOBJ_SECT_RDATA_Z,
- PEOBJ_NSECTIONS
-};
-
-/* Symbol types. */
-#define PEOBJ_TYPE_NULL 0
-#define PEOBJ_TYPE_FUNC 0x20
-
-/* Symbol storage class. */
-#define PEOBJ_SCL_EXTERN 2
-#define PEOBJ_SCL_STATIC 3
-
-/* -- PE object emitter --------------------------------------------------- */
-
-/* Emit PE object symbol. */
-static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value,
- int sect, int type, int scl)
-{
- PEsym sym;
- size_t len = strlen(name);
- if (!strtab) { /* Pass 1: only calculate string table length. */
- if (len > 8) strtabofs += len+1;
- return;
- }
- if (len <= 8) {
- memcpy(sym.n.name, name, len);
- memset(sym.n.name+len, 0, 8-len);
- } else {
- sym.n.nameref[0] = 0;
- sym.n.nameref[1] = (uint32_t)strtabofs;
- memcpy(strtab + strtabofs, name, len);
- strtab[strtabofs+len] = 0;
- strtabofs += len+1;
- }
- sym.value = value;
- sym.sect = (int16_t)(sect+1); /* 1-based section number. */
- sym.type = (uint16_t)type;
- sym.scl = (uint8_t)scl;
- sym.naux = 0;
- owrite(ctx, &sym, PEOBJ_SYM_SIZE);
-}
-
-/* Emit PE object section symbol. */
-static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect)
-{
- PEsym sym;
- PEsymaux aux;
- if (!strtab) return; /* Pass 1: no output. */
- memcpy(sym.n.name, pesect[sect].name, 8);
- sym.value = 0;
- sym.sect = (int16_t)(sect+1); /* 1-based section number. */
- sym.type = PEOBJ_TYPE_NULL;
- sym.scl = PEOBJ_SCL_STATIC;
- sym.naux = 1;
- owrite(ctx, &sym, PEOBJ_SYM_SIZE);
- memset(&aux, 0, sizeof(PEsymaux));
- aux.size = pesect[sect].size;
- aux.nreloc = pesect[sect].nreloc;
- owrite(ctx, &aux, PEOBJ_SYM_SIZE);
-}
-
-/* Emit Windows PE object file. */
-void emit_peobj(BuildCtx *ctx)
-{
- PEheader pehdr;
- PEsection pesect[PEOBJ_NSECTIONS];
- uint32_t sofs;
- int i, nrsym;
- union { uint8_t b; uint32_t u; } host_endian;
-
- sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection);
-
- /* Fill in PE sections. */
- memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection));
- memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1);
- pesect[PEOBJ_SECT_TEXT].ofs = sofs;
- sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz);
- pesect[PEOBJ_SECT_TEXT].relocofs = sofs;
- sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE;
- /* Flags: 60 = read+execute, 50 = align16, 20 = code. */
- pesect[PEOBJ_SECT_TEXT].flags = PEOBJ_TEXT_FLAGS;
-
-#if LJ_TARGET_X64
- memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1);
- pesect[PEOBJ_SECT_PDATA].ofs = sofs;
- sofs += (pesect[PEOBJ_SECT_PDATA].size = 6*4);
- pesect[PEOBJ_SECT_PDATA].relocofs = sofs;
- sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 6) * PEOBJ_RELOC_SIZE;
- /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
- pesect[PEOBJ_SECT_PDATA].flags = 0x40300040;
-
- memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1);
- pesect[PEOBJ_SECT_XDATA].ofs = sofs;
- sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4+6*2); /* See below. */
- pesect[PEOBJ_SECT_XDATA].relocofs = sofs;
- sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE;
- /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
- pesect[PEOBJ_SECT_XDATA].flags = 0x40300040;
-#endif
-
- memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1);
- pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs;
- sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1);
- /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
- pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040;
-
- /* Fill in PE header. */
- pehdr.arch = PEOBJ_ARCH_TARGET;
- pehdr.nsects = PEOBJ_NSECTIONS;
- pehdr.time = 0; /* Timestamp is optional. */
- pehdr.symtabofs = sofs;
- pehdr.opthdrsz = 0;
- pehdr.flags = 0;
-
- /* Compute the size of the symbol table:
- ** @feat.00 + nsections*2
- ** + asm_start + nsym
- ** + nrsym
- */
- nrsym = ctx->nrelocsym;
- pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym;
-#if LJ_TARGET_X64
- pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */
-#endif
-
- /* Write PE object header and all sections. */
- owrite(ctx, &pehdr, sizeof(PEheader));
- owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS);
-
- /* Write .text section. */
- host_endian.u = 1;
- if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) {
-#if LJ_TARGET_PPC
- uint32_t *p = (uint32_t *)ctx->code;
- int n = (int)(ctx->codesz >> 2);
- for (i = 0; i < n; i++, p++)
- *p = lj_bswap(*p); /* Byteswap .text section. */
-#else
- fprintf(stderr, "Error: different byte order for host and target\n");
- exit(1);
-#endif
- }
- owrite(ctx, ctx->code, ctx->codesz);
- for (i = 0; i < ctx->nreloc; i++) {
- PEreloc reloc;
- reloc.vaddr = (uint32_t)ctx->reloc[i].ofs + PEOBJ_RELOC_OFS;
- reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */
- reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- }
-
-#if LJ_TARGET_X64
- { /* Write .pdata section. */
- uint32_t fcofs = (uint32_t)ctx->sym[ctx->nsym-1].ofs;
- uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */
- PEreloc reloc;
- pdata[0] = 0; pdata[1] = fcofs; pdata[2] = 0;
- owrite(ctx, &pdata, sizeof(pdata));
- pdata[0] = fcofs; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 20;
- owrite(ctx, &pdata, sizeof(pdata));
- reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- reloc.vaddr = 12; reloc.symidx = 1+2+nrsym+2+2+1;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- reloc.vaddr = 16; reloc.symidx = 1+2+nrsym+2+2+1;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- reloc.vaddr = 20; reloc.symidx = 1+2+nrsym+2;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- }
- { /* Write .xdata section. */
- uint16_t xdata[8+2+6];
- PEreloc reloc;
- xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhandler/ehandler, prolog size 0. */
- xdata[1] = 0x0005; /* Number of unwind codes, no frame pointer. */
- xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */
- xdata[3] = 0x3000; /* Push rbx. */
- xdata[4] = 0x6000; /* Push rsi. */
- xdata[5] = 0x7000; /* Push rdi. */
- xdata[6] = 0x5000; /* Push rbp. */
- xdata[7] = 0; /* Alignment. */
- xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */
- xdata[10] = 0x01; /* Ver. 1, no handler, prolog size 0. */
- xdata[11] = 0x1504; /* Number of unwind codes, fp = rbp, fpofs = 16. */
- xdata[12] = 0x0300; /* set_fpreg. */
- xdata[13] = 0x0200; /* stack offset 0*8+8 = aword*1. */
- xdata[14] = 0x3000; /* Push rbx. */
- xdata[15] = 0x5000; /* Push rbp. */
- owrite(ctx, &xdata, sizeof(xdata));
- reloc.vaddr = 2*8; reloc.symidx = 1+2+nrsym+2+2;
- reloc.type = PEOBJ_RELOC_ADDR32NB;
- owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
- }
-#endif
-
- /* Write .rdata$Z section. */
- owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1);
-
- /* Write symbol table. */
- strtab = NULL; /* 1st pass: collect string sizes. */
- for (;;) {
- strtabofs = 4;
- /* Mark as SafeSEH compliant. */
- emit_peobj_sym(ctx, "@feat.00", 1,
- PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC);
-
- emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT);
- for (i = 0; i < nrsym; i++)
- emit_peobj_sym(ctx, ctx->relocsym[i], 0,
- PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
-
-#if LJ_TARGET_X64
- emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA);
- emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA);
- emit_peobj_sym(ctx, "lj_err_unwind_win64", 0,
- PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
-#endif
-
- emit_peobj_sym(ctx, ctx->beginsym, 0,
- PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN);
- for (i = 0; i < ctx->nsym; i++)
- emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs,
- PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
-
- emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z);
-
- if (strtab)
- break;
- /* 2nd pass: alloc strtab, write syms and copy strings. */
- strtab = (char *)malloc(strtabofs);
- *(uint32_t *)strtab = (uint32_t)strtabofs;
- }
-
- /* Write string table. */
- owrite(ctx, strtab, strtabofs);
-}
-
-#else
-
-void emit_peobj(BuildCtx *ctx)
-{
- UNUSED(ctx);
- fprintf(stderr, "Error: no PE object support for this target\n");
- exit(1);
-}
-
-#endif
+/*
+** LuaJIT VM builder: PE object emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Only used for building on Windows, since we cannot assume the presence
+** of a suitable assembler. The host and target byte order must match.
+*/
+
+#include "buildvm.h"
+#include "lj_bc.h"
+
+#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC
+
+/* Context for PE object emitter. */
+static char *strtab;
+static size_t strtabofs;
+
+/* -- PE object definitions ----------------------------------------------- */
+
+/* PE header. */
+typedef struct PEheader {
+ uint16_t arch;
+ uint16_t nsects;
+ uint32_t time;
+ uint32_t symtabofs;
+ uint32_t nsyms;
+ uint16_t opthdrsz;
+ uint16_t flags;
+} PEheader;
+
+/* PE section. */
+typedef struct PEsection {
+ char name[8];
+ uint32_t vsize;
+ uint32_t vaddr;
+ uint32_t size;
+ uint32_t ofs;
+ uint32_t relocofs;
+ uint32_t lineofs;
+ uint16_t nreloc;
+ uint16_t nline;
+ uint32_t flags;
+} PEsection;
+
+/* PE relocation. */
+typedef struct PEreloc {
+ uint32_t vaddr;
+ uint32_t symidx;
+ uint16_t type;
+} PEreloc;
+
+/* Cannot use sizeof, because it pads up to the max. alignment. */
+#define PEOBJ_RELOC_SIZE (4+4+2)
+
+/* PE symbol table entry. */
+typedef struct PEsym {
+ union {
+ char name[8];
+ uint32_t nameref[2];
+ } n;
+ uint32_t value;
+ int16_t sect;
+ uint16_t type;
+ uint8_t scl;
+ uint8_t naux;
+} PEsym;
+
+/* PE symbol table auxiliary entry for a section. */
+typedef struct PEsymaux {
+ uint32_t size;
+ uint16_t nreloc;
+ uint16_t nline;
+ uint32_t cksum;
+ uint16_t assoc;
+ uint8_t comdatsel;
+ uint8_t unused[3];
+} PEsymaux;
+
+/* Cannot use sizeof, because it pads up to the max. alignment. */
+#define PEOBJ_SYM_SIZE (8+4+2+2+1+1)
+
+/* PE object CPU specific defines. */
+#if LJ_TARGET_X86
+#define PEOBJ_ARCH_TARGET 0x014c
+#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */
+#define PEOBJ_RELOC_DIR32 0x06
+#define PEOBJ_RELOC_OFS 0
+#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
+#elif LJ_TARGET_X64
+#define PEOBJ_ARCH_TARGET 0x8664
+#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. */
+#define PEOBJ_RELOC_DIR32 0x02
+#define PEOBJ_RELOC_ADDR32NB 0x03
+#define PEOBJ_RELOC_OFS 0
+#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
+#elif LJ_TARGET_PPC
+#define PEOBJ_ARCH_TARGET 0x01f2
+#define PEOBJ_RELOC_REL32 0x06
+#define PEOBJ_RELOC_DIR32 0x02
+#define PEOBJ_RELOC_OFS (-4)
+#define PEOBJ_TEXT_FLAGS 0x60400020 /* 60=r+x, 40=align8, 20=code. */
+#endif
+
+/* Section numbers (0-based). */
+enum {
+ PEOBJ_SECT_ABS = -2,
+ PEOBJ_SECT_UNDEF = -1,
+ PEOBJ_SECT_TEXT,
+#if LJ_TARGET_X64
+ PEOBJ_SECT_PDATA,
+ PEOBJ_SECT_XDATA,
+#endif
+ PEOBJ_SECT_RDATA_Z,
+ PEOBJ_NSECTIONS
+};
+
+/* Symbol types. */
+#define PEOBJ_TYPE_NULL 0
+#define PEOBJ_TYPE_FUNC 0x20
+
+/* Symbol storage class. */
+#define PEOBJ_SCL_EXTERN 2
+#define PEOBJ_SCL_STATIC 3
+
+/* -- PE object emitter --------------------------------------------------- */
+
+/* Emit PE object symbol. */
+static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value,
+ int sect, int type, int scl)
+{
+ PEsym sym;
+ size_t len = strlen(name);
+ if (!strtab) { /* Pass 1: only calculate string table length. */
+ if (len > 8) strtabofs += len+1;
+ return;
+ }
+ if (len <= 8) {
+ memcpy(sym.n.name, name, len);
+ memset(sym.n.name+len, 0, 8-len);
+ } else {
+ sym.n.nameref[0] = 0;
+ sym.n.nameref[1] = (uint32_t)strtabofs;
+ memcpy(strtab + strtabofs, name, len);
+ strtab[strtabofs+len] = 0;
+ strtabofs += len+1;
+ }
+ sym.value = value;
+ sym.sect = (int16_t)(sect+1); /* 1-based section number. */
+ sym.type = (uint16_t)type;
+ sym.scl = (uint8_t)scl;
+ sym.naux = 0;
+ owrite(ctx, &sym, PEOBJ_SYM_SIZE);
+}
+
+/* Emit PE object section symbol. */
+static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect)
+{
+ PEsym sym;
+ PEsymaux aux;
+ if (!strtab) return; /* Pass 1: no output. */
+ memcpy(sym.n.name, pesect[sect].name, 8);
+ sym.value = 0;
+ sym.sect = (int16_t)(sect+1); /* 1-based section number. */
+ sym.type = PEOBJ_TYPE_NULL;
+ sym.scl = PEOBJ_SCL_STATIC;
+ sym.naux = 1;
+ owrite(ctx, &sym, PEOBJ_SYM_SIZE);
+ memset(&aux, 0, sizeof(PEsymaux));
+ aux.size = pesect[sect].size;
+ aux.nreloc = pesect[sect].nreloc;
+ owrite(ctx, &aux, PEOBJ_SYM_SIZE);
+}
+
+/* Emit Windows PE object file. */
+void emit_peobj(BuildCtx *ctx)
+{
+ PEheader pehdr;
+ PEsection pesect[PEOBJ_NSECTIONS];
+ uint32_t sofs;
+ int i, nrsym;
+ union { uint8_t b; uint32_t u; } host_endian;
+
+ sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection);
+
+ /* Fill in PE sections. */
+ memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection));
+ memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1);
+ pesect[PEOBJ_SECT_TEXT].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz);
+ pesect[PEOBJ_SECT_TEXT].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE;
+ /* Flags: 60 = read+execute, 50 = align16, 20 = code. */
+ pesect[PEOBJ_SECT_TEXT].flags = PEOBJ_TEXT_FLAGS;
+
+#if LJ_TARGET_X64
+ memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1);
+ pesect[PEOBJ_SECT_PDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_PDATA].size = 6*4);
+ pesect[PEOBJ_SECT_PDATA].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 6) * PEOBJ_RELOC_SIZE;
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_PDATA].flags = 0x40300040;
+
+ memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1);
+ pesect[PEOBJ_SECT_XDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4+6*2); /* See below. */
+ pesect[PEOBJ_SECT_XDATA].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE;
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_XDATA].flags = 0x40300040;
+#endif
+
+ memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1);
+ pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1);
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040;
+
+ /* Fill in PE header. */
+ pehdr.arch = PEOBJ_ARCH_TARGET;
+ pehdr.nsects = PEOBJ_NSECTIONS;
+ pehdr.time = 0; /* Timestamp is optional. */
+ pehdr.symtabofs = sofs;
+ pehdr.opthdrsz = 0;
+ pehdr.flags = 0;
+
+ /* Compute the size of the symbol table:
+ ** @feat.00 + nsections*2
+ ** + asm_start + nsym
+ ** + nrsym
+ */
+ nrsym = ctx->nrelocsym;
+ pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym;
+#if LJ_TARGET_X64
+ pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win64. */
+#endif
+
+ /* Write PE object header and all sections. */
+ owrite(ctx, &pehdr, sizeof(PEheader));
+ owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS);
+
+ /* Write .text section. */
+ host_endian.u = 1;
+ if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) {
+#if LJ_TARGET_PPC
+ uint32_t *p = (uint32_t *)ctx->code;
+ int n = (int)(ctx->codesz >> 2);
+ for (i = 0; i < n; i++, p++)
+ *p = lj_bswap(*p); /* Byteswap .text section. */
+#else
+ fprintf(stderr, "Error: different byte order for host and target\n");
+ exit(1);
+#endif
+ }
+ owrite(ctx, ctx->code, ctx->codesz);
+ for (i = 0; i < ctx->nreloc; i++) {
+ PEreloc reloc;
+ reloc.vaddr = (uint32_t)ctx->reloc[i].ofs + PEOBJ_RELOC_OFS;
+ reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */
+ reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+
+#if LJ_TARGET_X64
+ { /* Write .pdata section. */
+ uint32_t fcofs = (uint32_t)ctx->sym[ctx->nsym-1].ofs;
+ uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */
+ PEreloc reloc;
+ pdata[0] = 0; pdata[1] = fcofs; pdata[2] = 0;
+ owrite(ctx, &pdata, sizeof(pdata));
+ pdata[0] = fcofs; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 20;
+ owrite(ctx, &pdata, sizeof(pdata));
+ reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 12; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 16; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 20; reloc.symidx = 1+2+nrsym+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+ { /* Write .xdata section. */
+ uint16_t xdata[8+2+6];
+ PEreloc reloc;
+ xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhandler/ehandler, prolog size 0. */
+ xdata[1] = 0x0005; /* Number of unwind codes, no frame pointer. */
+ xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */
+ xdata[3] = 0x3000; /* Push rbx. */
+ xdata[4] = 0x6000; /* Push rsi. */
+ xdata[5] = 0x7000; /* Push rdi. */
+ xdata[6] = 0x5000; /* Push rbp. */
+ xdata[7] = 0; /* Alignment. */
+ xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */
+ xdata[10] = 0x01; /* Ver. 1, no handler, prolog size 0. */
+ xdata[11] = 0x1504; /* Number of unwind codes, fp = rbp, fpofs = 16. */
+ xdata[12] = 0x0300; /* set_fpreg. */
+ xdata[13] = 0x0200; /* stack offset 0*8+8 = aword*1. */
+ xdata[14] = 0x3000; /* Push rbx. */
+ xdata[15] = 0x5000; /* Push rbp. */
+ owrite(ctx, &xdata, sizeof(xdata));
+ reloc.vaddr = 2*8; reloc.symidx = 1+2+nrsym+2+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+#endif
+
+ /* Write .rdata$Z section. */
+ owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1);
+
+ /* Write symbol table. */
+ strtab = NULL; /* 1st pass: collect string sizes. */
+ for (;;) {
+ strtabofs = 4;
+ /* Mark as SafeSEH compliant. */
+ emit_peobj_sym(ctx, "@feat.00", 1,
+ PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC);
+
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT);
+ for (i = 0; i < nrsym; i++)
+ emit_peobj_sym(ctx, ctx->relocsym[i], 0,
+ PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+
+#if LJ_TARGET_X64
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA);
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA);
+ emit_peobj_sym(ctx, "lj_err_unwind_win64", 0,
+ PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+#endif
+
+ emit_peobj_sym(ctx, ctx->beginsym, 0,
+ PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN);
+ for (i = 0; i < ctx->nsym; i++)
+ emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs,
+ PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z);
+
+ if (strtab)
+ break;
+ /* 2nd pass: alloc strtab, write syms and copy strings. */
+ strtab = (char *)malloc(strtabofs);
+ *(uint32_t *)strtab = (uint32_t)strtabofs;
+ }
+
+ /* Write string table. */
+ owrite(ctx, strtab, strtabofs);
+}
+
+#else
+
+void emit_peobj(BuildCtx *ctx)
+{
+ UNUSED(ctx);
+ fprintf(stderr, "Error: no PE object support for this target\n");
+ exit(1);
+}
+
+#endif
diff --git a/3rdparty/lua/src/host/genminilua.lua b/3rdparty/lua/src/host/genminilua.lua
index ebb52d0..e666f08 100644
--- a/3rdparty/lua/src/host/genminilua.lua
+++ b/3rdparty/lua/src/host/genminilua.lua
@@ -1,428 +1,427 @@
-----------------------------------------------------------------------------
--- Lua script to generate a customized, minified version of Lua.
--- The resulting 'minilua' is used for the build process of LuaJIT.
-----------------------------------------------------------------------------
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
-
-local sub, match, gsub = string.sub, string.match, string.gsub
-
-local LUA_VERSION = "5.1.5"
-local LUA_SOURCE
-
-local function usage()
- io.stderr:write("Usage: ", arg and arg[0] or "genminilua",
- " lua-", LUA_VERSION, "-source-dir\n")
- os.exit(1)
-end
-
-local function find_sources()
- LUA_SOURCE = arg and arg[1]
- if not LUA_SOURCE then usage() end
- if sub(LUA_SOURCE, -1) ~= "/" then LUA_SOURCE = LUA_SOURCE.."/" end
- local fp = io.open(LUA_SOURCE .. "lua.h")
- if not fp then
- LUA_SOURCE = LUA_SOURCE.."src/"
- fp = io.open(LUA_SOURCE .. "lua.h")
- if not fp then usage() end
- end
- local all = fp:read("*a")
- fp:close()
- if not match(all, 'LUA_RELEASE%s*"Lua '..LUA_VERSION..'"') then
- io.stderr:write("Error: version mismatch\n")
- usage()
- end
-end
-
-local LUA_FILES = {
-"lmem.c", "lobject.c", "ltm.c", "lfunc.c", "ldo.c", "lstring.c", "ltable.c",
-"lgc.c", "lstate.c", "ldebug.c", "lzio.c", "lopcodes.c",
-"llex.c", "lcode.c", "lparser.c", "lvm.c", "lapi.c", "lauxlib.c",
-"lbaselib.c", "ltablib.c", "liolib.c", "loslib.c", "lstrlib.c", "linit.c",
-}
-
-local REMOVE_LIB = {}
-gsub([[
-collectgarbage dofile gcinfo getfenv getmetatable load print rawequal rawset
-select tostring xpcall
-foreach foreachi getn maxn setn
-popen tmpfile seek setvbuf __tostring
-clock date difftime execute getenv rename setlocale time tmpname
-dump gfind len reverse
-LUA_LOADLIBNAME LUA_MATHLIBNAME LUA_DBLIBNAME
-]], "%S+", function(name)
- REMOVE_LIB[name] = true
-end)
-
-local REMOVE_EXTINC = { ["<assert.h>"] = true, ["<locale.h>"] = true, }
-
-local CUSTOM_MAIN = [[
-typedef unsigned int UB;
-static UB barg(lua_State *L,int idx){
-union{lua_Number n;U64 b;}bn;
-bn.n=lua_tonumber(L,idx)+6755399441055744.0;
-if (bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number");
-return(UB)bn.b;
-}
-#define BRET(b) lua_pushnumber(L,(lua_Number)(int)(b));return 1;
-static int tobit(lua_State *L){
-BRET(barg(L,1))}
-static int bnot(lua_State *L){
-BRET(~barg(L,1))}
-static int band(lua_State *L){
-int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)}
-static int bor(lua_State *L){
-int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)}
-static int bxor(lua_State *L){
-int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)}
-static int lshift(lua_State *L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)}
-static int rshift(lua_State *L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)}
-static int arshift(lua_State *L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)}
-static int rol(lua_State *L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))}
-static int ror(lua_State *L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))}
-static int bswap(lua_State *L){
-UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)}
-static int tohex(lua_State *L){
-UB b=barg(L,1);
-int n=lua_isnone(L,2)?8:(int)barg(L,2);
-const char *hexdigits="0123456789abcdef";
-char buf[8];
-int i;
-if(n<0){n=-n;hexdigits="0123456789ABCDEF";}
-if(n>8)n=8;
-for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;}
-lua_pushlstring(L,buf,(size_t)n);
-return 1;
-}
-static const struct luaL_Reg bitlib[] = {
-{"tobit",tobit},
-{"bnot",bnot},
-{"band",band},
-{"bor",bor},
-{"bxor",bxor},
-{"lshift",lshift},
-{"rshift",rshift},
-{"arshift",arshift},
-{"rol",rol},
-{"ror",ror},
-{"bswap",bswap},
-{"tohex",tohex},
-{NULL,NULL}
-};
-int main(int argc, char **argv){
- lua_State *L = luaL_newstate();
- int i;
- luaL_openlibs(L);
- luaL_register(L, "bit", bitlib);
- if (argc < 2) return sizeof(void *);
- lua_createtable(L, 0, 1);
- lua_pushstring(L, argv[1]);
- lua_rawseti(L, -2, 0);
- lua_setglobal(L, "arg");
- if (luaL_loadfile(L, argv[1]))
- goto err;
- for (i = 2; i < argc; i++)
- lua_pushstring(L, argv[i]);
- if (lua_pcall(L, argc - 2, 0, 0)) {
- err:
- fprintf(stderr, "Error: %s\n", lua_tostring(L, -1));
- return 1;
- }
- lua_close(L);
- return 0;
-}
-]]
-
-local function read_sources()
- local t = {}
- for i, name in ipairs(LUA_FILES) do
- local fp = assert(io.open(LUA_SOURCE..name, "r"))
- t[i] = fp:read("*a")
- assert(fp:close())
- end
- t[#t+1] = CUSTOM_MAIN
- return table.concat(t)
-end
-
-local includes = {}
-
-local function merge_includes(src)
- return gsub(src, '#include%s*"([^"]*)"%s*\n', function(name)
- if includes[name] then return "" end
- includes[name] = true
- local fp = assert(io.open(LUA_SOURCE..name, "r"))
- local src = fp:read("*a")
- assert(fp:close())
- src = gsub(src, "#ifndef%s+%w+_h\n#define%s+%w+_h\n", "")
- src = gsub(src, "#endif%s*$", "")
- return merge_includes(src)
- end)
-end
-
-local function get_license(src)
- return match(src, "/%*+\n%* Copyright %(.-%*/\n")
-end
-
-local function fold_lines(src)
- return gsub(src, "\\\n", " ")
-end
-
-local strings = {}
-
-local function save_str(str)
- local n = #strings+1
- strings[n] = str
- return "\1"..n.."\2"
-end
-
-local function save_strings(src)
- src = gsub(src, '"[^"\n]*"', save_str)
- return gsub(src, "'[^'\n]*'", save_str)
-end
-
-local function restore_strings(src)
- return gsub(src, "\1(%d+)\2", function(numstr)
- return strings[tonumber(numstr)]
- end)
-end
-
-local function def_istrue(def)
- return def == "INT_MAX > 2147483640L" or
- def == "LUAI_BITSINT >= 32" or
- def == "SIZE_Bx < LUAI_BITSINT-1" or
- def == "cast" or
- def == "defined(LUA_CORE)" or
- def == "MINSTRTABSIZE" or
- def == "LUA_MINBUFFER" or
- def == "HARDSTACKTESTS" or
- def == "UNUSED"
-end
-
-local head, defs = {[[
-#ifdef _MSC_VER
-typedef unsigned __int64 U64;
-#else
-typedef unsigned long long U64;
-#endif
-int _CRT_glob = 0;
-]]}, {}
-
-local function preprocess(src)
- local t = { match(src, "^(.-)#") }
- local lvl, on, oldon = 0, true, {}
- for pp, def, txt in string.gmatch(src, "#(%w+) *([^\n]*)\n([^#]*)") do
- if pp == "if" or pp == "ifdef" or pp == "ifndef" then
- lvl = lvl + 1
- oldon[lvl] = on
- on = def_istrue(def)
- elseif pp == "else" then
- if oldon[lvl] then
- if on == false then on = true else on = false end
- end
- elseif pp == "elif" then
- if oldon[lvl] then
- on = def_istrue(def)
- end
- elseif pp == "endif" then
- on = oldon[lvl]
- lvl = lvl - 1
- elseif on then
- if pp == "include" then
- if not head[def] and not REMOVE_EXTINC[def] then
- head[def] = true
- head[#head+1] = "#include "..def.."\n"
- end
- elseif pp == "define" then
- local k, sp, v = match(def, "([%w_]+)(%s*)(.*)")
- if k and not (sp == "" and sub(v, 1, 1) == "(") then
- defs[k] = gsub(v, "%a[%w_]*", function(tok)
- return defs[tok] or tok
- end)
- else
- t[#t+1] = "#define "..def.."\n"
- end
- elseif pp ~= "undef" then
- error("unexpected directive: "..pp.." "..def)
- end
- end
- if on then t[#t+1] = txt end
- end
- return gsub(table.concat(t), "%a[%w_]*", function(tok)
- return defs[tok] or tok
- end)
-end
-
-local function merge_header(src, license)
- local hdr = string.format([[
-/* This is a heavily customized and minimized copy of Lua %s. */
-/* It's only used to build LuaJIT. It does NOT have all standard functions! */
-]], LUA_VERSION)
- return hdr..license..table.concat(head)..src
-end
-
-local function strip_unused1(src)
- return gsub(src, '( {"?([%w_]+)"?,%s+%a[%w_]*},\n)', function(line, func)
- return REMOVE_LIB[func] and "" or line
- end)
-end
-
-local function strip_unused2(src)
- return gsub(src, "Symbolic Execution.-}=", "")
-end
-
-local function strip_unused3(src)
- src = gsub(src, "extern", "static")
- src = gsub(src, "\nstatic([^\n]-)%(([^)]*)%)%(", "\nstatic%1 %2(")
- src = gsub(src, "#define lua_assert[^\n]*\n", "")
- src = gsub(src, "lua_assert%b();?", "")
- src = gsub(src, "default:\n}", "default:;\n}")
- src = gsub(src, "lua_lock%b();", "")
- src = gsub(src, "lua_unlock%b();", "")
- src = gsub(src, "luai_threadyield%b();", "")
- src = gsub(src, "luai_userstateopen%b();", "{}")
- src = gsub(src, "luai_userstate%w+%b();", "")
- src = gsub(src, "%(%(c==.*luaY_parser%)", "luaY_parser")
- src = gsub(src, "trydecpoint%(ls,seminfo%)",
- "luaX_lexerror(ls,\"malformed number\",TK_NUMBER)")
- src = gsub(src, "int c=luaZ_lookahead%b();", "")
- src = gsub(src, "luaL_register%(L,[^,]*,co_funcs%);\nreturn 2;",
- "return 1;")
- src = gsub(src, "getfuncname%b():", "NULL:")
- src = gsub(src, "getobjname%b():", "NULL:")
- src = gsub(src, "if%([^\n]*hookmask[^\n]*%)\n[^\n]*\n", "")
- src = gsub(src, "if%([^\n]*hookmask[^\n]*%)%b{}\n", "")
- src = gsub(src, "if%([^\n]*hookmask[^\n]*&&\n[^\n]*%b{}\n", "")
- src = gsub(src, "(twoto%b()%()", "%1(size_t)")
- src = gsub(src, "i<sizenode", "i<(int)sizenode")
- return gsub(src, "\n\n+", "\n")
-end
-
-local function strip_comments(src)
- return gsub(src, "/%*.-%*/", " ")
-end
-
-local function strip_whitespace(src)
- src = gsub(src, "^%s+", "")
- src = gsub(src, "%s*\n%s*", "\n")
- src = gsub(src, "[ \t]+", " ")
- src = gsub(src, "(%W) ", "%1")
- return gsub(src, " (%W)", "%1")
-end
-
-local function rename_tokens1(src)
- src = gsub(src, "getline", "getline_")
- src = gsub(src, "struct ([%w_]+)", "ZX%1")
- return gsub(src, "union ([%w_]+)", "ZY%1")
-end
-
-local function rename_tokens2(src)
- src = gsub(src, "ZX([%w_]+)", "struct %1")
- return gsub(src, "ZY([%w_]+)", "union %1")
-end
-
-local function func_gather(src)
- local nodes, list = {}, {}
- local pos, len = 1, #src
- while pos < len do
- local d, w = match(src, "^(#define ([%w_]+)[^\n]*\n)", pos)
- if d then
- local n = #list+1
- list[n] = d
- nodes[w] = n
- else
- local s
- d, w, s = match(src, "^(([%w_]+)[^\n]*([{;])\n)", pos)
- if not d then
- d, w, s = match(src, "^(([%w_]+)[^(]*%b()([{;])\n)", pos)
- if not d then d = match(src, "^[^\n]*\n", pos) end
- end
- if s == "{" then
- d = d..sub(match(src, "^%b{}[^;\n]*;?\n", pos+#d-2), 3)
- if sub(d, -2) == "{\n" then
- d = d..sub(match(src, "^%b{}[^;\n]*;?\n", pos+#d-2), 3)
- end
- end
- local k, v = nil, d
- if w == "typedef" then
- if match(d, "^typedef enum") then
- head[#head+1] = d
- else
- k = match(d, "([%w_]+);\n$")
- if not k then k = match(d, "^.-%(.-([%w_]+)%)%(") end
- end
- elseif w == "enum" then
- head[#head+1] = v
- elseif w ~= nil then
- k = match(d, "^[^\n]-([%w_]+)[(%[=]")
- if k then
- if w ~= "static" and k ~= "main" then v = "static "..d end
- else
- k = w
- end
- end
- if w and k then
- local o = nodes[k]
- if o then nodes["*"..k] = o end
- local n = #list+1
- list[n] = v
- nodes[k] = n
- end
- end
- pos = pos + #d
- end
- return nodes, list
-end
-
-local function func_visit(nodes, list, used, n)
- local i = nodes[n]
- for m in string.gmatch(list[i], "[%w_]+") do
- if nodes[m] then
- local j = used[m]
- if not j then
- used[m] = i
- func_visit(nodes, list, used, m)
- elseif i < j then
- used[m] = i
- end
- end
- end
-end
-
-local function func_collect(src)
- local nodes, list = func_gather(src)
- local used = {}
- func_visit(nodes, list, used, "main")
- for n,i in pairs(nodes) do
- local j = used[n]
- if j and j < i then used["*"..n] = j end
- end
- for n,i in pairs(nodes) do
- if not used[n] then list[i] = "" end
- end
- return table.concat(list)
-end
-
-find_sources()
-local src = read_sources()
-src = merge_includes(src)
-local license = get_license(src)
-src = fold_lines(src)
-src = strip_unused1(src)
-src = save_strings(src)
-src = strip_unused2(src)
-src = strip_comments(src)
-src = preprocess(src)
-src = strip_whitespace(src)
-src = strip_unused3(src)
-src = rename_tokens1(src)
-src = func_collect(src)
-src = rename_tokens2(src)
-src = restore_strings(src)
-src = merge_header(src, license)
-io.write(src)
+----------------------------------------------------------------------------
+-- Lua script to generate a customized, minified version of Lua.
+-- The resulting 'minilua' is used for the build process of LuaJIT.
+----------------------------------------------------------------------------
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+
+local sub, match, gsub = string.sub, string.match, string.gsub
+
+local LUA_VERSION = "5.1.5"
+local LUA_SOURCE
+
+local function usage()
+ io.stderr:write("Usage: ", arg and arg[0] or "genminilua",
+ " lua-", LUA_VERSION, "-source-dir\n")
+ os.exit(1)
+end
+
+local function find_sources()
+ LUA_SOURCE = arg and arg[1]
+ if not LUA_SOURCE then usage() end
+ if sub(LUA_SOURCE, -1) ~= "/" then LUA_SOURCE = LUA_SOURCE.."/" end
+ local fp = io.open(LUA_SOURCE .. "lua.h")
+ if not fp then
+ LUA_SOURCE = LUA_SOURCE.."src/"
+ fp = io.open(LUA_SOURCE .. "lua.h")
+ if not fp then usage() end
+ end
+ local all = fp:read("*a")
+ fp:close()
+ if not match(all, 'LUA_RELEASE%s*"Lua '..LUA_VERSION..'"') then
+ io.stderr:write("Error: version mismatch\n")
+ usage()
+ end
+end
+
+local LUA_FILES = {
+"lmem.c", "lobject.c", "ltm.c", "lfunc.c", "ldo.c", "lstring.c", "ltable.c",
+"lgc.c", "lstate.c", "ldebug.c", "lzio.c", "lopcodes.c",
+"llex.c", "lcode.c", "lparser.c", "lvm.c", "lapi.c", "lauxlib.c",
+"lbaselib.c", "ltablib.c", "liolib.c", "loslib.c", "lstrlib.c", "linit.c",
+}
+
+local REMOVE_LIB = {}
+gsub([[
+collectgarbage dofile gcinfo getfenv getmetatable load print rawequal rawset
+select tostring xpcall
+foreach foreachi getn maxn setn
+popen tmpfile seek setvbuf __tostring
+clock date difftime execute getenv rename setlocale time tmpname
+dump gfind len reverse
+LUA_LOADLIBNAME LUA_MATHLIBNAME LUA_DBLIBNAME
+]], "%S+", function(name)
+ REMOVE_LIB[name] = true
+end)
+
+local REMOVE_EXTINC = { ["<assert.h>"] = true, ["<locale.h>"] = true, }
+
+local CUSTOM_MAIN = [[
+typedef unsigned int UB;
+static UB barg(lua_State *L,int idx){
+union{lua_Number n;U64 b;}bn;
+bn.n=lua_tonumber(L,idx)+6755399441055744.0;
+if (bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number");
+return(UB)bn.b;
+}
+#define BRET(b) lua_pushnumber(L,(lua_Number)(int)(b));return 1;
+static int tobit(lua_State *L){
+BRET(barg(L,1))}
+static int bnot(lua_State *L){
+BRET(~barg(L,1))}
+static int band(lua_State *L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)}
+static int bor(lua_State *L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)}
+static int bxor(lua_State *L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)}
+static int lshift(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)}
+static int rshift(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)}
+static int arshift(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)}
+static int rol(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))}
+static int ror(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))}
+static int bswap(lua_State *L){
+UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)}
+static int tohex(lua_State *L){
+UB b=barg(L,1);
+int n=lua_isnone(L,2)?8:(int)barg(L,2);
+const char *hexdigits="0123456789abcdef";
+char buf[8];
+int i;
+if(n<0){n=-n;hexdigits="0123456789ABCDEF";}
+if(n>8)n=8;
+for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;}
+lua_pushlstring(L,buf,(size_t)n);
+return 1;
+}
+static const struct luaL_Reg bitlib[] = {
+{"tobit",tobit},
+{"bnot",bnot},
+{"band",band},
+{"bor",bor},
+{"bxor",bxor},
+{"lshift",lshift},
+{"rshift",rshift},
+{"arshift",arshift},
+{"rol",rol},
+{"ror",ror},
+{"bswap",bswap},
+{"tohex",tohex},
+{NULL,NULL}
+};
+int main(int argc, char **argv){
+ lua_State *L = luaL_newstate();
+ int i;
+ luaL_openlibs(L);
+ luaL_register(L, "bit", bitlib);
+ if (argc < 2) return sizeof(void *);
+ lua_createtable(L, 0, 1);
+ lua_pushstring(L, argv[1]);
+ lua_rawseti(L, -2, 0);
+ lua_setglobal(L, "arg");
+ if (luaL_loadfile(L, argv[1]))
+ goto err;
+ for (i = 2; i < argc; i++)
+ lua_pushstring(L, argv[i]);
+ if (lua_pcall(L, argc - 2, 0, 0)) {
+ err:
+ fprintf(stderr, "Error: %s\n", lua_tostring(L, -1));
+ return 1;
+ }
+ lua_close(L);
+ return 0;
+}
+]]
+
+local function read_sources()
+ local t = {}
+ for i, name in ipairs(LUA_FILES) do
+ local fp = assert(io.open(LUA_SOURCE..name, "r"))
+ t[i] = fp:read("*a")
+ assert(fp:close())
+ end
+ t[#t+1] = CUSTOM_MAIN
+ return table.concat(t)
+end
+
+local includes = {}
+
+local function merge_includes(src)
+ return gsub(src, '#include%s*"([^"]*)"%s*\n', function(name)
+ if includes[name] then return "" end
+ includes[name] = true
+ local fp = assert(io.open(LUA_SOURCE..name, "r"))
+ local src = fp:read("*a")
+ assert(fp:close())
+ src = gsub(src, "#ifndef%s+%w+_h\n#define%s+%w+_h\n", "")
+ src = gsub(src, "#endif%s*$", "")
+ return merge_includes(src)
+ end)
+end
+
+local function get_license(src)
+ return match(src, "/%*+\n%* Copyright %(.-%*/\n")
+end
+
+local function fold_lines(src)
+ return gsub(src, "\\\n", " ")
+end
+
+local strings = {}
+
+local function save_str(str)
+ local n = #strings+1
+ strings[n] = str
+ return "\1"..n.."\2"
+end
+
+local function save_strings(src)
+ src = gsub(src, '"[^"\n]*"', save_str)
+ return gsub(src, "'[^'\n]*'", save_str)
+end
+
+local function restore_strings(src)
+ return gsub(src, "\1(%d+)\2", function(numstr)
+ return strings[tonumber(numstr)]
+ end)
+end
+
+local function def_istrue(def)
+ return def == "INT_MAX > 2147483640L" or
+ def == "LUAI_BITSINT >= 32" or
+ def == "SIZE_Bx < LUAI_BITSINT-1" or
+ def == "cast" or
+ def == "defined(LUA_CORE)" or
+ def == "MINSTRTABSIZE" or
+ def == "LUA_MINBUFFER" or
+ def == "HARDSTACKTESTS" or
+ def == "UNUSED"
+end
+
+local head, defs = {[[
+#ifdef _MSC_VER
+typedef unsigned __int64 U64;
+#else
+typedef unsigned long long U64;
+#endif
+]]}, {}
+
+local function preprocess(src)
+ local t = { match(src, "^(.-)#") }
+ local lvl, on, oldon = 0, true, {}
+ for pp, def, txt in string.gmatch(src, "#(%w+) *([^\n]*)\n([^#]*)") do
+ if pp == "if" or pp == "ifdef" or pp == "ifndef" then
+ lvl = lvl + 1
+ oldon[lvl] = on
+ on = def_istrue(def)
+ elseif pp == "else" then
+ if oldon[lvl] then
+ if on == false then on = true else on = false end
+ end
+ elseif pp == "elif" then
+ if oldon[lvl] then
+ on = def_istrue(def)
+ end
+ elseif pp == "endif" then
+ on = oldon[lvl]
+ lvl = lvl - 1
+ elseif on then
+ if pp == "include" then
+ if not head[def] and not REMOVE_EXTINC[def] then
+ head[def] = true
+ head[#head+1] = "#include "..def.."\n"
+ end
+ elseif pp == "define" then
+ local k, sp, v = match(def, "([%w_]+)(%s*)(.*)")
+ if k and not (sp == "" and sub(v, 1, 1) == "(") then
+ defs[k] = gsub(v, "%a[%w_]*", function(tok)
+ return defs[tok] or tok
+ end)
+ else
+ t[#t+1] = "#define "..def.."\n"
+ end
+ elseif pp ~= "undef" then
+ error("unexpected directive: "..pp.." "..def)
+ end
+ end
+ if on then t[#t+1] = txt end
+ end
+ return gsub(table.concat(t), "%a[%w_]*", function(tok)
+ return defs[tok] or tok
+ end)
+end
+
+local function merge_header(src, license)
+ local hdr = string.format([[
+/* This is a heavily customized and minimized copy of Lua %s. */
+/* It's only used to build LuaJIT. It does NOT have all standard functions! */
+]], LUA_VERSION)
+ return hdr..license..table.concat(head)..src
+end
+
+local function strip_unused1(src)
+ return gsub(src, '( {"?([%w_]+)"?,%s+%a[%w_]*},\n)', function(line, func)
+ return REMOVE_LIB[func] and "" or line
+ end)
+end
+
+local function strip_unused2(src)
+ return gsub(src, "Symbolic Execution.-}=", "")
+end
+
+local function strip_unused3(src)
+ src = gsub(src, "extern", "static")
+ src = gsub(src, "\nstatic([^\n]-)%(([^)]*)%)%(", "\nstatic%1 %2(")
+ src = gsub(src, "#define lua_assert[^\n]*\n", "")
+ src = gsub(src, "lua_assert%b();?", "")
+ src = gsub(src, "default:\n}", "default:;\n}")
+ src = gsub(src, "lua_lock%b();", "")
+ src = gsub(src, "lua_unlock%b();", "")
+ src = gsub(src, "luai_threadyield%b();", "")
+ src = gsub(src, "luai_userstateopen%b();", "{}")
+ src = gsub(src, "luai_userstate%w+%b();", "")
+ src = gsub(src, "%(%(c==.*luaY_parser%)", "luaY_parser")
+ src = gsub(src, "trydecpoint%(ls,seminfo%)",
+ "luaX_lexerror(ls,\"malformed number\",TK_NUMBER)")
+ src = gsub(src, "int c=luaZ_lookahead%b();", "")
+ src = gsub(src, "luaL_register%(L,[^,]*,co_funcs%);\nreturn 2;",
+ "return 1;")
+ src = gsub(src, "getfuncname%b():", "NULL:")
+ src = gsub(src, "getobjname%b():", "NULL:")
+ src = gsub(src, "if%([^\n]*hookmask[^\n]*%)\n[^\n]*\n", "")
+ src = gsub(src, "if%([^\n]*hookmask[^\n]*%)%b{}\n", "")
+ src = gsub(src, "if%([^\n]*hookmask[^\n]*&&\n[^\n]*%b{}\n", "")
+ src = gsub(src, "(twoto%b()%()", "%1(size_t)")
+ src = gsub(src, "i<sizenode", "i<(int)sizenode")
+ return gsub(src, "\n\n+", "\n")
+end
+
+local function strip_comments(src)
+ return gsub(src, "/%*.-%*/", " ")
+end
+
+local function strip_whitespace(src)
+ src = gsub(src, "^%s+", "")
+ src = gsub(src, "%s*\n%s*", "\n")
+ src = gsub(src, "[ \t]+", " ")
+ src = gsub(src, "(%W) ", "%1")
+ return gsub(src, " (%W)", "%1")
+end
+
+local function rename_tokens1(src)
+ src = gsub(src, "getline", "getline_")
+ src = gsub(src, "struct ([%w_]+)", "ZX%1")
+ return gsub(src, "union ([%w_]+)", "ZY%1")
+end
+
+local function rename_tokens2(src)
+ src = gsub(src, "ZX([%w_]+)", "struct %1")
+ return gsub(src, "ZY([%w_]+)", "union %1")
+end
+
+local function func_gather(src)
+ local nodes, list = {}, {}
+ local pos, len = 1, #src
+ while pos < len do
+ local d, w = match(src, "^(#define ([%w_]+)[^\n]*\n)", pos)
+ if d then
+ local n = #list+1
+ list[n] = d
+ nodes[w] = n
+ else
+ local s
+ d, w, s = match(src, "^(([%w_]+)[^\n]*([{;])\n)", pos)
+ if not d then
+ d, w, s = match(src, "^(([%w_]+)[^(]*%b()([{;])\n)", pos)
+ if not d then d = match(src, "^[^\n]*\n", pos) end
+ end
+ if s == "{" then
+ d = d..sub(match(src, "^%b{}[^;\n]*;?\n", pos+#d-2), 3)
+ if sub(d, -2) == "{\n" then
+ d = d..sub(match(src, "^%b{}[^;\n]*;?\n", pos+#d-2), 3)
+ end
+ end
+ local k, v = nil, d
+ if w == "typedef" then
+ if match(d, "^typedef enum") then
+ head[#head+1] = d
+ else
+ k = match(d, "([%w_]+);\n$")
+ if not k then k = match(d, "^.-%(.-([%w_]+)%)%(") end
+ end
+ elseif w == "enum" then
+ head[#head+1] = v
+ elseif w ~= nil then
+ k = match(d, "^[^\n]-([%w_]+)[(%[=]")
+ if k then
+ if w ~= "static" and k ~= "main" then v = "static "..d end
+ else
+ k = w
+ end
+ end
+ if w and k then
+ local o = nodes[k]
+ if o then nodes["*"..k] = o end
+ local n = #list+1
+ list[n] = v
+ nodes[k] = n
+ end
+ end
+ pos = pos + #d
+ end
+ return nodes, list
+end
+
+local function func_visit(nodes, list, used, n)
+ local i = nodes[n]
+ for m in string.gmatch(list[i], "[%w_]+") do
+ if nodes[m] then
+ local j = used[m]
+ if not j then
+ used[m] = i
+ func_visit(nodes, list, used, m)
+ elseif i < j then
+ used[m] = i
+ end
+ end
+ end
+end
+
+local function func_collect(src)
+ local nodes, list = func_gather(src)
+ local used = {}
+ func_visit(nodes, list, used, "main")
+ for n,i in pairs(nodes) do
+ local j = used[n]
+ if j and j < i then used["*"..n] = j end
+ end
+ for n,i in pairs(nodes) do
+ if not used[n] then list[i] = "" end
+ end
+ return table.concat(list)
+end
+
+find_sources()
+local src = read_sources()
+src = merge_includes(src)
+local license = get_license(src)
+src = fold_lines(src)
+src = strip_unused1(src)
+src = save_strings(src)
+src = strip_unused2(src)
+src = strip_comments(src)
+src = preprocess(src)
+src = strip_whitespace(src)
+src = strip_unused3(src)
+src = rename_tokens1(src)
+src = func_collect(src)
+src = rename_tokens2(src)
+src = restore_strings(src)
+src = merge_header(src, license)
+io.write(src)
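(For orientation only, a minimal standalone sketch, not part of the patch: the pipeline above relies on save_strings()/restore_strings() swapping every string literal for an opaque \1<index>\2 placeholder so that the later destructive gsub passes cannot touch literal contents. The sample input below is invented to show the round trip in plain Lua 5.1.)

local strings = {}
local function save_str(str)
  local n = #strings + 1
  strings[n] = str
  return "\1"..n.."\2"   -- opaque marker; later gsub passes leave it alone
end
-- hypothetical input: a string literal whose contents look like a C comment
local src = [[printf("a /* not a comment */ b"); /* real comment */]]
src = src:gsub('"[^"\n]*"', save_str)            -- save_strings step
src = src:gsub("/%*.-%*/", " ")                  -- strip_comments step
src = src:gsub("\1(%d+)\2",                      -- restore_strings step
  function(n) return strings[tonumber(n)] end)
print(src)  -- the literal is intact; only the real comment was blanked
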
diff --git a/3rdparty/lua/src/host/minilua.c b/3rdparty/lua/src/host/minilua.c
index 4cc1e1d..93e9273 100644
--- a/3rdparty/lua/src/host/minilua.c
+++ b/3rdparty/lua/src/host/minilua.c
@@ -1,7770 +1,7769 @@
-/* This is a heavily customized and minimized copy of Lua 5.1.5. */
-/* It's only used to build LuaJIT. It does NOT have all standard functions! */
-/******************************************************************************
-* Copyright (C) 1994-2012 Lua.org, PUC-Rio. All rights reserved.
-*
-* Permission is hereby granted, free of charge, to any person obtaining
-* a copy of this software and associated documentation files (the
-* "Software"), to deal in the Software without restriction, including
-* without limitation the rights to use, copy, modify, merge, publish,
-* distribute, sublicense, and/or sell copies of the Software, and to
-* permit persons to whom the Software is furnished to do so, subject to
-* the following conditions:
-*
-* The above copyright notice and this permission notice shall be
-* included in all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-******************************************************************************/
-#ifdef _MSC_VER
-typedef unsigned __int64 U64;
-#else
-typedef unsigned long long U64;
-#endif
-int _CRT_glob = 0;
-#include <stddef.h>
-#include <stdarg.h>
-#include <limits.h>
-#include <math.h>
-#include <ctype.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <setjmp.h>
-#include <errno.h>
-#include <time.h>
-typedef enum{
-TM_INDEX,
-TM_NEWINDEX,
-TM_GC,
-TM_MODE,
-TM_EQ,
-TM_ADD,
-TM_SUB,
-TM_MUL,
-TM_DIV,
-TM_MOD,
-TM_POW,
-TM_UNM,
-TM_LEN,
-TM_LT,
-TM_LE,
-TM_CONCAT,
-TM_CALL,
-TM_N
-}TMS;
-enum OpMode{iABC,iABx,iAsBx};
-typedef enum{
-OP_MOVE,
-OP_LOADK,
-OP_LOADBOOL,
-OP_LOADNIL,
-OP_GETUPVAL,
-OP_GETGLOBAL,
-OP_GETTABLE,
-OP_SETGLOBAL,
-OP_SETUPVAL,
-OP_SETTABLE,
-OP_NEWTABLE,
-OP_SELF,
-OP_ADD,
-OP_SUB,
-OP_MUL,
-OP_DIV,
-OP_MOD,
-OP_POW,
-OP_UNM,
-OP_NOT,
-OP_LEN,
-OP_CONCAT,
-OP_JMP,
-OP_EQ,
-OP_LT,
-OP_LE,
-OP_TEST,
-OP_TESTSET,
-OP_CALL,
-OP_TAILCALL,
-OP_RETURN,
-OP_FORLOOP,
-OP_FORPREP,
-OP_TFORLOOP,
-OP_SETLIST,
-OP_CLOSE,
-OP_CLOSURE,
-OP_VARARG
-}OpCode;
-enum OpArgMask{
-OpArgN,
-OpArgU,
-OpArgR,
-OpArgK
-};
-typedef enum{
-VVOID,
-VNIL,
-VTRUE,
-VFALSE,
-VK,
-VKNUM,
-VLOCAL,
-VUPVAL,
-VGLOBAL,
-VINDEXED,
-VJMP,
-VRELOCABLE,
-VNONRELOC,
-VCALL,
-VVARARG
-}expkind;
-enum RESERVED{
-TK_AND=257,TK_BREAK,
-TK_DO,TK_ELSE,TK_ELSEIF,TK_END,TK_FALSE,TK_FOR,TK_FUNCTION,
-TK_IF,TK_IN,TK_LOCAL,TK_NIL,TK_NOT,TK_OR,TK_REPEAT,
-TK_RETURN,TK_THEN,TK_TRUE,TK_UNTIL,TK_WHILE,
-TK_CONCAT,TK_DOTS,TK_EQ,TK_GE,TK_LE,TK_NE,TK_NUMBER,
-TK_NAME,TK_STRING,TK_EOS
-};
-typedef enum BinOpr{
-OPR_ADD,OPR_SUB,OPR_MUL,OPR_DIV,OPR_MOD,OPR_POW,
-OPR_CONCAT,
-OPR_NE,OPR_EQ,
-OPR_LT,OPR_LE,OPR_GT,OPR_GE,
-OPR_AND,OPR_OR,
-OPR_NOBINOPR
-}BinOpr;
-typedef enum UnOpr{OPR_MINUS,OPR_NOT,OPR_LEN,OPR_NOUNOPR}UnOpr;
-#define LUA_QL(x)"'"x"'"
-#define luai_apicheck(L,o){(void)L;}
-#define lua_number2str(s,n)sprintf((s),"%.14g",(n))
-#define lua_str2number(s,p)strtod((s),(p))
-#define luai_numadd(a,b)((a)+(b))
-#define luai_numsub(a,b)((a)-(b))
-#define luai_nummul(a,b)((a)*(b))
-#define luai_numdiv(a,b)((a)/(b))
-#define luai_nummod(a,b)((a)-floor((a)/(b))*(b))
-#define luai_numpow(a,b)(pow(a,b))
-#define luai_numunm(a)(-(a))
-#define luai_numeq(a,b)((a)==(b))
-#define luai_numlt(a,b)((a)<(b))
-#define luai_numle(a,b)((a)<=(b))
-#define luai_numisnan(a)(!luai_numeq((a),(a)))
-#define lua_number2int(i,d)((i)=(int)(d))
-#define lua_number2integer(i,d)((i)=(lua_Integer)(d))
-#define LUAI_THROW(L,c)longjmp((c)->b,1)
-#define LUAI_TRY(L,c,a)if(setjmp((c)->b)==0){a}
-#define lua_pclose(L,file)((void)((void)L,file),0)
-#define lua_upvalueindex(i)((-10002)-(i))
-typedef struct lua_State lua_State;
-typedef int(*lua_CFunction)(lua_State*L);
-typedef const char*(*lua_Reader)(lua_State*L,void*ud,size_t*sz);
-typedef void*(*lua_Alloc)(void*ud,void*ptr,size_t osize,size_t nsize);
-typedef double lua_Number;
-typedef ptrdiff_t lua_Integer;
-static void lua_settop(lua_State*L,int idx);
-static int lua_type(lua_State*L,int idx);
-static const char* lua_tolstring(lua_State*L,int idx,size_t*len);
-static size_t lua_objlen(lua_State*L,int idx);
-static void lua_pushlstring(lua_State*L,const char*s,size_t l);
-static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n);
-static void lua_createtable(lua_State*L,int narr,int nrec);
-static void lua_setfield(lua_State*L,int idx,const char*k);
-#define lua_pop(L,n)lua_settop(L,-(n)-1)
-#define lua_newtable(L)lua_createtable(L,0,0)
-#define lua_pushcfunction(L,f)lua_pushcclosure(L,(f),0)
-#define lua_strlen(L,i)lua_objlen(L,(i))
-#define lua_isfunction(L,n)(lua_type(L,(n))==6)
-#define lua_istable(L,n)(lua_type(L,(n))==5)
-#define lua_isnil(L,n)(lua_type(L,(n))==0)
-#define lua_isboolean(L,n)(lua_type(L,(n))==1)
-#define lua_isnone(L,n)(lua_type(L,(n))==(-1))
-#define lua_isnoneornil(L,n)(lua_type(L,(n))<=0)
-#define lua_pushliteral(L,s)lua_pushlstring(L,""s,(sizeof(s)/sizeof(char))-1)
-#define lua_setglobal(L,s)lua_setfield(L,(-10002),(s))
-#define lua_tostring(L,i)lua_tolstring(L,(i),NULL)
-typedef struct lua_Debug lua_Debug;
-typedef void(*lua_Hook)(lua_State*L,lua_Debug*ar);
-struct lua_Debug{
-int event;
-const char*name;
-const char*namewhat;
-const char*what;
-const char*source;
-int currentline;
-int nups;
-int linedefined;
-int lastlinedefined;
-char short_src[60];
-int i_ci;
-};
-typedef unsigned int lu_int32;
-typedef size_t lu_mem;
-typedef ptrdiff_t l_mem;
-typedef unsigned char lu_byte;
-#define IntPoint(p)((unsigned int)(lu_mem)(p))
-typedef union{double u;void*s;long l;}L_Umaxalign;
-typedef double l_uacNumber;
-#define check_exp(c,e)(e)
-#define UNUSED(x)((void)(x))
-#define cast(t,exp)((t)(exp))
-#define cast_byte(i)cast(lu_byte,(i))
-#define cast_num(i)cast(lua_Number,(i))
-#define cast_int(i)cast(int,(i))
-typedef lu_int32 Instruction;
-#define condhardstacktests(x)((void)0)
-typedef union GCObject GCObject;
-typedef struct GCheader{
-GCObject*next;lu_byte tt;lu_byte marked;
-}GCheader;
-typedef union{
-GCObject*gc;
-void*p;
-lua_Number n;
-int b;
-}Value;
-typedef struct lua_TValue{
-Value value;int tt;
-}TValue;
-#define ttisnil(o)(ttype(o)==0)
-#define ttisnumber(o)(ttype(o)==3)
-#define ttisstring(o)(ttype(o)==4)
-#define ttistable(o)(ttype(o)==5)
-#define ttisfunction(o)(ttype(o)==6)
-#define ttisboolean(o)(ttype(o)==1)
-#define ttisuserdata(o)(ttype(o)==7)
-#define ttisthread(o)(ttype(o)==8)
-#define ttislightuserdata(o)(ttype(o)==2)
-#define ttype(o)((o)->tt)
-#define gcvalue(o)check_exp(iscollectable(o),(o)->value.gc)
-#define pvalue(o)check_exp(ttislightuserdata(o),(o)->value.p)
-#define nvalue(o)check_exp(ttisnumber(o),(o)->value.n)
-#define rawtsvalue(o)check_exp(ttisstring(o),&(o)->value.gc->ts)
-#define tsvalue(o)(&rawtsvalue(o)->tsv)
-#define rawuvalue(o)check_exp(ttisuserdata(o),&(o)->value.gc->u)
-#define uvalue(o)(&rawuvalue(o)->uv)
-#define clvalue(o)check_exp(ttisfunction(o),&(o)->value.gc->cl)
-#define hvalue(o)check_exp(ttistable(o),&(o)->value.gc->h)
-#define bvalue(o)check_exp(ttisboolean(o),(o)->value.b)
-#define thvalue(o)check_exp(ttisthread(o),&(o)->value.gc->th)
-#define l_isfalse(o)(ttisnil(o)||(ttisboolean(o)&&bvalue(o)==0))
-#define checkconsistency(obj)
-#define checkliveness(g,obj)
-#define setnilvalue(obj)((obj)->tt=0)
-#define setnvalue(obj,x){TValue*i_o=(obj);i_o->value.n=(x);i_o->tt=3;}
-#define setbvalue(obj,x){TValue*i_o=(obj);i_o->value.b=(x);i_o->tt=1;}
-#define setsvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=4;checkliveness(G(L),i_o);}
-#define setuvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=7;checkliveness(G(L),i_o);}
-#define setthvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=8;checkliveness(G(L),i_o);}
-#define setclvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=6;checkliveness(G(L),i_o);}
-#define sethvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=5;checkliveness(G(L),i_o);}
-#define setptvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=(8+1);checkliveness(G(L),i_o);}
-#define setobj(L,obj1,obj2){const TValue*o2=(obj2);TValue*o1=(obj1);o1->value=o2->value;o1->tt=o2->tt;checkliveness(G(L),o1);}
-#define setttype(obj,tt)(ttype(obj)=(tt))
-#define iscollectable(o)(ttype(o)>=4)
-typedef TValue*StkId;
-typedef union TString{
-L_Umaxalign dummy;
-struct{
-GCObject*next;lu_byte tt;lu_byte marked;
-lu_byte reserved;
-unsigned int hash;
-size_t len;
-}tsv;
-}TString;
-#define getstr(ts)cast(const char*,(ts)+1)
-#define svalue(o)getstr(rawtsvalue(o))
-typedef union Udata{
-L_Umaxalign dummy;
-struct{
-GCObject*next;lu_byte tt;lu_byte marked;
-struct Table*metatable;
-struct Table*env;
-size_t len;
-}uv;
-}Udata;
-typedef struct Proto{
-GCObject*next;lu_byte tt;lu_byte marked;
-TValue*k;
-Instruction*code;
-struct Proto**p;
-int*lineinfo;
-struct LocVar*locvars;
-TString**upvalues;
-TString*source;
-int sizeupvalues;
-int sizek;
-int sizecode;
-int sizelineinfo;
-int sizep;
-int sizelocvars;
-int linedefined;
-int lastlinedefined;
-GCObject*gclist;
-lu_byte nups;
-lu_byte numparams;
-lu_byte is_vararg;
-lu_byte maxstacksize;
-}Proto;
-typedef struct LocVar{
-TString*varname;
-int startpc;
-int endpc;
-}LocVar;
-typedef struct UpVal{
-GCObject*next;lu_byte tt;lu_byte marked;
-TValue*v;
-union{
-TValue value;
-struct{
-struct UpVal*prev;
-struct UpVal*next;
-}l;
-}u;
-}UpVal;
-typedef struct CClosure{
-GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env;
-lua_CFunction f;
-TValue upvalue[1];
-}CClosure;
-typedef struct LClosure{
-GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env;
-struct Proto*p;
-UpVal*upvals[1];
-}LClosure;
-typedef union Closure{
-CClosure c;
-LClosure l;
-}Closure;
-#define iscfunction(o)(ttype(o)==6&&clvalue(o)->c.isC)
-typedef union TKey{
-struct{
-Value value;int tt;
-struct Node*next;
-}nk;
-TValue tvk;
-}TKey;
-typedef struct Node{
-TValue i_val;
-TKey i_key;
-}Node;
-typedef struct Table{
-GCObject*next;lu_byte tt;lu_byte marked;
-lu_byte flags;
-lu_byte lsizenode;
-struct Table*metatable;
-TValue*array;
-Node*node;
-Node*lastfree;
-GCObject*gclist;
-int sizearray;
-}Table;
-#define lmod(s,size)(check_exp((size&(size-1))==0,(cast(int,(s)&((size)-1)))))
-#define twoto(x)((size_t)1<<(x))
-#define sizenode(t)(twoto((t)->lsizenode))
-static const TValue luaO_nilobject_;
-#define ceillog2(x)(luaO_log2((x)-1)+1)
-static int luaO_log2(unsigned int x);
-#define gfasttm(g,et,e)((et)==NULL?NULL:((et)->flags&(1u<<(e)))?NULL:luaT_gettm(et,e,(g)->tmname[e]))
-#define fasttm(l,et,e)gfasttm(G(l),et,e)
-static const TValue*luaT_gettm(Table*events,TMS event,TString*ename);
-#define luaM_reallocv(L,b,on,n,e)((cast(size_t,(n)+1)<=((size_t)(~(size_t)0)-2)/(e))?luaM_realloc_(L,(b),(on)*(e),(n)*(e)):luaM_toobig(L))
-#define luaM_freemem(L,b,s)luaM_realloc_(L,(b),(s),0)
-#define luaM_free(L,b)luaM_realloc_(L,(b),sizeof(*(b)),0)
-#define luaM_freearray(L,b,n,t)luaM_reallocv(L,(b),n,0,sizeof(t))
-#define luaM_malloc(L,t)luaM_realloc_(L,NULL,0,(t))
-#define luaM_new(L,t)cast(t*,luaM_malloc(L,sizeof(t)))
-#define luaM_newvector(L,n,t)cast(t*,luaM_reallocv(L,NULL,0,n,sizeof(t)))
-#define luaM_growvector(L,v,nelems,size,t,limit,e)if((nelems)+1>(size))((v)=cast(t*,luaM_growaux_(L,v,&(size),sizeof(t),limit,e)))
-#define luaM_reallocvector(L,v,oldn,n,t)((v)=cast(t*,luaM_reallocv(L,v,oldn,n,sizeof(t))))
-static void*luaM_realloc_(lua_State*L,void*block,size_t oldsize,
-size_t size);
-static void*luaM_toobig(lua_State*L);
-static void*luaM_growaux_(lua_State*L,void*block,int*size,
-size_t size_elem,int limit,
-const char*errormsg);
-typedef struct Zio ZIO;
-#define char2int(c)cast(int,cast(unsigned char,(c)))
-#define zgetc(z)(((z)->n--)>0?char2int(*(z)->p++):luaZ_fill(z))
-typedef struct Mbuffer{
-char*buffer;
-size_t n;
-size_t buffsize;
-}Mbuffer;
-#define luaZ_initbuffer(L,buff)((buff)->buffer=NULL,(buff)->buffsize=0)
-#define luaZ_buffer(buff)((buff)->buffer)
-#define luaZ_sizebuffer(buff)((buff)->buffsize)
-#define luaZ_bufflen(buff)((buff)->n)
-#define luaZ_resetbuffer(buff)((buff)->n=0)
-#define luaZ_resizebuffer(L,buff,size)(luaM_reallocvector(L,(buff)->buffer,(buff)->buffsize,size,char),(buff)->buffsize=size)
-#define luaZ_freebuffer(L,buff)luaZ_resizebuffer(L,buff,0)
-struct Zio{
-size_t n;
-const char*p;
-lua_Reader reader;
-void*data;
-lua_State*L;
-};
-static int luaZ_fill(ZIO*z);
-struct lua_longjmp;
-#define gt(L)(&L->l_gt)
-#define registry(L)(&G(L)->l_registry)
-typedef struct stringtable{
-GCObject**hash;
-lu_int32 nuse;
-int size;
-}stringtable;
-typedef struct CallInfo{
-StkId base;
-StkId func;
-StkId top;
-const Instruction*savedpc;
-int nresults;
-int tailcalls;
-}CallInfo;
-#define curr_func(L)(clvalue(L->ci->func))
-#define ci_func(ci)(clvalue((ci)->func))
-#define f_isLua(ci)(!ci_func(ci)->c.isC)
-#define isLua(ci)(ttisfunction((ci)->func)&&f_isLua(ci))
-typedef struct global_State{
-stringtable strt;
-lua_Alloc frealloc;
-void*ud;
-lu_byte currentwhite;
-lu_byte gcstate;
-int sweepstrgc;
-GCObject*rootgc;
-GCObject**sweepgc;
-GCObject*gray;
-GCObject*grayagain;
-GCObject*weak;
-GCObject*tmudata;
-Mbuffer buff;
-lu_mem GCthreshold;
-lu_mem totalbytes;
-lu_mem estimate;
-lu_mem gcdept;
-int gcpause;
-int gcstepmul;
-lua_CFunction panic;
-TValue l_registry;
-struct lua_State*mainthread;
-UpVal uvhead;
-struct Table*mt[(8+1)];
-TString*tmname[TM_N];
-}global_State;
-struct lua_State{
-GCObject*next;lu_byte tt;lu_byte marked;
-lu_byte status;
-StkId top;
-StkId base;
-global_State*l_G;
-CallInfo*ci;
-const Instruction*savedpc;
-StkId stack_last;
-StkId stack;
-CallInfo*end_ci;
-CallInfo*base_ci;
-int stacksize;
-int size_ci;
-unsigned short nCcalls;
-unsigned short baseCcalls;
-lu_byte hookmask;
-lu_byte allowhook;
-int basehookcount;
-int hookcount;
-lua_Hook hook;
-TValue l_gt;
-TValue env;
-GCObject*openupval;
-GCObject*gclist;
-struct lua_longjmp*errorJmp;
-ptrdiff_t errfunc;
-};
-#define G(L)(L->l_G)
-union GCObject{
-GCheader gch;
-union TString ts;
-union Udata u;
-union Closure cl;
-struct Table h;
-struct Proto p;
-struct UpVal uv;
-struct lua_State th;
-};
-#define rawgco2ts(o)check_exp((o)->gch.tt==4,&((o)->ts))
-#define gco2ts(o)(&rawgco2ts(o)->tsv)
-#define rawgco2u(o)check_exp((o)->gch.tt==7,&((o)->u))
-#define gco2u(o)(&rawgco2u(o)->uv)
-#define gco2cl(o)check_exp((o)->gch.tt==6,&((o)->cl))
-#define gco2h(o)check_exp((o)->gch.tt==5,&((o)->h))
-#define gco2p(o)check_exp((o)->gch.tt==(8+1),&((o)->p))
-#define gco2uv(o)check_exp((o)->gch.tt==(8+2),&((o)->uv))
-#define ngcotouv(o)check_exp((o)==NULL||(o)->gch.tt==(8+2),&((o)->uv))
-#define gco2th(o)check_exp((o)->gch.tt==8,&((o)->th))
-#define obj2gco(v)(cast(GCObject*,(v)))
-static void luaE_freethread(lua_State*L,lua_State*L1);
-#define pcRel(pc,p)(cast(int,(pc)-(p)->code)-1)
-#define getline_(f,pc)(((f)->lineinfo)?(f)->lineinfo[pc]:0)
-#define resethookcount(L)(L->hookcount=L->basehookcount)
-static void luaG_typeerror(lua_State*L,const TValue*o,
-const char*opname);
-static void luaG_runerror(lua_State*L,const char*fmt,...);
-#define luaD_checkstack(L,n)if((char*)L->stack_last-(char*)L->top<=(n)*(int)sizeof(TValue))luaD_growstack(L,n);else condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));
-#define incr_top(L){luaD_checkstack(L,1);L->top++;}
-#define savestack(L,p)((char*)(p)-(char*)L->stack)
-#define restorestack(L,n)((TValue*)((char*)L->stack+(n)))
-#define saveci(L,p)((char*)(p)-(char*)L->base_ci)
-#define restoreci(L,n)((CallInfo*)((char*)L->base_ci+(n)))
-typedef void(*Pfunc)(lua_State*L,void*ud);
-static int luaD_poscall(lua_State*L,StkId firstResult);
-static void luaD_reallocCI(lua_State*L,int newsize);
-static void luaD_reallocstack(lua_State*L,int newsize);
-static void luaD_growstack(lua_State*L,int n);
-static void luaD_throw(lua_State*L,int errcode);
-static void*luaM_growaux_(lua_State*L,void*block,int*size,size_t size_elems,
-int limit,const char*errormsg){
-void*newblock;
-int newsize;
-if(*size>=limit/2){
-if(*size>=limit)
-luaG_runerror(L,errormsg);
-newsize=limit;
-}
-else{
-newsize=(*size)*2;
-if(newsize<4)
-newsize=4;
-}
-newblock=luaM_reallocv(L,block,*size,newsize,size_elems);
-*size=newsize;
-return newblock;
-}
-static void*luaM_toobig(lua_State*L){
-luaG_runerror(L,"memory allocation error: block too big");
-return NULL;
-}
-static void*luaM_realloc_(lua_State*L,void*block,size_t osize,size_t nsize){
-global_State*g=G(L);
-block=(*g->frealloc)(g->ud,block,osize,nsize);
-if(block==NULL&&nsize>0)
-luaD_throw(L,4);
-g->totalbytes=(g->totalbytes-osize)+nsize;
-return block;
-}
-#define resetbits(x,m)((x)&=cast(lu_byte,~(m)))
-#define setbits(x,m)((x)|=(m))
-#define testbits(x,m)((x)&(m))
-#define bitmask(b)(1<<(b))
-#define bit2mask(b1,b2)(bitmask(b1)|bitmask(b2))
-#define l_setbit(x,b)setbits(x,bitmask(b))
-#define resetbit(x,b)resetbits(x,bitmask(b))
-#define testbit(x,b)testbits(x,bitmask(b))
-#define set2bits(x,b1,b2)setbits(x,(bit2mask(b1,b2)))
-#define reset2bits(x,b1,b2)resetbits(x,(bit2mask(b1,b2)))
-#define test2bits(x,b1,b2)testbits(x,(bit2mask(b1,b2)))
-#define iswhite(x)test2bits((x)->gch.marked,0,1)
-#define isblack(x)testbit((x)->gch.marked,2)
-#define isgray(x)(!isblack(x)&&!iswhite(x))
-#define otherwhite(g)(g->currentwhite^bit2mask(0,1))
-#define isdead(g,v)((v)->gch.marked&otherwhite(g)&bit2mask(0,1))
-#define changewhite(x)((x)->gch.marked^=bit2mask(0,1))
-#define gray2black(x)l_setbit((x)->gch.marked,2)
-#define valiswhite(x)(iscollectable(x)&&iswhite(gcvalue(x)))
-#define luaC_white(g)cast(lu_byte,(g)->currentwhite&bit2mask(0,1))
-#define luaC_checkGC(L){condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));if(G(L)->totalbytes>=G(L)->GCthreshold)luaC_step(L);}
-#define luaC_barrier(L,p,v){if(valiswhite(v)&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),gcvalue(v));}
-#define luaC_barriert(L,t,v){if(valiswhite(v)&&isblack(obj2gco(t)))luaC_barrierback(L,t);}
-#define luaC_objbarrier(L,p,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),obj2gco(o));}
-#define luaC_objbarriert(L,t,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(t)))luaC_barrierback(L,t);}
-static void luaC_step(lua_State*L);
-static void luaC_link(lua_State*L,GCObject*o,lu_byte tt);
-static void luaC_linkupval(lua_State*L,UpVal*uv);
-static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v);
-static void luaC_barrierback(lua_State*L,Table*t);
-#define sizestring(s)(sizeof(union TString)+((s)->len+1)*sizeof(char))
-#define sizeudata(u)(sizeof(union Udata)+(u)->len)
-#define luaS_new(L,s)(luaS_newlstr(L,s,strlen(s)))
-#define luaS_newliteral(L,s)(luaS_newlstr(L,""s,(sizeof(s)/sizeof(char))-1))
-#define luaS_fix(s)l_setbit((s)->tsv.marked,5)
-static TString*luaS_newlstr(lua_State*L,const char*str,size_t l);
-#define tostring(L,o)((ttype(o)==4)||(luaV_tostring(L,o)))
-#define tonumber(o,n)(ttype(o)==3||(((o)=luaV_tonumber(o,n))!=NULL))
-#define equalobj(L,o1,o2)(ttype(o1)==ttype(o2)&&luaV_equalval(L,o1,o2))
-static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2);
-static const TValue*luaV_tonumber(const TValue*obj,TValue*n);
-static int luaV_tostring(lua_State*L,StkId obj);
-static void luaV_execute(lua_State*L,int nexeccalls);
-static void luaV_concat(lua_State*L,int total,int last);
-static const TValue luaO_nilobject_={{NULL},0};
-static int luaO_int2fb(unsigned int x){
-int e=0;
-while(x>=16){
-x=(x+1)>>1;
-e++;
-}
-if(x<8)return x;
-else return((e+1)<<3)|(cast_int(x)-8);
-}
-static int luaO_fb2int(int x){
-int e=(x>>3)&31;
-if(e==0)return x;
-else return((x&7)+8)<<(e-1);
-}
-static int luaO_log2(unsigned int x){
-static const lu_byte log_2[256]={
-0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
-6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
-7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
-7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
-8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
-8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
-8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
-8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
-};
-int l=-1;
-while(x>=256){l+=8;x>>=8;}
-return l+log_2[x];
-}
-static int luaO_rawequalObj(const TValue*t1,const TValue*t2){
-if(ttype(t1)!=ttype(t2))return 0;
-else switch(ttype(t1)){
-case 0:
-return 1;
-case 3:
-return luai_numeq(nvalue(t1),nvalue(t2));
-case 1:
-return bvalue(t1)==bvalue(t2);
-case 2:
-return pvalue(t1)==pvalue(t2);
-default:
-return gcvalue(t1)==gcvalue(t2);
-}
-}
-static int luaO_str2d(const char*s,lua_Number*result){
-char*endptr;
-*result=lua_str2number(s,&endptr);
-if(endptr==s)return 0;
-if(*endptr=='x'||*endptr=='X')
-*result=cast_num(strtoul(s,&endptr,16));
-if(*endptr=='\0')return 1;
-while(isspace(cast(unsigned char,*endptr)))endptr++;
-if(*endptr!='\0')return 0;
-return 1;
-}
-static void pushstr(lua_State*L,const char*str){
-setsvalue(L,L->top,luaS_new(L,str));
-incr_top(L);
-}
-static const char*luaO_pushvfstring(lua_State*L,const char*fmt,va_list argp){
-int n=1;
-pushstr(L,"");
-for(;;){
-const char*e=strchr(fmt,'%');
-if(e==NULL)break;
-setsvalue(L,L->top,luaS_newlstr(L,fmt,e-fmt));
-incr_top(L);
-switch(*(e+1)){
-case's':{
-const char*s=va_arg(argp,char*);
-if(s==NULL)s="(null)";
-pushstr(L,s);
-break;
-}
-case'c':{
-char buff[2];
-buff[0]=cast(char,va_arg(argp,int));
-buff[1]='\0';
-pushstr(L,buff);
-break;
-}
-case'd':{
-setnvalue(L->top,cast_num(va_arg(argp,int)));
-incr_top(L);
-break;
-}
-case'f':{
-setnvalue(L->top,cast_num(va_arg(argp,l_uacNumber)));
-incr_top(L);
-break;
-}
-case'p':{
-char buff[4*sizeof(void*)+8];
-sprintf(buff,"%p",va_arg(argp,void*));
-pushstr(L,buff);
-break;
-}
-case'%':{
-pushstr(L,"%");
-break;
-}
-default:{
-char buff[3];
-buff[0]='%';
-buff[1]=*(e+1);
-buff[2]='\0';
-pushstr(L,buff);
-break;
-}
-}
-n+=2;
-fmt=e+2;
-}
-pushstr(L,fmt);
-luaV_concat(L,n+1,cast_int(L->top-L->base)-1);
-L->top-=n;
-return svalue(L->top-1);
-}
-static const char*luaO_pushfstring(lua_State*L,const char*fmt,...){
-const char*msg;
-va_list argp;
-va_start(argp,fmt);
-msg=luaO_pushvfstring(L,fmt,argp);
-va_end(argp);
-return msg;
-}
-static void luaO_chunkid(char*out,const char*source,size_t bufflen){
-if(*source=='='){
-strncpy(out,source+1,bufflen);
-out[bufflen-1]='\0';
-}
-else{
-if(*source=='@'){
-size_t l;
-source++;
-bufflen-=sizeof(" '...' ");
-l=strlen(source);
-strcpy(out,"");
-if(l>bufflen){
-source+=(l-bufflen);
-strcat(out,"...");
-}
-strcat(out,source);
-}
-else{
-size_t len=strcspn(source,"\n\r");
-bufflen-=sizeof(" [string \"...\"] ");
-if(len>bufflen)len=bufflen;
-strcpy(out,"[string \"");
-if(source[len]!='\0'){
-strncat(out,source,len);
-strcat(out,"...");
-}
-else
-strcat(out,source);
-strcat(out,"\"]");
-}
-}
-}
-#define gnode(t,i)(&(t)->node[i])
-#define gkey(n)(&(n)->i_key.nk)
-#define gval(n)(&(n)->i_val)
-#define gnext(n)((n)->i_key.nk.next)
-#define key2tval(n)(&(n)->i_key.tvk)
-static TValue*luaH_setnum(lua_State*L,Table*t,int key);
-static const TValue*luaH_getstr(Table*t,TString*key);
-static TValue*luaH_set(lua_State*L,Table*t,const TValue*key);
-static const char*const luaT_typenames[]={
-"nil","boolean","userdata","number",
-"string","table","function","userdata","thread",
-"proto","upval"
-};
-static void luaT_init(lua_State*L){
-static const char*const luaT_eventname[]={
-"__index","__newindex",
-"__gc","__mode","__eq",
-"__add","__sub","__mul","__div","__mod",
-"__pow","__unm","__len","__lt","__le",
-"__concat","__call"
-};
-int i;
-for(i=0;i<TM_N;i++){
-G(L)->tmname[i]=luaS_new(L,luaT_eventname[i]);
-luaS_fix(G(L)->tmname[i]);
-}
-}
-static const TValue*luaT_gettm(Table*events,TMS event,TString*ename){
-const TValue*tm=luaH_getstr(events,ename);
-if(ttisnil(tm)){
-events->flags|=cast_byte(1u<<event);
-return NULL;
-}
-else return tm;
-}
-static const TValue*luaT_gettmbyobj(lua_State*L,const TValue*o,TMS event){
-Table*mt;
-switch(ttype(o)){
-case 5:
-mt=hvalue(o)->metatable;
-break;
-case 7:
-mt=uvalue(o)->metatable;
-break;
-default:
-mt=G(L)->mt[ttype(o)];
-}
-return(mt?luaH_getstr(mt,G(L)->tmname[event]):(&luaO_nilobject_));
-}
-#define sizeCclosure(n)(cast(int,sizeof(CClosure))+cast(int,sizeof(TValue)*((n)-1)))
-#define sizeLclosure(n)(cast(int,sizeof(LClosure))+cast(int,sizeof(TValue*)*((n)-1)))
-static Closure*luaF_newCclosure(lua_State*L,int nelems,Table*e){
-Closure*c=cast(Closure*,luaM_malloc(L,sizeCclosure(nelems)));
-luaC_link(L,obj2gco(c),6);
-c->c.isC=1;
-c->c.env=e;
-c->c.nupvalues=cast_byte(nelems);
-return c;
-}
-static Closure*luaF_newLclosure(lua_State*L,int nelems,Table*e){
-Closure*c=cast(Closure*,luaM_malloc(L,sizeLclosure(nelems)));
-luaC_link(L,obj2gco(c),6);
-c->l.isC=0;
-c->l.env=e;
-c->l.nupvalues=cast_byte(nelems);
-while(nelems--)c->l.upvals[nelems]=NULL;
-return c;
-}
-static UpVal*luaF_newupval(lua_State*L){
-UpVal*uv=luaM_new(L,UpVal);
-luaC_link(L,obj2gco(uv),(8+2));
-uv->v=&uv->u.value;
-setnilvalue(uv->v);
-return uv;
-}
-static UpVal*luaF_findupval(lua_State*L,StkId level){
-global_State*g=G(L);
-GCObject**pp=&L->openupval;
-UpVal*p;
-UpVal*uv;
-while(*pp!=NULL&&(p=ngcotouv(*pp))->v>=level){
-if(p->v==level){
-if(isdead(g,obj2gco(p)))
-changewhite(obj2gco(p));
-return p;
-}
-pp=&p->next;
-}
-uv=luaM_new(L,UpVal);
-uv->tt=(8+2);
-uv->marked=luaC_white(g);
-uv->v=level;
-uv->next=*pp;
-*pp=obj2gco(uv);
-uv->u.l.prev=&g->uvhead;
-uv->u.l.next=g->uvhead.u.l.next;
-uv->u.l.next->u.l.prev=uv;
-g->uvhead.u.l.next=uv;
-return uv;
-}
-static void unlinkupval(UpVal*uv){
-uv->u.l.next->u.l.prev=uv->u.l.prev;
-uv->u.l.prev->u.l.next=uv->u.l.next;
-}
-static void luaF_freeupval(lua_State*L,UpVal*uv){
-if(uv->v!=&uv->u.value)
-unlinkupval(uv);
-luaM_free(L,uv);
-}
-static void luaF_close(lua_State*L,StkId level){
-UpVal*uv;
-global_State*g=G(L);
-while(L->openupval!=NULL&&(uv=ngcotouv(L->openupval))->v>=level){
-GCObject*o=obj2gco(uv);
-L->openupval=uv->next;
-if(isdead(g,o))
-luaF_freeupval(L,uv);
-else{
-unlinkupval(uv);
-setobj(L,&uv->u.value,uv->v);
-uv->v=&uv->u.value;
-luaC_linkupval(L,uv);
-}
-}
-}
-static Proto*luaF_newproto(lua_State*L){
-Proto*f=luaM_new(L,Proto);
-luaC_link(L,obj2gco(f),(8+1));
-f->k=NULL;
-f->sizek=0;
-f->p=NULL;
-f->sizep=0;
-f->code=NULL;
-f->sizecode=0;
-f->sizelineinfo=0;
-f->sizeupvalues=0;
-f->nups=0;
-f->upvalues=NULL;
-f->numparams=0;
-f->is_vararg=0;
-f->maxstacksize=0;
-f->lineinfo=NULL;
-f->sizelocvars=0;
-f->locvars=NULL;
-f->linedefined=0;
-f->lastlinedefined=0;
-f->source=NULL;
-return f;
-}
-static void luaF_freeproto(lua_State*L,Proto*f){
-luaM_freearray(L,f->code,f->sizecode,Instruction);
-luaM_freearray(L,f->p,f->sizep,Proto*);
-luaM_freearray(L,f->k,f->sizek,TValue);
-luaM_freearray(L,f->lineinfo,f->sizelineinfo,int);
-luaM_freearray(L,f->locvars,f->sizelocvars,struct LocVar);
-luaM_freearray(L,f->upvalues,f->sizeupvalues,TString*);
-luaM_free(L,f);
-}
-static void luaF_freeclosure(lua_State*L,Closure*c){
-int size=(c->c.isC)?sizeCclosure(c->c.nupvalues):
-sizeLclosure(c->l.nupvalues);
-luaM_freemem(L,c,size);
-}
-#define MASK1(n,p)((~((~(Instruction)0)<<n))<<p)
-#define MASK0(n,p)(~MASK1(n,p))
-#define GET_OPCODE(i)(cast(OpCode,((i)>>0)&MASK1(6,0)))
-#define SET_OPCODE(i,o)((i)=(((i)&MASK0(6,0))|((cast(Instruction,o)<<0)&MASK1(6,0))))
-#define GETARG_A(i)(cast(int,((i)>>(0+6))&MASK1(8,0)))
-#define SETARG_A(i,u)((i)=(((i)&MASK0(8,(0+6)))|((cast(Instruction,u)<<(0+6))&MASK1(8,(0+6)))))
-#define GETARG_B(i)(cast(int,((i)>>(((0+6)+8)+9))&MASK1(9,0)))
-#define SETARG_B(i,b)((i)=(((i)&MASK0(9,(((0+6)+8)+9)))|((cast(Instruction,b)<<(((0+6)+8)+9))&MASK1(9,(((0+6)+8)+9)))))
-#define GETARG_C(i)(cast(int,((i)>>((0+6)+8))&MASK1(9,0)))
-#define SETARG_C(i,b)((i)=(((i)&MASK0(9,((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1(9,((0+6)+8)))))
-#define GETARG_Bx(i)(cast(int,((i)>>((0+6)+8))&MASK1((9+9),0)))
-#define SETARG_Bx(i,b)((i)=(((i)&MASK0((9+9),((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1((9+9),((0+6)+8)))))
-#define GETARG_sBx(i)(GETARG_Bx(i)-(((1<<(9+9))-1)>>1))
-#define SETARG_sBx(i,b)SETARG_Bx((i),cast(unsigned int,(b)+(((1<<(9+9))-1)>>1)))
-#define CREATE_ABC(o,a,b,c)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,b)<<(((0+6)+8)+9))|(cast(Instruction,c)<<((0+6)+8)))
-#define CREATE_ABx(o,a,bc)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,bc)<<((0+6)+8)))
-#define ISK(x)((x)&(1<<(9-1)))
-#define INDEXK(r)((int)(r)&~(1<<(9-1)))
-#define RKASK(x)((x)|(1<<(9-1)))
-static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)];
-#define getBMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>4)&3))
-#define getCMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>2)&3))
-#define testTMode(m)(luaP_opmodes[m]&(1<<7))
-typedef struct expdesc{
-expkind k;
-union{
-struct{int info,aux;}s;
-lua_Number nval;
-}u;
-int t;
-int f;
-}expdesc;
-typedef struct upvaldesc{
-lu_byte k;
-lu_byte info;
-}upvaldesc;
-struct BlockCnt;
-typedef struct FuncState{
-Proto*f;
-Table*h;
-struct FuncState*prev;
-struct LexState*ls;
-struct lua_State*L;
-struct BlockCnt*bl;
-int pc;
-int lasttarget;
-int jpc;
-int freereg;
-int nk;
-int np;
-short nlocvars;
-lu_byte nactvar;
-upvaldesc upvalues[60];
-unsigned short actvar[200];
-}FuncState;
-static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,
-const char*name);
-struct lua_longjmp{
-struct lua_longjmp*previous;
-jmp_buf b;
-volatile int status;
-};
-static void luaD_seterrorobj(lua_State*L,int errcode,StkId oldtop){
-switch(errcode){
-case 4:{
-setsvalue(L,oldtop,luaS_newliteral(L,"not enough memory"));
-break;
-}
-case 5:{
-setsvalue(L,oldtop,luaS_newliteral(L,"error in error handling"));
-break;
-}
-case 3:
-case 2:{
-setobj(L,oldtop,L->top-1);
-break;
-}
-}
-L->top=oldtop+1;
-}
-static void restore_stack_limit(lua_State*L){
-if(L->size_ci>20000){
-int inuse=cast_int(L->ci-L->base_ci);
-if(inuse+1<20000)
-luaD_reallocCI(L,20000);
-}
-}
-static void resetstack(lua_State*L,int status){
-L->ci=L->base_ci;
-L->base=L->ci->base;
-luaF_close(L,L->base);
-luaD_seterrorobj(L,status,L->base);
-L->nCcalls=L->baseCcalls;
-L->allowhook=1;
-restore_stack_limit(L);
-L->errfunc=0;
-L->errorJmp=NULL;
-}
-static void luaD_throw(lua_State*L,int errcode){
-if(L->errorJmp){
-L->errorJmp->status=errcode;
-LUAI_THROW(L,L->errorJmp);
-}
-else{
-L->status=cast_byte(errcode);
-if(G(L)->panic){
-resetstack(L,errcode);
-G(L)->panic(L);
-}
-exit(EXIT_FAILURE);
-}
-}
-static int luaD_rawrunprotected(lua_State*L,Pfunc f,void*ud){
-struct lua_longjmp lj;
-lj.status=0;
-lj.previous=L->errorJmp;
-L->errorJmp=&lj;
-LUAI_TRY(L,&lj,
-(*f)(L,ud);
-);
-L->errorJmp=lj.previous;
-return lj.status;
-}
-static void correctstack(lua_State*L,TValue*oldstack){
-CallInfo*ci;
-GCObject*up;
-L->top=(L->top-oldstack)+L->stack;
-for(up=L->openupval;up!=NULL;up=up->gch.next)
-gco2uv(up)->v=(gco2uv(up)->v-oldstack)+L->stack;
-for(ci=L->base_ci;ci<=L->ci;ci++){
-ci->top=(ci->top-oldstack)+L->stack;
-ci->base=(ci->base-oldstack)+L->stack;
-ci->func=(ci->func-oldstack)+L->stack;
-}
-L->base=(L->base-oldstack)+L->stack;
-}
-static void luaD_reallocstack(lua_State*L,int newsize){
-TValue*oldstack=L->stack;
-int realsize=newsize+1+5;
-luaM_reallocvector(L,L->stack,L->stacksize,realsize,TValue);
-L->stacksize=realsize;
-L->stack_last=L->stack+newsize;
-correctstack(L,oldstack);
-}
-static void luaD_reallocCI(lua_State*L,int newsize){
-CallInfo*oldci=L->base_ci;
-luaM_reallocvector(L,L->base_ci,L->size_ci,newsize,CallInfo);
-L->size_ci=newsize;
-L->ci=(L->ci-oldci)+L->base_ci;
-L->end_ci=L->base_ci+L->size_ci-1;
-}
-static void luaD_growstack(lua_State*L,int n){
-if(n<=L->stacksize)
-luaD_reallocstack(L,2*L->stacksize);
-else
-luaD_reallocstack(L,L->stacksize+n);
-}
-static CallInfo*growCI(lua_State*L){
-if(L->size_ci>20000)
-luaD_throw(L,5);
-else{
-luaD_reallocCI(L,2*L->size_ci);
-if(L->size_ci>20000)
-luaG_runerror(L,"stack overflow");
-}
-return++L->ci;
-}
-static StkId adjust_varargs(lua_State*L,Proto*p,int actual){
-int i;
-int nfixargs=p->numparams;
-Table*htab=NULL;
-StkId base,fixed;
-for(;actual<nfixargs;++actual)
-setnilvalue(L->top++);
-fixed=L->top-actual;
-base=L->top;
-for(i=0;i<nfixargs;i++){
-setobj(L,L->top++,fixed+i);
-setnilvalue(fixed+i);
-}
-if(htab){
-sethvalue(L,L->top++,htab);
-}
-return base;
-}
-static StkId tryfuncTM(lua_State*L,StkId func){
-const TValue*tm=luaT_gettmbyobj(L,func,TM_CALL);
-StkId p;
-ptrdiff_t funcr=savestack(L,func);
-if(!ttisfunction(tm))
-luaG_typeerror(L,func,"call");
-for(p=L->top;p>func;p--)setobj(L,p,p-1);
-incr_top(L);
-func=restorestack(L,funcr);
-setobj(L,func,tm);
-return func;
-}
-#define inc_ci(L)((L->ci==L->end_ci)?growCI(L):(condhardstacktests(luaD_reallocCI(L,L->size_ci)),++L->ci))
-static int luaD_precall(lua_State*L,StkId func,int nresults){
-LClosure*cl;
-ptrdiff_t funcr;
-if(!ttisfunction(func))
-func=tryfuncTM(L,func);
-funcr=savestack(L,func);
-cl=&clvalue(func)->l;
-L->ci->savedpc=L->savedpc;
-if(!cl->isC){
-CallInfo*ci;
-StkId st,base;
-Proto*p=cl->p;
-luaD_checkstack(L,p->maxstacksize);
-func=restorestack(L,funcr);
-if(!p->is_vararg){
-base=func+1;
-if(L->top>base+p->numparams)
-L->top=base+p->numparams;
-}
-else{
-int nargs=cast_int(L->top-func)-1;
-base=adjust_varargs(L,p,nargs);
-func=restorestack(L,funcr);
-}
-ci=inc_ci(L);
-ci->func=func;
-L->base=ci->base=base;
-ci->top=L->base+p->maxstacksize;
-L->savedpc=p->code;
-ci->tailcalls=0;
-ci->nresults=nresults;
-for(st=L->top;st<ci->top;st++)
-setnilvalue(st);
-L->top=ci->top;
-return 0;
-}
-else{
-CallInfo*ci;
-int n;
-luaD_checkstack(L,20);
-ci=inc_ci(L);
-ci->func=restorestack(L,funcr);
-L->base=ci->base=ci->func+1;
-ci->top=L->top+20;
-ci->nresults=nresults;
-n=(*curr_func(L)->c.f)(L);
-if(n<0)
-return 2;
-else{
-luaD_poscall(L,L->top-n);
-return 1;
-}
-}
-}
-static int luaD_poscall(lua_State*L,StkId firstResult){
-StkId res;
-int wanted,i;
-CallInfo*ci;
-ci=L->ci--;
-res=ci->func;
-wanted=ci->nresults;
-L->base=(ci-1)->base;
-L->savedpc=(ci-1)->savedpc;
-for(i=wanted;i!=0&&firstResult<L->top;i--)
-setobj(L,res++,firstResult++);
-while(i-->0)
-setnilvalue(res++);
-L->top=res;
-return(wanted-(-1));
-}
-static void luaD_call(lua_State*L,StkId func,int nResults){
-if(++L->nCcalls>=200){
-if(L->nCcalls==200)
-luaG_runerror(L,"C stack overflow");
-else if(L->nCcalls>=(200+(200>>3)))
-luaD_throw(L,5);
-}
-if(luaD_precall(L,func,nResults)==0)
-luaV_execute(L,1);
-L->nCcalls--;
-luaC_checkGC(L);
-}
-static int luaD_pcall(lua_State*L,Pfunc func,void*u,
-ptrdiff_t old_top,ptrdiff_t ef){
-int status;
-unsigned short oldnCcalls=L->nCcalls;
-ptrdiff_t old_ci=saveci(L,L->ci);
-lu_byte old_allowhooks=L->allowhook;
-ptrdiff_t old_errfunc=L->errfunc;
-L->errfunc=ef;
-status=luaD_rawrunprotected(L,func,u);
-if(status!=0){
-StkId oldtop=restorestack(L,old_top);
-luaF_close(L,oldtop);
-luaD_seterrorobj(L,status,oldtop);
-L->nCcalls=oldnCcalls;
-L->ci=restoreci(L,old_ci);
-L->base=L->ci->base;
-L->savedpc=L->ci->savedpc;
-L->allowhook=old_allowhooks;
-restore_stack_limit(L);
-}
-L->errfunc=old_errfunc;
-return status;
-}
-struct SParser{
-ZIO*z;
-Mbuffer buff;
-const char*name;
-};
-static void f_parser(lua_State*L,void*ud){
-int i;
-Proto*tf;
-Closure*cl;
-struct SParser*p=cast(struct SParser*,ud);
-luaC_checkGC(L);
-tf=luaY_parser(L,p->z,
-&p->buff,p->name);
-cl=luaF_newLclosure(L,tf->nups,hvalue(gt(L)));
-cl->l.p=tf;
-for(i=0;i<tf->nups;i++)
-cl->l.upvals[i]=luaF_newupval(L);
-setclvalue(L,L->top,cl);
-incr_top(L);
-}
-static int luaD_protectedparser(lua_State*L,ZIO*z,const char*name){
-struct SParser p;
-int status;
-p.z=z;p.name=name;
-luaZ_initbuffer(L,&p.buff);
-status=luaD_pcall(L,f_parser,&p,savestack(L,L->top),L->errfunc);
-luaZ_freebuffer(L,&p.buff);
-return status;
-}
-static void luaS_resize(lua_State*L,int newsize){
-GCObject**newhash;
-stringtable*tb;
-int i;
-if(G(L)->gcstate==2)
-return;
-newhash=luaM_newvector(L,newsize,GCObject*);
-tb=&G(L)->strt;
-for(i=0;i<newsize;i++)newhash[i]=NULL;
-for(i=0;i<tb->size;i++){
-GCObject*p=tb->hash[i];
-while(p){
-GCObject*next=p->gch.next;
-unsigned int h=gco2ts(p)->hash;
-int h1=lmod(h,newsize);
-p->gch.next=newhash[h1];
-newhash[h1]=p;
-p=next;
-}
-}
-luaM_freearray(L,tb->hash,tb->size,TString*);
-tb->size=newsize;
-tb->hash=newhash;
-}
-static TString*newlstr(lua_State*L,const char*str,size_t l,
-unsigned int h){
-TString*ts;
-stringtable*tb;
-if(l+1>(((size_t)(~(size_t)0)-2)-sizeof(TString))/sizeof(char))
-luaM_toobig(L);
-ts=cast(TString*,luaM_malloc(L,(l+1)*sizeof(char)+sizeof(TString)));
-ts->tsv.len=l;
-ts->tsv.hash=h;
-ts->tsv.marked=luaC_white(G(L));
-ts->tsv.tt=4;
-ts->tsv.reserved=0;
-memcpy(ts+1,str,l*sizeof(char));
-((char*)(ts+1))[l]='\0';
-tb=&G(L)->strt;
-h=lmod(h,tb->size);
-ts->tsv.next=tb->hash[h];
-tb->hash[h]=obj2gco(ts);
-tb->nuse++;
-if(tb->nuse>cast(lu_int32,tb->size)&&tb->size<=(INT_MAX-2)/2)
-luaS_resize(L,tb->size*2);
-return ts;
-}
-static TString*luaS_newlstr(lua_State*L,const char*str,size_t l){
-GCObject*o;
-unsigned int h=cast(unsigned int,l);
-size_t step=(l>>5)+1;
-size_t l1;
-for(l1=l;l1>=step;l1-=step)
-h=h^((h<<5)+(h>>2)+cast(unsigned char,str[l1-1]));
-for(o=G(L)->strt.hash[lmod(h,G(L)->strt.size)];
-o!=NULL;
-o=o->gch.next){
-TString*ts=rawgco2ts(o);
-if(ts->tsv.len==l&&(memcmp(str,getstr(ts),l)==0)){
-if(isdead(G(L),o))changewhite(o);
-return ts;
-}
-}
-return newlstr(L,str,l,h);
-}
-static Udata*luaS_newudata(lua_State*L,size_t s,Table*e){
-Udata*u;
-if(s>((size_t)(~(size_t)0)-2)-sizeof(Udata))
-luaM_toobig(L);
-u=cast(Udata*,luaM_malloc(L,s+sizeof(Udata)));
-u->uv.marked=luaC_white(G(L));
-u->uv.tt=7;
-u->uv.len=s;
-u->uv.metatable=NULL;
-u->uv.env=e;
-u->uv.next=G(L)->mainthread->next;
-G(L)->mainthread->next=obj2gco(u);
-return u;
-}
-#define hashpow2(t,n)(gnode(t,lmod((n),sizenode(t))))
-#define hashstr(t,str)hashpow2(t,(str)->tsv.hash)
-#define hashboolean(t,p)hashpow2(t,p)
-#define hashmod(t,n)(gnode(t,((n)%((sizenode(t)-1)|1))))
-#define hashpointer(t,p)hashmod(t,IntPoint(p))
-static const Node dummynode_={
-{{NULL},0},
-{{{NULL},0,NULL}}
-};
-static Node*hashnum(const Table*t,lua_Number n){
-unsigned int a[cast_int(sizeof(lua_Number)/sizeof(int))];
-int i;
-if(luai_numeq(n,0))
-return gnode(t,0);
-memcpy(a,&n,sizeof(a));
-for(i=1;i<cast_int(sizeof(lua_Number)/sizeof(int));i++)a[0]+=a[i];
-return hashmod(t,a[0]);
-}
-static Node*mainposition(const Table*t,const TValue*key){
-switch(ttype(key)){
-case 3:
-return hashnum(t,nvalue(key));
-case 4:
-return hashstr(t,rawtsvalue(key));
-case 1:
-return hashboolean(t,bvalue(key));
-case 2:
-return hashpointer(t,pvalue(key));
-default:
-return hashpointer(t,gcvalue(key));
-}
-}
-static int arrayindex(const TValue*key){
-if(ttisnumber(key)){
-lua_Number n=nvalue(key);
-int k;
-lua_number2int(k,n);
-if(luai_numeq(cast_num(k),n))
-return k;
-}
-return-1;
-}
-static int findindex(lua_State*L,Table*t,StkId key){
-int i;
-if(ttisnil(key))return-1;
-i=arrayindex(key);
-if(0<i&&i<=t->sizearray)
-return i-1;
-else{
-Node*n=mainposition(t,key);
-do{
-if(luaO_rawequalObj(key2tval(n),key)||
-(ttype(gkey(n))==(8+3)&&iscollectable(key)&&
-gcvalue(gkey(n))==gcvalue(key))){
-i=cast_int(n-gnode(t,0));
-return i+t->sizearray;
-}
-else n=gnext(n);
-}while(n);
-luaG_runerror(L,"invalid key to "LUA_QL("next"));
-return 0;
-}
-}
-static int luaH_next(lua_State*L,Table*t,StkId key){
-int i=findindex(L,t,key);
-for(i++;i<t->sizearray;i++){
-if(!ttisnil(&t->array[i])){
-setnvalue(key,cast_num(i+1));
-setobj(L,key+1,&t->array[i]);
-return 1;
-}
-}
-for(i-=t->sizearray;i<(int)sizenode(t);i++){
-if(!ttisnil(gval(gnode(t,i)))){
-setobj(L,key,key2tval(gnode(t,i)));
-setobj(L,key+1,gval(gnode(t,i)));
-return 1;
-}
-}
-return 0;
-}
-static int computesizes(int nums[],int*narray){
-int i;
-int twotoi;
-int a=0;
-int na=0;
-int n=0;
-for(i=0,twotoi=1;twotoi/2<*narray;i++,twotoi*=2){
-if(nums[i]>0){
-a+=nums[i];
-if(a>twotoi/2){
-n=twotoi;
-na=a;
-}
-}
-if(a==*narray)break;
-}
-*narray=n;
-return na;
-}
-static int countint(const TValue*key,int*nums){
-int k=arrayindex(key);
-if(0<k&&k<=(1<<(32-2))){
-nums[ceillog2(k)]++;
-return 1;
-}
-else
-return 0;
-}
-static int numusearray(const Table*t,int*nums){
-int lg;
-int ttlg;
-int ause=0;
-int i=1;
-for(lg=0,ttlg=1;lg<=(32-2);lg++,ttlg*=2){
-int lc=0;
-int lim=ttlg;
-if(lim>t->sizearray){
-lim=t->sizearray;
-if(i>lim)
-break;
-}
-for(;i<=lim;i++){
-if(!ttisnil(&t->array[i-1]))
-lc++;
-}
-nums[lg]+=lc;
-ause+=lc;
-}
-return ause;
-}
-static int numusehash(const Table*t,int*nums,int*pnasize){
-int totaluse=0;
-int ause=0;
-int i=sizenode(t);
-while(i--){
-Node*n=&t->node[i];
-if(!ttisnil(gval(n))){
-ause+=countint(key2tval(n),nums);
-totaluse++;
-}
-}
-*pnasize+=ause;
-return totaluse;
-}
-static void setarrayvector(lua_State*L,Table*t,int size){
-int i;
-luaM_reallocvector(L,t->array,t->sizearray,size,TValue);
-for(i=t->sizearray;i<size;i++)
-setnilvalue(&t->array[i]);
-t->sizearray=size;
-}
-static void setnodevector(lua_State*L,Table*t,int size){
-int lsize;
-if(size==0){
-t->node=cast(Node*,(&dummynode_));
-lsize=0;
-}
-else{
-int i;
-lsize=ceillog2(size);
-if(lsize>(32-2))
-luaG_runerror(L,"table overflow");
-size=twoto(lsize);
-t->node=luaM_newvector(L,size,Node);
-for(i=0;i<size;i++){
-Node*n=gnode(t,i);
-gnext(n)=NULL;
-setnilvalue(gkey(n));
-setnilvalue(gval(n));
-}
-}
-t->lsizenode=cast_byte(lsize);
-t->lastfree=gnode(t,size);
-}
-static void resize(lua_State*L,Table*t,int nasize,int nhsize){
-int i;
-int oldasize=t->sizearray;
-int oldhsize=t->lsizenode;
-Node*nold=t->node;
-if(nasize>oldasize)
-setarrayvector(L,t,nasize);
-setnodevector(L,t,nhsize);
-if(nasize<oldasize){
-t->sizearray=nasize;
-for(i=nasize;i<oldasize;i++){
-if(!ttisnil(&t->array[i]))
-setobj(L,luaH_setnum(L,t,i+1),&t->array[i]);
-}
-luaM_reallocvector(L,t->array,oldasize,nasize,TValue);
-}
-for(i=twoto(oldhsize)-1;i>=0;i--){
-Node*old=nold+i;
-if(!ttisnil(gval(old)))
-setobj(L,luaH_set(L,t,key2tval(old)),gval(old));
-}
-if(nold!=(&dummynode_))
-luaM_freearray(L,nold,twoto(oldhsize),Node);
-}
-static void luaH_resizearray(lua_State*L,Table*t,int nasize){
-int nsize=(t->node==(&dummynode_))?0:sizenode(t);
-resize(L,t,nasize,nsize);
-}
-static void rehash(lua_State*L,Table*t,const TValue*ek){
-int nasize,na;
-int nums[(32-2)+1];
-int i;
-int totaluse;
-for(i=0;i<=(32-2);i++)nums[i]=0;
-nasize=numusearray(t,nums);
-totaluse=nasize;
-totaluse+=numusehash(t,nums,&nasize);
-nasize+=countint(ek,nums);
-totaluse++;
-na=computesizes(nums,&nasize);
-resize(L,t,nasize,totaluse-na);
-}
-static Table*luaH_new(lua_State*L,int narray,int nhash){
-Table*t=luaM_new(L,Table);
-luaC_link(L,obj2gco(t),5);
-t->metatable=NULL;
-t->flags=cast_byte(~0);
-t->array=NULL;
-t->sizearray=0;
-t->lsizenode=0;
-t->node=cast(Node*,(&dummynode_));
-setarrayvector(L,t,narray);
-setnodevector(L,t,nhash);
-return t;
-}
-static void luaH_free(lua_State*L,Table*t){
-if(t->node!=(&dummynode_))
-luaM_freearray(L,t->node,sizenode(t),Node);
-luaM_freearray(L,t->array,t->sizearray,TValue);
-luaM_free(L,t);
-}
-static Node*getfreepos(Table*t){
-while(t->lastfree-->t->node){
-if(ttisnil(gkey(t->lastfree)))
-return t->lastfree;
-}
-return NULL;
-}
-static TValue*newkey(lua_State*L,Table*t,const TValue*key){
-Node*mp=mainposition(t,key);
-if(!ttisnil(gval(mp))||mp==(&dummynode_)){
-Node*othern;
-Node*n=getfreepos(t);
-if(n==NULL){
-rehash(L,t,key);
-return luaH_set(L,t,key);
-}
-othern=mainposition(t,key2tval(mp));
-if(othern!=mp){
-while(gnext(othern)!=mp)othern=gnext(othern);
-gnext(othern)=n;
-*n=*mp;
-gnext(mp)=NULL;
-setnilvalue(gval(mp));
-}
-else{
-gnext(n)=gnext(mp);
-gnext(mp)=n;
-mp=n;
-}
-}
-gkey(mp)->value=key->value;gkey(mp)->tt=key->tt;
-luaC_barriert(L,t,key);
-return gval(mp);
-}
-static const TValue*luaH_getnum(Table*t,int key){
-if(cast(unsigned int,key-1)<cast(unsigned int,t->sizearray))
-return&t->array[key-1];
-else{
-lua_Number nk=cast_num(key);
-Node*n=hashnum(t,nk);
-do{
-if(ttisnumber(gkey(n))&&luai_numeq(nvalue(gkey(n)),nk))
-return gval(n);
-else n=gnext(n);
-}while(n);
-return(&luaO_nilobject_);
-}
-}
-static const TValue*luaH_getstr(Table*t,TString*key){
-Node*n=hashstr(t,key);
-do{
-if(ttisstring(gkey(n))&&rawtsvalue(gkey(n))==key)
-return gval(n);
-else n=gnext(n);
-}while(n);
-return(&luaO_nilobject_);
-}
-static const TValue*luaH_get(Table*t,const TValue*key){
-switch(ttype(key)){
-case 0:return(&luaO_nilobject_);
-case 4:return luaH_getstr(t,rawtsvalue(key));
-case 3:{
-int k;
-lua_Number n=nvalue(key);
-lua_number2int(k,n);
-if(luai_numeq(cast_num(k),nvalue(key)))
-return luaH_getnum(t,k);
-}
-default:{
-Node*n=mainposition(t,key);
-do{
-if(luaO_rawequalObj(key2tval(n),key))
-return gval(n);
-else n=gnext(n);
-}while(n);
-return(&luaO_nilobject_);
-}
-}
-}
-static TValue*luaH_set(lua_State*L,Table*t,const TValue*key){
-const TValue*p=luaH_get(t,key);
-t->flags=0;
-if(p!=(&luaO_nilobject_))
-return cast(TValue*,p);
-else{
-if(ttisnil(key))luaG_runerror(L,"table index is nil");
-else if(ttisnumber(key)&&luai_numisnan(nvalue(key)))
-luaG_runerror(L,"table index is NaN");
-return newkey(L,t,key);
-}
-}
-static TValue*luaH_setnum(lua_State*L,Table*t,int key){
-const TValue*p=luaH_getnum(t,key);
-if(p!=(&luaO_nilobject_))
-return cast(TValue*,p);
-else{
-TValue k;
-setnvalue(&k,cast_num(key));
-return newkey(L,t,&k);
-}
-}
-static TValue*luaH_setstr(lua_State*L,Table*t,TString*key){
-const TValue*p=luaH_getstr(t,key);
-if(p!=(&luaO_nilobject_))
-return cast(TValue*,p);
-else{
-TValue k;
-setsvalue(L,&k,key);
-return newkey(L,t,&k);
-}
-}
-static int unbound_search(Table*t,unsigned int j){
-unsigned int i=j;
-j++;
-while(!ttisnil(luaH_getnum(t,j))){
-i=j;
-j*=2;
-if(j>cast(unsigned int,(INT_MAX-2))){
-i=1;
-while(!ttisnil(luaH_getnum(t,i)))i++;
-return i-1;
-}
-}
-while(j-i>1){
-unsigned int m=(i+j)/2;
-if(ttisnil(luaH_getnum(t,m)))j=m;
-else i=m;
-}
-return i;
-}
-static int luaH_getn(Table*t){
-unsigned int j=t->sizearray;
-if(j>0&&ttisnil(&t->array[j-1])){
-unsigned int i=0;
-while(j-i>1){
-unsigned int m=(i+j)/2;
-if(ttisnil(&t->array[m-1]))j=m;
-else i=m;
-}
-return i;
-}
-else if(t->node==(&dummynode_))
-return j;
-else return unbound_search(t,j);
-}
-#define makewhite(g,x)((x)->gch.marked=cast_byte(((x)->gch.marked&cast_byte(~(bitmask(2)|bit2mask(0,1))))|luaC_white(g)))
-#define white2gray(x)reset2bits((x)->gch.marked,0,1)
-#define black2gray(x)resetbit((x)->gch.marked,2)
-#define stringmark(s)reset2bits((s)->tsv.marked,0,1)
-#define isfinalized(u)testbit((u)->marked,3)
-#define markfinalized(u)l_setbit((u)->marked,3)
-#define markvalue(g,o){checkconsistency(o);if(iscollectable(o)&&iswhite(gcvalue(o)))reallymarkobject(g,gcvalue(o));}
-#define markobject(g,t){if(iswhite(obj2gco(t)))reallymarkobject(g,obj2gco(t));}
-#define setthreshold(g)(g->GCthreshold=(g->estimate/100)*g->gcpause)
-static void removeentry(Node*n){
-if(iscollectable(gkey(n)))
-setttype(gkey(n),(8+3));
-}
-static void reallymarkobject(global_State*g,GCObject*o){
-white2gray(o);
-switch(o->gch.tt){
-case 4:{
-return;
-}
-case 7:{
-Table*mt=gco2u(o)->metatable;
-gray2black(o);
-if(mt)markobject(g,mt);
-markobject(g,gco2u(o)->env);
-return;
-}
-case(8+2):{
-UpVal*uv=gco2uv(o);
-markvalue(g,uv->v);
-if(uv->v==&uv->u.value)
-gray2black(o);
-return;
-}
-case 6:{
-gco2cl(o)->c.gclist=g->gray;
-g->gray=o;
-break;
-}
-case 5:{
-gco2h(o)->gclist=g->gray;
-g->gray=o;
-break;
-}
-case 8:{
-gco2th(o)->gclist=g->gray;
-g->gray=o;
-break;
-}
-case(8+1):{
-gco2p(o)->gclist=g->gray;
-g->gray=o;
-break;
-}
-default:;
-}
-}
-static void marktmu(global_State*g){
-GCObject*u=g->tmudata;
-if(u){
-do{
-u=u->gch.next;
-makewhite(g,u);
-reallymarkobject(g,u);
-}while(u!=g->tmudata);
-}
-}
-static size_t luaC_separateudata(lua_State*L,int all){
-global_State*g=G(L);
-size_t deadmem=0;
-GCObject**p=&g->mainthread->next;
-GCObject*curr;
-while((curr=*p)!=NULL){
-if(!(iswhite(curr)||all)||isfinalized(gco2u(curr)))
-p=&curr->gch.next;
-else if(fasttm(L,gco2u(curr)->metatable,TM_GC)==NULL){
-markfinalized(gco2u(curr));
-p=&curr->gch.next;
-}
-else{
-deadmem+=sizeudata(gco2u(curr));
-markfinalized(gco2u(curr));
-*p=curr->gch.next;
-if(g->tmudata==NULL)
-g->tmudata=curr->gch.next=curr;
-else{
-curr->gch.next=g->tmudata->gch.next;
-g->tmudata->gch.next=curr;
-g->tmudata=curr;
-}
-}
-}
-return deadmem;
-}
-static int traversetable(global_State*g,Table*h){
-int i;
-int weakkey=0;
-int weakvalue=0;
-const TValue*mode;
-if(h->metatable)
-markobject(g,h->metatable);
-mode=gfasttm(g,h->metatable,TM_MODE);
-if(mode&&ttisstring(mode)){
-weakkey=(strchr(svalue(mode),'k')!=NULL);
-weakvalue=(strchr(svalue(mode),'v')!=NULL);
-if(weakkey||weakvalue){
-h->marked&=~(bitmask(3)|bitmask(4));
-h->marked|=cast_byte((weakkey<<3)|
-(weakvalue<<4));
-h->gclist=g->weak;
-g->weak=obj2gco(h);
-}
-}
-if(weakkey&&weakvalue)return 1;
-if(!weakvalue){
-i=h->sizearray;
-while(i--)
-markvalue(g,&h->array[i]);
-}
-i=sizenode(h);
-while(i--){
-Node*n=gnode(h,i);
-if(ttisnil(gval(n)))
-removeentry(n);
-else{
-if(!weakkey)markvalue(g,gkey(n));
-if(!weakvalue)markvalue(g,gval(n));
-}
-}
-return weakkey||weakvalue;
-}
-static void traverseproto(global_State*g,Proto*f){
-int i;
-if(f->source)stringmark(f->source);
-for(i=0;i<f->sizek;i++)
-markvalue(g,&f->k[i]);
-for(i=0;i<f->sizeupvalues;i++){
-if(f->upvalues[i])
-stringmark(f->upvalues[i]);
-}
-for(i=0;i<f->sizep;i++){
-if(f->p[i])
-markobject(g,f->p[i]);
-}
-for(i=0;i<f->sizelocvars;i++){
-if(f->locvars[i].varname)
-stringmark(f->locvars[i].varname);
-}
-}
-static void traverseclosure(global_State*g,Closure*cl){
-markobject(g,cl->c.env);
-if(cl->c.isC){
-int i;
-for(i=0;i<cl->c.nupvalues;i++)
-markvalue(g,&cl->c.upvalue[i]);
-}
-else{
-int i;
-markobject(g,cl->l.p);
-for(i=0;i<cl->l.nupvalues;i++)
-markobject(g,cl->l.upvals[i]);
-}
-}
-static void checkstacksizes(lua_State*L,StkId max){
-int ci_used=cast_int(L->ci-L->base_ci);
-int s_used=cast_int(max-L->stack);
-if(L->size_ci>20000)
-return;
-if(4*ci_used<L->size_ci&&2*8<L->size_ci)
-luaD_reallocCI(L,L->size_ci/2);
-condhardstacktests(luaD_reallocCI(L,ci_used+1));
-if(4*s_used<L->stacksize&&
-2*((2*20)+5)<L->stacksize)
-luaD_reallocstack(L,L->stacksize/2);
-condhardstacktests(luaD_reallocstack(L,s_used));
-}
-static void traversestack(global_State*g,lua_State*l){
-StkId o,lim;
-CallInfo*ci;
-markvalue(g,gt(l));
-lim=l->top;
-for(ci=l->base_ci;ci<=l->ci;ci++){
-if(lim<ci->top)lim=ci->top;
-}
-for(o=l->stack;o<l->top;o++)
-markvalue(g,o);
-for(;o<=lim;o++)
-setnilvalue(o);
-checkstacksizes(l,lim);
-}
-static l_mem propagatemark(global_State*g){
-GCObject*o=g->gray;
-gray2black(o);
-switch(o->gch.tt){
-case 5:{
-Table*h=gco2h(o);
-g->gray=h->gclist;
-if(traversetable(g,h))
-black2gray(o);
-return sizeof(Table)+sizeof(TValue)*h->sizearray+
-sizeof(Node)*sizenode(h);
-}
-case 6:{
-Closure*cl=gco2cl(o);
-g->gray=cl->c.gclist;
-traverseclosure(g,cl);
-return(cl->c.isC)?sizeCclosure(cl->c.nupvalues):
-sizeLclosure(cl->l.nupvalues);
-}
-case 8:{
-lua_State*th=gco2th(o);
-g->gray=th->gclist;
-th->gclist=g->grayagain;
-g->grayagain=o;
-black2gray(o);
-traversestack(g,th);
-return sizeof(lua_State)+sizeof(TValue)*th->stacksize+
-sizeof(CallInfo)*th->size_ci;
-}
-case(8+1):{
-Proto*p=gco2p(o);
-g->gray=p->gclist;
-traverseproto(g,p);
-return sizeof(Proto)+sizeof(Instruction)*p->sizecode+
-sizeof(Proto*)*p->sizep+
-sizeof(TValue)*p->sizek+
-sizeof(int)*p->sizelineinfo+
-sizeof(LocVar)*p->sizelocvars+
-sizeof(TString*)*p->sizeupvalues;
-}
-default:return 0;
-}
-}
-static size_t propagateall(global_State*g){
-size_t m=0;
-while(g->gray)m+=propagatemark(g);
-return m;
-}
-static int iscleared(const TValue*o,int iskey){
-if(!iscollectable(o))return 0;
-if(ttisstring(o)){
-stringmark(rawtsvalue(o));
-return 0;
-}
-return iswhite(gcvalue(o))||
-(ttisuserdata(o)&&(!iskey&&isfinalized(uvalue(o))));
-}
-static void cleartable(GCObject*l){
-while(l){
-Table*h=gco2h(l);
-int i=h->sizearray;
-if(testbit(h->marked,4)){
-while(i--){
-TValue*o=&h->array[i];
-if(iscleared(o,0))
-setnilvalue(o);
-}
-}
-i=sizenode(h);
-while(i--){
-Node*n=gnode(h,i);
-if(!ttisnil(gval(n))&&
-(iscleared(key2tval(n),1)||iscleared(gval(n),0))){
-setnilvalue(gval(n));
-removeentry(n);
-}
-}
-l=h->gclist;
-}
-}
-static void freeobj(lua_State*L,GCObject*o){
-switch(o->gch.tt){
-case(8+1):luaF_freeproto(L,gco2p(o));break;
-case 6:luaF_freeclosure(L,gco2cl(o));break;
-case(8+2):luaF_freeupval(L,gco2uv(o));break;
-case 5:luaH_free(L,gco2h(o));break;
-case 8:{
-luaE_freethread(L,gco2th(o));
-break;
-}
-case 4:{
-G(L)->strt.nuse--;
-luaM_freemem(L,o,sizestring(gco2ts(o)));
-break;
-}
-case 7:{
-luaM_freemem(L,o,sizeudata(gco2u(o)));
-break;
-}
-default:;
-}
-}
-#define sweepwholelist(L,p)sweeplist(L,p,((lu_mem)(~(lu_mem)0)-2))
-static GCObject**sweeplist(lua_State*L,GCObject**p,lu_mem count){
-GCObject*curr;
-global_State*g=G(L);
-int deadmask=otherwhite(g);
-while((curr=*p)!=NULL&&count-->0){
-if(curr->gch.tt==8)
-sweepwholelist(L,&gco2th(curr)->openupval);
-if((curr->gch.marked^bit2mask(0,1))&deadmask){
-makewhite(g,curr);
-p=&curr->gch.next;
-}
-else{
-*p=curr->gch.next;
-if(curr==g->rootgc)
-g->rootgc=curr->gch.next;
-freeobj(L,curr);
-}
-}
-return p;
-}
-static void checkSizes(lua_State*L){
-global_State*g=G(L);
-if(g->strt.nuse<cast(lu_int32,g->strt.size/4)&&
-g->strt.size>32*2)
-luaS_resize(L,g->strt.size/2);
-if(luaZ_sizebuffer(&g->buff)>32*2){
-size_t newsize=luaZ_sizebuffer(&g->buff)/2;
-luaZ_resizebuffer(L,&g->buff,newsize);
-}
-}
-static void GCTM(lua_State*L){
-global_State*g=G(L);
-GCObject*o=g->tmudata->gch.next;
-Udata*udata=rawgco2u(o);
-const TValue*tm;
-if(o==g->tmudata)
-g->tmudata=NULL;
-else
-g->tmudata->gch.next=udata->uv.next;
-udata->uv.next=g->mainthread->next;
-g->mainthread->next=o;
-makewhite(g,o);
-tm=fasttm(L,udata->uv.metatable,TM_GC);
-if(tm!=NULL){
-lu_byte oldah=L->allowhook;
-lu_mem oldt=g->GCthreshold;
-L->allowhook=0;
-g->GCthreshold=2*g->totalbytes;
-setobj(L,L->top,tm);
-setuvalue(L,L->top+1,udata);
-L->top+=2;
-luaD_call(L,L->top-2,0);
-L->allowhook=oldah;
-g->GCthreshold=oldt;
-}
-}
-static void luaC_callGCTM(lua_State*L){
-while(G(L)->tmudata)
-GCTM(L);
-}
-static void luaC_freeall(lua_State*L){
-global_State*g=G(L);
-int i;
-g->currentwhite=bit2mask(0,1)|bitmask(6);
-sweepwholelist(L,&g->rootgc);
-for(i=0;i<g->strt.size;i++)
-sweepwholelist(L,&g->strt.hash[i]);
-}
-static void markmt(global_State*g){
-int i;
-for(i=0;i<(8+1);i++)
-if(g->mt[i])markobject(g,g->mt[i]);
-}
-static void markroot(lua_State*L){
-global_State*g=G(L);
-g->gray=NULL;
-g->grayagain=NULL;
-g->weak=NULL;
-markobject(g,g->mainthread);
-markvalue(g,gt(g->mainthread));
-markvalue(g,registry(L));
-markmt(g);
-g->gcstate=1;
-}
-static void remarkupvals(global_State*g){
-UpVal*uv;
-for(uv=g->uvhead.u.l.next;uv!=&g->uvhead;uv=uv->u.l.next){
-if(isgray(obj2gco(uv)))
-markvalue(g,uv->v);
-}
-}
-static void atomic(lua_State*L){
-global_State*g=G(L);
-size_t udsize;
-remarkupvals(g);
-propagateall(g);
-g->gray=g->weak;
-g->weak=NULL;
-markobject(g,L);
-markmt(g);
-propagateall(g);
-g->gray=g->grayagain;
-g->grayagain=NULL;
-propagateall(g);
-udsize=luaC_separateudata(L,0);
-marktmu(g);
-udsize+=propagateall(g);
-cleartable(g->weak);
-g->currentwhite=cast_byte(otherwhite(g));
-g->sweepstrgc=0;
-g->sweepgc=&g->rootgc;
-g->gcstate=2;
-g->estimate=g->totalbytes-udsize;
-}
-static l_mem singlestep(lua_State*L){
-global_State*g=G(L);
-switch(g->gcstate){
-case 0:{
-markroot(L);
-return 0;
-}
-case 1:{
-if(g->gray)
-return propagatemark(g);
-else{
-atomic(L);
-return 0;
-}
-}
-case 2:{
-lu_mem old=g->totalbytes;
-sweepwholelist(L,&g->strt.hash[g->sweepstrgc++]);
-if(g->sweepstrgc>=g->strt.size)
-g->gcstate=3;
-g->estimate-=old-g->totalbytes;
-return 10;
-}
-case 3:{
-lu_mem old=g->totalbytes;
-g->sweepgc=sweeplist(L,g->sweepgc,40);
-if(*g->sweepgc==NULL){
-checkSizes(L);
-g->gcstate=4;
-}
-g->estimate-=old-g->totalbytes;
-return 40*10;
-}
-case 4:{
-if(g->tmudata){
-GCTM(L);
-if(g->estimate>100)
-g->estimate-=100;
-return 100;
-}
-else{
-g->gcstate=0;
-g->gcdept=0;
-return 0;
-}
-}
-default:return 0;
-}
-}
-static void luaC_step(lua_State*L){
-global_State*g=G(L);
-l_mem lim=(1024u/100)*g->gcstepmul;
-if(lim==0)
-lim=(((lu_mem)(~(lu_mem)0)-2)-1)/2;
-g->gcdept+=g->totalbytes-g->GCthreshold;
-do{
-lim-=singlestep(L);
-if(g->gcstate==0)
-break;
-}while(lim>0);
-if(g->gcstate!=0){
-if(g->gcdept<1024u)
-g->GCthreshold=g->totalbytes+1024u;
-else{
-g->gcdept-=1024u;
-g->GCthreshold=g->totalbytes;
-}
-}
-else{
-setthreshold(g);
-}
-}
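/*
 * Annotation (editor's sketch, not part of the diff): luaC_step() above is
 * the incremental collector driver; it budgets work from g->gcstepmul and,
 * once a cycle finishes, resets the allocation threshold via setthreshold().
 * lua_newstate() below initialises both gcpause and gcstepmul to 200. The
 * same knobs are reachable through the standard lua_gc() API; the values
 * here are illustrative only:
 */
#include <lua.h>

static void tune_and_step_gc(lua_State *L)
{
  lua_gc(L, LUA_GCSETPAUSE, 150);   /* start the next cycle sooner */
  lua_gc(L, LUA_GCSETSTEPMUL, 400); /* do more GC work per allocation unit */
  lua_gc(L, LUA_GCSTEP, 0);         /* run one incremental step right now */
}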
-static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v){
-global_State*g=G(L);
-if(g->gcstate==1)
-reallymarkobject(g,v);
-else
-makewhite(g,o);
-}
-static void luaC_barrierback(lua_State*L,Table*t){
-global_State*g=G(L);
-GCObject*o=obj2gco(t);
-black2gray(o);
-t->gclist=g->grayagain;
-g->grayagain=o;
-}
-static void luaC_link(lua_State*L,GCObject*o,lu_byte tt){
-global_State*g=G(L);
-o->gch.next=g->rootgc;
-g->rootgc=o;
-o->gch.marked=luaC_white(g);
-o->gch.tt=tt;
-}
-static void luaC_linkupval(lua_State*L,UpVal*uv){
-global_State*g=G(L);
-GCObject*o=obj2gco(uv);
-o->gch.next=g->rootgc;
-g->rootgc=o;
-if(isgray(o)){
-if(g->gcstate==1){
-gray2black(o);
-luaC_barrier(L,uv,uv->v);
-}
-else{
-makewhite(g,o);
-}
-}
-}
-typedef union{
-lua_Number r;
-TString*ts;
-}SemInfo;
-typedef struct Token{
-int token;
-SemInfo seminfo;
-}Token;
-typedef struct LexState{
-int current;
-int linenumber;
-int lastline;
-Token t;
-Token lookahead;
-struct FuncState*fs;
-struct lua_State*L;
-ZIO*z;
-Mbuffer*buff;
-TString*source;
-char decpoint;
-}LexState;
-static void luaX_init(lua_State*L);
-static void luaX_lexerror(LexState*ls,const char*msg,int token);
-#define state_size(x)(sizeof(x)+0)
-#define fromstate(l)(cast(lu_byte*,(l))-0)
-#define tostate(l)(cast(lua_State*,cast(lu_byte*,l)+0))
-typedef struct LG{
-lua_State l;
-global_State g;
-}LG;
-static void stack_init(lua_State*L1,lua_State*L){
-L1->base_ci=luaM_newvector(L,8,CallInfo);
-L1->ci=L1->base_ci;
-L1->size_ci=8;
-L1->end_ci=L1->base_ci+L1->size_ci-1;
-L1->stack=luaM_newvector(L,(2*20)+5,TValue);
-L1->stacksize=(2*20)+5;
-L1->top=L1->stack;
-L1->stack_last=L1->stack+(L1->stacksize-5)-1;
-L1->ci->func=L1->top;
-setnilvalue(L1->top++);
-L1->base=L1->ci->base=L1->top;
-L1->ci->top=L1->top+20;
-}
-static void freestack(lua_State*L,lua_State*L1){
-luaM_freearray(L,L1->base_ci,L1->size_ci,CallInfo);
-luaM_freearray(L,L1->stack,L1->stacksize,TValue);
-}
-static void f_luaopen(lua_State*L,void*ud){
-global_State*g=G(L);
-UNUSED(ud);
-stack_init(L,L);
-sethvalue(L,gt(L),luaH_new(L,0,2));
-sethvalue(L,registry(L),luaH_new(L,0,2));
-luaS_resize(L,32);
-luaT_init(L);
-luaX_init(L);
-luaS_fix(luaS_newliteral(L,"not enough memory"));
-g->GCthreshold=4*g->totalbytes;
-}
-static void preinit_state(lua_State*L,global_State*g){
-G(L)=g;
-L->stack=NULL;
-L->stacksize=0;
-L->errorJmp=NULL;
-L->hook=NULL;
-L->hookmask=0;
-L->basehookcount=0;
-L->allowhook=1;
-resethookcount(L);
-L->openupval=NULL;
-L->size_ci=0;
-L->nCcalls=L->baseCcalls=0;
-L->status=0;
-L->base_ci=L->ci=NULL;
-L->savedpc=NULL;
-L->errfunc=0;
-setnilvalue(gt(L));
-}
-static void close_state(lua_State*L){
-global_State*g=G(L);
-luaF_close(L,L->stack);
-luaC_freeall(L);
-luaM_freearray(L,G(L)->strt.hash,G(L)->strt.size,TString*);
-luaZ_freebuffer(L,&g->buff);
-freestack(L,L);
-(*g->frealloc)(g->ud,fromstate(L),state_size(LG),0);
-}
-static void luaE_freethread(lua_State*L,lua_State*L1){
-luaF_close(L1,L1->stack);
-freestack(L,L1);
-luaM_freemem(L,fromstate(L1),state_size(lua_State));
-}
-static lua_State*lua_newstate(lua_Alloc f,void*ud){
-int i;
-lua_State*L;
-global_State*g;
-void*l=(*f)(ud,NULL,0,state_size(LG));
-if(l==NULL)return NULL;
-L=tostate(l);
-g=&((LG*)L)->g;
-L->next=NULL;
-L->tt=8;
-g->currentwhite=bit2mask(0,5);
-L->marked=luaC_white(g);
-set2bits(L->marked,5,6);
-preinit_state(L,g);
-g->frealloc=f;
-g->ud=ud;
-g->mainthread=L;
-g->uvhead.u.l.prev=&g->uvhead;
-g->uvhead.u.l.next=&g->uvhead;
-g->GCthreshold=0;
-g->strt.size=0;
-g->strt.nuse=0;
-g->strt.hash=NULL;
-setnilvalue(registry(L));
-luaZ_initbuffer(L,&g->buff);
-g->panic=NULL;
-g->gcstate=0;
-g->rootgc=obj2gco(L);
-g->sweepstrgc=0;
-g->sweepgc=&g->rootgc;
-g->gray=NULL;
-g->grayagain=NULL;
-g->weak=NULL;
-g->tmudata=NULL;
-g->totalbytes=sizeof(LG);
-g->gcpause=200;
-g->gcstepmul=200;
-g->gcdept=0;
-for(i=0;i<(8+1);i++)g->mt[i]=NULL;
-if(luaD_rawrunprotected(L,f_luaopen,NULL)!=0){
-close_state(L);
-L=NULL;
-}
-else
-{}
-return L;
-}
-static void callallgcTM(lua_State*L,void*ud){
-UNUSED(ud);
-luaC_callGCTM(L);
-}
-static void lua_close(lua_State*L){
-L=G(L)->mainthread;
-luaF_close(L,L->stack);
-luaC_separateudata(L,1);
-L->errfunc=0;
-do{
-L->ci=L->base_ci;
-L->base=L->top=L->ci->base;
-L->nCcalls=L->baseCcalls=0;
-}while(luaD_rawrunprotected(L,callallgcTM,NULL)!=0);
-close_state(L);
-}
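/*
 * Annotation (editor's sketch, not part of the diff): lua_newstate() above
 * receives the allocator as a parameter, which is why close_state() releases
 * the combined LG block through g->frealloc rather than free(). A minimal
 * allocator satisfying the documented lua_Alloc contract (the conventional
 * realloc-based version, not code from this file):
 */
#include <stdlib.h>
#include <lua.h>

static void *l_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
{
  (void)ud; (void)osize;           /* unused in this sketch */
  if (nsize == 0) {                /* nsize == 0 is a free request */
    free(ptr);
    return NULL;
  }
  return realloc(ptr, nsize);      /* shrink, grow, or fresh allocation */
}

/* usage: lua_State *L = lua_newstate(l_alloc, NULL); ... lua_close(L); */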
-#define getcode(fs,e)((fs)->f->code[(e)->u.s.info])
-#define luaK_codeAsBx(fs,o,A,sBx)luaK_codeABx(fs,o,A,(sBx)+(((1<<(9+9))-1)>>1))
-#define luaK_setmultret(fs,e)luaK_setreturns(fs,e,(-1))
-static int luaK_codeABx(FuncState*fs,OpCode o,int A,unsigned int Bx);
-static int luaK_codeABC(FuncState*fs,OpCode o,int A,int B,int C);
-static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults);
-static void luaK_patchtohere(FuncState*fs,int list);
-static void luaK_concat(FuncState*fs,int*l1,int l2);
-static int currentpc(lua_State*L,CallInfo*ci){
-if(!isLua(ci))return-1;
-if(ci==L->ci)
-ci->savedpc=L->savedpc;
-return pcRel(ci->savedpc,ci_func(ci)->l.p);
-}
-static int currentline(lua_State*L,CallInfo*ci){
-int pc=currentpc(L,ci);
-if(pc<0)
-return-1;
-else
-return getline_(ci_func(ci)->l.p,pc);
-}
-static int lua_getstack(lua_State*L,int level,lua_Debug*ar){
-int status;
-CallInfo*ci;
-for(ci=L->ci;level>0&&ci>L->base_ci;ci--){
-level--;
-if(f_isLua(ci))
-level-=ci->tailcalls;
-}
-if(level==0&&ci>L->base_ci){
-status=1;
-ar->i_ci=cast_int(ci-L->base_ci);
-}
-else if(level<0){
-status=1;
-ar->i_ci=0;
-}
-else status=0;
-return status;
-}
-static Proto*getluaproto(CallInfo*ci){
-return(isLua(ci)?ci_func(ci)->l.p:NULL);
-}
-static void funcinfo(lua_Debug*ar,Closure*cl){
-if(cl->c.isC){
-ar->source="=[C]";
-ar->linedefined=-1;
-ar->lastlinedefined=-1;
-ar->what="C";
-}
-else{
-ar->source=getstr(cl->l.p->source);
-ar->linedefined=cl->l.p->linedefined;
-ar->lastlinedefined=cl->l.p->lastlinedefined;
-ar->what=(ar->linedefined==0)?"main":"Lua";
-}
-luaO_chunkid(ar->short_src,ar->source,60);
-}
-static void info_tailcall(lua_Debug*ar){
-ar->name=ar->namewhat="";
-ar->what="tail";
-ar->lastlinedefined=ar->linedefined=ar->currentline=-1;
-ar->source="=(tail call)";
-luaO_chunkid(ar->short_src,ar->source,60);
-ar->nups=0;
-}
-static void collectvalidlines(lua_State*L,Closure*f){
-if(f==NULL||f->c.isC){
-setnilvalue(L->top);
-}
-else{
-Table*t=luaH_new(L,0,0);
-int*lineinfo=f->l.p->lineinfo;
-int i;
-for(i=0;i<f->l.p->sizelineinfo;i++)
-setbvalue(luaH_setnum(L,t,lineinfo[i]),1);
-sethvalue(L,L->top,t);
-}
-incr_top(L);
-}
-static int auxgetinfo(lua_State*L,const char*what,lua_Debug*ar,
-Closure*f,CallInfo*ci){
-int status=1;
-if(f==NULL){
-info_tailcall(ar);
-return status;
-}
-for(;*what;what++){
-switch(*what){
-case'S':{
-funcinfo(ar,f);
-break;
-}
-case'l':{
-ar->currentline=(ci)?currentline(L,ci):-1;
-break;
-}
-case'u':{
-ar->nups=f->c.nupvalues;
-break;
-}
-case'n':{
-ar->namewhat=(ci)?NULL:NULL;
-if(ar->namewhat==NULL){
-ar->namewhat="";
-ar->name=NULL;
-}
-break;
-}
-case'L':
-case'f':
-break;
-default:status=0;
-}
-}
-return status;
-}
-static int lua_getinfo(lua_State*L,const char*what,lua_Debug*ar){
-int status;
-Closure*f=NULL;
-CallInfo*ci=NULL;
-if(*what=='>'){
-StkId func=L->top-1;
-luai_apicheck(L,ttisfunction(func));
-what++;
-f=clvalue(func);
-L->top--;
-}
-else if(ar->i_ci!=0){
-ci=L->base_ci+ar->i_ci;
-f=clvalue(ci->func);
-}
-status=auxgetinfo(L,what,ar,f,ci);
-if(strchr(what,'f')){
-if(f==NULL)setnilvalue(L->top);
-else setclvalue(L,L->top,f);
-incr_top(L);
-}
-if(strchr(what,'L'))
-collectvalidlines(L,f);
-return status;
-}
-static int isinstack(CallInfo*ci,const TValue*o){
-StkId p;
-for(p=ci->base;p<ci->top;p++)
-if(o==p)return 1;
-return 0;
-}
-static void luaG_typeerror(lua_State*L,const TValue*o,const char*op){
-const char*name=NULL;
-const char*t=luaT_typenames[ttype(o)];
-const char*kind=(isinstack(L->ci,o))?
-NULL:
-NULL;
-if(kind)
-luaG_runerror(L,"attempt to %s %s "LUA_QL("%s")" (a %s value)",
-op,kind,name,t);
-else
-luaG_runerror(L,"attempt to %s a %s value",op,t);
-}
-static void luaG_concaterror(lua_State*L,StkId p1,StkId p2){
-if(ttisstring(p1)||ttisnumber(p1))p1=p2;
-luaG_typeerror(L,p1,"concatenate");
-}
-static void luaG_aritherror(lua_State*L,const TValue*p1,const TValue*p2){
-TValue temp;
-if(luaV_tonumber(p1,&temp)==NULL)
-p2=p1;
-luaG_typeerror(L,p2,"perform arithmetic on");
-}
-static int luaG_ordererror(lua_State*L,const TValue*p1,const TValue*p2){
-const char*t1=luaT_typenames[ttype(p1)];
-const char*t2=luaT_typenames[ttype(p2)];
-if(t1[2]==t2[2])
-luaG_runerror(L,"attempt to compare two %s values",t1);
-else
-luaG_runerror(L,"attempt to compare %s with %s",t1,t2);
-return 0;
-}
-static void addinfo(lua_State*L,const char*msg){
-CallInfo*ci=L->ci;
-if(isLua(ci)){
-char buff[60];
-int line=currentline(L,ci);
-luaO_chunkid(buff,getstr(getluaproto(ci)->source),60);
-luaO_pushfstring(L,"%s:%d: %s",buff,line,msg);
-}
-}
-static void luaG_errormsg(lua_State*L){
-if(L->errfunc!=0){
-StkId errfunc=restorestack(L,L->errfunc);
-if(!ttisfunction(errfunc))luaD_throw(L,5);
-setobj(L,L->top,L->top-1);
-setobj(L,L->top-1,errfunc);
-incr_top(L);
-luaD_call(L,L->top-2,1);
-}
-luaD_throw(L,2);
-}
-static void luaG_runerror(lua_State*L,const char*fmt,...){
-va_list argp;
-va_start(argp,fmt);
-addinfo(L,luaO_pushvfstring(L,fmt,argp));
-va_end(argp);
-luaG_errormsg(L);
-}
-static int luaZ_fill(ZIO*z){
-size_t size;
-lua_State*L=z->L;
-const char*buff;
-buff=z->reader(L,z->data,&size);
-if(buff==NULL||size==0)return(-1);
-z->n=size-1;
-z->p=buff;
-return char2int(*(z->p++));
-}
-static void luaZ_init(lua_State*L,ZIO*z,lua_Reader reader,void*data){
-z->L=L;
-z->reader=reader;
-z->data=data;
-z->n=0;
-z->p=NULL;
-}
-static char*luaZ_openspace(lua_State*L,Mbuffer*buff,size_t n){
-if(n>buff->buffsize){
-if(n<32)n=32;
-luaZ_resizebuffer(L,buff,n);
-}
-return buff->buffer;
-}
-#define opmode(t,a,b,c,m)(((t)<<7)|((a)<<6)|((b)<<4)|((c)<<2)|(m))
-static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]={
-opmode(0,1,OpArgR,OpArgN,iABC)
-,opmode(0,1,OpArgK,OpArgN,iABx)
-,opmode(0,1,OpArgU,OpArgU,iABC)
-,opmode(0,1,OpArgR,OpArgN,iABC)
-,opmode(0,1,OpArgU,OpArgN,iABC)
-,opmode(0,1,OpArgK,OpArgN,iABx)
-,opmode(0,1,OpArgR,OpArgK,iABC)
-,opmode(0,0,OpArgK,OpArgN,iABx)
-,opmode(0,0,OpArgU,OpArgN,iABC)
-,opmode(0,0,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgU,OpArgU,iABC)
-,opmode(0,1,OpArgR,OpArgK,iABC)
-,opmode(0,1,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgK,OpArgK,iABC)
-,opmode(0,1,OpArgR,OpArgN,iABC)
-,opmode(0,1,OpArgR,OpArgN,iABC)
-,opmode(0,1,OpArgR,OpArgN,iABC)
-,opmode(0,1,OpArgR,OpArgR,iABC)
-,opmode(0,0,OpArgR,OpArgN,iAsBx)
-,opmode(1,0,OpArgK,OpArgK,iABC)
-,opmode(1,0,OpArgK,OpArgK,iABC)
-,opmode(1,0,OpArgK,OpArgK,iABC)
-,opmode(1,1,OpArgR,OpArgU,iABC)
-,opmode(1,1,OpArgR,OpArgU,iABC)
-,opmode(0,1,OpArgU,OpArgU,iABC)
-,opmode(0,1,OpArgU,OpArgU,iABC)
-,opmode(0,0,OpArgU,OpArgN,iABC)
-,opmode(0,1,OpArgR,OpArgN,iAsBx)
-,opmode(0,1,OpArgR,OpArgN,iAsBx)
-,opmode(1,0,OpArgN,OpArgU,iABC)
-,opmode(0,0,OpArgU,OpArgU,iABC)
-,opmode(0,0,OpArgN,OpArgN,iABC)
-,opmode(0,1,OpArgU,OpArgN,iABx)
-,opmode(0,1,OpArgU,OpArgN,iABC)
-};
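/*
 * Annotation (editor's sketch, not part of the diff): each entry above packs
 * one opcode's properties into a byte via opmode(t,a,b,c,m): bit 7 marks a
 * test instruction, bit 6 marks "sets register A", bits 5-4 and 3-2 give the
 * B and C argument modes, and bits 1-0 give the instruction format
 * (iABC/iABx/iAsBx). Decoding helpers matching that layout (helper names are
 * the editor's, not from this file):
 */
static int op_is_test(unsigned char m) { return (m >> 7) & 1; }
static int op_sets_a(unsigned char m)  { return (m >> 6) & 1; }
static int op_bmode(unsigned char m)   { return (m >> 4) & 3; }
static int op_cmode(unsigned char m)   { return (m >> 2) & 3; }
static int op_format(unsigned char m)  { return m & 3; }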
-#define next(ls)(ls->current=zgetc(ls->z))
-#define currIsNewline(ls)(ls->current=='\n'||ls->current=='\r')
-static const char*const luaX_tokens[]={
-"and","break","do","else","elseif",
-"end","false","for","function","if",
-"in","local","nil","not","or","repeat",
-"return","then","true","until","while",
-"..","...","==",">=","<=","~=",
-"<number>","<name>","<string>","<eof>",
-NULL
-};
-#define save_and_next(ls)(save(ls,ls->current),next(ls))
-static void save(LexState*ls,int c){
-Mbuffer*b=ls->buff;
-if(b->n+1>b->buffsize){
-size_t newsize;
-if(b->buffsize>=((size_t)(~(size_t)0)-2)/2)
-luaX_lexerror(ls,"lexical element too long",0);
-newsize=b->buffsize*2;
-luaZ_resizebuffer(ls->L,b,newsize);
-}
-b->buffer[b->n++]=cast(char,c);
-}
-static void luaX_init(lua_State*L){
-int i;
-for(i=0;i<(cast(int,TK_WHILE-257+1));i++){
-TString*ts=luaS_new(L,luaX_tokens[i]);
-luaS_fix(ts);
-ts->tsv.reserved=cast_byte(i+1);
-}
-}
-static const char*luaX_token2str(LexState*ls,int token){
-if(token<257){
-return(iscntrl(token))?luaO_pushfstring(ls->L,"char(%d)",token):
-luaO_pushfstring(ls->L,"%c",token);
-}
-else
-return luaX_tokens[token-257];
-}
-static const char*txtToken(LexState*ls,int token){
-switch(token){
-case TK_NAME:
-case TK_STRING:
-case TK_NUMBER:
-save(ls,'\0');
-return luaZ_buffer(ls->buff);
-default:
-return luaX_token2str(ls,token);
-}
-}
-static void luaX_lexerror(LexState*ls,const char*msg,int token){
-char buff[80];
-luaO_chunkid(buff,getstr(ls->source),80);
-msg=luaO_pushfstring(ls->L,"%s:%d: %s",buff,ls->linenumber,msg);
-if(token)
-luaO_pushfstring(ls->L,"%s near "LUA_QL("%s"),msg,txtToken(ls,token));
-luaD_throw(ls->L,3);
-}
-static void luaX_syntaxerror(LexState*ls,const char*msg){
-luaX_lexerror(ls,msg,ls->t.token);
-}
-static TString*luaX_newstring(LexState*ls,const char*str,size_t l){
-lua_State*L=ls->L;
-TString*ts=luaS_newlstr(L,str,l);
-TValue*o=luaH_setstr(L,ls->fs->h,ts);
-if(ttisnil(o)){
-setbvalue(o,1);
-luaC_checkGC(L);
-}
-return ts;
-}
-static void inclinenumber(LexState*ls){
-int old=ls->current;
-next(ls);
-if(currIsNewline(ls)&&ls->current!=old)
-next(ls);
-if(++ls->linenumber>=(INT_MAX-2))
-luaX_syntaxerror(ls,"chunk has too many lines");
-}
-static void luaX_setinput(lua_State*L,LexState*ls,ZIO*z,TString*source){
-ls->decpoint='.';
-ls->L=L;
-ls->lookahead.token=TK_EOS;
-ls->z=z;
-ls->fs=NULL;
-ls->linenumber=1;
-ls->lastline=1;
-ls->source=source;
-luaZ_resizebuffer(ls->L,ls->buff,32);
-next(ls);
-}
-static int check_next(LexState*ls,const char*set){
-if(!strchr(set,ls->current))
-return 0;
-save_and_next(ls);
-return 1;
-}
-static void buffreplace(LexState*ls,char from,char to){
-size_t n=luaZ_bufflen(ls->buff);
-char*p=luaZ_buffer(ls->buff);
-while(n--)
-if(p[n]==from)p[n]=to;
-}
-static void read_numeral(LexState*ls,SemInfo*seminfo){
-do{
-save_and_next(ls);
-}while(isdigit(ls->current)||ls->current=='.');
-if(check_next(ls,"Ee"))
-check_next(ls,"+-");
-while(isalnum(ls->current)||ls->current=='_')
-save_and_next(ls);
-save(ls,'\0');
-buffreplace(ls,'.',ls->decpoint);
-if(!luaO_str2d(luaZ_buffer(ls->buff),&seminfo->r))
-luaX_lexerror(ls,"malformed number",TK_NUMBER);
-}
-static int skip_sep(LexState*ls){
-int count=0;
-int s=ls->current;
-save_and_next(ls);
-while(ls->current=='='){
-save_and_next(ls);
-count++;
-}
-return(ls->current==s)?count:(-count)-1;
-}
-static void read_long_string(LexState*ls,SemInfo*seminfo,int sep){
-int cont=0;
-(void)(cont);
-save_and_next(ls);
-if(currIsNewline(ls))
-inclinenumber(ls);
-for(;;){
-switch(ls->current){
-case(-1):
-luaX_lexerror(ls,(seminfo)?"unfinished long string":
-"unfinished long comment",TK_EOS);
-break;
-case']':{
-if(skip_sep(ls)==sep){
-save_and_next(ls);
-goto endloop;
-}
-break;
-}
-case'\n':
-case'\r':{
-save(ls,'\n');
-inclinenumber(ls);
-if(!seminfo)luaZ_resetbuffer(ls->buff);
-break;
-}
-default:{
-if(seminfo)save_and_next(ls);
-else next(ls);
-}
-}
-}endloop:
-if(seminfo)
-seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+(2+sep),
-luaZ_bufflen(ls->buff)-2*(2+sep));
-}
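/*
 * Annotation (editor's sketch, not part of the diff): skip_sep() above
 * returns the "level" of a long bracket, i.e. the number of '=' characters
 * between the two brackets ([[ is level 0, [==[ is level 2), and
 * read_long_string() only closes on a ']' run of exactly the same level,
 * stripping 2+sep characters from each end of the buffer. The counting rule
 * in isolation (editor's sketch, not this file's API):
 */
static int long_bracket_level(const char *s)  /* s points at the first '[' */
{
  int level = 0;
  if (*s++ != '[') return -1;
  while (*s == '=') { level++; s++; }
  return (*s == '[') ? level : -1;            /* -1: not a long bracket */
}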
-static void read_string(LexState*ls,int del,SemInfo*seminfo){
-save_and_next(ls);
-while(ls->current!=del){
-switch(ls->current){
-case(-1):
-luaX_lexerror(ls,"unfinished string",TK_EOS);
-continue;
-case'\n':
-case'\r':
-luaX_lexerror(ls,"unfinished string",TK_STRING);
-continue;
-case'\\':{
-int c;
-next(ls);
-switch(ls->current){
-case'a':c='\a';break;
-case'b':c='\b';break;
-case'f':c='\f';break;
-case'n':c='\n';break;
-case'r':c='\r';break;
-case't':c='\t';break;
-case'v':c='\v';break;
-case'\n':
-case'\r':save(ls,'\n');inclinenumber(ls);continue;
-case(-1):continue;
-default:{
-if(!isdigit(ls->current))
-save_and_next(ls);
-else{
-int i=0;
-c=0;
-do{
-c=10*c+(ls->current-'0');
-next(ls);
-}while(++i<3&&isdigit(ls->current));
-if(c>UCHAR_MAX)
-luaX_lexerror(ls,"escape sequence too large",TK_STRING);
-save(ls,c);
-}
-continue;
-}
-}
-save(ls,c);
-next(ls);
-continue;
-}
-default:
-save_and_next(ls);
-}
-}
-save_and_next(ls);
-seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+1,
-luaZ_bufflen(ls->buff)-2);
-}
-static int llex(LexState*ls,SemInfo*seminfo){
-luaZ_resetbuffer(ls->buff);
-for(;;){
-switch(ls->current){
-case'\n':
-case'\r':{
-inclinenumber(ls);
-continue;
-}
-case'-':{
-next(ls);
-if(ls->current!='-')return'-';
-next(ls);
-if(ls->current=='['){
-int sep=skip_sep(ls);
-luaZ_resetbuffer(ls->buff);
-if(sep>=0){
-read_long_string(ls,NULL,sep);
-luaZ_resetbuffer(ls->buff);
-continue;
-}
-}
-while(!currIsNewline(ls)&&ls->current!=(-1))
-next(ls);
-continue;
-}
-case'[':{
-int sep=skip_sep(ls);
-if(sep>=0){
-read_long_string(ls,seminfo,sep);
-return TK_STRING;
-}
-else if(sep==-1)return'[';
-else luaX_lexerror(ls,"invalid long string delimiter",TK_STRING);
-}
-case'=':{
-next(ls);
-if(ls->current!='=')return'=';
-else{next(ls);return TK_EQ;}
-}
-case'<':{
-next(ls);
-if(ls->current!='=')return'<';
-else{next(ls);return TK_LE;}
-}
-case'>':{
-next(ls);
-if(ls->current!='=')return'>';
-else{next(ls);return TK_GE;}
-}
-case'~':{
-next(ls);
-if(ls->current!='=')return'~';
-else{next(ls);return TK_NE;}
-}
-case'"':
-case'\'':{
-read_string(ls,ls->current,seminfo);
-return TK_STRING;
-}
-case'.':{
-save_and_next(ls);
-if(check_next(ls,".")){
-if(check_next(ls,"."))
-return TK_DOTS;
-else return TK_CONCAT;
-}
-else if(!isdigit(ls->current))return'.';
-else{
-read_numeral(ls,seminfo);
-return TK_NUMBER;
-}
-}
-case(-1):{
-return TK_EOS;
-}
-default:{
-if(isspace(ls->current)){
-next(ls);
-continue;
-}
-else if(isdigit(ls->current)){
-read_numeral(ls,seminfo);
-return TK_NUMBER;
-}
-else if(isalpha(ls->current)||ls->current=='_'){
-TString*ts;
-do{
-save_and_next(ls);
-}while(isalnum(ls->current)||ls->current=='_');
-ts=luaX_newstring(ls,luaZ_buffer(ls->buff),
-luaZ_bufflen(ls->buff));
-if(ts->tsv.reserved>0)
-return ts->tsv.reserved-1+257;
-else{
-seminfo->ts=ts;
-return TK_NAME;
-}
-}
-else{
-int c=ls->current;
-next(ls);
-return c;
-}
-}
-}
-}
-}
-static void luaX_next(LexState*ls){
-ls->lastline=ls->linenumber;
-if(ls->lookahead.token!=TK_EOS){
-ls->t=ls->lookahead;
-ls->lookahead.token=TK_EOS;
-}
-else
-ls->t.token=llex(ls,&ls->t.seminfo);
-}
-static void luaX_lookahead(LexState*ls){
-ls->lookahead.token=llex(ls,&ls->lookahead.seminfo);
-}
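/*
 * Annotation (editor's sketch, not part of the diff): the lexer keeps exactly
 * one token of lookahead: luaX_lookahead() fills ls->lookahead, and the next
 * luaX_next() hands it back instead of scanning again. constructor() below
 * relies on this to tell a `NAME = expr` record field from a plain list item.
 * The same pattern in isolation (toy types, not this file's API):
 */
struct toy_lexer { const char *p; int t, lookahead, has_lookahead; };

static int toy_scan(struct toy_lexer *lx)       /* stand-in token source */
{
  return *lx->p ? (unsigned char)*lx->p++ : -1; /* one char per token; -1 = EOS */
}
static int toy_peek(struct toy_lexer *lx)       /* fill lookahead, keep it */
{
  if (!lx->has_lookahead) { lx->lookahead = toy_scan(lx); lx->has_lookahead = 1; }
  return lx->lookahead;
}
static int toy_next(struct toy_lexer *lx)       /* consume lookahead if any */
{
  if (lx->has_lookahead) { lx->has_lookahead = 0; return (lx->t = lx->lookahead); }
  return (lx->t = toy_scan(lx));
}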
-#define hasjumps(e)((e)->t!=(e)->f)
-static int isnumeral(expdesc*e){
-return(e->k==VKNUM&&e->t==(-1)&&e->f==(-1));
-}
-static void luaK_nil(FuncState*fs,int from,int n){
-Instruction*previous;
-if(fs->pc>fs->lasttarget){
-if(fs->pc==0){
-if(from>=fs->nactvar)
-return;
-}
-else{
-previous=&fs->f->code[fs->pc-1];
-if(GET_OPCODE(*previous)==OP_LOADNIL){
-int pfrom=GETARG_A(*previous);
-int pto=GETARG_B(*previous);
-if(pfrom<=from&&from<=pto+1){
-if(from+n-1>pto)
-SETARG_B(*previous,from+n-1);
-return;
-}
-}
-}
-}
-luaK_codeABC(fs,OP_LOADNIL,from,from+n-1,0);
-}
-static int luaK_jump(FuncState*fs){
-int jpc=fs->jpc;
-int j;
-fs->jpc=(-1);
-j=luaK_codeAsBx(fs,OP_JMP,0,(-1));
-luaK_concat(fs,&j,jpc);
-return j;
-}
-static void luaK_ret(FuncState*fs,int first,int nret){
-luaK_codeABC(fs,OP_RETURN,first,nret+1,0);
-}
-static int condjump(FuncState*fs,OpCode op,int A,int B,int C){
-luaK_codeABC(fs,op,A,B,C);
-return luaK_jump(fs);
-}
-static void fixjump(FuncState*fs,int pc,int dest){
-Instruction*jmp=&fs->f->code[pc];
-int offset=dest-(pc+1);
-if(abs(offset)>(((1<<(9+9))-1)>>1))
-luaX_syntaxerror(fs->ls,"control structure too long");
-SETARG_sBx(*jmp,offset);
-}
-static int luaK_getlabel(FuncState*fs){
-fs->lasttarget=fs->pc;
-return fs->pc;
-}
-static int getjump(FuncState*fs,int pc){
-int offset=GETARG_sBx(fs->f->code[pc]);
-if(offset==(-1))
-return(-1);
-else
-return(pc+1)+offset;
-}
-static Instruction*getjumpcontrol(FuncState*fs,int pc){
-Instruction*pi=&fs->f->code[pc];
-if(pc>=1&&testTMode(GET_OPCODE(*(pi-1))))
-return pi-1;
-else
-return pi;
-}
-static int need_value(FuncState*fs,int list){
-for(;list!=(-1);list=getjump(fs,list)){
-Instruction i=*getjumpcontrol(fs,list);
-if(GET_OPCODE(i)!=OP_TESTSET)return 1;
-}
-return 0;
-}
-static int patchtestreg(FuncState*fs,int node,int reg){
-Instruction*i=getjumpcontrol(fs,node);
-if(GET_OPCODE(*i)!=OP_TESTSET)
-return 0;
-if(reg!=((1<<8)-1)&&reg!=GETARG_B(*i))
-SETARG_A(*i,reg);
-else
-*i=CREATE_ABC(OP_TEST,GETARG_B(*i),0,GETARG_C(*i));
-return 1;
-}
-static void removevalues(FuncState*fs,int list){
-for(;list!=(-1);list=getjump(fs,list))
-patchtestreg(fs,list,((1<<8)-1));
-}
-static void patchlistaux(FuncState*fs,int list,int vtarget,int reg,
-int dtarget){
-while(list!=(-1)){
-int next=getjump(fs,list);
-if(patchtestreg(fs,list,reg))
-fixjump(fs,list,vtarget);
-else
-fixjump(fs,list,dtarget);
-list=next;
-}
-}
-static void dischargejpc(FuncState*fs){
-patchlistaux(fs,fs->jpc,fs->pc,((1<<8)-1),fs->pc);
-fs->jpc=(-1);
-}
-static void luaK_patchlist(FuncState*fs,int list,int target){
-if(target==fs->pc)
-luaK_patchtohere(fs,list);
-else{
-patchlistaux(fs,list,target,((1<<8)-1),target);
-}
-}
-static void luaK_patchtohere(FuncState*fs,int list){
-luaK_getlabel(fs);
-luaK_concat(fs,&fs->jpc,list);
-}
-static void luaK_concat(FuncState*fs,int*l1,int l2){
-if(l2==(-1))return;
-else if(*l1==(-1))
-*l1=l2;
-else{
-int list=*l1;
-int next;
-while((next=getjump(fs,list))!=(-1))
-list=next;
-fixjump(fs,list,l2);
-}
-}
-static void luaK_checkstack(FuncState*fs,int n){
-int newstack=fs->freereg+n;
-if(newstack>fs->f->maxstacksize){
-if(newstack>=250)
-luaX_syntaxerror(fs->ls,"function or expression too complex");
-fs->f->maxstacksize=cast_byte(newstack);
-}
-}
-static void luaK_reserveregs(FuncState*fs,int n){
-luaK_checkstack(fs,n);
-fs->freereg+=n;
-}
-static void freereg(FuncState*fs,int reg){
-if(!ISK(reg)&&reg>=fs->nactvar){
-fs->freereg--;
-}
-}
-static void freeexp(FuncState*fs,expdesc*e){
-if(e->k==VNONRELOC)
-freereg(fs,e->u.s.info);
-}
-static int addk(FuncState*fs,TValue*k,TValue*v){
-lua_State*L=fs->L;
-TValue*idx=luaH_set(L,fs->h,k);
-Proto*f=fs->f;
-int oldsize=f->sizek;
-if(ttisnumber(idx)){
-return cast_int(nvalue(idx));
-}
-else{
-setnvalue(idx,cast_num(fs->nk));
-luaM_growvector(L,f->k,fs->nk,f->sizek,TValue,
-((1<<(9+9))-1),"constant table overflow");
-while(oldsize<f->sizek)setnilvalue(&f->k[oldsize++]);
-setobj(L,&f->k[fs->nk],v);
-luaC_barrier(L,f,v);
-return fs->nk++;
-}
-}
-static int luaK_stringK(FuncState*fs,TString*s){
-TValue o;
-setsvalue(fs->L,&o,s);
-return addk(fs,&o,&o);
-}
-static int luaK_numberK(FuncState*fs,lua_Number r){
-TValue o;
-setnvalue(&o,r);
-return addk(fs,&o,&o);
-}
-static int boolK(FuncState*fs,int b){
-TValue o;
-setbvalue(&o,b);
-return addk(fs,&o,&o);
-}
-static int nilK(FuncState*fs){
-TValue k,v;
-setnilvalue(&v);
-sethvalue(fs->L,&k,fs->h);
-return addk(fs,&k,&v);
-}
-static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults){
-if(e->k==VCALL){
-SETARG_C(getcode(fs,e),nresults+1);
-}
-else if(e->k==VVARARG){
-SETARG_B(getcode(fs,e),nresults+1);
-SETARG_A(getcode(fs,e),fs->freereg);
-luaK_reserveregs(fs,1);
-}
-}
-static void luaK_setoneret(FuncState*fs,expdesc*e){
-if(e->k==VCALL){
-e->k=VNONRELOC;
-e->u.s.info=GETARG_A(getcode(fs,e));
-}
-else if(e->k==VVARARG){
-SETARG_B(getcode(fs,e),2);
-e->k=VRELOCABLE;
-}
-}
-static void luaK_dischargevars(FuncState*fs,expdesc*e){
-switch(e->k){
-case VLOCAL:{
-e->k=VNONRELOC;
-break;
-}
-case VUPVAL:{
-e->u.s.info=luaK_codeABC(fs,OP_GETUPVAL,0,e->u.s.info,0);
-e->k=VRELOCABLE;
-break;
-}
-case VGLOBAL:{
-e->u.s.info=luaK_codeABx(fs,OP_GETGLOBAL,0,e->u.s.info);
-e->k=VRELOCABLE;
-break;
-}
-case VINDEXED:{
-freereg(fs,e->u.s.aux);
-freereg(fs,e->u.s.info);
-e->u.s.info=luaK_codeABC(fs,OP_GETTABLE,0,e->u.s.info,e->u.s.aux);
-e->k=VRELOCABLE;
-break;
-}
-case VVARARG:
-case VCALL:{
-luaK_setoneret(fs,e);
-break;
-}
-default:break;
-}
-}
-static int code_label(FuncState*fs,int A,int b,int jump){
-luaK_getlabel(fs);
-return luaK_codeABC(fs,OP_LOADBOOL,A,b,jump);
-}
-static void discharge2reg(FuncState*fs,expdesc*e,int reg){
-luaK_dischargevars(fs,e);
-switch(e->k){
-case VNIL:{
-luaK_nil(fs,reg,1);
-break;
-}
-case VFALSE:case VTRUE:{
-luaK_codeABC(fs,OP_LOADBOOL,reg,e->k==VTRUE,0);
-break;
-}
-case VK:{
-luaK_codeABx(fs,OP_LOADK,reg,e->u.s.info);
-break;
-}
-case VKNUM:{
-luaK_codeABx(fs,OP_LOADK,reg,luaK_numberK(fs,e->u.nval));
-break;
-}
-case VRELOCABLE:{
-Instruction*pc=&getcode(fs,e);
-SETARG_A(*pc,reg);
-break;
-}
-case VNONRELOC:{
-if(reg!=e->u.s.info)
-luaK_codeABC(fs,OP_MOVE,reg,e->u.s.info,0);
-break;
-}
-default:{
-return;
-}
-}
-e->u.s.info=reg;
-e->k=VNONRELOC;
-}
-static void discharge2anyreg(FuncState*fs,expdesc*e){
-if(e->k!=VNONRELOC){
-luaK_reserveregs(fs,1);
-discharge2reg(fs,e,fs->freereg-1);
-}
-}
-static void exp2reg(FuncState*fs,expdesc*e,int reg){
-discharge2reg(fs,e,reg);
-if(e->k==VJMP)
-luaK_concat(fs,&e->t,e->u.s.info);
-if(hasjumps(e)){
-int final;
-int p_f=(-1);
-int p_t=(-1);
-if(need_value(fs,e->t)||need_value(fs,e->f)){
-int fj=(e->k==VJMP)?(-1):luaK_jump(fs);
-p_f=code_label(fs,reg,0,1);
-p_t=code_label(fs,reg,1,0);
-luaK_patchtohere(fs,fj);
-}
-final=luaK_getlabel(fs);
-patchlistaux(fs,e->f,final,reg,p_f);
-patchlistaux(fs,e->t,final,reg,p_t);
-}
-e->f=e->t=(-1);
-e->u.s.info=reg;
-e->k=VNONRELOC;
-}
-static void luaK_exp2nextreg(FuncState*fs,expdesc*e){
-luaK_dischargevars(fs,e);
-freeexp(fs,e);
-luaK_reserveregs(fs,1);
-exp2reg(fs,e,fs->freereg-1);
-}
-static int luaK_exp2anyreg(FuncState*fs,expdesc*e){
-luaK_dischargevars(fs,e);
-if(e->k==VNONRELOC){
-if(!hasjumps(e))return e->u.s.info;
-if(e->u.s.info>=fs->nactvar){
-exp2reg(fs,e,e->u.s.info);
-return e->u.s.info;
-}
-}
-luaK_exp2nextreg(fs,e);
-return e->u.s.info;
-}
-static void luaK_exp2val(FuncState*fs,expdesc*e){
-if(hasjumps(e))
-luaK_exp2anyreg(fs,e);
-else
-luaK_dischargevars(fs,e);
-}
-static int luaK_exp2RK(FuncState*fs,expdesc*e){
-luaK_exp2val(fs,e);
-switch(e->k){
-case VKNUM:
-case VTRUE:
-case VFALSE:
-case VNIL:{
-if(fs->nk<=((1<<(9-1))-1)){
-e->u.s.info=(e->k==VNIL)?nilK(fs):
-(e->k==VKNUM)?luaK_numberK(fs,e->u.nval):
-boolK(fs,(e->k==VTRUE));
-e->k=VK;
-return RKASK(e->u.s.info);
-}
-else break;
-}
-case VK:{
-if(e->u.s.info<=((1<<(9-1))-1))
-return RKASK(e->u.s.info);
-else break;
-}
-default:break;
-}
-return luaK_exp2anyreg(fs,e);
-}
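/*
 * Annotation (editor's sketch, not part of the diff): luaK_exp2RK() above
 * decides whether an operand can be encoded as an "RK" value: B/C arguments
 * are 9 bits wide, the top bit selects a constant-table index instead of a
 * register, and only constants with index <= 255 qualify; anything larger is
 * first loaded into a register. The encoding in isolation (editor's names):
 */
#define TOY_BITRK  (1 << 8)            /* top bit of a 9-bit B/C field */
#define TOY_MAXRK  (TOY_BITRK - 1)     /* largest constant index that fits */

static int toy_rk_constant(int k_index)  /* returns -1 if it will not fit */
{
  return (k_index <= TOY_MAXRK) ? (k_index | TOY_BITRK) : -1;
}
static int toy_rk_is_constant(int rk) { return (rk & TOY_BITRK) != 0; }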
-static void luaK_storevar(FuncState*fs,expdesc*var,expdesc*ex){
-switch(var->k){
-case VLOCAL:{
-freeexp(fs,ex);
-exp2reg(fs,ex,var->u.s.info);
-return;
-}
-case VUPVAL:{
-int e=luaK_exp2anyreg(fs,ex);
-luaK_codeABC(fs,OP_SETUPVAL,e,var->u.s.info,0);
-break;
-}
-case VGLOBAL:{
-int e=luaK_exp2anyreg(fs,ex);
-luaK_codeABx(fs,OP_SETGLOBAL,e,var->u.s.info);
-break;
-}
-case VINDEXED:{
-int e=luaK_exp2RK(fs,ex);
-luaK_codeABC(fs,OP_SETTABLE,var->u.s.info,var->u.s.aux,e);
-break;
-}
-default:{
-break;
-}
-}
-freeexp(fs,ex);
-}
-static void luaK_self(FuncState*fs,expdesc*e,expdesc*key){
-int func;
-luaK_exp2anyreg(fs,e);
-freeexp(fs,e);
-func=fs->freereg;
-luaK_reserveregs(fs,2);
-luaK_codeABC(fs,OP_SELF,func,e->u.s.info,luaK_exp2RK(fs,key));
-freeexp(fs,key);
-e->u.s.info=func;
-e->k=VNONRELOC;
-}
-static void invertjump(FuncState*fs,expdesc*e){
-Instruction*pc=getjumpcontrol(fs,e->u.s.info);
-SETARG_A(*pc,!(GETARG_A(*pc)));
-}
-static int jumponcond(FuncState*fs,expdesc*e,int cond){
-if(e->k==VRELOCABLE){
-Instruction ie=getcode(fs,e);
-if(GET_OPCODE(ie)==OP_NOT){
-fs->pc--;
-return condjump(fs,OP_TEST,GETARG_B(ie),0,!cond);
-}
-}
-discharge2anyreg(fs,e);
-freeexp(fs,e);
-return condjump(fs,OP_TESTSET,((1<<8)-1),e->u.s.info,cond);
-}
-static void luaK_goiftrue(FuncState*fs,expdesc*e){
-int pc;
-luaK_dischargevars(fs,e);
-switch(e->k){
-case VK:case VKNUM:case VTRUE:{
-pc=(-1);
-break;
-}
-case VJMP:{
-invertjump(fs,e);
-pc=e->u.s.info;
-break;
-}
-default:{
-pc=jumponcond(fs,e,0);
-break;
-}
-}
-luaK_concat(fs,&e->f,pc);
-luaK_patchtohere(fs,e->t);
-e->t=(-1);
-}
-static void luaK_goiffalse(FuncState*fs,expdesc*e){
-int pc;
-luaK_dischargevars(fs,e);
-switch(e->k){
-case VNIL:case VFALSE:{
-pc=(-1);
-break;
-}
-case VJMP:{
-pc=e->u.s.info;
-break;
-}
-default:{
-pc=jumponcond(fs,e,1);
-break;
-}
-}
-luaK_concat(fs,&e->t,pc);
-luaK_patchtohere(fs,e->f);
-e->f=(-1);
-}
-static void codenot(FuncState*fs,expdesc*e){
-luaK_dischargevars(fs,e);
-switch(e->k){
-case VNIL:case VFALSE:{
-e->k=VTRUE;
-break;
-}
-case VK:case VKNUM:case VTRUE:{
-e->k=VFALSE;
-break;
-}
-case VJMP:{
-invertjump(fs,e);
-break;
-}
-case VRELOCABLE:
-case VNONRELOC:{
-discharge2anyreg(fs,e);
-freeexp(fs,e);
-e->u.s.info=luaK_codeABC(fs,OP_NOT,0,e->u.s.info,0);
-e->k=VRELOCABLE;
-break;
-}
-default:{
-break;
-}
-}
-{int temp=e->f;e->f=e->t;e->t=temp;}
-removevalues(fs,e->f);
-removevalues(fs,e->t);
-}
-static void luaK_indexed(FuncState*fs,expdesc*t,expdesc*k){
-t->u.s.aux=luaK_exp2RK(fs,k);
-t->k=VINDEXED;
-}
-static int constfolding(OpCode op,expdesc*e1,expdesc*e2){
-lua_Number v1,v2,r;
-if(!isnumeral(e1)||!isnumeral(e2))return 0;
-v1=e1->u.nval;
-v2=e2->u.nval;
-switch(op){
-case OP_ADD:r=luai_numadd(v1,v2);break;
-case OP_SUB:r=luai_numsub(v1,v2);break;
-case OP_MUL:r=luai_nummul(v1,v2);break;
-case OP_DIV:
-if(v2==0)return 0;
-r=luai_numdiv(v1,v2);break;
-case OP_MOD:
-if(v2==0)return 0;
-r=luai_nummod(v1,v2);break;
-case OP_POW:r=luai_numpow(v1,v2);break;
-case OP_UNM:r=luai_numunm(v1);break;
-case OP_LEN:return 0;
-default:r=0;break;
-}
-if(luai_numisnan(r))return 0;
-e1->u.nval=r;
-return 1;
-}
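/*
 * Annotation (editor's sketch, not part of the diff): constfolding() above
 * evaluates an arithmetic operator at compile time when both operands are
 * numeric literals, refusing division/modulo by zero and any NaN result
 * rather than folding them into the constant table. The guard logic in
 * isolation, over plain doubles (editor's sketch):
 */
#include <math.h>

static int toy_fold_div(double a, double b, double *out)
{
  if (b == 0.0) return 0;       /* leave x/0 to be evaluated at run time */
  *out = a / b;
  if (isnan(*out)) return 0;    /* never bake a NaN into a constant */
  return 1;                     /* folded successfully */
}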
-static void codearith(FuncState*fs,OpCode op,expdesc*e1,expdesc*e2){
-if(constfolding(op,e1,e2))
-return;
-else{
-int o2=(op!=OP_UNM&&op!=OP_LEN)?luaK_exp2RK(fs,e2):0;
-int o1=luaK_exp2RK(fs,e1);
-if(o1>o2){
-freeexp(fs,e1);
-freeexp(fs,e2);
-}
-else{
-freeexp(fs,e2);
-freeexp(fs,e1);
-}
-e1->u.s.info=luaK_codeABC(fs,op,0,o1,o2);
-e1->k=VRELOCABLE;
-}
-}
-static void codecomp(FuncState*fs,OpCode op,int cond,expdesc*e1,
-expdesc*e2){
-int o1=luaK_exp2RK(fs,e1);
-int o2=luaK_exp2RK(fs,e2);
-freeexp(fs,e2);
-freeexp(fs,e1);
-if(cond==0&&op!=OP_EQ){
-int temp;
-temp=o1;o1=o2;o2=temp;
-cond=1;
-}
-e1->u.s.info=condjump(fs,op,cond,o1,o2);
-e1->k=VJMP;
-}
-static void luaK_prefix(FuncState*fs,UnOpr op,expdesc*e){
-expdesc e2;
-e2.t=e2.f=(-1);e2.k=VKNUM;e2.u.nval=0;
-switch(op){
-case OPR_MINUS:{
-if(!isnumeral(e))
-luaK_exp2anyreg(fs,e);
-codearith(fs,OP_UNM,e,&e2);
-break;
-}
-case OPR_NOT:codenot(fs,e);break;
-case OPR_LEN:{
-luaK_exp2anyreg(fs,e);
-codearith(fs,OP_LEN,e,&e2);
-break;
-}
-default:;
-}
-}
-static void luaK_infix(FuncState*fs,BinOpr op,expdesc*v){
-switch(op){
-case OPR_AND:{
-luaK_goiftrue(fs,v);
-break;
-}
-case OPR_OR:{
-luaK_goiffalse(fs,v);
-break;
-}
-case OPR_CONCAT:{
-luaK_exp2nextreg(fs,v);
-break;
-}
-case OPR_ADD:case OPR_SUB:case OPR_MUL:case OPR_DIV:
-case OPR_MOD:case OPR_POW:{
-if(!isnumeral(v))luaK_exp2RK(fs,v);
-break;
-}
-default:{
-luaK_exp2RK(fs,v);
-break;
-}
-}
-}
-static void luaK_posfix(FuncState*fs,BinOpr op,expdesc*e1,expdesc*e2){
-switch(op){
-case OPR_AND:{
-luaK_dischargevars(fs,e2);
-luaK_concat(fs,&e2->f,e1->f);
-*e1=*e2;
-break;
-}
-case OPR_OR:{
-luaK_dischargevars(fs,e2);
-luaK_concat(fs,&e2->t,e1->t);
-*e1=*e2;
-break;
-}
-case OPR_CONCAT:{
-luaK_exp2val(fs,e2);
-if(e2->k==VRELOCABLE&&GET_OPCODE(getcode(fs,e2))==OP_CONCAT){
-freeexp(fs,e1);
-SETARG_B(getcode(fs,e2),e1->u.s.info);
-e1->k=VRELOCABLE;e1->u.s.info=e2->u.s.info;
-}
-else{
-luaK_exp2nextreg(fs,e2);
-codearith(fs,OP_CONCAT,e1,e2);
-}
-break;
-}
-case OPR_ADD:codearith(fs,OP_ADD,e1,e2);break;
-case OPR_SUB:codearith(fs,OP_SUB,e1,e2);break;
-case OPR_MUL:codearith(fs,OP_MUL,e1,e2);break;
-case OPR_DIV:codearith(fs,OP_DIV,e1,e2);break;
-case OPR_MOD:codearith(fs,OP_MOD,e1,e2);break;
-case OPR_POW:codearith(fs,OP_POW,e1,e2);break;
-case OPR_EQ:codecomp(fs,OP_EQ,1,e1,e2);break;
-case OPR_NE:codecomp(fs,OP_EQ,0,e1,e2);break;
-case OPR_LT:codecomp(fs,OP_LT,1,e1,e2);break;
-case OPR_LE:codecomp(fs,OP_LE,1,e1,e2);break;
-case OPR_GT:codecomp(fs,OP_LT,0,e1,e2);break;
-case OPR_GE:codecomp(fs,OP_LE,0,e1,e2);break;
-default:;
-}
-}
-static void luaK_fixline(FuncState*fs,int line){
-fs->f->lineinfo[fs->pc-1]=line;
-}
-static int luaK_code(FuncState*fs,Instruction i,int line){
-Proto*f=fs->f;
-dischargejpc(fs);
-luaM_growvector(fs->L,f->code,fs->pc,f->sizecode,Instruction,
-(INT_MAX-2),"code size overflow");
-f->code[fs->pc]=i;
-luaM_growvector(fs->L,f->lineinfo,fs->pc,f->sizelineinfo,int,
-(INT_MAX-2),"code size overflow");
-f->lineinfo[fs->pc]=line;
-return fs->pc++;
-}
-static int luaK_codeABC(FuncState*fs,OpCode o,int a,int b,int c){
-return luaK_code(fs,CREATE_ABC(o,a,b,c),fs->ls->lastline);
-}
-static int luaK_codeABx(FuncState*fs,OpCode o,int a,unsigned int bc){
-return luaK_code(fs,CREATE_ABx(o,a,bc),fs->ls->lastline);
-}
-static void luaK_setlist(FuncState*fs,int base,int nelems,int tostore){
-int c=(nelems-1)/50+1;
-int b=(tostore==(-1))?0:tostore;
-if(c<=((1<<9)-1))
-luaK_codeABC(fs,OP_SETLIST,base,b,c);
-else{
-luaK_codeABC(fs,OP_SETLIST,base,b,0);
-luaK_code(fs,cast(Instruction,c),fs->ls->lastline);
-}
-fs->freereg=base+1;
-}
-#define hasmultret(k)((k)==VCALL||(k)==VVARARG)
-#define getlocvar(fs,i)((fs)->f->locvars[(fs)->actvar[i]])
-#define luaY_checklimit(fs,v,l,m)if((v)>(l))errorlimit(fs,l,m)
-typedef struct BlockCnt{
-struct BlockCnt*previous;
-int breaklist;
-lu_byte nactvar;
-lu_byte upval;
-lu_byte isbreakable;
-}BlockCnt;
-static void chunk(LexState*ls);
-static void expr(LexState*ls,expdesc*v);
-static void anchor_token(LexState*ls){
-if(ls->t.token==TK_NAME||ls->t.token==TK_STRING){
-TString*ts=ls->t.seminfo.ts;
-luaX_newstring(ls,getstr(ts),ts->tsv.len);
-}
-}
-static void error_expected(LexState*ls,int token){
-luaX_syntaxerror(ls,
-luaO_pushfstring(ls->L,LUA_QL("%s")" expected",luaX_token2str(ls,token)));
-}
-static void errorlimit(FuncState*fs,int limit,const char*what){
-const char*msg=(fs->f->linedefined==0)?
-luaO_pushfstring(fs->L,"main function has more than %d %s",limit,what):
-luaO_pushfstring(fs->L,"function at line %d has more than %d %s",
-fs->f->linedefined,limit,what);
-luaX_lexerror(fs->ls,msg,0);
-}
-static int testnext(LexState*ls,int c){
-if(ls->t.token==c){
-luaX_next(ls);
-return 1;
-}
-else return 0;
-}
-static void check(LexState*ls,int c){
-if(ls->t.token!=c)
-error_expected(ls,c);
-}
-static void checknext(LexState*ls,int c){
-check(ls,c);
-luaX_next(ls);
-}
-#define check_condition(ls,c,msg){if(!(c))luaX_syntaxerror(ls,msg);}
-static void check_match(LexState*ls,int what,int who,int where){
-if(!testnext(ls,what)){
-if(where==ls->linenumber)
-error_expected(ls,what);
-else{
-luaX_syntaxerror(ls,luaO_pushfstring(ls->L,
-LUA_QL("%s")" expected (to close "LUA_QL("%s")" at line %d)",
-luaX_token2str(ls,what),luaX_token2str(ls,who),where));
-}
-}
-}
-static TString*str_checkname(LexState*ls){
-TString*ts;
-check(ls,TK_NAME);
-ts=ls->t.seminfo.ts;
-luaX_next(ls);
-return ts;
-}
-static void init_exp(expdesc*e,expkind k,int i){
-e->f=e->t=(-1);
-e->k=k;
-e->u.s.info=i;
-}
-static void codestring(LexState*ls,expdesc*e,TString*s){
-init_exp(e,VK,luaK_stringK(ls->fs,s));
-}
-static void checkname(LexState*ls,expdesc*e){
-codestring(ls,e,str_checkname(ls));
-}
-static int registerlocalvar(LexState*ls,TString*varname){
-FuncState*fs=ls->fs;
-Proto*f=fs->f;
-int oldsize=f->sizelocvars;
-luaM_growvector(ls->L,f->locvars,fs->nlocvars,f->sizelocvars,
-LocVar,SHRT_MAX,"too many local variables");
-while(oldsize<f->sizelocvars)f->locvars[oldsize++].varname=NULL;
-f->locvars[fs->nlocvars].varname=varname;
-luaC_objbarrier(ls->L,f,varname);
-return fs->nlocvars++;
-}
-#define new_localvarliteral(ls,v,n)new_localvar(ls,luaX_newstring(ls,""v,(sizeof(v)/sizeof(char))-1),n)
-static void new_localvar(LexState*ls,TString*name,int n){
-FuncState*fs=ls->fs;
-luaY_checklimit(fs,fs->nactvar+n+1,200,"local variables");
-fs->actvar[fs->nactvar+n]=cast(unsigned short,registerlocalvar(ls,name));
-}
-static void adjustlocalvars(LexState*ls,int nvars){
-FuncState*fs=ls->fs;
-fs->nactvar=cast_byte(fs->nactvar+nvars);
-for(;nvars;nvars--){
-getlocvar(fs,fs->nactvar-nvars).startpc=fs->pc;
-}
-}
-static void removevars(LexState*ls,int tolevel){
-FuncState*fs=ls->fs;
-while(fs->nactvar>tolevel)
-getlocvar(fs,--fs->nactvar).endpc=fs->pc;
-}
-static int indexupvalue(FuncState*fs,TString*name,expdesc*v){
-int i;
-Proto*f=fs->f;
-int oldsize=f->sizeupvalues;
-for(i=0;i<f->nups;i++){
-if(fs->upvalues[i].k==v->k&&fs->upvalues[i].info==v->u.s.info){
-return i;
-}
-}
-luaY_checklimit(fs,f->nups+1,60,"upvalues");
-luaM_growvector(fs->L,f->upvalues,f->nups,f->sizeupvalues,
-TString*,(INT_MAX-2),"");
-while(oldsize<f->sizeupvalues)f->upvalues[oldsize++]=NULL;
-f->upvalues[f->nups]=name;
-luaC_objbarrier(fs->L,f,name);
-fs->upvalues[f->nups].k=cast_byte(v->k);
-fs->upvalues[f->nups].info=cast_byte(v->u.s.info);
-return f->nups++;
-}
-static int searchvar(FuncState*fs,TString*n){
-int i;
-for(i=fs->nactvar-1;i>=0;i--){
-if(n==getlocvar(fs,i).varname)
-return i;
-}
-return-1;
-}
-static void markupval(FuncState*fs,int level){
-BlockCnt*bl=fs->bl;
-while(bl&&bl->nactvar>level)bl=bl->previous;
-if(bl)bl->upval=1;
-}
-static int singlevaraux(FuncState*fs,TString*n,expdesc*var,int base){
-if(fs==NULL){
-init_exp(var,VGLOBAL,((1<<8)-1));
-return VGLOBAL;
-}
-else{
-int v=searchvar(fs,n);
-if(v>=0){
-init_exp(var,VLOCAL,v);
-if(!base)
-markupval(fs,v);
-return VLOCAL;
-}
-else{
-if(singlevaraux(fs->prev,n,var,0)==VGLOBAL)
-return VGLOBAL;
-var->u.s.info=indexupvalue(fs,n,var);
-var->k=VUPVAL;
-return VUPVAL;
-}
-}
-}
-static void singlevar(LexState*ls,expdesc*var){
-TString*varname=str_checkname(ls);
-FuncState*fs=ls->fs;
-if(singlevaraux(fs,varname,var,1)==VGLOBAL)
-var->u.s.info=luaK_stringK(fs,varname);
-}
-static void adjust_assign(LexState*ls,int nvars,int nexps,expdesc*e){
-FuncState*fs=ls->fs;
-int extra=nvars-nexps;
-if(hasmultret(e->k)){
-extra++;
-if(extra<0)extra=0;
-luaK_setreturns(fs,e,extra);
-if(extra>1)luaK_reserveregs(fs,extra-1);
-}
-else{
-if(e->k!=VVOID)luaK_exp2nextreg(fs,e);
-if(extra>0){
-int reg=fs->freereg;
-luaK_reserveregs(fs,extra);
-luaK_nil(fs,reg,extra);
-}
-}
-}
-static void enterlevel(LexState*ls){
-if(++ls->L->nCcalls>200)
-luaX_lexerror(ls,"chunk has too many syntax levels",0);
-}
-#define leavelevel(ls)((ls)->L->nCcalls--)
-static void enterblock(FuncState*fs,BlockCnt*bl,lu_byte isbreakable){
-bl->breaklist=(-1);
-bl->isbreakable=isbreakable;
-bl->nactvar=fs->nactvar;
-bl->upval=0;
-bl->previous=fs->bl;
-fs->bl=bl;
-}
-static void leaveblock(FuncState*fs){
-BlockCnt*bl=fs->bl;
-fs->bl=bl->previous;
-removevars(fs->ls,bl->nactvar);
-if(bl->upval)
-luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0);
-fs->freereg=fs->nactvar;
-luaK_patchtohere(fs,bl->breaklist);
-}
-static void pushclosure(LexState*ls,FuncState*func,expdesc*v){
-FuncState*fs=ls->fs;
-Proto*f=fs->f;
-int oldsize=f->sizep;
-int i;
-luaM_growvector(ls->L,f->p,fs->np,f->sizep,Proto*,
-((1<<(9+9))-1),"constant table overflow");
-while(oldsize<f->sizep)f->p[oldsize++]=NULL;
-f->p[fs->np++]=func->f;
-luaC_objbarrier(ls->L,f,func->f);
-init_exp(v,VRELOCABLE,luaK_codeABx(fs,OP_CLOSURE,0,fs->np-1));
-for(i=0;i<func->f->nups;i++){
-OpCode o=(func->upvalues[i].k==VLOCAL)?OP_MOVE:OP_GETUPVAL;
-luaK_codeABC(fs,o,0,func->upvalues[i].info,0);
-}
-}
-static void open_func(LexState*ls,FuncState*fs){
-lua_State*L=ls->L;
-Proto*f=luaF_newproto(L);
-fs->f=f;
-fs->prev=ls->fs;
-fs->ls=ls;
-fs->L=L;
-ls->fs=fs;
-fs->pc=0;
-fs->lasttarget=-1;
-fs->jpc=(-1);
-fs->freereg=0;
-fs->nk=0;
-fs->np=0;
-fs->nlocvars=0;
-fs->nactvar=0;
-fs->bl=NULL;
-f->source=ls->source;
-f->maxstacksize=2;
-fs->h=luaH_new(L,0,0);
-sethvalue(L,L->top,fs->h);
-incr_top(L);
-setptvalue(L,L->top,f);
-incr_top(L);
-}
-static void close_func(LexState*ls){
-lua_State*L=ls->L;
-FuncState*fs=ls->fs;
-Proto*f=fs->f;
-removevars(ls,0);
-luaK_ret(fs,0,0);
-luaM_reallocvector(L,f->code,f->sizecode,fs->pc,Instruction);
-f->sizecode=fs->pc;
-luaM_reallocvector(L,f->lineinfo,f->sizelineinfo,fs->pc,int);
-f->sizelineinfo=fs->pc;
-luaM_reallocvector(L,f->k,f->sizek,fs->nk,TValue);
-f->sizek=fs->nk;
-luaM_reallocvector(L,f->p,f->sizep,fs->np,Proto*);
-f->sizep=fs->np;
-luaM_reallocvector(L,f->locvars,f->sizelocvars,fs->nlocvars,LocVar);
-f->sizelocvars=fs->nlocvars;
-luaM_reallocvector(L,f->upvalues,f->sizeupvalues,f->nups,TString*);
-f->sizeupvalues=f->nups;
-ls->fs=fs->prev;
-if(fs)anchor_token(ls);
-L->top-=2;
-}
-static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,const char*name){
-struct LexState lexstate;
-struct FuncState funcstate;
-lexstate.buff=buff;
-luaX_setinput(L,&lexstate,z,luaS_new(L,name));
-open_func(&lexstate,&funcstate);
-funcstate.f->is_vararg=2;
-luaX_next(&lexstate);
-chunk(&lexstate);
-check(&lexstate,TK_EOS);
-close_func(&lexstate);
-return funcstate.f;
-}
-static void field(LexState*ls,expdesc*v){
-FuncState*fs=ls->fs;
-expdesc key;
-luaK_exp2anyreg(fs,v);
-luaX_next(ls);
-checkname(ls,&key);
-luaK_indexed(fs,v,&key);
-}
-static void yindex(LexState*ls,expdesc*v){
-luaX_next(ls);
-expr(ls,v);
-luaK_exp2val(ls->fs,v);
-checknext(ls,']');
-}
-struct ConsControl{
-expdesc v;
-expdesc*t;
-int nh;
-int na;
-int tostore;
-};
-static void recfield(LexState*ls,struct ConsControl*cc){
-FuncState*fs=ls->fs;
-int reg=ls->fs->freereg;
-expdesc key,val;
-int rkkey;
-if(ls->t.token==TK_NAME){
-luaY_checklimit(fs,cc->nh,(INT_MAX-2),"items in a constructor");
-checkname(ls,&key);
-}
-else
-yindex(ls,&key);
-cc->nh++;
-checknext(ls,'=');
-rkkey=luaK_exp2RK(fs,&key);
-expr(ls,&val);
-luaK_codeABC(fs,OP_SETTABLE,cc->t->u.s.info,rkkey,luaK_exp2RK(fs,&val));
-fs->freereg=reg;
-}
-static void closelistfield(FuncState*fs,struct ConsControl*cc){
-if(cc->v.k==VVOID)return;
-luaK_exp2nextreg(fs,&cc->v);
-cc->v.k=VVOID;
-if(cc->tostore==50){
-luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore);
-cc->tostore=0;
-}
-}
-static void lastlistfield(FuncState*fs,struct ConsControl*cc){
-if(cc->tostore==0)return;
-if(hasmultret(cc->v.k)){
-luaK_setmultret(fs,&cc->v);
-luaK_setlist(fs,cc->t->u.s.info,cc->na,(-1));
-cc->na--;
-}
-else{
-if(cc->v.k!=VVOID)
-luaK_exp2nextreg(fs,&cc->v);
-luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore);
-}
-}
-static void listfield(LexState*ls,struct ConsControl*cc){
-expr(ls,&cc->v);
-luaY_checklimit(ls->fs,cc->na,(INT_MAX-2),"items in a constructor");
-cc->na++;
-cc->tostore++;
-}
-static void constructor(LexState*ls,expdesc*t){
-FuncState*fs=ls->fs;
-int line=ls->linenumber;
-int pc=luaK_codeABC(fs,OP_NEWTABLE,0,0,0);
-struct ConsControl cc;
-cc.na=cc.nh=cc.tostore=0;
-cc.t=t;
-init_exp(t,VRELOCABLE,pc);
-init_exp(&cc.v,VVOID,0);
-luaK_exp2nextreg(ls->fs,t);
-checknext(ls,'{');
-do{
-if(ls->t.token=='}')break;
-closelistfield(fs,&cc);
-switch(ls->t.token){
-case TK_NAME:{
-luaX_lookahead(ls);
-if(ls->lookahead.token!='=')
-listfield(ls,&cc);
-else
-recfield(ls,&cc);
-break;
-}
-case'[':{
-recfield(ls,&cc);
-break;
-}
-default:{
-listfield(ls,&cc);
-break;
-}
-}
-}while(testnext(ls,',')||testnext(ls,';'));
-check_match(ls,'}','{',line);
-lastlistfield(fs,&cc);
-SETARG_B(fs->f->code[pc],luaO_int2fb(cc.na));
-SETARG_C(fs->f->code[pc],luaO_int2fb(cc.nh));
-}
-static void parlist(LexState*ls){
-FuncState*fs=ls->fs;
-Proto*f=fs->f;
-int nparams=0;
-f->is_vararg=0;
-if(ls->t.token!=')'){
-do{
-switch(ls->t.token){
-case TK_NAME:{
-new_localvar(ls,str_checkname(ls),nparams++);
-break;
-}
-case TK_DOTS:{
-luaX_next(ls);
-f->is_vararg|=2;
-break;
-}
-default:luaX_syntaxerror(ls,"<name> or "LUA_QL("...")" expected");
-}
-}while(!f->is_vararg&&testnext(ls,','));
-}
-adjustlocalvars(ls,nparams);
-f->numparams=cast_byte(fs->nactvar-(f->is_vararg&1));
-luaK_reserveregs(fs,fs->nactvar);
-}
-static void body(LexState*ls,expdesc*e,int needself,int line){
-FuncState new_fs;
-open_func(ls,&new_fs);
-new_fs.f->linedefined=line;
-checknext(ls,'(');
-if(needself){
-new_localvarliteral(ls,"self",0);
-adjustlocalvars(ls,1);
-}
-parlist(ls);
-checknext(ls,')');
-chunk(ls);
-new_fs.f->lastlinedefined=ls->linenumber;
-check_match(ls,TK_END,TK_FUNCTION,line);
-close_func(ls);
-pushclosure(ls,&new_fs,e);
-}
-static int explist1(LexState*ls,expdesc*v){
-int n=1;
-expr(ls,v);
-while(testnext(ls,',')){
-luaK_exp2nextreg(ls->fs,v);
-expr(ls,v);
-n++;
-}
-return n;
-}
-static void funcargs(LexState*ls,expdesc*f){
-FuncState*fs=ls->fs;
-expdesc args;
-int base,nparams;
-int line=ls->linenumber;
-switch(ls->t.token){
-case'(':{
-if(line!=ls->lastline)
-luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)");
-luaX_next(ls);
-if(ls->t.token==')')
-args.k=VVOID;
-else{
-explist1(ls,&args);
-luaK_setmultret(fs,&args);
-}
-check_match(ls,')','(',line);
-break;
-}
-case'{':{
-constructor(ls,&args);
-break;
-}
-case TK_STRING:{
-codestring(ls,&args,ls->t.seminfo.ts);
-luaX_next(ls);
-break;
-}
-default:{
-luaX_syntaxerror(ls,"function arguments expected");
-return;
-}
-}
-base=f->u.s.info;
-if(hasmultret(args.k))
-nparams=(-1);
-else{
-if(args.k!=VVOID)
-luaK_exp2nextreg(fs,&args);
-nparams=fs->freereg-(base+1);
-}
-init_exp(f,VCALL,luaK_codeABC(fs,OP_CALL,base,nparams+1,2));
-luaK_fixline(fs,line);
-fs->freereg=base+1;
-}
-static void prefixexp(LexState*ls,expdesc*v){
-switch(ls->t.token){
-case'(':{
-int line=ls->linenumber;
-luaX_next(ls);
-expr(ls,v);
-check_match(ls,')','(',line);
-luaK_dischargevars(ls->fs,v);
-return;
-}
-case TK_NAME:{
-singlevar(ls,v);
-return;
-}
-default:{
-luaX_syntaxerror(ls,"unexpected symbol");
-return;
-}
-}
-}
-static void primaryexp(LexState*ls,expdesc*v){
-FuncState*fs=ls->fs;
-prefixexp(ls,v);
-for(;;){
-switch(ls->t.token){
-case'.':{
-field(ls,v);
-break;
-}
-case'[':{
-expdesc key;
-luaK_exp2anyreg(fs,v);
-yindex(ls,&key);
-luaK_indexed(fs,v,&key);
-break;
-}
-case':':{
-expdesc key;
-luaX_next(ls);
-checkname(ls,&key);
-luaK_self(fs,v,&key);
-funcargs(ls,v);
-break;
-}
-case'(':case TK_STRING:case'{':{
-luaK_exp2nextreg(fs,v);
-funcargs(ls,v);
-break;
-}
-default:return;
-}
-}
-}
-static void simpleexp(LexState*ls,expdesc*v){
-switch(ls->t.token){
-case TK_NUMBER:{
-init_exp(v,VKNUM,0);
-v->u.nval=ls->t.seminfo.r;
-break;
-}
-case TK_STRING:{
-codestring(ls,v,ls->t.seminfo.ts);
-break;
-}
-case TK_NIL:{
-init_exp(v,VNIL,0);
-break;
-}
-case TK_TRUE:{
-init_exp(v,VTRUE,0);
-break;
-}
-case TK_FALSE:{
-init_exp(v,VFALSE,0);
-break;
-}
-case TK_DOTS:{
-FuncState*fs=ls->fs;
-check_condition(ls,fs->f->is_vararg,
-"cannot use "LUA_QL("...")" outside a vararg function");
-fs->f->is_vararg&=~4;
-init_exp(v,VVARARG,luaK_codeABC(fs,OP_VARARG,0,1,0));
-break;
-}
-case'{':{
-constructor(ls,v);
-return;
-}
-case TK_FUNCTION:{
-luaX_next(ls);
-body(ls,v,0,ls->linenumber);
-return;
-}
-default:{
-primaryexp(ls,v);
-return;
-}
-}
-luaX_next(ls);
-}
-static UnOpr getunopr(int op){
-switch(op){
-case TK_NOT:return OPR_NOT;
-case'-':return OPR_MINUS;
-case'#':return OPR_LEN;
-default:return OPR_NOUNOPR;
-}
-}
-static BinOpr getbinopr(int op){
-switch(op){
-case'+':return OPR_ADD;
-case'-':return OPR_SUB;
-case'*':return OPR_MUL;
-case'/':return OPR_DIV;
-case'%':return OPR_MOD;
-case'^':return OPR_POW;
-case TK_CONCAT:return OPR_CONCAT;
-case TK_NE:return OPR_NE;
-case TK_EQ:return OPR_EQ;
-case'<':return OPR_LT;
-case TK_LE:return OPR_LE;
-case'>':return OPR_GT;
-case TK_GE:return OPR_GE;
-case TK_AND:return OPR_AND;
-case TK_OR:return OPR_OR;
-default:return OPR_NOBINOPR;
-}
-}
-static const struct{
-lu_byte left;
-lu_byte right;
-}priority[]={
-{6,6},{6,6},{7,7},{7,7},{7,7},
-{10,9},{5,4},
-{3,3},{3,3},
-{3,3},{3,3},{3,3},{3,3},
-{2,2},{1,1}
-};
-static BinOpr subexpr(LexState*ls,expdesc*v,unsigned int limit){
-BinOpr op;
-UnOpr uop;
-enterlevel(ls);
-uop=getunopr(ls->t.token);
-if(uop!=OPR_NOUNOPR){
-luaX_next(ls);
-subexpr(ls,v,8);
-luaK_prefix(ls->fs,uop,v);
-}
-else simpleexp(ls,v);
-op=getbinopr(ls->t.token);
-while(op!=OPR_NOBINOPR&&priority[op].left>limit){
-expdesc v2;
-BinOpr nextop;
-luaX_next(ls);
-luaK_infix(ls->fs,op,v);
-nextop=subexpr(ls,&v2,priority[op].right);
-luaK_posfix(ls->fs,op,v,&v2);
-op=nextop;
-}
-leavelevel(ls);
-return op;
-}
-static void expr(LexState*ls,expdesc*v){
-subexpr(ls,v,0);
-}
-static int block_follow(int token){
-switch(token){
-case TK_ELSE:case TK_ELSEIF:case TK_END:
-case TK_UNTIL:case TK_EOS:
-return 1;
-default:return 0;
-}
-}
-static void block(LexState*ls){
-FuncState*fs=ls->fs;
-BlockCnt bl;
-enterblock(fs,&bl,0);
-chunk(ls);
-leaveblock(fs);
-}
-struct LHS_assign{
-struct LHS_assign*prev;
-expdesc v;
-};
-static void check_conflict(LexState*ls,struct LHS_assign*lh,expdesc*v){
-FuncState*fs=ls->fs;
-int extra=fs->freereg;
-int conflict=0;
-for(;lh;lh=lh->prev){
-if(lh->v.k==VINDEXED){
-if(lh->v.u.s.info==v->u.s.info){
-conflict=1;
-lh->v.u.s.info=extra;
-}
-if(lh->v.u.s.aux==v->u.s.info){
-conflict=1;
-lh->v.u.s.aux=extra;
-}
-}
-}
-if(conflict){
-luaK_codeABC(fs,OP_MOVE,fs->freereg,v->u.s.info,0);
-luaK_reserveregs(fs,1);
-}
-}
-static void assignment(LexState*ls,struct LHS_assign*lh,int nvars){
-expdesc e;
-check_condition(ls,VLOCAL<=lh->v.k&&lh->v.k<=VINDEXED,
-"syntax error");
-if(testnext(ls,',')){
-struct LHS_assign nv;
-nv.prev=lh;
-primaryexp(ls,&nv.v);
-if(nv.v.k==VLOCAL)
-check_conflict(ls,lh,&nv.v);
-luaY_checklimit(ls->fs,nvars,200-ls->L->nCcalls,
-"variables in assignment");
-assignment(ls,&nv,nvars+1);
-}
-else{
-int nexps;
-checknext(ls,'=');
-nexps=explist1(ls,&e);
-if(nexps!=nvars){
-adjust_assign(ls,nvars,nexps,&e);
-if(nexps>nvars)
-ls->fs->freereg-=nexps-nvars;
-}
-else{
-luaK_setoneret(ls->fs,&e);
-luaK_storevar(ls->fs,&lh->v,&e);
-return;
-}
-}
-init_exp(&e,VNONRELOC,ls->fs->freereg-1);
-luaK_storevar(ls->fs,&lh->v,&e);
-}
-static int cond(LexState*ls){
-expdesc v;
-expr(ls,&v);
-if(v.k==VNIL)v.k=VFALSE;
-luaK_goiftrue(ls->fs,&v);
-return v.f;
-}
-static void breakstat(LexState*ls){
-FuncState*fs=ls->fs;
-BlockCnt*bl=fs->bl;
-int upval=0;
-while(bl&&!bl->isbreakable){
-upval|=bl->upval;
-bl=bl->previous;
-}
-if(!bl)
-luaX_syntaxerror(ls,"no loop to break");
-if(upval)
-luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0);
-luaK_concat(fs,&bl->breaklist,luaK_jump(fs));
-}
-static void whilestat(LexState*ls,int line){
-FuncState*fs=ls->fs;
-int whileinit;
-int condexit;
-BlockCnt bl;
-luaX_next(ls);
-whileinit=luaK_getlabel(fs);
-condexit=cond(ls);
-enterblock(fs,&bl,1);
-checknext(ls,TK_DO);
-block(ls);
-luaK_patchlist(fs,luaK_jump(fs),whileinit);
-check_match(ls,TK_END,TK_WHILE,line);
-leaveblock(fs);
-luaK_patchtohere(fs,condexit);
-}
-static void repeatstat(LexState*ls,int line){
-int condexit;
-FuncState*fs=ls->fs;
-int repeat_init=luaK_getlabel(fs);
-BlockCnt bl1,bl2;
-enterblock(fs,&bl1,1);
-enterblock(fs,&bl2,0);
-luaX_next(ls);
-chunk(ls);
-check_match(ls,TK_UNTIL,TK_REPEAT,line);
-condexit=cond(ls);
-if(!bl2.upval){
-leaveblock(fs);
-luaK_patchlist(ls->fs,condexit,repeat_init);
-}
-else{
-breakstat(ls);
-luaK_patchtohere(ls->fs,condexit);
-leaveblock(fs);
-luaK_patchlist(ls->fs,luaK_jump(fs),repeat_init);
-}
-leaveblock(fs);
-}
-static int exp1(LexState*ls){
-expdesc e;
-int k;
-expr(ls,&e);
-k=e.k;
-luaK_exp2nextreg(ls->fs,&e);
-return k;
-}
-static void forbody(LexState*ls,int base,int line,int nvars,int isnum){
-BlockCnt bl;
-FuncState*fs=ls->fs;
-int prep,endfor;
-adjustlocalvars(ls,3);
-checknext(ls,TK_DO);
-prep=isnum?luaK_codeAsBx(fs,OP_FORPREP,base,(-1)):luaK_jump(fs);
-enterblock(fs,&bl,0);
-adjustlocalvars(ls,nvars);
-luaK_reserveregs(fs,nvars);
-block(ls);
-leaveblock(fs);
-luaK_patchtohere(fs,prep);
-endfor=(isnum)?luaK_codeAsBx(fs,OP_FORLOOP,base,(-1)):
-luaK_codeABC(fs,OP_TFORLOOP,base,0,nvars);
-luaK_fixline(fs,line);
-luaK_patchlist(fs,(isnum?endfor:luaK_jump(fs)),prep+1);
-}
-static void fornum(LexState*ls,TString*varname,int line){
-FuncState*fs=ls->fs;
-int base=fs->freereg;
-new_localvarliteral(ls,"(for index)",0);
-new_localvarliteral(ls,"(for limit)",1);
-new_localvarliteral(ls,"(for step)",2);
-new_localvar(ls,varname,3);
-checknext(ls,'=');
-exp1(ls);
-checknext(ls,',');
-exp1(ls);
-if(testnext(ls,','))
-exp1(ls);
-else{
-luaK_codeABx(fs,OP_LOADK,fs->freereg,luaK_numberK(fs,1));
-luaK_reserveregs(fs,1);
-}
-forbody(ls,base,line,1,1);
-}
-static void forlist(LexState*ls,TString*indexname){
-FuncState*fs=ls->fs;
-expdesc e;
-int nvars=0;
-int line;
-int base=fs->freereg;
-new_localvarliteral(ls,"(for generator)",nvars++);
-new_localvarliteral(ls,"(for state)",nvars++);
-new_localvarliteral(ls,"(for control)",nvars++);
-new_localvar(ls,indexname,nvars++);
-while(testnext(ls,','))
-new_localvar(ls,str_checkname(ls),nvars++);
-checknext(ls,TK_IN);
-line=ls->linenumber;
-adjust_assign(ls,3,explist1(ls,&e),&e);
-luaK_checkstack(fs,3);
-forbody(ls,base,line,nvars-3,0);
-}
-static void forstat(LexState*ls,int line){
-FuncState*fs=ls->fs;
-TString*varname;
-BlockCnt bl;
-enterblock(fs,&bl,1);
-luaX_next(ls);
-varname=str_checkname(ls);
-switch(ls->t.token){
-case'=':fornum(ls,varname,line);break;
-case',':case TK_IN:forlist(ls,varname);break;
-default:luaX_syntaxerror(ls,LUA_QL("=")" or "LUA_QL("in")" expected");
-}
-check_match(ls,TK_END,TK_FOR,line);
-leaveblock(fs);
-}
-static int test_then_block(LexState*ls){
-int condexit;
-luaX_next(ls);
-condexit=cond(ls);
-checknext(ls,TK_THEN);
-block(ls);
-return condexit;
-}
-static void ifstat(LexState*ls,int line){
-FuncState*fs=ls->fs;
-int flist;
-int escapelist=(-1);
-flist=test_then_block(ls);
-while(ls->t.token==TK_ELSEIF){
-luaK_concat(fs,&escapelist,luaK_jump(fs));
-luaK_patchtohere(fs,flist);
-flist=test_then_block(ls);
-}
-if(ls->t.token==TK_ELSE){
-luaK_concat(fs,&escapelist,luaK_jump(fs));
-luaK_patchtohere(fs,flist);
-luaX_next(ls);
-block(ls);
-}
-else
-luaK_concat(fs,&escapelist,flist);
-luaK_patchtohere(fs,escapelist);
-check_match(ls,TK_END,TK_IF,line);
-}
-static void localfunc(LexState*ls){
-expdesc v,b;
-FuncState*fs=ls->fs;
-new_localvar(ls,str_checkname(ls),0);
-init_exp(&v,VLOCAL,fs->freereg);
-luaK_reserveregs(fs,1);
-adjustlocalvars(ls,1);
-body(ls,&b,0,ls->linenumber);
-luaK_storevar(fs,&v,&b);
-getlocvar(fs,fs->nactvar-1).startpc=fs->pc;
-}
-static void localstat(LexState*ls){
-int nvars=0;
-int nexps;
-expdesc e;
-do{
-new_localvar(ls,str_checkname(ls),nvars++);
-}while(testnext(ls,','));
-if(testnext(ls,'='))
-nexps=explist1(ls,&e);
-else{
-e.k=VVOID;
-nexps=0;
-}
-adjust_assign(ls,nvars,nexps,&e);
-adjustlocalvars(ls,nvars);
-}
-static int funcname(LexState*ls,expdesc*v){
-int needself=0;
-singlevar(ls,v);
-while(ls->t.token=='.')
-field(ls,v);
-if(ls->t.token==':'){
-needself=1;
-field(ls,v);
-}
-return needself;
-}
-static void funcstat(LexState*ls,int line){
-int needself;
-expdesc v,b;
-luaX_next(ls);
-needself=funcname(ls,&v);
-body(ls,&b,needself,line);
-luaK_storevar(ls->fs,&v,&b);
-luaK_fixline(ls->fs,line);
-}
-static void exprstat(LexState*ls){
-FuncState*fs=ls->fs;
-struct LHS_assign v;
-primaryexp(ls,&v.v);
-if(v.v.k==VCALL)
-SETARG_C(getcode(fs,&v.v),1);
-else{
-v.prev=NULL;
-assignment(ls,&v,1);
-}
-}
-static void retstat(LexState*ls){
-FuncState*fs=ls->fs;
-expdesc e;
-int first,nret;
-luaX_next(ls);
-if(block_follow(ls->t.token)||ls->t.token==';')
-first=nret=0;
-else{
-nret=explist1(ls,&e);
-if(hasmultret(e.k)){
-luaK_setmultret(fs,&e);
-if(e.k==VCALL&&nret==1){
-SET_OPCODE(getcode(fs,&e),OP_TAILCALL);
-}
-first=fs->nactvar;
-nret=(-1);
-}
-else{
-if(nret==1)
-first=luaK_exp2anyreg(fs,&e);
-else{
-luaK_exp2nextreg(fs,&e);
-first=fs->nactvar;
-}
-}
-}
-luaK_ret(fs,first,nret);
-}
-static int statement(LexState*ls){
-int line=ls->linenumber;
-switch(ls->t.token){
-case TK_IF:{
-ifstat(ls,line);
-return 0;
-}
-case TK_WHILE:{
-whilestat(ls,line);
-return 0;
-}
-case TK_DO:{
-luaX_next(ls);
-block(ls);
-check_match(ls,TK_END,TK_DO,line);
-return 0;
-}
-case TK_FOR:{
-forstat(ls,line);
-return 0;
-}
-case TK_REPEAT:{
-repeatstat(ls,line);
-return 0;
-}
-case TK_FUNCTION:{
-funcstat(ls,line);
-return 0;
-}
-case TK_LOCAL:{
-luaX_next(ls);
-if(testnext(ls,TK_FUNCTION))
-localfunc(ls);
-else
-localstat(ls);
-return 0;
-}
-case TK_RETURN:{
-retstat(ls);
-return 1;
-}
-case TK_BREAK:{
-luaX_next(ls);
-breakstat(ls);
-return 1;
-}
-default:{
-exprstat(ls);
-return 0;
-}
-}
-}
-static void chunk(LexState*ls){
-int islast=0;
-enterlevel(ls);
-while(!islast&&!block_follow(ls->t.token)){
-islast=statement(ls);
-testnext(ls,';');
-ls->fs->freereg=ls->fs->nactvar;
-}
-leavelevel(ls);
-}
-static const TValue*luaV_tonumber(const TValue*obj,TValue*n){
-lua_Number num;
-if(ttisnumber(obj))return obj;
-if(ttisstring(obj)&&luaO_str2d(svalue(obj),&num)){
-setnvalue(n,num);
-return n;
-}
-else
-return NULL;
-}
-static int luaV_tostring(lua_State*L,StkId obj){
-if(!ttisnumber(obj))
-return 0;
-else{
-char s[32];
-lua_Number n=nvalue(obj);
-lua_number2str(s,n);
-setsvalue(L,obj,luaS_new(L,s));
-return 1;
-}
-}
-static void callTMres(lua_State*L,StkId res,const TValue*f,
-const TValue*p1,const TValue*p2){
-ptrdiff_t result=savestack(L,res);
-setobj(L,L->top,f);
-setobj(L,L->top+1,p1);
-setobj(L,L->top+2,p2);
-luaD_checkstack(L,3);
-L->top+=3;
-luaD_call(L,L->top-3,1);
-res=restorestack(L,result);
-L->top--;
-setobj(L,res,L->top);
-}
-static void callTM(lua_State*L,const TValue*f,const TValue*p1,
-const TValue*p2,const TValue*p3){
-setobj(L,L->top,f);
-setobj(L,L->top+1,p1);
-setobj(L,L->top+2,p2);
-setobj(L,L->top+3,p3);
-luaD_checkstack(L,4);
-L->top+=4;
-luaD_call(L,L->top-4,0);
-}
-static void luaV_gettable(lua_State*L,const TValue*t,TValue*key,StkId val){
-int loop;
-for(loop=0;loop<100;loop++){
-const TValue*tm;
-if(ttistable(t)){
-Table*h=hvalue(t);
-const TValue*res=luaH_get(h,key);
-if(!ttisnil(res)||
-(tm=fasttm(L,h->metatable,TM_INDEX))==NULL){
-setobj(L,val,res);
-return;
-}
-}
-else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_INDEX)))
-luaG_typeerror(L,t,"index");
-if(ttisfunction(tm)){
-callTMres(L,val,tm,t,key);
-return;
-}
-t=tm;
-}
-luaG_runerror(L,"loop in gettable");
-}
-static void luaV_settable(lua_State*L,const TValue*t,TValue*key,StkId val){
-int loop;
-TValue temp;
-for(loop=0;loop<100;loop++){
-const TValue*tm;
-if(ttistable(t)){
-Table*h=hvalue(t);
-TValue*oldval=luaH_set(L,h,key);
-if(!ttisnil(oldval)||
-(tm=fasttm(L,h->metatable,TM_NEWINDEX))==NULL){
-setobj(L,oldval,val);
-h->flags=0;
-luaC_barriert(L,h,val);
-return;
-}
-}
-else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_NEWINDEX)))
-luaG_typeerror(L,t,"index");
-if(ttisfunction(tm)){
-callTM(L,tm,t,key,val);
-return;
-}
-setobj(L,&temp,tm);
-t=&temp;
-}
-luaG_runerror(L,"loop in settable");
-}
-static int call_binTM(lua_State*L,const TValue*p1,const TValue*p2,
-StkId res,TMS event){
-const TValue*tm=luaT_gettmbyobj(L,p1,event);
-if(ttisnil(tm))
-tm=luaT_gettmbyobj(L,p2,event);
-if(ttisnil(tm))return 0;
-callTMres(L,res,tm,p1,p2);
-return 1;
-}
-static const TValue*get_compTM(lua_State*L,Table*mt1,Table*mt2,
-TMS event){
-const TValue*tm1=fasttm(L,mt1,event);
-const TValue*tm2;
-if(tm1==NULL)return NULL;
-if(mt1==mt2)return tm1;
-tm2=fasttm(L,mt2,event);
-if(tm2==NULL)return NULL;
-if(luaO_rawequalObj(tm1,tm2))
-return tm1;
-return NULL;
-}
-static int call_orderTM(lua_State*L,const TValue*p1,const TValue*p2,
-TMS event){
-const TValue*tm1=luaT_gettmbyobj(L,p1,event);
-const TValue*tm2;
-if(ttisnil(tm1))return-1;
-tm2=luaT_gettmbyobj(L,p2,event);
-if(!luaO_rawequalObj(tm1,tm2))
-return-1;
-callTMres(L,L->top,tm1,p1,p2);
-return!l_isfalse(L->top);
-}
-static int l_strcmp(const TString*ls,const TString*rs){
-const char*l=getstr(ls);
-size_t ll=ls->tsv.len;
-const char*r=getstr(rs);
-size_t lr=rs->tsv.len;
-for(;;){
-int temp=strcoll(l,r);
-if(temp!=0)return temp;
-else{
-size_t len=strlen(l);
-if(len==lr)
-return(len==ll)?0:1;
-else if(len==ll)
-return-1;
-len++;
-l+=len;ll-=len;r+=len;lr-=len;
-}
-}
-}
-static int luaV_lessthan(lua_State*L,const TValue*l,const TValue*r){
-int res;
-if(ttype(l)!=ttype(r))
-return luaG_ordererror(L,l,r);
-else if(ttisnumber(l))
-return luai_numlt(nvalue(l),nvalue(r));
-else if(ttisstring(l))
-return l_strcmp(rawtsvalue(l),rawtsvalue(r))<0;
-else if((res=call_orderTM(L,l,r,TM_LT))!=-1)
-return res;
-return luaG_ordererror(L,l,r);
-}
-static int lessequal(lua_State*L,const TValue*l,const TValue*r){
-int res;
-if(ttype(l)!=ttype(r))
-return luaG_ordererror(L,l,r);
-else if(ttisnumber(l))
-return luai_numle(nvalue(l),nvalue(r));
-else if(ttisstring(l))
-return l_strcmp(rawtsvalue(l),rawtsvalue(r))<=0;
-else if((res=call_orderTM(L,l,r,TM_LE))!=-1)
-return res;
-else if((res=call_orderTM(L,r,l,TM_LT))!=-1)
-return!res;
-return luaG_ordererror(L,l,r);
-}
-static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2){
-const TValue*tm;
-switch(ttype(t1)){
-case 0:return 1;
-case 3:return luai_numeq(nvalue(t1),nvalue(t2));
-case 1:return bvalue(t1)==bvalue(t2);
-case 2:return pvalue(t1)==pvalue(t2);
-case 7:{
-if(uvalue(t1)==uvalue(t2))return 1;
-tm=get_compTM(L,uvalue(t1)->metatable,uvalue(t2)->metatable,
-TM_EQ);
-break;
-}
-case 5:{
-if(hvalue(t1)==hvalue(t2))return 1;
-tm=get_compTM(L,hvalue(t1)->metatable,hvalue(t2)->metatable,TM_EQ);
-break;
-}
-default:return gcvalue(t1)==gcvalue(t2);
-}
-if(tm==NULL)return 0;
-callTMres(L,L->top,tm,t1,t2);
-return!l_isfalse(L->top);
-}
-static void luaV_concat(lua_State*L,int total,int last){
-do{
-StkId top=L->base+last+1;
-int n=2;
-if(!(ttisstring(top-2)||ttisnumber(top-2))||!tostring(L,top-1)){
-if(!call_binTM(L,top-2,top-1,top-2,TM_CONCAT))
-luaG_concaterror(L,top-2,top-1);
-}else if(tsvalue(top-1)->len==0)
-(void)tostring(L,top-2);
-else{
-size_t tl=tsvalue(top-1)->len;
-char*buffer;
-int i;
-for(n=1;n<total&&tostring(L,top-n-1);n++){
-size_t l=tsvalue(top-n-1)->len;
-if(l>=((size_t)(~(size_t)0)-2)-tl)luaG_runerror(L,"string length overflow");
-tl+=l;
-}
-buffer=luaZ_openspace(L,&G(L)->buff,tl);
-tl=0;
-for(i=n;i>0;i--){
-size_t l=tsvalue(top-i)->len;
-memcpy(buffer+tl,svalue(top-i),l);
-tl+=l;
-}
-setsvalue(L,top-n,luaS_newlstr(L,buffer,tl));
-}
-total-=n-1;
-last-=n-1;
-}while(total>1);
-}
-static void Arith(lua_State*L,StkId ra,const TValue*rb,
-const TValue*rc,TMS op){
-TValue tempb,tempc;
-const TValue*b,*c;
-if((b=luaV_tonumber(rb,&tempb))!=NULL&&
-(c=luaV_tonumber(rc,&tempc))!=NULL){
-lua_Number nb=nvalue(b),nc=nvalue(c);
-switch(op){
-case TM_ADD:setnvalue(ra,luai_numadd(nb,nc));break;
-case TM_SUB:setnvalue(ra,luai_numsub(nb,nc));break;
-case TM_MUL:setnvalue(ra,luai_nummul(nb,nc));break;
-case TM_DIV:setnvalue(ra,luai_numdiv(nb,nc));break;
-case TM_MOD:setnvalue(ra,luai_nummod(nb,nc));break;
-case TM_POW:setnvalue(ra,luai_numpow(nb,nc));break;
-case TM_UNM:setnvalue(ra,luai_numunm(nb));break;
-default:break;
-}
-}
-else if(!call_binTM(L,rb,rc,ra,op))
-luaG_aritherror(L,rb,rc);
-}
-#define runtime_check(L,c){if(!(c))break;}
-#define RA(i)(base+GETARG_A(i))
-#define RB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgR,base+GETARG_B(i))
-#define RKB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_B(i))?k+INDEXK(GETARG_B(i)):base+GETARG_B(i))
-#define RKC(i)check_exp(getCMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_C(i))?k+INDEXK(GETARG_C(i)):base+GETARG_C(i))
-#define KBx(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,k+GETARG_Bx(i))
-#define dojump(L,pc,i){(pc)+=(i);}
-#define Protect(x){L->savedpc=pc;{x;};base=L->base;}
-#define arith_op(op,tm){TValue*rb=RKB(i);TValue*rc=RKC(i);if(ttisnumber(rb)&&ttisnumber(rc)){lua_Number nb=nvalue(rb),nc=nvalue(rc);setnvalue(ra,op(nb,nc));}else Protect(Arith(L,ra,rb,rc,tm));}
-static void luaV_execute(lua_State*L,int nexeccalls){
-LClosure*cl;
-StkId base;
-TValue*k;
-const Instruction*pc;
-reentry:
-pc=L->savedpc;
-cl=&clvalue(L->ci->func)->l;
-base=L->base;
-k=cl->p->k;
-for(;;){
-const Instruction i=*pc++;
-StkId ra;
-ra=RA(i);
-switch(GET_OPCODE(i)){
-case OP_MOVE:{
-setobj(L,ra,RB(i));
-continue;
-}
-case OP_LOADK:{
-setobj(L,ra,KBx(i));
-continue;
-}
-case OP_LOADBOOL:{
-setbvalue(ra,GETARG_B(i));
-if(GETARG_C(i))pc++;
-continue;
-}
-case OP_LOADNIL:{
-TValue*rb=RB(i);
-do{
-setnilvalue(rb--);
-}while(rb>=ra);
-continue;
-}
-case OP_GETUPVAL:{
-int b=GETARG_B(i);
-setobj(L,ra,cl->upvals[b]->v);
-continue;
-}
-case OP_GETGLOBAL:{
-TValue g;
-TValue*rb=KBx(i);
-sethvalue(L,&g,cl->env);
-Protect(luaV_gettable(L,&g,rb,ra));
-continue;
-}
-case OP_GETTABLE:{
-Protect(luaV_gettable(L,RB(i),RKC(i),ra));
-continue;
-}
-case OP_SETGLOBAL:{
-TValue g;
-sethvalue(L,&g,cl->env);
-Protect(luaV_settable(L,&g,KBx(i),ra));
-continue;
-}
-case OP_SETUPVAL:{
-UpVal*uv=cl->upvals[GETARG_B(i)];
-setobj(L,uv->v,ra);
-luaC_barrier(L,uv,ra);
-continue;
-}
-case OP_SETTABLE:{
-Protect(luaV_settable(L,ra,RKB(i),RKC(i)));
-continue;
-}
-case OP_NEWTABLE:{
-int b=GETARG_B(i);
-int c=GETARG_C(i);
-sethvalue(L,ra,luaH_new(L,luaO_fb2int(b),luaO_fb2int(c)));
-Protect(luaC_checkGC(L));
-continue;
-}
-case OP_SELF:{
-StkId rb=RB(i);
-setobj(L,ra+1,rb);
-Protect(luaV_gettable(L,rb,RKC(i),ra));
-continue;
-}
-case OP_ADD:{
-arith_op(luai_numadd,TM_ADD);
-continue;
-}
-case OP_SUB:{
-arith_op(luai_numsub,TM_SUB);
-continue;
-}
-case OP_MUL:{
-arith_op(luai_nummul,TM_MUL);
-continue;
-}
-case OP_DIV:{
-arith_op(luai_numdiv,TM_DIV);
-continue;
-}
-case OP_MOD:{
-arith_op(luai_nummod,TM_MOD);
-continue;
-}
-case OP_POW:{
-arith_op(luai_numpow,TM_POW);
-continue;
-}
-case OP_UNM:{
-TValue*rb=RB(i);
-if(ttisnumber(rb)){
-lua_Number nb=nvalue(rb);
-setnvalue(ra,luai_numunm(nb));
-}
-else{
-Protect(Arith(L,ra,rb,rb,TM_UNM));
-}
-continue;
-}
-case OP_NOT:{
-int res=l_isfalse(RB(i));
-setbvalue(ra,res);
-continue;
-}
-case OP_LEN:{
-const TValue*rb=RB(i);
-switch(ttype(rb)){
-case 5:{
-setnvalue(ra,cast_num(luaH_getn(hvalue(rb))));
-break;
-}
-case 4:{
-setnvalue(ra,cast_num(tsvalue(rb)->len));
-break;
-}
-default:{
-Protect(
-if(!call_binTM(L,rb,(&luaO_nilobject_),ra,TM_LEN))
-luaG_typeerror(L,rb,"get length of");
-)
-}
-}
-continue;
-}
-case OP_CONCAT:{
-int b=GETARG_B(i);
-int c=GETARG_C(i);
-Protect(luaV_concat(L,c-b+1,c);luaC_checkGC(L));
-setobj(L,RA(i),base+b);
-continue;
-}
-case OP_JMP:{
-dojump(L,pc,GETARG_sBx(i));
-continue;
-}
-case OP_EQ:{
-TValue*rb=RKB(i);
-TValue*rc=RKC(i);
-Protect(
-if(equalobj(L,rb,rc)==GETARG_A(i))
-dojump(L,pc,GETARG_sBx(*pc));
-)
-pc++;
-continue;
-}
-case OP_LT:{
-Protect(
-if(luaV_lessthan(L,RKB(i),RKC(i))==GETARG_A(i))
-dojump(L,pc,GETARG_sBx(*pc));
-)
-pc++;
-continue;
-}
-case OP_LE:{
-Protect(
-if(lessequal(L,RKB(i),RKC(i))==GETARG_A(i))
-dojump(L,pc,GETARG_sBx(*pc));
-)
-pc++;
-continue;
-}
-case OP_TEST:{
-if(l_isfalse(ra)!=GETARG_C(i))
-dojump(L,pc,GETARG_sBx(*pc));
-pc++;
-continue;
-}
-case OP_TESTSET:{
-TValue*rb=RB(i);
-if(l_isfalse(rb)!=GETARG_C(i)){
-setobj(L,ra,rb);
-dojump(L,pc,GETARG_sBx(*pc));
-}
-pc++;
-continue;
-}
-case OP_CALL:{
-int b=GETARG_B(i);
-int nresults=GETARG_C(i)-1;
-if(b!=0)L->top=ra+b;
-L->savedpc=pc;
-switch(luaD_precall(L,ra,nresults)){
-case 0:{
-nexeccalls++;
-goto reentry;
-}
-case 1:{
-if(nresults>=0)L->top=L->ci->top;
-base=L->base;
-continue;
-}
-default:{
-return;
-}
-}
-}
-case OP_TAILCALL:{
-int b=GETARG_B(i);
-if(b!=0)L->top=ra+b;
-L->savedpc=pc;
-switch(luaD_precall(L,ra,(-1))){
-case 0:{
-CallInfo*ci=L->ci-1;
-int aux;
-StkId func=ci->func;
-StkId pfunc=(ci+1)->func;
-if(L->openupval)luaF_close(L,ci->base);
-L->base=ci->base=ci->func+((ci+1)->base-pfunc);
-for(aux=0;pfunc+aux<L->top;aux++)
-setobj(L,func+aux,pfunc+aux);
-ci->top=L->top=func+aux;
-ci->savedpc=L->savedpc;
-ci->tailcalls++;
-L->ci--;
-goto reentry;
-}
-case 1:{
-base=L->base;
-continue;
-}
-default:{
-return;
-}
-}
-}
-case OP_RETURN:{
-int b=GETARG_B(i);
-if(b!=0)L->top=ra+b-1;
-if(L->openupval)luaF_close(L,base);
-L->savedpc=pc;
-b=luaD_poscall(L,ra);
-if(--nexeccalls==0)
-return;
-else{
-if(b)L->top=L->ci->top;
-goto reentry;
-}
-}
-case OP_FORLOOP:{
-lua_Number step=nvalue(ra+2);
-lua_Number idx=luai_numadd(nvalue(ra),step);
-lua_Number limit=nvalue(ra+1);
-if(luai_numlt(0,step)?luai_numle(idx,limit)
-:luai_numle(limit,idx)){
-dojump(L,pc,GETARG_sBx(i));
-setnvalue(ra,idx);
-setnvalue(ra+3,idx);
-}
-continue;
-}
-case OP_FORPREP:{
-const TValue*init=ra;
-const TValue*plimit=ra+1;
-const TValue*pstep=ra+2;
-L->savedpc=pc;
-if(!tonumber(init,ra))
-luaG_runerror(L,LUA_QL("for")" initial value must be a number");
-else if(!tonumber(plimit,ra+1))
-luaG_runerror(L,LUA_QL("for")" limit must be a number");
-else if(!tonumber(pstep,ra+2))
-luaG_runerror(L,LUA_QL("for")" step must be a number");
-setnvalue(ra,luai_numsub(nvalue(ra),nvalue(pstep)));
-dojump(L,pc,GETARG_sBx(i));
-continue;
-}
-case OP_TFORLOOP:{
-StkId cb=ra+3;
-setobj(L,cb+2,ra+2);
-setobj(L,cb+1,ra+1);
-setobj(L,cb,ra);
-L->top=cb+3;
-Protect(luaD_call(L,cb,GETARG_C(i)));
-L->top=L->ci->top;
-cb=RA(i)+3;
-if(!ttisnil(cb)){
-setobj(L,cb-1,cb);
-dojump(L,pc,GETARG_sBx(*pc));
-}
-pc++;
-continue;
-}
-case OP_SETLIST:{
-int n=GETARG_B(i);
-int c=GETARG_C(i);
-int last;
-Table*h;
-if(n==0){
-n=cast_int(L->top-ra)-1;
-L->top=L->ci->top;
-}
-if(c==0)c=cast_int(*pc++);
-runtime_check(L,ttistable(ra));
-h=hvalue(ra);
-last=((c-1)*50)+n;
-if(last>h->sizearray)
-luaH_resizearray(L,h,last);
-for(;n>0;n--){
-TValue*val=ra+n;
-setobj(L,luaH_setnum(L,h,last--),val);
-luaC_barriert(L,h,val);
-}
-continue;
-}
-case OP_CLOSE:{
-luaF_close(L,ra);
-continue;
-}
-case OP_CLOSURE:{
-Proto*p;
-Closure*ncl;
-int nup,j;
-p=cl->p->p[GETARG_Bx(i)];
-nup=p->nups;
-ncl=luaF_newLclosure(L,nup,cl->env);
-ncl->l.p=p;
-for(j=0;j<nup;j++,pc++){
-if(GET_OPCODE(*pc)==OP_GETUPVAL)
-ncl->l.upvals[j]=cl->upvals[GETARG_B(*pc)];
-else{
-ncl->l.upvals[j]=luaF_findupval(L,base+GETARG_B(*pc));
-}
-}
-setclvalue(L,ra,ncl);
-Protect(luaC_checkGC(L));
-continue;
-}
-case OP_VARARG:{
-int b=GETARG_B(i)-1;
-int j;
-CallInfo*ci=L->ci;
-int n=cast_int(ci->base-ci->func)-cl->p->numparams-1;
-if(b==(-1)){
-Protect(luaD_checkstack(L,n));
-ra=RA(i);
-b=n;
-L->top=ra+n;
-}
-for(j=0;j<b;j++){
-if(j<n){
-setobj(L,ra+j,ci->base-n+j);
-}
-else{
-setnilvalue(ra+j);
-}
-}
-continue;
-}
-}
-}
-}
-#define api_checknelems(L,n)luai_apicheck(L,(n)<=(L->top-L->base))
-#define api_checkvalidindex(L,i)luai_apicheck(L,(i)!=(&luaO_nilobject_))
-#define api_incr_top(L){luai_apicheck(L,L->top<L->ci->top);L->top++;}
-static TValue*index2adr(lua_State*L,int idx){
-if(idx>0){
-TValue*o=L->base+(idx-1);
-luai_apicheck(L,idx<=L->ci->top-L->base);
-if(o>=L->top)return cast(TValue*,(&luaO_nilobject_));
-else return o;
-}
-else if(idx>(-10000)){
-luai_apicheck(L,idx!=0&&-idx<=L->top-L->base);
-return L->top+idx;
-}
-else switch(idx){
-case(-10000):return registry(L);
-case(-10001):{
-Closure*func=curr_func(L);
-sethvalue(L,&L->env,func->c.env);
-return&L->env;
-}
-case(-10002):return gt(L);
-default:{
-Closure*func=curr_func(L);
-idx=(-10002)-idx;
-return(idx<=func->c.nupvalues)
-?&func->c.upvalue[idx-1]
-:cast(TValue*,(&luaO_nilobject_));
-}
-}
-}
-static Table*getcurrenv(lua_State*L){
-if(L->ci==L->base_ci)
-return hvalue(gt(L));
-else{
-Closure*func=curr_func(L);
-return func->c.env;
-}
-}
-static int lua_checkstack(lua_State*L,int size){
-int res=1;
-if(size>8000||(L->top-L->base+size)>8000)
-res=0;
-else if(size>0){
-luaD_checkstack(L,size);
-if(L->ci->top<L->top+size)
-L->ci->top=L->top+size;
-}
-return res;
-}
-static lua_CFunction lua_atpanic(lua_State*L,lua_CFunction panicf){
-lua_CFunction old;
-old=G(L)->panic;
-G(L)->panic=panicf;
-return old;
-}
-static int lua_gettop(lua_State*L){
-return cast_int(L->top-L->base);
-}
-static void lua_settop(lua_State*L,int idx){
-if(idx>=0){
-luai_apicheck(L,idx<=L->stack_last-L->base);
-while(L->top<L->base+idx)
-setnilvalue(L->top++);
-L->top=L->base+idx;
-}
-else{
-luai_apicheck(L,-(idx+1)<=(L->top-L->base));
-L->top+=idx+1;
-}
-}
-static void lua_remove(lua_State*L,int idx){
-StkId p;
-p=index2adr(L,idx);
-api_checkvalidindex(L,p);
-while(++p<L->top)setobj(L,p-1,p);
-L->top--;
-}
-static void lua_insert(lua_State*L,int idx){
-StkId p;
-StkId q;
-p=index2adr(L,idx);
-api_checkvalidindex(L,p);
-for(q=L->top;q>p;q--)setobj(L,q,q-1);
-setobj(L,p,L->top);
-}
-static void lua_replace(lua_State*L,int idx){
-StkId o;
-if(idx==(-10001)&&L->ci==L->base_ci)
-luaG_runerror(L,"no calling environment");
-api_checknelems(L,1);
-o=index2adr(L,idx);
-api_checkvalidindex(L,o);
-if(idx==(-10001)){
-Closure*func=curr_func(L);
-luai_apicheck(L,ttistable(L->top-1));
-func->c.env=hvalue(L->top-1);
-luaC_barrier(L,func,L->top-1);
-}
-else{
-setobj(L,o,L->top-1);
-if(idx<(-10002))
-luaC_barrier(L,curr_func(L),L->top-1);
-}
-L->top--;
-}
-static void lua_pushvalue(lua_State*L,int idx){
-setobj(L,L->top,index2adr(L,idx));
-api_incr_top(L);
-}
-static int lua_type(lua_State*L,int idx){
-StkId o=index2adr(L,idx);
-return(o==(&luaO_nilobject_))?(-1):ttype(o);
-}
-static const char*lua_typename(lua_State*L,int t){
-UNUSED(L);
-return(t==(-1))?"no value":luaT_typenames[t];
-}
-static int lua_iscfunction(lua_State*L,int idx){
-StkId o=index2adr(L,idx);
-return iscfunction(o);
-}
-static int lua_isnumber(lua_State*L,int idx){
-TValue n;
-const TValue*o=index2adr(L,idx);
-return tonumber(o,&n);
-}
-static int lua_isstring(lua_State*L,int idx){
-int t=lua_type(L,idx);
-return(t==4||t==3);
-}
-static int lua_rawequal(lua_State*L,int index1,int index2){
-StkId o1=index2adr(L,index1);
-StkId o2=index2adr(L,index2);
-return(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0
-:luaO_rawequalObj(o1,o2);
-}
-static int lua_lessthan(lua_State*L,int index1,int index2){
-StkId o1,o2;
-int i;
-o1=index2adr(L,index1);
-o2=index2adr(L,index2);
-i=(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0
-:luaV_lessthan(L,o1,o2);
-return i;
-}
-static lua_Number lua_tonumber(lua_State*L,int idx){
-TValue n;
-const TValue*o=index2adr(L,idx);
-if(tonumber(o,&n))
-return nvalue(o);
-else
-return 0;
-}
-static lua_Integer lua_tointeger(lua_State*L,int idx){
-TValue n;
-const TValue*o=index2adr(L,idx);
-if(tonumber(o,&n)){
-lua_Integer res;
-lua_Number num=nvalue(o);
-lua_number2integer(res,num);
-return res;
-}
-else
-return 0;
-}
-static int lua_toboolean(lua_State*L,int idx){
-const TValue*o=index2adr(L,idx);
-return!l_isfalse(o);
-}
-static const char*lua_tolstring(lua_State*L,int idx,size_t*len){
-StkId o=index2adr(L,idx);
-if(!ttisstring(o)){
-if(!luaV_tostring(L,o)){
-if(len!=NULL)*len=0;
-return NULL;
-}
-luaC_checkGC(L);
-o=index2adr(L,idx);
-}
-if(len!=NULL)*len=tsvalue(o)->len;
-return svalue(o);
-}
-static size_t lua_objlen(lua_State*L,int idx){
-StkId o=index2adr(L,idx);
-switch(ttype(o)){
-case 4:return tsvalue(o)->len;
-case 7:return uvalue(o)->len;
-case 5:return luaH_getn(hvalue(o));
-case 3:{
-size_t l;
-l=(luaV_tostring(L,o)?tsvalue(o)->len:0);
-return l;
-}
-default:return 0;
-}
-}
-static lua_CFunction lua_tocfunction(lua_State*L,int idx){
-StkId o=index2adr(L,idx);
-return(!iscfunction(o))?NULL:clvalue(o)->c.f;
-}
-static void*lua_touserdata(lua_State*L,int idx){
-StkId o=index2adr(L,idx);
-switch(ttype(o)){
-case 7:return(rawuvalue(o)+1);
-case 2:return pvalue(o);
-default:return NULL;
-}
-}
-static void lua_pushnil(lua_State*L){
-setnilvalue(L->top);
-api_incr_top(L);
-}
-static void lua_pushnumber(lua_State*L,lua_Number n){
-setnvalue(L->top,n);
-api_incr_top(L);
-}
-static void lua_pushinteger(lua_State*L,lua_Integer n){
-setnvalue(L->top,cast_num(n));
-api_incr_top(L);
-}
-static void lua_pushlstring(lua_State*L,const char*s,size_t len){
-luaC_checkGC(L);
-setsvalue(L,L->top,luaS_newlstr(L,s,len));
-api_incr_top(L);
-}
-static void lua_pushstring(lua_State*L,const char*s){
-if(s==NULL)
-lua_pushnil(L);
-else
-lua_pushlstring(L,s,strlen(s));
-}
-static const char*lua_pushvfstring(lua_State*L,const char*fmt,
-va_list argp){
-const char*ret;
-luaC_checkGC(L);
-ret=luaO_pushvfstring(L,fmt,argp);
-return ret;
-}
-static const char*lua_pushfstring(lua_State*L,const char*fmt,...){
-const char*ret;
-va_list argp;
-luaC_checkGC(L);
-va_start(argp,fmt);
-ret=luaO_pushvfstring(L,fmt,argp);
-va_end(argp);
-return ret;
-}
-static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n){
-Closure*cl;
-luaC_checkGC(L);
-api_checknelems(L,n);
-cl=luaF_newCclosure(L,n,getcurrenv(L));
-cl->c.f=fn;
-L->top-=n;
-while(n--)
-setobj(L,&cl->c.upvalue[n],L->top+n);
-setclvalue(L,L->top,cl);
-api_incr_top(L);
-}
-static void lua_pushboolean(lua_State*L,int b){
-setbvalue(L->top,(b!=0));
-api_incr_top(L);
-}
-static int lua_pushthread(lua_State*L){
-setthvalue(L,L->top,L);
-api_incr_top(L);
-return(G(L)->mainthread==L);
-}
-static void lua_gettable(lua_State*L,int idx){
-StkId t;
-t=index2adr(L,idx);
-api_checkvalidindex(L,t);
-luaV_gettable(L,t,L->top-1,L->top-1);
-}
-static void lua_getfield(lua_State*L,int idx,const char*k){
-StkId t;
-TValue key;
-t=index2adr(L,idx);
-api_checkvalidindex(L,t);
-setsvalue(L,&key,luaS_new(L,k));
-luaV_gettable(L,t,&key,L->top);
-api_incr_top(L);
-}
-static void lua_rawget(lua_State*L,int idx){
-StkId t;
-t=index2adr(L,idx);
-luai_apicheck(L,ttistable(t));
-setobj(L,L->top-1,luaH_get(hvalue(t),L->top-1));
-}
-static void lua_rawgeti(lua_State*L,int idx,int n){
-StkId o;
-o=index2adr(L,idx);
-luai_apicheck(L,ttistable(o));
-setobj(L,L->top,luaH_getnum(hvalue(o),n));
-api_incr_top(L);
-}
-static void lua_createtable(lua_State*L,int narray,int nrec){
-luaC_checkGC(L);
-sethvalue(L,L->top,luaH_new(L,narray,nrec));
-api_incr_top(L);
-}
-static int lua_getmetatable(lua_State*L,int objindex){
-const TValue*obj;
-Table*mt=NULL;
-int res;
-obj=index2adr(L,objindex);
-switch(ttype(obj)){
-case 5:
-mt=hvalue(obj)->metatable;
-break;
-case 7:
-mt=uvalue(obj)->metatable;
-break;
-default:
-mt=G(L)->mt[ttype(obj)];
-break;
-}
-if(mt==NULL)
-res=0;
-else{
-sethvalue(L,L->top,mt);
-api_incr_top(L);
-res=1;
-}
-return res;
-}
-static void lua_getfenv(lua_State*L,int idx){
-StkId o;
-o=index2adr(L,idx);
-api_checkvalidindex(L,o);
-switch(ttype(o)){
-case 6:
-sethvalue(L,L->top,clvalue(o)->c.env);
-break;
-case 7:
-sethvalue(L,L->top,uvalue(o)->env);
-break;
-case 8:
-setobj(L,L->top,gt(thvalue(o)));
-break;
-default:
-setnilvalue(L->top);
-break;
-}
-api_incr_top(L);
-}
-static void lua_settable(lua_State*L,int idx){
-StkId t;
-api_checknelems(L,2);
-t=index2adr(L,idx);
-api_checkvalidindex(L,t);
-luaV_settable(L,t,L->top-2,L->top-1);
-L->top-=2;
-}
-static void lua_setfield(lua_State*L,int idx,const char*k){
-StkId t;
-TValue key;
-api_checknelems(L,1);
-t=index2adr(L,idx);
-api_checkvalidindex(L,t);
-setsvalue(L,&key,luaS_new(L,k));
-luaV_settable(L,t,&key,L->top-1);
-L->top--;
-}
-static void lua_rawset(lua_State*L,int idx){
-StkId t;
-api_checknelems(L,2);
-t=index2adr(L,idx);
-luai_apicheck(L,ttistable(t));
-setobj(L,luaH_set(L,hvalue(t),L->top-2),L->top-1);
-luaC_barriert(L,hvalue(t),L->top-1);
-L->top-=2;
-}
-static void lua_rawseti(lua_State*L,int idx,int n){
-StkId o;
-api_checknelems(L,1);
-o=index2adr(L,idx);
-luai_apicheck(L,ttistable(o));
-setobj(L,luaH_setnum(L,hvalue(o),n),L->top-1);
-luaC_barriert(L,hvalue(o),L->top-1);
-L->top--;
-}
-static int lua_setmetatable(lua_State*L,int objindex){
-TValue*obj;
-Table*mt;
-api_checknelems(L,1);
-obj=index2adr(L,objindex);
-api_checkvalidindex(L,obj);
-if(ttisnil(L->top-1))
-mt=NULL;
-else{
-luai_apicheck(L,ttistable(L->top-1));
-mt=hvalue(L->top-1);
-}
-switch(ttype(obj)){
-case 5:{
-hvalue(obj)->metatable=mt;
-if(mt)
-luaC_objbarriert(L,hvalue(obj),mt);
-break;
-}
-case 7:{
-uvalue(obj)->metatable=mt;
-if(mt)
-luaC_objbarrier(L,rawuvalue(obj),mt);
-break;
-}
-default:{
-G(L)->mt[ttype(obj)]=mt;
-break;
-}
-}
-L->top--;
-return 1;
-}
-static int lua_setfenv(lua_State*L,int idx){
-StkId o;
-int res=1;
-api_checknelems(L,1);
-o=index2adr(L,idx);
-api_checkvalidindex(L,o);
-luai_apicheck(L,ttistable(L->top-1));
-switch(ttype(o)){
-case 6:
-clvalue(o)->c.env=hvalue(L->top-1);
-break;
-case 7:
-uvalue(o)->env=hvalue(L->top-1);
-break;
-case 8:
-sethvalue(L,gt(thvalue(o)),hvalue(L->top-1));
-break;
-default:
-res=0;
-break;
-}
-if(res)luaC_objbarrier(L,gcvalue(o),hvalue(L->top-1));
-L->top--;
-return res;
-}
-#define adjustresults(L,nres){if(nres==(-1)&&L->top>=L->ci->top)L->ci->top=L->top;}
-#define checkresults(L,na,nr)luai_apicheck(L,(nr)==(-1)||(L->ci->top-L->top>=(nr)-(na)))
-static void lua_call(lua_State*L,int nargs,int nresults){
-StkId func;
-api_checknelems(L,nargs+1);
-checkresults(L,nargs,nresults);
-func=L->top-(nargs+1);
-luaD_call(L,func,nresults);
-adjustresults(L,nresults);
-}
-struct CallS{
-StkId func;
-int nresults;
-};
-static void f_call(lua_State*L,void*ud){
-struct CallS*c=cast(struct CallS*,ud);
-luaD_call(L,c->func,c->nresults);
-}
-static int lua_pcall(lua_State*L,int nargs,int nresults,int errfunc){
-struct CallS c;
-int status;
-ptrdiff_t func;
-api_checknelems(L,nargs+1);
-checkresults(L,nargs,nresults);
-if(errfunc==0)
-func=0;
-else{
-StkId o=index2adr(L,errfunc);
-api_checkvalidindex(L,o);
-func=savestack(L,o);
-}
-c.func=L->top-(nargs+1);
-c.nresults=nresults;
-status=luaD_pcall(L,f_call,&c,savestack(L,c.func),func);
-adjustresults(L,nresults);
-return status;
-}
-static int lua_load(lua_State*L,lua_Reader reader,void*data,
-const char*chunkname){
-ZIO z;
-int status;
-if(!chunkname)chunkname="?";
-luaZ_init(L,&z,reader,data);
-status=luaD_protectedparser(L,&z,chunkname);
-return status;
-}
-static int lua_error(lua_State*L){
-api_checknelems(L,1);
-luaG_errormsg(L);
-return 0;
-}
-static int lua_next(lua_State*L,int idx){
-StkId t;
-int more;
-t=index2adr(L,idx);
-luai_apicheck(L,ttistable(t));
-more=luaH_next(L,hvalue(t),L->top-1);
-if(more){
-api_incr_top(L);
-}
-else
-L->top-=1;
-return more;
-}
-static void lua_concat(lua_State*L,int n){
-api_checknelems(L,n);
-if(n>=2){
-luaC_checkGC(L);
-luaV_concat(L,n,cast_int(L->top-L->base)-1);
-L->top-=(n-1);
-}
-else if(n==0){
-setsvalue(L,L->top,luaS_newlstr(L,"",0));
-api_incr_top(L);
-}
-}
-static void*lua_newuserdata(lua_State*L,size_t size){
-Udata*u;
-luaC_checkGC(L);
-u=luaS_newudata(L,size,getcurrenv(L));
-setuvalue(L,L->top,u);
-api_incr_top(L);
-return u+1;
-}
-#define luaL_getn(L,i)((int)lua_objlen(L,i))
-#define luaL_setn(L,i,j)((void)0)
-typedef struct luaL_Reg{
-const char*name;
-lua_CFunction func;
-}luaL_Reg;
-static void luaI_openlib(lua_State*L,const char*libname,
-const luaL_Reg*l,int nup);
-static int luaL_argerror(lua_State*L,int numarg,const char*extramsg);
-static const char* luaL_checklstring(lua_State*L,int numArg,
-size_t*l);
-static const char* luaL_optlstring(lua_State*L,int numArg,
-const char*def,size_t*l);
-static lua_Integer luaL_checkinteger(lua_State*L,int numArg);
-static lua_Integer luaL_optinteger(lua_State*L,int nArg,
-lua_Integer def);
-static int luaL_error(lua_State*L,const char*fmt,...);
-static const char* luaL_findtable(lua_State*L,int idx,
-const char*fname,int szhint);
-#define luaL_argcheck(L,cond,numarg,extramsg)((void)((cond)||luaL_argerror(L,(numarg),(extramsg))))
-#define luaL_checkstring(L,n)(luaL_checklstring(L,(n),NULL))
-#define luaL_optstring(L,n,d)(luaL_optlstring(L,(n),(d),NULL))
-#define luaL_checkint(L,n)((int)luaL_checkinteger(L,(n)))
-#define luaL_optint(L,n,d)((int)luaL_optinteger(L,(n),(d)))
-#define luaL_typename(L,i)lua_typename(L,lua_type(L,(i)))
-#define luaL_getmetatable(L,n)(lua_getfield(L,(-10000),(n)))
-#define luaL_opt(L,f,n,d)(lua_isnoneornil(L,(n))?(d):f(L,(n)))
-typedef struct luaL_Buffer{
-char*p;
-int lvl;
-lua_State*L;
-char buffer[BUFSIZ];
-}luaL_Buffer;
-#define luaL_addchar(B,c)((void)((B)->p<((B)->buffer+BUFSIZ)||luaL_prepbuffer(B)),(*(B)->p++=(char)(c)))
-#define luaL_addsize(B,n)((B)->p+=(n))
-static char* luaL_prepbuffer(luaL_Buffer*B);
-static int luaL_argerror(lua_State*L,int narg,const char*extramsg){
-lua_Debug ar;
-if(!lua_getstack(L,0,&ar))
-return luaL_error(L,"bad argument #%d (%s)",narg,extramsg);
-lua_getinfo(L,"n",&ar);
-if(strcmp(ar.namewhat,"method")==0){
-narg--;
-if(narg==0)
-return luaL_error(L,"calling "LUA_QL("%s")" on bad self (%s)",
-ar.name,extramsg);
-}
-if(ar.name==NULL)
-ar.name="?";
-return luaL_error(L,"bad argument #%d to "LUA_QL("%s")" (%s)",
-narg,ar.name,extramsg);
-}
-static int luaL_typerror(lua_State*L,int narg,const char*tname){
-const char*msg=lua_pushfstring(L,"%s expected, got %s",
-tname,luaL_typename(L,narg));
-return luaL_argerror(L,narg,msg);
-}
-static void tag_error(lua_State*L,int narg,int tag){
-luaL_typerror(L,narg,lua_typename(L,tag));
-}
-static void luaL_where(lua_State*L,int level){
-lua_Debug ar;
-if(lua_getstack(L,level,&ar)){
-lua_getinfo(L,"Sl",&ar);
-if(ar.currentline>0){
-lua_pushfstring(L,"%s:%d: ",ar.short_src,ar.currentline);
-return;
-}
-}
-lua_pushliteral(L,"");
-}
-static int luaL_error(lua_State*L,const char*fmt,...){
-va_list argp;
-va_start(argp,fmt);
-luaL_where(L,1);
-lua_pushvfstring(L,fmt,argp);
-va_end(argp);
-lua_concat(L,2);
-return lua_error(L);
-}
-static int luaL_newmetatable(lua_State*L,const char*tname){
-lua_getfield(L,(-10000),tname);
-if(!lua_isnil(L,-1))
-return 0;
-lua_pop(L,1);
-lua_newtable(L);
-lua_pushvalue(L,-1);
-lua_setfield(L,(-10000),tname);
-return 1;
-}
-static void*luaL_checkudata(lua_State*L,int ud,const char*tname){
-void*p=lua_touserdata(L,ud);
-if(p!=NULL){
-if(lua_getmetatable(L,ud)){
-lua_getfield(L,(-10000),tname);
-if(lua_rawequal(L,-1,-2)){
-lua_pop(L,2);
-return p;
-}
-}
-}
-luaL_typerror(L,ud,tname);
-return NULL;
-}
-static void luaL_checkstack(lua_State*L,int space,const char*mes){
-if(!lua_checkstack(L,space))
-luaL_error(L,"stack overflow (%s)",mes);
-}
-static void luaL_checktype(lua_State*L,int narg,int t){
-if(lua_type(L,narg)!=t)
-tag_error(L,narg,t);
-}
-static void luaL_checkany(lua_State*L,int narg){
-if(lua_type(L,narg)==(-1))
-luaL_argerror(L,narg,"value expected");
-}
-static const char*luaL_checklstring(lua_State*L,int narg,size_t*len){
-const char*s=lua_tolstring(L,narg,len);
-if(!s)tag_error(L,narg,4);
-return s;
-}
-static const char*luaL_optlstring(lua_State*L,int narg,
-const char*def,size_t*len){
-if(lua_isnoneornil(L,narg)){
-if(len)
-*len=(def?strlen(def):0);
-return def;
-}
-else return luaL_checklstring(L,narg,len);
-}
-static lua_Number luaL_checknumber(lua_State*L,int narg){
-lua_Number d=lua_tonumber(L,narg);
-if(d==0&&!lua_isnumber(L,narg))
-tag_error(L,narg,3);
-return d;
-}
-static lua_Integer luaL_checkinteger(lua_State*L,int narg){
-lua_Integer d=lua_tointeger(L,narg);
-if(d==0&&!lua_isnumber(L,narg))
-tag_error(L,narg,3);
-return d;
-}
-static lua_Integer luaL_optinteger(lua_State*L,int narg,
-lua_Integer def){
-return luaL_opt(L,luaL_checkinteger,narg,def);
-}
-static int luaL_getmetafield(lua_State*L,int obj,const char*event){
-if(!lua_getmetatable(L,obj))
-return 0;
-lua_pushstring(L,event);
-lua_rawget(L,-2);
-if(lua_isnil(L,-1)){
-lua_pop(L,2);
-return 0;
-}
-else{
-lua_remove(L,-2);
-return 1;
-}
-}
-static void luaL_register(lua_State*L,const char*libname,
-const luaL_Reg*l){
-luaI_openlib(L,libname,l,0);
-}
-static int libsize(const luaL_Reg*l){
-int size=0;
-for(;l->name;l++)size++;
-return size;
-}
-static void luaI_openlib(lua_State*L,const char*libname,
-const luaL_Reg*l,int nup){
-if(libname){
-int size=libsize(l);
-luaL_findtable(L,(-10000),"_LOADED",1);
-lua_getfield(L,-1,libname);
-if(!lua_istable(L,-1)){
-lua_pop(L,1);
-if(luaL_findtable(L,(-10002),libname,size)!=NULL)
-luaL_error(L,"name conflict for module "LUA_QL("%s"),libname);
-lua_pushvalue(L,-1);
-lua_setfield(L,-3,libname);
-}
-lua_remove(L,-2);
-lua_insert(L,-(nup+1));
-}
-for(;l->name;l++){
-int i;
-for(i=0;i<nup;i++)
-lua_pushvalue(L,-nup);
-lua_pushcclosure(L,l->func,nup);
-lua_setfield(L,-(nup+2),l->name);
-}
-lua_pop(L,nup);
-}
-static const char*luaL_findtable(lua_State*L,int idx,
-const char*fname,int szhint){
-const char*e;
-lua_pushvalue(L,idx);
-do{
-e=strchr(fname,'.');
-if(e==NULL)e=fname+strlen(fname);
-lua_pushlstring(L,fname,e-fname);
-lua_rawget(L,-2);
-if(lua_isnil(L,-1)){
-lua_pop(L,1);
-lua_createtable(L,0,(*e=='.'?1:szhint));
-lua_pushlstring(L,fname,e-fname);
-lua_pushvalue(L,-2);
-lua_settable(L,-4);
-}
-else if(!lua_istable(L,-1)){
-lua_pop(L,2);
-return fname;
-}
-lua_remove(L,-2);
-fname=e+1;
-}while(*e=='.');
-return NULL;
-}
-#define bufflen(B)((B)->p-(B)->buffer)
-#define bufffree(B)((size_t)(BUFSIZ-bufflen(B)))
-static int emptybuffer(luaL_Buffer*B){
-size_t l=bufflen(B);
-if(l==0)return 0;
-else{
-lua_pushlstring(B->L,B->buffer,l);
-B->p=B->buffer;
-B->lvl++;
-return 1;
-}
-}
-static void adjuststack(luaL_Buffer*B){
-if(B->lvl>1){
-lua_State*L=B->L;
-int toget=1;
-size_t toplen=lua_strlen(L,-1);
-do{
-size_t l=lua_strlen(L,-(toget+1));
-if(B->lvl-toget+1>=(20/2)||toplen>l){
-toplen+=l;
-toget++;
-}
-else break;
-}while(toget<B->lvl);
-lua_concat(L,toget);
-B->lvl=B->lvl-toget+1;
-}
-}
-static char*luaL_prepbuffer(luaL_Buffer*B){
-if(emptybuffer(B))
-adjuststack(B);
-return B->buffer;
-}
-static void luaL_addlstring(luaL_Buffer*B,const char*s,size_t l){
-while(l--)
-luaL_addchar(B,*s++);
-}
-static void luaL_pushresult(luaL_Buffer*B){
-emptybuffer(B);
-lua_concat(B->L,B->lvl);
-B->lvl=1;
-}
-static void luaL_addvalue(luaL_Buffer*B){
-lua_State*L=B->L;
-size_t vl;
-const char*s=lua_tolstring(L,-1,&vl);
-if(vl<=bufffree(B)){
-memcpy(B->p,s,vl);
-B->p+=vl;
-lua_pop(L,1);
-}
-else{
-if(emptybuffer(B))
-lua_insert(L,-2);
-B->lvl++;
-adjuststack(B);
-}
-}
-static void luaL_buffinit(lua_State*L,luaL_Buffer*B){
-B->L=L;
-B->p=B->buffer;
-B->lvl=0;
-}
-typedef struct LoadF{
-int extraline;
-FILE*f;
-char buff[BUFSIZ];
-}LoadF;
-static const char*getF(lua_State*L,void*ud,size_t*size){
-LoadF*lf=(LoadF*)ud;
-(void)L;
-if(lf->extraline){
-lf->extraline=0;
-*size=1;
-return"\n";
-}
-if(feof(lf->f))return NULL;
-*size=fread(lf->buff,1,sizeof(lf->buff),lf->f);
-return(*size>0)?lf->buff:NULL;
-}
-static int errfile(lua_State*L,const char*what,int fnameindex){
-const char*serr=strerror(errno);
-const char*filename=lua_tostring(L,fnameindex)+1;
-lua_pushfstring(L,"cannot %s %s: %s",what,filename,serr);
-lua_remove(L,fnameindex);
-return(5+1);
-}
-static int luaL_loadfile(lua_State*L,const char*filename){
-LoadF lf;
-int status,readstatus;
-int c;
-int fnameindex=lua_gettop(L)+1;
-lf.extraline=0;
-if(filename==NULL){
-lua_pushliteral(L,"=stdin");
-lf.f=stdin;
-}
-else{
-lua_pushfstring(L,"@%s",filename);
-lf.f=fopen(filename,"r");
-if(lf.f==NULL)return errfile(L,"open",fnameindex);
-}
-c=getc(lf.f);
-if(c=='#'){
-lf.extraline=1;
-while((c=getc(lf.f))!=EOF&&c!='\n');
-if(c=='\n')c=getc(lf.f);
-}
-if(c=="\033Lua"[0]&&filename){
-lf.f=freopen(filename,"rb",lf.f);
-if(lf.f==NULL)return errfile(L,"reopen",fnameindex);
-while((c=getc(lf.f))!=EOF&&c!="\033Lua"[0]);
-lf.extraline=0;
-}
-ungetc(c,lf.f);
-status=lua_load(L,getF,&lf,lua_tostring(L,-1));
-readstatus=ferror(lf.f);
-if(filename)fclose(lf.f);
-if(readstatus){
-lua_settop(L,fnameindex);
-return errfile(L,"read",fnameindex);
-}
-lua_remove(L,fnameindex);
-return status;
-}
-typedef struct LoadS{
-const char*s;
-size_t size;
-}LoadS;
-static const char*getS(lua_State*L,void*ud,size_t*size){
-LoadS*ls=(LoadS*)ud;
-(void)L;
-if(ls->size==0)return NULL;
-*size=ls->size;
-ls->size=0;
-return ls->s;
-}
-static int luaL_loadbuffer(lua_State*L,const char*buff,size_t size,
-const char*name){
-LoadS ls;
-ls.s=buff;
-ls.size=size;
-return lua_load(L,getS,&ls,name);
-}
-static void*l_alloc(void*ud,void*ptr,size_t osize,size_t nsize){
-(void)ud;
-(void)osize;
-if(nsize==0){
-free(ptr);
-return NULL;
-}
-else
-return realloc(ptr,nsize);
-}
-static int panic(lua_State*L){
-(void)L;
-fprintf(stderr,"PANIC: unprotected error in call to Lua API (%s)\n",
-lua_tostring(L,-1));
-return 0;
-}
-static lua_State*luaL_newstate(void){
-lua_State*L=lua_newstate(l_alloc,NULL);
-if(L)lua_atpanic(L,&panic);
-return L;
-}
-static int luaB_tonumber(lua_State*L){
-int base=luaL_optint(L,2,10);
-if(base==10){
-luaL_checkany(L,1);
-if(lua_isnumber(L,1)){
-lua_pushnumber(L,lua_tonumber(L,1));
-return 1;
-}
-}
-else{
-const char*s1=luaL_checkstring(L,1);
-char*s2;
-unsigned long n;
-luaL_argcheck(L,2<=base&&base<=36,2,"base out of range");
-n=strtoul(s1,&s2,base);
-if(s1!=s2){
-while(isspace((unsigned char)(*s2)))s2++;
-if(*s2=='\0'){
-lua_pushnumber(L,(lua_Number)n);
-return 1;
-}
-}
-}
-lua_pushnil(L);
-return 1;
-}
-static int luaB_error(lua_State*L){
-int level=luaL_optint(L,2,1);
-lua_settop(L,1);
-if(lua_isstring(L,1)&&level>0){
-luaL_where(L,level);
-lua_pushvalue(L,1);
-lua_concat(L,2);
-}
-return lua_error(L);
-}
-static int luaB_setmetatable(lua_State*L){
-int t=lua_type(L,2);
-luaL_checktype(L,1,5);
-luaL_argcheck(L,t==0||t==5,2,
-"nil or table expected");
-if(luaL_getmetafield(L,1,"__metatable"))
-luaL_error(L,"cannot change a protected metatable");
-lua_settop(L,2);
-lua_setmetatable(L,1);
-return 1;
-}
-static void getfunc(lua_State*L,int opt){
-if(lua_isfunction(L,1))lua_pushvalue(L,1);
-else{
-lua_Debug ar;
-int level=opt?luaL_optint(L,1,1):luaL_checkint(L,1);
-luaL_argcheck(L,level>=0,1,"level must be non-negative");
-if(lua_getstack(L,level,&ar)==0)
-luaL_argerror(L,1,"invalid level");
-lua_getinfo(L,"f",&ar);
-if(lua_isnil(L,-1))
-luaL_error(L,"no function environment for tail call at level %d",
-level);
-}
-}
-static int luaB_setfenv(lua_State*L){
-luaL_checktype(L,2,5);
-getfunc(L,0);
-lua_pushvalue(L,2);
-if(lua_isnumber(L,1)&&lua_tonumber(L,1)==0){
-lua_pushthread(L);
-lua_insert(L,-2);
-lua_setfenv(L,-2);
-return 0;
-}
-else if(lua_iscfunction(L,-2)||lua_setfenv(L,-2)==0)
-luaL_error(L,
-LUA_QL("setfenv")" cannot change environment of given object");
-return 1;
-}
-static int luaB_rawget(lua_State*L){
-luaL_checktype(L,1,5);
-luaL_checkany(L,2);
-lua_settop(L,2);
-lua_rawget(L,1);
-return 1;
-}
-static int luaB_type(lua_State*L){
-luaL_checkany(L,1);
-lua_pushstring(L,luaL_typename(L,1));
-return 1;
-}
-static int luaB_next(lua_State*L){
-luaL_checktype(L,1,5);
-lua_settop(L,2);
-if(lua_next(L,1))
-return 2;
-else{
-lua_pushnil(L);
-return 1;
-}
-}
-static int luaB_pairs(lua_State*L){
-luaL_checktype(L,1,5);
-lua_pushvalue(L,lua_upvalueindex(1));
-lua_pushvalue(L,1);
-lua_pushnil(L);
-return 3;
-}
-static int ipairsaux(lua_State*L){
-int i=luaL_checkint(L,2);
-luaL_checktype(L,1,5);
-i++;
-lua_pushinteger(L,i);
-lua_rawgeti(L,1,i);
-return(lua_isnil(L,-1))?0:2;
-}
-static int luaB_ipairs(lua_State*L){
-luaL_checktype(L,1,5);
-lua_pushvalue(L,lua_upvalueindex(1));
-lua_pushvalue(L,1);
-lua_pushinteger(L,0);
-return 3;
-}
-static int load_aux(lua_State*L,int status){
-if(status==0)
-return 1;
-else{
-lua_pushnil(L);
-lua_insert(L,-2);
-return 2;
-}
-}
-static int luaB_loadstring(lua_State*L){
-size_t l;
-const char*s=luaL_checklstring(L,1,&l);
-const char*chunkname=luaL_optstring(L,2,s);
-return load_aux(L,luaL_loadbuffer(L,s,l,chunkname));
-}
-static int luaB_loadfile(lua_State*L){
-const char*fname=luaL_optstring(L,1,NULL);
-return load_aux(L,luaL_loadfile(L,fname));
-}
-static int luaB_assert(lua_State*L){
-luaL_checkany(L,1);
-if(!lua_toboolean(L,1))
-return luaL_error(L,"%s",luaL_optstring(L,2,"assertion failed!"));
-return lua_gettop(L);
-}
-static int luaB_unpack(lua_State*L){
-int i,e,n;
-luaL_checktype(L,1,5);
-i=luaL_optint(L,2,1);
-e=luaL_opt(L,luaL_checkint,3,luaL_getn(L,1));
-if(i>e)return 0;
-n=e-i+1;
-if(n<=0||!lua_checkstack(L,n))
-return luaL_error(L,"too many results to unpack");
-lua_rawgeti(L,1,i);
-while(i++<e)
-lua_rawgeti(L,1,i);
-return n;
-}
-static int luaB_pcall(lua_State*L){
-int status;
-luaL_checkany(L,1);
-status=lua_pcall(L,lua_gettop(L)-1,(-1),0);
-lua_pushboolean(L,(status==0));
-lua_insert(L,1);
-return lua_gettop(L);
-}
-static int luaB_newproxy(lua_State*L){
-lua_settop(L,1);
-lua_newuserdata(L,0);
-if(lua_toboolean(L,1)==0)
-return 1;
-else if(lua_isboolean(L,1)){
-lua_newtable(L);
-lua_pushvalue(L,-1);
-lua_pushboolean(L,1);
-lua_rawset(L,lua_upvalueindex(1));
-}
-else{
-int validproxy=0;
-if(lua_getmetatable(L,1)){
-lua_rawget(L,lua_upvalueindex(1));
-validproxy=lua_toboolean(L,-1);
-lua_pop(L,1);
-}
-luaL_argcheck(L,validproxy,1,"boolean or proxy expected");
-lua_getmetatable(L,1);
-}
-lua_setmetatable(L,2);
-return 1;
-}
-static const luaL_Reg base_funcs[]={
-{"assert",luaB_assert},
-{"error",luaB_error},
-{"loadfile",luaB_loadfile},
-{"loadstring",luaB_loadstring},
-{"next",luaB_next},
-{"pcall",luaB_pcall},
-{"rawget",luaB_rawget},
-{"setfenv",luaB_setfenv},
-{"setmetatable",luaB_setmetatable},
-{"tonumber",luaB_tonumber},
-{"type",luaB_type},
-{"unpack",luaB_unpack},
-{NULL,NULL}
-};
-static void auxopen(lua_State*L,const char*name,
-lua_CFunction f,lua_CFunction u){
-lua_pushcfunction(L,u);
-lua_pushcclosure(L,f,1);
-lua_setfield(L,-2,name);
-}
-static void base_open(lua_State*L){
-lua_pushvalue(L,(-10002));
-lua_setglobal(L,"_G");
-luaL_register(L,"_G",base_funcs);
-lua_pushliteral(L,"Lua 5.1");
-lua_setglobal(L,"_VERSION");
-auxopen(L,"ipairs",luaB_ipairs,ipairsaux);
-auxopen(L,"pairs",luaB_pairs,luaB_next);
-lua_createtable(L,0,1);
-lua_pushvalue(L,-1);
-lua_setmetatable(L,-2);
-lua_pushliteral(L,"kv");
-lua_setfield(L,-2,"__mode");
-lua_pushcclosure(L,luaB_newproxy,1);
-lua_setglobal(L,"newproxy");
-}
-static int luaopen_base(lua_State*L){
-base_open(L);
-return 1;
-}
-#define aux_getn(L,n)(luaL_checktype(L,n,5),luaL_getn(L,n))
-static int tinsert(lua_State*L){
-int e=aux_getn(L,1)+1;
-int pos;
-switch(lua_gettop(L)){
-case 2:{
-pos=e;
-break;
-}
-case 3:{
-int i;
-pos=luaL_checkint(L,2);
-if(pos>e)e=pos;
-for(i=e;i>pos;i--){
-lua_rawgeti(L,1,i-1);
-lua_rawseti(L,1,i);
-}
-break;
-}
-default:{
-return luaL_error(L,"wrong number of arguments to "LUA_QL("insert"));
-}
-}
-luaL_setn(L,1,e);
-lua_rawseti(L,1,pos);
-return 0;
-}
-static int tremove(lua_State*L){
-int e=aux_getn(L,1);
-int pos=luaL_optint(L,2,e);
-if(!(1<=pos&&pos<=e))
-return 0;
-luaL_setn(L,1,e-1);
-lua_rawgeti(L,1,pos);
-for(;pos<e;pos++){
-lua_rawgeti(L,1,pos+1);
-lua_rawseti(L,1,pos);
-}
-lua_pushnil(L);
-lua_rawseti(L,1,e);
-return 1;
-}
-static void addfield(lua_State*L,luaL_Buffer*b,int i){
-lua_rawgeti(L,1,i);
-if(!lua_isstring(L,-1))
-luaL_error(L,"invalid value (%s) at index %d in table for "
-LUA_QL("concat"),luaL_typename(L,-1),i);
-luaL_addvalue(b);
-}
-static int tconcat(lua_State*L){
-luaL_Buffer b;
-size_t lsep;
-int i,last;
-const char*sep=luaL_optlstring(L,2,"",&lsep);
-luaL_checktype(L,1,5);
-i=luaL_optint(L,3,1);
-last=luaL_opt(L,luaL_checkint,4,luaL_getn(L,1));
-luaL_buffinit(L,&b);
-for(;i<last;i++){
-addfield(L,&b,i);
-luaL_addlstring(&b,sep,lsep);
-}
-if(i==last)
-addfield(L,&b,i);
-luaL_pushresult(&b);
-return 1;
-}
-static void set2(lua_State*L,int i,int j){
-lua_rawseti(L,1,i);
-lua_rawseti(L,1,j);
-}
-static int sort_comp(lua_State*L,int a,int b){
-if(!lua_isnil(L,2)){
-int res;
-lua_pushvalue(L,2);
-lua_pushvalue(L,a-1);
-lua_pushvalue(L,b-2);
-lua_call(L,2,1);
-res=lua_toboolean(L,-1);
-lua_pop(L,1);
-return res;
-}
-else
-return lua_lessthan(L,a,b);
-}
-static void auxsort(lua_State*L,int l,int u){
-while(l<u){
-int i,j;
-lua_rawgeti(L,1,l);
-lua_rawgeti(L,1,u);
-if(sort_comp(L,-1,-2))
-set2(L,l,u);
-else
-lua_pop(L,2);
-if(u-l==1)break;
-i=(l+u)/2;
-lua_rawgeti(L,1,i);
-lua_rawgeti(L,1,l);
-if(sort_comp(L,-2,-1))
-set2(L,i,l);
-else{
-lua_pop(L,1);
-lua_rawgeti(L,1,u);
-if(sort_comp(L,-1,-2))
-set2(L,i,u);
-else
-lua_pop(L,2);
-}
-if(u-l==2)break;
-lua_rawgeti(L,1,i);
-lua_pushvalue(L,-1);
-lua_rawgeti(L,1,u-1);
-set2(L,i,u-1);
-i=l;j=u-1;
-for(;;){
-while(lua_rawgeti(L,1,++i),sort_comp(L,-1,-2)){
-if(i>u)luaL_error(L,"invalid order function for sorting");
-lua_pop(L,1);
-}
-while(lua_rawgeti(L,1,--j),sort_comp(L,-3,-1)){
-if(j<l)luaL_error(L,"invalid order function for sorting");
-lua_pop(L,1);
-}
-if(j<i){
-lua_pop(L,3);
-break;
-}
-set2(L,i,j);
-}
-lua_rawgeti(L,1,u-1);
-lua_rawgeti(L,1,i);
-set2(L,u-1,i);
-if(i-l<u-i){
-j=l;i=i-1;l=i+2;
-}
-else{
-j=i+1;i=u;u=j-2;
-}
-auxsort(L,j,i);
-}
-}
-static int sort(lua_State*L){
-int n=aux_getn(L,1);
-luaL_checkstack(L,40,"");
-if(!lua_isnoneornil(L,2))
-luaL_checktype(L,2,6);
-lua_settop(L,2);
-auxsort(L,1,n);
-return 0;
-}
-static const luaL_Reg tab_funcs[]={
-{"concat",tconcat},
-{"insert",tinsert},
-{"remove",tremove},
-{"sort",sort},
-{NULL,NULL}
-};
-static int luaopen_table(lua_State*L){
-luaL_register(L,"table",tab_funcs);
-return 1;
-}
-static const char*const fnames[]={"input","output"};
-static int pushresult(lua_State*L,int i,const char*filename){
-int en=errno;
-if(i){
-lua_pushboolean(L,1);
-return 1;
-}
-else{
-lua_pushnil(L);
-if(filename)
-lua_pushfstring(L,"%s: %s",filename,strerror(en));
-else
-lua_pushfstring(L,"%s",strerror(en));
-lua_pushinteger(L,en);
-return 3;
-}
-}
-static void fileerror(lua_State*L,int arg,const char*filename){
-lua_pushfstring(L,"%s: %s",filename,strerror(errno));
-luaL_argerror(L,arg,lua_tostring(L,-1));
-}
-#define tofilep(L)((FILE**)luaL_checkudata(L,1,"FILE*"))
-static int io_type(lua_State*L){
-void*ud;
-luaL_checkany(L,1);
-ud=lua_touserdata(L,1);
-lua_getfield(L,(-10000),"FILE*");
-if(ud==NULL||!lua_getmetatable(L,1)||!lua_rawequal(L,-2,-1))
-lua_pushnil(L);
-else if(*((FILE**)ud)==NULL)
-lua_pushliteral(L,"closed file");
-else
-lua_pushliteral(L,"file");
-return 1;
-}
-static FILE*tofile(lua_State*L){
-FILE**f=tofilep(L);
-if(*f==NULL)
-luaL_error(L,"attempt to use a closed file");
-return*f;
-}
-static FILE**newfile(lua_State*L){
-FILE**pf=(FILE**)lua_newuserdata(L,sizeof(FILE*));
-*pf=NULL;
-luaL_getmetatable(L,"FILE*");
-lua_setmetatable(L,-2);
-return pf;
-}
-static int io_noclose(lua_State*L){
-lua_pushnil(L);
-lua_pushliteral(L,"cannot close standard file");
-return 2;
-}
-static int io_pclose(lua_State*L){
-FILE**p=tofilep(L);
-int ok=lua_pclose(L,*p);
-*p=NULL;
-return pushresult(L,ok,NULL);
-}
-static int io_fclose(lua_State*L){
-FILE**p=tofilep(L);
-int ok=(fclose(*p)==0);
-*p=NULL;
-return pushresult(L,ok,NULL);
-}
-static int aux_close(lua_State*L){
-lua_getfenv(L,1);
-lua_getfield(L,-1,"__close");
-return(lua_tocfunction(L,-1))(L);
-}
-static int io_close(lua_State*L){
-if(lua_isnone(L,1))
-lua_rawgeti(L,(-10001),2);
-tofile(L);
-return aux_close(L);
-}
-static int io_gc(lua_State*L){
-FILE*f=*tofilep(L);
-if(f!=NULL)
-aux_close(L);
-return 0;
-}
-static int io_open(lua_State*L){
-const char*filename=luaL_checkstring(L,1);
-const char*mode=luaL_optstring(L,2,"r");
-FILE**pf=newfile(L);
-*pf=fopen(filename,mode);
-return(*pf==NULL)?pushresult(L,0,filename):1;
-}
-static FILE*getiofile(lua_State*L,int findex){
-FILE*f;
-lua_rawgeti(L,(-10001),findex);
-f=*(FILE**)lua_touserdata(L,-1);
-if(f==NULL)
-luaL_error(L,"standard %s file is closed",fnames[findex-1]);
-return f;
-}
-static int g_iofile(lua_State*L,int f,const char*mode){
-if(!lua_isnoneornil(L,1)){
-const char*filename=lua_tostring(L,1);
-if(filename){
-FILE**pf=newfile(L);
-*pf=fopen(filename,mode);
-if(*pf==NULL)
-fileerror(L,1,filename);
-}
-else{
-tofile(L);
-lua_pushvalue(L,1);
-}
-lua_rawseti(L,(-10001),f);
-}
-lua_rawgeti(L,(-10001),f);
-return 1;
-}
-static int io_input(lua_State*L){
-return g_iofile(L,1,"r");
-}
-static int io_output(lua_State*L){
-return g_iofile(L,2,"w");
-}
-static int io_readline(lua_State*L);
-static void aux_lines(lua_State*L,int idx,int toclose){
-lua_pushvalue(L,idx);
-lua_pushboolean(L,toclose);
-lua_pushcclosure(L,io_readline,2);
-}
-static int f_lines(lua_State*L){
-tofile(L);
-aux_lines(L,1,0);
-return 1;
-}
-static int io_lines(lua_State*L){
-if(lua_isnoneornil(L,1)){
-lua_rawgeti(L,(-10001),1);
-return f_lines(L);
-}
-else{
-const char*filename=luaL_checkstring(L,1);
-FILE**pf=newfile(L);
-*pf=fopen(filename,"r");
-if(*pf==NULL)
-fileerror(L,1,filename);
-aux_lines(L,lua_gettop(L),1);
-return 1;
-}
-}
-static int read_number(lua_State*L,FILE*f){
-lua_Number d;
-if(fscanf(f,"%lf",&d)==1){
-lua_pushnumber(L,d);
-return 1;
-}
-else{
-lua_pushnil(L);
-return 0;
-}
-}
-static int test_eof(lua_State*L,FILE*f){
-int c=getc(f);
-ungetc(c,f);
-lua_pushlstring(L,NULL,0);
-return(c!=EOF);
-}
-static int read_line(lua_State*L,FILE*f){
-luaL_Buffer b;
-luaL_buffinit(L,&b);
-for(;;){
-size_t l;
-char*p=luaL_prepbuffer(&b);
-if(fgets(p,BUFSIZ,f)==NULL){
-luaL_pushresult(&b);
-return(lua_objlen(L,-1)>0);
-}
-l=strlen(p);
-if(l==0||p[l-1]!='\n')
-luaL_addsize(&b,l);
-else{
-luaL_addsize(&b,l-1);
-luaL_pushresult(&b);
-return 1;
-}
-}
-}
-static int read_chars(lua_State*L,FILE*f,size_t n){
-size_t rlen;
-size_t nr;
-luaL_Buffer b;
-luaL_buffinit(L,&b);
-rlen=BUFSIZ;
-do{
-char*p=luaL_prepbuffer(&b);
-if(rlen>n)rlen=n;
-nr=fread(p,sizeof(char),rlen,f);
-luaL_addsize(&b,nr);
-n-=nr;
-}while(n>0&&nr==rlen);
-luaL_pushresult(&b);
-return(n==0||lua_objlen(L,-1)>0);
-}
-static int g_read(lua_State*L,FILE*f,int first){
-int nargs=lua_gettop(L)-1;
-int success;
-int n;
-clearerr(f);
-if(nargs==0){
-success=read_line(L,f);
-n=first+1;
-}
-else{
-luaL_checkstack(L,nargs+20,"too many arguments");
-success=1;
-for(n=first;nargs--&&success;n++){
-if(lua_type(L,n)==3){
-size_t l=(size_t)lua_tointeger(L,n);
-success=(l==0)?test_eof(L,f):read_chars(L,f,l);
-}
-else{
-const char*p=lua_tostring(L,n);
-luaL_argcheck(L,p&&p[0]=='*',n,"invalid option");
-switch(p[1]){
-case'n':
-success=read_number(L,f);
-break;
-case'l':
-success=read_line(L,f);
-break;
-case'a':
-read_chars(L,f,~((size_t)0));
-success=1;
-break;
-default:
-return luaL_argerror(L,n,"invalid format");
-}
-}
-}
-}
-if(ferror(f))
-return pushresult(L,0,NULL);
-if(!success){
-lua_pop(L,1);
-lua_pushnil(L);
-}
-return n-first;
-}
-static int io_read(lua_State*L){
-return g_read(L,getiofile(L,1),1);
-}
-static int f_read(lua_State*L){
-return g_read(L,tofile(L),2);
-}
-static int io_readline(lua_State*L){
-FILE*f=*(FILE**)lua_touserdata(L,lua_upvalueindex(1));
-int success;
-if(f==NULL)
-luaL_error(L,"file is already closed");
-success=read_line(L,f);
-if(ferror(f))
-return luaL_error(L,"%s",strerror(errno));
-if(success)return 1;
-else{
-if(lua_toboolean(L,lua_upvalueindex(2))){
-lua_settop(L,0);
-lua_pushvalue(L,lua_upvalueindex(1));
-aux_close(L);
-}
-return 0;
-}
-}
-static int g_write(lua_State*L,FILE*f,int arg){
-int nargs=lua_gettop(L)-1;
-int status=1;
-for(;nargs--;arg++){
-if(lua_type(L,arg)==3){
-status=status&&
-fprintf(f,"%.14g",lua_tonumber(L,arg))>0;
-}
-else{
-size_t l;
-const char*s=luaL_checklstring(L,arg,&l);
-status=status&&(fwrite(s,sizeof(char),l,f)==l);
-}
-}
-return pushresult(L,status,NULL);
-}
-static int io_write(lua_State*L){
-return g_write(L,getiofile(L,2),1);
-}
-static int f_write(lua_State*L){
-return g_write(L,tofile(L),2);
-}
-static int io_flush(lua_State*L){
-return pushresult(L,fflush(getiofile(L,2))==0,NULL);
-}
-static int f_flush(lua_State*L){
-return pushresult(L,fflush(tofile(L))==0,NULL);
-}
-static const luaL_Reg iolib[]={
-{"close",io_close},
-{"flush",io_flush},
-{"input",io_input},
-{"lines",io_lines},
-{"open",io_open},
-{"output",io_output},
-{"read",io_read},
-{"type",io_type},
-{"write",io_write},
-{NULL,NULL}
-};
-static const luaL_Reg flib[]={
-{"close",io_close},
-{"flush",f_flush},
-{"lines",f_lines},
-{"read",f_read},
-{"write",f_write},
-{"__gc",io_gc},
-{NULL,NULL}
-};
-static void createmeta(lua_State*L){
-luaL_newmetatable(L,"FILE*");
-lua_pushvalue(L,-1);
-lua_setfield(L,-2,"__index");
-luaL_register(L,NULL,flib);
-}
-static void createstdfile(lua_State*L,FILE*f,int k,const char*fname){
-*newfile(L)=f;
-if(k>0){
-lua_pushvalue(L,-1);
-lua_rawseti(L,(-10001),k);
-}
-lua_pushvalue(L,-2);
-lua_setfenv(L,-2);
-lua_setfield(L,-3,fname);
-}
-static void newfenv(lua_State*L,lua_CFunction cls){
-lua_createtable(L,0,1);
-lua_pushcfunction(L,cls);
-lua_setfield(L,-2,"__close");
-}
-static int luaopen_io(lua_State*L){
-createmeta(L);
-newfenv(L,io_fclose);
-lua_replace(L,(-10001));
-luaL_register(L,"io",iolib);
-newfenv(L,io_noclose);
-createstdfile(L,stdin,1,"stdin");
-createstdfile(L,stdout,2,"stdout");
-createstdfile(L,stderr,0,"stderr");
-lua_pop(L,1);
-lua_getfield(L,-1,"popen");
-newfenv(L,io_pclose);
-lua_setfenv(L,-2);
-lua_pop(L,1);
-return 1;
-}
-static int os_pushresult(lua_State*L,int i,const char*filename){
-int en=errno;
-if(i){
-lua_pushboolean(L,1);
-return 1;
-}
-else{
-lua_pushnil(L);
-lua_pushfstring(L,"%s: %s",filename,strerror(en));
-lua_pushinteger(L,en);
-return 3;
-}
-}
-static int os_remove(lua_State*L){
-const char*filename=luaL_checkstring(L,1);
-return os_pushresult(L,remove(filename)==0,filename);
-}
-static int os_exit(lua_State*L){
-exit(luaL_optint(L,1,EXIT_SUCCESS));
-}
-static const luaL_Reg syslib[]={
-{"exit",os_exit},
-{"remove",os_remove},
-{NULL,NULL}
-};
-static int luaopen_os(lua_State*L){
-luaL_register(L,"os",syslib);
-return 1;
-}
-#define uchar(c)((unsigned char)(c))
-static ptrdiff_t posrelat(ptrdiff_t pos,size_t len){
-if(pos<0)pos+=(ptrdiff_t)len+1;
-return(pos>=0)?pos:0;
-}
-static int str_sub(lua_State*L){
-size_t l;
-const char*s=luaL_checklstring(L,1,&l);
-ptrdiff_t start=posrelat(luaL_checkinteger(L,2),l);
-ptrdiff_t end=posrelat(luaL_optinteger(L,3,-1),l);
-if(start<1)start=1;
-if(end>(ptrdiff_t)l)end=(ptrdiff_t)l;
-if(start<=end)
-lua_pushlstring(L,s+start-1,end-start+1);
-else lua_pushliteral(L,"");
-return 1;
-}
-static int str_lower(lua_State*L){
-size_t l;
-size_t i;
-luaL_Buffer b;
-const char*s=luaL_checklstring(L,1,&l);
-luaL_buffinit(L,&b);
-for(i=0;i<l;i++)
-luaL_addchar(&b,tolower(uchar(s[i])));
-luaL_pushresult(&b);
-return 1;
-}
-static int str_upper(lua_State*L){
-size_t l;
-size_t i;
-luaL_Buffer b;
-const char*s=luaL_checklstring(L,1,&l);
-luaL_buffinit(L,&b);
-for(i=0;i<l;i++)
-luaL_addchar(&b,toupper(uchar(s[i])));
-luaL_pushresult(&b);
-return 1;
-}
-static int str_rep(lua_State*L){
-size_t l;
-luaL_Buffer b;
-const char*s=luaL_checklstring(L,1,&l);
-int n=luaL_checkint(L,2);
-luaL_buffinit(L,&b);
-while(n-->0)
-luaL_addlstring(&b,s,l);
-luaL_pushresult(&b);
-return 1;
-}
-static int str_byte(lua_State*L){
-size_t l;
-const char*s=luaL_checklstring(L,1,&l);
-ptrdiff_t posi=posrelat(luaL_optinteger(L,2,1),l);
-ptrdiff_t pose=posrelat(luaL_optinteger(L,3,posi),l);
-int n,i;
-if(posi<=0)posi=1;
-if((size_t)pose>l)pose=l;
-if(posi>pose)return 0;
-n=(int)(pose-posi+1);
-if(posi+n<=pose)
-luaL_error(L,"string slice too long");
-luaL_checkstack(L,n,"string slice too long");
-for(i=0;i<n;i++)
-lua_pushinteger(L,uchar(s[posi+i-1]));
-return n;
-}
-static int str_char(lua_State*L){
-int n=lua_gettop(L);
-int i;
-luaL_Buffer b;
-luaL_buffinit(L,&b);
-for(i=1;i<=n;i++){
-int c=luaL_checkint(L,i);
-luaL_argcheck(L,uchar(c)==c,i,"invalid value");
-luaL_addchar(&b,uchar(c));
-}
-luaL_pushresult(&b);
-return 1;
-}
-typedef struct MatchState{
-const char*src_init;
-const char*src_end;
-lua_State*L;
-int level;
-struct{
-const char*init;
-ptrdiff_t len;
-}capture[32];
-}MatchState;
-static int check_capture(MatchState*ms,int l){
-l-='1';
-if(l<0||l>=ms->level||ms->capture[l].len==(-1))
-return luaL_error(ms->L,"invalid capture index");
-return l;
-}
-static int capture_to_close(MatchState*ms){
-int level=ms->level;
-for(level--;level>=0;level--)
-if(ms->capture[level].len==(-1))return level;
-return luaL_error(ms->L,"invalid pattern capture");
-}
-static const char*classend(MatchState*ms,const char*p){
-switch(*p++){
-case'%':{
-if(*p=='\0')
-luaL_error(ms->L,"malformed pattern (ends with "LUA_QL("%%")")");
-return p+1;
-}
-case'[':{
-if(*p=='^')p++;
-do{
-if(*p=='\0')
-luaL_error(ms->L,"malformed pattern (missing "LUA_QL("]")")");
-if(*(p++)=='%'&&*p!='\0')
-p++;
-}while(*p!=']');
-return p+1;
-}
-default:{
-return p;
-}
-}
-}
-static int match_class(int c,int cl){
-int res;
-switch(tolower(cl)){
-case'a':res=isalpha(c);break;
-case'c':res=iscntrl(c);break;
-case'd':res=isdigit(c);break;
-case'l':res=islower(c);break;
-case'p':res=ispunct(c);break;
-case's':res=isspace(c);break;
-case'u':res=isupper(c);break;
-case'w':res=isalnum(c);break;
-case'x':res=isxdigit(c);break;
-case'z':res=(c==0);break;
-default:return(cl==c);
-}
-return(islower(cl)?res:!res);
-}
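Editor's sketch (not part of the diff): match_class() above implements Lua's pattern character classes on top of <ctype.h>, with an upper-case class letter meaning the complement (%D is "not a digit"). A trimmed standalone version with hypothetical names:

#include <stdio.h>
#include <ctype.h>

/* Reduced copy of match_class() above, covering only %a, %d and %s. */
static int match_class_demo(int c, int cl) {
  int res;
  switch (tolower(cl)) {
    case 'a': res = isalpha(c); break;
    case 'd': res = isdigit(c); break;
    case 's': res = isspace(c); break;
    default:  return cl == c;        /* not a class letter: literal match */
  }
  return islower(cl) ? res : !res;   /* upper-case class letter = complement */
}

int main(void) {
  printf("%d %d %d\n",
         match_class_demo('7', 'd') != 0,   /* 1: '7' matches %d */
         match_class_demo('7', 'D') != 0,   /* 0: '7' does not match %D */
         match_class_demo('x', 'a') != 0);  /* 1: 'x' matches %a */
  return 0;
}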
-static int matchbracketclass(int c,const char*p,const char*ec){
-int sig=1;
-if(*(p+1)=='^'){
-sig=0;
-p++;
-}
-while(++p<ec){
-if(*p=='%'){
-p++;
-if(match_class(c,uchar(*p)))
-return sig;
-}
-else if((*(p+1)=='-')&&(p+2<ec)){
-p+=2;
-if(uchar(*(p-2))<=c&&c<=uchar(*p))
-return sig;
-}
-else if(uchar(*p)==c)return sig;
-}
-return!sig;
-}
-static int singlematch(int c,const char*p,const char*ep){
-switch(*p){
-case'.':return 1;
-case'%':return match_class(c,uchar(*(p+1)));
-case'[':return matchbracketclass(c,p,ep-1);
-default:return(uchar(*p)==c);
-}
-}
-static const char*match(MatchState*ms,const char*s,const char*p);
-static const char*matchbalance(MatchState*ms,const char*s,
-const char*p){
-if(*p==0||*(p+1)==0)
-luaL_error(ms->L,"unbalanced pattern");
-if(*s!=*p)return NULL;
-else{
-int b=*p;
-int e=*(p+1);
-int cont=1;
-while(++s<ms->src_end){
-if(*s==e){
-if(--cont==0)return s+1;
-}
-else if(*s==b)cont++;
-}
-}
-return NULL;
-}
-static const char*max_expand(MatchState*ms,const char*s,
-const char*p,const char*ep){
-ptrdiff_t i=0;
-while((s+i)<ms->src_end&&singlematch(uchar(*(s+i)),p,ep))
-i++;
-while(i>=0){
-const char*res=match(ms,(s+i),ep+1);
-if(res)return res;
-i--;
-}
-return NULL;
-}
-static const char*min_expand(MatchState*ms,const char*s,
-const char*p,const char*ep){
-for(;;){
-const char*res=match(ms,s,ep+1);
-if(res!=NULL)
-return res;
-else if(s<ms->src_end&&singlematch(uchar(*s),p,ep))
-s++;
-else return NULL;
-}
-}
-static const char*start_capture(MatchState*ms,const char*s,
-const char*p,int what){
-const char*res;
-int level=ms->level;
-if(level>=32)luaL_error(ms->L,"too many captures");
-ms->capture[level].init=s;
-ms->capture[level].len=what;
-ms->level=level+1;
-if((res=match(ms,s,p))==NULL)
-ms->level--;
-return res;
-}
-static const char*end_capture(MatchState*ms,const char*s,
-const char*p){
-int l=capture_to_close(ms);
-const char*res;
-ms->capture[l].len=s-ms->capture[l].init;
-if((res=match(ms,s,p))==NULL)
-ms->capture[l].len=(-1);
-return res;
-}
-static const char*match_capture(MatchState*ms,const char*s,int l){
-size_t len;
-l=check_capture(ms,l);
-len=ms->capture[l].len;
-if((size_t)(ms->src_end-s)>=len&&
-memcmp(ms->capture[l].init,s,len)==0)
-return s+len;
-else return NULL;
-}
-static const char*match(MatchState*ms,const char*s,const char*p){
-init:
-switch(*p){
-case'(':{
-if(*(p+1)==')')
-return start_capture(ms,s,p+2,(-2));
-else
-return start_capture(ms,s,p+1,(-1));
-}
-case')':{
-return end_capture(ms,s,p+1);
-}
-case'%':{
-switch(*(p+1)){
-case'b':{
-s=matchbalance(ms,s,p+2);
-if(s==NULL)return NULL;
-p+=4;goto init;
-}
-case'f':{
-const char*ep;char previous;
-p+=2;
-if(*p!='[')
-luaL_error(ms->L,"missing "LUA_QL("[")" after "
-LUA_QL("%%f")" in pattern");
-ep=classend(ms,p);
-previous=(s==ms->src_init)?'\0':*(s-1);
-if(matchbracketclass(uchar(previous),p,ep-1)||
-!matchbracketclass(uchar(*s),p,ep-1))return NULL;
-p=ep;goto init;
-}
-default:{
-if(isdigit(uchar(*(p+1)))){
-s=match_capture(ms,s,uchar(*(p+1)));
-if(s==NULL)return NULL;
-p+=2;goto init;
-}
-goto dflt;
-}
-}
-}
-case'\0':{
-return s;
-}
-case'$':{
-if(*(p+1)=='\0')
-return(s==ms->src_end)?s:NULL;
-else goto dflt;
-}
-default:dflt:{
-const char*ep=classend(ms,p);
-int m=s<ms->src_end&&singlematch(uchar(*s),p,ep);
-switch(*ep){
-case'?':{
-const char*res;
-if(m&&((res=match(ms,s+1,ep+1))!=NULL))
-return res;
-p=ep+1;goto init;
-}
-case'*':{
-return max_expand(ms,s,p,ep);
-}
-case'+':{
-return(m?max_expand(ms,s+1,p,ep):NULL);
-}
-case'-':{
-return min_expand(ms,s,p,ep);
-}
-default:{
-if(!m)return NULL;
-s++;p=ep;goto init;
-}
-}
-}
-}
-}
-static const char*lmemfind(const char*s1,size_t l1,
-const char*s2,size_t l2){
-if(l2==0)return s1;
-else if(l2>l1)return NULL;
-else{
-const char*init;
-l2--;
-l1=l1-l2;
-while(l1>0&&(init=(const char*)memchr(s1,*s2,l1))!=NULL){
-init++;
-if(memcmp(init,s2+1,l2)==0)
-return init-1;
-else{
-l1-=init-s1;
-s1=init;
-}
-}
-return NULL;
-}
-}
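Editor's sketch (not part of the diff): lmemfind() above is the memchr()+memcmp() scan used when string.find is asked for a plain substring search instead of a pattern match. A standalone copy with a usage example; the _demo name is hypothetical.

#include <stdio.h>
#include <string.h>

/* Equivalent of lmemfind() above: memchr() locates candidates for the first
   pattern byte, memcmp() checks the rest. */
static const char *lmemfind_demo(const char *s1, size_t l1,
                                 const char *s2, size_t l2) {
  if (l2 == 0) return s1;
  if (l2 > l1) return NULL;
  l2--;                 /* first char is handled by memchr below */
  l1 = l1 - l2;
  while (l1 > 0) {
    const char *init = memchr(s1, *s2, l1);
    if (init == NULL) return NULL;
    init++;
    if (memcmp(init, s2 + 1, l2) == 0) return init - 1;
    l1 -= init - s1;
    s1 = init;
  }
  return NULL;
}

int main(void) {
  const char *hay = "hello, world";
  const char *hit = lmemfind_demo(hay, strlen(hay), "world", 5);
  printf("%s\n", hit ? hit : "(not found)");   /* prints "world" */
  return 0;
}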
-static void push_onecapture(MatchState*ms,int i,const char*s,
-const char*e){
-if(i>=ms->level){
-if(i==0)
-lua_pushlstring(ms->L,s,e-s);
-else
-luaL_error(ms->L,"invalid capture index");
-}
-else{
-ptrdiff_t l=ms->capture[i].len;
-if(l==(-1))luaL_error(ms->L,"unfinished capture");
-if(l==(-2))
-lua_pushinteger(ms->L,ms->capture[i].init-ms->src_init+1);
-else
-lua_pushlstring(ms->L,ms->capture[i].init,l);
-}
-}
-static int push_captures(MatchState*ms,const char*s,const char*e){
-int i;
-int nlevels=(ms->level==0&&s)?1:ms->level;
-luaL_checkstack(ms->L,nlevels,"too many captures");
-for(i=0;i<nlevels;i++)
-push_onecapture(ms,i,s,e);
-return nlevels;
-}
-static int str_find_aux(lua_State*L,int find){
-size_t l1,l2;
-const char*s=luaL_checklstring(L,1,&l1);
-const char*p=luaL_checklstring(L,2,&l2);
-ptrdiff_t init=posrelat(luaL_optinteger(L,3,1),l1)-1;
-if(init<0)init=0;
-else if((size_t)(init)>l1)init=(ptrdiff_t)l1;
-if(find&&(lua_toboolean(L,4)||
-strpbrk(p,"^$*+?.([%-")==NULL)){
-const char*s2=lmemfind(s+init,l1-init,p,l2);
-if(s2){
-lua_pushinteger(L,s2-s+1);
-lua_pushinteger(L,s2-s+l2);
-return 2;
-}
-}
-else{
-MatchState ms;
-int anchor=(*p=='^')?(p++,1):0;
-const char*s1=s+init;
-ms.L=L;
-ms.src_init=s;
-ms.src_end=s+l1;
-do{
-const char*res;
-ms.level=0;
-if((res=match(&ms,s1,p))!=NULL){
-if(find){
-lua_pushinteger(L,s1-s+1);
-lua_pushinteger(L,res-s);
-return push_captures(&ms,NULL,0)+2;
-}
-else
-return push_captures(&ms,s1,res);
-}
-}while(s1++<ms.src_end&&!anchor);
-}
-lua_pushnil(L);
-return 1;
-}
-static int str_find(lua_State*L){
-return str_find_aux(L,1);
-}
-static int str_match(lua_State*L){
-return str_find_aux(L,0);
-}
-static int gmatch_aux(lua_State*L){
-MatchState ms;
-size_t ls;
-const char*s=lua_tolstring(L,lua_upvalueindex(1),&ls);
-const char*p=lua_tostring(L,lua_upvalueindex(2));
-const char*src;
-ms.L=L;
-ms.src_init=s;
-ms.src_end=s+ls;
-for(src=s+(size_t)lua_tointeger(L,lua_upvalueindex(3));
-src<=ms.src_end;
-src++){
-const char*e;
-ms.level=0;
-if((e=match(&ms,src,p))!=NULL){
-lua_Integer newstart=e-s;
-if(e==src)newstart++;
-lua_pushinteger(L,newstart);
-lua_replace(L,lua_upvalueindex(3));
-return push_captures(&ms,src,e);
-}
-}
-return 0;
-}
-static int gmatch(lua_State*L){
-luaL_checkstring(L,1);
-luaL_checkstring(L,2);
-lua_settop(L,2);
-lua_pushinteger(L,0);
-lua_pushcclosure(L,gmatch_aux,3);
-return 1;
-}
-static void add_s(MatchState*ms,luaL_Buffer*b,const char*s,
-const char*e){
-size_t l,i;
-const char*news=lua_tolstring(ms->L,3,&l);
-for(i=0;i<l;i++){
-if(news[i]!='%')
-luaL_addchar(b,news[i]);
-else{
-i++;
-if(!isdigit(uchar(news[i])))
-luaL_addchar(b,news[i]);
-else if(news[i]=='0')
-luaL_addlstring(b,s,e-s);
-else{
-push_onecapture(ms,news[i]-'1',s,e);
-luaL_addvalue(b);
-}
-}
-}
-}
-static void add_value(MatchState*ms,luaL_Buffer*b,const char*s,
-const char*e){
-lua_State*L=ms->L;
-switch(lua_type(L,3)){
-case 3:
-case 4:{
-add_s(ms,b,s,e);
-return;
-}
-case 6:{
-int n;
-lua_pushvalue(L,3);
-n=push_captures(ms,s,e);
-lua_call(L,n,1);
-break;
-}
-case 5:{
-push_onecapture(ms,0,s,e);
-lua_gettable(L,3);
-break;
-}
-}
-if(!lua_toboolean(L,-1)){
-lua_pop(L,1);
-lua_pushlstring(L,s,e-s);
-}
-else if(!lua_isstring(L,-1))
-luaL_error(L,"invalid replacement value (a %s)",luaL_typename(L,-1));
-luaL_addvalue(b);
-}
-static int str_gsub(lua_State*L){
-size_t srcl;
-const char*src=luaL_checklstring(L,1,&srcl);
-const char*p=luaL_checkstring(L,2);
-int tr=lua_type(L,3);
-int max_s=luaL_optint(L,4,srcl+1);
-int anchor=(*p=='^')?(p++,1):0;
-int n=0;
-MatchState ms;
-luaL_Buffer b;
-luaL_argcheck(L,tr==3||tr==4||
-tr==6||tr==5,3,
-"string/function/table expected");
-luaL_buffinit(L,&b);
-ms.L=L;
-ms.src_init=src;
-ms.src_end=src+srcl;
-while(n<max_s){
-const char*e;
-ms.level=0;
-e=match(&ms,src,p);
-if(e){
-n++;
-add_value(&ms,&b,src,e);
-}
-if(e&&e>src)
-src=e;
-else if(src<ms.src_end)
-luaL_addchar(&b,*src++);
-else break;
-if(anchor)break;
-}
-luaL_addlstring(&b,src,ms.src_end-src);
-luaL_pushresult(&b);
-lua_pushinteger(L,n);
-return 2;
-}
-static void addquoted(lua_State*L,luaL_Buffer*b,int arg){
-size_t l;
-const char*s=luaL_checklstring(L,arg,&l);
-luaL_addchar(b,'"');
-while(l--){
-switch(*s){
-case'"':case'\\':case'\n':{
-luaL_addchar(b,'\\');
-luaL_addchar(b,*s);
-break;
-}
-case'\r':{
-luaL_addlstring(b,"\\r",2);
-break;
-}
-case'\0':{
-luaL_addlstring(b,"\\000",4);
-break;
-}
-default:{
-luaL_addchar(b,*s);
-break;
-}
-}
-s++;
-}
-luaL_addchar(b,'"');
-}
-static const char*scanformat(lua_State*L,const char*strfrmt,char*form){
-const char*p=strfrmt;
-while(*p!='\0'&&strchr("-+ #0",*p)!=NULL)p++;
-if((size_t)(p-strfrmt)>=sizeof("-+ #0"))
-luaL_error(L,"invalid format (repeated flags)");
-if(isdigit(uchar(*p)))p++;
-if(isdigit(uchar(*p)))p++;
-if(*p=='.'){
-p++;
-if(isdigit(uchar(*p)))p++;
-if(isdigit(uchar(*p)))p++;
-}
-if(isdigit(uchar(*p)))
-luaL_error(L,"invalid format (width or precision too long)");
-*(form++)='%';
-strncpy(form,strfrmt,p-strfrmt+1);
-form+=p-strfrmt+1;
-*form='\0';
-return p;
-}
-static void addintlen(char*form){
-size_t l=strlen(form);
-char spec=form[l-1];
-strcpy(form+l-1,"l");
-form[l+sizeof("l")-2]=spec;
-form[l+sizeof("l")-1]='\0';
-}
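Editor's sketch (not part of the diff): scanformat() and addintlen() above rebuild each string.format directive as a C format spec and splice in an 'l' length modifier so integer conversions are printed as long. A hypothetical demo of that splice:

#include <stdio.h>
#include <string.h>

int main(void) {
  char form[32] = "%7d";       /* directive as scanformat() would copy it */
  size_t l = strlen(form);
  char spec = form[l - 1];
  form[l - 1] = 'l';           /* insert the length modifier ... */
  form[l] = spec;              /* ... and put the conversion back: "%7ld" */
  form[l + 1] = '\0';
  char buff[64];
  sprintf(buff, form, (long)12345);
  printf("[%s] -> [%s]\n", form, buff);   /* [%7ld] -> [  12345] */
  return 0;
}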
-static int str_format(lua_State*L){
-int top=lua_gettop(L);
-int arg=1;
-size_t sfl;
-const char*strfrmt=luaL_checklstring(L,arg,&sfl);
-const char*strfrmt_end=strfrmt+sfl;
-luaL_Buffer b;
-luaL_buffinit(L,&b);
-while(strfrmt<strfrmt_end){
-if(*strfrmt!='%')
-luaL_addchar(&b,*strfrmt++);
-else if(*++strfrmt=='%')
-luaL_addchar(&b,*strfrmt++);
-else{
-char form[(sizeof("-+ #0")+sizeof("l")+10)];
-char buff[512];
-if(++arg>top)
-luaL_argerror(L,arg,"no value");
-strfrmt=scanformat(L,strfrmt,form);
-switch(*strfrmt++){
-case'c':{
-sprintf(buff,form,(int)luaL_checknumber(L,arg));
-break;
-}
-case'd':case'i':{
-addintlen(form);
-sprintf(buff,form,(long)luaL_checknumber(L,arg));
-break;
-}
-case'o':case'u':case'x':case'X':{
-addintlen(form);
-sprintf(buff,form,(unsigned long)luaL_checknumber(L,arg));
-break;
-}
-case'e':case'E':case'f':
-case'g':case'G':{
-sprintf(buff,form,(double)luaL_checknumber(L,arg));
-break;
-}
-case'q':{
-addquoted(L,&b,arg);
-continue;
-}
-case's':{
-size_t l;
-const char*s=luaL_checklstring(L,arg,&l);
-if(!strchr(form,'.')&&l>=100){
-lua_pushvalue(L,arg);
-luaL_addvalue(&b);
-continue;
-}
-else{
-sprintf(buff,form,s);
-break;
-}
-}
-default:{
-return luaL_error(L,"invalid option "LUA_QL("%%%c")" to "
-LUA_QL("format"),*(strfrmt-1));
-}
-}
-luaL_addlstring(&b,buff,strlen(buff));
-}
-}
-luaL_pushresult(&b);
-return 1;
-}
-static const luaL_Reg strlib[]={
-{"byte",str_byte},
-{"char",str_char},
-{"find",str_find},
-{"format",str_format},
-{"gmatch",gmatch},
-{"gsub",str_gsub},
-{"lower",str_lower},
-{"match",str_match},
-{"rep",str_rep},
-{"sub",str_sub},
-{"upper",str_upper},
-{NULL,NULL}
-};
-static void createmetatable(lua_State*L){
-lua_createtable(L,0,1);
-lua_pushliteral(L,"");
-lua_pushvalue(L,-2);
-lua_setmetatable(L,-2);
-lua_pop(L,1);
-lua_pushvalue(L,-2);
-lua_setfield(L,-2,"__index");
-lua_pop(L,1);
-}
-static int luaopen_string(lua_State*L){
-luaL_register(L,"string",strlib);
-createmetatable(L);
-return 1;
-}
-static const luaL_Reg lualibs[]={
-{"",luaopen_base},
-{"table",luaopen_table},
-{"io",luaopen_io},
-{"os",luaopen_os},
-{"string",luaopen_string},
-{NULL,NULL}
-};
-static void luaL_openlibs(lua_State*L){
-const luaL_Reg*lib=lualibs;
-for(;lib->func;lib++){
-lua_pushcfunction(L,lib->func);
-lua_pushstring(L,lib->name);
-lua_call(L,1,0);
-}
-}
-typedef unsigned int UB;
-static UB barg(lua_State*L,int idx){
-union{lua_Number n;U64 b;}bn;
-bn.n=lua_tonumber(L,idx)+6755399441055744.0;
-if(bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number");
-return(UB)bn.b;
-}
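Editor's sketch (not part of the diff): barg() above converts a lua_Number to a 32-bit operand by adding 2^52+2^51 (6755399441055744.0) and reading the low mantissa bits back through a union. A hypothetical standalone demo of the trick, assuming IEEE-754 doubles:

#include <stdio.h>

typedef unsigned long long U64_demo;

/* Adding 2^52+2^51 forces the value into [2^52, 2^53), where doubles have unit
   spacing, so the low 32 bits of the stored mantissa equal the operand mod 2^32. */
static unsigned int tobit_demo(double n) {
  union { double d; U64_demo b; } u;
  u.d = n + 6755399441055744.0;   /* 2^52 + 2^51 */
  return (unsigned int)u.b;       /* low 32 bits of the representation */
}

int main(void) {
  printf("%u\n", tobit_demo(1.0));          /* 1 */
  printf("%u\n", tobit_demo(-1.0));         /* 4294967295, i.e. 0xffffffff */
  printf("%u\n", tobit_demo(4294967296.0)); /* 2^32 wraps to 0 */
  return 0;
}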
-#define BRET(b)lua_pushnumber(L,(lua_Number)(int)(b));return 1;
-static int tobit(lua_State*L){
-BRET(barg(L,1))}
-static int bnot(lua_State*L){
-BRET(~barg(L,1))}
-static int band(lua_State*L){
-int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)}
-static int bor(lua_State*L){
-int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)}
-static int bxor(lua_State*L){
-int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)}
-static int lshift(lua_State*L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)}
-static int rshift(lua_State*L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)}
-static int arshift(lua_State*L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)}
-static int rol(lua_State*L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))}
-static int ror(lua_State*L){
-UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))}
-static int bswap(lua_State*L){
-UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)}
-static int tohex(lua_State*L){
-UB b=barg(L,1);
-int n=lua_isnone(L,2)?8:(int)barg(L,2);
-const char*hexdigits="0123456789abcdef";
-char buf[8];
-int i;
-if(n<0){n=-n;hexdigits="0123456789ABCDEF";}
-if(n>8)n=8;
-for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;}
-lua_pushlstring(L,buf,(size_t)n);
-return 1;
-}
-static const struct luaL_Reg bitlib[]={
-{"tobit",tobit},
-{"bnot",bnot},
-{"band",band},
-{"bor",bor},
-{"bxor",bxor},
-{"lshift",lshift},
-{"rshift",rshift},
-{"arshift",arshift},
-{"rol",rol},
-{"ror",ror},
-{"bswap",bswap},
-{"tohex",tohex},
-{NULL,NULL}
-};
-int main(int argc,char**argv){
-lua_State*L=luaL_newstate();
-int i;
-luaL_openlibs(L);
-luaL_register(L,"bit",bitlib);
-if(argc<2)return sizeof(void*);
-lua_createtable(L,0,1);
-lua_pushstring(L,argv[1]);
-lua_rawseti(L,-2,0);
-lua_setglobal(L,"arg");
-if(luaL_loadfile(L,argv[1]))
-goto err;
-for(i=2;i<argc;i++)
-lua_pushstring(L,argv[i]);
-if(lua_pcall(L,argc-2,0,0)){
-err:
-fprintf(stderr,"Error: %s\n",lua_tostring(L,-1));
-return 1;
-}
-lua_close(L);
-return 0;
-}
+/* This is a heavily customized and minimized copy of Lua 5.1.5. */
+/* It's only used to build LuaJIT. It does NOT have all standard functions! */
+/******************************************************************************
+* Copyright (C) 1994-2012 Lua.org, PUC-Rio. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files (the
+* "Software"), to deal in the Software without restriction, including
+* without limitation the rights to use, copy, modify, merge, publish,
+* distribute, sublicense, and/or sell copies of the Software, and to
+* permit persons to whom the Software is furnished to do so, subject to
+* the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+#ifdef _MSC_VER
+typedef unsigned __int64 U64;
+#else
+typedef unsigned long long U64;
+#endif
+#include <stddef.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <math.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <setjmp.h>
+#include <errno.h>
+#include <time.h>
+typedef enum{
+TM_INDEX,
+TM_NEWINDEX,
+TM_GC,
+TM_MODE,
+TM_EQ,
+TM_ADD,
+TM_SUB,
+TM_MUL,
+TM_DIV,
+TM_MOD,
+TM_POW,
+TM_UNM,
+TM_LEN,
+TM_LT,
+TM_LE,
+TM_CONCAT,
+TM_CALL,
+TM_N
+}TMS;
+enum OpMode{iABC,iABx,iAsBx};
+typedef enum{
+OP_MOVE,
+OP_LOADK,
+OP_LOADBOOL,
+OP_LOADNIL,
+OP_GETUPVAL,
+OP_GETGLOBAL,
+OP_GETTABLE,
+OP_SETGLOBAL,
+OP_SETUPVAL,
+OP_SETTABLE,
+OP_NEWTABLE,
+OP_SELF,
+OP_ADD,
+OP_SUB,
+OP_MUL,
+OP_DIV,
+OP_MOD,
+OP_POW,
+OP_UNM,
+OP_NOT,
+OP_LEN,
+OP_CONCAT,
+OP_JMP,
+OP_EQ,
+OP_LT,
+OP_LE,
+OP_TEST,
+OP_TESTSET,
+OP_CALL,
+OP_TAILCALL,
+OP_RETURN,
+OP_FORLOOP,
+OP_FORPREP,
+OP_TFORLOOP,
+OP_SETLIST,
+OP_CLOSE,
+OP_CLOSURE,
+OP_VARARG
+}OpCode;
+enum OpArgMask{
+OpArgN,
+OpArgU,
+OpArgR,
+OpArgK
+};
+typedef enum{
+VVOID,
+VNIL,
+VTRUE,
+VFALSE,
+VK,
+VKNUM,
+VLOCAL,
+VUPVAL,
+VGLOBAL,
+VINDEXED,
+VJMP,
+VRELOCABLE,
+VNONRELOC,
+VCALL,
+VVARARG
+}expkind;
+enum RESERVED{
+TK_AND=257,TK_BREAK,
+TK_DO,TK_ELSE,TK_ELSEIF,TK_END,TK_FALSE,TK_FOR,TK_FUNCTION,
+TK_IF,TK_IN,TK_LOCAL,TK_NIL,TK_NOT,TK_OR,TK_REPEAT,
+TK_RETURN,TK_THEN,TK_TRUE,TK_UNTIL,TK_WHILE,
+TK_CONCAT,TK_DOTS,TK_EQ,TK_GE,TK_LE,TK_NE,TK_NUMBER,
+TK_NAME,TK_STRING,TK_EOS
+};
+typedef enum BinOpr{
+OPR_ADD,OPR_SUB,OPR_MUL,OPR_DIV,OPR_MOD,OPR_POW,
+OPR_CONCAT,
+OPR_NE,OPR_EQ,
+OPR_LT,OPR_LE,OPR_GT,OPR_GE,
+OPR_AND,OPR_OR,
+OPR_NOBINOPR
+}BinOpr;
+typedef enum UnOpr{OPR_MINUS,OPR_NOT,OPR_LEN,OPR_NOUNOPR}UnOpr;
+#define LUA_QL(x)"'"x"'"
+#define luai_apicheck(L,o){(void)L;}
+#define lua_number2str(s,n)sprintf((s),"%.14g",(n))
+#define lua_str2number(s,p)strtod((s),(p))
+#define luai_numadd(a,b)((a)+(b))
+#define luai_numsub(a,b)((a)-(b))
+#define luai_nummul(a,b)((a)*(b))
+#define luai_numdiv(a,b)((a)/(b))
+#define luai_nummod(a,b)((a)-floor((a)/(b))*(b))
+#define luai_numpow(a,b)(pow(a,b))
+#define luai_numunm(a)(-(a))
+#define luai_numeq(a,b)((a)==(b))
+#define luai_numlt(a,b)((a)<(b))
+#define luai_numle(a,b)((a)<=(b))
+#define luai_numisnan(a)(!luai_numeq((a),(a)))
+#define lua_number2int(i,d)((i)=(int)(d))
+#define lua_number2integer(i,d)((i)=(lua_Integer)(d))
+#define LUAI_THROW(L,c)longjmp((c)->b,1)
+#define LUAI_TRY(L,c,a)if(setjmp((c)->b)==0){a}
+#define lua_pclose(L,file)((void)((void)L,file),0)
+#define lua_upvalueindex(i)((-10002)-(i))
+typedef struct lua_State lua_State;
+typedef int(*lua_CFunction)(lua_State*L);
+typedef const char*(*lua_Reader)(lua_State*L,void*ud,size_t*sz);
+typedef void*(*lua_Alloc)(void*ud,void*ptr,size_t osize,size_t nsize);
+typedef double lua_Number;
+typedef ptrdiff_t lua_Integer;
+static void lua_settop(lua_State*L,int idx);
+static int lua_type(lua_State*L,int idx);
+static const char* lua_tolstring(lua_State*L,int idx,size_t*len);
+static size_t lua_objlen(lua_State*L,int idx);
+static void lua_pushlstring(lua_State*L,const char*s,size_t l);
+static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n);
+static void lua_createtable(lua_State*L,int narr,int nrec);
+static void lua_setfield(lua_State*L,int idx,const char*k);
+#define lua_pop(L,n)lua_settop(L,-(n)-1)
+#define lua_newtable(L)lua_createtable(L,0,0)
+#define lua_pushcfunction(L,f)lua_pushcclosure(L,(f),0)
+#define lua_strlen(L,i)lua_objlen(L,(i))
+#define lua_isfunction(L,n)(lua_type(L,(n))==6)
+#define lua_istable(L,n)(lua_type(L,(n))==5)
+#define lua_isnil(L,n)(lua_type(L,(n))==0)
+#define lua_isboolean(L,n)(lua_type(L,(n))==1)
+#define lua_isnone(L,n)(lua_type(L,(n))==(-1))
+#define lua_isnoneornil(L,n)(lua_type(L,(n))<=0)
+#define lua_pushliteral(L,s)lua_pushlstring(L,""s,(sizeof(s)/sizeof(char))-1)
+#define lua_setglobal(L,s)lua_setfield(L,(-10002),(s))
+#define lua_tostring(L,i)lua_tolstring(L,(i),NULL)
+typedef struct lua_Debug lua_Debug;
+typedef void(*lua_Hook)(lua_State*L,lua_Debug*ar);
+struct lua_Debug{
+int event;
+const char*name;
+const char*namewhat;
+const char*what;
+const char*source;
+int currentline;
+int nups;
+int linedefined;
+int lastlinedefined;
+char short_src[60];
+int i_ci;
+};
+typedef unsigned int lu_int32;
+typedef size_t lu_mem;
+typedef ptrdiff_t l_mem;
+typedef unsigned char lu_byte;
+#define IntPoint(p)((unsigned int)(lu_mem)(p))
+typedef union{double u;void*s;long l;}L_Umaxalign;
+typedef double l_uacNumber;
+#define check_exp(c,e)(e)
+#define UNUSED(x)((void)(x))
+#define cast(t,exp)((t)(exp))
+#define cast_byte(i)cast(lu_byte,(i))
+#define cast_num(i)cast(lua_Number,(i))
+#define cast_int(i)cast(int,(i))
+typedef lu_int32 Instruction;
+#define condhardstacktests(x)((void)0)
+typedef union GCObject GCObject;
+typedef struct GCheader{
+GCObject*next;lu_byte tt;lu_byte marked;
+}GCheader;
+typedef union{
+GCObject*gc;
+void*p;
+lua_Number n;
+int b;
+}Value;
+typedef struct lua_TValue{
+Value value;int tt;
+}TValue;
+#define ttisnil(o)(ttype(o)==0)
+#define ttisnumber(o)(ttype(o)==3)
+#define ttisstring(o)(ttype(o)==4)
+#define ttistable(o)(ttype(o)==5)
+#define ttisfunction(o)(ttype(o)==6)
+#define ttisboolean(o)(ttype(o)==1)
+#define ttisuserdata(o)(ttype(o)==7)
+#define ttisthread(o)(ttype(o)==8)
+#define ttislightuserdata(o)(ttype(o)==2)
+#define ttype(o)((o)->tt)
+#define gcvalue(o)check_exp(iscollectable(o),(o)->value.gc)
+#define pvalue(o)check_exp(ttislightuserdata(o),(o)->value.p)
+#define nvalue(o)check_exp(ttisnumber(o),(o)->value.n)
+#define rawtsvalue(o)check_exp(ttisstring(o),&(o)->value.gc->ts)
+#define tsvalue(o)(&rawtsvalue(o)->tsv)
+#define rawuvalue(o)check_exp(ttisuserdata(o),&(o)->value.gc->u)
+#define uvalue(o)(&rawuvalue(o)->uv)
+#define clvalue(o)check_exp(ttisfunction(o),&(o)->value.gc->cl)
+#define hvalue(o)check_exp(ttistable(o),&(o)->value.gc->h)
+#define bvalue(o)check_exp(ttisboolean(o),(o)->value.b)
+#define thvalue(o)check_exp(ttisthread(o),&(o)->value.gc->th)
+#define l_isfalse(o)(ttisnil(o)||(ttisboolean(o)&&bvalue(o)==0))
+#define checkconsistency(obj)
+#define checkliveness(g,obj)
+#define setnilvalue(obj)((obj)->tt=0)
+#define setnvalue(obj,x){TValue*i_o=(obj);i_o->value.n=(x);i_o->tt=3;}
+#define setbvalue(obj,x){TValue*i_o=(obj);i_o->value.b=(x);i_o->tt=1;}
+#define setsvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=4;checkliveness(G(L),i_o);}
+#define setuvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=7;checkliveness(G(L),i_o);}
+#define setthvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=8;checkliveness(G(L),i_o);}
+#define setclvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=6;checkliveness(G(L),i_o);}
+#define sethvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=5;checkliveness(G(L),i_o);}
+#define setptvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=(8+1);checkliveness(G(L),i_o);}
+#define setobj(L,obj1,obj2){const TValue*o2=(obj2);TValue*o1=(obj1);o1->value=o2->value;o1->tt=o2->tt;checkliveness(G(L),o1);}
+#define setttype(obj,tt)(ttype(obj)=(tt))
+#define iscollectable(o)(ttype(o)>=4)
+typedef TValue*StkId;
+typedef union TString{
+L_Umaxalign dummy;
+struct{
+GCObject*next;lu_byte tt;lu_byte marked;
+lu_byte reserved;
+unsigned int hash;
+size_t len;
+}tsv;
+}TString;
+#define getstr(ts)cast(const char*,(ts)+1)
+#define svalue(o)getstr(rawtsvalue(o))
+typedef union Udata{
+L_Umaxalign dummy;
+struct{
+GCObject*next;lu_byte tt;lu_byte marked;
+struct Table*metatable;
+struct Table*env;
+size_t len;
+}uv;
+}Udata;
+typedef struct Proto{
+GCObject*next;lu_byte tt;lu_byte marked;
+TValue*k;
+Instruction*code;
+struct Proto**p;
+int*lineinfo;
+struct LocVar*locvars;
+TString**upvalues;
+TString*source;
+int sizeupvalues;
+int sizek;
+int sizecode;
+int sizelineinfo;
+int sizep;
+int sizelocvars;
+int linedefined;
+int lastlinedefined;
+GCObject*gclist;
+lu_byte nups;
+lu_byte numparams;
+lu_byte is_vararg;
+lu_byte maxstacksize;
+}Proto;
+typedef struct LocVar{
+TString*varname;
+int startpc;
+int endpc;
+}LocVar;
+typedef struct UpVal{
+GCObject*next;lu_byte tt;lu_byte marked;
+TValue*v;
+union{
+TValue value;
+struct{
+struct UpVal*prev;
+struct UpVal*next;
+}l;
+}u;
+}UpVal;
+typedef struct CClosure{
+GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env;
+lua_CFunction f;
+TValue upvalue[1];
+}CClosure;
+typedef struct LClosure{
+GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env;
+struct Proto*p;
+UpVal*upvals[1];
+}LClosure;
+typedef union Closure{
+CClosure c;
+LClosure l;
+}Closure;
+#define iscfunction(o)(ttype(o)==6&&clvalue(o)->c.isC)
+typedef union TKey{
+struct{
+Value value;int tt;
+struct Node*next;
+}nk;
+TValue tvk;
+}TKey;
+typedef struct Node{
+TValue i_val;
+TKey i_key;
+}Node;
+typedef struct Table{
+GCObject*next;lu_byte tt;lu_byte marked;
+lu_byte flags;
+lu_byte lsizenode;
+struct Table*metatable;
+TValue*array;
+Node*node;
+Node*lastfree;
+GCObject*gclist;
+int sizearray;
+}Table;
+#define lmod(s,size)(check_exp((size&(size-1))==0,(cast(int,(s)&((size)-1)))))
+#define twoto(x)((size_t)1<<(x))
+#define sizenode(t)(twoto((t)->lsizenode))
+static const TValue luaO_nilobject_;
+#define ceillog2(x)(luaO_log2((x)-1)+1)
+static int luaO_log2(unsigned int x);
+#define gfasttm(g,et,e)((et)==NULL?NULL:((et)->flags&(1u<<(e)))?NULL:luaT_gettm(et,e,(g)->tmname[e]))
+#define fasttm(l,et,e)gfasttm(G(l),et,e)
+static const TValue*luaT_gettm(Table*events,TMS event,TString*ename);
+#define luaM_reallocv(L,b,on,n,e)((cast(size_t,(n)+1)<=((size_t)(~(size_t)0)-2)/(e))?luaM_realloc_(L,(b),(on)*(e),(n)*(e)):luaM_toobig(L))
+#define luaM_freemem(L,b,s)luaM_realloc_(L,(b),(s),0)
+#define luaM_free(L,b)luaM_realloc_(L,(b),sizeof(*(b)),0)
+#define luaM_freearray(L,b,n,t)luaM_reallocv(L,(b),n,0,sizeof(t))
+#define luaM_malloc(L,t)luaM_realloc_(L,NULL,0,(t))
+#define luaM_new(L,t)cast(t*,luaM_malloc(L,sizeof(t)))
+#define luaM_newvector(L,n,t)cast(t*,luaM_reallocv(L,NULL,0,n,sizeof(t)))
+#define luaM_growvector(L,v,nelems,size,t,limit,e)if((nelems)+1>(size))((v)=cast(t*,luaM_growaux_(L,v,&(size),sizeof(t),limit,e)))
+#define luaM_reallocvector(L,v,oldn,n,t)((v)=cast(t*,luaM_reallocv(L,v,oldn,n,sizeof(t))))
+static void*luaM_realloc_(lua_State*L,void*block,size_t oldsize,
+size_t size);
+static void*luaM_toobig(lua_State*L);
+static void*luaM_growaux_(lua_State*L,void*block,int*size,
+size_t size_elem,int limit,
+const char*errormsg);
+typedef struct Zio ZIO;
+#define char2int(c)cast(int,cast(unsigned char,(c)))
+#define zgetc(z)(((z)->n--)>0?char2int(*(z)->p++):luaZ_fill(z))
+typedef struct Mbuffer{
+char*buffer;
+size_t n;
+size_t buffsize;
+}Mbuffer;
+#define luaZ_initbuffer(L,buff)((buff)->buffer=NULL,(buff)->buffsize=0)
+#define luaZ_buffer(buff)((buff)->buffer)
+#define luaZ_sizebuffer(buff)((buff)->buffsize)
+#define luaZ_bufflen(buff)((buff)->n)
+#define luaZ_resetbuffer(buff)((buff)->n=0)
+#define luaZ_resizebuffer(L,buff,size)(luaM_reallocvector(L,(buff)->buffer,(buff)->buffsize,size,char),(buff)->buffsize=size)
+#define luaZ_freebuffer(L,buff)luaZ_resizebuffer(L,buff,0)
+struct Zio{
+size_t n;
+const char*p;
+lua_Reader reader;
+void*data;
+lua_State*L;
+};
+static int luaZ_fill(ZIO*z);
+struct lua_longjmp;
+#define gt(L)(&L->l_gt)
+#define registry(L)(&G(L)->l_registry)
+typedef struct stringtable{
+GCObject**hash;
+lu_int32 nuse;
+int size;
+}stringtable;
+typedef struct CallInfo{
+StkId base;
+StkId func;
+StkId top;
+const Instruction*savedpc;
+int nresults;
+int tailcalls;
+}CallInfo;
+#define curr_func(L)(clvalue(L->ci->func))
+#define ci_func(ci)(clvalue((ci)->func))
+#define f_isLua(ci)(!ci_func(ci)->c.isC)
+#define isLua(ci)(ttisfunction((ci)->func)&&f_isLua(ci))
+typedef struct global_State{
+stringtable strt;
+lua_Alloc frealloc;
+void*ud;
+lu_byte currentwhite;
+lu_byte gcstate;
+int sweepstrgc;
+GCObject*rootgc;
+GCObject**sweepgc;
+GCObject*gray;
+GCObject*grayagain;
+GCObject*weak;
+GCObject*tmudata;
+Mbuffer buff;
+lu_mem GCthreshold;
+lu_mem totalbytes;
+lu_mem estimate;
+lu_mem gcdept;
+int gcpause;
+int gcstepmul;
+lua_CFunction panic;
+TValue l_registry;
+struct lua_State*mainthread;
+UpVal uvhead;
+struct Table*mt[(8+1)];
+TString*tmname[TM_N];
+}global_State;
+struct lua_State{
+GCObject*next;lu_byte tt;lu_byte marked;
+lu_byte status;
+StkId top;
+StkId base;
+global_State*l_G;
+CallInfo*ci;
+const Instruction*savedpc;
+StkId stack_last;
+StkId stack;
+CallInfo*end_ci;
+CallInfo*base_ci;
+int stacksize;
+int size_ci;
+unsigned short nCcalls;
+unsigned short baseCcalls;
+lu_byte hookmask;
+lu_byte allowhook;
+int basehookcount;
+int hookcount;
+lua_Hook hook;
+TValue l_gt;
+TValue env;
+GCObject*openupval;
+GCObject*gclist;
+struct lua_longjmp*errorJmp;
+ptrdiff_t errfunc;
+};
+#define G(L)(L->l_G)
+union GCObject{
+GCheader gch;
+union TString ts;
+union Udata u;
+union Closure cl;
+struct Table h;
+struct Proto p;
+struct UpVal uv;
+struct lua_State th;
+};
+#define rawgco2ts(o)check_exp((o)->gch.tt==4,&((o)->ts))
+#define gco2ts(o)(&rawgco2ts(o)->tsv)
+#define rawgco2u(o)check_exp((o)->gch.tt==7,&((o)->u))
+#define gco2u(o)(&rawgco2u(o)->uv)
+#define gco2cl(o)check_exp((o)->gch.tt==6,&((o)->cl))
+#define gco2h(o)check_exp((o)->gch.tt==5,&((o)->h))
+#define gco2p(o)check_exp((o)->gch.tt==(8+1),&((o)->p))
+#define gco2uv(o)check_exp((o)->gch.tt==(8+2),&((o)->uv))
+#define ngcotouv(o)check_exp((o)==NULL||(o)->gch.tt==(8+2),&((o)->uv))
+#define gco2th(o)check_exp((o)->gch.tt==8,&((o)->th))
+#define obj2gco(v)(cast(GCObject*,(v)))
+static void luaE_freethread(lua_State*L,lua_State*L1);
+#define pcRel(pc,p)(cast(int,(pc)-(p)->code)-1)
+#define getline_(f,pc)(((f)->lineinfo)?(f)->lineinfo[pc]:0)
+#define resethookcount(L)(L->hookcount=L->basehookcount)
+static void luaG_typeerror(lua_State*L,const TValue*o,
+const char*opname);
+static void luaG_runerror(lua_State*L,const char*fmt,...);
+#define luaD_checkstack(L,n)if((char*)L->stack_last-(char*)L->top<=(n)*(int)sizeof(TValue))luaD_growstack(L,n);else condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));
+#define incr_top(L){luaD_checkstack(L,1);L->top++;}
+#define savestack(L,p)((char*)(p)-(char*)L->stack)
+#define restorestack(L,n)((TValue*)((char*)L->stack+(n)))
+#define saveci(L,p)((char*)(p)-(char*)L->base_ci)
+#define restoreci(L,n)((CallInfo*)((char*)L->base_ci+(n)))
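Editor's sketch (not part of the diff): savestack()/restorestack() above keep stack positions as byte offsets rather than raw pointers, because luaD_reallocstack() may move the whole value stack. A hypothetical demo of the same idea on a plain growable array:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

int main(void) {
  size_t cap = 4;
  int *stack = malloc(cap * sizeof *stack);
  if (stack == NULL) return 1;
  int *top = stack + 2;                               /* some live position */
  ptrdiff_t saved = (char *)top - (char *)stack;      /* savestack(): byte offset */
  int *grown = realloc(stack, 4 * cap * sizeof *stack);  /* the array may move here */
  if (grown == NULL) { free(stack); return 1; }
  stack = grown;
  top = (int *)((char *)stack + saved);               /* restorestack(): rebase */
  *top = 42;
  printf("%d\n", *top);
  free(stack);
  return 0;
}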
+typedef void(*Pfunc)(lua_State*L,void*ud);
+static int luaD_poscall(lua_State*L,StkId firstResult);
+static void luaD_reallocCI(lua_State*L,int newsize);
+static void luaD_reallocstack(lua_State*L,int newsize);
+static void luaD_growstack(lua_State*L,int n);
+static void luaD_throw(lua_State*L,int errcode);
+static void*luaM_growaux_(lua_State*L,void*block,int*size,size_t size_elems,
+int limit,const char*errormsg){
+void*newblock;
+int newsize;
+if(*size>=limit/2){
+if(*size>=limit)
+luaG_runerror(L,errormsg);
+newsize=limit;
+}
+else{
+newsize=(*size)*2;
+if(newsize<4)
+newsize=4;
+}
+newblock=luaM_reallocv(L,block,*size,newsize,size_elems);
+*size=newsize;
+return newblock;
+}
+static void*luaM_toobig(lua_State*L){
+luaG_runerror(L,"memory allocation error: block too big");
+return NULL;
+}
+static void*luaM_realloc_(lua_State*L,void*block,size_t osize,size_t nsize){
+global_State*g=G(L);
+block=(*g->frealloc)(g->ud,block,osize,nsize);
+if(block==NULL&&nsize>0)
+luaD_throw(L,4);
+g->totalbytes=(g->totalbytes-osize)+nsize;
+return block;
+}
+#define resetbits(x,m)((x)&=cast(lu_byte,~(m)))
+#define setbits(x,m)((x)|=(m))
+#define testbits(x,m)((x)&(m))
+#define bitmask(b)(1<<(b))
+#define bit2mask(b1,b2)(bitmask(b1)|bitmask(b2))
+#define l_setbit(x,b)setbits(x,bitmask(b))
+#define resetbit(x,b)resetbits(x,bitmask(b))
+#define testbit(x,b)testbits(x,bitmask(b))
+#define set2bits(x,b1,b2)setbits(x,(bit2mask(b1,b2)))
+#define reset2bits(x,b1,b2)resetbits(x,(bit2mask(b1,b2)))
+#define test2bits(x,b1,b2)testbits(x,(bit2mask(b1,b2)))
+#define iswhite(x)test2bits((x)->gch.marked,0,1)
+#define isblack(x)testbit((x)->gch.marked,2)
+#define isgray(x)(!isblack(x)&&!iswhite(x))
+#define otherwhite(g)(g->currentwhite^bit2mask(0,1))
+#define isdead(g,v)((v)->gch.marked&otherwhite(g)&bit2mask(0,1))
+#define changewhite(x)((x)->gch.marked^=bit2mask(0,1))
+#define gray2black(x)l_setbit((x)->gch.marked,2)
+#define valiswhite(x)(iscollectable(x)&&iswhite(gcvalue(x)))
+#define luaC_white(g)cast(lu_byte,(g)->currentwhite&bit2mask(0,1))
+#define luaC_checkGC(L){condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));if(G(L)->totalbytes>=G(L)->GCthreshold)luaC_step(L);}
+#define luaC_barrier(L,p,v){if(valiswhite(v)&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),gcvalue(v));}
+#define luaC_barriert(L,t,v){if(valiswhite(v)&&isblack(obj2gco(t)))luaC_barrierback(L,t);}
+#define luaC_objbarrier(L,p,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),obj2gco(o));}
+#define luaC_objbarriert(L,t,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(t)))luaC_barrierback(L,t);}
+static void luaC_step(lua_State*L);
+static void luaC_link(lua_State*L,GCObject*o,lu_byte tt);
+static void luaC_linkupval(lua_State*L,UpVal*uv);
+static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v);
+static void luaC_barrierback(lua_State*L,Table*t);
+#define sizestring(s)(sizeof(union TString)+((s)->len+1)*sizeof(char))
+#define sizeudata(u)(sizeof(union Udata)+(u)->len)
+#define luaS_new(L,s)(luaS_newlstr(L,s,strlen(s)))
+#define luaS_newliteral(L,s)(luaS_newlstr(L,""s,(sizeof(s)/sizeof(char))-1))
+#define luaS_fix(s)l_setbit((s)->tsv.marked,5)
+static TString*luaS_newlstr(lua_State*L,const char*str,size_t l);
+#define tostring(L,o)((ttype(o)==4)||(luaV_tostring(L,o)))
+#define tonumber(o,n)(ttype(o)==3||(((o)=luaV_tonumber(o,n))!=NULL))
+#define equalobj(L,o1,o2)(ttype(o1)==ttype(o2)&&luaV_equalval(L,o1,o2))
+static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2);
+static const TValue*luaV_tonumber(const TValue*obj,TValue*n);
+static int luaV_tostring(lua_State*L,StkId obj);
+static void luaV_execute(lua_State*L,int nexeccalls);
+static void luaV_concat(lua_State*L,int total,int last);
+static const TValue luaO_nilobject_={{NULL},0};
+static int luaO_int2fb(unsigned int x){
+int e=0;
+while(x>=16){
+x=(x+1)>>1;
+e++;
+}
+if(x<8)return x;
+else return((e+1)<<3)|(cast_int(x)-8);
+}
+static int luaO_fb2int(int x){
+int e=(x>>3)&31;
+if(e==0)return x;
+else return((x&7)+8)<<(e-1);
+}
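Editor's sketch (not part of the diff): luaO_int2fb()/luaO_fb2int() above compress a table-size hint into one "floating point byte" (eeeeexxx, decoded as (8+xxx)<<(eeeee-1)), trading precision for space in the NEWTABLE operands. A hypothetical round-trip demo:

#include <stdio.h>

/* Copies of the two conversions above, under demo names. */
static int int2fb_demo(unsigned int x) {
  int e = 0;
  while (x >= 16) { x = (x + 1) >> 1; e++; }
  if (x < 8) return (int)x;
  return ((e + 1) << 3) | ((int)x - 8);
}

static int fb2int_demo(int x) {
  int e = (x >> 3) & 31;
  if (e == 0) return x;
  return ((x & 7) + 8) << (e - 1);
}

int main(void) {
  unsigned int sizes[] = { 5, 17, 100, 1000 };
  for (int i = 0; i < 4; i++) {
    int fb = int2fb_demo(sizes[i]);
    /* small sizes round-trip exactly; larger ones round up (e.g. 1000 -> 1024) */
    printf("%u -> byte %d -> back to %d\n", sizes[i], fb, fb2int_demo(fb));
  }
  return 0;
}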
+static int luaO_log2(unsigned int x){
+static const lu_byte log_2[256]={
+0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+};
+int l=-1;
+while(x>=256){l+=8;x>>=8;}
+return l+log_2[x];
+}
+static int luaO_rawequalObj(const TValue*t1,const TValue*t2){
+if(ttype(t1)!=ttype(t2))return 0;
+else switch(ttype(t1)){
+case 0:
+return 1;
+case 3:
+return luai_numeq(nvalue(t1),nvalue(t2));
+case 1:
+return bvalue(t1)==bvalue(t2);
+case 2:
+return pvalue(t1)==pvalue(t2);
+default:
+return gcvalue(t1)==gcvalue(t2);
+}
+}
+static int luaO_str2d(const char*s,lua_Number*result){
+char*endptr;
+*result=lua_str2number(s,&endptr);
+if(endptr==s)return 0;
+if(*endptr=='x'||*endptr=='X')
+*result=cast_num(strtoul(s,&endptr,16));
+if(*endptr=='\0')return 1;
+while(isspace(cast(unsigned char,*endptr)))endptr++;
+if(*endptr!='\0')return 0;
+return 1;
+}
+static void pushstr(lua_State*L,const char*str){
+setsvalue(L,L->top,luaS_new(L,str));
+incr_top(L);
+}
+static const char*luaO_pushvfstring(lua_State*L,const char*fmt,va_list argp){
+int n=1;
+pushstr(L,"");
+for(;;){
+const char*e=strchr(fmt,'%');
+if(e==NULL)break;
+setsvalue(L,L->top,luaS_newlstr(L,fmt,e-fmt));
+incr_top(L);
+switch(*(e+1)){
+case's':{
+const char*s=va_arg(argp,char*);
+if(s==NULL)s="(null)";
+pushstr(L,s);
+break;
+}
+case'c':{
+char buff[2];
+buff[0]=cast(char,va_arg(argp,int));
+buff[1]='\0';
+pushstr(L,buff);
+break;
+}
+case'd':{
+setnvalue(L->top,cast_num(va_arg(argp,int)));
+incr_top(L);
+break;
+}
+case'f':{
+setnvalue(L->top,cast_num(va_arg(argp,l_uacNumber)));
+incr_top(L);
+break;
+}
+case'p':{
+char buff[4*sizeof(void*)+8];
+sprintf(buff,"%p",va_arg(argp,void*));
+pushstr(L,buff);
+break;
+}
+case'%':{
+pushstr(L,"%");
+break;
+}
+default:{
+char buff[3];
+buff[0]='%';
+buff[1]=*(e+1);
+buff[2]='\0';
+pushstr(L,buff);
+break;
+}
+}
+n+=2;
+fmt=e+2;
+}
+pushstr(L,fmt);
+luaV_concat(L,n+1,cast_int(L->top-L->base)-1);
+L->top-=n;
+return svalue(L->top-1);
+}
+static const char*luaO_pushfstring(lua_State*L,const char*fmt,...){
+const char*msg;
+va_list argp;
+va_start(argp,fmt);
+msg=luaO_pushvfstring(L,fmt,argp);
+va_end(argp);
+return msg;
+}
+static void luaO_chunkid(char*out,const char*source,size_t bufflen){
+if(*source=='='){
+strncpy(out,source+1,bufflen);
+out[bufflen-1]='\0';
+}
+else{
+if(*source=='@'){
+size_t l;
+source++;
+bufflen-=sizeof(" '...' ");
+l=strlen(source);
+strcpy(out,"");
+if(l>bufflen){
+source+=(l-bufflen);
+strcat(out,"...");
+}
+strcat(out,source);
+}
+else{
+size_t len=strcspn(source,"\n\r");
+bufflen-=sizeof(" [string \"...\"] ");
+if(len>bufflen)len=bufflen;
+strcpy(out,"[string \"");
+if(source[len]!='\0'){
+strncat(out,source,len);
+strcat(out,"...");
+}
+else
+strcat(out,source);
+strcat(out,"\"]");
+}
+}
+}
+#define gnode(t,i)(&(t)->node[i])
+#define gkey(n)(&(n)->i_key.nk)
+#define gval(n)(&(n)->i_val)
+#define gnext(n)((n)->i_key.nk.next)
+#define key2tval(n)(&(n)->i_key.tvk)
+static TValue*luaH_setnum(lua_State*L,Table*t,int key);
+static const TValue*luaH_getstr(Table*t,TString*key);
+static TValue*luaH_set(lua_State*L,Table*t,const TValue*key);
+static const char*const luaT_typenames[]={
+"nil","boolean","userdata","number",
+"string","table","function","userdata","thread",
+"proto","upval"
+};
+static void luaT_init(lua_State*L){
+static const char*const luaT_eventname[]={
+"__index","__newindex",
+"__gc","__mode","__eq",
+"__add","__sub","__mul","__div","__mod",
+"__pow","__unm","__len","__lt","__le",
+"__concat","__call"
+};
+int i;
+for(i=0;i<TM_N;i++){
+G(L)->tmname[i]=luaS_new(L,luaT_eventname[i]);
+luaS_fix(G(L)->tmname[i]);
+}
+}
+static const TValue*luaT_gettm(Table*events,TMS event,TString*ename){
+const TValue*tm=luaH_getstr(events,ename);
+if(ttisnil(tm)){
+events->flags|=cast_byte(1u<<event);
+return NULL;
+}
+else return tm;
+}
+static const TValue*luaT_gettmbyobj(lua_State*L,const TValue*o,TMS event){
+Table*mt;
+switch(ttype(o)){
+case 5:
+mt=hvalue(o)->metatable;
+break;
+case 7:
+mt=uvalue(o)->metatable;
+break;
+default:
+mt=G(L)->mt[ttype(o)];
+}
+return(mt?luaH_getstr(mt,G(L)->tmname[event]):(&luaO_nilobject_));
+}
+#define sizeCclosure(n)(cast(int,sizeof(CClosure))+cast(int,sizeof(TValue)*((n)-1)))
+#define sizeLclosure(n)(cast(int,sizeof(LClosure))+cast(int,sizeof(TValue*)*((n)-1)))
+static Closure*luaF_newCclosure(lua_State*L,int nelems,Table*e){
+Closure*c=cast(Closure*,luaM_malloc(L,sizeCclosure(nelems)));
+luaC_link(L,obj2gco(c),6);
+c->c.isC=1;
+c->c.env=e;
+c->c.nupvalues=cast_byte(nelems);
+return c;
+}
+static Closure*luaF_newLclosure(lua_State*L,int nelems,Table*e){
+Closure*c=cast(Closure*,luaM_malloc(L,sizeLclosure(nelems)));
+luaC_link(L,obj2gco(c),6);
+c->l.isC=0;
+c->l.env=e;
+c->l.nupvalues=cast_byte(nelems);
+while(nelems--)c->l.upvals[nelems]=NULL;
+return c;
+}
+static UpVal*luaF_newupval(lua_State*L){
+UpVal*uv=luaM_new(L,UpVal);
+luaC_link(L,obj2gco(uv),(8+2));
+uv->v=&uv->u.value;
+setnilvalue(uv->v);
+return uv;
+}
+static UpVal*luaF_findupval(lua_State*L,StkId level){
+global_State*g=G(L);
+GCObject**pp=&L->openupval;
+UpVal*p;
+UpVal*uv;
+while(*pp!=NULL&&(p=ngcotouv(*pp))->v>=level){
+if(p->v==level){
+if(isdead(g,obj2gco(p)))
+changewhite(obj2gco(p));
+return p;
+}
+pp=&p->next;
+}
+uv=luaM_new(L,UpVal);
+uv->tt=(8+2);
+uv->marked=luaC_white(g);
+uv->v=level;
+uv->next=*pp;
+*pp=obj2gco(uv);
+uv->u.l.prev=&g->uvhead;
+uv->u.l.next=g->uvhead.u.l.next;
+uv->u.l.next->u.l.prev=uv;
+g->uvhead.u.l.next=uv;
+return uv;
+}
+static void unlinkupval(UpVal*uv){
+uv->u.l.next->u.l.prev=uv->u.l.prev;
+uv->u.l.prev->u.l.next=uv->u.l.next;
+}
+static void luaF_freeupval(lua_State*L,UpVal*uv){
+if(uv->v!=&uv->u.value)
+unlinkupval(uv);
+luaM_free(L,uv);
+}
+static void luaF_close(lua_State*L,StkId level){
+UpVal*uv;
+global_State*g=G(L);
+while(L->openupval!=NULL&&(uv=ngcotouv(L->openupval))->v>=level){
+GCObject*o=obj2gco(uv);
+L->openupval=uv->next;
+if(isdead(g,o))
+luaF_freeupval(L,uv);
+else{
+unlinkupval(uv);
+setobj(L,&uv->u.value,uv->v);
+uv->v=&uv->u.value;
+luaC_linkupval(L,uv);
+}
+}
+}
+static Proto*luaF_newproto(lua_State*L){
+Proto*f=luaM_new(L,Proto);
+luaC_link(L,obj2gco(f),(8+1));
+f->k=NULL;
+f->sizek=0;
+f->p=NULL;
+f->sizep=0;
+f->code=NULL;
+f->sizecode=0;
+f->sizelineinfo=0;
+f->sizeupvalues=0;
+f->nups=0;
+f->upvalues=NULL;
+f->numparams=0;
+f->is_vararg=0;
+f->maxstacksize=0;
+f->lineinfo=NULL;
+f->sizelocvars=0;
+f->locvars=NULL;
+f->linedefined=0;
+f->lastlinedefined=0;
+f->source=NULL;
+return f;
+}
+static void luaF_freeproto(lua_State*L,Proto*f){
+luaM_freearray(L,f->code,f->sizecode,Instruction);
+luaM_freearray(L,f->p,f->sizep,Proto*);
+luaM_freearray(L,f->k,f->sizek,TValue);
+luaM_freearray(L,f->lineinfo,f->sizelineinfo,int);
+luaM_freearray(L,f->locvars,f->sizelocvars,struct LocVar);
+luaM_freearray(L,f->upvalues,f->sizeupvalues,TString*);
+luaM_free(L,f);
+}
+static void luaF_freeclosure(lua_State*L,Closure*c){
+int size=(c->c.isC)?sizeCclosure(c->c.nupvalues):
+sizeLclosure(c->l.nupvalues);
+luaM_freemem(L,c,size);
+}
+#define MASK1(n,p)((~((~(Instruction)0)<<n))<<p)
+#define MASK0(n,p)(~MASK1(n,p))
+#define GET_OPCODE(i)(cast(OpCode,((i)>>0)&MASK1(6,0)))
+#define SET_OPCODE(i,o)((i)=(((i)&MASK0(6,0))|((cast(Instruction,o)<<0)&MASK1(6,0))))
+#define GETARG_A(i)(cast(int,((i)>>(0+6))&MASK1(8,0)))
+#define SETARG_A(i,u)((i)=(((i)&MASK0(8,(0+6)))|((cast(Instruction,u)<<(0+6))&MASK1(8,(0+6)))))
+#define GETARG_B(i)(cast(int,((i)>>(((0+6)+8)+9))&MASK1(9,0)))
+#define SETARG_B(i,b)((i)=(((i)&MASK0(9,(((0+6)+8)+9)))|((cast(Instruction,b)<<(((0+6)+8)+9))&MASK1(9,(((0+6)+8)+9)))))
+#define GETARG_C(i)(cast(int,((i)>>((0+6)+8))&MASK1(9,0)))
+#define SETARG_C(i,b)((i)=(((i)&MASK0(9,((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1(9,((0+6)+8)))))
+#define GETARG_Bx(i)(cast(int,((i)>>((0+6)+8))&MASK1((9+9),0)))
+#define SETARG_Bx(i,b)((i)=(((i)&MASK0((9+9),((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1((9+9),((0+6)+8)))))
+#define GETARG_sBx(i)(GETARG_Bx(i)-(((1<<(9+9))-1)>>1))
+#define SETARG_sBx(i,b)SETARG_Bx((i),cast(unsigned int,(b)+(((1<<(9+9))-1)>>1)))
+#define CREATE_ABC(o,a,b,c)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,b)<<(((0+6)+8)+9))|(cast(Instruction,c)<<((0+6)+8)))
+#define CREATE_ABx(o,a,bc)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,bc)<<((0+6)+8)))
+#define ISK(x)((x)&(1<<(9-1)))
+#define INDEXK(r)((int)(r)&~(1<<(9-1)))
+#define RKASK(x)((x)|(1<<(9-1)))
+static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)];
+#define getBMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>4)&3))
+#define getCMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>2)&3))
+#define testTMode(m)(luaP_opmodes[m]&(1<<7))
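Editor's sketch (not part of the diff): the GETARG_*/CREATE_* macros above fix the 32-bit instruction layout as B:9 | C:9 | A:8 | opcode:6, with the opcode in the least significant bits. A hypothetical demo packing and unpacking an iABC instruction with plain shifts and masks:

#include <stdio.h>

typedef unsigned int Inst;

/* Same field placement as CREATE_ABC above: op at bit 0, A at 6, C at 14, B at 23. */
static Inst create_abc_demo(unsigned op, unsigned a, unsigned b, unsigned c) {
  return (Inst)op | ((Inst)a << 6) | ((Inst)c << 14) | ((Inst)b << 23);
}

int main(void) {
  Inst i = create_abc_demo(12 /* OP_ADD in the enum above */, 0, 1, 2);
  printf("opcode=%u A=%u C=%u B=%u\n",
         i & 0x3f, (i >> 6) & 0xff, (i >> 14) & 0x1ff, (i >> 23) & 0x1ff);
  return 0;
}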
+typedef struct expdesc{
+expkind k;
+union{
+struct{int info,aux;}s;
+lua_Number nval;
+}u;
+int t;
+int f;
+}expdesc;
+typedef struct upvaldesc{
+lu_byte k;
+lu_byte info;
+}upvaldesc;
+struct BlockCnt;
+typedef struct FuncState{
+Proto*f;
+Table*h;
+struct FuncState*prev;
+struct LexState*ls;
+struct lua_State*L;
+struct BlockCnt*bl;
+int pc;
+int lasttarget;
+int jpc;
+int freereg;
+int nk;
+int np;
+short nlocvars;
+lu_byte nactvar;
+upvaldesc upvalues[60];
+unsigned short actvar[200];
+}FuncState;
+static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,
+const char*name);
+struct lua_longjmp{
+struct lua_longjmp*previous;
+jmp_buf b;
+volatile int status;
+};
+static void luaD_seterrorobj(lua_State*L,int errcode,StkId oldtop){
+switch(errcode){
+case 4:{
+setsvalue(L,oldtop,luaS_newliteral(L,"not enough memory"));
+break;
+}
+case 5:{
+setsvalue(L,oldtop,luaS_newliteral(L,"error in error handling"));
+break;
+}
+case 3:
+case 2:{
+setobj(L,oldtop,L->top-1);
+break;
+}
+}
+L->top=oldtop+1;
+}
+static void restore_stack_limit(lua_State*L){
+if(L->size_ci>20000){
+int inuse=cast_int(L->ci-L->base_ci);
+if(inuse+1<20000)
+luaD_reallocCI(L,20000);
+}
+}
+static void resetstack(lua_State*L,int status){
+L->ci=L->base_ci;
+L->base=L->ci->base;
+luaF_close(L,L->base);
+luaD_seterrorobj(L,status,L->base);
+L->nCcalls=L->baseCcalls;
+L->allowhook=1;
+restore_stack_limit(L);
+L->errfunc=0;
+L->errorJmp=NULL;
+}
+static void luaD_throw(lua_State*L,int errcode){
+if(L->errorJmp){
+L->errorJmp->status=errcode;
+LUAI_THROW(L,L->errorJmp);
+}
+else{
+L->status=cast_byte(errcode);
+if(G(L)->panic){
+resetstack(L,errcode);
+G(L)->panic(L);
+}
+exit(EXIT_FAILURE);
+}
+}
+static int luaD_rawrunprotected(lua_State*L,Pfunc f,void*ud){
+struct lua_longjmp lj;
+lj.status=0;
+lj.previous=L->errorJmp;
+L->errorJmp=&lj;
+LUAI_TRY(L,&lj,
+(*f)(L,ud);
+);
+L->errorJmp=lj.previous;
+return lj.status;
+}
+static void correctstack(lua_State*L,TValue*oldstack){
+CallInfo*ci;
+GCObject*up;
+L->top=(L->top-oldstack)+L->stack;
+for(up=L->openupval;up!=NULL;up=up->gch.next)
+gco2uv(up)->v=(gco2uv(up)->v-oldstack)+L->stack;
+for(ci=L->base_ci;ci<=L->ci;ci++){
+ci->top=(ci->top-oldstack)+L->stack;
+ci->base=(ci->base-oldstack)+L->stack;
+ci->func=(ci->func-oldstack)+L->stack;
+}
+L->base=(L->base-oldstack)+L->stack;
+}
+static void luaD_reallocstack(lua_State*L,int newsize){
+TValue*oldstack=L->stack;
+int realsize=newsize+1+5;
+luaM_reallocvector(L,L->stack,L->stacksize,realsize,TValue);
+L->stacksize=realsize;
+L->stack_last=L->stack+newsize;
+correctstack(L,oldstack);
+}
+static void luaD_reallocCI(lua_State*L,int newsize){
+CallInfo*oldci=L->base_ci;
+luaM_reallocvector(L,L->base_ci,L->size_ci,newsize,CallInfo);
+L->size_ci=newsize;
+L->ci=(L->ci-oldci)+L->base_ci;
+L->end_ci=L->base_ci+L->size_ci-1;
+}
+static void luaD_growstack(lua_State*L,int n){
+if(n<=L->stacksize)
+luaD_reallocstack(L,2*L->stacksize);
+else
+luaD_reallocstack(L,L->stacksize+n);
+}
+static CallInfo*growCI(lua_State*L){
+if(L->size_ci>20000)
+luaD_throw(L,5);
+else{
+luaD_reallocCI(L,2*L->size_ci);
+if(L->size_ci>20000)
+luaG_runerror(L,"stack overflow");
+}
+return++L->ci;
+}
+static StkId adjust_varargs(lua_State*L,Proto*p,int actual){
+int i;
+int nfixargs=p->numparams;
+Table*htab=NULL;
+StkId base,fixed;
+for(;actual<nfixargs;++actual)
+setnilvalue(L->top++);
+fixed=L->top-actual;
+base=L->top;
+for(i=0;i<nfixargs;i++){
+setobj(L,L->top++,fixed+i);
+setnilvalue(fixed+i);
+}
+if(htab){
+sethvalue(L,L->top++,htab);
+}
+return base;
+}
+static StkId tryfuncTM(lua_State*L,StkId func){
+const TValue*tm=luaT_gettmbyobj(L,func,TM_CALL);
+StkId p;
+ptrdiff_t funcr=savestack(L,func);
+if(!ttisfunction(tm))
+luaG_typeerror(L,func,"call");
+for(p=L->top;p>func;p--)setobj(L,p,p-1);
+incr_top(L);
+func=restorestack(L,funcr);
+setobj(L,func,tm);
+return func;
+}
+#define inc_ci(L)((L->ci==L->end_ci)?growCI(L):(condhardstacktests(luaD_reallocCI(L,L->size_ci)),++L->ci))
+static int luaD_precall(lua_State*L,StkId func,int nresults){
+LClosure*cl;
+ptrdiff_t funcr;
+if(!ttisfunction(func))
+func=tryfuncTM(L,func);
+funcr=savestack(L,func);
+cl=&clvalue(func)->l;
+L->ci->savedpc=L->savedpc;
+if(!cl->isC){
+CallInfo*ci;
+StkId st,base;
+Proto*p=cl->p;
+luaD_checkstack(L,p->maxstacksize);
+func=restorestack(L,funcr);
+if(!p->is_vararg){
+base=func+1;
+if(L->top>base+p->numparams)
+L->top=base+p->numparams;
+}
+else{
+int nargs=cast_int(L->top-func)-1;
+base=adjust_varargs(L,p,nargs);
+func=restorestack(L,funcr);
+}
+ci=inc_ci(L);
+ci->func=func;
+L->base=ci->base=base;
+ci->top=L->base+p->maxstacksize;
+L->savedpc=p->code;
+ci->tailcalls=0;
+ci->nresults=nresults;
+for(st=L->top;st<ci->top;st++)
+setnilvalue(st);
+L->top=ci->top;
+return 0;
+}
+else{
+CallInfo*ci;
+int n;
+luaD_checkstack(L,20);
+ci=inc_ci(L);
+ci->func=restorestack(L,funcr);
+L->base=ci->base=ci->func+1;
+ci->top=L->top+20;
+ci->nresults=nresults;
+n=(*curr_func(L)->c.f)(L);
+if(n<0)
+return 2;
+else{
+luaD_poscall(L,L->top-n);
+return 1;
+}
+}
+}
+static int luaD_poscall(lua_State*L,StkId firstResult){
+StkId res;
+int wanted,i;
+CallInfo*ci;
+ci=L->ci--;
+res=ci->func;
+wanted=ci->nresults;
+L->base=(ci-1)->base;
+L->savedpc=(ci-1)->savedpc;
+for(i=wanted;i!=0&&firstResult<L->top;i--)
+setobj(L,res++,firstResult++);
+while(i-->0)
+setnilvalue(res++);
+L->top=res;
+return(wanted-(-1));
+}
+static void luaD_call(lua_State*L,StkId func,int nResults){
+if(++L->nCcalls>=200){
+if(L->nCcalls==200)
+luaG_runerror(L,"C stack overflow");
+else if(L->nCcalls>=(200+(200>>3)))
+luaD_throw(L,5);
+}
+if(luaD_precall(L,func,nResults)==0)
+luaV_execute(L,1);
+L->nCcalls--;
+luaC_checkGC(L);
+}
+static int luaD_pcall(lua_State*L,Pfunc func,void*u,
+ptrdiff_t old_top,ptrdiff_t ef){
+int status;
+unsigned short oldnCcalls=L->nCcalls;
+ptrdiff_t old_ci=saveci(L,L->ci);
+lu_byte old_allowhooks=L->allowhook;
+ptrdiff_t old_errfunc=L->errfunc;
+L->errfunc=ef;
+status=luaD_rawrunprotected(L,func,u);
+if(status!=0){
+StkId oldtop=restorestack(L,old_top);
+luaF_close(L,oldtop);
+luaD_seterrorobj(L,status,oldtop);
+L->nCcalls=oldnCcalls;
+L->ci=restoreci(L,old_ci);
+L->base=L->ci->base;
+L->savedpc=L->ci->savedpc;
+L->allowhook=old_allowhooks;
+restore_stack_limit(L);
+}
+L->errfunc=old_errfunc;
+return status;
+}
+struct SParser{
+ZIO*z;
+Mbuffer buff;
+const char*name;
+};
+static void f_parser(lua_State*L,void*ud){
+int i;
+Proto*tf;
+Closure*cl;
+struct SParser*p=cast(struct SParser*,ud);
+luaC_checkGC(L);
+tf=luaY_parser(L,p->z,
+&p->buff,p->name);
+cl=luaF_newLclosure(L,tf->nups,hvalue(gt(L)));
+cl->l.p=tf;
+for(i=0;i<tf->nups;i++)
+cl->l.upvals[i]=luaF_newupval(L);
+setclvalue(L,L->top,cl);
+incr_top(L);
+}
+static int luaD_protectedparser(lua_State*L,ZIO*z,const char*name){
+struct SParser p;
+int status;
+p.z=z;p.name=name;
+luaZ_initbuffer(L,&p.buff);
+status=luaD_pcall(L,f_parser,&p,savestack(L,L->top),L->errfunc);
+luaZ_freebuffer(L,&p.buff);
+return status;
+}
+static void luaS_resize(lua_State*L,int newsize){
+GCObject**newhash;
+stringtable*tb;
+int i;
+if(G(L)->gcstate==2)
+return;
+newhash=luaM_newvector(L,newsize,GCObject*);
+tb=&G(L)->strt;
+for(i=0;i<newsize;i++)newhash[i]=NULL;
+for(i=0;i<tb->size;i++){
+GCObject*p=tb->hash[i];
+while(p){
+GCObject*next=p->gch.next;
+unsigned int h=gco2ts(p)->hash;
+int h1=lmod(h,newsize);
+p->gch.next=newhash[h1];
+newhash[h1]=p;
+p=next;
+}
+}
+luaM_freearray(L,tb->hash,tb->size,TString*);
+tb->size=newsize;
+tb->hash=newhash;
+}
+static TString*newlstr(lua_State*L,const char*str,size_t l,
+unsigned int h){
+TString*ts;
+stringtable*tb;
+if(l+1>(((size_t)(~(size_t)0)-2)-sizeof(TString))/sizeof(char))
+luaM_toobig(L);
+ts=cast(TString*,luaM_malloc(L,(l+1)*sizeof(char)+sizeof(TString)));
+ts->tsv.len=l;
+ts->tsv.hash=h;
+ts->tsv.marked=luaC_white(G(L));
+ts->tsv.tt=4;
+ts->tsv.reserved=0;
+memcpy(ts+1,str,l*sizeof(char));
+((char*)(ts+1))[l]='\0';
+tb=&G(L)->strt;
+h=lmod(h,tb->size);
+ts->tsv.next=tb->hash[h];
+tb->hash[h]=obj2gco(ts);
+tb->nuse++;
+if(tb->nuse>cast(lu_int32,tb->size)&&tb->size<=(INT_MAX-2)/2)
+luaS_resize(L,tb->size*2);
+return ts;
+}
+static TString*luaS_newlstr(lua_State*L,const char*str,size_t l){
+GCObject*o;
+unsigned int h=cast(unsigned int,l);
+size_t step=(l>>5)+1;
+size_t l1;
+for(l1=l;l1>=step;l1-=step)
+h=h^((h<<5)+(h>>2)+cast(unsigned char,str[l1-1]));
+for(o=G(L)->strt.hash[lmod(h,G(L)->strt.size)];
+o!=NULL;
+o=o->gch.next){
+TString*ts=rawgco2ts(o);
+if(ts->tsv.len==l&&(memcmp(str,getstr(ts),l)==0)){
+if(isdead(G(L),o))changewhite(o);
+return ts;
+}
+}
+return newlstr(L,str,l,h);
+}
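Editor's sketch (not part of the diff): luaS_newlstr() above interns every string, and for long strings it hashes by sampling at most about 32 characters (step = (l>>5)+1) so hashing cost stays bounded. A standalone copy of just the hash, under a hypothetical name:

#include <stdio.h>
#include <string.h>

/* Same seed and mixing as the loop in luaS_newlstr() above. */
static unsigned int luahash_demo(const char *str, size_t l) {
  unsigned int h = (unsigned int)l;     /* seed with the length */
  size_t step = (l >> 5) + 1;           /* sample at most ~32 characters */
  size_t l1;
  for (l1 = l; l1 >= step; l1 -= step)
    h = h ^ ((h << 5) + (h >> 2) + (unsigned char)str[l1 - 1]);
  return h;
}

int main(void) {
  const char *s = "the quick brown fox jumps over the lazy dog";
  printf("hash = %u\n", luahash_demo(s, strlen(s)));
  return 0;
}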
+static Udata*luaS_newudata(lua_State*L,size_t s,Table*e){
+Udata*u;
+if(s>((size_t)(~(size_t)0)-2)-sizeof(Udata))
+luaM_toobig(L);
+u=cast(Udata*,luaM_malloc(L,s+sizeof(Udata)));
+u->uv.marked=luaC_white(G(L));
+u->uv.tt=7;
+u->uv.len=s;
+u->uv.metatable=NULL;
+u->uv.env=e;
+u->uv.next=G(L)->mainthread->next;
+G(L)->mainthread->next=obj2gco(u);
+return u;
+}
+#define hashpow2(t,n)(gnode(t,lmod((n),sizenode(t))))
+#define hashstr(t,str)hashpow2(t,(str)->tsv.hash)
+#define hashboolean(t,p)hashpow2(t,p)
+#define hashmod(t,n)(gnode(t,((n)%((sizenode(t)-1)|1))))
+#define hashpointer(t,p)hashmod(t,IntPoint(p))
+static const Node dummynode_={
+{{NULL},0},
+{{{NULL},0,NULL}}
+};
+static Node*hashnum(const Table*t,lua_Number n){
+unsigned int a[cast_int(sizeof(lua_Number)/sizeof(int))];
+int i;
+if(luai_numeq(n,0))
+return gnode(t,0);
+memcpy(a,&n,sizeof(a));
+for(i=1;i<cast_int(sizeof(lua_Number)/sizeof(int));i++)a[0]+=a[i];
+return hashmod(t,a[0]);
+}
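Editor's sketch (not part of the diff): hashnum() above hashes a lua_Number by memcpy'ing the double into an int array and folding the pieces together before indexing the node array. A hypothetical simplified variant that just returns the folded value:

#include <stdio.h>
#include <string.h>

/* Copy the double's bytes into integers and sum them: cheap, alignment-safe,
   and equal numbers always hash equally. */
static unsigned int hashnum_demo(double n) {
  unsigned int a[sizeof(double) / sizeof(unsigned int)];
  unsigned int i, h = 0;
  memcpy(a, &n, sizeof(a));
  for (i = 0; i < sizeof(double) / sizeof(unsigned int); i++) h += a[i];
  return h;
}

int main(void) {
  printf("%u %u\n", hashnum_demo(3.14), hashnum_demo(2.0));
  return 0;
}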
+static Node*mainposition(const Table*t,const TValue*key){
+switch(ttype(key)){
+case 3:
+return hashnum(t,nvalue(key));
+case 4:
+return hashstr(t,rawtsvalue(key));
+case 1:
+return hashboolean(t,bvalue(key));
+case 2:
+return hashpointer(t,pvalue(key));
+default:
+return hashpointer(t,gcvalue(key));
+}
+}
+static int arrayindex(const TValue*key){
+if(ttisnumber(key)){
+lua_Number n=nvalue(key);
+int k;
+lua_number2int(k,n);
+if(luai_numeq(cast_num(k),n))
+return k;
+}
+return-1;
+}
+static int findindex(lua_State*L,Table*t,StkId key){
+int i;
+if(ttisnil(key))return-1;
+i=arrayindex(key);
+if(0<i&&i<=t->sizearray)
+return i-1;
+else{
+Node*n=mainposition(t,key);
+do{
+if(luaO_rawequalObj(key2tval(n),key)||
+(ttype(gkey(n))==(8+3)&&iscollectable(key)&&
+gcvalue(gkey(n))==gcvalue(key))){
+i=cast_int(n-gnode(t,0));
+return i+t->sizearray;
+}
+else n=gnext(n);
+}while(n);
+luaG_runerror(L,"invalid key to "LUA_QL("next"));
+return 0;
+}
+}
+static int luaH_next(lua_State*L,Table*t,StkId key){
+int i=findindex(L,t,key);
+for(i++;i<t->sizearray;i++){
+if(!ttisnil(&t->array[i])){
+setnvalue(key,cast_num(i+1));
+setobj(L,key+1,&t->array[i]);
+return 1;
+}
+}
+for(i-=t->sizearray;i<(int)sizenode(t);i++){
+if(!ttisnil(gval(gnode(t,i)))){
+setobj(L,key,key2tval(gnode(t,i)));
+setobj(L,key+1,gval(gnode(t,i)));
+return 1;
+}
+}
+return 0;
+}
+static int computesizes(int nums[],int*narray){
+int i;
+int twotoi;
+int a=0;
+int na=0;
+int n=0;
+for(i=0,twotoi=1;twotoi/2<*narray;i++,twotoi*=2){
+if(nums[i]>0){
+a+=nums[i];
+if(a>twotoi/2){
+n=twotoi;
+na=a;
+}
+}
+if(a==*narray)break;
+}
+*narray=n;
+return na;
+}
+static int countint(const TValue*key,int*nums){
+int k=arrayindex(key);
+if(0<k&&k<=(1<<(32-2))){
+nums[ceillog2(k)]++;
+return 1;
+}
+else
+return 0;
+}
+static int numusearray(const Table*t,int*nums){
+int lg;
+int ttlg;
+int ause=0;
+int i=1;
+for(lg=0,ttlg=1;lg<=(32-2);lg++,ttlg*=2){
+int lc=0;
+int lim=ttlg;
+if(lim>t->sizearray){
+lim=t->sizearray;
+if(i>lim)
+break;
+}
+for(;i<=lim;i++){
+if(!ttisnil(&t->array[i-1]))
+lc++;
+}
+nums[lg]+=lc;
+ause+=lc;
+}
+return ause;
+}
+static int numusehash(const Table*t,int*nums,int*pnasize){
+int totaluse=0;
+int ause=0;
+int i=sizenode(t);
+while(i--){
+Node*n=&t->node[i];
+if(!ttisnil(gval(n))){
+ause+=countint(key2tval(n),nums);
+totaluse++;
+}
+}
+*pnasize+=ause;
+return totaluse;
+}
+static void setarrayvector(lua_State*L,Table*t,int size){
+int i;
+luaM_reallocvector(L,t->array,t->sizearray,size,TValue);
+for(i=t->sizearray;i<size;i++)
+setnilvalue(&t->array[i]);
+t->sizearray=size;
+}
+static void setnodevector(lua_State*L,Table*t,int size){
+int lsize;
+if(size==0){
+t->node=cast(Node*,(&dummynode_));
+lsize=0;
+}
+else{
+int i;
+lsize=ceillog2(size);
+if(lsize>(32-2))
+luaG_runerror(L,"table overflow");
+size=twoto(lsize);
+t->node=luaM_newvector(L,size,Node);
+for(i=0;i<size;i++){
+Node*n=gnode(t,i);
+gnext(n)=NULL;
+setnilvalue(gkey(n));
+setnilvalue(gval(n));
+}
+}
+t->lsizenode=cast_byte(lsize);
+t->lastfree=gnode(t,size);
+}
+static void resize(lua_State*L,Table*t,int nasize,int nhsize){
+int i;
+int oldasize=t->sizearray;
+int oldhsize=t->lsizenode;
+Node*nold=t->node;
+if(nasize>oldasize)
+setarrayvector(L,t,nasize);
+setnodevector(L,t,nhsize);
+if(nasize<oldasize){
+t->sizearray=nasize;
+for(i=nasize;i<oldasize;i++){
+if(!ttisnil(&t->array[i]))
+setobj(L,luaH_setnum(L,t,i+1),&t->array[i]);
+}
+luaM_reallocvector(L,t->array,oldasize,nasize,TValue);
+}
+for(i=twoto(oldhsize)-1;i>=0;i--){
+Node*old=nold+i;
+if(!ttisnil(gval(old)))
+setobj(L,luaH_set(L,t,key2tval(old)),gval(old));
+}
+if(nold!=(&dummynode_))
+luaM_freearray(L,nold,twoto(oldhsize),Node);
+}
+static void luaH_resizearray(lua_State*L,Table*t,int nasize){
+int nsize=(t->node==(&dummynode_))?0:sizenode(t);
+resize(L,t,nasize,nsize);
+}
+static void rehash(lua_State*L,Table*t,const TValue*ek){
+int nasize,na;
+int nums[(32-2)+1];
+int i;
+int totaluse;
+for(i=0;i<=(32-2);i++)nums[i]=0;
+nasize=numusearray(t,nums);
+totaluse=nasize;
+totaluse+=numusehash(t,nums,&nasize);
+nasize+=countint(ek,nums);
+totaluse++;
+na=computesizes(nums,&nasize);
+resize(L,t,nasize,totaluse-na);
+}
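+/* rehash counts the integer keys in both parts (nums[] buckets by power of
+   two) and computesizes picks the largest array size that stays more than
+   half full; the remaining keys go to the hash part of the resized table. */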
+static Table*luaH_new(lua_State*L,int narray,int nhash){
+Table*t=luaM_new(L,Table);
+luaC_link(L,obj2gco(t),5);
+t->metatable=NULL;
+t->flags=cast_byte(~0);
+t->array=NULL;
+t->sizearray=0;
+t->lsizenode=0;
+t->node=cast(Node*,(&dummynode_));
+setarrayvector(L,t,narray);
+setnodevector(L,t,nhash);
+return t;
+}
+static void luaH_free(lua_State*L,Table*t){
+if(t->node!=(&dummynode_))
+luaM_freearray(L,t->node,sizenode(t),Node);
+luaM_freearray(L,t->array,t->sizearray,TValue);
+luaM_free(L,t);
+}
+static Node*getfreepos(Table*t){
+while(t->lastfree-->t->node){
+if(ttisnil(gkey(t->lastfree)))
+return t->lastfree;
+}
+return NULL;
+}
+static TValue*newkey(lua_State*L,Table*t,const TValue*key){
+Node*mp=mainposition(t,key);
+if(!ttisnil(gval(mp))||mp==(&dummynode_)){
+Node*othern;
+Node*n=getfreepos(t);
+if(n==NULL){
+rehash(L,t,key);
+return luaH_set(L,t,key);
+}
+othern=mainposition(t,key2tval(mp));
+if(othern!=mp){
+while(gnext(othern)!=mp)othern=gnext(othern);
+gnext(othern)=n;
+*n=*mp;
+gnext(mp)=NULL;
+setnilvalue(gval(mp));
+}
+else{
+gnext(n)=gnext(mp);
+gnext(mp)=n;
+mp=n;
+}
+}
+gkey(mp)->value=key->value;gkey(mp)->tt=key->tt;
+luaC_barriert(L,t,key);
+return gval(mp);
+}
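+/* newkey handles collisions: if the main position is occupied, a node from
+   the free list (lastfree) is used; the colliding key is moved there when it
+   is not in its own main position, otherwise the new key is chained there. */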
+static const TValue*luaH_getnum(Table*t,int key){
+if(cast(unsigned int,key-1)<cast(unsigned int,t->sizearray))
+return&t->array[key-1];
+else{
+lua_Number nk=cast_num(key);
+Node*n=hashnum(t,nk);
+do{
+if(ttisnumber(gkey(n))&&luai_numeq(nvalue(gkey(n)),nk))
+return gval(n);
+else n=gnext(n);
+}while(n);
+return(&luaO_nilobject_);
+}
+}
+static const TValue*luaH_getstr(Table*t,TString*key){
+Node*n=hashstr(t,key);
+do{
+if(ttisstring(gkey(n))&&rawtsvalue(gkey(n))==key)
+return gval(n);
+else n=gnext(n);
+}while(n);
+return(&luaO_nilobject_);
+}
+static const TValue*luaH_get(Table*t,const TValue*key){
+switch(ttype(key)){
+case 0:return(&luaO_nilobject_);
+case 4:return luaH_getstr(t,rawtsvalue(key));
+case 3:{
+int k;
+lua_Number n=nvalue(key);
+lua_number2int(k,n);
+if(luai_numeq(cast_num(k),nvalue(key)))
+return luaH_getnum(t,k);
+}
+default:{
+Node*n=mainposition(t,key);
+do{
+if(luaO_rawequalObj(key2tval(n),key))
+return gval(n);
+else n=gnext(n);
+}while(n);
+return(&luaO_nilobject_);
+}
+}
+}
+static TValue*luaH_set(lua_State*L,Table*t,const TValue*key){
+const TValue*p=luaH_get(t,key);
+t->flags=0;
+if(p!=(&luaO_nilobject_))
+return cast(TValue*,p);
+else{
+if(ttisnil(key))luaG_runerror(L,"table index is nil");
+else if(ttisnumber(key)&&luai_numisnan(nvalue(key)))
+luaG_runerror(L,"table index is NaN");
+return newkey(L,t,key);
+}
+}
+static TValue*luaH_setnum(lua_State*L,Table*t,int key){
+const TValue*p=luaH_getnum(t,key);
+if(p!=(&luaO_nilobject_))
+return cast(TValue*,p);
+else{
+TValue k;
+setnvalue(&k,cast_num(key));
+return newkey(L,t,&k);
+}
+}
+static TValue*luaH_setstr(lua_State*L,Table*t,TString*key){
+const TValue*p=luaH_getstr(t,key);
+if(p!=(&luaO_nilobject_))
+return cast(TValue*,p);
+else{
+TValue k;
+setsvalue(L,&k,key);
+return newkey(L,t,&k);
+}
+}
+static int unbound_search(Table*t,unsigned int j){
+unsigned int i=j;
+j++;
+while(!ttisnil(luaH_getnum(t,j))){
+i=j;
+j*=2;
+if(j>cast(unsigned int,(INT_MAX-2))){
+i=1;
+while(!ttisnil(luaH_getnum(t,i)))i++;
+return i-1;
+}
+}
+while(j-i>1){
+unsigned int m=(i+j)/2;
+if(ttisnil(luaH_getnum(t,m)))j=m;
+else i=m;
+}
+return i;
+}
+static int luaH_getn(Table*t){
+unsigned int j=t->sizearray;
+if(j>0&&ttisnil(&t->array[j-1])){
+unsigned int i=0;
+while(j-i>1){
+unsigned int m=(i+j)/2;
+if(ttisnil(&t->array[m-1]))j=m;
+else i=m;
+}
+return i;
+}
+else if(t->node==(&dummynode_))
+return j;
+else return unbound_search(t,j);
+}
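+/* luaH_getn returns a border n where t[n] is non-nil and t[n+1] is nil, found
+   by binary search in the array part or, for keys beyond it, by doubling and
+   then bisecting over the hash part (unbound_search). */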
+#define makewhite(g,x)((x)->gch.marked=cast_byte(((x)->gch.marked&cast_byte(~(bitmask(2)|bit2mask(0,1))))|luaC_white(g)))
+#define white2gray(x)reset2bits((x)->gch.marked,0,1)
+#define black2gray(x)resetbit((x)->gch.marked,2)
+#define stringmark(s)reset2bits((s)->tsv.marked,0,1)
+#define isfinalized(u)testbit((u)->marked,3)
+#define markfinalized(u)l_setbit((u)->marked,3)
+#define markvalue(g,o){checkconsistency(o);if(iscollectable(o)&&iswhite(gcvalue(o)))reallymarkobject(g,gcvalue(o));}
+#define markobject(g,t){if(iswhite(obj2gco(t)))reallymarkobject(g,obj2gco(t));}
+#define setthreshold(g)(g->GCthreshold=(g->estimate/100)*g->gcpause)
+static void removeentry(Node*n){
+if(iscollectable(gkey(n)))
+setttype(gkey(n),(8+3));
+}
+static void reallymarkobject(global_State*g,GCObject*o){
+white2gray(o);
+switch(o->gch.tt){
+case 4:{
+return;
+}
+case 7:{
+Table*mt=gco2u(o)->metatable;
+gray2black(o);
+if(mt)markobject(g,mt);
+markobject(g,gco2u(o)->env);
+return;
+}
+case(8+2):{
+UpVal*uv=gco2uv(o);
+markvalue(g,uv->v);
+if(uv->v==&uv->u.value)
+gray2black(o);
+return;
+}
+case 6:{
+gco2cl(o)->c.gclist=g->gray;
+g->gray=o;
+break;
+}
+case 5:{
+gco2h(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+case 8:{
+gco2th(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+case(8+1):{
+gco2p(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+default:;
+}
+}
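+/* Incremental tri-color marking: strings are marked directly (never queued),
+   userdata are blackened at once (only metatable/env marked), while tables,
+   closures, threads and prototypes are queued on g->gray for traversal. */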
+static void marktmu(global_State*g){
+GCObject*u=g->tmudata;
+if(u){
+do{
+u=u->gch.next;
+makewhite(g,u);
+reallymarkobject(g,u);
+}while(u!=g->tmudata);
+}
+}
+static size_t luaC_separateudata(lua_State*L,int all){
+global_State*g=G(L);
+size_t deadmem=0;
+GCObject**p=&g->mainthread->next;
+GCObject*curr;
+while((curr=*p)!=NULL){
+if(!(iswhite(curr)||all)||isfinalized(gco2u(curr)))
+p=&curr->gch.next;
+else if(fasttm(L,gco2u(curr)->metatable,TM_GC)==NULL){
+markfinalized(gco2u(curr));
+p=&curr->gch.next;
+}
+else{
+deadmem+=sizeudata(gco2u(curr));
+markfinalized(gco2u(curr));
+*p=curr->gch.next;
+if(g->tmudata==NULL)
+g->tmudata=curr->gch.next=curr;
+else{
+curr->gch.next=g->tmudata->gch.next;
+g->tmudata->gch.next=curr;
+g->tmudata=curr;
+}
+}
+}
+return deadmem;
+}
+static int traversetable(global_State*g,Table*h){
+int i;
+int weakkey=0;
+int weakvalue=0;
+const TValue*mode;
+if(h->metatable)
+markobject(g,h->metatable);
+mode=gfasttm(g,h->metatable,TM_MODE);
+if(mode&&ttisstring(mode)){
+weakkey=(strchr(svalue(mode),'k')!=NULL);
+weakvalue=(strchr(svalue(mode),'v')!=NULL);
+if(weakkey||weakvalue){
+h->marked&=~(bitmask(3)|bitmask(4));
+h->marked|=cast_byte((weakkey<<3)|
+(weakvalue<<4));
+h->gclist=g->weak;
+g->weak=obj2gco(h);
+}
+}
+if(weakkey&&weakvalue)return 1;
+if(!weakvalue){
+i=h->sizearray;
+while(i--)
+markvalue(g,&h->array[i]);
+}
+i=sizenode(h);
+while(i--){
+Node*n=gnode(h,i);
+if(ttisnil(gval(n)))
+removeentry(n);
+else{
+if(!weakkey)markvalue(g,gkey(n));
+if(!weakvalue)markvalue(g,gval(n));
+}
+}
+return weakkey||weakvalue;
+}
+static void traverseproto(global_State*g,Proto*f){
+int i;
+if(f->source)stringmark(f->source);
+for(i=0;i<f->sizek;i++)
+markvalue(g,&f->k[i]);
+for(i=0;i<f->sizeupvalues;i++){
+if(f->upvalues[i])
+stringmark(f->upvalues[i]);
+}
+for(i=0;i<f->sizep;i++){
+if(f->p[i])
+markobject(g,f->p[i]);
+}
+for(i=0;i<f->sizelocvars;i++){
+if(f->locvars[i].varname)
+stringmark(f->locvars[i].varname);
+}
+}
+static void traverseclosure(global_State*g,Closure*cl){
+markobject(g,cl->c.env);
+if(cl->c.isC){
+int i;
+for(i=0;i<cl->c.nupvalues;i++)
+markvalue(g,&cl->c.upvalue[i]);
+}
+else{
+int i;
+markobject(g,cl->l.p);
+for(i=0;i<cl->l.nupvalues;i++)
+markobject(g,cl->l.upvals[i]);
+}
+}
+static void checkstacksizes(lua_State*L,StkId max){
+int ci_used=cast_int(L->ci-L->base_ci);
+int s_used=cast_int(max-L->stack);
+if(L->size_ci>20000)
+return;
+if(4*ci_used<L->size_ci&&2*8<L->size_ci)
+luaD_reallocCI(L,L->size_ci/2);
+condhardstacktests(luaD_reallocCI(L,ci_used+1));
+if(4*s_used<L->stacksize&&
+2*((2*20)+5)<L->stacksize)
+luaD_reallocstack(L,L->stacksize/2);
+condhardstacktests(luaD_reallocstack(L,s_used));
+}
+static void traversestack(global_State*g,lua_State*l){
+StkId o,lim;
+CallInfo*ci;
+markvalue(g,gt(l));
+lim=l->top;
+for(ci=l->base_ci;ci<=l->ci;ci++){
+if(lim<ci->top)lim=ci->top;
+}
+for(o=l->stack;o<l->top;o++)
+markvalue(g,o);
+for(;o<=lim;o++)
+setnilvalue(o);
+checkstacksizes(l,lim);
+}
+static l_mem propagatemark(global_State*g){
+GCObject*o=g->gray;
+gray2black(o);
+switch(o->gch.tt){
+case 5:{
+Table*h=gco2h(o);
+g->gray=h->gclist;
+if(traversetable(g,h))
+black2gray(o);
+return sizeof(Table)+sizeof(TValue)*h->sizearray+
+sizeof(Node)*sizenode(h);
+}
+case 6:{
+Closure*cl=gco2cl(o);
+g->gray=cl->c.gclist;
+traverseclosure(g,cl);
+return(cl->c.isC)?sizeCclosure(cl->c.nupvalues):
+sizeLclosure(cl->l.nupvalues);
+}
+case 8:{
+lua_State*th=gco2th(o);
+g->gray=th->gclist;
+th->gclist=g->grayagain;
+g->grayagain=o;
+black2gray(o);
+traversestack(g,th);
+return sizeof(lua_State)+sizeof(TValue)*th->stacksize+
+sizeof(CallInfo)*th->size_ci;
+}
+case(8+1):{
+Proto*p=gco2p(o);
+g->gray=p->gclist;
+traverseproto(g,p);
+return sizeof(Proto)+sizeof(Instruction)*p->sizecode+
+sizeof(Proto*)*p->sizep+
+sizeof(TValue)*p->sizek+
+sizeof(int)*p->sizelineinfo+
+sizeof(LocVar)*p->sizelocvars+
+sizeof(TString*)*p->sizeupvalues;
+}
+default:return 0;
+}
+}
+static size_t propagateall(global_State*g){
+size_t m=0;
+while(g->gray)m+=propagatemark(g);
+return m;
+}
+static int iscleared(const TValue*o,int iskey){
+if(!iscollectable(o))return 0;
+if(ttisstring(o)){
+stringmark(rawtsvalue(o));
+return 0;
+}
+return iswhite(gcvalue(o))||
+(ttisuserdata(o)&&(!iskey&&isfinalized(uvalue(o))));
+}
+static void cleartable(GCObject*l){
+while(l){
+Table*h=gco2h(l);
+int i=h->sizearray;
+if(testbit(h->marked,4)){
+while(i--){
+TValue*o=&h->array[i];
+if(iscleared(o,0))
+setnilvalue(o);
+}
+}
+i=sizenode(h);
+while(i--){
+Node*n=gnode(h,i);
+if(!ttisnil(gval(n))&&
+(iscleared(key2tval(n),1)||iscleared(gval(n),0))){
+setnilvalue(gval(n));
+removeentry(n);
+}
+}
+l=h->gclist;
+}
+}
+static void freeobj(lua_State*L,GCObject*o){
+switch(o->gch.tt){
+case(8+1):luaF_freeproto(L,gco2p(o));break;
+case 6:luaF_freeclosure(L,gco2cl(o));break;
+case(8+2):luaF_freeupval(L,gco2uv(o));break;
+case 5:luaH_free(L,gco2h(o));break;
+case 8:{
+luaE_freethread(L,gco2th(o));
+break;
+}
+case 4:{
+G(L)->strt.nuse--;
+luaM_freemem(L,o,sizestring(gco2ts(o)));
+break;
+}
+case 7:{
+luaM_freemem(L,o,sizeudata(gco2u(o)));
+break;
+}
+default:;
+}
+}
+#define sweepwholelist(L,p)sweeplist(L,p,((lu_mem)(~(lu_mem)0)-2))
+static GCObject**sweeplist(lua_State*L,GCObject**p,lu_mem count){
+GCObject*curr;
+global_State*g=G(L);
+int deadmask=otherwhite(g);
+while((curr=*p)!=NULL&&count-->0){
+if(curr->gch.tt==8)
+sweepwholelist(L,&gco2th(curr)->openupval);
+if((curr->gch.marked^bit2mask(0,1))&deadmask){
+makewhite(g,curr);
+p=&curr->gch.next;
+}
+else{
+*p=curr->gch.next;
+if(curr==g->rootgc)
+g->rootgc=curr->gch.next;
+freeobj(L,curr);
+}
+}
+return p;
+}
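+/* sweeplist frees every object still carrying the previous white and flips
+   survivors to the current white; thread objects also sweep their open
+   upvalue lists. */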
+static void checkSizes(lua_State*L){
+global_State*g=G(L);
+if(g->strt.nuse<cast(lu_int32,g->strt.size/4)&&
+g->strt.size>32*2)
+luaS_resize(L,g->strt.size/2);
+if(luaZ_sizebuffer(&g->buff)>32*2){
+size_t newsize=luaZ_sizebuffer(&g->buff)/2;
+luaZ_resizebuffer(L,&g->buff,newsize);
+}
+}
+static void GCTM(lua_State*L){
+global_State*g=G(L);
+GCObject*o=g->tmudata->gch.next;
+Udata*udata=rawgco2u(o);
+const TValue*tm;
+if(o==g->tmudata)
+g->tmudata=NULL;
+else
+g->tmudata->gch.next=udata->uv.next;
+udata->uv.next=g->mainthread->next;
+g->mainthread->next=o;
+makewhite(g,o);
+tm=fasttm(L,udata->uv.metatable,TM_GC);
+if(tm!=NULL){
+lu_byte oldah=L->allowhook;
+lu_mem oldt=g->GCthreshold;
+L->allowhook=0;
+g->GCthreshold=2*g->totalbytes;
+setobj(L,L->top,tm);
+setuvalue(L,L->top+1,udata);
+L->top+=2;
+luaD_call(L,L->top-2,0);
+L->allowhook=oldah;
+g->GCthreshold=oldt;
+}
+}
+static void luaC_callGCTM(lua_State*L){
+while(G(L)->tmudata)
+GCTM(L);
+}
+static void luaC_freeall(lua_State*L){
+global_State*g=G(L);
+int i;
+g->currentwhite=bit2mask(0,1)|bitmask(6);
+sweepwholelist(L,&g->rootgc);
+for(i=0;i<g->strt.size;i++)
+sweepwholelist(L,&g->strt.hash[i]);
+}
+static void markmt(global_State*g){
+int i;
+for(i=0;i<(8+1);i++)
+if(g->mt[i])markobject(g,g->mt[i]);
+}
+static void markroot(lua_State*L){
+global_State*g=G(L);
+g->gray=NULL;
+g->grayagain=NULL;
+g->weak=NULL;
+markobject(g,g->mainthread);
+markvalue(g,gt(g->mainthread));
+markvalue(g,registry(L));
+markmt(g);
+g->gcstate=1;
+}
+static void remarkupvals(global_State*g){
+UpVal*uv;
+for(uv=g->uvhead.u.l.next;uv!=&g->uvhead;uv=uv->u.l.next){
+if(isgray(obj2gco(uv)))
+markvalue(g,uv->v);
+}
+}
+static void atomic(lua_State*L){
+global_State*g=G(L);
+size_t udsize;
+remarkupvals(g);
+propagateall(g);
+g->gray=g->weak;
+g->weak=NULL;
+markobject(g,L);
+markmt(g);
+propagateall(g);
+g->gray=g->grayagain;
+g->grayagain=NULL;
+propagateall(g);
+udsize=luaC_separateudata(L,0);
+marktmu(g);
+udsize+=propagateall(g);
+cleartable(g->weak);
+g->currentwhite=cast_byte(otherwhite(g));
+g->sweepstrgc=0;
+g->sweepgc=&g->rootgc;
+g->gcstate=2;
+g->estimate=g->totalbytes-udsize;
+}
+static l_mem singlestep(lua_State*L){
+global_State*g=G(L);
+switch(g->gcstate){
+case 0:{
+markroot(L);
+return 0;
+}
+case 1:{
+if(g->gray)
+return propagatemark(g);
+else{
+atomic(L);
+return 0;
+}
+}
+case 2:{
+lu_mem old=g->totalbytes;
+sweepwholelist(L,&g->strt.hash[g->sweepstrgc++]);
+if(g->sweepstrgc>=g->strt.size)
+g->gcstate=3;
+g->estimate-=old-g->totalbytes;
+return 10;
+}
+case 3:{
+lu_mem old=g->totalbytes;
+g->sweepgc=sweeplist(L,g->sweepgc,40);
+if(*g->sweepgc==NULL){
+checkSizes(L);
+g->gcstate=4;
+}
+g->estimate-=old-g->totalbytes;
+return 40*10;
+}
+case 4:{
+if(g->tmudata){
+GCTM(L);
+if(g->estimate>100)
+g->estimate-=100;
+return 100;
+}
+else{
+g->gcstate=0;
+g->gcdept=0;
+return 0;
+}
+}
+default:return 0;
+}
+}
+static void luaC_step(lua_State*L){
+global_State*g=G(L);
+l_mem lim=(1024u/100)*g->gcstepmul;
+if(lim==0)
+lim=(((lu_mem)(~(lu_mem)0)-2)-1)/2;
+g->gcdept+=g->totalbytes-g->GCthreshold;
+do{
+lim-=singlestep(L);
+if(g->gcstate==0)
+break;
+}while(lim>0);
+if(g->gcstate!=0){
+if(g->gcdept<1024u)
+g->GCthreshold=g->totalbytes+1024u;
+else{
+g->gcdept-=1024u;
+g->GCthreshold=g->totalbytes;
+}
+}
+else{
+setthreshold(g);
+}
+}
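+/* GC driver: singlestep advances the state machine pause -> propagate ->
+   sweep strings -> sweep objects -> finalize; luaC_step spends a work credit
+   proportional to gcstepmul per call and resets the threshold via gcpause. */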
+static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v){
+global_State*g=G(L);
+if(g->gcstate==1)
+reallymarkobject(g,v);
+else
+makewhite(g,o);
+}
+static void luaC_barrierback(lua_State*L,Table*t){
+global_State*g=G(L);
+GCObject*o=obj2gco(t);
+black2gray(o);
+t->gclist=g->grayagain;
+g->grayagain=o;
+}
+static void luaC_link(lua_State*L,GCObject*o,lu_byte tt){
+global_State*g=G(L);
+o->gch.next=g->rootgc;
+g->rootgc=o;
+o->gch.marked=luaC_white(g);
+o->gch.tt=tt;
+}
+static void luaC_linkupval(lua_State*L,UpVal*uv){
+global_State*g=G(L);
+GCObject*o=obj2gco(uv);
+o->gch.next=g->rootgc;
+g->rootgc=o;
+if(isgray(o)){
+if(g->gcstate==1){
+gray2black(o);
+luaC_barrier(L,uv,uv->v);
+}
+else{
+makewhite(g,o);
+}
+}
+}
+typedef union{
+lua_Number r;
+TString*ts;
+}SemInfo;
+typedef struct Token{
+int token;
+SemInfo seminfo;
+}Token;
+typedef struct LexState{
+int current;
+int linenumber;
+int lastline;
+Token t;
+Token lookahead;
+struct FuncState*fs;
+struct lua_State*L;
+ZIO*z;
+Mbuffer*buff;
+TString*source;
+char decpoint;
+}LexState;
+static void luaX_init(lua_State*L);
+static void luaX_lexerror(LexState*ls,const char*msg,int token);
+#define state_size(x)(sizeof(x)+0)
+#define fromstate(l)(cast(lu_byte*,(l))-0)
+#define tostate(l)(cast(lua_State*,cast(lu_byte*,l)+0))
+typedef struct LG{
+lua_State l;
+global_State g;
+}LG;
+static void stack_init(lua_State*L1,lua_State*L){
+L1->base_ci=luaM_newvector(L,8,CallInfo);
+L1->ci=L1->base_ci;
+L1->size_ci=8;
+L1->end_ci=L1->base_ci+L1->size_ci-1;
+L1->stack=luaM_newvector(L,(2*20)+5,TValue);
+L1->stacksize=(2*20)+5;
+L1->top=L1->stack;
+L1->stack_last=L1->stack+(L1->stacksize-5)-1;
+L1->ci->func=L1->top;
+setnilvalue(L1->top++);
+L1->base=L1->ci->base=L1->top;
+L1->ci->top=L1->top+20;
+}
+static void freestack(lua_State*L,lua_State*L1){
+luaM_freearray(L,L1->base_ci,L1->size_ci,CallInfo);
+luaM_freearray(L,L1->stack,L1->stacksize,TValue);
+}
+static void f_luaopen(lua_State*L,void*ud){
+global_State*g=G(L);
+UNUSED(ud);
+stack_init(L,L);
+sethvalue(L,gt(L),luaH_new(L,0,2));
+sethvalue(L,registry(L),luaH_new(L,0,2));
+luaS_resize(L,32);
+luaT_init(L);
+luaX_init(L);
+luaS_fix(luaS_newliteral(L,"not enough memory"));
+g->GCthreshold=4*g->totalbytes;
+}
+static void preinit_state(lua_State*L,global_State*g){
+G(L)=g;
+L->stack=NULL;
+L->stacksize=0;
+L->errorJmp=NULL;
+L->hook=NULL;
+L->hookmask=0;
+L->basehookcount=0;
+L->allowhook=1;
+resethookcount(L);
+L->openupval=NULL;
+L->size_ci=0;
+L->nCcalls=L->baseCcalls=0;
+L->status=0;
+L->base_ci=L->ci=NULL;
+L->savedpc=NULL;
+L->errfunc=0;
+setnilvalue(gt(L));
+}
+static void close_state(lua_State*L){
+global_State*g=G(L);
+luaF_close(L,L->stack);
+luaC_freeall(L);
+luaM_freearray(L,G(L)->strt.hash,G(L)->strt.size,TString*);
+luaZ_freebuffer(L,&g->buff);
+freestack(L,L);
+(*g->frealloc)(g->ud,fromstate(L),state_size(LG),0);
+}
+static void luaE_freethread(lua_State*L,lua_State*L1){
+luaF_close(L1,L1->stack);
+freestack(L,L1);
+luaM_freemem(L,fromstate(L1),state_size(lua_State));
+}
+static lua_State*lua_newstate(lua_Alloc f,void*ud){
+int i;
+lua_State*L;
+global_State*g;
+void*l=(*f)(ud,NULL,0,state_size(LG));
+if(l==NULL)return NULL;
+L=tostate(l);
+g=&((LG*)L)->g;
+L->next=NULL;
+L->tt=8;
+g->currentwhite=bit2mask(0,5);
+L->marked=luaC_white(g);
+set2bits(L->marked,5,6);
+preinit_state(L,g);
+g->frealloc=f;
+g->ud=ud;
+g->mainthread=L;
+g->uvhead.u.l.prev=&g->uvhead;
+g->uvhead.u.l.next=&g->uvhead;
+g->GCthreshold=0;
+g->strt.size=0;
+g->strt.nuse=0;
+g->strt.hash=NULL;
+setnilvalue(registry(L));
+luaZ_initbuffer(L,&g->buff);
+g->panic=NULL;
+g->gcstate=0;
+g->rootgc=obj2gco(L);
+g->sweepstrgc=0;
+g->sweepgc=&g->rootgc;
+g->gray=NULL;
+g->grayagain=NULL;
+g->weak=NULL;
+g->tmudata=NULL;
+g->totalbytes=sizeof(LG);
+g->gcpause=200;
+g->gcstepmul=200;
+g->gcdept=0;
+for(i=0;i<(8+1);i++)g->mt[i]=NULL;
+if(luaD_rawrunprotected(L,f_luaopen,NULL)!=0){
+close_state(L);
+L=NULL;
+}
+else
+{}
+return L;
+}
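+/* lua_newstate makes a single LG allocation holding the main lua_State plus
+   the global_State, then runs f_luaopen under protection to build the stack,
+   globals table, registry and interned-string table. */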
+static void callallgcTM(lua_State*L,void*ud){
+UNUSED(ud);
+luaC_callGCTM(L);
+}
+static void lua_close(lua_State*L){
+L=G(L)->mainthread;
+luaF_close(L,L->stack);
+luaC_separateudata(L,1);
+L->errfunc=0;
+do{
+L->ci=L->base_ci;
+L->base=L->top=L->ci->base;
+L->nCcalls=L->baseCcalls=0;
+}while(luaD_rawrunprotected(L,callallgcTM,NULL)!=0);
+close_state(L);
+}
+#define getcode(fs,e)((fs)->f->code[(e)->u.s.info])
+#define luaK_codeAsBx(fs,o,A,sBx)luaK_codeABx(fs,o,A,(sBx)+(((1<<(9+9))-1)>>1))
+#define luaK_setmultret(fs,e)luaK_setreturns(fs,e,(-1))
+static int luaK_codeABx(FuncState*fs,OpCode o,int A,unsigned int Bx);
+static int luaK_codeABC(FuncState*fs,OpCode o,int A,int B,int C);
+static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults);
+static void luaK_patchtohere(FuncState*fs,int list);
+static void luaK_concat(FuncState*fs,int*l1,int l2);
+static int currentpc(lua_State*L,CallInfo*ci){
+if(!isLua(ci))return-1;
+if(ci==L->ci)
+ci->savedpc=L->savedpc;
+return pcRel(ci->savedpc,ci_func(ci)->l.p);
+}
+static int currentline(lua_State*L,CallInfo*ci){
+int pc=currentpc(L,ci);
+if(pc<0)
+return-1;
+else
+return getline_(ci_func(ci)->l.p,pc);
+}
+static int lua_getstack(lua_State*L,int level,lua_Debug*ar){
+int status;
+CallInfo*ci;
+for(ci=L->ci;level>0&&ci>L->base_ci;ci--){
+level--;
+if(f_isLua(ci))
+level-=ci->tailcalls;
+}
+if(level==0&&ci>L->base_ci){
+status=1;
+ar->i_ci=cast_int(ci-L->base_ci);
+}
+else if(level<0){
+status=1;
+ar->i_ci=0;
+}
+else status=0;
+return status;
+}
+static Proto*getluaproto(CallInfo*ci){
+return(isLua(ci)?ci_func(ci)->l.p:NULL);
+}
+static void funcinfo(lua_Debug*ar,Closure*cl){
+if(cl->c.isC){
+ar->source="=[C]";
+ar->linedefined=-1;
+ar->lastlinedefined=-1;
+ar->what="C";
+}
+else{
+ar->source=getstr(cl->l.p->source);
+ar->linedefined=cl->l.p->linedefined;
+ar->lastlinedefined=cl->l.p->lastlinedefined;
+ar->what=(ar->linedefined==0)?"main":"Lua";
+}
+luaO_chunkid(ar->short_src,ar->source,60);
+}
+static void info_tailcall(lua_Debug*ar){
+ar->name=ar->namewhat="";
+ar->what="tail";
+ar->lastlinedefined=ar->linedefined=ar->currentline=-1;
+ar->source="=(tail call)";
+luaO_chunkid(ar->short_src,ar->source,60);
+ar->nups=0;
+}
+static void collectvalidlines(lua_State*L,Closure*f){
+if(f==NULL||f->c.isC){
+setnilvalue(L->top);
+}
+else{
+Table*t=luaH_new(L,0,0);
+int*lineinfo=f->l.p->lineinfo;
+int i;
+for(i=0;i<f->l.p->sizelineinfo;i++)
+setbvalue(luaH_setnum(L,t,lineinfo[i]),1);
+sethvalue(L,L->top,t);
+}
+incr_top(L);
+}
+static int auxgetinfo(lua_State*L,const char*what,lua_Debug*ar,
+Closure*f,CallInfo*ci){
+int status=1;
+if(f==NULL){
+info_tailcall(ar);
+return status;
+}
+for(;*what;what++){
+switch(*what){
+case'S':{
+funcinfo(ar,f);
+break;
+}
+case'l':{
+ar->currentline=(ci)?currentline(L,ci):-1;
+break;
+}
+case'u':{
+ar->nups=f->c.nupvalues;
+break;
+}
+case'n':{
+ar->namewhat=(ci)?NULL:NULL;
+if(ar->namewhat==NULL){
+ar->namewhat="";
+ar->name=NULL;
+}
+break;
+}
+case'L':
+case'f':
+break;
+default:status=0;
+}
+}
+return status;
+}
+static int lua_getinfo(lua_State*L,const char*what,lua_Debug*ar){
+int status;
+Closure*f=NULL;
+CallInfo*ci=NULL;
+if(*what=='>'){
+StkId func=L->top-1;
+luai_apicheck(L,ttisfunction(func));
+what++;
+f=clvalue(func);
+L->top--;
+}
+else if(ar->i_ci!=0){
+ci=L->base_ci+ar->i_ci;
+f=clvalue(ci->func);
+}
+status=auxgetinfo(L,what,ar,f,ci);
+if(strchr(what,'f')){
+if(f==NULL)setnilvalue(L->top);
+else setclvalue(L,L->top,f);
+incr_top(L);
+}
+if(strchr(what,'L'))
+collectvalidlines(L,f);
+return status;
+}
+static int isinstack(CallInfo*ci,const TValue*o){
+StkId p;
+for(p=ci->base;p<ci->top;p++)
+if(o==p)return 1;
+return 0;
+}
+static void luaG_typeerror(lua_State*L,const TValue*o,const char*op){
+const char*name=NULL;
+const char*t=luaT_typenames[ttype(o)];
+const char*kind=(isinstack(L->ci,o))?
+NULL:
+NULL;
+if(kind)
+luaG_runerror(L,"attempt to %s %s "LUA_QL("%s")" (a %s value)",
+op,kind,name,t);
+else
+luaG_runerror(L,"attempt to %s a %s value",op,t);
+}
+static void luaG_concaterror(lua_State*L,StkId p1,StkId p2){
+if(ttisstring(p1)||ttisnumber(p1))p1=p2;
+luaG_typeerror(L,p1,"concatenate");
+}
+static void luaG_aritherror(lua_State*L,const TValue*p1,const TValue*p2){
+TValue temp;
+if(luaV_tonumber(p1,&temp)==NULL)
+p2=p1;
+luaG_typeerror(L,p2,"perform arithmetic on");
+}
+static int luaG_ordererror(lua_State*L,const TValue*p1,const TValue*p2){
+const char*t1=luaT_typenames[ttype(p1)];
+const char*t2=luaT_typenames[ttype(p2)];
+if(t1[2]==t2[2])
+luaG_runerror(L,"attempt to compare two %s values",t1);
+else
+luaG_runerror(L,"attempt to compare %s with %s",t1,t2);
+return 0;
+}
+static void addinfo(lua_State*L,const char*msg){
+CallInfo*ci=L->ci;
+if(isLua(ci)){
+char buff[60];
+int line=currentline(L,ci);
+luaO_chunkid(buff,getstr(getluaproto(ci)->source),60);
+luaO_pushfstring(L,"%s:%d: %s",buff,line,msg);
+}
+}
+static void luaG_errormsg(lua_State*L){
+if(L->errfunc!=0){
+StkId errfunc=restorestack(L,L->errfunc);
+if(!ttisfunction(errfunc))luaD_throw(L,5);
+setobj(L,L->top,L->top-1);
+setobj(L,L->top-1,errfunc);
+incr_top(L);
+luaD_call(L,L->top-2,1);
+}
+luaD_throw(L,2);
+}
+static void luaG_runerror(lua_State*L,const char*fmt,...){
+va_list argp;
+va_start(argp,fmt);
+addinfo(L,luaO_pushvfstring(L,fmt,argp));
+va_end(argp);
+luaG_errormsg(L);
+}
+static int luaZ_fill(ZIO*z){
+size_t size;
+lua_State*L=z->L;
+const char*buff;
+buff=z->reader(L,z->data,&size);
+if(buff==NULL||size==0)return(-1);
+z->n=size-1;
+z->p=buff;
+return char2int(*(z->p++));
+}
+static void luaZ_init(lua_State*L,ZIO*z,lua_Reader reader,void*data){
+z->L=L;
+z->reader=reader;
+z->data=data;
+z->n=0;
+z->p=NULL;
+}
+static char*luaZ_openspace(lua_State*L,Mbuffer*buff,size_t n){
+if(n>buff->buffsize){
+if(n<32)n=32;
+luaZ_resizebuffer(L,buff,n);
+}
+return buff->buffer;
+}
+#define opmode(t,a,b,c,m)(((t)<<7)|((a)<<6)|((b)<<4)|((c)<<2)|(m))
+static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]={
+opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgK,OpArgN,iABx)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgU,OpArgN,iABC)
+,opmode(0,1,OpArgK,OpArgN,iABx)
+,opmode(0,1,OpArgR,OpArgK,iABC)
+,opmode(0,0,OpArgK,OpArgN,iABx)
+,opmode(0,0,OpArgU,OpArgN,iABC)
+,opmode(0,0,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,1,OpArgR,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgR,iABC)
+,opmode(0,0,OpArgR,OpArgN,iAsBx)
+,opmode(1,0,OpArgK,OpArgK,iABC)
+,opmode(1,0,OpArgK,OpArgK,iABC)
+,opmode(1,0,OpArgK,OpArgK,iABC)
+,opmode(1,1,OpArgR,OpArgU,iABC)
+,opmode(1,1,OpArgR,OpArgU,iABC)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,0,OpArgU,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgN,iAsBx)
+,opmode(0,1,OpArgR,OpArgN,iAsBx)
+,opmode(1,0,OpArgN,OpArgU,iABC)
+,opmode(0,0,OpArgU,OpArgU,iABC)
+,opmode(0,0,OpArgN,OpArgN,iABC)
+,opmode(0,1,OpArgU,OpArgN,iABx)
+,opmode(0,1,OpArgU,OpArgN,iABC)
+};
+#define next(ls)(ls->current=zgetc(ls->z))
+#define currIsNewline(ls)(ls->current=='\n'||ls->current=='\r')
+static const char*const luaX_tokens[]={
+"and","break","do","else","elseif",
+"end","false","for","function","if",
+"in","local","nil","not","or","repeat",
+"return","then","true","until","while",
+"..","...","==",">=","<=","~=",
+"<number>","<name>","<string>","<eof>",
+NULL
+};
+#define save_and_next(ls)(save(ls,ls->current),next(ls))
+static void save(LexState*ls,int c){
+Mbuffer*b=ls->buff;
+if(b->n+1>b->buffsize){
+size_t newsize;
+if(b->buffsize>=((size_t)(~(size_t)0)-2)/2)
+luaX_lexerror(ls,"lexical element too long",0);
+newsize=b->buffsize*2;
+luaZ_resizebuffer(ls->L,b,newsize);
+}
+b->buffer[b->n++]=cast(char,c);
+}
+static void luaX_init(lua_State*L){
+int i;
+for(i=0;i<(cast(int,TK_WHILE-257+1));i++){
+TString*ts=luaS_new(L,luaX_tokens[i]);
+luaS_fix(ts);
+ts->tsv.reserved=cast_byte(i+1);
+}
+}
+static const char*luaX_token2str(LexState*ls,int token){
+if(token<257){
+return(iscntrl(token))?luaO_pushfstring(ls->L,"char(%d)",token):
+luaO_pushfstring(ls->L,"%c",token);
+}
+else
+return luaX_tokens[token-257];
+}
+static const char*txtToken(LexState*ls,int token){
+switch(token){
+case TK_NAME:
+case TK_STRING:
+case TK_NUMBER:
+save(ls,'\0');
+return luaZ_buffer(ls->buff);
+default:
+return luaX_token2str(ls,token);
+}
+}
+static void luaX_lexerror(LexState*ls,const char*msg,int token){
+char buff[80];
+luaO_chunkid(buff,getstr(ls->source),80);
+msg=luaO_pushfstring(ls->L,"%s:%d: %s",buff,ls->linenumber,msg);
+if(token)
+luaO_pushfstring(ls->L,"%s near "LUA_QL("%s"),msg,txtToken(ls,token));
+luaD_throw(ls->L,3);
+}
+static void luaX_syntaxerror(LexState*ls,const char*msg){
+luaX_lexerror(ls,msg,ls->t.token);
+}
+static TString*luaX_newstring(LexState*ls,const char*str,size_t l){
+lua_State*L=ls->L;
+TString*ts=luaS_newlstr(L,str,l);
+TValue*o=luaH_setstr(L,ls->fs->h,ts);
+if(ttisnil(o)){
+setbvalue(o,1);
+luaC_checkGC(L);
+}
+return ts;
+}
+static void inclinenumber(LexState*ls){
+int old=ls->current;
+next(ls);
+if(currIsNewline(ls)&&ls->current!=old)
+next(ls);
+if(++ls->linenumber>=(INT_MAX-2))
+luaX_syntaxerror(ls,"chunk has too many lines");
+}
+static void luaX_setinput(lua_State*L,LexState*ls,ZIO*z,TString*source){
+ls->decpoint='.';
+ls->L=L;
+ls->lookahead.token=TK_EOS;
+ls->z=z;
+ls->fs=NULL;
+ls->linenumber=1;
+ls->lastline=1;
+ls->source=source;
+luaZ_resizebuffer(ls->L,ls->buff,32);
+next(ls);
+}
+static int check_next(LexState*ls,const char*set){
+if(!strchr(set,ls->current))
+return 0;
+save_and_next(ls);
+return 1;
+}
+static void buffreplace(LexState*ls,char from,char to){
+size_t n=luaZ_bufflen(ls->buff);
+char*p=luaZ_buffer(ls->buff);
+while(n--)
+if(p[n]==from)p[n]=to;
+}
+static void read_numeral(LexState*ls,SemInfo*seminfo){
+do{
+save_and_next(ls);
+}while(isdigit(ls->current)||ls->current=='.');
+if(check_next(ls,"Ee"))
+check_next(ls,"+-");
+while(isalnum(ls->current)||ls->current=='_')
+save_and_next(ls);
+save(ls,'\0');
+buffreplace(ls,'.',ls->decpoint);
+if(!luaO_str2d(luaZ_buffer(ls->buff),&seminfo->r))
+luaX_lexerror(ls,"malformed number",TK_NUMBER);
+}
+static int skip_sep(LexState*ls){
+int count=0;
+int s=ls->current;
+save_and_next(ls);
+while(ls->current=='='){
+save_and_next(ls);
+count++;
+}
+return(ls->current==s)?count:(-count)-1;
+}
+static void read_long_string(LexState*ls,SemInfo*seminfo,int sep){
+int cont=0;
+(void)(cont);
+save_and_next(ls);
+if(currIsNewline(ls))
+inclinenumber(ls);
+for(;;){
+switch(ls->current){
+case(-1):
+luaX_lexerror(ls,(seminfo)?"unfinished long string":
+"unfinished long comment",TK_EOS);
+break;
+case']':{
+if(skip_sep(ls)==sep){
+save_and_next(ls);
+goto endloop;
+}
+break;
+}
+case'\n':
+case'\r':{
+save(ls,'\n');
+inclinenumber(ls);
+if(!seminfo)luaZ_resetbuffer(ls->buff);
+break;
+}
+default:{
+if(seminfo)save_and_next(ls);
+else next(ls);
+}
+}
+}endloop:
+if(seminfo)
+seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+(2+sep),
+luaZ_bufflen(ls->buff)-2*(2+sep));
+}
+static void read_string(LexState*ls,int del,SemInfo*seminfo){
+save_and_next(ls);
+while(ls->current!=del){
+switch(ls->current){
+case(-1):
+luaX_lexerror(ls,"unfinished string",TK_EOS);
+continue;
+case'\n':
+case'\r':
+luaX_lexerror(ls,"unfinished string",TK_STRING);
+continue;
+case'\\':{
+int c;
+next(ls);
+switch(ls->current){
+case'a':c='\a';break;
+case'b':c='\b';break;
+case'f':c='\f';break;
+case'n':c='\n';break;
+case'r':c='\r';break;
+case't':c='\t';break;
+case'v':c='\v';break;
+case'\n':
+case'\r':save(ls,'\n');inclinenumber(ls);continue;
+case(-1):continue;
+default:{
+if(!isdigit(ls->current))
+save_and_next(ls);
+else{
+int i=0;
+c=0;
+do{
+c=10*c+(ls->current-'0');
+next(ls);
+}while(++i<3&&isdigit(ls->current));
+if(c>UCHAR_MAX)
+luaX_lexerror(ls,"escape sequence too large",TK_STRING);
+save(ls,c);
+}
+continue;
+}
+}
+save(ls,c);
+next(ls);
+continue;
+}
+default:
+save_and_next(ls);
+}
+}
+save_and_next(ls);
+seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+1,
+luaZ_bufflen(ls->buff)-2);
+}
+static int llex(LexState*ls,SemInfo*seminfo){
+luaZ_resetbuffer(ls->buff);
+for(;;){
+switch(ls->current){
+case'\n':
+case'\r':{
+inclinenumber(ls);
+continue;
+}
+case'-':{
+next(ls);
+if(ls->current!='-')return'-';
+next(ls);
+if(ls->current=='['){
+int sep=skip_sep(ls);
+luaZ_resetbuffer(ls->buff);
+if(sep>=0){
+read_long_string(ls,NULL,sep);
+luaZ_resetbuffer(ls->buff);
+continue;
+}
+}
+while(!currIsNewline(ls)&&ls->current!=(-1))
+next(ls);
+continue;
+}
+case'[':{
+int sep=skip_sep(ls);
+if(sep>=0){
+read_long_string(ls,seminfo,sep);
+return TK_STRING;
+}
+else if(sep==-1)return'[';
+else luaX_lexerror(ls,"invalid long string delimiter",TK_STRING);
+}
+case'=':{
+next(ls);
+if(ls->current!='=')return'=';
+else{next(ls);return TK_EQ;}
+}
+case'<':{
+next(ls);
+if(ls->current!='=')return'<';
+else{next(ls);return TK_LE;}
+}
+case'>':{
+next(ls);
+if(ls->current!='=')return'>';
+else{next(ls);return TK_GE;}
+}
+case'~':{
+next(ls);
+if(ls->current!='=')return'~';
+else{next(ls);return TK_NE;}
+}
+case'"':
+case'\'':{
+read_string(ls,ls->current,seminfo);
+return TK_STRING;
+}
+case'.':{
+save_and_next(ls);
+if(check_next(ls,".")){
+if(check_next(ls,"."))
+return TK_DOTS;
+else return TK_CONCAT;
+}
+else if(!isdigit(ls->current))return'.';
+else{
+read_numeral(ls,seminfo);
+return TK_NUMBER;
+}
+}
+case(-1):{
+return TK_EOS;
+}
+default:{
+if(isspace(ls->current)){
+next(ls);
+continue;
+}
+else if(isdigit(ls->current)){
+read_numeral(ls,seminfo);
+return TK_NUMBER;
+}
+else if(isalpha(ls->current)||ls->current=='_'){
+TString*ts;
+do{
+save_and_next(ls);
+}while(isalnum(ls->current)||ls->current=='_');
+ts=luaX_newstring(ls,luaZ_buffer(ls->buff),
+luaZ_bufflen(ls->buff));
+if(ts->tsv.reserved>0)
+return ts->tsv.reserved-1+257;
+else{
+seminfo->ts=ts;
+return TK_NAME;
+}
+}
+else{
+int c=ls->current;
+next(ls);
+return c;
+}
+}
+}
+}
+}
+static void luaX_next(LexState*ls){
+ls->lastline=ls->linenumber;
+if(ls->lookahead.token!=TK_EOS){
+ls->t=ls->lookahead;
+ls->lookahead.token=TK_EOS;
+}
+else
+ls->t.token=llex(ls,&ls->t.seminfo);
+}
+static void luaX_lookahead(LexState*ls){
+ls->lookahead.token=llex(ls,&ls->lookahead.seminfo);
+}
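+/* Hand-written scanner: llex returns single characters or TK_* tokens,
+   reserved words are interned strings tagged via tsv.reserved in luaX_init,
+   and luaX_lookahead buffers at most one token ahead. */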
+#define hasjumps(e)((e)->t!=(e)->f)
+static int isnumeral(expdesc*e){
+return(e->k==VKNUM&&e->t==(-1)&&e->f==(-1));
+}
+static void luaK_nil(FuncState*fs,int from,int n){
+Instruction*previous;
+if(fs->pc>fs->lasttarget){
+if(fs->pc==0){
+if(from>=fs->nactvar)
+return;
+}
+else{
+previous=&fs->f->code[fs->pc-1];
+if(GET_OPCODE(*previous)==OP_LOADNIL){
+int pfrom=GETARG_A(*previous);
+int pto=GETARG_B(*previous);
+if(pfrom<=from&&from<=pto+1){
+if(from+n-1>pto)
+SETARG_B(*previous,from+n-1);
+return;
+}
+}
+}
+}
+luaK_codeABC(fs,OP_LOADNIL,from,from+n-1,0);
+}
+static int luaK_jump(FuncState*fs){
+int jpc=fs->jpc;
+int j;
+fs->jpc=(-1);
+j=luaK_codeAsBx(fs,OP_JMP,0,(-1));
+luaK_concat(fs,&j,jpc);
+return j;
+}
+static void luaK_ret(FuncState*fs,int first,int nret){
+luaK_codeABC(fs,OP_RETURN,first,nret+1,0);
+}
+static int condjump(FuncState*fs,OpCode op,int A,int B,int C){
+luaK_codeABC(fs,op,A,B,C);
+return luaK_jump(fs);
+}
+static void fixjump(FuncState*fs,int pc,int dest){
+Instruction*jmp=&fs->f->code[pc];
+int offset=dest-(pc+1);
+if(abs(offset)>(((1<<(9+9))-1)>>1))
+luaX_syntaxerror(fs->ls,"control structure too long");
+SETARG_sBx(*jmp,offset);
+}
+static int luaK_getlabel(FuncState*fs){
+fs->lasttarget=fs->pc;
+return fs->pc;
+}
+static int getjump(FuncState*fs,int pc){
+int offset=GETARG_sBx(fs->f->code[pc]);
+if(offset==(-1))
+return(-1);
+else
+return(pc+1)+offset;
+}
+static Instruction*getjumpcontrol(FuncState*fs,int pc){
+Instruction*pi=&fs->f->code[pc];
+if(pc>=1&&testTMode(GET_OPCODE(*(pi-1))))
+return pi-1;
+else
+return pi;
+}
+static int need_value(FuncState*fs,int list){
+for(;list!=(-1);list=getjump(fs,list)){
+Instruction i=*getjumpcontrol(fs,list);
+if(GET_OPCODE(i)!=OP_TESTSET)return 1;
+}
+return 0;
+}
+static int patchtestreg(FuncState*fs,int node,int reg){
+Instruction*i=getjumpcontrol(fs,node);
+if(GET_OPCODE(*i)!=OP_TESTSET)
+return 0;
+if(reg!=((1<<8)-1)&&reg!=GETARG_B(*i))
+SETARG_A(*i,reg);
+else
+*i=CREATE_ABC(OP_TEST,GETARG_B(*i),0,GETARG_C(*i));
+return 1;
+}
+static void removevalues(FuncState*fs,int list){
+for(;list!=(-1);list=getjump(fs,list))
+patchtestreg(fs,list,((1<<8)-1));
+}
+static void patchlistaux(FuncState*fs,int list,int vtarget,int reg,
+int dtarget){
+while(list!=(-1)){
+int next=getjump(fs,list);
+if(patchtestreg(fs,list,reg))
+fixjump(fs,list,vtarget);
+else
+fixjump(fs,list,dtarget);
+list=next;
+}
+}
+static void dischargejpc(FuncState*fs){
+patchlistaux(fs,fs->jpc,fs->pc,((1<<8)-1),fs->pc);
+fs->jpc=(-1);
+}
+static void luaK_patchlist(FuncState*fs,int list,int target){
+if(target==fs->pc)
+luaK_patchtohere(fs,list);
+else{
+patchlistaux(fs,list,target,((1<<8)-1),target);
+}
+}
+static void luaK_patchtohere(FuncState*fs,int list){
+luaK_getlabel(fs);
+luaK_concat(fs,&fs->jpc,list);
+}
+static void luaK_concat(FuncState*fs,int*l1,int l2){
+if(l2==(-1))return;
+else if(*l1==(-1))
+*l1=l2;
+else{
+int list=*l1;
+int next;
+while((next=getjump(fs,list))!=(-1))
+list=next;
+fixjump(fs,list,l2);
+}
+}
+static void luaK_checkstack(FuncState*fs,int n){
+int newstack=fs->freereg+n;
+if(newstack>fs->f->maxstacksize){
+if(newstack>=250)
+luaX_syntaxerror(fs->ls,"function or expression too complex");
+fs->f->maxstacksize=cast_byte(newstack);
+}
+}
+static void luaK_reserveregs(FuncState*fs,int n){
+luaK_checkstack(fs,n);
+fs->freereg+=n;
+}
+static void freereg(FuncState*fs,int reg){
+if(!ISK(reg)&&reg>=fs->nactvar){
+fs->freereg--;
+}
+}
+static void freeexp(FuncState*fs,expdesc*e){
+if(e->k==VNONRELOC)
+freereg(fs,e->u.s.info);
+}
+static int addk(FuncState*fs,TValue*k,TValue*v){
+lua_State*L=fs->L;
+TValue*idx=luaH_set(L,fs->h,k);
+Proto*f=fs->f;
+int oldsize=f->sizek;
+if(ttisnumber(idx)){
+return cast_int(nvalue(idx));
+}
+else{
+setnvalue(idx,cast_num(fs->nk));
+luaM_growvector(L,f->k,fs->nk,f->sizek,TValue,
+((1<<(9+9))-1),"constant table overflow");
+while(oldsize<f->sizek)setnilvalue(&f->k[oldsize++]);
+setobj(L,&f->k[fs->nk],v);
+luaC_barrier(L,f,v);
+return fs->nk++;
+}
+}
+static int luaK_stringK(FuncState*fs,TString*s){
+TValue o;
+setsvalue(fs->L,&o,s);
+return addk(fs,&o,&o);
+}
+static int luaK_numberK(FuncState*fs,lua_Number r){
+TValue o;
+setnvalue(&o,r);
+return addk(fs,&o,&o);
+}
+static int boolK(FuncState*fs,int b){
+TValue o;
+setbvalue(&o,b);
+return addk(fs,&o,&o);
+}
+static int nilK(FuncState*fs){
+TValue k,v;
+setnilvalue(&v);
+sethvalue(fs->L,&k,fs->h);
+return addk(fs,&k,&v);
+}
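+/* addk deduplicates constants: fs->h maps each constant value to its index in
+   f->k, so stringK/numberK/boolK/nilK reuse existing slots when possible. */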
+static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults){
+if(e->k==VCALL){
+SETARG_C(getcode(fs,e),nresults+1);
+}
+else if(e->k==VVARARG){
+SETARG_B(getcode(fs,e),nresults+1);
+SETARG_A(getcode(fs,e),fs->freereg);
+luaK_reserveregs(fs,1);
+}
+}
+static void luaK_setoneret(FuncState*fs,expdesc*e){
+if(e->k==VCALL){
+e->k=VNONRELOC;
+e->u.s.info=GETARG_A(getcode(fs,e));
+}
+else if(e->k==VVARARG){
+SETARG_B(getcode(fs,e),2);
+e->k=VRELOCABLE;
+}
+}
+static void luaK_dischargevars(FuncState*fs,expdesc*e){
+switch(e->k){
+case VLOCAL:{
+e->k=VNONRELOC;
+break;
+}
+case VUPVAL:{
+e->u.s.info=luaK_codeABC(fs,OP_GETUPVAL,0,e->u.s.info,0);
+e->k=VRELOCABLE;
+break;
+}
+case VGLOBAL:{
+e->u.s.info=luaK_codeABx(fs,OP_GETGLOBAL,0,e->u.s.info);
+e->k=VRELOCABLE;
+break;
+}
+case VINDEXED:{
+freereg(fs,e->u.s.aux);
+freereg(fs,e->u.s.info);
+e->u.s.info=luaK_codeABC(fs,OP_GETTABLE,0,e->u.s.info,e->u.s.aux);
+e->k=VRELOCABLE;
+break;
+}
+case VVARARG:
+case VCALL:{
+luaK_setoneret(fs,e);
+break;
+}
+default:break;
+}
+}
+static int code_label(FuncState*fs,int A,int b,int jump){
+luaK_getlabel(fs);
+return luaK_codeABC(fs,OP_LOADBOOL,A,b,jump);
+}
+static void discharge2reg(FuncState*fs,expdesc*e,int reg){
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VNIL:{
+luaK_nil(fs,reg,1);
+break;
+}
+case VFALSE:case VTRUE:{
+luaK_codeABC(fs,OP_LOADBOOL,reg,e->k==VTRUE,0);
+break;
+}
+case VK:{
+luaK_codeABx(fs,OP_LOADK,reg,e->u.s.info);
+break;
+}
+case VKNUM:{
+luaK_codeABx(fs,OP_LOADK,reg,luaK_numberK(fs,e->u.nval));
+break;
+}
+case VRELOCABLE:{
+Instruction*pc=&getcode(fs,e);
+SETARG_A(*pc,reg);
+break;
+}
+case VNONRELOC:{
+if(reg!=e->u.s.info)
+luaK_codeABC(fs,OP_MOVE,reg,e->u.s.info,0);
+break;
+}
+default:{
+return;
+}
+}
+e->u.s.info=reg;
+e->k=VNONRELOC;
+}
+static void discharge2anyreg(FuncState*fs,expdesc*e){
+if(e->k!=VNONRELOC){
+luaK_reserveregs(fs,1);
+discharge2reg(fs,e,fs->freereg-1);
+}
+}
+static void exp2reg(FuncState*fs,expdesc*e,int reg){
+discharge2reg(fs,e,reg);
+if(e->k==VJMP)
+luaK_concat(fs,&e->t,e->u.s.info);
+if(hasjumps(e)){
+int final;
+int p_f=(-1);
+int p_t=(-1);
+if(need_value(fs,e->t)||need_value(fs,e->f)){
+int fj=(e->k==VJMP)?(-1):luaK_jump(fs);
+p_f=code_label(fs,reg,0,1);
+p_t=code_label(fs,reg,1,0);
+luaK_patchtohere(fs,fj);
+}
+final=luaK_getlabel(fs);
+patchlistaux(fs,e->f,final,reg,p_f);
+patchlistaux(fs,e->t,final,reg,p_t);
+}
+e->f=e->t=(-1);
+e->u.s.info=reg;
+e->k=VNONRELOC;
+}
+static void luaK_exp2nextreg(FuncState*fs,expdesc*e){
+luaK_dischargevars(fs,e);
+freeexp(fs,e);
+luaK_reserveregs(fs,1);
+exp2reg(fs,e,fs->freereg-1);
+}
+static int luaK_exp2anyreg(FuncState*fs,expdesc*e){
+luaK_dischargevars(fs,e);
+if(e->k==VNONRELOC){
+if(!hasjumps(e))return e->u.s.info;
+if(e->u.s.info>=fs->nactvar){
+exp2reg(fs,e,e->u.s.info);
+return e->u.s.info;
+}
+}
+luaK_exp2nextreg(fs,e);
+return e->u.s.info;
+}
+static void luaK_exp2val(FuncState*fs,expdesc*e){
+if(hasjumps(e))
+luaK_exp2anyreg(fs,e);
+else
+luaK_dischargevars(fs,e);
+}
+static int luaK_exp2RK(FuncState*fs,expdesc*e){
+luaK_exp2val(fs,e);
+switch(e->k){
+case VKNUM:
+case VTRUE:
+case VFALSE:
+case VNIL:{
+if(fs->nk<=((1<<(9-1))-1)){
+e->u.s.info=(e->k==VNIL)?nilK(fs):
+(e->k==VKNUM)?luaK_numberK(fs,e->u.nval):
+boolK(fs,(e->k==VTRUE));
+e->k=VK;
+return RKASK(e->u.s.info);
+}
+else break;
+}
+case VK:{
+if(e->u.s.info<=((1<<(9-1))-1))
+return RKASK(e->u.s.info);
+else break;
+}
+default:break;
+}
+return luaK_exp2anyreg(fs,e);
+}
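+/* luaK_exp2RK encodes an operand either as a register or, when it fits, as a
+   constant index flagged with RKASK for the RK fields of ABC instructions. */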
+static void luaK_storevar(FuncState*fs,expdesc*var,expdesc*ex){
+switch(var->k){
+case VLOCAL:{
+freeexp(fs,ex);
+exp2reg(fs,ex,var->u.s.info);
+return;
+}
+case VUPVAL:{
+int e=luaK_exp2anyreg(fs,ex);
+luaK_codeABC(fs,OP_SETUPVAL,e,var->u.s.info,0);
+break;
+}
+case VGLOBAL:{
+int e=luaK_exp2anyreg(fs,ex);
+luaK_codeABx(fs,OP_SETGLOBAL,e,var->u.s.info);
+break;
+}
+case VINDEXED:{
+int e=luaK_exp2RK(fs,ex);
+luaK_codeABC(fs,OP_SETTABLE,var->u.s.info,var->u.s.aux,e);
+break;
+}
+default:{
+break;
+}
+}
+freeexp(fs,ex);
+}
+static void luaK_self(FuncState*fs,expdesc*e,expdesc*key){
+int func;
+luaK_exp2anyreg(fs,e);
+freeexp(fs,e);
+func=fs->freereg;
+luaK_reserveregs(fs,2);
+luaK_codeABC(fs,OP_SELF,func,e->u.s.info,luaK_exp2RK(fs,key));
+freeexp(fs,key);
+e->u.s.info=func;
+e->k=VNONRELOC;
+}
+static void invertjump(FuncState*fs,expdesc*e){
+Instruction*pc=getjumpcontrol(fs,e->u.s.info);
+SETARG_A(*pc,!(GETARG_A(*pc)));
+}
+static int jumponcond(FuncState*fs,expdesc*e,int cond){
+if(e->k==VRELOCABLE){
+Instruction ie=getcode(fs,e);
+if(GET_OPCODE(ie)==OP_NOT){
+fs->pc--;
+return condjump(fs,OP_TEST,GETARG_B(ie),0,!cond);
+}
+}
+discharge2anyreg(fs,e);
+freeexp(fs,e);
+return condjump(fs,OP_TESTSET,((1<<8)-1),e->u.s.info,cond);
+}
+static void luaK_goiftrue(FuncState*fs,expdesc*e){
+int pc;
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VK:case VKNUM:case VTRUE:{
+pc=(-1);
+break;
+}
+case VJMP:{
+invertjump(fs,e);
+pc=e->u.s.info;
+break;
+}
+default:{
+pc=jumponcond(fs,e,0);
+break;
+}
+}
+luaK_concat(fs,&e->f,pc);
+luaK_patchtohere(fs,e->t);
+e->t=(-1);
+}
+static void luaK_goiffalse(FuncState*fs,expdesc*e){
+int pc;
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VNIL:case VFALSE:{
+pc=(-1);
+break;
+}
+case VJMP:{
+pc=e->u.s.info;
+break;
+}
+default:{
+pc=jumponcond(fs,e,1);
+break;
+}
+}
+luaK_concat(fs,&e->t,pc);
+luaK_patchtohere(fs,e->f);
+e->f=(-1);
+}
+static void codenot(FuncState*fs,expdesc*e){
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VNIL:case VFALSE:{
+e->k=VTRUE;
+break;
+}
+case VK:case VKNUM:case VTRUE:{
+e->k=VFALSE;
+break;
+}
+case VJMP:{
+invertjump(fs,e);
+break;
+}
+case VRELOCABLE:
+case VNONRELOC:{
+discharge2anyreg(fs,e);
+freeexp(fs,e);
+e->u.s.info=luaK_codeABC(fs,OP_NOT,0,e->u.s.info,0);
+e->k=VRELOCABLE;
+break;
+}
+default:{
+break;
+}
+}
+{int temp=e->f;e->f=e->t;e->t=temp;}
+removevalues(fs,e->f);
+removevalues(fs,e->t);
+}
+static void luaK_indexed(FuncState*fs,expdesc*t,expdesc*k){
+t->u.s.aux=luaK_exp2RK(fs,k);
+t->k=VINDEXED;
+}
+static int constfolding(OpCode op,expdesc*e1,expdesc*e2){
+lua_Number v1,v2,r;
+if(!isnumeral(e1)||!isnumeral(e2))return 0;
+v1=e1->u.nval;
+v2=e2->u.nval;
+switch(op){
+case OP_ADD:r=luai_numadd(v1,v2);break;
+case OP_SUB:r=luai_numsub(v1,v2);break;
+case OP_MUL:r=luai_nummul(v1,v2);break;
+case OP_DIV:
+if(v2==0)return 0;
+r=luai_numdiv(v1,v2);break;
+case OP_MOD:
+if(v2==0)return 0;
+r=luai_nummod(v1,v2);break;
+case OP_POW:r=luai_numpow(v1,v2);break;
+case OP_UNM:r=luai_numunm(v1);break;
+case OP_LEN:return 0;
+default:r=0;break;
+}
+if(luai_numisnan(r))return 0;
+e1->u.nval=r;
+return 1;
+}
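+/* constfolding evaluates arithmetic on two numeric literals at compile time,
+   refusing division/modulo by zero and NaN results so the corresponding
+   runtime behavior is preserved. */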
+static void codearith(FuncState*fs,OpCode op,expdesc*e1,expdesc*e2){
+if(constfolding(op,e1,e2))
+return;
+else{
+int o2=(op!=OP_UNM&&op!=OP_LEN)?luaK_exp2RK(fs,e2):0;
+int o1=luaK_exp2RK(fs,e1);
+if(o1>o2){
+freeexp(fs,e1);
+freeexp(fs,e2);
+}
+else{
+freeexp(fs,e2);
+freeexp(fs,e1);
+}
+e1->u.s.info=luaK_codeABC(fs,op,0,o1,o2);
+e1->k=VRELOCABLE;
+}
+}
+static void codecomp(FuncState*fs,OpCode op,int cond,expdesc*e1,
+expdesc*e2){
+int o1=luaK_exp2RK(fs,e1);
+int o2=luaK_exp2RK(fs,e2);
+freeexp(fs,e2);
+freeexp(fs,e1);
+if(cond==0&&op!=OP_EQ){
+int temp;
+temp=o1;o1=o2;o2=temp;
+cond=1;
+}
+e1->u.s.info=condjump(fs,op,cond,o1,o2);
+e1->k=VJMP;
+}
+static void luaK_prefix(FuncState*fs,UnOpr op,expdesc*e){
+expdesc e2;
+e2.t=e2.f=(-1);e2.k=VKNUM;e2.u.nval=0;
+switch(op){
+case OPR_MINUS:{
+if(!isnumeral(e))
+luaK_exp2anyreg(fs,e);
+codearith(fs,OP_UNM,e,&e2);
+break;
+}
+case OPR_NOT:codenot(fs,e);break;
+case OPR_LEN:{
+luaK_exp2anyreg(fs,e);
+codearith(fs,OP_LEN,e,&e2);
+break;
+}
+default:;
+}
+}
+static void luaK_infix(FuncState*fs,BinOpr op,expdesc*v){
+switch(op){
+case OPR_AND:{
+luaK_goiftrue(fs,v);
+break;
+}
+case OPR_OR:{
+luaK_goiffalse(fs,v);
+break;
+}
+case OPR_CONCAT:{
+luaK_exp2nextreg(fs,v);
+break;
+}
+case OPR_ADD:case OPR_SUB:case OPR_MUL:case OPR_DIV:
+case OPR_MOD:case OPR_POW:{
+if(!isnumeral(v))luaK_exp2RK(fs,v);
+break;
+}
+default:{
+luaK_exp2RK(fs,v);
+break;
+}
+}
+}
+static void luaK_posfix(FuncState*fs,BinOpr op,expdesc*e1,expdesc*e2){
+switch(op){
+case OPR_AND:{
+luaK_dischargevars(fs,e2);
+luaK_concat(fs,&e2->f,e1->f);
+*e1=*e2;
+break;
+}
+case OPR_OR:{
+luaK_dischargevars(fs,e2);
+luaK_concat(fs,&e2->t,e1->t);
+*e1=*e2;
+break;
+}
+case OPR_CONCAT:{
+luaK_exp2val(fs,e2);
+if(e2->k==VRELOCABLE&&GET_OPCODE(getcode(fs,e2))==OP_CONCAT){
+freeexp(fs,e1);
+SETARG_B(getcode(fs,e2),e1->u.s.info);
+e1->k=VRELOCABLE;e1->u.s.info=e2->u.s.info;
+}
+else{
+luaK_exp2nextreg(fs,e2);
+codearith(fs,OP_CONCAT,e1,e2);
+}
+break;
+}
+case OPR_ADD:codearith(fs,OP_ADD,e1,e2);break;
+case OPR_SUB:codearith(fs,OP_SUB,e1,e2);break;
+case OPR_MUL:codearith(fs,OP_MUL,e1,e2);break;
+case OPR_DIV:codearith(fs,OP_DIV,e1,e2);break;
+case OPR_MOD:codearith(fs,OP_MOD,e1,e2);break;
+case OPR_POW:codearith(fs,OP_POW,e1,e2);break;
+case OPR_EQ:codecomp(fs,OP_EQ,1,e1,e2);break;
+case OPR_NE:codecomp(fs,OP_EQ,0,e1,e2);break;
+case OPR_LT:codecomp(fs,OP_LT,1,e1,e2);break;
+case OPR_LE:codecomp(fs,OP_LE,1,e1,e2);break;
+case OPR_GT:codecomp(fs,OP_LT,0,e1,e2);break;
+case OPR_GE:codecomp(fs,OP_LE,0,e1,e2);break;
+default:;
+}
+}
+static void luaK_fixline(FuncState*fs,int line){
+fs->f->lineinfo[fs->pc-1]=line;
+}
+static int luaK_code(FuncState*fs,Instruction i,int line){
+Proto*f=fs->f;
+dischargejpc(fs);
+luaM_growvector(fs->L,f->code,fs->pc,f->sizecode,Instruction,
+(INT_MAX-2),"code size overflow");
+f->code[fs->pc]=i;
+luaM_growvector(fs->L,f->lineinfo,fs->pc,f->sizelineinfo,int,
+(INT_MAX-2),"code size overflow");
+f->lineinfo[fs->pc]=line;
+return fs->pc++;
+}
+static int luaK_codeABC(FuncState*fs,OpCode o,int a,int b,int c){
+return luaK_code(fs,CREATE_ABC(o,a,b,c),fs->ls->lastline);
+}
+static int luaK_codeABx(FuncState*fs,OpCode o,int a,unsigned int bc){
+return luaK_code(fs,CREATE_ABx(o,a,bc),fs->ls->lastline);
+}
+static void luaK_setlist(FuncState*fs,int base,int nelems,int tostore){
+int c=(nelems-1)/50+1;
+int b=(tostore==(-1))?0:tostore;
+if(c<=((1<<9)-1))
+luaK_codeABC(fs,OP_SETLIST,base,b,c);
+else{
+luaK_codeABC(fs,OP_SETLIST,base,b,0);
+luaK_code(fs,cast(Instruction,c),fs->ls->lastline);
+}
+fs->freereg=base+1;
+}
+#define hasmultret(k)((k)==VCALL||(k)==VVARARG)
+#define getlocvar(fs,i)((fs)->f->locvars[(fs)->actvar[i]])
+#define luaY_checklimit(fs,v,l,m)if((v)>(l))errorlimit(fs,l,m)
+typedef struct BlockCnt{
+struct BlockCnt*previous;
+int breaklist;
+lu_byte nactvar;
+lu_byte upval;
+lu_byte isbreakable;
+}BlockCnt;
+static void chunk(LexState*ls);
+static void expr(LexState*ls,expdesc*v);
+static void anchor_token(LexState*ls){
+if(ls->t.token==TK_NAME||ls->t.token==TK_STRING){
+TString*ts=ls->t.seminfo.ts;
+luaX_newstring(ls,getstr(ts),ts->tsv.len);
+}
+}
+static void error_expected(LexState*ls,int token){
+luaX_syntaxerror(ls,
+luaO_pushfstring(ls->L,LUA_QL("%s")" expected",luaX_token2str(ls,token)));
+}
+static void errorlimit(FuncState*fs,int limit,const char*what){
+const char*msg=(fs->f->linedefined==0)?
+luaO_pushfstring(fs->L,"main function has more than %d %s",limit,what):
+luaO_pushfstring(fs->L,"function at line %d has more than %d %s",
+fs->f->linedefined,limit,what);
+luaX_lexerror(fs->ls,msg,0);
+}
+static int testnext(LexState*ls,int c){
+if(ls->t.token==c){
+luaX_next(ls);
+return 1;
+}
+else return 0;
+}
+static void check(LexState*ls,int c){
+if(ls->t.token!=c)
+error_expected(ls,c);
+}
+static void checknext(LexState*ls,int c){
+check(ls,c);
+luaX_next(ls);
+}
+#define check_condition(ls,c,msg){if(!(c))luaX_syntaxerror(ls,msg);}
+static void check_match(LexState*ls,int what,int who,int where){
+if(!testnext(ls,what)){
+if(where==ls->linenumber)
+error_expected(ls,what);
+else{
+luaX_syntaxerror(ls,luaO_pushfstring(ls->L,
+LUA_QL("%s")" expected (to close "LUA_QL("%s")" at line %d)",
+luaX_token2str(ls,what),luaX_token2str(ls,who),where));
+}
+}
+}
+static TString*str_checkname(LexState*ls){
+TString*ts;
+check(ls,TK_NAME);
+ts=ls->t.seminfo.ts;
+luaX_next(ls);
+return ts;
+}
+static void init_exp(expdesc*e,expkind k,int i){
+e->f=e->t=(-1);
+e->k=k;
+e->u.s.info=i;
+}
+static void codestring(LexState*ls,expdesc*e,TString*s){
+init_exp(e,VK,luaK_stringK(ls->fs,s));
+}
+static void checkname(LexState*ls,expdesc*e){
+codestring(ls,e,str_checkname(ls));
+}
+static int registerlocalvar(LexState*ls,TString*varname){
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+int oldsize=f->sizelocvars;
+luaM_growvector(ls->L,f->locvars,fs->nlocvars,f->sizelocvars,
+LocVar,SHRT_MAX,"too many local variables");
+while(oldsize<f->sizelocvars)f->locvars[oldsize++].varname=NULL;
+f->locvars[fs->nlocvars].varname=varname;
+luaC_objbarrier(ls->L,f,varname);
+return fs->nlocvars++;
+}
+#define new_localvarliteral(ls,v,n)new_localvar(ls,luaX_newstring(ls,""v,(sizeof(v)/sizeof(char))-1),n)
+static void new_localvar(LexState*ls,TString*name,int n){
+FuncState*fs=ls->fs;
+luaY_checklimit(fs,fs->nactvar+n+1,200,"local variables");
+fs->actvar[fs->nactvar+n]=cast(unsigned short,registerlocalvar(ls,name));
+}
+static void adjustlocalvars(LexState*ls,int nvars){
+FuncState*fs=ls->fs;
+fs->nactvar=cast_byte(fs->nactvar+nvars);
+for(;nvars;nvars--){
+getlocvar(fs,fs->nactvar-nvars).startpc=fs->pc;
+}
+}
+static void removevars(LexState*ls,int tolevel){
+FuncState*fs=ls->fs;
+while(fs->nactvar>tolevel)
+getlocvar(fs,--fs->nactvar).endpc=fs->pc;
+}
+static int indexupvalue(FuncState*fs,TString*name,expdesc*v){
+int i;
+Proto*f=fs->f;
+int oldsize=f->sizeupvalues;
+for(i=0;i<f->nups;i++){
+if(fs->upvalues[i].k==v->k&&fs->upvalues[i].info==v->u.s.info){
+return i;
+}
+}
+luaY_checklimit(fs,f->nups+1,60,"upvalues");
+luaM_growvector(fs->L,f->upvalues,f->nups,f->sizeupvalues,
+TString*,(INT_MAX-2),"");
+while(oldsize<f->sizeupvalues)f->upvalues[oldsize++]=NULL;
+f->upvalues[f->nups]=name;
+luaC_objbarrier(fs->L,f,name);
+fs->upvalues[f->nups].k=cast_byte(v->k);
+fs->upvalues[f->nups].info=cast_byte(v->u.s.info);
+return f->nups++;
+}
+static int searchvar(FuncState*fs,TString*n){
+int i;
+for(i=fs->nactvar-1;i>=0;i--){
+if(n==getlocvar(fs,i).varname)
+return i;
+}
+return-1;
+}
+static void markupval(FuncState*fs,int level){
+BlockCnt*bl=fs->bl;
+while(bl&&bl->nactvar>level)bl=bl->previous;
+if(bl)bl->upval=1;
+}
+static int singlevaraux(FuncState*fs,TString*n,expdesc*var,int base){
+if(fs==NULL){
+init_exp(var,VGLOBAL,((1<<8)-1));
+return VGLOBAL;
+}
+else{
+int v=searchvar(fs,n);
+if(v>=0){
+init_exp(var,VLOCAL,v);
+if(!base)
+markupval(fs,v);
+return VLOCAL;
+}
+else{
+if(singlevaraux(fs->prev,n,var,0)==VGLOBAL)
+return VGLOBAL;
+var->u.s.info=indexupvalue(fs,n,var);
+var->k=VUPVAL;
+return VUPVAL;
+}
+}
+}
+static void singlevar(LexState*ls,expdesc*var){
+TString*varname=str_checkname(ls);
+FuncState*fs=ls->fs;
+if(singlevaraux(fs,varname,var,1)==VGLOBAL)
+var->u.s.info=luaK_stringK(fs,varname);
+}
+static void adjust_assign(LexState*ls,int nvars,int nexps,expdesc*e){
+FuncState*fs=ls->fs;
+int extra=nvars-nexps;
+if(hasmultret(e->k)){
+extra++;
+if(extra<0)extra=0;
+luaK_setreturns(fs,e,extra);
+if(extra>1)luaK_reserveregs(fs,extra-1);
+}
+else{
+if(e->k!=VVOID)luaK_exp2nextreg(fs,e);
+if(extra>0){
+int reg=fs->freereg;
+luaK_reserveregs(fs,extra);
+luaK_nil(fs,reg,extra);
+}
+}
+}
+static void enterlevel(LexState*ls){
+if(++ls->L->nCcalls>200)
+luaX_lexerror(ls,"chunk has too many syntax levels",0);
+}
+#define leavelevel(ls)((ls)->L->nCcalls--)
+static void enterblock(FuncState*fs,BlockCnt*bl,lu_byte isbreakable){
+bl->breaklist=(-1);
+bl->isbreakable=isbreakable;
+bl->nactvar=fs->nactvar;
+bl->upval=0;
+bl->previous=fs->bl;
+fs->bl=bl;
+}
+static void leaveblock(FuncState*fs){
+BlockCnt*bl=fs->bl;
+fs->bl=bl->previous;
+removevars(fs->ls,bl->nactvar);
+if(bl->upval)
+luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0);
+fs->freereg=fs->nactvar;
+luaK_patchtohere(fs,bl->breaklist);
+}
+static void pushclosure(LexState*ls,FuncState*func,expdesc*v){
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+int oldsize=f->sizep;
+int i;
+luaM_growvector(ls->L,f->p,fs->np,f->sizep,Proto*,
+((1<<(9+9))-1),"constant table overflow");
+while(oldsize<f->sizep)f->p[oldsize++]=NULL;
+f->p[fs->np++]=func->f;
+luaC_objbarrier(ls->L,f,func->f);
+init_exp(v,VRELOCABLE,luaK_codeABx(fs,OP_CLOSURE,0,fs->np-1));
+for(i=0;i<func->f->nups;i++){
+OpCode o=(func->upvalues[i].k==VLOCAL)?OP_MOVE:OP_GETUPVAL;
+luaK_codeABC(fs,o,0,func->upvalues[i].info,0);
+}
+}
+static void open_func(LexState*ls,FuncState*fs){
+lua_State*L=ls->L;
+Proto*f=luaF_newproto(L);
+fs->f=f;
+fs->prev=ls->fs;
+fs->ls=ls;
+fs->L=L;
+ls->fs=fs;
+fs->pc=0;
+fs->lasttarget=-1;
+fs->jpc=(-1);
+fs->freereg=0;
+fs->nk=0;
+fs->np=0;
+fs->nlocvars=0;
+fs->nactvar=0;
+fs->bl=NULL;
+f->source=ls->source;
+f->maxstacksize=2;
+fs->h=luaH_new(L,0,0);
+sethvalue(L,L->top,fs->h);
+incr_top(L);
+setptvalue(L,L->top,f);
+incr_top(L);
+}
+static void close_func(LexState*ls){
+lua_State*L=ls->L;
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+removevars(ls,0);
+luaK_ret(fs,0,0);
+luaM_reallocvector(L,f->code,f->sizecode,fs->pc,Instruction);
+f->sizecode=fs->pc;
+luaM_reallocvector(L,f->lineinfo,f->sizelineinfo,fs->pc,int);
+f->sizelineinfo=fs->pc;
+luaM_reallocvector(L,f->k,f->sizek,fs->nk,TValue);
+f->sizek=fs->nk;
+luaM_reallocvector(L,f->p,f->sizep,fs->np,Proto*);
+f->sizep=fs->np;
+luaM_reallocvector(L,f->locvars,f->sizelocvars,fs->nlocvars,LocVar);
+f->sizelocvars=fs->nlocvars;
+luaM_reallocvector(L,f->upvalues,f->sizeupvalues,f->nups,TString*);
+f->sizeupvalues=f->nups;
+ls->fs=fs->prev;
+if(fs)anchor_token(ls);
+L->top-=2;
+}
+static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,const char*name){
+struct LexState lexstate;
+struct FuncState funcstate;
+lexstate.buff=buff;
+luaX_setinput(L,&lexstate,z,luaS_new(L,name));
+open_func(&lexstate,&funcstate);
+funcstate.f->is_vararg=2;
+luaX_next(&lexstate);
+chunk(&lexstate);
+check(&lexstate,TK_EOS);
+close_func(&lexstate);
+return funcstate.f;
+}
+static void field(LexState*ls,expdesc*v){
+FuncState*fs=ls->fs;
+expdesc key;
+luaK_exp2anyreg(fs,v);
+luaX_next(ls);
+checkname(ls,&key);
+luaK_indexed(fs,v,&key);
+}
+static void yindex(LexState*ls,expdesc*v){
+luaX_next(ls);
+expr(ls,v);
+luaK_exp2val(ls->fs,v);
+checknext(ls,']');
+}
+struct ConsControl{
+expdesc v;
+expdesc*t;
+int nh;
+int na;
+int tostore;
+};
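+/* Table constructor parsing: ConsControl counts pending array items (na/tostore,
+flushed in batches of 50 via luaK_setlist) and hash items (nh); constructor() later
+patches the OP_NEWTABLE operands B/C with the final counts encoded by luaO_int2fb. */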
+static void recfield(LexState*ls,struct ConsControl*cc){
+FuncState*fs=ls->fs;
+int reg=ls->fs->freereg;
+expdesc key,val;
+int rkkey;
+if(ls->t.token==TK_NAME){
+luaY_checklimit(fs,cc->nh,(INT_MAX-2),"items in a constructor");
+checkname(ls,&key);
+}
+else
+yindex(ls,&key);
+cc->nh++;
+checknext(ls,'=');
+rkkey=luaK_exp2RK(fs,&key);
+expr(ls,&val);
+luaK_codeABC(fs,OP_SETTABLE,cc->t->u.s.info,rkkey,luaK_exp2RK(fs,&val));
+fs->freereg=reg;
+}
+static void closelistfield(FuncState*fs,struct ConsControl*cc){
+if(cc->v.k==VVOID)return;
+luaK_exp2nextreg(fs,&cc->v);
+cc->v.k=VVOID;
+if(cc->tostore==50){
+luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore);
+cc->tostore=0;
+}
+}
+static void lastlistfield(FuncState*fs,struct ConsControl*cc){
+if(cc->tostore==0)return;
+if(hasmultret(cc->v.k)){
+luaK_setmultret(fs,&cc->v);
+luaK_setlist(fs,cc->t->u.s.info,cc->na,(-1));
+cc->na--;
+}
+else{
+if(cc->v.k!=VVOID)
+luaK_exp2nextreg(fs,&cc->v);
+luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore);
+}
+}
+static void listfield(LexState*ls,struct ConsControl*cc){
+expr(ls,&cc->v);
+luaY_checklimit(ls->fs,cc->na,(INT_MAX-2),"items in a constructor");
+cc->na++;
+cc->tostore++;
+}
+static void constructor(LexState*ls,expdesc*t){
+FuncState*fs=ls->fs;
+int line=ls->linenumber;
+int pc=luaK_codeABC(fs,OP_NEWTABLE,0,0,0);
+struct ConsControl cc;
+cc.na=cc.nh=cc.tostore=0;
+cc.t=t;
+init_exp(t,VRELOCABLE,pc);
+init_exp(&cc.v,VVOID,0);
+luaK_exp2nextreg(ls->fs,t);
+checknext(ls,'{');
+do{
+if(ls->t.token=='}')break;
+closelistfield(fs,&cc);
+switch(ls->t.token){
+case TK_NAME:{
+luaX_lookahead(ls);
+if(ls->lookahead.token!='=')
+listfield(ls,&cc);
+else
+recfield(ls,&cc);
+break;
+}
+case'[':{
+recfield(ls,&cc);
+break;
+}
+default:{
+listfield(ls,&cc);
+break;
+}
+}
+}while(testnext(ls,',')||testnext(ls,';'));
+check_match(ls,'}','{',line);
+lastlistfield(fs,&cc);
+SETARG_B(fs->f->code[pc],luaO_int2fb(cc.na));
+SETARG_C(fs->f->code[pc],luaO_int2fb(cc.nh));
+}
+static void parlist(LexState*ls){
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+int nparams=0;
+f->is_vararg=0;
+if(ls->t.token!=')'){
+do{
+switch(ls->t.token){
+case TK_NAME:{
+new_localvar(ls,str_checkname(ls),nparams++);
+break;
+}
+case TK_DOTS:{
+luaX_next(ls);
+f->is_vararg|=2;
+break;
+}
+default:luaX_syntaxerror(ls,"<name> or "LUA_QL("...")" expected");
+}
+}while(!f->is_vararg&&testnext(ls,','));
+}
+adjustlocalvars(ls,nparams);
+f->numparams=cast_byte(fs->nactvar-(f->is_vararg&1));
+luaK_reserveregs(fs,fs->nactvar);
+}
+static void body(LexState*ls,expdesc*e,int needself,int line){
+FuncState new_fs;
+open_func(ls,&new_fs);
+new_fs.f->linedefined=line;
+checknext(ls,'(');
+if(needself){
+new_localvarliteral(ls,"self",0);
+adjustlocalvars(ls,1);
+}
+parlist(ls);
+checknext(ls,')');
+chunk(ls);
+new_fs.f->lastlinedefined=ls->linenumber;
+check_match(ls,TK_END,TK_FUNCTION,line);
+close_func(ls);
+pushclosure(ls,&new_fs,e);
+}
+static int explist1(LexState*ls,expdesc*v){
+int n=1;
+expr(ls,v);
+while(testnext(ls,',')){
+luaK_exp2nextreg(ls->fs,v);
+expr(ls,v);
+n++;
+}
+return n;
+}
+static void funcargs(LexState*ls,expdesc*f){
+FuncState*fs=ls->fs;
+expdesc args;
+int base,nparams;
+int line=ls->linenumber;
+switch(ls->t.token){
+case'(':{
+if(line!=ls->lastline)
+luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)");
+luaX_next(ls);
+if(ls->t.token==')')
+args.k=VVOID;
+else{
+explist1(ls,&args);
+luaK_setmultret(fs,&args);
+}
+check_match(ls,')','(',line);
+break;
+}
+case'{':{
+constructor(ls,&args);
+break;
+}
+case TK_STRING:{
+codestring(ls,&args,ls->t.seminfo.ts);
+luaX_next(ls);
+break;
+}
+default:{
+luaX_syntaxerror(ls,"function arguments expected");
+return;
+}
+}
+base=f->u.s.info;
+if(hasmultret(args.k))
+nparams=(-1);
+else{
+if(args.k!=VVOID)
+luaK_exp2nextreg(fs,&args);
+nparams=fs->freereg-(base+1);
+}
+init_exp(f,VCALL,luaK_codeABC(fs,OP_CALL,base,nparams+1,2));
+luaK_fixline(fs,line);
+fs->freereg=base+1;
+}
+static void prefixexp(LexState*ls,expdesc*v){
+switch(ls->t.token){
+case'(':{
+int line=ls->linenumber;
+luaX_next(ls);
+expr(ls,v);
+check_match(ls,')','(',line);
+luaK_dischargevars(ls->fs,v);
+return;
+}
+case TK_NAME:{
+singlevar(ls,v);
+return;
+}
+default:{
+luaX_syntaxerror(ls,"unexpected symbol");
+return;
+}
+}
+}
+static void primaryexp(LexState*ls,expdesc*v){
+FuncState*fs=ls->fs;
+prefixexp(ls,v);
+for(;;){
+switch(ls->t.token){
+case'.':{
+field(ls,v);
+break;
+}
+case'[':{
+expdesc key;
+luaK_exp2anyreg(fs,v);
+yindex(ls,&key);
+luaK_indexed(fs,v,&key);
+break;
+}
+case':':{
+expdesc key;
+luaX_next(ls);
+checkname(ls,&key);
+luaK_self(fs,v,&key);
+funcargs(ls,v);
+break;
+}
+case'(':case TK_STRING:case'{':{
+luaK_exp2nextreg(fs,v);
+funcargs(ls,v);
+break;
+}
+default:return;
+}
+}
+}
+static void simpleexp(LexState*ls,expdesc*v){
+switch(ls->t.token){
+case TK_NUMBER:{
+init_exp(v,VKNUM,0);
+v->u.nval=ls->t.seminfo.r;
+break;
+}
+case TK_STRING:{
+codestring(ls,v,ls->t.seminfo.ts);
+break;
+}
+case TK_NIL:{
+init_exp(v,VNIL,0);
+break;
+}
+case TK_TRUE:{
+init_exp(v,VTRUE,0);
+break;
+}
+case TK_FALSE:{
+init_exp(v,VFALSE,0);
+break;
+}
+case TK_DOTS:{
+FuncState*fs=ls->fs;
+check_condition(ls,fs->f->is_vararg,
+"cannot use "LUA_QL("...")" outside a vararg function");
+fs->f->is_vararg&=~4;
+init_exp(v,VVARARG,luaK_codeABC(fs,OP_VARARG,0,1,0));
+break;
+}
+case'{':{
+constructor(ls,v);
+return;
+}
+case TK_FUNCTION:{
+luaX_next(ls);
+body(ls,v,0,ls->linenumber);
+return;
+}
+default:{
+primaryexp(ls,v);
+return;
+}
+}
+luaX_next(ls);
+}
+static UnOpr getunopr(int op){
+switch(op){
+case TK_NOT:return OPR_NOT;
+case'-':return OPR_MINUS;
+case'#':return OPR_LEN;
+default:return OPR_NOUNOPR;
+}
+}
+static BinOpr getbinopr(int op){
+switch(op){
+case'+':return OPR_ADD;
+case'-':return OPR_SUB;
+case'*':return OPR_MUL;
+case'/':return OPR_DIV;
+case'%':return OPR_MOD;
+case'^':return OPR_POW;
+case TK_CONCAT:return OPR_CONCAT;
+case TK_NE:return OPR_NE;
+case TK_EQ:return OPR_EQ;
+case'<':return OPR_LT;
+case TK_LE:return OPR_LE;
+case'>':return OPR_GT;
+case TK_GE:return OPR_GE;
+case TK_AND:return OPR_AND;
+case TK_OR:return OPR_OR;
+default:return OPR_NOBINOPR;
+}
+}
+static const struct{
+lu_byte left;
+lu_byte right;
+}priority[]={
+{6,6},{6,6},{7,7},{7,7},{7,7},
+{10,9},{5,4},
+{3,3},{3,3},
+{3,3},{3,3},{3,3},{3,3},
+{2,2},{1,1}
+};
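+/* Left/right binding priorities indexed by BinOpr: add/sub 6, mul/div/mod 7,
+'^' {10,9} and '..' {5,4} are right-associative (right < left), comparisons 3,
+'and' 2, 'or' 1; unary operators bind with priority 8 in subexpr below. */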
+static BinOpr subexpr(LexState*ls,expdesc*v,unsigned int limit){
+BinOpr op;
+UnOpr uop;
+enterlevel(ls);
+uop=getunopr(ls->t.token);
+if(uop!=OPR_NOUNOPR){
+luaX_next(ls);
+subexpr(ls,v,8);
+luaK_prefix(ls->fs,uop,v);
+}
+else simpleexp(ls,v);
+op=getbinopr(ls->t.token);
+while(op!=OPR_NOBINOPR&&priority[op].left>limit){
+expdesc v2;
+BinOpr nextop;
+luaX_next(ls);
+luaK_infix(ls->fs,op,v);
+nextop=subexpr(ls,&v2,priority[op].right);
+luaK_posfix(ls->fs,op,v,&v2);
+op=nextop;
+}
+leavelevel(ls);
+return op;
+}
+static void expr(LexState*ls,expdesc*v){
+subexpr(ls,v,0);
+}
+static int block_follow(int token){
+switch(token){
+case TK_ELSE:case TK_ELSEIF:case TK_END:
+case TK_UNTIL:case TK_EOS:
+return 1;
+default:return 0;
+}
+}
+static void block(LexState*ls){
+FuncState*fs=ls->fs;
+BlockCnt bl;
+enterblock(fs,&bl,0);
+chunk(ls);
+leaveblock(fs);
+}
+struct LHS_assign{
+struct LHS_assign*prev;
+expdesc v;
+};
+static void check_conflict(LexState*ls,struct LHS_assign*lh,expdesc*v){
+FuncState*fs=ls->fs;
+int extra=fs->freereg;
+int conflict=0;
+for(;lh;lh=lh->prev){
+if(lh->v.k==VINDEXED){
+if(lh->v.u.s.info==v->u.s.info){
+conflict=1;
+lh->v.u.s.info=extra;
+}
+if(lh->v.u.s.aux==v->u.s.info){
+conflict=1;
+lh->v.u.s.aux=extra;
+}
+}
+}
+if(conflict){
+luaK_codeABC(fs,OP_MOVE,fs->freereg,v->u.s.info,0);
+luaK_reserveregs(fs,1);
+}
+}
+static void assignment(LexState*ls,struct LHS_assign*lh,int nvars){
+expdesc e;
+check_condition(ls,VLOCAL<=lh->v.k&&lh->v.k<=VINDEXED,
+"syntax error");
+if(testnext(ls,',')){
+struct LHS_assign nv;
+nv.prev=lh;
+primaryexp(ls,&nv.v);
+if(nv.v.k==VLOCAL)
+check_conflict(ls,lh,&nv.v);
+luaY_checklimit(ls->fs,nvars,200-ls->L->nCcalls,
+"variables in assignment");
+assignment(ls,&nv,nvars+1);
+}
+else{
+int nexps;
+checknext(ls,'=');
+nexps=explist1(ls,&e);
+if(nexps!=nvars){
+adjust_assign(ls,nvars,nexps,&e);
+if(nexps>nvars)
+ls->fs->freereg-=nexps-nvars;
+}
+else{
+luaK_setoneret(ls->fs,&e);
+luaK_storevar(ls->fs,&lh->v,&e);
+return;
+}
+}
+init_exp(&e,VNONRELOC,ls->fs->freereg-1);
+luaK_storevar(ls->fs,&lh->v,&e);
+}
+static int cond(LexState*ls){
+expdesc v;
+expr(ls,&v);
+if(v.k==VNIL)v.k=VFALSE;
+luaK_goiftrue(ls->fs,&v);
+return v.f;
+}
+static void breakstat(LexState*ls){
+FuncState*fs=ls->fs;
+BlockCnt*bl=fs->bl;
+int upval=0;
+while(bl&&!bl->isbreakable){
+upval|=bl->upval;
+bl=bl->previous;
+}
+if(!bl)
+luaX_syntaxerror(ls,"no loop to break");
+if(upval)
+luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0);
+luaK_concat(fs,&bl->breaklist,luaK_jump(fs));
+}
+static void whilestat(LexState*ls,int line){
+FuncState*fs=ls->fs;
+int whileinit;
+int condexit;
+BlockCnt bl;
+luaX_next(ls);
+whileinit=luaK_getlabel(fs);
+condexit=cond(ls);
+enterblock(fs,&bl,1);
+checknext(ls,TK_DO);
+block(ls);
+luaK_patchlist(fs,luaK_jump(fs),whileinit);
+check_match(ls,TK_END,TK_WHILE,line);
+leaveblock(fs);
+luaK_patchtohere(fs,condexit);
+}
+static void repeatstat(LexState*ls,int line){
+int condexit;
+FuncState*fs=ls->fs;
+int repeat_init=luaK_getlabel(fs);
+BlockCnt bl1,bl2;
+enterblock(fs,&bl1,1);
+enterblock(fs,&bl2,0);
+luaX_next(ls);
+chunk(ls);
+check_match(ls,TK_UNTIL,TK_REPEAT,line);
+condexit=cond(ls);
+if(!bl2.upval){
+leaveblock(fs);
+luaK_patchlist(ls->fs,condexit,repeat_init);
+}
+else{
+breakstat(ls);
+luaK_patchtohere(ls->fs,condexit);
+leaveblock(fs);
+luaK_patchlist(ls->fs,luaK_jump(fs),repeat_init);
+}
+leaveblock(fs);
+}
+static int exp1(LexState*ls){
+expdesc e;
+int k;
+expr(ls,&e);
+k=e.k;
+luaK_exp2nextreg(ls->fs,&e);
+return k;
+}
+static void forbody(LexState*ls,int base,int line,int nvars,int isnum){
+BlockCnt bl;
+FuncState*fs=ls->fs;
+int prep,endfor;
+adjustlocalvars(ls,3);
+checknext(ls,TK_DO);
+prep=isnum?luaK_codeAsBx(fs,OP_FORPREP,base,(-1)):luaK_jump(fs);
+enterblock(fs,&bl,0);
+adjustlocalvars(ls,nvars);
+luaK_reserveregs(fs,nvars);
+block(ls);
+leaveblock(fs);
+luaK_patchtohere(fs,prep);
+endfor=(isnum)?luaK_codeAsBx(fs,OP_FORLOOP,base,(-1)):
+luaK_codeABC(fs,OP_TFORLOOP,base,0,nvars);
+luaK_fixline(fs,line);
+luaK_patchlist(fs,(isnum?endfor:luaK_jump(fs)),prep+1);
+}
+static void fornum(LexState*ls,TString*varname,int line){
+FuncState*fs=ls->fs;
+int base=fs->freereg;
+new_localvarliteral(ls,"(for index)",0);
+new_localvarliteral(ls,"(for limit)",1);
+new_localvarliteral(ls,"(for step)",2);
+new_localvar(ls,varname,3);
+checknext(ls,'=');
+exp1(ls);
+checknext(ls,',');
+exp1(ls);
+if(testnext(ls,','))
+exp1(ls);
+else{
+luaK_codeABx(fs,OP_LOADK,fs->freereg,luaK_numberK(fs,1));
+luaK_reserveregs(fs,1);
+}
+forbody(ls,base,line,1,1);
+}
+static void forlist(LexState*ls,TString*indexname){
+FuncState*fs=ls->fs;
+expdesc e;
+int nvars=0;
+int line;
+int base=fs->freereg;
+new_localvarliteral(ls,"(for generator)",nvars++);
+new_localvarliteral(ls,"(for state)",nvars++);
+new_localvarliteral(ls,"(for control)",nvars++);
+new_localvar(ls,indexname,nvars++);
+while(testnext(ls,','))
+new_localvar(ls,str_checkname(ls),nvars++);
+checknext(ls,TK_IN);
+line=ls->linenumber;
+adjust_assign(ls,3,explist1(ls,&e),&e);
+luaK_checkstack(fs,3);
+forbody(ls,base,line,nvars-3,0);
+}
+static void forstat(LexState*ls,int line){
+FuncState*fs=ls->fs;
+TString*varname;
+BlockCnt bl;
+enterblock(fs,&bl,1);
+luaX_next(ls);
+varname=str_checkname(ls);
+switch(ls->t.token){
+case'=':fornum(ls,varname,line);break;
+case',':case TK_IN:forlist(ls,varname);break;
+default:luaX_syntaxerror(ls,LUA_QL("=")" or "LUA_QL("in")" expected");
+}
+check_match(ls,TK_END,TK_FOR,line);
+leaveblock(fs);
+}
+static int test_then_block(LexState*ls){
+int condexit;
+luaX_next(ls);
+condexit=cond(ls);
+checknext(ls,TK_THEN);
+block(ls);
+return condexit;
+}
+static void ifstat(LexState*ls,int line){
+FuncState*fs=ls->fs;
+int flist;
+int escapelist=(-1);
+flist=test_then_block(ls);
+while(ls->t.token==TK_ELSEIF){
+luaK_concat(fs,&escapelist,luaK_jump(fs));
+luaK_patchtohere(fs,flist);
+flist=test_then_block(ls);
+}
+if(ls->t.token==TK_ELSE){
+luaK_concat(fs,&escapelist,luaK_jump(fs));
+luaK_patchtohere(fs,flist);
+luaX_next(ls);
+block(ls);
+}
+else
+luaK_concat(fs,&escapelist,flist);
+luaK_patchtohere(fs,escapelist);
+check_match(ls,TK_END,TK_IF,line);
+}
+static void localfunc(LexState*ls){
+expdesc v,b;
+FuncState*fs=ls->fs;
+new_localvar(ls,str_checkname(ls),0);
+init_exp(&v,VLOCAL,fs->freereg);
+luaK_reserveregs(fs,1);
+adjustlocalvars(ls,1);
+body(ls,&b,0,ls->linenumber);
+luaK_storevar(fs,&v,&b);
+getlocvar(fs,fs->nactvar-1).startpc=fs->pc;
+}
+static void localstat(LexState*ls){
+int nvars=0;
+int nexps;
+expdesc e;
+do{
+new_localvar(ls,str_checkname(ls),nvars++);
+}while(testnext(ls,','));
+if(testnext(ls,'='))
+nexps=explist1(ls,&e);
+else{
+e.k=VVOID;
+nexps=0;
+}
+adjust_assign(ls,nvars,nexps,&e);
+adjustlocalvars(ls,nvars);
+}
+static int funcname(LexState*ls,expdesc*v){
+int needself=0;
+singlevar(ls,v);
+while(ls->t.token=='.')
+field(ls,v);
+if(ls->t.token==':'){
+needself=1;
+field(ls,v);
+}
+return needself;
+}
+static void funcstat(LexState*ls,int line){
+int needself;
+expdesc v,b;
+luaX_next(ls);
+needself=funcname(ls,&v);
+body(ls,&b,needself,line);
+luaK_storevar(ls->fs,&v,&b);
+luaK_fixline(ls->fs,line);
+}
+static void exprstat(LexState*ls){
+FuncState*fs=ls->fs;
+struct LHS_assign v;
+primaryexp(ls,&v.v);
+if(v.v.k==VCALL)
+SETARG_C(getcode(fs,&v.v),1);
+else{
+v.prev=NULL;
+assignment(ls,&v,1);
+}
+}
+static void retstat(LexState*ls){
+FuncState*fs=ls->fs;
+expdesc e;
+int first,nret;
+luaX_next(ls);
+if(block_follow(ls->t.token)||ls->t.token==';')
+first=nret=0;
+else{
+nret=explist1(ls,&e);
+if(hasmultret(e.k)){
+luaK_setmultret(fs,&e);
+if(e.k==VCALL&&nret==1){
+SET_OPCODE(getcode(fs,&e),OP_TAILCALL);
+}
+first=fs->nactvar;
+nret=(-1);
+}
+else{
+if(nret==1)
+first=luaK_exp2anyreg(fs,&e);
+else{
+luaK_exp2nextreg(fs,&e);
+first=fs->nactvar;
+}
+}
+}
+luaK_ret(fs,first,nret);
+}
+static int statement(LexState*ls){
+int line=ls->linenumber;
+switch(ls->t.token){
+case TK_IF:{
+ifstat(ls,line);
+return 0;
+}
+case TK_WHILE:{
+whilestat(ls,line);
+return 0;
+}
+case TK_DO:{
+luaX_next(ls);
+block(ls);
+check_match(ls,TK_END,TK_DO,line);
+return 0;
+}
+case TK_FOR:{
+forstat(ls,line);
+return 0;
+}
+case TK_REPEAT:{
+repeatstat(ls,line);
+return 0;
+}
+case TK_FUNCTION:{
+funcstat(ls,line);
+return 0;
+}
+case TK_LOCAL:{
+luaX_next(ls);
+if(testnext(ls,TK_FUNCTION))
+localfunc(ls);
+else
+localstat(ls);
+return 0;
+}
+case TK_RETURN:{
+retstat(ls);
+return 1;
+}
+case TK_BREAK:{
+luaX_next(ls);
+breakstat(ls);
+return 1;
+}
+default:{
+exprstat(ls);
+return 0;
+}
+}
+}
+static void chunk(LexState*ls){
+int islast=0;
+enterlevel(ls);
+while(!islast&&!block_follow(ls->t.token)){
+islast=statement(ls);
+testnext(ls,';');
+ls->fs->freereg=ls->fs->nactvar;
+}
+leavelevel(ls);
+}
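+/* End of the recursive-descent parser (lparser.c). What follows is the core VM
+support from lvm.c: number/string coercion, table access with metamethod fallbacks,
+comparisons, concatenation and arithmetic. */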
+static const TValue*luaV_tonumber(const TValue*obj,TValue*n){
+lua_Number num;
+if(ttisnumber(obj))return obj;
+if(ttisstring(obj)&&luaO_str2d(svalue(obj),&num)){
+setnvalue(n,num);
+return n;
+}
+else
+return NULL;
+}
+static int luaV_tostring(lua_State*L,StkId obj){
+if(!ttisnumber(obj))
+return 0;
+else{
+char s[32];
+lua_Number n=nvalue(obj);
+lua_number2str(s,n);
+setsvalue(L,obj,luaS_new(L,s));
+return 1;
+}
+}
+static void callTMres(lua_State*L,StkId res,const TValue*f,
+const TValue*p1,const TValue*p2){
+ptrdiff_t result=savestack(L,res);
+setobj(L,L->top,f);
+setobj(L,L->top+1,p1);
+setobj(L,L->top+2,p2);
+luaD_checkstack(L,3);
+L->top+=3;
+luaD_call(L,L->top-3,1);
+res=restorestack(L,result);
+L->top--;
+setobj(L,res,L->top);
+}
+static void callTM(lua_State*L,const TValue*f,const TValue*p1,
+const TValue*p2,const TValue*p3){
+setobj(L,L->top,f);
+setobj(L,L->top+1,p1);
+setobj(L,L->top+2,p2);
+setobj(L,L->top+3,p3);
+luaD_checkstack(L,4);
+L->top+=4;
+luaD_call(L,L->top-4,0);
+}
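+/* luaV_gettable/luaV_settable follow __index/__newindex chains; the loop bound of
+100 guards against metatable cycles ("loop in gettable"/"loop in settable"). */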
+static void luaV_gettable(lua_State*L,const TValue*t,TValue*key,StkId val){
+int loop;
+for(loop=0;loop<100;loop++){
+const TValue*tm;
+if(ttistable(t)){
+Table*h=hvalue(t);
+const TValue*res=luaH_get(h,key);
+if(!ttisnil(res)||
+(tm=fasttm(L,h->metatable,TM_INDEX))==NULL){
+setobj(L,val,res);
+return;
+}
+}
+else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_INDEX)))
+luaG_typeerror(L,t,"index");
+if(ttisfunction(tm)){
+callTMres(L,val,tm,t,key);
+return;
+}
+t=tm;
+}
+luaG_runerror(L,"loop in gettable");
+}
+static void luaV_settable(lua_State*L,const TValue*t,TValue*key,StkId val){
+int loop;
+TValue temp;
+for(loop=0;loop<100;loop++){
+const TValue*tm;
+if(ttistable(t)){
+Table*h=hvalue(t);
+TValue*oldval=luaH_set(L,h,key);
+if(!ttisnil(oldval)||
+(tm=fasttm(L,h->metatable,TM_NEWINDEX))==NULL){
+setobj(L,oldval,val);
+h->flags=0;
+luaC_barriert(L,h,val);
+return;
+}
+}
+else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_NEWINDEX)))
+luaG_typeerror(L,t,"index");
+if(ttisfunction(tm)){
+callTM(L,tm,t,key,val);
+return;
+}
+setobj(L,&temp,tm);
+t=&temp;
+}
+luaG_runerror(L,"loop in settable");
+}
+static int call_binTM(lua_State*L,const TValue*p1,const TValue*p2,
+StkId res,TMS event){
+const TValue*tm=luaT_gettmbyobj(L,p1,event);
+if(ttisnil(tm))
+tm=luaT_gettmbyobj(L,p2,event);
+if(ttisnil(tm))return 0;
+callTMres(L,res,tm,p1,p2);
+return 1;
+}
+static const TValue*get_compTM(lua_State*L,Table*mt1,Table*mt2,
+TMS event){
+const TValue*tm1=fasttm(L,mt1,event);
+const TValue*tm2;
+if(tm1==NULL)return NULL;
+if(mt1==mt2)return tm1;
+tm2=fasttm(L,mt2,event);
+if(tm2==NULL)return NULL;
+if(luaO_rawequalObj(tm1,tm2))
+return tm1;
+return NULL;
+}
+static int call_orderTM(lua_State*L,const TValue*p1,const TValue*p2,
+TMS event){
+const TValue*tm1=luaT_gettmbyobj(L,p1,event);
+const TValue*tm2;
+if(ttisnil(tm1))return-1;
+tm2=luaT_gettmbyobj(L,p2,event);
+if(!luaO_rawequalObj(tm1,tm2))
+return-1;
+callTMres(L,L->top,tm1,p1,p2);
+return!l_isfalse(L->top);
+}
+static int l_strcmp(const TString*ls,const TString*rs){
+const char*l=getstr(ls);
+size_t ll=ls->tsv.len;
+const char*r=getstr(rs);
+size_t lr=rs->tsv.len;
+for(;;){
+int temp=strcoll(l,r);
+if(temp!=0)return temp;
+else{
+size_t len=strlen(l);
+if(len==lr)
+return(len==ll)?0:1;
+else if(len==ll)
+return-1;
+len++;
+l+=len;ll-=len;r+=len;lr-=len;
+}
+}
+}
+static int luaV_lessthan(lua_State*L,const TValue*l,const TValue*r){
+int res;
+if(ttype(l)!=ttype(r))
+return luaG_ordererror(L,l,r);
+else if(ttisnumber(l))
+return luai_numlt(nvalue(l),nvalue(r));
+else if(ttisstring(l))
+return l_strcmp(rawtsvalue(l),rawtsvalue(r))<0;
+else if((res=call_orderTM(L,l,r,TM_LT))!=-1)
+return res;
+return luaG_ordererror(L,l,r);
+}
+static int lessequal(lua_State*L,const TValue*l,const TValue*r){
+int res;
+if(ttype(l)!=ttype(r))
+return luaG_ordererror(L,l,r);
+else if(ttisnumber(l))
+return luai_numle(nvalue(l),nvalue(r));
+else if(ttisstring(l))
+return l_strcmp(rawtsvalue(l),rawtsvalue(r))<=0;
+else if((res=call_orderTM(L,l,r,TM_LE))!=-1)
+return res;
+else if((res=call_orderTM(L,r,l,TM_LT))!=-1)
+return!res;
+return luaG_ordererror(L,l,r);
+}
+static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2){
+const TValue*tm;
+switch(ttype(t1)){
+case 0:return 1;
+case 3:return luai_numeq(nvalue(t1),nvalue(t2));
+case 1:return bvalue(t1)==bvalue(t2);
+case 2:return pvalue(t1)==pvalue(t2);
+case 7:{
+if(uvalue(t1)==uvalue(t2))return 1;
+tm=get_compTM(L,uvalue(t1)->metatable,uvalue(t2)->metatable,
+TM_EQ);
+break;
+}
+case 5:{
+if(hvalue(t1)==hvalue(t2))return 1;
+tm=get_compTM(L,hvalue(t1)->metatable,hvalue(t2)->metatable,TM_EQ);
+break;
+}
+default:return gcvalue(t1)==gcvalue(t2);
+}
+if(tm==NULL)return 0;
+callTMres(L,L->top,tm,t1,t2);
+return!l_isfalse(L->top);
+}
+static void luaV_concat(lua_State*L,int total,int last){
+do{
+StkId top=L->base+last+1;
+int n=2;
+if(!(ttisstring(top-2)||ttisnumber(top-2))||!tostring(L,top-1)){
+if(!call_binTM(L,top-2,top-1,top-2,TM_CONCAT))
+luaG_concaterror(L,top-2,top-1);
+}else if(tsvalue(top-1)->len==0)
+(void)tostring(L,top-2);
+else{
+size_t tl=tsvalue(top-1)->len;
+char*buffer;
+int i;
+for(n=1;n<total&&tostring(L,top-n-1);n++){
+size_t l=tsvalue(top-n-1)->len;
+if(l>=((size_t)(~(size_t)0)-2)-tl)luaG_runerror(L,"string length overflow");
+tl+=l;
+}
+buffer=luaZ_openspace(L,&G(L)->buff,tl);
+tl=0;
+for(i=n;i>0;i--){
+size_t l=tsvalue(top-i)->len;
+memcpy(buffer+tl,svalue(top-i),l);
+tl+=l;
+}
+setsvalue(L,top-n,luaS_newlstr(L,buffer,tl));
+}
+total-=n-1;
+last-=n-1;
+}while(total>1);
+}
+static void Arith(lua_State*L,StkId ra,const TValue*rb,
+const TValue*rc,TMS op){
+TValue tempb,tempc;
+const TValue*b,*c;
+if((b=luaV_tonumber(rb,&tempb))!=NULL&&
+(c=luaV_tonumber(rc,&tempc))!=NULL){
+lua_Number nb=nvalue(b),nc=nvalue(c);
+switch(op){
+case TM_ADD:setnvalue(ra,luai_numadd(nb,nc));break;
+case TM_SUB:setnvalue(ra,luai_numsub(nb,nc));break;
+case TM_MUL:setnvalue(ra,luai_nummul(nb,nc));break;
+case TM_DIV:setnvalue(ra,luai_numdiv(nb,nc));break;
+case TM_MOD:setnvalue(ra,luai_nummod(nb,nc));break;
+case TM_POW:setnvalue(ra,luai_numpow(nb,nc));break;
+case TM_UNM:setnvalue(ra,luai_numunm(nb));break;
+default:break;
+}
+}
+else if(!call_binTM(L,rb,rc,ra,op))
+luaG_aritherror(L,rb,rc);
+}
+#define runtime_check(L,c){if(!(c))break;}
+#define RA(i)(base+GETARG_A(i))
+#define RB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgR,base+GETARG_B(i))
+#define RKB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_B(i))?k+INDEXK(GETARG_B(i)):base+GETARG_B(i))
+#define RKC(i)check_exp(getCMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_C(i))?k+INDEXK(GETARG_C(i)):base+GETARG_C(i))
+#define KBx(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,k+GETARG_Bx(i))
+#define dojump(L,pc,i){(pc)+=(i);}
+#define Protect(x){L->savedpc=pc;{x;};base=L->base;}
+#define arith_op(op,tm){TValue*rb=RKB(i);TValue*rc=RKC(i);if(ttisnumber(rb)&&ttisnumber(rc)){lua_Number nb=nvalue(rb),nc=nvalue(rc);setnvalue(ra,op(nb,nc));}else Protect(Arith(L,ra,rb,rc,tm));}
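+/* luaV_execute: the register-based bytecode interpreter. base, k and pc are cached
+in locals; the Protect macro saves pc and re-fetches base around any operation that
+may reallocate the stack or raise an error. */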
+static void luaV_execute(lua_State*L,int nexeccalls){
+LClosure*cl;
+StkId base;
+TValue*k;
+const Instruction*pc;
+reentry:
+pc=L->savedpc;
+cl=&clvalue(L->ci->func)->l;
+base=L->base;
+k=cl->p->k;
+for(;;){
+const Instruction i=*pc++;
+StkId ra;
+ra=RA(i);
+switch(GET_OPCODE(i)){
+case OP_MOVE:{
+setobj(L,ra,RB(i));
+continue;
+}
+case OP_LOADK:{
+setobj(L,ra,KBx(i));
+continue;
+}
+case OP_LOADBOOL:{
+setbvalue(ra,GETARG_B(i));
+if(GETARG_C(i))pc++;
+continue;
+}
+case OP_LOADNIL:{
+TValue*rb=RB(i);
+do{
+setnilvalue(rb--);
+}while(rb>=ra);
+continue;
+}
+case OP_GETUPVAL:{
+int b=GETARG_B(i);
+setobj(L,ra,cl->upvals[b]->v);
+continue;
+}
+case OP_GETGLOBAL:{
+TValue g;
+TValue*rb=KBx(i);
+sethvalue(L,&g,cl->env);
+Protect(luaV_gettable(L,&g,rb,ra));
+continue;
+}
+case OP_GETTABLE:{
+Protect(luaV_gettable(L,RB(i),RKC(i),ra));
+continue;
+}
+case OP_SETGLOBAL:{
+TValue g;
+sethvalue(L,&g,cl->env);
+Protect(luaV_settable(L,&g,KBx(i),ra));
+continue;
+}
+case OP_SETUPVAL:{
+UpVal*uv=cl->upvals[GETARG_B(i)];
+setobj(L,uv->v,ra);
+luaC_barrier(L,uv,ra);
+continue;
+}
+case OP_SETTABLE:{
+Protect(luaV_settable(L,ra,RKB(i),RKC(i)));
+continue;
+}
+case OP_NEWTABLE:{
+int b=GETARG_B(i);
+int c=GETARG_C(i);
+sethvalue(L,ra,luaH_new(L,luaO_fb2int(b),luaO_fb2int(c)));
+Protect(luaC_checkGC(L));
+continue;
+}
+case OP_SELF:{
+StkId rb=RB(i);
+setobj(L,ra+1,rb);
+Protect(luaV_gettable(L,rb,RKC(i),ra));
+continue;
+}
+case OP_ADD:{
+arith_op(luai_numadd,TM_ADD);
+continue;
+}
+case OP_SUB:{
+arith_op(luai_numsub,TM_SUB);
+continue;
+}
+case OP_MUL:{
+arith_op(luai_nummul,TM_MUL);
+continue;
+}
+case OP_DIV:{
+arith_op(luai_numdiv,TM_DIV);
+continue;
+}
+case OP_MOD:{
+arith_op(luai_nummod,TM_MOD);
+continue;
+}
+case OP_POW:{
+arith_op(luai_numpow,TM_POW);
+continue;
+}
+case OP_UNM:{
+TValue*rb=RB(i);
+if(ttisnumber(rb)){
+lua_Number nb=nvalue(rb);
+setnvalue(ra,luai_numunm(nb));
+}
+else{
+Protect(Arith(L,ra,rb,rb,TM_UNM));
+}
+continue;
+}
+case OP_NOT:{
+int res=l_isfalse(RB(i));
+setbvalue(ra,res);
+continue;
+}
+case OP_LEN:{
+const TValue*rb=RB(i);
+switch(ttype(rb)){
+case 5:{
+setnvalue(ra,cast_num(luaH_getn(hvalue(rb))));
+break;
+}
+case 4:{
+setnvalue(ra,cast_num(tsvalue(rb)->len));
+break;
+}
+default:{
+Protect(
+if(!call_binTM(L,rb,(&luaO_nilobject_),ra,TM_LEN))
+luaG_typeerror(L,rb,"get length of");
+)
+}
+}
+continue;
+}
+case OP_CONCAT:{
+int b=GETARG_B(i);
+int c=GETARG_C(i);
+Protect(luaV_concat(L,c-b+1,c);luaC_checkGC(L));
+setobj(L,RA(i),base+b);
+continue;
+}
+case OP_JMP:{
+dojump(L,pc,GETARG_sBx(i));
+continue;
+}
+case OP_EQ:{
+TValue*rb=RKB(i);
+TValue*rc=RKC(i);
+Protect(
+if(equalobj(L,rb,rc)==GETARG_A(i))
+dojump(L,pc,GETARG_sBx(*pc));
+)
+pc++;
+continue;
+}
+case OP_LT:{
+Protect(
+if(luaV_lessthan(L,RKB(i),RKC(i))==GETARG_A(i))
+dojump(L,pc,GETARG_sBx(*pc));
+)
+pc++;
+continue;
+}
+case OP_LE:{
+Protect(
+if(lessequal(L,RKB(i),RKC(i))==GETARG_A(i))
+dojump(L,pc,GETARG_sBx(*pc));
+)
+pc++;
+continue;
+}
+case OP_TEST:{
+if(l_isfalse(ra)!=GETARG_C(i))
+dojump(L,pc,GETARG_sBx(*pc));
+pc++;
+continue;
+}
+case OP_TESTSET:{
+TValue*rb=RB(i);
+if(l_isfalse(rb)!=GETARG_C(i)){
+setobj(L,ra,rb);
+dojump(L,pc,GETARG_sBx(*pc));
+}
+pc++;
+continue;
+}
+case OP_CALL:{
+int b=GETARG_B(i);
+int nresults=GETARG_C(i)-1;
+if(b!=0)L->top=ra+b;
+L->savedpc=pc;
+switch(luaD_precall(L,ra,nresults)){
+case 0:{
+nexeccalls++;
+goto reentry;
+}
+case 1:{
+if(nresults>=0)L->top=L->ci->top;
+base=L->base;
+continue;
+}
+default:{
+return;
+}
+}
+}
+case OP_TAILCALL:{
+int b=GETARG_B(i);
+if(b!=0)L->top=ra+b;
+L->savedpc=pc;
+switch(luaD_precall(L,ra,(-1))){
+case 0:{
+CallInfo*ci=L->ci-1;
+int aux;
+StkId func=ci->func;
+StkId pfunc=(ci+1)->func;
+if(L->openupval)luaF_close(L,ci->base);
+L->base=ci->base=ci->func+((ci+1)->base-pfunc);
+for(aux=0;pfunc+aux<L->top;aux++)
+setobj(L,func+aux,pfunc+aux);
+ci->top=L->top=func+aux;
+ci->savedpc=L->savedpc;
+ci->tailcalls++;
+L->ci--;
+goto reentry;
+}
+case 1:{
+base=L->base;
+continue;
+}
+default:{
+return;
+}
+}
+}
+case OP_RETURN:{
+int b=GETARG_B(i);
+if(b!=0)L->top=ra+b-1;
+if(L->openupval)luaF_close(L,base);
+L->savedpc=pc;
+b=luaD_poscall(L,ra);
+if(--nexeccalls==0)
+return;
+else{
+if(b)L->top=L->ci->top;
+goto reentry;
+}
+}
+case OP_FORLOOP:{
+lua_Number step=nvalue(ra+2);
+lua_Number idx=luai_numadd(nvalue(ra),step);
+lua_Number limit=nvalue(ra+1);
+if(luai_numlt(0,step)?luai_numle(idx,limit)
+:luai_numle(limit,idx)){
+dojump(L,pc,GETARG_sBx(i));
+setnvalue(ra,idx);
+setnvalue(ra+3,idx);
+}
+continue;
+}
+case OP_FORPREP:{
+const TValue*init=ra;
+const TValue*plimit=ra+1;
+const TValue*pstep=ra+2;
+L->savedpc=pc;
+if(!tonumber(init,ra))
+luaG_runerror(L,LUA_QL("for")" initial value must be a number");
+else if(!tonumber(plimit,ra+1))
+luaG_runerror(L,LUA_QL("for")" limit must be a number");
+else if(!tonumber(pstep,ra+2))
+luaG_runerror(L,LUA_QL("for")" step must be a number");
+setnvalue(ra,luai_numsub(nvalue(ra),nvalue(pstep)));
+dojump(L,pc,GETARG_sBx(i));
+continue;
+}
+case OP_TFORLOOP:{
+StkId cb=ra+3;
+setobj(L,cb+2,ra+2);
+setobj(L,cb+1,ra+1);
+setobj(L,cb,ra);
+L->top=cb+3;
+Protect(luaD_call(L,cb,GETARG_C(i)));
+L->top=L->ci->top;
+cb=RA(i)+3;
+if(!ttisnil(cb)){
+setobj(L,cb-1,cb);
+dojump(L,pc,GETARG_sBx(*pc));
+}
+pc++;
+continue;
+}
+case OP_SETLIST:{
+int n=GETARG_B(i);
+int c=GETARG_C(i);
+int last;
+Table*h;
+if(n==0){
+n=cast_int(L->top-ra)-1;
+L->top=L->ci->top;
+}
+if(c==0)c=cast_int(*pc++);
+runtime_check(L,ttistable(ra));
+h=hvalue(ra);
+last=((c-1)*50)+n;
+if(last>h->sizearray)
+luaH_resizearray(L,h,last);
+for(;n>0;n--){
+TValue*val=ra+n;
+setobj(L,luaH_setnum(L,h,last--),val);
+luaC_barriert(L,h,val);
+}
+continue;
+}
+case OP_CLOSE:{
+luaF_close(L,ra);
+continue;
+}
+case OP_CLOSURE:{
+Proto*p;
+Closure*ncl;
+int nup,j;
+p=cl->p->p[GETARG_Bx(i)];
+nup=p->nups;
+ncl=luaF_newLclosure(L,nup,cl->env);
+ncl->l.p=p;
+for(j=0;j<nup;j++,pc++){
+if(GET_OPCODE(*pc)==OP_GETUPVAL)
+ncl->l.upvals[j]=cl->upvals[GETARG_B(*pc)];
+else{
+ncl->l.upvals[j]=luaF_findupval(L,base+GETARG_B(*pc));
+}
+}
+setclvalue(L,ra,ncl);
+Protect(luaC_checkGC(L));
+continue;
+}
+case OP_VARARG:{
+int b=GETARG_B(i)-1;
+int j;
+CallInfo*ci=L->ci;
+int n=cast_int(ci->base-ci->func)-cl->p->numparams-1;
+if(b==(-1)){
+Protect(luaD_checkstack(L,n));
+ra=RA(i);
+b=n;
+L->top=ra+n;
+}
+for(j=0;j<b;j++){
+if(j<n){
+setobj(L,ra+j,ci->base-n+j);
+}
+else{
+setnilvalue(ra+j);
+}
+}
+continue;
+}
+}
+}
+}
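+/* C API layer (lapi.c). index2adr maps an API index to a TValue*: positive indices
+are relative to the function base, negative ones to the top, and the pseudo-indices
+-10000/-10001/-10002 denote the registry, the running function's environment and the
+globals table; indices below -10002 address C-closure upvalues. */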
+#define api_checknelems(L,n)luai_apicheck(L,(n)<=(L->top-L->base))
+#define api_checkvalidindex(L,i)luai_apicheck(L,(i)!=(&luaO_nilobject_))
+#define api_incr_top(L){luai_apicheck(L,L->top<L->ci->top);L->top++;}
+static TValue*index2adr(lua_State*L,int idx){
+if(idx>0){
+TValue*o=L->base+(idx-1);
+luai_apicheck(L,idx<=L->ci->top-L->base);
+if(o>=L->top)return cast(TValue*,(&luaO_nilobject_));
+else return o;
+}
+else if(idx>(-10000)){
+luai_apicheck(L,idx!=0&&-idx<=L->top-L->base);
+return L->top+idx;
+}
+else switch(idx){
+case(-10000):return registry(L);
+case(-10001):{
+Closure*func=curr_func(L);
+sethvalue(L,&L->env,func->c.env);
+return&L->env;
+}
+case(-10002):return gt(L);
+default:{
+Closure*func=curr_func(L);
+idx=(-10002)-idx;
+return(idx<=func->c.nupvalues)
+?&func->c.upvalue[idx-1]
+:cast(TValue*,(&luaO_nilobject_));
+}
+}
+}
+static Table*getcurrenv(lua_State*L){
+if(L->ci==L->base_ci)
+return hvalue(gt(L));
+else{
+Closure*func=curr_func(L);
+return func->c.env;
+}
+}
+static int lua_checkstack(lua_State*L,int size){
+int res=1;
+if(size>8000||(L->top-L->base+size)>8000)
+res=0;
+else if(size>0){
+luaD_checkstack(L,size);
+if(L->ci->top<L->top+size)
+L->ci->top=L->top+size;
+}
+return res;
+}
+static lua_CFunction lua_atpanic(lua_State*L,lua_CFunction panicf){
+lua_CFunction old;
+old=G(L)->panic;
+G(L)->panic=panicf;
+return old;
+}
+static int lua_gettop(lua_State*L){
+return cast_int(L->top-L->base);
+}
+static void lua_settop(lua_State*L,int idx){
+if(idx>=0){
+luai_apicheck(L,idx<=L->stack_last-L->base);
+while(L->top<L->base+idx)
+setnilvalue(L->top++);
+L->top=L->base+idx;
+}
+else{
+luai_apicheck(L,-(idx+1)<=(L->top-L->base));
+L->top+=idx+1;
+}
+}
+static void lua_remove(lua_State*L,int idx){
+StkId p;
+p=index2adr(L,idx);
+api_checkvalidindex(L,p);
+while(++p<L->top)setobj(L,p-1,p);
+L->top--;
+}
+static void lua_insert(lua_State*L,int idx){
+StkId p;
+StkId q;
+p=index2adr(L,idx);
+api_checkvalidindex(L,p);
+for(q=L->top;q>p;q--)setobj(L,q,q-1);
+setobj(L,p,L->top);
+}
+static void lua_replace(lua_State*L,int idx){
+StkId o;
+if(idx==(-10001)&&L->ci==L->base_ci)
+luaG_runerror(L,"no calling environment");
+api_checknelems(L,1);
+o=index2adr(L,idx);
+api_checkvalidindex(L,o);
+if(idx==(-10001)){
+Closure*func=curr_func(L);
+luai_apicheck(L,ttistable(L->top-1));
+func->c.env=hvalue(L->top-1);
+luaC_barrier(L,func,L->top-1);
+}
+else{
+setobj(L,o,L->top-1);
+if(idx<(-10002))
+luaC_barrier(L,curr_func(L),L->top-1);
+}
+L->top--;
+}
+static void lua_pushvalue(lua_State*L,int idx){
+setobj(L,L->top,index2adr(L,idx));
+api_incr_top(L);
+}
+static int lua_type(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+return(o==(&luaO_nilobject_))?(-1):ttype(o);
+}
+static const char*lua_typename(lua_State*L,int t){
+UNUSED(L);
+return(t==(-1))?"no value":luaT_typenames[t];
+}
+static int lua_iscfunction(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+return iscfunction(o);
+}
+static int lua_isnumber(lua_State*L,int idx){
+TValue n;
+const TValue*o=index2adr(L,idx);
+return tonumber(o,&n);
+}
+static int lua_isstring(lua_State*L,int idx){
+int t=lua_type(L,idx);
+return(t==4||t==3);
+}
+static int lua_rawequal(lua_State*L,int index1,int index2){
+StkId o1=index2adr(L,index1);
+StkId o2=index2adr(L,index2);
+return(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0
+:luaO_rawequalObj(o1,o2);
+}
+static int lua_lessthan(lua_State*L,int index1,int index2){
+StkId o1,o2;
+int i;
+o1=index2adr(L,index1);
+o2=index2adr(L,index2);
+i=(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0
+:luaV_lessthan(L,o1,o2);
+return i;
+}
+static lua_Number lua_tonumber(lua_State*L,int idx){
+TValue n;
+const TValue*o=index2adr(L,idx);
+if(tonumber(o,&n))
+return nvalue(o);
+else
+return 0;
+}
+static lua_Integer lua_tointeger(lua_State*L,int idx){
+TValue n;
+const TValue*o=index2adr(L,idx);
+if(tonumber(o,&n)){
+lua_Integer res;
+lua_Number num=nvalue(o);
+lua_number2integer(res,num);
+return res;
+}
+else
+return 0;
+}
+static int lua_toboolean(lua_State*L,int idx){
+const TValue*o=index2adr(L,idx);
+return!l_isfalse(o);
+}
+static const char*lua_tolstring(lua_State*L,int idx,size_t*len){
+StkId o=index2adr(L,idx);
+if(!ttisstring(o)){
+if(!luaV_tostring(L,o)){
+if(len!=NULL)*len=0;
+return NULL;
+}
+luaC_checkGC(L);
+o=index2adr(L,idx);
+}
+if(len!=NULL)*len=tsvalue(o)->len;
+return svalue(o);
+}
+static size_t lua_objlen(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+switch(ttype(o)){
+case 4:return tsvalue(o)->len;
+case 7:return uvalue(o)->len;
+case 5:return luaH_getn(hvalue(o));
+case 3:{
+size_t l;
+l=(luaV_tostring(L,o)?tsvalue(o)->len:0);
+return l;
+}
+default:return 0;
+}
+}
+static lua_CFunction lua_tocfunction(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+return(!iscfunction(o))?NULL:clvalue(o)->c.f;
+}
+static void*lua_touserdata(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+switch(ttype(o)){
+case 7:return(rawuvalue(o)+1);
+case 2:return pvalue(o);
+default:return NULL;
+}
+}
+static void lua_pushnil(lua_State*L){
+setnilvalue(L->top);
+api_incr_top(L);
+}
+static void lua_pushnumber(lua_State*L,lua_Number n){
+setnvalue(L->top,n);
+api_incr_top(L);
+}
+static void lua_pushinteger(lua_State*L,lua_Integer n){
+setnvalue(L->top,cast_num(n));
+api_incr_top(L);
+}
+static void lua_pushlstring(lua_State*L,const char*s,size_t len){
+luaC_checkGC(L);
+setsvalue(L,L->top,luaS_newlstr(L,s,len));
+api_incr_top(L);
+}
+static void lua_pushstring(lua_State*L,const char*s){
+if(s==NULL)
+lua_pushnil(L);
+else
+lua_pushlstring(L,s,strlen(s));
+}
+static const char*lua_pushvfstring(lua_State*L,const char*fmt,
+va_list argp){
+const char*ret;
+luaC_checkGC(L);
+ret=luaO_pushvfstring(L,fmt,argp);
+return ret;
+}
+static const char*lua_pushfstring(lua_State*L,const char*fmt,...){
+const char*ret;
+va_list argp;
+luaC_checkGC(L);
+va_start(argp,fmt);
+ret=luaO_pushvfstring(L,fmt,argp);
+va_end(argp);
+return ret;
+}
+static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n){
+Closure*cl;
+luaC_checkGC(L);
+api_checknelems(L,n);
+cl=luaF_newCclosure(L,n,getcurrenv(L));
+cl->c.f=fn;
+L->top-=n;
+while(n--)
+setobj(L,&cl->c.upvalue[n],L->top+n);
+setclvalue(L,L->top,cl);
+api_incr_top(L);
+}
+static void lua_pushboolean(lua_State*L,int b){
+setbvalue(L->top,(b!=0));
+api_incr_top(L);
+}
+static int lua_pushthread(lua_State*L){
+setthvalue(L,L->top,L);
+api_incr_top(L);
+return(G(L)->mainthread==L);
+}
+static void lua_gettable(lua_State*L,int idx){
+StkId t;
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+luaV_gettable(L,t,L->top-1,L->top-1);
+}
+static void lua_getfield(lua_State*L,int idx,const char*k){
+StkId t;
+TValue key;
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+setsvalue(L,&key,luaS_new(L,k));
+luaV_gettable(L,t,&key,L->top);
+api_incr_top(L);
+}
+static void lua_rawget(lua_State*L,int idx){
+StkId t;
+t=index2adr(L,idx);
+luai_apicheck(L,ttistable(t));
+setobj(L,L->top-1,luaH_get(hvalue(t),L->top-1));
+}
+static void lua_rawgeti(lua_State*L,int idx,int n){
+StkId o;
+o=index2adr(L,idx);
+luai_apicheck(L,ttistable(o));
+setobj(L,L->top,luaH_getnum(hvalue(o),n));
+api_incr_top(L);
+}
+static void lua_createtable(lua_State*L,int narray,int nrec){
+luaC_checkGC(L);
+sethvalue(L,L->top,luaH_new(L,narray,nrec));
+api_incr_top(L);
+}
+static int lua_getmetatable(lua_State*L,int objindex){
+const TValue*obj;
+Table*mt=NULL;
+int res;
+obj=index2adr(L,objindex);
+switch(ttype(obj)){
+case 5:
+mt=hvalue(obj)->metatable;
+break;
+case 7:
+mt=uvalue(obj)->metatable;
+break;
+default:
+mt=G(L)->mt[ttype(obj)];
+break;
+}
+if(mt==NULL)
+res=0;
+else{
+sethvalue(L,L->top,mt);
+api_incr_top(L);
+res=1;
+}
+return res;
+}
+static void lua_getfenv(lua_State*L,int idx){
+StkId o;
+o=index2adr(L,idx);
+api_checkvalidindex(L,o);
+switch(ttype(o)){
+case 6:
+sethvalue(L,L->top,clvalue(o)->c.env);
+break;
+case 7:
+sethvalue(L,L->top,uvalue(o)->env);
+break;
+case 8:
+setobj(L,L->top,gt(thvalue(o)));
+break;
+default:
+setnilvalue(L->top);
+break;
+}
+api_incr_top(L);
+}
+static void lua_settable(lua_State*L,int idx){
+StkId t;
+api_checknelems(L,2);
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+luaV_settable(L,t,L->top-2,L->top-1);
+L->top-=2;
+}
+static void lua_setfield(lua_State*L,int idx,const char*k){
+StkId t;
+TValue key;
+api_checknelems(L,1);
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+setsvalue(L,&key,luaS_new(L,k));
+luaV_settable(L,t,&key,L->top-1);
+L->top--;
+}
+static void lua_rawset(lua_State*L,int idx){
+StkId t;
+api_checknelems(L,2);
+t=index2adr(L,idx);
+luai_apicheck(L,ttistable(t));
+setobj(L,luaH_set(L,hvalue(t),L->top-2),L->top-1);
+luaC_barriert(L,hvalue(t),L->top-1);
+L->top-=2;
+}
+static void lua_rawseti(lua_State*L,int idx,int n){
+StkId o;
+api_checknelems(L,1);
+o=index2adr(L,idx);
+luai_apicheck(L,ttistable(o));
+setobj(L,luaH_setnum(L,hvalue(o),n),L->top-1);
+luaC_barriert(L,hvalue(o),L->top-1);
+L->top--;
+}
+static int lua_setmetatable(lua_State*L,int objindex){
+TValue*obj;
+Table*mt;
+api_checknelems(L,1);
+obj=index2adr(L,objindex);
+api_checkvalidindex(L,obj);
+if(ttisnil(L->top-1))
+mt=NULL;
+else{
+luai_apicheck(L,ttistable(L->top-1));
+mt=hvalue(L->top-1);
+}
+switch(ttype(obj)){
+case 5:{
+hvalue(obj)->metatable=mt;
+if(mt)
+luaC_objbarriert(L,hvalue(obj),mt);
+break;
+}
+case 7:{
+uvalue(obj)->metatable=mt;
+if(mt)
+luaC_objbarrier(L,rawuvalue(obj),mt);
+break;
+}
+default:{
+G(L)->mt[ttype(obj)]=mt;
+break;
+}
+}
+L->top--;
+return 1;
+}
+static int lua_setfenv(lua_State*L,int idx){
+StkId o;
+int res=1;
+api_checknelems(L,1);
+o=index2adr(L,idx);
+api_checkvalidindex(L,o);
+luai_apicheck(L,ttistable(L->top-1));
+switch(ttype(o)){
+case 6:
+clvalue(o)->c.env=hvalue(L->top-1);
+break;
+case 7:
+uvalue(o)->env=hvalue(L->top-1);
+break;
+case 8:
+sethvalue(L,gt(thvalue(o)),hvalue(L->top-1));
+break;
+default:
+res=0;
+break;
+}
+if(res)luaC_objbarrier(L,gcvalue(o),hvalue(L->top-1));
+L->top--;
+return res;
+}
+#define adjustresults(L,nres){if(nres==(-1)&&L->top>=L->ci->top)L->ci->top=L->top;}
+#define checkresults(L,na,nr)luai_apicheck(L,(nr)==(-1)||(L->ci->top-L->top>=(nr)-(na)))
+static void lua_call(lua_State*L,int nargs,int nresults){
+StkId func;
+api_checknelems(L,nargs+1);
+checkresults(L,nargs,nresults);
+func=L->top-(nargs+1);
+luaD_call(L,func,nresults);
+adjustresults(L,nresults);
+}
+struct CallS{
+StkId func;
+int nresults;
+};
+static void f_call(lua_State*L,void*ud){
+struct CallS*c=cast(struct CallS*,ud);
+luaD_call(L,c->func,c->nresults);
+}
+static int lua_pcall(lua_State*L,int nargs,int nresults,int errfunc){
+struct CallS c;
+int status;
+ptrdiff_t func;
+api_checknelems(L,nargs+1);
+checkresults(L,nargs,nresults);
+if(errfunc==0)
+func=0;
+else{
+StkId o=index2adr(L,errfunc);
+api_checkvalidindex(L,o);
+func=savestack(L,o);
+}
+c.func=L->top-(nargs+1);
+c.nresults=nresults;
+status=luaD_pcall(L,f_call,&c,savestack(L,c.func),func);
+adjustresults(L,nresults);
+return status;
+}
+static int lua_load(lua_State*L,lua_Reader reader,void*data,
+const char*chunkname){
+ZIO z;
+int status;
+if(!chunkname)chunkname="?";
+luaZ_init(L,&z,reader,data);
+status=luaD_protectedparser(L,&z,chunkname);
+return status;
+}
+static int lua_error(lua_State*L){
+api_checknelems(L,1);
+luaG_errormsg(L);
+return 0;
+}
+static int lua_next(lua_State*L,int idx){
+StkId t;
+int more;
+t=index2adr(L,idx);
+luai_apicheck(L,ttistable(t));
+more=luaH_next(L,hvalue(t),L->top-1);
+if(more){
+api_incr_top(L);
+}
+else
+L->top-=1;
+return more;
+}
+static void lua_concat(lua_State*L,int n){
+api_checknelems(L,n);
+if(n>=2){
+luaC_checkGC(L);
+luaV_concat(L,n,cast_int(L->top-L->base)-1);
+L->top-=(n-1);
+}
+else if(n==0){
+setsvalue(L,L->top,luaS_newlstr(L,"",0));
+api_incr_top(L);
+}
+}
+static void*lua_newuserdata(lua_State*L,size_t size){
+Udata*u;
+luaC_checkGC(L);
+u=luaS_newudata(L,size,getcurrenv(L));
+setuvalue(L,L->top,u);
+api_incr_top(L);
+return u+1;
+}
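+/* Auxiliary library (lauxlib.c): argument checking, error reporting, metatable
+helpers and the luaL_Buffer string buffer. */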
+#define luaL_getn(L,i)((int)lua_objlen(L,i))
+#define luaL_setn(L,i,j)((void)0)
+typedef struct luaL_Reg{
+const char*name;
+lua_CFunction func;
+}luaL_Reg;
+static void luaI_openlib(lua_State*L,const char*libname,
+const luaL_Reg*l,int nup);
+static int luaL_argerror(lua_State*L,int numarg,const char*extramsg);
+static const char* luaL_checklstring(lua_State*L,int numArg,
+size_t*l);
+static const char* luaL_optlstring(lua_State*L,int numArg,
+const char*def,size_t*l);
+static lua_Integer luaL_checkinteger(lua_State*L,int numArg);
+static lua_Integer luaL_optinteger(lua_State*L,int nArg,
+lua_Integer def);
+static int luaL_error(lua_State*L,const char*fmt,...);
+static const char* luaL_findtable(lua_State*L,int idx,
+const char*fname,int szhint);
+#define luaL_argcheck(L,cond,numarg,extramsg)((void)((cond)||luaL_argerror(L,(numarg),(extramsg))))
+#define luaL_checkstring(L,n)(luaL_checklstring(L,(n),NULL))
+#define luaL_optstring(L,n,d)(luaL_optlstring(L,(n),(d),NULL))
+#define luaL_checkint(L,n)((int)luaL_checkinteger(L,(n)))
+#define luaL_optint(L,n,d)((int)luaL_optinteger(L,(n),(d)))
+#define luaL_typename(L,i)lua_typename(L,lua_type(L,(i)))
+#define luaL_getmetatable(L,n)(lua_getfield(L,(-10000),(n)))
+#define luaL_opt(L,f,n,d)(lua_isnoneornil(L,(n))?(d):f(L,(n)))
+typedef struct luaL_Buffer{
+char*p;
+int lvl;
+lua_State*L;
+char buffer[BUFSIZ];
+}luaL_Buffer;
+#define luaL_addchar(B,c)((void)((B)->p<((B)->buffer+BUFSIZ)||luaL_prepbuffer(B)),(*(B)->p++=(char)(c)))
+#define luaL_addsize(B,n)((B)->p+=(n))
+static char* luaL_prepbuffer(luaL_Buffer*B);
+static int luaL_argerror(lua_State*L,int narg,const char*extramsg){
+lua_Debug ar;
+if(!lua_getstack(L,0,&ar))
+return luaL_error(L,"bad argument #%d (%s)",narg,extramsg);
+lua_getinfo(L,"n",&ar);
+if(strcmp(ar.namewhat,"method")==0){
+narg--;
+if(narg==0)
+return luaL_error(L,"calling "LUA_QL("%s")" on bad self (%s)",
+ar.name,extramsg);
+}
+if(ar.name==NULL)
+ar.name="?";
+return luaL_error(L,"bad argument #%d to "LUA_QL("%s")" (%s)",
+narg,ar.name,extramsg);
+}
+static int luaL_typerror(lua_State*L,int narg,const char*tname){
+const char*msg=lua_pushfstring(L,"%s expected, got %s",
+tname,luaL_typename(L,narg));
+return luaL_argerror(L,narg,msg);
+}
+static void tag_error(lua_State*L,int narg,int tag){
+luaL_typerror(L,narg,lua_typename(L,tag));
+}
+static void luaL_where(lua_State*L,int level){
+lua_Debug ar;
+if(lua_getstack(L,level,&ar)){
+lua_getinfo(L,"Sl",&ar);
+if(ar.currentline>0){
+lua_pushfstring(L,"%s:%d: ",ar.short_src,ar.currentline);
+return;
+}
+}
+lua_pushliteral(L,"");
+}
+static int luaL_error(lua_State*L,const char*fmt,...){
+va_list argp;
+va_start(argp,fmt);
+luaL_where(L,1);
+lua_pushvfstring(L,fmt,argp);
+va_end(argp);
+lua_concat(L,2);
+return lua_error(L);
+}
+static int luaL_newmetatable(lua_State*L,const char*tname){
+lua_getfield(L,(-10000),tname);
+if(!lua_isnil(L,-1))
+return 0;
+lua_pop(L,1);
+lua_newtable(L);
+lua_pushvalue(L,-1);
+lua_setfield(L,(-10000),tname);
+return 1;
+}
+static void*luaL_checkudata(lua_State*L,int ud,const char*tname){
+void*p=lua_touserdata(L,ud);
+if(p!=NULL){
+if(lua_getmetatable(L,ud)){
+lua_getfield(L,(-10000),tname);
+if(lua_rawequal(L,-1,-2)){
+lua_pop(L,2);
+return p;
+}
+}
+}
+luaL_typerror(L,ud,tname);
+return NULL;
+}
+static void luaL_checkstack(lua_State*L,int space,const char*mes){
+if(!lua_checkstack(L,space))
+luaL_error(L,"stack overflow (%s)",mes);
+}
+static void luaL_checktype(lua_State*L,int narg,int t){
+if(lua_type(L,narg)!=t)
+tag_error(L,narg,t);
+}
+static void luaL_checkany(lua_State*L,int narg){
+if(lua_type(L,narg)==(-1))
+luaL_argerror(L,narg,"value expected");
+}
+static const char*luaL_checklstring(lua_State*L,int narg,size_t*len){
+const char*s=lua_tolstring(L,narg,len);
+if(!s)tag_error(L,narg,4);
+return s;
+}
+static const char*luaL_optlstring(lua_State*L,int narg,
+const char*def,size_t*len){
+if(lua_isnoneornil(L,narg)){
+if(len)
+*len=(def?strlen(def):0);
+return def;
+}
+else return luaL_checklstring(L,narg,len);
+}
+static lua_Number luaL_checknumber(lua_State*L,int narg){
+lua_Number d=lua_tonumber(L,narg);
+if(d==0&&!lua_isnumber(L,narg))
+tag_error(L,narg,3);
+return d;
+}
+static lua_Integer luaL_checkinteger(lua_State*L,int narg){
+lua_Integer d=lua_tointeger(L,narg);
+if(d==0&&!lua_isnumber(L,narg))
+tag_error(L,narg,3);
+return d;
+}
+static lua_Integer luaL_optinteger(lua_State*L,int narg,
+lua_Integer def){
+return luaL_opt(L,luaL_checkinteger,narg,def);
+}
+static int luaL_getmetafield(lua_State*L,int obj,const char*event){
+if(!lua_getmetatable(L,obj))
+return 0;
+lua_pushstring(L,event);
+lua_rawget(L,-2);
+if(lua_isnil(L,-1)){
+lua_pop(L,2);
+return 0;
+}
+else{
+lua_remove(L,-2);
+return 1;
+}
+}
+static void luaL_register(lua_State*L,const char*libname,
+const luaL_Reg*l){
+luaI_openlib(L,libname,l,0);
+}
+static int libsize(const luaL_Reg*l){
+int size=0;
+for(;l->name;l++)size++;
+return size;
+}
+static void luaI_openlib(lua_State*L,const char*libname,
+const luaL_Reg*l,int nup){
+if(libname){
+int size=libsize(l);
+luaL_findtable(L,(-10000),"_LOADED",1);
+lua_getfield(L,-1,libname);
+if(!lua_istable(L,-1)){
+lua_pop(L,1);
+if(luaL_findtable(L,(-10002),libname,size)!=NULL)
+luaL_error(L,"name conflict for module "LUA_QL("%s"),libname);
+lua_pushvalue(L,-1);
+lua_setfield(L,-3,libname);
+}
+lua_remove(L,-2);
+lua_insert(L,-(nup+1));
+}
+for(;l->name;l++){
+int i;
+for(i=0;i<nup;i++)
+lua_pushvalue(L,-nup);
+lua_pushcclosure(L,l->func,nup);
+lua_setfield(L,-(nup+2),l->name);
+}
+lua_pop(L,nup);
+}
+static const char*luaL_findtable(lua_State*L,int idx,
+const char*fname,int szhint){
+const char*e;
+lua_pushvalue(L,idx);
+do{
+e=strchr(fname,'.');
+if(e==NULL)e=fname+strlen(fname);
+lua_pushlstring(L,fname,e-fname);
+lua_rawget(L,-2);
+if(lua_isnil(L,-1)){
+lua_pop(L,1);
+lua_createtable(L,0,(*e=='.'?1:szhint));
+lua_pushlstring(L,fname,e-fname);
+lua_pushvalue(L,-2);
+lua_settable(L,-4);
+}
+else if(!lua_istable(L,-1)){
+lua_pop(L,2);
+return fname;
+}
+lua_remove(L,-2);
+fname=e+1;
+}while(*e=='.');
+return NULL;
+}
+#define bufflen(B)((B)->p-(B)->buffer)
+#define bufffree(B)((size_t)(BUFSIZ-bufflen(B)))
+static int emptybuffer(luaL_Buffer*B){
+size_t l=bufflen(B);
+if(l==0)return 0;
+else{
+lua_pushlstring(B->L,B->buffer,l);
+B->p=B->buffer;
+B->lvl++;
+return 1;
+}
+}
+static void adjuststack(luaL_Buffer*B){
+if(B->lvl>1){
+lua_State*L=B->L;
+int toget=1;
+size_t toplen=lua_strlen(L,-1);
+do{
+size_t l=lua_strlen(L,-(toget+1));
+if(B->lvl-toget+1>=(20/2)||toplen>l){
+toplen+=l;
+toget++;
+}
+else break;
+}while(toget<B->lvl);
+lua_concat(L,toget);
+B->lvl=B->lvl-toget+1;
+}
+}
+static char*luaL_prepbuffer(luaL_Buffer*B){
+if(emptybuffer(B))
+adjuststack(B);
+return B->buffer;
+}
+static void luaL_addlstring(luaL_Buffer*B,const char*s,size_t l){
+while(l--)
+luaL_addchar(B,*s++);
+}
+static void luaL_pushresult(luaL_Buffer*B){
+emptybuffer(B);
+lua_concat(B->L,B->lvl);
+B->lvl=1;
+}
+static void luaL_addvalue(luaL_Buffer*B){
+lua_State*L=B->L;
+size_t vl;
+const char*s=lua_tolstring(L,-1,&vl);
+if(vl<=bufffree(B)){
+memcpy(B->p,s,vl);
+B->p+=vl;
+lua_pop(L,1);
+}
+else{
+if(emptybuffer(B))
+lua_insert(L,-2);
+B->lvl++;
+adjuststack(B);
+}
+}
+static void luaL_buffinit(lua_State*L,luaL_Buffer*B){
+B->L=L;
+B->p=B->buffer;
+B->lvl=0;
+}
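+/* Chunk loading: getF/luaL_loadfile stream a file through lua_load, replacing an
+initial '#' (shebang) line with a newline and reopening in binary mode when the first
+byte matches the precompiled-chunk signature "\033Lua"; getS/luaL_loadbuffer read
+from a memory buffer instead. */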
+typedef struct LoadF{
+int extraline;
+FILE*f;
+char buff[BUFSIZ];
+}LoadF;
+static const char*getF(lua_State*L,void*ud,size_t*size){
+LoadF*lf=(LoadF*)ud;
+(void)L;
+if(lf->extraline){
+lf->extraline=0;
+*size=1;
+return"\n";
+}
+if(feof(lf->f))return NULL;
+*size=fread(lf->buff,1,sizeof(lf->buff),lf->f);
+return(*size>0)?lf->buff:NULL;
+}
+static int errfile(lua_State*L,const char*what,int fnameindex){
+const char*serr=strerror(errno);
+const char*filename=lua_tostring(L,fnameindex)+1;
+lua_pushfstring(L,"cannot %s %s: %s",what,filename,serr);
+lua_remove(L,fnameindex);
+return(5+1);
+}
+static int luaL_loadfile(lua_State*L,const char*filename){
+LoadF lf;
+int status,readstatus;
+int c;
+int fnameindex=lua_gettop(L)+1;
+lf.extraline=0;
+if(filename==NULL){
+lua_pushliteral(L,"=stdin");
+lf.f=stdin;
+}
+else{
+lua_pushfstring(L,"@%s",filename);
+lf.f=fopen(filename,"r");
+if(lf.f==NULL)return errfile(L,"open",fnameindex);
+}
+c=getc(lf.f);
+if(c=='#'){
+lf.extraline=1;
+while((c=getc(lf.f))!=EOF&&c!='\n');
+if(c=='\n')c=getc(lf.f);
+}
+if(c=="\033Lua"[0]&&filename){
+lf.f=freopen(filename,"rb",lf.f);
+if(lf.f==NULL)return errfile(L,"reopen",fnameindex);
+while((c=getc(lf.f))!=EOF&&c!="\033Lua"[0]);
+lf.extraline=0;
+}
+ungetc(c,lf.f);
+status=lua_load(L,getF,&lf,lua_tostring(L,-1));
+readstatus=ferror(lf.f);
+if(filename)fclose(lf.f);
+if(readstatus){
+lua_settop(L,fnameindex);
+return errfile(L,"read",fnameindex);
+}
+lua_remove(L,fnameindex);
+return status;
+}
+typedef struct LoadS{
+const char*s;
+size_t size;
+}LoadS;
+static const char*getS(lua_State*L,void*ud,size_t*size){
+LoadS*ls=(LoadS*)ud;
+(void)L;
+if(ls->size==0)return NULL;
+*size=ls->size;
+ls->size=0;
+return ls->s;
+}
+static int luaL_loadbuffer(lua_State*L,const char*buff,size_t size,
+const char*name){
+LoadS ls;
+ls.s=buff;
+ls.size=size;
+return lua_load(L,getS,&ls,name);
+}
+static void*l_alloc(void*ud,void*ptr,size_t osize,size_t nsize){
+(void)ud;
+(void)osize;
+if(nsize==0){
+free(ptr);
+return NULL;
+}
+else
+return realloc(ptr,nsize);
+}
+static int panic(lua_State*L){
+(void)L;
+fprintf(stderr,"PANIC: unprotected error in call to Lua API (%s)\n",
+lua_tostring(L,-1));
+return 0;
+}
+static lua_State*luaL_newstate(void){
+lua_State*L=lua_newstate(l_alloc,NULL);
+if(L)lua_atpanic(L,&panic);
+return L;
+}
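+/* Base library (lbaselib.c) subset: assert, error, loadfile, loadstring, next,
+pcall, rawget, setfenv, setmetatable, tonumber, type and unpack, with pairs/ipairs
+and newproxy wired up as closures in base_open. */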
+static int luaB_tonumber(lua_State*L){
+int base=luaL_optint(L,2,10);
+if(base==10){
+luaL_checkany(L,1);
+if(lua_isnumber(L,1)){
+lua_pushnumber(L,lua_tonumber(L,1));
+return 1;
+}
+}
+else{
+const char*s1=luaL_checkstring(L,1);
+char*s2;
+unsigned long n;
+luaL_argcheck(L,2<=base&&base<=36,2,"base out of range");
+n=strtoul(s1,&s2,base);
+if(s1!=s2){
+while(isspace((unsigned char)(*s2)))s2++;
+if(*s2=='\0'){
+lua_pushnumber(L,(lua_Number)n);
+return 1;
+}
+}
+}
+lua_pushnil(L);
+return 1;
+}
+static int luaB_error(lua_State*L){
+int level=luaL_optint(L,2,1);
+lua_settop(L,1);
+if(lua_isstring(L,1)&&level>0){
+luaL_where(L,level);
+lua_pushvalue(L,1);
+lua_concat(L,2);
+}
+return lua_error(L);
+}
+static int luaB_setmetatable(lua_State*L){
+int t=lua_type(L,2);
+luaL_checktype(L,1,5);
+luaL_argcheck(L,t==0||t==5,2,
+"nil or table expected");
+if(luaL_getmetafield(L,1,"__metatable"))
+luaL_error(L,"cannot change a protected metatable");
+lua_settop(L,2);
+lua_setmetatable(L,1);
+return 1;
+}
+static void getfunc(lua_State*L,int opt){
+if(lua_isfunction(L,1))lua_pushvalue(L,1);
+else{
+lua_Debug ar;
+int level=opt?luaL_optint(L,1,1):luaL_checkint(L,1);
+luaL_argcheck(L,level>=0,1,"level must be non-negative");
+if(lua_getstack(L,level,&ar)==0)
+luaL_argerror(L,1,"invalid level");
+lua_getinfo(L,"f",&ar);
+if(lua_isnil(L,-1))
+luaL_error(L,"no function environment for tail call at level %d",
+level);
+}
+}
+static int luaB_setfenv(lua_State*L){
+luaL_checktype(L,2,5);
+getfunc(L,0);
+lua_pushvalue(L,2);
+if(lua_isnumber(L,1)&&lua_tonumber(L,1)==0){
+lua_pushthread(L);
+lua_insert(L,-2);
+lua_setfenv(L,-2);
+return 0;
+}
+else if(lua_iscfunction(L,-2)||lua_setfenv(L,-2)==0)
+luaL_error(L,
+LUA_QL("setfenv")" cannot change environment of given object");
+return 1;
+}
+static int luaB_rawget(lua_State*L){
+luaL_checktype(L,1,5);
+luaL_checkany(L,2);
+lua_settop(L,2);
+lua_rawget(L,1);
+return 1;
+}
+static int luaB_type(lua_State*L){
+luaL_checkany(L,1);
+lua_pushstring(L,luaL_typename(L,1));
+return 1;
+}
+static int luaB_next(lua_State*L){
+luaL_checktype(L,1,5);
+lua_settop(L,2);
+if(lua_next(L,1))
+return 2;
+else{
+lua_pushnil(L);
+return 1;
+}
+}
+static int luaB_pairs(lua_State*L){
+luaL_checktype(L,1,5);
+lua_pushvalue(L,lua_upvalueindex(1));
+lua_pushvalue(L,1);
+lua_pushnil(L);
+return 3;
+}
+static int ipairsaux(lua_State*L){
+int i=luaL_checkint(L,2);
+luaL_checktype(L,1,5);
+i++;
+lua_pushinteger(L,i);
+lua_rawgeti(L,1,i);
+return(lua_isnil(L,-1))?0:2;
+}
+static int luaB_ipairs(lua_State*L){
+luaL_checktype(L,1,5);
+lua_pushvalue(L,lua_upvalueindex(1));
+lua_pushvalue(L,1);
+lua_pushinteger(L,0);
+return 3;
+}
+static int load_aux(lua_State*L,int status){
+if(status==0)
+return 1;
+else{
+lua_pushnil(L);
+lua_insert(L,-2);
+return 2;
+}
+}
+static int luaB_loadstring(lua_State*L){
+size_t l;
+const char*s=luaL_checklstring(L,1,&l);
+const char*chunkname=luaL_optstring(L,2,s);
+return load_aux(L,luaL_loadbuffer(L,s,l,chunkname));
+}
+static int luaB_loadfile(lua_State*L){
+const char*fname=luaL_optstring(L,1,NULL);
+return load_aux(L,luaL_loadfile(L,fname));
+}
+static int luaB_assert(lua_State*L){
+luaL_checkany(L,1);
+if(!lua_toboolean(L,1))
+return luaL_error(L,"%s",luaL_optstring(L,2,"assertion failed!"));
+return lua_gettop(L);
+}
+static int luaB_unpack(lua_State*L){
+int i,e,n;
+luaL_checktype(L,1,5);
+i=luaL_optint(L,2,1);
+e=luaL_opt(L,luaL_checkint,3,luaL_getn(L,1));
+if(i>e)return 0;
+n=e-i+1;
+if(n<=0||!lua_checkstack(L,n))
+return luaL_error(L,"too many results to unpack");
+lua_rawgeti(L,1,i);
+while(i++<e)
+lua_rawgeti(L,1,i);
+return n;
+}
+static int luaB_pcall(lua_State*L){
+int status;
+luaL_checkany(L,1);
+status=lua_pcall(L,lua_gettop(L)-1,(-1),0);
+lua_pushboolean(L,(status==0));
+lua_insert(L,1);
+return lua_gettop(L);
+}
+static int luaB_newproxy(lua_State*L){
+lua_settop(L,1);
+lua_newuserdata(L,0);
+if(lua_toboolean(L,1)==0)
+return 1;
+else if(lua_isboolean(L,1)){
+lua_newtable(L);
+lua_pushvalue(L,-1);
+lua_pushboolean(L,1);
+lua_rawset(L,lua_upvalueindex(1));
+}
+else{
+int validproxy=0;
+if(lua_getmetatable(L,1)){
+lua_rawget(L,lua_upvalueindex(1));
+validproxy=lua_toboolean(L,-1);
+lua_pop(L,1);
+}
+luaL_argcheck(L,validproxy,1,"boolean or proxy expected");
+lua_getmetatable(L,1);
+}
+lua_setmetatable(L,2);
+return 1;
+}
+static const luaL_Reg base_funcs[]={
+{"assert",luaB_assert},
+{"error",luaB_error},
+{"loadfile",luaB_loadfile},
+{"loadstring",luaB_loadstring},
+{"next",luaB_next},
+{"pcall",luaB_pcall},
+{"rawget",luaB_rawget},
+{"setfenv",luaB_setfenv},
+{"setmetatable",luaB_setmetatable},
+{"tonumber",luaB_tonumber},
+{"type",luaB_type},
+{"unpack",luaB_unpack},
+{NULL,NULL}
+};
+static void auxopen(lua_State*L,const char*name,
+lua_CFunction f,lua_CFunction u){
+lua_pushcfunction(L,u);
+lua_pushcclosure(L,f,1);
+lua_setfield(L,-2,name);
+}
+static void base_open(lua_State*L){
+lua_pushvalue(L,(-10002));
+lua_setglobal(L,"_G");
+luaL_register(L,"_G",base_funcs);
+lua_pushliteral(L,"Lua 5.1");
+lua_setglobal(L,"_VERSION");
+auxopen(L,"ipairs",luaB_ipairs,ipairsaux);
+auxopen(L,"pairs",luaB_pairs,luaB_next);
+lua_createtable(L,0,1);
+lua_pushvalue(L,-1);
+lua_setmetatable(L,-2);
+lua_pushliteral(L,"kv");
+lua_setfield(L,-2,"__mode");
+lua_pushcclosure(L,luaB_newproxy,1);
+lua_setglobal(L,"newproxy");
+}
+static int luaopen_base(lua_State*L){
+base_open(L);
+return 1;
+}
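+/* Table library (ltablib.c) subset: insert, remove, concat and sort. */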
+#define aux_getn(L,n)(luaL_checktype(L,n,5),luaL_getn(L,n))
+static int tinsert(lua_State*L){
+int e=aux_getn(L,1)+1;
+int pos;
+switch(lua_gettop(L)){
+case 2:{
+pos=e;
+break;
+}
+case 3:{
+int i;
+pos=luaL_checkint(L,2);
+if(pos>e)e=pos;
+for(i=e;i>pos;i--){
+lua_rawgeti(L,1,i-1);
+lua_rawseti(L,1,i);
+}
+break;
+}
+default:{
+return luaL_error(L,"wrong number of arguments to "LUA_QL("insert"));
+}
+}
+luaL_setn(L,1,e);
+lua_rawseti(L,1,pos);
+return 0;
+}
+static int tremove(lua_State*L){
+int e=aux_getn(L,1);
+int pos=luaL_optint(L,2,e);
+if(!(1<=pos&&pos<=e))
+return 0;
+luaL_setn(L,1,e-1);
+lua_rawgeti(L,1,pos);
+for(;pos<e;pos++){
+lua_rawgeti(L,1,pos+1);
+lua_rawseti(L,1,pos);
+}
+lua_pushnil(L);
+lua_rawseti(L,1,e);
+return 1;
+}
+static void addfield(lua_State*L,luaL_Buffer*b,int i){
+lua_rawgeti(L,1,i);
+if(!lua_isstring(L,-1))
+luaL_error(L,"invalid value (%s) at index %d in table for "
+LUA_QL("concat"),luaL_typename(L,-1),i);
+luaL_addvalue(b);
+}
+static int tconcat(lua_State*L){
+luaL_Buffer b;
+size_t lsep;
+int i,last;
+const char*sep=luaL_optlstring(L,2,"",&lsep);
+luaL_checktype(L,1,5);
+i=luaL_optint(L,3,1);
+last=luaL_opt(L,luaL_checkint,4,luaL_getn(L,1));
+luaL_buffinit(L,&b);
+for(;i<last;i++){
+addfield(L,&b,i);
+luaL_addlstring(&b,sep,lsep);
+}
+if(i==last)
+addfield(L,&b,i);
+luaL_pushresult(&b);
+return 1;
+}
+static void set2(lua_State*L,int i,int j){
+lua_rawseti(L,1,i);
+lua_rawseti(L,1,j);
+}
+static int sort_comp(lua_State*L,int a,int b){
+if(!lua_isnil(L,2)){
+int res;
+lua_pushvalue(L,2);
+lua_pushvalue(L,a-1);
+lua_pushvalue(L,b-2);
+lua_call(L,2,1);
+res=lua_toboolean(L,-1);
+lua_pop(L,1);
+return res;
+}
+else
+return lua_lessthan(L,a,b);
+}
+static void auxsort(lua_State*L,int l,int u){
+while(l<u){
+int i,j;
+lua_rawgeti(L,1,l);
+lua_rawgeti(L,1,u);
+if(sort_comp(L,-1,-2))
+set2(L,l,u);
+else
+lua_pop(L,2);
+if(u-l==1)break;
+i=(l+u)/2;
+lua_rawgeti(L,1,i);
+lua_rawgeti(L,1,l);
+if(sort_comp(L,-2,-1))
+set2(L,i,l);
+else{
+lua_pop(L,1);
+lua_rawgeti(L,1,u);
+if(sort_comp(L,-1,-2))
+set2(L,i,u);
+else
+lua_pop(L,2);
+}
+if(u-l==2)break;
+lua_rawgeti(L,1,i);
+lua_pushvalue(L,-1);
+lua_rawgeti(L,1,u-1);
+set2(L,i,u-1);
+i=l;j=u-1;
+for(;;){
+while(lua_rawgeti(L,1,++i),sort_comp(L,-1,-2)){
+if(i>u)luaL_error(L,"invalid order function for sorting");
+lua_pop(L,1);
+}
+while(lua_rawgeti(L,1,--j),sort_comp(L,-3,-1)){
+if(j<l)luaL_error(L,"invalid order function for sorting");
+lua_pop(L,1);
+}
+if(j<i){
+lua_pop(L,3);
+break;
+}
+set2(L,i,j);
+}
+lua_rawgeti(L,1,u-1);
+lua_rawgeti(L,1,i);
+set2(L,u-1,i);
+if(i-l<u-i){
+j=l;i=i-1;l=i+2;
+}
+else{
+j=i+1;i=u;u=j-2;
+}
+auxsort(L,j,i);
+}
+}
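+/* auxsort is the in-place quicksort from ltablib.c: it first orders a[l],
+** a[(l+u)/2] and a[u], parks the median at position u-1 as the pivot,
+** partitions, then recurses on the smaller half and loops on the larger one
+** to keep the C stack depth logarithmic. */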
+static int sort(lua_State*L){
+int n=aux_getn(L,1);
+luaL_checkstack(L,40,"");
+if(!lua_isnoneornil(L,2))
+luaL_checktype(L,2,6);
+lua_settop(L,2);
+auxsort(L,1,n);
+return 0;
+}
+static const luaL_Reg tab_funcs[]={
+{"concat",tconcat},
+{"insert",tinsert},
+{"remove",tremove},
+{"sort",sort},
+{NULL,NULL}
+};
+static int luaopen_table(lua_State*L){
+luaL_register(L,"table",tab_funcs);
+return 1;
+}
+static const char*const fnames[]={"input","output"};
+static int pushresult(lua_State*L,int i,const char*filename){
+int en=errno;
+if(i){
+lua_pushboolean(L,1);
+return 1;
+}
+else{
+lua_pushnil(L);
+if(filename)
+lua_pushfstring(L,"%s: %s",filename,strerror(en));
+else
+lua_pushfstring(L,"%s",strerror(en));
+lua_pushinteger(L,en);
+return 3;
+}
+}
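+/* On success pushresult returns true; on failure it returns the usual Lua
+** error triple: nil, a message built from strerror, and the errno value. */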
+static void fileerror(lua_State*L,int arg,const char*filename){
+lua_pushfstring(L,"%s: %s",filename,strerror(errno));
+luaL_argerror(L,arg,lua_tostring(L,-1));
+}
+#define tofilep(L)((FILE**)luaL_checkudata(L,1,"FILE*"))
+static int io_type(lua_State*L){
+void*ud;
+luaL_checkany(L,1);
+ud=lua_touserdata(L,1);
+lua_getfield(L,(-10000),"FILE*");
+if(ud==NULL||!lua_getmetatable(L,1)||!lua_rawequal(L,-2,-1))
+lua_pushnil(L);
+else if(*((FILE**)ud)==NULL)
+lua_pushliteral(L,"closed file");
+else
+lua_pushliteral(L,"file");
+return 1;
+}
+static FILE*tofile(lua_State*L){
+FILE**f=tofilep(L);
+if(*f==NULL)
+luaL_error(L,"attempt to use a closed file");
+return*f;
+}
+static FILE**newfile(lua_State*L){
+FILE**pf=(FILE**)lua_newuserdata(L,sizeof(FILE*));
+*pf=NULL;
+luaL_getmetatable(L,"FILE*");
+lua_setmetatable(L,-2);
+return pf;
+}
+static int io_noclose(lua_State*L){
+lua_pushnil(L);
+lua_pushliteral(L,"cannot close standard file");
+return 2;
+}
+static int io_pclose(lua_State*L){
+FILE**p=tofilep(L);
+int ok=lua_pclose(L,*p);
+*p=NULL;
+return pushresult(L,ok,NULL);
+}
+static int io_fclose(lua_State*L){
+FILE**p=tofilep(L);
+int ok=(fclose(*p)==0);
+*p=NULL;
+return pushresult(L,ok,NULL);
+}
+static int aux_close(lua_State*L){
+lua_getfenv(L,1);
+lua_getfield(L,-1,"__close");
+return(lua_tocfunction(L,-1))(L);
+}
+static int io_close(lua_State*L){
+if(lua_isnone(L,1))
+lua_rawgeti(L,(-10001),2);
+tofile(L);
+return aux_close(L);
+}
+static int io_gc(lua_State*L){
+FILE*f=*tofilep(L);
+if(f!=NULL)
+aux_close(L);
+return 0;
+}
+static int io_open(lua_State*L){
+const char*filename=luaL_checkstring(L,1);
+const char*mode=luaL_optstring(L,2,"r");
+FILE**pf=newfile(L);
+*pf=fopen(filename,mode);
+return(*pf==NULL)?pushresult(L,0,filename):1;
+}
+static FILE*getiofile(lua_State*L,int findex){
+FILE*f;
+lua_rawgeti(L,(-10001),findex);
+f=*(FILE**)lua_touserdata(L,-1);
+if(f==NULL)
+luaL_error(L,"standard %s file is closed",fnames[findex-1]);
+return f;
+}
+static int g_iofile(lua_State*L,int f,const char*mode){
+if(!lua_isnoneornil(L,1)){
+const char*filename=lua_tostring(L,1);
+if(filename){
+FILE**pf=newfile(L);
+*pf=fopen(filename,mode);
+if(*pf==NULL)
+fileerror(L,1,filename);
+}
+else{
+tofile(L);
+lua_pushvalue(L,1);
+}
+lua_rawseti(L,(-10001),f);
+}
+lua_rawgeti(L,(-10001),f);
+return 1;
+}
+static int io_input(lua_State*L){
+return g_iofile(L,1,"r");
+}
+static int io_output(lua_State*L){
+return g_iofile(L,2,"w");
+}
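+/* The current default input/output handles live in the io functions' shared
+** environment table (pseudo-index -10001) at slots 1 and 2; io.input and
+** io.output replace them when called with a filename or an open file. */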
+static int io_readline(lua_State*L);
+static void aux_lines(lua_State*L,int idx,int toclose){
+lua_pushvalue(L,idx);
+lua_pushboolean(L,toclose);
+lua_pushcclosure(L,io_readline,2);
+}
+static int f_lines(lua_State*L){
+tofile(L);
+aux_lines(L,1,0);
+return 1;
+}
+static int io_lines(lua_State*L){
+if(lua_isnoneornil(L,1)){
+lua_rawgeti(L,(-10001),1);
+return f_lines(L);
+}
+else{
+const char*filename=luaL_checkstring(L,1);
+FILE**pf=newfile(L);
+*pf=fopen(filename,"r");
+if(*pf==NULL)
+fileerror(L,1,filename);
+aux_lines(L,lua_gettop(L),1);
+return 1;
+}
+}
+static int read_number(lua_State*L,FILE*f){
+lua_Number d;
+if(fscanf(f,"%lf",&d)==1){
+lua_pushnumber(L,d);
+return 1;
+}
+else{
+lua_pushnil(L);
+return 0;
+}
+}
+static int test_eof(lua_State*L,FILE*f){
+int c=getc(f);
+ungetc(c,f);
+lua_pushlstring(L,NULL,0);
+return(c!=EOF);
+}
+static int read_line(lua_State*L,FILE*f){
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+for(;;){
+size_t l;
+char*p=luaL_prepbuffer(&b);
+if(fgets(p,BUFSIZ,f)==NULL){
+luaL_pushresult(&b);
+return(lua_objlen(L,-1)>0);
+}
+l=strlen(p);
+if(l==0||p[l-1]!='\n')
+luaL_addsize(&b,l);
+else{
+luaL_addsize(&b,l-1);
+luaL_pushresult(&b);
+return 1;
+}
+}
+}
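+/* read_line grows the buffer in BUFSIZ chunks until fgets sees a newline or
+** EOF; the trailing '\n' is dropped before the string is pushed. */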
+static int read_chars(lua_State*L,FILE*f,size_t n){
+size_t rlen;
+size_t nr;
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+rlen=BUFSIZ;
+do{
+char*p=luaL_prepbuffer(&b);
+if(rlen>n)rlen=n;
+nr=fread(p,sizeof(char),rlen,f);
+luaL_addsize(&b,nr);
+n-=nr;
+}while(n>0&&nr==rlen);
+luaL_pushresult(&b);
+return(n==0||lua_objlen(L,-1)>0);
+}
+static int g_read(lua_State*L,FILE*f,int first){
+int nargs=lua_gettop(L)-1;
+int success;
+int n;
+clearerr(f);
+if(nargs==0){
+success=read_line(L,f);
+n=first+1;
+}
+else{
+luaL_checkstack(L,nargs+20,"too many arguments");
+success=1;
+for(n=first;nargs--&&success;n++){
+if(lua_type(L,n)==3){
+size_t l=(size_t)lua_tointeger(L,n);
+success=(l==0)?test_eof(L,f):read_chars(L,f,l);
+}
+else{
+const char*p=lua_tostring(L,n);
+luaL_argcheck(L,p&&p[0]=='*',n,"invalid option");
+switch(p[1]){
+case'n':
+success=read_number(L,f);
+break;
+case'l':
+success=read_line(L,f);
+break;
+case'a':
+read_chars(L,f,~((size_t)0));
+success=1;
+break;
+default:
+return luaL_argerror(L,n,"invalid format");
+}
+}
+}
+}
+if(ferror(f))
+return pushresult(L,0,NULL);
+if(!success){
+lua_pop(L,1);
+lua_pushnil(L);
+}
+return n-first;
+}
+static int io_read(lua_State*L){
+return g_read(L,getiofile(L,1),1);
+}
+static int f_read(lua_State*L){
+return g_read(L,tofile(L),2);
+}
+static int io_readline(lua_State*L){
+FILE*f=*(FILE**)lua_touserdata(L,lua_upvalueindex(1));
+int success;
+if(f==NULL)
+luaL_error(L,"file is already closed");
+success=read_line(L,f);
+if(ferror(f))
+return luaL_error(L,"%s",strerror(errno));
+if(success)return 1;
+else{
+if(lua_toboolean(L,lua_upvalueindex(2))){
+lua_settop(L,0);
+lua_pushvalue(L,lua_upvalueindex(1));
+aux_close(L);
+}
+return 0;
+}
+}
+static int g_write(lua_State*L,FILE*f,int arg){
+int nargs=lua_gettop(L)-1;
+int status=1;
+for(;nargs--;arg++){
+if(lua_type(L,arg)==3){
+status=status&&
+fprintf(f,"%.14g",lua_tonumber(L,arg))>0;
+}
+else{
+size_t l;
+const char*s=luaL_checklstring(L,arg,&l);
+status=status&&(fwrite(s,sizeof(char),l,f)==l);
+}
+}
+return pushresult(L,status,NULL);
+}
+static int io_write(lua_State*L){
+return g_write(L,getiofile(L,2),1);
+}
+static int f_write(lua_State*L){
+return g_write(L,tofile(L),2);
+}
+static int io_flush(lua_State*L){
+return pushresult(L,fflush(getiofile(L,2))==0,NULL);
+}
+static int f_flush(lua_State*L){
+return pushresult(L,fflush(tofile(L))==0,NULL);
+}
+static const luaL_Reg iolib[]={
+{"close",io_close},
+{"flush",io_flush},
+{"input",io_input},
+{"lines",io_lines},
+{"open",io_open},
+{"output",io_output},
+{"read",io_read},
+{"type",io_type},
+{"write",io_write},
+{NULL,NULL}
+};
+static const luaL_Reg flib[]={
+{"close",io_close},
+{"flush",f_flush},
+{"lines",f_lines},
+{"read",f_read},
+{"write",f_write},
+{"__gc",io_gc},
+{NULL,NULL}
+};
+static void createmeta(lua_State*L){
+luaL_newmetatable(L,"FILE*");
+lua_pushvalue(L,-1);
+lua_setfield(L,-2,"__index");
+luaL_register(L,NULL,flib);
+}
+static void createstdfile(lua_State*L,FILE*f,int k,const char*fname){
+*newfile(L)=f;
+if(k>0){
+lua_pushvalue(L,-1);
+lua_rawseti(L,(-10001),k);
+}
+lua_pushvalue(L,-2);
+lua_setfenv(L,-2);
+lua_setfield(L,-3,fname);
+}
+static void newfenv(lua_State*L,lua_CFunction cls){
+lua_createtable(L,0,1);
+lua_pushcfunction(L,cls);
+lua_setfield(L,-2,"__close");
+}
+static int luaopen_io(lua_State*L){
+createmeta(L);
+newfenv(L,io_fclose);
+lua_replace(L,(-10001));
+luaL_register(L,"io",iolib);
+newfenv(L,io_noclose);
+createstdfile(L,stdin,1,"stdin");
+createstdfile(L,stdout,2,"stdout");
+createstdfile(L,stderr,0,"stderr");
+lua_pop(L,1);
+lua_getfield(L,-1,"popen");
+newfenv(L,io_pclose);
+lua_setfenv(L,-2);
+lua_pop(L,1);
+return 1;
+}
+static int os_pushresult(lua_State*L,int i,const char*filename){
+int en=errno;
+if(i){
+lua_pushboolean(L,1);
+return 1;
+}
+else{
+lua_pushnil(L);
+lua_pushfstring(L,"%s: %s",filename,strerror(en));
+lua_pushinteger(L,en);
+return 3;
+}
+}
+static int os_remove(lua_State*L){
+const char*filename=luaL_checkstring(L,1);
+return os_pushresult(L,remove(filename)==0,filename);
+}
+static int os_exit(lua_State*L){
+exit(luaL_optint(L,1,EXIT_SUCCESS));
+}
+static const luaL_Reg syslib[]={
+{"exit",os_exit},
+{"remove",os_remove},
+{NULL,NULL}
+};
+static int luaopen_os(lua_State*L){
+luaL_register(L,"os",syslib);
+return 1;
+}
+#define uchar(c)((unsigned char)(c))
+static ptrdiff_t posrelat(ptrdiff_t pos,size_t len){
+if(pos<0)pos+=(ptrdiff_t)len+1;
+return(pos>=0)?pos:0;
+}
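+/* posrelat maps negative string positions to offsets from the end, so e.g.
+** ("hello"):sub(-3) selects "llo" on the Lua side. */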
+static int str_sub(lua_State*L){
+size_t l;
+const char*s=luaL_checklstring(L,1,&l);
+ptrdiff_t start=posrelat(luaL_checkinteger(L,2),l);
+ptrdiff_t end=posrelat(luaL_optinteger(L,3,-1),l);
+if(start<1)start=1;
+if(end>(ptrdiff_t)l)end=(ptrdiff_t)l;
+if(start<=end)
+lua_pushlstring(L,s+start-1,end-start+1);
+else lua_pushliteral(L,"");
+return 1;
+}
+static int str_lower(lua_State*L){
+size_t l;
+size_t i;
+luaL_Buffer b;
+const char*s=luaL_checklstring(L,1,&l);
+luaL_buffinit(L,&b);
+for(i=0;i<l;i++)
+luaL_addchar(&b,tolower(uchar(s[i])));
+luaL_pushresult(&b);
+return 1;
+}
+static int str_upper(lua_State*L){
+size_t l;
+size_t i;
+luaL_Buffer b;
+const char*s=luaL_checklstring(L,1,&l);
+luaL_buffinit(L,&b);
+for(i=0;i<l;i++)
+luaL_addchar(&b,toupper(uchar(s[i])));
+luaL_pushresult(&b);
+return 1;
+}
+static int str_rep(lua_State*L){
+size_t l;
+luaL_Buffer b;
+const char*s=luaL_checklstring(L,1,&l);
+int n=luaL_checkint(L,2);
+luaL_buffinit(L,&b);
+while(n-->0)
+luaL_addlstring(&b,s,l);
+luaL_pushresult(&b);
+return 1;
+}
+static int str_byte(lua_State*L){
+size_t l;
+const char*s=luaL_checklstring(L,1,&l);
+ptrdiff_t posi=posrelat(luaL_optinteger(L,2,1),l);
+ptrdiff_t pose=posrelat(luaL_optinteger(L,3,posi),l);
+int n,i;
+if(posi<=0)posi=1;
+if((size_t)pose>l)pose=l;
+if(posi>pose)return 0;
+n=(int)(pose-posi+1);
+if(posi+n<=pose)
+luaL_error(L,"string slice too long");
+luaL_checkstack(L,n,"string slice too long");
+for(i=0;i<n;i++)
+lua_pushinteger(L,uchar(s[posi+i-1]));
+return n;
+}
+static int str_char(lua_State*L){
+int n=lua_gettop(L);
+int i;
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+for(i=1;i<=n;i++){
+int c=luaL_checkint(L,i);
+luaL_argcheck(L,uchar(c)==c,i,"invalid value");
+luaL_addchar(&b,uchar(c));
+}
+luaL_pushresult(&b);
+return 1;
+}
+typedef struct MatchState{
+const char*src_init;
+const char*src_end;
+lua_State*L;
+int level;
+struct{
+const char*init;
+ptrdiff_t len;
+}capture[32];
+}MatchState;
+static int check_capture(MatchState*ms,int l){
+l-='1';
+if(l<0||l>=ms->level||ms->capture[l].len==(-1))
+return luaL_error(ms->L,"invalid capture index");
+return l;
+}
+static int capture_to_close(MatchState*ms){
+int level=ms->level;
+for(level--;level>=0;level--)
+if(ms->capture[level].len==(-1))return level;
+return luaL_error(ms->L,"invalid pattern capture");
+}
+static const char*classend(MatchState*ms,const char*p){
+switch(*p++){
+case'%':{
+if(*p=='\0')
+luaL_error(ms->L,"malformed pattern (ends with "LUA_QL("%%")")");
+return p+1;
+}
+case'[':{
+if(*p=='^')p++;
+do{
+if(*p=='\0')
+luaL_error(ms->L,"malformed pattern (missing "LUA_QL("]")")");
+if(*(p++)=='%'&&*p!='\0')
+p++;
+}while(*p!=']');
+return p+1;
+}
+default:{
+return p;
+}
+}
+}
+static int match_class(int c,int cl){
+int res;
+switch(tolower(cl)){
+case'a':res=isalpha(c);break;
+case'c':res=iscntrl(c);break;
+case'd':res=isdigit(c);break;
+case'l':res=islower(c);break;
+case'p':res=ispunct(c);break;
+case's':res=isspace(c);break;
+case'u':res=isupper(c);break;
+case'w':res=isalnum(c);break;
+case'x':res=isxdigit(c);break;
+case'z':res=(c==0);break;
+default:return(cl==c);
+}
+return(islower(cl)?res:!res);
+}
+static int matchbracketclass(int c,const char*p,const char*ec){
+int sig=1;
+if(*(p+1)=='^'){
+sig=0;
+p++;
+}
+while(++p<ec){
+if(*p=='%'){
+p++;
+if(match_class(c,uchar(*p)))
+return sig;
+}
+else if((*(p+1)=='-')&&(p+2<ec)){
+p+=2;
+if(uchar(*(p-2))<=c&&c<=uchar(*p))
+return sig;
+}
+else if(uchar(*p)==c)return sig;
+}
+return!sig;
+}
+static int singlematch(int c,const char*p,const char*ep){
+switch(*p){
+case'.':return 1;
+case'%':return match_class(c,uchar(*(p+1)));
+case'[':return matchbracketclass(c,p,ep-1);
+default:return(uchar(*p)==c);
+}
+}
+static const char*match(MatchState*ms,const char*s,const char*p);
+static const char*matchbalance(MatchState*ms,const char*s,
+const char*p){
+if(*p==0||*(p+1)==0)
+luaL_error(ms->L,"unbalanced pattern");
+if(*s!=*p)return NULL;
+else{
+int b=*p;
+int e=*(p+1);
+int cont=1;
+while(++s<ms->src_end){
+if(*s==e){
+if(--cont==0)return s+1;
+}
+else if(*s==b)cont++;
+}
+}
+return NULL;
+}
+static const char*max_expand(MatchState*ms,const char*s,
+const char*p,const char*ep){
+ptrdiff_t i=0;
+while((s+i)<ms->src_end&&singlematch(uchar(*(s+i)),p,ep))
+i++;
+while(i>=0){
+const char*res=match(ms,(s+i),ep+1);
+if(res)return res;
+i--;
+}
+return NULL;
+}
+static const char*min_expand(MatchState*ms,const char*s,
+const char*p,const char*ep){
+for(;;){
+const char*res=match(ms,s,ep+1);
+if(res!=NULL)
+return res;
+else if(s<ms->src_end&&singlematch(uchar(*s),p,ep))
+s++;
+else return NULL;
+}
+}
+static const char*start_capture(MatchState*ms,const char*s,
+const char*p,int what){
+const char*res;
+int level=ms->level;
+if(level>=32)luaL_error(ms->L,"too many captures");
+ms->capture[level].init=s;
+ms->capture[level].len=what;
+ms->level=level+1;
+if((res=match(ms,s,p))==NULL)
+ms->level--;
+return res;
+}
+static const char*end_capture(MatchState*ms,const char*s,
+const char*p){
+int l=capture_to_close(ms);
+const char*res;
+ms->capture[l].len=s-ms->capture[l].init;
+if((res=match(ms,s,p))==NULL)
+ms->capture[l].len=(-1);
+return res;
+}
+static const char*match_capture(MatchState*ms,const char*s,int l){
+size_t len;
+l=check_capture(ms,l);
+len=ms->capture[l].len;
+if((size_t)(ms->src_end-s)>=len&&
+memcmp(ms->capture[l].init,s,len)==0)
+return s+len;
+else return NULL;
+}
+static const char*match(MatchState*ms,const char*s,const char*p){
+init:
+switch(*p){
+case'(':{
+if(*(p+1)==')')
+return start_capture(ms,s,p+2,(-2));
+else
+return start_capture(ms,s,p+1,(-1));
+}
+case')':{
+return end_capture(ms,s,p+1);
+}
+case'%':{
+switch(*(p+1)){
+case'b':{
+s=matchbalance(ms,s,p+2);
+if(s==NULL)return NULL;
+p+=4;goto init;
+}
+case'f':{
+const char*ep;char previous;
+p+=2;
+if(*p!='[')
+luaL_error(ms->L,"missing "LUA_QL("[")" after "
+LUA_QL("%%f")" in pattern");
+ep=classend(ms,p);
+previous=(s==ms->src_init)?'\0':*(s-1);
+if(matchbracketclass(uchar(previous),p,ep-1)||
+!matchbracketclass(uchar(*s),p,ep-1))return NULL;
+p=ep;goto init;
+}
+default:{
+if(isdigit(uchar(*(p+1)))){
+s=match_capture(ms,s,uchar(*(p+1)));
+if(s==NULL)return NULL;
+p+=2;goto init;
+}
+goto dflt;
+}
+}
+}
+case'\0':{
+return s;
+}
+case'$':{
+if(*(p+1)=='\0')
+return(s==ms->src_end)?s:NULL;
+else goto dflt;
+}
+default:dflt:{
+const char*ep=classend(ms,p);
+int m=s<ms->src_end&&singlematch(uchar(*s),p,ep);
+switch(*ep){
+case'?':{
+const char*res;
+if(m&&((res=match(ms,s+1,ep+1))!=NULL))
+return res;
+p=ep+1;goto init;
+}
+case'*':{
+return max_expand(ms,s,p,ep);
+}
+case'+':{
+return(m?max_expand(ms,s+1,p,ep):NULL);
+}
+case'-':{
+return min_expand(ms,s,p,ep);
+}
+default:{
+if(!m)return NULL;
+s++;p=ep;goto init;
+}
+}
+}
+}
+}
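+/* match() is the Lua 5.1 pattern-matching core: '(' and ')' open and close
+** captures (an empty "()" captures a position), "%b" matches balanced pairs,
+** "%f[set]" is a frontier test, "%1".."%9" are back-references, and the
+** quantifiers '?', '*', '+' and '-' are handled via max_expand/min_expand. */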
+static const char*lmemfind(const char*s1,size_t l1,
+const char*s2,size_t l2){
+if(l2==0)return s1;
+else if(l2>l1)return NULL;
+else{
+const char*init;
+l2--;
+l1=l1-l2;
+while(l1>0&&(init=(const char*)memchr(s1,*s2,l1))!=NULL){
+init++;
+if(memcmp(init,s2+1,l2)==0)
+return init-1;
+else{
+l1-=init-s1;
+s1=init;
+}
+}
+return NULL;
+}
+}
+static void push_onecapture(MatchState*ms,int i,const char*s,
+const char*e){
+if(i>=ms->level){
+if(i==0)
+lua_pushlstring(ms->L,s,e-s);
+else
+luaL_error(ms->L,"invalid capture index");
+}
+else{
+ptrdiff_t l=ms->capture[i].len;
+if(l==(-1))luaL_error(ms->L,"unfinished capture");
+if(l==(-2))
+lua_pushinteger(ms->L,ms->capture[i].init-ms->src_init+1);
+else
+lua_pushlstring(ms->L,ms->capture[i].init,l);
+}
+}
+static int push_captures(MatchState*ms,const char*s,const char*e){
+int i;
+int nlevels=(ms->level==0&&s)?1:ms->level;
+luaL_checkstack(ms->L,nlevels,"too many captures");
+for(i=0;i<nlevels;i++)
+push_onecapture(ms,i,s,e);
+return nlevels;
+}
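+/* With no explicit captures the whole match (s..e) is pushed as the single
+** result, which is why string.match("abc","b.") returns "bc". */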
+static int str_find_aux(lua_State*L,int find){
+size_t l1,l2;
+const char*s=luaL_checklstring(L,1,&l1);
+const char*p=luaL_checklstring(L,2,&l2);
+ptrdiff_t init=posrelat(luaL_optinteger(L,3,1),l1)-1;
+if(init<0)init=0;
+else if((size_t)(init)>l1)init=(ptrdiff_t)l1;
+if(find&&(lua_toboolean(L,4)||
+strpbrk(p,"^$*+?.([%-")==NULL)){
+const char*s2=lmemfind(s+init,l1-init,p,l2);
+if(s2){
+lua_pushinteger(L,s2-s+1);
+lua_pushinteger(L,s2-s+l2);
+return 2;
+}
+}
+else{
+MatchState ms;
+int anchor=(*p=='^')?(p++,1):0;
+const char*s1=s+init;
+ms.L=L;
+ms.src_init=s;
+ms.src_end=s+l1;
+do{
+const char*res;
+ms.level=0;
+if((res=match(&ms,s1,p))!=NULL){
+if(find){
+lua_pushinteger(L,s1-s+1);
+lua_pushinteger(L,res-s);
+return push_captures(&ms,NULL,0)+2;
+}
+else
+return push_captures(&ms,s1,res);
+}
+}while(s1++<ms.src_end&&!anchor);
+}
+lua_pushnil(L);
+return 1;
+}
+static int str_find(lua_State*L){
+return str_find_aux(L,1);
+}
+static int str_match(lua_State*L){
+return str_find_aux(L,0);
+}
+static int gmatch_aux(lua_State*L){
+MatchState ms;
+size_t ls;
+const char*s=lua_tolstring(L,lua_upvalueindex(1),&ls);
+const char*p=lua_tostring(L,lua_upvalueindex(2));
+const char*src;
+ms.L=L;
+ms.src_init=s;
+ms.src_end=s+ls;
+for(src=s+(size_t)lua_tointeger(L,lua_upvalueindex(3));
+src<=ms.src_end;
+src++){
+const char*e;
+ms.level=0;
+if((e=match(&ms,src,p))!=NULL){
+lua_Integer newstart=e-s;
+if(e==src)newstart++;
+lua_pushinteger(L,newstart);
+lua_replace(L,lua_upvalueindex(3));
+return push_captures(&ms,src,e);
+}
+}
+return 0;
+}
+static int gmatch(lua_State*L){
+luaL_checkstring(L,1);
+luaL_checkstring(L,2);
+lua_settop(L,2);
+lua_pushinteger(L,0);
+lua_pushcclosure(L,gmatch_aux,3);
+return 1;
+}
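+/* gmatch closes over the subject, the pattern and the current offset (three
+** upvalues); a typical Lua-side use is:
+**   for k, v in string.gmatch("a=1, b=2", "(%w+)=(%w+)") do ... end */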
+static void add_s(MatchState*ms,luaL_Buffer*b,const char*s,
+const char*e){
+size_t l,i;
+const char*news=lua_tolstring(ms->L,3,&l);
+for(i=0;i<l;i++){
+if(news[i]!='%')
+luaL_addchar(b,news[i]);
+else{
+i++;
+if(!isdigit(uchar(news[i])))
+luaL_addchar(b,news[i]);
+else if(news[i]=='0')
+luaL_addlstring(b,s,e-s);
+else{
+push_onecapture(ms,news[i]-'1',s,e);
+luaL_addvalue(b);
+}
+}
+}
+}
+static void add_value(MatchState*ms,luaL_Buffer*b,const char*s,
+const char*e){
+lua_State*L=ms->L;
+switch(lua_type(L,3)){
+case 3:
+case 4:{
+add_s(ms,b,s,e);
+return;
+}
+case 6:{
+int n;
+lua_pushvalue(L,3);
+n=push_captures(ms,s,e);
+lua_call(L,n,1);
+break;
+}
+case 5:{
+push_onecapture(ms,0,s,e);
+lua_gettable(L,3);
+break;
+}
+}
+if(!lua_toboolean(L,-1)){
+lua_pop(L,1);
+lua_pushlstring(L,s,e-s);
+}
+else if(!lua_isstring(L,-1))
+luaL_error(L,"invalid replacement value (a %s)",luaL_typename(L,-1));
+luaL_addvalue(b);
+}
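+/* The gsub replacement may be a string (with %0-%9 capture references
+** expanded in add_s), a function called with the captures, or a table indexed
+** by the first capture; a false/nil result keeps the original matched text. */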
+static int str_gsub(lua_State*L){
+size_t srcl;
+const char*src=luaL_checklstring(L,1,&srcl);
+const char*p=luaL_checkstring(L,2);
+int tr=lua_type(L,3);
+int max_s=luaL_optint(L,4,srcl+1);
+int anchor=(*p=='^')?(p++,1):0;
+int n=0;
+MatchState ms;
+luaL_Buffer b;
+luaL_argcheck(L,tr==3||tr==4||
+tr==6||tr==5,3,
+"string/function/table expected");
+luaL_buffinit(L,&b);
+ms.L=L;
+ms.src_init=src;
+ms.src_end=src+srcl;
+while(n<max_s){
+const char*e;
+ms.level=0;
+e=match(&ms,src,p);
+if(e){
+n++;
+add_value(&ms,&b,src,e);
+}
+if(e&&e>src)
+src=e;
+else if(src<ms.src_end)
+luaL_addchar(&b,*src++);
+else break;
+if(anchor)break;
+}
+luaL_addlstring(&b,src,ms.src_end-src);
+luaL_pushresult(&b);
+lua_pushinteger(L,n);
+return 2;
+}
+static void addquoted(lua_State*L,luaL_Buffer*b,int arg){
+size_t l;
+const char*s=luaL_checklstring(L,arg,&l);
+luaL_addchar(b,'"');
+while(l--){
+switch(*s){
+case'"':case'\\':case'\n':{
+luaL_addchar(b,'\\');
+luaL_addchar(b,*s);
+break;
+}
+case'\r':{
+luaL_addlstring(b,"\\r",2);
+break;
+}
+case'\0':{
+luaL_addlstring(b,"\\000",4);
+break;
+}
+default:{
+luaL_addchar(b,*s);
+break;
+}
+}
+s++;
+}
+luaL_addchar(b,'"');
+}
+static const char*scanformat(lua_State*L,const char*strfrmt,char*form){
+const char*p=strfrmt;
+while(*p!='\0'&&strchr("-+ #0",*p)!=NULL)p++;
+if((size_t)(p-strfrmt)>=sizeof("-+ #0"))
+luaL_error(L,"invalid format (repeated flags)");
+if(isdigit(uchar(*p)))p++;
+if(isdigit(uchar(*p)))p++;
+if(*p=='.'){
+p++;
+if(isdigit(uchar(*p)))p++;
+if(isdigit(uchar(*p)))p++;
+}
+if(isdigit(uchar(*p)))
+luaL_error(L,"invalid format (width or precision too long)");
+*(form++)='%';
+strncpy(form,strfrmt,p-strfrmt+1);
+form+=p-strfrmt+1;
+*form='\0';
+return p;
+}
+static void addintlen(char*form){
+size_t l=strlen(form);
+char spec=form[l-1];
+strcpy(form+l-1,"l");
+form[l+sizeof("l")-2]=spec;
+form[l+sizeof("l")-1]='\0';
+}
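+/* addintlen inserts the "l" length modifier before the conversion character,
+** turning e.g. "%5d" into "%5ld" so longs are printed for %d/%i/%o/%u/%x/%X. */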
+static int str_format(lua_State*L){
+int top=lua_gettop(L);
+int arg=1;
+size_t sfl;
+const char*strfrmt=luaL_checklstring(L,arg,&sfl);
+const char*strfrmt_end=strfrmt+sfl;
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+while(strfrmt<strfrmt_end){
+if(*strfrmt!='%')
+luaL_addchar(&b,*strfrmt++);
+else if(*++strfrmt=='%')
+luaL_addchar(&b,*strfrmt++);
+else{
+char form[(sizeof("-+ #0")+sizeof("l")+10)];
+char buff[512];
+if(++arg>top)
+luaL_argerror(L,arg,"no value");
+strfrmt=scanformat(L,strfrmt,form);
+switch(*strfrmt++){
+case'c':{
+sprintf(buff,form,(int)luaL_checknumber(L,arg));
+break;
+}
+case'd':case'i':{
+addintlen(form);
+sprintf(buff,form,(long)luaL_checknumber(L,arg));
+break;
+}
+case'o':case'u':case'x':case'X':{
+addintlen(form);
+sprintf(buff,form,(unsigned long)luaL_checknumber(L,arg));
+break;
+}
+case'e':case'E':case'f':
+case'g':case'G':{
+sprintf(buff,form,(double)luaL_checknumber(L,arg));
+break;
+}
+case'q':{
+addquoted(L,&b,arg);
+continue;
+}
+case's':{
+size_t l;
+const char*s=luaL_checklstring(L,arg,&l);
+if(!strchr(form,'.')&&l>=100){
+lua_pushvalue(L,arg);
+luaL_addvalue(&b);
+continue;
+}
+else{
+sprintf(buff,form,s);
+break;
+}
+}
+default:{
+return luaL_error(L,"invalid option "LUA_QL("%%%c")" to "
+LUA_QL("format"),*(strfrmt-1));
+}
+}
+luaL_addlstring(&b,buff,strlen(buff));
+}
+}
+luaL_pushresult(&b);
+return 1;
+}
+static const luaL_Reg strlib[]={
+{"byte",str_byte},
+{"char",str_char},
+{"find",str_find},
+{"format",str_format},
+{"gmatch",gmatch},
+{"gsub",str_gsub},
+{"lower",str_lower},
+{"match",str_match},
+{"rep",str_rep},
+{"sub",str_sub},
+{"upper",str_upper},
+{NULL,NULL}
+};
+static void createmetatable(lua_State*L){
+lua_createtable(L,0,1);
+lua_pushliteral(L,"");
+lua_pushvalue(L,-2);
+lua_setmetatable(L,-2);
+lua_pop(L,1);
+lua_pushvalue(L,-2);
+lua_setfield(L,-2,"__index");
+lua_pop(L,1);
+}
+static int luaopen_string(lua_State*L){
+luaL_register(L,"string",strlib);
+createmetatable(L);
+return 1;
+}
+static const luaL_Reg lualibs[]={
+{"",luaopen_base},
+{"table",luaopen_table},
+{"io",luaopen_io},
+{"os",luaopen_os},
+{"string",luaopen_string},
+{NULL,NULL}
+};
+static void luaL_openlibs(lua_State*L){
+const luaL_Reg*lib=lualibs;
+for(;lib->func;lib++){
+lua_pushcfunction(L,lib->func);
+lua_pushstring(L,lib->name);
+lua_call(L,1,0);
+}
+}
+typedef unsigned int UB;
+static UB barg(lua_State*L,int idx){
+union{lua_Number n;U64 b;}bn;
+bn.n=lua_tonumber(L,idx)+6755399441055744.0;
+if(bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number");
+return(UB)bn.b;
+}
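+/* barg normalizes a Lua number to a 32-bit value: adding 2^52+2^51
+** (6755399441055744.0) forces the integer part into the low mantissa bits of
+** the double, which the union then reads back as an unsigned 32-bit word. */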
+#define BRET(b)lua_pushnumber(L,(lua_Number)(int)(b));return 1;
+static int tobit(lua_State*L){
+BRET(barg(L,1))}
+static int bnot(lua_State*L){
+BRET(~barg(L,1))}
+static int band(lua_State*L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)}
+static int bor(lua_State*L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)}
+static int bxor(lua_State*L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)}
+static int lshift(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)}
+static int rshift(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)}
+static int arshift(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)}
+static int rol(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))}
+static int ror(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))}
+static int bswap(lua_State*L){
+UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)}
+static int tohex(lua_State*L){
+UB b=barg(L,1);
+int n=lua_isnone(L,2)?8:(int)barg(L,2);
+const char*hexdigits="0123456789abcdef";
+char buf[8];
+int i;
+if(n<0){n=-n;hexdigits="0123456789ABCDEF";}
+if(n>8)n=8;
+for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;}
+lua_pushlstring(L,buf,(size_t)n);
+return 1;
+}
+static const struct luaL_Reg bitlib[]={
+{"tobit",tobit},
+{"bnot",bnot},
+{"band",band},
+{"bor",bor},
+{"bxor",bxor},
+{"lshift",lshift},
+{"rshift",rshift},
+{"arshift",arshift},
+{"rol",rol},
+{"ror",ror},
+{"bswap",bswap},
+{"tohex",tohex},
+{NULL,NULL}
+};
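+/* main() runs argv[1] as a Lua chunk with the libraries above plus this "bit"
+** module preloaded (e.g. print(bit.tohex(bit.bor(0x12,0x34))) prints
+** "00000036"); extra argv entries become call arguments, and with no script
+** the exit status is sizeof(void*). */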
+int main(int argc,char**argv){
+lua_State*L=luaL_newstate();
+int i;
+luaL_openlibs(L);
+luaL_register(L,"bit",bitlib);
+if(argc<2)return sizeof(void*);
+lua_createtable(L,0,1);
+lua_pushstring(L,argv[1]);
+lua_rawseti(L,-2,0);
+lua_setglobal(L,"arg");
+if(luaL_loadfile(L,argv[1]))
+goto err;
+for(i=2;i<argc;i++)
+lua_pushstring(L,argv[i]);
+if(lua_pcall(L,argc-2,0,0)){
+err:
+fprintf(stderr,"Error: %s\n",lua_tostring(L,-1));
+return 1;
+}
+lua_close(L);
+return 0;
+}
diff --git a/3rdparty/lua/src/jit/bc.lua b/3rdparty/lua/src/jit/bc.lua
index 327c4d3..cef4752 100644
--- a/3rdparty/lua/src/jit/bc.lua
+++ b/3rdparty/lua/src/jit/bc.lua
@@ -1,191 +1,191 @@
-----------------------------------------------------------------------------
--- LuaJIT bytecode listing module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
---
--- This module lists the bytecode of a Lua function. If it's loaded by -jbc
--- it hooks into the parser and lists all functions of a chunk as they
--- are parsed.
---
--- Example usage:
---
--- luajit -jbc -e 'local x=0; for i=1,1e6 do x=x+i end; print(x)'
--- luajit -jbc=- foo.lua
--- luajit -jbc=foo.list foo.lua
---
--- Default output is to stderr. To redirect the output to a file, pass a
--- filename as an argument (use '-' for stdout) or set the environment
--- variable LUAJIT_LISTFILE. The file is overwritten every time the module
--- is started.
---
--- This module can also be used programmatically:
---
--- local bc = require("jit.bc")
---
--- local function foo() print("hello") end
---
--- bc.dump(foo) --> -- BYTECODE -- [...]
--- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello"
---
--- local out = {
--- -- Do something with each line:
--- write = function(t, ...) io.write(...) end,
--- close = function(t) end,
--- flush = function(t) end,
--- }
--- bc.dump(foo, out)
---
-------------------------------------------------------------------------------
-
--- Cache some library functions and objects.
-local jit = require("jit")
-assert(jit.version_num == 20004, "LuaJIT core/library version mismatch")
-local jutil = require("jit.util")
-local vmdef = require("jit.vmdef")
-local bit = require("bit")
-local sub, gsub, format = string.sub, string.gsub, string.format
-local byte, band, shr = string.byte, bit.band, bit.rshift
-local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck
-local funcuvname = jutil.funcuvname
-local bcnames = vmdef.bcnames
-local stdout, stderr = io.stdout, io.stderr
-
-------------------------------------------------------------------------------
-
-local function ctlsub(c)
- if c == "\n" then return "\\n"
- elseif c == "\r" then return "\\r"
- elseif c == "\t" then return "\\t"
- else return format("\\%03d", byte(c))
- end
-end
-
--- Return one bytecode line.
-local function bcline(func, pc, prefix)
- local ins, m = funcbc(func, pc)
- if not ins then return end
- local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128)
- local a = band(shr(ins, 8), 0xff)
- local oidx = 6*band(ins, 0xff)
- local op = sub(bcnames, oidx+1, oidx+6)
- local s = format("%04d %s %-6s %3s ",
- pc, prefix or " ", op, ma == 0 and "" or a)
- local d = shr(ins, 16)
- if mc == 13*128 then -- BCMjump
- return format("%s=> %04d\n", s, pc+d-0x7fff)
- end
- if mb ~= 0 then
- d = band(d, 0xff)
- elseif mc == 0 then
- return s.."\n"
- end
- local kc
- if mc == 10*128 then -- BCMstr
- kc = funck(func, -d-1)
- kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub))
- elseif mc == 9*128 then -- BCMnum
- kc = funck(func, d)
- if op == "TSETM " then kc = kc - 2^52 end
- elseif mc == 12*128 then -- BCMfunc
- local fi = funcinfo(funck(func, -d-1))
- if fi.ffid then
- kc = vmdef.ffnames[fi.ffid]
- else
- kc = fi.loc
- end
- elseif mc == 5*128 then -- BCMuv
- kc = funcuvname(func, d)
- end
- if ma == 5 then -- BCMuv
- local ka = funcuvname(func, a)
- if kc then kc = ka.." ; "..kc else kc = ka end
- end
- if mb ~= 0 then
- local b = shr(ins, 24)
- if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end
- return format("%s%3d %3d\n", s, b, d)
- end
- if kc then return format("%s%3d ; %s\n", s, d, kc) end
- if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits
- return format("%s%3d\n", s, d)
-end
-
--- Collect branch targets of a function.
-local function bctargets(func)
- local target = {}
- for pc=1,1000000000 do
- local ins, m = funcbc(func, pc)
- if not ins then break end
- if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end
- end
- return target
-end
-
--- Dump bytecode instructions of a function.
-local function bcdump(func, out, all)
- if not out then out = stdout end
- local fi = funcinfo(func)
- if all and fi.children then
- for n=-1,-1000000000,-1 do
- local k = funck(func, n)
- if not k then break end
- if type(k) == "proto" then bcdump(k, out, true) end
- end
- end
- out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined))
- local target = bctargets(func)
- for pc=1,1000000000 do
- local s = bcline(func, pc, target[pc] and "=>")
- if not s then break end
- out:write(s)
- end
- out:write("\n")
- out:flush()
-end
-
-------------------------------------------------------------------------------
-
--- Active flag and output file handle.
-local active, out
-
--- List handler.
-local function h_list(func)
- return bcdump(func, out)
-end
-
--- Detach list handler.
-local function bclistoff()
- if active then
- active = false
- jit.attach(h_list)
- if out and out ~= stdout and out ~= stderr then out:close() end
- out = nil
- end
-end
-
--- Open the output file and attach list handler.
-local function bcliston(outfile)
- if active then bclistoff() end
- if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end
- if outfile then
- out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
- else
- out = stderr
- end
- jit.attach(h_list, "bc")
- active = true
-end
-
--- Public module functions.
-module(...)
-
-line = bcline
-dump = bcdump
-targets = bctargets
-
-on = bcliston
-off = bclistoff
-start = bcliston -- For -j command line option.
-
+----------------------------------------------------------------------------
+-- LuaJIT bytecode listing module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module lists the bytecode of a Lua function. If it's loaded by -jbc
+-- it hooks into the parser and lists all functions of a chunk as they
+-- are parsed.
+--
+-- Example usage:
+--
+-- luajit -jbc -e 'local x=0; for i=1,1e6 do x=x+i end; print(x)'
+-- luajit -jbc=- foo.lua
+-- luajit -jbc=foo.list foo.lua
+--
+-- Default output is to stderr. To redirect the output to a file, pass a
+-- filename as an argument (use '-' for stdout) or set the environment
+-- variable LUAJIT_LISTFILE. The file is overwritten every time the module
+-- is started.
+--
+-- This module can also be used programmatically:
+--
+-- local bc = require("jit.bc")
+--
+-- local function foo() print("hello") end
+--
+-- bc.dump(foo) --> -- BYTECODE -- [...]
+-- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello"
+--
+-- local out = {
+-- -- Do something with each line:
+-- write = function(t, ...) io.write(...) end,
+-- close = function(t) end,
+-- flush = function(t) end,
+-- }
+-- bc.dump(foo, out)
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20002, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local bit = require("bit")
+local sub, gsub, format = string.sub, string.gsub, string.format
+local byte, band, shr = string.byte, bit.band, bit.rshift
+local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck
+local funcuvname = jutil.funcuvname
+local bcnames = vmdef.bcnames
+local stdout, stderr = io.stdout, io.stderr
+
+------------------------------------------------------------------------------
+
+local function ctlsub(c)
+ if c == "\n" then return "\\n"
+ elseif c == "\r" then return "\\r"
+ elseif c == "\t" then return "\\t"
+ else return format("\\%03d", byte(c))
+ end
+end
+
+-- Return one bytecode line.
+local function bcline(func, pc, prefix)
+ local ins, m = funcbc(func, pc)
+ if not ins then return end
+ local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128)
+ local a = band(shr(ins, 8), 0xff)
+ local oidx = 6*band(ins, 0xff)
+ local op = sub(bcnames, oidx+1, oidx+6)
+ local s = format("%04d %s %-6s %3s ",
+ pc, prefix or " ", op, ma == 0 and "" or a)
+ local d = shr(ins, 16)
+ if mc == 13*128 then -- BCMjump
+ return format("%s=> %04d\n", s, pc+d-0x7fff)
+ end
+ if mb ~= 0 then
+ d = band(d, 0xff)
+ elseif mc == 0 then
+ return s.."\n"
+ end
+ local kc
+ if mc == 10*128 then -- BCMstr
+ kc = funck(func, -d-1)
+ kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub))
+ elseif mc == 9*128 then -- BCMnum
+ kc = funck(func, d)
+ if op == "TSETM " then kc = kc - 2^52 end
+ elseif mc == 12*128 then -- BCMfunc
+ local fi = funcinfo(funck(func, -d-1))
+ if fi.ffid then
+ kc = vmdef.ffnames[fi.ffid]
+ else
+ kc = fi.loc
+ end
+ elseif mc == 5*128 then -- BCMuv
+ kc = funcuvname(func, d)
+ end
+ if ma == 5 then -- BCMuv
+ local ka = funcuvname(func, a)
+ if kc then kc = ka.." ; "..kc else kc = ka end
+ end
+ if mb ~= 0 then
+ local b = shr(ins, 24)
+ if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end
+ return format("%s%3d %3d\n", s, b, d)
+ end
+ if kc then return format("%s%3d ; %s\n", s, d, kc) end
+ if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits
+ return format("%s%3d\n", s, d)
+end
+
+-- Collect branch targets of a function.
+local function bctargets(func)
+ local target = {}
+ for pc=1,1000000000 do
+ local ins, m = funcbc(func, pc)
+ if not ins then break end
+ if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end
+ end
+ return target
+end
+
+-- Dump bytecode instructions of a function.
+local function bcdump(func, out, all)
+ if not out then out = stdout end
+ local fi = funcinfo(func)
+ if all and fi.children then
+ for n=-1,-1000000000,-1 do
+ local k = funck(func, n)
+ if not k then break end
+ if type(k) == "proto" then bcdump(k, out, true) end
+ end
+ end
+ out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined))
+ local target = bctargets(func)
+ for pc=1,1000000000 do
+ local s = bcline(func, pc, target[pc] and "=>")
+ if not s then break end
+ out:write(s)
+ end
+ out:write("\n")
+ out:flush()
+end
+
+------------------------------------------------------------------------------
+
+-- Active flag and output file handle.
+local active, out
+
+-- List handler.
+local function h_list(func)
+ return bcdump(func, out)
+end
+
+-- Detach list handler.
+local function bclistoff()
+ if active then
+ active = false
+ jit.attach(h_list)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach list handler.
+local function bcliston(outfile)
+ if active then bclistoff() end
+ if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stderr
+ end
+ jit.attach(h_list, "bc")
+ active = true
+end
+
+-- Public module functions.
+module(...)
+
+line = bcline
+dump = bcdump
+targets = bctargets
+
+on = bcliston
+off = bclistoff
+start = bcliston -- For -j command line option.
+
diff --git a/3rdparty/lua/src/jit/bcsave.lua b/3rdparty/lua/src/jit/bcsave.lua
index 55ca295..e6d566e 100644
--- a/3rdparty/lua/src/jit/bcsave.lua
+++ b/3rdparty/lua/src/jit/bcsave.lua
@@ -1,659 +1,659 @@
-----------------------------------------------------------------------------
--- LuaJIT module to save/list bytecode.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
---
--- This module saves or lists the bytecode for an input file.
--- It's run by the -b command line option.
---
-------------------------------------------------------------------------------
-
-local jit = require("jit")
-assert(jit.version_num == 20004, "LuaJIT core/library version mismatch")
-local bit = require("bit")
-
--- Symbol name prefix for LuaJIT bytecode.
-local LJBC_PREFIX = "luaJIT_BC_"
-
-------------------------------------------------------------------------------
-
-local function usage()
- io.stderr:write[[
-Save LuaJIT bytecode: luajit -b[options] input output
- -l Only list bytecode.
- -s Strip debug info (default).
- -g Keep debug info.
- -n name Set module name (default: auto-detect from input name).
- -t type Set output file type (default: auto-detect from output name).
- -a arch Override architecture for object files (default: native).
- -o os Override OS for object files (default: native).
- -e chunk Use chunk string as input.
- -- Stop handling options.
- - Use stdin as input and/or stdout as output.
-
-File types: c h obj o raw (default)
-]]
- os.exit(1)
-end
-
-local function check(ok, ...)
- if ok then return ok, ... end
- io.stderr:write("luajit: ", ...)
- io.stderr:write("\n")
- os.exit(1)
-end
-
-local function readfile(input)
- if type(input) == "function" then return input end
- if input == "-" then input = nil end
- return check(loadfile(input))
-end
-
-local function savefile(name, mode)
- if name == "-" then return io.stdout end
- return check(io.open(name, mode))
-end
-
-------------------------------------------------------------------------------
-
-local map_type = {
- raw = "raw", c = "c", h = "h", o = "obj", obj = "obj",
-}
-
-local map_arch = {
- x86 = true, x64 = true, arm = true, ppc = true, ppcspe = true,
- mips = true, mipsel = true,
-}
-
-local map_os = {
- linux = true, windows = true, osx = true, freebsd = true, netbsd = true,
- openbsd = true, dragonfly = true, solaris = true,
-}
-
-local function checkarg(str, map, err)
- str = string.lower(str)
- local s = check(map[str], "unknown ", err)
- return s == true and str or s
-end
-
-local function detecttype(str)
- local ext = string.match(string.lower(str), "%.(%a+)$")
- return map_type[ext] or "raw"
-end
-
-local function checkmodname(str)
- check(string.match(str, "^[%w_.%-]+$"), "bad module name")
- return string.gsub(str, "[%.%-]", "_")
-end
-
-local function detectmodname(str)
- if type(str) == "string" then
- local tail = string.match(str, "[^/\\]+$")
- if tail then str = tail end
- local head = string.match(str, "^(.*)%.[^.]*$")
- if head then str = head end
- str = string.match(str, "^[%w_.%-]+")
- else
- str = nil
- end
- check(str, "cannot derive module name, use -n name")
- return string.gsub(str, "[%.%-]", "_")
-end
-
-------------------------------------------------------------------------------
-
-local function bcsave_tail(fp, output, s)
- local ok, err = fp:write(s)
- if ok and output ~= "-" then ok, err = fp:close() end
- check(ok, "cannot write ", output, ": ", err)
-end
-
-local function bcsave_raw(output, s)
- local fp = savefile(output, "wb")
- bcsave_tail(fp, output, s)
-end
-
-local function bcsave_c(ctx, output, s)
- local fp = savefile(output, "w")
- if ctx.type == "c" then
- fp:write(string.format([[
-#ifdef _cplusplus
-extern "C"
-#endif
-#ifdef _WIN32
-__declspec(dllexport)
-#endif
-const char %s%s[] = {
-]], LJBC_PREFIX, ctx.modname))
- else
- fp:write(string.format([[
-#define %s%s_SIZE %d
-static const char %s%s[] = {
-]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname))
- end
- local t, n, m = {}, 0, 0
- for i=1,#s do
- local b = tostring(string.byte(s, i))
- m = m + #b + 1
- if m > 78 then
- fp:write(table.concat(t, ",", 1, n), ",\n")
- n, m = 0, #b + 1
- end
- n = n + 1
- t[n] = b
- end
- bcsave_tail(fp, output, table.concat(t, ",", 1, n).."\n};\n")
-end
-
-local function bcsave_elfobj(ctx, output, s, ffi)
- ffi.cdef[[
-typedef struct {
- uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
- uint16_t type, machine;
- uint32_t version;
- uint32_t entry, phofs, shofs;
- uint32_t flags;
- uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
-} ELF32header;
-typedef struct {
- uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
- uint16_t type, machine;
- uint32_t version;
- uint64_t entry, phofs, shofs;
- uint32_t flags;
- uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
-} ELF64header;
-typedef struct {
- uint32_t name, type, flags, addr, ofs, size, link, info, align, entsize;
-} ELF32sectheader;
-typedef struct {
- uint32_t name, type;
- uint64_t flags, addr, ofs, size;
- uint32_t link, info;
- uint64_t align, entsize;
-} ELF64sectheader;
-typedef struct {
- uint32_t name, value, size;
- uint8_t info, other;
- uint16_t sectidx;
-} ELF32symbol;
-typedef struct {
- uint32_t name;
- uint8_t info, other;
- uint16_t sectidx;
- uint64_t value, size;
-} ELF64symbol;
-typedef struct {
- ELF32header hdr;
- ELF32sectheader sect[6];
- ELF32symbol sym[2];
- uint8_t space[4096];
-} ELF32obj;
-typedef struct {
- ELF64header hdr;
- ELF64sectheader sect[6];
- ELF64symbol sym[2];
- uint8_t space[4096];
-} ELF64obj;
-]]
- local symname = LJBC_PREFIX..ctx.modname
- local is64, isbe = false, false
- if ctx.arch == "x64" then
- is64 = true
- elseif ctx.arch == "ppc" or ctx.arch == "ppcspe" or ctx.arch == "mips" then
- isbe = true
- end
-
- -- Handle different host/target endianess.
- local function f32(x) return x end
- local f16, fofs = f32, f32
- if ffi.abi("be") ~= isbe then
- f32 = bit.bswap
- function f16(x) return bit.rshift(bit.bswap(x), 16) end
- if is64 then
- local two32 = ffi.cast("int64_t", 2^32)
- function fofs(x) return bit.bswap(x)*two32 end
- else
- fofs = f32
- end
- end
-
- -- Create ELF object and fill in header.
- local o = ffi.new(is64 and "ELF64obj" or "ELF32obj")
- local hdr = o.hdr
- if ctx.os == "bsd" or ctx.os == "other" then -- Determine native hdr.eosabi.
- local bf = assert(io.open("/bin/ls", "rb"))
- local bs = bf:read(9)
- bf:close()
- ffi.copy(o, bs, 9)
- check(hdr.emagic[0] == 127, "no support for writing native object files")
- else
- hdr.emagic = "\127ELF"
- hdr.eosabi = ({ freebsd=9, netbsd=2, openbsd=12, solaris=6 })[ctx.os] or 0
- end
- hdr.eclass = is64 and 2 or 1
- hdr.eendian = isbe and 2 or 1
- hdr.eversion = 1
- hdr.type = f16(1)
- hdr.machine = f16(({ x86=3, x64=62, arm=40, ppc=20, ppcspe=20, mips=8, mipsel=8 })[ctx.arch])
- if ctx.arch == "mips" or ctx.arch == "mipsel" then
- hdr.flags = 0x50001006
- end
- hdr.version = f32(1)
- hdr.shofs = fofs(ffi.offsetof(o, "sect"))
- hdr.ehsize = f16(ffi.sizeof(hdr))
- hdr.shentsize = f16(ffi.sizeof(o.sect[0]))
- hdr.shnum = f16(6)
- hdr.shstridx = f16(2)
-
- -- Fill in sections and symbols.
- local sofs, ofs = ffi.offsetof(o, "space"), 1
- for i,name in ipairs{
- ".symtab", ".shstrtab", ".strtab", ".rodata", ".note.GNU-stack",
- } do
- local sect = o.sect[i]
- sect.align = fofs(1)
- sect.name = f32(ofs)
- ffi.copy(o.space+ofs, name)
- ofs = ofs + #name+1
- end
- o.sect[1].type = f32(2) -- .symtab
- o.sect[1].link = f32(3)
- o.sect[1].info = f32(1)
- o.sect[1].align = fofs(8)
- o.sect[1].ofs = fofs(ffi.offsetof(o, "sym"))
- o.sect[1].entsize = fofs(ffi.sizeof(o.sym[0]))
- o.sect[1].size = fofs(ffi.sizeof(o.sym))
- o.sym[1].name = f32(1)
- o.sym[1].sectidx = f16(4)
- o.sym[1].size = fofs(#s)
- o.sym[1].info = 17
- o.sect[2].type = f32(3) -- .shstrtab
- o.sect[2].ofs = fofs(sofs)
- o.sect[2].size = fofs(ofs)
- o.sect[3].type = f32(3) -- .strtab
- o.sect[3].ofs = fofs(sofs + ofs)
- o.sect[3].size = fofs(#symname+1)
- ffi.copy(o.space+ofs+1, symname)
- ofs = ofs + #symname + 2
- o.sect[4].type = f32(1) -- .rodata
- o.sect[4].flags = fofs(2)
- o.sect[4].ofs = fofs(sofs + ofs)
- o.sect[4].size = fofs(#s)
- o.sect[5].type = f32(1) -- .note.GNU-stack
- o.sect[5].ofs = fofs(sofs + ofs + #s)
-
- -- Write ELF object file.
- local fp = savefile(output, "wb")
- fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
- bcsave_tail(fp, output, s)
-end
-
-local function bcsave_peobj(ctx, output, s, ffi)
- ffi.cdef[[
-typedef struct {
- uint16_t arch, nsects;
- uint32_t time, symtabofs, nsyms;
- uint16_t opthdrsz, flags;
-} PEheader;
-typedef struct {
- char name[8];
- uint32_t vsize, vaddr, size, ofs, relocofs, lineofs;
- uint16_t nreloc, nline;
- uint32_t flags;
-} PEsection;
-typedef struct __attribute((packed)) {
- union {
- char name[8];
- uint32_t nameref[2];
- };
- uint32_t value;
- int16_t sect;
- uint16_t type;
- uint8_t scl, naux;
-} PEsym;
-typedef struct __attribute((packed)) {
- uint32_t size;
- uint16_t nreloc, nline;
- uint32_t cksum;
- uint16_t assoc;
- uint8_t comdatsel, unused[3];
-} PEsymaux;
-typedef struct {
- PEheader hdr;
- PEsection sect[2];
- // Must be an even number of symbol structs.
- PEsym sym0;
- PEsymaux sym0aux;
- PEsym sym1;
- PEsymaux sym1aux;
- PEsym sym2;
- PEsym sym3;
- uint32_t strtabsize;
- uint8_t space[4096];
-} PEobj;
-]]
- local symname = LJBC_PREFIX..ctx.modname
- local is64 = false
- if ctx.arch == "x86" then
- symname = "_"..symname
- elseif ctx.arch == "x64" then
- is64 = true
- end
- local symexport = " /EXPORT:"..symname..",DATA "
-
- -- The file format is always little-endian. Swap if the host is big-endian.
- local function f32(x) return x end
- local f16 = f32
- if ffi.abi("be") then
- f32 = bit.bswap
- function f16(x) return bit.rshift(bit.bswap(x), 16) end
- end
-
- -- Create PE object and fill in header.
- local o = ffi.new("PEobj")
- local hdr = o.hdr
- hdr.arch = f16(({ x86=0x14c, x64=0x8664, arm=0x1c0, ppc=0x1f2, mips=0x366, mipsel=0x366 })[ctx.arch])
- hdr.nsects = f16(2)
- hdr.symtabofs = f32(ffi.offsetof(o, "sym0"))
- hdr.nsyms = f32(6)
-
- -- Fill in sections and symbols.
- o.sect[0].name = ".drectve"
- o.sect[0].size = f32(#symexport)
- o.sect[0].flags = f32(0x00100a00)
- o.sym0.sect = f16(1)
- o.sym0.scl = 3
- o.sym0.name = ".drectve"
- o.sym0.naux = 1
- o.sym0aux.size = f32(#symexport)
- o.sect[1].name = ".rdata"
- o.sect[1].size = f32(#s)
- o.sect[1].flags = f32(0x40300040)
- o.sym1.sect = f16(2)
- o.sym1.scl = 3
- o.sym1.name = ".rdata"
- o.sym1.naux = 1
- o.sym1aux.size = f32(#s)
- o.sym2.sect = f16(2)
- o.sym2.scl = 2
- o.sym2.nameref[1] = f32(4)
- o.sym3.sect = f16(-1)
- o.sym3.scl = 2
- o.sym3.value = f32(1)
- o.sym3.name = "@feat.00" -- Mark as SafeSEH compliant.
- ffi.copy(o.space, symname)
- local ofs = #symname + 1
- o.strtabsize = f32(ofs + 4)
- o.sect[0].ofs = f32(ffi.offsetof(o, "space") + ofs)
- ffi.copy(o.space + ofs, symexport)
- ofs = ofs + #symexport
- o.sect[1].ofs = f32(ffi.offsetof(o, "space") + ofs)
-
- -- Write PE object file.
- local fp = savefile(output, "wb")
- fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
- bcsave_tail(fp, output, s)
-end
-
-local function bcsave_machobj(ctx, output, s, ffi)
- ffi.cdef[[
-typedef struct
-{
- uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags;
-} mach_header;
-typedef struct
-{
- mach_header; uint32_t reserved;
-} mach_header_64;
-typedef struct {
- uint32_t cmd, cmdsize;
- char segname[16];
- uint32_t vmaddr, vmsize, fileoff, filesize;
- uint32_t maxprot, initprot, nsects, flags;
-} mach_segment_command;
-typedef struct {
- uint32_t cmd, cmdsize;
- char segname[16];
- uint64_t vmaddr, vmsize, fileoff, filesize;
- uint32_t maxprot, initprot, nsects, flags;
-} mach_segment_command_64;
-typedef struct {
- char sectname[16], segname[16];
- uint32_t addr, size;
- uint32_t offset, align, reloff, nreloc, flags;
- uint32_t reserved1, reserved2;
-} mach_section;
-typedef struct {
- char sectname[16], segname[16];
- uint64_t addr, size;
- uint32_t offset, align, reloff, nreloc, flags;
- uint32_t reserved1, reserved2, reserved3;
-} mach_section_64;
-typedef struct {
- uint32_t cmd, cmdsize, symoff, nsyms, stroff, strsize;
-} mach_symtab_command;
-typedef struct {
- int32_t strx;
- uint8_t type, sect;
- int16_t desc;
- uint32_t value;
-} mach_nlist;
-typedef struct {
- uint32_t strx;
- uint8_t type, sect;
- uint16_t desc;
- uint64_t value;
-} mach_nlist_64;
-typedef struct
-{
- uint32_t magic, nfat_arch;
-} mach_fat_header;
-typedef struct
-{
- uint32_t cputype, cpusubtype, offset, size, align;
-} mach_fat_arch;
-typedef struct {
- struct {
- mach_header hdr;
- mach_segment_command seg;
- mach_section sec;
- mach_symtab_command sym;
- } arch[1];
- mach_nlist sym_entry;
- uint8_t space[4096];
-} mach_obj;
-typedef struct {
- struct {
- mach_header_64 hdr;
- mach_segment_command_64 seg;
- mach_section_64 sec;
- mach_symtab_command sym;
- } arch[1];
- mach_nlist_64 sym_entry;
- uint8_t space[4096];
-} mach_obj_64;
-typedef struct {
- mach_fat_header fat;
- mach_fat_arch fat_arch[4];
- struct {
- mach_header hdr;
- mach_segment_command seg;
- mach_section sec;
- mach_symtab_command sym;
- } arch[4];
- mach_nlist sym_entry;
- uint8_t space[4096];
-} mach_fat_obj;
-]]
- local symname = '_'..LJBC_PREFIX..ctx.modname
- local isfat, is64, align, mobj = false, false, 4, "mach_obj"
- if ctx.arch == "x64" then
- is64, align, mobj = true, 8, "mach_obj_64"
- elseif ctx.arch == "arm" then
- isfat, mobj = true, "mach_fat_obj"
- else
- check(ctx.arch == "x86", "unsupported architecture for OSX")
- end
- local function aligned(v, a) return bit.band(v+a-1, -a) end
- local be32 = bit.bswap -- Mach-O FAT is BE, supported archs are LE.
-
- -- Create Mach-O object and fill in header.
- local o = ffi.new(mobj)
- local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align)
- local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12,12,12} })[ctx.arch]
- local cpusubtype = ({ x86={3}, x64={3}, arm={3,6,9,11} })[ctx.arch]
- if isfat then
- o.fat.magic = be32(0xcafebabe)
- o.fat.nfat_arch = be32(#cpusubtype)
- end
-
- -- Fill in sections and symbols.
- for i=0,#cpusubtype-1 do
- local ofs = 0
- if isfat then
- local a = o.fat_arch[i]
- a.cputype = be32(cputype[i+1])
- a.cpusubtype = be32(cpusubtype[i+1])
- -- Subsequent slices overlap each other to share data.
- ofs = ffi.offsetof(o, "arch") + i*ffi.sizeof(o.arch[0])
- a.offset = be32(ofs)
- a.size = be32(mach_size-ofs+#s)
- end
- local a = o.arch[i]
- a.hdr.magic = is64 and 0xfeedfacf or 0xfeedface
- a.hdr.cputype = cputype[i+1]
- a.hdr.cpusubtype = cpusubtype[i+1]
- a.hdr.filetype = 1
- a.hdr.ncmds = 2
- a.hdr.sizeofcmds = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)+ffi.sizeof(a.sym)
- a.seg.cmd = is64 and 0x19 or 0x1
- a.seg.cmdsize = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)
- a.seg.vmsize = #s
- a.seg.fileoff = mach_size-ofs
- a.seg.filesize = #s
- a.seg.maxprot = 1
- a.seg.initprot = 1
- a.seg.nsects = 1
- ffi.copy(a.sec.sectname, "__data")
- ffi.copy(a.sec.segname, "__DATA")
- a.sec.size = #s
- a.sec.offset = mach_size-ofs
- a.sym.cmd = 2
- a.sym.cmdsize = ffi.sizeof(a.sym)
- a.sym.symoff = ffi.offsetof(o, "sym_entry")-ofs
- a.sym.nsyms = 1
- a.sym.stroff = ffi.offsetof(o, "sym_entry")+ffi.sizeof(o.sym_entry)-ofs
- a.sym.strsize = aligned(#symname+2, align)
- end
- o.sym_entry.type = 0xf
- o.sym_entry.sect = 1
- o.sym_entry.strx = 1
- ffi.copy(o.space+1, symname)
-
- -- Write Macho-O object file.
- local fp = savefile(output, "wb")
- fp:write(ffi.string(o, mach_size))
- bcsave_tail(fp, output, s)
-end
-
-local function bcsave_obj(ctx, output, s)
- local ok, ffi = pcall(require, "ffi")
- check(ok, "FFI library required to write this file type")
- if ctx.os == "windows" then
- return bcsave_peobj(ctx, output, s, ffi)
- elseif ctx.os == "osx" then
- return bcsave_machobj(ctx, output, s, ffi)
- else
- return bcsave_elfobj(ctx, output, s, ffi)
- end
-end
-
-------------------------------------------------------------------------------
-
-local function bclist(input, output)
- local f = readfile(input)
- require("jit.bc").dump(f, savefile(output, "w"), true)
-end
-
-local function bcsave(ctx, input, output)
- local f = readfile(input)
- local s = string.dump(f, ctx.strip)
- local t = ctx.type
- if not t then
- t = detecttype(output)
- ctx.type = t
- end
- if t == "raw" then
- bcsave_raw(output, s)
- else
- if not ctx.modname then ctx.modname = detectmodname(input) end
- if t == "obj" then
- bcsave_obj(ctx, output, s)
- else
- bcsave_c(ctx, output, s)
- end
- end
-end
-
-local function docmd(...)
- local arg = {...}
- local n = 1
- local list = false
- local ctx = {
- strip = true, arch = jit.arch, os = string.lower(jit.os),
- type = false, modname = false,
- }
- while n <= #arg do
- local a = arg[n]
- if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then
- table.remove(arg, n)
- if a == "--" then break end
- for m=2,#a do
- local opt = string.sub(a, m, m)
- if opt == "l" then
- list = true
- elseif opt == "s" then
- ctx.strip = true
- elseif opt == "g" then
- ctx.strip = false
- else
- if arg[n] == nil or m ~= #a then usage() end
- if opt == "e" then
- if n ~= 1 then usage() end
- arg[1] = check(loadstring(arg[1]))
- elseif opt == "n" then
- ctx.modname = checkmodname(table.remove(arg, n))
- elseif opt == "t" then
- ctx.type = checkarg(table.remove(arg, n), map_type, "file type")
- elseif opt == "a" then
- ctx.arch = checkarg(table.remove(arg, n), map_arch, "architecture")
- elseif opt == "o" then
- ctx.os = checkarg(table.remove(arg, n), map_os, "OS name")
- else
- usage()
- end
- end
- end
- else
- n = n + 1
- end
- end
- if list then
- if #arg == 0 or #arg > 2 then usage() end
- bclist(arg[1], arg[2] or "-")
- else
- if #arg ~= 2 then usage() end
- bcsave(ctx, arg[1], arg[2])
- end
-end
-
-------------------------------------------------------------------------------
-
--- Public module functions.
-module(...)
-
-start = docmd -- Process -b command line option.
-
+----------------------------------------------------------------------------
+-- LuaJIT module to save/list bytecode.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module saves or lists the bytecode for an input file.
+-- It's run by the -b command line option.
+--
+------------------------------------------------------------------------------
+
+local jit = require("jit")
+assert(jit.version_num == 20002, "LuaJIT core/library version mismatch")
+local bit = require("bit")
+
+-- Symbol name prefix for LuaJIT bytecode.
+local LJBC_PREFIX = "luaJIT_BC_"
+
+------------------------------------------------------------------------------
+
+local function usage()
+ io.stderr:write[[
+Save LuaJIT bytecode: luajit -b[options] input output
+ -l Only list bytecode.
+ -s Strip debug info (default).
+ -g Keep debug info.
+ -n name Set module name (default: auto-detect from input name).
+ -t type Set output file type (default: auto-detect from output name).
+ -a arch Override architecture for object files (default: native).
+ -o os Override OS for object files (default: native).
+ -e chunk Use chunk string as input.
+ -- Stop handling options.
+ - Use stdin as input and/or stdout as output.
+
+File types: c h obj o raw (default)
+]]
+ os.exit(1)
+end
+
+local function check(ok, ...)
+ if ok then return ok, ... end
+ io.stderr:write("luajit: ", ...)
+ io.stderr:write("\n")
+ os.exit(1)
+end
+
+local function readfile(input)
+ if type(input) == "function" then return input end
+ if input == "-" then input = nil end
+ return check(loadfile(input))
+end
+
+local function savefile(name, mode)
+ if name == "-" then return io.stdout end
+ return check(io.open(name, mode))
+end
+
+------------------------------------------------------------------------------
+
+local map_type = {
+ raw = "raw", c = "c", h = "h", o = "obj", obj = "obj",
+}
+
+local map_arch = {
+ x86 = true, x64 = true, arm = true, ppc = true, ppcspe = true,
+ mips = true, mipsel = true,
+}
+
+local map_os = {
+ linux = true, windows = true, osx = true, freebsd = true, netbsd = true,
+ openbsd = true, solaris = true,
+}
+
+local function checkarg(str, map, err)
+ str = string.lower(str)
+ local s = check(map[str], "unknown ", err)
+ return s == true and str or s
+end
+
+local function detecttype(str)
+ local ext = string.match(string.lower(str), "%.(%a+)$")
+ return map_type[ext] or "raw"
+end
+
+local function checkmodname(str)
+ check(string.match(str, "^[%w_.%-]+$"), "bad module name")
+ return string.gsub(str, "[%.%-]", "_")
+end
+
+local function detectmodname(str)
+ if type(str) == "string" then
+ local tail = string.match(str, "[^/\\]+$")
+ if tail then str = tail end
+ local head = string.match(str, "^(.*)%.[^.]*$")
+ if head then str = head end
+ str = string.match(str, "^[%w_.%-]+")
+ else
+ str = nil
+ end
+ check(str, "cannot derive module name, use -n name")
+ return string.gsub(str, "[%.%-]", "_")
+end
+
+------------------------------------------------------------------------------
+
+local function bcsave_tail(fp, output, s)
+ local ok, err = fp:write(s)
+ if ok and output ~= "-" then ok, err = fp:close() end
+ check(ok, "cannot write ", output, ": ", err)
+end
+
+local function bcsave_raw(output, s)
+ local fp = savefile(output, "wb")
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_c(ctx, output, s)
+ local fp = savefile(output, "w")
+ if ctx.type == "c" then
+ fp:write(string.format([[
+#ifdef __cplusplus
+extern "C"
+#endif
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+const char %s%s[] = {
+]], LJBC_PREFIX, ctx.modname))
+ else
+ fp:write(string.format([[
+#define %s%s_SIZE %d
+static const char %s%s[] = {
+]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname))
+ end
+ local t, n, m = {}, 0, 0
+ for i=1,#s do
+ local b = tostring(string.byte(s, i))
+ m = m + #b + 1
+ if m > 78 then
+ fp:write(table.concat(t, ",", 1, n), ",\n")
+ n, m = 0, #b + 1
+ end
+ n = n + 1
+ t[n] = b
+ end
+ bcsave_tail(fp, output, table.concat(t, ",", 1, n).."\n};\n")
+end
+
+local function bcsave_elfobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct {
+ uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
+ uint16_t type, machine;
+ uint32_t version;
+ uint32_t entry, phofs, shofs;
+ uint32_t flags;
+ uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
+} ELF32header;
+typedef struct {
+ uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
+ uint16_t type, machine;
+ uint32_t version;
+ uint64_t entry, phofs, shofs;
+ uint32_t flags;
+ uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
+} ELF64header;
+typedef struct {
+ uint32_t name, type, flags, addr, ofs, size, link, info, align, entsize;
+} ELF32sectheader;
+typedef struct {
+ uint32_t name, type;
+ uint64_t flags, addr, ofs, size;
+ uint32_t link, info;
+ uint64_t align, entsize;
+} ELF64sectheader;
+typedef struct {
+ uint32_t name, value, size;
+ uint8_t info, other;
+ uint16_t sectidx;
+} ELF32symbol;
+typedef struct {
+ uint32_t name;
+ uint8_t info, other;
+ uint16_t sectidx;
+ uint64_t value, size;
+} ELF64symbol;
+typedef struct {
+ ELF32header hdr;
+ ELF32sectheader sect[6];
+ ELF32symbol sym[2];
+ uint8_t space[4096];
+} ELF32obj;
+typedef struct {
+ ELF64header hdr;
+ ELF64sectheader sect[6];
+ ELF64symbol sym[2];
+ uint8_t space[4096];
+} ELF64obj;
+]]
+ local symname = LJBC_PREFIX..ctx.modname
+ local is64, isbe = false, false
+ if ctx.arch == "x64" then
+ is64 = true
+ elseif ctx.arch == "ppc" or ctx.arch == "ppcspe" or ctx.arch == "mips" then
+ isbe = true
+ end
+
+ -- Handle different host/target endianness.
+ local function f32(x) return x end
+ local f16, fofs = f32, f32
+ if ffi.abi("be") ~= isbe then
+ f32 = bit.bswap
+ function f16(x) return bit.rshift(bit.bswap(x), 16) end
+ if is64 then
+ local two32 = ffi.cast("int64_t", 2^32)
+ function fofs(x) return bit.bswap(x)*two32 end
+ else
+ fofs = f32
+ end
+ end
+
+ -- Create ELF object and fill in header.
+ local o = ffi.new(is64 and "ELF64obj" or "ELF32obj")
+ local hdr = o.hdr
+ if ctx.os == "bsd" or ctx.os == "other" then -- Determine native hdr.eosabi.
+ local bf = assert(io.open("/bin/ls", "rb"))
+ local bs = bf:read(9)
+ bf:close()
+ ffi.copy(o, bs, 9)
+ check(hdr.emagic[0] == 127, "no support for writing native object files")
+ else
+ hdr.emagic = "\127ELF"
+ hdr.eosabi = ({ freebsd=9, netbsd=2, openbsd=12, solaris=6 })[ctx.os] or 0
+ end
+ hdr.eclass = is64 and 2 or 1
+ hdr.eendian = isbe and 2 or 1
+ hdr.eversion = 1
+ hdr.type = f16(1)
+ hdr.machine = f16(({ x86=3, x64=62, arm=40, ppc=20, ppcspe=20, mips=8, mipsel=8 })[ctx.arch])
+ if ctx.arch == "mips" or ctx.arch == "mipsel" then
+ hdr.flags = 0x50001006
+ end
+ hdr.version = f32(1)
+ hdr.shofs = fofs(ffi.offsetof(o, "sect"))
+ hdr.ehsize = f16(ffi.sizeof(hdr))
+ hdr.shentsize = f16(ffi.sizeof(o.sect[0]))
+ hdr.shnum = f16(6)
+ hdr.shstridx = f16(2)
+
+ -- Fill in sections and symbols.
+ local sofs, ofs = ffi.offsetof(o, "space"), 1
+ for i,name in ipairs{
+ ".symtab", ".shstrtab", ".strtab", ".rodata", ".note.GNU-stack",
+ } do
+ local sect = o.sect[i]
+ sect.align = fofs(1)
+ sect.name = f32(ofs)
+ ffi.copy(o.space+ofs, name)
+ ofs = ofs + #name+1
+ end
+ o.sect[1].type = f32(2) -- .symtab
+ o.sect[1].link = f32(3)
+ o.sect[1].info = f32(1)
+ o.sect[1].align = fofs(8)
+ o.sect[1].ofs = fofs(ffi.offsetof(o, "sym"))
+ o.sect[1].entsize = fofs(ffi.sizeof(o.sym[0]))
+ o.sect[1].size = fofs(ffi.sizeof(o.sym))
+ o.sym[1].name = f32(1)
+ o.sym[1].sectidx = f16(4)
+ o.sym[1].size = fofs(#s)
+ o.sym[1].info = 17
+ o.sect[2].type = f32(3) -- .shstrtab
+ o.sect[2].ofs = fofs(sofs)
+ o.sect[2].size = fofs(ofs)
+ o.sect[3].type = f32(3) -- .strtab
+ o.sect[3].ofs = fofs(sofs + ofs)
+ o.sect[3].size = fofs(#symname+1)
+ ffi.copy(o.space+ofs+1, symname)
+ ofs = ofs + #symname + 2
+ o.sect[4].type = f32(1) -- .rodata
+ o.sect[4].flags = fofs(2)
+ o.sect[4].ofs = fofs(sofs + ofs)
+ o.sect[4].size = fofs(#s)
+ o.sect[5].type = f32(1) -- .note.GNU-stack
+ o.sect[5].ofs = fofs(sofs + ofs + #s)
+
+ -- Write ELF object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_peobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct {
+ uint16_t arch, nsects;
+ uint32_t time, symtabofs, nsyms;
+ uint16_t opthdrsz, flags;
+} PEheader;
+typedef struct {
+ char name[8];
+ uint32_t vsize, vaddr, size, ofs, relocofs, lineofs;
+ uint16_t nreloc, nline;
+ uint32_t flags;
+} PEsection;
+typedef struct __attribute((packed)) {
+ union {
+ char name[8];
+ uint32_t nameref[2];
+ };
+ uint32_t value;
+ int16_t sect;
+ uint16_t type;
+ uint8_t scl, naux;
+} PEsym;
+typedef struct __attribute((packed)) {
+ uint32_t size;
+ uint16_t nreloc, nline;
+ uint32_t cksum;
+ uint16_t assoc;
+ uint8_t comdatsel, unused[3];
+} PEsymaux;
+typedef struct {
+ PEheader hdr;
+ PEsection sect[2];
+ // Must be an even number of symbol structs.
+ PEsym sym0;
+ PEsymaux sym0aux;
+ PEsym sym1;
+ PEsymaux sym1aux;
+ PEsym sym2;
+ PEsym sym3;
+ uint32_t strtabsize;
+ uint8_t space[4096];
+} PEobj;
+]]
+ local symname = LJBC_PREFIX..ctx.modname
+ local is64 = false
+ if ctx.arch == "x86" then
+ symname = "_"..symname
+ elseif ctx.arch == "x64" then
+ is64 = true
+ end
+ local symexport = " /EXPORT:"..symname..",DATA "
+
+ -- The file format is always little-endian. Swap if the host is big-endian.
+ local function f32(x) return x end
+ local f16 = f32
+ if ffi.abi("be") then
+ f32 = bit.bswap
+ function f16(x) return bit.rshift(bit.bswap(x), 16) end
+ end
+
+ -- Create PE object and fill in header.
+ local o = ffi.new("PEobj")
+ local hdr = o.hdr
+ hdr.arch = f16(({ x86=0x14c, x64=0x8664, arm=0x1c0, ppc=0x1f2, mips=0x366, mipsel=0x366 })[ctx.arch])
+ hdr.nsects = f16(2)
+ hdr.symtabofs = f32(ffi.offsetof(o, "sym0"))
+ hdr.nsyms = f32(6)
+
+ -- Fill in sections and symbols.
+ o.sect[0].name = ".drectve"
+ o.sect[0].size = f32(#symexport)
+ o.sect[0].flags = f32(0x00100a00)
+ o.sym0.sect = f16(1)
+ o.sym0.scl = 3
+ o.sym0.name = ".drectve"
+ o.sym0.naux = 1
+ o.sym0aux.size = f32(#symexport)
+ o.sect[1].name = ".rdata"
+ o.sect[1].size = f32(#s)
+ o.sect[1].flags = f32(0x40300040)
+ o.sym1.sect = f16(2)
+ o.sym1.scl = 3
+ o.sym1.name = ".rdata"
+ o.sym1.naux = 1
+ o.sym1aux.size = f32(#s)
+ o.sym2.sect = f16(2)
+ o.sym2.scl = 2
+ o.sym2.nameref[1] = f32(4)
+ o.sym3.sect = f16(-1)
+ o.sym3.scl = 2
+ o.sym3.value = f32(1)
+ o.sym3.name = "@feat.00" -- Mark as SafeSEH compliant.
+ ffi.copy(o.space, symname)
+ local ofs = #symname + 1
+ o.strtabsize = f32(ofs + 4)
+ o.sect[0].ofs = f32(ffi.offsetof(o, "space") + ofs)
+ ffi.copy(o.space + ofs, symexport)
+ ofs = ofs + #symexport
+ o.sect[1].ofs = f32(ffi.offsetof(o, "space") + ofs)
+
+ -- Write PE object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_machobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct
+{
+ uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags;
+} mach_header;
+typedef struct
+{
+ mach_header; uint32_t reserved;
+} mach_header_64;
+typedef struct {
+ uint32_t cmd, cmdsize;
+ char segname[16];
+ uint32_t vmaddr, vmsize, fileoff, filesize;
+ uint32_t maxprot, initprot, nsects, flags;
+} mach_segment_command;
+typedef struct {
+ uint32_t cmd, cmdsize;
+ char segname[16];
+ uint64_t vmaddr, vmsize, fileoff, filesize;
+ uint32_t maxprot, initprot, nsects, flags;
+} mach_segment_command_64;
+typedef struct {
+ char sectname[16], segname[16];
+ uint32_t addr, size;
+ uint32_t offset, align, reloff, nreloc, flags;
+ uint32_t reserved1, reserved2;
+} mach_section;
+typedef struct {
+ char sectname[16], segname[16];
+ uint64_t addr, size;
+ uint32_t offset, align, reloff, nreloc, flags;
+ uint32_t reserved1, reserved2, reserved3;
+} mach_section_64;
+typedef struct {
+ uint32_t cmd, cmdsize, symoff, nsyms, stroff, strsize;
+} mach_symtab_command;
+typedef struct {
+ int32_t strx;
+ uint8_t type, sect;
+ int16_t desc;
+ uint32_t value;
+} mach_nlist;
+typedef struct {
+ uint32_t strx;
+ uint8_t type, sect;
+ uint16_t desc;
+ uint64_t value;
+} mach_nlist_64;
+typedef struct
+{
+ uint32_t magic, nfat_arch;
+} mach_fat_header;
+typedef struct
+{
+ uint32_t cputype, cpusubtype, offset, size, align;
+} mach_fat_arch;
+typedef struct {
+ struct {
+ mach_header hdr;
+ mach_segment_command seg;
+ mach_section sec;
+ mach_symtab_command sym;
+ } arch[1];
+ mach_nlist sym_entry;
+ uint8_t space[4096];
+} mach_obj;
+typedef struct {
+ struct {
+ mach_header_64 hdr;
+ mach_segment_command_64 seg;
+ mach_section_64 sec;
+ mach_symtab_command sym;
+ } arch[1];
+ mach_nlist_64 sym_entry;
+ uint8_t space[4096];
+} mach_obj_64;
+typedef struct {
+ mach_fat_header fat;
+ mach_fat_arch fat_arch[4];
+ struct {
+ mach_header hdr;
+ mach_segment_command seg;
+ mach_section sec;
+ mach_symtab_command sym;
+ } arch[4];
+ mach_nlist sym_entry;
+ uint8_t space[4096];
+} mach_fat_obj;
+]]
+ local symname = '_'..LJBC_PREFIX..ctx.modname
+ local isfat, is64, align, mobj = false, false, 4, "mach_obj"
+ if ctx.arch == "x64" then
+ is64, align, mobj = true, 8, "mach_obj_64"
+ elseif ctx.arch == "arm" then
+ isfat, mobj = true, "mach_fat_obj"
+ else
+ check(ctx.arch == "x86", "unsupported architecture for OSX")
+ end
+ local function aligned(v, a) return bit.band(v+a-1, -a) end
+ local be32 = bit.bswap -- Mach-O FAT is BE, supported archs are LE.
+
+ -- Create Mach-O object and fill in header.
+ local o = ffi.new(mobj)
+ local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align)
+ local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12,12,12} })[ctx.arch]
+ local cpusubtype = ({ x86={3}, x64={3}, arm={3,6,9,11} })[ctx.arch]
+ if isfat then
+ o.fat.magic = be32(0xcafebabe)
+ o.fat.nfat_arch = be32(#cpusubtype)
+ end
+
+ -- Fill in sections and symbols.
+ for i=0,#cpusubtype-1 do
+ local ofs = 0
+ if isfat then
+ local a = o.fat_arch[i]
+ a.cputype = be32(cputype[i+1])
+ a.cpusubtype = be32(cpusubtype[i+1])
+ -- Subsequent slices overlap each other to share data.
+ ofs = ffi.offsetof(o, "arch") + i*ffi.sizeof(o.arch[0])
+ a.offset = be32(ofs)
+ a.size = be32(mach_size-ofs+#s)
+ end
+ local a = o.arch[i]
+ a.hdr.magic = is64 and 0xfeedfacf or 0xfeedface
+ a.hdr.cputype = cputype[i+1]
+ a.hdr.cpusubtype = cpusubtype[i+1]
+ a.hdr.filetype = 1
+ a.hdr.ncmds = 2
+ a.hdr.sizeofcmds = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)+ffi.sizeof(a.sym)
+ a.seg.cmd = is64 and 0x19 or 0x1
+ a.seg.cmdsize = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)
+ a.seg.vmsize = #s
+ a.seg.fileoff = mach_size-ofs
+ a.seg.filesize = #s
+ a.seg.maxprot = 1
+ a.seg.initprot = 1
+ a.seg.nsects = 1
+ ffi.copy(a.sec.sectname, "__data")
+ ffi.copy(a.sec.segname, "__DATA")
+ a.sec.size = #s
+ a.sec.offset = mach_size-ofs
+ a.sym.cmd = 2
+ a.sym.cmdsize = ffi.sizeof(a.sym)
+ a.sym.symoff = ffi.offsetof(o, "sym_entry")-ofs
+ a.sym.nsyms = 1
+ a.sym.stroff = ffi.offsetof(o, "sym_entry")+ffi.sizeof(o.sym_entry)-ofs
+ a.sym.strsize = aligned(#symname+2, align)
+ end
+ o.sym_entry.type = 0xf
+ o.sym_entry.sect = 1
+ o.sym_entry.strx = 1
+ ffi.copy(o.space+1, symname)
+
+ -- Write Mach-O object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, mach_size))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_obj(ctx, output, s)
+ local ok, ffi = pcall(require, "ffi")
+ check(ok, "FFI library required to write this file type")
+ if ctx.os == "windows" then
+ return bcsave_peobj(ctx, output, s, ffi)
+ elseif ctx.os == "osx" then
+ return bcsave_machobj(ctx, output, s, ffi)
+ else
+ return bcsave_elfobj(ctx, output, s, ffi)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local function bclist(input, output)
+ local f = readfile(input)
+ require("jit.bc").dump(f, savefile(output, "w"), true)
+end
+
+local function bcsave(ctx, input, output)
+ local f = readfile(input)
+ local s = string.dump(f, ctx.strip)
+ local t = ctx.type
+ if not t then
+ t = detecttype(output)
+ ctx.type = t
+ end
+ if t == "raw" then
+ bcsave_raw(output, s)
+ else
+ if not ctx.modname then ctx.modname = detectmodname(input) end
+ if t == "obj" then
+ bcsave_obj(ctx, output, s)
+ else
+ bcsave_c(ctx, output, s)
+ end
+ end
+end
+
+local function docmd(...)
+ local arg = {...}
+ local n = 1
+ local list = false
+ local ctx = {
+ strip = true, arch = jit.arch, os = string.lower(jit.os),
+ type = false, modname = false,
+ }
+ while n <= #arg do
+ local a = arg[n]
+ if type(a) == "string" and string.sub(a, 1, 1) == "-" and a ~= "-" then
+ table.remove(arg, n)
+ if a == "--" then break end
+ for m=2,#a do
+ local opt = string.sub(a, m, m)
+ if opt == "l" then
+ list = true
+ elseif opt == "s" then
+ ctx.strip = true
+ elseif opt == "g" then
+ ctx.strip = false
+ else
+ if arg[n] == nil or m ~= #a then usage() end
+ if opt == "e" then
+ if n ~= 1 then usage() end
+ arg[1] = check(loadstring(arg[1]))
+ elseif opt == "n" then
+ ctx.modname = checkmodname(table.remove(arg, n))
+ elseif opt == "t" then
+ ctx.type = checkarg(table.remove(arg, n), map_type, "file type")
+ elseif opt == "a" then
+ ctx.arch = checkarg(table.remove(arg, n), map_arch, "architecture")
+ elseif opt == "o" then
+ ctx.os = checkarg(table.remove(arg, n), map_os, "OS name")
+ else
+ usage()
+ end
+ end
+ end
+ else
+ n = n + 1
+ end
+ end
+ if list then
+ if #arg == 0 or #arg > 2 then usage() end
+ bclist(arg[1], arg[2] or "-")
+ else
+ if #arg ~= 2 then usage() end
+ bcsave(ctx, arg[1], arg[2])
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Public module functions.
+module(...)
+
+start = docmd -- Process -b command line option.
+
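
The jit.bcsave module above backs the -b command line option (see start = docmd near the end of the file). A minimal sketch of driving it directly from Lua follows; the input, output and module names are hypothetical placeholders, and the flags are the ones documented in usage() above.

  -- Sketch only: invoking jit.bcsave programmatically (hypothetical file names).
  local bcsave = require("jit.bcsave")
  -- Equivalent to: luajit -b -t o -n mymod input.lua mymod.o
  bcsave.start("-t", "o", "-n", "mymod", "input.lua", "mymod.o")
  -- Equivalent to: luajit -bl input.lua   (list bytecode to stdout)
  bcsave.start("-l", "input.lua", "-")

On any error the module terminates the process via check()/usage(), which call os.exit(1).
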
diff --git a/3rdparty/lua/src/jit/dis_arm.lua b/3rdparty/lua/src/jit/dis_arm.lua
index 30c3d2c..dc7ca71 100644
--- a/3rdparty/lua/src/jit/dis_arm.lua
+++ b/3rdparty/lua/src/jit/dis_arm.lua
@@ -1,689 +1,689 @@
-----------------------------------------------------------------------------
--- LuaJIT ARM disassembler module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
--- This is a helper module used by the LuaJIT machine code dumper module.
---
--- It disassembles most user-mode ARMv7 instructions.
--- NYI: Advanced SIMD and VFP instructions.
-------------------------------------------------------------------------------
-
-local type = type
-local sub, byte, format = string.sub, string.byte, string.format
-local match, gmatch, gsub = string.match, string.gmatch, string.gsub
-local concat = table.concat
-local bit = require("bit")
-local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex
-local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
-
-------------------------------------------------------------------------------
--- Opcode maps
-------------------------------------------------------------------------------
-
-local map_loadc = {
- shift = 8, mask = 15,
- [10] = {
- shift = 20, mask = 1,
- [0] = {
- shift = 23, mask = 3,
- [0] = "vmovFmDN", "vstmFNdr",
- _ = {
- shift = 21, mask = 1,
- [0] = "vstrFdl",
- { shift = 16, mask = 15, [13] = "vpushFdr", _ = "vstmdbFNdr", }
- },
- },
- {
- shift = 23, mask = 3,
- [0] = "vmovFDNm",
- { shift = 16, mask = 15, [13] = "vpopFdr", _ = "vldmFNdr", },
- _ = {
- shift = 21, mask = 1,
- [0] = "vldrFdl", "vldmdbFNdr",
- },
- },
- },
- [11] = {
- shift = 20, mask = 1,
- [0] = {
- shift = 23, mask = 3,
- [0] = "vmovGmDN", "vstmGNdr",
- _ = {
- shift = 21, mask = 1,
- [0] = "vstrGdl",
- { shift = 16, mask = 15, [13] = "vpushGdr", _ = "vstmdbGNdr", }
- },
- },
- {
- shift = 23, mask = 3,
- [0] = "vmovGDNm",
- { shift = 16, mask = 15, [13] = "vpopGdr", _ = "vldmGNdr", },
- _ = {
- shift = 21, mask = 1,
- [0] = "vldrGdl", "vldmdbGNdr",
- },
- },
- },
- _ = {
- shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc.
- },
-}
-
-local map_vfps = {
- shift = 6, mask = 0x2c001,
- [0] = "vmlaF.dnm", "vmlsF.dnm",
- [0x04000] = "vnmlsF.dnm", [0x04001] = "vnmlaF.dnm",
- [0x08000] = "vmulF.dnm", [0x08001] = "vnmulF.dnm",
- [0x0c000] = "vaddF.dnm", [0x0c001] = "vsubF.dnm",
- [0x20000] = "vdivF.dnm",
- [0x24000] = "vfnmsF.dnm", [0x24001] = "vfnmaF.dnm",
- [0x28000] = "vfmaF.dnm", [0x28001] = "vfmsF.dnm",
- [0x2c000] = "vmovF.dY",
- [0x2c001] = {
- shift = 7, mask = 0x1e01,
- [0] = "vmovF.dm", "vabsF.dm",
- [0x0200] = "vnegF.dm", [0x0201] = "vsqrtF.dm",
- [0x0800] = "vcmpF.dm", [0x0801] = "vcmpeF.dm",
- [0x0a00] = "vcmpzF.d", [0x0a01] = "vcmpzeF.d",
- [0x0e01] = "vcvtG.dF.m",
- [0x1000] = "vcvt.f32.u32Fdm", [0x1001] = "vcvt.f32.s32Fdm",
- [0x1800] = "vcvtr.u32F.dm", [0x1801] = "vcvt.u32F.dm",
- [0x1a00] = "vcvtr.s32F.dm", [0x1a01] = "vcvt.s32F.dm",
- },
-}
-
-local map_vfpd = {
- shift = 6, mask = 0x2c001,
- [0] = "vmlaG.dnm", "vmlsG.dnm",
- [0x04000] = "vnmlsG.dnm", [0x04001] = "vnmlaG.dnm",
- [0x08000] = "vmulG.dnm", [0x08001] = "vnmulG.dnm",
- [0x0c000] = "vaddG.dnm", [0x0c001] = "vsubG.dnm",
- [0x20000] = "vdivG.dnm",
- [0x24000] = "vfnmsG.dnm", [0x24001] = "vfnmaG.dnm",
- [0x28000] = "vfmaG.dnm", [0x28001] = "vfmsG.dnm",
- [0x2c000] = "vmovG.dY",
- [0x2c001] = {
- shift = 7, mask = 0x1e01,
- [0] = "vmovG.dm", "vabsG.dm",
- [0x0200] = "vnegG.dm", [0x0201] = "vsqrtG.dm",
- [0x0800] = "vcmpG.dm", [0x0801] = "vcmpeG.dm",
- [0x0a00] = "vcmpzG.d", [0x0a01] = "vcmpzeG.d",
- [0x0e01] = "vcvtF.dG.m",
- [0x1000] = "vcvt.f64.u32GdFm", [0x1001] = "vcvt.f64.s32GdFm",
- [0x1800] = "vcvtr.u32FdG.m", [0x1801] = "vcvt.u32FdG.m",
- [0x1a00] = "vcvtr.s32FdG.m", [0x1a01] = "vcvt.s32FdG.m",
- },
-}
-
-local map_datac = {
- shift = 24, mask = 1,
- [0] = {
- shift = 4, mask = 1,
- [0] = {
- shift = 8, mask = 15,
- [10] = map_vfps,
- [11] = map_vfpd,
- -- NYI cdp, mcr, mrc.
- },
- {
- shift = 8, mask = 15,
- [10] = {
- shift = 20, mask = 15,
- [0] = "vmovFnD", "vmovFDn",
- [14] = "vmsrD",
- [15] = { shift = 12, mask = 15, [15] = "vmrs", _ = "vmrsD", },
- },
- },
- },
- "svcT",
-}
-
-local map_loadcu = {
- shift = 0, mask = 0, -- NYI unconditional CP load/store.
-}
-
-local map_datacu = {
- shift = 0, mask = 0, -- NYI unconditional CP data.
-}
-
-local map_simddata = {
- shift = 0, mask = 0, -- NYI SIMD data.
-}
-
-local map_simdload = {
- shift = 0, mask = 0, -- NYI SIMD load/store, preload.
-}
-
-local map_preload = {
- shift = 0, mask = 0, -- NYI preload.
-}
-
-local map_media = {
- shift = 20, mask = 31,
- [0] = false,
- { --01
- shift = 5, mask = 7,
- [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM",
- "sadd8DNM", false, false, "ssub8DNM",
- },
- { --02
- shift = 5, mask = 7,
- [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM",
- "qadd8DNM", false, false, "qsub8DNM",
- },
- { --03
- shift = 5, mask = 7,
- [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM",
- "shadd8DNM", false, false, "shsub8DNM",
- },
- false,
- { --05
- shift = 5, mask = 7,
- [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM",
- "uadd8DNM", false, false, "usub8DNM",
- },
- { --06
- shift = 5, mask = 7,
- [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM",
- "uqadd8DNM", false, false, "uqsub8DNM",
- },
- { --07
- shift = 5, mask = 7,
- [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM",
- "uhadd8DNM", false, false, "uhsub8DNM",
- },
- { --08
- shift = 5, mask = 7,
- [0] = "pkhbtDNMU", false, "pkhtbDNMU",
- { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", },
- "pkhbtDNMU", "selDNM", "pkhtbDNMU",
- },
- false,
- { --0a
- shift = 5, mask = 7,
- [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu",
- { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", },
- "ssatDxMu", false, "ssatDxMu",
- },
- { --0b
- shift = 5, mask = 7,
- [0] = "ssatDxMu", "revDM", "ssatDxMu",
- { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", },
- "ssatDxMu", "rev16DM", "ssatDxMu",
- },
- { --0c
- shift = 5, mask = 7,
- [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", },
- },
- false,
- { --0e
- shift = 5, mask = 7,
- [0] = "usatDwMu", "usat16DwM", "usatDwMu",
- { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", },
- "usatDwMu", false, "usatDwMu",
- },
- { --0f
- shift = 5, mask = 7,
- [0] = "usatDwMu", "rbitDM", "usatDwMu",
- { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", },
- "usatDwMu", "revshDM", "usatDwMu",
- },
- { --10
- shift = 12, mask = 15,
- [15] = {
- shift = 5, mask = 7,
- "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS",
- },
- _ = {
- shift = 5, mask = 7,
- [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD",
- },
- },
- false, false, false,
- { --14
- shift = 5, mask = 7,
- [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS",
- },
- { --15
- shift = 5, mask = 7,
- [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", },
- { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", },
- false, false, false, false,
- "smmlsNMSD", "smmlsrNMSD",
- },
- false, false,
- { --18
- shift = 5, mask = 7,
- [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", },
- },
- false,
- { --1a
- shift = 5, mask = 3, [2] = "sbfxDMvw",
- },
- { --1b
- shift = 5, mask = 3, [2] = "sbfxDMvw",
- },
- { --1c
- shift = 5, mask = 3,
- [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
- },
- { --1d
- shift = 5, mask = 3,
- [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
- },
- { --1e
- shift = 5, mask = 3, [2] = "ubfxDMvw",
- },
- { --1f
- shift = 5, mask = 3, [2] = "ubfxDMvw",
- },
-}
-
-local map_load = {
- shift = 21, mask = 9,
- {
- shift = 20, mask = 5,
- [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL",
- },
- _ = {
- shift = 20, mask = 5,
- [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL",
- }
-}
-
-local map_load1 = {
- shift = 4, mask = 1,
- [0] = map_load, map_media,
-}
-
-local map_loadm = {
- shift = 20, mask = 1,
- [0] = {
- shift = 23, mask = 3,
- [0] = "stmdaNR", "stmNR",
- { shift = 16, mask = 63, [45] = "pushR", _ = "stmdbNR", }, "stmibNR",
- },
- {
- shift = 23, mask = 3,
- [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", },
- "ldmdbNR", "ldmibNR",
- },
-}
-
-local map_data = {
- shift = 21, mask = 15,
- [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs",
- "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs",
- "tstNP", "teqNP", "cmpNP", "cmnNP",
- "orrDNPs", "movDPs", "bicDNPs", "mvnDPs",
-}
-
-local map_mul = {
- shift = 21, mask = 7,
- [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS",
- "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs",
-}
-
-local map_sync = {
- shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd.
- [0] = "swpDMN", false, false, false,
- "swpbDMN", false, false, false,
- "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN",
- "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN",
-}
-
-local map_mulh = {
- shift = 21, mask = 3,
- [0] = { shift = 5, mask = 3,
- [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", },
- { shift = 5, mask = 3,
- [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", },
- { shift = 5, mask = 3,
- [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", },
- { shift = 5, mask = 3,
- [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", },
-}
-
-local map_misc = {
- shift = 4, mask = 7,
- -- NYI: decode PSR bits of msr.
- [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", },
- { shift = 21, mask = 3, "bxM", false, "clzDM", },
- { shift = 21, mask = 3, "bxjM", },
- { shift = 21, mask = 3, "blxM", },
- false,
- { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", },
- false,
- { shift = 21, mask = 3, "bkptK", },
-}
-
-local map_datar = {
- shift = 4, mask = 9,
- [9] = {
- shift = 5, mask = 3,
- [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, },
- { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", },
- { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", },
- { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", },
- },
- _ = {
- shift = 20, mask = 25,
- [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, },
- _ = {
- shift = 0, mask = 0xffffffff,
- [bor(0xe1a00000)] = "nop",
- _ = map_data,
- }
- },
-}
-
-local map_datai = {
- shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12.
- [16] = "movwDW", [20] = "movtDW",
- [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", },
- [22] = "msrNW",
- _ = map_data,
-}
-
-local map_branch = {
- shift = 24, mask = 1,
- [0] = "bB", "blB"
-}
-
-local map_condins = {
- [0] = map_datar, map_datai, map_load, map_load1,
- map_loadm, map_branch, map_loadc, map_datac
-}
-
--- NYI: setend.
-local map_uncondins = {
- [0] = false, map_simddata, map_simdload, map_preload,
- false, "blxB", map_loadcu, map_datacu,
-}
-
-------------------------------------------------------------------------------
-
-local map_gpr = {
- [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc",
-}
-
-local map_cond = {
- [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt", "gt", "le", "al",
-}
-
-local map_shift = { [0] = "lsl", "lsr", "asr", "ror", }
-
-------------------------------------------------------------------------------
-
--- Output a nicely formatted line with an opcode and operands.
-local function putop(ctx, text, operands)
- local pos = ctx.pos
- local extra = ""
- if ctx.rel then
- local sym = ctx.symtab[ctx.rel]
- if sym then
- extra = "\t->"..sym
- elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then
- extra = "\t; 0x"..tohex(ctx.rel)
- end
- end
- if ctx.hexdump > 0 then
- ctx.out(format("%08x %s %-5s %s%s\n",
- ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
- else
- ctx.out(format("%08x %-5s %s%s\n",
- ctx.addr+pos, text, concat(operands, ", "), extra))
- end
- ctx.pos = pos + 4
-end
-
--- Fallback for unknown opcodes.
-local function unknown(ctx)
- return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
-end
-
--- Format operand 2 of load/store opcodes.
-local function fmtload(ctx, op, pos)
- local base = map_gpr[band(rshift(op, 16), 15)]
- local x, ofs
- local ext = (band(op, 0x04000000) == 0)
- if not ext and band(op, 0x02000000) == 0 then
- ofs = band(op, 4095)
- if band(op, 0x00800000) == 0 then ofs = -ofs end
- if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
- ofs = "#"..ofs
- elseif ext and band(op, 0x00400000) ~= 0 then
- ofs = band(op, 15) + band(rshift(op, 4), 0xf0)
- if band(op, 0x00800000) == 0 then ofs = -ofs end
- if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
- ofs = "#"..ofs
- else
- ofs = map_gpr[band(op, 15)]
- if ext or band(op, 0xfe0) == 0 then
- elseif band(op, 0xfe0) == 0x60 then
- ofs = format("%s, rrx", ofs)
- else
- local sh = band(rshift(op, 7), 31)
- if sh == 0 then sh = 32 end
- ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh)
- end
- if band(op, 0x00800000) == 0 then ofs = "-"..ofs end
- end
- if ofs == "#0" then
- x = format("[%s]", base)
- elseif band(op, 0x01000000) == 0 then
- x = format("[%s], %s", base, ofs)
- else
- x = format("[%s, %s]", base, ofs)
- end
- if band(op, 0x01200000) == 0x01200000 then x = x.."!" end
- return x
-end
-
--- Format operand 2 of vector load/store opcodes.
-local function fmtvload(ctx, op, pos)
- local base = map_gpr[band(rshift(op, 16), 15)]
- local ofs = band(op, 255)*4
- if band(op, 0x00800000) == 0 then ofs = -ofs end
- if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
- if ofs == 0 then
- return format("[%s]", base)
- else
- return format("[%s, #%d]", base, ofs)
- end
-end
-
-local function fmtvr(op, vr, sh0, sh1)
- if vr == "s" then
- return format("s%d", 2*band(rshift(op, sh0), 15)+band(rshift(op, sh1), 1))
- else
- return format("d%d", band(rshift(op, sh0), 15)+band(rshift(op, sh1-4), 16))
- end
-end
-
--- Disassemble a single instruction.
-local function disass_ins(ctx)
- local pos = ctx.pos
- local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
- local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
- local operands = {}
- local suffix = ""
- local last, name, pat
- local vr
- ctx.op = op
- ctx.rel = nil
-
- local cond = rshift(op, 28)
- local opat
- if cond == 15 then
- opat = map_uncondins[band(rshift(op, 25), 7)]
- else
- if cond ~= 14 then suffix = map_cond[cond] end
- opat = map_condins[band(rshift(op, 25), 7)]
- end
- while type(opat) ~= "string" do
- if not opat then return unknown(ctx) end
- opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
- end
- name, pat = match(opat, "^([a-z0-9]*)(.*)")
- if sub(pat, 1, 1) == "." then
- local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)")
- suffix = suffix..s2
- pat = p2
- end
-
- for p in gmatch(pat, ".") do
- local x = nil
- if p == "D" then
- x = map_gpr[band(rshift(op, 12), 15)]
- elseif p == "N" then
- x = map_gpr[band(rshift(op, 16), 15)]
- elseif p == "S" then
- x = map_gpr[band(rshift(op, 8), 15)]
- elseif p == "M" then
- x = map_gpr[band(op, 15)]
- elseif p == "d" then
- x = fmtvr(op, vr, 12, 22)
- elseif p == "n" then
- x = fmtvr(op, vr, 16, 7)
- elseif p == "m" then
- x = fmtvr(op, vr, 0, 5)
- elseif p == "P" then
- if band(op, 0x02000000) ~= 0 then
- x = ror(band(op, 255), 2*band(rshift(op, 8), 15))
- else
- x = map_gpr[band(op, 15)]
- if band(op, 0xff0) ~= 0 then
- operands[#operands+1] = x
- local s = map_shift[band(rshift(op, 5), 3)]
- local r = nil
- if band(op, 0xf90) == 0 then
- if s == "ror" then s = "rrx" else r = "#32" end
- elseif band(op, 0x10) == 0 then
- r = "#"..band(rshift(op, 7), 31)
- else
- r = map_gpr[band(rshift(op, 8), 15)]
- end
- if name == "mov" then name = s; x = r
- elseif r then x = format("%s %s", s, r)
- else x = s end
- end
- end
- elseif p == "L" then
- x = fmtload(ctx, op, pos)
- elseif p == "l" then
- x = fmtvload(ctx, op, pos)
- elseif p == "B" then
- local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6)
- if cond == 15 then addr = addr + band(rshift(op, 23), 2) end
- ctx.rel = addr
- x = "0x"..tohex(addr)
- elseif p == "F" then
- vr = "s"
- elseif p == "G" then
- vr = "d"
- elseif p == "." then
- suffix = suffix..(vr == "s" and ".f32" or ".f64")
- elseif p == "R" then
- if band(op, 0x00200000) ~= 0 and #operands == 1 then
- operands[1] = operands[1].."!"
- end
- local t = {}
- for i=0,15 do
- if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end
- end
- x = "{"..concat(t, ", ").."}"
- elseif p == "r" then
- if band(op, 0x00200000) ~= 0 and #operands == 2 then
- operands[1] = operands[1].."!"
- end
- local s = tonumber(sub(last, 2))
- local n = band(op, 255)
- if vr == "d" then n = rshift(n, 1) end
- operands[#operands] = format("{%s-%s%d}", last, vr, s+n-1)
- elseif p == "W" then
- x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000)
- elseif p == "T" then
- x = "#0x"..tohex(band(op, 0x00ffffff), 6)
- elseif p == "U" then
- x = band(rshift(op, 7), 31)
- if x == 0 then x = nil end
- elseif p == "u" then
- x = band(rshift(op, 7), 31)
- if band(op, 0x40) == 0 then
- if x == 0 then x = nil else x = "lsl #"..x end
- else
- if x == 0 then x = "asr #32" else x = "asr #"..x end
- end
- elseif p == "v" then
- x = band(rshift(op, 7), 31)
- elseif p == "w" then
- x = band(rshift(op, 16), 31)
- elseif p == "x" then
- x = band(rshift(op, 16), 31) + 1
- elseif p == "X" then
- x = band(rshift(op, 16), 31) - last + 1
- elseif p == "Y" then
- x = band(rshift(op, 12), 0xf0) + band(op, 0x0f)
- elseif p == "K" then
- x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4)
- elseif p == "s" then
- if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end
- else
- assert(false)
- end
- if x then
- last = x
- if type(x) == "number" then x = "#"..x end
- operands[#operands+1] = x
- end
- end
-
- return putop(ctx, name..suffix, operands)
-end
-
-------------------------------------------------------------------------------
-
--- Disassemble a block of code.
-local function disass_block(ctx, ofs, len)
- if not ofs then ofs = 0 end
- local stop = len and ofs+len or #ctx.code
- ctx.pos = ofs
- ctx.rel = nil
- while ctx.pos < stop do disass_ins(ctx) end
-end
-
--- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
-local function create_(code, addr, out)
- local ctx = {}
- ctx.code = code
- ctx.addr = addr or 0
- ctx.out = out or io.write
- ctx.symtab = {}
- ctx.disass = disass_block
- ctx.hexdump = 8
- return ctx
-end
-
--- Simple API: disassemble code (a string) at address and output via out.
-local function disass_(code, addr, out)
- create_(code, addr, out):disass()
-end
-
--- Return register name for RID.
-local function regname_(r)
- if r < 16 then return map_gpr[r] end
- return "d"..(r-16)
-end
-
--- Public module functions.
-module(...)
-
-create = create_
-disass = disass_
-regname = regname_
-
+----------------------------------------------------------------------------
+-- LuaJIT ARM disassembler module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles most user-mode ARMv7 instructions.
+-- NYI: Advanced SIMD and VFP instructions.
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Opcode maps
+------------------------------------------------------------------------------
+
+local map_loadc = {
+ shift = 8, mask = 15,
+ [10] = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "vmovFmDN", "vstmFNdr",
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vstrFdl",
+ { shift = 16, mask = 15, [13] = "vpushFdr", _ = "vstmdbFNdr", }
+ },
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "vmovFDNm",
+ { shift = 16, mask = 15, [13] = "vpopFdr", _ = "vldmFNdr", },
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vldrFdl", "vldmdbFNdr",
+ },
+ },
+ },
+ [11] = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "vmovGmDN", "vstmGNdr",
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vstrGdl",
+ { shift = 16, mask = 15, [13] = "vpushGdr", _ = "vstmdbGNdr", }
+ },
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "vmovGDNm",
+ { shift = 16, mask = 15, [13] = "vpopGdr", _ = "vldmGNdr", },
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vldrGdl", "vldmdbGNdr",
+ },
+ },
+ },
+ _ = {
+ shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc.
+ },
+}
+
+local map_vfps = {
+ shift = 6, mask = 0x2c001,
+ [0] = "vmlaF.dnm", "vmlsF.dnm",
+ [0x04000] = "vnmlsF.dnm", [0x04001] = "vnmlaF.dnm",
+ [0x08000] = "vmulF.dnm", [0x08001] = "vnmulF.dnm",
+ [0x0c000] = "vaddF.dnm", [0x0c001] = "vsubF.dnm",
+ [0x20000] = "vdivF.dnm",
+ [0x24000] = "vfnmsF.dnm", [0x24001] = "vfnmaF.dnm",
+ [0x28000] = "vfmaF.dnm", [0x28001] = "vfmsF.dnm",
+ [0x2c000] = "vmovF.dY",
+ [0x2c001] = {
+ shift = 7, mask = 0x1e01,
+ [0] = "vmovF.dm", "vabsF.dm",
+ [0x0200] = "vnegF.dm", [0x0201] = "vsqrtF.dm",
+ [0x0800] = "vcmpF.dm", [0x0801] = "vcmpeF.dm",
+ [0x0a00] = "vcmpzF.d", [0x0a01] = "vcmpzeF.d",
+ [0x0e01] = "vcvtG.dF.m",
+ [0x1000] = "vcvt.f32.u32Fdm", [0x1001] = "vcvt.f32.s32Fdm",
+ [0x1800] = "vcvtr.u32F.dm", [0x1801] = "vcvt.u32F.dm",
+ [0x1a00] = "vcvtr.s32F.dm", [0x1a01] = "vcvt.s32F.dm",
+ },
+}
+
+local map_vfpd = {
+ shift = 6, mask = 0x2c001,
+ [0] = "vmlaG.dnm", "vmlsG.dnm",
+ [0x04000] = "vnmlsG.dnm", [0x04001] = "vnmlaG.dnm",
+ [0x08000] = "vmulG.dnm", [0x08001] = "vnmulG.dnm",
+ [0x0c000] = "vaddG.dnm", [0x0c001] = "vsubG.dnm",
+ [0x20000] = "vdivG.dnm",
+ [0x24000] = "vfnmsG.dnm", [0x24001] = "vfnmaG.dnm",
+ [0x28000] = "vfmaG.dnm", [0x28001] = "vfmsG.dnm",
+ [0x2c000] = "vmovG.dY",
+ [0x2c001] = {
+ shift = 7, mask = 0x1e01,
+ [0] = "vmovG.dm", "vabsG.dm",
+ [0x0200] = "vnegG.dm", [0x0201] = "vsqrtG.dm",
+ [0x0800] = "vcmpG.dm", [0x0801] = "vcmpeG.dm",
+ [0x0a00] = "vcmpzG.d", [0x0a01] = "vcmpzeG.d",
+ [0x0e01] = "vcvtF.dG.m",
+ [0x1000] = "vcvt.f64.u32GdFm", [0x1001] = "vcvt.f64.s32GdFm",
+ [0x1800] = "vcvtr.u32FdG.m", [0x1801] = "vcvt.u32FdG.m",
+ [0x1a00] = "vcvtr.s32FdG.m", [0x1a01] = "vcvt.s32FdG.m",
+ },
+}
+
+local map_datac = {
+ shift = 24, mask = 1,
+ [0] = {
+ shift = 4, mask = 1,
+ [0] = {
+ shift = 8, mask = 15,
+ [10] = map_vfps,
+ [11] = map_vfpd,
+ -- NYI cdp, mcr, mrc.
+ },
+ {
+ shift = 8, mask = 15,
+ [10] = {
+ shift = 20, mask = 15,
+ [0] = "vmovFnD", "vmovFDn",
+ [14] = "vmsrD",
+ [15] = { shift = 12, mask = 15, [15] = "vmrs", _ = "vmrsD", },
+ },
+ },
+ },
+ "svcT",
+}
+
+local map_loadcu = {
+ shift = 0, mask = 0, -- NYI unconditional CP load/store.
+}
+
+local map_datacu = {
+ shift = 0, mask = 0, -- NYI unconditional CP data.
+}
+
+local map_simddata = {
+ shift = 0, mask = 0, -- NYI SIMD data.
+}
+
+local map_simdload = {
+ shift = 0, mask = 0, -- NYI SIMD load/store, preload.
+}
+
+local map_preload = {
+ shift = 0, mask = 0, -- NYI preload.
+}
+
+local map_media = {
+ shift = 20, mask = 31,
+ [0] = false,
+ { --01
+ shift = 5, mask = 7,
+ [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM",
+ "sadd8DNM", false, false, "ssub8DNM",
+ },
+ { --02
+ shift = 5, mask = 7,
+ [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM",
+ "qadd8DNM", false, false, "qsub8DNM",
+ },
+ { --03
+ shift = 5, mask = 7,
+ [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM",
+ "shadd8DNM", false, false, "shsub8DNM",
+ },
+ false,
+ { --05
+ shift = 5, mask = 7,
+ [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM",
+ "uadd8DNM", false, false, "usub8DNM",
+ },
+ { --06
+ shift = 5, mask = 7,
+ [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM",
+ "uqadd8DNM", false, false, "uqsub8DNM",
+ },
+ { --07
+ shift = 5, mask = 7,
+ [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM",
+ "uhadd8DNM", false, false, "uhsub8DNM",
+ },
+ { --08
+ shift = 5, mask = 7,
+ [0] = "pkhbtDNMU", false, "pkhtbDNMU",
+ { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", },
+ "pkhbtDNMU", "selDNM", "pkhtbDNMU",
+ },
+ false,
+ { --0a
+ shift = 5, mask = 7,
+ [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu",
+ { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", },
+ "ssatDxMu", false, "ssatDxMu",
+ },
+ { --0b
+ shift = 5, mask = 7,
+ [0] = "ssatDxMu", "revDM", "ssatDxMu",
+ { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", },
+ "ssatDxMu", "rev16DM", "ssatDxMu",
+ },
+ { --0c
+ shift = 5, mask = 7,
+ [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", },
+ },
+ false,
+ { --0e
+ shift = 5, mask = 7,
+ [0] = "usatDwMu", "usat16DwM", "usatDwMu",
+ { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", },
+ "usatDwMu", false, "usatDwMu",
+ },
+ { --0f
+ shift = 5, mask = 7,
+ [0] = "usatDwMu", "rbitDM", "usatDwMu",
+ { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", },
+ "usatDwMu", "revshDM", "usatDwMu",
+ },
+ { --10
+ shift = 12, mask = 15,
+ [15] = {
+ shift = 5, mask = 7,
+ "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS",
+ },
+ _ = {
+ shift = 5, mask = 7,
+ [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD",
+ },
+ },
+ false, false, false,
+ { --14
+ shift = 5, mask = 7,
+ [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS",
+ },
+ { --15
+ shift = 5, mask = 7,
+ [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", },
+ { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", },
+ false, false, false, false,
+ "smmlsNMSD", "smmlsrNMSD",
+ },
+ false, false,
+ { --18
+ shift = 5, mask = 7,
+ [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", },
+ },
+ false,
+ { --1a
+ shift = 5, mask = 3, [2] = "sbfxDMvw",
+ },
+ { --1b
+ shift = 5, mask = 3, [2] = "sbfxDMvw",
+ },
+ { --1c
+ shift = 5, mask = 3,
+ [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
+ },
+ { --1d
+ shift = 5, mask = 3,
+ [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
+ },
+ { --1e
+ shift = 5, mask = 3, [2] = "ubfxDMvw",
+ },
+ { --1f
+ shift = 5, mask = 3, [2] = "ubfxDMvw",
+ },
+}
+
+local map_load = {
+ shift = 21, mask = 9,
+ {
+ shift = 20, mask = 5,
+ [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL",
+ },
+ _ = {
+ shift = 20, mask = 5,
+ [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL",
+ }
+}
+
+local map_load1 = {
+ shift = 4, mask = 1,
+ [0] = map_load, map_media,
+}
+
+local map_loadm = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "stmdaNR", "stmNR",
+ { shift = 16, mask = 63, [45] = "pushR", _ = "stmdbNR", }, "stmibNR",
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", },
+ "ldmdbNR", "ldmibNR",
+ },
+}
+
+local map_data = {
+ shift = 21, mask = 15,
+ [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs",
+ "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs",
+ "tstNP", "teqNP", "cmpNP", "cmnNP",
+ "orrDNPs", "movDPs", "bicDNPs", "mvnDPs",
+}
+
+local map_mul = {
+ shift = 21, mask = 7,
+ [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS",
+ "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs",
+}
+
+local map_sync = {
+ shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd.
+ [0] = "swpDMN", false, false, false,
+ "swpbDMN", false, false, false,
+ "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN",
+ "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN",
+}
+
+local map_mulh = {
+ shift = 21, mask = 3,
+ [0] = { shift = 5, mask = 3,
+ [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", },
+ { shift = 5, mask = 3,
+ [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", },
+ { shift = 5, mask = 3,
+ [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", },
+ { shift = 5, mask = 3,
+ [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", },
+}
+
+local map_misc = {
+ shift = 4, mask = 7,
+ -- NYI: decode PSR bits of msr.
+ [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", },
+ { shift = 21, mask = 3, "bxM", false, "clzDM", },
+ { shift = 21, mask = 3, "bxjM", },
+ { shift = 21, mask = 3, "blxM", },
+ false,
+ { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", },
+ false,
+ { shift = 21, mask = 3, "bkptK", },
+}
+
+local map_datar = {
+ shift = 4, mask = 9,
+ [9] = {
+ shift = 5, mask = 3,
+ [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, },
+ { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", },
+ { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", },
+ { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", },
+ },
+ _ = {
+ shift = 20, mask = 25,
+ [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, },
+ _ = {
+ shift = 0, mask = 0xffffffff,
+ [bor(0xe1a00000)] = "nop",
+ _ = map_data,
+ }
+ },
+}
+
+local map_datai = {
+ shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12.
+ [16] = "movwDW", [20] = "movtDW",
+ [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", },
+ [22] = "msrNW",
+ _ = map_data,
+}
+
+local map_branch = {
+ shift = 24, mask = 1,
+ [0] = "bB", "blB"
+}
+
+local map_condins = {
+ [0] = map_datar, map_datai, map_load, map_load1,
+ map_loadm, map_branch, map_loadc, map_datac
+}
+
+-- NYI: setend.
+local map_uncondins = {
+ [0] = false, map_simddata, map_simdload, map_preload,
+ false, "blxB", map_loadcu, map_datacu,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc",
+}
+
+local map_cond = {
+ [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al",
+}
+
+local map_shift = { [0] = "lsl", "lsr", "asr", "ror", }
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then
+ extra = "\t->"..sym
+ elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then
+ extra = "\t; 0x"..tohex(ctx.rel)
+ end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-5s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-5s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+-- Format operand 2 of load/store opcodes.
+local function fmtload(ctx, op, pos)
+ local base = map_gpr[band(rshift(op, 16), 15)]
+ local x, ofs
+ local ext = (band(op, 0x04000000) == 0)
+ if not ext and band(op, 0x02000000) == 0 then
+ ofs = band(op, 4095)
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ ofs = "#"..ofs
+ elseif ext and band(op, 0x00400000) ~= 0 then
+ ofs = band(op, 15) + band(rshift(op, 4), 0xf0)
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ ofs = "#"..ofs
+ else
+ ofs = map_gpr[band(op, 15)]
+ if ext or band(op, 0xfe0) == 0 then
+ elseif band(op, 0xfe0) == 0x60 then
+ ofs = format("%s, rrx", ofs)
+ else
+ local sh = band(rshift(op, 7), 31)
+ if sh == 0 then sh = 32 end
+ ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh)
+ end
+ if band(op, 0x00800000) == 0 then ofs = "-"..ofs end
+ end
+ if ofs == "#0" then
+ x = format("[%s]", base)
+ elseif band(op, 0x01000000) == 0 then
+ x = format("[%s], %s", base, ofs)
+ else
+ x = format("[%s, %s]", base, ofs)
+ end
+ if band(op, 0x01200000) == 0x01200000 then x = x.."!" end
+ return x
+end
+
+-- Format operand 2 of vector load/store opcodes.
+local function fmtvload(ctx, op, pos)
+ local base = map_gpr[band(rshift(op, 16), 15)]
+ local ofs = band(op, 255)*4
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ if ofs == 0 then
+ return format("[%s]", base)
+ else
+ return format("[%s, #%d]", base, ofs)
+ end
+end
+
+local function fmtvr(op, vr, sh0, sh1)
+ if vr == "s" then
+ return format("s%d", 2*band(rshift(op, sh0), 15)+band(rshift(op, sh1), 1))
+ else
+ return format("d%d", band(rshift(op, sh0), 15)+band(rshift(op, sh1-4), 16))
+ end
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+ local operands = {}
+ local suffix = ""
+ local last, name, pat
+ local vr
+ ctx.op = op
+ ctx.rel = nil
+
+ local cond = rshift(op, 28)
+ local opat
+ if cond == 15 then
+ opat = map_uncondins[band(rshift(op, 25), 7)]
+ else
+ if cond ~= 14 then suffix = map_cond[cond] end
+ opat = map_condins[band(rshift(op, 25), 7)]
+ end
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ name, pat = match(opat, "^([a-z0-9]*)(.*)")
+ if sub(pat, 1, 1) == "." then
+ local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)")
+ suffix = suffix..s2
+ pat = p2
+ end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "D" then
+ x = map_gpr[band(rshift(op, 12), 15)]
+ elseif p == "N" then
+ x = map_gpr[band(rshift(op, 16), 15)]
+ elseif p == "S" then
+ x = map_gpr[band(rshift(op, 8), 15)]
+ elseif p == "M" then
+ x = map_gpr[band(op, 15)]
+ elseif p == "d" then
+ x = fmtvr(op, vr, 12, 22)
+ elseif p == "n" then
+ x = fmtvr(op, vr, 16, 7)
+ elseif p == "m" then
+ x = fmtvr(op, vr, 0, 5)
+ elseif p == "P" then
+ if band(op, 0x02000000) ~= 0 then
+ x = ror(band(op, 255), 2*band(rshift(op, 8), 15))
+ else
+ x = map_gpr[band(op, 15)]
+ if band(op, 0xff0) ~= 0 then
+ operands[#operands+1] = x
+ local s = map_shift[band(rshift(op, 5), 3)]
+ local r = nil
+ if band(op, 0xf90) == 0 then
+ if s == "ror" then s = "rrx" else r = "#32" end
+ elseif band(op, 0x10) == 0 then
+ r = "#"..band(rshift(op, 7), 31)
+ else
+ r = map_gpr[band(rshift(op, 8), 15)]
+ end
+ if name == "mov" then name = s; x = r
+ elseif r then x = format("%s %s", s, r)
+ else x = s end
+ end
+ end
+ elseif p == "L" then
+ x = fmtload(ctx, op, pos)
+ elseif p == "l" then
+ x = fmtvload(ctx, op, pos)
+ elseif p == "B" then
+ local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6)
+ if cond == 15 then addr = addr + band(rshift(op, 23), 2) end
+ ctx.rel = addr
+ x = "0x"..tohex(addr)
+ elseif p == "F" then
+ vr = "s"
+ elseif p == "G" then
+ vr = "d"
+ elseif p == "." then
+ suffix = suffix..(vr == "s" and ".f32" or ".f64")
+ elseif p == "R" then
+ if band(op, 0x00200000) ~= 0 and #operands == 1 then
+ operands[1] = operands[1].."!"
+ end
+ local t = {}
+ for i=0,15 do
+ if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end
+ end
+ x = "{"..concat(t, ", ").."}"
+ elseif p == "r" then
+ if band(op, 0x00200000) ~= 0 and #operands == 2 then
+ operands[1] = operands[1].."!"
+ end
+ local s = tonumber(sub(last, 2))
+ local n = band(op, 255)
+ if vr == "d" then n = rshift(n, 1) end
+ operands[#operands] = format("{%s-%s%d}", last, vr, s+n-1)
+ elseif p == "W" then
+ x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000)
+ elseif p == "T" then
+ x = "#0x"..tohex(band(op, 0x00ffffff), 6)
+ elseif p == "U" then
+ x = band(rshift(op, 7), 31)
+ if x == 0 then x = nil end
+ elseif p == "u" then
+ x = band(rshift(op, 7), 31)
+ if band(op, 0x40) == 0 then
+ if x == 0 then x = nil else x = "lsl #"..x end
+ else
+ if x == 0 then x = "asr #32" else x = "asr #"..x end
+ end
+ elseif p == "v" then
+ x = band(rshift(op, 7), 31)
+ elseif p == "w" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "x" then
+ x = band(rshift(op, 16), 31) + 1
+ elseif p == "X" then
+ x = band(rshift(op, 16), 31) - last + 1
+ elseif p == "Y" then
+ x = band(rshift(op, 12), 0xf0) + band(op, 0x0f)
+ elseif p == "K" then
+ x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4)
+ elseif p == "s" then
+ if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end
+ else
+ assert(false)
+ end
+ if x then
+ last = x
+ if type(x) == "number" then x = "#"..x end
+ operands[#operands+1] = x
+ end
+ end
+
+ return putop(ctx, name..suffix, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ctx.pos = ofs
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 16 then return map_gpr[r] end
+ return "d"..(r-16)
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+disass = disass_
+regname = regname_
+
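
The jit.dis_arm module above exports create, disass and regname. A minimal sketch of the simple and the extended API follows, using a made-up base address and a single hand-encoded instruction (bytes given in the little-endian order read by disass_ins):

  -- Sketch only: disassembling a raw machine-code string with jit.dis_arm.
  local dis = require("jit.dis_arm")
  local code = "\001\016\160\225"        -- 0xe1a01001 = mov r1, r1 (LE byte order)
  dis.disass(code, 0x10000)              -- simple API: one call, prints each line
  local ctx = dis.create(code, 0x10000)  -- extended API: reusable context
  ctx:disass(0, #code)                   -- disassemble bytes [0, #code)

Output goes through ctx.out, which defaults to io.write; create_ enables the hex dump column by setting hexdump = 8.
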
diff --git a/3rdparty/lua/src/jit/dis_mips.lua b/3rdparty/lua/src/jit/dis_mips.lua
index be59150..830db40 100644
--- a/3rdparty/lua/src/jit/dis_mips.lua
+++ b/3rdparty/lua/src/jit/dis_mips.lua
@@ -1,428 +1,428 @@
-----------------------------------------------------------------------------
--- LuaJIT MIPS disassembler module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT/X license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
--- This is a helper module used by the LuaJIT machine code dumper module.
---
--- It disassembles all standard MIPS32R1/R2 instructions.
--- Default mode is big-endian, but see: dis_mipsel.lua
-------------------------------------------------------------------------------
-
-local type = type
-local sub, byte, format = string.sub, string.byte, string.format
-local match, gmatch, gsub = string.match, string.gmatch, string.gsub
-local concat = table.concat
-local bit = require("bit")
-local band, bor, tohex = bit.band, bit.bor, bit.tohex
-local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
-
-------------------------------------------------------------------------------
--- Primary and extended opcode maps
-------------------------------------------------------------------------------
-
-local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", }
-local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", }
-local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", }
-
-local map_special = {
- shift = 0, mask = 63,
- [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
- map_movci, map_srl, "sraDTA",
- "sllvDTS", false, map_srlv, "sravDTS",
- "jrS", "jalrD1S", "movzDST", "movnDST",
- "syscallY", "breakY", false, "sync",
- "mfhiD", "mthiS", "mfloD", "mtloS",
- false, false, false, false,
- "multST", "multuST", "divST", "divuST",
- false, false, false, false,
- "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
- "andDST", "orDST", "xorDST", "nor|notDST0",
- false, false, "sltDST", "sltuDST",
- false, false, false, false,
- "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
- "teqSTZ", false, "tneSTZ",
-}
-
-local map_special2 = {
- shift = 0, mask = 63,
- [0] = "maddST", "madduST", "mulDST", false,
- "msubST", "msubuST",
- [32] = "clzDS", [33] = "cloDS",
- [63] = "sdbbpY",
-}
-
-local map_bshfl = {
- shift = 6, mask = 31,
- [2] = "wsbhDT",
- [16] = "sebDT",
- [24] = "sehDT",
-}
-
-local map_special3 = {
- shift = 0, mask = 63,
- [0] = "extTSAK", [4] = "insTSAL",
- [32] = map_bshfl,
- [59] = "rdhwrTD",
-}
-
-local map_regimm = {
- shift = 16, mask = 31,
- [0] = "bltzSB", "bgezSB", "bltzlSB", "bgezlSB",
- false, false, false, false,
- "tgeiSI", "tgeiuSI", "tltiSI", "tltiuSI",
- "teqiSI", false, "tneiSI", false,
- "bltzalSB", "bgezalSB", "bltzallSB", "bgezallSB",
- false, false, false, false,
- false, false, false, false,
- false, false, false, "synciSO",
-}
-
-local map_cop0 = {
- shift = 25, mask = 1,
- [0] = {
- shift = 21, mask = 15,
- [0] = "mfc0TDW", [4] = "mtc0TDW",
- [10] = "rdpgprDT",
- [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", },
- [14] = "wrpgprDT",
- }, {
- shift = 0, mask = 63,
- [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp",
- [24] = "eret", [31] = "deret",
- [32] = "wait",
- },
-}
-
-local map_cop1s = {
- shift = 0, mask = 63,
- [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
- "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG",
- "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG",
- "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG",
- false,
- { shift = 16, mask = 1, [0] = "movf.sFGC", "movt.sFGC" },
- "movz.sFGT", "movn.sFGT",
- false, "recip.sFG", "rsqrt.sFG", false,
- false, false, false, false,
- false, false, false, false,
- false, "cvt.d.sFG", false, false,
- "cvt.w.sFG", "cvt.l.sFG", "cvt.ps.sFGH", false,
- false, false, false, false,
- false, false, false, false,
- "c.f.sVGH", "c.un.sVGH", "c.eq.sVGH", "c.ueq.sVGH",
- "c.olt.sVGH", "c.ult.sVGH", "c.ole.sVGH", "c.ule.sVGH",
- "c.sf.sVGH", "c.ngle.sVGH", "c.seq.sVGH", "c.ngl.sVGH",
- "c.lt.sVGH", "c.nge.sVGH", "c.le.sVGH", "c.ngt.sVGH",
-}
-
-local map_cop1d = {
- shift = 0, mask = 63,
- [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH",
- "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG",
- "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG",
- "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG",
- false,
- { shift = 16, mask = 1, [0] = "movf.dFGC", "movt.dFGC" },
- "movz.dFGT", "movn.dFGT",
- false, "recip.dFG", "rsqrt.dFG", false,
- false, false, false, false,
- false, false, false, false,
- "cvt.s.dFG", false, false, false,
- "cvt.w.dFG", "cvt.l.dFG", false, false,
- false, false, false, false,
- false, false, false, false,
- "c.f.dVGH", "c.un.dVGH", "c.eq.dVGH", "c.ueq.dVGH",
- "c.olt.dVGH", "c.ult.dVGH", "c.ole.dVGH", "c.ule.dVGH",
- "c.df.dVGH", "c.ngle.dVGH", "c.deq.dVGH", "c.ngl.dVGH",
- "c.lt.dVGH", "c.nge.dVGH", "c.le.dVGH", "c.ngt.dVGH",
-}
-
-local map_cop1ps = {
- shift = 0, mask = 63,
- [0] = "add.psFGH", "sub.psFGH", "mul.psFGH", false,
- false, "abs.psFG", "mov.psFG", "neg.psFG",
- false, false, false, false,
- false, false, false, false,
- false,
- { shift = 16, mask = 1, [0] = "movf.psFGC", "movt.psFGC" },
- "movz.psFGT", "movn.psFGT",
- false, false, false, false,
- false, false, false, false,
- false, false, false, false,
- "cvt.s.puFG", false, false, false,
- false, false, false, false,
- "cvt.s.plFG", false, false, false,
- "pll.psFGH", "plu.psFGH", "pul.psFGH", "puu.psFGH",
- "c.f.psVGH", "c.un.psVGH", "c.eq.psVGH", "c.ueq.psVGH",
- "c.olt.psVGH", "c.ult.psVGH", "c.ole.psVGH", "c.ule.psVGH",
- "c.psf.psVGH", "c.ngle.psVGH", "c.pseq.psVGH", "c.ngl.psVGH",
- "c.lt.psVGH", "c.nge.psVGH", "c.le.psVGH", "c.ngt.psVGH",
-}
-
-local map_cop1w = {
- shift = 0, mask = 63,
- [32] = "cvt.s.wFG", [33] = "cvt.d.wFG",
-}
-
-local map_cop1l = {
- shift = 0, mask = 63,
- [32] = "cvt.s.lFG", [33] = "cvt.d.lFG",
-}
-
-local map_cop1bc = {
- shift = 16, mask = 3,
- [0] = "bc1fCB", "bc1tCB", "bc1flCB", "bc1tlCB",
-}
-
-local map_cop1 = {
- shift = 21, mask = 31,
- [0] = "mfc1TG", false, "cfc1TG", "mfhc1TG",
- "mtc1TG", false, "ctc1TG", "mthc1TG",
- map_cop1bc, false, false, false,
- false, false, false, false,
- map_cop1s, map_cop1d, false, false,
- map_cop1w, map_cop1l, map_cop1ps,
-}
-
-local map_cop1x = {
- shift = 0, mask = 63,
- [0] = "lwxc1FSX", "ldxc1FSX", false, false,
- false, "luxc1FSX", false, false,
- "swxc1FSX", "sdxc1FSX", false, false,
- false, "suxc1FSX", false, "prefxMSX",
- false, false, false, false,
- false, false, false, false,
- false, false, false, false,
- false, false, "alnv.psFGHS", false,
- "madd.sFRGH", "madd.dFRGH", false, false,
- false, false, "madd.psFRGH", false,
- "msub.sFRGH", "msub.dFRGH", false, false,
- false, false, "msub.psFRGH", false,
- "nmadd.sFRGH", "nmadd.dFRGH", false, false,
- false, false, "nmadd.psFRGH", false,
- "nmsub.sFRGH", "nmsub.dFRGH", false, false,
- false, false, "nmsub.psFRGH", false,
-}
-
-local map_pri = {
- [0] = map_special, map_regimm, "jJ", "jalJ",
- "beq|beqz|bST00B", "bne|bnezST0B", "blezSB", "bgtzSB",
- "addiTSI", "addiu|liTS0I", "sltiTSI", "sltiuTSI",
- "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU",
- map_cop0, map_cop1, false, map_cop1x,
- "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB",
- false, false, false, false,
- map_special2, false, false, map_special3,
- "lbTSO", "lhTSO", "lwlTSO", "lwTSO",
- "lbuTSO", "lhuTSO", "lwrTSO", false,
- "sbTSO", "shTSO", "swlTSO", "swTSO",
- false, false, "swrTSO", "cacheNSO",
- "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO",
- false, "ldc1HSO", "ldc2TSO", false,
- "scTSO", "swc1HSO", "swc2TSO", false,
- false, "sdc1HSO", "sdc2TSO", false,
-}
-
-------------------------------------------------------------------------------
-
-local map_gpr = {
- [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "sp", "r30", "ra",
-}
-
-------------------------------------------------------------------------------
-
--- Output a nicely formatted line with an opcode and operands.
-local function putop(ctx, text, operands)
- local pos = ctx.pos
- local extra = ""
- if ctx.rel then
- local sym = ctx.symtab[ctx.rel]
- if sym then extra = "\t->"..sym end
- end
- if ctx.hexdump > 0 then
- ctx.out(format("%08x %s %-7s %s%s\n",
- ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
- else
- ctx.out(format("%08x %-7s %s%s\n",
- ctx.addr+pos, text, concat(operands, ", "), extra))
- end
- ctx.pos = pos + 4
-end
-
--- Fallback for unknown opcodes.
-local function unknown(ctx)
- return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
-end
-
-local function get_be(ctx)
- local pos = ctx.pos
- local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
- return bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
-end
-
-local function get_le(ctx)
- local pos = ctx.pos
- local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
- return bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
-end
-
--- Disassemble a single instruction.
-local function disass_ins(ctx)
- local op = ctx:get()
- local operands = {}
- local last = nil
- ctx.op = op
- ctx.rel = nil
-
- local opat = map_pri[rshift(op, 26)]
- while type(opat) ~= "string" do
- if not opat then return unknown(ctx) end
- opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
- end
- local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
- local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
- if altname then pat = pat2 end
-
- for p in gmatch(pat, ".") do
- local x = nil
- if p == "S" then
- x = map_gpr[band(rshift(op, 21), 31)]
- elseif p == "T" then
- x = map_gpr[band(rshift(op, 16), 31)]
- elseif p == "D" then
- x = map_gpr[band(rshift(op, 11), 31)]
- elseif p == "F" then
- x = "f"..band(rshift(op, 6), 31)
- elseif p == "G" then
- x = "f"..band(rshift(op, 11), 31)
- elseif p == "H" then
- x = "f"..band(rshift(op, 16), 31)
- elseif p == "R" then
- x = "f"..band(rshift(op, 21), 31)
- elseif p == "A" then
- x = band(rshift(op, 6), 31)
- elseif p == "M" then
- x = band(rshift(op, 11), 31)
- elseif p == "N" then
- x = band(rshift(op, 16), 31)
- elseif p == "C" then
- x = band(rshift(op, 18), 7)
- if x == 0 then x = nil end
- elseif p == "K" then
- x = band(rshift(op, 11), 31) + 1
- elseif p == "L" then
- x = band(rshift(op, 11), 31) - last + 1
- elseif p == "I" then
- x = arshift(lshift(op, 16), 16)
- elseif p == "U" then
- x = band(op, 0xffff)
- elseif p == "O" then
- local disp = arshift(lshift(op, 16), 16)
- operands[#operands] = format("%d(%s)", disp, last)
- elseif p == "X" then
- local index = map_gpr[band(rshift(op, 16), 31)]
- operands[#operands] = format("%s(%s)", index, last)
- elseif p == "B" then
- x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 16)*4 + 4
- ctx.rel = x
- x = "0x"..tohex(x)
- elseif p == "J" then
- x = band(ctx.addr + ctx.pos, 0xf0000000) + band(op, 0x03ffffff)*4
- ctx.rel = x
- x = "0x"..tohex(x)
- elseif p == "V" then
- x = band(rshift(op, 8), 7)
- if x == 0 then x = nil end
- elseif p == "W" then
- x = band(op, 7)
- if x == 0 then x = nil end
- elseif p == "Y" then
- x = band(rshift(op, 6), 0x000fffff)
- if x == 0 then x = nil end
- elseif p == "Z" then
- x = band(rshift(op, 6), 1023)
- if x == 0 then x = nil end
- elseif p == "0" then
- if last == "r0" or last == 0 then
- local n = #operands
- operands[n] = nil
- last = operands[n-1]
- if altname then
- local a1, a2 = match(altname, "([^|]*)|(.*)")
- if a1 then name, altname = a1, a2
- else name = altname end
- end
- end
- elseif p == "1" then
- if last == "ra" then
- operands[#operands] = nil
- end
- else
- assert(false)
- end
- if x then operands[#operands+1] = x; last = x end
- end
-
- return putop(ctx, name, operands)
-end
-
-------------------------------------------------------------------------------
-
--- Disassemble a block of code.
-local function disass_block(ctx, ofs, len)
- if not ofs then ofs = 0 end
- local stop = len and ofs+len or #ctx.code
- stop = stop - stop % 4
- ctx.pos = ofs - ofs % 4
- ctx.rel = nil
- while ctx.pos < stop do disass_ins(ctx) end
-end
-
--- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
-local function create_(code, addr, out)
- local ctx = {}
- ctx.code = code
- ctx.addr = addr or 0
- ctx.out = out or io.write
- ctx.symtab = {}
- ctx.disass = disass_block
- ctx.hexdump = 8
- ctx.get = get_be
- return ctx
-end
-
-local function create_el_(code, addr, out)
- local ctx = create_(code, addr, out)
- ctx.get = get_le
- return ctx
-end
-
--- Simple API: disassemble code (a string) at address and output via out.
-local function disass_(code, addr, out)
- create_(code, addr, out):disass()
-end
-
-local function disass_el_(code, addr, out)
- create_el_(code, addr, out):disass()
-end
-
--- Return register name for RID.
-local function regname_(r)
- if r < 32 then return map_gpr[r] end
- return "f"..(r-32)
-end
-
--- Public module functions.
-module(...)
-
-create = create_
-create_el = create_el_
-disass = disass_
-disass_el = disass_el_
-regname = regname_
-
+----------------------------------------------------------------------------
+-- LuaJIT MIPS disassembler module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT/X license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles all standard MIPS32R1/R2 instructions.
+-- Default mode is big-endian, but see: dis_mipsel.lua
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, tohex = bit.band, bit.bor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps
+------------------------------------------------------------------------------
+
+local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", }
+local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", }
+local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", }
+
+local map_special = {
+ shift = 0, mask = 63,
+ [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
+ map_movci, map_srl, "sraDTA",
+ "sllvDTS", false, map_srlv, "sravDTS",
+ "jrS", "jalrD1S", "movzDST", "movnDST",
+ "syscallY", "breakY", false, "sync",
+ "mfhiD", "mthiS", "mfloD", "mtloS",
+ false, false, false, false,
+ "multST", "multuST", "divST", "divuST",
+ false, false, false, false,
+ "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
+ "andDST", "orDST", "xorDST", "nor|notDST0",
+ false, false, "sltDST", "sltuDST",
+ false, false, false, false,
+ "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
+ "teqSTZ", false, "tneSTZ",
+}
+
+local map_special2 = {
+ shift = 0, mask = 63,
+ [0] = "maddST", "madduST", "mulDST", false,
+ "msubST", "msubuST",
+ [32] = "clzDS", [33] = "cloDS",
+ [63] = "sdbbpY",
+}
+
+local map_bshfl = {
+ shift = 6, mask = 31,
+ [2] = "wsbhDT",
+ [16] = "sebDT",
+ [24] = "sehDT",
+}
+
+local map_special3 = {
+ shift = 0, mask = 63,
+ [0] = "extTSAK", [4] = "insTSAL",
+ [32] = map_bshfl,
+ [59] = "rdhwrTD",
+}
+
+local map_regimm = {
+ shift = 16, mask = 31,
+ [0] = "bltzSB", "bgezSB", "bltzlSB", "bgezlSB",
+ false, false, false, false,
+ "tgeiSI", "tgeiuSI", "tltiSI", "tltiuSI",
+ "teqiSI", false, "tneiSI", false,
+ "bltzalSB", "bgezalSB", "bltzallSB", "bgezallSB",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, "synciSO",
+}
+
+local map_cop0 = {
+ shift = 25, mask = 1,
+ [0] = {
+ shift = 21, mask = 15,
+ [0] = "mfc0TDW", [4] = "mtc0TDW",
+ [10] = "rdpgprDT",
+ [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", },
+ [14] = "wrpgprDT",
+ }, {
+ shift = 0, mask = 63,
+ [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp",
+ [24] = "eret", [31] = "deret",
+ [32] = "wait",
+ },
+}
+
+local map_cop1s = {
+ shift = 0, mask = 63,
+ [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
+ "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG",
+ "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG",
+ "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG",
+ false,
+ { shift = 16, mask = 1, [0] = "movf.sFGC", "movt.sFGC" },
+ "movz.sFGT", "movn.sFGT",
+ false, "recip.sFG", "rsqrt.sFG", false,
+ false, false, false, false,
+ false, false, false, false,
+ false, "cvt.d.sFG", false, false,
+ "cvt.w.sFG", "cvt.l.sFG", "cvt.ps.sFGH", false,
+ false, false, false, false,
+ false, false, false, false,
+ "c.f.sVGH", "c.un.sVGH", "c.eq.sVGH", "c.ueq.sVGH",
+ "c.olt.sVGH", "c.ult.sVGH", "c.ole.sVGH", "c.ule.sVGH",
+ "c.sf.sVGH", "c.ngle.sVGH", "c.seq.sVGH", "c.ngl.sVGH",
+ "c.lt.sVGH", "c.nge.sVGH", "c.le.sVGH", "c.ngt.sVGH",
+}
+
+local map_cop1d = {
+ shift = 0, mask = 63,
+ [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH",
+ "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG",
+ "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG",
+ "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG",
+ false,
+ { shift = 16, mask = 1, [0] = "movf.dFGC", "movt.dFGC" },
+ "movz.dFGT", "movn.dFGT",
+ false, "recip.dFG", "rsqrt.dFG", false,
+ false, false, false, false,
+ false, false, false, false,
+ "cvt.s.dFG", false, false, false,
+ "cvt.w.dFG", "cvt.l.dFG", false, false,
+ false, false, false, false,
+ false, false, false, false,
+ "c.f.dVGH", "c.un.dVGH", "c.eq.dVGH", "c.ueq.dVGH",
+ "c.olt.dVGH", "c.ult.dVGH", "c.ole.dVGH", "c.ule.dVGH",
+ "c.df.dVGH", "c.ngle.dVGH", "c.deq.dVGH", "c.ngl.dVGH",
+ "c.lt.dVGH", "c.nge.dVGH", "c.le.dVGH", "c.ngt.dVGH",
+}
+
+local map_cop1ps = {
+ shift = 0, mask = 63,
+ [0] = "add.psFGH", "sub.psFGH", "mul.psFGH", false,
+ false, "abs.psFG", "mov.psFG", "neg.psFG",
+ false, false, false, false,
+ false, false, false, false,
+ false,
+ { shift = 16, mask = 1, [0] = "movf.psFGC", "movt.psFGC" },
+ "movz.psFGT", "movn.psFGT",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, false,
+ "cvt.s.puFG", false, false, false,
+ false, false, false, false,
+ "cvt.s.plFG", false, false, false,
+ "pll.psFGH", "plu.psFGH", "pul.psFGH", "puu.psFGH",
+ "c.f.psVGH", "c.un.psVGH", "c.eq.psVGH", "c.ueq.psVGH",
+ "c.olt.psVGH", "c.ult.psVGH", "c.ole.psVGH", "c.ule.psVGH",
+ "c.psf.psVGH", "c.ngle.psVGH", "c.pseq.psVGH", "c.ngl.psVGH",
+ "c.lt.psVGH", "c.nge.psVGH", "c.le.psVGH", "c.ngt.psVGH",
+}
+
+local map_cop1w = {
+ shift = 0, mask = 63,
+ [32] = "cvt.s.wFG", [33] = "cvt.d.wFG",
+}
+
+local map_cop1l = {
+ shift = 0, mask = 63,
+ [32] = "cvt.s.lFG", [33] = "cvt.d.lFG",
+}
+
+local map_cop1bc = {
+ shift = 16, mask = 3,
+ [0] = "bc1fCB", "bc1tCB", "bc1flCB", "bc1tlCB",
+}
+
+local map_cop1 = {
+ shift = 21, mask = 31,
+ [0] = "mfc1TG", false, "cfc1TG", "mfhc1TG",
+ "mtc1TG", false, "ctc1TG", "mthc1TG",
+ map_cop1bc, false, false, false,
+ false, false, false, false,
+ map_cop1s, map_cop1d, false, false,
+ map_cop1w, map_cop1l, map_cop1ps,
+}
+
+local map_cop1x = {
+ shift = 0, mask = 63,
+ [0] = "lwxc1FSX", "ldxc1FSX", false, false,
+ false, "luxc1FSX", false, false,
+ "swxc1FSX", "sdxc1FSX", false, false,
+ false, "suxc1FSX", false, "prefxMSX",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, false,
+ false, false, "alnv.psFGHS", false,
+ "madd.sFRGH", "madd.dFRGH", false, false,
+ false, false, "madd.psFRGH", false,
+ "msub.sFRGH", "msub.dFRGH", false, false,
+ false, false, "msub.psFRGH", false,
+ "nmadd.sFRGH", "nmadd.dFRGH", false, false,
+ false, false, "nmadd.psFRGH", false,
+ "nmsub.sFRGH", "nmsub.dFRGH", false, false,
+ false, false, "nmsub.psFRGH", false,
+}
+
+local map_pri = {
+ [0] = map_special, map_regimm, "jJ", "jalJ",
+ "beq|beqz|bST00B", "bne|bnezST0B", "blezSB", "bgtzSB",
+ "addiTSI", "addiu|liTS0I", "sltiTSI", "sltiuTSI",
+ "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU",
+ map_cop0, map_cop1, false, map_cop1x,
+ "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB",
+ false, false, false, false,
+ map_special2, false, false, map_special3,
+ "lbTSO", "lhTSO", "lwlTSO", "lwTSO",
+ "lbuTSO", "lhuTSO", "lwrTSO", false,
+ "sbTSO", "shTSO", "swlTSO", "swTSO",
+ false, false, "swrTSO", "cacheNSO",
+ "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO",
+ false, "ldc1HSO", "ldc2TSO", false,
+ "scTSO", "swc1HSO", "swc2TSO", false,
+ false, "sdc1HSO", "sdc2TSO", false,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "sp", "r30", "ra",
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then extra = "\t->"..sym end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-7s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-7s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+local function get_be(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ return bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
+end
+
+local function get_le(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ return bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local op = ctx:get()
+ local operands = {}
+ local last = nil
+ ctx.op = op
+ ctx.rel = nil
+
+ local opat = map_pri[rshift(op, 26)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
+ if altname then pat = pat2 end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "S" then
+ x = map_gpr[band(rshift(op, 21), 31)]
+ elseif p == "T" then
+ x = map_gpr[band(rshift(op, 16), 31)]
+ elseif p == "D" then
+ x = map_gpr[band(rshift(op, 11), 31)]
+ elseif p == "F" then
+ x = "f"..band(rshift(op, 6), 31)
+ elseif p == "G" then
+ x = "f"..band(rshift(op, 11), 31)
+ elseif p == "H" then
+ x = "f"..band(rshift(op, 16), 31)
+ elseif p == "R" then
+ x = "f"..band(rshift(op, 21), 31)
+ elseif p == "A" then
+ x = band(rshift(op, 6), 31)
+ elseif p == "M" then
+ x = band(rshift(op, 11), 31)
+ elseif p == "N" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "C" then
+ x = band(rshift(op, 18), 7)
+ if x == 0 then x = nil end
+ elseif p == "K" then
+ x = band(rshift(op, 11), 31) + 1
+ elseif p == "L" then
+ x = band(rshift(op, 11), 31) - last + 1
+ elseif p == "I" then
+ x = arshift(lshift(op, 16), 16)
+ elseif p == "U" then
+ x = band(op, 0xffff)
+ elseif p == "O" then
+ local disp = arshift(lshift(op, 16), 16)
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p == "X" then
+ local index = map_gpr[band(rshift(op, 16), 31)]
+ operands[#operands] = format("%s(%s)", index, last)
+ elseif p == "B" then
+ x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 16)*4 + 4
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "J" then
+ x = band(ctx.addr + ctx.pos, 0xf0000000) + band(op, 0x03ffffff)*4
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "V" then
+ x = band(rshift(op, 8), 7)
+ if x == 0 then x = nil end
+ elseif p == "W" then
+ x = band(op, 7)
+ if x == 0 then x = nil end
+ elseif p == "Y" then
+ x = band(rshift(op, 6), 0x000fffff)
+ if x == 0 then x = nil end
+ elseif p == "Z" then
+ x = band(rshift(op, 6), 1023)
+ if x == 0 then x = nil end
+ elseif p == "0" then
+ if last == "r0" or last == 0 then
+ local n = #operands
+ operands[n] = nil
+ last = operands[n-1]
+ if altname then
+ local a1, a2 = match(altname, "([^|]*)|(.*)")
+ if a1 then name, altname = a1, a2
+ else name = altname end
+ end
+ end
+ elseif p == "1" then
+ if last == "ra" then
+ operands[#operands] = nil
+ end
+ else
+ assert(false)
+ end
+ if x then operands[#operands+1] = x; last = x end
+ end
+
+ return putop(ctx, name, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ stop = stop - stop % 4
+ ctx.pos = ofs - ofs % 4
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ ctx.get = get_be
+ return ctx
+end
+
+local function create_el_(code, addr, out)
+ local ctx = create_(code, addr, out)
+ ctx.get = get_le
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+local function disass_el_(code, addr, out)
+ create_el_(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 32 then return map_gpr[r] end
+ return "f"..(r-32)
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+create_el = create_el_
+disass = disass_
+disass_el = disass_el_
+regname = regname_
+
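The MIPS module restored above exports the same trio plus explicit little-endian variants; a short sketch under the same assumptions (module loaded as "jit.dis_mips", made-up instruction words) follows.

local dis_mips = require("jit.dis_mips")

-- One hypothetical 32-bit instruction word, big-endian byte order (the default).
dis_mips.disass(string.char(0x24, 0x02, 0x00, 0x01), 0x00400000)

-- The same word in little-endian byte order goes through the _el variants,
-- which only swap ctx.get from get_be to get_le.
dis_mips.disass_el(string.char(0x01, 0x00, 0x02, 0x24), 0x00400000)

-- regname() maps RIDs: 0-31 are GPRs, 32 and up are FPRs.
print(dis_mips.regname(29), dis_mips.regname(33))   --> sp      f1

The strings in the opcode maps encode both mnemonic and operand layout: in "addu|moveDST0", for instance, "addu" is the name, "move" is the alternate selected by the "0" rule when the last operand is r0, and D/S/T pick out the rd/rs/rt register fields decoded in disass_ins.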
diff --git a/3rdparty/lua/src/jit/dis_mipsel.lua b/3rdparty/lua/src/jit/dis_mipsel.lua
index 7aeb032..8a10c46 100644
--- a/3rdparty/lua/src/jit/dis_mipsel.lua
+++ b/3rdparty/lua/src/jit/dis_mipsel.lua
@@ -1,20 +1,20 @@
-----------------------------------------------------------------------------
--- LuaJIT MIPSEL disassembler wrapper module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
--- This module just exports the little-endian functions from the
--- MIPS disassembler module. All the interesting stuff is there.
-------------------------------------------------------------------------------
-
-local require = require
-
-module(...)
-
-local dis_mips = require(_PACKAGE.."dis_mips")
-
-create = dis_mips.create_el
-disass = dis_mips.disass_el
-regname = dis_mips.regname
-
+----------------------------------------------------------------------------
+-- LuaJIT MIPSEL disassembler wrapper module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the little-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local require = require
+
+module(...)
+
+local dis_mips = require(_PACKAGE.."dis_mips")
+
+create = dis_mips.create_el
+disass = dis_mips.disass_el
+regname = dis_mips.regname
+
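A one-line sketch for the wrapper just restored: requiring "jit.dis_mipsel" (same hypothetical load path as above) simply hands back the little-endian entry points of dis_mips.

local dis_mipsel = require("jit.dis_mipsel")
-- Equivalent to dis_mips.disass_el(...): decodes each word as little-endian.
dis_mipsel.disass(string.char(0x01, 0x00, 0x02, 0x24), 0)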
diff --git a/3rdparty/lua/src/jit/dis_ppc.lua b/3rdparty/lua/src/jit/dis_ppc.lua
index 00cb833..169a534 100644
--- a/3rdparty/lua/src/jit/dis_ppc.lua
+++ b/3rdparty/lua/src/jit/dis_ppc.lua
@@ -1,591 +1,591 @@
-----------------------------------------------------------------------------
--- LuaJIT PPC disassembler module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT/X license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
--- This is a helper module used by the LuaJIT machine code dumper module.
---
--- It disassembles all common, non-privileged 32/64 bit PowerPC instructions
--- plus the e500 SPE instructions and some Cell/Xenon extensions.
---
--- NYI: VMX, VMX128
-------------------------------------------------------------------------------
-
-local type = type
-local sub, byte, format = string.sub, string.byte, string.format
-local match, gmatch, gsub = string.match, string.gmatch, string.gsub
-local concat = table.concat
-local bit = require("bit")
-local band, bor, tohex = bit.band, bit.bor, bit.tohex
-local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
-
-------------------------------------------------------------------------------
--- Primary and extended opcode maps
-------------------------------------------------------------------------------
-
-local map_crops = {
- shift = 1, mask = 1023,
- [0] = "mcrfXX",
- [33] = "crnor|crnotCCC=", [129] = "crandcCCC",
- [193] = "crxor|crclrCCC%", [225] = "crnandCCC",
- [257] = "crandCCC", [289] = "creqv|crsetCCC%",
- [417] = "crorcCCC", [449] = "cror|crmoveCCC=",
- [16] = "b_lrKB", [528] = "b_ctrKB",
- [150] = "isync",
-}
-
-local map_rlwinm = setmetatable({
- shift = 0, mask = -1,
-},
-{ __index = function(t, x)
- local rot = band(rshift(x, 11), 31)
- local mb = band(rshift(x, 6), 31)
- local me = band(rshift(x, 1), 31)
- if mb == 0 and me == 31-rot then
- return "slwiRR~A."
- elseif me == 31 and mb == 32-rot then
- return "srwiRR~-A."
- else
- return "rlwinmRR~AAA."
- end
- end
-})
-
-local map_rld = {
- shift = 2, mask = 7,
- [0] = "rldiclRR~HM.", "rldicrRR~HM.", "rldicRR~HM.", "rldimiRR~HM.",
- {
- shift = 1, mask = 1,
- [0] = "rldclRR~RM.", "rldcrRR~RM.",
- },
-}
-
-local map_ext = setmetatable({
- shift = 1, mask = 1023,
-
- [0] = "cmp_YLRR", [32] = "cmpl_YLRR",
- [4] = "twARR", [68] = "tdARR",
-
- [8] = "subfcRRR.", [40] = "subfRRR.",
- [104] = "negRR.", [136] = "subfeRRR.",
- [200] = "subfzeRR.", [232] = "subfmeRR.",
- [520] = "subfcoRRR.", [552] = "subfoRRR.",
- [616] = "negoRR.", [648] = "subfeoRRR.",
- [712] = "subfzeoRR.", [744] = "subfmeoRR.",
-
- [9] = "mulhduRRR.", [73] = "mulhdRRR.", [233] = "mulldRRR.",
- [457] = "divduRRR.", [489] = "divdRRR.",
- [745] = "mulldoRRR.",
- [969] = "divduoRRR.", [1001] = "divdoRRR.",
-
- [10] = "addcRRR.", [138] = "addeRRR.",
- [202] = "addzeRR.", [234] = "addmeRR.", [266] = "addRRR.",
- [522] = "addcoRRR.", [650] = "addeoRRR.",
- [714] = "addzeoRR.", [746] = "addmeoRR.", [778] = "addoRRR.",
-
- [11] = "mulhwuRRR.", [75] = "mulhwRRR.", [235] = "mullwRRR.",
- [459] = "divwuRRR.", [491] = "divwRRR.",
- [747] = "mullwoRRR.",
- [971] = "divwouRRR.", [1003] = "divwoRRR.",
-
- [15] = "iselltRRR", [47] = "iselgtRRR", [79] = "iseleqRRR",
-
- [144] = { shift = 20, mask = 1, [0] = "mtcrfRZ~", "mtocrfRZ~", },
- [19] = { shift = 20, mask = 1, [0] = "mfcrR", "mfocrfRZ", },
- [371] = { shift = 11, mask = 1023, [392] = "mftbR", [424] = "mftbuR", },
- [339] = {
- shift = 11, mask = 1023,
- [32] = "mferR", [256] = "mflrR", [288] = "mfctrR", [16] = "mfspefscrR",
- },
- [467] = {
- shift = 11, mask = 1023,
- [32] = "mtxerR", [256] = "mtlrR", [288] = "mtctrR", [16] = "mtspefscrR",
- },
-
- [20] = "lwarxRR0R", [84] = "ldarxRR0R",
-
- [21] = "ldxRR0R", [53] = "lduxRRR",
- [149] = "stdxRR0R", [181] = "stduxRRR",
- [341] = "lwaxRR0R", [373] = "lwauxRRR",
-
- [23] = "lwzxRR0R", [55] = "lwzuxRRR",
- [87] = "lbzxRR0R", [119] = "lbzuxRRR",
- [151] = "stwxRR0R", [183] = "stwuxRRR",
- [215] = "stbxRR0R", [247] = "stbuxRRR",
- [279] = "lhzxRR0R", [311] = "lhzuxRRR",
- [343] = "lhaxRR0R", [375] = "lhauxRRR",
- [407] = "sthxRR0R", [439] = "sthuxRRR",
-
- [54] = "dcbst-R0R", [86] = "dcbf-R0R",
- [150] = "stwcxRR0R.", [214] = "stdcxRR0R.",
- [246] = "dcbtst-R0R", [278] = "dcbt-R0R",
- [310] = "eciwxRR0R", [438] = "ecowxRR0R",
- [470] = "dcbi-RR",
-
- [598] = {
- shift = 21, mask = 3,
- [0] = "sync", "lwsync", "ptesync",
- },
- [758] = "dcba-RR",
- [854] = "eieio", [982] = "icbi-R0R", [1014] = "dcbz-R0R",
-
- [26] = "cntlzwRR~", [58] = "cntlzdRR~",
- [122] = "popcntbRR~",
- [154] = "prtywRR~", [186] = "prtydRR~",
-
- [28] = "andRR~R.", [60] = "andcRR~R.", [124] = "nor|notRR~R=.",
- [284] = "eqvRR~R.", [316] = "xorRR~R.",
- [412] = "orcRR~R.", [444] = "or|mrRR~R=.", [476] = "nandRR~R.",
- [508] = "cmpbRR~R",
-
- [512] = "mcrxrX",
-
- [532] = "ldbrxRR0R", [660] = "stdbrxRR0R",
-
- [533] = "lswxRR0R", [597] = "lswiRR0A",
- [661] = "stswxRR0R", [725] = "stswiRR0A",
-
- [534] = "lwbrxRR0R", [662] = "stwbrxRR0R",
- [790] = "lhbrxRR0R", [918] = "sthbrxRR0R",
-
- [535] = "lfsxFR0R", [567] = "lfsuxFRR",
- [599] = "lfdxFR0R", [631] = "lfduxFRR",
- [663] = "stfsxFR0R", [695] = "stfsuxFRR",
- [727] = "stfdxFR0R", [759] = "stfduxFR0R",
- [855] = "lfiwaxFR0R",
- [983] = "stfiwxFR0R",
-
- [24] = "slwRR~R.",
-
- [27] = "sldRR~R.", [536] = "srwRR~R.",
- [792] = "srawRR~R.", [824] = "srawiRR~A.",
-
- [794] = "sradRR~R.", [826] = "sradiRR~H.", [827] = "sradiRR~H.",
- [922] = "extshRR~.", [954] = "extsbRR~.", [986] = "extswRR~.",
-
- [539] = "srdRR~R.",
-},
-{ __index = function(t, x)
- if band(x, 31) == 15 then return "iselRRRC" end
- end
-})
-
-local map_ld = {
- shift = 0, mask = 3,
- [0] = "ldRRE", "lduRRE", "lwaRRE",
-}
-
-local map_std = {
- shift = 0, mask = 3,
- [0] = "stdRRE", "stduRRE",
-}
-
-local map_fps = {
- shift = 5, mask = 1,
- {
- shift = 1, mask = 15,
- [0] = false, false, "fdivsFFF.", false,
- "fsubsFFF.", "faddsFFF.", "fsqrtsF-F.", false,
- "fresF-F.", "fmulsFF-F.", "frsqrtesF-F.", false,
- "fmsubsFFFF~.", "fmaddsFFFF~.", "fnmsubsFFFF~.", "fnmaddsFFFF~.",
- }
-}
-
-local map_fpd = {
- shift = 5, mask = 1,
- [0] = {
- shift = 1, mask = 1023,
- [0] = "fcmpuXFF", [32] = "fcmpoXFF", [64] = "mcrfsXX",
- [38] = "mtfsb1A.", [70] = "mtfsb0A.", [134] = "mtfsfiA>>-A>",
- [8] = "fcpsgnFFF.", [40] = "fnegF-F.", [72] = "fmrF-F.",
- [136] = "fnabsF-F.", [264] = "fabsF-F.",
- [12] = "frspF-F.",
- [14] = "fctiwF-F.", [15] = "fctiwzF-F.",
- [583] = "mffsF.", [711] = "mtfsfZF.",
- [392] = "frinF-F.", [424] = "frizF-F.",
- [456] = "fripF-F.", [488] = "frimF-F.",
- [814] = "fctidF-F.", [815] = "fctidzF-F.", [846] = "fcfidF-F.",
- },
- {
- shift = 1, mask = 15,
- [0] = false, false, "fdivFFF.", false,
- "fsubFFF.", "faddFFF.", "fsqrtF-F.", "fselFFFF~.",
- "freF-F.", "fmulFF-F.", "frsqrteF-F.", false,
- "fmsubFFFF~.", "fmaddFFFF~.", "fnmsubFFFF~.", "fnmaddFFFF~.",
- }
-}
-
-local map_spe = {
- shift = 0, mask = 2047,
-
- [512] = "evaddwRRR", [514] = "evaddiwRAR~",
- [516] = "evsubwRRR~", [518] = "evsubiwRAR~",
- [520] = "evabsRR", [521] = "evnegRR",
- [522] = "evextsbRR", [523] = "evextshRR", [524] = "evrndwRR",
- [525] = "evcntlzwRR", [526] = "evcntlswRR",
-
- [527] = "brincRRR",
-
- [529] = "evandRRR", [530] = "evandcRRR", [534] = "evxorRRR",
- [535] = "evor|evmrRRR=", [536] = "evnor|evnotRRR=",
- [537] = "eveqvRRR", [539] = "evorcRRR", [542] = "evnandRRR",
-
- [544] = "evsrwuRRR", [545] = "evsrwsRRR",
- [546] = "evsrwiuRRA", [547] = "evsrwisRRA",
- [548] = "evslwRRR", [550] = "evslwiRRA",
- [552] = "evrlwRRR", [553] = "evsplatiRS",
- [554] = "evrlwiRRA", [555] = "evsplatfiRS",
- [556] = "evmergehiRRR", [557] = "evmergeloRRR",
- [558] = "evmergehiloRRR", [559] = "evmergelohiRRR",
-
- [560] = "evcmpgtuYRR", [561] = "evcmpgtsYRR",
- [562] = "evcmpltuYRR", [563] = "evcmpltsYRR",
- [564] = "evcmpeqYRR",
-
- [632] = "evselRRR", [633] = "evselRRRW",
- [634] = "evselRRRW", [635] = "evselRRRW",
- [636] = "evselRRRW", [637] = "evselRRRW",
- [638] = "evselRRRW", [639] = "evselRRRW",
-
- [640] = "evfsaddRRR", [641] = "evfssubRRR",
- [644] = "evfsabsRR", [645] = "evfsnabsRR", [646] = "evfsnegRR",
- [648] = "evfsmulRRR", [649] = "evfsdivRRR",
- [652] = "evfscmpgtYRR", [653] = "evfscmpltYRR", [654] = "evfscmpeqYRR",
- [656] = "evfscfuiR-R", [657] = "evfscfsiR-R",
- [658] = "evfscfufR-R", [659] = "evfscfsfR-R",
- [660] = "evfsctuiR-R", [661] = "evfsctsiR-R",
- [662] = "evfsctufR-R", [663] = "evfsctsfR-R",
- [664] = "evfsctuizR-R", [666] = "evfsctsizR-R",
- [668] = "evfststgtYRR", [669] = "evfststltYRR", [670] = "evfststeqYRR",
-
- [704] = "efsaddRRR", [705] = "efssubRRR",
- [708] = "efsabsRR", [709] = "efsnabsRR", [710] = "efsnegRR",
- [712] = "efsmulRRR", [713] = "efsdivRRR",
- [716] = "efscmpgtYRR", [717] = "efscmpltYRR", [718] = "efscmpeqYRR",
- [719] = "efscfdR-R",
- [720] = "efscfuiR-R", [721] = "efscfsiR-R",
- [722] = "efscfufR-R", [723] = "efscfsfR-R",
- [724] = "efsctuiR-R", [725] = "efsctsiR-R",
- [726] = "efsctufR-R", [727] = "efsctsfR-R",
- [728] = "efsctuizR-R", [730] = "efsctsizR-R",
- [732] = "efststgtYRR", [733] = "efststltYRR", [734] = "efststeqYRR",
-
- [736] = "efdaddRRR", [737] = "efdsubRRR",
- [738] = "efdcfuidR-R", [739] = "efdcfsidR-R",
- [740] = "efdabsRR", [741] = "efdnabsRR", [742] = "efdnegRR",
- [744] = "efdmulRRR", [745] = "efddivRRR",
- [746] = "efdctuidzR-R", [747] = "efdctsidzR-R",
- [748] = "efdcmpgtYRR", [749] = "efdcmpltYRR", [750] = "efdcmpeqYRR",
- [751] = "efdcfsR-R",
- [752] = "efdcfuiR-R", [753] = "efdcfsiR-R",
- [754] = "efdcfufR-R", [755] = "efdcfsfR-R",
- [756] = "efdctuiR-R", [757] = "efdctsiR-R",
- [758] = "efdctufR-R", [759] = "efdctsfR-R",
- [760] = "efdctuizR-R", [762] = "efdctsizR-R",
- [764] = "efdtstgtYRR", [765] = "efdtstltYRR", [766] = "efdtsteqYRR",
-
- [768] = "evlddxRR0R", [769] = "evlddRR8",
- [770] = "evldwxRR0R", [771] = "evldwRR8",
- [772] = "evldhxRR0R", [773] = "evldhRR8",
- [776] = "evlhhesplatxRR0R", [777] = "evlhhesplatRR2",
- [780] = "evlhhousplatxRR0R", [781] = "evlhhousplatRR2",
- [782] = "evlhhossplatxRR0R", [783] = "evlhhossplatRR2",
- [784] = "evlwhexRR0R", [785] = "evlwheRR4",
- [788] = "evlwhouxRR0R", [789] = "evlwhouRR4",
- [790] = "evlwhosxRR0R", [791] = "evlwhosRR4",
- [792] = "evlwwsplatxRR0R", [793] = "evlwwsplatRR4",
- [796] = "evlwhsplatxRR0R", [797] = "evlwhsplatRR4",
-
- [800] = "evstddxRR0R", [801] = "evstddRR8",
- [802] = "evstdwxRR0R", [803] = "evstdwRR8",
- [804] = "evstdhxRR0R", [805] = "evstdhRR8",
- [816] = "evstwhexRR0R", [817] = "evstwheRR4",
- [820] = "evstwhoxRR0R", [821] = "evstwhoRR4",
- [824] = "evstwwexRR0R", [825] = "evstwweRR4",
- [828] = "evstwwoxRR0R", [829] = "evstwwoRR4",
-
- [1027] = "evmhessfRRR", [1031] = "evmhossfRRR", [1032] = "evmheumiRRR",
- [1033] = "evmhesmiRRR", [1035] = "evmhesmfRRR", [1036] = "evmhoumiRRR",
- [1037] = "evmhosmiRRR", [1039] = "evmhosmfRRR", [1059] = "evmhessfaRRR",
- [1063] = "evmhossfaRRR", [1064] = "evmheumiaRRR", [1065] = "evmhesmiaRRR",
- [1067] = "evmhesmfaRRR", [1068] = "evmhoumiaRRR", [1069] = "evmhosmiaRRR",
- [1071] = "evmhosmfaRRR", [1095] = "evmwhssfRRR", [1096] = "evmwlumiRRR",
- [1100] = "evmwhumiRRR", [1101] = "evmwhsmiRRR", [1103] = "evmwhsmfRRR",
- [1107] = "evmwssfRRR", [1112] = "evmwumiRRR", [1113] = "evmwsmiRRR",
- [1115] = "evmwsmfRRR", [1127] = "evmwhssfaRRR", [1128] = "evmwlumiaRRR",
- [1132] = "evmwhumiaRRR", [1133] = "evmwhsmiaRRR", [1135] = "evmwhsmfaRRR",
- [1139] = "evmwssfaRRR", [1144] = "evmwumiaRRR", [1145] = "evmwsmiaRRR",
- [1147] = "evmwsmfaRRR",
-
- [1216] = "evaddusiaawRR", [1217] = "evaddssiaawRR",
- [1218] = "evsubfusiaawRR", [1219] = "evsubfssiaawRR",
- [1220] = "evmraRR",
- [1222] = "evdivwsRRR", [1223] = "evdivwuRRR",
- [1224] = "evaddumiaawRR", [1225] = "evaddsmiaawRR",
- [1226] = "evsubfumiaawRR", [1227] = "evsubfsmiaawRR",
-
- [1280] = "evmheusiaawRRR", [1281] = "evmhessiaawRRR",
- [1283] = "evmhessfaawRRR", [1284] = "evmhousiaawRRR",
- [1285] = "evmhossiaawRRR", [1287] = "evmhossfaawRRR",
- [1288] = "evmheumiaawRRR", [1289] = "evmhesmiaawRRR",
- [1291] = "evmhesmfaawRRR", [1292] = "evmhoumiaawRRR",
- [1293] = "evmhosmiaawRRR", [1295] = "evmhosmfaawRRR",
- [1320] = "evmhegumiaaRRR", [1321] = "evmhegsmiaaRRR",
- [1323] = "evmhegsmfaaRRR", [1324] = "evmhogumiaaRRR",
- [1325] = "evmhogsmiaaRRR", [1327] = "evmhogsmfaaRRR",
- [1344] = "evmwlusiaawRRR", [1345] = "evmwlssiaawRRR",
- [1352] = "evmwlumiaawRRR", [1353] = "evmwlsmiaawRRR",
- [1363] = "evmwssfaaRRR", [1368] = "evmwumiaaRRR",
- [1369] = "evmwsmiaaRRR", [1371] = "evmwsmfaaRRR",
- [1408] = "evmheusianwRRR", [1409] = "evmhessianwRRR",
- [1411] = "evmhessfanwRRR", [1412] = "evmhousianwRRR",
- [1413] = "evmhossianwRRR", [1415] = "evmhossfanwRRR",
- [1416] = "evmheumianwRRR", [1417] = "evmhesmianwRRR",
- [1419] = "evmhesmfanwRRR", [1420] = "evmhoumianwRRR",
- [1421] = "evmhosmianwRRR", [1423] = "evmhosmfanwRRR",
- [1448] = "evmhegumianRRR", [1449] = "evmhegsmianRRR",
- [1451] = "evmhegsmfanRRR", [1452] = "evmhogumianRRR",
- [1453] = "evmhogsmianRRR", [1455] = "evmhogsmfanRRR",
- [1472] = "evmwlusianwRRR", [1473] = "evmwlssianwRRR",
- [1480] = "evmwlumianwRRR", [1481] = "evmwlsmianwRRR",
- [1491] = "evmwssfanRRR", [1496] = "evmwumianRRR",
- [1497] = "evmwsmianRRR", [1499] = "evmwsmfanRRR",
-}
-
-local map_pri = {
- [0] = false, false, "tdiARI", "twiARI",
- map_spe, false, false, "mulliRRI",
- "subficRRI", false, "cmpl_iYLRU", "cmp_iYLRI",
- "addicRRI", "addic.RRI", "addi|liRR0I", "addis|lisRR0I",
- "b_KBJ", "sc", "bKJ", map_crops,
- "rlwimiRR~AAA.", map_rlwinm, false, "rlwnmRR~RAA.",
- "oriNRR~U", "orisRR~U", "xoriRR~U", "xorisRR~U",
- "andi.RR~U", "andis.RR~U", map_rld, map_ext,
- "lwzRRD", "lwzuRRD", "lbzRRD", "lbzuRRD",
- "stwRRD", "stwuRRD", "stbRRD", "stbuRRD",
- "lhzRRD", "lhzuRRD", "lhaRRD", "lhauRRD",
- "sthRRD", "sthuRRD", "lmwRRD", "stmwRRD",
- "lfsFRD", "lfsuFRD", "lfdFRD", "lfduFRD",
- "stfsFRD", "stfsuFRD", "stfdFRD", "stfduFRD",
- false, false, map_ld, map_fps,
- false, false, map_std, map_fpd,
-}
-
-------------------------------------------------------------------------------
-
-local map_gpr = {
- [0] = "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
-}
-
-local map_cond = { [0] = "lt", "gt", "eq", "so", "ge", "le", "ne", "ns", }
-
--- Format a condition bit.
-local function condfmt(cond)
- if cond <= 3 then
- return map_cond[band(cond, 3)]
- else
- return format("4*cr%d+%s", rshift(cond, 2), map_cond[band(cond, 3)])
- end
-end
-
-------------------------------------------------------------------------------
-
--- Output a nicely formatted line with an opcode and operands.
-local function putop(ctx, text, operands)
- local pos = ctx.pos
- local extra = ""
- if ctx.rel then
- local sym = ctx.symtab[ctx.rel]
- if sym then extra = "\t->"..sym end
- end
- if ctx.hexdump > 0 then
- ctx.out(format("%08x %s %-7s %s%s\n",
- ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
- else
- ctx.out(format("%08x %-7s %s%s\n",
- ctx.addr+pos, text, concat(operands, ", "), extra))
- end
- ctx.pos = pos + 4
-end
-
--- Fallback for unknown opcodes.
-local function unknown(ctx)
- return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
-end
-
--- Disassemble a single instruction.
-local function disass_ins(ctx)
- local pos = ctx.pos
- local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
- local op = bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
- local operands = {}
- local last = nil
- local rs = 21
- ctx.op = op
- ctx.rel = nil
-
- local opat = map_pri[rshift(b0, 2)]
- while type(opat) ~= "string" do
- if not opat then return unknown(ctx) end
- opat = opat[band(rshift(op, opat.shift), opat.mask)]
- end
- local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
- local altname, pat2 = match(pat, "|([a-z0-9_.]*)(.*)")
- if altname then pat = pat2 end
-
- for p in gmatch(pat, ".") do
- local x = nil
- if p == "R" then
- x = map_gpr[band(rshift(op, rs), 31)]
- rs = rs - 5
- elseif p == "F" then
- x = "f"..band(rshift(op, rs), 31)
- rs = rs - 5
- elseif p == "A" then
- x = band(rshift(op, rs), 31)
- rs = rs - 5
- elseif p == "S" then
- x = arshift(lshift(op, 27-rs), 27)
- rs = rs - 5
- elseif p == "I" then
- x = arshift(lshift(op, 16), 16)
- elseif p == "U" then
- x = band(op, 0xffff)
- elseif p == "D" or p == "E" then
- local disp = arshift(lshift(op, 16), 16)
- if p == "E" then disp = band(disp, -4) end
- if last == "r0" then last = "0" end
- operands[#operands] = format("%d(%s)", disp, last)
- elseif p >= "2" and p <= "8" then
- local disp = band(rshift(op, rs), 31) * p
- if last == "r0" then last = "0" end
- operands[#operands] = format("%d(%s)", disp, last)
- elseif p == "H" then
- x = band(rshift(op, rs), 31) + lshift(band(op, 2), 4)
- rs = rs - 5
- elseif p == "M" then
- x = band(rshift(op, rs), 31) + band(op, 0x20)
- elseif p == "C" then
- x = condfmt(band(rshift(op, rs), 31))
- rs = rs - 5
- elseif p == "B" then
- local bo = rshift(op, 21)
- local cond = band(rshift(op, 16), 31)
- local cn = ""
- rs = rs - 10
- if band(bo, 4) == 0 then
- cn = band(bo, 2) == 0 and "dnz" or "dz"
- if band(bo, 0x10) == 0 then
- cn = cn..(band(bo, 8) == 0 and "f" or "t")
- end
- if band(bo, 0x10) == 0 then x = condfmt(cond) end
- name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
- elseif band(bo, 0x10) == 0 then
- cn = map_cond[band(cond, 3) + (band(bo, 8) == 0 and 4 or 0)]
- if cond > 3 then x = "cr"..rshift(cond, 2) end
- name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
- end
- name = gsub(name, "_", cn)
- elseif p == "J" then
- x = arshift(lshift(op, 27-rs), 29-rs)*4
- if band(op, 2) == 0 then x = ctx.addr + pos + x end
- ctx.rel = x
- x = "0x"..tohex(x)
- elseif p == "K" then
- if band(op, 1) ~= 0 then name = name.."l" end
- if band(op, 2) ~= 0 then name = name.."a" end
- elseif p == "X" or p == "Y" then
- x = band(rshift(op, rs+2), 7)
- if x == 0 and p == "Y" then x = nil else x = "cr"..x end
- rs = rs - 5
- elseif p == "W" then
- x = "cr"..band(op, 7)
- elseif p == "Z" then
- x = band(rshift(op, rs-4), 255)
- rs = rs - 10
- elseif p == ">" then
- operands[#operands] = rshift(operands[#operands], 1)
- elseif p == "0" then
- if last == "r0" then
- operands[#operands] = nil
- if altname then name = altname end
- end
- elseif p == "L" then
- name = gsub(name, "_", band(op, 0x00200000) ~= 0 and "d" or "w")
- elseif p == "." then
- if band(op, 1) == 1 then name = name.."." end
- elseif p == "N" then
- if op == 0x60000000 then name = "nop"; break end
- elseif p == "~" then
- local n = #operands
- operands[n-1], operands[n] = operands[n], operands[n-1]
- elseif p == "=" then
- local n = #operands
- if last == operands[n-1] then
- operands[n] = nil
- name = altname
- end
- elseif p == "%" then
- local n = #operands
- if last == operands[n-1] and last == operands[n-2] then
- operands[n] = nil
- operands[n-1] = nil
- name = altname
- end
- elseif p == "-" then
- rs = rs - 5
- else
- assert(false)
- end
- if x then operands[#operands+1] = x; last = x end
- end
-
- return putop(ctx, name, operands)
-end
-
-------------------------------------------------------------------------------
-
--- Disassemble a block of code.
-local function disass_block(ctx, ofs, len)
- if not ofs then ofs = 0 end
- local stop = len and ofs+len or #ctx.code
- stop = stop - stop % 4
- ctx.pos = ofs - ofs % 4
- ctx.rel = nil
- while ctx.pos < stop do disass_ins(ctx) end
-end
-
--- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
-local function create_(code, addr, out)
- local ctx = {}
- ctx.code = code
- ctx.addr = addr or 0
- ctx.out = out or io.write
- ctx.symtab = {}
- ctx.disass = disass_block
- ctx.hexdump = 8
- return ctx
-end
-
--- Simple API: disassemble code (a string) at address and output via out.
-local function disass_(code, addr, out)
- create_(code, addr, out):disass()
-end
-
--- Return register name for RID.
-local function regname_(r)
- if r < 32 then return map_gpr[r] end
- return "f"..(r-32)
-end
-
--- Public module functions.
-module(...)
-
-create = create_
-disass = disass_
-regname = regname_
-
+----------------------------------------------------------------------------
+-- LuaJIT PPC disassembler module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT/X license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles all common, non-privileged 32/64 bit PowerPC instructions
+-- plus the e500 SPE instructions and some Cell/Xenon extensions.
+--
+-- NYI: VMX, VMX128
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, tohex = bit.band, bit.bor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps
+------------------------------------------------------------------------------
+
+local map_crops = {
+ shift = 1, mask = 1023,
+ [0] = "mcrfXX",
+ [33] = "crnor|crnotCCC=", [129] = "crandcCCC",
+ [193] = "crxor|crclrCCC%", [225] = "crnandCCC",
+ [257] = "crandCCC", [289] = "creqv|crsetCCC%",
+ [417] = "crorcCCC", [449] = "cror|crmoveCCC=",
+ [16] = "b_lrKB", [528] = "b_ctrKB",
+ [150] = "isync",
+}
+
+local map_rlwinm = setmetatable({
+ shift = 0, mask = -1,
+},
+{ __index = function(t, x)
+ local rot = band(rshift(x, 11), 31)
+ local mb = band(rshift(x, 6), 31)
+ local me = band(rshift(x, 1), 31)
+ if mb == 0 and me == 31-rot then
+ return "slwiRR~A."
+ elseif me == 31 and mb == 32-rot then
+ return "srwiRR~-A."
+ else
+ return "rlwinmRR~AAA."
+ end
+ end
+})
+
+local map_rld = {
+ shift = 2, mask = 7,
+ [0] = "rldiclRR~HM.", "rldicrRR~HM.", "rldicRR~HM.", "rldimiRR~HM.",
+ {
+ shift = 1, mask = 1,
+ [0] = "rldclRR~RM.", "rldcrRR~RM.",
+ },
+}
+
+local map_ext = setmetatable({
+ shift = 1, mask = 1023,
+
+ [0] = "cmp_YLRR", [32] = "cmpl_YLRR",
+ [4] = "twARR", [68] = "tdARR",
+
+ [8] = "subfcRRR.", [40] = "subfRRR.",
+ [104] = "negRR.", [136] = "subfeRRR.",
+ [200] = "subfzeRR.", [232] = "subfmeRR.",
+ [520] = "subfcoRRR.", [552] = "subfoRRR.",
+ [616] = "negoRR.", [648] = "subfeoRRR.",
+ [712] = "subfzeoRR.", [744] = "subfmeoRR.",
+
+ [9] = "mulhduRRR.", [73] = "mulhdRRR.", [233] = "mulldRRR.",
+ [457] = "divduRRR.", [489] = "divdRRR.",
+ [745] = "mulldoRRR.",
+ [969] = "divduoRRR.", [1001] = "divdoRRR.",
+
+ [10] = "addcRRR.", [138] = "addeRRR.",
+ [202] = "addzeRR.", [234] = "addmeRR.", [266] = "addRRR.",
+ [522] = "addcoRRR.", [650] = "addeoRRR.",
+ [714] = "addzeoRR.", [746] = "addmeoRR.", [778] = "addoRRR.",
+
+ [11] = "mulhwuRRR.", [75] = "mulhwRRR.", [235] = "mullwRRR.",
+ [459] = "divwuRRR.", [491] = "divwRRR.",
+ [747] = "mullwoRRR.",
+ [971] = "divwouRRR.", [1003] = "divwoRRR.",
+
+ [15] = "iselltRRR", [47] = "iselgtRRR", [79] = "iseleqRRR",
+
+ [144] = { shift = 20, mask = 1, [0] = "mtcrfRZ~", "mtocrfRZ~", },
+ [19] = { shift = 20, mask = 1, [0] = "mfcrR", "mfocrfRZ", },
+ [371] = { shift = 11, mask = 1023, [392] = "mftbR", [424] = "mftbuR", },
+ [339] = {
+ shift = 11, mask = 1023,
+ [32] = "mferR", [256] = "mflrR", [288] = "mfctrR", [16] = "mfspefscrR",
+ },
+ [467] = {
+ shift = 11, mask = 1023,
+ [32] = "mtxerR", [256] = "mtlrR", [288] = "mtctrR", [16] = "mtspefscrR",
+ },
+
+ [20] = "lwarxRR0R", [84] = "ldarxRR0R",
+
+ [21] = "ldxRR0R", [53] = "lduxRRR",
+ [149] = "stdxRR0R", [181] = "stduxRRR",
+ [341] = "lwaxRR0R", [373] = "lwauxRRR",
+
+ [23] = "lwzxRR0R", [55] = "lwzuxRRR",
+ [87] = "lbzxRR0R", [119] = "lbzuxRRR",
+ [151] = "stwxRR0R", [183] = "stwuxRRR",
+ [215] = "stbxRR0R", [247] = "stbuxRRR",
+ [279] = "lhzxRR0R", [311] = "lhzuxRRR",
+ [343] = "lhaxRR0R", [375] = "lhauxRRR",
+ [407] = "sthxRR0R", [439] = "sthuxRRR",
+
+ [54] = "dcbst-R0R", [86] = "dcbf-R0R",
+ [150] = "stwcxRR0R.", [214] = "stdcxRR0R.",
+ [246] = "dcbtst-R0R", [278] = "dcbt-R0R",
+ [310] = "eciwxRR0R", [438] = "ecowxRR0R",
+ [470] = "dcbi-RR",
+
+ [598] = {
+ shift = 21, mask = 3,
+ [0] = "sync", "lwsync", "ptesync",
+ },
+ [758] = "dcba-RR",
+ [854] = "eieio", [982] = "icbi-R0R", [1014] = "dcbz-R0R",
+
+ [26] = "cntlzwRR~", [58] = "cntlzdRR~",
+ [122] = "popcntbRR~",
+ [154] = "prtywRR~", [186] = "prtydRR~",
+
+ [28] = "andRR~R.", [60] = "andcRR~R.", [124] = "nor|notRR~R=.",
+ [284] = "eqvRR~R.", [316] = "xorRR~R.",
+ [412] = "orcRR~R.", [444] = "or|mrRR~R=.", [476] = "nandRR~R.",
+ [508] = "cmpbRR~R",
+
+ [512] = "mcrxrX",
+
+ [532] = "ldbrxRR0R", [660] = "stdbrxRR0R",
+
+ [533] = "lswxRR0R", [597] = "lswiRR0A",
+ [661] = "stswxRR0R", [725] = "stswiRR0A",
+
+ [534] = "lwbrxRR0R", [662] = "stwbrxRR0R",
+ [790] = "lhbrxRR0R", [918] = "sthbrxRR0R",
+
+ [535] = "lfsxFR0R", [567] = "lfsuxFRR",
+ [599] = "lfdxFR0R", [631] = "lfduxFRR",
+ [663] = "stfsxFR0R", [695] = "stfsuxFRR",
+ [727] = "stfdxFR0R", [759] = "stfduxFR0R",
+ [855] = "lfiwaxFR0R",
+ [983] = "stfiwxFR0R",
+
+ [24] = "slwRR~R.",
+
+ [27] = "sldRR~R.", [536] = "srwRR~R.",
+ [792] = "srawRR~R.", [824] = "srawiRR~A.",
+
+ [794] = "sradRR~R.", [826] = "sradiRR~H.", [827] = "sradiRR~H.",
+ [922] = "extshRR~.", [954] = "extsbRR~.", [986] = "extswRR~.",
+
+ [539] = "srdRR~R.",
+},
+{ __index = function(t, x)
+ if band(x, 31) == 15 then return "iselRRRC" end
+ end
+})
+
+local map_ld = {
+ shift = 0, mask = 3,
+ [0] = "ldRRE", "lduRRE", "lwaRRE",
+}
+
+local map_std = {
+ shift = 0, mask = 3,
+ [0] = "stdRRE", "stduRRE",
+}
+
+local map_fps = {
+ shift = 5, mask = 1,
+ {
+ shift = 1, mask = 15,
+ [0] = false, false, "fdivsFFF.", false,
+ "fsubsFFF.", "faddsFFF.", "fsqrtsF-F.", false,
+ "fresF-F.", "fmulsFF-F.", "frsqrtesF-F.", false,
+ "fmsubsFFFF~.", "fmaddsFFFF~.", "fnmsubsFFFF~.", "fnmaddsFFFF~.",
+ }
+}
+
+local map_fpd = {
+ shift = 5, mask = 1,
+ [0] = {
+ shift = 1, mask = 1023,
+ [0] = "fcmpuXFF", [32] = "fcmpoXFF", [64] = "mcrfsXX",
+ [38] = "mtfsb1A.", [70] = "mtfsb0A.", [134] = "mtfsfiA>>-A>",
+ [8] = "fcpsgnFFF.", [40] = "fnegF-F.", [72] = "fmrF-F.",
+ [136] = "fnabsF-F.", [264] = "fabsF-F.",
+ [12] = "frspF-F.",
+ [14] = "fctiwF-F.", [15] = "fctiwzF-F.",
+ [583] = "mffsF.", [711] = "mtfsfZF.",
+ [392] = "frinF-F.", [424] = "frizF-F.",
+ [456] = "fripF-F.", [488] = "frimF-F.",
+ [814] = "fctidF-F.", [815] = "fctidzF-F.", [846] = "fcfidF-F.",
+ },
+ {
+ shift = 1, mask = 15,
+ [0] = false, false, "fdivFFF.", false,
+ "fsubFFF.", "faddFFF.", "fsqrtF-F.", "fselFFFF~.",
+ "freF-F.", "fmulFF-F.", "frsqrteF-F.", false,
+ "fmsubFFFF~.", "fmaddFFFF~.", "fnmsubFFFF~.", "fnmaddFFFF~.",
+ }
+}
+
+local map_spe = {
+ shift = 0, mask = 2047,
+
+ [512] = "evaddwRRR", [514] = "evaddiwRAR~",
+ [516] = "evsubwRRR~", [518] = "evsubiwRAR~",
+ [520] = "evabsRR", [521] = "evnegRR",
+ [522] = "evextsbRR", [523] = "evextshRR", [524] = "evrndwRR",
+ [525] = "evcntlzwRR", [526] = "evcntlswRR",
+
+ [527] = "brincRRR",
+
+ [529] = "evandRRR", [530] = "evandcRRR", [534] = "evxorRRR",
+ [535] = "evor|evmrRRR=", [536] = "evnor|evnotRRR=",
+ [537] = "eveqvRRR", [539] = "evorcRRR", [542] = "evnandRRR",
+
+ [544] = "evsrwuRRR", [545] = "evsrwsRRR",
+ [546] = "evsrwiuRRA", [547] = "evsrwisRRA",
+ [548] = "evslwRRR", [550] = "evslwiRRA",
+ [552] = "evrlwRRR", [553] = "evsplatiRS",
+ [554] = "evrlwiRRA", [555] = "evsplatfiRS",
+ [556] = "evmergehiRRR", [557] = "evmergeloRRR",
+ [558] = "evmergehiloRRR", [559] = "evmergelohiRRR",
+
+ [560] = "evcmpgtuYRR", [561] = "evcmpgtsYRR",
+ [562] = "evcmpltuYRR", [563] = "evcmpltsYRR",
+ [564] = "evcmpeqYRR",
+
+ [632] = "evselRRR", [633] = "evselRRRW",
+ [634] = "evselRRRW", [635] = "evselRRRW",
+ [636] = "evselRRRW", [637] = "evselRRRW",
+ [638] = "evselRRRW", [639] = "evselRRRW",
+
+ [640] = "evfsaddRRR", [641] = "evfssubRRR",
+ [644] = "evfsabsRR", [645] = "evfsnabsRR", [646] = "evfsnegRR",
+ [648] = "evfsmulRRR", [649] = "evfsdivRRR",
+ [652] = "evfscmpgtYRR", [653] = "evfscmpltYRR", [654] = "evfscmpeqYRR",
+ [656] = "evfscfuiR-R", [657] = "evfscfsiR-R",
+ [658] = "evfscfufR-R", [659] = "evfscfsfR-R",
+ [660] = "evfsctuiR-R", [661] = "evfsctsiR-R",
+ [662] = "evfsctufR-R", [663] = "evfsctsfR-R",
+ [664] = "evfsctuizR-R", [666] = "evfsctsizR-R",
+ [668] = "evfststgtYRR", [669] = "evfststltYRR", [670] = "evfststeqYRR",
+
+ [704] = "efsaddRRR", [705] = "efssubRRR",
+ [708] = "efsabsRR", [709] = "efsnabsRR", [710] = "efsnegRR",
+ [712] = "efsmulRRR", [713] = "efsdivRRR",
+ [716] = "efscmpgtYRR", [717] = "efscmpltYRR", [718] = "efscmpeqYRR",
+ [719] = "efscfdR-R",
+ [720] = "efscfuiR-R", [721] = "efscfsiR-R",
+ [722] = "efscfufR-R", [723] = "efscfsfR-R",
+ [724] = "efsctuiR-R", [725] = "efsctsiR-R",
+ [726] = "efsctufR-R", [727] = "efsctsfR-R",
+ [728] = "efsctuizR-R", [730] = "efsctsizR-R",
+ [732] = "efststgtYRR", [733] = "efststltYRR", [734] = "efststeqYRR",
+
+ [736] = "efdaddRRR", [737] = "efdsubRRR",
+ [738] = "efdcfuidR-R", [739] = "efdcfsidR-R",
+ [740] = "efdabsRR", [741] = "efdnabsRR", [742] = "efdnegRR",
+ [744] = "efdmulRRR", [745] = "efddivRRR",
+ [746] = "efdctuidzR-R", [747] = "efdctsidzR-R",
+ [748] = "efdcmpgtYRR", [749] = "efdcmpltYRR", [750] = "efdcmpeqYRR",
+ [751] = "efdcfsR-R",
+ [752] = "efdcfuiR-R", [753] = "efdcfsiR-R",
+ [754] = "efdcfufR-R", [755] = "efdcfsfR-R",
+ [756] = "efdctuiR-R", [757] = "efdctsiR-R",
+ [758] = "efdctufR-R", [759] = "efdctsfR-R",
+ [760] = "efdctuizR-R", [762] = "efdctsizR-R",
+ [764] = "efdtstgtYRR", [765] = "efdtstltYRR", [766] = "efdtsteqYRR",
+
+ [768] = "evlddxRR0R", [769] = "evlddRR8",
+ [770] = "evldwxRR0R", [771] = "evldwRR8",
+ [772] = "evldhxRR0R", [773] = "evldhRR8",
+ [776] = "evlhhesplatxRR0R", [777] = "evlhhesplatRR2",
+ [780] = "evlhhousplatxRR0R", [781] = "evlhhousplatRR2",
+ [782] = "evlhhossplatxRR0R", [783] = "evlhhossplatRR2",
+ [784] = "evlwhexRR0R", [785] = "evlwheRR4",
+ [788] = "evlwhouxRR0R", [789] = "evlwhouRR4",
+ [790] = "evlwhosxRR0R", [791] = "evlwhosRR4",
+ [792] = "evlwwsplatxRR0R", [793] = "evlwwsplatRR4",
+ [796] = "evlwhsplatxRR0R", [797] = "evlwhsplatRR4",
+
+ [800] = "evstddxRR0R", [801] = "evstddRR8",
+ [802] = "evstdwxRR0R", [803] = "evstdwRR8",
+ [804] = "evstdhxRR0R", [805] = "evstdhRR8",
+ [816] = "evstwhexRR0R", [817] = "evstwheRR4",
+ [820] = "evstwhoxRR0R", [821] = "evstwhoRR4",
+ [824] = "evstwwexRR0R", [825] = "evstwweRR4",
+ [828] = "evstwwoxRR0R", [829] = "evstwwoRR4",
+
+ [1027] = "evmhessfRRR", [1031] = "evmhossfRRR", [1032] = "evmheumiRRR",
+ [1033] = "evmhesmiRRR", [1035] = "evmhesmfRRR", [1036] = "evmhoumiRRR",
+ [1037] = "evmhosmiRRR", [1039] = "evmhosmfRRR", [1059] = "evmhessfaRRR",
+ [1063] = "evmhossfaRRR", [1064] = "evmheumiaRRR", [1065] = "evmhesmiaRRR",
+ [1067] = "evmhesmfaRRR", [1068] = "evmhoumiaRRR", [1069] = "evmhosmiaRRR",
+ [1071] = "evmhosmfaRRR", [1095] = "evmwhssfRRR", [1096] = "evmwlumiRRR",
+ [1100] = "evmwhumiRRR", [1101] = "evmwhsmiRRR", [1103] = "evmwhsmfRRR",
+ [1107] = "evmwssfRRR", [1112] = "evmwumiRRR", [1113] = "evmwsmiRRR",
+ [1115] = "evmwsmfRRR", [1127] = "evmwhssfaRRR", [1128] = "evmwlumiaRRR",
+ [1132] = "evmwhumiaRRR", [1133] = "evmwhsmiaRRR", [1135] = "evmwhsmfaRRR",
+ [1139] = "evmwssfaRRR", [1144] = "evmwumiaRRR", [1145] = "evmwsmiaRRR",
+ [1147] = "evmwsmfaRRR",
+
+ [1216] = "evaddusiaawRR", [1217] = "evaddssiaawRR",
+ [1218] = "evsubfusiaawRR", [1219] = "evsubfssiaawRR",
+ [1220] = "evmraRR",
+ [1222] = "evdivwsRRR", [1223] = "evdivwuRRR",
+ [1224] = "evaddumiaawRR", [1225] = "evaddsmiaawRR",
+ [1226] = "evsubfumiaawRR", [1227] = "evsubfsmiaawRR",
+
+ [1280] = "evmheusiaawRRR", [1281] = "evmhessiaawRRR",
+ [1283] = "evmhessfaawRRR", [1284] = "evmhousiaawRRR",
+ [1285] = "evmhossiaawRRR", [1287] = "evmhossfaawRRR",
+ [1288] = "evmheumiaawRRR", [1289] = "evmhesmiaawRRR",
+ [1291] = "evmhesmfaawRRR", [1292] = "evmhoumiaawRRR",
+ [1293] = "evmhosmiaawRRR", [1295] = "evmhosmfaawRRR",
+ [1320] = "evmhegumiaaRRR", [1321] = "evmhegsmiaaRRR",
+ [1323] = "evmhegsmfaaRRR", [1324] = "evmhogumiaaRRR",
+ [1325] = "evmhogsmiaaRRR", [1327] = "evmhogsmfaaRRR",
+ [1344] = "evmwlusiaawRRR", [1345] = "evmwlssiaawRRR",
+ [1352] = "evmwlumiaawRRR", [1353] = "evmwlsmiaawRRR",
+ [1363] = "evmwssfaaRRR", [1368] = "evmwumiaaRRR",
+ [1369] = "evmwsmiaaRRR", [1371] = "evmwsmfaaRRR",
+ [1408] = "evmheusianwRRR", [1409] = "evmhessianwRRR",
+ [1411] = "evmhessfanwRRR", [1412] = "evmhousianwRRR",
+ [1413] = "evmhossianwRRR", [1415] = "evmhossfanwRRR",
+ [1416] = "evmheumianwRRR", [1417] = "evmhesmianwRRR",
+ [1419] = "evmhesmfanwRRR", [1420] = "evmhoumianwRRR",
+ [1421] = "evmhosmianwRRR", [1423] = "evmhosmfanwRRR",
+ [1448] = "evmhegumianRRR", [1449] = "evmhegsmianRRR",
+ [1451] = "evmhegsmfanRRR", [1452] = "evmhogumianRRR",
+ [1453] = "evmhogsmianRRR", [1455] = "evmhogsmfanRRR",
+ [1472] = "evmwlusianwRRR", [1473] = "evmwlssianwRRR",
+ [1480] = "evmwlumianwRRR", [1481] = "evmwlsmianwRRR",
+ [1491] = "evmwssfanRRR", [1496] = "evmwumianRRR",
+ [1497] = "evmwsmianRRR", [1499] = "evmwsmfanRRR",
+}
+
+local map_pri = {
+ [0] = false, false, "tdiARI", "twiARI",
+ map_spe, false, false, "mulliRRI",
+ "subficRRI", false, "cmpl_iYLRU", "cmp_iYLRI",
+ "addicRRI", "addic.RRI", "addi|liRR0I", "addis|lisRR0I",
+ "b_KBJ", "sc", "bKJ", map_crops,
+ "rlwimiRR~AAA.", map_rlwinm, false, "rlwnmRR~RAA.",
+ "oriNRR~U", "orisRR~U", "xoriRR~U", "xorisRR~U",
+ "andi.RR~U", "andis.RR~U", map_rld, map_ext,
+ "lwzRRD", "lwzuRRD", "lbzRRD", "lbzuRRD",
+ "stwRRD", "stwuRRD", "stbRRD", "stbuRRD",
+ "lhzRRD", "lhzuRRD", "lhaRRD", "lhauRRD",
+ "sthRRD", "sthuRRD", "lmwRRD", "stmwRRD",
+ "lfsFRD", "lfsuFRD", "lfdFRD", "lfduFRD",
+ "stfsFRD", "stfsuFRD", "stfdFRD", "stfduFRD",
+ false, false, map_ld, map_fps,
+ false, false, map_std, map_fpd,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+}
+
+local map_cond = { [0] = "lt", "gt", "eq", "so", "ge", "le", "ne", "ns", }
+
+-- Format a condition bit.
+local function condfmt(cond)
+ if cond <= 3 then
+ return map_cond[band(cond, 3)]
+ else
+ return format("4*cr%d+%s", rshift(cond, 2), map_cond[band(cond, 3)])
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then extra = "\t->"..sym end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-7s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-7s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
+ local operands = {}
+ local last = nil
+ local rs = 21
+ ctx.op = op
+ ctx.rel = nil
+
+ local opat = map_pri[rshift(b0, 2)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)]
+ end
+ local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.]*)(.*)")
+ if altname then pat = pat2 end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "R" then
+ x = map_gpr[band(rshift(op, rs), 31)]
+ rs = rs - 5
+ elseif p == "F" then
+ x = "f"..band(rshift(op, rs), 31)
+ rs = rs - 5
+ elseif p == "A" then
+ x = band(rshift(op, rs), 31)
+ rs = rs - 5
+ elseif p == "S" then
+ x = arshift(lshift(op, 27-rs), 27)
+ rs = rs - 5
+ elseif p == "I" then
+ x = arshift(lshift(op, 16), 16)
+ elseif p == "U" then
+ x = band(op, 0xffff)
+ elseif p == "D" or p == "E" then
+ local disp = arshift(lshift(op, 16), 16)
+ if p == "E" then disp = band(disp, -4) end
+ if last == "r0" then last = "0" end
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p >= "2" and p <= "8" then
+ local disp = band(rshift(op, rs), 31) * p
+ if last == "r0" then last = "0" end
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p == "H" then
+ x = band(rshift(op, rs), 31) + lshift(band(op, 2), 4)
+ rs = rs - 5
+ elseif p == "M" then
+ x = band(rshift(op, rs), 31) + band(op, 0x20)
+ elseif p == "C" then
+ x = condfmt(band(rshift(op, rs), 31))
+ rs = rs - 5
+ elseif p == "B" then
+ local bo = rshift(op, 21)
+ local cond = band(rshift(op, 16), 31)
+ local cn = ""
+ rs = rs - 10
+ if band(bo, 4) == 0 then
+ cn = band(bo, 2) == 0 and "dnz" or "dz"
+ if band(bo, 0x10) == 0 then
+ cn = cn..(band(bo, 8) == 0 and "f" or "t")
+ end
+ if band(bo, 0x10) == 0 then x = condfmt(cond) end
+ name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
+ elseif band(bo, 0x10) == 0 then
+ cn = map_cond[band(cond, 3) + (band(bo, 8) == 0 and 4 or 0)]
+ if cond > 3 then x = "cr"..rshift(cond, 2) end
+ name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
+ end
+ name = gsub(name, "_", cn)
+ elseif p == "J" then
+ x = arshift(lshift(op, 27-rs), 29-rs)*4
+ if band(op, 2) == 0 then x = ctx.addr + pos + x end
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "K" then
+ if band(op, 1) ~= 0 then name = name.."l" end
+ if band(op, 2) ~= 0 then name = name.."a" end
+ elseif p == "X" or p == "Y" then
+ x = band(rshift(op, rs+2), 7)
+ if x == 0 and p == "Y" then x = nil else x = "cr"..x end
+ rs = rs - 5
+ elseif p == "W" then
+ x = "cr"..band(op, 7)
+ elseif p == "Z" then
+ x = band(rshift(op, rs-4), 255)
+ rs = rs - 10
+ elseif p == ">" then
+ operands[#operands] = rshift(operands[#operands], 1)
+ elseif p == "0" then
+ if last == "r0" then
+ operands[#operands] = nil
+ if altname then name = altname end
+ end
+ elseif p == "L" then
+ name = gsub(name, "_", band(op, 0x00200000) ~= 0 and "d" or "w")
+ elseif p == "." then
+ if band(op, 1) == 1 then name = name.."." end
+ elseif p == "N" then
+ if op == 0x60000000 then name = "nop"; break end
+ elseif p == "~" then
+ local n = #operands
+ operands[n-1], operands[n] = operands[n], operands[n-1]
+ elseif p == "=" then
+ local n = #operands
+ if last == operands[n-1] then
+ operands[n] = nil
+ name = altname
+ end
+ elseif p == "%" then
+ local n = #operands
+ if last == operands[n-1] and last == operands[n-2] then
+ operands[n] = nil
+ operands[n-1] = nil
+ name = altname
+ end
+ elseif p == "-" then
+ rs = rs - 5
+ else
+ assert(false)
+ end
+ if x then operands[#operands+1] = x; last = x end
+ end
+
+ return putop(ctx, name, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ stop = stop - stop % 4
+ ctx.pos = ofs - ofs % 4
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 32 then return map_gpr[r] end
+ return "f"..(r-32)
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+disass = disass_
+regname = regname_
+
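A minimal usage sketch of the disassembler API re-added above, assuming the file is loadable as the "jit.dis_ppc" module (the require path is an assumption; it is not shown in this hunk) and using an arbitrary instruction word purely for illustration:

-- Hypothetical usage sketch (assumed module path and example bytes).
local dis = require("jit.dis_ppc")                 -- assumed module path
local code = string.char(0x7c, 0x63, 0x22, 0x14)   -- one arbitrary big-endian 4-byte word
-- Simple API: disassemble `code` as if located at address 0x10000, output via io.write.
dis.disass(code, 0x10000)
-- Extended API: create a context, then disassemble a sub-range of it.
local ctx = dis.create(code, 0x10000, io.write)
ctx:disass(0, #code)
print(dis.regname(3))                              -- "r3", per map_gpr above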
diff --git a/3rdparty/lua/src/jit/dis_x64.lua b/3rdparty/lua/src/jit/dis_x64.lua
index fad87a3..4a1894a 100644
--- a/3rdparty/lua/src/jit/dis_x64.lua
+++ b/3rdparty/lua/src/jit/dis_x64.lua
@@ -1,20 +1,20 @@
-----------------------------------------------------------------------------
--- LuaJIT x64 disassembler wrapper module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
--- This module just exports the 64 bit functions from the combined
--- x86/x64 disassembler module. All the interesting stuff is there.
-------------------------------------------------------------------------------
-
-local require = require
-
-module(...)
-
-local dis_x86 = require(_PACKAGE.."dis_x86")
-
-create = dis_x86.create64
-disass = dis_x86.disass64
-regname = dis_x86.regname64
-
+----------------------------------------------------------------------------
+-- LuaJIT x64 disassembler wrapper module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the 64 bit functions from the combined
+-- x86/x64 disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local require = require
+
+module(...)
+
+local dis_x86 = require(_PACKAGE.."dis_x86")
+
+create = dis_x86.create64
+disass = dis_x86.disass64
+regname = dis_x86.regname64
+
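The wrapper above simply forwards to the combined x86/x64 module, so the two calls below are equivalent; a minimal sketch, assuming the modules are reachable as "jit.dis_x64" and "jit.dis_x86" (require paths not shown in this hunk) and using illustrative bytes:

-- Hypothetical usage sketch (assumed module paths and example bytes).
local d64 = require("jit.dis_x64")
local d86 = require("jit.dis_x86")
local code = "\144\195"              -- 0x90 0xC3: nop; ret
d64.disass(code, 0x400000)           -- through the wrapper exports
d86.disass64(code, 0x400000)         -- directly on the combined module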
diff --git a/3rdparty/lua/src/jit/dis_x86.lua b/3rdparty/lua/src/jit/dis_x86.lua
index 12ed345..c442a17 100644
--- a/3rdparty/lua/src/jit/dis_x86.lua
+++ b/3rdparty/lua/src/jit/dis_x86.lua
@@ -1,836 +1,836 @@
-----------------------------------------------------------------------------
--- LuaJIT x86/x64 disassembler module.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
--- This is a helper module used by the LuaJIT machine code dumper module.
---
--- Sending small code snippets to an external disassembler and mixing the
--- output with our own stuff was too fragile. So I had to bite the bullet
--- and write yet another x86 disassembler. Oh well ...
---
--- The output format is very similar to what ndisasm generates. But it has
--- been developed independently by looking at the opcode tables from the
--- Intel and AMD manuals. The supported instruction set is quite extensive
--- and reflects what a current generation Intel or AMD CPU implements in
--- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3,
--- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM)
--- instructions.
---
--- Notes:
--- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported.
--- * No attempt at optimization has been made -- it's fast enough for my needs.
--- * The public API may change when more architectures are added.
-------------------------------------------------------------------------------
-
-local type = type
-local sub, byte, format = string.sub, string.byte, string.format
-local match, gmatch, gsub = string.match, string.gmatch, string.gsub
-local lower, rep = string.lower, string.rep
-
--- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on.
-local map_opc1_32 = {
---0x
-[0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es",
-"orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*",
---1x
-"adcBmr","adcVmr","adcBrm","adcVrm","adcBai","adcVai","push ss","pop ss",
-"sbbBmr","sbbVmr","sbbBrm","sbbVrm","sbbBai","sbbVai","push ds","pop ds",
---2x
-"andBmr","andVmr","andBrm","andVrm","andBai","andVai","es:seg","daa",
-"subBmr","subVmr","subBrm","subVrm","subBai","subVai","cs:seg","das",
---3x
-"xorBmr","xorVmr","xorBrm","xorVrm","xorBai","xorVai","ss:seg","aaa",
-"cmpBmr","cmpVmr","cmpBrm","cmpVrm","cmpBai","cmpVai","ds:seg","aas",
---4x
-"incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR",
-"decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR",
---5x
-"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR",
-"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR",
---6x
-"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr",
-"fs:seg","gs:seg","o16:","a16",
-"pushUi","imulVrmi","pushBs","imulVrms",
-"insb","insVS","outsb","outsVS",
---7x
-"joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj",
-"jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj",
---8x
-"arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms",
-"testBmr","testVmr","xchgBrm","xchgVrm",
-"movBmr","movVmr","movBrm","movVrm",
-"movVmg","leaVrm","movWgm","popUm",
---9x
-"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR",
-"xchgVaR","xchgVaR","xchgVaR","xchgVaR",
-"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait",
-"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf",
---Ax
-"movBao","movVao","movBoa","movVoa",
-"movsb","movsVS","cmpsb","cmpsVS",
-"testBai","testVai","stosb","stosVS",
-"lodsb","lodsVS","scasb","scasVS",
---Bx
-"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi",
-"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI",
---Cx
-"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi",
-"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS",
---Dx
-"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb",
-"fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7",
---Ex
-"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj",
-"inBau","inVau","outBua","outVua",
-"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda",
---Fx
-"lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm",
-"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm",
-}
-assert(#map_opc1_32 == 255)
-
--- Map for 1st opcode byte in 64 bit mode (overrides only).
-local map_opc1_64 = setmetatable({
- [0x06]=false, [0x07]=false, [0x0e]=false,
- [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false,
- [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false,
- [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:",
- [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb",
- [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb",
- [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb",
- [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb",
- [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false,
- [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false,
-}, { __index = map_opc1_32 })
-
--- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you.
--- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2
-local map_opc2 = {
---0x
-[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret",
-"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu",
---1x
-"movupsXrm|movssXrm|movupdXrm|movsdXrm",
-"movupsXmr|movssXmr|movupdXmr|movsdXmr",
-"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm",
-"movlpsXmr||movlpdXmr",
-"unpcklpsXrm||unpcklpdXrm",
-"unpckhpsXrm||unpckhpdXrm",
-"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm",
-"movhpsXmr||movhpdXmr",
-"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm",
-"hintnopVm","hintnopVm","hintnopVm","hintnopVm",
---2x
-"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil,
-"movapsXrm||movapdXrm",
-"movapsXmr||movapdXmr",
-"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt",
-"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr",
-"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm",
-"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm",
-"ucomissXrm||ucomisdXrm",
-"comissXrm||comisdXrm",
---3x
-"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec",
-"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil,
---4x
-"cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm",
-"cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm",
-"cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm",
-"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm",
---5x
-"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm",
-"rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm",
-"andpsXrm||andpdXrm","andnpsXrm||andnpdXrm",
-"orpsXrm||orpdXrm","xorpsXrm||xorpdXrm",
-"addpsXrm|addssXrm|addpdXrm|addsdXrm","mulpsXrm|mulssXrm|mulpdXrm|mulsdXrm",
-"cvtps2pdXrm|cvtss2sdXrm|cvtpd2psXrm|cvtsd2ssXrm",
-"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm",
-"subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm",
-"divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm",
---6x
-"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm",
-"pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm",
-"punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm",
-"||punpcklqdqXrm","||punpckhqdqXrm",
-"movPrVSm","movqMrm|movdquXrm|movdqaXrm",
---7x
-"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu",
-"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu",
-"pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|",
-"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$",
-nil,nil,
-"||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm",
-"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr",
---8x
-"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj",
-"jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj",
---9x
-"setoBm","setnoBm","setbBm","setnbBm","setzBm","setnzBm","setbeBm","setaBm",
-"setsBm","setnsBm","setpeBm","setpoBm","setlBm","setgeBm","setleBm","setgBm",
---Ax
-"push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil,
-"push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm",
---Bx
-"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr",
-"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt",
-"|popcntVrm","ud2Dp","bt!Vmu","btcVmr",
-"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt",
---Cx
-"xaddBmr","xaddVmr",
-"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|",
-"pinsrwPrWmu","pextrwDrPmu",
-"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp",
-"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR",
---Dx
-"||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm",
-"paddqPrm","pmullwPrm",
-"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm",
-"psubusbPrm","psubuswPrm","pminubPrm","pandPrm",
-"paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm",
---Ex
-"pavgbPrm","psrawPrm","psradPrm","pavgwPrm",
-"pmulhuwPrm","pmulhwPrm",
-"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr",
-"psubsbPrm","psubswPrm","pminswPrm","porPrm",
-"paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm",
---Fx
-"|||lddquXrm","psllwPrm","pslldPrm","psllqPrm",
-"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$",
-"psubbPrm","psubwPrm","psubdPrm","psubqPrm",
-"paddbPrm","paddwPrm","padddPrm","ud",
-}
-assert(map_opc2[255] == "ud")
-
--- Map for three-byte opcodes. Can't wait for their next invention.
-local map_opc3 = {
-["38"] = { -- [66] 0f 38 xx
---0x
-[0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm",
-"pmaddubswPrm","phsubwPrm","phsubdPrm","phsubswPrm",
-"psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm",
-nil,nil,nil,nil,
---1x
-"||pblendvbXrma",nil,nil,nil,
-"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm",
-nil,nil,nil,nil,
-"pabsbPrm","pabswPrm","pabsdPrm",nil,
---2x
-"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm",
-"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil,
-"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm",
-nil,nil,nil,nil,
---3x
-"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm",
-"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm",
-"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm",
-"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm",
---4x
-"||pmulddXrm","||phminposuwXrm",
---Fx
-[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt",
-},
-
-["3a"] = { -- [66] 0f 3a xx
---0x
-[0x00]=nil,nil,nil,nil,nil,nil,nil,nil,
-"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu",
-"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu",
---1x
-nil,nil,nil,nil,
-"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru",
-nil,nil,nil,nil,nil,nil,nil,nil,
---2x
-"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil,
---4x
-[0x40] = "||dppsXrmu",
-[0x41] = "||dppdXrmu",
-[0x42] = "||mpsadbwXrmu",
---6x
-[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu",
-[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu",
-},
-}
-
--- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands).
-local map_opcvm = {
-[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff",
-[0xc8]="monitor",[0xc9]="mwait",
-[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave",
-[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga",
-[0xf8]="swapgs",[0xf9]="rdtscp",
-}
-
--- Map for FP opcodes. And you thought stack machines are simple?
-local map_opcfp = {
--- D8-DF 00-BF: opcodes with a memory operand.
--- D8
-[0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm",
-"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm",
--- DA
-"fiaddDm","fimulDm","ficomDm","ficompDm",
-"fisubDm","fisubrDm","fidivDm","fidivrDm",
--- DB
-"fildDm","fisttpDm","fistDm","fistpDm",nil,"fld twordFmp",nil,"fstp twordFmp",
--- DC
-"faddGm","fmulGm","fcomGm","fcompGm","fsubGm","fsubrGm","fdivGm","fdivrGm",
--- DD
-"fldGm","fisttpQm","fstGm","fstpGm","frstorDmp",nil,"fnsaveDmp","fnstswWm",
--- DE
-"fiaddWm","fimulWm","ficomWm","ficompWm",
-"fisubWm","fisubrWm","fidivWm","fidivrWm",
--- DF
-"fildWm","fisttpWm","fistWm","fistpWm",
-"fbld twordFmp","fildQm","fbstp twordFmp","fistpQm",
--- xx C0-FF: opcodes with a pseudo-register operand.
--- D8
-"faddFf","fmulFf","fcomFf","fcompFf","fsubFf","fsubrFf","fdivFf","fdivrFf",
--- D9
-"fldFf","fxchFf",{"fnop"},nil,
-{"fchs","fabs",nil,nil,"ftst","fxam"},
-{"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz"},
-{"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp"},
-{"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"},
--- DA
-"fcmovbFf","fcmoveFf","fcmovbeFf","fcmovuFf",nil,{nil,"fucompp"},nil,nil,
--- DB
-"fcmovnbFf","fcmovneFf","fcmovnbeFf","fcmovnuFf",
-{nil,nil,"fnclex","fninit"},"fucomiFf","fcomiFf",nil,
--- DC
-"fadd toFf","fmul toFf",nil,nil,
-"fsub toFf","fsubr toFf","fdivr toFf","fdiv toFf",
--- DD
-"ffreeFf",nil,"fstFf","fstpFf","fucomFf","fucompFf",nil,nil,
--- DE
-"faddpFf","fmulpFf",nil,{nil,"fcompp"},
-"fsubrpFf","fsubpFf","fdivrpFf","fdivpFf",
--- DF
-nil,nil,nil,nil,{"fnstsw ax"},"fucomipFf","fcomipFf",nil,
-}
-assert(map_opcfp[126] == "fcomipFf")
-
--- Map for opcode groups. The subkey is sp from the ModRM byte.
-local map_opcgroup = {
- arith = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" },
- shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" },
- testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" },
- testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" },
- incb = { "inc", "dec" },
- incd = { "inc", "dec", "callUmp", "$call farDmp",
- "jmpUmp", "$jmp farDmp", "pushUm" },
- sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" },
- sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt",
- "smsw", nil, "lmsw", "vm*$invlpg" },
- bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" },
- cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil,
- nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" },
- pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" },
- pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" },
- pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" },
- pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" },
- fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr",
- nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" },
- prefetch = { "prefetch", "prefetchw" },
- prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" },
-}
-
-------------------------------------------------------------------------------
-
--- Maps for register names.
-local map_regs = {
- B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
- "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
- B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
- "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
- W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
- "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" },
- D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
- "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" },
- Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" },
- M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
- "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext!
- X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
- "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" },
-}
-local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" }
-
--- Maps for size names.
-local map_sz2n = {
- B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16,
-}
-local map_sz2prefix = {
- B = "byte", W = "word", D = "dword",
- Q = "qword",
- M = "qword", X = "xword",
- F = "dword", G = "qword", -- No need for sizes/register names for these two.
-}
-
-------------------------------------------------------------------------------
-
--- Output a nicely formatted line with an opcode and operands.
-local function putop(ctx, text, operands)
- local code, pos, hex = ctx.code, ctx.pos, ""
- local hmax = ctx.hexdump
- if hmax > 0 then
- for i=ctx.start,pos-1 do
- hex = hex..format("%02X", byte(code, i, i))
- end
- if #hex > hmax then hex = sub(hex, 1, hmax)..". "
- else hex = hex..rep(" ", hmax-#hex+2) end
- end
- if operands then text = text.." "..operands end
- if ctx.o16 then text = "o16 "..text; ctx.o16 = false end
- if ctx.a32 then text = "a32 "..text; ctx.a32 = false end
- if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end
- if ctx.rex then
- local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "")..
- (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "")
- if t ~= "" then text = "rex."..t.." "..text end
- ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
- ctx.rex = false
- end
- if ctx.seg then
- local text2, n = gsub(text, "%[", "["..ctx.seg..":")
- if n == 0 then text = ctx.seg.." "..text else text = text2 end
- ctx.seg = false
- end
- if ctx.lock then text = "lock "..text; ctx.lock = false end
- local imm = ctx.imm
- if imm then
- local sym = ctx.symtab[imm]
- if sym then text = text.."\t->"..sym end
- end
- ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text))
- ctx.mrm = false
- ctx.start = pos
- ctx.imm = nil
-end
-
--- Clear all prefix flags.
-local function clearprefixes(ctx)
- ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false
- ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
- ctx.rex = false; ctx.a32 = false
-end
-
--- Fallback for incomplete opcodes at the end.
-local function incomplete(ctx)
- ctx.pos = ctx.stop+1
- clearprefixes(ctx)
- return putop(ctx, "(incomplete)")
-end
-
--- Fallback for unknown opcodes.
-local function unknown(ctx)
- clearprefixes(ctx)
- return putop(ctx, "(unknown)")
-end
-
--- Return an immediate of the specified size.
-local function getimm(ctx, pos, n)
- if pos+n-1 > ctx.stop then return incomplete(ctx) end
- local code = ctx.code
- if n == 1 then
- local b1 = byte(code, pos, pos)
- return b1
- elseif n == 2 then
- local b1, b2 = byte(code, pos, pos+1)
- return b1+b2*256
- else
- local b1, b2, b3, b4 = byte(code, pos, pos+3)
- local imm = b1+b2*256+b3*65536+b4*16777216
- ctx.imm = imm
- return imm
- end
-end
-
--- Process pattern string and generate the operands.
-local function putpat(ctx, name, pat)
- local operands, regs, sz, mode, sp, rm, sc, rx, sdisp
- local code, pos, stop = ctx.code, ctx.pos, ctx.stop
-
- -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz
- for p in gmatch(pat, ".") do
- local x = nil
- if p == "V" or p == "U" then
- if ctx.rexw then sz = "Q"; ctx.rexw = false
- elseif ctx.o16 then sz = "W"; ctx.o16 = false
- elseif p == "U" and ctx.x64 then sz = "Q"
- else sz = "D" end
- regs = map_regs[sz]
- elseif p == "T" then
- if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end
- regs = map_regs[sz]
- elseif p == "B" then
- sz = "B"
- regs = ctx.rex and map_regs.B64 or map_regs.B
- elseif match(p, "[WDQMXFG]") then
- sz = p
- regs = map_regs[sz]
- elseif p == "P" then
- sz = ctx.o16 and "X" or "M"; ctx.o16 = false
- regs = map_regs[sz]
- elseif p == "S" then
- name = name..lower(sz)
- elseif p == "s" then
- local imm = getimm(ctx, pos, 1); if not imm then return end
- x = imm <= 127 and format("+0x%02x", imm)
- or format("-0x%02x", 256-imm)
- pos = pos+1
- elseif p == "u" then
- local imm = getimm(ctx, pos, 1); if not imm then return end
- x = format("0x%02x", imm)
- pos = pos+1
- elseif p == "w" then
- local imm = getimm(ctx, pos, 2); if not imm then return end
- x = format("0x%x", imm)
- pos = pos+2
- elseif p == "o" then -- [offset]
- if ctx.x64 then
- local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
- local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
- x = format("[0x%08x%08x]", imm2, imm1)
- pos = pos+8
- else
- local imm = getimm(ctx, pos, 4); if not imm then return end
- x = format("[0x%08x]", imm)
- pos = pos+4
- end
- elseif p == "i" or p == "I" then
- local n = map_sz2n[sz]
- if n == 8 and ctx.x64 and p == "I" then
- local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
- local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
- x = format("0x%08x%08x", imm2, imm1)
- else
- if n == 8 then n = 4 end
- local imm = getimm(ctx, pos, n); if not imm then return end
- if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then
- imm = (0xffffffff+1)-imm
- x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm)
- else
- x = format(imm > 65535 and "0x%08x" or "0x%x", imm)
- end
- end
- pos = pos+n
- elseif p == "j" then
- local n = map_sz2n[sz]
- if n == 8 then n = 4 end
- local imm = getimm(ctx, pos, n); if not imm then return end
- if sz == "B" and imm > 127 then imm = imm-256
- elseif imm > 2147483647 then imm = imm-4294967296 end
- pos = pos+n
- imm = imm + pos + ctx.addr
- if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end
- ctx.imm = imm
- if sz == "W" then
- x = format("word 0x%04x", imm%65536)
- elseif ctx.x64 then
- local lo = imm % 0x1000000
- x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo)
- else
- x = format("0x%08x", imm)
- end
- elseif p == "R" then
- local r = byte(code, pos-1, pos-1)%8
- if ctx.rexb then r = r + 8; ctx.rexb = false end
- x = regs[r+1]
- elseif p == "a" then x = regs[1]
- elseif p == "c" then x = "cl"
- elseif p == "d" then x = "dx"
- elseif p == "1" then x = "1"
- else
- if not mode then
- mode = ctx.mrm
- if not mode then
- if pos > stop then return incomplete(ctx) end
- mode = byte(code, pos, pos)
- pos = pos+1
- end
- rm = mode%8; mode = (mode-rm)/8
- sp = mode%8; mode = (mode-sp)/8
- sdisp = ""
- if mode < 3 then
- if rm == 4 then
- if pos > stop then return incomplete(ctx) end
- sc = byte(code, pos, pos)
- pos = pos+1
- rm = sc%8; sc = (sc-rm)/8
- rx = sc%8; sc = (sc-rx)/8
- if ctx.rexx then rx = rx + 8; ctx.rexx = false end
- if rx == 4 then rx = nil end
- end
- if mode > 0 or rm == 5 then
- local dsz = mode
- if dsz ~= 1 then dsz = 4 end
- local disp = getimm(ctx, pos, dsz); if not disp then return end
- if mode == 0 then rm = nil end
- if rm or rx or (not sc and ctx.x64 and not ctx.a32) then
- if dsz == 1 and disp > 127 then
- sdisp = format("-0x%x", 256-disp)
- elseif disp >= 0 and disp <= 0x7fffffff then
- sdisp = format("+0x%x", disp)
- else
- sdisp = format("-0x%x", (0xffffffff+1)-disp)
- end
- else
- sdisp = format(ctx.x64 and not ctx.a32 and
- not (disp >= 0 and disp <= 0x7fffffff)
- and "0xffffffff%08x" or "0x%08x", disp)
- end
- pos = pos+dsz
- end
- end
- if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end
- if ctx.rexr then sp = sp + 8; ctx.rexr = false end
- end
- if p == "m" then
- if mode == 3 then x = regs[rm+1]
- else
- local aregs = ctx.a32 and map_regs.D or ctx.aregs
- local srm, srx = "", ""
- if rm then srm = aregs[rm+1]
- elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end
- ctx.a32 = false
- if rx then
- if rm then srm = srm.."+" end
- srx = aregs[rx+1]
- if sc > 0 then srx = srx.."*"..(2^sc) end
- end
- x = format("[%s%s%s]", srm, srx, sdisp)
- end
- if mode < 3 and
- (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck.
- x = map_sz2prefix[sz].." "..x
- end
- elseif p == "r" then x = regs[sp+1]
- elseif p == "g" then x = map_segregs[sp+1]
- elseif p == "p" then -- Suppress prefix.
- elseif p == "f" then x = "st"..rm
- elseif p == "x" then
- if sp == 0 and ctx.lock and not ctx.x64 then
- x = "CR8"; ctx.lock = false
- else
- x = "CR"..sp
- end
- elseif p == "y" then x = "DR"..sp
- elseif p == "z" then x = "TR"..sp
- elseif p == "t" then
- else
- error("bad pattern `"..pat.."'")
- end
- end
- if x then operands = operands and operands..", "..x or x end
- end
- ctx.pos = pos
- return putop(ctx, name, operands)
-end
-
--- Forward declaration.
-local map_act
-
--- Fetch and cache MRM byte.
-local function getmrm(ctx)
- local mrm = ctx.mrm
- if not mrm then
- local pos = ctx.pos
- if pos > ctx.stop then return nil end
- mrm = byte(ctx.code, pos, pos)
- ctx.pos = pos+1
- ctx.mrm = mrm
- end
- return mrm
-end
-
--- Dispatch to handler depending on pattern.
-local function dispatch(ctx, opat, patgrp)
- if not opat then return unknown(ctx) end
- if match(opat, "%|") then -- MMX/SSE variants depending on prefix.
- local p
- if ctx.rep then
- p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)"
- ctx.rep = false
- elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false
- else p = "^[^%|]*" end
- opat = match(opat, p)
- if not opat then return unknown(ctx) end
--- ctx.rep = false; ctx.o16 = false
- --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi]
- --XXX remove in branches?
- end
- if match(opat, "%$") then -- reg$mem variants.
- local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
- opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)")
- if opat == "" then return unknown(ctx) end
- end
- if opat == "" then return unknown(ctx) end
- local name, pat = match(opat, "^([a-z0-9 ]*)(.*)")
- if pat == "" and patgrp then pat = patgrp end
- return map_act[sub(pat, 1, 1)](ctx, name, pat)
-end
-
--- Get a pattern from an opcode map and dispatch to handler.
-local function dispatchmap(ctx, opcmap)
- local pos = ctx.pos
- local opat = opcmap[byte(ctx.code, pos, pos)]
- pos = pos + 1
- ctx.pos = pos
- return dispatch(ctx, opat)
-end
-
--- Map for action codes. The key is the first char after the name.
-map_act = {
- -- Simple opcodes without operands.
- [""] = function(ctx, name, pat)
- return putop(ctx, name)
- end,
-
- -- Operand size chars fall right through.
- B = putpat, W = putpat, D = putpat, Q = putpat,
- V = putpat, U = putpat, T = putpat,
- M = putpat, X = putpat, P = putpat,
- F = putpat, G = putpat,
-
- -- Collect prefixes.
- [":"] = function(ctx, name, pat)
- ctx[pat == ":" and name or sub(pat, 2)] = name
- if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes.
- end,
-
- -- Chain to special handler specified by name.
- ["*"] = function(ctx, name, pat)
- return map_act[name](ctx, name, sub(pat, 2))
- end,
-
- -- Use named subtable for opcode group.
- ["!"] = function(ctx, name, pat)
- local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
- return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2))
- end,
-
- -- o16,o32[,o64] variants.
- sz = function(ctx, name, pat)
- if ctx.o16 then ctx.o16 = false
- else
- pat = match(pat, ",(.*)")
- if ctx.rexw then
- local p = match(pat, ",(.*)")
- if p then pat = p; ctx.rexw = false end
- end
- end
- pat = match(pat, "^[^,]*")
- return dispatch(ctx, pat)
- end,
-
- -- Two-byte opcode dispatch.
- opc2 = function(ctx, name, pat)
- return dispatchmap(ctx, map_opc2)
- end,
-
- -- Three-byte opcode dispatch.
- opc3 = function(ctx, name, pat)
- return dispatchmap(ctx, map_opc3[pat])
- end,
-
- -- VMX/SVM dispatch.
- vm = function(ctx, name, pat)
- return dispatch(ctx, map_opcvm[ctx.mrm])
- end,
-
- -- Floating point opcode dispatch.
- fp = function(ctx, name, pat)
- local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
- local rm = mrm%8
- local idx = pat*8 + ((mrm-rm)/8)%8
- if mrm >= 192 then idx = idx + 64 end
- local opat = map_opcfp[idx]
- if type(opat) == "table" then opat = opat[rm+1] end
- return dispatch(ctx, opat)
- end,
-
- -- REX prefix.
- rex = function(ctx, name, pat)
- if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed.
- for p in gmatch(pat, ".") do ctx["rex"..p] = true end
- ctx.rex = true
- end,
-
- -- Special case for nop with REX prefix.
- nop = function(ctx, name, pat)
- return dispatch(ctx, ctx.rex and pat or "nop")
- end,
-}
-
-------------------------------------------------------------------------------
-
--- Disassemble a block of code.
-local function disass_block(ctx, ofs, len)
- if not ofs then ofs = 0 end
- local stop = len and ofs+len or #ctx.code
- ofs = ofs + 1
- ctx.start = ofs
- ctx.pos = ofs
- ctx.stop = stop
- ctx.imm = nil
- ctx.mrm = false
- clearprefixes(ctx)
- while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end
- if ctx.pos ~= ctx.start then incomplete(ctx) end
-end
-
--- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
-local function create_(code, addr, out)
- local ctx = {}
- ctx.code = code
- ctx.addr = (addr or 0) - 1
- ctx.out = out or io.write
- ctx.symtab = {}
- ctx.disass = disass_block
- ctx.hexdump = 16
- ctx.x64 = false
- ctx.map1 = map_opc1_32
- ctx.aregs = map_regs.D
- return ctx
-end
-
-local function create64_(code, addr, out)
- local ctx = create_(code, addr, out)
- ctx.x64 = true
- ctx.map1 = map_opc1_64
- ctx.aregs = map_regs.Q
- return ctx
-end
-
--- Simple API: disassemble code (a string) at address and output via out.
-local function disass_(code, addr, out)
- create_(code, addr, out):disass()
-end
-
-local function disass64_(code, addr, out)
- create64_(code, addr, out):disass()
-end
-
--- Return register name for RID.
-local function regname_(r)
- if r < 8 then return map_regs.D[r+1] end
- return map_regs.X[r-7]
-end
-
-local function regname64_(r)
- if r < 16 then return map_regs.Q[r+1] end
- return map_regs.X[r-15]
-end
-
--- Public module functions.
-module(...)
-
-create = create_
-create64 = create64_
-disass = disass_
-disass64 = disass64_
-regname = regname_
-regname64 = regname64_
-
+----------------------------------------------------------------------------
+-- LuaJIT x86/x64 disassembler module.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- Sending small code snippets to an external disassembler and mixing the
+-- output with our own stuff was too fragile. So I had to bite the bullet
+-- and write yet another x86 disassembler. Oh well ...
+--
+-- The output format is very similar to what ndisasm generates. But it has
+-- been developed independently by looking at the opcode tables from the
+-- Intel and AMD manuals. The supported instruction set is quite extensive
+-- and reflects what a current generation Intel or AMD CPU implements in
+-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3,
+-- SSE4.1, SSE4.2, SSE4a and even privileged and hypervisor (VMX/SVM)
+-- instructions.
+--
+-- Notes:
+-- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported.
+-- * No attempt at optimization has been made -- it's fast enough for my needs.
+-- * The public API may change when more architectures are added.
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local lower, rep = string.lower, string.rep
+
+-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on.
+local map_opc1_32 = {
+--0x
+[0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es",
+"orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*",
+--1x
+"adcBmr","adcVmr","adcBrm","adcVrm","adcBai","adcVai","push ss","pop ss",
+"sbbBmr","sbbVmr","sbbBrm","sbbVrm","sbbBai","sbbVai","push ds","pop ds",
+--2x
+"andBmr","andVmr","andBrm","andVrm","andBai","andVai","es:seg","daa",
+"subBmr","subVmr","subBrm","subVrm","subBai","subVai","cs:seg","das",
+--3x
+"xorBmr","xorVmr","xorBrm","xorVrm","xorBai","xorVai","ss:seg","aaa",
+"cmpBmr","cmpVmr","cmpBrm","cmpVrm","cmpBai","cmpVai","ds:seg","aas",
+--4x
+"incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR",
+"decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR",
+--5x
+"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR",
+"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR",
+--6x
+"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr",
+"fs:seg","gs:seg","o16:","a16",
+"pushUi","imulVrmi","pushBs","imulVrms",
+"insb","insVS","outsb","outsVS",
+--7x
+"joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj",
+"jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj",
+--8x
+"arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms",
+"testBmr","testVmr","xchgBrm","xchgVrm",
+"movBmr","movVmr","movBrm","movVrm",
+"movVmg","leaVrm","movWgm","popUm",
+--9x
+"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR",
+"xchgVaR","xchgVaR","xchgVaR","xchgVaR",
+"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait",
+"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf",
+--Ax
+"movBao","movVao","movBoa","movVoa",
+"movsb","movsVS","cmpsb","cmpsVS",
+"testBai","testVai","stosb","stosVS",
+"lodsb","lodsVS","scasb","scasVS",
+--Bx
+"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi",
+"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI",
+--Cx
+"shift!Bmu","shift!Vmu","retBw","ret","$lesVrm","$ldsVrm","movBmi","movVmi",
+"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS",
+--Dx
+"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb",
+"fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7",
+--Ex
+"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj",
+"inBau","inVau","outBua","outVua",
+"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda",
+--Fx
+"lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm",
+"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm",
+}
+assert(#map_opc1_32 == 255)
+
+-- Map for 1st opcode byte in 64 bit mode (overrides only).
+local map_opc1_64 = setmetatable({
+ [0x06]=false, [0x07]=false, [0x0e]=false,
+ [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false,
+ [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false,
+ [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:",
+ [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb",
+ [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb",
+ [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb",
+ [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb",
+ [0x82]=false, [0x9a]=false, [0xc4]=false, [0xc5]=false, [0xce]=false,
+ [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false,
+}, { __index = map_opc1_32 })
+
+-- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you.
+-- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2
+local map_opc2 = {
+--0x
+[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret",
+"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu",
+--1x
+"movupsXrm|movssXrm|movupdXrm|movsdXrm",
+"movupsXmr|movssXmr|movupdXmr|movsdXmr",
+"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm",
+"movlpsXmr||movlpdXmr",
+"unpcklpsXrm||unpcklpdXrm",
+"unpckhpsXrm||unpckhpdXrm",
+"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm",
+"movhpsXmr||movhpdXmr",
+"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm",
+"hintnopVm","hintnopVm","hintnopVm","hintnopVm",
+--2x
+"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil,
+"movapsXrm||movapdXrm",
+"movapsXmr||movapdXmr",
+"cvtpi2psXrMm|cvtsi2ssXrVmt|cvtpi2pdXrMm|cvtsi2sdXrVmt",
+"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr",
+"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm",
+"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm",
+"ucomissXrm||ucomisdXrm",
+"comissXrm||comisdXrm",
+--3x
+"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec",
+"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil,
+--4x
+"cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm",
+"cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm",
+"cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm",
+"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm",
+--5x
+"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm",
+"rsqrtpsXrm|rsqrtssXrm","rcppsXrm|rcpssXrm",
+"andpsXrm||andpdXrm","andnpsXrm||andnpdXrm",
+"orpsXrm||orpdXrm","xorpsXrm||xorpdXrm",
+"addpsXrm|addssXrm|addpdXrm|addsdXrm","mulpsXrm|mulssXrm|mulpdXrm|mulsdXrm",
+"cvtps2pdXrm|cvtss2sdXrm|cvtpd2psXrm|cvtsd2ssXrm",
+"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm",
+"subpsXrm|subssXrm|subpdXrm|subsdXrm","minpsXrm|minssXrm|minpdXrm|minsdXrm",
+"divpsXrm|divssXrm|divpdXrm|divsdXrm","maxpsXrm|maxssXrm|maxpdXrm|maxsdXrm",
+--6x
+"punpcklbwPrm","punpcklwdPrm","punpckldqPrm","packsswbPrm",
+"pcmpgtbPrm","pcmpgtwPrm","pcmpgtdPrm","packuswbPrm",
+"punpckhbwPrm","punpckhwdPrm","punpckhdqPrm","packssdwPrm",
+"||punpcklqdqXrm","||punpckhqdqXrm",
+"movPrVSm","movqMrm|movdquXrm|movdqaXrm",
+--7x
+"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pmu",
+"pshiftd!Pmu","pshiftq!Mmu||pshiftdq!Xmu",
+"pcmpeqbPrm","pcmpeqwPrm","pcmpeqdPrm","emms|",
+"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$",
+nil,nil,
+"||haddpdXrm|haddpsXrm","||hsubpdXrm|hsubpsXrm",
+"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr",
+--8x
+"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj",
+"jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj",
+--9x
+"setoBm","setnoBm","setbBm","setnbBm","setzBm","setnzBm","setbeBm","setaBm",
+"setsBm","setnsBm","setpeBm","setpoBm","setlBm","setgeBm","setleBm","setgBm",
+--Ax
+"push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil,
+"push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm",
+--Bx
+"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr",
+"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt",
+"|popcntVrm","ud2Dp","bt!Vmu","btcVmr",
+"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt",
+--Cx
+"xaddBmr","xaddVmr",
+"cmppsXrmu|cmpssXrmu|cmppdXrmu|cmpsdXrmu","$movntiVmr|",
+"pinsrwPrWmu","pextrwDrPmu",
+"shufpsXrmu||shufpdXrmu","$cmpxchg!Qmp",
+"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR",
+--Dx
+"||addsubpdXrm|addsubpsXrm","psrlwPrm","psrldPrm","psrlqPrm",
+"paddqPrm","pmullwPrm",
+"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm",
+"psubusbPrm","psubuswPrm","pminubPrm","pandPrm",
+"paddusbPrm","padduswPrm","pmaxubPrm","pandnPrm",
+--Ex
+"pavgbPrm","psrawPrm","psradPrm","pavgwPrm",
+"pmulhuwPrm","pmulhwPrm",
+"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr",
+"psubsbPrm","psubswPrm","pminswPrm","porPrm",
+"paddsbPrm","paddswPrm","pmaxswPrm","pxorPrm",
+--Fx
+"|||lddquXrm","psllwPrm","pslldPrm","psllqPrm",
+"pmuludqPrm","pmaddwdPrm","psadbwPrm","maskmovqMrm||maskmovdquXrm$",
+"psubbPrm","psubwPrm","psubdPrm","psubqPrm",
+"paddbPrm","paddwPrm","padddPrm","ud",
+}
+assert(map_opc2[255] == "ud")
+
+-- Map for three-byte opcodes. Can't wait for their next invention.
+local map_opc3 = {
+["38"] = { -- [66] 0f 38 xx
+--0x
+[0]="pshufbPrm","phaddwPrm","phadddPrm","phaddswPrm",
+"pmaddubswPrm","phsubwPrm","phsubdPrm","phsubswPrm",
+"psignbPrm","psignwPrm","psigndPrm","pmulhrswPrm",
+nil,nil,nil,nil,
+--1x
+"||pblendvbXrma",nil,nil,nil,
+"||blendvpsXrma","||blendvpdXrma",nil,"||ptestXrm",
+nil,nil,nil,nil,
+"pabsbPrm","pabswPrm","pabsdPrm",nil,
+--2x
+"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm",
+"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil,
+"||pmuldqXrm","||pcmpeqqXrm","||$movntdqaXrm","||packusdwXrm",
+nil,nil,nil,nil,
+--3x
+"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm",
+"||pmovzxwqXrm","||pmovzxdqXrm",nil,"||pcmpgtqXrm",
+"||pminsbXrm","||pminsdXrm","||pminuwXrm","||pminudXrm",
+"||pmaxsbXrm","||pmaxsdXrm","||pmaxuwXrm","||pmaxudXrm",
+--4x
+"||pmulddXrm","||phminposuwXrm",
+--Fx
+[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt",
+},
+
+["3a"] = { -- [66] 0f 3a xx
+--0x
+[0x00]=nil,nil,nil,nil,nil,nil,nil,nil,
+"||roundpsXrmu","||roundpdXrmu","||roundssXrmu","||roundsdXrmu",
+"||blendpsXrmu","||blendpdXrmu","||pblendwXrmu","palignrPrmu",
+--1x
+nil,nil,nil,nil,
+"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru",
+nil,nil,nil,nil,nil,nil,nil,nil,
+--2x
+"||pinsrbXrVmu","||insertpsXrmu","||pinsrXrVmuS",nil,
+--4x
+[0x40] = "||dppsXrmu",
+[0x41] = "||dppdXrmu",
+[0x42] = "||mpsadbwXrmu",
+--6x
+[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu",
+[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu",
+},
+}
+
+-- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands).
+local map_opcvm = {
+[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff",
+[0xc8]="monitor",[0xc9]="mwait",
+[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave",
+[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga",
+[0xf8]="swapgs",[0xf9]="rdtscp",
+}
+
+-- Map for FP opcodes. And you thought stack machines are simple?
+local map_opcfp = {
+-- D8-DF 00-BF: opcodes with a memory operand.
+-- D8
+[0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm",
+"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm",
+-- DA
+"fiaddDm","fimulDm","ficomDm","ficompDm",
+"fisubDm","fisubrDm","fidivDm","fidivrDm",
+-- DB
+"fildDm","fisttpDm","fistDm","fistpDm",nil,"fld twordFmp",nil,"fstp twordFmp",
+-- DC
+"faddGm","fmulGm","fcomGm","fcompGm","fsubGm","fsubrGm","fdivGm","fdivrGm",
+-- DD
+"fldGm","fisttpQm","fstGm","fstpGm","frstorDmp",nil,"fnsaveDmp","fnstswWm",
+-- DE
+"fiaddWm","fimulWm","ficomWm","ficompWm",
+"fisubWm","fisubrWm","fidivWm","fidivrWm",
+-- DF
+"fildWm","fisttpWm","fistWm","fistpWm",
+"fbld twordFmp","fildQm","fbstp twordFmp","fistpQm",
+-- xx C0-FF: opcodes with a pseudo-register operand.
+-- D8
+"faddFf","fmulFf","fcomFf","fcompFf","fsubFf","fsubrFf","fdivFf","fdivrFf",
+-- D9
+"fldFf","fxchFf",{"fnop"},nil,
+{"fchs","fabs",nil,nil,"ftst","fxam"},
+{"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz"},
+{"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp"},
+{"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"},
+-- DA
+"fcmovbFf","fcmoveFf","fcmovbeFf","fcmovuFf",nil,{nil,"fucompp"},nil,nil,
+-- DB
+"fcmovnbFf","fcmovneFf","fcmovnbeFf","fcmovnuFf",
+{nil,nil,"fnclex","fninit"},"fucomiFf","fcomiFf",nil,
+-- DC
+"fadd toFf","fmul toFf",nil,nil,
+"fsub toFf","fsubr toFf","fdivr toFf","fdiv toFf",
+-- DD
+"ffreeFf",nil,"fstFf","fstpFf","fucomFf","fucompFf",nil,nil,
+-- DE
+"faddpFf","fmulpFf",nil,{nil,"fcompp"},
+"fsubrpFf","fsubpFf","fdivrpFf","fdivpFf",
+-- DF
+nil,nil,nil,nil,{"fnstsw ax"},"fucomipFf","fcomipFf",nil,
+}
+assert(map_opcfp[126] == "fcomipFf")
+
+-- Map for opcode groups. The subkey is sp from the ModRM byte.
+local map_opcgroup = {
+ arith = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" },
+ shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" },
+ testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" },
+ testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" },
+ incb = { "inc", "dec" },
+ incd = { "inc", "dec", "callUmp", "$call farDmp",
+ "jmpUmp", "$jmp farDmp", "pushUm" },
+ sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" },
+ sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt",
+ "smsw", nil, "lmsw", "vm*$invlpg" },
+ bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" },
+ cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil,
+ nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" },
+ pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" },
+ pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" },
+ pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" },
+ pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" },
+ fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr",
+ nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" },
+ prefetch = { "prefetch", "prefetchw" },
+ prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" },
+}
+
+------------------------------------------------------------------------------
+
+-- Maps for register names.
+local map_regs = {
+ B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
+ B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
+ W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
+ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" },
+ D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" },
+ Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" },
+ M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext!
+ X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" },
+}
+local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" }
+
+-- Maps for size names.
+local map_sz2n = {
+ B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16,
+}
+local map_sz2prefix = {
+ B = "byte", W = "word", D = "dword",
+ Q = "qword",
+ M = "qword", X = "xword",
+ F = "dword", G = "qword", -- No need for sizes/register names for these two.
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local code, pos, hex = ctx.code, ctx.pos, ""
+ local hmax = ctx.hexdump
+ if hmax > 0 then
+ for i=ctx.start,pos-1 do
+ hex = hex..format("%02X", byte(code, i, i))
+ end
+ if #hex > hmax then hex = sub(hex, 1, hmax)..". "
+ else hex = hex..rep(" ", hmax-#hex+2) end
+ end
+ if operands then text = text.." "..operands end
+ if ctx.o16 then text = "o16 "..text; ctx.o16 = false end
+ if ctx.a32 then text = "a32 "..text; ctx.a32 = false end
+ if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end
+ if ctx.rex then
+ local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "")..
+ (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "")
+ if t ~= "" then text = "rex."..t.." "..text end
+ ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
+ ctx.rex = false
+ end
+ if ctx.seg then
+ local text2, n = gsub(text, "%[", "["..ctx.seg..":")
+ if n == 0 then text = ctx.seg.." "..text else text = text2 end
+ ctx.seg = false
+ end
+ if ctx.lock then text = "lock "..text; ctx.lock = false end
+ local imm = ctx.imm
+ if imm then
+ local sym = ctx.symtab[imm]
+ if sym then text = text.."\t->"..sym end
+ end
+ ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text))
+ ctx.mrm = false
+ ctx.start = pos
+ ctx.imm = nil
+end
+
+-- Clear all prefix flags.
+local function clearprefixes(ctx)
+ ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false
+ ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
+ ctx.rex = false; ctx.a32 = false
+end
+
+-- Fallback for incomplete opcodes at the end.
+local function incomplete(ctx)
+ ctx.pos = ctx.stop+1
+ clearprefixes(ctx)
+ return putop(ctx, "(incomplete)")
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ clearprefixes(ctx)
+ return putop(ctx, "(unknown)")
+end
+
+-- Return an immediate of the specified size.
+local function getimm(ctx, pos, n)
+ if pos+n-1 > ctx.stop then return incomplete(ctx) end
+ local code = ctx.code
+ if n == 1 then
+ local b1 = byte(code, pos, pos)
+ return b1
+ elseif n == 2 then
+ local b1, b2 = byte(code, pos, pos+1)
+ return b1+b2*256
+ else
+ local b1, b2, b3, b4 = byte(code, pos, pos+3)
+ local imm = b1+b2*256+b3*65536+b4*16777216
+ ctx.imm = imm
+ return imm
+ end
+end
+
+-- Process pattern string and generate the operands.
+local function putpat(ctx, name, pat)
+ local operands, regs, sz, mode, sp, rm, sc, rx, sdisp
+ local code, pos, stop = ctx.code, ctx.pos, ctx.stop
+
+ -- Chars used: 1DFGIMPQRSTUVWXacdfgijmoprstuwxyz
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "V" or p == "U" then
+ if ctx.rexw then sz = "Q"; ctx.rexw = false
+ elseif ctx.o16 then sz = "W"; ctx.o16 = false
+ elseif p == "U" and ctx.x64 then sz = "Q"
+ else sz = "D" end
+ regs = map_regs[sz]
+ elseif p == "T" then
+ if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end
+ regs = map_regs[sz]
+ elseif p == "B" then
+ sz = "B"
+ regs = ctx.rex and map_regs.B64 or map_regs.B
+ elseif match(p, "[WDQMXFG]") then
+ sz = p
+ regs = map_regs[sz]
+ elseif p == "P" then
+ sz = ctx.o16 and "X" or "M"; ctx.o16 = false
+ regs = map_regs[sz]
+ elseif p == "S" then
+ name = name..lower(sz)
+ elseif p == "s" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = imm <= 127 and format("+0x%02x", imm)
+ or format("-0x%02x", 256-imm)
+ pos = pos+1
+ elseif p == "u" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = format("0x%02x", imm)
+ pos = pos+1
+ elseif p == "w" then
+ local imm = getimm(ctx, pos, 2); if not imm then return end
+ x = format("0x%x", imm)
+ pos = pos+2
+ elseif p == "o" then -- [offset]
+ if ctx.x64 then
+ local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
+ local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
+ x = format("[0x%08x%08x]", imm2, imm1)
+ pos = pos+8
+ else
+ local imm = getimm(ctx, pos, 4); if not imm then return end
+ x = format("[0x%08x]", imm)
+ pos = pos+4
+ end
+ elseif p == "i" or p == "I" then
+ local n = map_sz2n[sz]
+ if n == 8 and ctx.x64 and p == "I" then
+ local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
+ local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
+ x = format("0x%08x%08x", imm2, imm1)
+ else
+ if n == 8 then n = 4 end
+ local imm = getimm(ctx, pos, n); if not imm then return end
+ if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then
+ imm = (0xffffffff+1)-imm
+ x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm)
+ else
+ x = format(imm > 65535 and "0x%08x" or "0x%x", imm)
+ end
+ end
+ pos = pos+n
+ elseif p == "j" then
+ local n = map_sz2n[sz]
+ if n == 8 then n = 4 end
+ local imm = getimm(ctx, pos, n); if not imm then return end
+ if sz == "B" and imm > 127 then imm = imm-256
+ elseif imm > 2147483647 then imm = imm-4294967296 end
+ pos = pos+n
+ imm = imm + pos + ctx.addr
+ if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end
+ ctx.imm = imm
+ if sz == "W" then
+ x = format("word 0x%04x", imm%65536)
+ elseif ctx.x64 then
+ local lo = imm % 0x1000000
+ x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo)
+ else
+ x = format("0x%08x", imm)
+ end
+ elseif p == "R" then
+ local r = byte(code, pos-1, pos-1)%8
+ if ctx.rexb then r = r + 8; ctx.rexb = false end
+ x = regs[r+1]
+ elseif p == "a" then x = regs[1]
+ elseif p == "c" then x = "cl"
+ elseif p == "d" then x = "dx"
+ elseif p == "1" then x = "1"
+ else
+ if not mode then
+ mode = ctx.mrm
+ if not mode then
+ if pos > stop then return incomplete(ctx) end
+ mode = byte(code, pos, pos)
+ pos = pos+1
+ end
+ rm = mode%8; mode = (mode-rm)/8
+ sp = mode%8; mode = (mode-sp)/8
+ sdisp = ""
+ if mode < 3 then
+ if rm == 4 then
+ if pos > stop then return incomplete(ctx) end
+ sc = byte(code, pos, pos)
+ pos = pos+1
+ rm = sc%8; sc = (sc-rm)/8
+ rx = sc%8; sc = (sc-rx)/8
+ if ctx.rexx then rx = rx + 8; ctx.rexx = false end
+ if rx == 4 then rx = nil end
+ end
+ if mode > 0 or rm == 5 then
+ local dsz = mode
+ if dsz ~= 1 then dsz = 4 end
+ local disp = getimm(ctx, pos, dsz); if not disp then return end
+ if mode == 0 then rm = nil end
+ if rm or rx or (not sc and ctx.x64 and not ctx.a32) then
+ if dsz == 1 and disp > 127 then
+ sdisp = format("-0x%x", 256-disp)
+ elseif disp >= 0 and disp <= 0x7fffffff then
+ sdisp = format("+0x%x", disp)
+ else
+ sdisp = format("-0x%x", (0xffffffff+1)-disp)
+ end
+ else
+ sdisp = format(ctx.x64 and not ctx.a32 and
+ not (disp >= 0 and disp <= 0x7fffffff)
+ and "0xffffffff%08x" or "0x%08x", disp)
+ end
+ pos = pos+dsz
+ end
+ end
+ if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end
+ if ctx.rexr then sp = sp + 8; ctx.rexr = false end
+ end
+ if p == "m" then
+ if mode == 3 then x = regs[rm+1]
+ else
+ local aregs = ctx.a32 and map_regs.D or ctx.aregs
+ local srm, srx = "", ""
+ if rm then srm = aregs[rm+1]
+ elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end
+ ctx.a32 = false
+ if rx then
+ if rm then srm = srm.."+" end
+ srx = aregs[rx+1]
+ if sc > 0 then srx = srx.."*"..(2^sc) end
+ end
+ x = format("[%s%s%s]", srm, srx, sdisp)
+ end
+ if mode < 3 and
+ (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck.
+ x = map_sz2prefix[sz].." "..x
+ end
+ elseif p == "r" then x = regs[sp+1]
+ elseif p == "g" then x = map_segregs[sp+1]
+ elseif p == "p" then -- Suppress prefix.
+ elseif p == "f" then x = "st"..rm
+ elseif p == "x" then
+ if sp == 0 and ctx.lock and not ctx.x64 then
+ x = "CR8"; ctx.lock = false
+ else
+ x = "CR"..sp
+ end
+ elseif p == "y" then x = "DR"..sp
+ elseif p == "z" then x = "TR"..sp
+ elseif p == "t" then
+ else
+ error("bad pattern `"..pat.."'")
+ end
+ end
+ if x then operands = operands and operands..", "..x or x end
+ end
+ ctx.pos = pos
+ return putop(ctx, name, operands)
+end
+
+-- Forward declaration.
+local map_act
+
+-- Fetch and cache MRM byte.
+local function getmrm(ctx)
+ local mrm = ctx.mrm
+ if not mrm then
+ local pos = ctx.pos
+ if pos > ctx.stop then return nil end
+ mrm = byte(ctx.code, pos, pos)
+ ctx.pos = pos+1
+ ctx.mrm = mrm
+ end
+ return mrm
+end
+
+-- Dispatch to handler depending on pattern.
+local function dispatch(ctx, opat, patgrp)
+ if not opat then return unknown(ctx) end
+ if match(opat, "%|") then -- MMX/SSE variants depending on prefix.
+ local p
+ if ctx.rep then
+ p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)"
+ ctx.rep = false
+ elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false
+ else p = "^[^%|]*" end
+ opat = match(opat, p)
+ if not opat then return unknown(ctx) end
+-- ctx.rep = false; ctx.o16 = false
+ --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi]
+ --XXX remove in branches?
+ end
+ if match(opat, "%$") then -- reg$mem variants.
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)")
+ if opat == "" then return unknown(ctx) end
+ end
+ if opat == "" then return unknown(ctx) end
+ local name, pat = match(opat, "^([a-z0-9 ]*)(.*)")
+ if pat == "" and patgrp then pat = patgrp end
+ return map_act[sub(pat, 1, 1)](ctx, name, pat)
+end
+
+-- Get a pattern from an opcode map and dispatch to handler.
+local function dispatchmap(ctx, opcmap)
+ local pos = ctx.pos
+ local opat = opcmap[byte(ctx.code, pos, pos)]
+ pos = pos + 1
+ ctx.pos = pos
+ return dispatch(ctx, opat)
+end
+
+-- Map for action codes. The key is the first char after the name.
+map_act = {
+ -- Simple opcodes without operands.
+ [""] = function(ctx, name, pat)
+ return putop(ctx, name)
+ end,
+
+ -- Operand size chars fall right through.
+ B = putpat, W = putpat, D = putpat, Q = putpat,
+ V = putpat, U = putpat, T = putpat,
+ M = putpat, X = putpat, P = putpat,
+ F = putpat, G = putpat,
+
+ -- Collect prefixes.
+ [":"] = function(ctx, name, pat)
+ ctx[pat == ":" and name or sub(pat, 2)] = name
+ if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes.
+ end,
+
+ -- Chain to special handler specified by name.
+ ["*"] = function(ctx, name, pat)
+ return map_act[name](ctx, name, sub(pat, 2))
+ end,
+
+ -- Use named subtable for opcode group.
+ ["!"] = function(ctx, name, pat)
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2))
+ end,
+
+ -- o16,o32[,o64] variants.
+ sz = function(ctx, name, pat)
+ if ctx.o16 then ctx.o16 = false
+ else
+ pat = match(pat, ",(.*)")
+ if ctx.rexw then
+ local p = match(pat, ",(.*)")
+ if p then pat = p; ctx.rexw = false end
+ end
+ end
+ pat = match(pat, "^[^,]*")
+ return dispatch(ctx, pat)
+ end,
+
+ -- Two-byte opcode dispatch.
+ opc2 = function(ctx, name, pat)
+ return dispatchmap(ctx, map_opc2)
+ end,
+
+ -- Three-byte opcode dispatch.
+ opc3 = function(ctx, name, pat)
+ return dispatchmap(ctx, map_opc3[pat])
+ end,
+
+ -- VMX/SVM dispatch.
+ vm = function(ctx, name, pat)
+ return dispatch(ctx, map_opcvm[ctx.mrm])
+ end,
+
+ -- Floating point opcode dispatch.
+ fp = function(ctx, name, pat)
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ local rm = mrm%8
+ local idx = pat*8 + ((mrm-rm)/8)%8
+ if mrm >= 192 then idx = idx + 64 end
+ local opat = map_opcfp[idx]
+ if type(opat) == "table" then opat = opat[rm+1] end
+ return dispatch(ctx, opat)
+ end,
+
+ -- REX prefix.
+ rex = function(ctx, name, pat)
+ if ctx.rex then return unknown(ctx) end -- Only 1 REX prefix allowed.
+ for p in gmatch(pat, ".") do ctx["rex"..p] = true end
+ ctx.rex = true
+ end,
+
+ -- Special case for nop with REX prefix.
+ nop = function(ctx, name, pat)
+ return dispatch(ctx, ctx.rex and pat or "nop")
+ end,
+}
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ofs = ofs + 1
+ ctx.start = ofs
+ ctx.pos = ofs
+ ctx.stop = stop
+ ctx.imm = nil
+ ctx.mrm = false
+ clearprefixes(ctx)
+ while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end
+ if ctx.pos ~= ctx.start then incomplete(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create_(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = (addr or 0) - 1
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 16
+ ctx.x64 = false
+ ctx.map1 = map_opc1_32
+ ctx.aregs = map_regs.D
+ return ctx
+end
+
+local function create64_(code, addr, out)
+ local ctx = create_(code, addr, out)
+ ctx.x64 = true
+ ctx.map1 = map_opc1_64
+ ctx.aregs = map_regs.Q
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass_(code, addr, out)
+ create_(code, addr, out):disass()
+end
+
+local function disass64_(code, addr, out)
+ create64_(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname_(r)
+ if r < 8 then return map_regs.D[r+1] end
+ return map_regs.X[r-7]
+end
+
+local function regname64_(r)
+ if r < 16 then return map_regs.Q[r+1] end
+ return map_regs.X[r-15]
+end
+
+-- Public module functions.
+module(...)
+
+create = create_
+create64 = create64_
+disass = disass_
+disass64 = disass64_
+regname = regname_
+regname64 = regname64_
+
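The disassembler module whose restored source ends above closes with its public exports (create, create64, disass, disass64, regname, regname64). As a point of reference only — the require path "jit.dis_x86", the byte values and the address below are illustrative assumptions, not part of this change — the simple and extended APIs can be exercised like this:

  -- Illustrative sketch (not part of the diff): decode a few raw x86 bytes.
  local dis = require("jit.dis_x86")
  local code = string.char(0x55, 0x89, 0xE5, 0x5D, 0xC3)  -- push ebp; mov ebp,esp; pop ebp; ret
  dis.disass(code, 0x400000)          -- simple API: prints one line per instruction via io.write
  local ctx = dis.create(code, 0x400000)
  ctx.hexdump = 8                     -- narrow the hex column (default is 16)
  ctx:disass(0, #code)                -- extended API: ctx:disass(ofs, len)

The 64-bit entry points create64/disass64 follow the same pattern, only switching to map_opc1_64 and 64-bit address registers.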
diff --git a/3rdparty/lua/src/jit/dump.lua b/3rdparty/lua/src/jit/dump.lua
index d15c528..7441b74 100644
--- a/3rdparty/lua/src/jit/dump.lua
+++ b/3rdparty/lua/src/jit/dump.lua
@@ -1,7 +1,7 @@
----------------------------------------------------------------------------
-- LuaJIT compiler dump module.
--
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
-- Released under the MIT license. See Copyright Notice in luajit.h
----------------------------------------------------------------------------
--
@@ -36,7 +36,6 @@
-- * m Dump the generated machine code.
-- x Print each taken trace exit.
-- X Print each taken trace exit and the contents of all registers.
--- a Print the IR of aborted traces, too.
--
-- The output format can be set with the following characters:
--
@@ -55,7 +54,7 @@
-- Cache some library functions and objects.
local jit = require("jit")
-assert(jit.version_num == 20004, "LuaJIT core/library version mismatch")
+assert(jit.version_num == 20002, "LuaJIT core/library version mismatch")
local jutil = require("jit.util")
local vmdef = require("jit.vmdef")
local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc
@@ -547,8 +546,10 @@ local function dump_trace(what, tr, func, pc, otr, oex)
out:write("---- TRACE ", tr, " ", what)
if otr then out:write(" ", otr, "/", oex) end
out:write(" ", fmtfunc(func, pc), "\n")
+ recprefix = ""
elseif what == "stop" or what == "abort" then
out:write("---- TRACE ", tr, " ", what)
+ recprefix = nil
if what == "abort" then
out:write(" ", fmtfunc(func, pc), " -- ", fmterr(otr, oex), "\n")
else
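The dump.lua hunk above touches the option-letter list (m, x, X; the a flag is dropped by this backout) and the version assertion. Assuming jit.dump exposes on(options, outfile) and off() analogous to the jit.v module shown next — the option string and file name here are illustrative only — programmatic use might look like:

  -- Illustrative sketch (assumed API, mirroring jit.v below): dump generated
  -- machine code (m) and taken trace exits (x) to a file while a workload runs.
  local dump = require("jit.dump")
  dump.on("mx", "jit-dump.txt")
  -- ... run the code to be traced ...
  dump.off()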
diff --git a/3rdparty/lua/src/jit/v.lua b/3rdparty/lua/src/jit/v.lua
index 8e55d8d..d7df712 100644
--- a/3rdparty/lua/src/jit/v.lua
+++ b/3rdparty/lua/src/jit/v.lua
@@ -1,167 +1,167 @@
-----------------------------------------------------------------------------
--- Verbose mode of the LuaJIT compiler.
---
--- Copyright (C) 2005-2015 Mike Pall. All rights reserved.
--- Released under the MIT license. See Copyright Notice in luajit.h
-----------------------------------------------------------------------------
---
--- This module shows verbose information about the progress of the
--- JIT compiler. It prints one line for each generated trace. This module
--- is useful to see which code has been compiled or where the compiler
--- punts and falls back to the interpreter.
---
--- Example usage:
---
--- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end"
--- luajit -jv=myapp.out myapp.lua
---
--- Default output is to stderr. To redirect the output to a file, pass a
--- filename as an argument (use '-' for stdout) or set the environment
--- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the
--- module is started.
---
--- The output from the first example should look like this:
---
--- [TRACE 1 (command line):1 loop]
--- [TRACE 2 (1/3) (command line):1 -> 1]
---
--- The first number in each line is the internal trace number. Next are
--- the file name ('(command line)') and the line number (':1') where the
--- trace has started. Side traces also show the parent trace number and
--- the exit number where they are attached to in parentheses ('(1/3)').
--- An arrow at the end shows where the trace links to ('-> 1'), unless
--- it loops to itself.
---
--- In this case the inner loop gets hot and is traced first, generating
--- a root trace. Then the last exit from the 1st trace gets hot, too,
--- and triggers generation of the 2nd trace. The side trace follows the
--- path along the outer loop and *around* the inner loop, back to its
--- start, and then links to the 1st trace. Yes, this may seem unusual,
--- if you know how traditional compilers work. Trace compilers are full
--- of surprises like this -- have fun! :-)
---
--- Aborted traces are shown like this:
---
--- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo:lua:50]
---
--- Don't worry -- trace aborts are quite common, even in programs which
--- can be fully compiled. The compiler may retry several times until it
--- finds a suitable trace.
---
--- Of course this doesn't work with features that are not-yet-implemented
--- (NYI error messages). The VM simply falls back to the interpreter. This
--- may not matter at all if the particular trace is not very high up in
--- the CPU usage profile. Oh, and the interpreter is quite fast, too.
---
--- Also check out the -jdump module, which prints all the gory details.
---
-------------------------------------------------------------------------------
-
--- Cache some library functions and objects.
-local jit = require("jit")
-assert(jit.version_num == 20004, "LuaJIT core/library version mismatch")
-local jutil = require("jit.util")
-local vmdef = require("jit.vmdef")
-local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo
-local type, format = type, string.format
-local stdout, stderr = io.stdout, io.stderr
-
--- Active flag and output file handle.
-local active, out
-
-------------------------------------------------------------------------------
-
-local startloc, startex
-
-local function fmtfunc(func, pc)
- local fi = funcinfo(func, pc)
- if fi.loc then
- return fi.loc
- elseif fi.ffid then
- return vmdef.ffnames[fi.ffid]
- elseif fi.addr then
- return format("C:%x", fi.addr)
- else
- return "(?)"
- end
-end
-
--- Format trace error message.
-local function fmterr(err, info)
- if type(err) == "number" then
- if type(info) == "function" then info = fmtfunc(info) end
- err = format(vmdef.traceerr[err], info)
- end
- return err
-end
-
--- Dump trace states.
-local function dump_trace(what, tr, func, pc, otr, oex)
- if what == "start" then
- startloc = fmtfunc(func, pc)
- startex = otr and "("..otr.."/"..oex..") " or ""
- else
- if what == "abort" then
- local loc = fmtfunc(func, pc)
- if loc ~= startloc then
- out:write(format("[TRACE --- %s%s -- %s at %s]\n",
- startex, startloc, fmterr(otr, oex), loc))
- else
- out:write(format("[TRACE --- %s%s -- %s]\n",
- startex, startloc, fmterr(otr, oex)))
- end
- elseif what == "stop" then
- local info = traceinfo(tr)
- local link, ltype = info.link, info.linktype
- if ltype == "interpreter" then
- out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n",
- tr, startex, startloc))
- elseif link == tr or link == 0 then
- out:write(format("[TRACE %3s %s%s %s]\n",
- tr, startex, startloc, ltype))
- elseif ltype == "root" then
- out:write(format("[TRACE %3s %s%s -> %d]\n",
- tr, startex, startloc, link))
- else
- out:write(format("[TRACE %3s %s%s -> %d %s]\n",
- tr, startex, startloc, link, ltype))
- end
- else
- out:write(format("[TRACE %s]\n", what))
- end
- out:flush()
- end
-end
-
-------------------------------------------------------------------------------
-
--- Detach dump handlers.
-local function dumpoff()
- if active then
- active = false
- jit.attach(dump_trace)
- if out and out ~= stdout and out ~= stderr then out:close() end
- out = nil
- end
-end
-
--- Open the output file and attach dump handlers.
-local function dumpon(outfile)
- if active then dumpoff() end
- if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end
- if outfile then
- out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
- else
- out = stderr
- end
- jit.attach(dump_trace, "trace")
- active = true
-end
-
--- Public module functions.
-module(...)
-
-on = dumpon
-off = dumpoff
-start = dumpon -- For -j command line option.
-
+----------------------------------------------------------------------------
+-- Verbose mode of the LuaJIT compiler.
+--
+-- Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module shows verbose information about the progress of the
+-- JIT compiler. It prints one line for each generated trace. This module
+-- is useful to see which code has been compiled or where the compiler
+-- punts and falls back to the interpreter.
+--
+-- Example usage:
+--
+-- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end"
+-- luajit -jv=myapp.out myapp.lua
+--
+-- Default output is to stderr. To redirect the output to a file, pass a
+-- filename as an argument (use '-' for stdout) or set the environment
+-- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the
+-- module is started.
+--
+-- The output from the first example should look like this:
+--
+-- [TRACE 1 (command line):1 loop]
+-- [TRACE 2 (1/3) (command line):1 -> 1]
+--
+-- The first number in each line is the internal trace number. Next are
+-- the file name ('(command line)') and the line number (':1') where the
+-- trace has started. Side traces also show the parent trace number and
+-- the exit number where they are attached to in parentheses ('(1/3)').
+-- An arrow at the end shows where the trace links to ('-> 1'), unless
+-- it loops to itself.
+--
+-- In this case the inner loop gets hot and is traced first, generating
+-- a root trace. Then the last exit from the 1st trace gets hot, too,
+-- and triggers generation of the 2nd trace. The side trace follows the
+-- path along the outer loop and *around* the inner loop, back to its
+-- start, and then links to the 1st trace. Yes, this may seem unusual,
+-- if you know how traditional compilers work. Trace compilers are full
+-- of surprises like this -- have fun! :-)
+--
+-- Aborted traces are shown like this:
+--
+-- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo:lua:50]
+--
+-- Don't worry -- trace aborts are quite common, even in programs which
+-- can be fully compiled. The compiler may retry several times until it
+-- finds a suitable trace.
+--
+-- Of course this doesn't work with features that are not-yet-implemented
+-- (NYI error messages). The VM simply falls back to the interpreter. This
+-- may not matter at all if the particular trace is not very high up in
+-- the CPU usage profile. Oh, and the interpreter is quite fast, too.
+--
+-- Also check out the -jdump module, which prints all the gory details.
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20002, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo
+local type, format = type, string.format
+local stdout, stderr = io.stdout, io.stderr
+
+-- Active flag and output file handle.
+local active, out
+
+------------------------------------------------------------------------------
+
+local startloc, startex
+
+local function fmtfunc(func, pc)
+ local fi = funcinfo(func, pc)
+ if fi.loc then
+ return fi.loc
+ elseif fi.ffid then
+ return vmdef.ffnames[fi.ffid]
+ elseif fi.addr then
+ return format("C:%x", fi.addr)
+ else
+ return "(?)"
+ end
+end
+
+-- Format trace error message.
+local function fmterr(err, info)
+ if type(err) == "number" then
+ if type(info) == "function" then info = fmtfunc(info) end
+ err = format(vmdef.traceerr[err], info)
+ end
+ return err
+end
+
+-- Dump trace states.
+local function dump_trace(what, tr, func, pc, otr, oex)
+ if what == "start" then
+ startloc = fmtfunc(func, pc)
+ startex = otr and "("..otr.."/"..oex..") " or ""
+ else
+ if what == "abort" then
+ local loc = fmtfunc(func, pc)
+ if loc ~= startloc then
+ out:write(format("[TRACE --- %s%s -- %s at %s]\n",
+ startex, startloc, fmterr(otr, oex), loc))
+ else
+ out:write(format("[TRACE --- %s%s -- %s]\n",
+ startex, startloc, fmterr(otr, oex)))
+ end
+ elseif what == "stop" then
+ local info = traceinfo(tr)
+ local link, ltype = info.link, info.linktype
+ if ltype == "interpreter" then
+ out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n",
+ tr, startex, startloc))
+ elseif link == tr or link == 0 then
+ out:write(format("[TRACE %3s %s%s %s]\n",
+ tr, startex, startloc, ltype))
+ elseif ltype == "root" then
+ out:write(format("[TRACE %3s %s%s -> %d]\n",
+ tr, startex, startloc, link))
+ else
+ out:write(format("[TRACE %3s %s%s -> %d %s]\n",
+ tr, startex, startloc, link, ltype))
+ end
+ else
+ out:write(format("[TRACE %s]\n", what))
+ end
+ out:flush()
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Detach dump handlers.
+local function dumpoff()
+ if active then
+ active = false
+ jit.attach(dump_trace)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach dump handlers.
+local function dumpon(outfile)
+ if active then dumpoff() end
+ if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stderr
+ end
+ jit.attach(dump_trace, "trace")
+ active = true
+end
+
+-- Public module functions.
+module(...)
+
+on = dumpon
+off = dumpoff
+start = dumpon -- For -j command line option.
+
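The header comment of the restored v.lua documents command-line use (luajit -jv). Its exports (on, off, start) can also be driven from Lua code; a minimal sketch, with the log file name chosen arbitrarily:

  -- Minimal sketch: enable verbose trace reporting around a hot section only.
  local verbose = require("jit.v")
  verbose.on("traces.log")   -- omit the argument to write to stderr
  for i = 1, 1e6 do
    -- hot work the JIT may compile into traces
  end
  verbose.off()              -- detach the handler and close the log file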
diff --git a/3rdparty/lua/src/lauxlib.h b/3rdparty/lua/src/lauxlib.h
index 61de59b..d201c51 100644
--- a/3rdparty/lua/src/lauxlib.h
+++ b/3rdparty/lua/src/lauxlib.h
@@ -1,167 +1,167 @@
-/*
-** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $
-** Auxiliary functions for building Lua libraries
-** See Copyright Notice in lua.h
-*/
-
-
-#ifndef lauxlib_h
-#define lauxlib_h
-
-
-#include <stddef.h>
-#include <stdio.h>
-
-#include "lua.h"
-
-
-#define luaL_getn(L,i) ((int)lua_objlen(L, i))
-#define luaL_setn(L,i,j) ((void)0) /* no op! */
-
-/* extra error code for `luaL_load' */
-#define LUA_ERRFILE (LUA_ERRERR+1)
-
-typedef struct luaL_Reg {
- const char *name;
- lua_CFunction func;
-} luaL_Reg;
-
-LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname,
- const luaL_Reg *l, int nup);
-LUALIB_API void (luaL_register) (lua_State *L, const char *libname,
- const luaL_Reg *l);
-LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e);
-LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e);
-LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname);
-LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg);
-LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg,
- size_t *l);
-LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg,
- const char *def, size_t *l);
-LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg);
-LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def);
-
-LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg);
-LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg,
- lua_Integer def);
-
-LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg);
-LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t);
-LUALIB_API void (luaL_checkany) (lua_State *L, int narg);
-
-LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname);
-LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname);
-
-LUALIB_API void (luaL_where) (lua_State *L, int lvl);
-LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...);
-
-LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def,
- const char *const lst[]);
-
-LUALIB_API int (luaL_ref) (lua_State *L, int t);
-LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref);
-
-LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename);
-LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz,
- const char *name);
-LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s);
-
-LUALIB_API lua_State *(luaL_newstate) (void);
-
-
-LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p,
- const char *r);
-
-LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx,
- const char *fname, int szhint);
-
-/* From Lua 5.2. */
-LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname);
-LUALIB_API int luaL_execresult(lua_State *L, int stat);
-LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename,
- const char *mode);
-LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz,
- const char *name, const char *mode);
-LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
- int level);
-
-
-/*
-** ===============================================================
-** some useful macros
-** ===============================================================
-*/
-
-#define luaL_argcheck(L, cond,numarg,extramsg) \
- ((void)((cond) || luaL_argerror(L, (numarg), (extramsg))))
-#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL))
-#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL))
-#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n)))
-#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d)))
-#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n)))
-#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d)))
-
-#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i)))
-
-#define luaL_dofile(L, fn) \
- (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0))
-
-#define luaL_dostring(L, s) \
- (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0))
-
-#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n)))
-
-#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n)))
-
-/*
-** {======================================================
-** Generic Buffer manipulation
-** =======================================================
-*/
-
-
-
-typedef struct luaL_Buffer {
- char *p; /* current position in buffer */
- int lvl; /* number of strings in the stack (level) */
- lua_State *L;
- char buffer[LUAL_BUFFERSIZE];
-} luaL_Buffer;
-
-#define luaL_addchar(B,c) \
- ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \
- (*(B)->p++ = (char)(c)))
-
-/* compatibility only */
-#define luaL_putchar(B,c) luaL_addchar(B,c)
-
-#define luaL_addsize(B,n) ((B)->p += (n))
-
-LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B);
-LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B);
-LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l);
-LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s);
-LUALIB_API void (luaL_addvalue) (luaL_Buffer *B);
-LUALIB_API void (luaL_pushresult) (luaL_Buffer *B);
-
-
-/* }====================================================== */
-
-
-/* compatibility with ref system */
-
-/* pre-defined references */
-#define LUA_NOREF (-2)
-#define LUA_REFNIL (-1)
-
-#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \
- (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0))
-
-#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref))
-
-#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref))
-
-
-#define luaL_reg luaL_Reg
-
-#endif
+/*
+** $Id: lauxlib.h,v ec74646b41df 2013/11/19 00:23:10 oliver $
+** Auxiliary functions for building Lua libraries
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lauxlib_h
+#define lauxlib_h
+
+
+#include <stddef.h>
+#include <stdio.h>
+
+#include "lua.h"
+
+
+#define luaL_getn(L,i) ((int)lua_objlen(L, i))
+#define luaL_setn(L,i,j) ((void)0) /* no op! */
+
+/* extra error code for `luaL_load' */
+#define LUA_ERRFILE (LUA_ERRERR+1)
+
+typedef struct luaL_Reg {
+ const char *name;
+ lua_CFunction func;
+} luaL_Reg;
+
+LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname,
+ const luaL_Reg *l, int nup);
+LUALIB_API void (luaL_register) (lua_State *L, const char *libname,
+ const luaL_Reg *l);
+LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e);
+LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e);
+LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname);
+LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg);
+LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg,
+ size_t *l);
+LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg,
+ const char *def, size_t *l);
+LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg);
+LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def);
+
+LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg);
+LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg,
+ lua_Integer def);
+
+LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg);
+LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t);
+LUALIB_API void (luaL_checkany) (lua_State *L, int narg);
+
+LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname);
+LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname);
+
+LUALIB_API void (luaL_where) (lua_State *L, int lvl);
+LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...);
+
+LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def,
+ const char *const lst[]);
+
+LUALIB_API int (luaL_ref) (lua_State *L, int t);
+LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref);
+
+LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename);
+LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz,
+ const char *name);
+LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s);
+
+LUALIB_API lua_State *(luaL_newstate) (void);
+
+
+LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p,
+ const char *r);
+
+LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx,
+ const char *fname, int szhint);
+
+/* From Lua 5.2. */
+LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname);
+LUALIB_API int luaL_execresult(lua_State *L, int stat);
+LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename,
+ const char *mode);
+LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz,
+ const char *name, const char *mode);
+LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
+ int level);
+
+
+/*
+** ===============================================================
+** some useful macros
+** ===============================================================
+*/
+
+#define luaL_argcheck(L, cond,numarg,extramsg) \
+ ((void)((cond) || luaL_argerror(L, (numarg), (extramsg))))
+#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL))
+#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL))
+#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n)))
+#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d)))
+#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n)))
+#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d)))
+
+#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i)))
+
+#define luaL_dofile(L, fn) \
+ (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0))
+
+#define luaL_dostring(L, s) \
+ (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0))
+
+#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n)))
+
+#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n)))
+
+/*
+** {======================================================
+** Generic Buffer manipulation
+** =======================================================
+*/
+
+
+
+typedef struct luaL_Buffer {
+ char *p; /* current position in buffer */
+ int lvl; /* number of strings in the stack (level) */
+ lua_State *L;
+ char buffer[LUAL_BUFFERSIZE];
+} luaL_Buffer;
+
+#define luaL_addchar(B,c) \
+ ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \
+ (*(B)->p++ = (char)(c)))
+
+/* compatibility only */
+#define luaL_putchar(B,c) luaL_addchar(B,c)
+
+#define luaL_addsize(B,n) ((B)->p += (n))
+
+LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B);
+LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B);
+LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l);
+LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s);
+LUALIB_API void (luaL_addvalue) (luaL_Buffer *B);
+LUALIB_API void (luaL_pushresult) (luaL_Buffer *B);
+
+
+/* }====================================================== */
+
+
+/* compatibility with ref system */
+
+/* pre-defined references */
+#define LUA_NOREF (-2)
+#define LUA_REFNIL (-1)
+
+#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \
+ (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0))
+
+#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref))
+
+#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref))
+
+
+#define luaL_reg luaL_Reg
+
+#endif
diff --git a/3rdparty/lua/src/lib_aux.c b/3rdparty/lua/src/lib_aux.c
index 4a5b4f4..05fa6b1 100644
--- a/3rdparty/lua/src/lib_aux.c
+++ b/3rdparty/lua/src/lib_aux.c
@@ -1,356 +1,356 @@
-/*
-** Auxiliary library for the Lua/C API.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major parts taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#include <errno.h>
-#include <stdarg.h>
-#include <stdio.h>
-
-#define lib_aux_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_state.h"
-#include "lj_trace.h"
-#include "lj_lib.h"
-
-#if LJ_TARGET_POSIX
-#include <sys/wait.h>
-#endif
-
-/* -- I/O error handling -------------------------------------------------- */
-
-LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname)
-{
- if (stat) {
- setboolV(L->top++, 1);
- return 1;
- } else {
- int en = errno; /* Lua API calls may change this value. */
- setnilV(L->top++);
- if (fname)
- lua_pushfstring(L, "%s: %s", fname, strerror(en));
- else
- lua_pushfstring(L, "%s", strerror(en));
- setintV(L->top++, en);
- lj_trace_abort(G(L));
- return 3;
- }
-}
-
-LUALIB_API int luaL_execresult(lua_State *L, int stat)
-{
- if (stat != -1) {
-#if LJ_TARGET_POSIX
- if (WIFSIGNALED(stat)) {
- stat = WTERMSIG(stat);
- setnilV(L->top++);
- lua_pushliteral(L, "signal");
- } else {
- if (WIFEXITED(stat))
- stat = WEXITSTATUS(stat);
- if (stat == 0)
- setboolV(L->top++, 1);
- else
- setnilV(L->top++);
- lua_pushliteral(L, "exit");
- }
-#else
- if (stat == 0)
- setboolV(L->top++, 1);
- else
- setnilV(L->top++);
- lua_pushliteral(L, "exit");
-#endif
- setintV(L->top++, stat);
- return 3;
- }
- return luaL_fileresult(L, 0, NULL);
-}
-
-/* -- Module registration ------------------------------------------------- */
-
-LUALIB_API const char *luaL_findtable(lua_State *L, int idx,
- const char *fname, int szhint)
-{
- const char *e;
- lua_pushvalue(L, idx);
- do {
- e = strchr(fname, '.');
- if (e == NULL) e = fname + strlen(fname);
- lua_pushlstring(L, fname, (size_t)(e - fname));
- lua_rawget(L, -2);
- if (lua_isnil(L, -1)) { /* no such field? */
- lua_pop(L, 1); /* remove this nil */
- lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
- lua_pushlstring(L, fname, (size_t)(e - fname));
- lua_pushvalue(L, -2);
- lua_settable(L, -4); /* set new table into field */
- } else if (!lua_istable(L, -1)) { /* field has a non-table value? */
- lua_pop(L, 2); /* remove table and value */
- return fname; /* return problematic part of the name */
- }
- lua_remove(L, -2); /* remove previous table */
- fname = e + 1;
- } while (*e == '.');
- return NULL;
-}
-
-static int libsize(const luaL_Reg *l)
-{
- int size = 0;
- for (; l->name; l++) size++;
- return size;
-}
-
-LUALIB_API void luaL_openlib(lua_State *L, const char *libname,
- const luaL_Reg *l, int nup)
-{
- lj_lib_checkfpu(L);
- if (libname) {
- int size = libsize(l);
- /* check whether lib already exists */
- luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
- lua_getfield(L, -1, libname); /* get _LOADED[libname] */
- if (!lua_istable(L, -1)) { /* not found? */
- lua_pop(L, 1); /* remove previous result */
- /* try global variable (and create one if it does not exist) */
- if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL)
- lj_err_callerv(L, LJ_ERR_BADMODN, libname);
- lua_pushvalue(L, -1);
- lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
- }
- lua_remove(L, -2); /* remove _LOADED table */
- lua_insert(L, -(nup+1)); /* move library table to below upvalues */
- }
- for (; l->name; l++) {
- int i;
- for (i = 0; i < nup; i++) /* copy upvalues to the top */
- lua_pushvalue(L, -nup);
- lua_pushcclosure(L, l->func, nup);
- lua_setfield(L, -(nup+2), l->name);
- }
- lua_pop(L, nup); /* remove upvalues */
-}
-
-LUALIB_API void luaL_register(lua_State *L, const char *libname,
- const luaL_Reg *l)
-{
- luaL_openlib(L, libname, l, 0);
-}
-
-LUALIB_API const char *luaL_gsub(lua_State *L, const char *s,
- const char *p, const char *r)
-{
- const char *wild;
- size_t l = strlen(p);
- luaL_Buffer b;
- luaL_buffinit(L, &b);
- while ((wild = strstr(s, p)) != NULL) {
- luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */
- luaL_addstring(&b, r); /* push replacement in place of pattern */
- s = wild + l; /* continue after `p' */
- }
- luaL_addstring(&b, s); /* push last suffix */
- luaL_pushresult(&b);
- return lua_tostring(L, -1);
-}
-
-/* -- Buffer handling ----------------------------------------------------- */
-
-#define bufflen(B) ((size_t)((B)->p - (B)->buffer))
-#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B)))
-
-static int emptybuffer(luaL_Buffer *B)
-{
- size_t l = bufflen(B);
- if (l == 0)
- return 0; /* put nothing on stack */
- lua_pushlstring(B->L, B->buffer, l);
- B->p = B->buffer;
- B->lvl++;
- return 1;
-}
-
-static void adjuststack(luaL_Buffer *B)
-{
- if (B->lvl > 1) {
- lua_State *L = B->L;
- int toget = 1; /* number of levels to concat */
- size_t toplen = lua_strlen(L, -1);
- do {
- size_t l = lua_strlen(L, -(toget+1));
- if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l))
- break;
- toplen += l;
- toget++;
- } while (toget < B->lvl);
- lua_concat(L, toget);
- B->lvl = B->lvl - toget + 1;
- }
-}
-
-LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B)
-{
- if (emptybuffer(B))
- adjuststack(B);
- return B->buffer;
-}
-
-LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l)
-{
- while (l--)
- luaL_addchar(B, *s++);
-}
-
-LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s)
-{
- luaL_addlstring(B, s, strlen(s));
-}
-
-LUALIB_API void luaL_pushresult(luaL_Buffer *B)
-{
- emptybuffer(B);
- lua_concat(B->L, B->lvl);
- B->lvl = 1;
-}
-
-LUALIB_API void luaL_addvalue(luaL_Buffer *B)
-{
- lua_State *L = B->L;
- size_t vl;
- const char *s = lua_tolstring(L, -1, &vl);
- if (vl <= bufffree(B)) { /* fit into buffer? */
- memcpy(B->p, s, vl); /* put it there */
- B->p += vl;
- lua_pop(L, 1); /* remove from stack */
- } else {
- if (emptybuffer(B))
- lua_insert(L, -2); /* put buffer before new value */
- B->lvl++; /* add new value into B stack */
- adjuststack(B);
- }
-}
-
-LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B)
-{
- B->L = L;
- B->p = B->buffer;
- B->lvl = 0;
-}
-
-/* -- Reference management ------------------------------------------------ */
-
-#define FREELIST_REF 0
-
-/* Convert a stack index to an absolute index. */
-#define abs_index(L, i) \
- ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1)
-
-LUALIB_API int luaL_ref(lua_State *L, int t)
-{
- int ref;
- t = abs_index(L, t);
- if (lua_isnil(L, -1)) {
- lua_pop(L, 1); /* remove from stack */
- return LUA_REFNIL; /* `nil' has a unique fixed reference */
- }
- lua_rawgeti(L, t, FREELIST_REF); /* get first free element */
- ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */
- lua_pop(L, 1); /* remove it from stack */
- if (ref != 0) { /* any free element? */
- lua_rawgeti(L, t, ref); /* remove it from list */
- lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */
- } else { /* no free elements */
- ref = (int)lua_objlen(L, t);
- ref++; /* create new reference */
- }
- lua_rawseti(L, t, ref);
- return ref;
-}
-
-LUALIB_API void luaL_unref(lua_State *L, int t, int ref)
-{
- if (ref >= 0) {
- t = abs_index(L, t);
- lua_rawgeti(L, t, FREELIST_REF);
- lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */
- lua_pushinteger(L, ref);
- lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */
- }
-}
-
-/* -- Default allocator and panic function -------------------------------- */
-
-static int panic(lua_State *L)
-{
- const char *s = lua_tostring(L, -1);
- fputs("PANIC: unprotected error in call to Lua API (", stderr);
- fputs(s ? s : "?", stderr);
- fputc(')', stderr); fputc('\n', stderr);
- fflush(stderr);
- return 0;
-}
-
-#ifdef LUAJIT_USE_SYSMALLOC
-
-#if LJ_64 && !defined(LUAJIT_USE_VALGRIND)
-#error "Must use builtin allocator for 64 bit target"
-#endif
-
-static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
-{
- (void)ud;
- (void)osize;
- if (nsize == 0) {
- free(ptr);
- return NULL;
- } else {
- return realloc(ptr, nsize);
- }
-}
-
-LUALIB_API lua_State *luaL_newstate(void)
-{
- lua_State *L = lua_newstate(mem_alloc, NULL);
- if (L) G(L)->panic = panic;
- return L;
-}
-
-#else
-
-#include "lj_alloc.h"
-
-LUALIB_API lua_State *luaL_newstate(void)
-{
- lua_State *L;
- void *ud = lj_alloc_create();
- if (ud == NULL) return NULL;
-#if LJ_64
- L = lj_state_newstate(lj_alloc_f, ud);
-#else
- L = lua_newstate(lj_alloc_f, ud);
-#endif
- if (L) G(L)->panic = panic;
- return L;
-}
-
-#if LJ_64
-LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
-{
- UNUSED(f); UNUSED(ud);
- fputs("Must use luaL_newstate() for 64 bit target\n", stderr);
- return NULL;
-}
-#endif
-
-#endif
-
+/*
+** Auxiliary library for the Lua/C API.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major parts taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#define lib_aux_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_state.h"
+#include "lj_trace.h"
+#include "lj_lib.h"
+
+#if LJ_TARGET_POSIX
+#include <sys/wait.h>
+#endif
+
+/* -- I/O error handling -------------------------------------------------- */
+
+LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname)
+{
+ if (stat) {
+ setboolV(L->top++, 1);
+ return 1;
+ } else {
+ int en = errno; /* Lua API calls may change this value. */
+ setnilV(L->top++);
+ if (fname)
+ lua_pushfstring(L, "%s: %s", fname, strerror(en));
+ else
+ lua_pushfstring(L, "%s", strerror(en));
+ setintV(L->top++, en);
+ lj_trace_abort(G(L));
+ return 3;
+ }
+}
+
+LUALIB_API int luaL_execresult(lua_State *L, int stat)
+{
+ if (stat != -1) {
+#if LJ_TARGET_POSIX
+ if (WIFSIGNALED(stat)) {
+ stat = WTERMSIG(stat);
+ setnilV(L->top++);
+ lua_pushliteral(L, "signal");
+ } else {
+ if (WIFEXITED(stat))
+ stat = WEXITSTATUS(stat);
+ if (stat == 0)
+ setboolV(L->top++, 1);
+ else
+ setnilV(L->top++);
+ lua_pushliteral(L, "exit");
+ }
+#else
+ if (stat == 0)
+ setboolV(L->top++, 1);
+ else
+ setnilV(L->top++);
+ lua_pushliteral(L, "exit");
+#endif
+ setintV(L->top++, stat);
+ return 3;
+ }
+ return luaL_fileresult(L, 0, NULL);
+}
+
+/* -- Module registration ------------------------------------------------- */
+
+LUALIB_API const char *luaL_findtable(lua_State *L, int idx,
+ const char *fname, int szhint)
+{
+ const char *e;
+ lua_pushvalue(L, idx);
+ do {
+ e = strchr(fname, '.');
+ if (e == NULL) e = fname + strlen(fname);
+ lua_pushlstring(L, fname, (size_t)(e - fname));
+ lua_rawget(L, -2);
+ if (lua_isnil(L, -1)) { /* no such field? */
+ lua_pop(L, 1); /* remove this nil */
+ lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
+ lua_pushlstring(L, fname, (size_t)(e - fname));
+ lua_pushvalue(L, -2);
+ lua_settable(L, -4); /* set new table into field */
+ } else if (!lua_istable(L, -1)) { /* field has a non-table value? */
+ lua_pop(L, 2); /* remove table and value */
+ return fname; /* return problematic part of the name */
+ }
+ lua_remove(L, -2); /* remove previous table */
+ fname = e + 1;
+ } while (*e == '.');
+ return NULL;
+}
+
+static int libsize(const luaL_Reg *l)
+{
+ int size = 0;
+ for (; l->name; l++) size++;
+ return size;
+}
+
+LUALIB_API void luaL_openlib(lua_State *L, const char *libname,
+ const luaL_Reg *l, int nup)
+{
+ lj_lib_checkfpu(L);
+ if (libname) {
+ int size = libsize(l);
+ /* check whether lib already exists */
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, libname); /* get _LOADED[libname] */
+ if (!lua_istable(L, -1)) { /* not found? */
+ lua_pop(L, 1); /* remove previous result */
+ /* try global variable (and create one if it does not exist) */
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, libname);
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
+ }
+ lua_remove(L, -2); /* remove _LOADED table */
+ lua_insert(L, -(nup+1)); /* move library table to below upvalues */
+ }
+ for (; l->name; l++) {
+ int i;
+ for (i = 0; i < nup; i++) /* copy upvalues to the top */
+ lua_pushvalue(L, -nup);
+ lua_pushcclosure(L, l->func, nup);
+ lua_setfield(L, -(nup+2), l->name);
+ }
+ lua_pop(L, nup); /* remove upvalues */
+}
+
+LUALIB_API void luaL_register(lua_State *L, const char *libname,
+ const luaL_Reg *l)
+{
+ luaL_openlib(L, libname, l, 0);
+}
+
+LUALIB_API const char *luaL_gsub(lua_State *L, const char *s,
+ const char *p, const char *r)
+{
+ const char *wild;
+ size_t l = strlen(p);
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while ((wild = strstr(s, p)) != NULL) {
+ luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */
+ luaL_addstring(&b, r); /* push replacement in place of pattern */
+ s = wild + l; /* continue after `p' */
+ }
+ luaL_addstring(&b, s); /* push last suffix */
+ luaL_pushresult(&b);
+ return lua_tostring(L, -1);
+}
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define bufflen(B) ((size_t)((B)->p - (B)->buffer))
+#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B)))
+
+static int emptybuffer(luaL_Buffer *B)
+{
+ size_t l = bufflen(B);
+ if (l == 0)
+ return 0; /* put nothing on stack */
+ lua_pushlstring(B->L, B->buffer, l);
+ B->p = B->buffer;
+ B->lvl++;
+ return 1;
+}
+
+static void adjuststack(luaL_Buffer *B)
+{
+ if (B->lvl > 1) {
+ lua_State *L = B->L;
+ int toget = 1; /* number of levels to concat */
+ size_t toplen = lua_strlen(L, -1);
+ do {
+ size_t l = lua_strlen(L, -(toget+1));
+ if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l))
+ break;
+ toplen += l;
+ toget++;
+ } while (toget < B->lvl);
+ lua_concat(L, toget);
+ B->lvl = B->lvl - toget + 1;
+ }
+}
+
+LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B)
+{
+ if (emptybuffer(B))
+ adjuststack(B);
+ return B->buffer;
+}
+
+LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l)
+{
+ while (l--)
+ luaL_addchar(B, *s++);
+}
+
+LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s)
+{
+ luaL_addlstring(B, s, strlen(s));
+}
+
+LUALIB_API void luaL_pushresult(luaL_Buffer *B)
+{
+ emptybuffer(B);
+ lua_concat(B->L, B->lvl);
+ B->lvl = 1;
+}
+
+LUALIB_API void luaL_addvalue(luaL_Buffer *B)
+{
+ lua_State *L = B->L;
+ size_t vl;
+ const char *s = lua_tolstring(L, -1, &vl);
+ if (vl <= bufffree(B)) { /* fit into buffer? */
+ memcpy(B->p, s, vl); /* put it there */
+ B->p += vl;
+ lua_pop(L, 1); /* remove from stack */
+ } else {
+ if (emptybuffer(B))
+ lua_insert(L, -2); /* put buffer before new value */
+ B->lvl++; /* add new value into B stack */
+ adjuststack(B);
+ }
+}
+
+LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B)
+{
+ B->L = L;
+ B->p = B->buffer;
+ B->lvl = 0;
+}
+
+/* -- Reference management ------------------------------------------------ */
+
+#define FREELIST_REF 0
+
+/* Convert a stack index to an absolute index. */
+#define abs_index(L, i) \
+ ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1)
+
+LUALIB_API int luaL_ref(lua_State *L, int t)
+{
+ int ref;
+ t = abs_index(L, t);
+ if (lua_isnil(L, -1)) {
+ lua_pop(L, 1); /* remove from stack */
+ return LUA_REFNIL; /* `nil' has a unique fixed reference */
+ }
+ lua_rawgeti(L, t, FREELIST_REF); /* get first free element */
+ ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */
+ lua_pop(L, 1); /* remove it from stack */
+ if (ref != 0) { /* any free element? */
+ lua_rawgeti(L, t, ref); /* remove it from list */
+ lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */
+ } else { /* no free elements */
+ ref = (int)lua_objlen(L, t);
+ ref++; /* create new reference */
+ }
+ lua_rawseti(L, t, ref);
+ return ref;
+}
+
+LUALIB_API void luaL_unref(lua_State *L, int t, int ref)
+{
+ if (ref >= 0) {
+ t = abs_index(L, t);
+ lua_rawgeti(L, t, FREELIST_REF);
+ lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */
+ lua_pushinteger(L, ref);
+ lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */
+ }
+}
+
+/* -- Default allocator and panic function -------------------------------- */
+
+static int panic(lua_State *L)
+{
+ const char *s = lua_tostring(L, -1);
+ fputs("PANIC: unprotected error in call to Lua API (", stderr);
+ fputs(s ? s : "?", stderr);
+ fputc(')', stderr); fputc('\n', stderr);
+ fflush(stderr);
+ return 0;
+}
+
+#ifdef LUAJIT_USE_SYSMALLOC
+
+#if LJ_64
+#error "Must use builtin allocator for 64 bit target"
+#endif
+
+static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
+{
+ (void)ud;
+ (void)osize;
+ if (nsize == 0) {
+ free(ptr);
+ return NULL;
+ } else {
+ return realloc(ptr, nsize);
+ }
+}
+
+LUALIB_API lua_State *luaL_newstate(void)
+{
+ lua_State *L = lua_newstate(mem_alloc, NULL);
+ if (L) G(L)->panic = panic;
+ return L;
+}
+
+#else
+
+#include "lj_alloc.h"
+
+LUALIB_API lua_State *luaL_newstate(void)
+{
+ lua_State *L;
+ void *ud = lj_alloc_create();
+ if (ud == NULL) return NULL;
+#if LJ_64
+ L = lj_state_newstate(lj_alloc_f, ud);
+#else
+ L = lua_newstate(lj_alloc_f, ud);
+#endif
+ if (L) G(L)->panic = panic;
+ return L;
+}
+
+#if LJ_64
+LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
+{
+ UNUSED(f); UNUSED(ud);
+ fputs("Must use luaL_newstate() for 64 bit target\n", stderr);
+ return NULL;
+}
+#endif
+
+#endif
+
diff --git a/3rdparty/lua/src/lib_base.c b/3rdparty/lua/src/lib_base.c
index c26b568..070970e 100644
--- a/3rdparty/lua/src/lib_base.c
+++ b/3rdparty/lua/src/lib_base.c
@@ -1,683 +1,683 @@
-/*
-** Base and coroutine library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#include <stdio.h>
-
-#define lib_base_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_state.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#include "lj_cconv.h"
-#endif
-#include "lj_bc.h"
-#include "lj_ff.h"
-#include "lj_dispatch.h"
-#include "lj_char.h"
-#include "lj_strscan.h"
-#include "lj_lib.h"
-
-/* -- Base library: checks ------------------------------------------------ */
-
-#define LJLIB_MODULE_base
-
-LJLIB_ASM(assert) LJLIB_REC(.)
-{
- GCstr *s;
- lj_lib_checkany(L, 1);
- s = lj_lib_optstr(L, 2);
- if (s)
- lj_err_callermsg(L, strdata(s));
- else
- lj_err_caller(L, LJ_ERR_ASSERT);
- return FFH_UNREACHABLE;
-}
-
-/* ORDER LJ_T */
-LJLIB_PUSH("nil")
-LJLIB_PUSH("boolean")
-LJLIB_PUSH(top-1) /* boolean */
-LJLIB_PUSH("userdata")
-LJLIB_PUSH("string")
-LJLIB_PUSH("upval")
-LJLIB_PUSH("thread")
-LJLIB_PUSH("proto")
-LJLIB_PUSH("function")
-LJLIB_PUSH("trace")
-LJLIB_PUSH("cdata")
-LJLIB_PUSH("table")
-LJLIB_PUSH(top-9) /* userdata */
-LJLIB_PUSH("number")
-LJLIB_ASM_(type) LJLIB_REC(.)
-/* Recycle the lj_lib_checkany(L, 1) from assert. */
-
-/* -- Base library: iterators --------------------------------------------- */
-
-/* This solves a circular dependency problem -- change FF_next_N as needed. */
-LJ_STATIC_ASSERT((int)FF_next == FF_next_N);
-
-LJLIB_ASM(next)
-{
- lj_lib_checktab(L, 1);
- return FFH_UNREACHABLE;
-}
-
-#if LJ_52 || LJ_HASFFI
-static int ffh_pairs(lua_State *L, MMS mm)
-{
- TValue *o = lj_lib_checkany(L, 1);
- cTValue *mo = lj_meta_lookup(L, o, mm);
- if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) {
- L->top = o+1; /* Only keep one argument. */
- copyTV(L, L->base-1, mo); /* Replace callable. */
- return FFH_TAILCALL;
- } else {
- if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE);
- setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1)));
- if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0);
- return FFH_RES(3);
- }
-}
-#else
-#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE)
-#endif
-
-LJLIB_PUSH(lastcl)
-LJLIB_ASM(pairs)
-{
- return ffh_pairs(L, MM_pairs);
-}
-
-LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.)
-{
- lj_lib_checktab(L, 1);
- lj_lib_checkint(L, 2);
- return FFH_UNREACHABLE;
-}
-
-LJLIB_PUSH(lastcl)
-LJLIB_ASM(ipairs) LJLIB_REC(.)
-{
- return ffh_pairs(L, MM_ipairs);
-}
-
-/* -- Base library: getters and setters ----------------------------------- */
-
-LJLIB_ASM_(getmetatable) LJLIB_REC(.)
-/* Recycle the lj_lib_checkany(L, 1) from assert. */
-
-LJLIB_ASM(setmetatable) LJLIB_REC(.)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- GCtab *mt = lj_lib_checktabornil(L, 2);
- if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable)))
- lj_err_caller(L, LJ_ERR_PROTMT);
- setgcref(t->metatable, obj2gco(mt));
- if (mt) { lj_gc_objbarriert(L, t, mt); }
- settabV(L, L->base-1, t);
- return FFH_RES(1);
-}
-
-LJLIB_CF(getfenv)
-{
- GCfunc *fn;
- cTValue *o = L->base;
- if (!(o < L->top && tvisfunc(o))) {
- int level = lj_lib_optint(L, 1, 1);
- o = lj_debug_frame(L, level, &level);
- if (o == NULL)
- lj_err_arg(L, 1, LJ_ERR_INVLVL);
- }
- fn = &gcval(o)->fn;
- settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env));
- return 1;
-}
-
-LJLIB_CF(setfenv)
-{
- GCfunc *fn;
- GCtab *t = lj_lib_checktab(L, 2);
- cTValue *o = L->base;
- if (!(o < L->top && tvisfunc(o))) {
- int level = lj_lib_checkint(L, 1);
- if (level == 0) {
- /* NOBARRIER: A thread (i.e. L) is never black. */
- setgcref(L->env, obj2gco(t));
- return 0;
- }
- o = lj_debug_frame(L, level, &level);
- if (o == NULL)
- lj_err_arg(L, 1, LJ_ERR_INVLVL);
- }
- fn = &gcval(o)->fn;
- if (!isluafunc(fn))
- lj_err_caller(L, LJ_ERR_SETFENV);
- setgcref(fn->l.env, obj2gco(t));
- lj_gc_objbarrier(L, obj2gco(fn), t);
- setfuncV(L, L->top++, fn);
- return 1;
-}
-
-LJLIB_ASM(rawget) LJLIB_REC(.)
-{
- lj_lib_checktab(L, 1);
- lj_lib_checkany(L, 2);
- return FFH_UNREACHABLE;
-}
-
-LJLIB_CF(rawset) LJLIB_REC(.)
-{
- lj_lib_checktab(L, 1);
- lj_lib_checkany(L, 2);
- L->top = 1+lj_lib_checkany(L, 3);
- lua_rawset(L, 1);
- return 1;
-}
-
-LJLIB_CF(rawequal) LJLIB_REC(.)
-{
- cTValue *o1 = lj_lib_checkany(L, 1);
- cTValue *o2 = lj_lib_checkany(L, 2);
- setboolV(L->top-1, lj_obj_equal(o1, o2));
- return 1;
-}
-
-#if LJ_52
-LJLIB_CF(rawlen) LJLIB_REC(.)
-{
- cTValue *o = L->base;
- int32_t len;
- if (L->top > o && tvisstr(o))
- len = (int32_t)strV(o)->len;
- else
- len = (int32_t)lj_tab_len(lj_lib_checktab(L, 1));
- setintV(L->top-1, len);
- return 1;
-}
-#endif
-
-LJLIB_CF(unpack)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- int32_t n, i = lj_lib_optint(L, 2, 1);
- int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ?
- lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t);
- if (i > e) return 0;
- n = e - i + 1;
- if (n <= 0 || !lua_checkstack(L, n))
- lj_err_caller(L, LJ_ERR_UNPACK);
- do {
- cTValue *tv = lj_tab_getint(t, i);
- if (tv) {
- copyTV(L, L->top++, tv);
- } else {
- setnilV(L->top++);
- }
- } while (i++ < e);
- return n;
-}
-
-LJLIB_CF(select) LJLIB_REC(.)
-{
- int32_t n = (int32_t)(L->top - L->base);
- if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') {
- setintV(L->top-1, n-1);
- return 1;
- } else {
- int32_t i = lj_lib_checkint(L, 1);
- if (i < 0) i = n + i; else if (i > n) i = n;
- if (i < 1)
- lj_err_arg(L, 1, LJ_ERR_IDXRNG);
- return n - i;
- }
-}
-
-/* -- Base library: conversions ------------------------------------------- */
-
-LJLIB_ASM(tonumber) LJLIB_REC(.)
-{
- int32_t base = lj_lib_optint(L, 2, 10);
- if (base == 10) {
- TValue *o = lj_lib_checkany(L, 1);
- if (lj_strscan_numberobj(o)) {
- copyTV(L, L->base-1, o);
- return FFH_RES(1);
- }
-#if LJ_HASFFI
- if (tviscdata(o)) {
- CTState *cts = ctype_cts(L);
- CType *ct = lj_ctype_rawref(cts, cdataV(o)->ctypeid);
- if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
- if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
- if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) &&
- ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) {
- int32_t i;
- lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0);
- setintV(L->base-1, i);
- return FFH_RES(1);
- }
- lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE),
- (uint8_t *)&(L->base-1)->n, o, 0);
- return FFH_RES(1);
- }
- }
-#endif
- } else {
- const char *p = strdata(lj_lib_checkstr(L, 1));
- char *ep;
- unsigned long ul;
- if (base < 2 || base > 36)
- lj_err_arg(L, 2, LJ_ERR_BASERNG);
- ul = strtoul(p, &ep, base);
- if (p != ep) {
- while (lj_char_isspace((unsigned char)(*ep))) ep++;
- if (*ep == '\0') {
- if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u))
- setintV(L->base-1, (int32_t)ul);
- else
- setnumV(L->base-1, (lua_Number)ul);
- return FFH_RES(1);
- }
- }
- }
- setnilV(L->base-1);
- return FFH_RES(1);
-}
-
-LJLIB_PUSH("nil")
-LJLIB_PUSH("false")
-LJLIB_PUSH("true")
-LJLIB_ASM(tostring) LJLIB_REC(.)
-{
- TValue *o = lj_lib_checkany(L, 1);
- cTValue *mo;
- L->top = o+1; /* Only keep one argument. */
- if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
- copyTV(L, L->base-1, mo); /* Replace callable. */
- return FFH_TAILCALL;
- } else {
- GCstr *s;
- if (tvisnumber(o)) {
- s = lj_str_fromnumber(L, o);
- } else if (tvispri(o)) {
- s = strV(lj_lib_upvalue(L, -(int32_t)itype(o)));
- } else {
- if (tvisfunc(o) && isffunc(funcV(o)))
- lua_pushfstring(L, "function: builtin#%d", funcV(o)->c.ffid);
- else
- lua_pushfstring(L, "%s: %p", lj_typename(o), lua_topointer(L, 1));
- /* Note: lua_pushfstring calls the GC which may invalidate o. */
- s = strV(L->top-1);
- }
- setstrV(L, L->base-1, s);
- return FFH_RES(1);
- }
-}
-
-/* -- Base library: throw and catch errors -------------------------------- */
-
-LJLIB_CF(error)
-{
- int32_t level = lj_lib_optint(L, 2, 1);
- lua_settop(L, 1);
- if (lua_isstring(L, 1) && level > 0) {
- luaL_where(L, level);
- lua_pushvalue(L, 1);
- lua_concat(L, 2);
- }
- return lua_error(L);
-}
-
-LJLIB_ASM(pcall) LJLIB_REC(.)
-{
- lj_lib_checkany(L, 1);
- lj_lib_checkfunc(L, 2); /* For xpcall only. */
- return FFH_UNREACHABLE;
-}
-LJLIB_ASM_(xpcall) LJLIB_REC(.)
-
-/* -- Base library: load Lua code ----------------------------------------- */
-
-static int load_aux(lua_State *L, int status, int envarg)
-{
- if (status == 0) {
- if (tvistab(L->base+envarg-1)) {
- GCfunc *fn = funcV(L->top-1);
- GCtab *t = tabV(L->base+envarg-1);
- setgcref(fn->c.env, obj2gco(t));
- lj_gc_objbarrier(L, fn, t);
- }
- return 1;
- } else {
- setnilV(L->top-2);
- return 2;
- }
-}
-
-LJLIB_CF(loadfile)
-{
- GCstr *fname = lj_lib_optstr(L, 1);
- GCstr *mode = lj_lib_optstr(L, 2);
- int status;
- lua_settop(L, 3); /* Ensure env arg exists. */
- status = luaL_loadfilex(L, fname ? strdata(fname) : NULL,
- mode ? strdata(mode) : NULL);
- return load_aux(L, status, 3);
-}
-
-static const char *reader_func(lua_State *L, void *ud, size_t *size)
-{
- UNUSED(ud);
- luaL_checkstack(L, 2, "too many nested functions");
- copyTV(L, L->top++, L->base);
- lua_call(L, 0, 1); /* Call user-supplied function. */
- L->top--;
- if (tvisnil(L->top)) {
- *size = 0;
- return NULL;
- } else if (tvisstr(L->top) || tvisnumber(L->top)) {
- copyTV(L, L->base+4, L->top); /* Anchor string in reserved stack slot. */
- return lua_tolstring(L, 5, size);
- } else {
- lj_err_caller(L, LJ_ERR_RDRSTR);
- return NULL;
- }
-}
-
-LJLIB_CF(load)
-{
- GCstr *name = lj_lib_optstr(L, 2);
- GCstr *mode = lj_lib_optstr(L, 3);
- int status;
- if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base))) {
- GCstr *s = lj_lib_checkstr(L, 1);
- lua_settop(L, 4); /* Ensure env arg exists. */
- status = luaL_loadbufferx(L, strdata(s), s->len, strdata(name ? name : s),
- mode ? strdata(mode) : NULL);
- } else {
- lj_lib_checkfunc(L, 1);
- lua_settop(L, 5); /* Reserve a slot for the string from the reader. */
- status = lua_loadx(L, reader_func, NULL, name ? strdata(name) : "=(load)",
- mode ? strdata(mode) : NULL);
- }
- return load_aux(L, status, 4);
-}
-
-LJLIB_CF(loadstring)
-{
- return lj_cf_load(L);
-}
-
-LJLIB_CF(dofile)
-{
- GCstr *fname = lj_lib_optstr(L, 1);
- setnilV(L->top);
- L->top = L->base+1;
- if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0)
- lua_error(L);
- lua_call(L, 0, LUA_MULTRET);
- return (int)(L->top - L->base) - 1;
-}
-
-/* -- Base library: GC control -------------------------------------------- */
-
-LJLIB_CF(gcinfo)
-{
- setintV(L->top++, (G(L)->gc.total >> 10));
- return 1;
-}
-
-LJLIB_CF(collectgarbage)
-{
- int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */
- "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul");
- int32_t data = lj_lib_optint(L, 2, 0);
- if (opt == LUA_GCCOUNT) {
- setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0);
- } else {
- int res = lua_gc(L, opt, data);
- if (opt == LUA_GCSTEP)
- setboolV(L->top, res);
- else
- setintV(L->top, res);
- }
- L->top++;
- return 1;
-}
-
-/* -- Base library: miscellaneous functions ------------------------------- */
-
-LJLIB_PUSH(top-2) /* Upvalue holds weak table. */
-LJLIB_CF(newproxy)
-{
- lua_settop(L, 1);
- lua_newuserdata(L, 0);
- if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */
- return 1;
- } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */
- lua_newtable(L);
- lua_pushvalue(L, -1);
- lua_pushboolean(L, 1);
- lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */
- } else { /* newproxy(proxy): inherit metatable. */
- int validproxy = 0;
- if (lua_getmetatable(L, 1)) {
- lua_rawget(L, lua_upvalueindex(1));
- validproxy = lua_toboolean(L, -1);
- lua_pop(L, 1);
- }
- if (!validproxy)
- lj_err_arg(L, 1, LJ_ERR_NOPROXY);
- lua_getmetatable(L, 1);
- }
- lua_setmetatable(L, 2);
- return 1;
-}
-
-LJLIB_PUSH("tostring")
-LJLIB_CF(print)
-{
- ptrdiff_t i, nargs = L->top - L->base;
- cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1)));
- int shortcut;
- if (tv && !tvisnil(tv)) {
- copyTV(L, L->top++, tv);
- } else {
- setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1)));
- lua_gettable(L, LUA_GLOBALSINDEX);
- tv = L->top-1;
- }
- shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring);
- for (i = 0; i < nargs; i++) {
- const char *str;
- size_t size;
- cTValue *o = &L->base[i];
- if (shortcut && tvisstr(o)) {
- str = strVdata(o);
- size = strV(o)->len;
- } else if (shortcut && tvisint(o)) {
- char buf[LJ_STR_INTBUF];
- char *p = lj_str_bufint(buf, intV(o));
- size = (size_t)(buf+LJ_STR_INTBUF-p);
- str = p;
- } else if (shortcut && tvisnum(o)) {
- char buf[LJ_STR_NUMBUF];
- size = lj_str_bufnum(buf, o);
- str = buf;
- } else {
- copyTV(L, L->top+1, o);
- copyTV(L, L->top, L->top-1);
- L->top += 2;
- lua_call(L, 1, 1);
- str = lua_tolstring(L, -1, &size);
- if (!str)
- lj_err_caller(L, LJ_ERR_PRTOSTR);
- L->top--;
- }
- if (i)
- putchar('\t');
- fwrite(str, 1, size, stdout);
- }
- putchar('\n');
- return 0;
-}
-
-LJLIB_PUSH(top-3)
-LJLIB_SET(_VERSION)
-
-#include "lj_libdef.h"
-
-/* -- Coroutine library --------------------------------------------------- */
-
-#define LJLIB_MODULE_coroutine
-
-LJLIB_CF(coroutine_status)
-{
- const char *s;
- lua_State *co;
- if (!(L->top > L->base && tvisthread(L->base)))
- lj_err_arg(L, 1, LJ_ERR_NOCORO);
- co = threadV(L->base);
- if (co == L) s = "running";
- else if (co->status == LUA_YIELD) s = "suspended";
- else if (co->status != 0) s = "dead";
- else if (co->base > tvref(co->stack)+1) s = "normal";
- else if (co->top == co->base) s = "dead";
- else s = "suspended";
- lua_pushstring(L, s);
- return 1;
-}
-
-LJLIB_CF(coroutine_running)
-{
-#if LJ_52
- int ismain = lua_pushthread(L);
- setboolV(L->top++, ismain);
- return 2;
-#else
- if (lua_pushthread(L))
- setnilV(L->top++);
- return 1;
-#endif
-}
-
-LJLIB_CF(coroutine_create)
-{
- lua_State *L1;
- if (!(L->base < L->top && tvisfunc(L->base)))
- lj_err_argt(L, 1, LUA_TFUNCTION);
- L1 = lua_newthread(L);
- setfuncV(L, L1->top++, funcV(L->base));
- return 1;
-}
-
-LJLIB_ASM(coroutine_yield)
-{
- lj_err_caller(L, LJ_ERR_CYIELD);
- return FFH_UNREACHABLE;
-}
-
-static int ffh_resume(lua_State *L, lua_State *co, int wrap)
-{
- if (co->cframe != NULL || co->status > LUA_YIELD ||
- (co->status == 0 && co->top == co->base)) {
- ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD;
- if (wrap) lj_err_caller(L, em);
- setboolV(L->base-1, 0);
- setstrV(L, L->base, lj_err_str(L, em));
- return FFH_RES(2);
- }
- lj_state_growstack(co, (MSize)(L->top - L->base));
- return FFH_RETRY;
-}
-
-LJLIB_ASM(coroutine_resume)
-{
- if (!(L->top > L->base && tvisthread(L->base)))
- lj_err_arg(L, 1, LJ_ERR_NOCORO);
- return ffh_resume(L, threadV(L->base), 0);
-}
-
-LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux)
-{
- return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1);
-}
-
-/* Inline declarations. */
-LJ_ASMF void lj_ff_coroutine_wrap_aux(void);
-#if !(LJ_TARGET_MIPS && defined(ljamalg_c))
-LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
- lua_State *co);
-#endif
-
-/* Error handler, called from assembler VM. */
-void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co)
-{
- co->top--; copyTV(L, L->top, co->top); L->top++;
- if (tvisstr(L->top-1))
- lj_err_callermsg(L, strVdata(L->top-1));
- else
- lj_err_run(L);
-}
-
-/* Forward declaration. */
-static void setpc_wrap_aux(lua_State *L, GCfunc *fn);
-
-LJLIB_CF(coroutine_wrap)
-{
- lj_cf_coroutine_create(L);
- lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1);
- setpc_wrap_aux(L, funcV(L->top-1));
- return 1;
-}
-
-#include "lj_libdef.h"
-
-/* Fix the PC of wrap_aux. Really ugly workaround. */
-static void setpc_wrap_aux(lua_State *L, GCfunc *fn)
-{
- setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]);
-}
-
-/* ------------------------------------------------------------------------ */
-
-static void newproxy_weaktable(lua_State *L)
-{
- /* NOBARRIER: The table is new (marked white). */
- GCtab *t = lj_tab_new(L, 0, 1);
- settabV(L, L->top++, t);
- setgcref(t->metatable, obj2gco(t));
- setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
- lj_str_newlit(L, "kv"));
- t->nomm = (uint8_t)(~(1u<<MM_mode));
-}
-
-LUALIB_API int luaopen_base(lua_State *L)
-{
- /* NOBARRIER: Table and value are the same. */
- GCtab *env = tabref(L->env);
- settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env);
- lua_pushliteral(L, LUA_VERSION); /* top-3. */
- newproxy_weaktable(L); /* top-2. */
- LJ_LIB_REG(L, "_G", base);
- LJ_LIB_REG(L, LUA_COLIBNAME, coroutine);
- return 2;
-}
-
+/*
+** Base and coroutine library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lib_base_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#endif
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_dispatch.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+#include "lj_lib.h"
+
+/* -- Base library: checks ------------------------------------------------ */
+
+#define LJLIB_MODULE_base
+
+LJLIB_ASM(assert) LJLIB_REC(.)
+{
+ GCstr *s;
+ lj_lib_checkany(L, 1);
+ s = lj_lib_optstr(L, 2);
+ if (s)
+ lj_err_callermsg(L, strdata(s));
+ else
+ lj_err_caller(L, LJ_ERR_ASSERT);
+ return FFH_UNREACHABLE;
+}
+
+/* ORDER LJ_T */
+LJLIB_PUSH("nil")
+LJLIB_PUSH("boolean")
+LJLIB_PUSH(top-1) /* boolean */
+LJLIB_PUSH("userdata")
+LJLIB_PUSH("string")
+LJLIB_PUSH("upval")
+LJLIB_PUSH("thread")
+LJLIB_PUSH("proto")
+LJLIB_PUSH("function")
+LJLIB_PUSH("trace")
+LJLIB_PUSH("cdata")
+LJLIB_PUSH("table")
+LJLIB_PUSH(top-9) /* userdata */
+LJLIB_PUSH("number")
+LJLIB_ASM_(type) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+/* -- Base library: iterators --------------------------------------------- */
+
+/* This solves a circular dependency problem -- change FF_next_N as needed. */
+LJ_STATIC_ASSERT((int)FF_next == FF_next_N);
+
+LJLIB_ASM(next)
+{
+ lj_lib_checktab(L, 1);
+ return FFH_UNREACHABLE;
+}
+
+#if LJ_52 || LJ_HASFFI
+static int ffh_pairs(lua_State *L, MMS mm)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo = lj_meta_lookup(L, o, mm);
+ if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) {
+ L->top = o+1; /* Only keep one argument. */
+ copyTV(L, L->base-1, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ } else {
+ if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE);
+ setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1)));
+ if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0);
+ return FFH_RES(3);
+ }
+}
+#else
+#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE)
+#endif
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(pairs)
+{
+ return ffh_pairs(L, MM_pairs);
+}
+
+LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkint(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(ipairs) LJLIB_REC(.)
+{
+ return ffh_pairs(L, MM_ipairs);
+}
+
+/* -- Base library: getters and setters ----------------------------------- */
+
+LJLIB_ASM_(getmetatable) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+LJLIB_ASM(setmetatable) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCtab *mt = lj_lib_checktabornil(L, 2);
+ if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable)))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ setgcref(t->metatable, obj2gco(mt));
+ if (mt) { lj_gc_objbarriert(L, t, mt); }
+ settabV(L, L->base-1, t);
+ return FFH_RES(1);
+}
+
+LJLIB_CF(getfenv)
+{
+ GCfunc *fn;
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_optint(L, 1, 1);
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ }
+ fn = &gcval(o)->fn;
+ settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env));
+ return 1;
+}
+
+LJLIB_CF(setfenv)
+{
+ GCfunc *fn;
+ GCtab *t = lj_lib_checktab(L, 2);
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_checkint(L, 1);
+ if (level == 0) {
+ /* NOBARRIER: A thread (i.e. L) is never black. */
+ setgcref(L->env, obj2gco(t));
+ return 0;
+ }
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ }
+ fn = &gcval(o)->fn;
+ if (!isluafunc(fn))
+ lj_err_caller(L, LJ_ERR_SETFENV);
+ setgcref(fn->l.env, obj2gco(t));
+ lj_gc_objbarrier(L, obj2gco(fn), t);
+ setfuncV(L, L->top++, fn);
+ return 1;
+}
+
+LJLIB_ASM(rawget) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkany(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_CF(rawset) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkany(L, 2);
+ L->top = 1+lj_lib_checkany(L, 3);
+ lua_rawset(L, 1);
+ return 1;
+}
+
+LJLIB_CF(rawequal) LJLIB_REC(.)
+{
+ cTValue *o1 = lj_lib_checkany(L, 1);
+ cTValue *o2 = lj_lib_checkany(L, 2);
+ setboolV(L->top-1, lj_obj_equal(o1, o2));
+ return 1;
+}
+
+#if LJ_52
+LJLIB_CF(rawlen) LJLIB_REC(.)
+{
+ cTValue *o = L->base;
+ int32_t len;
+ if (L->top > o && tvisstr(o))
+ len = (int32_t)strV(o)->len;
+ else
+ len = (int32_t)lj_tab_len(lj_lib_checktab(L, 1));
+ setintV(L->top-1, len);
+ return 1;
+}
+#endif
+
+LJLIB_CF(unpack)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n, i = lj_lib_optint(L, 2, 1);
+ int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ?
+ lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t);
+ if (i > e) return 0;
+ n = e - i + 1;
+ if (n <= 0 || !lua_checkstack(L, n))
+ lj_err_caller(L, LJ_ERR_UNPACK);
+ do {
+ cTValue *tv = lj_tab_getint(t, i);
+ if (tv) {
+ copyTV(L, L->top++, tv);
+ } else {
+ setnilV(L->top++);
+ }
+ } while (i++ < e);
+ return n;
+}
+
+LJLIB_CF(select) LJLIB_REC(.)
+{
+ int32_t n = (int32_t)(L->top - L->base);
+ if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') {
+ setintV(L->top-1, n-1);
+ return 1;
+ } else {
+ int32_t i = lj_lib_checkint(L, 1);
+ if (i < 0) i = n + i; else if (i > n) i = n;
+ if (i < 1)
+ lj_err_arg(L, 1, LJ_ERR_IDXRNG);
+ return n - i;
+ }
+}
+
+/* -- Base library: conversions ------------------------------------------- */
+
+LJLIB_ASM(tonumber) LJLIB_REC(.)
+{
+ int32_t base = lj_lib_optint(L, 2, 10);
+ if (base == 10) {
+ TValue *o = lj_lib_checkany(L, 1);
+ if (lj_strscan_numberobj(o)) {
+ copyTV(L, L->base-1, o);
+ return FFH_RES(1);
+ }
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ CTState *cts = ctype_cts(L);
+ CType *ct = lj_ctype_rawref(cts, cdataV(o)->ctypeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
+ if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) &&
+ ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) {
+ int32_t i;
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0);
+ setintV(L->base-1, i);
+ return FFH_RES(1);
+ }
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE),
+ (uint8_t *)&(L->base-1)->n, o, 0);
+ return FFH_RES(1);
+ }
+ }
+#endif
+ } else {
+ const char *p = strdata(lj_lib_checkstr(L, 1));
+ char *ep;
+ unsigned long ul;
+ if (base < 2 || base > 36)
+ lj_err_arg(L, 2, LJ_ERR_BASERNG);
+ ul = strtoul(p, &ep, base);
+ if (p != ep) {
+ while (lj_char_isspace((unsigned char)(*ep))) ep++;
+ if (*ep == '\0') {
+ if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u))
+ setintV(L->base-1, (int32_t)ul);
+ else
+ setnumV(L->base-1, (lua_Number)ul);
+ return FFH_RES(1);
+ }
+ }
+ }
+ setnilV(L->base-1);
+ return FFH_RES(1);
+}
+
+LJLIB_PUSH("nil")
+LJLIB_PUSH("false")
+LJLIB_PUSH("true")
+LJLIB_ASM(tostring) LJLIB_REC(.)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo;
+ L->top = o+1; /* Only keep one argument. */
+ if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
+ copyTV(L, L->base-1, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ } else {
+ GCstr *s;
+ if (tvisnumber(o)) {
+ s = lj_str_fromnumber(L, o);
+ } else if (tvispri(o)) {
+ s = strV(lj_lib_upvalue(L, -(int32_t)itype(o)));
+ } else {
+ if (tvisfunc(o) && isffunc(funcV(o)))
+ lua_pushfstring(L, "function: builtin#%d", funcV(o)->c.ffid);
+ else
+ lua_pushfstring(L, "%s: %p", lj_typename(o), lua_topointer(L, 1));
+ /* Note: lua_pushfstring calls the GC which may invalidate o. */
+ s = strV(L->top-1);
+ }
+ setstrV(L, L->base-1, s);
+ return FFH_RES(1);
+ }
+}
+
+/* -- Base library: throw and catch errors -------------------------------- */
+
+LJLIB_CF(error)
+{
+ int32_t level = lj_lib_optint(L, 2, 1);
+ lua_settop(L, 1);
+ if (lua_isstring(L, 1) && level > 0) {
+ luaL_where(L, level);
+ lua_pushvalue(L, 1);
+ lua_concat(L, 2);
+ }
+ return lua_error(L);
+}
+
+LJLIB_ASM(pcall) LJLIB_REC(.)
+{
+ lj_lib_checkany(L, 1);
+ lj_lib_checkfunc(L, 2); /* For xpcall only. */
+ return FFH_UNREACHABLE;
+}
+LJLIB_ASM_(xpcall) LJLIB_REC(.)
+
+/* -- Base library: load Lua code ----------------------------------------- */
+
+static int load_aux(lua_State *L, int status, int envarg)
+{
+ if (status == 0) {
+ if (tvistab(L->base+envarg-1)) {
+ GCfunc *fn = funcV(L->top-1);
+ GCtab *t = tabV(L->base+envarg-1);
+ setgcref(fn->c.env, obj2gco(t));
+ lj_gc_objbarrier(L, fn, t);
+ }
+ return 1;
+ } else {
+ setnilV(L->top-2);
+ return 2;
+ }
+}
+
+LJLIB_CF(loadfile)
+{
+ GCstr *fname = lj_lib_optstr(L, 1);
+ GCstr *mode = lj_lib_optstr(L, 2);
+ int status;
+ lua_settop(L, 3); /* Ensure env arg exists. */
+ status = luaL_loadfilex(L, fname ? strdata(fname) : NULL,
+ mode ? strdata(mode) : NULL);
+ return load_aux(L, status, 3);
+}
+
+static const char *reader_func(lua_State *L, void *ud, size_t *size)
+{
+ UNUSED(ud);
+ luaL_checkstack(L, 2, "too many nested functions");
+ copyTV(L, L->top++, L->base);
+ lua_call(L, 0, 1); /* Call user-supplied function. */
+ L->top--;
+ if (tvisnil(L->top)) {
+ *size = 0;
+ return NULL;
+ } else if (tvisstr(L->top) || tvisnumber(L->top)) {
+ copyTV(L, L->base+4, L->top); /* Anchor string in reserved stack slot. */
+ return lua_tolstring(L, 5, size);
+ } else {
+ lj_err_caller(L, LJ_ERR_RDRSTR);
+ return NULL;
+ }
+}
+
+LJLIB_CF(load)
+{
+ GCstr *name = lj_lib_optstr(L, 2);
+ GCstr *mode = lj_lib_optstr(L, 3);
+ int status;
+ if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base))) {
+ GCstr *s = lj_lib_checkstr(L, 1);
+ lua_settop(L, 4); /* Ensure env arg exists. */
+ status = luaL_loadbufferx(L, strdata(s), s->len, strdata(name ? name : s),
+ mode ? strdata(mode) : NULL);
+ } else {
+ lj_lib_checkfunc(L, 1);
+ lua_settop(L, 5); /* Reserve a slot for the string from the reader. */
+ status = lua_loadx(L, reader_func, NULL, name ? strdata(name) : "=(load)",
+ mode ? strdata(mode) : NULL);
+ }
+ return load_aux(L, status, 4);
+}
+
+LJLIB_CF(loadstring)
+{
+ return lj_cf_load(L);
+}
+
+LJLIB_CF(dofile)
+{
+ GCstr *fname = lj_lib_optstr(L, 1);
+ setnilV(L->top);
+ L->top = L->base+1;
+ if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != 0)
+ lua_error(L);
+ lua_call(L, 0, LUA_MULTRET);
+ return (int)(L->top - L->base) - 1;
+}
+
+/* -- Base library: GC control -------------------------------------------- */
+
+LJLIB_CF(gcinfo)
+{
+ setintV(L->top++, (G(L)->gc.total >> 10));
+ return 1;
+}
+
+LJLIB_CF(collectgarbage)
+{
+ int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */
+ "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul");
+ int32_t data = lj_lib_optint(L, 2, 0);
+ if (opt == LUA_GCCOUNT) {
+ setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0);
+ } else {
+ int res = lua_gc(L, opt, data);
+ if (opt == LUA_GCSTEP)
+ setboolV(L->top, res);
+ else
+ setintV(L->top, res);
+ }
+ L->top++;
+ return 1;
+}
+
+/* -- Base library: miscellaneous functions ------------------------------- */
+
+LJLIB_PUSH(top-2) /* Upvalue holds weak table. */
+LJLIB_CF(newproxy)
+{
+ lua_settop(L, 1);
+ lua_newuserdata(L, 0);
+ if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */
+ return 1;
+ } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */
+ lua_newtable(L);
+ lua_pushvalue(L, -1);
+ lua_pushboolean(L, 1);
+ lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */
+ } else { /* newproxy(proxy): inherit metatable. */
+ int validproxy = 0;
+ if (lua_getmetatable(L, 1)) {
+ lua_rawget(L, lua_upvalueindex(1));
+ validproxy = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ }
+ if (!validproxy)
+ lj_err_arg(L, 1, LJ_ERR_NOPROXY);
+ lua_getmetatable(L, 1);
+ }
+ lua_setmetatable(L, 2);
+ return 1;
+}
+
+LJLIB_PUSH("tostring")
+LJLIB_CF(print)
+{
+ ptrdiff_t i, nargs = L->top - L->base;
+ cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1)));
+ int shortcut;
+ if (tv && !tvisnil(tv)) {
+ copyTV(L, L->top++, tv);
+ } else {
+ setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1)));
+ lua_gettable(L, LUA_GLOBALSINDEX);
+ tv = L->top-1;
+ }
+ shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring);
+ for (i = 0; i < nargs; i++) {
+ const char *str;
+ size_t size;
+ cTValue *o = &L->base[i];
+ if (shortcut && tvisstr(o)) {
+ str = strVdata(o);
+ size = strV(o)->len;
+ } else if (shortcut && tvisint(o)) {
+ char buf[LJ_STR_INTBUF];
+ char *p = lj_str_bufint(buf, intV(o));
+ size = (size_t)(buf+LJ_STR_INTBUF-p);
+ str = p;
+ } else if (shortcut && tvisnum(o)) {
+ char buf[LJ_STR_NUMBUF];
+ size = lj_str_bufnum(buf, o);
+ str = buf;
+ } else {
+ copyTV(L, L->top+1, o);
+ copyTV(L, L->top, L->top-1);
+ L->top += 2;
+ lua_call(L, 1, 1);
+ str = lua_tolstring(L, -1, &size);
+ if (!str)
+ lj_err_caller(L, LJ_ERR_PRTOSTR);
+ L->top--;
+ }
+ if (i)
+ putchar('\t');
+ fwrite(str, 1, size, stdout);
+ }
+ putchar('\n');
+ return 0;
+}
+
+LJLIB_PUSH(top-3)
+LJLIB_SET(_VERSION)
+
+#include "lj_libdef.h"
+
+/* -- Coroutine library --------------------------------------------------- */
+
+#define LJLIB_MODULE_coroutine
+
+LJLIB_CF(coroutine_status)
+{
+ const char *s;
+ lua_State *co;
+ if (!(L->top > L->base && tvisthread(L->base)))
+ lj_err_arg(L, 1, LJ_ERR_NOCORO);
+ co = threadV(L->base);
+ if (co == L) s = "running";
+ else if (co->status == LUA_YIELD) s = "suspended";
+ else if (co->status != 0) s = "dead";
+ else if (co->base > tvref(co->stack)+1) s = "normal";
+ else if (co->top == co->base) s = "dead";
+ else s = "suspended";
+ lua_pushstring(L, s);
+ return 1;
+}
+
+LJLIB_CF(coroutine_running)
+{
+#if LJ_52
+ int ismain = lua_pushthread(L);
+ setboolV(L->top++, ismain);
+ return 2;
+#else
+ if (lua_pushthread(L))
+ setnilV(L->top++);
+ return 1;
+#endif
+}
+
+LJLIB_CF(coroutine_create)
+{
+ lua_State *L1;
+ if (!(L->base < L->top && tvisfunc(L->base)))
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ L1 = lua_newthread(L);
+ setfuncV(L, L1->top++, funcV(L->base));
+ return 1;
+}
+
+LJLIB_ASM(coroutine_yield)
+{
+ lj_err_caller(L, LJ_ERR_CYIELD);
+ return FFH_UNREACHABLE;
+}
+
+static int ffh_resume(lua_State *L, lua_State *co, int wrap)
+{
+ if (co->cframe != NULL || co->status > LUA_YIELD ||
+ (co->status == 0 && co->top == co->base)) {
+ ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD;
+ if (wrap) lj_err_caller(L, em);
+ setboolV(L->base-1, 0);
+ setstrV(L, L->base, lj_err_str(L, em));
+ return FFH_RES(2);
+ }
+ lj_state_growstack(co, (MSize)(L->top - L->base));
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(coroutine_resume)
+{
+ if (!(L->top > L->base && tvisthread(L->base)))
+ lj_err_arg(L, 1, LJ_ERR_NOCORO);
+ return ffh_resume(L, threadV(L->base), 0);
+}
+
+LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux)
+{
+ return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1);
+}
+
+/* Inline declarations. */
+LJ_ASMF void lj_ff_coroutine_wrap_aux(void);
+#if !(LJ_TARGET_MIPS && defined(ljamalg_c))
+LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
+ lua_State *co);
+#endif
+
+/* Error handler, called from assembler VM. */
+void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co)
+{
+ co->top--; copyTV(L, L->top, co->top); L->top++;
+ if (tvisstr(L->top-1))
+ lj_err_callermsg(L, strVdata(L->top-1));
+ else
+ lj_err_run(L);
+}
+
+/* Forward declaration. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn);
+
+LJLIB_CF(coroutine_wrap)
+{
+ lj_cf_coroutine_create(L);
+ lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1);
+ setpc_wrap_aux(L, funcV(L->top-1));
+ return 1;
+}
+
+#include "lj_libdef.h"
+
+/* Fix the PC of wrap_aux. Really ugly workaround. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn)
+{
+ setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void newproxy_weaktable(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "kv"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+}
+
+LUALIB_API int luaopen_base(lua_State *L)
+{
+ /* NOBARRIER: Table and value are the same. */
+ GCtab *env = tabref(L->env);
+ settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env);
+ lua_pushliteral(L, LUA_VERSION); /* top-3. */
+ newproxy_weaktable(L); /* top-2. */
+ LJ_LIB_REG(L, "_G", base);
+ LJ_LIB_REG(L, LUA_COLIBNAME, coroutine);
+ return 2;
+}
+
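The load() implementation above adapts a user-supplied Lua function to the VM's reader protocol through reader_func(). For context, the same protocol at the C level is a lua_Reader callback handed to lua_load(); the following is an illustrative sketch under that assumption, not part of the patch:

  #include <stdio.h>
  #include <string.h>
  #include "lua.h"
  #include "lauxlib.h"
  #include "lualib.h"

  /* Reader that hands the whole chunk to lua_load() in one piece and then
  ** signals end-of-input by returning NULL -- the same contract reader_func()
  ** implements on behalf of Lua-level load(). */
  static const char *chunk_reader(lua_State *L, void *ud, size_t *size)
  {
    const char **src = (const char **)ud;
    const char *s = *src;
    (void)L;
    if (s == NULL) { *size = 0; return NULL; }
    *size = strlen(s);
    *src = NULL;                       /* next call reports end-of-input */
    return s;
  }

  int main(void)
  {
    lua_State *L = luaL_newstate();
    const char *src = "return 1 + 2";
    luaL_openlibs(L);
    if (lua_load(L, chunk_reader, &src, "=example") || lua_pcall(L, 0, 1, 0))
      fprintf(stderr, "%s\n", lua_tostring(L, -1));
    else
      printf("result: %g\n", lua_tonumber(L, -1));  /* prints 3 */
    lua_close(L);
    return 0;
  }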
diff --git a/3rdparty/lua/src/lib_bit.c b/3rdparty/lua/src/lib_bit.c
index 8785661..93fead9 100644
--- a/3rdparty/lua/src/lib_bit.c
+++ b/3rdparty/lua/src/lib_bit.c
@@ -1,74 +1,74 @@
-/*
-** Bit manipulation library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lib_bit_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_lib.h"
-
-/* ------------------------------------------------------------------------ */
-
-#define LJLIB_MODULE_bit
-
-LJLIB_ASM(bit_tobit) LJLIB_REC(bit_unary IR_TOBIT)
-{
- lj_lib_checknumber(L, 1);
- return FFH_RETRY;
-}
-LJLIB_ASM_(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
-LJLIB_ASM_(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
-
-LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL)
-{
- lj_lib_checknumber(L, 1);
- lj_lib_checkbit(L, 2);
- return FFH_RETRY;
-}
-LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR)
-LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR)
-LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL)
-LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR)
-
-LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND)
-{
- int i = 0;
- do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
- return FFH_RETRY;
-}
-LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR)
-LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR)
-
-/* ------------------------------------------------------------------------ */
-
-LJLIB_CF(bit_tohex)
-{
- uint32_t b = (uint32_t)lj_lib_checkbit(L, 1);
- int32_t i, n = L->base+1 >= L->top ? 8 : lj_lib_checkbit(L, 2);
- const char *hexdigits = "0123456789abcdef";
- char buf[8];
- if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; }
- if (n > 8) n = 8;
- for (i = n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; }
- lua_pushlstring(L, buf, (size_t)n);
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-#include "lj_libdef.h"
-
-LUALIB_API int luaopen_bit(lua_State *L)
-{
- LJ_LIB_REG(L, LUA_BITLIBNAME, bit);
- return 1;
-}
-
+/*
+** Bit manipulation library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_bit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_bit
+
+LJLIB_ASM(bit_tobit) LJLIB_REC(bit_unary IR_TOBIT)
+{
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
+LJLIB_ASM_(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
+
+LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL)
+{
+ lj_lib_checknumber(L, 1);
+ lj_lib_checkbit(L, 2);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR)
+LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR)
+LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL)
+LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR)
+
+LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND)
+{
+ int i = 0;
+ do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR)
+LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR)
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(bit_tohex)
+{
+ uint32_t b = (uint32_t)lj_lib_checkbit(L, 1);
+ int32_t i, n = L->base+1 >= L->top ? 8 : lj_lib_checkbit(L, 2);
+ const char *hexdigits = "0123456789abcdef";
+ char buf[8];
+ if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; }
+ if (n > 8) n = 8;
+ for (i = n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; }
+ lua_pushlstring(L, buf, (size_t)n);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_bit(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_BITLIBNAME, bit);
+ return 1;
+}
+
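The bit module above exposes tobit, bnot, bswap, the shift/rotate operations, band/bor/bxor and tohex as assembler fast functions with the C fallbacks shown. A small, hypothetical embedding example exercising the module through the public API (not part of the patch, and assuming LuaJIT's luaL_openlibs registers the bit library as usual):

  #include <stdio.h>
  #include "lua.h"
  #include "lauxlib.h"
  #include "lualib.h"

  int main(void)
  {
    lua_State *L = luaL_newstate();
    luaL_openlibs(L);                    /* registers the bit module */
    /* Exercise band() and tohex() from the module defined above. */
    if (luaL_dostring(L,
        "local bit = require('bit')\n"
        "return bit.tohex(bit.band(0xffff00ff, 0x00ffff00))"))
      fprintf(stderr, "%s\n", lua_tostring(L, -1));
    else
      printf("band/tohex -> %s\n", lua_tostring(L, -1)); /* 00ff0000 */
    lua_pop(L, 1);
    lua_close(L);
    return 0;
  }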
diff --git a/3rdparty/lua/src/lib_debug.c b/3rdparty/lua/src/lib_debug.c
index 6d5ff2b..38e6054 100644
--- a/3rdparty/lua/src/lib_debug.c
+++ b/3rdparty/lua/src/lib_debug.c
@@ -1,405 +1,405 @@
-/*
-** Debug library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lib_debug_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_lib.h"
-
-/* ------------------------------------------------------------------------ */
-
-#define LJLIB_MODULE_debug
-
-LJLIB_CF(debug_getregistry)
-{
- copyTV(L, L->top++, registry(L));
- return 1;
-}
-
-LJLIB_CF(debug_getmetatable)
-{
- lj_lib_checkany(L, 1);
- if (!lua_getmetatable(L, 1)) {
- setnilV(L->top-1);
- }
- return 1;
-}
-
-LJLIB_CF(debug_setmetatable)
-{
- lj_lib_checktabornil(L, 2);
- L->top = L->base+2;
- lua_setmetatable(L, 1);
-#if !LJ_52
- setboolV(L->top-1, 1);
-#endif
- return 1;
-}
-
-LJLIB_CF(debug_getfenv)
-{
- lj_lib_checkany(L, 1);
- lua_getfenv(L, 1);
- return 1;
-}
-
-LJLIB_CF(debug_setfenv)
-{
- lj_lib_checktab(L, 2);
- L->top = L->base+2;
- if (!lua_setfenv(L, 1))
- lj_err_caller(L, LJ_ERR_SETFENV);
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-static void settabss(lua_State *L, const char *i, const char *v)
-{
- lua_pushstring(L, v);
- lua_setfield(L, -2, i);
-}
-
-static void settabsi(lua_State *L, const char *i, int v)
-{
- lua_pushinteger(L, v);
- lua_setfield(L, -2, i);
-}
-
-static void settabsb(lua_State *L, const char *i, int v)
-{
- lua_pushboolean(L, v);
- lua_setfield(L, -2, i);
-}
-
-static lua_State *getthread(lua_State *L, int *arg)
-{
- if (L->base < L->top && tvisthread(L->base)) {
- *arg = 1;
- return threadV(L->base);
- } else {
- *arg = 0;
- return L;
- }
-}
-
-static void treatstackoption(lua_State *L, lua_State *L1, const char *fname)
-{
- if (L == L1) {
- lua_pushvalue(L, -2);
- lua_remove(L, -3);
- }
- else
- lua_xmove(L1, L, 1);
- lua_setfield(L, -2, fname);
-}
-
-LJLIB_CF(debug_getinfo)
-{
- lj_Debug ar;
- int arg, opt_f = 0, opt_L = 0;
- lua_State *L1 = getthread(L, &arg);
- const char *options = luaL_optstring(L, arg+2, "flnSu");
- if (lua_isnumber(L, arg+1)) {
- if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), (lua_Debug *)&ar)) {
- setnilV(L->top-1);
- return 1;
- }
- } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) {
- options = lua_pushfstring(L, ">%s", options);
- setfuncV(L1, L1->top++, funcV(L->base+arg));
- } else {
- lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL);
- }
- if (!lj_debug_getinfo(L1, options, &ar, 1))
- lj_err_arg(L, arg+2, LJ_ERR_INVOPT);
- lua_createtable(L, 0, 16); /* Create result table. */
- for (; *options; options++) {
- switch (*options) {
- case 'S':
- settabss(L, "source", ar.source);
- settabss(L, "short_src", ar.short_src);
- settabsi(L, "linedefined", ar.linedefined);
- settabsi(L, "lastlinedefined", ar.lastlinedefined);
- settabss(L, "what", ar.what);
- break;
- case 'l':
- settabsi(L, "currentline", ar.currentline);
- break;
- case 'u':
- settabsi(L, "nups", ar.nups);
- settabsi(L, "nparams", ar.nparams);
- settabsb(L, "isvararg", ar.isvararg);
- break;
- case 'n':
- settabss(L, "name", ar.name);
- settabss(L, "namewhat", ar.namewhat);
- break;
- case 'f': opt_f = 1; break;
- case 'L': opt_L = 1; break;
- default: break;
- }
- }
- if (opt_L) treatstackoption(L, L1, "activelines");
- if (opt_f) treatstackoption(L, L1, "func");
- return 1; /* Return result table. */
-}
-
-LJLIB_CF(debug_getlocal)
-{
- int arg;
- lua_State *L1 = getthread(L, &arg);
- lua_Debug ar;
- const char *name;
- int slot = lj_lib_checkint(L, arg+2);
- if (tvisfunc(L->base+arg)) {
- L->top = L->base+arg+1;
- lua_pushstring(L, lua_getlocal(L, NULL, slot));
- return 1;
- }
- if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
- lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
- name = lua_getlocal(L1, &ar, slot);
- if (name) {
- lua_xmove(L1, L, 1);
- lua_pushstring(L, name);
- lua_pushvalue(L, -2);
- return 2;
- } else {
- setnilV(L->top-1);
- return 1;
- }
-}
-
-LJLIB_CF(debug_setlocal)
-{
- int arg;
- lua_State *L1 = getthread(L, &arg);
- lua_Debug ar;
- TValue *tv;
- if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
- lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
- tv = lj_lib_checkany(L, arg+3);
- copyTV(L1, L1->top++, tv);
- lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2)));
- return 1;
-}
-
-static int debug_getupvalue(lua_State *L, int get)
-{
- int32_t n = lj_lib_checkint(L, 2);
- const char *name;
- lj_lib_checkfunc(L, 1);
- name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
- if (name) {
- lua_pushstring(L, name);
- if (!get) return 1;
- copyTV(L, L->top, L->top-2);
- L->top++;
- return 2;
- }
- return 0;
-}
-
-LJLIB_CF(debug_getupvalue)
-{
- return debug_getupvalue(L, 1);
-}
-
-LJLIB_CF(debug_setupvalue)
-{
- lj_lib_checkany(L, 3);
- return debug_getupvalue(L, 0);
-}
-
-LJLIB_CF(debug_upvalueid)
-{
- GCfunc *fn = lj_lib_checkfunc(L, 1);
- int32_t n = lj_lib_checkint(L, 2) - 1;
- if ((uint32_t)n >= fn->l.nupvalues)
- lj_err_arg(L, 2, LJ_ERR_IDXRNG);
- setlightudV(L->top-1, isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
- (void *)&fn->c.upvalue[n]);
- return 1;
-}
-
-LJLIB_CF(debug_upvaluejoin)
-{
- GCfunc *fn[2];
- GCRef *p[2];
- int i;
- for (i = 0; i < 2; i++) {
- int32_t n;
- fn[i] = lj_lib_checkfunc(L, 2*i+1);
- if (!isluafunc(fn[i]))
- lj_err_arg(L, 2*i+1, LJ_ERR_NOLFUNC);
- n = lj_lib_checkint(L, 2*i+2) - 1;
- if ((uint32_t)n >= fn[i]->l.nupvalues)
- lj_err_arg(L, 2*i+2, LJ_ERR_IDXRNG);
- p[i] = &fn[i]->l.uvptr[n];
- }
- setgcrefr(*p[0], *p[1]);
- lj_gc_objbarrier(L, fn[0], gcref(*p[1]));
- return 0;
-}
-
-#if LJ_52
-LJLIB_CF(debug_getuservalue)
-{
- TValue *o = L->base;
- if (o < L->top && tvisudata(o))
- settabV(L, o, tabref(udataV(o)->env));
- else
- setnilV(o);
- L->top = o+1;
- return 1;
-}
-
-LJLIB_CF(debug_setuservalue)
-{
- TValue *o = L->base;
- if (!(o < L->top && tvisudata(o)))
- lj_err_argt(L, 1, LUA_TUSERDATA);
- if (!(o+1 < L->top && tvistab(o+1)))
- lj_err_argt(L, 2, LUA_TTABLE);
- L->top = o+2;
- lua_setfenv(L, 1);
- return 1;
-}
-#endif
-
-/* ------------------------------------------------------------------------ */
-
-static const char KEY_HOOK = 'h';
-
-static void hookf(lua_State *L, lua_Debug *ar)
-{
- static const char *const hooknames[] =
- {"call", "return", "line", "count", "tail return"};
- lua_pushlightuserdata(L, (void *)&KEY_HOOK);
- lua_rawget(L, LUA_REGISTRYINDEX);
- if (lua_isfunction(L, -1)) {
- lua_pushstring(L, hooknames[(int)ar->event]);
- if (ar->currentline >= 0)
- lua_pushinteger(L, ar->currentline);
- else lua_pushnil(L);
- lua_call(L, 2, 0);
- }
-}
-
-static int makemask(const char *smask, int count)
-{
- int mask = 0;
- if (strchr(smask, 'c')) mask |= LUA_MASKCALL;
- if (strchr(smask, 'r')) mask |= LUA_MASKRET;
- if (strchr(smask, 'l')) mask |= LUA_MASKLINE;
- if (count > 0) mask |= LUA_MASKCOUNT;
- return mask;
-}
-
-static char *unmakemask(int mask, char *smask)
-{
- int i = 0;
- if (mask & LUA_MASKCALL) smask[i++] = 'c';
- if (mask & LUA_MASKRET) smask[i++] = 'r';
- if (mask & LUA_MASKLINE) smask[i++] = 'l';
- smask[i] = '\0';
- return smask;
-}
-
-LJLIB_CF(debug_sethook)
-{
- int arg, mask, count;
- lua_Hook func;
- (void)getthread(L, &arg);
- if (lua_isnoneornil(L, arg+1)) {
- lua_settop(L, arg+1);
- func = NULL; mask = 0; count = 0; /* turn off hooks */
- } else {
- const char *smask = luaL_checkstring(L, arg+2);
- luaL_checktype(L, arg+1, LUA_TFUNCTION);
- count = luaL_optint(L, arg+3, 0);
- func = hookf; mask = makemask(smask, count);
- }
- lua_pushlightuserdata(L, (void *)&KEY_HOOK);
- lua_pushvalue(L, arg+1);
- lua_rawset(L, LUA_REGISTRYINDEX);
- lua_sethook(L, func, mask, count);
- return 0;
-}
-
-LJLIB_CF(debug_gethook)
-{
- char buff[5];
- int mask = lua_gethookmask(L);
- lua_Hook hook = lua_gethook(L);
- if (hook != NULL && hook != hookf) { /* external hook? */
- lua_pushliteral(L, "external hook");
- } else {
- lua_pushlightuserdata(L, (void *)&KEY_HOOK);
- lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
- }
- lua_pushstring(L, unmakemask(mask, buff));
- lua_pushinteger(L, lua_gethookcount(L));
- return 3;
-}
-
-/* ------------------------------------------------------------------------ */
-
-LJLIB_CF(debug_debug)
-{
- for (;;) {
- char buffer[250];
- fputs("lua_debug> ", stderr);
- if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
- strcmp(buffer, "cont\n") == 0)
- return 0;
- if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
- lua_pcall(L, 0, 0, 0)) {
- fputs(lua_tostring(L, -1), stderr);
- fputs("\n", stderr);
- }
- lua_settop(L, 0); /* remove eventual returns */
- }
-}
-
-/* ------------------------------------------------------------------------ */
-
-#define LEVELS1 12 /* size of the first part of the stack */
-#define LEVELS2 10 /* size of the second part of the stack */
-
-LJLIB_CF(debug_traceback)
-{
- int arg;
- lua_State *L1 = getthread(L, &arg);
- const char *msg = lua_tostring(L, arg+1);
- if (msg == NULL && L->top > L->base+arg)
- L->top = L->base+arg+1;
- else
- luaL_traceback(L, L1, msg, lj_lib_optint(L, arg+2, (L == L1)));
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-#include "lj_libdef.h"
-
-LUALIB_API int luaopen_debug(lua_State *L)
-{
- LJ_LIB_REG(L, LUA_DBLIBNAME, debug);
- return 1;
-}
-
+/*
+** Debug library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_debug_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_debug
+
+LJLIB_CF(debug_getregistry)
+{
+ copyTV(L, L->top++, registry(L));
+ return 1;
+}
+
+LJLIB_CF(debug_getmetatable)
+{
+ lj_lib_checkany(L, 1);
+ if (!lua_getmetatable(L, 1)) {
+ setnilV(L->top-1);
+ }
+ return 1;
+}
+
+LJLIB_CF(debug_setmetatable)
+{
+ lj_lib_checktabornil(L, 2);
+ L->top = L->base+2;
+ lua_setmetatable(L, 1);
+#if !LJ_52
+ setboolV(L->top-1, 1);
+#endif
+ return 1;
+}
+
+LJLIB_CF(debug_getfenv)
+{
+ lj_lib_checkany(L, 1);
+ lua_getfenv(L, 1);
+ return 1;
+}
+
+LJLIB_CF(debug_setfenv)
+{
+ lj_lib_checktab(L, 2);
+ L->top = L->base+2;
+ if (!lua_setfenv(L, 1))
+ lj_err_caller(L, LJ_ERR_SETFENV);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void settabss(lua_State *L, const char *i, const char *v)
+{
+ lua_pushstring(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static void settabsi(lua_State *L, const char *i, int v)
+{
+ lua_pushinteger(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static void settabsb(lua_State *L, const char *i, int v)
+{
+ lua_pushboolean(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static lua_State *getthread(lua_State *L, int *arg)
+{
+ if (L->base < L->top && tvisthread(L->base)) {
+ *arg = 1;
+ return threadV(L->base);
+ } else {
+ *arg = 0;
+ return L;
+ }
+}
+
+static void treatstackoption(lua_State *L, lua_State *L1, const char *fname)
+{
+ if (L == L1) {
+ lua_pushvalue(L, -2);
+ lua_remove(L, -3);
+ }
+ else
+ lua_xmove(L1, L, 1);
+ lua_setfield(L, -2, fname);
+}
+
+LJLIB_CF(debug_getinfo)
+{
+ lj_Debug ar;
+ int arg, opt_f = 0, opt_L = 0;
+ lua_State *L1 = getthread(L, &arg);
+ const char *options = luaL_optstring(L, arg+2, "flnSu");
+ if (lua_isnumber(L, arg+1)) {
+ if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), (lua_Debug *)&ar)) {
+ setnilV(L->top-1);
+ return 1;
+ }
+ } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) {
+ options = lua_pushfstring(L, ">%s", options);
+ setfuncV(L1, L1->top++, funcV(L->base+arg));
+ } else {
+ lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL);
+ }
+ if (!lj_debug_getinfo(L1, options, &ar, 1))
+ lj_err_arg(L, arg+2, LJ_ERR_INVOPT);
+ lua_createtable(L, 0, 16); /* Create result table. */
+ for (; *options; options++) {
+ switch (*options) {
+ case 'S':
+ settabss(L, "source", ar.source);
+ settabss(L, "short_src", ar.short_src);
+ settabsi(L, "linedefined", ar.linedefined);
+ settabsi(L, "lastlinedefined", ar.lastlinedefined);
+ settabss(L, "what", ar.what);
+ break;
+ case 'l':
+ settabsi(L, "currentline", ar.currentline);
+ break;
+ case 'u':
+ settabsi(L, "nups", ar.nups);
+ settabsi(L, "nparams", ar.nparams);
+ settabsb(L, "isvararg", ar.isvararg);
+ break;
+ case 'n':
+ settabss(L, "name", ar.name);
+ settabss(L, "namewhat", ar.namewhat);
+ break;
+ case 'f': opt_f = 1; break;
+ case 'L': opt_L = 1; break;
+ default: break;
+ }
+ }
+ if (opt_L) treatstackoption(L, L1, "activelines");
+ if (opt_f) treatstackoption(L, L1, "func");
+ return 1; /* Return result table. */
+}
+
+LJLIB_CF(debug_getlocal)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ const char *name;
+ int slot = lj_lib_checkint(L, arg+2);
+ if (tvisfunc(L->base+arg)) {
+ L->top = L->base+arg+1;
+ lua_pushstring(L, lua_getlocal(L, NULL, slot));
+ return 1;
+ }
+ if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
+ lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
+ name = lua_getlocal(L1, &ar, slot);
+ if (name) {
+ lua_xmove(L1, L, 1);
+ lua_pushstring(L, name);
+ lua_pushvalue(L, -2);
+ return 2;
+ } else {
+ setnilV(L->top-1);
+ return 1;
+ }
+}
+
+LJLIB_CF(debug_setlocal)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ TValue *tv;
+ if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
+ lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
+ tv = lj_lib_checkany(L, arg+3);
+ copyTV(L1, L1->top++, tv);
+ lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2)));
+ return 1;
+}
+
+static int debug_getupvalue(lua_State *L, int get)
+{
+ int32_t n = lj_lib_checkint(L, 2);
+ const char *name;
+ lj_lib_checkfunc(L, 1);
+ name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
+ if (name) {
+ lua_pushstring(L, name);
+ if (!get) return 1;
+ copyTV(L, L->top, L->top-2);
+ L->top++;
+ return 2;
+ }
+ return 0;
+}
+
+LJLIB_CF(debug_getupvalue)
+{
+ return debug_getupvalue(L, 1);
+}
+
+LJLIB_CF(debug_setupvalue)
+{
+ lj_lib_checkany(L, 3);
+ return debug_getupvalue(L, 0);
+}
+
+LJLIB_CF(debug_upvalueid)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ int32_t n = lj_lib_checkint(L, 2) - 1;
+ if ((uint32_t)n >= fn->l.nupvalues)
+ lj_err_arg(L, 2, LJ_ERR_IDXRNG);
+ setlightudV(L->top-1, isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
+ (void *)&fn->c.upvalue[n]);
+ return 1;
+}
+
+LJLIB_CF(debug_upvaluejoin)
+{
+ GCfunc *fn[2];
+ GCRef *p[2];
+ int i;
+ for (i = 0; i < 2; i++) {
+ int32_t n;
+ fn[i] = lj_lib_checkfunc(L, 2*i+1);
+ if (!isluafunc(fn[i]))
+ lj_err_arg(L, 2*i+1, LJ_ERR_NOLFUNC);
+ n = lj_lib_checkint(L, 2*i+2) - 1;
+ if ((uint32_t)n >= fn[i]->l.nupvalues)
+ lj_err_arg(L, 2*i+2, LJ_ERR_IDXRNG);
+ p[i] = &fn[i]->l.uvptr[n];
+ }
+ setgcrefr(*p[0], *p[1]);
+ lj_gc_objbarrier(L, fn[0], gcref(*p[1]));
+ return 0;
+}
+
+#if LJ_52
+LJLIB_CF(debug_getuservalue)
+{
+ TValue *o = L->base;
+ if (o < L->top && tvisudata(o))
+ settabV(L, o, tabref(udataV(o)->env));
+ else
+ setnilV(o);
+ L->top = o+1;
+ return 1;
+}
+
+LJLIB_CF(debug_setuservalue)
+{
+ TValue *o = L->base;
+ if (!(o < L->top && tvisudata(o)))
+ lj_err_argt(L, 1, LUA_TUSERDATA);
+ if (!(o+1 < L->top && tvistab(o+1)))
+ lj_err_argt(L, 2, LUA_TTABLE);
+ L->top = o+2;
+ lua_setfenv(L, 1);
+ return 1;
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static const char KEY_HOOK = 'h';
+
+static void hookf(lua_State *L, lua_Debug *ar)
+{
+ static const char *const hooknames[] =
+ {"call", "return", "line", "count", "tail return"};
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_rawget(L, LUA_REGISTRYINDEX);
+ if (lua_isfunction(L, -1)) {
+ lua_pushstring(L, hooknames[(int)ar->event]);
+ if (ar->currentline >= 0)
+ lua_pushinteger(L, ar->currentline);
+ else lua_pushnil(L);
+ lua_call(L, 2, 0);
+ }
+}
+
+static int makemask(const char *smask, int count)
+{
+ int mask = 0;
+ if (strchr(smask, 'c')) mask |= LUA_MASKCALL;
+ if (strchr(smask, 'r')) mask |= LUA_MASKRET;
+ if (strchr(smask, 'l')) mask |= LUA_MASKLINE;
+ if (count > 0) mask |= LUA_MASKCOUNT;
+ return mask;
+}
+
+static char *unmakemask(int mask, char *smask)
+{
+ int i = 0;
+ if (mask & LUA_MASKCALL) smask[i++] = 'c';
+ if (mask & LUA_MASKRET) smask[i++] = 'r';
+ if (mask & LUA_MASKLINE) smask[i++] = 'l';
+ smask[i] = '\0';
+ return smask;
+}
+
+LJLIB_CF(debug_sethook)
+{
+ int arg, mask, count;
+ lua_Hook func;
+ (void)getthread(L, &arg);
+ if (lua_isnoneornil(L, arg+1)) {
+ lua_settop(L, arg+1);
+ func = NULL; mask = 0; count = 0; /* turn off hooks */
+ } else {
+ const char *smask = luaL_checkstring(L, arg+2);
+ luaL_checktype(L, arg+1, LUA_TFUNCTION);
+ count = luaL_optint(L, arg+3, 0);
+ func = hookf; mask = makemask(smask, count);
+ }
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_pushvalue(L, arg+1);
+ lua_rawset(L, LUA_REGISTRYINDEX);
+ lua_sethook(L, func, mask, count);
+ return 0;
+}
+
+LJLIB_CF(debug_gethook)
+{
+ char buff[5];
+ int mask = lua_gethookmask(L);
+ lua_Hook hook = lua_gethook(L);
+ if (hook != NULL && hook != hookf) { /* external hook? */
+ lua_pushliteral(L, "external hook");
+ } else {
+ lua_pushlightuserdata(L, (void *)&KEY_HOOK);
+ lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
+ }
+ lua_pushstring(L, unmakemask(mask, buff));
+ lua_pushinteger(L, lua_gethookcount(L));
+ return 3;
+}
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(debug_debug)
+{
+ for (;;) {
+ char buffer[250];
+ fputs("lua_debug> ", stderr);
+ if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
+ strcmp(buffer, "cont\n") == 0)
+ return 0;
+ if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
+ lua_pcall(L, 0, 0, 0)) {
+ fputs(lua_tostring(L, -1), stderr);
+ fputs("\n", stderr);
+ }
+ lua_settop(L, 0); /* remove eventual returns */
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define LEVELS1 12 /* size of the first part of the stack */
+#define LEVELS2 10 /* size of the second part of the stack */
+
+LJLIB_CF(debug_traceback)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ const char *msg = lua_tostring(L, arg+1);
+ if (msg == NULL && L->top > L->base+arg)
+ L->top = L->base+arg+1;
+ else
+ luaL_traceback(L, L1, msg, lj_lib_optint(L, arg+2, (L == L1)));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_debug(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_DBLIBNAME, debug);
+ return 1;
+}
+
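debug.sethook() above translates its string mask into LUA_MASKCALL/LUA_MASKRET/LUA_MASKLINE bits via makemask() and installs hookf() as the C hook. Doing the equivalent directly from C looks roughly like the following sketch (illustrative only, not part of the patch):

  #include <stdio.h>
  #include "lua.h"
  #include "lauxlib.h"
  #include "lualib.h"

  /* Line hook: the C-level counterpart of the hookf()/makemask() pair above. */
  static void line_hook(lua_State *L, lua_Debug *ar)
  {
    lua_getinfo(L, "Sl", ar);            /* fill in short_src and currentline */
    printf("%s:%d\n", ar->short_src, ar->currentline);
  }

  int main(void)
  {
    lua_State *L = luaL_newstate();
    luaL_openlibs(L);
    lua_sethook(L, line_hook, LUA_MASKLINE, 0);  /* same mask the 'l' flag selects */
    luaL_dostring(L, "local x = 1\nx = x + 1\nreturn x");
    lua_sethook(L, NULL, 0, 0);                  /* turn hooks off again */
    lua_close(L);
    return 0;
  }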
diff --git a/3rdparty/lua/src/lib_ffi.c b/3rdparty/lua/src/lib_ffi.c
index 7aeedba..f61fabc 100644
--- a/3rdparty/lua/src/lib_ffi.c
+++ b/3rdparty/lua/src/lib_ffi.c
@@ -1,851 +1,850 @@
-/*
-** FFI library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lib_ffi_c
-#define LUA_LIB
-
-#include <errno.h>
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_ctype.h"
-#include "lj_cparse.h"
-#include "lj_cdata.h"
-#include "lj_cconv.h"
-#include "lj_carith.h"
-#include "lj_ccall.h"
-#include "lj_ccallback.h"
-#include "lj_clib.h"
-#include "lj_ff.h"
-#include "lj_lib.h"
-
-/* -- C type checks ------------------------------------------------------- */
-
-/* Check first argument for a C type and returns its ID. */
-static CTypeID ffi_checkctype(lua_State *L, CTState *cts, TValue *param)
-{
- TValue *o = L->base;
- if (!(o < L->top)) {
- err_argtype:
- lj_err_argtype(L, 1, "C type");
- }
- if (tvisstr(o)) { /* Parse an abstract C type declaration. */
- GCstr *s = strV(o);
- CPState cp;
- int errcode;
- cp.L = L;
- cp.cts = cts;
- cp.srcname = strdata(s);
- cp.p = strdata(s);
- cp.param = param;
- cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
- errcode = lj_cparse(&cp);
- if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
- return cp.val.id;
- } else {
- GCcdata *cd;
- if (!tviscdata(o)) goto err_argtype;
- if (param && param < L->top) lj_err_arg(L, 1, LJ_ERR_FFI_NUMPARAM);
- cd = cdataV(o);
- return cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->ctypeid;
- }
-}
-
-/* Check argument for C data and return it. */
-static GCcdata *ffi_checkcdata(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top && tviscdata(o)))
- lj_err_argt(L, narg, LUA_TCDATA);
- return cdataV(o);
-}
-
-/* Convert argument to C pointer. */
-static void *ffi_checkptr(lua_State *L, int narg, CTypeID id)
-{
- CTState *cts = ctype_cts(L);
- TValue *o = L->base + narg-1;
- void *p;
- if (o >= L->top)
- lj_err_arg(L, narg, LJ_ERR_NOVAL);
- lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg));
- return p;
-}
-
-/* Convert argument to int32_t. */
-static int32_t ffi_checkint(lua_State *L, int narg)
-{
- CTState *cts = ctype_cts(L);
- TValue *o = L->base + narg-1;
- int32_t i;
- if (o >= L->top)
- lj_err_arg(L, narg, LJ_ERR_NOVAL);
- lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o,
- CCF_ARG(narg));
- return i;
-}
-
-/* -- C type metamethods -------------------------------------------------- */
-
-#define LJLIB_MODULE_ffi_meta
-
-/* Handle ctype __index/__newindex metamethods. */
-static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm)
-{
- CTypeID id = ctype_typeid(cts, ct);
- cTValue *tv = lj_ctype_meta(cts, id, mm);
- TValue *base = L->base;
- if (!tv) {
- const char *s;
- err_index:
- s = strdata(lj_ctype_repr(L, id, NULL));
- if (tvisstr(L->base+1)) {
- lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1));
- } else {
- const char *key = tviscdata(L->base+1) ?
- strdata(lj_ctype_repr(L, cdataV(L->base+1)->ctypeid, NULL)) :
- lj_typename(L->base+1);
- lj_err_callerv(L, LJ_ERR_FFI_BADIDXW, s, key);
- }
- }
- if (!tvisfunc(tv)) {
- if (mm == MM_index) {
- cTValue *o = lj_meta_tget(L, tv, base+1);
- if (o) {
- if (tvisnil(o)) goto err_index;
- copyTV(L, L->top-1, o);
- return 1;
- }
- } else {
- TValue *o = lj_meta_tset(L, tv, base+1);
- if (o) {
- copyTV(L, o, base+2);
- return 0;
- }
- }
- copyTV(L, base, L->top);
- tv = L->top-1;
- }
- return lj_meta_tailcall(L, tv);
-}
-
-LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0)
-{
- CTState *cts = ctype_cts(L);
- CTInfo qual = 0;
- CType *ct;
- uint8_t *p;
- TValue *o = L->base;
- if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. */
- lj_err_argt(L, 1, LUA_TCDATA);
- ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
- if ((qual & 1))
- return ffi_index_meta(L, cts, ct, MM_index);
- if (lj_cdata_get(cts, ct, L->top-1, p))
- lj_gc_check(L);
- return 1;
-}
-
-LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1)
-{
- CTState *cts = ctype_cts(L);
- CTInfo qual = 0;
- CType *ct;
- uint8_t *p;
- TValue *o = L->base;
- if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. */
- lj_err_argt(L, 1, LUA_TCDATA);
- ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
- if ((qual & 1)) {
- if ((qual & CTF_CONST))
- lj_err_caller(L, LJ_ERR_FFI_WRCONST);
- return ffi_index_meta(L, cts, ct, MM_newindex);
- }
- lj_cdata_set(cts, ct, p, o+2, qual);
- return 0;
-}
-
-/* Common handler for cdata arithmetic. */
-static int ffi_arith(lua_State *L)
-{
- MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq);
- return lj_carith_op(L, mm);
-}
-
-/* The following functions must be in contiguous ORDER MM. */
-LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat)
-{
- return ffi_arith(L);
-}
-
-/* Forward declaration. */
-static int lj_cf_ffi_new(lua_State *L);
-
-LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call)
-{
- CTState *cts = ctype_cts(L);
- GCcdata *cd = ffi_checkcdata(L, 1);
- CTypeID id = cd->ctypeid;
- CType *ct;
- cTValue *tv;
- MMS mm = MM_call;
- if (cd->ctypeid == CTID_CTYPEID) {
- id = *(CTypeID *)cdataptr(cd);
- mm = MM_new;
- } else {
- int ret = lj_ccall_func(L, cd);
- if (ret >= 0)
- return ret;
- }
- /* Handle ctype __call/__new metamethod. */
- ct = ctype_raw(cts, id);
- if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
- tv = lj_ctype_meta(cts, id, mm);
- if (tv)
- return lj_meta_tailcall(L, tv);
- else if (mm == MM_call)
- lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL)));
- return lj_cf_ffi_new(L);
-}
-
-LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow)
-{
- return ffi_arith(L);
-}
-
-LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm)
-{
- return ffi_arith(L);
-}
-/* End of contiguous ORDER MM. */
-
-LJLIB_CF(ffi_meta___tostring)
-{
- GCcdata *cd = ffi_checkcdata(L, 1);
- const char *msg = "cdata<%s>: %p";
- CTypeID id = cd->ctypeid;
- void *p = cdataptr(cd);
- if (id == CTID_CTYPEID) {
- msg = "ctype<%s>";
- id = *(CTypeID *)p;
- } else {
- CTState *cts = ctype_cts(L);
- CType *ct = ctype_raw(cts, id);
- if (ctype_isref(ct->info)) {
- p = *(void **)p;
- ct = ctype_rawchild(cts, ct);
- }
- if (ctype_iscomplex(ct->info)) {
- setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size));
- goto checkgc;
- } else if (ct->size == 8 && ctype_isinteger(ct->info)) {
- setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd),
- (ct->info & CTF_UNSIGNED)));
- goto checkgc;
- } else if (ctype_isfunc(ct->info)) {
- p = *(void **)p;
- } else if (ctype_isenum(ct->info)) {
- msg = "cdata<%s>: %d";
- p = (void *)(uintptr_t)*(uint32_t **)p;
- } else {
- if (ctype_isptr(ct->info)) {
- p = cdata_getptr(p, ct->size);
- ct = ctype_rawchild(cts, ct);
- }
- if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) {
- /* Handle ctype __tostring metamethod. */
- cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring);
- if (tv)
- return lj_meta_tailcall(L, tv);
- }
- }
- }
- lj_str_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p);
-checkgc:
- lj_gc_check(L);
- return 1;
-}
-
-static int ffi_pairs(lua_State *L, MMS mm)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkcdata(L, 1)->ctypeid;
- CType *ct = ctype_raw(cts, id);
- cTValue *tv;
- if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
- tv = lj_ctype_meta(cts, id, mm);
- if (!tv)
- lj_err_callerv(L, LJ_ERR_FFI_BADMM, strdata(lj_ctype_repr(L, id, NULL)),
- strdata(mmname_str(G(L), mm)));
- return lj_meta_tailcall(L, tv);
-}
-
-LJLIB_CF(ffi_meta___pairs)
-{
- return ffi_pairs(L, MM_pairs);
-}
-
-LJLIB_CF(ffi_meta___ipairs)
-{
- return ffi_pairs(L, MM_ipairs);
-}
-
-LJLIB_PUSH("ffi") LJLIB_SET(__metatable)
-
-#include "lj_libdef.h"
-
-/* -- C library metamethods ----------------------------------------------- */
-
-#define LJLIB_MODULE_ffi_clib
-
-/* Index C library by a name. */
-static TValue *ffi_clib_index(lua_State *L)
-{
- TValue *o = L->base;
- CLibrary *cl;
- if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB))
- lj_err_argt(L, 1, LUA_TUSERDATA);
- cl = (CLibrary *)uddata(udataV(o));
- if (!(o+1 < L->top && tvisstr(o+1)))
- lj_err_argt(L, 2, LUA_TSTRING);
- return lj_clib_index(L, cl, strV(o+1));
-}
-
-LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index 1)
-{
- TValue *tv = ffi_clib_index(L);
- if (tviscdata(tv)) {
- CTState *cts = ctype_cts(L);
- GCcdata *cd = cdataV(tv);
- CType *s = ctype_get(cts, cd->ctypeid);
- if (ctype_isextern(s->info)) {
- CTypeID sid = ctype_cid(s->info);
- void *sp = *(void **)cdataptr(cd);
- CType *ct = ctype_raw(cts, sid);
- if (lj_cconv_tv_ct(cts, ct, sid, L->top-1, sp))
- lj_gc_check(L);
- return 1;
- }
- }
- copyTV(L, L->top-1, tv);
- return 1;
-}
-
-LJLIB_CF(ffi_clib___newindex) LJLIB_REC(clib_index 0)
-{
- TValue *tv = ffi_clib_index(L);
- TValue *o = L->base+2;
- if (o < L->top && tviscdata(tv)) {
- CTState *cts = ctype_cts(L);
- GCcdata *cd = cdataV(tv);
- CType *d = ctype_get(cts, cd->ctypeid);
- if (ctype_isextern(d->info)) {
- CTInfo qual = 0;
- for (;;) { /* Skip attributes and collect qualifiers. */
- d = ctype_child(cts, d);
- if (!ctype_isattrib(d->info)) break;
- if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
- }
- if (!((d->info|qual) & CTF_CONST)) {
- lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0);
- return 0;
- }
- }
- }
- lj_err_caller(L, LJ_ERR_FFI_WRCONST);
- return 0; /* unreachable */
-}
-
-LJLIB_CF(ffi_clib___gc)
-{
- TValue *o = L->base;
- if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)
- lj_clib_unload((CLibrary *)uddata(udataV(o)));
- return 0;
-}
-
-#include "lj_libdef.h"
-
-/* -- Callback function metamethods --------------------------------------- */
-
-#define LJLIB_MODULE_ffi_callback
-
-static int ffi_callback_set(lua_State *L, GCfunc *fn)
-{
- GCcdata *cd = ffi_checkcdata(L, 1);
- CTState *cts = ctype_cts(L);
- CType *ct = ctype_raw(cts, cd->ctypeid);
- if (ctype_isptr(ct->info) && (LJ_32 || ct->size == 8)) {
- MSize slot = lj_ccallback_ptr2slot(cts, *(void **)cdataptr(cd));
- if (slot < cts->cb.sizeid && cts->cb.cbid[slot] != 0) {
- GCtab *t = cts->miscmap;
- TValue *tv = lj_tab_setint(L, t, (int32_t)slot);
- if (fn) {
- setfuncV(L, tv, fn);
- lj_gc_anybarriert(L, t);
- } else {
- setnilV(tv);
- cts->cb.cbid[slot] = 0;
- cts->cb.topid = slot < cts->cb.topid ? slot : cts->cb.topid;
- }
- return 0;
- }
- }
- lj_err_caller(L, LJ_ERR_FFI_BADCBACK);
- return 0;
-}
-
-LJLIB_CF(ffi_callback_free)
-{
- return ffi_callback_set(L, NULL);
-}
-
-LJLIB_CF(ffi_callback_set)
-{
- GCfunc *fn = lj_lib_checkfunc(L, 2);
- return ffi_callback_set(L, fn);
-}
-
-LJLIB_PUSH(top-1) LJLIB_SET(__index)
-
-#include "lj_libdef.h"
-
-/* -- FFI library functions ----------------------------------------------- */
-
-#define LJLIB_MODULE_ffi
-
-LJLIB_CF(ffi_cdef)
-{
- GCstr *s = lj_lib_checkstr(L, 1);
- CPState cp;
- int errcode;
- cp.L = L;
- cp.cts = ctype_cts(L);
- cp.srcname = strdata(s);
- cp.p = strdata(s);
- cp.param = L->base+1;
- cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT;
- errcode = lj_cparse(&cp);
- if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
- lj_gc_check(L);
- return 0;
-}
-
-LJLIB_CF(ffi_new) LJLIB_REC(.)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, NULL);
- CType *ct = ctype_raw(cts, id);
- CTSize sz;
- CTInfo info = lj_ctype_info(cts, id, &sz);
- TValue *o = L->base+1;
- GCcdata *cd;
- if ((info & CTF_VLA)) {
- o++;
- sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
- }
- if (sz == CTSIZE_INVALID)
- lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE);
- if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN)
- cd = lj_cdata_new(cts, id, sz);
- else
- cd = lj_cdata_newv(cts, id, sz, ctype_align(info));
- setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */
- lj_cconv_ct_init(cts, ct, sz, cdataptr(cd),
- o, (MSize)(L->top - o)); /* Initialize cdata. */
- if (ctype_isstruct(ct->info)) {
- /* Handle ctype __gc metamethod. Use the fast lookup here. */
- cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
- if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) {
- GCtab *t = cts->finalizer;
- if (gcref(t->metatable)) {
- /* Add to finalizer table, if still enabled. */
- copyTV(L, lj_tab_set(L, t, o-1), tv);
- lj_gc_anybarriert(L, t);
- cd->marked |= LJ_GC_CDATA_FIN;
- }
- }
- }
- L->top = o; /* Only return the cdata itself. */
- lj_gc_check(L);
- return 1;
-}
-
-LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, NULL);
- CType *d = ctype_raw(cts, id);
- TValue *o = lj_lib_checkany(L, 2);
- L->top = o+1; /* Make sure this is the last item on the stack. */
- if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info)))
- lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
- if (!(tviscdata(o) && cdataV(o)->ctypeid == id)) {
- GCcdata *cd = lj_cdata_new(cts, id, d->size);
- lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST);
- setcdataV(L, o, cd);
- lj_gc_check(L);
- }
- return 1;
-}
-
-LJLIB_CF(ffi_typeof) LJLIB_REC(.)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, L->base+1);
- GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
- *(CTypeID *)cdataptr(cd) = id;
- setcdataV(L, L->top-1, cd);
- lj_gc_check(L);
- return 1;
-}
-
-LJLIB_CF(ffi_istype) LJLIB_REC(.)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id1 = ffi_checkctype(L, cts, NULL);
- TValue *o = lj_lib_checkany(L, 2);
- int b = 0;
- if (tviscdata(o)) {
- GCcdata *cd = cdataV(o);
- CTypeID id2 = cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) :
- cd->ctypeid;
- CType *ct1 = lj_ctype_rawref(cts, id1);
- CType *ct2 = lj_ctype_rawref(cts, id2);
- if (ct1 == ct2) {
- b = 1;
- } else if (ctype_type(ct1->info) == ctype_type(ct2->info) &&
- ct1->size == ct2->size) {
- if (ctype_ispointer(ct1->info))
- b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL);
- else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info))
- b = (((ct1->info ^ ct2->info) & ~(CTF_QUAL|CTF_LONG)) == 0);
- } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) &&
- ct1 == ctype_rawchild(cts, ct2)) {
- b = 1;
- }
- }
- setboolV(L->top-1, b);
- setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
- return 1;
-}
-
-LJLIB_CF(ffi_sizeof) LJLIB_REC(ffi_xof FF_ffi_sizeof)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, NULL);
- CTSize sz;
- if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) {
- sz = cdatavlen(cdataV(L->base));
- } else {
- CType *ct = lj_ctype_rawref(cts, id);
- if (ctype_isvltype(ct->info))
- sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
- else
- sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
- if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) {
- setnilV(L->top-1);
- return 1;
- }
- }
- setintV(L->top-1, (int32_t)sz);
- return 1;
-}
-
-LJLIB_CF(ffi_alignof) LJLIB_REC(ffi_xof FF_ffi_alignof)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, NULL);
- CTSize sz = 0;
- CTInfo info = lj_ctype_info(cts, id, &sz);
- setintV(L->top-1, 1 << ctype_align(info));
- return 1;
-}
-
-LJLIB_CF(ffi_offsetof) LJLIB_REC(ffi_xof FF_ffi_offsetof)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, NULL);
- GCstr *name = lj_lib_checkstr(L, 2);
- CType *ct = lj_ctype_rawref(cts, id);
- CTSize ofs;
- if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) {
- CType *fct = lj_ctype_getfield(cts, ct, name, &ofs);
- if (fct) {
- setintV(L->top-1, ofs);
- if (ctype_isfield(fct->info)) {
- return 1;
- } else if (ctype_isbitfield(fct->info)) {
- setintV(L->top++, ctype_bitpos(fct->info));
- setintV(L->top++, ctype_bitbsz(fct->info));
- return 3;
- }
- }
- }
- return 0;
-}
-
-LJLIB_CF(ffi_errno) LJLIB_REC(.)
-{
- int err = errno;
- if (L->top > L->base)
- errno = ffi_checkint(L, 1);
- setintV(L->top++, err);
- return 1;
-}
-
-LJLIB_CF(ffi_string) LJLIB_REC(.)
-{
- CTState *cts = ctype_cts(L);
- TValue *o = lj_lib_checkany(L, 1);
- const char *p;
- size_t len;
- if (o+1 < L->top && !tvisnil(o+1)) {
- len = (size_t)ffi_checkint(L, 2);
- lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o,
- CCF_ARG(1));
- } else {
- lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o,
- CCF_ARG(1));
- len = strlen(p);
- }
- L->top = o+1; /* Make sure this is the last item on the stack. */
- setstrV(L, o, lj_str_new(L, p, len));
- lj_gc_check(L);
- return 1;
-}
-
-LJLIB_CF(ffi_copy) LJLIB_REC(.)
-{
- void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
- void *sp = ffi_checkptr(L, 2, CTID_P_CVOID);
- TValue *o = L->base+1;
- CTSize len;
- if (tvisstr(o) && o+1 >= L->top)
- len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */
- else
- len = (CTSize)ffi_checkint(L, 3);
- memcpy(dp, sp, len);
- return 0;
-}
-
-LJLIB_CF(ffi_fill) LJLIB_REC(.)
-{
- void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
- CTSize len = (CTSize)ffi_checkint(L, 2);
- int32_t fill = 0;
- if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3);
- memset(dp, fill, len);
- return 0;
-}
-
-#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be)
-
-/* Test ABI string. */
-LJLIB_CF(ffi_abi) LJLIB_REC(.)
-{
- GCstr *s = lj_lib_checkstr(L, 1);
- int b = 0;
- switch (s->hash) {
-#if LJ_64
- case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */
-#else
- case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */
-#endif
-#if LJ_ARCH_HASFPU
- case H_(e33ee463,e33ee463): b = 1; break; /* fpu */
-#endif
-#if LJ_ABI_SOFTFP
- case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */
-#else
- case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */
-#endif
-#if LJ_ABI_EABI
- case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */
-#endif
-#if LJ_ABI_WIN
- case H_(4ab624a8,4ab624a8): b = 1; break; /* win */
-#endif
- case H_(3af93066,1f001464): b = 1; break; /* le/be */
- default:
- break;
- }
- setboolV(L->top-1, b);
- setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
- return 1;
-}
-
-#undef H_
-
-LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */
-
-LJLIB_CF(ffi_metatype)
-{
- CTState *cts = ctype_cts(L);
- CTypeID id = ffi_checkctype(L, cts, NULL);
- GCtab *mt = lj_lib_checktab(L, 2);
- GCtab *t = cts->miscmap;
- CType *ct = ctype_get(cts, id); /* Only allow raw types. */
- TValue *tv;
- GCcdata *cd;
- if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) ||
- ctype_isvector(ct->info)))
- lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
- tv = lj_tab_setinth(L, t, -(int32_t)id);
- if (!tvisnil(tv))
- lj_err_caller(L, LJ_ERR_PROTMT);
- settabV(L, tv, mt);
- lj_gc_anybarriert(L, t);
- cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
- *(CTypeID *)cdataptr(cd) = id;
- setcdataV(L, L->top-1, cd);
- lj_gc_check(L);
- return 1;
-}
-
-LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */
-
-LJLIB_CF(ffi_gc) LJLIB_REC(.)
-{
- GCcdata *cd = ffi_checkcdata(L, 1);
- TValue *fin = lj_lib_checkany(L, 2);
- CTState *cts = ctype_cts(L);
- GCtab *t = cts->finalizer;
- CType *ct = ctype_raw(cts, cd->ctypeid);
- if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) ||
- ctype_isrefarray(ct->info)))
- lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
- if (gcref(t->metatable)) { /* Update finalizer table, if still enabled. */
- copyTV(L, lj_tab_set(L, t, L->base), fin);
- lj_gc_anybarriert(L, t);
- if (!tvisnil(fin))
- cd->marked |= LJ_GC_CDATA_FIN;
- else
- cd->marked &= ~LJ_GC_CDATA_FIN;
- }
- L->top = L->base+1; /* Pass through the cdata object. */
- return 1;
-}
-
-LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */
-
-LJLIB_CF(ffi_load)
-{
- GCstr *name = lj_lib_checkstr(L, 1);
- int global = (L->base+1 < L->top && tvistruecond(L->base+1));
- lj_clib_load(L, tabref(curr_func(L)->c.env), name, global);
- return 1;
-}
-
-LJLIB_PUSH(top-4) LJLIB_SET(C)
-LJLIB_PUSH(top-3) LJLIB_SET(os)
-LJLIB_PUSH(top-2) LJLIB_SET(arch)
-
-#include "lj_libdef.h"
-
-/* ------------------------------------------------------------------------ */
-
-/* Create special weak-keyed finalizer table. */
-static GCtab *ffi_finalizer(lua_State *L)
-{
- /* NOBARRIER: The table is new (marked white). */
- GCtab *t = lj_tab_new(L, 0, 1);
- settabV(L, L->top++, t);
- setgcref(t->metatable, obj2gco(t));
- setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
- lj_str_newlit(L, "K"));
- t->nomm = (uint8_t)(~(1u<<MM_mode));
- return t;
-}
-
-/* Register FFI module as loaded. */
-static void ffi_register_module(lua_State *L)
-{
- cTValue *tmp = lj_tab_getstr(tabV(registry(L)), lj_str_newlit(L, "_LOADED"));
- if (tmp && tvistab(tmp)) {
- GCtab *t = tabV(tmp);
- copyTV(L, lj_tab_setstr(L, t, lj_str_newlit(L, LUA_FFILIBNAME)), L->top-1);
- lj_gc_anybarriert(L, t);
- }
-}
-
-LUALIB_API int luaopen_ffi(lua_State *L)
-{
- CTState *cts = lj_ctype_init(L);
- settabV(L, L->top++, (cts->miscmap = lj_tab_new(L, 0, 1)));
- cts->finalizer = ffi_finalizer(L);
- LJ_LIB_REG(L, NULL, ffi_meta);
- /* NOBARRIER: basemt is a GC root. */
- setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1)));
- LJ_LIB_REG(L, NULL, ffi_clib);
- LJ_LIB_REG(L, NULL, ffi_callback);
- /* NOBARRIER: the key is new and lj_tab_newkey() handles the barrier. */
- settabV(L, lj_tab_setstr(L, cts->miscmap, &cts->g->strempty), tabV(L->top-1));
- L->top--;
- lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */
- lua_pushliteral(L, LJ_OS_NAME);
- lua_pushliteral(L, LJ_ARCH_NAME);
- LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */
- ffi_register_module(L);
- return 1;
-}
-
-#endif
+/*
+** FFI library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_ffi_c
+#define LUA_LIB
+
+#include <errno.h>
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ctype.h"
+#include "lj_cparse.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_clib.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* -- C type checks ------------------------------------------------------- */
+
+/* Check first argument for a C type and returns its ID. */
+static CTypeID ffi_checkctype(lua_State *L, CTState *cts, TValue *param)
+{
+ TValue *o = L->base;
+ if (!(o < L->top)) {
+ err_argtype:
+ lj_err_argtype(L, 1, "C type");
+ }
+ if (tvisstr(o)) { /* Parse an abstract C type declaration. */
+ GCstr *s = strV(o);
+ CPState cp;
+ int errcode;
+ cp.L = L;
+ cp.cts = cts;
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.param = param;
+ cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
+ errcode = lj_cparse(&cp);
+ if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
+ return cp.val.id;
+ } else {
+ GCcdata *cd;
+ if (!tviscdata(o)) goto err_argtype;
+ if (param && param < L->top) lj_err_arg(L, 1, LJ_ERR_FFI_NUMPARAM);
+ cd = cdataV(o);
+ return cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->ctypeid;
+ }
+}
+
+/* Check argument for C data and return it. */
+static GCcdata *ffi_checkcdata(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tviscdata(o)))
+ lj_err_argt(L, narg, LUA_TCDATA);
+ return cdataV(o);
+}
+
+/* Convert argument to C pointer. */
+static void *ffi_checkptr(lua_State *L, int narg, CTypeID id)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = L->base + narg-1;
+ void *p;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg));
+ return p;
+}
+
+/* Convert argument to int32_t. */
+static int32_t ffi_checkint(lua_State *L, int narg)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = L->base + narg-1;
+ int32_t i;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o,
+ CCF_ARG(narg));
+ return i;
+}
+
+/* -- C type metamethods -------------------------------------------------- */
+
+#define LJLIB_MODULE_ffi_meta
+
+/* Handle ctype __index/__newindex metamethods. */
+static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ cTValue *tv = lj_ctype_meta(cts, id, mm);
+ TValue *base = L->base;
+ if (!tv) {
+ const char *s;
+ err_index:
+ s = strdata(lj_ctype_repr(L, id, NULL));
+ if (tvisstr(L->base+1)) {
+ lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1));
+ } else {
+ const char *key = tviscdata(L->base+1) ?
+ strdata(lj_ctype_repr(L, cdataV(L->base+1)->ctypeid, NULL)) :
+ lj_typename(L->base+1);
+ lj_err_callerv(L, LJ_ERR_FFI_BADIDXW, s, key);
+ }
+ }
+ if (!tvisfunc(tv)) {
+ if (mm == MM_index) {
+ cTValue *o = lj_meta_tget(L, tv, base+1);
+ if (o) {
+ if (tvisnil(o)) goto err_index;
+ copyTV(L, L->top-1, o);
+ return 1;
+ }
+ } else {
+ TValue *o = lj_meta_tset(L, tv, base+1);
+ if (o) {
+ copyTV(L, o, base+2);
+ return 0;
+ }
+ }
+ tv = L->top-1;
+ }
+ return lj_meta_tailcall(L, tv);
+}
+
+LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0)
+{
+ CTState *cts = ctype_cts(L);
+ CTInfo qual = 0;
+ CType *ct;
+ uint8_t *p;
+ TValue *o = L->base;
+ if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. */
+ lj_err_argt(L, 1, LUA_TCDATA);
+ ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
+ if ((qual & 1))
+ return ffi_index_meta(L, cts, ct, MM_index);
+ if (lj_cdata_get(cts, ct, L->top-1, p))
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1)
+{
+ CTState *cts = ctype_cts(L);
+ CTInfo qual = 0;
+ CType *ct;
+ uint8_t *p;
+ TValue *o = L->base;
+ if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. */
+ lj_err_argt(L, 1, LUA_TCDATA);
+ ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
+ if ((qual & 1)) {
+ if ((qual & CTF_CONST))
+ lj_err_caller(L, LJ_ERR_FFI_WRCONST);
+ return ffi_index_meta(L, cts, ct, MM_newindex);
+ }
+ lj_cdata_set(cts, ct, p, o+2, qual);
+ return 0;
+}
+
+/* Common handler for cdata arithmetic. */
+static int ffi_arith(lua_State *L)
+{
+ MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq);
+ return lj_carith_op(L, mm);
+}
+
+/* The following functions must be in contiguous ORDER MM. */
+LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat)
+{
+ return ffi_arith(L);
+}
+
+/* Forward declaration. */
+static int lj_cf_ffi_new(lua_State *L);
+
+LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call)
+{
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ CTypeID id = cd->ctypeid;
+ CType *ct;
+ cTValue *tv;
+ MMS mm = MM_call;
+ if (cd->ctypeid == CTID_CTYPEID) {
+ id = *(CTypeID *)cdataptr(cd);
+ mm = MM_new;
+ } else {
+ int ret = lj_ccall_func(L, cd);
+ if (ret >= 0)
+ return ret;
+ }
+ /* Handle ctype __call/__new metamethod. */
+ ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ if (tv)
+ return lj_meta_tailcall(L, tv);
+ else if (mm == MM_call)
+ lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL)));
+ return lj_cf_ffi_new(L);
+}
+
+LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm)
+{
+ return ffi_arith(L);
+}
+/* End of contiguous ORDER MM. */
+
+LJLIB_CF(ffi_meta___tostring)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ const char *msg = "cdata<%s>: %p";
+ CTypeID id = cd->ctypeid;
+ void *p = cdataptr(cd);
+ if (id == CTID_CTYPEID) {
+ msg = "ctype<%s>";
+ id = *(CTypeID *)p;
+ } else {
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isref(ct->info)) {
+ p = *(void **)p;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_iscomplex(ct->info)) {
+ setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size));
+ goto checkgc;
+ } else if (ct->size == 8 && ctype_isinteger(ct->info)) {
+ setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd),
+ (ct->info & CTF_UNSIGNED)));
+ goto checkgc;
+ } else if (ctype_isfunc(ct->info)) {
+ p = *(void **)p;
+ } else if (ctype_isenum(ct->info)) {
+ msg = "cdata<%s>: %d";
+ p = (void *)(uintptr_t)*(uint32_t **)p;
+ } else {
+ if (ctype_isptr(ct->info)) {
+ p = cdata_getptr(p, ct->size);
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) {
+ /* Handle ctype __tostring metamethod. */
+ cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring);
+ if (tv)
+ return lj_meta_tailcall(L, tv);
+ }
+ }
+ }
+ lj_str_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p);
+checkgc:
+ lj_gc_check(L);
+ return 1;
+}
+
+static int ffi_pairs(lua_State *L, MMS mm)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkcdata(L, 1)->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ cTValue *tv;
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ if (!tv)
+ lj_err_callerv(L, LJ_ERR_FFI_BADMM, strdata(lj_ctype_repr(L, id, NULL)),
+ strdata(mmname_str(G(L), mm)));
+ return lj_meta_tailcall(L, tv);
+}
+
+LJLIB_CF(ffi_meta___pairs)
+{
+ return ffi_pairs(L, MM_pairs);
+}
+
+LJLIB_CF(ffi_meta___ipairs)
+{
+ return ffi_pairs(L, MM_ipairs);
+}
+
+LJLIB_PUSH("ffi") LJLIB_SET(__metatable)
+
+#include "lj_libdef.h"
+
+/* -- C library metamethods ----------------------------------------------- */
+
+#define LJLIB_MODULE_ffi_clib
+
+/* Index C library by a name. */
+static TValue *ffi_clib_index(lua_State *L)
+{
+ TValue *o = L->base;
+ CLibrary *cl;
+ if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB))
+ lj_err_argt(L, 1, LUA_TUSERDATA);
+ cl = (CLibrary *)uddata(udataV(o));
+ if (!(o+1 < L->top && tvisstr(o+1)))
+ lj_err_argt(L, 2, LUA_TSTRING);
+ return lj_clib_index(L, cl, strV(o+1));
+}
+
+LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index 1)
+{
+ TValue *tv = ffi_clib_index(L);
+ if (tviscdata(tv)) {
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = cdataV(tv);
+ CType *s = ctype_get(cts, cd->ctypeid);
+ if (ctype_isextern(s->info)) {
+ CTypeID sid = ctype_cid(s->info);
+ void *sp = *(void **)cdataptr(cd);
+ CType *ct = ctype_raw(cts, sid);
+ if (lj_cconv_tv_ct(cts, ct, sid, L->top-1, sp))
+ lj_gc_check(L);
+ return 1;
+ }
+ }
+ copyTV(L, L->top-1, tv);
+ return 1;
+}
+
+LJLIB_CF(ffi_clib___newindex) LJLIB_REC(clib_index 0)
+{
+ TValue *tv = ffi_clib_index(L);
+ TValue *o = L->base+2;
+ if (o < L->top && tviscdata(tv)) {
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = cdataV(tv);
+ CType *d = ctype_get(cts, cd->ctypeid);
+ if (ctype_isextern(d->info)) {
+ CTInfo qual = 0;
+ for (;;) { /* Skip attributes and collect qualifiers. */
+ d = ctype_child(cts, d);
+ if (!ctype_isattrib(d->info)) break;
+ if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
+ }
+ if (!((d->info|qual) & CTF_CONST)) {
+ lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0);
+ return 0;
+ }
+ }
+ }
+ lj_err_caller(L, LJ_ERR_FFI_WRCONST);
+ return 0; /* unreachable */
+}
+
+LJLIB_CF(ffi_clib___gc)
+{
+ TValue *o = L->base;
+ if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)
+ lj_clib_unload((CLibrary *)uddata(udataV(o)));
+ return 0;
+}
+
+#include "lj_libdef.h"
+
+/* -- Callback function metamethods --------------------------------------- */
+
+#define LJLIB_MODULE_ffi_callback
+
+static int ffi_callback_set(lua_State *L, GCfunc *fn)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ if (ctype_isptr(ct->info) && (LJ_32 || ct->size == 8)) {
+ MSize slot = lj_ccallback_ptr2slot(cts, *(void **)cdataptr(cd));
+ if (slot < cts->cb.sizeid && cts->cb.cbid[slot] != 0) {
+ GCtab *t = cts->miscmap;
+ TValue *tv = lj_tab_setint(L, t, (int32_t)slot);
+ if (fn) {
+ setfuncV(L, tv, fn);
+ lj_gc_anybarriert(L, t);
+ } else {
+ setnilV(tv);
+ cts->cb.cbid[slot] = 0;
+ cts->cb.topid = slot < cts->cb.topid ? slot : cts->cb.topid;
+ }
+ return 0;
+ }
+ }
+ lj_err_caller(L, LJ_ERR_FFI_BADCBACK);
+ return 0;
+}
+
+LJLIB_CF(ffi_callback_free)
+{
+ return ffi_callback_set(L, NULL);
+}
+
+LJLIB_CF(ffi_callback_set)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 2);
+ return ffi_callback_set(L, fn);
+}
+
+LJLIB_PUSH(top-1) LJLIB_SET(__index)
+
+#include "lj_libdef.h"
+
+/* -- FFI library functions ----------------------------------------------- */
+
+#define LJLIB_MODULE_ffi
+
+LJLIB_CF(ffi_cdef)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ CPState cp;
+ int errcode;
+ cp.L = L;
+ cp.cts = ctype_cts(L);
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.param = L->base+1;
+ cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT;
+ errcode = lj_cparse(&cp);
+ if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
+ lj_gc_check(L);
+ return 0;
+}
+
+LJLIB_CF(ffi_new) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CType *ct = ctype_raw(cts, id);
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ TValue *o = L->base+1;
+ GCcdata *cd;
+ if ((info & CTF_VLA)) {
+ o++;
+ sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
+ }
+ if (sz == CTSIZE_INVALID)
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE);
+ if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN)
+ cd = lj_cdata_new(cts, id, sz);
+ else
+ cd = lj_cdata_newv(cts, id, sz, ctype_align(info));
+ setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */
+ lj_cconv_ct_init(cts, ct, sz, cdataptr(cd),
+ o, (MSize)(L->top - o)); /* Initialize cdata. */
+ if (ctype_isstruct(ct->info)) {
+ /* Handle ctype __gc metamethod. Use the fast lookup here. */
+ cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
+ if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) {
+ GCtab *t = cts->finalizer;
+ if (gcref(t->metatable)) {
+ /* Add to finalizer table, if still enabled. */
+ copyTV(L, lj_tab_set(L, t, o-1), tv);
+ lj_gc_anybarriert(L, t);
+ cd->marked |= LJ_GC_CDATA_FIN;
+ }
+ }
+ }
+ L->top = o; /* Only return the cdata itself. */
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CType *d = ctype_raw(cts, id);
+ TValue *o = lj_lib_checkany(L, 2);
+ L->top = o+1; /* Make sure this is the last item on the stack. */
+ if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ if (!(tviscdata(o) && cdataV(o)->ctypeid == id)) {
+ GCcdata *cd = lj_cdata_new(cts, id, d->size);
+ lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST);
+ setcdataV(L, o, cd);
+ lj_gc_check(L);
+ }
+ return 1;
+}
+
+LJLIB_CF(ffi_typeof) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, L->base+1);
+ GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
+ *(CTypeID *)cdataptr(cd) = id;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_istype) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id1 = ffi_checkctype(L, cts, NULL);
+ TValue *o = lj_lib_checkany(L, 2);
+ int b = 0;
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ CTypeID id2 = cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) :
+ cd->ctypeid;
+ CType *ct1 = lj_ctype_rawref(cts, id1);
+ CType *ct2 = lj_ctype_rawref(cts, id2);
+ if (ct1 == ct2) {
+ b = 1;
+ } else if (ctype_type(ct1->info) == ctype_type(ct2->info) &&
+ ct1->size == ct2->size) {
+ if (ctype_ispointer(ct1->info))
+ b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL);
+ else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info))
+ b = (((ct1->info ^ ct2->info) & ~(CTF_QUAL|CTF_LONG)) == 0);
+ } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) &&
+ ct1 == ctype_rawchild(cts, ct2)) {
+ b = 1;
+ }
+ }
+ setboolV(L->top-1, b);
+ setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
+ return 1;
+}
+
+LJLIB_CF(ffi_sizeof) LJLIB_REC(ffi_xof FF_ffi_sizeof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CTSize sz;
+ if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) {
+ sz = cdatavlen(cdataV(L->base));
+ } else {
+ CType *ct = lj_ctype_rawref(cts, id);
+ if (ctype_isvltype(ct->info))
+ sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
+ else
+ sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
+ if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) {
+ setnilV(L->top-1);
+ return 1;
+ }
+ }
+ setintV(L->top-1, (int32_t)sz);
+ return 1;
+}
+
+LJLIB_CF(ffi_alignof) LJLIB_REC(ffi_xof FF_ffi_alignof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CTSize sz = 0;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ setintV(L->top-1, 1 << ctype_align(info));
+ return 1;
+}
+
+LJLIB_CF(ffi_offsetof) LJLIB_REC(ffi_xof FF_ffi_offsetof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ GCstr *name = lj_lib_checkstr(L, 2);
+ CType *ct = lj_ctype_rawref(cts, id);
+ CTSize ofs;
+ if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) {
+ CType *fct = lj_ctype_getfield(cts, ct, name, &ofs);
+ if (fct) {
+ setintV(L->top-1, ofs);
+ if (ctype_isfield(fct->info)) {
+ return 1;
+ } else if (ctype_isbitfield(fct->info)) {
+ setintV(L->top++, ctype_bitpos(fct->info));
+ setintV(L->top++, ctype_bitbsz(fct->info));
+ return 3;
+ }
+ }
+ }
+ return 0;
+}
+
+LJLIB_CF(ffi_errno) LJLIB_REC(.)
+{
+ int err = errno;
+ if (L->top > L->base)
+ errno = ffi_checkint(L, 1);
+ setintV(L->top++, err);
+ return 1;
+}
+
+LJLIB_CF(ffi_string) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = lj_lib_checkany(L, 1);
+ const char *p;
+ size_t len;
+ if (o+1 < L->top) {
+ len = (size_t)ffi_checkint(L, 2);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o,
+ CCF_ARG(1));
+ } else {
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o,
+ CCF_ARG(1));
+ len = strlen(p);
+ }
+ L->top = o+1; /* Make sure this is the last item on the stack. */
+ setstrV(L, o, lj_str_new(L, p, len));
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_copy) LJLIB_REC(.)
+{
+ void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
+ void *sp = ffi_checkptr(L, 2, CTID_P_CVOID);
+ TValue *o = L->base+1;
+ CTSize len;
+ if (tvisstr(o) && o+1 >= L->top)
+ len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */
+ else
+ len = (CTSize)ffi_checkint(L, 3);
+ memcpy(dp, sp, len);
+ return 0;
+}
+
+LJLIB_CF(ffi_fill) LJLIB_REC(.)
+{
+ void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
+ CTSize len = (CTSize)ffi_checkint(L, 2);
+ int32_t fill = 0;
+ if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3);
+ memset(dp, fill, len);
+ return 0;
+}
+
+#define H_(le, be) LJ_ENDIAN_SELECT(0x##le, 0x##be)
+
+/* Test ABI string. */
+LJLIB_CF(ffi_abi) LJLIB_REC(.)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int b = 0;
+ switch (s->hash) {
+#if LJ_64
+ case H_(849858eb,ad35fd06): b = 1; break; /* 64bit */
+#else
+ case H_(662d3c79,d0e22477): b = 1; break; /* 32bit */
+#endif
+#if LJ_ARCH_HASFPU
+ case H_(e33ee463,e33ee463): b = 1; break; /* fpu */
+#endif
+#if LJ_ABI_SOFTFP
+ case H_(61211a23,c2e8c81c): b = 1; break; /* softfp */
+#else
+ case H_(539417a8,8ce0812f): b = 1; break; /* hardfp */
+#endif
+#if LJ_ABI_EABI
+ case H_(2182df8f,f2ed1152): b = 1; break; /* eabi */
+#endif
+#if LJ_ABI_WIN
+ case H_(4ab624a8,4ab624a8): b = 1; break; /* win */
+#endif
+ case H_(3af93066,1f001464): b = 1; break; /* le/be */
+ default:
+ break;
+ }
+ setboolV(L->top-1, b);
+ setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
+ return 1;
+}
+
+#undef H_
+
+LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */
+
+LJLIB_CF(ffi_metatype)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ GCtab *mt = lj_lib_checktab(L, 2);
+ GCtab *t = cts->miscmap;
+ CType *ct = ctype_get(cts, id); /* Only allow raw types. */
+ TValue *tv;
+ GCcdata *cd;
+ if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) ||
+ ctype_isvector(ct->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ tv = lj_tab_setinth(L, t, -(int32_t)id);
+ if (!tvisnil(tv))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ settabV(L, tv, mt);
+ lj_gc_anybarriert(L, t);
+ cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
+ *(CTypeID *)cdataptr(cd) = id;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */
+
+LJLIB_CF(ffi_gc) LJLIB_REC(.)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ TValue *fin = lj_lib_checkany(L, 2);
+ CTState *cts = ctype_cts(L);
+ GCtab *t = cts->finalizer;
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) ||
+ ctype_isrefarray(ct->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ if (gcref(t->metatable)) { /* Update finalizer table, if still enabled. */
+ copyTV(L, lj_tab_set(L, t, L->base), fin);
+ lj_gc_anybarriert(L, t);
+ if (!tvisnil(fin))
+ cd->marked |= LJ_GC_CDATA_FIN;
+ else
+ cd->marked &= ~LJ_GC_CDATA_FIN;
+ }
+ L->top = L->base+1; /* Pass through the cdata object. */
+ return 1;
+}
+
+LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */
+
+LJLIB_CF(ffi_load)
+{
+ GCstr *name = lj_lib_checkstr(L, 1);
+ int global = (L->base+1 < L->top && tvistruecond(L->base+1));
+ lj_clib_load(L, tabref(curr_func(L)->c.env), name, global);
+ return 1;
+}
+
+LJLIB_PUSH(top-4) LJLIB_SET(C)
+LJLIB_PUSH(top-3) LJLIB_SET(os)
+LJLIB_PUSH(top-2) LJLIB_SET(arch)
+
+#include "lj_libdef.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Create special weak-keyed finalizer table. */
+static GCtab *ffi_finalizer(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "K"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+ return t;
+}
+
+/* Register FFI module as loaded. */
+static void ffi_register_module(lua_State *L)
+{
+ cTValue *tmp = lj_tab_getstr(tabV(registry(L)), lj_str_newlit(L, "_LOADED"));
+ if (tmp && tvistab(tmp)) {
+ GCtab *t = tabV(tmp);
+ copyTV(L, lj_tab_setstr(L, t, lj_str_newlit(L, LUA_FFILIBNAME)), L->top-1);
+ lj_gc_anybarriert(L, t);
+ }
+}
+
+LUALIB_API int luaopen_ffi(lua_State *L)
+{
+ CTState *cts = lj_ctype_init(L);
+ settabV(L, L->top++, (cts->miscmap = lj_tab_new(L, 0, 1)));
+ cts->finalizer = ffi_finalizer(L);
+ LJ_LIB_REG(L, NULL, ffi_meta);
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1)));
+ LJ_LIB_REG(L, NULL, ffi_clib);
+ LJ_LIB_REG(L, NULL, ffi_callback);
+ /* NOBARRIER: the key is new and lj_tab_newkey() handles the barrier. */
+ settabV(L, lj_tab_setstr(L, cts->miscmap, &cts->g->strempty), tabV(L->top-1));
+ L->top--;
+ lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */
+ ffi_register_module(L);
+ return 1;
+}
+
+#endif
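
For context only (not part of the diff): luaopen_ffi above is preloaded rather than opened eagerly (note the "no global ffi created" comment), so scripts obtain the module with require("ffi"). A hedged host-side sketch exercising lj_cf_ffi_cdef, lj_cf_ffi_new, lj_cf_ffi_fill and lj_cf_ffi_string from the file above is shown below; it assumes an FFI-enabled LuaJIT build, and the declared C function and buffer sizes are illustrative only.

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);  /* registers luaopen_ffi in package.preload */
  /* The embedded chunk goes through ffi.cdef, ffi.C (the default
     namespace created by lj_clib_default), ffi.new, ffi.fill and
     ffi.string as implemented above. */
  if (luaL_dostring(L,
        "local ffi = require('ffi')\n"
        "ffi.cdef[[ size_t strlen(const char *s); ]]\n"
        "print(tonumber(ffi.C.strlen('hello')))      -- 5\n"
        "local buf = ffi.new('uint8_t[8]', 0)\n"
        "ffi.fill(buf, 8, 0x41)\n"
        "print(ffi.string(buf, 8))                   -- AAAAAAAA\n"))
    fprintf(stderr, "%s\n", lua_tostring(L, -1));
  lua_close(L);
  return 0;
}
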
diff --git a/3rdparty/lua/src/lib_init.c b/3rdparty/lua/src/lib_init.c
index dc9c12e..2d6bd59 100644
--- a/3rdparty/lua/src/lib_init.c
+++ b/3rdparty/lua/src/lib_init.c
@@ -1,55 +1,55 @@
-/*
-** Library initialization.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major parts taken verbatim from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lib_init_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_arch.h"
-
-static const luaL_Reg lj_lib_load[] = {
- { "", luaopen_base },
- { LUA_LOADLIBNAME, luaopen_package },
- { LUA_TABLIBNAME, luaopen_table },
- { LUA_IOLIBNAME, luaopen_io },
- { LUA_OSLIBNAME, luaopen_os },
- { LUA_STRLIBNAME, luaopen_string },
- { LUA_MATHLIBNAME, luaopen_math },
- { LUA_DBLIBNAME, luaopen_debug },
- { LUA_BITLIBNAME, luaopen_bit },
- { LUA_JITLIBNAME, luaopen_jit },
- { NULL, NULL }
-};
-
-static const luaL_Reg lj_lib_preload[] = {
-#if LJ_HASFFI
- { LUA_FFILIBNAME, luaopen_ffi },
-#endif
- { NULL, NULL }
-};
-
-LUALIB_API void luaL_openlibs(lua_State *L)
-{
- const luaL_Reg *lib;
- for (lib = lj_lib_load; lib->func; lib++) {
- lua_pushcfunction(L, lib->func);
- lua_pushstring(L, lib->name);
- lua_call(L, 1, 0);
- }
- luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD",
- sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1);
- for (lib = lj_lib_preload; lib->func; lib++) {
- lua_pushcfunction(L, lib->func);
- lua_setfield(L, -2, lib->name);
- }
- lua_pop(L, 1);
-}
-
+/*
+** Library initialization.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major parts taken verbatim from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_init_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_arch.h"
+
+static const luaL_Reg lj_lib_load[] = {
+ { "", luaopen_base },
+ { LUA_LOADLIBNAME, luaopen_package },
+ { LUA_TABLIBNAME, luaopen_table },
+ { LUA_IOLIBNAME, luaopen_io },
+ { LUA_OSLIBNAME, luaopen_os },
+ { LUA_STRLIBNAME, luaopen_string },
+ { LUA_MATHLIBNAME, luaopen_math },
+ { LUA_DBLIBNAME, luaopen_debug },
+ { LUA_BITLIBNAME, luaopen_bit },
+ { LUA_JITLIBNAME, luaopen_jit },
+ { NULL, NULL }
+};
+
+static const luaL_Reg lj_lib_preload[] = {
+#if LJ_HASFFI
+ { LUA_FFILIBNAME, luaopen_ffi },
+#endif
+ { NULL, NULL }
+};
+
+LUALIB_API void luaL_openlibs(lua_State *L)
+{
+ const luaL_Reg *lib;
+ for (lib = lj_lib_load; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_pushstring(L, lib->name);
+ lua_call(L, 1, 0);
+ }
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD",
+ sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1);
+ for (lib = lj_lib_preload; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_setfield(L, -2, lib->name);
+ }
+ lua_pop(L, 1);
+}
+
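
For context only (not part of the diff): lib_init.c opens the libraries in lj_lib_load[] directly but only places luaopen_ffi into the registry _PRELOAD table (exposed as package.preload), so the FFI is initialized lazily on the first require("ffi"). A small sketch of that behavior, assuming an FFI-enabled build:

#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int main(void)
{
  lua_State *L = luaL_newstate();
  if (L == NULL) return 1;
  luaL_openlibs(L);  /* loads base, package, ..., jit; preloads ffi */
  /* Expected output: "true  false" -- the loader is registered,
     but the module itself has not been opened yet. */
  luaL_dostring(L,
    "print(package.preload.ffi ~= nil, package.loaded.ffi ~= nil)");
  lua_close(L);
  return 0;
}
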
diff --git a/3rdparty/lua/src/lib_io.c b/3rdparty/lua/src/lib_io.c
index 037aa28..e0c6908 100644
--- a/3rdparty/lua/src/lib_io.c
+++ b/3rdparty/lua/src/lib_io.c
@@ -1,6 +1,6 @@
/*
** I/O library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
@@ -426,7 +426,7 @@ LJLIB_CF(io_popen)
LJLIB_CF(io_tmpfile)
{
IOFileUD *iof = io_file_new(L);
-#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA
+#if LJ_TARGET_PS3
iof->fp = NULL; errno = ENOSYS;
#else
iof->fp = tmpfile();
diff --git a/3rdparty/lua/src/lib_jit.c b/3rdparty/lua/src/lib_jit.c
index 0c4551a..82e6825 100644
--- a/3rdparty/lua/src/lib_jit.c
+++ b/3rdparty/lua/src/lib_jit.c
@@ -1,663 +1,663 @@
-/*
-** JIT library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lib_jit_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_arch.h"
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_bc.h"
-#if LJ_HASJIT
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_ircall.h"
-#include "lj_iropt.h"
-#include "lj_target.h"
-#endif
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-#include "lj_vmevent.h"
-#include "lj_lib.h"
-
-#include "luajit.h"
-
-/* -- jit.* functions ----------------------------------------------------- */
-
-#define LJLIB_MODULE_jit
-
-static int setjitmode(lua_State *L, int mode)
-{
- int idx = 0;
- if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */
- mode |= LUAJIT_MODE_ENGINE;
- } else {
- /* jit.on/off/flush(func|proto, nil|true|false) */
- if (tvisfunc(L->base) || tvisproto(L->base))
- idx = 1;
- else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */
- goto err;
- if (L->base+1 < L->top && tvisbool(L->base+1))
- mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC;
- else
- mode |= LUAJIT_MODE_FUNC;
- }
- if (luaJIT_setmode(L, idx, mode) != 1) {
- if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE)
- lj_err_caller(L, LJ_ERR_NOJIT);
- err:
- lj_err_argt(L, 1, LUA_TFUNCTION);
- }
- return 0;
-}
-
-LJLIB_CF(jit_on)
-{
- return setjitmode(L, LUAJIT_MODE_ON);
-}
-
-LJLIB_CF(jit_off)
-{
- return setjitmode(L, LUAJIT_MODE_OFF);
-}
-
-LJLIB_CF(jit_flush)
-{
-#if LJ_HASJIT
- if (L->base < L->top && tvisnumber(L->base)) {
- int traceno = lj_lib_checkint(L, 1);
- luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE);
- return 0;
- }
-#endif
- return setjitmode(L, LUAJIT_MODE_FLUSH);
-}
-
-#if LJ_HASJIT
-/* Push a string for every flag bit that is set. */
-static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base,
- const char *str)
-{
- for (; *str; base <<= 1, str += 1+*str)
- if (flags & base)
- setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str));
-}
-#endif
-
-LJLIB_CF(jit_status)
-{
-#if LJ_HASJIT
- jit_State *J = L2J(L);
- L->top = L->base;
- setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0);
- flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING);
- flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING);
- return (int)(L->top - L->base);
-#else
- setboolV(L->top++, 0);
- return 1;
-#endif
-}
-
-LJLIB_CF(jit_attach)
-{
-#ifdef LUAJIT_DISABLE_VMEVENT
- luaL_error(L, "vmevent API disabled");
-#else
- GCfunc *fn = lj_lib_checkfunc(L, 1);
- GCstr *s = lj_lib_optstr(L, 2);
- luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE);
- if (s) { /* Attach to given event. */
- const uint8_t *p = (const uint8_t *)strdata(s);
- uint32_t h = s->len;
- while (*p) h = h ^ (lj_rol(h, 6) + *p++);
- lua_pushvalue(L, 1);
- lua_rawseti(L, -2, VMEVENT_HASHIDX(h));
- G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */
- } else { /* Detach if no event given. */
- setnilV(L->top++);
- while (lua_next(L, -2)) {
- L->top--;
- if (tvisfunc(L->top) && funcV(L->top) == fn) {
- setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1));
- }
- }
- }
-#endif
- return 0;
-}
-
-LJLIB_PUSH(top-5) LJLIB_SET(os)
-LJLIB_PUSH(top-4) LJLIB_SET(arch)
-LJLIB_PUSH(top-3) LJLIB_SET(version_num)
-LJLIB_PUSH(top-2) LJLIB_SET(version)
-
-#include "lj_libdef.h"
-
-/* -- jit.util.* functions ------------------------------------------------ */
-
-#define LJLIB_MODULE_jit_util
-
-/* -- Reflection API for Lua functions ------------------------------------ */
-
-/* Return prototype of first argument (Lua function or prototype object) */
-static GCproto *check_Lproto(lua_State *L, int nolua)
-{
- TValue *o = L->base;
- if (L->top > o) {
- if (tvisproto(o)) {
- return protoV(o);
- } else if (tvisfunc(o)) {
- if (isluafunc(funcV(o)))
- return funcproto(funcV(o));
- else if (nolua)
- return NULL;
- }
- }
- lj_err_argt(L, 1, LUA_TFUNCTION);
- return NULL; /* unreachable */
-}
-
-static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val)
-{
- setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val);
-}
-
-/* local info = jit.util.funcinfo(func [,pc]) */
-LJLIB_CF(jit_util_funcinfo)
-{
- GCproto *pt = check_Lproto(L, 1);
- if (pt) {
- BCPos pc = (BCPos)lj_lib_optint(L, 2, 0);
- GCtab *t;
- lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */
- t = tabV(L->top-1);
- setintfield(L, t, "linedefined", pt->firstline);
- setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline);
- setintfield(L, t, "stackslots", pt->framesize);
- setintfield(L, t, "params", pt->numparams);
- setintfield(L, t, "bytecodes", (int32_t)pt->sizebc);
- setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc);
- setintfield(L, t, "nconsts", (int32_t)pt->sizekn);
- setintfield(L, t, "upvalues", (int32_t)pt->sizeuv);
- if (pc < pt->sizebc)
- setintfield(L, t, "currentline", lj_debug_line(pt, pc));
- lua_pushboolean(L, (pt->flags & PROTO_VARARG));
- lua_setfield(L, -2, "isvararg");
- lua_pushboolean(L, (pt->flags & PROTO_CHILD));
- lua_setfield(L, -2, "children");
- setstrV(L, L->top++, proto_chunkname(pt));
- lua_setfield(L, -2, "source");
- lj_debug_pushloc(L, pt, pc);
- lua_setfield(L, -2, "loc");
- } else {
- GCfunc *fn = funcV(L->base);
- GCtab *t;
- lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */
- t = tabV(L->top-1);
- if (!iscfunc(fn))
- setintfield(L, t, "ffid", fn->c.ffid);
- setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")),
- (intptr_t)(void *)fn->c.f);
- setintfield(L, t, "upvalues", fn->c.nupvalues);
- }
- return 1;
-}
-
-/* local ins, m = jit.util.funcbc(func, pc) */
-LJLIB_CF(jit_util_funcbc)
-{
- GCproto *pt = check_Lproto(L, 0);
- BCPos pc = (BCPos)lj_lib_checkint(L, 2);
- if (pc < pt->sizebc) {
- BCIns ins = proto_bc(pt)[pc];
- BCOp op = bc_op(ins);
- lua_assert(op < BC__MAX);
- setintV(L->top, ins);
- setintV(L->top+1, lj_bc_mode[op]);
- L->top += 2;
- return 2;
- }
- return 0;
-}
-
-/* local k = jit.util.funck(func, idx) */
-LJLIB_CF(jit_util_funck)
-{
- GCproto *pt = check_Lproto(L, 0);
- ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2);
- if (idx >= 0) {
- if (idx < (ptrdiff_t)pt->sizekn) {
- copyTV(L, L->top-1, proto_knumtv(pt, idx));
- return 1;
- }
- } else {
- if (~idx < (ptrdiff_t)pt->sizekgc) {
- GCobj *gc = proto_kgc(pt, idx);
- setgcV(L, L->top-1, gc, ~gc->gch.gct);
- return 1;
- }
- }
- return 0;
-}
-
-/* local name = jit.util.funcuvname(func, idx) */
-LJLIB_CF(jit_util_funcuvname)
-{
- GCproto *pt = check_Lproto(L, 0);
- uint32_t idx = (uint32_t)lj_lib_checkint(L, 2);
- if (idx < pt->sizeuv) {
- setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx)));
- return 1;
- }
- return 0;
-}
-
-/* -- Reflection API for traces ------------------------------------------- */
-
-#if LJ_HASJIT
-
-/* Check trace argument. Must not throw for non-existent trace numbers. */
-static GCtrace *jit_checktrace(lua_State *L)
-{
- TraceNo tr = (TraceNo)lj_lib_checkint(L, 1);
- jit_State *J = L2J(L);
- if (tr > 0 && tr < J->sizetrace)
- return traceref(J, tr);
- return NULL;
-}
-
-/* Names of link types. ORDER LJ_TRLINK */
-static const char *const jit_trlinkname[] = {
- "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion",
- "interpreter", "return"
-};
-
-/* local info = jit.util.traceinfo(tr) */
-LJLIB_CF(jit_util_traceinfo)
-{
- GCtrace *T = jit_checktrace(L);
- if (T) {
- GCtab *t;
- lua_createtable(L, 0, 8); /* Increment hash size if fields are added. */
- t = tabV(L->top-1);
- setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1);
- setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk);
- setintfield(L, t, "link", T->link);
- setintfield(L, t, "nexit", T->nsnap);
- setstrV(L, L->top++, lj_str_newz(L, jit_trlinkname[T->linktype]));
- lua_setfield(L, -2, "linktype");
- /* There are many more fields. Add them only when needed. */
- return 1;
- }
- return 0;
-}
-
-/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */
-LJLIB_CF(jit_util_traceir)
-{
- GCtrace *T = jit_checktrace(L);
- IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
- if (T && ref >= REF_BIAS && ref < T->nins) {
- IRIns *ir = &T->ir[ref];
- int32_t m = lj_ir_mode[ir->o];
- setintV(L->top-2, m);
- setintV(L->top-1, ir->ot);
- setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0));
- setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? REF_BIAS : 0));
- setintV(L->top++, ir->prev);
- return 5;
- }
- return 0;
-}
-
-/* local k, t [, slot] = jit.util.tracek(tr, idx) */
-LJLIB_CF(jit_util_tracek)
-{
- GCtrace *T = jit_checktrace(L);
- IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
- if (T && ref >= T->nk && ref < REF_BIAS) {
- IRIns *ir = &T->ir[ref];
- int32_t slot = -1;
- if (ir->o == IR_KSLOT) {
- slot = ir->op2;
- ir = &T->ir[ir->op1];
- }
- lj_ir_kvalue(L, L->top-2, ir);
- setintV(L->top-1, (int32_t)irt_type(ir->t));
- if (slot == -1)
- return 2;
- setintV(L->top++, slot);
- return 3;
- }
- return 0;
-}
-
-/* local snap = jit.util.tracesnap(tr, sn) */
-LJLIB_CF(jit_util_tracesnap)
-{
- GCtrace *T = jit_checktrace(L);
- SnapNo sn = (SnapNo)lj_lib_checkint(L, 2);
- if (T && sn < T->nsnap) {
- SnapShot *snap = &T->snap[sn];
- SnapEntry *map = &T->snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- GCtab *t;
- lua_createtable(L, nent+2, 0);
- t = tabV(L->top-1);
- setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS);
- setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots);
- for (n = 0; n < nent; n++)
- setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]);
- setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0));
- return 1;
- }
- return 0;
-}
-
-/* local mcode, addr, loop = jit.util.tracemc(tr) */
-LJLIB_CF(jit_util_tracemc)
-{
- GCtrace *T = jit_checktrace(L);
- if (T && T->mcode != NULL) {
- setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode));
- setintptrV(L->top++, (intptr_t)(void *)T->mcode);
- setintV(L->top++, T->mcloop);
- return 3;
- }
- return 0;
-}
-
-/* local addr = jit.util.traceexitstub([tr,] exitno) */
-LJLIB_CF(jit_util_traceexitstub)
-{
-#ifdef EXITSTUBS_PER_GROUP
- ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
- jit_State *J = L2J(L);
- if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
- setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
- return 1;
- }
-#else
- if (L->top > L->base+1) { /* Don't throw for one-argument variant. */
- GCtrace *T = jit_checktrace(L);
- ExitNo exitno = (ExitNo)lj_lib_checkint(L, 2);
- ExitNo maxexit = T->root ? T->nsnap+1 : T->nsnap;
- if (T && T->mcode != NULL && exitno < maxexit) {
- setintptrV(L->top-1, (intptr_t)(void *)exitstub_trace_addr(T, exitno));
- return 1;
- }
- }
-#endif
- return 0;
-}
-
-/* local addr = jit.util.ircalladdr(idx) */
-LJLIB_CF(jit_util_ircalladdr)
-{
- uint32_t idx = (uint32_t)lj_lib_checkint(L, 1);
- if (idx < IRCALL__MAX) {
- setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func);
- return 1;
- }
- return 0;
-}
-
-#endif
-
-#include "lj_libdef.h"
-
-/* -- jit.opt module ------------------------------------------------------ */
-
-#if LJ_HASJIT
-
-#define LJLIB_MODULE_jit_opt
-
-/* Parse optimization level. */
-static int jitopt_level(jit_State *J, const char *str)
-{
- if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') {
- uint32_t flags;
- if (str[0] == '0') flags = JIT_F_OPT_0;
- else if (str[0] == '1') flags = JIT_F_OPT_1;
- else if (str[0] == '2') flags = JIT_F_OPT_2;
- else flags = JIT_F_OPT_3;
- J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags;
- return 1; /* Ok. */
- }
- return 0; /* No match. */
-}
-
-/* Parse optimization flag. */
-static int jitopt_flag(jit_State *J, const char *str)
-{
- const char *lst = JIT_F_OPTSTRING;
- uint32_t opt;
- int set = 1;
- if (str[0] == '+') {
- str++;
- } else if (str[0] == '-') {
- str++;
- set = 0;
- } else if (str[0] == 'n' && str[1] == 'o') {
- str += str[2] == '-' ? 3 : 2;
- set = 0;
- }
- for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) {
- size_t len = *(const uint8_t *)lst;
- if (len == 0)
- break;
- if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') {
- if (set) J->flags |= opt; else J->flags &= ~opt;
- return 1; /* Ok. */
- }
- lst += 1+len;
- }
- return 0; /* No match. */
-}
-
-/* Parse optimization parameter. */
-static int jitopt_param(jit_State *J, const char *str)
-{
- const char *lst = JIT_P_STRING;
- int i;
- for (i = 0; i < JIT_P__MAX; i++) {
- size_t len = *(const uint8_t *)lst;
- lua_assert(len != 0);
- if (strncmp(str, lst+1, len) == 0 && str[len] == '=') {
- int32_t n = 0;
- const char *p = &str[len+1];
- while (*p >= '0' && *p <= '9')
- n = n*10 + (*p++ - '0');
- if (*p) return 0; /* Malformed number. */
- J->param[i] = n;
- if (i == JIT_P_hotloop)
- lj_dispatch_init_hotcount(J2G(J));
- return 1; /* Ok. */
- }
- lst += 1+len;
- }
- return 0; /* No match. */
-}
-
-/* jit.opt.start(flags...) */
-LJLIB_CF(jit_opt_start)
-{
- jit_State *J = L2J(L);
- int nargs = (int)(L->top - L->base);
- if (nargs == 0) {
- J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT;
- } else {
- int i;
- for (i = 1; i <= nargs; i++) {
- const char *str = strdata(lj_lib_checkstr(L, i));
- if (!jitopt_level(J, str) &&
- !jitopt_flag(J, str) &&
- !jitopt_param(J, str))
- lj_err_callerv(L, LJ_ERR_JITOPT, str);
- }
- }
- return 0;
-}
-
-#include "lj_libdef.h"
-
-#endif
-
-/* -- JIT compiler initialization ----------------------------------------- */
-
-#if LJ_HASJIT
-/* Default values for JIT parameters. */
-static const int32_t jit_param_default[JIT_P__MAX+1] = {
-#define JIT_PARAMINIT(len, name, value) (value),
-JIT_PARAMDEF(JIT_PARAMINIT)
-#undef JIT_PARAMINIT
- 0
-};
-#endif
-
-#if LJ_TARGET_ARM && LJ_TARGET_LINUX
-#include <sys/utsname.h>
-#endif
-
-/* Arch-dependent CPU detection. */
-static uint32_t jit_cpudetect(lua_State *L)
-{
- uint32_t flags = 0;
-#if LJ_TARGET_X86ORX64
- uint32_t vendor[4];
- uint32_t features[4];
- if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) {
-#if !LJ_HASJIT
-#define JIT_F_CMOV 1
-#define JIT_F_SSE2 2
-#endif
- flags |= ((features[3] >> 15)&1) * JIT_F_CMOV;
- flags |= ((features[3] >> 26)&1) * JIT_F_SSE2;
-#if LJ_HASJIT
- flags |= ((features[2] >> 0)&1) * JIT_F_SSE3;
- flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1;
- if (vendor[2] == 0x6c65746e) { /* Intel. */
- if ((features[0] & 0x0ff00f00) == 0x00000f00) /* P4. */
- flags |= JIT_F_P4; /* Currently unused. */
- else if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */
- flags |= JIT_F_LEA_AGU;
- } else if (vendor[2] == 0x444d4163) { /* AMD. */
- uint32_t fam = (features[0] & 0x0ff00f00);
- if (fam == 0x00000f00) /* K8. */
- flags |= JIT_F_SPLIT_XMM;
- if (fam >= 0x00000f00) /* K8, K10. */
- flags |= JIT_F_PREFER_IMUL;
- }
-#endif
- }
- /* Check for required instruction set support on x86 (unnecessary on x64). */
-#if LJ_TARGET_X86
-#if !defined(LUAJIT_CPU_NOCMOV)
- if (!(flags & JIT_F_CMOV))
- luaL_error(L, "CPU not supported");
-#endif
-#if defined(LUAJIT_CPU_SSE2)
- if (!(flags & JIT_F_SSE2))
- luaL_error(L, "CPU does not support SSE2 (recompile without -DLUAJIT_CPU_SSE2)");
-#endif
-#endif
-#elif LJ_TARGET_ARM
-#if LJ_HASJIT
- int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */
-#if LJ_TARGET_LINUX
- if (ver < 70) { /* Runtime ARM CPU detection. */
- struct utsname ut;
- uname(&ut);
- if (strncmp(ut.machine, "armv", 4) == 0) {
- if (ut.machine[4] >= '7')
- ver = 70;
- else if (ut.machine[4] == '6')
- ver = 60;
- }
- }
-#endif
- flags |= ver >= 70 ? JIT_F_ARMV7 :
- ver >= 61 ? JIT_F_ARMV6T2_ :
- ver >= 60 ? JIT_F_ARMV6_ : 0;
- flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2;
-#endif
-#elif LJ_TARGET_PPC
-#if LJ_HASJIT
-#if LJ_ARCH_SQRT
- flags |= JIT_F_SQRT;
-#endif
-#if LJ_ARCH_ROUND
- flags |= JIT_F_ROUND;
-#endif
-#endif
-#elif LJ_TARGET_PPCSPE
- /* Nothing to do. */
-#elif LJ_TARGET_MIPS
-#if LJ_HASJIT
- /* Compile-time MIPS CPU detection. */
-#if LJ_ARCH_VERSION >= 20
- flags |= JIT_F_MIPS32R2;
-#endif
- /* Runtime MIPS CPU detection. */
-#if defined(__GNUC__)
- if (!(flags & JIT_F_MIPS32R2)) {
- int x;
- /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. */
- __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2");
- if (x) flags |= JIT_F_MIPS32R2; /* Either 0x80000000 (R2) or 0 (R1). */
- }
-#endif
-#endif
-#else
-#error "Missing CPU detection for this architecture"
-#endif
- UNUSED(L);
- return flags;
-}
-
-/* Initialize JIT compiler. */
-static void jit_init(lua_State *L)
-{
- uint32_t flags = jit_cpudetect(L);
-#if LJ_HASJIT
- jit_State *J = L2J(L);
-#if LJ_TARGET_X86
- /* Silently turn off the JIT compiler on CPUs without SSE2. */
- if ((flags & JIT_F_SSE2))
-#endif
- J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT;
- memcpy(J->param, jit_param_default, sizeof(J->param));
- lj_dispatch_update(G(L));
-#else
- UNUSED(flags);
-#endif
-}
-
-LUALIB_API int luaopen_jit(lua_State *L)
-{
- lua_pushliteral(L, LJ_OS_NAME);
- lua_pushliteral(L, LJ_ARCH_NAME);
- lua_pushinteger(L, LUAJIT_VERSION_NUM);
- lua_pushliteral(L, LUAJIT_VERSION);
- LJ_LIB_REG(L, LUA_JITLIBNAME, jit);
-#ifndef LUAJIT_DISABLE_JITUTIL
- LJ_LIB_REG(L, "jit.util", jit_util);
-#endif
-#if LJ_HASJIT
- LJ_LIB_REG(L, "jit.opt", jit_opt);
-#endif
- L->top -= 2;
- jit_init(L);
- return 1;
-}
-
+/*
+** JIT library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_jit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_arch.h"
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+#endif
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+#include "lj_lib.h"
+
+#include "luajit.h"
+
+/* -- jit.* functions ----------------------------------------------------- */
+
+#define LJLIB_MODULE_jit
+
+static int setjitmode(lua_State *L, int mode)
+{
+ int idx = 0;
+ if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */
+ mode |= LUAJIT_MODE_ENGINE;
+ } else {
+ /* jit.on/off/flush(func|proto, nil|true|false) */
+ if (tvisfunc(L->base) || tvisproto(L->base))
+ idx = 1;
+ else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */
+ goto err;
+ if (L->base+1 < L->top && tvisbool(L->base+1))
+ mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC;
+ else
+ mode |= LUAJIT_MODE_FUNC;
+ }
+ if (luaJIT_setmode(L, idx, mode) != 1) {
+ if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE)
+ lj_err_caller(L, LJ_ERR_NOJIT);
+ err:
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ }
+ return 0;
+}
+
+LJLIB_CF(jit_on)
+{
+ return setjitmode(L, LUAJIT_MODE_ON);
+}
+
+LJLIB_CF(jit_off)
+{
+ return setjitmode(L, LUAJIT_MODE_OFF);
+}
+
+LJLIB_CF(jit_flush)
+{
+#if LJ_HASJIT
+ if (L->base < L->top && !tvisnil(L->base)) {
+ int traceno = lj_lib_checkint(L, 1);
+ luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE);
+ return 0;
+ }
+#endif
+ return setjitmode(L, LUAJIT_MODE_FLUSH);
+}
+
+#if LJ_HASJIT
+/* Push a string for every flag bit that is set. */
+static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base,
+ const char *str)
+{
+ for (; *str; base <<= 1, str += 1+*str)
+ if (flags & base)
+ setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str));
+}
+#endif
+
+LJLIB_CF(jit_status)
+{
+#if LJ_HASJIT
+ jit_State *J = L2J(L);
+ L->top = L->base;
+ setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0);
+ flagbits_to_strings(L, J->flags, JIT_F_CPU_FIRST, JIT_F_CPUSTRING);
+ flagbits_to_strings(L, J->flags, JIT_F_OPT_FIRST, JIT_F_OPTSTRING);
+ return (int)(L->top - L->base);
+#else
+ setboolV(L->top++, 0);
+ return 1;
+#endif
+}
+
+LJLIB_CF(jit_attach)
+{
+#ifdef LUAJIT_DISABLE_VMEVENT
+ luaL_error(L, "vmevent API disabled");
+#else
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ GCstr *s = lj_lib_optstr(L, 2);
+ luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE);
+ if (s) { /* Attach to given event. */
+ const uint8_t *p = (const uint8_t *)strdata(s);
+ uint32_t h = s->len;
+ while (*p) h = h ^ (lj_rol(h, 6) + *p++);
+ lua_pushvalue(L, 1);
+ lua_rawseti(L, -2, VMEVENT_HASHIDX(h));
+ G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */
+ } else { /* Detach if no event given. */
+ setnilV(L->top++);
+ while (lua_next(L, -2)) {
+ L->top--;
+ if (tvisfunc(L->top) && funcV(L->top) == fn) {
+ setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1));
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+LJLIB_PUSH(top-5) LJLIB_SET(os)
+LJLIB_PUSH(top-4) LJLIB_SET(arch)
+LJLIB_PUSH(top-3) LJLIB_SET(version_num)
+LJLIB_PUSH(top-2) LJLIB_SET(version)
+
+#include "lj_libdef.h"
+
+/* -- jit.util.* functions ------------------------------------------------ */
+
+#define LJLIB_MODULE_jit_util
+
+/* -- Reflection API for Lua functions ------------------------------------ */
+
+/* Return prototype of first argument (Lua function or prototype object) */
+static GCproto *check_Lproto(lua_State *L, int nolua)
+{
+ TValue *o = L->base;
+ if (L->top > o) {
+ if (tvisproto(o)) {
+ return protoV(o);
+ } else if (tvisfunc(o)) {
+ if (isluafunc(funcV(o)))
+ return funcproto(funcV(o));
+ else if (nolua)
+ return NULL;
+ }
+ }
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ return NULL; /* unreachable */
+}
+
+static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val)
+{
+ setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val);
+}
+
+/* local info = jit.util.funcinfo(func [,pc]) */
+LJLIB_CF(jit_util_funcinfo)
+{
+ GCproto *pt = check_Lproto(L, 1);
+ if (pt) {
+ BCPos pc = (BCPos)lj_lib_optint(L, 2, 0);
+ GCtab *t;
+ lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintfield(L, t, "linedefined", pt->firstline);
+ setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline);
+ setintfield(L, t, "stackslots", pt->framesize);
+ setintfield(L, t, "params", pt->numparams);
+ setintfield(L, t, "bytecodes", (int32_t)pt->sizebc);
+ setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc);
+ setintfield(L, t, "nconsts", (int32_t)pt->sizekn);
+ setintfield(L, t, "upvalues", (int32_t)pt->sizeuv);
+ if (pc < pt->sizebc)
+ setintfield(L, t, "currentline", lj_debug_line(pt, pc));
+ lua_pushboolean(L, (pt->flags & PROTO_VARARG));
+ lua_setfield(L, -2, "isvararg");
+ lua_pushboolean(L, (pt->flags & PROTO_CHILD));
+ lua_setfield(L, -2, "children");
+ setstrV(L, L->top++, proto_chunkname(pt));
+ lua_setfield(L, -2, "source");
+ lj_debug_pushloc(L, pt, pc);
+ lua_setfield(L, -2, "loc");
+ } else {
+ GCfunc *fn = funcV(L->base);
+ GCtab *t;
+ lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ if (!iscfunc(fn))
+ setintfield(L, t, "ffid", fn->c.ffid);
+ setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")),
+ (intptr_t)(void *)fn->c.f);
+ setintfield(L, t, "upvalues", fn->c.nupvalues);
+ }
+ return 1;
+}
+
+/* local ins, m = jit.util.funcbc(func, pc) */
+LJLIB_CF(jit_util_funcbc)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ BCPos pc = (BCPos)lj_lib_checkint(L, 2);
+ if (pc < pt->sizebc) {
+ BCIns ins = proto_bc(pt)[pc];
+ BCOp op = bc_op(ins);
+ lua_assert(op < BC__MAX);
+ setintV(L->top, ins);
+ setintV(L->top+1, lj_bc_mode[op]);
+ L->top += 2;
+ return 2;
+ }
+ return 0;
+}
+
+/* local k = jit.util.funck(func, idx) */
+LJLIB_CF(jit_util_funck)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2);
+ if (idx >= 0) {
+ if (idx < (ptrdiff_t)pt->sizekn) {
+ copyTV(L, L->top-1, proto_knumtv(pt, idx));
+ return 1;
+ }
+ } else {
+ if (~idx < (ptrdiff_t)pt->sizekgc) {
+ GCobj *gc = proto_kgc(pt, idx);
+ setgcV(L, L->top-1, gc, ~gc->gch.gct);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* local name = jit.util.funcuvname(func, idx) */
+LJLIB_CF(jit_util_funcuvname)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ uint32_t idx = (uint32_t)lj_lib_checkint(L, 2);
+ if (idx < pt->sizeuv) {
+ setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx)));
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Reflection API for traces ------------------------------------------- */
+
+#if LJ_HASJIT
+
+/* Check trace argument. Must not throw for non-existent trace numbers. */
+static GCtrace *jit_checktrace(lua_State *L)
+{
+ TraceNo tr = (TraceNo)lj_lib_checkint(L, 1);
+ jit_State *J = L2J(L);
+ if (tr > 0 && tr < J->sizetrace)
+ return traceref(J, tr);
+ return NULL;
+}
+
+/* Names of link types. ORDER LJ_TRLINK */
+static const char *const jit_trlinkname[] = {
+ "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion",
+ "interpreter", "return"
+};
+
+/* local info = jit.util.traceinfo(tr) */
+LJLIB_CF(jit_util_traceinfo)
+{
+ GCtrace *T = jit_checktrace(L);
+ if (T) {
+ GCtab *t;
+ lua_createtable(L, 0, 8); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1);
+ setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk);
+ setintfield(L, t, "link", T->link);
+ setintfield(L, t, "nexit", T->nsnap);
+ setstrV(L, L->top++, lj_str_newz(L, jit_trlinkname[T->linktype]));
+ lua_setfield(L, -2, "linktype");
+ /* There are many more fields. Add them only when needed. */
+ return 1;
+ }
+ return 0;
+}
+
+/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */
+LJLIB_CF(jit_util_traceir)
+{
+ GCtrace *T = jit_checktrace(L);
+ IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
+ if (T && ref >= REF_BIAS && ref < T->nins) {
+ IRIns *ir = &T->ir[ref];
+ int32_t m = lj_ir_mode[ir->o];
+ setintV(L->top-2, m);
+ setintV(L->top-1, ir->ot);
+ setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0));
+ setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? REF_BIAS : 0));
+ setintV(L->top++, ir->prev);
+ return 5;
+ }
+ return 0;
+}
+
+/* local k, t [, slot] = jit.util.tracek(tr, idx) */
+LJLIB_CF(jit_util_tracek)
+{
+ GCtrace *T = jit_checktrace(L);
+ IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
+ if (T && ref >= T->nk && ref < REF_BIAS) {
+ IRIns *ir = &T->ir[ref];
+ int32_t slot = -1;
+ if (ir->o == IR_KSLOT) {
+ slot = ir->op2;
+ ir = &T->ir[ir->op1];
+ }
+ lj_ir_kvalue(L, L->top-2, ir);
+ setintV(L->top-1, (int32_t)irt_type(ir->t));
+ if (slot == -1)
+ return 2;
+ setintV(L->top++, slot);
+ return 3;
+ }
+ return 0;
+}
+
+/* local snap = jit.util.tracesnap(tr, sn) */
+LJLIB_CF(jit_util_tracesnap)
+{
+ GCtrace *T = jit_checktrace(L);
+ SnapNo sn = (SnapNo)lj_lib_checkint(L, 2);
+ if (T && sn < T->nsnap) {
+ SnapShot *snap = &T->snap[sn];
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ GCtab *t;
+ lua_createtable(L, nent+2, 0);
+ t = tabV(L->top-1);
+ setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS);
+ setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots);
+ for (n = 0; n < nent; n++)
+ setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]);
+ setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0));
+ return 1;
+ }
+ return 0;
+}
+
+/* local mcode, addr, loop = jit.util.tracemc(tr) */
+LJLIB_CF(jit_util_tracemc)
+{
+ GCtrace *T = jit_checktrace(L);
+ if (T && T->mcode != NULL) {
+ setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode));
+ setintptrV(L->top++, (intptr_t)(void *)T->mcode);
+ setintV(L->top++, T->mcloop);
+ return 3;
+ }
+ return 0;
+}
+
+/* local addr = jit.util.traceexitstub([tr,] exitno) */
+LJLIB_CF(jit_util_traceexitstub)
+{
+#ifdef EXITSTUBS_PER_GROUP
+ ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
+ jit_State *J = L2J(L);
+ if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
+ setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
+ return 1;
+ }
+#else
+ if (L->top > L->base+1) { /* Don't throw for one-argument variant. */
+ GCtrace *T = jit_checktrace(L);
+ ExitNo exitno = (ExitNo)lj_lib_checkint(L, 2);
+ ExitNo maxexit = T->root ? T->nsnap+1 : T->nsnap;
+ if (T && T->mcode != NULL && exitno < maxexit) {
+ setintptrV(L->top-1, (intptr_t)(void *)exitstub_trace_addr(T, exitno));
+ return 1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* local addr = jit.util.ircalladdr(idx) */
+LJLIB_CF(jit_util_ircalladdr)
+{
+ uint32_t idx = (uint32_t)lj_lib_checkint(L, 1);
+ if (idx < IRCALL__MAX) {
+ setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func);
+ return 1;
+ }
+ return 0;
+}
+
+#endif
+
+#include "lj_libdef.h"
+
+/* -- jit.opt module ------------------------------------------------------ */
+
+#if LJ_HASJIT
+
+#define LJLIB_MODULE_jit_opt
+
+/* Parse optimization level. */
+static int jitopt_level(jit_State *J, const char *str)
+{
+ if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') {
+ uint32_t flags;
+ if (str[0] == '0') flags = JIT_F_OPT_0;
+ else if (str[0] == '1') flags = JIT_F_OPT_1;
+ else if (str[0] == '2') flags = JIT_F_OPT_2;
+ else flags = JIT_F_OPT_3;
+ J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags;
+ return 1; /* Ok. */
+ }
+ return 0; /* No match. */
+}
+
+/* Parse optimization flag. */
+static int jitopt_flag(jit_State *J, const char *str)
+{
+ const char *lst = JIT_F_OPTSTRING;
+ uint32_t opt;
+ int set = 1;
+ if (str[0] == '+') {
+ str++;
+ } else if (str[0] == '-') {
+ str++;
+ set = 0;
+ } else if (str[0] == 'n' && str[1] == 'o') {
+ str += str[2] == '-' ? 3 : 2;
+ set = 0;
+ }
+ for (opt = JIT_F_OPT_FIRST; ; opt <<= 1) {
+ size_t len = *(const uint8_t *)lst;
+ if (len == 0)
+ break;
+ if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') {
+ if (set) J->flags |= opt; else J->flags &= ~opt;
+ return 1; /* Ok. */
+ }
+ lst += 1+len;
+ }
+ return 0; /* No match. */
+}
+
+/* Parse optimization parameter. */
+static int jitopt_param(jit_State *J, const char *str)
+{
+ const char *lst = JIT_P_STRING;
+ int i;
+ for (i = 0; i < JIT_P__MAX; i++) {
+ size_t len = *(const uint8_t *)lst;
+ lua_assert(len != 0);
+ if (strncmp(str, lst+1, len) == 0 && str[len] == '=') {
+ int32_t n = 0;
+ const char *p = &str[len+1];
+ while (*p >= '0' && *p <= '9')
+ n = n*10 + (*p++ - '0');
+ if (*p) return 0; /* Malformed number. */
+ J->param[i] = n;
+ if (i == JIT_P_hotloop)
+ lj_dispatch_init_hotcount(J2G(J));
+ return 1; /* Ok. */
+ }
+ lst += 1+len;
+ }
+ return 0; /* No match. */
+}
+
+/* jit.opt.start(flags...) */
+LJLIB_CF(jit_opt_start)
+{
+ jit_State *J = L2J(L);
+ int nargs = (int)(L->top - L->base);
+ if (nargs == 0) {
+ J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT;
+ } else {
+ int i;
+ for (i = 1; i <= nargs; i++) {
+ const char *str = strdata(lj_lib_checkstr(L, i));
+ if (!jitopt_level(J, str) &&
+ !jitopt_flag(J, str) &&
+ !jitopt_param(J, str))
+ lj_err_callerv(L, LJ_ERR_JITOPT, str);
+ }
+ }
+ return 0;
+}
+
+#include "lj_libdef.h"
+
+#endif
+
+/* -- JIT compiler initialization ----------------------------------------- */
+
+#if LJ_HASJIT
+/* Default values for JIT parameters. */
+static const int32_t jit_param_default[JIT_P__MAX+1] = {
+#define JIT_PARAMINIT(len, name, value) (value),
+JIT_PARAMDEF(JIT_PARAMINIT)
+#undef JIT_PARAMINIT
+ 0
+};
+#endif
+
+#if LJ_TARGET_ARM && LJ_TARGET_LINUX
+#include <sys/utsname.h>
+#endif
+
+/* Arch-dependent CPU detection. */
+static uint32_t jit_cpudetect(lua_State *L)
+{
+ uint32_t flags = 0;
+#if LJ_TARGET_X86ORX64
+ uint32_t vendor[4];
+ uint32_t features[4];
+ if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) {
+#if !LJ_HASJIT
+#define JIT_F_CMOV 1
+#define JIT_F_SSE2 2
+#endif
+ flags |= ((features[3] >> 15)&1) * JIT_F_CMOV;
+ flags |= ((features[3] >> 26)&1) * JIT_F_SSE2;
+#if LJ_HASJIT
+ flags |= ((features[2] >> 0)&1) * JIT_F_SSE3;
+ flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1;
+ if (vendor[2] == 0x6c65746e) { /* Intel. */
+ if ((features[0] & 0x0ff00f00) == 0x00000f00) /* P4. */
+ flags |= JIT_F_P4; /* Currently unused. */
+ else if ((features[0] & 0x0fff0ff0) == 0x000106c0) /* Atom. */
+ flags |= JIT_F_LEA_AGU;
+ } else if (vendor[2] == 0x444d4163) { /* AMD. */
+ uint32_t fam = (features[0] & 0x0ff00f00);
+ if (fam == 0x00000f00) /* K8. */
+ flags |= JIT_F_SPLIT_XMM;
+ if (fam >= 0x00000f00) /* K8, K10. */
+ flags |= JIT_F_PREFER_IMUL;
+ }
+#endif
+ }
+ /* Check for required instruction set support on x86 (unnecessary on x64). */
+#if LJ_TARGET_X86
+#if !defined(LUAJIT_CPU_NOCMOV)
+ if (!(flags & JIT_F_CMOV))
+ luaL_error(L, "CPU not supported");
+#endif
+#if defined(LUAJIT_CPU_SSE2)
+ if (!(flags & JIT_F_SSE2))
+ luaL_error(L, "CPU does not support SSE2 (recompile without -DLUAJIT_CPU_SSE2)");
+#endif
+#endif
+#elif LJ_TARGET_ARM
+#if LJ_HASJIT
+ int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */
+#if LJ_TARGET_LINUX
+ if (ver < 70) { /* Runtime ARM CPU detection. */
+ struct utsname ut;
+ uname(&ut);
+ if (strncmp(ut.machine, "armv", 4) == 0) {
+ if (ut.machine[4] >= '7')
+ ver = 70;
+ else if (ut.machine[4] == '6')
+ ver = 60;
+ }
+ }
+#endif
+ flags |= ver >= 70 ? JIT_F_ARMV7 :
+ ver >= 61 ? JIT_F_ARMV6T2_ :
+ ver >= 60 ? JIT_F_ARMV6_ : 0;
+ flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2;
+#endif
+#elif LJ_TARGET_PPC
+#if LJ_HASJIT
+#if LJ_ARCH_SQRT
+ flags |= JIT_F_SQRT;
+#endif
+#if LJ_ARCH_ROUND
+ flags |= JIT_F_ROUND;
+#endif
+#endif
+#elif LJ_TARGET_PPCSPE
+ /* Nothing to do. */
+#elif LJ_TARGET_MIPS
+#if LJ_HASJIT
+ /* Compile-time MIPS CPU detection. */
+#if LJ_ARCH_VERSION >= 20
+ flags |= JIT_F_MIPS32R2;
+#endif
+ /* Runtime MIPS CPU detection. */
+#if defined(__GNUC__)
+ if (!(flags & JIT_F_MIPS32R2)) {
+ int x;
+ /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. */
+ __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2");
+ if (x) flags |= JIT_F_MIPS32R2; /* Either 0x80000000 (R2) or 0 (R1). */
+ }
+#endif
+#endif
+#else
+#error "Missing CPU detection for this architecture"
+#endif
+ UNUSED(L);
+ return flags;
+}
+
+/* Initialize JIT compiler. */
+static void jit_init(lua_State *L)
+{
+ uint32_t flags = jit_cpudetect(L);
+#if LJ_HASJIT
+ jit_State *J = L2J(L);
+#if LJ_TARGET_X86
+ /* Silently turn off the JIT compiler on CPUs without SSE2. */
+ if ((flags & JIT_F_SSE2))
+#endif
+ J->flags = flags | JIT_F_ON | JIT_F_OPT_DEFAULT;
+ memcpy(J->param, jit_param_default, sizeof(J->param));
+ lj_dispatch_update(G(L));
+#else
+ UNUSED(flags);
+#endif
+}
+
+LUALIB_API int luaopen_jit(lua_State *L)
+{
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ lua_pushinteger(L, LUAJIT_VERSION_NUM);
+ lua_pushliteral(L, LUAJIT_VERSION);
+ LJ_LIB_REG(L, LUA_JITLIBNAME, jit);
+#ifndef LUAJIT_DISABLE_JITUTIL
+ LJ_LIB_REG(L, "jit.util", jit_util);
+#endif
+#if LJ_HASJIT
+ LJ_LIB_REG(L, "jit.opt", jit_opt);
+#endif
+ L->top -= 2;
+ jit_init(L);
+ return 1;
+}
+
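The jit.opt.start() parser restored above accepts an optimization level digit ("0".."3" via jitopt_level), flag names prefixed with '+', '-' or "no" (jitopt_flag, matched against JIT_F_OPTSTRING), and name=value parameters such as hotloop (jitopt_param). A minimal host-side sketch of driving this through the Lua C API follows; it assumes a standard LuaJIT 2.0 build whose luaL_openlibs() registers luaopen_jit(), and the option strings passed are purely illustrative.

/* Minimal host-side sketch (assumes a standard LuaJIT 2.0 build and headers).
** The option strings are illustrative; "hotloop" is the only parameter name
** visible in the hunk above, the level digit is parsed by jitopt_level().
*/
#include <stdio.h>

#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

int main(void)
{
  lua_State *L = luaL_newstate();
  if (L == NULL) return 1;
  luaL_openlibs(L);  /* Registers jit, jit.util and jit.opt via luaopen_jit(). */
  if (luaL_dostring(L, "jit.opt.start('2', 'hotloop=56')\n"
                       "print(jit.status())"))
    fprintf(stderr, "%s\n", lua_tostring(L, -1));  /* Report script errors. */
  lua_close(L);
  return 0;
}

jit.status() then returns whether the compiler is enabled plus the active CPU and optimization flag strings, as assembled by flagbits_to_strings() in the hunk above.
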
diff --git a/3rdparty/lua/src/lib_math.c b/3rdparty/lua/src/lib_math.c
index 7b0523e..b23d9a2 100644
--- a/3rdparty/lua/src/lib_math.c
+++ b/3rdparty/lua/src/lib_math.c
@@ -1,233 +1,233 @@
-/*
-** Math library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include <math.h>
-
-#define lib_math_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_lib.h"
-#include "lj_vm.h"
-
-/* ------------------------------------------------------------------------ */
-
-#define LJLIB_MODULE_math
-
-LJLIB_ASM(math_abs) LJLIB_REC(.)
-{
- lj_lib_checknumber(L, 1);
- return FFH_RETRY;
-}
-LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR)
-LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL)
-
-LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT)
-{
- lj_lib_checknum(L, 1);
- return FFH_RETRY;
-}
-LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10)
-LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP)
-LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN)
-LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS)
-LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN)
-LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin)
-LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos)
-LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan)
-LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh)
-LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh)
-LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh)
-LJLIB_ASM_(math_frexp)
-LJLIB_ASM_(math_modf) LJLIB_REC(.)
-
-LJLIB_PUSH(57.29577951308232)
-LJLIB_ASM_(math_deg) LJLIB_REC(math_degrad)
-
-LJLIB_PUSH(0.017453292519943295)
-LJLIB_ASM_(math_rad) LJLIB_REC(math_degrad)
-
-LJLIB_ASM(math_log) LJLIB_REC(math_log)
-{
- double x = lj_lib_checknum(L, 1);
- if (L->base+1 < L->top) {
- double y = lj_lib_checknum(L, 2);
-#ifdef LUAJIT_NO_LOG2
- x = log(x); y = 1.0 / log(y);
-#else
- x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y);
-#endif
- setnumV(L->base-1, x*y); /* Do NOT join the expression to x / y. */
- return FFH_RES(1);
- }
- return FFH_RETRY;
-}
-
-LJLIB_ASM(math_atan2) LJLIB_REC(.)
-{
- lj_lib_checknum(L, 1);
- lj_lib_checknum(L, 2);
- return FFH_RETRY;
-}
-LJLIB_ASM_(math_pow) LJLIB_REC(.)
-LJLIB_ASM_(math_fmod)
-
-LJLIB_ASM(math_ldexp) LJLIB_REC(.)
-{
- lj_lib_checknum(L, 1);
-#if LJ_DUALNUM && !LJ_TARGET_X86ORX64
- lj_lib_checkint(L, 2);
-#else
- lj_lib_checknum(L, 2);
-#endif
- return FFH_RETRY;
-}
-
-LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN)
-{
- int i = 0;
- do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
- return FFH_RETRY;
-}
-LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX)
-
-LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi)
-LJLIB_PUSH(1e310) LJLIB_SET(huge)
-
-/* ------------------------------------------------------------------------ */
-
-/* This implements a Tausworthe PRNG with period 2^223. Based on:
-** Tables of maximally-equidistributed combined LFSR generators,
-** Pierre L'Ecuyer, 1991, table 3, 1st entry.
-** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
-*/
-
-/* PRNG state. */
-struct RandomState {
- uint64_t gen[4]; /* State of the 4 LFSR generators. */
- int valid; /* State is valid. */
-};
-
-/* Union needed for bit-pattern conversion between uint64_t and double. */
-typedef union { uint64_t u64; double d; } U64double;
-
-/* Update generator i and compute a running xor of all states. */
-#define TW223_GEN(i, k, q, s) \
- z = rs->gen[i]; \
- z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
- r ^= z; rs->gen[i] = z;
-
-/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */
-LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs)
-{
- uint64_t z, r = 0;
- TW223_GEN(0, 63, 31, 18)
- TW223_GEN(1, 58, 19, 28)
- TW223_GEN(2, 55, 24, 7)
- TW223_GEN(3, 47, 21, 8)
- return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000);
-}
-
-/* PRNG initialization function. */
-static void random_init(RandomState *rs, double d)
-{
- uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */
- int i;
- for (i = 0; i < 4; i++) {
- U64double u;
- uint32_t m = 1u << (r&255);
- r >>= 8;
- u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354;
- if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */
- rs->gen[i] = u.u64;
- }
- rs->valid = 1;
- for (i = 0; i < 10; i++)
- lj_math_random_step(rs);
-}
-
-/* PRNG extract function. */
-LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */
-LJLIB_CF(math_random) LJLIB_REC(.)
-{
- int n = (int)(L->top - L->base);
- RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
- U64double u;
- double d;
- if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0);
- u.u64 = lj_math_random_step(rs);
- d = u.d - 1.0;
- if (n > 0) {
-#if LJ_DUALNUM
- int isint = 1;
- double r1;
- lj_lib_checknumber(L, 1);
- if (tvisint(L->base)) {
- r1 = (lua_Number)intV(L->base);
- } else {
- isint = 0;
- r1 = numV(L->base);
- }
-#else
- double r1 = lj_lib_checknum(L, 1);
-#endif
- if (n == 1) {
- d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */
- } else {
-#if LJ_DUALNUM
- double r2;
- lj_lib_checknumber(L, 2);
- if (tvisint(L->base+1)) {
- r2 = (lua_Number)intV(L->base+1);
- } else {
- isint = 0;
- r2 = numV(L->base+1);
- }
-#else
- double r2 = lj_lib_checknum(L, 2);
-#endif
- d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */
- }
-#if LJ_DUALNUM
- if (isint) {
- setintV(L->top-1, lj_num2int(d));
- return 1;
- }
-#endif
- } /* else: d is a double in range [0, 1] */
- setnumV(L->top++, d);
- return 1;
-}
-
-/* PRNG seed function. */
-LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */
-LJLIB_CF(math_randomseed)
-{
- RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
- random_init(rs, lj_lib_checknum(L, 1));
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-#include "lj_libdef.h"
-
-LUALIB_API int luaopen_math(lua_State *L)
-{
- RandomState *rs;
- rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState));
- rs->valid = 0; /* Use lazy initialization to save some time on startup. */
- LJ_LIB_REG(L, LUA_MATHLIBNAME, math);
-#if defined(LUA_COMPAT_MOD) && !LJ_52
- lua_getfield(L, -1, "fmod");
- lua_setfield(L, -2, "mod");
-#endif
- return 1;
-}
-
+/*
+** Math library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <math.h>
+
+#define lib_math_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_lib.h"
+#include "lj_vm.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_math
+
+LJLIB_ASM(math_abs) LJLIB_REC(.)
+{
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR)
+LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL)
+
+LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT)
+{
+ lj_lib_checknum(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_log10) LJLIB_REC(math_unary IRFPM_LOG10)
+LJLIB_ASM_(math_exp) LJLIB_REC(math_unary IRFPM_EXP)
+LJLIB_ASM_(math_sin) LJLIB_REC(math_unary IRFPM_SIN)
+LJLIB_ASM_(math_cos) LJLIB_REC(math_unary IRFPM_COS)
+LJLIB_ASM_(math_tan) LJLIB_REC(math_unary IRFPM_TAN)
+LJLIB_ASM_(math_asin) LJLIB_REC(math_atrig FF_math_asin)
+LJLIB_ASM_(math_acos) LJLIB_REC(math_atrig FF_math_acos)
+LJLIB_ASM_(math_atan) LJLIB_REC(math_atrig FF_math_atan)
+LJLIB_ASM_(math_sinh) LJLIB_REC(math_htrig IRCALL_sinh)
+LJLIB_ASM_(math_cosh) LJLIB_REC(math_htrig IRCALL_cosh)
+LJLIB_ASM_(math_tanh) LJLIB_REC(math_htrig IRCALL_tanh)
+LJLIB_ASM_(math_frexp)
+LJLIB_ASM_(math_modf) LJLIB_REC(.)
+
+LJLIB_ASM(math_log) LJLIB_REC(math_log)
+{
+ double x = lj_lib_checknum(L, 1);
+ if (L->base+1 < L->top) {
+ double y = lj_lib_checknum(L, 2);
+#ifdef LUAJIT_NO_LOG2
+ x = log(x); y = 1.0 / log(y);
+#else
+ x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y);
+#endif
+ setnumV(L->base-1, x*y); /* Do NOT join the expression to x / y. */
+ return FFH_RES(1);
+ }
+ return FFH_RETRY;
+}
+
+LJLIB_PUSH(57.29577951308232)
+LJLIB_ASM_(math_deg) LJLIB_REC(math_degrad)
+
+LJLIB_PUSH(0.017453292519943295)
+LJLIB_ASM_(math_rad) LJLIB_REC(math_degrad)
+
+LJLIB_ASM(math_atan2) LJLIB_REC(.)
+{
+ lj_lib_checknum(L, 1);
+ lj_lib_checknum(L, 2);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_pow) LJLIB_REC(.)
+LJLIB_ASM_(math_fmod)
+
+LJLIB_ASM(math_ldexp) LJLIB_REC(.)
+{
+ lj_lib_checknum(L, 1);
+#if LJ_DUALNUM && !LJ_TARGET_X86ORX64
+ lj_lib_checkint(L, 2);
+#else
+ lj_lib_checknum(L, 2);
+#endif
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN)
+{
+ int i = 0;
+ do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX)
+
+LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi)
+LJLIB_PUSH(1e310) LJLIB_SET(huge)
+
+/* ------------------------------------------------------------------------ */
+
+/* This implements a Tausworthe PRNG with period 2^223. Based on:
+** Tables of maximally-equidistributed combined LFSR generators,
+** Pierre L'Ecuyer, 1991, table 3, 1st entry.
+** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
+*/
+
+/* PRNG state. */
+struct RandomState {
+ uint64_t gen[4]; /* State of the 4 LFSR generators. */
+ int valid; /* State is valid. */
+};
+
+/* Union needed for bit-pattern conversion between uint64_t and double. */
+typedef union { uint64_t u64; double d; } U64double;
+
+/* Update generator i and compute a running xor of all states. */
+#define TW223_GEN(i, k, q, s) \
+ z = rs->gen[i]; \
+ z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
+ r ^= z; rs->gen[i] = z;
+
+/* PRNG step function. Returns a double in the range 1.0 <= d < 2.0. */
+LJ_NOINLINE uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs)
+{
+ uint64_t z, r = 0;
+ TW223_GEN(0, 63, 31, 18)
+ TW223_GEN(1, 58, 19, 28)
+ TW223_GEN(2, 55, 24, 7)
+ TW223_GEN(3, 47, 21, 8)
+ return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000);
+}
+
+/* PRNG initialization function. */
+static void random_init(RandomState *rs, double d)
+{
+ uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */
+ int i;
+ for (i = 0; i < 4; i++) {
+ U64double u;
+ uint32_t m = 1u << (r&255);
+ r >>= 8;
+ u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354;
+ if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of gen[i] are non-zero. */
+ rs->gen[i] = u.u64;
+ }
+ rs->valid = 1;
+ for (i = 0; i < 10; i++)
+ lj_math_random_step(rs);
+}
+
+/* PRNG extract function. */
+LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */
+LJLIB_CF(math_random) LJLIB_REC(.)
+{
+ int n = (int)(L->top - L->base);
+ RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
+ U64double u;
+ double d;
+ if (LJ_UNLIKELY(!rs->valid)) random_init(rs, 0.0);
+ u.u64 = lj_math_random_step(rs);
+ d = u.d - 1.0;
+ if (n > 0) {
+#if LJ_DUALNUM
+ int isint = 1;
+ double r1;
+ lj_lib_checknumber(L, 1);
+ if (tvisint(L->base)) {
+ r1 = (lua_Number)intV(L->base);
+ } else {
+ isint = 0;
+ r1 = numV(L->base);
+ }
+#else
+ double r1 = lj_lib_checknum(L, 1);
+#endif
+ if (n == 1) {
+ d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */
+ } else {
+#if LJ_DUALNUM
+ double r2;
+ lj_lib_checknumber(L, 2);
+ if (tvisint(L->base+1)) {
+ r2 = (lua_Number)intV(L->base+1);
+ } else {
+ isint = 0;
+ r2 = numV(L->base+1);
+ }
+#else
+ double r2 = lj_lib_checknum(L, 2);
+#endif
+ d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */
+ }
+#if LJ_DUALNUM
+ if (isint) {
+ setintV(L->top-1, lj_num2int(d));
+ return 1;
+ }
+#endif
+ } /* else: d is a double in range [0, 1] */
+ setnumV(L->top++, d);
+ return 1;
+}
+
+/* PRNG seed function. */
+LJLIB_PUSH(top-2) /* Upvalue holds userdata with RandomState. */
+LJLIB_CF(math_randomseed)
+{
+ RandomState *rs = (RandomState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
+ random_init(rs, lj_lib_checknum(L, 1));
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_math(lua_State *L)
+{
+ RandomState *rs;
+ rs = (RandomState *)lua_newuserdata(L, sizeof(RandomState));
+ rs->valid = 0; /* Use lazy initialization to save some time on startup. */
+ LJ_LIB_REG(L, LUA_MATHLIBNAME, math);
+#if defined(LUA_COMPAT_MOD) && !LJ_52
+ lua_getfield(L, -1, "fmod");
+ lua_setfield(L, -2, "mod");
+#endif
+ return 1;
+}
+
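math.random() in the hunk above draws 64 bits from a four-register Tausworthe (TW223) generator and reinterprets them as a double in [1.0, 2.0) before subtracting 1.0. A self-contained sketch of that step, using the same shift/mask constants but an arbitrary illustrative seed state in place of random_init() and its warm-up loop, is:

/* Standalone sketch of the TW223 step used by math.random() above.
** Same shift and mask constants; the seed state here is arbitrary and
** merely needs nonzero high bits, unlike LuaJIT's random_init().
*/
#include <stdio.h>
#include <stdint.h>

typedef union { uint64_t u64; double d; } U64double;

static uint64_t gen[4] = {        /* Arbitrary illustrative seed state. */
  0x853c49e6748fea9bULL, 0xda3e39cb94b95bdbULL,
  0x9e3779b97f4a7c15ULL, 0xbf58476d1ce4e5b9ULL
};

/* Update generator i and xor its new state into the running result r. */
#define TW223_GEN(i, k, q, s) \
  z = gen[i]; \
  z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
  r ^= z; gen[i] = z;

/* One PRNG step. Returns a double bit pattern in the range [1.0, 2.0). */
static uint64_t tw223_step(void)
{
  uint64_t z, r = 0;
  TW223_GEN(0, 63, 31, 18)
  TW223_GEN(1, 58, 19, 28)
  TW223_GEN(2, 55, 24, 7)
  TW223_GEN(3, 47, 21, 8)
  return (r & 0x000fffffffffffffULL) | 0x3ff0000000000000ULL;
}

int main(void)
{
  int i;
  for (i = 0; i < 5; i++) {
    U64double u;
    u.u64 = tw223_step();
    printf("%.17g\n", u.d - 1.0);  /* Uniform sample in [0, 1). */
  }
  return 0;
}

Compiled on its own, this prints five uniform samples in [0, 1), matching the range produced by lj_math_random_step() followed by the final subtraction in the library code.
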
diff --git a/3rdparty/lua/src/lib_os.c b/3rdparty/lua/src/lib_os.c
index bb5a141..0a78412 100644
--- a/3rdparty/lua/src/lib_os.c
+++ b/3rdparty/lua/src/lib_os.c
@@ -1,12 +1,13 @@
/*
** OS library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/
#include <errno.h>
+#include <locale.h>
#include <time.h>
#define lib_os_c
@@ -26,10 +27,6 @@
#include <stdio.h>
#endif
-#if !LJ_TARGET_PSVITA
-#include <locale.h>
-#endif
-
/* ------------------------------------------------------------------------ */
#define LJLIB_MODULE_os
@@ -73,7 +70,7 @@ LJLIB_CF(os_rename)
LJLIB_CF(os_tmpname)
{
-#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PSVITA
+#if LJ_TARGET_PS3
lj_err_caller(L, LJ_ERR_OSUNIQF);
return 0;
#else
@@ -257,9 +254,6 @@ LJLIB_CF(os_difftime)
LJLIB_CF(os_setlocale)
{
-#if LJ_TARGET_PSVITA
- lua_pushliteral(L, "C");
-#else
GCstr *s = lj_lib_optstr(L, 1);
const char *str = s ? strdata(s) : NULL;
int opt = lj_lib_checkopt(L, 2, 6,
@@ -271,7 +265,6 @@ LJLIB_CF(os_setlocale)
else if (opt == 4) opt = LC_MONETARY;
else if (opt == 6) opt = LC_ALL;
lua_pushstring(L, setlocale(opt, str));
-#endif
return 1;
}
diff --git a/3rdparty/lua/src/lib_package.c b/3rdparty/lua/src/lib_package.c
index 8d9f030..f0e672d 100644
--- a/3rdparty/lua/src/lib_package.c
+++ b/3rdparty/lua/src/lib_package.c
@@ -1,602 +1,605 @@
-/*
-** Package library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2012 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lib_package_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_lib.h"
-
-/* ------------------------------------------------------------------------ */
-
-/* Error codes for ll_loadfunc. */
-#define PACKAGE_ERR_LIB 1
-#define PACKAGE_ERR_FUNC 2
-#define PACKAGE_ERR_LOAD 3
-
-/* Redefined in platform specific part. */
-#define PACKAGE_LIB_FAIL "open"
-#define setprogdir(L) ((void)0)
-
-/* Symbol name prefixes. */
-#define SYMPREFIX_CF "luaopen_%s"
-#define SYMPREFIX_BC "luaJIT_BC_%s"
-
-#if LJ_TARGET_DLOPEN
-
-#include <dlfcn.h>
-
-static void ll_unloadlib(void *lib)
-{
- dlclose(lib);
-}
-
-static void *ll_load(lua_State *L, const char *path, int gl)
-{
- void *lib = dlopen(path, RTLD_NOW | (gl ? RTLD_GLOBAL : RTLD_LOCAL));
- if (lib == NULL) lua_pushstring(L, dlerror());
- return lib;
-}
-
-static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
-{
- lua_CFunction f = (lua_CFunction)dlsym(lib, sym);
- if (f == NULL) lua_pushstring(L, dlerror());
- return f;
-}
-
-static const char *ll_bcsym(void *lib, const char *sym)
-{
-#if defined(RTLD_DEFAULT)
- if (lib == NULL) lib = RTLD_DEFAULT;
-#elif LJ_TARGET_OSX || LJ_TARGET_BSD
- if (lib == NULL) lib = (void *)(intptr_t)-2;
-#endif
- return (const char *)dlsym(lib, sym);
-}
-
-#elif LJ_TARGET_WINDOWS
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
-#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
-#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
-BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
-#endif
-
-#undef setprogdir
-
-static void setprogdir(lua_State *L)
-{
- char buff[MAX_PATH + 1];
- char *lb;
- DWORD nsize = sizeof(buff);
- DWORD n = GetModuleFileNameA(NULL, buff, nsize);
- if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) {
- luaL_error(L, "unable to get ModuleFileName");
- } else {
- *lb = '\0';
- luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff);
- lua_remove(L, -2); /* remove original string */
- }
-}
-
-static void pusherror(lua_State *L)
-{
- DWORD error = GetLastError();
- char buffer[128];
- if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
- NULL, error, 0, buffer, sizeof(buffer), NULL))
- lua_pushstring(L, buffer);
- else
- lua_pushfstring(L, "system error %d\n", error);
-}
-
-static void ll_unloadlib(void *lib)
-{
- FreeLibrary((HINSTANCE)lib);
-}
-
-static void *ll_load(lua_State *L, const char *path, int gl)
-{
- HINSTANCE lib = LoadLibraryA(path);
- if (lib == NULL) pusherror(L);
- UNUSED(gl);
- return lib;
-}
-
-static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
-{
- lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym);
- if (f == NULL) pusherror(L);
- return f;
-}
-
-static const char *ll_bcsym(void *lib, const char *sym)
-{
- if (lib) {
- return (const char *)GetProcAddress((HINSTANCE)lib, sym);
- } else {
- HINSTANCE h = GetModuleHandleA(NULL);
- const char *p = (const char *)GetProcAddress(h, sym);
- if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
- (const char *)ll_bcsym, &h))
- p = (const char *)GetProcAddress(h, sym);
- return p;
- }
-}
-
-#else
-
-#undef PACKAGE_LIB_FAIL
-#define PACKAGE_LIB_FAIL "absent"
-
-#define DLMSG "dynamic libraries not enabled; no support for target OS"
-
-static void ll_unloadlib(void *lib)
-{
- UNUSED(lib);
-}
-
-static void *ll_load(lua_State *L, const char *path, int gl)
-{
- UNUSED(path); UNUSED(gl);
- lua_pushliteral(L, DLMSG);
- return NULL;
-}
-
-static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
-{
- UNUSED(lib); UNUSED(sym);
- lua_pushliteral(L, DLMSG);
- return NULL;
-}
-
-static const char *ll_bcsym(void *lib, const char *sym)
-{
- UNUSED(lib); UNUSED(sym);
- return NULL;
-}
-
-#endif
-
-/* ------------------------------------------------------------------------ */
-
-static void **ll_register(lua_State *L, const char *path)
-{
- void **plib;
- lua_pushfstring(L, "LOADLIB: %s", path);
- lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */
- if (!lua_isnil(L, -1)) { /* is there an entry? */
- plib = (void **)lua_touserdata(L, -1);
- } else { /* no entry yet; create one */
- lua_pop(L, 1);
- plib = (void **)lua_newuserdata(L, sizeof(void *));
- *plib = NULL;
- luaL_getmetatable(L, "_LOADLIB");
- lua_setmetatable(L, -2);
- lua_pushfstring(L, "LOADLIB: %s", path);
- lua_pushvalue(L, -2);
- lua_settable(L, LUA_REGISTRYINDEX);
- }
- return plib;
-}
-
-static const char *mksymname(lua_State *L, const char *modname,
- const char *prefix)
-{
- const char *funcname;
- const char *mark = strchr(modname, *LUA_IGMARK);
- if (mark) modname = mark + 1;
- funcname = luaL_gsub(L, modname, ".", "_");
- funcname = lua_pushfstring(L, prefix, funcname);
- lua_remove(L, -2); /* remove 'gsub' result */
- return funcname;
-}
-
-static int ll_loadfunc(lua_State *L, const char *path, const char *name, int r)
-{
- void **reg = ll_register(L, path);
- if (*reg == NULL) *reg = ll_load(L, path, (*name == '*'));
- if (*reg == NULL) {
- return PACKAGE_ERR_LIB; /* Unable to load library. */
- } else if (*name == '*') { /* Only load library into global namespace. */
- lua_pushboolean(L, 1);
- return 0;
- } else {
- const char *sym = r ? name : mksymname(L, name, SYMPREFIX_CF);
- lua_CFunction f = ll_sym(L, *reg, sym);
- if (f) {
- lua_pushcfunction(L, f);
- return 0;
- }
- if (!r) {
- const char *bcdata = ll_bcsym(*reg, mksymname(L, name, SYMPREFIX_BC));
- lua_pop(L, 1);
- if (bcdata) {
- if (luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
- return PACKAGE_ERR_LOAD;
- return 0;
- }
- }
- return PACKAGE_ERR_FUNC; /* Unable to find function. */
- }
-}
-
-static int lj_cf_package_loadlib(lua_State *L)
-{
- const char *path = luaL_checkstring(L, 1);
- const char *init = luaL_checkstring(L, 2);
- int st = ll_loadfunc(L, path, init, 1);
- if (st == 0) { /* no errors? */
- return 1; /* return the loaded function */
- } else { /* error; error message is on stack top */
- lua_pushnil(L);
- lua_insert(L, -2);
- lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init");
- return 3; /* return nil, error message, and where */
- }
-}
-
-static int lj_cf_package_unloadlib(lua_State *L)
-{
- void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB");
- if (*lib) ll_unloadlib(*lib);
- *lib = NULL; /* mark library as closed */
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-static int readable(const char *filename)
-{
- FILE *f = fopen(filename, "r"); /* try to open file */
- if (f == NULL) return 0; /* open failed */
- fclose(f);
- return 1;
-}
-
-static const char *pushnexttemplate(lua_State *L, const char *path)
-{
- const char *l;
- while (*path == *LUA_PATHSEP) path++; /* skip separators */
- if (*path == '\0') return NULL; /* no more templates */
- l = strchr(path, *LUA_PATHSEP); /* find next separator */
- if (l == NULL) l = path + strlen(path);
- lua_pushlstring(L, path, (size_t)(l - path)); /* template */
- return l;
-}
-
-static const char *searchpath (lua_State *L, const char *name,
- const char *path, const char *sep,
- const char *dirsep)
-{
- luaL_Buffer msg; /* to build error message */
- luaL_buffinit(L, &msg);
- if (*sep != '\0') /* non-empty separator? */
- name = luaL_gsub(L, name, sep, dirsep); /* replace it by 'dirsep' */
- while ((path = pushnexttemplate(L, path)) != NULL) {
- const char *filename = luaL_gsub(L, lua_tostring(L, -1),
- LUA_PATH_MARK, name);
- lua_remove(L, -2); /* remove path template */
- if (readable(filename)) /* does file exist and is readable? */
- return filename; /* return that file name */
- lua_pushfstring(L, "\n\tno file " LUA_QS, filename);
- lua_remove(L, -2); /* remove file name */
- luaL_addvalue(&msg); /* concatenate error msg. entry */
- }
- luaL_pushresult(&msg); /* create error message */
- return NULL; /* not found */
-}
-
-static int lj_cf_package_searchpath(lua_State *L)
-{
- const char *f = searchpath(L, luaL_checkstring(L, 1),
- luaL_checkstring(L, 2),
- luaL_optstring(L, 3, "."),
- luaL_optstring(L, 4, LUA_DIRSEP));
- if (f != NULL) {
- return 1;
- } else { /* error message is on top of the stack */
- lua_pushnil(L);
- lua_insert(L, -2);
- return 2; /* return nil + error message */
- }
-}
-
-static const char *findfile(lua_State *L, const char *name,
- const char *pname)
-{
- const char *path;
- lua_getfield(L, LUA_ENVIRONINDEX, pname);
- path = lua_tostring(L, -1);
- if (path == NULL)
- luaL_error(L, LUA_QL("package.%s") " must be a string", pname);
- return searchpath(L, name, path, ".", LUA_DIRSEP);
-}
-
-static void loaderror(lua_State *L, const char *filename)
-{
- luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s",
- lua_tostring(L, 1), filename, lua_tostring(L, -1));
-}
-
-static int lj_cf_package_loader_lua(lua_State *L)
-{
- const char *filename;
- const char *name = luaL_checkstring(L, 1);
- filename = findfile(L, name, "path");
- if (filename == NULL) return 1; /* library not found in this path */
- if (luaL_loadfile(L, filename) != 0)
- loaderror(L, filename);
- return 1; /* library loaded successfully */
-}
-
-static int lj_cf_package_loader_c(lua_State *L)
-{
- const char *name = luaL_checkstring(L, 1);
- const char *filename = findfile(L, name, "cpath");
- if (filename == NULL) return 1; /* library not found in this path */
- if (ll_loadfunc(L, filename, name, 0) != 0)
- loaderror(L, filename);
- return 1; /* library loaded successfully */
-}
-
-static int lj_cf_package_loader_croot(lua_State *L)
-{
- const char *filename;
- const char *name = luaL_checkstring(L, 1);
- const char *p = strchr(name, '.');
- int st;
- if (p == NULL) return 0; /* is root */
- lua_pushlstring(L, name, (size_t)(p - name));
- filename = findfile(L, lua_tostring(L, -1), "cpath");
- if (filename == NULL) return 1; /* root not found */
- if ((st = ll_loadfunc(L, filename, name, 0)) != 0) {
- if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */
- lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS,
- name, filename);
- return 1; /* function not found */
- }
- return 1;
-}
-
-static int lj_cf_package_loader_preload(lua_State *L)
-{
- const char *name = luaL_checkstring(L, 1);
- lua_getfield(L, LUA_ENVIRONINDEX, "preload");
- if (!lua_istable(L, -1))
- luaL_error(L, LUA_QL("package.preload") " must be a table");
- lua_getfield(L, -1, name);
- if (lua_isnil(L, -1)) { /* Not found? */
- const char *bcname = mksymname(L, name, SYMPREFIX_BC);
- const char *bcdata = ll_bcsym(NULL, bcname);
- if (bcdata == NULL || luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
- lua_pushfstring(L, "\n\tno field package.preload['%s']", name);
- }
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-static const int sentinel_ = 0;
-#define sentinel ((void *)&sentinel_)
-
-static int lj_cf_package_require(lua_State *L)
-{
- const char *name = luaL_checkstring(L, 1);
- int i;
- lua_settop(L, 1); /* _LOADED table will be at index 2 */
- lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
- lua_getfield(L, 2, name);
- if (lua_toboolean(L, -1)) { /* is it there? */
- if (lua_touserdata(L, -1) == sentinel) /* check loops */
- luaL_error(L, "loop or previous error loading module " LUA_QS, name);
- return 1; /* package is already loaded */
- }
- /* else must load it; iterate over available loaders */
- lua_getfield(L, LUA_ENVIRONINDEX, "loaders");
- if (!lua_istable(L, -1))
- luaL_error(L, LUA_QL("package.loaders") " must be a table");
- lua_pushliteral(L, ""); /* error message accumulator */
- for (i = 1; ; i++) {
- lua_rawgeti(L, -2, i); /* get a loader */
- if (lua_isnil(L, -1))
- luaL_error(L, "module " LUA_QS " not found:%s",
- name, lua_tostring(L, -2));
- lua_pushstring(L, name);
- lua_call(L, 1, 1); /* call it */
- if (lua_isfunction(L, -1)) /* did it find module? */
- break; /* module loaded successfully */
- else if (lua_isstring(L, -1)) /* loader returned error message? */
- lua_concat(L, 2); /* accumulate it */
- else
- lua_pop(L, 1);
- }
- lua_pushlightuserdata(L, sentinel);
- lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */
- lua_pushstring(L, name); /* pass name as argument to module */
- lua_call(L, 1, 1); /* run loaded module */
- if (!lua_isnil(L, -1)) /* non-nil return? */
- lua_setfield(L, 2, name); /* _LOADED[name] = returned value */
- lua_getfield(L, 2, name);
- if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? */
- lua_pushboolean(L, 1); /* use true as result */
- lua_pushvalue(L, -1); /* extra copy to be returned */
- lua_setfield(L, 2, name); /* _LOADED[name] = true */
- }
- lj_lib_checkfpu(L);
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-static void setfenv(lua_State *L)
-{
- lua_Debug ar;
- if (lua_getstack(L, 1, &ar) == 0 ||
- lua_getinfo(L, "f", &ar) == 0 || /* get calling function */
- lua_iscfunction(L, -1))
- luaL_error(L, LUA_QL("module") " not called from a Lua function");
- lua_pushvalue(L, -2);
- lua_setfenv(L, -2);
- lua_pop(L, 1);
-}
-
-static void dooptions(lua_State *L, int n)
-{
- int i;
- for (i = 2; i <= n; i++) {
- lua_pushvalue(L, i); /* get option (a function) */
- lua_pushvalue(L, -2); /* module */
- lua_call(L, 1, 0);
- }
-}
-
-static void modinit(lua_State *L, const char *modname)
-{
- const char *dot;
- lua_pushvalue(L, -1);
- lua_setfield(L, -2, "_M"); /* module._M = module */
- lua_pushstring(L, modname);
- lua_setfield(L, -2, "_NAME");
- dot = strrchr(modname, '.'); /* look for last dot in module name */
- if (dot == NULL) dot = modname; else dot++;
- /* set _PACKAGE as package name (full module name minus last part) */
- lua_pushlstring(L, modname, (size_t)(dot - modname));
- lua_setfield(L, -2, "_PACKAGE");
-}
-
-static int lj_cf_package_module(lua_State *L)
-{
- const char *modname = luaL_checkstring(L, 1);
- int loaded = lua_gettop(L) + 1; /* index of _LOADED table */
- lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
- lua_getfield(L, loaded, modname); /* get _LOADED[modname] */
- if (!lua_istable(L, -1)) { /* not found? */
- lua_pop(L, 1); /* remove previous result */
- /* try global variable (and create one if it does not exist) */
- if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL)
- lj_err_callerv(L, LJ_ERR_BADMODN, modname);
- lua_pushvalue(L, -1);
- lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */
- }
- /* check whether table already has a _NAME field */
- lua_getfield(L, -1, "_NAME");
- if (!lua_isnil(L, -1)) { /* is table an initialized module? */
- lua_pop(L, 1);
- } else { /* no; initialize it */
- lua_pop(L, 1);
- modinit(L, modname);
- }
- lua_pushvalue(L, -1);
- setfenv(L);
- dooptions(L, loaded - 1);
- return 0;
-}
-
-static int lj_cf_package_seeall(lua_State *L)
-{
- luaL_checktype(L, 1, LUA_TTABLE);
- if (!lua_getmetatable(L, 1)) {
- lua_createtable(L, 0, 1); /* create new metatable */
- lua_pushvalue(L, -1);
- lua_setmetatable(L, 1);
- }
- lua_pushvalue(L, LUA_GLOBALSINDEX);
- lua_setfield(L, -2, "__index"); /* mt.__index = _G */
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-#define AUXMARK "\1"
-
-static void setpath(lua_State *L, const char *fieldname, const char *envname,
- const char *def, int noenv)
-{
-#if LJ_TARGET_CONSOLE
- const char *path = NULL;
- UNUSED(envname);
-#else
- const char *path = getenv(envname);
-#endif
- if (path == NULL || noenv) {
- lua_pushstring(L, def);
- } else {
- path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP,
- LUA_PATHSEP AUXMARK LUA_PATHSEP);
- luaL_gsub(L, path, AUXMARK, def);
- lua_remove(L, -2);
- }
- setprogdir(L);
- lua_setfield(L, -2, fieldname);
-}
-
-static const luaL_Reg package_lib[] = {
- { "loadlib", lj_cf_package_loadlib },
- { "searchpath", lj_cf_package_searchpath },
- { "seeall", lj_cf_package_seeall },
- { NULL, NULL }
-};
-
-static const luaL_Reg package_global[] = {
- { "module", lj_cf_package_module },
- { "require", lj_cf_package_require },
- { NULL, NULL }
-};
-
-static const lua_CFunction package_loaders[] =
-{
- lj_cf_package_loader_preload,
- lj_cf_package_loader_lua,
- lj_cf_package_loader_c,
- lj_cf_package_loader_croot,
- NULL
-};
-
-LUALIB_API int luaopen_package(lua_State *L)
-{
- int i;
- int noenv;
- luaL_newmetatable(L, "_LOADLIB");
- lj_lib_pushcf(L, lj_cf_package_unloadlib, 1);
- lua_setfield(L, -2, "__gc");
- luaL_register(L, LUA_LOADLIBNAME, package_lib);
- lua_pushvalue(L, -1);
- lua_replace(L, LUA_ENVIRONINDEX);
- lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0);
- for (i = 0; package_loaders[i] != NULL; i++) {
- lj_lib_pushcf(L, package_loaders[i], 1);
- lua_rawseti(L, -2, i+1);
- }
- lua_setfield(L, -2, "loaders");
- lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
- noenv = lua_toboolean(L, -1);
- lua_pop(L, 1);
- setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT, noenv);
- setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT, noenv);
- lua_pushliteral(L, LUA_PATH_CONFIG);
- lua_setfield(L, -2, "config");
- luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
- lua_setfield(L, -2, "loaded");
- luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4);
- lua_setfield(L, -2, "preload");
- lua_pushvalue(L, LUA_GLOBALSINDEX);
- luaL_register(L, NULL, package_global);
- lua_pop(L, 1);
- return 1;
-}
-
+/*
+** Package library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2012 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_package_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Error codes for ll_loadfunc. */
+#define PACKAGE_ERR_LIB 1
+#define PACKAGE_ERR_FUNC 2
+#define PACKAGE_ERR_LOAD 3
+
+/* Redefined in platform specific part. */
+#define PACKAGE_LIB_FAIL "open"
+#define setprogdir(L) ((void)0)
+
+/* Symbol name prefixes. */
+#define SYMPREFIX_CF "luaopen_%s"
+#define SYMPREFIX_BC "luaJIT_BC_%s"
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+
+static void ll_unloadlib(void *lib)
+{
+ dlclose(lib);
+}
+
+static void *ll_load(lua_State *L, const char *path, int gl)
+{
+ void *lib = dlopen(path, RTLD_NOW | (gl ? RTLD_GLOBAL : RTLD_LOCAL));
+ if (lib == NULL) lua_pushstring(L, dlerror());
+ return lib;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ lua_CFunction f = (lua_CFunction)dlsym(lib, sym);
+ if (f == NULL) lua_pushstring(L, dlerror());
+ return f;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+#if defined(RTLD_DEFAULT)
+ if (lib == NULL) lib = RTLD_DEFAULT;
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+ if (lib == NULL) lib = (void *)(intptr_t)-2;
+#endif
+ return (const char *)dlsym(lib, sym);
+}
+
+#elif LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+#include <windows.h>
+
+#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
+#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
+BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
+#endif
+
+#undef setprogdir
+
+static void setprogdir(lua_State *L)
+{
+ char buff[MAX_PATH + 1];
+ char *lb;
+ DWORD nsize = sizeof(buff);
+ DWORD n = GetModuleFileNameA(NULL, buff, nsize);
+ if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) {
+ luaL_error(L, "unable to get ModuleFileName");
+ } else {
+ *lb = '\0';
+ luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff);
+ lua_remove(L, -2); /* remove original string */
+ }
+}
+
+static void pusherror(lua_State *L)
+{
+ DWORD error = GetLastError();
+ char buffer[128];
+ if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, error, 0, buffer, sizeof(buffer), NULL))
+ lua_pushstring(L, buffer);
+ else
+ lua_pushfstring(L, "system error %d\n", error);
+}
+
+static void ll_unloadlib(void *lib)
+{
+ FreeLibrary((HINSTANCE)lib);
+}
+
+static void *ll_load(lua_State *L, const char *path, int gl)
+{
+ HINSTANCE lib = LoadLibraryA(path);
+ if (lib == NULL) pusherror(L);
+ UNUSED(gl);
+ return lib;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym);
+ if (f == NULL) pusherror(L);
+ return f;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+ if (lib) {
+ return (const char *)GetProcAddress((HINSTANCE)lib, sym);
+ } else {
+ HINSTANCE h = GetModuleHandleA(NULL);
+ const char *p = (const char *)GetProcAddress(h, sym);
+ if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)ll_bcsym, &h))
+ p = (const char *)GetProcAddress(h, sym);
+ return p;
+ }
+}
+
+#else
+
+#undef PACKAGE_LIB_FAIL
+#define PACKAGE_LIB_FAIL "absent"
+
+#define DLMSG "dynamic libraries not enabled; no support for target OS"
+
+static void ll_unloadlib(void *lib)
+{
+ UNUSED(lib);
+}
+
+static void *ll_load(lua_State *L, const char *path, int gl)
+{
+ UNUSED(path); UNUSED(gl);
+ lua_pushliteral(L, DLMSG);
+ return NULL;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ UNUSED(lib); UNUSED(sym);
+ lua_pushliteral(L, DLMSG);
+ return NULL;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+ UNUSED(lib); UNUSED(sym);
+ return NULL;
+}
+
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static void **ll_register(lua_State *L, const char *path)
+{
+ void **plib;
+ lua_pushfstring(L, "LOADLIB: %s", path);
+ lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */
+ if (!lua_isnil(L, -1)) { /* is there an entry? */
+ plib = (void **)lua_touserdata(L, -1);
+ } else { /* no entry yet; create one */
+ lua_pop(L, 1);
+ plib = (void **)lua_newuserdata(L, sizeof(void *));
+ *plib = NULL;
+ luaL_getmetatable(L, "_LOADLIB");
+ lua_setmetatable(L, -2);
+ lua_pushfstring(L, "LOADLIB: %s", path);
+ lua_pushvalue(L, -2);
+ lua_settable(L, LUA_REGISTRYINDEX);
+ }
+ return plib;
+}
+
+static const char *mksymname(lua_State *L, const char *modname,
+ const char *prefix)
+{
+ const char *funcname;
+ const char *mark = strchr(modname, *LUA_IGMARK);
+ if (mark) modname = mark + 1;
+ funcname = luaL_gsub(L, modname, ".", "_");
+ funcname = lua_pushfstring(L, prefix, funcname);
+ lua_remove(L, -2); /* remove 'gsub' result */
+ return funcname;
+}
+
+static int ll_loadfunc(lua_State *L, const char *path, const char *name, int r)
+{
+ void **reg = ll_register(L, path);
+ if (*reg == NULL) *reg = ll_load(L, path, (*name == '*'));
+ if (*reg == NULL) {
+ return PACKAGE_ERR_LIB; /* Unable to load library. */
+ } else if (*name == '*') { /* Only load library into global namespace. */
+ lua_pushboolean(L, 1);
+ return 0;
+ } else {
+ const char *sym = r ? name : mksymname(L, name, SYMPREFIX_CF);
+ lua_CFunction f = ll_sym(L, *reg, sym);
+ if (f) {
+ lua_pushcfunction(L, f);
+ return 0;
+ }
+ if (!r) {
+ const char *bcdata = ll_bcsym(*reg, mksymname(L, name, SYMPREFIX_BC));
+ lua_pop(L, 1);
+ if (bcdata) {
+ if (luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
+ return PACKAGE_ERR_LOAD;
+ return 0;
+ }
+ }
+ return PACKAGE_ERR_FUNC; /* Unable to find function. */
+ }
+}
+
+static int lj_cf_package_loadlib(lua_State *L)
+{
+ const char *path = luaL_checkstring(L, 1);
+ const char *init = luaL_checkstring(L, 2);
+ int st = ll_loadfunc(L, path, init, 1);
+ if (st == 0) { /* no errors? */
+ return 1; /* return the loaded function */
+ } else { /* error; error message is on stack top */
+ lua_pushnil(L);
+ lua_insert(L, -2);
+ lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init");
+ return 3; /* return nil, error message, and where */
+ }
+}
+
+static int lj_cf_package_unloadlib(lua_State *L)
+{
+ void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB");
+ if (*lib) ll_unloadlib(*lib);
+ *lib = NULL; /* mark library as closed */
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int readable(const char *filename)
+{
+ FILE *f = fopen(filename, "r"); /* try to open file */
+ if (f == NULL) return 0; /* open failed */
+ fclose(f);
+ return 1;
+}
+
+static const char *pushnexttemplate(lua_State *L, const char *path)
+{
+ const char *l;
+ while (*path == *LUA_PATHSEP) path++; /* skip separators */
+ if (*path == '\0') return NULL; /* no more templates */
+ l = strchr(path, *LUA_PATHSEP); /* find next separator */
+ if (l == NULL) l = path + strlen(path);
+ lua_pushlstring(L, path, (size_t)(l - path)); /* template */
+ return l;
+}
+
+static const char *searchpath (lua_State *L, const char *name,
+ const char *path, const char *sep,
+ const char *dirsep)
+{
+ luaL_Buffer msg; /* to build error message */
+ luaL_buffinit(L, &msg);
+ if (*sep != '\0') /* non-empty separator? */
+ name = luaL_gsub(L, name, sep, dirsep); /* replace it by 'dirsep' */
+ while ((path = pushnexttemplate(L, path)) != NULL) {
+ const char *filename = luaL_gsub(L, lua_tostring(L, -1),
+ LUA_PATH_MARK, name);
+ lua_remove(L, -2); /* remove path template */
+ if (readable(filename)) /* does file exist and is readable? */
+ return filename; /* return that file name */
+ lua_pushfstring(L, "\n\tno file " LUA_QS, filename);
+ lua_remove(L, -2); /* remove file name */
+ luaL_addvalue(&msg); /* concatenate error msg. entry */
+ }
+ luaL_pushresult(&msg); /* create error message */
+ return NULL; /* not found */
+}
+
+static int lj_cf_package_searchpath(lua_State *L)
+{
+ const char *f = searchpath(L, luaL_checkstring(L, 1),
+ luaL_checkstring(L, 2),
+ luaL_optstring(L, 3, "."),
+ luaL_optstring(L, 4, LUA_DIRSEP));
+ if (f != NULL) {
+ return 1;
+ } else { /* error message is on top of the stack */
+ lua_pushnil(L);
+ lua_insert(L, -2);
+ return 2; /* return nil + error message */
+ }
+}
+
+static const char *findfile(lua_State *L, const char *name,
+ const char *pname)
+{
+ const char *path;
+ lua_getfield(L, LUA_ENVIRONINDEX, pname);
+ path = lua_tostring(L, -1);
+ if (path == NULL)
+ luaL_error(L, LUA_QL("package.%s") " must be a string", pname);
+ return searchpath(L, name, path, ".", LUA_DIRSEP);
+}
+
+static void loaderror(lua_State *L, const char *filename)
+{
+ luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s",
+ lua_tostring(L, 1), filename, lua_tostring(L, -1));
+}
+
+static int lj_cf_package_loader_lua(lua_State *L)
+{
+ const char *filename;
+ const char *name = luaL_checkstring(L, 1);
+ filename = findfile(L, name, "path");
+ if (filename == NULL) return 1; /* library not found in this path */
+ if (luaL_loadfile(L, filename) != 0)
+ loaderror(L, filename);
+ return 1; /* library loaded successfully */
+}
+
+static int lj_cf_package_loader_c(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ const char *filename = findfile(L, name, "cpath");
+ if (filename == NULL) return 1; /* library not found in this path */
+ if (ll_loadfunc(L, filename, name, 0) != 0)
+ loaderror(L, filename);
+ return 1; /* library loaded successfully */
+}
+
+static int lj_cf_package_loader_croot(lua_State *L)
+{
+ const char *filename;
+ const char *name = luaL_checkstring(L, 1);
+ const char *p = strchr(name, '.');
+ int st;
+ if (p == NULL) return 0; /* is root */
+ lua_pushlstring(L, name, (size_t)(p - name));
+ filename = findfile(L, lua_tostring(L, -1), "cpath");
+ if (filename == NULL) return 1; /* root not found */
+ if ((st = ll_loadfunc(L, filename, name, 0)) != 0) {
+ if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */
+ lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS,
+ name, filename);
+ return 1; /* function not found */
+ }
+ return 1;
+}
+
+static int lj_cf_package_loader_preload(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ lua_getfield(L, LUA_ENVIRONINDEX, "preload");
+ if (!lua_istable(L, -1))
+ luaL_error(L, LUA_QL("package.preload") " must be a table");
+ lua_getfield(L, -1, name);
+ if (lua_isnil(L, -1)) { /* Not found? */
+ const char *bcname = mksymname(L, name, SYMPREFIX_BC);
+ const char *bcdata = ll_bcsym(NULL, bcname);
+ if (bcdata == NULL || luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
+ lua_pushfstring(L, "\n\tno field package.preload['%s']", name);
+ }
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static const int sentinel_ = 0;
+#define sentinel ((void *)&sentinel_)
+
+static int lj_cf_package_require(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ int i;
+ lua_settop(L, 1); /* _LOADED table will be at index 2 */
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, 2, name);
+ if (lua_toboolean(L, -1)) { /* is it there? */
+ if (lua_touserdata(L, -1) == sentinel) /* check loops */
+ luaL_error(L, "loop or previous error loading module " LUA_QS, name);
+ return 1; /* package is already loaded */
+ }
+ /* else must load it; iterate over available loaders */
+ lua_getfield(L, LUA_ENVIRONINDEX, "loaders");
+ if (!lua_istable(L, -1))
+ luaL_error(L, LUA_QL("package.loaders") " must be a table");
+ lua_pushliteral(L, ""); /* error message accumulator */
+ for (i = 1; ; i++) {
+ lua_rawgeti(L, -2, i); /* get a loader */
+ if (lua_isnil(L, -1))
+ luaL_error(L, "module " LUA_QS " not found:%s",
+ name, lua_tostring(L, -2));
+ lua_pushstring(L, name);
+ lua_call(L, 1, 1); /* call it */
+ if (lua_isfunction(L, -1)) /* did it find module? */
+ break; /* module loaded successfully */
+ else if (lua_isstring(L, -1)) /* loader returned error message? */
+ lua_concat(L, 2); /* accumulate it */
+ else
+ lua_pop(L, 1);
+ }
+ lua_pushlightuserdata(L, sentinel);
+ lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */
+ lua_pushstring(L, name); /* pass name as argument to module */
+ lua_call(L, 1, 1); /* run loaded module */
+ if (!lua_isnil(L, -1)) /* non-nil return? */
+ lua_setfield(L, 2, name); /* _LOADED[name] = returned value */
+ lua_getfield(L, 2, name);
+ if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? */
+ lua_pushboolean(L, 1); /* use true as result */
+ lua_pushvalue(L, -1); /* extra copy to be returned */
+ lua_setfield(L, 2, name); /* _LOADED[name] = true */
+ }
+ lj_lib_checkfpu(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void setfenv(lua_State *L)
+{
+ lua_Debug ar;
+ if (lua_getstack(L, 1, &ar) == 0 ||
+ lua_getinfo(L, "f", &ar) == 0 || /* get calling function */
+ lua_iscfunction(L, -1))
+ luaL_error(L, LUA_QL("module") " not called from a Lua function");
+ lua_pushvalue(L, -2);
+ lua_setfenv(L, -2);
+ lua_pop(L, 1);
+}
+
+static void dooptions(lua_State *L, int n)
+{
+ int i;
+ for (i = 2; i <= n; i++) {
+ lua_pushvalue(L, i); /* get option (a function) */
+ lua_pushvalue(L, -2); /* module */
+ lua_call(L, 1, 0);
+ }
+}
+
+static void modinit(lua_State *L, const char *modname)
+{
+ const char *dot;
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -2, "_M"); /* module._M = module */
+ lua_pushstring(L, modname);
+ lua_setfield(L, -2, "_NAME");
+ dot = strrchr(modname, '.'); /* look for last dot in module name */
+ if (dot == NULL) dot = modname; else dot++;
+ /* set _PACKAGE as package name (full module name minus last part) */
+ lua_pushlstring(L, modname, (size_t)(dot - modname));
+ lua_setfield(L, -2, "_PACKAGE");
+}
+
+static int lj_cf_package_module(lua_State *L)
+{
+ const char *modname = luaL_checkstring(L, 1);
+ int loaded = lua_gettop(L) + 1; /* index of _LOADED table */
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, loaded, modname); /* get _LOADED[modname] */
+ if (!lua_istable(L, -1)) { /* not found? */
+ lua_pop(L, 1); /* remove previous result */
+ /* try global variable (and create one if it does not exist) */
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, modname);
+ lua_pushvalue(L, -1);
+ lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */
+ }
+ /* check whether table already has a _NAME field */
+ lua_getfield(L, -1, "_NAME");
+ if (!lua_isnil(L, -1)) { /* is table an initialized module? */
+ lua_pop(L, 1);
+ } else { /* no; initialize it */
+ lua_pop(L, 1);
+ modinit(L, modname);
+ }
+ lua_pushvalue(L, -1);
+ setfenv(L);
+ dooptions(L, loaded - 1);
+ return 0;
+}
+
+static int lj_cf_package_seeall(lua_State *L)
+{
+ luaL_checktype(L, 1, LUA_TTABLE);
+ if (!lua_getmetatable(L, 1)) {
+ lua_createtable(L, 0, 1); /* create new metatable */
+ lua_pushvalue(L, -1);
+ lua_setmetatable(L, 1);
+ }
+ lua_pushvalue(L, LUA_GLOBALSINDEX);
+ lua_setfield(L, -2, "__index"); /* mt.__index = _G */
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define AUXMARK "\1"
+
+static void setpath(lua_State *L, const char *fieldname, const char *envname,
+ const char *def, int noenv)
+{
+#if LJ_TARGET_CONSOLE
+ const char *path = NULL;
+ UNUSED(envname);
+#else
+ const char *path = getenv(envname);
+#endif
+ if (path == NULL || noenv) {
+ lua_pushstring(L, def);
+ } else {
+ path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP,
+ LUA_PATHSEP AUXMARK LUA_PATHSEP);
+ luaL_gsub(L, path, AUXMARK, def);
+ lua_remove(L, -2);
+ }
+ setprogdir(L);
+ lua_setfield(L, -2, fieldname);
+}
+
+static const luaL_Reg package_lib[] = {
+ { "loadlib", lj_cf_package_loadlib },
+ { "searchpath", lj_cf_package_searchpath },
+ { "seeall", lj_cf_package_seeall },
+ { NULL, NULL }
+};
+
+static const luaL_Reg package_global[] = {
+ { "module", lj_cf_package_module },
+ { "require", lj_cf_package_require },
+ { NULL, NULL }
+};
+
+static const lua_CFunction package_loaders[] =
+{
+ lj_cf_package_loader_preload,
+ lj_cf_package_loader_lua,
+ lj_cf_package_loader_c,
+ lj_cf_package_loader_croot,
+ NULL
+};
+
+LUALIB_API int luaopen_package(lua_State *L)
+{
+ int i;
+ int noenv;
+ luaL_newmetatable(L, "_LOADLIB");
+ lj_lib_pushcf(L, lj_cf_package_unloadlib, 1);
+ lua_setfield(L, -2, "__gc");
+ luaL_register(L, LUA_LOADLIBNAME, package_lib);
+ lua_pushvalue(L, -1);
+ lua_replace(L, LUA_ENVIRONINDEX);
+ lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0);
+ for (i = 0; package_loaders[i] != NULL; i++) {
+ lj_lib_pushcf(L, package_loaders[i], 1);
+ lua_rawseti(L, -2, i+1);
+ }
+ lua_setfield(L, -2, "loaders");
+ lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
+ noenv = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT, noenv);
+ setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT, noenv);
+ lua_pushliteral(L, LUA_PATH_CONFIG);
+ lua_setfield(L, -2, "config");
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_setfield(L, -2, "loaded");
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4);
+ lua_setfield(L, -2, "preload");
+ lua_pushvalue(L, LUA_GLOBALSINDEX);
+ luaL_register(L, NULL, package_global);
+ lua_pop(L, 1);
+ return 1;
+}
+
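
For orientation, the C functions restored above back the standard Lua-side package API (require, package.searchpath, package.loadlib). The following is a minimal, illustrative Lua sketch of how that API behaves as implemented here; the module name "mylib" and the library path "./mylib.so" are placeholders for this example, not files in this tree.

    -- require() walks package.loaders (preload, Lua files, C libraries, croot),
    -- caching the result in _LOADED; pcall catches "module not found" errors.
    local ok, mod = pcall(require, "mylib")

    -- package.searchpath (lj_cf_package_searchpath) resolves a module name
    -- against a path template and returns the file name, or nil plus the
    -- accumulated "no file ..." message.
    local file, err = package.searchpath("mylib", package.cpath)

    -- package.loadlib (lj_cf_package_loadlib) returns the entry point on
    -- success, or nil, an error message, and "open"/"init" on failure.
    local init, msg, where = package.loadlib("./mylib.so", "luaopen_mylib")
    if init then init() end
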
diff --git a/3rdparty/lua/src/lib_string.c b/3rdparty/lua/src/lib_string.c
index 7dd401a..0f245b0 100644
--- a/3rdparty/lua/src/lib_string.c
+++ b/3rdparty/lua/src/lib_string.c
@@ -1,940 +1,940 @@
-/*
-** String library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#include <stdio.h>
-
-#define lib_string_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_state.h"
-#include "lj_ff.h"
-#include "lj_bcdump.h"
-#include "lj_char.h"
-#include "lj_lib.h"
-
-/* ------------------------------------------------------------------------ */
-
-#define LJLIB_MODULE_string
-
-LJLIB_ASM(string_len) LJLIB_REC(.)
-{
- lj_lib_checkstr(L, 1);
- return FFH_RETRY;
-}
-
-LJLIB_ASM(string_byte) LJLIB_REC(string_range 0)
-{
- GCstr *s = lj_lib_checkstr(L, 1);
- int32_t len = (int32_t)s->len;
- int32_t start = lj_lib_optint(L, 2, 1);
- int32_t stop = lj_lib_optint(L, 3, start);
- int32_t n, i;
- const unsigned char *p;
- if (stop < 0) stop += len+1;
- if (start < 0) start += len+1;
- if (start <= 0) start = 1;
- if (stop > len) stop = len;
- if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */
- start--;
- n = stop - start;
- if ((uint32_t)n > LUAI_MAXCSTACK)
- lj_err_caller(L, LJ_ERR_STRSLC);
- lj_state_checkstack(L, (MSize)n);
- p = (const unsigned char *)strdata(s) + start;
- for (i = 0; i < n; i++)
- setintV(L->base + i-1, p[i]);
- return FFH_RES(n);
-}
-
-LJLIB_ASM(string_char)
-{
- int i, nargs = (int)(L->top - L->base);
- char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, (MSize)nargs);
- for (i = 1; i <= nargs; i++) {
- int32_t k = lj_lib_checkint(L, i);
- if (!checku8(k))
- lj_err_arg(L, i, LJ_ERR_BADVAL);
- buf[i-1] = (char)k;
- }
- setstrV(L, L->base-1, lj_str_new(L, buf, (size_t)nargs));
- return FFH_RES(1);
-}
-
-LJLIB_ASM(string_sub) LJLIB_REC(string_range 1)
-{
- lj_lib_checkstr(L, 1);
- lj_lib_checkint(L, 2);
- setintV(L->base+2, lj_lib_optint(L, 3, -1));
- return FFH_RETRY;
-}
-
-LJLIB_ASM(string_rep)
-{
- GCstr *s = lj_lib_checkstr(L, 1);
- int32_t k = lj_lib_checkint(L, 2);
- GCstr *sep = lj_lib_optstr(L, 3);
- int32_t len = (int32_t)s->len;
- global_State *g = G(L);
- int64_t tlen;
- const char *src;
- char *buf;
- if (k <= 0) {
- empty:
- setstrV(L, L->base-1, &g->strempty);
- return FFH_RES(1);
- }
- if (sep) {
- tlen = (int64_t)len + sep->len;
- if (tlen > LJ_MAX_STR)
- lj_err_caller(L, LJ_ERR_STROV);
- tlen *= k;
- if (tlen > LJ_MAX_STR)
- lj_err_caller(L, LJ_ERR_STROV);
- } else {
- tlen = (int64_t)k * len;
- if (tlen > LJ_MAX_STR)
- lj_err_caller(L, LJ_ERR_STROV);
- }
- if (tlen == 0) goto empty;
- buf = lj_str_needbuf(L, &g->tmpbuf, (MSize)tlen);
- src = strdata(s);
- if (sep) {
- tlen -= sep->len; /* Ignore trailing separator. */
- if (k > 1) { /* Paste one string and one separator. */
- int32_t i;
- i = 0; while (i < len) *buf++ = src[i++];
- src = strdata(sep); len = sep->len;
- i = 0; while (i < len) *buf++ = src[i++];
- src = g->tmpbuf.buf; len += s->len; k--; /* Now copy that k-1 times. */
- }
- }
- do {
- int32_t i = 0;
- do { *buf++ = src[i++]; } while (i < len);
- } while (--k > 0);
- setstrV(L, L->base-1, lj_str_new(L, g->tmpbuf.buf, (size_t)tlen));
- return FFH_RES(1);
-}
-
-LJLIB_ASM(string_reverse)
-{
- GCstr *s = lj_lib_checkstr(L, 1);
- lj_str_needbuf(L, &G(L)->tmpbuf, s->len);
- return FFH_RETRY;
-}
-LJLIB_ASM_(string_lower)
-LJLIB_ASM_(string_upper)
-
-/* ------------------------------------------------------------------------ */
-
-static int writer_buf(lua_State *L, const void *p, size_t size, void *b)
-{
- luaL_addlstring((luaL_Buffer *)b, (const char *)p, size);
- UNUSED(L);
- return 0;
-}
-
-LJLIB_CF(string_dump)
-{
- GCfunc *fn = lj_lib_checkfunc(L, 1);
- int strip = L->base+1 < L->top && tvistruecond(L->base+1);
- luaL_Buffer b;
- L->top = L->base+1;
- luaL_buffinit(L, &b);
- if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, &b, strip))
- lj_err_caller(L, LJ_ERR_STRDUMP);
- luaL_pushresult(&b);
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* macro to `unsign' a character */
-#define uchar(c) ((unsigned char)(c))
-
-#define CAP_UNFINISHED (-1)
-#define CAP_POSITION (-2)
-
-typedef struct MatchState {
- const char *src_init; /* init of source string */
- const char *src_end; /* end (`\0') of source string */
- lua_State *L;
- int level; /* total number of captures (finished or unfinished) */
- int depth;
- struct {
- const char *init;
- ptrdiff_t len;
- } capture[LUA_MAXCAPTURES];
-} MatchState;
-
-#define L_ESC '%'
-#define SPECIALS "^$*+?.([%-"
-
-static int check_capture(MatchState *ms, int l)
-{
- l -= '1';
- if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
- lj_err_caller(ms->L, LJ_ERR_STRCAPI);
- return l;
-}
-
-static int capture_to_close(MatchState *ms)
-{
- int level = ms->level;
- for (level--; level>=0; level--)
- if (ms->capture[level].len == CAP_UNFINISHED) return level;
- lj_err_caller(ms->L, LJ_ERR_STRPATC);
- return 0; /* unreachable */
-}
-
-static const char *classend(MatchState *ms, const char *p)
-{
- switch (*p++) {
- case L_ESC:
- if (*p == '\0')
- lj_err_caller(ms->L, LJ_ERR_STRPATE);
- return p+1;
- case '[':
- if (*p == '^') p++;
- do { /* look for a `]' */
- if (*p == '\0')
- lj_err_caller(ms->L, LJ_ERR_STRPATM);
- if (*(p++) == L_ESC && *p != '\0')
- p++; /* skip escapes (e.g. `%]') */
- } while (*p != ']');
- return p+1;
- default:
- return p;
- }
-}
-
-static const unsigned char match_class_map[32] = {
- 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0,
- LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0,
- LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0
-};
-
-static int match_class(int c, int cl)
-{
- if ((cl & 0xc0) == 0x40) {
- int t = match_class_map[(cl&0x1f)];
- if (t) {
- t = lj_char_isa(c, t);
- return (cl & 0x20) ? t : !t;
- }
- if (cl == 'z') return c == 0;
- if (cl == 'Z') return c != 0;
- }
- return (cl == c);
-}
-
-static int matchbracketclass(int c, const char *p, const char *ec)
-{
- int sig = 1;
- if (*(p+1) == '^') {
- sig = 0;
- p++; /* skip the `^' */
- }
- while (++p < ec) {
- if (*p == L_ESC) {
- p++;
- if (match_class(c, uchar(*p)))
- return sig;
- }
- else if ((*(p+1) == '-') && (p+2 < ec)) {
- p+=2;
- if (uchar(*(p-2)) <= c && c <= uchar(*p))
- return sig;
- }
- else if (uchar(*p) == c) return sig;
- }
- return !sig;
-}
-
-static int singlematch(int c, const char *p, const char *ep)
-{
- switch (*p) {
- case '.': return 1; /* matches any char */
- case L_ESC: return match_class(c, uchar(*(p+1)));
- case '[': return matchbracketclass(c, p, ep-1);
- default: return (uchar(*p) == c);
- }
-}
-
-static const char *match(MatchState *ms, const char *s, const char *p);
-
-static const char *matchbalance(MatchState *ms, const char *s, const char *p)
-{
- if (*p == 0 || *(p+1) == 0)
- lj_err_caller(ms->L, LJ_ERR_STRPATU);
- if (*s != *p) {
- return NULL;
- } else {
- int b = *p;
- int e = *(p+1);
- int cont = 1;
- while (++s < ms->src_end) {
- if (*s == e) {
- if (--cont == 0) return s+1;
- } else if (*s == b) {
- cont++;
- }
- }
- }
- return NULL; /* string ends out of balance */
-}
-
-static const char *max_expand(MatchState *ms, const char *s,
- const char *p, const char *ep)
-{
- ptrdiff_t i = 0; /* counts maximum expand for item */
- while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep))
- i++;
- /* keeps trying to match with the maximum repetitions */
- while (i>=0) {
- const char *res = match(ms, (s+i), ep+1);
- if (res) return res;
- i--; /* else didn't match; reduce 1 repetition to try again */
- }
- return NULL;
-}
-
-static const char *min_expand(MatchState *ms, const char *s,
- const char *p, const char *ep)
-{
- for (;;) {
- const char *res = match(ms, s, ep+1);
- if (res != NULL)
- return res;
- else if (s<ms->src_end && singlematch(uchar(*s), p, ep))
- s++; /* try with one more repetition */
- else
- return NULL;
- }
-}
-
-static const char *start_capture(MatchState *ms, const char *s,
- const char *p, int what)
-{
- const char *res;
- int level = ms->level;
- if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN);
- ms->capture[level].init = s;
- ms->capture[level].len = what;
- ms->level = level+1;
- if ((res=match(ms, s, p)) == NULL) /* match failed? */
- ms->level--; /* undo capture */
- return res;
-}
-
-static const char *end_capture(MatchState *ms, const char *s,
- const char *p)
-{
- int l = capture_to_close(ms);
- const char *res;
- ms->capture[l].len = s - ms->capture[l].init; /* close capture */
- if ((res = match(ms, s, p)) == NULL) /* match failed? */
- ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
- return res;
-}
-
-static const char *match_capture(MatchState *ms, const char *s, int l)
-{
- size_t len;
- l = check_capture(ms, l);
- len = (size_t)ms->capture[l].len;
- if ((size_t)(ms->src_end-s) >= len &&
- memcmp(ms->capture[l].init, s, len) == 0)
- return s+len;
- else
- return NULL;
-}
-
-static const char *match(MatchState *ms, const char *s, const char *p)
-{
- if (++ms->depth > LJ_MAX_XLEVEL)
- lj_err_caller(ms->L, LJ_ERR_STRPATX);
- init: /* using goto's to optimize tail recursion */
- switch (*p) {
- case '(': /* start capture */
- if (*(p+1) == ')') /* position capture? */
- s = start_capture(ms, s, p+2, CAP_POSITION);
- else
- s = start_capture(ms, s, p+1, CAP_UNFINISHED);
- break;
- case ')': /* end capture */
- s = end_capture(ms, s, p+1);
- break;
- case L_ESC:
- switch (*(p+1)) {
- case 'b': /* balanced string? */
- s = matchbalance(ms, s, p+2);
- if (s == NULL) break;
- p+=4;
- goto init; /* else s = match(ms, s, p+4); */
- case 'f': { /* frontier? */
- const char *ep; char previous;
- p += 2;
- if (*p != '[')
- lj_err_caller(ms->L, LJ_ERR_STRPATB);
- ep = classend(ms, p); /* points to what is next */
- previous = (s == ms->src_init) ? '\0' : *(s-1);
- if (matchbracketclass(uchar(previous), p, ep-1) ||
- !matchbracketclass(uchar(*s), p, ep-1)) { s = NULL; break; }
- p=ep;
- goto init; /* else s = match(ms, s, ep); */
- }
- default:
- if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */
- s = match_capture(ms, s, uchar(*(p+1)));
- if (s == NULL) break;
- p+=2;
- goto init; /* else s = match(ms, s, p+2) */
- }
- goto dflt; /* case default */
- }
- break;
- case '\0': /* end of pattern */
- break; /* match succeeded */
- case '$':
- /* is the `$' the last char in pattern? */
- if (*(p+1) != '\0') goto dflt;
- if (s != ms->src_end) s = NULL; /* check end of string */
- break;
- default: dflt: { /* it is a pattern item */
- const char *ep = classend(ms, p); /* points to what is next */
- int m = s<ms->src_end && singlematch(uchar(*s), p, ep);
- switch (*ep) {
- case '?': { /* optional */
- const char *res;
- if (m && ((res=match(ms, s+1, ep+1)) != NULL)) {
- s = res;
- break;
- }
- p=ep+1;
- goto init; /* else s = match(ms, s, ep+1); */
- }
- case '*': /* 0 or more repetitions */
- s = max_expand(ms, s, p, ep);
- break;
- case '+': /* 1 or more repetitions */
- s = (m ? max_expand(ms, s+1, p, ep) : NULL);
- break;
- case '-': /* 0 or more repetitions (minimum) */
- s = min_expand(ms, s, p, ep);
- break;
- default:
- if (m) { s++; p=ep; goto init; } /* else s = match(ms, s+1, ep); */
- s = NULL;
- break;
- }
- break;
- }
- }
- ms->depth--;
- return s;
-}
-
-static const char *lmemfind(const char *s1, size_t l1,
- const char *s2, size_t l2)
-{
- if (l2 == 0) {
- return s1; /* empty strings are everywhere */
- } else if (l2 > l1) {
- return NULL; /* avoids a negative `l1' */
- } else {
- const char *init; /* to search for a `*s2' inside `s1' */
- l2--; /* 1st char will be checked by `memchr' */
- l1 = l1-l2; /* `s2' cannot be found after that */
- while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
- init++; /* 1st char is already checked */
- if (memcmp(init, s2+1, l2) == 0) {
- return init-1;
- } else { /* correct `l1' and `s1' to try again */
- l1 -= (size_t)(init-s1);
- s1 = init;
- }
- }
- return NULL; /* not found */
- }
-}
-
-static void push_onecapture(MatchState *ms, int i, const char *s, const char *e)
-{
- if (i >= ms->level) {
- if (i == 0) /* ms->level == 0, too */
- lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */
- else
- lj_err_caller(ms->L, LJ_ERR_STRCAPI);
- } else {
- ptrdiff_t l = ms->capture[i].len;
- if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU);
- if (l == CAP_POSITION)
- lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
- else
- lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l);
- }
-}
-
-static int push_captures(MatchState *ms, const char *s, const char *e)
-{
- int i;
- int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
- luaL_checkstack(ms->L, nlevels, "too many captures");
- for (i = 0; i < nlevels; i++)
- push_onecapture(ms, i, s, e);
- return nlevels; /* number of strings pushed */
-}
-
-static ptrdiff_t posrelat(ptrdiff_t pos, size_t len)
-{
- /* relative string position: negative means back from end */
- if (pos < 0) pos += (ptrdiff_t)len + 1;
- return (pos >= 0) ? pos : 0;
-}
-
-static int str_find_aux(lua_State *L, int find)
-{
- size_t l1, l2;
- const char *s = luaL_checklstring(L, 1, &l1);
- const char *p = luaL_checklstring(L, 2, &l2);
- ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
- if (init < 0) {
- init = 0;
- } else if ((size_t)(init) > l1) {
-#if LJ_52
- setnilV(L->top-1);
- return 1;
-#else
- init = (ptrdiff_t)l1;
-#endif
- }
- if (find && (lua_toboolean(L, 4) || /* explicit request? */
- strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */
- /* do a plain search */
- const char *s2 = lmemfind(s+init, l1-(size_t)init, p, l2);
- if (s2) {
- lua_pushinteger(L, s2-s+1);
- lua_pushinteger(L, s2-s+(ptrdiff_t)l2);
- return 2;
- }
- } else {
- MatchState ms;
- int anchor = (*p == '^') ? (p++, 1) : 0;
- const char *s1=s+init;
- ms.L = L;
- ms.src_init = s;
- ms.src_end = s+l1;
- do {
- const char *res;
- ms.level = ms.depth = 0;
- if ((res=match(&ms, s1, p)) != NULL) {
- if (find) {
- lua_pushinteger(L, s1-s+1); /* start */
- lua_pushinteger(L, res-s); /* end */
- return push_captures(&ms, NULL, 0) + 2;
- } else {
- return push_captures(&ms, s1, res);
- }
- }
- } while (s1++ < ms.src_end && !anchor);
- }
- lua_pushnil(L); /* not found */
- return 1;
-}
-
-LJLIB_CF(string_find)
-{
- return str_find_aux(L, 1);
-}
-
-LJLIB_CF(string_match)
-{
- return str_find_aux(L, 0);
-}
-
-LJLIB_NOREG LJLIB_CF(string_gmatch_aux)
-{
- const char *p = strVdata(lj_lib_upvalue(L, 2));
- GCstr *str = strV(lj_lib_upvalue(L, 1));
- const char *s = strdata(str);
- TValue *tvpos = lj_lib_upvalue(L, 3);
- const char *src = s + tvpos->u32.lo;
- MatchState ms;
- ms.L = L;
- ms.src_init = s;
- ms.src_end = s + str->len;
- for (; src <= ms.src_end; src++) {
- const char *e;
- ms.level = ms.depth = 0;
- if ((e = match(&ms, src, p)) != NULL) {
- int32_t pos = (int32_t)(e - s);
- if (e == src) pos++; /* Ensure progress for empty match. */
- tvpos->u32.lo = (uint32_t)pos;
- return push_captures(&ms, src, e);
- }
- }
- return 0; /* not found */
-}
-
-LJLIB_CF(string_gmatch)
-{
- lj_lib_checkstr(L, 1);
- lj_lib_checkstr(L, 2);
- L->top = L->base+3;
- (L->top-1)->u64 = 0;
- lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3);
- return 1;
-}
-
-static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e)
-{
- size_t l, i;
- const char *news = lua_tolstring(ms->L, 3, &l);
- for (i = 0; i < l; i++) {
- if (news[i] != L_ESC) {
- luaL_addchar(b, news[i]);
- } else {
- i++; /* skip ESC */
- if (!lj_char_isdigit(uchar(news[i]))) {
- luaL_addchar(b, news[i]);
- } else if (news[i] == '0') {
- luaL_addlstring(b, s, (size_t)(e - s));
- } else {
- push_onecapture(ms, news[i] - '1', s, e);
- luaL_addvalue(b); /* add capture to accumulated result */
- }
- }
- }
-}
-
-static void add_value(MatchState *ms, luaL_Buffer *b,
- const char *s, const char *e)
-{
- lua_State *L = ms->L;
- switch (lua_type(L, 3)) {
- case LUA_TNUMBER:
- case LUA_TSTRING: {
- add_s(ms, b, s, e);
- return;
- }
- case LUA_TFUNCTION: {
- int n;
- lua_pushvalue(L, 3);
- n = push_captures(ms, s, e);
- lua_call(L, n, 1);
- break;
- }
- case LUA_TTABLE: {
- push_onecapture(ms, 0, s, e);
- lua_gettable(L, 3);
- break;
- }
- }
- if (!lua_toboolean(L, -1)) { /* nil or false? */
- lua_pop(L, 1);
- lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */
- } else if (!lua_isstring(L, -1)) {
- lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1));
- }
- luaL_addvalue(b); /* add result to accumulator */
-}
-
-LJLIB_CF(string_gsub)
-{
- size_t srcl;
- const char *src = luaL_checklstring(L, 1, &srcl);
- const char *p = luaL_checkstring(L, 2);
- int tr = lua_type(L, 3);
- int max_s = luaL_optint(L, 4, (int)(srcl+1));
- int anchor = (*p == '^') ? (p++, 1) : 0;
- int n = 0;
- MatchState ms;
- luaL_Buffer b;
- if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING ||
- tr == LUA_TFUNCTION || tr == LUA_TTABLE))
- lj_err_arg(L, 3, LJ_ERR_NOSFT);
- luaL_buffinit(L, &b);
- ms.L = L;
- ms.src_init = src;
- ms.src_end = src+srcl;
- while (n < max_s) {
- const char *e;
- ms.level = ms.depth = 0;
- e = match(&ms, src, p);
- if (e) {
- n++;
- add_value(&ms, &b, src, e);
- }
- if (e && e>src) /* non empty match? */
- src = e; /* skip it */
- else if (src < ms.src_end)
- luaL_addchar(&b, *src++);
- else
- break;
- if (anchor)
- break;
- }
- luaL_addlstring(&b, src, (size_t)(ms.src_end-src));
- luaL_pushresult(&b);
- lua_pushinteger(L, n); /* number of substitutions */
- return 2;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */
-#define MAX_FMTITEM 512
-/* valid flags in a format specification */
-#define FMT_FLAGS "-+ #0"
-/*
-** maximum size of each format specification (such as '%-099.99d')
-** (+10 accounts for %99.99x plus margin of error)
-*/
-#define MAX_FMTSPEC (sizeof(FMT_FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
-
-static void addquoted(lua_State *L, luaL_Buffer *b, int arg)
-{
- GCstr *str = lj_lib_checkstr(L, arg);
- int32_t len = (int32_t)str->len;
- const char *s = strdata(str);
- luaL_addchar(b, '"');
- while (len--) {
- uint32_t c = uchar(*s);
- if (c == '"' || c == '\\' || c == '\n') {
- luaL_addchar(b, '\\');
- } else if (lj_char_iscntrl(c)) { /* This can only be 0-31 or 127. */
- uint32_t d;
- luaL_addchar(b, '\\');
- if (c >= 100 || lj_char_isdigit(uchar(s[1]))) {
- luaL_addchar(b, '0'+(c >= 100)); if (c >= 100) c -= 100;
- goto tens;
- } else if (c >= 10) {
- tens:
- d = (c * 205) >> 11; c -= d * 10; luaL_addchar(b, '0'+d);
- }
- c += '0';
- }
- luaL_addchar(b, c);
- s++;
- }
- luaL_addchar(b, '"');
-}
-
-static const char *scanformat(lua_State *L, const char *strfrmt, char *form)
-{
- const char *p = strfrmt;
- while (*p != '\0' && strchr(FMT_FLAGS, *p) != NULL) p++; /* skip flags */
- if ((size_t)(p - strfrmt) >= sizeof(FMT_FLAGS))
- lj_err_caller(L, LJ_ERR_STRFMTR);
- if (lj_char_isdigit(uchar(*p))) p++; /* skip width */
- if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
- if (*p == '.') {
- p++;
- if (lj_char_isdigit(uchar(*p))) p++; /* skip precision */
- if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
- }
- if (lj_char_isdigit(uchar(*p)))
- lj_err_caller(L, LJ_ERR_STRFMTW);
- *(form++) = '%';
- strncpy(form, strfrmt, (size_t)(p - strfrmt + 1));
- form += p - strfrmt + 1;
- *form = '\0';
- return p;
-}
-
-static void addintlen(char *form)
-{
- size_t l = strlen(form);
- char spec = form[l - 1];
- strcpy(form + l - 1, LUA_INTFRMLEN);
- form[l + sizeof(LUA_INTFRMLEN) - 2] = spec;
- form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0';
-}
-
-static unsigned LUA_INTFRM_T num2intfrm(lua_State *L, int arg)
-{
- if (sizeof(LUA_INTFRM_T) == 4) {
- return (LUA_INTFRM_T)lj_lib_checkbit(L, arg);
- } else {
- cTValue *o;
- lj_lib_checknumber(L, arg);
- o = L->base+arg-1;
- if (tvisint(o))
- return (LUA_INTFRM_T)intV(o);
- else
- return (LUA_INTFRM_T)numV(o);
- }
-}
-
-static unsigned LUA_INTFRM_T num2uintfrm(lua_State *L, int arg)
-{
- if (sizeof(LUA_INTFRM_T) == 4) {
- return (unsigned LUA_INTFRM_T)lj_lib_checkbit(L, arg);
- } else {
- cTValue *o;
- lj_lib_checknumber(L, arg);
- o = L->base+arg-1;
- if (tvisint(o))
- return (unsigned LUA_INTFRM_T)intV(o);
- else if ((int32_t)o->u32.hi < 0)
- return (unsigned LUA_INTFRM_T)(LUA_INTFRM_T)numV(o);
- else
- return (unsigned LUA_INTFRM_T)numV(o);
- }
-}
-
-static GCstr *meta_tostring(lua_State *L, int arg)
-{
- TValue *o = L->base+arg-1;
- cTValue *mo;
- lua_assert(o < L->top); /* Caller already checks for existence. */
- if (LJ_LIKELY(tvisstr(o)))
- return strV(o);
- if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
- copyTV(L, L->top++, mo);
- copyTV(L, L->top++, o);
- lua_call(L, 1, 1);
- L->top--;
- if (tvisstr(L->top))
- return strV(L->top);
- o = L->base+arg-1;
- copyTV(L, o, L->top);
- }
- if (tvisnumber(o)) {
- return lj_str_fromnumber(L, o);
- } else if (tvisnil(o)) {
- return lj_str_newlit(L, "nil");
- } else if (tvisfalse(o)) {
- return lj_str_newlit(L, "false");
- } else if (tvistrue(o)) {
- return lj_str_newlit(L, "true");
- } else {
- if (tvisfunc(o) && isffunc(funcV(o)))
- lj_str_pushf(L, "function: builtin#%d", funcV(o)->c.ffid);
- else
- lj_str_pushf(L, "%s: %p", lj_typename(o), lua_topointer(L, arg));
- L->top--;
- return strV(L->top);
- }
-}
-
-LJLIB_CF(string_format)
-{
- int arg = 1, top = (int)(L->top - L->base);
- GCstr *fmt = lj_lib_checkstr(L, arg);
- const char *strfrmt = strdata(fmt);
- const char *strfrmt_end = strfrmt + fmt->len;
- luaL_Buffer b;
- luaL_buffinit(L, &b);
- while (strfrmt < strfrmt_end) {
- if (*strfrmt != L_ESC) {
- luaL_addchar(&b, *strfrmt++);
- } else if (*++strfrmt == L_ESC) {
- luaL_addchar(&b, *strfrmt++); /* %% */
- } else { /* format item */
- char form[MAX_FMTSPEC]; /* to store the format (`%...') */
- char buff[MAX_FMTITEM]; /* to store the formatted item */
- if (++arg > top)
- luaL_argerror(L, arg, lj_obj_typename[0]);
- strfrmt = scanformat(L, strfrmt, form);
- switch (*strfrmt++) {
- case 'c':
- sprintf(buff, form, lj_lib_checkint(L, arg));
- break;
- case 'd': case 'i':
- addintlen(form);
- sprintf(buff, form, num2intfrm(L, arg));
- break;
- case 'o': case 'u': case 'x': case 'X':
- addintlen(form);
- sprintf(buff, form, num2uintfrm(L, arg));
- break;
- case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': {
- TValue tv;
- tv.n = lj_lib_checknum(L, arg);
- if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) {
- /* Canonicalize output of non-finite values. */
- char *p, nbuf[LJ_STR_NUMBUF];
- size_t len = lj_str_bufnum(nbuf, &tv);
- if (strfrmt[-1] < 'a') {
- nbuf[len-3] = nbuf[len-3] - 0x20;
- nbuf[len-2] = nbuf[len-2] - 0x20;
- nbuf[len-1] = nbuf[len-1] - 0x20;
- }
- nbuf[len] = '\0';
- for (p = form; *p < 'A' && *p != '.'; p++) ;
- *p++ = 's'; *p = '\0';
- sprintf(buff, form, nbuf);
- break;
- }
- sprintf(buff, form, (double)tv.n);
- break;
- }
- case 'q':
- addquoted(L, &b, arg);
- continue;
- case 'p':
- lj_str_pushf(L, "%p", lua_topointer(L, arg));
- luaL_addvalue(&b);
- continue;
- case 's': {
- GCstr *str = meta_tostring(L, arg);
- if (!strchr(form, '.') && str->len >= 100) {
- /* no precision and string is too long to be formatted;
- keep original string */
- setstrV(L, L->top++, str);
- luaL_addvalue(&b);
- continue;
- }
- sprintf(buff, form, strdata(str));
- break;
- }
- default:
- lj_err_callerv(L, LJ_ERR_STRFMTO, *(strfrmt -1));
- break;
- }
- luaL_addlstring(&b, buff, strlen(buff));
- }
- }
- luaL_pushresult(&b);
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-#include "lj_libdef.h"
-
-LUALIB_API int luaopen_string(lua_State *L)
-{
- GCtab *mt;
- global_State *g;
- LJ_LIB_REG(L, LUA_STRLIBNAME, string);
-#if defined(LUA_COMPAT_GFIND) && !LJ_52
- lua_getfield(L, -1, "gmatch");
- lua_setfield(L, -2, "gfind");
-#endif
- mt = lj_tab_new(L, 0, 1);
- /* NOBARRIER: basemt is a GC root. */
- g = G(L);
- setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt));
- settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1));
- mt->nomm = (uint8_t)(~(1u<<MM_index));
- return 1;
-}
-
+/*
+** String library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lib_string_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_ff.h"
+#include "lj_bcdump.h"
+#include "lj_char.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_string
+
+LJLIB_ASM(string_len) LJLIB_REC(.)
+{
+ lj_lib_checkstr(L, 1);
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(string_byte) LJLIB_REC(string_range 0)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int32_t len = (int32_t)s->len;
+ int32_t start = lj_lib_optint(L, 2, 1);
+ int32_t stop = lj_lib_optint(L, 3, start);
+ int32_t n, i;
+ const unsigned char *p;
+ if (stop < 0) stop += len+1;
+ if (start < 0) start += len+1;
+ if (start <= 0) start = 1;
+ if (stop > len) stop = len;
+ if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */
+ start--;
+ n = stop - start;
+ if ((uint32_t)n > LUAI_MAXCSTACK)
+ lj_err_caller(L, LJ_ERR_STRSLC);
+ lj_state_checkstack(L, (MSize)n);
+ p = (const unsigned char *)strdata(s) + start;
+ for (i = 0; i < n; i++)
+ setintV(L->base + i-1, p[i]);
+ return FFH_RES(n);
+}
+
+LJLIB_ASM(string_char)
+{
+ int i, nargs = (int)(L->top - L->base);
+ char *buf = lj_str_needbuf(L, &G(L)->tmpbuf, (MSize)nargs);
+ for (i = 1; i <= nargs; i++) {
+ int32_t k = lj_lib_checkint(L, i);
+ if (!checku8(k))
+ lj_err_arg(L, i, LJ_ERR_BADVAL);
+ buf[i-1] = (char)k;
+ }
+ setstrV(L, L->base-1, lj_str_new(L, buf, (size_t)nargs));
+ return FFH_RES(1);
+}
+
+LJLIB_ASM(string_sub) LJLIB_REC(string_range 1)
+{
+ lj_lib_checkstr(L, 1);
+ lj_lib_checkint(L, 2);
+ setintV(L->base+2, lj_lib_optint(L, 3, -1));
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(string_rep)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int32_t k = lj_lib_checkint(L, 2);
+ GCstr *sep = lj_lib_optstr(L, 3);
+ int32_t len = (int32_t)s->len;
+ global_State *g = G(L);
+ int64_t tlen;
+ const char *src;
+ char *buf;
+ if (k <= 0) {
+ empty:
+ setstrV(L, L->base-1, &g->strempty);
+ return FFH_RES(1);
+ }
+ if (sep) {
+ tlen = (int64_t)len + sep->len;
+ if (tlen > LJ_MAX_STR)
+ lj_err_caller(L, LJ_ERR_STROV);
+ tlen *= k;
+ if (tlen > LJ_MAX_STR)
+ lj_err_caller(L, LJ_ERR_STROV);
+ } else {
+ tlen = (int64_t)k * len;
+ if (tlen > LJ_MAX_STR)
+ lj_err_caller(L, LJ_ERR_STROV);
+ }
+ if (tlen == 0) goto empty;
+ buf = lj_str_needbuf(L, &g->tmpbuf, (MSize)tlen);
+ src = strdata(s);
+ if (sep) {
+ tlen -= sep->len; /* Ignore trailing separator. */
+ if (k > 1) { /* Paste one string and one separator. */
+ int32_t i;
+ i = 0; while (i < len) *buf++ = src[i++];
+ src = strdata(sep); len = sep->len;
+ i = 0; while (i < len) *buf++ = src[i++];
+ src = g->tmpbuf.buf; len += s->len; k--; /* Now copy that k-1 times. */
+ }
+ }
+ do {
+ int32_t i = 0;
+ do { *buf++ = src[i++]; } while (i < len);
+ } while (--k > 0);
+ setstrV(L, L->base-1, lj_str_new(L, g->tmpbuf.buf, (size_t)tlen));
+ return FFH_RES(1);
+}
+
+LJLIB_ASM(string_reverse)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ lj_str_needbuf(L, &G(L)->tmpbuf, s->len);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(string_lower)
+LJLIB_ASM_(string_upper)
+
+/* ------------------------------------------------------------------------ */
+
+static int writer_buf(lua_State *L, const void *p, size_t size, void *b)
+{
+ luaL_addlstring((luaL_Buffer *)b, (const char *)p, size);
+ UNUSED(L);
+ return 0;
+}
+
+LJLIB_CF(string_dump)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ int strip = L->base+1 < L->top && tvistruecond(L->base+1);
+ luaL_Buffer b;
+ L->top = L->base+1;
+ luaL_buffinit(L, &b);
+ if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, &b, strip))
+ lj_err_caller(L, LJ_ERR_STRDUMP);
+ luaL_pushresult(&b);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* macro to `unsign' a character */
+#define uchar(c) ((unsigned char)(c))
+
+#define CAP_UNFINISHED (-1)
+#define CAP_POSITION (-2)
+
+typedef struct MatchState {
+ const char *src_init; /* init of source string */
+ const char *src_end; /* end (`\0') of source string */
+ lua_State *L;
+ int level; /* total number of captures (finished or unfinished) */
+ int depth;
+ struct {
+ const char *init;
+ ptrdiff_t len;
+ } capture[LUA_MAXCAPTURES];
+} MatchState;
+
+#define L_ESC '%'
+#define SPECIALS "^$*+?.([%-"
+
+static int check_capture(MatchState *ms, int l)
+{
+ l -= '1';
+ if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
+ lj_err_caller(ms->L, LJ_ERR_STRCAPI);
+ return l;
+}
+
+static int capture_to_close(MatchState *ms)
+{
+ int level = ms->level;
+ for (level--; level>=0; level--)
+ if (ms->capture[level].len == CAP_UNFINISHED) return level;
+ lj_err_caller(ms->L, LJ_ERR_STRPATC);
+ return 0; /* unreachable */
+}
+
+static const char *classend(MatchState *ms, const char *p)
+{
+ switch (*p++) {
+ case L_ESC:
+ if (*p == '\0')
+ lj_err_caller(ms->L, LJ_ERR_STRPATE);
+ return p+1;
+ case '[':
+ if (*p == '^') p++;
+ do { /* look for a `]' */
+ if (*p == '\0')
+ lj_err_caller(ms->L, LJ_ERR_STRPATM);
+ if (*(p++) == L_ESC && *p != '\0')
+ p++; /* skip escapes (e.g. `%]') */
+ } while (*p != ']');
+ return p+1;
+ default:
+ return p;
+ }
+}
+
+static const unsigned char match_class_map[32] = {
+ 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0,
+ LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0,
+ LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0
+};
+
+static int match_class(int c, int cl)
+{
+ if ((cl & 0xc0) == 0x40) {
+ int t = match_class_map[(cl&0x1f)];
+ if (t) {
+ t = lj_char_isa(c, t);
+ return (cl & 0x20) ? t : !t;
+ }
+ if (cl == 'z') return c == 0;
+ if (cl == 'Z') return c != 0;
+ }
+ return (cl == c);
+}
+
+static int matchbracketclass(int c, const char *p, const char *ec)
+{
+ int sig = 1;
+ if (*(p+1) == '^') {
+ sig = 0;
+ p++; /* skip the `^' */
+ }
+ while (++p < ec) {
+ if (*p == L_ESC) {
+ p++;
+ if (match_class(c, uchar(*p)))
+ return sig;
+ }
+ else if ((*(p+1) == '-') && (p+2 < ec)) {
+ p+=2;
+ if (uchar(*(p-2)) <= c && c <= uchar(*p))
+ return sig;
+ }
+ else if (uchar(*p) == c) return sig;
+ }
+ return !sig;
+}
+
+static int singlematch(int c, const char *p, const char *ep)
+{
+ switch (*p) {
+ case '.': return 1; /* matches any char */
+ case L_ESC: return match_class(c, uchar(*(p+1)));
+ case '[': return matchbracketclass(c, p, ep-1);
+ default: return (uchar(*p) == c);
+ }
+}
+
+static const char *match(MatchState *ms, const char *s, const char *p);
+
+static const char *matchbalance(MatchState *ms, const char *s, const char *p)
+{
+ if (*p == 0 || *(p+1) == 0)
+ lj_err_caller(ms->L, LJ_ERR_STRPATU);
+ if (*s != *p) {
+ return NULL;
+ } else {
+ int b = *p;
+ int e = *(p+1);
+ int cont = 1;
+ while (++s < ms->src_end) {
+ if (*s == e) {
+ if (--cont == 0) return s+1;
+ } else if (*s == b) {
+ cont++;
+ }
+ }
+ }
+ return NULL; /* string ends out of balance */
+}
+
+static const char *max_expand(MatchState *ms, const char *s,
+ const char *p, const char *ep)
+{
+ ptrdiff_t i = 0; /* counts maximum expand for item */
+ while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep))
+ i++;
+ /* keeps trying to match with the maximum repetitions */
+ while (i>=0) {
+ const char *res = match(ms, (s+i), ep+1);
+ if (res) return res;
+ i--; /* else didn't match; reduce 1 repetition to try again */
+ }
+ return NULL;
+}
+
+static const char *min_expand(MatchState *ms, const char *s,
+ const char *p, const char *ep)
+{
+ for (;;) {
+ const char *res = match(ms, s, ep+1);
+ if (res != NULL)
+ return res;
+ else if (s<ms->src_end && singlematch(uchar(*s), p, ep))
+ s++; /* try with one more repetition */
+ else
+ return NULL;
+ }
+}
+
+static const char *start_capture(MatchState *ms, const char *s,
+ const char *p, int what)
+{
+ const char *res;
+ int level = ms->level;
+ if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN);
+ ms->capture[level].init = s;
+ ms->capture[level].len = what;
+ ms->level = level+1;
+ if ((res=match(ms, s, p)) == NULL) /* match failed? */
+ ms->level--; /* undo capture */
+ return res;
+}
+
+static const char *end_capture(MatchState *ms, const char *s,
+ const char *p)
+{
+ int l = capture_to_close(ms);
+ const char *res;
+ ms->capture[l].len = s - ms->capture[l].init; /* close capture */
+ if ((res = match(ms, s, p)) == NULL) /* match failed? */
+ ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
+ return res;
+}
+
+static const char *match_capture(MatchState *ms, const char *s, int l)
+{
+ size_t len;
+ l = check_capture(ms, l);
+ len = (size_t)ms->capture[l].len;
+ if ((size_t)(ms->src_end-s) >= len &&
+ memcmp(ms->capture[l].init, s, len) == 0)
+ return s+len;
+ else
+ return NULL;
+}
+
+static const char *match(MatchState *ms, const char *s, const char *p)
+{
+ if (++ms->depth > LJ_MAX_XLEVEL)
+ lj_err_caller(ms->L, LJ_ERR_STRPATX);
+ init: /* using goto's to optimize tail recursion */
+ switch (*p) {
+ case '(': /* start capture */
+ if (*(p+1) == ')') /* position capture? */
+ s = start_capture(ms, s, p+2, CAP_POSITION);
+ else
+ s = start_capture(ms, s, p+1, CAP_UNFINISHED);
+ break;
+ case ')': /* end capture */
+ s = end_capture(ms, s, p+1);
+ break;
+ case L_ESC:
+ switch (*(p+1)) {
+ case 'b': /* balanced string? */
+ s = matchbalance(ms, s, p+2);
+ if (s == NULL) break;
+ p+=4;
+ goto init; /* else s = match(ms, s, p+4); */
+ case 'f': { /* frontier? */
+ const char *ep; char previous;
+ p += 2;
+ if (*p != '[')
+ lj_err_caller(ms->L, LJ_ERR_STRPATB);
+ ep = classend(ms, p); /* points to what is next */
+ previous = (s == ms->src_init) ? '\0' : *(s-1);
+ if (matchbracketclass(uchar(previous), p, ep-1) ||
+ !matchbracketclass(uchar(*s), p, ep-1)) { s = NULL; break; }
+ p=ep;
+ goto init; /* else s = match(ms, s, ep); */
+ }
+ default:
+ if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */
+ s = match_capture(ms, s, uchar(*(p+1)));
+ if (s == NULL) break;
+ p+=2;
+ goto init; /* else s = match(ms, s, p+2) */
+ }
+ goto dflt; /* case default */
+ }
+ break;
+ case '\0': /* end of pattern */
+ break; /* match succeeded */
+ case '$':
+ /* is the `$' the last char in pattern? */
+ if (*(p+1) != '\0') goto dflt;
+ if (s != ms->src_end) s = NULL; /* check end of string */
+ break;
+ default: dflt: { /* it is a pattern item */
+ const char *ep = classend(ms, p); /* points to what is next */
+ int m = s<ms->src_end && singlematch(uchar(*s), p, ep);
+ switch (*ep) {
+ case '?': { /* optional */
+ const char *res;
+ if (m && ((res=match(ms, s+1, ep+1)) != NULL)) {
+ s = res;
+ break;
+ }
+ p=ep+1;
+ goto init; /* else s = match(ms, s, ep+1); */
+ }
+ case '*': /* 0 or more repetitions */
+ s = max_expand(ms, s, p, ep);
+ break;
+ case '+': /* 1 or more repetitions */
+ s = (m ? max_expand(ms, s+1, p, ep) : NULL);
+ break;
+ case '-': /* 0 or more repetitions (minimum) */
+ s = min_expand(ms, s, p, ep);
+ break;
+ default:
+ if (m) { s++; p=ep; goto init; } /* else s = match(ms, s+1, ep); */
+ s = NULL;
+ break;
+ }
+ break;
+ }
+ }
+ ms->depth--;
+ return s;
+}
+
+static const char *lmemfind(const char *s1, size_t l1,
+ const char *s2, size_t l2)
+{
+ if (l2 == 0) {
+ return s1; /* empty strings are everywhere */
+ } else if (l2 > l1) {
+ return NULL; /* avoids a negative `l1' */
+ } else {
+ const char *init; /* to search for a `*s2' inside `s1' */
+ l2--; /* 1st char will be checked by `memchr' */
+ l1 = l1-l2; /* `s2' cannot be found after that */
+ while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
+ init++; /* 1st char is already checked */
+ if (memcmp(init, s2+1, l2) == 0) {
+ return init-1;
+ } else { /* correct `l1' and `s1' to try again */
+ l1 -= (size_t)(init-s1);
+ s1 = init;
+ }
+ }
+ return NULL; /* not found */
+ }
+}
+
+static void push_onecapture(MatchState *ms, int i, const char *s, const char *e)
+{
+ if (i >= ms->level) {
+ if (i == 0) /* ms->level == 0, too */
+ lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */
+ else
+ lj_err_caller(ms->L, LJ_ERR_STRCAPI);
+ } else {
+ ptrdiff_t l = ms->capture[i].len;
+ if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU);
+ if (l == CAP_POSITION)
+ lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
+ else
+ lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l);
+ }
+}
+
+static int push_captures(MatchState *ms, const char *s, const char *e)
+{
+ int i;
+ int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
+ luaL_checkstack(ms->L, nlevels, "too many captures");
+ for (i = 0; i < nlevels; i++)
+ push_onecapture(ms, i, s, e);
+ return nlevels; /* number of strings pushed */
+}
+
+static ptrdiff_t posrelat(ptrdiff_t pos, size_t len)
+{
+ /* relative string position: negative means back from end */
+ if (pos < 0) pos += (ptrdiff_t)len + 1;
+ return (pos >= 0) ? pos : 0;
+}
+
+static int str_find_aux(lua_State *L, int find)
+{
+ size_t l1, l2;
+ const char *s = luaL_checklstring(L, 1, &l1);
+ const char *p = luaL_checklstring(L, 2, &l2);
+ ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
+ if (init < 0) {
+ init = 0;
+ } else if ((size_t)(init) > l1) {
+#if LJ_52
+ setnilV(L->top-1);
+ return 1;
+#else
+ init = (ptrdiff_t)l1;
+#endif
+ }
+ if (find && (lua_toboolean(L, 4) || /* explicit request? */
+ strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */
+ /* do a plain search */
+ const char *s2 = lmemfind(s+init, l1-(size_t)init, p, l2);
+ if (s2) {
+ lua_pushinteger(L, s2-s+1);
+ lua_pushinteger(L, s2-s+(ptrdiff_t)l2);
+ return 2;
+ }
+ } else {
+ MatchState ms;
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ const char *s1=s+init;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s+l1;
+ do {
+ const char *res;
+ ms.level = ms.depth = 0;
+ if ((res=match(&ms, s1, p)) != NULL) {
+ if (find) {
+ lua_pushinteger(L, s1-s+1); /* start */
+ lua_pushinteger(L, res-s); /* end */
+ return push_captures(&ms, NULL, 0) + 2;
+ } else {
+ return push_captures(&ms, s1, res);
+ }
+ }
+ } while (s1++ < ms.src_end && !anchor);
+ }
+ lua_pushnil(L); /* not found */
+ return 1;
+}
+
+LJLIB_CF(string_find)
+{
+ return str_find_aux(L, 1);
+}
+
+LJLIB_CF(string_match)
+{
+ return str_find_aux(L, 0);
+}
+
+LJLIB_NOREG LJLIB_CF(string_gmatch_aux)
+{
+ const char *p = strVdata(lj_lib_upvalue(L, 2));
+ GCstr *str = strV(lj_lib_upvalue(L, 1));
+ const char *s = strdata(str);
+ TValue *tvpos = lj_lib_upvalue(L, 3);
+ const char *src = s + tvpos->u32.lo;
+ MatchState ms;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s + str->len;
+ for (; src <= ms.src_end; src++) {
+ const char *e;
+ ms.level = ms.depth = 0;
+ if ((e = match(&ms, src, p)) != NULL) {
+ int32_t pos = (int32_t)(e - s);
+ if (e == src) pos++; /* Ensure progress for empty match. */
+ tvpos->u32.lo = (uint32_t)pos;
+ return push_captures(&ms, src, e);
+ }
+ }
+ return 0; /* not found */
+}
+
+LJLIB_CF(string_gmatch)
+{
+ lj_lib_checkstr(L, 1);
+ lj_lib_checkstr(L, 2);
+ L->top = L->base+3;
+ (L->top-1)->u64 = 0;
+ lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3);
+ return 1;
+}
+
+static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e)
+{
+ size_t l, i;
+ const char *news = lua_tolstring(ms->L, 3, &l);
+ for (i = 0; i < l; i++) {
+ if (news[i] != L_ESC) {
+ luaL_addchar(b, news[i]);
+ } else {
+ i++; /* skip ESC */
+ if (!lj_char_isdigit(uchar(news[i]))) {
+ luaL_addchar(b, news[i]);
+ } else if (news[i] == '0') {
+ luaL_addlstring(b, s, (size_t)(e - s));
+ } else {
+ push_onecapture(ms, news[i] - '1', s, e);
+ luaL_addvalue(b); /* add capture to accumulated result */
+ }
+ }
+ }
+}
+
+static void add_value(MatchState *ms, luaL_Buffer *b,
+ const char *s, const char *e)
+{
+ lua_State *L = ms->L;
+ switch (lua_type(L, 3)) {
+ case LUA_TNUMBER:
+ case LUA_TSTRING: {
+ add_s(ms, b, s, e);
+ return;
+ }
+ case LUA_TFUNCTION: {
+ int n;
+ lua_pushvalue(L, 3);
+ n = push_captures(ms, s, e);
+ lua_call(L, n, 1);
+ break;
+ }
+ case LUA_TTABLE: {
+ push_onecapture(ms, 0, s, e);
+ lua_gettable(L, 3);
+ break;
+ }
+ }
+ if (!lua_toboolean(L, -1)) { /* nil or false? */
+ lua_pop(L, 1);
+ lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */
+ } else if (!lua_isstring(L, -1)) {
+ lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1));
+ }
+ luaL_addvalue(b); /* add result to accumulator */
+}
+
+LJLIB_CF(string_gsub)
+{
+ size_t srcl;
+ const char *src = luaL_checklstring(L, 1, &srcl);
+ const char *p = luaL_checkstring(L, 2);
+ int tr = lua_type(L, 3);
+ int max_s = luaL_optint(L, 4, (int)(srcl+1));
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ int n = 0;
+ MatchState ms;
+ luaL_Buffer b;
+ if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING ||
+ tr == LUA_TFUNCTION || tr == LUA_TTABLE))
+ lj_err_arg(L, 3, LJ_ERR_NOSFT);
+ luaL_buffinit(L, &b);
+ ms.L = L;
+ ms.src_init = src;
+ ms.src_end = src+srcl;
+ while (n < max_s) {
+ const char *e;
+ ms.level = ms.depth = 0;
+ e = match(&ms, src, p);
+ if (e) {
+ n++;
+ add_value(&ms, &b, src, e);
+ }
+ if (e && e>src) /* non empty match? */
+ src = e; /* skip it */
+ else if (src < ms.src_end)
+ luaL_addchar(&b, *src++);
+ else
+ break;
+ if (anchor)
+ break;
+ }
+ luaL_addlstring(&b, src, (size_t)(ms.src_end-src));
+ luaL_pushresult(&b);
+ lua_pushinteger(L, n); /* number of substitutions */
+ return 2;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */
+#define MAX_FMTITEM 512
+/* valid flags in a format specification */
+#define FMT_FLAGS "-+ #0"
+/*
+** maximum size of each format specification (such as '%-099.99d')
+** (+10 accounts for %99.99x plus margin of error)
+*/
+#define MAX_FMTSPEC (sizeof(FMT_FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
+
+static void addquoted(lua_State *L, luaL_Buffer *b, int arg)
+{
+ GCstr *str = lj_lib_checkstr(L, arg);
+ int32_t len = (int32_t)str->len;
+ const char *s = strdata(str);
+ luaL_addchar(b, '"');
+ while (len--) {
+ uint32_t c = uchar(*s);
+ if (c == '"' || c == '\\' || c == '\n') {
+ luaL_addchar(b, '\\');
+ } else if (lj_char_iscntrl(c)) { /* This can only be 0-31 or 127. */
+ uint32_t d;
+ luaL_addchar(b, '\\');
+ if (c >= 100 || lj_char_isdigit(uchar(s[1]))) {
+ luaL_addchar(b, '0'+(c >= 100)); if (c >= 100) c -= 100;
+ goto tens;
+ } else if (c >= 10) {
+ tens:
+ d = (c * 205) >> 11; c -= d * 10; luaL_addchar(b, '0'+d);
+ }
+ c += '0';
+ }
+ luaL_addchar(b, c);
+ s++;
+ }
+ luaL_addchar(b, '"');
+}
+
+static const char *scanformat(lua_State *L, const char *strfrmt, char *form)
+{
+ const char *p = strfrmt;
+ while (*p != '\0' && strchr(FMT_FLAGS, *p) != NULL) p++; /* skip flags */
+ if ((size_t)(p - strfrmt) >= sizeof(FMT_FLAGS))
+ lj_err_caller(L, LJ_ERR_STRFMTR);
+ if (lj_char_isdigit(uchar(*p))) p++; /* skip width */
+ if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ if (*p == '.') {
+ p++;
+ if (lj_char_isdigit(uchar(*p))) p++; /* skip precision */
+ if (lj_char_isdigit(uchar(*p))) p++; /* (2 digits at most) */
+ }
+ if (lj_char_isdigit(uchar(*p)))
+ lj_err_caller(L, LJ_ERR_STRFMTW);
+ *(form++) = '%';
+ strncpy(form, strfrmt, (size_t)(p - strfrmt + 1));
+ form += p - strfrmt + 1;
+ *form = '\0';
+ return p;
+}
+
+static void addintlen(char *form)
+{
+ size_t l = strlen(form);
+ char spec = form[l - 1];
+ strcpy(form + l - 1, LUA_INTFRMLEN);
+ form[l + sizeof(LUA_INTFRMLEN) - 2] = spec;
+ form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0';
+}
+
+static unsigned LUA_INTFRM_T num2intfrm(lua_State *L, int arg)
+{
+ if (sizeof(LUA_INTFRM_T) == 4) {
+ return (LUA_INTFRM_T)lj_lib_checkbit(L, arg);
+ } else {
+ cTValue *o;
+ lj_lib_checknumber(L, arg);
+ o = L->base+arg-1;
+ if (tvisint(o))
+ return (LUA_INTFRM_T)intV(o);
+ else
+ return (LUA_INTFRM_T)numV(o);
+ }
+}
+
+static unsigned LUA_INTFRM_T num2uintfrm(lua_State *L, int arg)
+{
+ if (sizeof(LUA_INTFRM_T) == 4) {
+ return (unsigned LUA_INTFRM_T)lj_lib_checkbit(L, arg);
+ } else {
+ cTValue *o;
+ lj_lib_checknumber(L, arg);
+ o = L->base+arg-1;
+ if (tvisint(o))
+ return (unsigned LUA_INTFRM_T)intV(o);
+ else if ((int32_t)o->u32.hi < 0)
+ return (unsigned LUA_INTFRM_T)(LUA_INTFRM_T)numV(o);
+ else
+ return (unsigned LUA_INTFRM_T)numV(o);
+ }
+}
+
+static GCstr *meta_tostring(lua_State *L, int arg)
+{
+ TValue *o = L->base+arg-1;
+ cTValue *mo;
+ lua_assert(o < L->top); /* Caller already checks for existence. */
+ if (LJ_LIKELY(tvisstr(o)))
+ return strV(o);
+ if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
+ copyTV(L, L->top++, mo);
+ copyTV(L, L->top++, o);
+ lua_call(L, 1, 1);
+ L->top--;
+ if (tvisstr(L->top))
+ return strV(L->top);
+ o = L->base+arg-1;
+ copyTV(L, o, L->top);
+ }
+ if (tvisnumber(o)) {
+ return lj_str_fromnumber(L, o);
+ } else if (tvisnil(o)) {
+ return lj_str_newlit(L, "nil");
+ } else if (tvisfalse(o)) {
+ return lj_str_newlit(L, "false");
+ } else if (tvistrue(o)) {
+ return lj_str_newlit(L, "true");
+ } else {
+ if (tvisfunc(o) && isffunc(funcV(o)))
+ lj_str_pushf(L, "function: builtin#%d", funcV(o)->c.ffid);
+ else
+ lj_str_pushf(L, "%s: %p", lj_typename(o), lua_topointer(L, arg));
+ L->top--;
+ return strV(L->top);
+ }
+}
+
+LJLIB_CF(string_format)
+{
+ int arg = 1, top = (int)(L->top - L->base);
+ GCstr *fmt = lj_lib_checkstr(L, arg);
+ const char *strfrmt = strdata(fmt);
+ const char *strfrmt_end = strfrmt + fmt->len;
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while (strfrmt < strfrmt_end) {
+ if (*strfrmt != L_ESC) {
+ luaL_addchar(&b, *strfrmt++);
+ } else if (*++strfrmt == L_ESC) {
+ luaL_addchar(&b, *strfrmt++); /* %% */
+ } else { /* format item */
+ char form[MAX_FMTSPEC]; /* to store the format (`%...') */
+ char buff[MAX_FMTITEM]; /* to store the formatted item */
+ if (++arg > top)
+ luaL_argerror(L, arg, lj_obj_typename[0]);
+ strfrmt = scanformat(L, strfrmt, form);
+ switch (*strfrmt++) {
+ case 'c':
+ sprintf(buff, form, lj_lib_checkint(L, arg));
+ break;
+ case 'd': case 'i':
+ addintlen(form);
+ sprintf(buff, form, num2intfrm(L, arg));
+ break;
+ case 'o': case 'u': case 'x': case 'X':
+ addintlen(form);
+ sprintf(buff, form, num2uintfrm(L, arg));
+ break;
+ case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': {
+ TValue tv;
+ tv.n = lj_lib_checknum(L, arg);
+ if (LJ_UNLIKELY((tv.u32.hi << 1) >= 0xffe00000)) {
+ /* Canonicalize output of non-finite values. */
+ char *p, nbuf[LJ_STR_NUMBUF];
+ size_t len = lj_str_bufnum(nbuf, &tv);
+ if (strfrmt[-1] < 'a') {
+ nbuf[len-3] = nbuf[len-3] - 0x20;
+ nbuf[len-2] = nbuf[len-2] - 0x20;
+ nbuf[len-1] = nbuf[len-1] - 0x20;
+ }
+ nbuf[len] = '\0';
+ for (p = form; *p < 'A' && *p != '.'; p++) ;
+ *p++ = 's'; *p = '\0';
+ sprintf(buff, form, nbuf);
+ break;
+ }
+ sprintf(buff, form, (double)tv.n);
+ break;
+ }
+ case 'q':
+ addquoted(L, &b, arg);
+ continue;
+ case 'p':
+ lj_str_pushf(L, "%p", lua_topointer(L, arg));
+ luaL_addvalue(&b);
+ continue;
+ case 's': {
+ GCstr *str = meta_tostring(L, arg);
+ if (!strchr(form, '.') && str->len >= 100) {
+ /* no precision and string is too long to be formatted;
+ keep original string */
+ setstrV(L, L->top++, str);
+ luaL_addvalue(&b);
+ continue;
+ }
+ sprintf(buff, form, strdata(str));
+ break;
+ }
+ default:
+ lj_err_callerv(L, LJ_ERR_STRFMTO, *(strfrmt -1));
+ break;
+ }
+ luaL_addlstring(&b, buff, strlen(buff));
+ }
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_string(lua_State *L)
+{
+ GCtab *mt;
+ global_State *g;
+ LJ_LIB_REG(L, LUA_STRLIBNAME, string);
+#if defined(LUA_COMPAT_GFIND) && !LJ_52
+ lua_getfield(L, -1, "gmatch");
+ lua_setfield(L, -2, "gfind");
+#endif
+ mt = lj_tab_new(L, 0, 1);
+ /* NOBARRIER: basemt is a GC root. */
+ g = G(L);
+ setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt));
+ settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1));
+ mt->nomm = (uint8_t)(~(1u<<MM_index));
+ return 1;
+}
+
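A minimal host-program sketch (illustrative only; it assumes the standard Lua 5.1 headers shipped with LuaJIT and is not part of the changeset above) showing how the string-library entry points restored in lib_string.c are reached from C: string.gsub drives match() and add_value(), while string.format with %q goes through scanformat() and addquoted().

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int main(void)
{
  lua_State *L = luaL_newstate();   /* fresh VM state */
  luaL_openlibs(L);                 /* registers string, table, ... */
  if (luaL_dostring(L,
        "local s, n = string.gsub('hello world', 'o', '0')\n"
        "print(string.format('%q -> %d substitution(s)', s, n))\n"))
    fprintf(stderr, "%s\n", lua_tostring(L, -1));  /* report any error */
  lua_close(L);
  return 0;
}

Running it prints "hell0 w0rld" -> 2 substitution(s): both pattern matches are replaced by string.gsub, and the result is re-quoted by the %q formatter.
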
diff --git a/3rdparty/lua/src/lib_table.c b/3rdparty/lua/src/lib_table.c
index 734d393..542ed1f 100644
--- a/3rdparty/lua/src/lib_table.c
+++ b/3rdparty/lua/src/lib_table.c
@@ -1,300 +1,300 @@
-/*
-** Table library.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lib_table_c
-#define LUA_LIB
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_tab.h"
-#include "lj_lib.h"
-
-/* ------------------------------------------------------------------------ */
-
-#define LJLIB_MODULE_table
-
-LJLIB_CF(table_foreachi)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- GCfunc *func = lj_lib_checkfunc(L, 2);
- MSize i, n = lj_tab_len(t);
- for (i = 1; i <= n; i++) {
- cTValue *val;
- setfuncV(L, L->top, func);
- setintV(L->top+1, i);
- val = lj_tab_getint(t, (int32_t)i);
- if (val) { copyTV(L, L->top+2, val); } else { setnilV(L->top+2); }
- L->top += 3;
- lua_call(L, 2, 1);
- if (!tvisnil(L->top-1))
- return 1;
- L->top--;
- }
- return 0;
-}
-
-LJLIB_CF(table_foreach)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- GCfunc *func = lj_lib_checkfunc(L, 2);
- L->top = L->base+3;
- setnilV(L->top-1);
- while (lj_tab_next(L, t, L->top-1)) {
- copyTV(L, L->top+2, L->top);
- copyTV(L, L->top+1, L->top-1);
- setfuncV(L, L->top, func);
- L->top += 3;
- lua_call(L, 2, 1);
- if (!tvisnil(L->top-1))
- return 1;
- L->top--;
- }
- return 0;
-}
-
-LJLIB_ASM(table_getn) LJLIB_REC(.)
-{
- lj_lib_checktab(L, 1);
- return FFH_UNREACHABLE;
-}
-
-LJLIB_CF(table_maxn)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- TValue *array = tvref(t->array);
- Node *node;
- lua_Number m = 0;
- ptrdiff_t i;
- for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--)
- if (!tvisnil(&array[i])) {
- m = (lua_Number)(int32_t)i;
- break;
- }
- node = noderef(t->node);
- for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
- if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) {
- lua_Number n = numberVnum(&node[i].key);
- if (n > m) m = n;
- }
- setnumV(L->top-1, m);
- return 1;
-}
-
-LJLIB_CF(table_insert) LJLIB_REC(.)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- int32_t n, i = (int32_t)lj_tab_len(t) + 1;
- int nargs = (int)((char *)L->top - (char *)L->base);
- if (nargs != 2*sizeof(TValue)) {
- if (nargs != 3*sizeof(TValue))
- lj_err_caller(L, LJ_ERR_TABINS);
- /* NOBARRIER: This just moves existing elements around. */
- for (n = lj_lib_checkint(L, 2); i > n; i--) {
- /* The set may invalidate the get pointer, so need to do it first! */
- TValue *dst = lj_tab_setint(L, t, i);
- cTValue *src = lj_tab_getint(t, i-1);
- if (src) {
- copyTV(L, dst, src);
- } else {
- setnilV(dst);
- }
- }
- i = n;
- }
- {
- TValue *dst = lj_tab_setint(L, t, i);
- copyTV(L, dst, L->top-1); /* Set new value. */
- lj_gc_barriert(L, t, dst);
- }
- return 0;
-}
-
-LJLIB_CF(table_remove) LJLIB_REC(.)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- int32_t e = (int32_t)lj_tab_len(t);
- int32_t pos = lj_lib_optint(L, 2, e);
- if (!(1 <= pos && pos <= e)) /* Nothing to remove? */
- return 0;
- lua_rawgeti(L, 1, pos); /* Get previous value. */
- /* NOBARRIER: This just moves existing elements around. */
- for (; pos < e; pos++) {
- cTValue *src = lj_tab_getint(t, pos+1);
- TValue *dst = lj_tab_setint(L, t, pos);
- if (src) {
- copyTV(L, dst, src);
- } else {
- setnilV(dst);
- }
- }
- setnilV(lj_tab_setint(L, t, e)); /* Remove (last) value. */
- return 1; /* Return previous value. */
-}
-
-LJLIB_CF(table_concat)
-{
- luaL_Buffer b;
- GCtab *t = lj_lib_checktab(L, 1);
- GCstr *sep = lj_lib_optstr(L, 2);
- MSize seplen = sep ? sep->len : 0;
- int32_t i = lj_lib_optint(L, 3, 1);
- int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ?
- lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t);
- luaL_buffinit(L, &b);
- if (i <= e) {
- for (;;) {
- cTValue *o;
- lua_rawgeti(L, 1, i);
- o = L->top-1;
- if (!(tvisstr(o) || tvisnumber(o)))
- lj_err_callerv(L, LJ_ERR_TABCAT, lj_typename(o), i);
- luaL_addvalue(&b);
- if (i++ == e) break;
- if (seplen)
- luaL_addlstring(&b, strdata(sep), seplen);
- }
- }
- luaL_pushresult(&b);
- return 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-static void set2(lua_State *L, int i, int j)
-{
- lua_rawseti(L, 1, i);
- lua_rawseti(L, 1, j);
-}
-
-static int sort_comp(lua_State *L, int a, int b)
-{
- if (!lua_isnil(L, 2)) { /* function? */
- int res;
- lua_pushvalue(L, 2);
- lua_pushvalue(L, a-1); /* -1 to compensate function */
- lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */
- lua_call(L, 2, 1);
- res = lua_toboolean(L, -1);
- lua_pop(L, 1);
- return res;
- } else { /* a < b? */
- return lua_lessthan(L, a, b);
- }
-}
-
-static void auxsort(lua_State *L, int l, int u)
-{
- while (l < u) { /* for tail recursion */
- int i, j;
- /* sort elements a[l], a[(l+u)/2] and a[u] */
- lua_rawgeti(L, 1, l);
- lua_rawgeti(L, 1, u);
- if (sort_comp(L, -1, -2)) /* a[u] < a[l]? */
- set2(L, l, u); /* swap a[l] - a[u] */
- else
- lua_pop(L, 2);
- if (u-l == 1) break; /* only 2 elements */
- i = (l+u)/2;
- lua_rawgeti(L, 1, i);
- lua_rawgeti(L, 1, l);
- if (sort_comp(L, -2, -1)) { /* a[i]<a[l]? */
- set2(L, i, l);
- } else {
- lua_pop(L, 1); /* remove a[l] */
- lua_rawgeti(L, 1, u);
- if (sort_comp(L, -1, -2)) /* a[u]<a[i]? */
- set2(L, i, u);
- else
- lua_pop(L, 2);
- }
- if (u-l == 2) break; /* only 3 elements */
- lua_rawgeti(L, 1, i); /* Pivot */
- lua_pushvalue(L, -1);
- lua_rawgeti(L, 1, u-1);
- set2(L, i, u-1);
- /* a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2 */
- i = l; j = u-1;
- for (;;) { /* invariant: a[l..i] <= P <= a[j..u] */
- /* repeat ++i until a[i] >= P */
- while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
- if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT);
- lua_pop(L, 1); /* remove a[i] */
- }
- /* repeat --j until a[j] <= P */
- while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
- if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT);
- lua_pop(L, 1); /* remove a[j] */
- }
- if (j<i) {
- lua_pop(L, 3); /* pop pivot, a[i], a[j] */
- break;
- }
- set2(L, i, j);
- }
- lua_rawgeti(L, 1, u-1);
- lua_rawgeti(L, 1, i);
- set2(L, u-1, i); /* swap pivot (a[u-1]) with a[i] */
- /* a[l..i-1] <= a[i] == P <= a[i+1..u] */
- /* adjust so that smaller half is in [j..i] and larger one in [l..u] */
- if (i-l < u-i) {
- j=l; i=i-1; l=i+2;
- } else {
- j=i+1; i=u; u=j-2;
- }
- auxsort(L, j, i); /* call recursively the smaller one */
- } /* repeat the routine for the larger one */
-}
-
-LJLIB_CF(table_sort)
-{
- GCtab *t = lj_lib_checktab(L, 1);
- int32_t n = (int32_t)lj_tab_len(t);
- lua_settop(L, 2);
- if (!tvisnil(L->base+1))
- lj_lib_checkfunc(L, 2);
- auxsort(L, 1, n);
- return 0;
-}
-
-#if LJ_52
-LJLIB_PUSH("n")
-LJLIB_CF(table_pack)
-{
- TValue *array, *base = L->base;
- MSize i, n = (uint32_t)(L->top - base);
- GCtab *t = lj_tab_new(L, n ? n+1 : 0, 1);
- /* NOBARRIER: The table is new (marked white). */
- setintV(lj_tab_setstr(L, t, strV(lj_lib_upvalue(L, 1))), (int32_t)n);
- for (array = tvref(t->array) + 1, i = 0; i < n; i++)
- copyTV(L, &array[i], &base[i]);
- settabV(L, base, t);
- L->top = base+1;
- lj_gc_check(L);
- return 1;
-}
-#endif
-
-/* ------------------------------------------------------------------------ */
-
-#include "lj_libdef.h"
-
-LUALIB_API int luaopen_table(lua_State *L)
-{
- LJ_LIB_REG(L, LUA_TABLIBNAME, table);
-#if LJ_52
- lua_getglobal(L, "unpack");
- lua_setfield(L, -2, "unpack");
-#endif
- return 1;
-}
-
+/*
+** Table library.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_table_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_table
+
+LJLIB_CF(table_foreachi)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCfunc *func = lj_lib_checkfunc(L, 2);
+ MSize i, n = lj_tab_len(t);
+ for (i = 1; i <= n; i++) {
+ cTValue *val;
+ setfuncV(L, L->top, func);
+ setintV(L->top+1, i);
+ val = lj_tab_getint(t, (int32_t)i);
+ if (val) { copyTV(L, L->top+2, val); } else { setnilV(L->top+2); }
+ L->top += 3;
+ lua_call(L, 2, 1);
+ if (!tvisnil(L->top-1))
+ return 1;
+ L->top--;
+ }
+ return 0;
+}
+
+LJLIB_CF(table_foreach)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCfunc *func = lj_lib_checkfunc(L, 2);
+ L->top = L->base+3;
+ setnilV(L->top-1);
+ while (lj_tab_next(L, t, L->top-1)) {
+ copyTV(L, L->top+2, L->top);
+ copyTV(L, L->top+1, L->top-1);
+ setfuncV(L, L->top, func);
+ L->top += 3;
+ lua_call(L, 2, 1);
+ if (!tvisnil(L->top-1))
+ return 1;
+ L->top--;
+ }
+ return 0;
+}
+
+LJLIB_ASM(table_getn) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_CF(table_maxn)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ TValue *array = tvref(t->array);
+ Node *node;
+ lua_Number m = 0;
+ ptrdiff_t i;
+ for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--)
+ if (!tvisnil(&array[i])) {
+ m = (lua_Number)(int32_t)i;
+ break;
+ }
+ node = noderef(t->node);
+ for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+ if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) {
+ lua_Number n = numberVnum(&node[i].key);
+ if (n > m) m = n;
+ }
+ setnumV(L->top-1, m);
+ return 1;
+}
+
+LJLIB_CF(table_insert) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n, i = (int32_t)lj_tab_len(t) + 1;
+ int nargs = (int)((char *)L->top - (char *)L->base);
+ if (nargs != 2*sizeof(TValue)) {
+ if (nargs != 3*sizeof(TValue))
+ lj_err_caller(L, LJ_ERR_TABINS);
+ /* NOBARRIER: This just moves existing elements around. */
+ for (n = lj_lib_checkint(L, 2); i > n; i--) {
+ /* The set may invalidate the get pointer, so need to do it first! */
+ TValue *dst = lj_tab_setint(L, t, i);
+ cTValue *src = lj_tab_getint(t, i-1);
+ if (src) {
+ copyTV(L, dst, src);
+ } else {
+ setnilV(dst);
+ }
+ }
+ i = n;
+ }
+ {
+ TValue *dst = lj_tab_setint(L, t, i);
+ copyTV(L, dst, L->top-1); /* Set new value. */
+ lj_gc_barriert(L, t, dst);
+ }
+ return 0;
+}
+
+LJLIB_CF(table_remove) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t e = (int32_t)lj_tab_len(t);
+ int32_t pos = lj_lib_optint(L, 2, e);
+ if (!(1 <= pos && pos <= e)) /* Nothing to remove? */
+ return 0;
+ lua_rawgeti(L, 1, pos); /* Get previous value. */
+ /* NOBARRIER: This just moves existing elements around. */
+ for (; pos < e; pos++) {
+ cTValue *src = lj_tab_getint(t, pos+1);
+ TValue *dst = lj_tab_setint(L, t, pos);
+ if (src) {
+ copyTV(L, dst, src);
+ } else {
+ setnilV(dst);
+ }
+ }
+ setnilV(lj_tab_setint(L, t, e)); /* Remove (last) value. */
+ return 1; /* Return previous value. */
+}
+
+LJLIB_CF(table_concat)
+{
+ luaL_Buffer b;
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCstr *sep = lj_lib_optstr(L, 2);
+ MSize seplen = sep ? sep->len : 0;
+ int32_t i = lj_lib_optint(L, 3, 1);
+ int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ?
+ lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t);
+ luaL_buffinit(L, &b);
+ if (i <= e) {
+ for (;;) {
+ cTValue *o;
+ lua_rawgeti(L, 1, i);
+ o = L->top-1;
+ if (!(tvisstr(o) || tvisnumber(o)))
+ lj_err_callerv(L, LJ_ERR_TABCAT, lj_typename(o), i);
+ luaL_addvalue(&b);
+ if (i++ == e) break;
+ if (seplen)
+ luaL_addlstring(&b, strdata(sep), seplen);
+ }
+ }
+ luaL_pushresult(&b);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void set2(lua_State *L, int i, int j)
+{
+ lua_rawseti(L, 1, i);
+ lua_rawseti(L, 1, j);
+}
+
+static int sort_comp(lua_State *L, int a, int b)
+{
+ if (!lua_isnil(L, 2)) { /* function? */
+ int res;
+ lua_pushvalue(L, 2);
+ lua_pushvalue(L, a-1); /* -1 to compensate function */
+ lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */
+ lua_call(L, 2, 1);
+ res = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+ } else { /* a < b? */
+ return lua_lessthan(L, a, b);
+ }
+}
+
+static void auxsort(lua_State *L, int l, int u)
+{
+ while (l < u) { /* for tail recursion */
+ int i, j;
+ /* sort elements a[l], a[(l+u)/2] and a[u] */
+ lua_rawgeti(L, 1, l);
+ lua_rawgeti(L, 1, u);
+ if (sort_comp(L, -1, -2)) /* a[u] < a[l]? */
+ set2(L, l, u); /* swap a[l] - a[u] */
+ else
+ lua_pop(L, 2);
+ if (u-l == 1) break; /* only 2 elements */
+ i = (l+u)/2;
+ lua_rawgeti(L, 1, i);
+ lua_rawgeti(L, 1, l);
+ if (sort_comp(L, -2, -1)) { /* a[i]<a[l]? */
+ set2(L, i, l);
+ } else {
+ lua_pop(L, 1); /* remove a[l] */
+ lua_rawgeti(L, 1, u);
+ if (sort_comp(L, -1, -2)) /* a[u]<a[i]? */
+ set2(L, i, u);
+ else
+ lua_pop(L, 2);
+ }
+ if (u-l == 2) break; /* only 3 elements */
+ lua_rawgeti(L, 1, i); /* Pivot */
+ lua_pushvalue(L, -1);
+ lua_rawgeti(L, 1, u-1);
+ set2(L, i, u-1);
+ /* a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2 */
+ i = l; j = u-1;
+ for (;;) { /* invariant: a[l..i] <= P <= a[j..u] */
+ /* repeat ++i until a[i] >= P */
+ while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
+ if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT);
+ lua_pop(L, 1); /* remove a[i] */
+ }
+ /* repeat --j until a[j] <= P */
+ while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
+ if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT);
+ lua_pop(L, 1); /* remove a[j] */
+ }
+ if (j<i) {
+ lua_pop(L, 3); /* pop pivot, a[i], a[j] */
+ break;
+ }
+ set2(L, i, j);
+ }
+ lua_rawgeti(L, 1, u-1);
+ lua_rawgeti(L, 1, i);
+ set2(L, u-1, i); /* swap pivot (a[u-1]) with a[i] */
+ /* a[l..i-1] <= a[i] == P <= a[i+1..u] */
+ /* adjust so that smaller half is in [j..i] and larger one in [l..u] */
+ if (i-l < u-i) {
+ j=l; i=i-1; l=i+2;
+ } else {
+ j=i+1; i=u; u=j-2;
+ }
+ auxsort(L, j, i); /* call recursively the smaller one */
+ } /* repeat the routine for the larger one */
+}
+
+LJLIB_CF(table_sort)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n = (int32_t)lj_tab_len(t);
+ lua_settop(L, 2);
+ if (!tvisnil(L->base+1))
+ lj_lib_checkfunc(L, 2);
+ auxsort(L, 1, n);
+ return 0;
+}
+
+#if LJ_52
+LJLIB_PUSH("n")
+LJLIB_CF(table_pack)
+{
+ TValue *array, *base = L->base;
+ MSize i, n = (uint32_t)(L->top - base);
+ GCtab *t = lj_tab_new(L, n ? n+1 : 0, 1);
+ /* NOBARRIER: The table is new (marked white). */
+ setintV(lj_tab_setstr(L, t, strV(lj_lib_upvalue(L, 1))), (int32_t)n);
+ for (array = tvref(t->array) + 1, i = 0; i < n; i++)
+ copyTV(L, &array[i], &base[i]);
+ settabV(L, base, t);
+ L->top = base+1;
+ lj_gc_check(L);
+ return 1;
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_table(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_TABLIBNAME, table);
+#if LJ_52
+ lua_getglobal(L, "unpack");
+ lua_setfield(L, -2, "unpack");
+#endif
+ return 1;
+}
+
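For the table library restored above, a similar hedged sketch (same assumed headers; the program itself is hypothetical) drives table.sort() purely through the C API, so the in-place quicksort in auxsort() does the work, and the stack primitives used here (lua_createtable, lua_rawseti, lua_call, ...) are the ones implemented in the lj_api.c diff further below.

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  lua_createtable(L, 3, 0);              /* t = {} with array hint 3 */
  lua_pushnumber(L, 3); lua_rawseti(L, -2, 1);
  lua_pushnumber(L, 1); lua_rawseti(L, -2, 2);
  lua_pushnumber(L, 2); lua_rawseti(L, -2, 3);
  lua_getglobal(L, "table");
  lua_getfield(L, -1, "sort");
  lua_pushvalue(L, -3);                  /* argument: t */
  lua_call(L, 1, 0);                     /* table.sort(t) -> auxsort() */
  lua_pop(L, 1);                         /* drop the table module */
  lua_rawgeti(L, -1, 1);                 /* t[1] is now 1 after sorting */
  printf("%g\n", lua_tonumber(L, -1));
  lua_close(L);
  return 0;
}
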
diff --git a/3rdparty/lua/src/lj_alloc.c b/3rdparty/lua/src/lj_alloc.c
index 7c7ec67..8f285d1 100644
--- a/3rdparty/lua/src/lj_alloc.c
+++ b/3rdparty/lua/src/lj_alloc.c
@@ -177,40 +177,32 @@ static LJ_AINLINE int CALL_MUNMAP(void *ptr, size_t size)
#if LJ_64
/* 64 bit mode needs special support for allocating memory in the lower 2GB. */
-#if defined(MAP_32BIT)
+#if LJ_TARGET_LINUX
-#if defined(__sun__)
-#define MMAP_REGION_START ((uintptr_t)0x1000)
-#else
/* Actually this only gives us max. 1GB in current Linux kernels. */
-#define MMAP_REGION_START ((uintptr_t)0)
-#endif
-
static LJ_AINLINE void *CALL_MMAP(size_t size)
{
int olderr = errno;
- void *ptr = mmap((void *)MMAP_REGION_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
+ void *ptr = mmap(NULL, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
errno = olderr;
return ptr;
}
-#elif LJ_TARGET_OSX || LJ_TARGET_PS4 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__sun__)
+#elif LJ_TARGET_OSX || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__) || defined(__sun__)
/* OSX and FreeBSD mmap() use a naive first-fit linear search.
** That's perfect for us. Except that -pagezero_size must be set for OSX,
** otherwise the lower 4GB are blocked. And the 32GB RLIMIT_DATA needs
** to be reduced to 250MB on FreeBSD.
*/
-#if LJ_TARGET_OSX || defined(__DragonFly__)
+#if LJ_TARGET_OSX
#define MMAP_REGION_START ((uintptr_t)0x10000)
-#elif LJ_TARGET_PS4
-#define MMAP_REGION_START ((uintptr_t)0x4000)
#else
#define MMAP_REGION_START ((uintptr_t)0x10000000)
#endif
#define MMAP_REGION_END ((uintptr_t)0x80000000)
-#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/resource.h>
#endif
@@ -220,7 +212,7 @@ static LJ_AINLINE void *CALL_MMAP(size_t size)
/* Hint for next allocation. Doesn't need to be thread-safe. */
static uintptr_t alloc_hint = MMAP_REGION_START;
int retry = 0;
-#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
static int rlimit_modified = 0;
if (LJ_UNLIKELY(rlimit_modified == 0)) {
struct rlimit rlim;
@@ -238,7 +230,7 @@ static LJ_AINLINE void *CALL_MMAP(size_t size)
return p;
}
if (p != CMFAIL) munmap(p, size);
-#if defined(__sun__) || defined(__DragonFly__)
+#ifdef __sun__
alloc_hint += 0x1000000; /* Need near-exhaustive linear scan. */
if (alloc_hint + size < MMAP_REGION_END) continue;
#endif
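The hunk above is about mapping allocator memory in the low 2GB on 64-bit targets. As a standalone, Linux-only sketch (illustrative; availability of MAP_32BIT is assumed, and this is not LuaJIT code), the same kind of request from CALL_MMAP() looks like this, including the errno save/restore the allocator performs:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
  size_t size = 1 << 20;                 /* one 1 MB chunk */
  int olderr = errno;                    /* CALL_MMAP() preserves errno */
  void *p = mmap(NULL, size, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS|MAP_32BIT, -1, 0);
  errno = olderr;
  if (p == MAP_FAILED) return 1;
  printf("mapped at %p, below 2GB: %d\n",
         p, (uintptr_t)p < ((uintptr_t)1 << 31));
  munmap(p, size);
  return 0;
}
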
diff --git a/3rdparty/lua/src/lj_alloc.h b/3rdparty/lua/src/lj_alloc.h
index 58255f5..f87a7cf 100644
--- a/3rdparty/lua/src/lj_alloc.h
+++ b/3rdparty/lua/src/lj_alloc.h
@@ -1,17 +1,17 @@
-/*
-** Bundled memory allocator.
-** Donated to the public domain.
-*/
-
-#ifndef _LJ_ALLOC_H
-#define _LJ_ALLOC_H
-
-#include "lj_def.h"
-
-#ifndef LUAJIT_USE_SYSMALLOC
-LJ_FUNC void *lj_alloc_create(void);
-LJ_FUNC void lj_alloc_destroy(void *msp);
-LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
-#endif
-
-#endif
+/*
+** Bundled memory allocator.
+** Donated to the public domain.
+*/
+
+#ifndef _LJ_ALLOC_H
+#define _LJ_ALLOC_H
+
+#include "lj_def.h"
+
+#ifndef LUAJIT_USE_SYSMALLOC
+LJ_FUNC void *lj_alloc_create(void);
+LJ_FUNC void lj_alloc_destroy(void *msp);
+LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
+#endif
+
+#endif
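lj_alloc_f() declared above has the standard lua_Alloc shape, so it can also be observed from the embedding side. A hedged sketch (assuming a default build, i.e. LUAJIT_USE_SYSMALLOC not defined) fetches the registered allocator with lua_getallocf() -- the API function visible in the lj_api.c diff below -- and exercises the malloc-like and free-like halves of the lua_Alloc contract directly:

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int main(void)
{
  lua_State *L = luaL_newstate();     /* state backed by the bundled allocator */
  void *ud;
  lua_Alloc f = lua_getallocf(L, &ud);
  void *p = f(ud, NULL, 0, 64);       /* ptr=NULL, osize=0, nsize=64: allocate */
  if (p)
    printf("allocated %p via the registered lua_Alloc\n", p);
  f(ud, p, 64, 0);                    /* nsize=0: free the block again */
  lua_close(L);
  return 0;
}
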
diff --git a/3rdparty/lua/src/lj_api.c b/3rdparty/lua/src/lj_api.c
index 165aabf..edb2d62 100644
--- a/3rdparty/lua/src/lj_api.c
+++ b/3rdparty/lua/src/lj_api.c
@@ -1,1200 +1,1200 @@
-/*
-** Public Lua/C API.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_api_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_func.h"
-#include "lj_udata.h"
-#include "lj_meta.h"
-#include "lj_state.h"
-#include "lj_bc.h"
-#include "lj_frame.h"
-#include "lj_trace.h"
-#include "lj_vm.h"
-#include "lj_strscan.h"
-
-/* -- Common helper functions --------------------------------------------- */
-
-#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base))
-#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L))
-
-static TValue *index2adr(lua_State *L, int idx)
-{
- if (idx > 0) {
- TValue *o = L->base + (idx - 1);
- return o < L->top ? o : niltv(L);
- } else if (idx > LUA_REGISTRYINDEX) {
- api_check(L, idx != 0 && -idx <= L->top - L->base);
- return L->top + idx;
- } else if (idx == LUA_GLOBALSINDEX) {
- TValue *o = &G(L)->tmptv;
- settabV(L, o, tabref(L->env));
- return o;
- } else if (idx == LUA_REGISTRYINDEX) {
- return registry(L);
- } else {
- GCfunc *fn = curr_func(L);
- api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn));
- if (idx == LUA_ENVIRONINDEX) {
- TValue *o = &G(L)->tmptv;
- settabV(L, o, tabref(fn->c.env));
- return o;
- } else {
- idx = LUA_GLOBALSINDEX - idx;
- return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L);
- }
- }
-}
-
-static TValue *stkindex2adr(lua_State *L, int idx)
-{
- if (idx > 0) {
- TValue *o = L->base + (idx - 1);
- return o < L->top ? o : niltv(L);
- } else {
- api_check(L, idx != 0 && -idx <= L->top - L->base);
- return L->top + idx;
- }
-}
-
-static GCtab *getcurrenv(lua_State *L)
-{
- GCfunc *fn = curr_func(L);
- return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env);
-}
-
-/* -- Miscellaneous API functions ----------------------------------------- */
-
-LUA_API int lua_status(lua_State *L)
-{
- return L->status;
-}
-
-LUA_API int lua_checkstack(lua_State *L, int size)
-{
- if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) {
- return 0; /* Stack overflow. */
- } else if (size > 0) {
- lj_state_checkstack(L, (MSize)size);
- }
- return 1;
-}
-
-LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg)
-{
- if (!lua_checkstack(L, size))
- lj_err_callerv(L, LJ_ERR_STKOVM, msg);
-}
-
-LUA_API void lua_xmove(lua_State *from, lua_State *to, int n)
-{
- TValue *f, *t;
- if (from == to) return;
- api_checknelems(from, n);
- api_check(from, G(from) == G(to));
- lj_state_checkstack(to, (MSize)n);
- f = from->top;
- t = to->top = to->top + n;
- while (--n >= 0) copyTV(to, --t, --f);
- from->top = f;
-}
-
-/* -- Stack manipulation -------------------------------------------------- */
-
-LUA_API int lua_gettop(lua_State *L)
-{
- return (int)(L->top - L->base);
-}
-
-LUA_API void lua_settop(lua_State *L, int idx)
-{
- if (idx >= 0) {
- api_check(L, idx <= tvref(L->maxstack) - L->base);
- if (L->base + idx > L->top) {
- if (L->base + idx >= tvref(L->maxstack))
- lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base));
- do { setnilV(L->top++); } while (L->top < L->base + idx);
- } else {
- L->top = L->base + idx;
- }
- } else {
- api_check(L, -(idx+1) <= (L->top - L->base));
- L->top += idx+1; /* Shrinks top (idx < 0). */
- }
-}
-
-LUA_API void lua_remove(lua_State *L, int idx)
-{
- TValue *p = stkindex2adr(L, idx);
- api_checkvalidindex(L, p);
- while (++p < L->top) copyTV(L, p-1, p);
- L->top--;
-}
-
-LUA_API void lua_insert(lua_State *L, int idx)
-{
- TValue *q, *p = stkindex2adr(L, idx);
- api_checkvalidindex(L, p);
- for (q = L->top; q > p; q--) copyTV(L, q, q-1);
- copyTV(L, p, L->top);
-}
-
-LUA_API void lua_replace(lua_State *L, int idx)
-{
- api_checknelems(L, 1);
- if (idx == LUA_GLOBALSINDEX) {
- api_check(L, tvistab(L->top-1));
- /* NOBARRIER: A thread (i.e. L) is never black. */
- setgcref(L->env, obj2gco(tabV(L->top-1)));
- } else if (idx == LUA_ENVIRONINDEX) {
- GCfunc *fn = curr_func(L);
- if (fn->c.gct != ~LJ_TFUNC)
- lj_err_msg(L, LJ_ERR_NOENV);
- api_check(L, tvistab(L->top-1));
- setgcref(fn->c.env, obj2gco(tabV(L->top-1)));
- lj_gc_barrier(L, fn, L->top-1);
- } else {
- TValue *o = index2adr(L, idx);
- api_checkvalidindex(L, o);
- copyTV(L, o, L->top-1);
- if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */
- lj_gc_barrier(L, curr_func(L), L->top-1);
- }
- L->top--;
-}
-
-LUA_API void lua_pushvalue(lua_State *L, int idx)
-{
- copyTV(L, L->top, index2adr(L, idx));
- incr_top(L);
-}
-
-/* -- Stack getters ------------------------------------------------------- */
-
-LUA_API int lua_type(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- if (tvisnumber(o)) {
- return LUA_TNUMBER;
-#if LJ_64
- } else if (tvislightud(o)) {
- return LUA_TLIGHTUSERDATA;
-#endif
- } else if (o == niltv(L)) {
- return LUA_TNONE;
- } else { /* Magic internal/external tag conversion. ORDER LJ_T */
- uint32_t t = ~itype(o);
-#if LJ_64
- int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u);
-#else
- int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u);
-#endif
- lua_assert(tt != LUA_TNIL || tvisnil(o));
- return tt;
- }
-}
-
-LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt)
-{
- if (lua_type(L, idx) != tt)
- lj_err_argt(L, idx, tt);
-}
-
-LUALIB_API void luaL_checkany(lua_State *L, int idx)
-{
- if (index2adr(L, idx) == niltv(L))
- lj_err_arg(L, idx, LJ_ERR_NOVAL);
-}
-
-LUA_API const char *lua_typename(lua_State *L, int t)
-{
- UNUSED(L);
- return lj_obj_typename[t+1];
-}
-
-LUA_API int lua_iscfunction(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- return tvisfunc(o) && !isluafunc(funcV(o));
-}
-
-LUA_API int lua_isnumber(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- return (tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), &tmp)));
-}
-
-LUA_API int lua_isstring(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- return (tvisstr(o) || tvisnumber(o));
-}
-
-LUA_API int lua_isuserdata(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- return (tvisudata(o) || tvislightud(o));
-}
-
-LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2)
-{
- cTValue *o1 = index2adr(L, idx1);
- cTValue *o2 = index2adr(L, idx2);
- return (o1 == niltv(L) || o2 == niltv(L)) ? 0 : lj_obj_equal(o1, o2);
-}
-
-LUA_API int lua_equal(lua_State *L, int idx1, int idx2)
-{
- cTValue *o1 = index2adr(L, idx1);
- cTValue *o2 = index2adr(L, idx2);
- if (tvisint(o1) && tvisint(o2)) {
- return intV(o1) == intV(o2);
- } else if (tvisnumber(o1) && tvisnumber(o2)) {
- return numberVnum(o1) == numberVnum(o2);
- } else if (itype(o1) != itype(o2)) {
- return 0;
- } else if (tvispri(o1)) {
- return o1 != niltv(L) && o2 != niltv(L);
-#if LJ_64
- } else if (tvislightud(o1)) {
- return o1->u64 == o2->u64;
-#endif
- } else if (gcrefeq(o1->gcr, o2->gcr)) {
- return 1;
- } else if (!tvistabud(o1)) {
- return 0;
- } else {
- TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0);
- if ((uintptr_t)base <= 1) {
- return (int)(uintptr_t)base;
- } else {
- L->top = base+2;
- lj_vm_call(L, base, 1+1);
- L->top -= 2;
- return tvistruecond(L->top+1);
- }
- }
-}
-
-LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2)
-{
- cTValue *o1 = index2adr(L, idx1);
- cTValue *o2 = index2adr(L, idx2);
- if (o1 == niltv(L) || o2 == niltv(L)) {
- return 0;
- } else if (tvisint(o1) && tvisint(o2)) {
- return intV(o1) < intV(o2);
- } else if (tvisnumber(o1) && tvisnumber(o2)) {
- return numberVnum(o1) < numberVnum(o2);
- } else {
- TValue *base = lj_meta_comp(L, o1, o2, 0);
- if ((uintptr_t)base <= 1) {
- return (int)(uintptr_t)base;
- } else {
- L->top = base+2;
- lj_vm_call(L, base, 1+1);
- L->top -= 2;
- return tvistruecond(L->top+1);
- }
- }
-}
-
-LUA_API lua_Number lua_tonumber(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- if (LJ_LIKELY(tvisnumber(o)))
- return numberVnum(o);
- else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp))
- return numV(&tmp);
- else
- return 0;
-}
-
-LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- if (LJ_LIKELY(tvisnumber(o)))
- return numberVnum(o);
- else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp)))
- lj_err_argt(L, idx, LUA_TNUMBER);
- return numV(&tmp);
-}
-
-LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- if (LJ_LIKELY(tvisnumber(o)))
- return numberVnum(o);
- else if (tvisnil(o))
- return def;
- else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp)))
- lj_err_argt(L, idx, LUA_TNUMBER);
- return numV(&tmp);
-}
-
-LUA_API lua_Integer lua_tointeger(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- lua_Number n;
- if (LJ_LIKELY(tvisint(o))) {
- return intV(o);
- } else if (LJ_LIKELY(tvisnum(o))) {
- n = numV(o);
- } else {
- if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
- return 0;
- if (tvisint(&tmp))
- return (lua_Integer)intV(&tmp);
- n = numV(&tmp);
- }
-#if LJ_64
- return (lua_Integer)n;
-#else
- return lj_num2int(n);
-#endif
-}
-
-LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- lua_Number n;
- if (LJ_LIKELY(tvisint(o))) {
- return intV(o);
- } else if (LJ_LIKELY(tvisnum(o))) {
- n = numV(o);
- } else {
- if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
- lj_err_argt(L, idx, LUA_TNUMBER);
- if (tvisint(&tmp))
- return (lua_Integer)intV(&tmp);
- n = numV(&tmp);
- }
-#if LJ_64
- return (lua_Integer)n;
-#else
- return lj_num2int(n);
-#endif
-}
-
-LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def)
-{
- cTValue *o = index2adr(L, idx);
- TValue tmp;
- lua_Number n;
- if (LJ_LIKELY(tvisint(o))) {
- return intV(o);
- } else if (LJ_LIKELY(tvisnum(o))) {
- n = numV(o);
- } else if (tvisnil(o)) {
- return def;
- } else {
- if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
- lj_err_argt(L, idx, LUA_TNUMBER);
- if (tvisint(&tmp))
- return (lua_Integer)intV(&tmp);
- n = numV(&tmp);
- }
-#if LJ_64
- return (lua_Integer)n;
-#else
- return lj_num2int(n);
-#endif
-}
-
-LUA_API int lua_toboolean(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- return tvistruecond(o);
-}
-
-LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len)
-{
- TValue *o = index2adr(L, idx);
- GCstr *s;
- if (LJ_LIKELY(tvisstr(o))) {
- s = strV(o);
- } else if (tvisnumber(o)) {
- lj_gc_check(L);
- o = index2adr(L, idx); /* GC may move the stack. */
- s = lj_str_fromnumber(L, o);
- setstrV(L, o, s);
- } else {
- if (len != NULL) *len = 0;
- return NULL;
- }
- if (len != NULL) *len = s->len;
- return strdata(s);
-}
-
-LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len)
-{
- TValue *o = index2adr(L, idx);
- GCstr *s;
- if (LJ_LIKELY(tvisstr(o))) {
- s = strV(o);
- } else if (tvisnumber(o)) {
- lj_gc_check(L);
- o = index2adr(L, idx); /* GC may move the stack. */
- s = lj_str_fromnumber(L, o);
- setstrV(L, o, s);
- } else {
- lj_err_argt(L, idx, LUA_TSTRING);
- }
- if (len != NULL) *len = s->len;
- return strdata(s);
-}
-
-LUALIB_API const char *luaL_optlstring(lua_State *L, int idx,
- const char *def, size_t *len)
-{
- TValue *o = index2adr(L, idx);
- GCstr *s;
- if (LJ_LIKELY(tvisstr(o))) {
- s = strV(o);
- } else if (tvisnil(o)) {
- if (len != NULL) *len = def ? strlen(def) : 0;
- return def;
- } else if (tvisnumber(o)) {
- lj_gc_check(L);
- o = index2adr(L, idx); /* GC may move the stack. */
- s = lj_str_fromnumber(L, o);
- setstrV(L, o, s);
- } else {
- lj_err_argt(L, idx, LUA_TSTRING);
- }
- if (len != NULL) *len = s->len;
- return strdata(s);
-}
-
-LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def,
- const char *const lst[])
-{
- ptrdiff_t i;
- const char *s = lua_tolstring(L, idx, NULL);
- if (s == NULL && (s = def) == NULL)
- lj_err_argt(L, idx, LUA_TSTRING);
- for (i = 0; lst[i]; i++)
- if (strcmp(lst[i], s) == 0)
- return (int)i;
- lj_err_argv(L, idx, LJ_ERR_INVOPTM, s);
-}
-
-LUA_API size_t lua_objlen(lua_State *L, int idx)
-{
- TValue *o = index2adr(L, idx);
- if (tvisstr(o)) {
- return strV(o)->len;
- } else if (tvistab(o)) {
- return (size_t)lj_tab_len(tabV(o));
- } else if (tvisudata(o)) {
- return udataV(o)->len;
- } else if (tvisnumber(o)) {
- GCstr *s = lj_str_fromnumber(L, o);
- setstrV(L, o, s);
- return s->len;
- } else {
- return 0;
- }
-}
-
-LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- if (tvisfunc(o)) {
- BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns));
- if (op == BC_FUNCC || op == BC_FUNCCW)
- return funcV(o)->c.f;
- }
- return NULL;
-}
-
-LUA_API void *lua_touserdata(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- if (tvisudata(o))
- return uddata(udataV(o));
- else if (tvislightud(o))
- return lightudV(o);
- else
- return NULL;
-}
-
-LUA_API lua_State *lua_tothread(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- return (!tvisthread(o)) ? NULL : threadV(o);
-}
-
-LUA_API const void *lua_topointer(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- if (tvisudata(o))
- return uddata(udataV(o));
- else if (tvislightud(o))
- return lightudV(o);
- else if (tviscdata(o))
- return cdataptr(cdataV(o));
- else if (tvisgcv(o))
- return gcV(o);
- else
- return NULL;
-}
-
-/* -- Stack setters (object creation) ------------------------------------- */
-
-LUA_API void lua_pushnil(lua_State *L)
-{
- setnilV(L->top);
- incr_top(L);
-}
-
-LUA_API void lua_pushnumber(lua_State *L, lua_Number n)
-{
- setnumV(L->top, n);
- if (LJ_UNLIKELY(tvisnan(L->top)))
- setnanV(L->top); /* Canonicalize injected NaNs. */
- incr_top(L);
-}
-
-LUA_API void lua_pushinteger(lua_State *L, lua_Integer n)
-{
- setintptrV(L->top, n);
- incr_top(L);
-}
-
-LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len)
-{
- GCstr *s;
- lj_gc_check(L);
- s = lj_str_new(L, str, len);
- setstrV(L, L->top, s);
- incr_top(L);
-}
-
-LUA_API void lua_pushstring(lua_State *L, const char *str)
-{
- if (str == NULL) {
- setnilV(L->top);
- } else {
- GCstr *s;
- lj_gc_check(L);
- s = lj_str_newz(L, str);
- setstrV(L, L->top, s);
- }
- incr_top(L);
-}
-
-LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt,
- va_list argp)
-{
- lj_gc_check(L);
- return lj_str_pushvf(L, fmt, argp);
-}
-
-LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...)
-{
- const char *ret;
- va_list argp;
- lj_gc_check(L);
- va_start(argp, fmt);
- ret = lj_str_pushvf(L, fmt, argp);
- va_end(argp);
- return ret;
-}
-
-LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n)
-{
- GCfunc *fn;
- lj_gc_check(L);
- api_checknelems(L, n);
- fn = lj_func_newC(L, (MSize)n, getcurrenv(L));
- fn->c.f = f;
- L->top -= n;
- while (n--)
- copyTV(L, &fn->c.upvalue[n], L->top+n);
- setfuncV(L, L->top, fn);
- lua_assert(iswhite(obj2gco(fn)));
- incr_top(L);
-}
-
-LUA_API void lua_pushboolean(lua_State *L, int b)
-{
- setboolV(L->top, (b != 0));
- incr_top(L);
-}
-
-LUA_API void lua_pushlightuserdata(lua_State *L, void *p)
-{
- setlightudV(L->top, checklightudptr(L, p));
- incr_top(L);
-}
-
-LUA_API void lua_createtable(lua_State *L, int narray, int nrec)
-{
- GCtab *t;
- lj_gc_check(L);
- t = lj_tab_new(L, (uint32_t)(narray > 0 ? narray+1 : 0), hsize2hbits(nrec));
- settabV(L, L->top, t);
- incr_top(L);
-}
-
-LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname)
-{
- GCtab *regt = tabV(registry(L));
- TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname));
- if (tvisnil(tv)) {
- GCtab *mt = lj_tab_new(L, 0, 1);
- settabV(L, tv, mt);
- settabV(L, L->top++, mt);
- lj_gc_anybarriert(L, regt);
- return 1;
- } else {
- copyTV(L, L->top++, tv);
- return 0;
- }
-}
-
-LUA_API int lua_pushthread(lua_State *L)
-{
- setthreadV(L, L->top, L);
- incr_top(L);
- return (mainthread(G(L)) == L);
-}
-
-LUA_API lua_State *lua_newthread(lua_State *L)
-{
- lua_State *L1;
- lj_gc_check(L);
- L1 = lj_state_new(L);
- setthreadV(L, L->top, L1);
- incr_top(L);
- return L1;
-}
-
-LUA_API void *lua_newuserdata(lua_State *L, size_t size)
-{
- GCudata *ud;
- lj_gc_check(L);
- if (size > LJ_MAX_UDATA)
- lj_err_msg(L, LJ_ERR_UDATAOV);
- ud = lj_udata_new(L, (MSize)size, getcurrenv(L));
- setudataV(L, L->top, ud);
- incr_top(L);
- return uddata(ud);
-}
-
-LUA_API void lua_concat(lua_State *L, int n)
-{
- api_checknelems(L, n);
- if (n >= 2) {
- n--;
- do {
- TValue *top = lj_meta_cat(L, L->top-1, -n);
- if (top == NULL) {
- L->top -= n;
- break;
- }
- n -= (int)(L->top - top);
- L->top = top+2;
- lj_vm_call(L, top, 1+1);
- L->top--;
- copyTV(L, L->top-1, L->top);
- } while (--n > 0);
- } else if (n == 0) { /* Push empty string. */
- setstrV(L, L->top, &G(L)->strempty);
- incr_top(L);
- }
- /* else n == 1: nothing to do. */
-}
-
-/* -- Object getters ------------------------------------------------------ */
-
-LUA_API void lua_gettable(lua_State *L, int idx)
-{
- cTValue *v, *t = index2adr(L, idx);
- api_checkvalidindex(L, t);
- v = lj_meta_tget(L, t, L->top-1);
- if (v == NULL) {
- L->top += 2;
- lj_vm_call(L, L->top-2, 1+1);
- L->top -= 2;
- v = L->top+1;
- }
- copyTV(L, L->top-1, v);
-}
-
-LUA_API void lua_getfield(lua_State *L, int idx, const char *k)
-{
- cTValue *v, *t = index2adr(L, idx);
- TValue key;
- api_checkvalidindex(L, t);
- setstrV(L, &key, lj_str_newz(L, k));
- v = lj_meta_tget(L, t, &key);
- if (v == NULL) {
- L->top += 2;
- lj_vm_call(L, L->top-2, 1+1);
- L->top -= 2;
- v = L->top+1;
- }
- copyTV(L, L->top, v);
- incr_top(L);
-}
-
-LUA_API void lua_rawget(lua_State *L, int idx)
-{
- cTValue *t = index2adr(L, idx);
- api_check(L, tvistab(t));
- copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1));
-}
-
-LUA_API void lua_rawgeti(lua_State *L, int idx, int n)
-{
- cTValue *v, *t = index2adr(L, idx);
- api_check(L, tvistab(t));
- v = lj_tab_getint(tabV(t), n);
- if (v) {
- copyTV(L, L->top, v);
- } else {
- setnilV(L->top);
- }
- incr_top(L);
-}
-
-LUA_API int lua_getmetatable(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- GCtab *mt = NULL;
- if (tvistab(o))
- mt = tabref(tabV(o)->metatable);
- else if (tvisudata(o))
- mt = tabref(udataV(o)->metatable);
- else
- mt = tabref(basemt_obj(G(L), o));
- if (mt == NULL)
- return 0;
- settabV(L, L->top, mt);
- incr_top(L);
- return 1;
-}
-
-LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field)
-{
- if (lua_getmetatable(L, idx)) {
- cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field));
- if (tv && !tvisnil(tv)) {
- copyTV(L, L->top-1, tv);
- return 1;
- }
- L->top--;
- }
- return 0;
-}
-
-LUA_API void lua_getfenv(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- api_checkvalidindex(L, o);
- if (tvisfunc(o)) {
- settabV(L, L->top, tabref(funcV(o)->c.env));
- } else if (tvisudata(o)) {
- settabV(L, L->top, tabref(udataV(o)->env));
- } else if (tvisthread(o)) {
- settabV(L, L->top, tabref(threadV(o)->env));
- } else {
- setnilV(L->top);
- }
- incr_top(L);
-}
-
-LUA_API int lua_next(lua_State *L, int idx)
-{
- cTValue *t = index2adr(L, idx);
- int more;
- api_check(L, tvistab(t));
- more = lj_tab_next(L, tabV(t), L->top-1);
- if (more) {
- incr_top(L); /* Return new key and value slot. */
- } else { /* End of traversal. */
- L->top--; /* Remove key slot. */
- }
- return more;
-}
-
-LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n)
-{
- TValue *val;
- const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val);
- if (name) {
- copyTV(L, L->top, val);
- incr_top(L);
- }
- return name;
-}
-
-LUA_API void *lua_upvalueid(lua_State *L, int idx, int n)
-{
- GCfunc *fn = funcV(index2adr(L, idx));
- n--;
- api_check(L, (uint32_t)n < fn->l.nupvalues);
- return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
- (void *)&fn->c.upvalue[n];
-}
-
-LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2)
-{
- GCfunc *fn1 = funcV(index2adr(L, idx1));
- GCfunc *fn2 = funcV(index2adr(L, idx2));
- n1--; n2--;
- api_check(L, isluafunc(fn1) && (uint32_t)n1 < fn1->l.nupvalues);
- api_check(L, isluafunc(fn2) && (uint32_t)n2 < fn2->l.nupvalues);
- setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]);
- lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1]));
-}
-
-LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
-{
- cTValue *o = index2adr(L, idx);
- if (tvisudata(o)) {
- GCudata *ud = udataV(o);
- cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname));
- if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable))
- return uddata(ud);
- }
- lj_err_argtype(L, idx, tname);
- return NULL; /* unreachable */
-}
-
-/* -- Object setters ------------------------------------------------------ */
-
-LUA_API void lua_settable(lua_State *L, int idx)
-{
- TValue *o;
- cTValue *t = index2adr(L, idx);
- api_checknelems(L, 2);
- api_checkvalidindex(L, t);
- o = lj_meta_tset(L, t, L->top-2);
- if (o) {
- /* NOBARRIER: lj_meta_tset ensures the table is not black. */
- copyTV(L, o, L->top-1);
- L->top -= 2;
- } else {
- L->top += 3;
- copyTV(L, L->top-1, L->top-6);
- lj_vm_call(L, L->top-3, 0+1);
- L->top -= 3;
- }
-}
-
-LUA_API void lua_setfield(lua_State *L, int idx, const char *k)
-{
- TValue *o;
- TValue key;
- cTValue *t = index2adr(L, idx);
- api_checknelems(L, 1);
- api_checkvalidindex(L, t);
- setstrV(L, &key, lj_str_newz(L, k));
- o = lj_meta_tset(L, t, &key);
- if (o) {
- L->top--;
- /* NOBARRIER: lj_meta_tset ensures the table is not black. */
- copyTV(L, o, L->top);
- } else {
- L->top += 3;
- copyTV(L, L->top-1, L->top-6);
- lj_vm_call(L, L->top-3, 0+1);
- L->top -= 2;
- }
-}
-
-LUA_API void lua_rawset(lua_State *L, int idx)
-{
- GCtab *t = tabV(index2adr(L, idx));
- TValue *dst, *key;
- api_checknelems(L, 2);
- key = L->top-2;
- dst = lj_tab_set(L, t, key);
- copyTV(L, dst, key+1);
- lj_gc_anybarriert(L, t);
- L->top = key;
-}
-
-LUA_API void lua_rawseti(lua_State *L, int idx, int n)
-{
- GCtab *t = tabV(index2adr(L, idx));
- TValue *dst, *src;
- api_checknelems(L, 1);
- dst = lj_tab_setint(L, t, n);
- src = L->top-1;
- copyTV(L, dst, src);
- lj_gc_barriert(L, t, dst);
- L->top = src;
-}
-
-LUA_API int lua_setmetatable(lua_State *L, int idx)
-{
- global_State *g;
- GCtab *mt;
- cTValue *o = index2adr(L, idx);
- api_checknelems(L, 1);
- api_checkvalidindex(L, o);
- if (tvisnil(L->top-1)) {
- mt = NULL;
- } else {
- api_check(L, tvistab(L->top-1));
- mt = tabV(L->top-1);
- }
- g = G(L);
- if (tvistab(o)) {
- setgcref(tabV(o)->metatable, obj2gco(mt));
- if (mt)
- lj_gc_objbarriert(L, tabV(o), mt);
- } else if (tvisudata(o)) {
- setgcref(udataV(o)->metatable, obj2gco(mt));
- if (mt)
- lj_gc_objbarrier(L, udataV(o), mt);
- } else {
- /* Flush cache, since traces specialize to basemt. But not during __gc. */
- if (lj_trace_flushall(L))
- lj_err_caller(L, LJ_ERR_NOGCMM);
- if (tvisbool(o)) {
- /* NOBARRIER: basemt is a GC root. */
- setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt));
- setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt));
- } else {
- /* NOBARRIER: basemt is a GC root. */
- setgcref(basemt_obj(g, o), obj2gco(mt));
- }
- }
- L->top--;
- return 1;
-}
-
-LUA_API int lua_setfenv(lua_State *L, int idx)
-{
- cTValue *o = index2adr(L, idx);
- GCtab *t;
- api_checknelems(L, 1);
- api_checkvalidindex(L, o);
- api_check(L, tvistab(L->top-1));
- t = tabV(L->top-1);
- if (tvisfunc(o)) {
- setgcref(funcV(o)->c.env, obj2gco(t));
- } else if (tvisudata(o)) {
- setgcref(udataV(o)->env, obj2gco(t));
- } else if (tvisthread(o)) {
- setgcref(threadV(o)->env, obj2gco(t));
- } else {
- L->top--;
- return 0;
- }
- lj_gc_objbarrier(L, gcV(o), t);
- L->top--;
- return 1;
-}
-
-LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n)
-{
- cTValue *f = index2adr(L, idx);
- TValue *val;
- const char *name;
- api_checknelems(L, 1);
- name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val);
- if (name) {
- L->top--;
- copyTV(L, val, L->top);
- lj_gc_barrier(L, funcV(f), L->top);
- }
- return name;
-}
-
-/* -- Calls --------------------------------------------------------------- */
-
-LUA_API void lua_call(lua_State *L, int nargs, int nresults)
-{
- api_check(L, L->status == 0 || L->status == LUA_ERRERR);
- api_checknelems(L, nargs+1);
- lj_vm_call(L, L->top - nargs, nresults+1);
-}
-
-LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
-{
- global_State *g = G(L);
- uint8_t oldh = hook_save(g);
- ptrdiff_t ef;
- int status;
- api_check(L, L->status == 0 || L->status == LUA_ERRERR);
- api_checknelems(L, nargs+1);
- if (errfunc == 0) {
- ef = 0;
- } else {
- cTValue *o = stkindex2adr(L, errfunc);
- api_checkvalidindex(L, o);
- ef = savestack(L, o);
- }
- status = lj_vm_pcall(L, L->top - nargs, nresults+1, ef);
- if (status) hook_restore(g, oldh);
- return status;
-}
-
-static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud)
-{
- GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L));
- fn->c.f = func;
- setfuncV(L, L->top, fn);
- setlightudV(L->top+1, checklightudptr(L, ud));
- cframe_nres(L->cframe) = 1+0; /* Zero results. */
- L->top += 2;
- return L->top-1; /* Now call the newly allocated C function. */
-}
-
-LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
-{
- global_State *g = G(L);
- uint8_t oldh = hook_save(g);
- int status;
- api_check(L, L->status == 0 || L->status == LUA_ERRERR);
- status = lj_vm_cpcall(L, func, ud, cpcall);
- if (status) hook_restore(g, oldh);
- return status;
-}
-
-LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field)
-{
- if (luaL_getmetafield(L, idx, field)) {
- TValue *base = L->top--;
- copyTV(L, base, index2adr(L, idx));
- L->top = base+1;
- lj_vm_call(L, base, 1+1);
- return 1;
- }
- return 0;
-}
-
-/* -- Coroutine yield and resume ------------------------------------------ */
-
-LUA_API int lua_yield(lua_State *L, int nresults)
-{
- void *cf = L->cframe;
- global_State *g = G(L);
- if (cframe_canyield(cf)) {
- cf = cframe_raw(cf);
- if (!hook_active(g)) { /* Regular yield: move results down if needed. */
- cTValue *f = L->top - nresults;
- if (f > L->base) {
- TValue *t = L->base;
- while (--nresults >= 0) copyTV(L, t++, f++);
- L->top = t;
- }
- L->cframe = NULL;
- L->status = LUA_YIELD;
- return -1;
- } else { /* Yield from hook: add a pseudo-frame. */
- TValue *top = L->top;
- hook_leave(g);
- top->u64 = cframe_multres(cf);
- setcont(top+1, lj_cont_hook);
- setframe_pc(top+1, cframe_pc(cf)-1);
- setframe_gc(top+2, obj2gco(L));
- setframe_ftsz(top+2, (int)((char *)(top+3)-(char *)L->base)+FRAME_CONT);
- L->top = L->base = top+3;
-#if LJ_TARGET_X64
- lj_err_throw(L, LUA_YIELD);
-#else
- L->cframe = NULL;
- L->status = LUA_YIELD;
- lj_vm_unwind_c(cf, LUA_YIELD);
-#endif
- }
- }
- lj_err_msg(L, LJ_ERR_CYIELD);
- return 0; /* unreachable */
-}
-
-LUA_API int lua_resume(lua_State *L, int nargs)
-{
- if (L->cframe == NULL && L->status <= LUA_YIELD)
- return lj_vm_resume(L, L->top - nargs, 0, 0);
- L->top = L->base;
- setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP));
- incr_top(L);
- return LUA_ERRRUN;
-}
-
-/* -- GC and memory management -------------------------------------------- */
-
-LUA_API int lua_gc(lua_State *L, int what, int data)
-{
- global_State *g = G(L);
- int res = 0;
- switch (what) {
- case LUA_GCSTOP:
- g->gc.threshold = LJ_MAX_MEM;
- break;
- case LUA_GCRESTART:
- g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total;
- break;
- case LUA_GCCOLLECT:
- lj_gc_fullgc(L);
- break;
- case LUA_GCCOUNT:
- res = (int)(g->gc.total >> 10);
- break;
- case LUA_GCCOUNTB:
- res = (int)(g->gc.total & 0x3ff);
- break;
- case LUA_GCSTEP: {
- MSize a = (MSize)data << 10;
- g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0;
- while (g->gc.total >= g->gc.threshold)
- if (lj_gc_step(L) > 0) {
- res = 1;
- break;
- }
- break;
- }
- case LUA_GCSETPAUSE:
- res = (int)(g->gc.pause);
- g->gc.pause = (MSize)data;
- break;
- case LUA_GCSETSTEPMUL:
- res = (int)(g->gc.stepmul);
- g->gc.stepmul = (MSize)data;
- break;
- default:
- res = -1; /* Invalid option. */
- }
- return res;
-}
-
-LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud)
-{
- global_State *g = G(L);
- if (ud) *ud = g->allocd;
- return g->allocf;
-}
-
-LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud)
-{
- global_State *g = G(L);
- g->allocd = ud;
- g->allocf = f;
-}
-
+/*
+** Public Lua/C API.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_api_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#include "lj_frame.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+
+/* -- Common helper functions --------------------------------------------- */
+
+#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base))
+#define api_checkvalidindex(L, i) api_check(L, (i) != niltv(L))
+
+static TValue *index2adr(lua_State *L, int idx)
+{
+ if (idx > 0) {
+ TValue *o = L->base + (idx - 1);
+ return o < L->top ? o : niltv(L);
+ } else if (idx > LUA_REGISTRYINDEX) {
+ api_check(L, idx != 0 && -idx <= L->top - L->base);
+ return L->top + idx;
+ } else if (idx == LUA_GLOBALSINDEX) {
+ TValue *o = &G(L)->tmptv;
+ settabV(L, o, tabref(L->env));
+ return o;
+ } else if (idx == LUA_REGISTRYINDEX) {
+ return registry(L);
+ } else {
+ GCfunc *fn = curr_func(L);
+ api_check(L, fn->c.gct == ~LJ_TFUNC && !isluafunc(fn));
+ if (idx == LUA_ENVIRONINDEX) {
+ TValue *o = &G(L)->tmptv;
+ settabV(L, o, tabref(fn->c.env));
+ return o;
+ } else {
+ idx = LUA_GLOBALSINDEX - idx;
+ return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L);
+ }
+ }
+}
+
+static TValue *stkindex2adr(lua_State *L, int idx)
+{
+ if (idx > 0) {
+ TValue *o = L->base + (idx - 1);
+ return o < L->top ? o : niltv(L);
+ } else {
+ api_check(L, idx != 0 && -idx <= L->top - L->base);
+ return L->top + idx;
+ }
+}
+
+static GCtab *getcurrenv(lua_State *L)
+{
+ GCfunc *fn = curr_func(L);
+ return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env);
+}
+
+/* -- Miscellaneous API functions ----------------------------------------- */
+
+LUA_API int lua_status(lua_State *L)
+{
+ return L->status;
+}
+
+LUA_API int lua_checkstack(lua_State *L, int size)
+{
+ if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) {
+ return 0; /* Stack overflow. */
+ } else if (size > 0) {
+ lj_state_checkstack(L, (MSize)size);
+ }
+ return 1;
+}
+
+LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg)
+{
+ if (!lua_checkstack(L, size))
+ lj_err_callerv(L, LJ_ERR_STKOVM, msg);
+}
+
+LUA_API void lua_xmove(lua_State *from, lua_State *to, int n)
+{
+ TValue *f, *t;
+ if (from == to) return;
+ api_checknelems(from, n);
+ api_check(from, G(from) == G(to));
+ lj_state_checkstack(to, (MSize)n);
+ f = from->top;
+ t = to->top = to->top + n;
+ while (--n >= 0) copyTV(to, --t, --f);
+ from->top = f;
+}
+
+/* -- Stack manipulation -------------------------------------------------- */
+
+LUA_API int lua_gettop(lua_State *L)
+{
+ return (int)(L->top - L->base);
+}
+
+LUA_API void lua_settop(lua_State *L, int idx)
+{
+ if (idx >= 0) {
+ api_check(L, idx <= tvref(L->maxstack) - L->base);
+ if (L->base + idx > L->top) {
+ if (L->base + idx >= tvref(L->maxstack))
+ lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base));
+ do { setnilV(L->top++); } while (L->top < L->base + idx);
+ } else {
+ L->top = L->base + idx;
+ }
+ } else {
+ api_check(L, -(idx+1) <= (L->top - L->base));
+ L->top += idx+1; /* Shrinks top (idx < 0). */
+ }
+}
+
+LUA_API void lua_remove(lua_State *L, int idx)
+{
+ TValue *p = stkindex2adr(L, idx);
+ api_checkvalidindex(L, p);
+ while (++p < L->top) copyTV(L, p-1, p);
+ L->top--;
+}
+
+LUA_API void lua_insert(lua_State *L, int idx)
+{
+ TValue *q, *p = stkindex2adr(L, idx);
+ api_checkvalidindex(L, p);
+ for (q = L->top; q > p; q--) copyTV(L, q, q-1);
+ copyTV(L, p, L->top);
+}
+
+LUA_API void lua_replace(lua_State *L, int idx)
+{
+ api_checknelems(L, 1);
+ if (idx == LUA_GLOBALSINDEX) {
+ api_check(L, tvistab(L->top-1));
+ /* NOBARRIER: A thread (i.e. L) is never black. */
+ setgcref(L->env, obj2gco(tabV(L->top-1)));
+ } else if (idx == LUA_ENVIRONINDEX) {
+ GCfunc *fn = curr_func(L);
+ if (fn->c.gct != ~LJ_TFUNC)
+ lj_err_msg(L, LJ_ERR_NOENV);
+ api_check(L, tvistab(L->top-1));
+ setgcref(fn->c.env, obj2gco(tabV(L->top-1)));
+ lj_gc_barrier(L, fn, L->top-1);
+ } else {
+ TValue *o = index2adr(L, idx);
+ api_checkvalidindex(L, o);
+ copyTV(L, o, L->top-1);
+ if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */
+ lj_gc_barrier(L, curr_func(L), L->top-1);
+ }
+ L->top--;
+}
+
+LUA_API void lua_pushvalue(lua_State *L, int idx)
+{
+ copyTV(L, L->top, index2adr(L, idx));
+ incr_top(L);
+}
+
+/* -- Stack getters ------------------------------------------------------- */
+
+LUA_API int lua_type(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisnumber(o)) {
+ return LUA_TNUMBER;
+#if LJ_64
+ } else if (tvislightud(o)) {
+ return LUA_TLIGHTUSERDATA;
+#endif
+ } else if (o == niltv(L)) {
+ return LUA_TNONE;
+ } else { /* Magic internal/external tag conversion. ORDER LJ_T */
+ uint32_t t = ~itype(o);
+#if LJ_64
+ int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u);
+#else
+ int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u);
+#endif
+ lua_assert(tt != LUA_TNIL || tvisnil(o));
+ return tt;
+ }
+}
+
+LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt)
+{
+ if (lua_type(L, idx) != tt)
+ lj_err_argt(L, idx, tt);
+}
+
+LUALIB_API void luaL_checkany(lua_State *L, int idx)
+{
+ if (index2adr(L, idx) == niltv(L))
+ lj_err_arg(L, idx, LJ_ERR_NOVAL);
+}
+
+LUA_API const char *lua_typename(lua_State *L, int t)
+{
+ UNUSED(L);
+ return lj_obj_typename[t+1];
+}
+
+LUA_API int lua_iscfunction(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return tvisfunc(o) && !isluafunc(funcV(o));
+}
+
+LUA_API int lua_isnumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ return (tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), &tmp)));
+}
+
+LUA_API int lua_isstring(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (tvisstr(o) || tvisnumber(o));
+}
+
+LUA_API int lua_isuserdata(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (tvisudata(o) || tvislightud(o));
+}
+
+LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ return (o1 == niltv(L) || o2 == niltv(L)) ? 0 : lj_obj_equal(o1, o2);
+}
+
+LUA_API int lua_equal(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ if (tvisint(o1) && tvisint(o2)) {
+ return intV(o1) == intV(o2);
+ } else if (tvisnumber(o1) && tvisnumber(o2)) {
+ return numberVnum(o1) == numberVnum(o2);
+ } else if (itype(o1) != itype(o2)) {
+ return 0;
+ } else if (tvispri(o1)) {
+ return o1 != niltv(L) && o2 != niltv(L);
+#if LJ_64
+ } else if (tvislightud(o1)) {
+ return o1->u64 == o2->u64;
+#endif
+ } else if (gcrefeq(o1->gcr, o2->gcr)) {
+ return 1;
+ } else if (!tvistabud(o1)) {
+ return 0;
+ } else {
+ TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0);
+ if ((uintptr_t)base <= 1) {
+ return (int)(uintptr_t)base;
+ } else {
+ L->top = base+2;
+ lj_vm_call(L, base, 1+1);
+ L->top -= 2;
+ return tvistruecond(L->top+1);
+ }
+ }
+}
+
+LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ if (o1 == niltv(L) || o2 == niltv(L)) {
+ return 0;
+ } else if (tvisint(o1) && tvisint(o2)) {
+ return intV(o1) < intV(o2);
+ } else if (tvisnumber(o1) && tvisnumber(o2)) {
+ return numberVnum(o1) < numberVnum(o2);
+ } else {
+ TValue *base = lj_meta_comp(L, o1, o2, 0);
+ if ((uintptr_t)base <= 1) {
+ return (int)(uintptr_t)base;
+ } else {
+ L->top = base+2;
+ lj_vm_call(L, base, 1+1);
+ L->top -= 2;
+ return tvistruecond(L->top+1);
+ }
+ }
+}
+
+LUA_API lua_Number lua_tonumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp))
+ return numV(&tmp);
+ else
+ return 0;
+}
+
+LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ return numV(&tmp);
+}
+
+LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (tvisnil(o))
+ return def;
+ else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ return numV(&tmp);
+}
+
+LUA_API lua_Integer lua_tointeger(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
+ return 0;
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else if (tvisnil(o)) {
+ return def;
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUA_API int lua_toboolean(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return tvistruecond(o);
+}
+
+LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ } else {
+ if (len != NULL) *len = 0;
+ return NULL;
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ } else {
+ lj_err_argt(L, idx, LUA_TSTRING);
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API const char *luaL_optlstring(lua_State *L, int idx,
+ const char *def, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnil(o)) {
+ if (len != NULL) *len = def ? strlen(def) : 0;
+ return def;
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ } else {
+ lj_err_argt(L, idx, LUA_TSTRING);
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def,
+ const char *const lst[])
+{
+ ptrdiff_t i;
+ const char *s = lua_tolstring(L, idx, NULL);
+ if (s == NULL && (s = def) == NULL)
+ lj_err_argt(L, idx, LUA_TSTRING);
+ for (i = 0; lst[i]; i++)
+ if (strcmp(lst[i], s) == 0)
+ return (int)i;
+ lj_err_argv(L, idx, LJ_ERR_INVOPTM, s);
+}
+
+LUA_API size_t lua_objlen(lua_State *L, int idx)
+{
+ TValue *o = index2adr(L, idx);
+ if (tvisstr(o)) {
+ return strV(o)->len;
+ } else if (tvistab(o)) {
+ return (size_t)lj_tab_len(tabV(o));
+ } else if (tvisudata(o)) {
+ return udataV(o)->len;
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ return s->len;
+ } else {
+ return 0;
+ }
+}
+
+LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisfunc(o)) {
+ BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns));
+ if (op == BC_FUNCC || op == BC_FUNCCW)
+ return funcV(o)->c.f;
+ }
+ return NULL;
+}
+
+LUA_API void *lua_touserdata(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o))
+ return uddata(udataV(o));
+ else if (tvislightud(o))
+ return lightudV(o);
+ else
+ return NULL;
+}
+
+LUA_API lua_State *lua_tothread(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (!tvisthread(o)) ? NULL : threadV(o);
+}
+
+LUA_API const void *lua_topointer(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o))
+ return uddata(udataV(o));
+ else if (tvislightud(o))
+ return lightudV(o);
+ else if (tviscdata(o))
+ return cdataptr(cdataV(o));
+ else if (tvisgcv(o))
+ return gcV(o);
+ else
+ return NULL;
+}
+
+/* -- Stack setters (object creation) ------------------------------------- */
+
+LUA_API void lua_pushnil(lua_State *L)
+{
+ setnilV(L->top);
+ incr_top(L);
+}
+
+LUA_API void lua_pushnumber(lua_State *L, lua_Number n)
+{
+ setnumV(L->top, n);
+ if (LJ_UNLIKELY(tvisnan(L->top)))
+ setnanV(L->top); /* Canonicalize injected NaNs. */
+ incr_top(L);
+}
+
+LUA_API void lua_pushinteger(lua_State *L, lua_Integer n)
+{
+ setintptrV(L->top, n);
+ incr_top(L);
+}
+
+LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len)
+{
+ GCstr *s;
+ lj_gc_check(L);
+ s = lj_str_new(L, str, len);
+ setstrV(L, L->top, s);
+ incr_top(L);
+}
+
+LUA_API void lua_pushstring(lua_State *L, const char *str)
+{
+ if (str == NULL) {
+ setnilV(L->top);
+ } else {
+ GCstr *s;
+ lj_gc_check(L);
+ s = lj_str_newz(L, str);
+ setstrV(L, L->top, s);
+ }
+ incr_top(L);
+}
+
+LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt,
+ va_list argp)
+{
+ lj_gc_check(L);
+ return lj_str_pushvf(L, fmt, argp);
+}
+
+LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...)
+{
+ const char *ret;
+ va_list argp;
+ lj_gc_check(L);
+ va_start(argp, fmt);
+ ret = lj_str_pushvf(L, fmt, argp);
+ va_end(argp);
+ return ret;
+}
+
+LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n)
+{
+ GCfunc *fn;
+ lj_gc_check(L);
+ api_checknelems(L, n);
+ fn = lj_func_newC(L, (MSize)n, getcurrenv(L));
+ fn->c.f = f;
+ L->top -= n;
+ while (n--)
+ copyTV(L, &fn->c.upvalue[n], L->top+n);
+ setfuncV(L, L->top, fn);
+ lua_assert(iswhite(obj2gco(fn)));
+ incr_top(L);
+}
+
+LUA_API void lua_pushboolean(lua_State *L, int b)
+{
+ setboolV(L->top, (b != 0));
+ incr_top(L);
+}
+
+LUA_API void lua_pushlightuserdata(lua_State *L, void *p)
+{
+ setlightudV(L->top, checklightudptr(L, p));
+ incr_top(L);
+}
+
+LUA_API void lua_createtable(lua_State *L, int narray, int nrec)
+{
+ GCtab *t;
+ lj_gc_check(L);
+ t = lj_tab_new(L, (uint32_t)(narray > 0 ? narray+1 : 0), hsize2hbits(nrec));
+ settabV(L, L->top, t);
+ incr_top(L);
+}
+
+LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname)
+{
+ GCtab *regt = tabV(registry(L));
+ TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname));
+ if (tvisnil(tv)) {
+ GCtab *mt = lj_tab_new(L, 0, 1);
+ settabV(L, tv, mt);
+ settabV(L, L->top++, mt);
+ lj_gc_anybarriert(L, regt);
+ return 1;
+ } else {
+ copyTV(L, L->top++, tv);
+ return 0;
+ }
+}
+
+LUA_API int lua_pushthread(lua_State *L)
+{
+ setthreadV(L, L->top, L);
+ incr_top(L);
+ return (mainthread(G(L)) == L);
+}
+
+LUA_API lua_State *lua_newthread(lua_State *L)
+{
+ lua_State *L1;
+ lj_gc_check(L);
+ L1 = lj_state_new(L);
+ setthreadV(L, L->top, L1);
+ incr_top(L);
+ return L1;
+}
+
+LUA_API void *lua_newuserdata(lua_State *L, size_t size)
+{
+ GCudata *ud;
+ lj_gc_check(L);
+ if (size > LJ_MAX_UDATA)
+ lj_err_msg(L, LJ_ERR_UDATAOV);
+ ud = lj_udata_new(L, (MSize)size, getcurrenv(L));
+ setudataV(L, L->top, ud);
+ incr_top(L);
+ return uddata(ud);
+}
+
+LUA_API void lua_concat(lua_State *L, int n)
+{
+ api_checknelems(L, n);
+ if (n >= 2) {
+ n--;
+ do {
+ TValue *top = lj_meta_cat(L, L->top-1, -n);
+ if (top == NULL) {
+ L->top -= n;
+ break;
+ }
+ n -= (int)(L->top - top);
+ L->top = top+2;
+ lj_vm_call(L, top, 1+1);
+ L->top--;
+ copyTV(L, L->top-1, L->top);
+ } while (--n > 0);
+ } else if (n == 0) { /* Push empty string. */
+ setstrV(L, L->top, &G(L)->strempty);
+ incr_top(L);
+ }
+ /* else n == 1: nothing to do. */
+}
+
+/* -- Object getters ------------------------------------------------------ */
+
+LUA_API void lua_gettable(lua_State *L, int idx)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ api_checkvalidindex(L, t);
+ v = lj_meta_tget(L, t, L->top-1);
+ if (v == NULL) {
+ L->top += 2;
+ lj_vm_call(L, L->top-2, 1+1);
+ L->top -= 2;
+ v = L->top+1;
+ }
+ copyTV(L, L->top-1, v);
+}
+
+LUA_API void lua_getfield(lua_State *L, int idx, const char *k)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ TValue key;
+ api_checkvalidindex(L, t);
+ setstrV(L, &key, lj_str_newz(L, k));
+ v = lj_meta_tget(L, t, &key);
+ if (v == NULL) {
+ L->top += 2;
+ lj_vm_call(L, L->top-2, 1+1);
+ L->top -= 2;
+ v = L->top+1;
+ }
+ copyTV(L, L->top, v);
+ incr_top(L);
+}
+
+LUA_API void lua_rawget(lua_State *L, int idx)
+{
+ cTValue *t = index2adr(L, idx);
+ api_check(L, tvistab(t));
+ copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1));
+}
+
+LUA_API void lua_rawgeti(lua_State *L, int idx, int n)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ api_check(L, tvistab(t));
+ v = lj_tab_getint(tabV(t), n);
+ if (v) {
+ copyTV(L, L->top, v);
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+}
+
+LUA_API int lua_getmetatable(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ GCtab *mt = NULL;
+ if (tvistab(o))
+ mt = tabref(tabV(o)->metatable);
+ else if (tvisudata(o))
+ mt = tabref(udataV(o)->metatable);
+ else
+ mt = tabref(basemt_obj(G(L), o));
+ if (mt == NULL)
+ return 0;
+ settabV(L, L->top, mt);
+ incr_top(L);
+ return 1;
+}
+
+LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field)
+{
+ if (lua_getmetatable(L, idx)) {
+ cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field));
+ if (tv && !tvisnil(tv)) {
+ copyTV(L, L->top-1, tv);
+ return 1;
+ }
+ L->top--;
+ }
+ return 0;
+}
+
+LUA_API void lua_getfenv(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ api_checkvalidindex(L, o);
+ if (tvisfunc(o)) {
+ settabV(L, L->top, tabref(funcV(o)->c.env));
+ } else if (tvisudata(o)) {
+ settabV(L, L->top, tabref(udataV(o)->env));
+ } else if (tvisthread(o)) {
+ settabV(L, L->top, tabref(threadV(o)->env));
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+}
+
+LUA_API int lua_next(lua_State *L, int idx)
+{
+ cTValue *t = index2adr(L, idx);
+ int more;
+ api_check(L, tvistab(t));
+ more = lj_tab_next(L, tabV(t), L->top-1);
+ if (more) {
+ incr_top(L); /* Return new key and value slot. */
+ } else { /* End of traversal. */
+ L->top--; /* Remove key slot. */
+ }
+ return more;
+}
+
+LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n)
+{
+ TValue *val;
+ const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val);
+ if (name) {
+ copyTV(L, L->top, val);
+ incr_top(L);
+ }
+ return name;
+}
+
+LUA_API void *lua_upvalueid(lua_State *L, int idx, int n)
+{
+ GCfunc *fn = funcV(index2adr(L, idx));
+ n--;
+ api_check(L, (uint32_t)n < fn->l.nupvalues);
+ return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
+ (void *)&fn->c.upvalue[n];
+}
+
+LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2)
+{
+ GCfunc *fn1 = funcV(index2adr(L, idx1));
+ GCfunc *fn2 = funcV(index2adr(L, idx2));
+ n1--; n2--;
+ api_check(L, isluafunc(fn1) && (uint32_t)n1 < fn1->l.nupvalues);
+ api_check(L, isluafunc(fn2) && (uint32_t)n2 < fn2->l.nupvalues);
+ setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]);
+ lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1]));
+}
+
+LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o)) {
+ GCudata *ud = udataV(o);
+ cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname));
+ if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable))
+ return uddata(ud);
+ }
+ lj_err_argtype(L, idx, tname);
+ return NULL; /* unreachable */
+}
+
+/* -- Object setters ------------------------------------------------------ */
+
+LUA_API void lua_settable(lua_State *L, int idx)
+{
+ TValue *o;
+ cTValue *t = index2adr(L, idx);
+ api_checknelems(L, 2);
+ api_checkvalidindex(L, t);
+ o = lj_meta_tset(L, t, L->top-2);
+ if (o) {
+ /* NOBARRIER: lj_meta_tset ensures the table is not black. */
+ copyTV(L, o, L->top-1);
+ L->top -= 2;
+ } else {
+ L->top += 3;
+ copyTV(L, L->top-1, L->top-6);
+ lj_vm_call(L, L->top-3, 0+1);
+ L->top -= 3;
+ }
+}
+
+LUA_API void lua_setfield(lua_State *L, int idx, const char *k)
+{
+ TValue *o;
+ TValue key;
+ cTValue *t = index2adr(L, idx);
+ api_checknelems(L, 1);
+ api_checkvalidindex(L, t);
+ setstrV(L, &key, lj_str_newz(L, k));
+ o = lj_meta_tset(L, t, &key);
+ if (o) {
+ L->top--;
+ /* NOBARRIER: lj_meta_tset ensures the table is not black. */
+ copyTV(L, o, L->top);
+ } else {
+ L->top += 3;
+ copyTV(L, L->top-1, L->top-6);
+ lj_vm_call(L, L->top-3, 0+1);
+ L->top -= 2;
+ }
+}
+
+LUA_API void lua_rawset(lua_State *L, int idx)
+{
+ GCtab *t = tabV(index2adr(L, idx));
+ TValue *dst, *key;
+ api_checknelems(L, 2);
+ key = L->top-2;
+ dst = lj_tab_set(L, t, key);
+ copyTV(L, dst, key+1);
+ lj_gc_anybarriert(L, t);
+ L->top = key;
+}
+
+LUA_API void lua_rawseti(lua_State *L, int idx, int n)
+{
+ GCtab *t = tabV(index2adr(L, idx));
+ TValue *dst, *src;
+ api_checknelems(L, 1);
+ dst = lj_tab_setint(L, t, n);
+ src = L->top-1;
+ copyTV(L, dst, src);
+ lj_gc_barriert(L, t, dst);
+ L->top = src;
+}
+
+LUA_API int lua_setmetatable(lua_State *L, int idx)
+{
+ global_State *g;
+ GCtab *mt;
+ cTValue *o = index2adr(L, idx);
+ api_checknelems(L, 1);
+ api_checkvalidindex(L, o);
+ if (tvisnil(L->top-1)) {
+ mt = NULL;
+ } else {
+ api_check(L, tvistab(L->top-1));
+ mt = tabV(L->top-1);
+ }
+ g = G(L);
+ if (tvistab(o)) {
+ setgcref(tabV(o)->metatable, obj2gco(mt));
+ if (mt)
+ lj_gc_objbarriert(L, tabV(o), mt);
+ } else if (tvisudata(o)) {
+ setgcref(udataV(o)->metatable, obj2gco(mt));
+ if (mt)
+ lj_gc_objbarrier(L, udataV(o), mt);
+ } else {
+ /* Flush cache, since traces specialize to basemt. But not during __gc. */
+ if (lj_trace_flushall(L))
+ lj_err_caller(L, LJ_ERR_NOGCMM);
+ if (tvisbool(o)) {
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt));
+ setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt));
+ } else {
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_obj(g, o), obj2gco(mt));
+ }
+ }
+ L->top--;
+ return 1;
+}
+
+LUA_API int lua_setfenv(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ GCtab *t;
+ api_checknelems(L, 1);
+ api_checkvalidindex(L, o);
+ api_check(L, tvistab(L->top-1));
+ t = tabV(L->top-1);
+ if (tvisfunc(o)) {
+ setgcref(funcV(o)->c.env, obj2gco(t));
+ } else if (tvisudata(o)) {
+ setgcref(udataV(o)->env, obj2gco(t));
+ } else if (tvisthread(o)) {
+ setgcref(threadV(o)->env, obj2gco(t));
+ } else {
+ L->top--;
+ return 0;
+ }
+ lj_gc_objbarrier(L, gcV(o), t);
+ L->top--;
+ return 1;
+}
+
+LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n)
+{
+ cTValue *f = index2adr(L, idx);
+ TValue *val;
+ const char *name;
+ api_checknelems(L, 1);
+ name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val);
+ if (name) {
+ L->top--;
+ copyTV(L, val, L->top);
+ lj_gc_barrier(L, funcV(f), L->top);
+ }
+ return name;
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+LUA_API void lua_call(lua_State *L, int nargs, int nresults)
+{
+ api_check(L, L->status == 0 || L->status == LUA_ERRERR);
+ api_checknelems(L, nargs+1);
+ lj_vm_call(L, L->top - nargs, nresults+1);
+}
+
+LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
+{
+ global_State *g = G(L);
+ uint8_t oldh = hook_save(g);
+ ptrdiff_t ef;
+ int status;
+ api_check(L, L->status == 0 || L->status == LUA_ERRERR);
+ api_checknelems(L, nargs+1);
+ if (errfunc == 0) {
+ ef = 0;
+ } else {
+ cTValue *o = stkindex2adr(L, errfunc);
+ api_checkvalidindex(L, o);
+ ef = savestack(L, o);
+ }
+ status = lj_vm_pcall(L, L->top - nargs, nresults+1, ef);
+ if (status) hook_restore(g, oldh);
+ return status;
+}
+
+static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud)
+{
+ GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L));
+ fn->c.f = func;
+ setfuncV(L, L->top, fn);
+ setlightudV(L->top+1, checklightudptr(L, ud));
+ cframe_nres(L->cframe) = 1+0; /* Zero results. */
+ L->top += 2;
+ return L->top-1; /* Now call the newly allocated C function. */
+}
+
+LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
+{
+ global_State *g = G(L);
+ uint8_t oldh = hook_save(g);
+ int status;
+ api_check(L, L->status == 0 || L->status == LUA_ERRERR);
+ status = lj_vm_cpcall(L, func, ud, cpcall);
+ if (status) hook_restore(g, oldh);
+ return status;
+}
+
+LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field)
+{
+ if (luaL_getmetafield(L, idx, field)) {
+ TValue *base = L->top--;
+ copyTV(L, base, index2adr(L, idx));
+ L->top = base+1;
+ lj_vm_call(L, base, 1+1);
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Coroutine yield and resume ------------------------------------------ */
+
+LUA_API int lua_yield(lua_State *L, int nresults)
+{
+ void *cf = L->cframe;
+ global_State *g = G(L);
+ if (cframe_canyield(cf)) {
+ cf = cframe_raw(cf);
+ if (!hook_active(g)) { /* Regular yield: move results down if needed. */
+ cTValue *f = L->top - nresults;
+ if (f > L->base) {
+ TValue *t = L->base;
+ while (--nresults >= 0) copyTV(L, t++, f++);
+ L->top = t;
+ }
+ L->cframe = NULL;
+ L->status = LUA_YIELD;
+ return -1;
+ } else { /* Yield from hook: add a pseudo-frame. */
+ TValue *top = L->top;
+ hook_leave(g);
+ top->u64 = cframe_multres(cf);
+ setcont(top+1, lj_cont_hook);
+ setframe_pc(top+1, cframe_pc(cf)-1);
+ setframe_gc(top+2, obj2gco(L));
+ setframe_ftsz(top+2, (int)((char *)(top+3)-(char *)L->base)+FRAME_CONT);
+ L->top = L->base = top+3;
+#if LJ_TARGET_X64
+ lj_err_throw(L, LUA_YIELD);
+#else
+ L->cframe = NULL;
+ L->status = LUA_YIELD;
+ lj_vm_unwind_c(cf, LUA_YIELD);
+#endif
+ }
+ }
+ lj_err_msg(L, LJ_ERR_CYIELD);
+ return 0; /* unreachable */
+}
+
+LUA_API int lua_resume(lua_State *L, int nargs)
+{
+ if (L->cframe == NULL && L->status <= LUA_YIELD)
+ return lj_vm_resume(L, L->top - nargs, 0, 0);
+ L->top = L->base;
+ setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP));
+ incr_top(L);
+ return LUA_ERRRUN;
+}
+
+/* -- GC and memory management -------------------------------------------- */
+
+LUA_API int lua_gc(lua_State *L, int what, int data)
+{
+ global_State *g = G(L);
+ int res = 0;
+ switch (what) {
+ case LUA_GCSTOP:
+ g->gc.threshold = LJ_MAX_MEM;
+ break;
+ case LUA_GCRESTART:
+ g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total;
+ break;
+ case LUA_GCCOLLECT:
+ lj_gc_fullgc(L);
+ break;
+ case LUA_GCCOUNT:
+ res = (int)(g->gc.total >> 10);
+ break;
+ case LUA_GCCOUNTB:
+ res = (int)(g->gc.total & 0x3ff);
+ break;
+ case LUA_GCSTEP: {
+ MSize a = (MSize)data << 10;
+ g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0;
+ while (g->gc.total >= g->gc.threshold)
+ if (lj_gc_step(L)) {
+ res = 1;
+ break;
+ }
+ break;
+ }
+ case LUA_GCSETPAUSE:
+ res = (int)(g->gc.pause);
+ g->gc.pause = (MSize)data;
+ break;
+ case LUA_GCSETSTEPMUL:
+ res = (int)(g->gc.stepmul);
+ g->gc.stepmul = (MSize)data;
+ break;
+ default:
+ res = -1; /* Invalid option. */
+ }
+ return res;
+}
+
+LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud)
+{
+ global_State *g = G(L);
+ if (ud) *ud = g->allocd;
+ return g->allocf;
+}
+
+LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud)
+{
+ global_State *g = G(L);
+ g->allocd = ud;
+ g->allocf = f;
+}
+
diff --git a/3rdparty/lua/src/lj_arch.h b/3rdparty/lua/src/lj_arch.h
index d3a9d57..9ea10d0 100644
--- a/3rdparty/lua/src/lj_arch.h
+++ b/3rdparty/lua/src/lj_arch.h
@@ -1,6 +1,6 @@
/*
** Target architecture selection.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#ifndef _LJ_ARCH_H
@@ -66,9 +66,8 @@
#define LUAJIT_OS LUAJIT_OS_LINUX
#elif defined(__MACH__) && defined(__APPLE__)
#define LUAJIT_OS LUAJIT_OS_OSX
-#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
- defined(__NetBSD__) || defined(__OpenBSD__) || \
- defined(__DragonFly__)) && !defined(__ORBIS__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
#define LUAJIT_OS LUAJIT_OS_BSD
#elif (defined(__sun__) && defined(__svr4__)) || defined(__CYGWIN__)
#define LUAJIT_OS LUAJIT_OS_POSIX
@@ -105,18 +104,6 @@
#define LJ_TARGET_CONSOLE 1
#endif
-#ifdef __ORBIS__
-#define LJ_TARGET_PS4 1
-#define LJ_TARGET_CONSOLE 1
-#undef NULL
-#define NULL ((void*)0)
-#endif
-
-#ifdef __psp2__
-#define LJ_TARGET_PSVITA 1
-#define LJ_TARGET_CONSOLE 1
-#endif
-
#if _XBOX_VER >= 200
#define LJ_TARGET_XBOX360 1
#define LJ_TARGET_CONSOLE 1
@@ -181,9 +168,7 @@
#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
-#if __ARM_ARCH_8__ || __ARM_ARCH_8A__
-#define LJ_ARCH_VERSION 80
-#elif __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__
+#if __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__
#define LJ_ARCH_VERSION 70
#elif __ARM_ARCH_6T2__
#define LJ_ARCH_VERSION 61
@@ -342,9 +327,6 @@
#if defined(__mips_soft_float)
#error "No support for MIPS CPUs without FPU"
#endif
-#if defined(_LP64)
-#error "No support for MIPS64"
-#endif
#endif
#endif
diff --git a/3rdparty/lua/src/lj_asm.c b/3rdparty/lua/src/lj_asm.c
index 91f8112..316e81d 100644
--- a/3rdparty/lua/src/lj_asm.c
+++ b/3rdparty/lua/src/lj_asm.c
@@ -1,1920 +1,1912 @@
-/*
-** IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_asm_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_gc.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_frame.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#endif
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_ircall.h"
-#include "lj_iropt.h"
-#include "lj_mcode.h"
-#include "lj_iropt.h"
-#include "lj_trace.h"
-#include "lj_snap.h"
-#include "lj_asm.h"
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-#include "lj_target.h"
-
-#ifdef LUA_USE_ASSERT
-#include <stdio.h>
-#endif
-
-/* -- Assembler state and common macros ----------------------------------- */
-
-/* Assembler state. */
-typedef struct ASMState {
- RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */
-
- MCode *mcp; /* Current MCode pointer (grows down). */
- MCode *mclim; /* Lower limit for MCode memory + red zone. */
-#ifdef LUA_USE_ASSERT
- MCode *mcp_prev; /* Red zone overflow check. */
-#endif
-
- IRIns *ir; /* Copy of pointer to IR instructions/constants. */
- jit_State *J; /* JIT compiler state. */
-
-#if LJ_TARGET_X86ORX64
- x86ModRM mrm; /* Fused x86 address operand. */
-#endif
-
- RegSet freeset; /* Set of free registers. */
- RegSet modset; /* Set of registers modified inside the loop. */
- RegSet weakset; /* Set of weakly referenced registers. */
- RegSet phiset; /* Set of PHI registers. */
-
- uint32_t flags; /* Copy of JIT compiler flags. */
- int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
-
- int32_t evenspill; /* Next even spill slot. */
- int32_t oddspill; /* Next odd spill slot (or 0). */
-
- IRRef curins; /* Reference of current instruction. */
- IRRef stopins; /* Stop assembly before hitting this instruction. */
- IRRef orignins; /* Original T->nins. */
-
- IRRef snapref; /* Current snapshot is active after this reference. */
- IRRef snaprename; /* Rename highwater mark for snapshot check. */
- SnapNo snapno; /* Current snapshot number. */
- SnapNo loopsnapno; /* Loop snapshot number. */
-
- IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
- IRRef sectref; /* Section base reference (loopref or 0). */
- IRRef loopref; /* Reference of LOOP instruction (or 0). */
-
- BCReg topslot; /* Number of slots for stack check (unless 0). */
- int32_t gcsteps; /* Accumulated number of GC steps (per section). */
-
- GCtrace *T; /* Trace to assemble. */
- GCtrace *parent; /* Parent trace (or NULL). */
-
- MCode *mcbot; /* Bottom of reserved MCode. */
- MCode *mctop; /* Top of generated MCode. */
- MCode *mcloop; /* Pointer to loop MCode (or NULL). */
- MCode *invmcp; /* Points to invertible loop branch (or NULL). */
- MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */
- MCode *realign; /* Realign loop if not NULL. */
-
-#ifdef RID_NUM_KREF
- int32_t krefk[RID_NUM_KREF];
-#endif
- IRRef1 phireg[RID_MAX]; /* PHI register references. */
- uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */
-} ASMState;
-
-#define IR(ref) (&as->ir[(ref)])
-
-#define ASMREF_TMP1 REF_TRUE /* Temp. register. */
-#define ASMREF_TMP2 REF_FALSE /* Temp. register. */
-#define ASMREF_L REF_NIL /* Stores register for L. */
-
-/* Check for variant to invariant references. */
-#define iscrossref(as, ref) ((ref) < as->sectref)
-
-/* Inhibit memory op fusion from variant to invariant references. */
-#define FUSE_DISABLED (~(IRRef)0)
-#define mayfuse(as, ref) ((ref) > as->fuseref)
-#define neverfuse(as) (as->fuseref == FUSE_DISABLED)
-#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t))
-#define opisfusableload(o) \
- ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
- (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
-
-/* Sparse limit checks using a red zone before the actual limit. */
-#define MCLIM_REDZONE 64
-
-static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
-{
- lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
-}
-
-static LJ_AINLINE void checkmclim(ASMState *as)
-{
-#ifdef LUA_USE_ASSERT
- if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
- IRIns *ir = IR(as->curins+1);
- fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp,
- as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
- lua_assert(0);
- }
-#endif
- if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
-#ifdef LUA_USE_ASSERT
- as->mcp_prev = as->mcp;
-#endif
-}
-
-#ifdef RID_NUM_KREF
-#define ra_iskref(ref) ((ref) < RID_NUM_KREF)
-#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref)))
-#define ra_krefk(as, ref) (as->krefk[(ref)])
-
-static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
-{
- IRRef ref = (IRRef)(r - RID_MIN_KREF);
- as->krefk[ref] = k;
- as->cost[r] = REGCOST(ref, ref);
-}
-
-#else
-#define ra_iskref(ref) 0
-#define ra_krefreg(ref) RID_MIN_GPR
-#define ra_krefk(as, ref) 0
-#endif
-
-/* Arch-specific field offsets. */
-static const uint8_t field_ofs[IRFL__MAX+1] = {
-#define FLOFS(name, ofs) (uint8_t)(ofs),
-IRFLDEF(FLOFS)
-#undef FLOFS
- 0
-};
-
-/* -- Target-specific instruction emitter --------------------------------- */
-
-#if LJ_TARGET_X86ORX64
-#include "lj_emit_x86.h"
-#elif LJ_TARGET_ARM
-#include "lj_emit_arm.h"
-#elif LJ_TARGET_PPC
-#include "lj_emit_ppc.h"
-#elif LJ_TARGET_MIPS
-#include "lj_emit_mips.h"
-#else
-#error "Missing instruction emitter for target CPU"
-#endif
-
-/* -- Register allocator debugging ---------------------------------------- */
-
-/* #define LUAJIT_DEBUG_RA */
-
-#ifdef LUAJIT_DEBUG_RA
-
-#include <stdio.h>
-#include <stdarg.h>
-
-#define RIDNAME(name) #name,
-static const char *const ra_regname[] = {
- GPRDEF(RIDNAME)
- FPRDEF(RIDNAME)
- VRIDDEF(RIDNAME)
- NULL
-};
-#undef RIDNAME
-
-static char ra_dbg_buf[65536];
-static char *ra_dbg_p;
-static char *ra_dbg_merge;
-static MCode *ra_dbg_mcp;
-
-static void ra_dstart(void)
-{
- ra_dbg_p = ra_dbg_buf;
- ra_dbg_merge = NULL;
- ra_dbg_mcp = NULL;
-}
-
-static void ra_dflush(void)
-{
- fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
- ra_dstart();
-}
-
-static void ra_dprintf(ASMState *as, const char *fmt, ...)
-{
- char *p;
- va_list argp;
- va_start(argp, fmt);
- p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
- ra_dbg_mcp = NULL;
- p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
- for (;;) {
- const char *e = strchr(fmt, '$');
- if (e == NULL) break;
- memcpy(p, fmt, (size_t)(e-fmt));
- p += e-fmt;
- if (e[1] == 'r') {
- Reg r = va_arg(argp, Reg) & RID_MASK;
- if (r <= RID_MAX) {
- const char *q;
- for (q = ra_regname[r]; *q; q++)
- *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
- } else {
- *p++ = '?';
- lua_assert(0);
- }
- } else if (e[1] == 'f' || e[1] == 'i') {
- IRRef ref;
- if (e[1] == 'f')
- ref = va_arg(argp, IRRef);
- else
- ref = va_arg(argp, IRIns *) - as->ir;
- if (ref >= REF_BIAS)
- p += sprintf(p, "%04d", ref - REF_BIAS);
- else
- p += sprintf(p, "K%03d", REF_BIAS - ref);
- } else if (e[1] == 's') {
- uint32_t slot = va_arg(argp, uint32_t);
- p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
- } else if (e[1] == 'x') {
- p += sprintf(p, "%08x", va_arg(argp, int32_t));
- } else {
- lua_assert(0);
- }
- fmt = e+2;
- }
- va_end(argp);
- while (*fmt)
- *p++ = *fmt++;
- *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
- if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
- fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
- p = ra_dbg_buf;
- }
- ra_dbg_p = p;
-}
-
-#define RA_DBG_START() ra_dstart()
-#define RA_DBG_FLUSH() ra_dflush()
-#define RA_DBG_REF() \
- do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
- ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
-#define RA_DBGX(x) ra_dprintf x
-
-#else
-#define RA_DBG_START() ((void)0)
-#define RA_DBG_FLUSH() ((void)0)
-#define RA_DBG_REF() ((void)0)
-#define RA_DBGX(x) ((void)0)
-#endif
-
-/* -- Register allocator -------------------------------------------------- */
-
-#define ra_free(as, r) rset_set(as->freeset, (r))
-#define ra_modified(as, r) rset_set(as->modset, (r))
-#define ra_weak(as, r) rset_set(as->weakset, (r))
-#define ra_noweak(as, r) rset_clear(as->weakset, (r))
-
-#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
-
-/* Setup register allocator. */
-static void ra_setup(ASMState *as)
-{
- Reg r;
- /* Initially all regs (except the stack pointer) are free for use. */
- as->freeset = RSET_INIT;
- as->modset = RSET_EMPTY;
- as->weakset = RSET_EMPTY;
- as->phiset = RSET_EMPTY;
- memset(as->phireg, 0, sizeof(as->phireg));
- for (r = RID_MIN_GPR; r < RID_MAX; r++)
- as->cost[r] = REGCOST(~0u, 0u);
-}
-
-/* Rematerialize constants. */
-static Reg ra_rematk(ASMState *as, IRRef ref)
-{
- IRIns *ir;
- Reg r;
- if (ra_iskref(ref)) {
- r = ra_krefreg(ref);
- lua_assert(!rset_test(as->freeset, r));
- ra_free(as, r);
- ra_modified(as, r);
- emit_loadi(as, r, ra_krefk(as, ref));
- return r;
- }
- ir = IR(ref);
- r = ir->r;
- lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
- ra_free(as, r);
- ra_modified(as, r);
- ir->r = RID_INIT; /* Do not keep any hint. */
- RA_DBGX((as, "remat $i $r", ir, r));
-#if !LJ_SOFTFP
- if (ir->o == IR_KNUM) {
- emit_loadn(as, r, ir_knum(ir));
- } else
-#endif
- if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
- ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */
- emit_getgl(as, r, jit_base);
- } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
- lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */
- emit_getgl(as, r, jit_L);
-#if LJ_64
- } else if (ir->o == IR_KINT64) {
- emit_loadu64(as, r, ir_kint64(ir)->u64);
-#endif
- } else {
- lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
- ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
- emit_loadi(as, r, ir->i);
- }
- return r;
-}
-
-/* Force a spill. Allocate a new spill slot if needed. */
-static int32_t ra_spill(ASMState *as, IRIns *ir)
-{
- int32_t slot = ir->s;
- lua_assert(ir >= as->ir + REF_TRUE);
- if (!ra_hasspill(slot)) {
- if (irt_is64(ir->t)) {
- slot = as->evenspill;
- as->evenspill += 2;
- } else if (as->oddspill) {
- slot = as->oddspill;
- as->oddspill = 0;
- } else {
- slot = as->evenspill;
- as->oddspill = slot+1;
- as->evenspill += 2;
- }
- if (as->evenspill > 256)
- lj_trace_err(as->J, LJ_TRERR_SPILLOV);
- ir->s = (uint8_t)slot;
- }
- return sps_scale(slot);
-}
-
-/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
-static Reg ra_releasetmp(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- Reg r = ir->r;
- lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
- ra_free(as, r);
- ra_modified(as, r);
- ir->r = RID_INIT;
- return r;
-}
-
-/* Restore a register (marked as free). Rematerialize or force a spill. */
-static Reg ra_restore(ASMState *as, IRRef ref)
-{
- if (emit_canremat(ref)) {
- return ra_rematk(as, ref);
- } else {
- IRIns *ir = IR(ref);
- int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */
- Reg r = ir->r;
- lua_assert(ra_hasreg(r));
- ra_sethint(ir->r, r); /* Keep hint. */
- ra_free(as, r);
- if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */
- ra_modified(as, r);
- RA_DBGX((as, "restore $i $r", ir, r));
- emit_spload(as, ir, r, ofs);
- }
- return r;
- }
-}
-
-/* Save a register to a spill slot. */
-static void ra_save(ASMState *as, IRIns *ir, Reg r)
-{
- RA_DBGX((as, "save $i $r", ir, r));
- emit_spstore(as, ir, r, sps_scale(ir->s));
-}
-
-#define MINCOST(name) \
- if (rset_test(RSET_ALL, RID_##name) && \
- LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
- cost = as->cost[RID_##name];
-
-/* Evict the register with the lowest cost, forcing a restore. */
-static Reg ra_evict(ASMState *as, RegSet allow)
-{
- IRRef ref;
- RegCost cost = ~(RegCost)0;
- lua_assert(allow != RSET_EMPTY);
- if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
- GPRDEF(MINCOST)
- } else {
- FPRDEF(MINCOST)
- }
- ref = regcost_ref(cost);
- lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
- /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
- if (!irref_isk(ref) && (as->weakset & allow)) {
- IRIns *ir = IR(ref);
- if (!rset_test(as->weakset, ir->r))
- ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
- }
- return ra_restore(as, ref);
-}
-
-/* Pick any register (marked as free). Evict on-demand. */
-static Reg ra_pick(ASMState *as, RegSet allow)
-{
- RegSet pick = as->freeset & allow;
- if (!pick)
- return ra_evict(as, allow);
- else
- return rset_picktop(pick);
-}
-
-/* Get a scratch register (marked as free). */
-static Reg ra_scratch(ASMState *as, RegSet allow)
-{
- Reg r = ra_pick(as, allow);
- ra_modified(as, r);
- RA_DBGX((as, "scratch $r", r));
- return r;
-}
-
-/* Evict all registers from a set (if not free). */
-static void ra_evictset(ASMState *as, RegSet drop)
-{
- RegSet work;
- as->modset |= drop;
-#if !LJ_SOFTFP
- work = (drop & ~as->freeset) & RSET_FPR;
- while (work) {
- Reg r = rset_pickbot(work);
- ra_restore(as, regcost_ref(as->cost[r]));
- rset_clear(work, r);
- checkmclim(as);
- }
-#endif
- work = (drop & ~as->freeset);
- while (work) {
- Reg r = rset_pickbot(work);
- ra_restore(as, regcost_ref(as->cost[r]));
- rset_clear(work, r);
- checkmclim(as);
- }
-}
-
-/* Evict (rematerialize) all registers allocated to constants. */
-static void ra_evictk(ASMState *as)
-{
- RegSet work;
-#if !LJ_SOFTFP
- work = ~as->freeset & RSET_FPR;
- while (work) {
- Reg r = rset_pickbot(work);
- IRRef ref = regcost_ref(as->cost[r]);
- if (emit_canremat(ref) && irref_isk(ref)) {
- ra_rematk(as, ref);
- checkmclim(as);
- }
- rset_clear(work, r);
- }
-#endif
- work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_pickbot(work);
- IRRef ref = regcost_ref(as->cost[r]);
- if (emit_canremat(ref) && irref_isk(ref)) {
- ra_rematk(as, ref);
- checkmclim(as);
- }
- rset_clear(work, r);
- }
-}
-
-#ifdef RID_NUM_KREF
-/* Allocate a register for a constant. */
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
-{
- /* First try to find a register which already holds the same constant. */
- RegSet pick, work = ~as->freeset & RSET_GPR;
- Reg r;
- while (work) {
- IRRef ref;
- r = rset_pickbot(work);
- ref = regcost_ref(as->cost[r]);
- if (ref < ASMREF_L &&
- k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
- return r;
- rset_clear(work, r);
- }
- pick = as->freeset & allow;
- if (pick) {
- /* Constants should preferably get unmodified registers. */
- if ((pick & ~as->modset))
- pick &= ~as->modset;
- r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
- } else {
- r = ra_evict(as, allow);
- }
- RA_DBGX((as, "allock $x $r", k, r));
- ra_setkref(as, r, k);
- rset_clear(as->freeset, r);
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate a specific register for a constant. */
-static void ra_allockreg(ASMState *as, int32_t k, Reg r)
-{
- Reg kr = ra_allock(as, k, RID2RSET(r));
- if (kr != r) {
- IRIns irdummy;
- irdummy.t.irt = IRT_INT;
- ra_scratch(as, RID2RSET(r));
- emit_movrr(as, &irdummy, r, kr);
- }
-}
-#else
-#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k))
-#endif
-
-/* Allocate a register for ref from the allowed set of registers.
-** Note: this function assumes the ref does NOT have a register yet!
-** Picks an optimal register, sets the cost and marks the register as non-free.
-*/
-static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- RegSet pick = as->freeset & allow;
- Reg r;
- lua_assert(ra_noreg(ir->r));
- if (pick) {
- /* First check register hint from propagation or PHI. */
- if (ra_hashint(ir->r)) {
- r = ra_gethint(ir->r);
- if (rset_test(pick, r)) /* Use hint register if possible. */
- goto found;
- /* Rematerialization is cheaper than missing a hint. */
- if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
- ra_rematk(as, regcost_ref(as->cost[r]));
- goto found;
- }
- RA_DBGX((as, "hintmiss $f $r", ref, r));
- }
- /* Invariants should preferably get unmodified registers. */
- if (ref < as->loopref && !irt_isphi(ir->t)) {
- if ((pick & ~as->modset))
- pick &= ~as->modset;
- r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
- } else {
- /* We've got plenty of regs, so get callee-save regs if possible. */
- if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
- pick &= ~RSET_SCRATCH;
- r = rset_picktop(pick);
- }
- } else {
- r = ra_evict(as, allow);
- }
-found:
- RA_DBGX((as, "alloc $f $r", ref, r));
- ir->r = (uint8_t)r;
- rset_clear(as->freeset, r);
- ra_noweak(as, r);
- as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
- return r;
-}
-
-/* Allocate a register on-demand. */
-static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
-{
- Reg r = IR(ref)->r;
- /* Note: allow is ignored if the register is already allocated. */
- if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
- ra_noweak(as, r);
- return r;
-}
-
-/* Rename register allocation and emit move. */
-static void ra_rename(ASMState *as, Reg down, Reg up)
-{
- IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
- IRIns *ir = IR(ref);
- ir->r = (uint8_t)up;
- as->cost[down] = 0;
- lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
- lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
- ra_free(as, down); /* 'down' is free ... */
- ra_modified(as, down);
- rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */
- ra_noweak(as, up);
- RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
- emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */
- if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */
- lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
- ren = tref_ref(lj_ir_emit(as->J));
- as->ir = as->T->ir; /* The IR may have been reallocated. */
- IR(ren)->r = (uint8_t)down;
- IR(ren)->s = SPS_NONE;
- }
-}
-
-/* Pick a destination register (marked as free).
-** Caveat: allow is ignored if there's already a destination register.
-** Use ra_destreg() to get a specific register.
-*/
-static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
-{
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- } else {
- if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
- dest = ra_gethint(dest);
- ra_modified(as, dest);
- RA_DBGX((as, "dest $r", dest));
- } else {
- dest = ra_scratch(as, allow);
- }
- ir->r = dest;
- }
- if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
- return dest;
-}
-
-/* Force a specific destination register (marked as free). */
-static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
-{
- Reg dest = ra_dest(as, ir, RID2RSET(r));
- if (dest != r) {
- lua_assert(rset_test(as->freeset, r));
- ra_modified(as, r);
- emit_movrr(as, ir, dest, r);
- }
-}
-
-#if LJ_TARGET_X86ORX64
-/* Propagate dest register to left reference. Emit moves as needed.
-** This is a required fixup step for all 2-operand machine instructions.
-*/
-static void ra_left(ASMState *as, Reg dest, IRRef lref)
-{
- IRIns *ir = IR(lref);
- Reg left = ir->r;
- if (ra_noreg(left)) {
- if (irref_isk(lref)) {
- if (ir->o == IR_KNUM) {
- cTValue *tv = ir_knum(ir);
- /* FP remat needs a load except for +0. Still better than eviction. */
- if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
- emit_loadn(as, dest, tv);
- return;
- }
-#if LJ_64
- } else if (ir->o == IR_KINT64) {
- emit_loadu64(as, dest, ir_kint64(ir)->u64);
- return;
-#endif
- } else {
- lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
- ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
- emit_loadi(as, dest, ir->i);
- return;
- }
- }
- if (!ra_hashint(left) && !iscrossref(as, lref))
- ra_sethint(ir->r, dest); /* Propagate register hint. */
- left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
- }
- ra_noweak(as, left);
- /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
- if (dest != left) {
- /* Use register renaming if dest is the PHI reg. */
- if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
- ra_modified(as, left);
- ra_rename(as, left, dest);
- } else {
- emit_movrr(as, ir, dest, left);
- }
- }
-}
-#else
-/* Similar to ra_left, except we override any hints. */
-static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
-{
- IRIns *ir = IR(lref);
- Reg left = ir->r;
- if (ra_noreg(left)) {
- ra_sethint(ir->r, dest); /* Propagate register hint. */
- left = ra_allocref(as, lref,
- (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
- }
- ra_noweak(as, left);
- if (dest != left) {
- /* Use register renaming if dest is the PHI reg. */
- if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
- ra_modified(as, left);
- ra_rename(as, left, dest);
- } else {
- emit_movrr(as, ir, dest, left);
- }
- }
-}
-#endif
-
-#if !LJ_64
-/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
-static void ra_destpair(ASMState *as, IRIns *ir)
-{
- Reg destlo = ir->r, desthi = (ir+1)->r;
- /* First spill unrelated refs blocking the destination registers. */
- if (!rset_test(as->freeset, RID_RETLO) &&
- destlo != RID_RETLO && desthi != RID_RETLO)
- ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
- if (!rset_test(as->freeset, RID_RETHI) &&
- destlo != RID_RETHI && desthi != RID_RETHI)
- ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
- /* Next free the destination registers (if any). */
- if (ra_hasreg(destlo)) {
- ra_free(as, destlo);
- ra_modified(as, destlo);
- } else {
- destlo = RID_RETLO;
- }
- if (ra_hasreg(desthi)) {
- ra_free(as, desthi);
- ra_modified(as, desthi);
- } else {
- desthi = RID_RETHI;
- }
- /* Check for conflicts and shuffle the registers as needed. */
- if (destlo == RID_RETHI) {
- if (desthi == RID_RETLO) {
-#if LJ_TARGET_X86
- *--as->mcp = XI_XCHGa + RID_RETHI;
-#else
- emit_movrr(as, ir, RID_RETHI, RID_TMP);
- emit_movrr(as, ir, RID_RETLO, RID_RETHI);
- emit_movrr(as, ir, RID_TMP, RID_RETLO);
-#endif
- } else {
- emit_movrr(as, ir, RID_RETHI, RID_RETLO);
- if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
- }
- } else if (desthi == RID_RETLO) {
- emit_movrr(as, ir, RID_RETLO, RID_RETHI);
- if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
- } else {
- if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
- if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
- }
- /* Restore spill slots (if any). */
- if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
- if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
-}
-#endif
-
-/* -- Snapshot handling --------- ----------------------------------------- */
-
-/* Can we rematerialize a KNUM instead of forcing a spill? */
-static int asm_snap_canremat(ASMState *as)
-{
- Reg r;
- for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
- if (irref_isk(regcost_ref(as->cost[r])))
- return 1;
- return 0;
-}
-
-/* Check whether a sunk store corresponds to an allocation. */
-static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
-{
- if (irs->s == 255) {
- if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
- irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
- IRIns *irk = IR(irs->op1);
- if (irk->o == IR_AREF || irk->o == IR_HREFK)
- irk = IR(irk->op1);
- return (IR(irk->op1) == ira);
- }
- return 0;
- } else {
- return (ira + irs->s == irs); /* Quick check. */
- }
-}
-
-/* Allocate register or spill slot for a ref that escapes to a snapshot. */
-static void asm_snap_alloc1(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) {
- if (ir->r == RID_SINK) {
- ir->r = RID_SUNK;
-#if LJ_HASFFI
- if (ir->o == IR_CNEWI) { /* Allocate CNEWI value. */
- asm_snap_alloc1(as, ir->op2);
- if (LJ_32 && (ir+1)->o == IR_HIOP)
- asm_snap_alloc1(as, (ir+1)->op2);
- } else
-#endif
- { /* Allocate stored values for TNEW, TDUP and CNEW. */
- IRIns *irs;
- lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
- for (irs = IR(as->snapref-1); irs > ir; irs--)
- if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
- lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
- irs->o == IR_FSTORE || irs->o == IR_XSTORE);
- asm_snap_alloc1(as, irs->op2);
- if (LJ_32 && (irs+1)->o == IR_HIOP)
- asm_snap_alloc1(as, (irs+1)->op2);
- }
- }
- } else {
- RegSet allow;
- if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
- IRIns *irc;
- for (irc = IR(as->curins); irc > ir; irc--)
- if ((irc->op1 == ref || irc->op2 == ref) &&
- !(irc->r == RID_SINK || irc->r == RID_SUNK))
- goto nosink; /* Don't sink conversion if result is used. */
- asm_snap_alloc1(as, ir->op1);
- return;
- }
- nosink:
- allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
- if ((as->freeset & allow) ||
- (allow == RSET_FPR && asm_snap_canremat(as))) {
- /* Get a weak register if we have a free one or can rematerialize. */
- Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */
- if (!irt_isphi(ir->t))
- ra_weak(as, r); /* But mark it as weakly referenced. */
- checkmclim(as);
- RA_DBGX((as, "snapreg $f $r", ref, ir->r));
- } else {
- ra_spill(as, ir); /* Otherwise force a spill slot. */
- RA_DBGX((as, "snapspill $f $s", ref, ir->s));
- }
- }
- }
-}
-
-/* Allocate refs escaping to a snapshot. */
-static void asm_snap_alloc(ASMState *as)
-{
- SnapShot *snap = &as->T->snap[as->snapno];
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- IRRef ref = snap_ref(sn);
- if (!irref_isk(ref)) {
- asm_snap_alloc1(as, ref);
- if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
- lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
- asm_snap_alloc1(as, ref+1);
- }
- }
- }
-}
-
-/* All guards for a snapshot use the same exitno. This is currently the
-** same as the snapshot number. Since the exact origin of the exit cannot
-** be determined, all guards for the same snapshot must exit with the same
-** RegSP mapping.
-** A renamed ref which has been used in a prior guard for the same snapshot
-** would cause an inconsistency. The easy way out is to force a spill slot.
-*/
-static int asm_snap_checkrename(ASMState *as, IRRef ren)
-{
- SnapShot *snap = &as->T->snap[as->snapno];
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- IRRef ref = snap_ref(sn);
- if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
- IRIns *ir = IR(ref);
- ra_spill(as, ir); /* Register renamed, so force a spill slot. */
- RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
- return 1; /* Found. */
- }
- }
- return 0; /* Not found. */
-}
-
-/* Prepare snapshot for next guard instruction. */
-static void asm_snap_prep(ASMState *as)
-{
- if (as->curins < as->snapref) {
- do {
- if (as->snapno == 0) return; /* Called by sunk stores before snap #0. */
- as->snapno--;
- as->snapref = as->T->snap[as->snapno].ref;
- } while (as->curins < as->snapref);
- asm_snap_alloc(as);
- as->snaprename = as->T->nins;
- } else {
- /* Process any renames above the highwater mark. */
- for (; as->snaprename < as->T->nins; as->snaprename++) {
- IRIns *ir = IR(as->snaprename);
- if (asm_snap_checkrename(as, ir->op1))
- ir->op2 = REF_BIAS-1; /* Kill rename. */
- }
- }
-}
-
-/* -- Miscellaneous helpers ----------------------------------------------- */
-
-/* Collect arguments from CALL* and CARG instructions. */
-static void asm_collectargs(ASMState *as, IRIns *ir,
- const CCallInfo *ci, IRRef *args)
-{
- uint32_t n = CCI_NARGS(ci);
- lua_assert(n <= CCI_NARGS_MAX*2); /* Account for split args. */
- if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
- while (n-- > 1) {
- ir = IR(ir->op1);
- lua_assert(ir->o == IR_CARG);
- args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
- }
- args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
- lua_assert(IR(ir->op1)->o != IR_CARG);
-}
-
-/* Reconstruct CCallInfo flags for CALLX*. */
-static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
-{
- uint32_t nargs = 0;
- if (ir->op1 != REF_NIL) { /* Count number of arguments first. */
- IRIns *ira = IR(ir->op1);
- nargs++;
- while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
- }
-#if LJ_HASFFI
- if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */
- CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
- CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
- nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
-#if LJ_TARGET_X86
- nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
-#endif
- }
-#endif
- return (nargs | (ir->t.irt << CCI_OTSHIFT));
-}
-
-/* Calculate stack adjustment. */
-static int32_t asm_stack_adjust(ASMState *as)
-{
- if (as->evenspill <= SPS_FIXED)
- return 0;
- return sps_scale(sps_align(as->evenspill));
-}
-
-/* Must match with hash*() in lj_tab.c. */
-static uint32_t ir_khash(IRIns *ir)
-{
- uint32_t lo, hi;
- if (irt_isstr(ir->t)) {
- return ir_kstr(ir)->hash;
- } else if (irt_isnum(ir->t)) {
- lo = ir_knum(ir)->u32.lo;
- hi = ir_knum(ir)->u32.hi << 1;
- } else if (irt_ispri(ir->t)) {
- lua_assert(!irt_isnil(ir->t));
- return irt_type(ir->t)-IRT_FALSE;
- } else {
- lua_assert(irt_isgcv(ir->t));
- lo = u32ptr(ir_kgc(ir));
- hi = lo + HASH_BIAS;
- }
- return hashrot(lo, hi);
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
-
-static void asm_snew(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
- IRRef args[3];
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* const char *str */
- args[2] = ir->op2; /* size_t len */
- as->gcsteps++;
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
-}
-
-static void asm_tnew(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
- IRRef args[2];
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* uint32_t ahsize */
- as->gcsteps++;
- asm_setupresult(as, ir, ci); /* GCtab * */
- asm_gencall(as, ci, args);
- ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
-}
-
-static void asm_tdup(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
- IRRef args[2];
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* const GCtab *kt */
- as->gcsteps++;
- asm_setupresult(as, ir, ci); /* GCtab * */
- asm_gencall(as, ci, args);
-}
-
-static void asm_gc_check(ASMState *as);
-
-/* Explicit GC step. */
-static void asm_gcstep(ASMState *as, IRIns *ir)
-{
- IRIns *ira;
- for (ira = IR(as->stopins+1); ira < ir; ira++)
- if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
- (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
- ra_used(ira))
- as->gcsteps++;
- if (as->gcsteps)
- asm_gc_check(as);
- as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */
-}
-
-/* -- PHI and loop handling ----------------------------------------------- */
-
-/* Break a PHI cycle by renaming to a free register (evict if needed). */
-static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
- RegSet allow)
-{
- RegSet candidates = blocked & allow;
- if (candidates) { /* If this register file has candidates. */
- /* Note: the set for ra_pick cannot be empty, since each register file
- ** has some registers never allocated to PHIs.
- */
- Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */
- if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */
- candidates = candidates & ~blockedby;
- down = rset_picktop(candidates); /* Pick candidate PHI register. */
- ra_rename(as, down, up); /* And rename it to the free register. */
- }
-}
-
-/* PHI register shuffling.
-**
-** The allocator tries hard to preserve PHI register assignments across
-** the loop body. Most of the time this loop does nothing, since there
-** are no register mismatches.
-**
-** If a register mismatch is detected and ...
-** - the register is currently free: rename it.
-** - the register is blocked by an invariant: restore/remat and rename it.
-** - Otherwise the register is used by another PHI, so mark it as blocked.
-**
-** The renames are order-sensitive, so just retry the loop if a register
-** is marked as blocked, but has been freed in the meantime. A cycle is
-** detected if all of the blocked registers are allocated. To break the
-** cycle rename one of them to a free register and retry.
-**
-** Note that PHI spill slots are kept in sync and don't need to be shuffled.
-*/
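-/* Illustration (not from the original sources): suppose two PHIs left the
-** loop body with swapped registers, i.e. the value needed in r1 currently
-** sits in r2 and vice versa. Neither rename can be applied first, so both
-** registers end up in 'blocked'. asm_phi_break() then parks one of the two
-** values in a free register r3; the following retries rename the value in
-** r1 into r2 and finally the parked value in r3 into r1.
-*/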
-static void asm_phi_shuffle(ASMState *as)
-{
- RegSet work;
-
- /* Find and resolve PHI register mismatches. */
- for (;;) {
- RegSet blocked = RSET_EMPTY;
- RegSet blockedby = RSET_EMPTY;
- RegSet phiset = as->phiset;
- while (phiset) { /* Check all left PHI operand registers. */
- Reg r = rset_pickbot(phiset);
- IRIns *irl = IR(as->phireg[r]);
- Reg left = irl->r;
- if (r != left) { /* Mismatch? */
- if (!rset_test(as->freeset, r)) { /* PHI register blocked? */
- IRRef ref = regcost_ref(as->cost[r]);
- /* Blocked by other PHI (w/reg)? */
- if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
- rset_set(blocked, r);
- if (ra_hasreg(left))
- rset_set(blockedby, left);
- left = RID_NONE;
- } else { /* Otherwise grab register from invariant. */
- ra_restore(as, ref);
- checkmclim(as);
- }
- }
- if (ra_hasreg(left)) {
- ra_rename(as, left, r);
- checkmclim(as);
- }
- }
- rset_clear(phiset, r);
- }
- if (!blocked) break; /* Finished. */
- if (!(as->freeset & blocked)) { /* Break cycles if none are free. */
- asm_phi_break(as, blocked, blockedby, RSET_GPR);
- if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
- checkmclim(as);
- } /* Else retry some more renames. */
- }
-
- /* Restore/remat invariants whose registers are modified inside the loop. */
-#if !LJ_SOFTFP
- work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
- while (work) {
- Reg r = rset_pickbot(work);
- ra_restore(as, regcost_ref(as->cost[r]));
- rset_clear(work, r);
- checkmclim(as);
- }
-#endif
- work = as->modset & ~(as->freeset | as->phiset);
- while (work) {
- Reg r = rset_pickbot(work);
- ra_restore(as, regcost_ref(as->cost[r]));
- rset_clear(work, r);
- checkmclim(as);
- }
-
- /* Allocate and save all unsaved PHI regs and clear marks. */
- work = as->phiset;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef lref = as->phireg[r];
- IRIns *ir = IR(lref);
- if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */
- irt_clearmark(ir->t); /* Handled here, so clear marker now. */
- ra_alloc1(as, lref, RID2RSET(r));
- ra_save(as, ir, r); /* Save to spill slot inside the loop. */
- checkmclim(as);
- }
- rset_clear(work, r);
- }
-}
-
-/* Copy unsynced left/right PHI spill slots. Rarely needed. */
-static void asm_phi_copyspill(ASMState *as)
-{
- int need = 0;
- IRIns *ir;
- for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
- if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
- need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */
- if ((need & 1)) { /* Copy integer spill slots. */
-#if !LJ_TARGET_X86ORX64
- Reg r = RID_TMP;
-#else
- Reg r = RID_RET;
- if ((as->freeset & RSET_GPR))
- r = rset_pickbot((as->freeset & RSET_GPR));
- else
- emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
-#endif
- for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
- if (ra_hasspill(ir->s)) {
- IRIns *irl = IR(ir->op1);
- if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
- emit_spstore(as, irl, r, sps_scale(irl->s));
- emit_spload(as, ir, r, sps_scale(ir->s));
- checkmclim(as);
- }
- }
- }
-#if LJ_TARGET_X86ORX64
- if (!rset_test(as->freeset, r))
- emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
-#endif
- }
-#if !LJ_SOFTFP
- if ((need & 2)) { /* Copy FP spill slots. */
-#if LJ_TARGET_X86
- Reg r = RID_XMM0;
-#else
- Reg r = RID_FPRET;
-#endif
- if ((as->freeset & RSET_FPR))
- r = rset_pickbot((as->freeset & RSET_FPR));
- if (!rset_test(as->freeset, r))
- emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
- for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
- if (ra_hasspill(ir->s)) {
- IRIns *irl = IR(ir->op1);
- if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
- emit_spstore(as, irl, r, sps_scale(irl->s));
- emit_spload(as, ir, r, sps_scale(ir->s));
- checkmclim(as);
- }
- }
- }
- if (!rset_test(as->freeset, r))
- emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
- }
-#endif
-}
-
-/* Emit renames for left PHIs which are only spilled outside the loop. */
-static void asm_phi_fixup(ASMState *as)
-{
- RegSet work = as->phiset;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef lref = as->phireg[r];
- IRIns *ir = IR(lref);
- if (irt_ismarked(ir->t)) {
- irt_clearmark(ir->t);
- /* Left PHI gained a spill slot before the loop? */
- if (ra_hasspill(ir->s)) {
- IRRef ren;
- lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
- ren = tref_ref(lj_ir_emit(as->J));
- as->ir = as->T->ir; /* The IR may have been reallocated. */
- IR(ren)->r = (uint8_t)r;
- IR(ren)->s = SPS_NONE;
- }
- }
- rset_clear(work, r);
- }
-}
-
-/* Setup right PHI reference. */
-static void asm_phi(ASMState *as, IRIns *ir)
-{
- RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
- ~as->phiset;
- RegSet afree = (as->freeset & allow);
- IRIns *irl = IR(ir->op1);
- IRIns *irr = IR(ir->op2);
- if (ir->r == RID_SINK) /* Sink PHI. */
- return;
- /* Spill slot shuffling is not implemented yet (but rarely needed). */
- if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
- lj_trace_err(as->J, LJ_TRERR_NYIPHI);
- /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
- if ((afree & (afree-1))) { /* Two or more free registers? */
- Reg r;
- if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */
- r = ra_allocref(as, ir->op2, allow);
- } else { /* Duplicate right PHI, need a copy (rare). */
- r = ra_scratch(as, allow);
- emit_movrr(as, irr, r, irr->r);
- }
- ir->r = (uint8_t)r;
- rset_set(as->phiset, r);
- as->phireg[r] = (IRRef1)ir->op1;
- irt_setmark(irl->t); /* Marks left PHIs _with_ register. */
- if (ra_noreg(irl->r))
- ra_sethint(irl->r, r); /* Set register hint for left PHI. */
- } else { /* Otherwise allocate a spill slot. */
- /* This is overly restrictive, but it triggers only on synthetic code. */
- if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
- lj_trace_err(as->J, LJ_TRERR_NYIPHI);
- ra_spill(as, ir);
- irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */
- }
-}
-
-static void asm_loop_fixup(ASMState *as);
-
-/* Middle part of a loop. */
-static void asm_loop(ASMState *as)
-{
- MCode *mcspill;
- /* LOOP is a guard, so the snapno is up to date. */
- as->loopsnapno = as->snapno;
- if (as->gcsteps)
- asm_gc_check(as);
- /* LOOP marks the transition from the variant to the invariant part. */
- as->flagmcp = as->invmcp = NULL;
- as->sectref = 0;
- if (!neverfuse(as)) as->fuseref = 0;
- asm_phi_shuffle(as);
- mcspill = as->mcp;
- asm_phi_copyspill(as);
- asm_loop_fixup(as);
- as->mcloop = as->mcp;
- RA_DBGX((as, "===== LOOP ====="));
- if (!as->realign) RA_DBG_FLUSH();
- if (as->mcp != mcspill)
- emit_jmp(as, mcspill);
-}
-
-/* -- Target-specific assembler ------------------------------------------- */
-
-#if LJ_TARGET_X86ORX64
-#include "lj_asm_x86.h"
-#elif LJ_TARGET_ARM
-#include "lj_asm_arm.h"
-#elif LJ_TARGET_PPC
-#include "lj_asm_ppc.h"
-#elif LJ_TARGET_MIPS
-#include "lj_asm_mips.h"
-#else
-#error "Missing assembler for target CPU"
-#endif
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Head of a root trace. */
-static void asm_head_root(ASMState *as)
-{
- int32_t spadj;
- asm_head_root_base(as);
- emit_setvmstate(as, (int32_t)as->T->traceno);
- spadj = asm_stack_adjust(as);
- as->T->spadjust = (uint16_t)spadj;
- emit_spsub(as, spadj);
- /* Root traces assume a checked stack for the starting proto. */
- as->T->topslot = gcref(as->T->startpt)->pt.framesize;
-}
-
-/* Head of a side trace.
-**
-** The current simplistic algorithm requires that all slots inherited
-** from the parent are live in a register between pass 2 and pass 3. This
-** avoids the complexity of stack slot shuffling. But of course this may
-** overflow the register set in some cases and cause the dreaded error:
-** "NYI: register coalescing too complex". A refined algorithm is needed.
-*/
-static void asm_head_side(ASMState *as)
-{
- IRRef1 sloadins[RID_MAX];
- RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */
- RegSet live = RSET_EMPTY; /* Live parent registers. */
- IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */
- int32_t spadj, spdelta;
- int pass2 = 0;
- int pass3 = 0;
- IRRef i;
-
- if (as->snapno && as->topslot > as->parent->topslot) {
- /* Force snap #0 alloc to prevent register overwrite in stack check. */
- as->snapno = 0;
- asm_snap_alloc(as);
- }
- allow = asm_head_side_base(as, irp, allow);
-
- /* Scan all parent SLOADs and collect register dependencies. */
- for (i = as->stopins; i > REF_BASE; i--) {
- IRIns *ir = IR(i);
- RegSP rs;
- lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
- (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL);
- rs = as->parentmap[i - REF_FIRST];
- if (ra_hasreg(ir->r)) {
- rset_clear(allow, ir->r);
- if (ra_hasspill(ir->s)) {
- ra_save(as, ir, ir->r);
- checkmclim(as);
- }
- } else if (ra_hasspill(ir->s)) {
- irt_setmark(ir->t);
- pass2 = 1;
- }
- if (ir->r == rs) { /* Coalesce matching registers right now. */
- ra_free(as, ir->r);
- } else if (ra_hasspill(regsp_spill(rs))) {
- if (ra_hasreg(ir->r))
- pass3 = 1;
- } else if (ra_used(ir)) {
- sloadins[rs] = (IRRef1)i;
- rset_set(live, rs); /* Block live parent register. */
- }
- }
-
- /* Calculate stack frame adjustment. */
- spadj = asm_stack_adjust(as);
- spdelta = spadj - (int32_t)as->parent->spadjust;
- if (spdelta < 0) { /* Don't shrink the stack frame. */
- spadj = (int32_t)as->parent->spadjust;
- spdelta = 0;
- }
- as->T->spadjust = (uint16_t)spadj;
-
- /* Reload spilled target registers. */
- if (pass2) {
- for (i = as->stopins; i > REF_BASE; i--) {
- IRIns *ir = IR(i);
- if (irt_ismarked(ir->t)) {
- RegSet mask;
- Reg r;
- RegSP rs;
- irt_clearmark(ir->t);
- rs = as->parentmap[i - REF_FIRST];
- if (!ra_hasspill(regsp_spill(rs)))
- ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */
- else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
- continue; /* Same spill slot, do nothing. */
- mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
- if (mask == RSET_EMPTY)
- lj_trace_err(as->J, LJ_TRERR_NYICOAL);
- r = ra_allocref(as, i, mask);
- ra_save(as, ir, r);
- rset_clear(allow, r);
- if (r == rs) { /* Coalesce matching registers right now. */
- ra_free(as, r);
- rset_clear(live, r);
- } else if (ra_hasspill(regsp_spill(rs))) {
- pass3 = 1;
- }
- checkmclim(as);
- }
- }
- }
-
- /* Store trace number and adjust stack frame relative to the parent. */
- emit_setvmstate(as, (int32_t)as->T->traceno);
- emit_spsub(as, spdelta);
-
-#if !LJ_TARGET_X86ORX64
- /* Restore BASE register from parent spill slot. */
- if (ra_hasspill(irp->s))
- emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
-#endif
-
- /* Restore target registers from parent spill slots. */
- if (pass3) {
- RegSet work = ~as->freeset & RSET_ALL;
- while (work) {
- Reg r = rset_pickbot(work);
- IRRef ref = regcost_ref(as->cost[r]);
- RegSP rs = as->parentmap[ref - REF_FIRST];
- rset_clear(work, r);
- if (ra_hasspill(regsp_spill(rs))) {
- int32_t ofs = sps_scale(regsp_spill(rs));
- ra_free(as, r);
- emit_spload(as, IR(ref), r, ofs);
- checkmclim(as);
- }
- }
- }
-
- /* Shuffle registers to match up target regs with parent regs. */
- for (;;) {
- RegSet work;
-
- /* Repeatedly coalesce free live registers by moving to their target. */
- while ((work = as->freeset & live) != RSET_EMPTY) {
- Reg rp = rset_pickbot(work);
- IRIns *ir = IR(sloadins[rp]);
- rset_clear(live, rp);
- rset_clear(allow, rp);
- ra_free(as, ir->r);
- emit_movrr(as, ir, ir->r, rp);
- checkmclim(as);
- }
-
- /* We're done if no live registers remain. */
- if (live == RSET_EMPTY)
- break;
-
- /* Break cycles by renaming one target to a temp. register. */
- if (live & RSET_GPR) {
- RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
- if (tmpset == RSET_EMPTY)
- lj_trace_err(as->J, LJ_TRERR_NYICOAL);
- ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
- }
- if (!LJ_SOFTFP && (live & RSET_FPR)) {
- RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
- if (tmpset == RSET_EMPTY)
- lj_trace_err(as->J, LJ_TRERR_NYICOAL);
- ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
- }
- checkmclim(as);
- /* Continue with coalescing to fix up the broken cycle(s). */
- }
-
- /* Inherit top stack slot already checked by parent trace. */
- as->T->topslot = as->parent->topslot;
- if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */
-#ifdef EXITSTATE_CHECKEXIT
- /* Highest exit + 1 indicates stack check. */
- ExitNo exitno = as->T->nsnap;
-#else
- /* Reuse the parent exit in the context of the parent trace. */
- ExitNo exitno = as->J->exitno;
-#endif
- as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */
- asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
- }
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Get base slot for a snapshot. */
-static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- MSize n;
- for (n = snap->nent; n > 0; n--) {
- SnapEntry sn = map[n-1];
- if ((sn & SNAP_FRAME)) {
- *gotframe = 1;
- return snap_slot(sn);
- }
- }
- return 0;
-}
-
-/* Link to another trace. */
-static void asm_tail_link(ASMState *as)
-{
- SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */
- SnapShot *snap = &as->T->snap[snapno];
- int gotframe = 0;
- BCReg baseslot = asm_baseslot(as, snap, &gotframe);
-
- as->topslot = snap->topslot;
- checkmclim(as);
- ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
-
- if (as->T->link == 0) {
- /* Setup fixed registers for exit to interpreter. */
- const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]);
- int32_t mres;
- if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
- BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
- if (bc_isret(bc_op(*retpc)))
- pc = retpc;
- }
- ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
- ra_allockreg(as, i32ptr(pc), RID_LPC);
- mres = (int32_t)(snap->nslots - baseslot);
- switch (bc_op(*pc)) {
- case BC_CALLM: case BC_CALLMT:
- mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break;
- case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
- case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
- default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
- }
- ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */
- } else if (baseslot) {
- /* Save modified BASE for linking to trace with higher start frame. */
- emit_setgl(as, RID_BASE, jit_base);
- }
- emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
-
- /* Sync the interpreter state with the on-trace state. */
- asm_stack_restore(as, snap);
-
- /* Root traces that add frames need to check the stack at the end. */
- if (!as->parent && gotframe)
- asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Clear reg/sp for all instructions and add register hints. */
-static void asm_setup_regsp(ASMState *as)
-{
- GCtrace *T = as->T;
- int sink = T->sinktags;
- IRRef nins = T->nins;
- IRIns *ir, *lastir;
- int inloop;
-#if LJ_TARGET_ARM
- uint32_t rload = 0xa6402a64;
-#endif
-
- ra_setup(as);
-
- /* Clear reg/sp for constants. */
- for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++)
- ir->prev = REGSP_INIT;
-
- /* REF_BASE is used for implicit references to the BASE register. */
- lastir->prev = REGSP_HINT(RID_BASE);
-
- ir = IR(nins-1);
- if (ir->o == IR_RENAME) {
- do { ir--; nins--; } while (ir->o == IR_RENAME);
- T->nins = nins; /* Remove any renames left over from ASM restart. */
- }
- as->snaprename = nins;
- as->snapref = nins;
- as->snapno = T->nsnap;
-
- as->stopins = REF_BASE;
- as->orignins = nins;
- as->curins = nins;
-
- /* Setup register hints for parent link instructions. */
- ir = IR(REF_FIRST);
- if (as->parent) {
- uint16_t *p;
- lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir);
- if (lastir - ir > LJ_MAX_JSLOTS)
- lj_trace_err(as->J, LJ_TRERR_NYICOAL);
- as->stopins = (IRRef)((lastir-1) - as->ir);
- for (p = as->parentmap; ir < lastir; ir++) {
- RegSP rs = ir->prev;
- *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */
- if (!ra_hasspill(regsp_spill(rs)))
- ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
- else
- ir->prev = REGSP_INIT;
- }
- }
-
- inloop = 0;
- as->evenspill = SPS_FIRST;
- for (lastir = IR(nins); ir < lastir; ir++) {
- if (sink) {
- if (ir->r == RID_SINK)
- continue;
- if (ir->r == RID_SUNK) { /* Revert after ASM restart. */
- ir->r = RID_SINK;
- continue;
- }
- }
- switch (ir->o) {
- case IR_LOOP:
- inloop = 1;
- break;
-#if LJ_TARGET_ARM
- case IR_SLOAD:
- if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
- break;
- /* fallthrough */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
- ir->prev = (uint16_t)REGSP_HINT((rload & 15));
- rload = lj_ror(rload, 4);
- continue;
-#endif
- case IR_CALLXS: {
- CCallInfo ci;
- ci.flags = asm_callx_flags(as, ir);
- ir->prev = asm_setup_call_slots(as, ir, &ci);
- if (inloop)
- as->modset |= RSET_SCRATCH;
- continue;
- }
- case IR_CALLN: case IR_CALLL: case IR_CALLS: {
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- ir->prev = asm_setup_call_slots(as, ir, ci);
- if (inloop)
- as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
- (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
- continue;
- }
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
- case IR_HIOP:
- switch ((ir-1)->o) {
-#if LJ_SOFTFP && LJ_TARGET_ARM
- case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- if (ra_hashint((ir-1)->r)) {
- ir->prev = (ir-1)->prev + 1;
- continue;
- }
- break;
-#endif
-#if !LJ_SOFTFP && LJ_NEED_FP64
- case IR_CONV:
- if (irt_isfp((ir-1)->t)) {
- ir->prev = REGSP_HINT(RID_FPRET);
- continue;
- }
- /* fallthrough */
-#endif
- case IR_CALLN: case IR_CALLXS:
-#if LJ_SOFTFP
- case IR_MIN: case IR_MAX:
-#endif
- (ir-1)->prev = REGSP_HINT(RID_RETLO);
- ir->prev = REGSP_HINT(RID_RETHI);
- continue;
- default:
- break;
- }
- break;
-#endif
-#if LJ_SOFTFP
- case IR_MIN: case IR_MAX:
- if ((ir+1)->o != IR_HIOP) break;
- /* fallthrough */
-#endif
- /* C calls evict all scratch regs and return results in RID_RET. */
- case IR_SNEW: case IR_XSNEW: case IR_NEWREF:
- if (REGARG_NUMGPR < 3 && as->evenspill < 3)
- as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
- case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR:
- ir->prev = REGSP_HINT(RID_RET);
- if (inloop)
- as->modset = RSET_SCRATCH;
- continue;
- case IR_STRTO: case IR_OBAR:
- if (inloop)
- as->modset = RSET_SCRATCH;
- break;
-#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP
- case IR_ATAN2: case IR_LDEXP:
-#endif
- case IR_POW:
- if (!LJ_SOFTFP && irt_isnum(ir->t)) {
-#if LJ_TARGET_X86ORX64
- ir->prev = REGSP_HINT(RID_XMM0);
- if (inloop)
- as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
-#else
- ir->prev = REGSP_HINT(RID_FPRET);
- if (inloop)
- as->modset |= RSET_SCRATCH;
-#endif
- continue;
- }
- /* fallthrough for integer POW */
- case IR_DIV: case IR_MOD:
- if (!irt_isnum(ir->t)) {
- ir->prev = REGSP_HINT(RID_RET);
- if (inloop)
- as->modset |= (RSET_SCRATCH & RSET_GPR);
- continue;
- }
- break;
- case IR_FPMATH:
-#if LJ_TARGET_X86ORX64
- if (ir->op2 == IRFPM_EXP2) { /* May be joined to lj_vm_pow_sse. */
- ir->prev = REGSP_HINT(RID_XMM0);
-#if !LJ_64
- if (as->evenspill < 4) /* Leave room for 16 byte scratch area. */
- as->evenspill = 4;
-#endif
- if (inloop)
- as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
- continue;
- } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
- ir->prev = REGSP_HINT(RID_XMM0);
- if (inloop)
- as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
- continue;
- }
- break;
-#else
- ir->prev = REGSP_HINT(RID_FPRET);
- if (inloop)
- as->modset |= RSET_SCRATCH;
- continue;
-#endif
-#if LJ_TARGET_X86ORX64
- /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
- case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
- if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
- IR(ir->op2)->r = REGSP_HINT(RID_ECX);
- if (inloop)
- rset_set(as->modset, RID_ECX);
- }
- break;
-#endif
- /* Do not propagate hints across type conversions or loads. */
- case IR_TOBIT:
- case IR_XLOAD:
-#if !LJ_TARGET_ARM
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
-#endif
- break;
- case IR_CONV:
- if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
- (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
- break;
- /* fallthrough */
- default:
- /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
- if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
- ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
- ir->prev = IR(ir->op1)->prev;
- continue;
- }
- break;
- }
- ir->prev = REGSP_INIT;
- }
- if ((as->evenspill & 1))
- as->oddspill = as->evenspill++;
- else
- as->oddspill = 0;
-}
-
-/* -- Assembler core ------------------------------------------------------ */
-
-/* Assemble a trace. */
-void lj_asm_trace(jit_State *J, GCtrace *T)
-{
- ASMState as_;
- ASMState *as = &as_;
- MCode *origtop;
-
- /* Ensure an initialized instruction beyond the last one for HIOP checks. */
- J->cur.nins = lj_ir_nextins(J);
- J->cur.ir[J->cur.nins].o = IR_NOP;
-
- /* Setup initial state. Copy some fields to reduce indirections. */
- as->J = J;
- as->T = T;
- as->ir = T->ir;
- as->flags = J->flags;
- as->loopref = J->loopref;
- as->realign = NULL;
- as->loopinv = 0;
- as->parent = J->parent ? traceref(J, J->parent) : NULL;
-
- /* Reserve MCode memory. */
- as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
- as->mcp = as->mctop;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- asm_setup_target(as);
-
- do {
- as->mcp = as->mctop;
-#ifdef LUA_USE_ASSERT
- as->mcp_prev = as->mcp;
-#endif
- as->curins = T->nins;
- RA_DBG_START();
- RA_DBGX((as, "===== STOP ====="));
-
- /* General trace setup. Emit tail of trace. */
- asm_tail_prep(as);
- as->mcloop = NULL;
- as->flagmcp = NULL;
- as->topslot = 0;
- as->gcsteps = 0;
- as->sectref = as->loopref;
- as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
- asm_setup_regsp(as);
- if (!as->loopref)
- asm_tail_link(as);
-
- /* Assemble a trace in linear backwards order. */
- for (as->curins--; as->curins > as->stopins; as->curins--) {
- IRIns *ir = IR(as->curins);
- lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */
- if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
- continue; /* Dead-code elimination can be soooo easy. */
- if (irt_isguard(ir->t))
- asm_snap_prep(as);
- RA_DBG_REF();
- checkmclim(as);
- asm_ir(as, ir);
- }
- } while (as->realign); /* Retry in case the MCode needs to be realigned. */
-
- /* Emit head of trace. */
- RA_DBG_REF();
- checkmclim(as);
- if (as->gcsteps > 0) {
- as->curins = as->T->snap[0].ref;
- asm_snap_prep(as); /* The GC check is a guard. */
- asm_gc_check(as);
- }
- ra_evictk(as);
- if (as->parent)
- asm_head_side(as);
- else
- asm_head_root(as);
- asm_phi_fixup(as);
-
- RA_DBGX((as, "===== START ===="));
- RA_DBG_FLUSH();
- if (as->freeset != RSET_ALL)
- lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */
-
- /* Set trace entry point before fixing up tail to allow link to self. */
- T->mcode = as->mcp;
- T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
- if (!as->loopref)
- asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
- T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
- lj_mcode_sync(T->mcode, origtop);
-}
-
-#undef IR
-
-#endif
+/*
+** IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_asm_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_asm.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_target.h"
+
+#ifdef LUA_USE_ASSERT
+#include <stdio.h>
+#endif
+
+/* -- Assembler state and common macros ----------------------------------- */
+
+/* Assembler state. */
+typedef struct ASMState {
+ RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */
+
+ MCode *mcp; /* Current MCode pointer (grows down). */
+ MCode *mclim; /* Lower limit for MCode memory + red zone. */
+#ifdef LUA_USE_ASSERT
+ MCode *mcp_prev; /* Red zone overflow check. */
+#endif
+
+ IRIns *ir; /* Copy of pointer to IR instructions/constants. */
+ jit_State *J; /* JIT compiler state. */
+
+#if LJ_TARGET_X86ORX64
+ x86ModRM mrm; /* Fused x86 address operand. */
+#endif
+
+ RegSet freeset; /* Set of free registers. */
+ RegSet modset; /* Set of registers modified inside the loop. */
+ RegSet weakset; /* Set of weakly referenced registers. */
+ RegSet phiset; /* Set of PHI registers. */
+
+ uint32_t flags; /* Copy of JIT compiler flags. */
+ int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
+
+ int32_t evenspill; /* Next even spill slot. */
+ int32_t oddspill; /* Next odd spill slot (or 0). */
+
+ IRRef curins; /* Reference of current instruction. */
+ IRRef stopins; /* Stop assembly before hitting this instruction. */
+ IRRef orignins; /* Original T->nins. */
+
+ IRRef snapref; /* Current snapshot is active after this reference. */
+ IRRef snaprename; /* Rename highwater mark for snapshot check. */
+ SnapNo snapno; /* Current snapshot number. */
+ SnapNo loopsnapno; /* Loop snapshot number. */
+
+ IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
+ IRRef sectref; /* Section base reference (loopref or 0). */
+ IRRef loopref; /* Reference of LOOP instruction (or 0). */
+
+ BCReg topslot; /* Number of slots for stack check (unless 0). */
+ int32_t gcsteps; /* Accumulated number of GC steps (per section). */
+
+ GCtrace *T; /* Trace to assemble. */
+ GCtrace *parent; /* Parent trace (or NULL). */
+
+ MCode *mcbot; /* Bottom of reserved MCode. */
+ MCode *mctop; /* Top of generated MCode. */
+ MCode *mcloop; /* Pointer to loop MCode (or NULL). */
+ MCode *invmcp; /* Points to invertible loop branch (or NULL). */
+ MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */
+ MCode *realign; /* Realign loop if not NULL. */
+
+#ifdef RID_NUM_KREF
+ int32_t krefk[RID_NUM_KREF];
+#endif
+ IRRef1 phireg[RID_MAX]; /* PHI register references. */
+ uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */
+} ASMState;
+
+#define IR(ref) (&as->ir[(ref)])
+
+#define ASMREF_TMP1 REF_TRUE /* Temp. register. */
+#define ASMREF_TMP2 REF_FALSE /* Temp. register. */
+#define ASMREF_L REF_NIL /* Stores register for L. */
+
+/* Check for variant to invariant references. */
+#define iscrossref(as, ref) ((ref) < as->sectref)
+
+/* Inhibit memory op fusion from variant to invariant references. */
+#define FUSE_DISABLED (~(IRRef)0)
+#define mayfuse(as, ref) ((ref) > as->fuseref)
+#define neverfuse(as) (as->fuseref == FUSE_DISABLED)
+#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t))
+#define opisfusableload(o) \
+ ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
+ (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
+
+/* Sparse limit checks using a red zone before the actual limit. */
+#define MCLIM_REDZONE 64
+
+static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
+{
+ lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
+}
+
+static LJ_AINLINE void checkmclim(ASMState *as)
+{
+#ifdef LUA_USE_ASSERT
+ if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
+ IRIns *ir = IR(as->curins+1);
+ fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp,
+ as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
+ lua_assert(0);
+ }
+#endif
+ if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
+#ifdef LUA_USE_ASSERT
+ as->mcp_prev = as->mcp;
+#endif
+}
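+
+/* Illustration (simplified excerpt of the main assembly loop further down):
+**
+**   for (as->curins--; as->curins > as->stopins; as->curins--) {
+**     checkmclim(as);              (cheap compare of as->mcp vs. as->mclim)
+**     asm_ir(as, IR(as->curins));  (emits a few instructions downwards)
+**   }
+**
+** Emitting one IR instruction may dip into the 64 byte red zone below
+** mclim, but the next checkmclim() call bails out via asm_mclimit() before
+** the reserved bottom at mcbot is overrun. The LUA_USE_ASSERT build
+** additionally verifies that no more than MCLIM_REDZONE bytes were emitted
+** between two consecutive checks.
+*/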
+
+#ifdef RID_NUM_KREF
+#define ra_iskref(ref) ((ref) < RID_NUM_KREF)
+#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref)))
+#define ra_krefk(as, ref) (as->krefk[(ref)])
+
+static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
+{
+ IRRef ref = (IRRef)(r - RID_MIN_KREF);
+ as->krefk[ref] = k;
+ as->cost[r] = REGCOST(ref, ref);
+}
+
+#else
+#define ra_iskref(ref) 0
+#define ra_krefreg(ref) RID_MIN_GPR
+#define ra_krefk(as, ref) 0
+#endif
+
+/* Arch-specific field offsets. */
+static const uint8_t field_ofs[IRFL__MAX+1] = {
+#define FLOFS(name, ofs) (uint8_t)(ofs),
+IRFLDEF(FLOFS)
+#undef FLOFS
+ 0
+};
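+
+/* Illustration (assumed IRFLDEF entry, see lj_ir.h): an entry along the
+** lines of
+**   _(STR_LEN, offsetof(GCstr, len))
+** is turned by FLOFS into "(uint8_t)(offsetof(GCstr, len))," so that
+** field_ofs[IRFL_STR_LEN] yields the byte offset the backends add to the
+** object pointer when emitting a field load.
+*/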
+
+/* -- Target-specific instruction emitter --------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_emit_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_emit_arm.h"
+#elif LJ_TARGET_PPC
+#include "lj_emit_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_emit_mips.h"
+#else
+#error "Missing instruction emitter for target CPU"
+#endif
+
+/* -- Register allocator debugging ---------------------------------------- */
+
+/* #define LUAJIT_DEBUG_RA */
+
+#ifdef LUAJIT_DEBUG_RA
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#define RIDNAME(name) #name,
+static const char *const ra_regname[] = {
+ GPRDEF(RIDNAME)
+ FPRDEF(RIDNAME)
+ VRIDDEF(RIDNAME)
+ NULL
+};
+#undef RIDNAME
+
+static char ra_dbg_buf[65536];
+static char *ra_dbg_p;
+static char *ra_dbg_merge;
+static MCode *ra_dbg_mcp;
+
+static void ra_dstart(void)
+{
+ ra_dbg_p = ra_dbg_buf;
+ ra_dbg_merge = NULL;
+ ra_dbg_mcp = NULL;
+}
+
+static void ra_dflush(void)
+{
+ fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
+ ra_dstart();
+}
+
+static void ra_dprintf(ASMState *as, const char *fmt, ...)
+{
+ char *p;
+ va_list argp;
+ va_start(argp, fmt);
+ p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
+ ra_dbg_mcp = NULL;
+ p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
+ for (;;) {
+ const char *e = strchr(fmt, '$');
+ if (e == NULL) break;
+ memcpy(p, fmt, (size_t)(e-fmt));
+ p += e-fmt;
+ if (e[1] == 'r') {
+ Reg r = va_arg(argp, Reg) & RID_MASK;
+ if (r <= RID_MAX) {
+ const char *q;
+ for (q = ra_regname[r]; *q; q++)
+ *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
+ } else {
+ *p++ = '?';
+ lua_assert(0);
+ }
+ } else if (e[1] == 'f' || e[1] == 'i') {
+ IRRef ref;
+ if (e[1] == 'f')
+ ref = va_arg(argp, IRRef);
+ else
+ ref = va_arg(argp, IRIns *) - as->ir;
+ if (ref >= REF_BIAS)
+ p += sprintf(p, "%04d", ref - REF_BIAS);
+ else
+ p += sprintf(p, "K%03d", REF_BIAS - ref);
+ } else if (e[1] == 's') {
+ uint32_t slot = va_arg(argp, uint32_t);
+ p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
+ } else if (e[1] == 'x') {
+ p += sprintf(p, "%08x", va_arg(argp, int32_t));
+ } else {
+ lua_assert(0);
+ }
+ fmt = e+2;
+ }
+ va_end(argp);
+ while (*fmt)
+ *p++ = *fmt++;
+ *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
+ if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
+ fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
+ p = ra_dbg_buf;
+ }
+ ra_dbg_p = p;
+}
+
+#define RA_DBG_START() ra_dstart()
+#define RA_DBG_FLUSH() ra_dflush()
+#define RA_DBG_REF() \
+ do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
+ ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
+#define RA_DBGX(x) ra_dprintf x
+
+#else
+#define RA_DBG_START() ((void)0)
+#define RA_DBG_FLUSH() ((void)0)
+#define RA_DBG_REF() ((void)0)
+#define RA_DBGX(x) ((void)0)
+#endif
+
+/* -- Register allocator -------------------------------------------------- */
+
+#define ra_free(as, r) rset_set(as->freeset, (r))
+#define ra_modified(as, r) rset_set(as->modset, (r))
+#define ra_weak(as, r) rset_set(as->weakset, (r))
+#define ra_noweak(as, r) rset_clear(as->weakset, (r))
+
+#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
+
+/* Setup register allocator. */
+static void ra_setup(ASMState *as)
+{
+ Reg r;
+ /* Initially all regs (except the stack pointer) are free for use. */
+ as->freeset = RSET_INIT;
+ as->modset = RSET_EMPTY;
+ as->weakset = RSET_EMPTY;
+ as->phiset = RSET_EMPTY;
+ memset(as->phireg, 0, sizeof(as->phireg));
+ for (r = RID_MIN_GPR; r < RID_MAX; r++)
+ as->cost[r] = REGCOST(~0u, 0u);
+}
+
+/* Rematerialize constants. */
+static Reg ra_rematk(ASMState *as, IRRef ref)
+{
+ IRIns *ir;
+ Reg r;
+ if (ra_iskref(ref)) {
+ r = ra_krefreg(ref);
+ lua_assert(!rset_test(as->freeset, r));
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_loadi(as, r, ra_krefk(as, ref));
+ return r;
+ }
+ ir = IR(ref);
+ r = ir->r;
+ lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
+ ra_free(as, r);
+ ra_modified(as, r);
+ ir->r = RID_INIT; /* Do not keep any hint. */
+ RA_DBGX((as, "remat $i $r", ir, r));
+#if !LJ_SOFTFP
+ if (ir->o == IR_KNUM) {
+ emit_loadn(as, r, ir_knum(ir));
+ } else
+#endif
+ if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
+ ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */
+ emit_getgl(as, r, jit_base);
+ } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
+ lua_assert(irt_isnil(ir->t)); /* REF_NIL stores ASMREF_L register. */
+ emit_getgl(as, r, jit_L);
+#if LJ_64
+ } else if (ir->o == IR_KINT64) {
+ emit_loadu64(as, r, ir_kint64(ir)->u64);
+#endif
+ } else {
+ lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
+ ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
+ emit_loadi(as, r, ir->i);
+ }
+ return r;
+}
+
+/* Force a spill. Allocate a new spill slot if needed. */
+static int32_t ra_spill(ASMState *as, IRIns *ir)
+{
+ int32_t slot = ir->s;
+ if (!ra_hasspill(slot)) {
+ if (irt_is64(ir->t)) {
+ slot = as->evenspill;
+ as->evenspill += 2;
+ } else if (as->oddspill) {
+ slot = as->oddspill;
+ as->oddspill = 0;
+ } else {
+ slot = as->evenspill;
+ as->oddspill = slot+1;
+ as->evenspill += 2;
+ }
+ if (as->evenspill > 256)
+ lj_trace_err(as->J, LJ_TRERR_SPILLOV);
+ ir->s = (uint8_t)slot;
+ }
+ return sps_scale(slot);
+}
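+
+/* Worked example for the slot pairing above: starting from, say,
+** evenspill = 2 and oddspill = 0, a 32 bit spill takes slot 2 and leaves
+** oddspill = 3, evenspill = 4; the next 32 bit spill reuses the odd slot 3;
+** a following 64 bit spill takes the aligned pair 4/5 and bumps evenspill
+** to 6. sps_scale() converts a slot number into the actual stack offset.
+*/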
+
+/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
+static Reg ra_releasetmp(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ Reg r = ir->r;
+ lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
+ ra_free(as, r);
+ ra_modified(as, r);
+ ir->r = RID_INIT;
+ return r;
+}
+
+/* Restore a register (marked as free). Rematerialize or force a spill. */
+static Reg ra_restore(ASMState *as, IRRef ref)
+{
+ if (emit_canremat(ref)) {
+ return ra_rematk(as, ref);
+ } else {
+ IRIns *ir = IR(ref);
+ int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */
+ Reg r = ir->r;
+ lua_assert(ra_hasreg(r));
+ ra_sethint(ir->r, r); /* Keep hint. */
+ ra_free(as, r);
+ if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */
+ ra_modified(as, r);
+ RA_DBGX((as, "restore $i $r", ir, r));
+ emit_spload(as, ir, r, ofs);
+ }
+ return r;
+ }
+}
+
+/* Save a register to a spill slot. */
+static void ra_save(ASMState *as, IRIns *ir, Reg r)
+{
+ RA_DBGX((as, "save $i $r", ir, r));
+ emit_spstore(as, ir, r, sps_scale(ir->s));
+}
+
+#define MINCOST(name) \
+ if (rset_test(RSET_ALL, RID_##name) && \
+ LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
+ cost = as->cost[RID_##name];
+
+/* Evict the register with the lowest cost, forcing a restore. */
+static Reg ra_evict(ASMState *as, RegSet allow)
+{
+ IRRef ref;
+ RegCost cost = ~(RegCost)0;
+ lua_assert(allow != RSET_EMPTY);
+ if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
+ GPRDEF(MINCOST)
+ } else {
+ FPRDEF(MINCOST)
+ }
+ ref = regcost_ref(cost);
+ lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
+ /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
+ if (!irref_isk(ref) && (as->weakset & allow)) {
+ IRIns *ir = IR(ref);
+ if (!rset_test(as->weakset, ir->r))
+ ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
+ }
+ return ra_restore(as, ref);
+}
+
+/* Pick any register (marked as free). Evict on-demand. */
+static Reg ra_pick(ASMState *as, RegSet allow)
+{
+ RegSet pick = as->freeset & allow;
+ if (!pick)
+ return ra_evict(as, allow);
+ else
+ return rset_picktop(pick);
+}
+
+/* Get a scratch register (marked as free). */
+static Reg ra_scratch(ASMState *as, RegSet allow)
+{
+ Reg r = ra_pick(as, allow);
+ ra_modified(as, r);
+ RA_DBGX((as, "scratch $r", r));
+ return r;
+}
+
+/* Evict all registers from a set (if not free). */
+static void ra_evictset(ASMState *as, RegSet drop)
+{
+ RegSet work;
+ as->modset |= drop;
+#if !LJ_SOFTFP
+ work = (drop & ~as->freeset) & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+#endif
+ work = (drop & ~as->freeset);
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+}
+
+/* Evict (rematerialize) all registers allocated to constants. */
+static void ra_evictk(ASMState *as)
+{
+ RegSet work;
+#if !LJ_SOFTFP
+ work = ~as->freeset & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ if (emit_canremat(ref) && irref_isk(ref)) {
+ ra_rematk(as, ref);
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+#endif
+ work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ if (emit_canremat(ref) && irref_isk(ref)) {
+ ra_rematk(as, ref);
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+}
+
+#ifdef RID_NUM_KREF
+/* Allocate a register for a constant. */
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
+{
+ /* First try to find a register which already holds the same constant. */
+ RegSet pick, work = ~as->freeset & RSET_GPR;
+ Reg r;
+ while (work) {
+ IRRef ref;
+ r = rset_pickbot(work);
+ ref = regcost_ref(as->cost[r]);
+ if (ref < ASMREF_L &&
+ k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
+ return r;
+ rset_clear(work, r);
+ }
+ pick = as->freeset & allow;
+ if (pick) {
+ /* Constants should preferably get unmodified registers. */
+ if ((pick & ~as->modset))
+ pick &= ~as->modset;
+ r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
+ } else {
+ r = ra_evict(as, allow);
+ }
+ RA_DBGX((as, "allock $x $r", k, r));
+ ra_setkref(as, r, k);
+ rset_clear(as->freeset, r);
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a specific register for a constant. */
+static void ra_allockreg(ASMState *as, int32_t k, Reg r)
+{
+ Reg kr = ra_allock(as, k, RID2RSET(r));
+ if (kr != r) {
+ IRIns irdummy;
+ irdummy.t.irt = IRT_INT;
+ ra_scratch(as, RID2RSET(r));
+ emit_movrr(as, &irdummy, r, kr);
+ }
+}
+#else
+#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k))
+#endif
+
+/* Allocate a register for ref from the allowed set of registers.
+** Note: this function assumes the ref does NOT have a register yet!
+** Picks an optimal register, sets the cost and marks the register as non-free.
+*/
+static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ RegSet pick = as->freeset & allow;
+ Reg r;
+ lua_assert(ra_noreg(ir->r));
+ if (pick) {
+ /* First check register hint from propagation or PHI. */
+ if (ra_hashint(ir->r)) {
+ r = ra_gethint(ir->r);
+ if (rset_test(pick, r)) /* Use hint register if possible. */
+ goto found;
+ /* Rematerialization is cheaper than missing a hint. */
+ if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
+ ra_rematk(as, regcost_ref(as->cost[r]));
+ goto found;
+ }
+ RA_DBGX((as, "hintmiss $f $r", ref, r));
+ }
+ /* Invariants should preferably get unmodified registers. */
+ if (ref < as->loopref && !irt_isphi(ir->t)) {
+ if ((pick & ~as->modset))
+ pick &= ~as->modset;
+ r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
+ } else {
+ /* We've got plenty of regs, so get callee-save regs if possible. */
+ if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
+ pick &= ~RSET_SCRATCH;
+ r = rset_picktop(pick);
+ }
+ } else {
+ r = ra_evict(as, allow);
+ }
+found:
+ RA_DBGX((as, "alloc $f $r", ref, r));
+ ir->r = (uint8_t)r;
+ rset_clear(as->freeset, r);
+ ra_noweak(as, r);
+ as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
+ return r;
+}
+
+/* Allocate a register on-demand. */
+static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ /* Note: allow is ignored if the register is already allocated. */
+ if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Rename register allocation and emit move. */
+static void ra_rename(ASMState *as, Reg down, Reg up)
+{
+ IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
+ IRIns *ir = IR(ref);
+ ir->r = (uint8_t)up;
+ as->cost[down] = 0;
+ lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
+ lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
+ ra_free(as, down); /* 'down' is free ... */
+ ra_modified(as, down);
+ rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */
+ ra_noweak(as, up);
+ RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
+ emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */
+ if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */
+ lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
+ ren = tref_ref(lj_ir_emit(as->J));
+ as->ir = as->T->ir; /* The IR may have been reallocated. */
+ IR(ren)->r = (uint8_t)down;
+ IR(ren)->s = SPS_NONE;
+ }
+}
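+
+/* Why the move above is inverted (illustration): code is assembled
+** backwards, so instructions already emitted execute *after* this point and
+** were allocated while the value still lived in 'down'. Instructions
+** emitted from now on execute *before* this point and will leave the value
+** in 'up'. The move therefore copies 'up' into 'down' right here, and the
+** IR_RENAME entry added above (when the ref has no spill slot) records the
+** switch for snapshot handling.
+*/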
+
+/* Pick a destination register (marked as free).
+** Caveat: allow is ignored if there's already a destination register.
+** Use ra_destreg() to get a specific register.
+*/
+static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
+{
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ } else {
+ if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
+ dest = ra_gethint(dest);
+ ra_modified(as, dest);
+ RA_DBGX((as, "dest $r", dest));
+ } else {
+ dest = ra_scratch(as, allow);
+ }
+ ir->r = dest;
+ }
+ if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
+ return dest;
+}
+
+/* Force a specific destination register (marked as free). */
+static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
+{
+ Reg dest = ra_dest(as, ir, RID2RSET(r));
+ if (dest != r) {
+ lua_assert(rset_test(as->freeset, r));
+ ra_modified(as, r);
+ emit_movrr(as, ir, dest, r);
+ }
+}
+
+#if LJ_TARGET_X86ORX64
+/* Propagate dest register to left reference. Emit moves as needed.
+** This is a required fixup step for all 2-operand machine instructions.
+*/
+static void ra_left(ASMState *as, Reg dest, IRRef lref)
+{
+ IRIns *ir = IR(lref);
+ Reg left = ir->r;
+ if (ra_noreg(left)) {
+ if (irref_isk(lref)) {
+ if (ir->o == IR_KNUM) {
+ cTValue *tv = ir_knum(ir);
+ /* FP remat needs a load except for +0. Still better than eviction. */
+ if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
+ emit_loadn(as, dest, tv);
+ return;
+ }
+#if LJ_64
+ } else if (ir->o == IR_KINT64) {
+ emit_loadu64(as, dest, ir_kint64(ir)->u64);
+ return;
+#endif
+ } else {
+ lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
+ ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
+ emit_loadi(as, dest, ir->i);
+ return;
+ }
+ }
+ if (!ra_hashint(left) && !iscrossref(as, lref))
+ ra_sethint(ir->r, dest); /* Propagate register hint. */
+ left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
+ }
+ ra_noweak(as, left);
+ /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
+ if (dest != left) {
+ /* Use register renaming if dest is the PHI reg. */
+ if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
+ ra_modified(as, left);
+ ra_rename(as, left, dest);
+ } else {
+ emit_movrr(as, ir, dest, left);
+ }
+ }
+}
+#else
+/* Similar to ra_left, except we override any hints. */
+static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
+{
+ IRIns *ir = IR(lref);
+ Reg left = ir->r;
+ if (ra_noreg(left)) {
+ ra_sethint(ir->r, dest); /* Propagate register hint. */
+ left = ra_allocref(as, lref,
+ (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
+ }
+ ra_noweak(as, left);
+ if (dest != left) {
+ /* Use register renaming if dest is the PHI reg. */
+ if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
+ ra_modified(as, left);
+ ra_rename(as, left, dest);
+ } else {
+ emit_movrr(as, ir, dest, left);
+ }
+ }
+}
+#endif
+
+#if !LJ_64
+/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
+static void ra_destpair(ASMState *as, IRIns *ir)
+{
+ Reg destlo = ir->r, desthi = (ir+1)->r;
+ /* First spill unrelated refs blocking the destination registers. */
+ if (!rset_test(as->freeset, RID_RETLO) &&
+ destlo != RID_RETLO && desthi != RID_RETLO)
+ ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
+ if (!rset_test(as->freeset, RID_RETHI) &&
+ destlo != RID_RETHI && desthi != RID_RETHI)
+ ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
+ /* Next free the destination registers (if any). */
+ if (ra_hasreg(destlo)) {
+ ra_free(as, destlo);
+ ra_modified(as, destlo);
+ } else {
+ destlo = RID_RETLO;
+ }
+ if (ra_hasreg(desthi)) {
+ ra_free(as, desthi);
+ ra_modified(as, desthi);
+ } else {
+ desthi = RID_RETHI;
+ }
+ /* Check for conflicts and shuffle the registers as needed. */
+ if (destlo == RID_RETHI) {
+ if (desthi == RID_RETLO) {
+#if LJ_TARGET_X86
+ *--as->mcp = XI_XCHGa + RID_RETHI;
+#else
+ emit_movrr(as, ir, RID_RETHI, RID_TMP);
+ emit_movrr(as, ir, RID_RETLO, RID_RETHI);
+ emit_movrr(as, ir, RID_TMP, RID_RETLO);
+#endif
+ } else {
+ emit_movrr(as, ir, RID_RETHI, RID_RETLO);
+ if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
+ }
+ } else if (desthi == RID_RETLO) {
+ emit_movrr(as, ir, RID_RETLO, RID_RETHI);
+ if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
+ } else {
+ if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
+ if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
+ }
+ /* Restore spill slots (if any). */
+ if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
+ if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
+}
+#endif
+
+/* -- Snapshot handling ----------------------------------------------------- */
+
+/* Can we rematerialize a KNUM instead of forcing a spill? */
+static int asm_snap_canremat(ASMState *as)
+{
+ Reg r;
+ for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
+ if (irref_isk(regcost_ref(as->cost[r])))
+ return 1;
+ return 0;
+}
+
+/* Check whether a sunk store corresponds to an allocation. */
+static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
+{
+ if (irs->s == 255) {
+ if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+ irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
+ IRIns *irk = IR(irs->op1);
+ if (irk->o == IR_AREF || irk->o == IR_HREFK)
+ irk = IR(irk->op1);
+ return (IR(irk->op1) == ira);
+ }
+ return 0;
+ } else {
+ return (ira + irs->s == irs); /* Quick check. */
+ }
+}
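+
+/* Reading of the quick check above: the sink pass appears to store the
+** forward distance from the allocation to each sunk store in irs->s, so
+** "ira + irs->s == irs" is a cheap identity test. The value 255 marks an
+** out-of-range distance; in that case the store's address operand
+** (AREF/HREFK chain) is walked back to find the underlying allocation.
+*/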
+
+/* Allocate register or spill slot for a ref that escapes to a snapshot. */
+static void asm_snap_alloc1(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) {
+ if (ir->r == RID_SINK) {
+ ir->r = RID_SUNK;
+#if LJ_HASFFI
+ if (ir->o == IR_CNEWI) { /* Allocate CNEWI value. */
+ asm_snap_alloc1(as, ir->op2);
+ if (LJ_32 && (ir+1)->o == IR_HIOP)
+ asm_snap_alloc1(as, (ir+1)->op2);
+ } else
+#endif
+ { /* Allocate stored values for TNEW, TDUP and CNEW. */
+ IRIns *irs;
+ lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
+ for (irs = IR(as->snapref-1); irs > ir; irs--)
+ if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
+ lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+ irs->o == IR_FSTORE || irs->o == IR_XSTORE);
+ asm_snap_alloc1(as, irs->op2);
+ if (LJ_32 && (irs+1)->o == IR_HIOP)
+ asm_snap_alloc1(as, (irs+1)->op2);
+ }
+ }
+ } else {
+ RegSet allow;
+ if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
+ IRIns *irc;
+ for (irc = IR(as->curins); irc > ir; irc--)
+ if ((irc->op1 == ref || irc->op2 == ref) &&
+ !(irc->r == RID_SINK || irc->r == RID_SUNK))
+ goto nosink; /* Don't sink conversion if result is used. */
+ asm_snap_alloc1(as, ir->op1);
+ return;
+ }
+ nosink:
+ allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
+ if ((as->freeset & allow) ||
+ (allow == RSET_FPR && asm_snap_canremat(as))) {
+ /* Get a weak register if we have a free one or can rematerialize. */
+ Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */
+ if (!irt_isphi(ir->t))
+ ra_weak(as, r); /* But mark it as weakly referenced. */
+ checkmclim(as);
+ RA_DBGX((as, "snapreg $f $r", ref, ir->r));
+ } else {
+ ra_spill(as, ir); /* Otherwise force a spill slot. */
+ RA_DBGX((as, "snapspill $f $s", ref, ir->s));
+ }
+ }
+ }
+}
+
+/* Allocate refs escaping to a snapshot. */
+static void asm_snap_alloc(ASMState *as)
+{
+ SnapShot *snap = &as->T->snap[as->snapno];
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ if (!irref_isk(ref)) {
+ asm_snap_alloc1(as, ref);
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
+ lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
+ asm_snap_alloc1(as, ref+1);
+ }
+ }
+ }
+}
+
+/* All guards for a snapshot use the same exitno. This is currently the
+** same as the snapshot number. Since the exact origin of the exit cannot
+** be determined, all guards for the same snapshot must exit with the same
+** RegSP mapping.
+** A renamed ref which has been used in a prior guard for the same snapshot
+** would cause an inconsistency. The easy way out is to force a spill slot.
+*/
+static int asm_snap_checkrename(ASMState *as, IRRef ren)
+{
+ SnapShot *snap = &as->T->snap[as->snapno];
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
+ IRIns *ir = IR(ref);
+ ra_spill(as, ir); /* Register renamed, so force a spill slot. */
+ RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
+ return 1; /* Found. */
+ }
+ }
+ return 0; /* Not found. */
+}
+
+/* Prepare snapshot for next guard instruction. */
+static void asm_snap_prep(ASMState *as)
+{
+ if (as->curins < as->snapref) {
+ do {
+ if (as->snapno == 0) return; /* Called by sunk stores before snap #0. */
+ as->snapno--;
+ as->snapref = as->T->snap[as->snapno].ref;
+ } while (as->curins < as->snapref);
+ asm_snap_alloc(as);
+ as->snaprename = as->T->nins;
+ } else {
+ /* Process any renames above the highwater mark. */
+ for (; as->snaprename < as->T->nins; as->snaprename++) {
+ IRIns *ir = IR(as->snaprename);
+ if (asm_snap_checkrename(as, ir->op1))
+ ir->op2 = REF_BIAS-1; /* Kill rename. */
+ }
+ }
+}
+
+/* -- Miscellaneous helpers ----------------------------------------------- */
+
+/* Collect arguments from CALL* and CARG instructions. */
+static void asm_collectargs(ASMState *as, IRIns *ir,
+ const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n = CCI_NARGS(ci);
+ lua_assert(n <= CCI_NARGS_MAX*2); /* Account for split args. */
+ if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
+ while (n-- > 1) {
+ ir = IR(ir->op1);
+ lua_assert(ir->o == IR_CARG);
+ args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
+ }
+ args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
+ lua_assert(IR(ir->op1)->o != IR_CARG);
+}
+
+/* Reconstruct CCallInfo flags for CALLX*. */
+static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
+{
+ uint32_t nargs = 0;
+ if (ir->op1 != REF_NIL) { /* Count number of arguments first. */
+ IRIns *ira = IR(ir->op1);
+ nargs++;
+ while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
+ }
+#if LJ_HASFFI
+ if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */
+ CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
+ CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
+ nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
+#if LJ_TARGET_X86
+ nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
+#endif
+ }
+#endif
+ return (nargs | (ir->t.irt << CCI_OTSHIFT));
+}
+
+/* Calculate stack adjustment. */
+static int32_t asm_stack_adjust(ASMState *as)
+{
+ if (as->evenspill <= SPS_FIXED)
+ return 0;
+ return sps_scale(sps_align(as->evenspill));
+}
+
+/* Must match with hash*() in lj_tab.c. */
+static uint32_t ir_khash(IRIns *ir)
+{
+ uint32_t lo, hi;
+ if (irt_isstr(ir->t)) {
+ return ir_kstr(ir)->hash;
+ } else if (irt_isnum(ir->t)) {
+ lo = ir_knum(ir)->u32.lo;
+ hi = ir_knum(ir)->u32.hi << 1;
+ } else if (irt_ispri(ir->t)) {
+ lua_assert(!irt_isnil(ir->t));
+ return irt_type(ir->t)-IRT_FALSE;
+ } else {
+ lua_assert(irt_isgcv(ir->t));
+ lo = u32ptr(ir_kgc(ir));
+ hi = lo + HASH_BIAS;
+ }
+ return hashrot(lo, hi);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
+
+static void asm_snew(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
+ IRRef args[3];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* const char *str */
+ args[2] = ir->op2; /* size_t len */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+}
+
+static void asm_tnew(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
+ IRRef args[2];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* uint32_t ahsize */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCtab * */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
+}
+
+static void asm_tdup(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
+ IRRef args[2];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* const GCtab *kt */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCtab * */
+ asm_gencall(as, ci, args);
+}
+
+static void asm_gc_check(ASMState *as);
+
+/* Explicit GC step. */
+static void asm_gcstep(ASMState *as, IRIns *ir)
+{
+ IRIns *ira;
+ for (ira = IR(as->stopins+1); ira < ir; ira++)
+ if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
+ (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
+ ra_used(ira))
+ as->gcsteps++;
+ if (as->gcsteps)
+ asm_gc_check(as);
+ as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */
+}
+
+/* -- PHI and loop handling ----------------------------------------------- */
+
+/* Break a PHI cycle by renaming to a free register (evict if needed). */
+static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
+ RegSet allow)
+{
+ RegSet candidates = blocked & allow;
+ if (candidates) { /* If this register file has candidates. */
+ /* Note: the set for ra_pick cannot be empty, since each register file
+ ** has some registers never allocated to PHIs.
+ */
+ Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */
+ if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */
+ candidates = candidates & ~blockedby;
+ down = rset_picktop(candidates); /* Pick candidate PHI register. */
+ ra_rename(as, down, up); /* And rename it to the free register. */
+ }
+}
+
+/* PHI register shuffling.
+**
+** The allocator tries hard to preserve PHI register assignments across
+** the loop body. Most of the time this loop does nothing, since there
+** are no register mismatches.
+**
+** If a register mismatch is detected and ...
+** - the register is currently free: rename it.
+** - the register is blocked by an invariant: restore/remat and rename it.
+** - Otherwise the register is used by another PHI, so mark it as blocked.
+**
+** The renames are order-sensitive, so just retry the loop if a register
+** is marked as blocked, but has been freed in the meantime. A cycle is
+** detected if all of the blocked registers are allocated. To break the
+** cycle rename one of them to a free register and retry.
+**
+** Note that PHI spill slots are kept in sync and don't need to be shuffled.
+*/
+static void asm_phi_shuffle(ASMState *as)
+{
+ RegSet work;
+
+ /* Find and resolve PHI register mismatches. */
+ for (;;) {
+ RegSet blocked = RSET_EMPTY;
+ RegSet blockedby = RSET_EMPTY;
+ RegSet phiset = as->phiset;
+ while (phiset) { /* Check all left PHI operand registers. */
+ Reg r = rset_pickbot(phiset);
+ IRIns *irl = IR(as->phireg[r]);
+ Reg left = irl->r;
+ if (r != left) { /* Mismatch? */
+ if (!rset_test(as->freeset, r)) { /* PHI register blocked? */
+ IRRef ref = regcost_ref(as->cost[r]);
+ /* Blocked by other PHI (w/reg)? */
+ if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
+ rset_set(blocked, r);
+ if (ra_hasreg(left))
+ rset_set(blockedby, left);
+ left = RID_NONE;
+ } else { /* Otherwise grab register from invariant. */
+ ra_restore(as, ref);
+ checkmclim(as);
+ }
+ }
+ if (ra_hasreg(left)) {
+ ra_rename(as, left, r);
+ checkmclim(as);
+ }
+ }
+ rset_clear(phiset, r);
+ }
+ if (!blocked) break; /* Finished. */
+ if (!(as->freeset & blocked)) { /* Break cycles if none are free. */
+ asm_phi_break(as, blocked, blockedby, RSET_GPR);
+ if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
+ checkmclim(as);
+ } /* Else retry some more renames. */
+ }
+
+ /* Restore/remat invariants whose registers are modified inside the loop. */
+#if !LJ_SOFTFP
+ work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+#endif
+ work = as->modset & ~(as->freeset | as->phiset);
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+
+ /* Allocate and save all unsaved PHI regs and clear marks. */
+ work = as->phiset;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef lref = as->phireg[r];
+ IRIns *ir = IR(lref);
+ if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */
+ irt_clearmark(ir->t); /* Handled here, so clear marker now. */
+ ra_alloc1(as, lref, RID2RSET(r));
+ ra_save(as, ir, r); /* Save to spill slot inside the loop. */
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+}
+
+/* Copy unsynced left/right PHI spill slots. Rarely needed. */
+static void asm_phi_copyspill(ASMState *as)
+{
+ int need = 0;
+ IRIns *ir;
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
+ if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
+ need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */
+ if ((need & 1)) { /* Copy integer spill slots. */
+#if !LJ_TARGET_X86ORX64
+ Reg r = RID_TMP;
+#else
+ Reg r = RID_RET;
+ if ((as->freeset & RSET_GPR))
+ r = rset_pickbot((as->freeset & RSET_GPR));
+ else
+ emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+#endif
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
+ if (ra_hasspill(ir->s)) {
+ IRIns *irl = IR(ir->op1);
+ if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
+ emit_spstore(as, irl, r, sps_scale(irl->s));
+ emit_spload(as, ir, r, sps_scale(ir->s));
+ checkmclim(as);
+ }
+ }
+ }
+#if LJ_TARGET_X86ORX64
+ if (!rset_test(as->freeset, r))
+ emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+#endif
+ }
+#if !LJ_SOFTFP
+ if ((need & 2)) { /* Copy FP spill slots. */
+#if LJ_TARGET_X86
+ Reg r = RID_XMM0;
+#else
+ Reg r = RID_FPRET;
+#endif
+ if ((as->freeset & RSET_FPR))
+ r = rset_pickbot((as->freeset & RSET_FPR));
+ if (!rset_test(as->freeset, r))
+ emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
+ if (ra_hasspill(ir->s)) {
+ IRIns *irl = IR(ir->op1);
+ if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
+ emit_spstore(as, irl, r, sps_scale(irl->s));
+ emit_spload(as, ir, r, sps_scale(ir->s));
+ checkmclim(as);
+ }
+ }
+ }
+ if (!rset_test(as->freeset, r))
+ emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+ }
+#endif
+}
+
+/* Emit renames for left PHIs which are only spilled outside the loop. */
+static void asm_phi_fixup(ASMState *as)
+{
+ RegSet work = as->phiset;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef lref = as->phireg[r];
+ IRIns *ir = IR(lref);
+ /* Left PHI gained a spill slot before the loop? */
+ if (irt_ismarked(ir->t) && ra_hasspill(ir->s)) {
+ IRRef ren;
+ lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
+ ren = tref_ref(lj_ir_emit(as->J));
+ as->ir = as->T->ir; /* The IR may have been reallocated. */
+ IR(ren)->r = (uint8_t)r;
+ IR(ren)->s = SPS_NONE;
+ }
+ irt_clearmark(ir->t); /* Always clear marker. */
+ rset_clear(work, r);
+ }
+}
+
+/* Setup right PHI reference. */
+static void asm_phi(ASMState *as, IRIns *ir)
+{
+ RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
+ ~as->phiset;
+ RegSet afree = (as->freeset & allow);
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ if (ir->r == RID_SINK) /* Sink PHI. */
+ return;
+ /* Spill slot shuffling is not implemented yet (but rarely needed). */
+ if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
+ lj_trace_err(as->J, LJ_TRERR_NYIPHI);
+ /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
+ if ((afree & (afree-1))) { /* Two or more free registers? */
+ Reg r;
+ if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */
+ r = ra_allocref(as, ir->op2, allow);
+ } else { /* Duplicate right PHI, need a copy (rare). */
+ r = ra_scratch(as, allow);
+ emit_movrr(as, irr, r, irr->r);
+ }
+ ir->r = (uint8_t)r;
+ rset_set(as->phiset, r);
+ as->phireg[r] = (IRRef1)ir->op1;
+ irt_setmark(irl->t); /* Marks left PHIs _with_ register. */
+ if (ra_noreg(irl->r))
+ ra_sethint(irl->r, r); /* Set register hint for left PHI. */
+ } else { /* Otherwise allocate a spill slot. */
+ /* This is overly restrictive, but it triggers only on synthetic code. */
+ if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
+ lj_trace_err(as->J, LJ_TRERR_NYIPHI);
+ ra_spill(as, ir);
+ irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */
+ }
+}
+
+static void asm_loop_fixup(ASMState *as);
+
+/* Middle part of a loop. */
+static void asm_loop(ASMState *as)
+{
+ MCode *mcspill;
+ /* LOOP is a guard, so the snapno is up to date. */
+ as->loopsnapno = as->snapno;
+ if (as->gcsteps)
+ asm_gc_check(as);
+ /* LOOP marks the transition from the variant to the invariant part. */
+ as->flagmcp = as->invmcp = NULL;
+ as->sectref = 0;
+ if (!neverfuse(as)) as->fuseref = 0;
+ asm_phi_shuffle(as);
+ mcspill = as->mcp;
+ asm_phi_copyspill(as);
+ asm_loop_fixup(as);
+ as->mcloop = as->mcp;
+ RA_DBGX((as, "===== LOOP ====="));
+ if (!as->realign) RA_DBG_FLUSH();
+ if (as->mcp != mcspill)
+ emit_jmp(as, mcspill);
+}
+
+/* -- Target-specific assembler ------------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_asm_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_asm_arm.h"
+#elif LJ_TARGET_PPC
+#include "lj_asm_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_asm_mips.h"
+#else
+#error "Missing assembler for target CPU"
+#endif
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Head of a root trace. */
+static void asm_head_root(ASMState *as)
+{
+ int32_t spadj;
+ asm_head_root_base(as);
+ emit_setvmstate(as, (int32_t)as->T->traceno);
+ spadj = asm_stack_adjust(as);
+ as->T->spadjust = (uint16_t)spadj;
+ emit_spsub(as, spadj);
+ /* Root traces assume a checked stack for the starting proto. */
+ as->T->topslot = gcref(as->T->startpt)->pt.framesize;
+}
+
+/* Head of a side trace.
+**
+** The current simplistic algorithm requires that all slots inherited
+** from the parent are live in a register between pass 2 and pass 3. This
+** avoids the complexity of stack slot shuffling. But of course this may
+** overflow the register set in some cases and cause the dreaded error:
+** "NYI: register coalescing too complex". A refined algorithm is needed.
+*/
+static void asm_head_side(ASMState *as)
+{
+ IRRef1 sloadins[RID_MAX];
+ RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */
+ RegSet live = RSET_EMPTY; /* Live parent registers. */
+ IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */
+ int32_t spadj, spdelta;
+ int pass2 = 0;
+ int pass3 = 0;
+ IRRef i;
+
+ allow = asm_head_side_base(as, irp, allow);
+
+ /* Scan all parent SLOADs and collect register dependencies. */
+ for (i = as->stopins; i > REF_BASE; i--) {
+ IRIns *ir = IR(i);
+ RegSP rs;
+ lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
+ (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL);
+ rs = as->parentmap[i - REF_FIRST];
+ if (ra_hasreg(ir->r)) {
+ rset_clear(allow, ir->r);
+ if (ra_hasspill(ir->s)) {
+ ra_save(as, ir, ir->r);
+ checkmclim(as);
+ }
+ } else if (ra_hasspill(ir->s)) {
+ irt_setmark(ir->t);
+ pass2 = 1;
+ }
+ if (ir->r == rs) { /* Coalesce matching registers right now. */
+ ra_free(as, ir->r);
+ } else if (ra_hasspill(regsp_spill(rs))) {
+ if (ra_hasreg(ir->r))
+ pass3 = 1;
+ } else if (ra_used(ir)) {
+ sloadins[rs] = (IRRef1)i;
+ rset_set(live, rs); /* Block live parent register. */
+ }
+ }
+
+ /* Calculate stack frame adjustment. */
+ spadj = asm_stack_adjust(as);
+ spdelta = spadj - (int32_t)as->parent->spadjust;
+ if (spdelta < 0) { /* Don't shrink the stack frame. */
+ spadj = (int32_t)as->parent->spadjust;
+ spdelta = 0;
+ }
+ as->T->spadjust = (uint16_t)spadj;
+
+ /* Reload spilled target registers. */
+ if (pass2) {
+ for (i = as->stopins; i > REF_BASE; i--) {
+ IRIns *ir = IR(i);
+ if (irt_ismarked(ir->t)) {
+ RegSet mask;
+ Reg r;
+ RegSP rs;
+ irt_clearmark(ir->t);
+ rs = as->parentmap[i - REF_FIRST];
+ if (!ra_hasspill(regsp_spill(rs)))
+ ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */
+ else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
+ continue; /* Same spill slot, do nothing. */
+ mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
+ if (mask == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ r = ra_allocref(as, i, mask);
+ ra_save(as, ir, r);
+ rset_clear(allow, r);
+ if (r == rs) { /* Coalesce matching registers right now. */
+ ra_free(as, r);
+ rset_clear(live, r);
+ } else if (ra_hasspill(regsp_spill(rs))) {
+ pass3 = 1;
+ }
+ checkmclim(as);
+ }
+ }
+ }
+
+ /* Store trace number and adjust stack frame relative to the parent. */
+ emit_setvmstate(as, (int32_t)as->T->traceno);
+ emit_spsub(as, spdelta);
+
+#if !LJ_TARGET_X86ORX64
+ /* Restore BASE register from parent spill slot. */
+ if (ra_hasspill(irp->s))
+ emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
+#endif
+
+ /* Restore target registers from parent spill slots. */
+ if (pass3) {
+ RegSet work = ~as->freeset & RSET_ALL;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ RegSP rs = as->parentmap[ref - REF_FIRST];
+ rset_clear(work, r);
+ if (ra_hasspill(regsp_spill(rs))) {
+ int32_t ofs = sps_scale(regsp_spill(rs));
+ ra_free(as, r);
+ emit_spload(as, IR(ref), r, ofs);
+ checkmclim(as);
+ }
+ }
+ }
+
+ /* Shuffle registers to match up target regs with parent regs. */
+ for (;;) {
+ RegSet work;
+
+ /* Repeatedly coalesce free live registers by moving to their target. */
+ while ((work = as->freeset & live) != RSET_EMPTY) {
+ Reg rp = rset_pickbot(work);
+ IRIns *ir = IR(sloadins[rp]);
+ rset_clear(live, rp);
+ rset_clear(allow, rp);
+ ra_free(as, ir->r);
+ emit_movrr(as, ir, ir->r, rp);
+ checkmclim(as);
+ }
+
+ /* We're done if no live registers remain. */
+ if (live == RSET_EMPTY)
+ break;
+
+ /* Break cycles by renaming one target to a temp. register. */
+ if (live & RSET_GPR) {
+ RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
+ if (tmpset == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
+ }
+ if (!LJ_SOFTFP && (live & RSET_FPR)) {
+ RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
+ if (tmpset == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
+ }
+ checkmclim(as);
+ /* Continue with coalescing to fix up the broken cycle(s). */
+ }
+
+ /* Inherit top stack slot already checked by parent trace. */
+ as->T->topslot = as->parent->topslot;
+ if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */
+#ifdef EXITSTATE_CHECKEXIT
+ /* Highest exit + 1 indicates stack check. */
+ ExitNo exitno = as->T->nsnap;
+#else
+ /* Reuse the parent exit in the context of the parent trace. */
+ ExitNo exitno = as->J->exitno;
+#endif
+ as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */
+ asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
+ }
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Get base slot for a snapshot. */
+static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n;
+ for (n = snap->nent; n > 0; n--) {
+ SnapEntry sn = map[n-1];
+ if ((sn & SNAP_FRAME)) {
+ *gotframe = 1;
+ return snap_slot(sn);
+ }
+ }
+ return 0;
+}
+
+/* Link to another trace. */
+static void asm_tail_link(ASMState *as)
+{
+ SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */
+ SnapShot *snap = &as->T->snap[snapno];
+ int gotframe = 0;
+ BCReg baseslot = asm_baseslot(as, snap, &gotframe);
+
+ as->topslot = snap->topslot;
+ checkmclim(as);
+ ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
+
+ if (as->T->link == 0) {
+ /* Setup fixed registers for exit to interpreter. */
+ const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]);
+ int32_t mres;
+ if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
+ BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
+ if (bc_isret(bc_op(*retpc)))
+ pc = retpc;
+ }
+ ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
+ ra_allockreg(as, i32ptr(pc), RID_LPC);
+ mres = (int32_t)(snap->nslots - baseslot);
+ switch (bc_op(*pc)) {
+ case BC_CALLM: case BC_CALLMT:
+ mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break;
+ case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
+ case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
+ default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
+ }
+ ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */
+ } else if (baseslot) {
+ /* Save modified BASE for linking to trace with higher start frame. */
+ emit_setgl(as, RID_BASE, jit_base);
+ }
+ emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
+
+ /* Sync the interpreter state with the on-trace state. */
+ asm_stack_restore(as, snap);
+
+ /* Root traces that add frames need to check the stack at the end. */
+ if (!as->parent && gotframe)
+ asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Clear reg/sp for all instructions and add register hints. */
+static void asm_setup_regsp(ASMState *as)
+{
+ GCtrace *T = as->T;
+ int sink = T->sinktags;
+ IRRef nins = T->nins;
+ IRIns *ir, *lastir;
+ int inloop;
+#if LJ_TARGET_ARM
+ uint32_t rload = 0xa6402a64;
+#endif
+
+ ra_setup(as);
+
+ /* Clear reg/sp for constants. */
+ for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++)
+ ir->prev = REGSP_INIT;
+
+ /* REF_BASE is used for implicit references to the BASE register. */
+ lastir->prev = REGSP_HINT(RID_BASE);
+
+ ir = IR(nins-1);
+ if (ir->o == IR_RENAME) {
+ do { ir--; nins--; } while (ir->o == IR_RENAME);
+ T->nins = nins; /* Remove any renames left over from ASM restart. */
+ }
+ as->snaprename = nins;
+ as->snapref = nins;
+ as->snapno = T->nsnap;
+
+ as->stopins = REF_BASE;
+ as->orignins = nins;
+ as->curins = nins;
+
+ /* Setup register hints for parent link instructions. */
+ ir = IR(REF_FIRST);
+ if (as->parent) {
+ uint16_t *p;
+ lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir);
+ if (lastir - ir > LJ_MAX_JSLOTS)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ as->stopins = (IRRef)((lastir-1) - as->ir);
+ for (p = as->parentmap; ir < lastir; ir++) {
+ RegSP rs = ir->prev;
+ *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */
+ if (!ra_hasspill(regsp_spill(rs)))
+ ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
+ else
+ ir->prev = REGSP_INIT;
+ }
+ }
+
+ inloop = 0;
+ as->evenspill = SPS_FIRST;
+ for (lastir = IR(nins); ir < lastir; ir++) {
+ if (sink) {
+ if (ir->r == RID_SINK)
+ continue;
+ if (ir->r == RID_SUNK) { /* Revert after ASM restart. */
+ ir->r = RID_SINK;
+ continue;
+ }
+ }
+ switch (ir->o) {
+ case IR_LOOP:
+ inloop = 1;
+ break;
+#if LJ_TARGET_ARM
+ case IR_SLOAD:
+ if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
+ break;
+ /* fallthrough */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
+ ir->prev = (uint16_t)REGSP_HINT((rload & 15));
+ rload = lj_ror(rload, 4);
+ continue;
+#endif
+ case IR_CALLXS: {
+ CCallInfo ci;
+ ci.flags = asm_callx_flags(as, ir);
+ ir->prev = asm_setup_call_slots(as, ir, &ci);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+ continue;
+ }
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: {
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ ir->prev = asm_setup_call_slots(as, ir, ci);
+ if (inloop)
+ as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
+ (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
+ continue;
+ }
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+ case IR_HIOP:
+ switch ((ir-1)->o) {
+#if LJ_SOFTFP && LJ_TARGET_ARM
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ if (ra_hashint((ir-1)->r)) {
+ ir->prev = (ir-1)->prev + 1;
+ continue;
+ }
+ break;
+#endif
+#if !LJ_SOFTFP && LJ_NEED_FP64
+ case IR_CONV:
+ if (irt_isfp((ir-1)->t)) {
+ ir->prev = REGSP_HINT(RID_FPRET);
+ continue;
+ }
+ /* fallthrough */
+#endif
+ case IR_CALLN: case IR_CALLXS:
+#if LJ_SOFTFP
+ case IR_MIN: case IR_MAX:
+#endif
+ (ir-1)->prev = REGSP_HINT(RID_RETLO);
+ ir->prev = REGSP_HINT(RID_RETHI);
+ continue;
+ default:
+ break;
+ }
+ break;
+#endif
+#if LJ_SOFTFP
+ case IR_MIN: case IR_MAX:
+ if ((ir+1)->o != IR_HIOP) break;
+ /* fallthrough */
+#endif
+ /* C calls evict all scratch regs and return results in RID_RET. */
+ case IR_SNEW: case IR_XSNEW: case IR_NEWREF:
+ if (REGARG_NUMGPR < 3 && as->evenspill < 3)
+ as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
+ case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR:
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset = RSET_SCRATCH;
+ continue;
+ case IR_STRTO: case IR_OBAR:
+ if (inloop)
+ as->modset = RSET_SCRATCH;
+ break;
+#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP
+ case IR_ATAN2: case IR_LDEXP:
+#endif
+ case IR_POW:
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+#if LJ_TARGET_X86ORX64
+ ir->prev = REGSP_HINT(RID_XMM0);
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
+#else
+ ir->prev = REGSP_HINT(RID_FPRET);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+#endif
+ continue;
+ }
+ /* fallthrough for integer POW */
+ case IR_DIV: case IR_MOD:
+ if (!irt_isnum(ir->t)) {
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset |= (RSET_SCRATCH & RSET_GPR);
+ continue;
+ }
+ break;
+ case IR_FPMATH:
+#if LJ_TARGET_X86ORX64
+ if (ir->op2 == IRFPM_EXP2) { /* May be joined to lj_vm_pow_sse. */
+ ir->prev = REGSP_HINT(RID_XMM0);
+#if !LJ_64
+ if (as->evenspill < 4) /* Leave room for 16 byte scratch area. */
+ as->evenspill = 4;
+#endif
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
+ continue;
+ } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
+ ir->prev = REGSP_HINT(RID_XMM0);
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
+ continue;
+ }
+ break;
+#else
+ ir->prev = REGSP_HINT(RID_FPRET);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+ continue;
+#endif
+#if LJ_TARGET_X86ORX64
+ /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
+ case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
+ if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
+ IR(ir->op2)->r = REGSP_HINT(RID_ECX);
+ if (inloop)
+ rset_set(as->modset, RID_ECX);
+ }
+ break;
+#endif
+ /* Do not propagate hints across type conversions or loads. */
+ case IR_TOBIT:
+ case IR_XLOAD:
+#if !LJ_TARGET_ARM
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+#endif
+ break;
+ case IR_CONV:
+ if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
+ (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
+ break;
+ /* fallthrough */
+ default:
+ /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
+ if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
+ ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
+ ir->prev = IR(ir->op1)->prev;
+ continue;
+ }
+ break;
+ }
+ ir->prev = REGSP_INIT;
+ }
+ if ((as->evenspill & 1))
+ as->oddspill = as->evenspill++;
+ else
+ as->oddspill = 0;
+}
+
+/* -- Assembler core ------------------------------------------------------ */
+
+/* Assemble a trace. */
+void lj_asm_trace(jit_State *J, GCtrace *T)
+{
+ ASMState as_;
+ ASMState *as = &as_;
+ MCode *origtop;
+
+ /* Ensure an initialized instruction beyond the last one for HIOP checks. */
+ J->cur.nins = lj_ir_nextins(J);
+ J->cur.ir[J->cur.nins].o = IR_NOP;
+
+ /* Setup initial state. Copy some fields to reduce indirections. */
+ as->J = J;
+ as->T = T;
+ as->ir = T->ir;
+ as->flags = J->flags;
+ as->loopref = J->loopref;
+ as->realign = NULL;
+ as->loopinv = 0;
+ as->parent = J->parent ? traceref(J, J->parent) : NULL;
+
+ /* Reserve MCode memory. */
+ as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
+ as->mcp = as->mctop;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ asm_setup_target(as);
+
+ do {
+ as->mcp = as->mctop;
+#ifdef LUA_USE_ASSERT
+ as->mcp_prev = as->mcp;
+#endif
+ as->curins = T->nins;
+ RA_DBG_START();
+ RA_DBGX((as, "===== STOP ====="));
+
+ /* General trace setup. Emit tail of trace. */
+ asm_tail_prep(as);
+ as->mcloop = NULL;
+ as->flagmcp = NULL;
+ as->topslot = 0;
+ as->gcsteps = 0;
+ as->sectref = as->loopref;
+ as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
+ asm_setup_regsp(as);
+ if (!as->loopref)
+ asm_tail_link(as);
+
+ /* Assemble a trace in linear backwards order. */
+ for (as->curins--; as->curins > as->stopins; as->curins--) {
+ IRIns *ir = IR(as->curins);
+ lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */
+ if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
+ continue; /* Dead-code elimination can be soooo easy. */
+ if (irt_isguard(ir->t))
+ asm_snap_prep(as);
+ RA_DBG_REF();
+ checkmclim(as);
+ asm_ir(as, ir);
+ }
+ } while (as->realign); /* Retry in case the MCode needs to be realigned. */
+
+ /* Emit head of trace. */
+ RA_DBG_REF();
+ checkmclim(as);
+ if (as->gcsteps > 0) {
+ as->curins = as->T->snap[0].ref;
+ asm_snap_prep(as); /* The GC check is a guard. */
+ asm_gc_check(as);
+ }
+ ra_evictk(as);
+ if (as->parent)
+ asm_head_side(as);
+ else
+ asm_head_root(as);
+ asm_phi_fixup(as);
+
+ RA_DBGX((as, "===== START ===="));
+ RA_DBG_FLUSH();
+ if (as->freeset != RSET_ALL)
+ lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */
+
+ /* Set trace entry point before fixing up tail to allow link to self. */
+ T->mcode = as->mcp;
+ T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
+ if (!as->loopref)
+ asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
+ T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
+ lj_mcode_sync(T->mcode, origtop);
+}
+
+#undef IR
+
+#endif
diff --git a/3rdparty/lua/src/lj_asm.h b/3rdparty/lua/src/lj_asm.h
index dd71c34..a88e7da 100644
--- a/3rdparty/lua/src/lj_asm.h
+++ b/3rdparty/lua/src/lj_asm.h
@@ -1,17 +1,17 @@
-/*
-** IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_ASM_H
-#define _LJ_ASM_H
-
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T);
-LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno,
- MCode *target);
-#endif
-
-#endif
+/*
+** IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ASM_H
+#define _LJ_ASM_H
+
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T);
+LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno,
+ MCode *target);
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_asm_arm.h b/3rdparty/lua/src/lj_asm_arm.h
index d19ddc3..a66573c 100644
--- a/3rdparty/lua/src/lj_asm_arm.h
+++ b/3rdparty/lua/src/lj_asm_arm.h
@@ -1,2361 +1,2358 @@
-/*
-** ARM IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Register allocator extensions --------------------------------------- */
-
-/* Allocate a register with a hint. */
-static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!ra_hashint(r) && !iscrossref(as, ref))
- ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
- r = ra_allocref(as, ref, allow);
- }
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate a scratch register pair. */
-static Reg ra_scratchpair(ASMState *as, RegSet allow)
-{
- RegSet pick1 = as->freeset & allow;
- RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
- Reg r;
- if (pick2) {
- r = rset_picktop(pick2);
- } else {
- RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
- if (pick) {
- r = rset_picktop(pick);
- ra_restore(as, regcost_ref(as->cost[r+1]));
- } else {
- pick = pick1 & (allow << 1) & RSET_GPRODD;
- if (pick) {
- r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
- } else {
- r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
- ra_restore(as, regcost_ref(as->cost[r+1]));
- }
- }
- }
- lua_assert(rset_test(RSET_GPREVEN, r));
- ra_modified(as, r);
- ra_modified(as, r+1);
- RA_DBGX((as, "scratchpair $r $r", r, r+1));
- return r;
-}
-
-#if !LJ_SOFTFP
-/* Allocate two source registers for three-operand instructions. */
-static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- Reg left = irl->r, right = irr->r;
- if (ra_hasreg(left)) {
- ra_noweak(as, left);
- if (ra_noreg(right))
- right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
- else
- ra_noweak(as, right);
- } else if (ra_hasreg(right)) {
- ra_noweak(as, right);
- left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
- } else if (ra_hashint(right)) {
- right = ra_allocref(as, ir->op2, allow);
- left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
- } else {
- left = ra_allocref(as, ir->op1, allow);
- right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
- }
- return left | (right << 8);
-}
-#endif
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Generate an exit stub group at the bottom of the reserved MCode memory. */
-static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
-{
- MCode *mxp = as->mcbot;
- int i;
- if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
- asm_mclimit(as);
- /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
- *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
- *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
- mxp++;
- *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */
- *mxp++ = group*EXITSTUBS_PER_GROUP;
- for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
- *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
- lj_mcode_sync(as->mcbot, mxp);
- lj_mcode_commitbot(as->J, mxp);
- as->mcbot = mxp;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- return mxp - EXITSTUBS_PER_GROUP;
-}
-
-/* Setup all needed exit stubs. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
- ExitNo i;
- if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
- lj_trace_err(as->J, LJ_TRERR_SNAPOV);
- for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
- if (as->J->exitstubgroup[i] == NULL)
- as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
-}
-
-/* Emit conditional branch to exit for guard. */
-static void asm_guardcc(ASMState *as, ARMCC cc)
-{
- MCode *target = exitstub_addr(as->J, as->snapno);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->loopinv = 1;
- *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
- emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
- return;
- }
- emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
-}
-
-/* -- Operand fusion ------------------------------------------------------ */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if there's no conflicting instruction between curins and ref. */
-static int noconflict(ASMState *as, IRRef ref, IROp conflict)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref)
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse the array base of colocated arrays. */
-static int32_t asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
- return (int32_t)sizeof(GCtab);
- return 0;
-}
-
-/* Fuse array/hash/upvalue reference into register+offset operand. */
-static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
- int lim)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- if (ir->o == IR_AREF) {
- if (mayfuse(as, ref)) {
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (ofs > -lim && ofs < lim) {
- *ofsp = ofs;
- return ra_alloc1(as, refa, allow);
- }
- }
- }
- } else if (ir->o == IR_HREFK) {
- if (mayfuse(as, ref)) {
- int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- if (ofs < lim) {
- *ofsp = ofs;
- return ra_alloc1(as, ir->op1, allow);
- }
- }
- } else if (ir->o == IR_UREFC) {
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
- *ofsp = (ofs & 255); /* Mask out less bits to allow LDRD. */
- return ra_allock(as, (ofs & ~255), allow);
- }
- }
- }
- *ofsp = 0;
- return ra_alloc1(as, ref, allow);
-}
-
-/* Fuse m operand into arithmetic/logic instructions. */
-static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_hasreg(ir->r)) {
- ra_noweak(as, ir->r);
- return ARMF_M(ir->r);
- } else if (irref_isk(ref)) {
- uint32_t k = emit_isk12(ai, ir->i);
- if (k)
- return k;
- } else if (mayfuse(as, ref)) {
- if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
- Reg m = ra_alloc1(as, ir->op1, allow);
- ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
- ir->o == IR_BSHR ? ARMSH_LSR :
- ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
- if (irref_isk(ir->op2)) {
- return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
- } else {
- Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
- return m | ARMF_RSH(sh, s);
- }
- } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
- Reg m = ra_alloc1(as, ir->op1, allow);
- return m | ARMF_SH(ARMSH_LSL, 1);
- }
- }
- return ra_allocref(as, ref, allow);
-}
-
-/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
-static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
- irref_isk(ir->op2) && IR(ir->op2)->i == 2)
- return ir->op1;
- return 0; /* No fusion. */
-}
-
-/* Fuse XLOAD/XSTORE reference into load/store operand. */
-static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
- RegSet allow, int32_t ofs)
-{
- IRIns *ir = IR(ref);
- Reg base;
- if (ra_noreg(ir->r) && canfuse(as, ir)) {
- int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
- (ai & 0x04000000) ? 4096 : 256;
- if (ir->o == IR_ADD) {
- int32_t ofs2;
- if (irref_isk(ir->op2) &&
- (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
- (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
- ofs = ofs2;
- ref = ir->op1;
- } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
- IRRef lref = ir->op1, rref = ir->op2;
- Reg rn, rm;
- if ((ai & 0x04000000)) {
- IRRef sref = asm_fuselsl2(as, rref);
- if (sref) {
- rref = sref;
- ai |= ARMF_SH(ARMSH_LSL, 2);
- } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
- lref = rref;
- rref = sref;
- ai |= ARMF_SH(ARMSH_LSL, 2);
- }
- }
- rn = ra_alloc1(as, lref, allow);
- rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
- if ((ai & 0x04000000)) ai |= ARMI_LS_R;
- emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
- return;
- }
- } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
- lua_assert(ofs == 0);
- ofs = (int32_t)sizeof(GCstr);
- if (irref_isk(ir->op2)) {
- ofs += IR(ir->op2)->i;
- ref = ir->op1;
- } else if (irref_isk(ir->op1)) {
- ofs += IR(ir->op1)->i;
- ref = ir->op2;
- } else {
- /* NYI: Fuse ADD with constant. */
- Reg rn = ra_alloc1(as, ir->op1, allow);
- uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
- if ((ai & 0x04000000))
- emit_lso(as, ai, rd, rd, ofs);
- else
- emit_lsox(as, ai, rd, rd, ofs);
- emit_dn(as, ARMI_ADD^m, rd, rn);
- return;
- }
- if (ofs <= -lim || ofs >= lim) {
- Reg rn = ra_alloc1(as, ref, allow);
- Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
- if ((ai & 0x04000000)) ai |= ARMI_LS_R;
- emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
- return;
- }
- }
- }
- base = ra_alloc1(as, ref, allow);
-#if !LJ_SOFTFP
- if ((ai & 0x08000000))
- emit_vlso(as, ai, rd, base, ofs);
- else
-#endif
- if ((ai & 0x04000000))
- emit_lso(as, ai, rd, base, ofs);
- else
- emit_lsox(as, ai, rd, base, ofs);
-}
-
-#if !LJ_SOFTFP
-/* Fuse to multiply-add/sub instruction. */
-static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
-{
- IRRef lref = ir->op1, rref = ir->op2;
- IRIns *irm;
- if (lref != rref &&
- ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
- ra_noreg(irm->r)) ||
- (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
- (rref = lref, ai = air, ra_noreg(irm->r))))) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
- Reg right, left = ra_alloc2(as, irm,
- rset_exclude(rset_exclude(RSET_FPR, dest), add));
- right = (left >> 8); left &= 255;
- emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
- if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
- return 1;
- }
- return 0;
-}
-#endif
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = 0;
-#if LJ_SOFTFP
- Reg gpr = REGARG_FIRSTGPR;
-#else
- Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
-#endif
- if ((void *)ci->func)
- emit_call(as, (void *)ci->func);
-#if !LJ_SOFTFP
- for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
- as->cost[gpr] = REGCOST(~0u, ASMREF_L);
- gpr = REGARG_FIRSTGPR;
-#endif
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- IRIns *ir = IR(ref);
-#if !LJ_SOFTFP
- if (ref && irt_isfp(ir->t)) {
- RegSet of = as->freeset;
- Reg src;
- if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
- if (irt_isnum(ir->t)) {
- if (fpr <= REGARG_LASTFPR) {
- ra_leftov(as, fpr, ref);
- fpr++;
- continue;
- }
- } else if (fprodd) { /* Ick. */
- src = ra_alloc1(as, ref, RSET_FPR);
- emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
- fprodd = 0;
- continue;
- } else if (fpr <= REGARG_LASTFPR) {
- ra_leftov(as, fpr, ref);
- fprodd = fpr++;
- continue;
- }
- /* Workaround to protect argument GPRs from being used for remat. */
- as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
- src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
- as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
- fprodd = 0;
- goto stackfp;
- }
- /* Workaround to protect argument GPRs from being used for remat. */
- as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
- src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
- as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
- if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
- if (irt_isnum(ir->t)) {
- lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */
- emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
- gpr += 2;
- } else {
- emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
- gpr++;
- }
- } else {
- stackfp:
- if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
- emit_spstore(as, ir, src, ofs);
- ofs += irt_isnum(ir->t) ? 8 : 4;
- }
- } else
-#endif
- {
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
- if (ref) ra_leftov(as, gpr, ref);
- gpr++;
- } else {
- if (ref) {
- Reg r = ra_alloc1(as, ref, RSET_GPR);
- emit_spstore(as, ir, r, ofs);
- }
- ofs += 4;
- }
- }
- }
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = ((ir+1)->o == IR_HIOP);
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- lua_assert(!irt_ispri(ir->t));
- if (!LJ_SOFTFP && irt_isfp(ir->t)) {
- if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
- Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
- if (irt_isnum(ir->t))
- emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
- else
- emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
- } else if (hiop) {
- ra_destpair(as, ir);
- } else {
- ra_destreg(as, ir, RID_RET);
- }
- }
- UNUSED(ci);
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- if (irref_isk(func)) { /* Call to constant address. */
- ci.func = (ASMFunction)(void *)(irf->i);
- } else { /* Need a non-argument register for indirect calls. */
- Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
- emit_m(as, ARMI_BLXr, freg);
- ci.func = (ASMFunction)(void *)0;
- }
- asm_gencall(as, &ci, args);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- /* Need to force a spill on REF_BASE now to update the stack slot. */
- emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guardcc(as, CC_NE);
- emit_nm(as, ARMI_CMP, RID_TMP,
- ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
- emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-#if !LJ_SOFTFP
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_guardcc(as, CC_NE);
- emit_d(as, ARMI_VMRS, 0);
- emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
- emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_FPR;
- Reg left = ra_alloc1(as, ir->op1, allow);
- Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
- Reg tmp = ra_scratch(as, rset_clear(allow, right));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
-}
-#endif
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
-#if !LJ_SOFTFP
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
-#endif
- IRRef lref = ir->op1;
- /* 64 bit integer conversions are handled by SPLIT. */
- lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
-#if LJ_SOFTFP
- /* FP conversions are handled by SPLIT. */
- lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
- /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
-#else
- lua_assert(irt_type(ir->t) != st);
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
- (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
- } else { /* Integer to FP conversion. */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- ARMIns ai = irt_isfloat(ir->t) ?
- (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
- (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
- emit_dm(as, ai, (dest & 15), (dest & 15));
- emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
- }
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg left = ra_alloc1(as, lref, RSET_FPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- ARMIns ai;
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- ai = irt_isint(ir->t) ?
- (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
- (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
- emit_dm(as, ai, (tmp & 15), (left & 15));
- }
- } else
-#endif
- {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if ((as->flags & JIT_F_ARMV6)) {
- ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
- st == IRT_U8 ? ARMI_UXTB :
- st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
- emit_dm(as, ai, dest, left);
- } else if (st == IRT_U8) {
- emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
- } else {
- uint32_t shift = st == IRT_I8 ? 24 : 16;
- ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
- emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
- emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
- }
- } else { /* Handle 32/32 bit no-op (cast). */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
-}
-
-#if !LJ_SOFTFP && LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- IRCallID id;
- CCallInfo ci;
- IRRef args[2];
- args[0] = (ir-1)->op1;
- args[1] = ir->op1;
- if (st == IRT_NUM || st == IRT_FLOAT) {
- id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
- ir--;
- } else {
- id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
- }
- ci = lj_ir_callinfo[id];
-#if !LJ_ABI_SOFTFP
- ci.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! */
-#endif
- asm_setupresult(as, ir, &ci);
- asm_gencall(as, &ci, args);
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- Reg rlo = 0, rhi = 0, tmp;
- int destused = ra_used(ir);
- int32_t ofs = 0;
- ra_evictset(as, RSET_SCRATCH);
-#if LJ_SOFTFP
- if (destused) {
- if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
- (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
- int i;
- for (i = 0; i < 2; i++) {
- Reg r = (ir+i)->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- ra_modified(as, r);
- emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
- }
- }
- ofs = sps_scale(ir->s);
- destused = 0;
- } else {
- rhi = ra_dest(as, ir+1, RSET_GPR);
- rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
- }
- }
- asm_guardcc(as, CC_EQ);
- if (destused) {
- emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
- emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
- }
-#else
- UNUSED(rhi);
- if (destused) {
- if (ra_hasspill(ir->s)) {
- ofs = sps_scale(ir->s);
- destused = 0;
- if (ra_hasreg(ir->r)) {
- ra_free(as, ir->r);
- ra_modified(as, ir->r);
- emit_spload(as, ir, ir->r, ofs);
- }
- } else {
- rlo = ra_dest(as, ir, RSET_FPR);
- }
- }
- asm_guardcc(as, CC_EQ);
- if (destused)
- emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
-#endif
- emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- tmp = ra_releasetmp(as, ASMREF_TMP1);
- if (ofs == 0)
- emit_dm(as, ARMI_MOV, tmp, RID_SP);
- else
- emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
-}
-
-/* Get pointer to TValue. */
-static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isnum(ir->t)) {
- if (irref_isk(ref)) {
- /* Use the number constant itself as a TValue. */
- ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
- } else {
-#if LJ_SOFTFP
- lua_assert(0);
-#else
- /* Otherwise force a spill and use the spill slot. */
- emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
-#endif
- }
- } else {
- /* Otherwise use [sp] and [sp+4] to hold the TValue. */
- RegSet allow = rset_exclude(RSET_GPR, dest);
- Reg type;
- emit_dm(as, ARMI_MOV, dest, RID_SP);
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- emit_lso(as, ARMI_STR, src, RID_SP, 0);
- }
- if ((ir+1)->o == IR_HIOP)
- type = ra_alloc1(as, ref+1, allow);
- else
- type = ra_allock(as, irt_toitype(ir->t), allow);
- emit_lso(as, ARMI_STR, type, RID_SP, 4);
- }
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx, base;
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
- if (k) {
- base = ra_alloc1(as, refa, RSET_GPR);
- emit_dn(as, ARMI_ADD^k, dest, base);
- return;
- }
- }
- base = ra_alloc1(as, ir->op1, RSET_GPR);
- idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir, IROp merge)
-{
- RegSet allow = RSET_GPR;
- int destused = ra_used(ir);
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
- IRRef refkey = ir->op2;
- IRIns *irkey = IR(refkey);
- IRType1 kt = irkey->t;
- int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
- uint32_t khash;
- MCLabel l_end, l_loop;
- rset_clear(allow, tab);
- if (!irref_isk(refkey) || irt_isstr(kt)) {
-#if LJ_SOFTFP
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- if (irkey[1].o == IR_HIOP) {
- if (ra_hasreg((irkey+1)->r)) {
- keynumhi = (irkey+1)->r;
- keyhi = RID_TMP;
- ra_noweak(as, keynumhi);
- } else {
- keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
- }
- rset_clear(allow, keynumhi);
- khi = 0;
- }
-#else
- if (irt_isnum(kt)) {
- key = ra_scratch(as, allow);
- rset_clear(allow, key);
- keyhi = keynumhi = ra_scratch(as, allow);
- rset_clear(allow, keyhi);
- khi = 0;
- } else {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- }
-#endif
- } else if (irt_isnum(kt)) {
- int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
- k = emit_isk12(ARMI_CMP, val);
- if (!k) {
- key = ra_allock(as, val, allow);
- rset_clear(allow, key);
- }
- val = (int32_t)ir_knum(irkey)->u32.hi;
- khi = emit_isk12(ARMI_CMP, val);
- if (!khi) {
- keyhi = ra_allock(as, val, allow);
- rset_clear(allow, keyhi);
- }
- } else if (!irt_ispri(kt)) {
- k = emit_isk12(ARMI_CMP, irkey->i);
- if (!k) {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- }
- }
- if (!irt_ispri(kt))
- tmp = ra_scratchpair(as, allow);
-
- /* Key not found in chain: jump to exit (if merged) or load niltv. */
- l_end = emit_label(as);
- as->invmcp = NULL;
- if (merge == IR_NE)
- asm_guardcc(as, CC_AL);
- else if (destused)
- emit_loada(as, dest, niltvg(J2G(as->J)));
-
- /* Follow hash chain until the end. */
- l_loop = --as->mcp;
- emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
- emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));
-
- /* Type and value comparison. */
- if (merge == IR_EQ)
- asm_guardcc(as, CC_EQ);
- else
- emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
- if (!irt_ispri(kt)) {
- emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
- emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
- emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
- } else {
- emit_n(as, ARMI_CMP^khi, tmp);
- emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
- }
- *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);
-
- /* Load main position relative to tab->node into dest. */
- khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- } else {
- emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
- emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
- if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */
- emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
- emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
- } else if (irref_isk(refkey)) {
- emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
- rset_exclude(rset_exclude(RSET_GPR, tab), dest));
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
- } else { /* Must match hash*() in lj_tab.c. */
- if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */
- if (keyhi == RID_TMP)
- emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
- emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
- }
- emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
- emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
- tmp, tmp+1, tmp);
- emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
- emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
- if (ra_hasreg(keynumhi)) {
- emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
- emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */
- emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
-#if !LJ_SOFTFP
- emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
- (ra_alloc1(as, refkey, RSET_FPR) & 15));
-#endif
- } else {
- emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
- emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
- rset_exclude(rset_exclude(RSET_GPR, tab), key));
- }
- }
- }
-}
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- int32_t kofs = ofs + (int32_t)offsetof(Node, key);
- Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg key = RID_NONE, type = RID_TMP, idx = node;
- RegSet allow = rset_exclude(RSET_GPR, node);
- lua_assert(ofs % sizeof(Node) == 0);
- if (ofs > 4095) {
- idx = dest;
- rset_clear(allow, dest);
- kofs = (int32_t)offsetof(Node, key);
- } else if (ra_hasreg(dest)) {
- emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
- }
- asm_guardcc(as, CC_NE);
- if (!irt_ispri(irkey->t)) {
- RegSet even = (as->freeset & allow);
- even = even & (even >> 1) & RSET_GPREVEN;
- if (even) {
- key = ra_scratch(as, even);
- if (rset_test(as->freeset, key+1)) {
- type = key+1;
- ra_modified(as, type);
- }
- } else {
- key = ra_scratch(as, allow);
- }
- rset_clear(allow, key);
- }
- rset_clear(allow, type);
- if (irt_isnum(irkey->t)) {
- emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
- (int32_t)ir_knum(irkey)->u32.hi, allow);
- emit_opk(as, ARMI_CMP, 0, key,
- (int32_t)ir_knum(irkey)->u32.lo, allow);
- } else {
- if (ra_hasreg(key))
- emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
- emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
- }
- emit_lso(as, ARMI_LDR, type, idx, kofs+4);
- if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
- if (ofs > 4095)
- emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- if (ir->r == RID_SINK)
- return;
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- /* NYI: Check that UREFO is still open and not aliasing a slot. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_lsptr(as, ARMI_LDR, dest, v);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- asm_guardcc(as, CC_NE);
- emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
- emit_opk(as, ARMI_ADD, dest, uv,
- (int32_t)offsetof(GCupval, tv), RSET_GPR);
- emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
- } else {
- emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
- }
- emit_lso(as, ARMI_LDR, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- UNUSED(as); UNUSED(ir);
- lua_assert(!ra_used(ir));
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef ref = ir->op2, refk = ir->op1;
- Reg r;
- if (irref_isk(ref)) {
- IRRef tmp = refk; refk = ref; ref = tmp;
- } else if (!irref_isk(refk)) {
- uint32_t k, m = ARMI_K12|sizeof(GCstr);
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- IRIns *irr = IR(ir->op2);
- if (ra_hasreg(irr->r)) {
- ra_noweak(as, irr->r);
- right = irr->r;
- } else if (mayfuse(as, irr->op2) &&
- irr->o == IR_ADD && irref_isk(irr->op2) &&
- (k = emit_isk12(ARMI_ADD,
- (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
- m = k;
- right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
- } else {
- right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_dn(as, ARMI_ADD^m, dest, dest);
- emit_dnm(as, ARMI_ADD, dest, left, right);
- return;
- }
- r = ra_alloc1(as, ref, RSET_GPR);
- emit_opk(as, ARMI_ADD, dest, r,
- sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static ARMIns asm_fxloadins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: return ARMI_LDRSB;
- case IRT_U8: return ARMI_LDRB;
- case IRT_I16: return ARMI_LDRSH;
- case IRT_U16: return ARMI_LDRH;
- case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
- case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
- default: return ARMI_LDR;
- }
-}
-
-static ARMIns asm_fxstoreins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: return ARMI_STRB;
- case IRT_I16: case IRT_U16: return ARMI_STRH;
- case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
- case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
- default: return ARMI_STR;
- }
-}
-
-static void asm_fload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
- ARMIns ai = asm_fxloadins(ir);
- int32_t ofs;
- if (ir->op2 == IRFL_TAB_ARRAY) {
- ofs = asm_fuseabase(as, ir->op1);
- if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
- emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
- return;
- }
- }
- ofs = field_ofs[ir->op2];
- if ((ai & 0x04000000))
- emit_lso(as, ai, dest, idx, ofs);
- else
- emit_lsox(as, ai, dest, idx, ofs);
-}
-
-static void asm_fstore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
- IRIns *irf = IR(ir->op1);
- Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
- int32_t ofs = field_ofs[irf->op2];
- ARMIns ai = asm_fxstoreins(ir);
- if ((ai & 0x04000000))
- emit_lso(as, ai, src, idx, ofs);
- else
- emit_lsox(as, ai, src, idx, ofs);
- }
-}
-
-static void asm_xload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir,
- (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
- lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
- asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
-}
-
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1(as, ir->op2,
- (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
- asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
- rset_exclude(RSET_GPR, src), ofs);
- }
-}
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
- IRType t = hiop ? IRT_NUM : irt_type(ir->t);
- Reg dest = RID_NONE, type = RID_NONE, idx;
- RegSet allow = RSET_GPR;
- int32_t ofs = 0;
- if (hiop && ra_used(ir+1)) {
- type = ra_dest(as, ir+1, allow);
- rset_clear(allow, type);
- }
- if (ra_used(ir)) {
- lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
- irt_isint(ir->t) || irt_isaddr(ir->t));
- dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
- rset_clear(allow, dest);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
- (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
- if (!hiop || type == RID_NONE) {
- rset_clear(allow, idx);
- if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
- rset_test((as->freeset & allow), dest+1)) {
- type = dest+1;
- ra_modified(as, type);
- } else {
- type = RID_TMP;
- }
- }
- asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
- emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
- if (ra_hasreg(dest)) {
-#if !LJ_SOFTFP
- if (t == IRT_NUM)
- emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
- else
-#endif
- emit_lso(as, ARMI_LDR, dest, idx, ofs);
- }
- emit_lso(as, ARMI_LDR, type, idx, ofs+4);
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- RegSet allow = RSET_GPR;
- Reg idx, src = RID_NONE, type = RID_NONE;
- int32_t ofs = 0;
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- src = ra_alloc1(as, ir->op2, RSET_FPR);
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
- emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
- } else
-#endif
- {
- int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
- if (!irt_ispri(ir->t)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- }
- if (hiop)
- type = ra_alloc1(as, (ir+1)->op2, allow);
- else
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
- if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
- emit_lso(as, ARMI_STR, type, idx, ofs+4);
- }
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
- int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
- IRType t = hiop ? IRT_NUM : irt_type(ir->t);
- Reg dest = RID_NONE, type = RID_NONE, base;
- RegSet allow = RSET_GPR;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
-#if LJ_SOFTFP
- lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */
- if (hiop && ra_used(ir+1)) {
- type = ra_dest(as, ir+1, allow);
- rset_clear(allow, type);
- }
-#else
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
- dest = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, dest);
- t = IRT_NUM; /* Continue with a regular number type check. */
- } else
-#endif
- if (ra_used(ir)) {
- Reg tmp = RID_NONE;
- if ((ir->op2 & IRSLOAD_CONVERT))
- tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
- lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
- irt_isint(ir->t) || irt_isaddr(ir->t));
- dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
- rset_clear(allow, dest);
- base = ra_alloc1(as, REF_BASE, allow);
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- if (t == IRT_INT) {
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
- t = IRT_NUM; /* Check for original type. */
- } else {
- emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
- emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
- t = IRT_INT; /* Check for original type. */
- }
- dest = tmp;
- }
- goto dotypecheck;
- }
- base = ra_alloc1(as, REF_BASE, allow);
-dotypecheck:
- rset_clear(allow, base);
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- if (ra_noreg(type)) {
- if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
- rset_test((as->freeset & allow), dest+1)) {
- type = dest+1;
- ra_modified(as, type);
- } else {
- type = RID_TMP;
- }
- }
- asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
- emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
- }
- if (ra_hasreg(dest)) {
-#if !LJ_SOFTFP
- if (t == IRT_NUM) {
- if (ofs < 1024) {
- emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
- } else {
- if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
- emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
- emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
- return;
- }
- } else
-#endif
- emit_lso(as, ARMI_LDR, dest, base, ofs);
- }
- if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- RegSet drop = RSET_SCRATCH;
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
-
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- if (ra_used(ir))
- ra_destreg(as, ir, RID_RET); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- int32_t ofs = sizeof(GCcdata);
- lua_assert(sz == 4 || sz == 8);
- if (sz == 8) {
- ofs += 4; ir++;
- lua_assert(ir->o == IR_HIOP);
- }
- for (;;) {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_lso(as, ARMI_STR, r, RID_RET, ofs);
- rset_clear(allow, r);
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; ir--;
- }
- }
- /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
- {
- uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
- Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
- emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
- emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
- emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
- if (k) emit_d(as, ARMI_MOV^k, RID_R1);
- }
- asm_gencall(as, ci, args);
- ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
- ra_releasetmp(as, ASMREF_TMP1));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
- rset_exclude(rset_exclude(RSET_GPR, tab), link));
- Reg mark = RID_TMP;
- MCLabel l_end = emit_label(as);
- emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
- emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
- emit_lso(as, ARMI_STR, tab, gr,
- (int32_t)offsetof(global_State, gc.grayagain));
- emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
- emit_lso(as, ARMI_LDR, link, gr,
- (int32_t)offsetof(global_State, gc.grayagain));
- emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
- emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
- emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj, val, tmp;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- if ((l_end[-1] >> 28) == CC_AL)
- l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
- else
- emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
- ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
- obj = IR(ir->op1)->r;
- tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
- emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
- emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
- val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
- emit_lso(as, ARMI_LDRB, tmp, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
- emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
-}
-
-/* -- Arithmetic and logic operations ------------------------------------- */
-
-#if !LJ_SOFTFP
-static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
-}
-
-static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
- emit_dm(as, ai, (dest & 15), (left & 15));
-}
-
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
- IRRef args[2];
- args[0] = irpp->op1;
- args[1] = irp->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
- return 1;
- }
- }
- return 0;
-}
-#endif
-
-static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
-{
- IRIns *ir;
- if (irref_isk(rref))
- return 0; /* Don't swap constants to the left. */
- if (irref_isk(lref))
- return 1; /* But swap constants to the right. */
- ir = IR(rref);
- if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
- (ir->o == IR_ADD && ir->op1 == ir->op2))
- return 0; /* Don't swap fusable operands to the left. */
- ir = IR(lref);
- if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
- (ir->o == IR_ADD && ir->op1 == ir->op2))
- return 1; /* But swap fusable operands to the right. */
- return 0; /* Otherwise don't swap. */
-}
-
-static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
-{
- IRRef lref = ir->op1, rref = ir->op2;
- Reg left, dest = ra_dest(as, ir, RSET_GPR);
- uint32_t m;
- if (asm_swapops(as, lref, rref)) {
- IRRef tmp = lref; lref = rref; rref = tmp;
- if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
- ai ^= (ARMI_SUB^ARMI_RSB);
- }
- left = ra_hintalloc(as, lref, dest, RSET_GPR);
- m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
- if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */
- asm_guardcc(as, CC_VS);
- ai |= ARMI_S;
- }
- emit_dn(as, ai^m, dest, left);
-}
-
-static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
-{
- if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */
- as->flagmcp = NULL;
- as->mcp++;
- ai |= ARMI_S;
- }
- asm_intop(as, ir, ai);
-}
-
-static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
-{
- if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */
- uint32_t cc = (as->mcp[1] >> 28);
- as->flagmcp = NULL;
- if (cc <= CC_NE) {
- as->mcp++;
- ai |= ARMI_S;
- } else if (cc == CC_GE) {
- *++as->mcp ^= ((CC_GE^CC_PL) << 28);
- ai |= ARMI_S;
- } else if (cc == CC_LT) {
- *++as->mcp ^= ((CC_LT^CC_MI) << 28);
- ai |= ARMI_S;
- } /* else: other conds don't work with bit ops. */
- }
- if (ir->op2 == 0) {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
- emit_d(as, ai^m, dest);
- } else {
- /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
- asm_intop(as, ir, ai);
- }
-}
-
-static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_dn(as, ai|ARMI_K12|0, dest, left);
-}
-
-/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
-static void asm_intmul(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
- Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- Reg tmp = RID_NONE;
- /* ARMv5 restriction: dest != left and dest_hi != left. */
- if (dest == left && left != right) { left = right; right = dest; }
- if (irt_isguard(ir->t)) { /* IR_MULOV */
- if (!(as->flags & JIT_F_ARMV6) && dest == left)
- tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
- asm_guardcc(as, CC_NE);
- emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
- emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
- } else {
- if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
- emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
- }
- /* Only need this for the dest == left == right case. */
- if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
-}
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
- asm_fparith(as, ir, ARMI_VADD_D);
- return;
- }
-#endif
- asm_intop_s(as, ir, ARMI_ADD);
-}
-
-static void asm_sub(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
- asm_fparith(as, ir, ARMI_VSUB_D);
- return;
- }
-#endif
- asm_intop_s(as, ir, ARMI_SUB);
-}
-
-static void asm_mul(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, ARMI_VMUL_D);
- return;
- }
-#endif
- asm_intmul(as, ir);
-}
-
-static void asm_neg(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- asm_fpunary(as, ir, ARMI_VNEG_D);
- return;
- }
-#endif
- asm_intneg(as, ir, ARMI_RSB);
-}
-
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-#if !LJ_SOFTFP
-static void asm_callround(ASMState *as, IRIns *ir, int id)
-{
- /* The modified regs must match the *.dasc implementation. */
- RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
- RID2RSET(RID_R3)|RID2RSET(RID_R12);
- RegSet of;
- Reg dest, src;
- ra_evictset(as, drop);
- dest = ra_dest(as, ir, RSET_FPR);
- emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
- emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
- id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
- (void *)lj_vm_trunc_sf);
- /* Workaround to protect argument GPRs from being used for remat. */
- of = as->freeset;
- as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
- as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
- src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */
- as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
- emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
-}
-#endif
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- if ((as->flags & JIT_F_ARMV6)) {
- emit_dm(as, ARMI_REV, dest, left);
- } else {
- Reg tmp2 = dest;
- if (tmp2 == left)
- tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
- emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
- emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
- emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
- emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
- }
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
-{
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
- /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- int32_t shift = (IR(ir->op2)->i & 31);
- emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
- }
-}
-
-static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
-{
- uint32_t kcmp = 0, kmov = 0;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- Reg right = 0;
- if (irref_isk(ir->op2)) {
- kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
- if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
- }
- if (!kmov) {
- kcmp = 0;
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- if (kmov || dest != right) {
- emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
- cc ^= 1; /* Must use opposite conditions for paired moves. */
- } else {
- cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */
- }
- if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
- emit_nm(as, ARMI_CMP^kcmp, left, right);
-}
-
-#if LJ_SOFTFP
-static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
- RegSet drop = RSET_SCRATCH;
- Reg r;
- IRRef args[4];
- args[0] = ir->op1; args[1] = (ir+1)->op1;
- args[2] = ir->op2; args[3] = (ir+1)->op2;
- /* __aeabi_cdcmple preserves r0-r3. */
- if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
- if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
- if (!rset_test(as->freeset, RID_R2) &&
- regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
- if (!rset_test(as->freeset, RID_R3) &&
- regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
- ra_evictset(as, drop);
- ra_destpair(as, ir);
- emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
- emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
- emit_call(as, (void *)ci->func);
- for (r = RID_R0; r <= RID_R3; r++)
- ra_leftov(as, r, args[r-RID_R0]);
-}
-#else
-static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
-{
- Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = ((left >> 8) & 15); left &= 15;
- if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
- if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
- emit_d(as, ARMI_VMRS, 0);
- emit_dm(as, ARMI_VCMP_D, left, right);
-}
-#endif
-
-static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
-{
-#if LJ_SOFTFP
- UNUSED(fcc);
-#else
- if (irt_isnum(ir->t))
- asm_fpmin_max(as, ir, fcc);
- else
-#endif
- asm_intmin_max(as, ir, cc);
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-/* Map of comparisons to flags. ORDER IR. */
-static const uint8_t asm_compmap[IR_ABC+1] = {
- /* op FP swp int cc FP cc */
- /* LT */ CC_GE + (CC_HS << 4),
- /* GE x */ CC_LT + (CC_HI << 4),
- /* LE */ CC_GT + (CC_HI << 4),
- /* GT x */ CC_LE + (CC_HS << 4),
- /* ULT x */ CC_HS + (CC_LS << 4),
- /* UGE */ CC_LO + (CC_LO << 4),
- /* ULE x */ CC_HI + (CC_LO << 4),
- /* UGT */ CC_LS + (CC_LS << 4),
- /* EQ */ CC_NE + (CC_NE << 4),
- /* NE */ CC_EQ + (CC_EQ << 4),
- /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */
-};
-
-#if LJ_SOFTFP
-/* FP comparisons. */
-static void asm_sfpcomp(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
- RegSet drop = RSET_SCRATCH;
- Reg r;
- IRRef args[4];
- int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
- args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
- args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
- /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
- for (r = RID_R0; r <= RID_R3; r++)
- if (!rset_test(as->freeset, r) &&
- regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
- ra_evictset(as, drop);
- asm_guardcc(as, (asm_compmap[ir->o] >> 4));
- emit_call(as, (void *)ci->func);
- for (r = RID_R0; r <= RID_R3; r++)
- ra_leftov(as, r, args[r-RID_R0]);
-}
-#else
-/* FP comparisons. */
-static void asm_fpcomp(ASMState *as, IRIns *ir)
-{
- Reg left, right;
- ARMIns ai;
- int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
- if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
- left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
- right = 0;
- ai = ARMI_VCMPZ_D;
- } else {
- left = ra_alloc2(as, ir, RSET_FPR);
- if (swp) {
- right = (left & 15); left = ((left >> 8) & 15);
- } else {
- right = ((left >> 8) & 15); left &= 15;
- }
- ai = ARMI_VCMP_D;
- }
- asm_guardcc(as, (asm_compmap[ir->o] >> 4));
- emit_d(as, ARMI_VMRS, 0);
- emit_dm(as, ai, left, right);
-}
-#endif
-
-/* Integer comparisons. */
-static void asm_intcomp(ASMState *as, IRIns *ir)
-{
- ARMCC cc = (asm_compmap[ir->o] & 15);
- IRRef lref = ir->op1, rref = ir->op2;
- Reg left;
- uint32_t m;
- int cmpprev0 = 0;
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
- if (asm_swapops(as, lref, rref)) {
- Reg tmp = lref; lref = rref; rref = tmp;
- if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
- else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
- }
- if (irref_isk(rref) && IR(rref)->i == 0) {
- IRIns *irl = IR(lref);
- cmpprev0 = (irl+1 == ir);
- /* Combine comp(BAND(left, right), 0) into tst left, right. */
- if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
- IRRef blref = irl->op1, brref = irl->op2;
- uint32_t m2 = 0;
- Reg bleft;
- if (asm_swapops(as, blref, brref)) {
- Reg tmp = blref; blref = brref; brref = tmp;
- }
- if (irref_isk(brref)) {
- m2 = emit_isk12(ARMI_AND, IR(brref)->i);
- if ((m2 & (ARMI_AND^ARMI_BIC)))
- goto notst; /* Not beneficial if we miss a constant operand. */
- }
- if (cc == CC_GE) cc = CC_PL;
- else if (cc == CC_LT) cc = CC_MI;
- else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */
- bleft = ra_alloc1(as, blref, RSET_GPR);
- if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
- asm_guardcc(as, cc);
- emit_n(as, ARMI_TST^m2, bleft);
- return;
- }
- }
-notst:
- left = ra_alloc1(as, lref, RSET_GPR);
- m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
- asm_guardcc(as, cc);
- emit_n(as, ARMI_CMP^m, left);
- /* Signed comparison with zero and referencing previous ins? */
- if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
- as->flagmcp = as->mcp; /* Allow elimination of the compare. */
-}
-
-#if LJ_HASFFI
-/* 64 bit integer comparisons. */
-static void asm_int64comp(ASMState *as, IRIns *ir)
-{
- int signedcomp = (ir->o <= IR_GT);
- ARMCC cclo, cchi;
- Reg leftlo, lefthi;
- uint32_t mlo, mhi;
- RegSet allow = RSET_GPR, oldfree;
-
- /* Always use unsigned comparison for loword. */
- cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
- leftlo = ra_alloc1(as, ir->op1, allow);
- oldfree = as->freeset;
- mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
- allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */
-
- /* Use signed or unsigned comparison for hiword. */
- cchi = asm_compmap[ir->o] & 15;
- lefthi = ra_alloc1(as, (ir+1)->op1, allow);
- mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));
-
- /* All register allocations must be performed _before_ this point. */
- if (signedcomp) {
- MCLabel l_around = emit_label(as);
- asm_guardcc(as, cclo);
- emit_n(as, ARMI_CMP^mlo, leftlo);
- emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
- if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */
- asm_guardcc(as, cchi);
- } else {
- asm_guardcc(as, cclo);
- emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
- }
- emit_n(as, ARMI_CMP^mhi, lefthi);
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_HASFFI || LJ_SOFTFP
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
-#if LJ_SOFTFP
- if (!irt_isint(ir->t)) {
- asm_sfpcomp(as, ir-1);
- return;
- }
-#endif
-#if LJ_HASFFI
- asm_int64comp(as, ir-1);
-#endif
- return;
-#if LJ_SOFTFP
- } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
- as->curins--; /* Always skip the loword min/max. */
- if (uselo || usehi)
- asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
- return;
-#elif LJ_HASFFI
- } else if ((ir-1)->o == IR_CONV) {
- as->curins--; /* Always skip the CONV. */
- if (usehi || uselo)
- asm_conv64(as, ir);
- return;
-#endif
- } else if ((ir-1)->o == IR_XSTORE) {
- if ((ir-1)->r != RID_SINK)
- asm_xstore(as, ir, 4);
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
-#if LJ_HASFFI
- case IR_ADD:
- as->curins--;
- asm_intop(as, ir, ARMI_ADC);
- asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
- break;
- case IR_SUB:
- as->curins--;
- asm_intop(as, ir, ARMI_SBC);
- asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
- break;
- case IR_NEG:
- as->curins--;
- asm_intneg(as, ir, ARMI_RSC);
- asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
- break;
-#endif
-#if LJ_SOFTFP
- case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- case IR_STRTO:
- if (!uselo)
- ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
- break;
-#endif
- case IR_CALLN:
- case IR_CALLS:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
-#if LJ_SOFTFP
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
-#endif
- case IR_CNEWI:
- /* Nothing to do here. Handled by lo op itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0);
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- Reg pbase;
- uint32_t k;
- if (irp) {
- if (!ra_hasspill(irp->s)) {
- pbase = irp->r;
- lua_assert(ra_hasreg(pbase));
- } else if (allow) {
- pbase = rset_pickbot(allow);
- } else {
- pbase = RID_RET;
- emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */
- }
- } else {
- pbase = RID_BASE;
- }
- emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
- k = emit_isk12(0, (int32_t)(8*topslot));
- lua_assert(k);
- emit_n(as, ARMI_CMP^k, RID_TMP);
- emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
- emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
- (int32_t)offsetof(lua_State, maxstack));
- if (irp) { /* Must not spill arbitrary registers in head of side trace. */
- int32_t i = i32ptr(&J2G(as->J)->jit_L);
- if (ra_hasspill(irp->s))
- emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
- emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
- if (ra_hasspill(irp->s) && !allow)
- emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */
- emit_loadi(as, RID_TMP, (i & ~4095));
- } else {
- emit_getgl(as, RID_TMP, jit_L);
- }
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
-#if LJ_SOFTFP
- RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
- Reg tmp;
- lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */
- tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
- rset_exclude(RSET_GPREVEN, RID_BASE));
- emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
- if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
- tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
- emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
-#else
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
-#endif
- } else {
- RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
- Reg type;
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
- emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
- if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s == 0) continue; /* Do not overwrite link to previous frame. */
- type = ra_allock(as, (int32_t)(*flinks--), odd);
-#if LJ_SOFTFP
- } else if ((sn & SNAP_SOFTFPNUM)) {
- type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
-#endif
- } else {
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
- }
- emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp1, tmp2;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
- emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- tmp1 = ra_releasetmp(as, ASMREF_TMP1);
- tmp2 = ra_releasetmp(as, ASMREF_TMP2);
- emit_loadi(as, tmp2, as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
- emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
- emit_lso(as, ARMI_LDR, tmp2, tmp1,
- (int32_t)offsetof(global_State, gc.threshold));
- emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
- (int32_t)offsetof(global_State, gc.total));
- ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guardcc already inverted the bcc and patched the final bl. */
- p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
- } else {
- p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Reload L register from g->jit_L. */
-static void asm_head_lreg(ASMState *as)
-{
- IRIns *ir = IR(ASMREF_L);
- if (ra_used(ir)) {
- Reg r = ra_dest(as, ir, RSET_GPR);
- emit_getgl(as, r, jit_L);
- ra_evictk(as);
- }
-}
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir;
- asm_head_lreg(as);
- ir = IR(REF_BASE);
- if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
- ra_spill(as, ir);
- ra_destreg(as, ir, RID_BASE);
-}
-
-/* Coalesce BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir;
- asm_head_lreg(as);
- ir = IR(REF_BASE);
- if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
- ra_spill(as, ir);
- if (ra_hasspill(irp->s)) {
- rset_clear(allow, ra_dest(as, ir, allow));
- } else {
- Reg r = irp->r;
- lua_assert(ra_hasreg(r));
- rset_clear(allow, r);
- if (r != ir->r && !rset_test(as->freeset, r))
- ra_restore(as, regcost_ref(as->cost[r]));
- ra_destreg(as, ir, r);
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- MCode *p = as->mctop;
- MCode *target;
- int32_t spadj = as->T->spadjust;
- if (spadj == 0) {
- as->mctop = --p;
- } else {
- /* Patch stack adjustment. */
- uint32_t k = emit_isk12(ARMI_ADD, spadj);
- lua_assert(k);
- p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
- }
- /* Patch exit branch. */
- target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
- p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- MCode *p = as->mctop - 1; /* Leave room for exit branch. */
- if (as->loopref) {
- as->invmcp = as->mcp = p;
- } else {
- as->mcp = p-1; /* Leave room for stack pointer adjustment. */
- as->invmcp = NULL;
- }
- *p = 0; /* Prevent load/store merging. */
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_EQ: case IR_NE:
- if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
- as->curins--;
- asm_href(as, ir-1, (IROp)ir->o);
- break;
- }
- /* fallthrough */
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_ABC:
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
-#endif
- asm_intcomp(as, ir);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
- case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
- case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
-
- case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
- case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
- case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
- case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
- case IR_BROL: lua_assert(0); break;
-
- /* Arithmetic ops. */
- case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
- case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
- case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
- case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
- case IR_NEG: asm_neg(as, ir); break;
-
-#if LJ_SOFTFP
- case IR_DIV: case IR_POW: case IR_ABS:
- case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
- lua_assert(0); /* Unused for LJ_SOFTFP. */
- break;
-#else
- case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
- case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
- case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
- case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
- case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
- case IR_FPMATH:
- if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
- break;
- if (ir->op2 <= IRFPM_TRUNC)
- asm_callround(as, ir, ir->op2);
- else if (ir->op2 == IRFPM_SQRT)
- asm_fpunary(as, ir, ARMI_VSQRT_D);
- else
- asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
- case IR_TOBIT: asm_tobit(as, ir); break;
-#endif
-
- case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
- case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir, 0); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: asm_fload(as, ir); break;
- case IR_XLOAD: asm_xload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: asm_fstore(as, ir); break;
- case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- uint32_t i, nargs = (int)CCI_NARGS(ci);
- int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
- asm_collectargs(as, ir, ci, args);
- for (i = 0; i < nargs; i++) {
- if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
- if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
- if (irt_isnum(IR(args[i])->t)) {
- if (nfpr > 0) nfpr--;
- else fprodd = 0, nslots = (nslots + 3) & ~1;
- } else {
- if (fprodd) fprodd--;
- else if (nfpr > 0) fprodd = 1, nfpr--;
- else nslots++;
- }
- } else if (irt_isnum(IR(args[i])->t)) {
- ngpr &= ~1;
- if (ngpr > 0) ngpr -= 2; else nslots += 2;
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
- }
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
- return REGSP_HINT(RID_RET);
-}
-
-static void asm_setup_target(ASMState *as)
-{
- /* May need extra exit for asm_stack_check on side traces. */
- asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *pe = (MCode *)((char *)p + T->szmcode);
- MCode *cstart = NULL, *cend = p;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- MCode *px = exitstub_addr(J, exitno) - 2;
- for (; p < pe; p++) {
- /* Look for bl_cc exitstub, replace with b_cc target. */
- uint32_t ins = *p;
- if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
- ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
- *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
- cend = p+1;
- if (!cstart) cstart = p;
- }
- }
- lua_assert(cstart != NULL);
- lj_mcode_sync(cstart, cend);
- lj_mcode_patch(J, mcarea, 1);
-}
-
+/*
+** ARM IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a scratch register pair. */
+static Reg ra_scratchpair(ASMState *as, RegSet allow)
+{
+ RegSet pick1 = as->freeset & allow;
+ RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
+ Reg r;
+ if (pick2) {
+ r = rset_picktop(pick2);
+ } else {
+ RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
+ if (pick) {
+ r = rset_picktop(pick);
+ ra_restore(as, regcost_ref(as->cost[r+1]));
+ } else {
+ pick = pick1 & (allow << 1) & RSET_GPRODD;
+ if (pick) {
+ r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
+ } else {
+ r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
+ ra_restore(as, regcost_ref(as->cost[r+1]));
+ }
+ }
+ }
+ lua_assert(rset_test(RSET_GPREVEN, r));
+ ra_modified(as, r);
+ ra_modified(as, r+1);
+ RA_DBGX((as, "scratchpair $r $r", r, r+1));
+ return r;
+}
+
+#if !LJ_SOFTFP
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_allocref(as, ir->op2, allow);
+ left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_allocref(as, ir->op1, allow);
+ right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+#endif
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Generate an exit stub group at the bottom of the reserved MCode memory. */
+static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
+{
+ MCode *mxp = as->mcbot;
+ int i;
+ if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
+ asm_mclimit(as);
+ /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
+ *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
+ *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
+ mxp++;
+ *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */
+ *mxp++ = group*EXITSTUBS_PER_GROUP;
+ for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
+ *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
+ lj_mcode_sync(as->mcbot, mxp);
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ return mxp - EXITSTUBS_PER_GROUP;
+}
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
+ lj_trace_err(as->J, LJ_TRERR_SNAPOV);
+ for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
+ if (as->J->exitstubgroup[i] == NULL)
+ as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, ARMCC cc)
+{
+ MCode *target = exitstub_addr(as->J, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
+ emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
+ return;
+ }
+ emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
+ int lim)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (ofs > -lim && ofs < lim) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (ofs < lim) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ *ofsp = (ofs & 255); /* Mask out fewer bits to allow LDRD. */
+ return ra_allock(as, (ofs & ~255), allow);
+ }
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse m operand into arithmetic/logic instructions. */
+static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ return ARMF_M(ir->r);
+ } else if (irref_isk(ref)) {
+ uint32_t k = emit_isk12(ai, ir->i);
+ if (k)
+ return k;
+ } else if (mayfuse(as, ref)) {
+ if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
+ ir->o == IR_BSHR ? ARMSH_LSR :
+ ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
+ if (irref_isk(ir->op2)) {
+ return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
+ } else {
+ Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
+ return m | ARMF_RSH(sh, s);
+ }
+ } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ return m | ARMF_SH(ARMSH_LSL, 1);
+ }
+ }
+ return ra_allocref(as, ref, allow);
+}
+
+/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
+static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
+ irref_isk(ir->op2) && IR(ir->op2)->i == 2)
+ return ir->op1;
+ return 0; /* No fusion. */
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
+ RegSet allow, int32_t ofs)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
+ int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
+ (ai & 0x04000000) ? 4096 : 256;
+ if (ir->o == IR_ADD) {
+ int32_t ofs2;
+ if (irref_isk(ir->op2) &&
+ (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
+ (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
+ ofs = ofs2;
+ ref = ir->op1;
+ } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg rn, rm;
+ if ((ai & 0x04000000)) {
+ IRRef sref = asm_fuselsl2(as, rref);
+ if (sref) {
+ rref = sref;
+ ai |= ARMF_SH(ARMSH_LSL, 2);
+ } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
+ lref = rref;
+ rref = sref;
+ ai |= ARMF_SH(ARMSH_LSL, 2);
+ }
+ }
+ rn = ra_alloc1(as, lref, allow);
+ rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
+ if ((ai & 0x04000000)) ai |= ARMI_LS_R;
+ emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
+ return;
+ }
+ } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
+ lua_assert(ofs == 0);
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs += IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs += IR(ir->op1)->i;
+ ref = ir->op2;
+ } else {
+ /* NYI: Fuse ADD with constant. */
+ Reg rn = ra_alloc1(as, ir->op1, allow);
+ uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, rd, rd, ofs);
+ else
+ emit_lsox(as, ai, rd, rd, ofs);
+ emit_dn(as, ARMI_ADD^m, rd, rn);
+ return;
+ }
+ if (ofs <= -lim || ofs >= lim) {
+ Reg rn = ra_alloc1(as, ref, allow);
+ Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
+ if ((ai & 0x04000000)) ai |= ARMI_LS_R;
+ emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+#if !LJ_SOFTFP
+ if ((ai & 0x08000000))
+ emit_vlso(as, ai, rd, base, ofs);
+ else
+#endif
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, rd, base, ofs);
+ else
+ emit_lsox(as, ai, rd, base, ofs);
+}
+
+#if !LJ_SOFTFP
+/* Fuse to multiply-add/sub instruction. */
+static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irm;
+ if (lref != rref &&
+ ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
+ ra_noreg(irm->r)) ||
+ (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
+ (rref = lref, ai = air, ra_noreg(irm->r))))) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
+ Reg right, left = ra_alloc2(as, irm,
+ rset_exclude(rset_exclude(RSET_FPR, dest), add));
+ right = (left >> 8); left &= 255;
+ emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
+ if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = 0;
+#if LJ_SOFTFP
+ Reg gpr = REGARG_FIRSTGPR;
+#else
+ Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
+#endif
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+#if !LJ_SOFTFP
+ for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
+ as->cost[gpr] = REGCOST(~0u, ASMREF_L);
+ gpr = REGARG_FIRSTGPR;
+#endif
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+#if !LJ_SOFTFP
+ if (ref && irt_isfp(ir->t)) {
+ RegSet of = as->freeset;
+ Reg src;
+ if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
+ if (irt_isnum(ir->t)) {
+ if (fpr <= REGARG_LASTFPR) {
+ ra_leftov(as, fpr, ref);
+ fpr++;
+ continue;
+ }
+ } else if (fprodd) { /* Ick. */
+ src = ra_alloc1(as, ref, RSET_FPR);
+ emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
+ fprodd = 0;
+ continue;
+ } else if (fpr <= REGARG_LASTFPR) {
+ ra_leftov(as, fpr, ref);
+ fprodd = fpr++;
+ continue;
+ }
+ /* Workaround to protect argument GPRs from being used for remat. */
+ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
+ src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
+ as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
+ fprodd = 0;
+ goto stackfp;
+ }
+ /* Workaround to protect argument GPRs from being used for remat. */
+ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
+ src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
+ as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
+ if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
+ if (irt_isnum(ir->t)) {
+ lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */
+ emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
+ gpr += 2;
+ } else {
+ emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
+ gpr++;
+ }
+ } else {
+ stackfp:
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, src, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ } else
+#endif
+ {
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
+ if (ref) ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ if (ref) {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs);
+ }
+ ofs += 4;
+ }
+ }
+ }
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lua_assert(!irt_ispri(ir->t));
+ if (!LJ_SOFTFP && irt_isfp(ir->t)) {
+ if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
+ Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
+ if (irt_isnum(ir->t))
+ emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
+ else
+ emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+ UNUSED(ci);
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need a non-argument register for indirect calls. */
+ Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
+ emit_m(as, ARMI_BLXr, freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ /* Need to force a spill on REF_BASE now to update the stack slot. */
+ emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, ARMI_CMP, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guardcc(as, CC_NE);
+ emit_d(as, ARMI_VMRS, 0);
+ emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
+ emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
+}
+#endif
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if !LJ_SOFTFP
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+#endif
+ IRRef lref = ir->op1;
+ /* 64 bit integer conversions are handled by SPLIT. */
+ lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
+#if LJ_SOFTFP
+ /* FP conversions are handled by SPLIT. */
+ lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
+ /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
+#else
+ lua_assert(irt_type(ir->t) != st);
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
+ (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
+ } else { /* Integer to FP conversion. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ ARMIns ai = irt_isfloat(ir->t) ?
+ (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
+ (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
+ emit_dm(as, ai, (dest & 15), (dest & 15));
+ emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ ARMIns ai;
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ ai = irt_isint(ir->t) ?
+ (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
+ (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
+ emit_dm(as, ai, (tmp & 15), (left & 15));
+ }
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if ((as->flags & JIT_F_ARMV6)) {
+ ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
+ st == IRT_U8 ? ARMI_UXTB :
+ st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
+ emit_dm(as, ai, dest, left);
+ } else if (st == IRT_U8) {
+ emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
+ } else {
+ uint32_t shift = st == IRT_I8 ? 24 : 16;
+ ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
+ emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
+ emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
+ }
+ } else { /* Handle 32/32 bit no-op (cast). */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
+
+#if !LJ_SOFTFP && LJ_HASFFI
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ IRCallID id;
+ CCallInfo ci;
+ IRRef args[2];
+ args[0] = (ir-1)->op1;
+ args[1] = ir->op1;
+ if (st == IRT_NUM || st == IRT_FLOAT) {
+ id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
+ ir--;
+ } else {
+ id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
+ }
+ ci = lj_ir_callinfo[id];
+#if !LJ_ABI_SOFTFP
+ ci.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! */
+#endif
+ asm_setupresult(as, ir, &ci);
+ asm_gencall(as, &ci, args);
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ Reg rlo = 0, rhi = 0, tmp;
+ int destused = ra_used(ir);
+ int32_t ofs = 0;
+ ra_evictset(as, RSET_SCRATCH);
+#if LJ_SOFTFP
+ if (destused) {
+ if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
+ (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ Reg r = (ir+i)->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
+ }
+ }
+ ofs = sps_scale(ir->s);
+ destused = 0;
+ } else {
+ rhi = ra_dest(as, ir+1, RSET_GPR);
+ rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
+ }
+ }
+ asm_guardcc(as, CC_EQ);
+ if (destused) {
+ emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
+ emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
+ }
+#else
+ UNUSED(rhi);
+ if (destused) {
+ if (ra_hasspill(ir->s)) {
+ ofs = sps_scale(ir->s);
+ destused = 0;
+ if (ra_hasreg(ir->r)) {
+ ra_free(as, ir->r);
+ ra_modified(as, ir->r);
+ emit_spload(as, ir, ir->r, ofs);
+ }
+ } else {
+ rlo = ra_dest(as, ir, RSET_FPR);
+ }
+ }
+ asm_guardcc(as, CC_EQ);
+ if (destused)
+ emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
+#endif
+ emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ if (ofs == 0)
+ emit_dm(as, ARMI_MOV, tmp, RID_SP);
+ else
+ emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
+}
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref)) {
+ /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ } else {
+#if LJ_SOFTFP
+ lua_assert(0);
+#else
+ /* Otherwise force a spill and use the spill slot. */
+ emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
+#endif
+ }
+ } else {
+ /* Otherwise use [sp] and [sp+4] to hold the TValue. */
+ RegSet allow = rset_exclude(RSET_GPR, dest);
+ Reg type;
+ emit_dm(as, ARMI_MOV, dest, RID_SP);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ emit_lso(as, ARMI_STR, src, RID_SP, 0);
+ }
+ if ((ir+1)->o == IR_HIOP)
+ type = ra_alloc1(as, ref+1, allow);
+ else
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ emit_lso(as, ARMI_STR, type, RID_SP, 4);
+ }
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
+ if (k) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_dn(as, ARMI_ADD^k, dest, base);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
+ uint32_t khash;
+ MCLabel l_end, l_loop;
+ rset_clear(allow, tab);
+ if (!irref_isk(refkey) || irt_isstr(kt)) {
+#if LJ_SOFTFP
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ if (irkey[1].o == IR_HIOP) {
+ if (ra_hasreg((irkey+1)->r)) {
+ keynumhi = (irkey+1)->r;
+ keyhi = RID_TMP;
+ ra_noweak(as, keynumhi);
+ } else {
+ keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
+ }
+ rset_clear(allow, keynumhi);
+ khi = 0;
+ }
+#else
+ if (irt_isnum(kt)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ keyhi = keynumhi = ra_scratch(as, allow);
+ rset_clear(allow, keyhi);
+ khi = 0;
+ } else {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+#endif
+ } else if (irt_isnum(kt)) {
+ int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
+ k = emit_isk12(ARMI_CMP, val);
+ if (!k) {
+ key = ra_allock(as, val, allow);
+ rset_clear(allow, key);
+ }
+ val = (int32_t)ir_knum(irkey)->u32.hi;
+ khi = emit_isk12(ARMI_CMP, val);
+ if (!khi) {
+ keyhi = ra_allock(as, val, allow);
+ rset_clear(allow, keyhi);
+ }
+ } else if (!irt_ispri(kt)) {
+ k = emit_isk12(ARMI_CMP, irkey->i);
+ if (!k) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+ }
+ if (!irt_ispri(kt))
+ tmp = ra_scratchpair(as, allow);
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_AL);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
+ emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ if (!irt_ispri(kt)) {
+ emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
+ emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
+ emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
+ } else {
+ emit_n(as, ARMI_CMP^khi, tmp);
+ emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
+ }
+ *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
+ if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */
+ emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ } else if (irref_isk(refkey)) {
+ emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
+ rset_exclude(rset_exclude(RSET_GPR, tab), dest));
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */
+ if (keyhi == RID_TMP)
+ emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
+ emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
+ }
+ emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
+ emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
+ tmp, tmp+1, tmp);
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
+ if (ra_hasreg(keynumhi)) {
+ emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
+ emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */
+ emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
+#if !LJ_SOFTFP
+ emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
+ (ra_alloc1(as, refkey, RSET_FPR) & 15));
+#endif
+ } else {
+ emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
+ emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
+ rset_exclude(rset_exclude(RSET_GPR, tab), key));
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ofs > 4095) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
+ }
+ asm_guardcc(as, CC_NE);
+ if (!irt_ispri(irkey->t)) {
+ RegSet even = (as->freeset & allow);
+ even = even & (even >> 1) & RSET_GPREVEN;
+ if (even) {
+ key = ra_scratch(as, even);
+ if (rset_test(as->freeset, key+1)) {
+ type = key+1;
+ ra_modified(as, type);
+ }
+ } else {
+ key = ra_scratch(as, allow);
+ }
+ rset_clear(allow, key);
+ }
+ rset_clear(allow, type);
+ if (irt_isnum(irkey->t)) {
+ emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
+ (int32_t)ir_knum(irkey)->u32.hi, allow);
+ emit_opk(as, ARMI_CMP, 0, key,
+ (int32_t)ir_knum(irkey)->u32.lo, allow);
+ } else {
+ if (ra_hasreg(key))
+ emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
+ }
+ emit_lso(as, ARMI_LDR, type, idx, kofs+4);
+ if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
+ if (ofs > 4095)
+ emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ if (ir->r == RID_SINK)
+ return;
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, ARMI_LDR, dest, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
+ emit_opk(as, ARMI_ADD, dest, uv,
+ (int32_t)offsetof(GCupval, tv), RSET_GPR);
+ emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_lso(as, ARMI_LDR, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lua_assert(!ra_used(ir));
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ uint32_t k, m = ARMI_K12|sizeof(GCstr);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ (k = emit_isk12(ARMI_ADD,
+ (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
+ m = k;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_dn(as, ARMI_ADD^m, dest, dest);
+ emit_dnm(as, ARMI_ADD, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ emit_opk(as, ARMI_ADD, dest, r,
+ sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static ARMIns asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return ARMI_LDRSB;
+ case IRT_U8: return ARMI_LDRB;
+ case IRT_I16: return ARMI_LDRSH;
+ case IRT_U16: return ARMI_LDRH;
+ case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
+ case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
+ default: return ARMI_LDR;
+ }
+}
+
+static ARMIns asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return ARMI_STRB;
+ case IRT_I16: case IRT_U16: return ARMI_STRH;
+ case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
+ case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
+ default: return ARMI_STR;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ ARMIns ai = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, dest, idx, ofs);
+ else
+ emit_lsox(as, ai, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ ARMIns ai = asm_fxstoreins(ir);
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, src, idx, ofs);
+ else
+ emit_lsox(as, ai, src, idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src), ofs);
+ }
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ IRType t = hiop ? IRT_NUM : irt_type(ir->t);
+ Reg dest = RID_NONE, type = RID_NONE, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = 0;
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+ if (ra_used(ir)) {
+ lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
+ (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
+ if (!hiop || type == RID_NONE) {
+ rset_clear(allow, idx);
+ if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
+ rset_test((as->freeset & allow), dest+1)) {
+ type = dest+1;
+ ra_modified(as, type);
+ } else {
+ type = RID_TMP;
+ }
+ }
+ asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
+ if (ra_hasreg(dest)) {
+#if !LJ_SOFTFP
+ if (t == IRT_NUM)
+ emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
+ else
+#endif
+ emit_lso(as, ARMI_LDR, dest, idx, ofs);
+ }
+ emit_lso(as, ARMI_LDR, type, idx, ofs+4);
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = 0;
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
+ emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
+ } else
+#endif
+ {
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ if (hiop)
+ type = ra_alloc1(as, (ir+1)->op2, allow);
+ else
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
+ if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
+ emit_lso(as, ARMI_STR, type, idx, ofs+4);
+ }
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ IRType t = hiop ? IRT_NUM : irt_type(ir->t);
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+#if LJ_SOFTFP
+ lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+#else
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t = IRT_NUM; /* Continue with a regular number type check. */
+ } else
+#endif
+ if (ra_used(ir)) {
+ lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (t == IRT_INT) {
+ Reg tmp = ra_scratch(as, RSET_FPR);
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
+ dest = tmp;
+ t = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, RSET_GPR);
+ emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
+ emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
+ dest = tmp;
+ t = IRT_INT; /* Check for original type. */
+ }
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+dotypecheck:
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ if (ra_noreg(type)) {
+ if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
+ rset_test((as->freeset & allow), dest+1)) {
+ type = dest+1;
+ ra_modified(as, type);
+ } else {
+ type = RID_TMP;
+ }
+ }
+ asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
+ }
+ if (ra_hasreg(dest)) {
+#if !LJ_SOFTFP
+ if (t == IRT_NUM) {
+ if (ofs < 1024) {
+ emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
+ } else {
+ if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
+ emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
+ emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
+ return;
+ }
+ } else
+#endif
+ emit_lso(as, ARMI_LDR, dest, base, ofs);
+ }
+ if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lua_assert(sz == 4 || sz == 8);
+ if (sz == 8) {
+ ofs += 4; ir++;
+ lua_assert(ir->o == IR_HIOP);
+ }
+ for (;;) {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_lso(as, ARMI_STR, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir--;
+ }
+ }
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ {
+ uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
+ Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
+ emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
+ emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
+ if (k) emit_d(as, ARMI_MOV^k, RID_R1);
+ }
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
+ rset_exclude(rset_exclude(RSET_GPR, tab), link));
+ Reg mark = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_lso(as, ARMI_STR, tab, gr,
+ (int32_t)offsetof(global_State, gc.grayagain));
+ emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
+ emit_lso(as, ARMI_LDR, link, gr,
+ (int32_t)offsetof(global_State, gc.grayagain));
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
+ emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ if ((l_end[-1] >> 28) == CC_AL)
+ l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
+ else
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
+ emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_lso(as, ARMI_LDRB, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_dm(as, ai, (dest & 15), (left & 15));
+}
+
+static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
+ IRRef args[2];
+ args[0] = irpp->op1;
+ args[1] = irp->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
+{
+ IRIns *ir;
+ if (irref_isk(rref))
+ return 0; /* Don't swap constants to the left. */
+ if (irref_isk(lref))
+ return 1; /* But swap constants to the right. */
+ ir = IR(rref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2))
+ return 0; /* Don't swap fusable operands to the left. */
+ ir = IR(lref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2))
+ return 1; /* But swap fusable operands to the right. */
+ return 0; /* Otherwise don't swap. */
+}
+
+static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m;
+ if (asm_swapops(as, lref, rref)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
+ ai ^= (ARMI_SUB^ARMI_RSB);
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
+ if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_VS);
+ ai |= ARMI_S;
+ }
+ emit_dn(as, ai^m, dest, left);
+}
+
+static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */
+ as->flagmcp = NULL;
+ as->mcp++;
+ ai |= ARMI_S;
+ }
+ asm_intop(as, ir, ai);
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */
+ uint32_t cc = (as->mcp[1] >> 28);
+ as->flagmcp = NULL;
+ if (cc <= CC_NE) {
+ as->mcp++;
+ ai |= ARMI_S;
+ } else if (cc == CC_GE) {
+ *++as->mcp ^= ((CC_GE^CC_PL) << 28);
+ ai |= ARMI_S;
+ } else if (cc == CC_LT) {
+ *++as->mcp ^= ((CC_LT^CC_MI) << 28);
+ ai |= ARMI_S;
+ } /* else: other conds don't work with bit ops. */
+ }
+ if (ir->op2 == 0) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
+ emit_d(as, ai^m, dest);
+ } else {
+ /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
+ asm_intop(as, ir, ai);
+ }
+}
+
+static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dn(as, ai|ARMI_K12|0, dest, left);
+}
+
+/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
+static void asm_intmul(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ Reg tmp = RID_NONE;
+ /* ARMv5 restriction: dest != left and dest_hi != left. */
+ if (dest == left && left != right) { left = right; right = dest; }
+ if (irt_isguard(ir->t)) { /* IR_MULOV */
+ if (!(as->flags & JIT_F_ARMV6) && dest == left)
+ tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
+ emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
+ } else {
+ if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
+ emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
+ }
+ /* Only need this for the dest == left == right case. */
+ if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
+ asm_fparith(as, ir, ARMI_VADD_D);
+ return;
+ }
+#endif
+ asm_intop_s(as, ir, ARMI_ADD);
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
+ asm_fparith(as, ir, ARMI_VSUB_D);
+ return;
+ }
+#endif
+ asm_intop_s(as, ir, ARMI_SUB);
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, ARMI_VMUL_D);
+ return;
+ }
+#endif
+ asm_intmul(as, ir);
+}
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, ARMI_VNEG_D);
+ return;
+ }
+#endif
+ asm_intneg(as, ir, ARMI_RSB);
+}
+
+static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+#if !LJ_SOFTFP
+static void asm_callround(ASMState *as, IRIns *ir, int id)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
+ RID2RSET(RID_R3)|RID2RSET(RID_R12);
+ RegSet of;
+ Reg dest, src;
+ ra_evictset(as, drop);
+ dest = ra_dest(as, ir, RSET_FPR);
+ emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
+ emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
+ id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
+ (void *)lj_vm_trunc_sf);
+ /* Workaround to protect argument GPRs from being used for remat. */
+ of = as->freeset;
+ as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
+ as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
+ src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */
+ as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
+ emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
+}
+#endif
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if ((as->flags & JIT_F_ARMV6)) {
+ emit_dm(as, ARMI_REV, dest, left);
+ } else {
+ Reg tmp2 = dest;
+ if (tmp2 == left)
+ tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
+ emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
+ emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
+ }
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
+{
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
+ /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ int32_t shift = (IR(ir->op2)->i & 31);
+ emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
+ }
+}
+
+static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ uint32_t kcmp = 0, kmov = 0;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ Reg right = 0;
+ if (irref_isk(ir->op2)) {
+ kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
+ if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
+ }
+ if (!kmov) {
+ kcmp = 0;
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ if (kmov || dest != right) {
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
+ cc ^= 1; /* Must use opposite conditions for paired moves. */
+ } else {
+ cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */
+ }
+ if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
+ emit_nm(as, ARMI_CMP^kcmp, left, right);
+}
+
+#if LJ_SOFTFP
+static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ args[0] = ir->op1; args[1] = (ir+1)->op1;
+ args[2] = ir->op2; args[3] = (ir+1)->op2;
+ /* __aeabi_cdcmple preserves r0-r3. */
+ if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
+ if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
+ if (!rset_test(as->freeset, RID_R2) &&
+ regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
+ if (!rset_test(as->freeset, RID_R3) &&
+ regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
+ ra_evictset(as, drop);
+ ra_destpair(as, ir);
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
+ emit_call(as, (void *)ci->func);
+ for (r = RID_R0; r <= RID_R3; r++)
+ ra_leftov(as, r, args[r-RID_R0]);
+}
+#else
+static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = ((left >> 8) & 15); left &= 15;
+ if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
+ if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
+ emit_d(as, ARMI_VMRS, 0);
+ emit_dm(as, ARMI_VCMP_D, left, right);
+}
+#endif
+
+static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
+{
+#if LJ_SOFTFP
+ UNUSED(fcc);
+#else
+ if (irt_isnum(ir->t))
+ asm_fpmin_max(as, ir, fcc);
+ else
+#endif
+ asm_intmin_max(as, ir, cc);
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Map of comparisons to flags. ORDER IR. */
+static const uint8_t asm_compmap[IR_ABC+1] = {
+ /* op FP swp int cc FP cc */
+ /* LT */ CC_GE + (CC_HS << 4),
+ /* GE x */ CC_LT + (CC_HI << 4),
+ /* LE */ CC_GT + (CC_HI << 4),
+ /* GT x */ CC_LE + (CC_HS << 4),
+ /* ULT x */ CC_HS + (CC_LS << 4),
+ /* UGE */ CC_LO + (CC_LO << 4),
+ /* ULE x */ CC_HI + (CC_LO << 4),
+ /* UGT */ CC_LS + (CC_LS << 4),
+ /* EQ */ CC_NE + (CC_NE << 4),
+ /* NE */ CC_EQ + (CC_EQ << 4),
+ /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */
+};
+
+#if LJ_SOFTFP
+/* FP comparisons. */
+static void asm_sfpcomp(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
+ args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
+ args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
+ /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
+ for (r = RID_R0; r <= RID_R3; r++)
+ if (!rset_test(as->freeset, r) &&
+ regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
+ ra_evictset(as, drop);
+ asm_guardcc(as, (asm_compmap[ir->o] >> 4));
+ emit_call(as, (void *)ci->func);
+ for (r = RID_R0; r <= RID_R3; r++)
+ ra_leftov(as, r, args[r-RID_R0]);
+}
+#else
+/* FP comparisons. */
+static void asm_fpcomp(ASMState *as, IRIns *ir)
+{
+ Reg left, right;
+ ARMIns ai;
+ int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
+ if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
+ left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
+ right = 0;
+ ai = ARMI_VCMPZ_D;
+ } else {
+ left = ra_alloc2(as, ir, RSET_FPR);
+ if (swp) {
+ right = (left & 15); left = ((left >> 8) & 15);
+ } else {
+ right = ((left >> 8) & 15); left &= 15;
+ }
+ ai = ARMI_VCMP_D;
+ }
+ asm_guardcc(as, (asm_compmap[ir->o] >> 4));
+ emit_d(as, ARMI_VMRS, 0);
+ emit_dm(as, ai, left, right);
+}
+#endif
+
+/* Integer comparisons. */
+static void asm_intcomp(ASMState *as, IRIns *ir)
+{
+ ARMCC cc = (asm_compmap[ir->o] & 15);
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left;
+ uint32_t m;
+ int cmpprev0 = 0;
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
+ if (asm_swapops(as, lref, rref)) {
+ Reg tmp = lref; lref = rref; rref = tmp;
+ if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
+ else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
+ }
+ if (irref_isk(rref) && IR(rref)->i == 0) {
+ IRIns *irl = IR(lref);
+ cmpprev0 = (irl+1 == ir);
+ /* Combine comp(BAND(left, right), 0) into tst left, right. */
+ if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
+ IRRef blref = irl->op1, brref = irl->op2;
+ uint32_t m2 = 0;
+ Reg bleft;
+ if (asm_swapops(as, blref, brref)) {
+ Reg tmp = blref; blref = brref; brref = tmp;
+ }
+ if (irref_isk(brref)) {
+ m2 = emit_isk12(ARMI_AND, IR(brref)->i);
+ if ((m2 & (ARMI_AND^ARMI_BIC)))
+ goto notst; /* Not beneficial if we miss a constant operand. */
+ }
+ if (cc == CC_GE) cc = CC_PL;
+ else if (cc == CC_LT) cc = CC_MI;
+ else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */
+ bleft = ra_alloc1(as, blref, RSET_GPR);
+ if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
+ asm_guardcc(as, cc);
+ emit_n(as, ARMI_TST^m2, bleft);
+ return;
+ }
+ }
+notst:
+ left = ra_alloc1(as, lref, RSET_GPR);
+ m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, cc);
+ emit_n(as, ARMI_CMP^m, left);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+}
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_int64comp(ASMState *as, IRIns *ir)
+{
+ int signedcomp = (ir->o <= IR_GT);
+ ARMCC cclo, cchi;
+ Reg leftlo, lefthi;
+ uint32_t mlo, mhi;
+ RegSet allow = RSET_GPR, oldfree;
+
+ /* Always use unsigned comparison for loword. */
+ cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
+ leftlo = ra_alloc1(as, ir->op1, allow);
+ oldfree = as->freeset;
+ mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
+ allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */
+
+ /* Use signed or unsigned comparison for hiword. */
+ cchi = asm_compmap[ir->o] & 15;
+ lefthi = ra_alloc1(as, (ir+1)->op1, allow);
+ mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));
+
+ /* All register allocations must be performed _before_ this point. */
+ if (signedcomp) {
+ MCLabel l_around = emit_label(as);
+ asm_guardcc(as, cclo);
+ emit_n(as, ARMI_CMP^mlo, leftlo);
+ emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
+ if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */
+ asm_guardcc(as, cchi);
+ } else {
+ asm_guardcc(as, cclo);
+ emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
+ }
+ emit_n(as, ARMI_CMP^mhi, lefthi);
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_HASFFI || LJ_SOFTFP
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+#if LJ_SOFTFP
+ if (!irt_isint(ir->t)) {
+ asm_sfpcomp(as, ir-1);
+ return;
+ }
+#endif
+#if LJ_HASFFI
+ asm_int64comp(as, ir-1);
+#endif
+ return;
+#if LJ_SOFTFP
+ } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
+ as->curins--; /* Always skip the loword min/max. */
+ if (uselo || usehi)
+ asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
+ return;
+#elif LJ_HASFFI
+ } else if ((ir-1)->o == IR_CONV) {
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+#endif
+ } else if ((ir-1)->o == IR_XSTORE) {
+ if ((ir-1)->r != RID_SINK)
+ asm_xstore(as, ir, 4);
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+#if LJ_HASFFI
+ case IR_ADD:
+ as->curins--;
+ asm_intop(as, ir, ARMI_ADC);
+ asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
+ break;
+ case IR_SUB:
+ as->curins--;
+ asm_intop(as, ir, ARMI_SBC);
+ asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
+ break;
+ case IR_NEG:
+ as->curins--;
+ asm_intneg(as, ir, ARMI_RSC);
+ asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
+ break;
+#endif
+#if LJ_SOFTFP
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
+ break;
+#endif
+ case IR_CALLN:
+ case IR_CALLS:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+#if LJ_SOFTFP
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
+#endif
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0);
+#endif
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ Reg pbase;
+ uint32_t k;
+ if (irp) {
+ if (!ra_hasspill(irp->s)) {
+ pbase = irp->r;
+ lua_assert(ra_hasreg(pbase));
+ } else if (allow) {
+ pbase = rset_pickbot(allow);
+ } else {
+ pbase = RID_RET;
+ emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */
+ }
+ } else {
+ pbase = RID_BASE;
+ }
+ emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
+ k = emit_isk12(0, (int32_t)(8*topslot));
+ lua_assert(k);
+ emit_n(as, ARMI_CMP^k, RID_TMP);
+ emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
+ emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
+ (int32_t)offsetof(lua_State, maxstack));
+ if (irp) { /* Must not spill arbitrary registers in head of side trace. */
+ int32_t i = i32ptr(&J2G(as->J)->jit_L);
+ if (ra_hasspill(irp->s))
+ emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
+ emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
+ if (ra_hasspill(irp->s) && !allow)
+ emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */
+ emit_loadi(as, RID_TMP, (i & ~4095));
+ } else {
+ emit_getgl(as, RID_TMP, jit_L);
+ }
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+#if LJ_SOFTFP
+ RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
+ Reg tmp;
+ lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
+ rset_exclude(RSET_GPREVEN, RID_BASE));
+ emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
+ if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
+ emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
+#endif
+ } else {
+ RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
+ Reg type;
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
+ emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
+ if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), odd);
+#if LJ_SOFTFP
+ } else if ((sn & SNAP_SOFTFPNUM)) {
+ type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
+#endif
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
+ }
+ emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp1, tmp2;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ tmp1 = ra_releasetmp(as, ASMREF_TMP1);
+ tmp2 = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp2, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
+ emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
+ emit_lso(as, ARMI_LDR, tmp2, tmp1,
+ (int32_t)offsetof(global_State, gc.threshold));
+ emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
+ (int32_t)offsetof(global_State, gc.total));
+ ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the bcc and patched the final bl. */
+ p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
+ } else {
+ p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Reload L register from g->jit_L. */
+static void asm_head_lreg(ASMState *as)
+{
+ IRIns *ir = IR(ASMREF_L);
+ if (ra_used(ir)) {
+ Reg r = ra_dest(as, ir, RSET_GPR);
+ emit_getgl(as, r, jit_L);
+ ra_evictk(as);
+ }
+}
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir);
+ ra_destreg(as, ir, RID_BASE);
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir);
+ if (ra_hasspill(irp->s)) {
+ rset_clear(allow, ra_dest(as, ir, allow));
+ } else {
+ Reg r = irp->r;
+ lua_assert(ra_hasreg(r));
+ rset_clear(allow, r);
+ if (r != ir->r && !rset_test(as->freeset, r))
+ ra_restore(as, regcost_ref(as->cost[r]));
+ ra_destreg(as, ir, r);
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ as->mctop = --p;
+ } else {
+ /* Patch stack adjustment. */
+ uint32_t k = emit_isk12(ARMI_ADD, spadj);
+ lua_assert(k);
+ p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-1; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+ *p = 0; /* Prevent load/store merging. */
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+ case IR_GCSTEP: asm_gcstep(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_EQ: case IR_NE:
+ if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
+ as->curins--;
+ asm_href(as, ir-1, (IROp)ir->o);
+ break;
+ }
+ /* fallthrough */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
+#endif
+ asm_intcomp(as, ir);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
+ case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
+ case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
+ case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
+ case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
+ case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
+ case IR_BROL: lua_assert(0); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
+ case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
+ case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
+ case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
+ case IR_NEG: asm_neg(as, ir); break;
+
+#if LJ_SOFTFP
+ case IR_DIV: case IR_POW: case IR_ABS:
+ case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
+ lua_assert(0); /* Unused for LJ_SOFTFP. */
+ break;
+#else
+ case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
+ case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
+ case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
+ case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
+ case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
+ case IR_FPMATH:
+ if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+ break;
+ if (ir->op2 <= IRFPM_TRUNC)
+ asm_callround(as, ir, ir->op2);
+ else if (ir->op2 == IRFPM_SQRT)
+ asm_fpunary(as, ir, ARMI_VSQRT_D);
+ else
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+ case IR_TOBIT: asm_tobit(as, ir); break;
+#endif
+
+ case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
+ case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir, 0); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir, 0); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = (int)CCI_NARGS(ci);
+ int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++) {
+ if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
+ if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
+ if (irt_isnum(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--;
+ else fprodd = 0, nslots = (nslots + 3) & ~1;
+ } else {
+ if (fprodd) fprodd--;
+ else if (nfpr > 0) fprodd = 1, nfpr--;
+ else nslots++;
+ }
+ } else if (irt_isnum(IR(args[i])->t)) {
+ ngpr &= ~1;
+ if (ngpr > 0) ngpr -= 2; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ /* May need extra exit for asm_stack_check on side traces. */
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *cstart = NULL, *cend = p;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode *px = exitstub_addr(J, exitno) - 2;
+ for (; p < pe; p++) {
+ /* Look for bl_cc exitstub, replace with b_cc target. */
+ uint32_t ins = *p;
+ if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
+ ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
+ *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
+ cend = p+1;
+ if (!cstart) cstart = p;
+ }
+ }
+ lua_assert(cstart != NULL);
+ lj_mcode_sync(cstart, cend);
+ lj_mcode_patch(J, mcarea, 1);
+}
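The scan recognizes a conditional BL to this exit's stub (opcode bits 1011 with a real condition code, offset equal to px-p since px already folds in the -2 pipeline bias) and rewrites it in place as a plain Bcc to the new target, keeping the condition bits and dropping the link bit. A hedged sketch of the same test and rewrite; is_exit_bl and patch_to_b are illustrative helpers:

#include <stdint.h>

typedef uint32_t MCode;

/* Is *p a conditional BL whose 24-bit offset points at 'stub'? */
static int is_exit_bl(const MCode *p, const MCode *stub)
{
  uint32_t ins = *p;
  return (ins & 0x0f000000u) == 0x0b000000u &&          /* BL: bits 27-24 = 1011 */
         ins < 0xf0000000u &&                           /* real condition, not NV */
         ((ins ^ (uint32_t)((stub - p) - 2)) & 0x00ffffffu) == 0;
}

/* Turn it into a Bcc to 'target': keep cond and branch bits, drop the link
** bit (not covered by 0xfe000000), splice in the new 24-bit offset. */
static MCode patch_to_b(MCode ins, const MCode *p, const MCode *target)
{
  return (ins & 0xfe000000u) | ((uint32_t)((target - p) - 2) & 0x00ffffffu);
}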
+
diff --git a/3rdparty/lua/src/lj_asm_mips.h b/3rdparty/lua/src/lj_asm_mips.h
index ad227c6..9fe7c9c 100644
--- a/3rdparty/lua/src/lj_asm_mips.h
+++ b/3rdparty/lua/src/lj_asm_mips.h
@@ -1,1977 +1,1976 @@
-/*
-** MIPS IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Register allocator extensions --------------------------------------- */
-
-/* Allocate a register with a hint. */
-static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!ra_hashint(r) && !iscrossref(as, ref))
- ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
- r = ra_allocref(as, ref, allow);
- }
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate a register or RID_ZERO. */
-static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0)
- return RID_ZERO;
- r = ra_allocref(as, ref, allow);
- } else {
- ra_noweak(as, r);
- }
- return r;
-}
-
-/* Allocate two source registers for three-operand instructions. */
-static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- Reg left = irl->r, right = irr->r;
- if (ra_hasreg(left)) {
- ra_noweak(as, left);
- if (ra_noreg(right))
- right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
- else
- ra_noweak(as, right);
- } else if (ra_hasreg(right)) {
- ra_noweak(as, right);
- left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
- } else if (ra_hashint(right)) {
- right = ra_alloc1z(as, ir->op2, allow);
- left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
- } else {
- left = ra_alloc1z(as, ir->op1, allow);
- right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
- }
- return left | (right << 8);
-}
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Need some spare long-range jump slots, for out-of-range branches. */
-#define MIPS_SPAREJUMP 4
-
-/* Setup spare long-range jump slots per mcarea. */
-static void asm_sparejump_setup(ASMState *as)
-{
- MCode *mxp = as->mcbot;
- /* Assumes sizeof(MCLink) == 8. */
- if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) {
- lua_assert(MIPSI_NOP == 0);
- memset(mxp+2, 0, MIPS_SPAREJUMP*8);
- mxp += MIPS_SPAREJUMP*2;
- lua_assert(mxp < as->mctop);
- lj_mcode_sync(as->mcbot, mxp);
- lj_mcode_commitbot(as->J, mxp);
- as->mcbot = mxp;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- }
-}
-
-/* Setup exit stub after the end of each trace. */
-static void asm_exitstub_setup(ASMState *as)
-{
- MCode *mxp = as->mctop;
- /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
- *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
- *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
- lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
- *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
- as->mctop = mxp;
-}
-
-/* Keep this in-sync with exitstub_trace_addr(). */
-#define asm_exitstub_addr(as) ((as)->mctop)
-
-/* Emit conditional branch to exit for guard. */
-static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
-{
- MCode *target = asm_exitstub_addr(as);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->invmcp = NULL;
- as->loopinv = 1;
- as->mcp = p+1;
- mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */
- target = p; /* Patch target later in asm_loop_fixup. */
- }
- emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
- emit_branch(as, mi, rs, rt, target);
-}
-
-/* -- Operand fusion ------------------------------------------------------ */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if there's no conflicting instruction between curins and ref. */
-static int noconflict(ASMState *as, IRRef ref, IROp conflict)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref)
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse the array base of colocated arrays. */
-static int32_t asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
- return (int32_t)sizeof(GCtab);
- return 0;
-}
-
-/* Fuse array/hash/upvalue reference into register+offset operand. */
-static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- if (ir->o == IR_AREF) {
- if (mayfuse(as, ref)) {
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, refa, allow);
- }
- }
- }
- } else if (ir->o == IR_HREFK) {
- if (mayfuse(as, ref)) {
- int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, ir->op1, allow);
- }
- }
- } else if (ir->o == IR_UREFC) {
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
- int32_t jgl = (intptr_t)J2G(as->J);
- if ((uint32_t)(ofs-jgl) < 65536) {
- *ofsp = ofs-jgl-32768;
- return RID_JGL;
- } else {
- *ofsp = (int16_t)ofs;
- return ra_allock(as, ofs-(int16_t)ofs, allow);
- }
- }
- }
- }
- *ofsp = 0;
- return ra_alloc1(as, ref, allow);
-}
-
-/* Fuse XLOAD/XSTORE reference into load/store operand. */
-static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
- RegSet allow, int32_t ofs)
-{
- IRIns *ir = IR(ref);
- Reg base;
- if (ra_noreg(ir->r) && canfuse(as, ir)) {
- if (ir->o == IR_ADD) {
- int32_t ofs2;
- if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
- ref = ir->op1;
- ofs = ofs2;
- }
- } else if (ir->o == IR_STRREF) {
- int32_t ofs2 = 65536;
- lua_assert(ofs == 0);
- ofs = (int32_t)sizeof(GCstr);
- if (irref_isk(ir->op2)) {
- ofs2 = ofs + IR(ir->op2)->i;
- ref = ir->op1;
- } else if (irref_isk(ir->op1)) {
- ofs2 = ofs + IR(ir->op1)->i;
- ref = ir->op2;
- }
- if (!checki16(ofs2)) {
- /* NYI: Fuse ADD with constant. */
- Reg right, left = ra_alloc2(as, ir, allow);
- right = (left >> 8); left &= 255;
- emit_hsi(as, mi, rt, RID_TMP, ofs);
- emit_dst(as, MIPSI_ADDU, RID_TMP, left, right);
- return;
- }
- ofs = ofs2;
- }
- }
- base = ra_alloc1(as, ref, allow);
- emit_hsi(as, mi, rt, base, ofs);
-}
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = 16;
- Reg gpr, fpr = REGARG_FIRSTFPR;
- if ((void *)ci->func)
- emit_call(as, (void *)ci->func);
- for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
- as->cost[gpr] = REGCOST(~0u, ASMREF_L);
- gpr = REGARG_FIRSTGPR;
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- if (ref) {
- IRIns *ir = IR(ref);
- if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
- !(ci->flags & CCI_VARARG)) {
- lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
- ra_leftov(as, fpr, ref);
- fpr += 2;
- gpr += irt_isnum(ir->t) ? 2 : 1;
- } else {
- fpr = REGARG_LASTFPR+1;
- if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
- if (irt_isfp(ir->t)) {
- RegSet of = as->freeset;
- Reg r;
- /* Workaround to protect argument GPRs from being used for remat. */
- as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
- r = ra_alloc1(as, ref, RSET_FPR);
- as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
- if (irt_isnum(ir->t)) {
- emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
- emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
- lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */
- gpr += 2;
- } else if (irt_isfloat(ir->t)) {
- emit_tg(as, MIPSI_MFC1, gpr, r);
- gpr++;
- }
- } else {
- ra_leftov(as, gpr, ref);
- gpr++;
- }
- } else {
- Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
- emit_spstore(as, ir, r, ofs);
- ofs += irt_isnum(ir->t) ? 8 : 4;
- }
- }
- } else {
- fpr = REGARG_LASTFPR+1;
- if (gpr <= REGARG_LASTGPR)
- gpr++;
- else
- ofs += 4;
- }
- checkmclim(as);
- }
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = ((ir+1)->o == IR_HIOP);
- if ((ci->flags & CCI_NOFPRCLOBBER))
- drop &= ~RSET_FPR;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- lua_assert(!irt_ispri(ir->t));
- if (irt_isfp(ir->t)) {
- if ((ci->flags & CCI_CASTU64)) {
- int32_t ofs = sps_scale(ir->s);
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
- emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
- }
- if (ofs) {
- emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
- emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
- }
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
- } else if (hiop) {
- ra_destpair(as, ir);
- } else {
- ra_destreg(as, ir, RID_RET);
- }
- }
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- if (irref_isk(func)) { /* Call to constant address. */
- ci.func = (ASMFunction)(void *)(irf->i);
- } else { /* Need specific register for indirect calls. */
- Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
- MCode *p = as->mcp;
- if (r == RID_CFUNCADDR)
- *--p = MIPSI_NOP;
- else
- *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
- *--p = MIPSI_JALR | MIPSF_S(r);
- as->mcp = p;
- ci.func = (ASMFunction)(void *)0;
- }
- asm_gencall(as, &ci, args);
-}
-
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
-{
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
- RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR);
- if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_FPRET);
- emit_call(as, (void *)lj_ir_callinfo[id].func);
- ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guard(as, MIPSI_BNE, RID_TMP,
- ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
- emit_tsi(as, MIPSI_LW, RID_TMP, base, -8);
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_guard(as, MIPSI_BC1F, 0, 0);
- emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
- emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, MIPSI_CVT_W_D, tmp, left);
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_FPR;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, allow);
- Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
- Reg tmp = ra_scratch(as, rset_clear(allow, right));
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
-}
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
- IRRef lref = ir->op1;
- lua_assert(irt_type(ir->t) != st);
- lua_assert(!(irt_isint64(ir->t) ||
- (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
- dest, ra_alloc1(as, lref, RSET_FPR));
- } else if (st == IRT_U32) { /* U32 to FP conversion. */
- /* y = (x ^ 0x8000000) + 2147483648.0 */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- emit_fgh(as, irt_isfloat(ir->t) ? MIPSI_ADD_S : MIPSI_ADD_D,
- dest, dest, tmp);
- emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
- dest, dest);
- if (irt_isfloat(ir->t))
- emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
- RSET_GPR);
- else
- emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
- RSET_GPR);
- emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
- emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
- emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
- } else { /* Integer to FP conversion. */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
- dest, dest);
- emit_tg(as, MIPSI_MTC1, left, dest);
- }
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, lref, RSET_FPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- if (irt_isu32(ir->t)) {
- /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
- emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
- emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
- tmp, tmp);
- emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
- tmp, left, tmp);
- if (st == IRT_FLOAT)
- emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
- RSET_GPR);
- else
- emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
- RSET_GPR);
- } else {
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
- tmp, left);
- }
- }
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if ((ir->op2 & IRCONV_SEXT)) {
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
- } else {
- uint32_t shift = st == IRT_I8 ? 24 : 16;
- emit_dta(as, MIPSI_SRA, dest, dest, shift);
- emit_dta(as, MIPSI_SLL, dest, left, shift);
- }
- } else {
- emit_tsi(as, MIPSI_ANDI, dest, left,
- (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
- }
- } else { /* 32/64 bit integer conversions. */
- /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
-}
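The U32 paths above work around the FPU only providing signed 32-bit conversions: an unsigned source is biased into signed range by flipping the sign bit, converted, and then 2^31 is added back (y = (x ^ 0x80000000) + 2147483648.0); the reverse direction undoes the bias after a floor. A small C sketch of both directions for the double case; u32_to_num and num_to_u32 are illustrative names (the emitted code loads the 2^31 constants via lj_ir_k64_find):

#include <stdint.h>
#include <math.h>

/* Unsigned 32-bit -> double using only a signed int->double conversion. */
static double u32_to_num(uint32_t x)
{
  return (double)(int32_t)(x ^ 0x80000000u) + 2147483648.0;
}

/* double -> unsigned 32-bit for in-range inputs: subtract 2^31, floor to a
** signed int, then undo the bias with the same XOR. */
static uint32_t num_to_u32(double n)
{
  return (uint32_t)(int32_t)floor(n - 2147483648.0) ^ 0x80000000u;
}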
-
-#if LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- IRCallID id;
- const CCallInfo *ci;
- IRRef args[2];
- args[LJ_BE?0:1] = ir->op1;
- args[LJ_BE?1:0] = (ir-1)->op1;
- if (st == IRT_NUM || st == IRT_FLOAT) {
- id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
- ir--;
- } else {
- id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
- }
- ci = &lj_ir_callinfo[id];
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- RegSet drop = RSET_SCRATCH;
- if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
- ra_evictset(as, drop);
- asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- /* Store the result to the spill slot or temp slots. */
- emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1),
- RID_SP, sps_scale(ir->s));
-}
-
-/* Get pointer to TValue. */
-static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isnum(ir->t)) {
- if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
- ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
- else /* Otherwise force a spill and use the spill slot. */
- emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir));
- } else {
- /* Otherwise use g->tmptv to hold the TValue. */
- RegSet allow = rset_exclude(RSET_GPR, dest);
- Reg type;
- emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- emit_setgl(as, src, tmptv.gcr);
- }
- type = ra_allock(as, irt_toitype(ir->t), allow);
- emit_setgl(as, type, tmptv.it);
- }
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx, base;
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- base = ra_alloc1(as, refa, RSET_GPR);
- emit_tsi(as, MIPSI_ADDIU, dest, base, ofs);
- return;
- }
- }
- base = ra_alloc1(as, ir->op1, RSET_GPR);
- idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base);
- emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- int destused = ra_used(ir);
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
- IRRef refkey = ir->op2;
- IRIns *irkey = IR(refkey);
- IRType1 kt = irkey->t;
- uint32_t khash;
- MCLabel l_end, l_loop, l_next;
-
- rset_clear(allow, tab);
- if (irt_isnum(kt)) {
- key = ra_alloc1(as, refkey, RSET_FPR);
- tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
- } else if (!irt_ispri(kt)) {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- type = ra_allock(as, irt_toitype(irkey->t), allow);
- rset_clear(allow, type);
- }
- tmp2 = ra_scratch(as, allow);
- rset_clear(allow, tmp2);
-
- /* Key not found in chain: load niltv. */
- l_end = emit_label(as);
- if (destused)
- emit_loada(as, dest, niltvg(J2G(as->J)));
- else
- *--as->mcp = MIPSI_NOP;
- /* Follow hash chain until the end. */
- emit_move(as, dest, tmp1);
- l_loop = --as->mcp;
- emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, next));
- l_next = emit_label(as);
-
- /* Type and value comparison. */
- if (irt_isnum(kt)) {
- emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
- emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
- emit_tg(as, MIPSI_MFC1, tmp1, key+1);
- emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
- emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
- emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
- } else {
- if (irt_ispri(kt)) {
- emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
- } else {
- emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
- emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
- emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
- }
- }
- emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
- *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
-
- /* Load main position relative to tab->node into dest. */
- khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
- } else {
- Reg tmphash = tmp1;
- if (irref_isk(refkey))
- tmphash = ra_allock(as, khash, allow);
- emit_dst(as, MIPSI_ADDU, dest, dest, tmp1);
- lua_assert(sizeof(Node) == 24);
- emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
- emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
- emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
- emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
- emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
- if (irref_isk(refkey)) {
- /* Nothing to do. */
- } else if (irt_isstr(kt)) {
- emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash));
- } else { /* Must match with hash*() in lj_tab.c. */
- emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
- emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
- emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
- emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
- emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
- if (irt_isnum(kt)) {
- emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
- } else {
- emit_dst(as, MIPSI_OR, dest, dest, tmp1);
- emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
- emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
- }
- emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
- emit_tg(as, MIPSI_MFC1, tmp2, key);
- emit_tg(as, MIPSI_MFC1, tmp1, key+1);
- } else {
- emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
- emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
- emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
- }
- }
- }
-}
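Once the hash is known, the main position is node + (hash & hmask)*sizeof(Node); with sizeof(Node) == 24 (asserted above) the multiply is strength-reduced to (slot<<5) - (slot<<3), which is what the AND/SLL/SLL/SUBU/ADDU sequence computes once the reverse-emitted instructions are read bottom-up. A sketch of that address arithmetic; main_position_offset is an illustrative helper:

#include <stdint.h>
#include <stddef.h>

/* Byte offset of the main hash position: slot * 24 == (slot<<5) - (slot<<3). */
static size_t main_position_offset(uint32_t hash, uint32_t hmask)
{
  uint32_t slot = hash & hmask;
  return ((size_t)slot << 5) - ((size_t)slot << 3);
}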
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- int32_t kofs = ofs + (int32_t)offsetof(Node, key);
- Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg key = RID_NONE, type = RID_TMP, idx = node;
- RegSet allow = rset_exclude(RSET_GPR, node);
- int32_t lo, hi;
- lua_assert(ofs % sizeof(Node) == 0);
- if (ofs > 32736) {
- idx = dest;
- rset_clear(allow, dest);
- kofs = (int32_t)offsetof(Node, key);
- } else if (ra_hasreg(dest)) {
- emit_tsi(as, MIPSI_ADDIU, dest, node, ofs);
- }
- if (!irt_ispri(irkey->t)) {
- key = ra_scratch(as, allow);
- rset_clear(allow, key);
- }
- if (irt_isnum(irkey->t)) {
- lo = (int32_t)ir_knum(irkey)->u32.lo;
- hi = (int32_t)ir_knum(irkey)->u32.hi;
- } else {
- lo = irkey->i;
- hi = irt_toitype(irkey->t);
- if (!ra_hasreg(key))
- goto nolo;
- }
- asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
-nolo:
- asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
- if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
- emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
- if (ofs > 32736)
- emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow));
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
- }
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- /* NYI: Check that UREFO is still open and not aliasing a slot. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
- emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
- } else {
- emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v));
- }
- emit_tsi(as, MIPSI_LW, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- UNUSED(as); UNUSED(ir);
- lua_assert(!ra_used(ir));
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef ref = ir->op2, refk = ir->op1;
- int32_t ofs = (int32_t)sizeof(GCstr);
- Reg r;
- if (irref_isk(ref)) {
- IRRef tmp = refk; refk = ref; ref = tmp;
- } else if (!irref_isk(refk)) {
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- IRIns *irr = IR(ir->op2);
- if (ra_hasreg(irr->r)) {
- ra_noweak(as, irr->r);
- right = irr->r;
- } else if (mayfuse(as, irr->op2) &&
- irr->o == IR_ADD && irref_isk(irr->op2) &&
- checki16(ofs + IR(irr->op2)->i)) {
- ofs += IR(irr->op2)->i;
- right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
- } else {
- right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
- emit_dst(as, MIPSI_ADDU, dest, left, right);
- return;
- }
- r = ra_alloc1(as, ref, RSET_GPR);
- ofs += IR(refk)->i;
- if (checki16(ofs))
- emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
- else
- emit_dst(as, MIPSI_ADDU, dest, r,
- ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static MIPSIns asm_fxloadins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: return MIPSI_LB;
- case IRT_U8: return MIPSI_LBU;
- case IRT_I16: return MIPSI_LH;
- case IRT_U16: return MIPSI_LHU;
- case IRT_NUM: return MIPSI_LDC1;
- case IRT_FLOAT: return MIPSI_LWC1;
- default: return MIPSI_LW;
- }
-}
-
-static MIPSIns asm_fxstoreins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: return MIPSI_SB;
- case IRT_I16: case IRT_U16: return MIPSI_SH;
- case IRT_NUM: return MIPSI_SDC1;
- case IRT_FLOAT: return MIPSI_SWC1;
- default: return MIPSI_SW;
- }
-}
-
-static void asm_fload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
- MIPSIns mi = asm_fxloadins(ir);
- int32_t ofs;
- if (ir->op2 == IRFL_TAB_ARRAY) {
- ofs = asm_fuseabase(as, ir->op1);
- if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
- emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs);
- return;
- }
- }
- ofs = field_ofs[ir->op2];
- lua_assert(!irt_isfp(ir->t));
- emit_tsi(as, mi, dest, idx, ofs);
-}
-
-static void asm_fstore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
- IRIns *irf = IR(ir->op1);
- Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
- int32_t ofs = field_ofs[irf->op2];
- MIPSIns mi = asm_fxstoreins(ir);
- lua_assert(!irt_isfp(ir->t));
- emit_tsi(as, mi, src, idx, ofs);
- }
-}
-
-static void asm_xload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
- asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
-}
-
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
- rset_exclude(RSET_GPR, src), ofs);
- }
-}
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_TMP, idx;
- RegSet allow = RSET_GPR;
- int32_t ofs = 0;
- if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- rset_clear(allow, idx);
- if (irt_isnum(t)) {
- asm_guard(as, MIPSI_BEQ, type, RID_ZERO);
- emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM);
- if (ra_hasreg(dest))
- emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
- } else {
- asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow));
- if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
- }
- emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- Reg idx, src = RID_NONE, type = RID_NONE;
- int32_t ofs = 0;
- if (ir->r == RID_SINK)
- return;
- if (irt_isnum(ir->t)) {
- src = ra_alloc1(as, ir->op2, RSET_FPR);
- } else {
- if (!irt_ispri(ir->t)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- }
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- rset_clear(allow, type);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- if (irt_isnum(ir->t)) {
- emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
- } else {
- if (ra_hasreg(src))
- emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
- emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_NONE, base;
- RegSet allow = RSET_GPR;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
- lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
- dest = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, dest);
- t.irt = IRT_NUM; /* Continue with a regular number type check. */
- } else if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- if (irt_isint(t)) {
- Reg tmp = ra_scratch(as, RSET_FPR);
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, MIPSI_CVT_W_D, tmp, tmp);
- dest = tmp;
- t.irt = IRT_NUM; /* Check for original type. */
- } else {
- Reg tmp = ra_scratch(as, RSET_GPR);
- emit_fg(as, MIPSI_CVT_D_W, dest, dest);
- emit_tg(as, MIPSI_MTC1, tmp, dest);
- dest = tmp;
- t.irt = IRT_INT; /* Check for original type. */
- }
- }
- goto dotypecheck;
- }
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
-dotypecheck:
- if (irt_isnum(t)) {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
- } else {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- Reg ktype = ra_allock(as, irt_toitype(t), allow);
- asm_guard(as, MIPSI_BNE, RID_TMP, ktype);
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
- }
- if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- RegSet drop = RSET_SCRATCH;
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
-
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- if (ra_used(ir))
- ra_destreg(as, ir, RID_RET); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- int32_t ofs = sizeof(GCcdata);
- lua_assert(sz == 4 || sz == 8);
- if (sz == 8) {
- ofs += 4;
- lua_assert((ir+1)->o == IR_HIOP);
- if (LJ_LE) ir++;
- }
- for (;;) {
- Reg r = ra_alloc1z(as, ir->op2, allow);
- emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
- rset_clear(allow, r);
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; if (LJ_BE) ir++; else ir--;
- }
- }
- /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
- emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
- emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
- emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
- emit_ti(as, MIPSI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */
- asm_gencall(as, ci, args);
- ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
- ra_releasetmp(as, ASMREF_TMP1));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- Reg link = RID_TMP;
- MCLabel l_end = emit_label(as);
- emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist));
- emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
- emit_setgl(as, tab, gc.grayagain);
- emit_getgl(as, link, gc.grayagain);
- emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. */
- emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
- emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
- emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj, val, tmp;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- obj = IR(ir->op1)->r;
- tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
- emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
- emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
- emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
- emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
- val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
- emit_tsi(as, MIPSI_LBU, tmp, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
- emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
-}
-
-/* -- Arithmetic and logic operations ------------------------------------- */
-
-static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- emit_fgh(as, mi, dest, left, right);
-}
-
-static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
- emit_fg(as, mi, dest, left);
-}
-
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
- IRRef args[2];
- args[0] = irpp->op1;
- args[1] = irp->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
- return 1;
- }
- }
- return 0;
-}
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, MIPSI_ADD_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dst(as, MIPSI_ADDU, dest, left, right);
- }
-}
-
-static void asm_sub(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, MIPSI_SUB_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_SUBU, dest, left, right);
- }
-}
-
-static void asm_mul(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, MIPSI_MUL_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_MUL, dest, left, right);
- }
-}
-
-static void asm_neg(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fpunary(as, ir, MIPSI_NEG_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
- }
-}
-
-static void asm_arithov(ASMState *as, IRIns *ir)
-{
- Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int k = IR(ir->op2)->i;
- if (ir->o == IR_SUBOV) k = -k;
- if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- if (dest == left) emit_move(as, RID_TMP, left);
- return;
- }
- }
- left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
- right), dest));
- asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
- emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
- if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */
- emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
- } else { /* ((dest^left) & (dest^~right)) < 0 */
- emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
- emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
- }
- emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
- emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
- if (dest == left || dest == right)
- emit_move(as, RID_TMP, dest == left ? left : right);
-}
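The guard encodes the usual two's-complement overflow tests named in the comments: an addition overflows iff both operands share a sign that differs from the result's, ((res^a) & (res^b)) < 0, and subtraction uses the complemented second operand. A C sketch of both tests (add_overflows and sub_overflows are illustrative names):

#include <stdint.h>

/* Signed add, overflow iff ((res ^ a) & (res ^ b)) < 0. */
static int add_overflows(int32_t a, int32_t b, int32_t *res)
{
  int32_t r = (int32_t)((uint32_t)a + (uint32_t)b);
  *res = r;
  return ((r ^ a) & (r ^ b)) < 0;
}

/* Signed sub, overflow iff ((res ^ a) & (res ^ ~b)) < 0. */
static int sub_overflows(int32_t a, int32_t b, int32_t *res)
{
  int32_t r = (int32_t)((uint32_t)a - (uint32_t)b);
  *res = r;
  return ((r ^ a) & (r ^ ~b)) < 0;
}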
-
-static void asm_mulov(ASMState *as, IRIns *ir)
-{
-#if LJ_DUALNUM
-#error "NYI: MULOV"
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused in single-number mode. */
-#endif
-}
-
-#if LJ_HASFFI
-static void asm_add64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k == 0) {
- emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
- goto loarith;
- } else if (checki16(k)) {
- emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- goto loarith;
- }
- }
- emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dst(as, MIPSI_ADDU, dest, left, right);
-loarith:
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k == 0) {
- if (dest != left)
- emit_move(as, dest, left);
- return;
- } else if (checki16(k)) {
- if (dest == left) {
- Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
- emit_move(as, dest, tmp);
- dest = tmp;
- }
- emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- if (dest == left && dest == right) {
- Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
- emit_move(as, dest, tmp);
- dest = tmp;
- }
- emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
- emit_dst(as, MIPSI_ADDU, dest, left, right);
-}
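With no carry flag on MIPS, the carry out of the low-word add is recovered with SLTU: unsigned wrap-around makes the low result smaller than either addend, and that 0/1 feeds the high-word add. A sketch of the same arithmetic; add64 is an illustrative helper:

#include <stdint.h>

static void add64(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi,
                  uint32_t *rlo, uint32_t *rhi)
{
  uint32_t lo = alo + blo;
  uint32_t carry = lo < alo;   /* SLTU: 1 iff the low add wrapped */
  *rlo = lo;
  *rhi = ahi + bhi + carry;    /* the two ADDUs on the hiword path */
}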
-
-static void asm_sub64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
- emit_dst(as, MIPSI_SUBU, dest, left, right);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (dest == left) {
- Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
- emit_move(as, dest, tmp);
- dest = tmp;
- }
- emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
- emit_dst(as, MIPSI_SUBU, dest, left, right);
-}
-
-static void asm_neg64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
- emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
- emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
-}
-#endif
-
-static void asm_bitnot(ASMState *as, IRIns *ir)
-{
- Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
- IRIns *irl = IR(ir->op1);
- if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
- left = ra_alloc2(as, irl, RSET_GPR);
- right = (left >> 8); left &= 255;
- } else {
- left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- right = RID_ZERO;
- }
- emit_dst(as, MIPSI_NOR, dest, left, right);
-}
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
- emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
- } else {
- Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
- emit_dst(as, MIPSI_OR, dest, dest, tmp);
- emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
- emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
- emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
- emit_dta(as, MIPSI_SRL, dest, left, 8);
- emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
- emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
- emit_dta(as, MIPSI_SRL, tmp, left, 24);
- emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
- }
-}
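Without MIPS32R2's WSBH/ROTR the byte swap is composed from shifts, masks and ORs; read bottom-up, the fallback above amounts to the classic four-term expression below (bswap32 is an illustrative name):

#include <stdint.h>

/* 32-bit byte swap as composed by the non-MIPS32R2 fallback. */
static uint32_t bswap32(uint32_t x)
{
  return (x << 24) | ((x & 0xff00u) << 8) |
         ((x >> 8) & 0xff00u) | (x >> 24);
}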
-
-static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checku16(k)) {
- emit_tsi(as, mik, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dst(as, mi, dest, left, right);
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
- emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift);
- } else {
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */
- }
-}
-
-static void asm_bitror(ASMState *as, IRIns *ir)
-{
- if ((as->flags & JIT_F_MIPS32R2)) {
- asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_rotr(as, dest, left, RID_TMP, shift);
- } else {
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
- emit_dst(as, MIPSI_SRLV, dest, right, left);
- emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
- emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
- }
- }
-}
-
-static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
-{
- if (irt_isnum(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- if (dest == left) {
- emit_fg(as, MIPSI_MOVT_D, dest, right);
- } else {
- emit_fg(as, MIPSI_MOVF_D, dest, left);
- if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
- }
- emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (dest == left) {
- emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
- } else {
- emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
- if (dest != right) emit_move(as, dest, right);
- }
- emit_dst(as, MIPSI_SLT, RID_TMP,
- ismax ? left : right, ismax ? right : left);
- }
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-static void asm_comp(ASMState *as, IRIns *ir)
-{
- /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
- IROp op = ir->o;
- if (irt_isnum(ir->t)) {
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
- emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
- } else {
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (op == IR_ABC) op = IR_UGT;
- if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) {
- MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
- ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
- asm_guard(as, mi, left, 0);
- } else {
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if ((op&2)) k++;
- if (checki16(k)) {
- asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
- RID_TMP, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
- RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
- }
- }
-}
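For the constant fast path, the LE/GT forms (op&2) are first rewritten as strict comparisons by bumping the constant, since x <= k is the same as x < k+1 as long as k+1 still fits the 16-bit SLTI immediate, which is what the checki16 test after the increment guards. A tiny sketch of that equivalence (sle_k and sgt_k are illustrative names):

#include <stdint.h>

/* 'x <= k' and 'x > k' expressed through the strict SLTI-style test. */
static int sle_k(int32_t x, int32_t k) { return x < k + 1; }    /* valid while k+1 does not overflow */
static int sgt_k(int32_t x, int32_t k) { return !(x < k + 1); }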
-
-static void asm_compeq(ASMState *as, IRIns *ir)
-{
- Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR);
- right = (left >> 8); left &= 255;
- if (irt_isnum(ir->t)) {
- asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
- emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
- } else {
- asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
- }
-}
-
-#if LJ_HASFFI
-/* 64 bit integer comparisons. */
-static void asm_comp64(ASMState *as, IRIns *ir)
-{
- /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
- IROp op = (ir-1)->o;
- MCLabel l_end;
- Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
- righthi = (lefthi >> 8); lefthi &= 255;
- leftlo = ra_alloc2(as, ir-1,
- rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
- rightlo = (leftlo >> 8); leftlo &= 255;
- asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- l_end = emit_label(as);
- if (lefthi != righthi)
- emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
- (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
- emit_dst(as, MIPSI_SLTU, RID_TMP,
- (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
- if (lefthi != righthi)
- emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
-}
-
-static void asm_comp64eq(ASMState *as, IRIns *ir)
-{
- Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
- tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
- emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
- emit_dst(as, MIPSI_XOR, tmp, left, right);
- left = ra_alloc2(as, ir-1, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_HASFFI
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
- as->curins--; /* Always skip the CONV. */
- if (usehi || uselo)
- asm_conv64(as, ir);
- return;
- } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
- asm_comp64(as, ir);
- return;
- } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
- asm_comp64eq(as, ir);
- return;
- } else if ((ir-1)->o == IR_XSTORE) {
- as->curins--; /* Handle both stores here. */
- if ((ir-1)->r != RID_SINK) {
- asm_xstore(as, ir, LJ_LE ? 4 : 0);
- asm_xstore(as, ir-1, LJ_LE ? 0 : 4);
- }
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
- case IR_ADD: as->curins--; asm_add64(as, ir); break;
- case IR_SUB: as->curins--; asm_sub64(as, ir); break;
- case IR_NEG: as->curins--; asm_neg64(as, ir); break;
- case IR_CALLN:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
- case IR_CNEWI:
- /* Nothing to do here. Handled by lo op itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
- Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
- ExitNo oldsnap = as->snapno;
- rset_clear(allow, pbase);
- tmp = allow ? rset_pickbot(allow) :
- (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
- as->snapno = exitno;
- asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
- as->snapno = oldsnap;
- if (allow == RSET_EMPTY) /* Restore temp. register. */
- emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0);
- else
- ra_modified(as, tmp);
- emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
- emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase);
- emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack));
- if (pbase == RID_TMP)
- emit_getgl(as, RID_TMP, jit_base);
- emit_getgl(as, tmp, jit_L);
- if (allow == RSET_EMPTY) /* Spill temp. register. */
- emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0);
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
- } else {
- Reg type;
- RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- rset_clear(allow, src);
- emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s == 0) continue; /* Do not overwrite link to previous frame. */
- type = ra_allock(as, (int32_t)(*flinks--), allow);
- } else {
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- }
- emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- /* Assumes asm_snap_prep() already done. */
- asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- tmp = ra_releasetmp(as, ASMREF_TMP2);
- emit_loadi(as, tmp, as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
- emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
- emit_getgl(as, tmp, gc.threshold);
- emit_getgl(as, RID_TMP, gc.total);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- p[-1] = MIPSI_NOP;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guard already inverted the cond branch. Only patch the target. */
- p[-3] |= ((target-p+2) & 0x0000ffffu);
- } else {
- p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (as->loopinv) as->mctop--;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (r != RID_BASE)
- emit_move(as, r, RID_BASE);
- }
-}
-
-/* Coalesce BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (as->loopinv) as->mctop--;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (irp->r == r) {
- rset_clear(allow, r); /* Mark same BASE register as coalesced. */
- } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
- rset_clear(allow, irp->r);
- emit_move(as, r, irp->r); /* Move from coalesced parent reg. */
- } else {
- emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
- }
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
- int32_t spadj = as->T->spadjust;
- MCode *p = as->mctop-1;
- *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
- p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */
- as->invmcp = as->loopref ? as->mcp : NULL;
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_EQ: case IR_NE: asm_compeq(as, ir); break;
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_ABC:
- asm_comp(as, ir);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_bitnot(as, ir); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI); break;
- case IR_BOR: asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI); break;
- case IR_BXOR: asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI); break;
-
- case IR_BSHL: asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL); break;
- case IR_BSHR: asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL); break;
- case IR_BSAR: asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA); break;
- case IR_BROL: lua_assert(0); break;
- case IR_BROR: asm_bitror(as, ir); break;
-
- /* Arithmetic ops. */
- case IR_ADD: asm_add(as, ir); break;
- case IR_SUB: asm_sub(as, ir); break;
- case IR_MUL: asm_mul(as, ir); break;
- case IR_DIV: asm_fparith(as, ir, MIPSI_DIV_D); break;
- case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
- case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
- case IR_NEG: asm_neg(as, ir); break;
-
- case IR_ABS: asm_fpunary(as, ir, MIPSI_ABS_D); break;
- case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
- case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
- case IR_MIN: asm_min_max(as, ir, 0); break;
- case IR_MAX: asm_min_max(as, ir, 1); break;
- case IR_FPMATH:
- if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
- break;
- if (ir->op2 <= IRFPM_TRUNC)
- asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
- else if (ir->op2 == IRFPM_SQRT)
- asm_fpunary(as, ir, MIPSI_SQRT_D);
- else
- asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
-
- /* Overflow-checking arithmetic ops. */
- case IR_ADDOV: asm_arithov(as, ir); break;
- case IR_SUBOV: asm_arithov(as, ir); break;
- case IR_MULOV: asm_mulov(as, ir); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: asm_fload(as, ir); break;
- case IR_XLOAD: asm_xload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: asm_fstore(as, ir); break;
- case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- uint32_t i, nargs = (int)CCI_NARGS(ci);
- int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
- asm_collectargs(as, ir, ci, args);
- for (i = 0; i < nargs; i++) {
- if (args[i] && irt_isfp(IR(args[i])->t) &&
- nfpr > 0 && !(ci->flags & CCI_VARARG)) {
- nfpr--;
- ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
- } else if (args[i] && irt_isnum(IR(args[i])->t)) {
- nfpr = 0;
- ngpr = ngpr & ~1;
- if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
- } else {
- nfpr = 0;
- if (ngpr > 0) ngpr--; else nslots++;
- }
- }
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
- return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
-}
-
-static void asm_setup_target(ASMState *as)
-{
- asm_sparejump_setup(as);
- asm_exitstub_setup(as);
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *pe = (MCode *)((char *)p + T->szmcode);
- MCode *px = exitstub_trace_addr(T, exitno);
- MCode *cstart = NULL, *cstop = NULL;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
- MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
- for (p++; p < pe; p++) {
- if (*p == exitload) { /* Look for load of exit number. */
- if (((p[-1] ^ (px-p)) & 0xffffu) == 0) { /* Look for exitstub branch. */
- ptrdiff_t delta = target - p;
- if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */
- patchbranch:
- p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
- *p = MIPSI_NOP; /* Replace the load of the exit number. */
- cstop = p;
- if (!cstart) cstart = p-1;
- } else { /* Branch out of range. Use spare jump slot in mcarea. */
- int i;
- for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) {
- if (mcarea[i] == tjump) {
- delta = mcarea+i - p;
- goto patchbranch;
- } else if (mcarea[i] == MIPSI_NOP) {
- mcarea[i] = tjump;
- cstart = mcarea+i;
- delta = mcarea+i - p;
- goto patchbranch;
- }
- }
- /* Ignore jump slot overflow. Child trace is simply not attached. */
- }
- } else if (p+1 == pe) {
- /* Patch NOP after code for inverted loop branch. Use of J is ok. */
- lua_assert(p[1] == MIPSI_NOP);
- p[1] = tjump;
- *p = MIPSI_NOP; /* Replace the load of the exit number. */
- cstop = p+2;
- if (!cstart) cstart = p+1;
- }
- }
- }
- if (cstart) lj_mcode_sync(cstart, cstop);
- lj_mcode_patch(J, mcarea, 1);
-}
-
+/*
+** MIPS IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a register or RID_ZERO. */
+static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0)
+ return RID_ZERO;
+ r = ra_allocref(as, ref, allow);
+ } else {
+ ra_noweak(as, r);
+ }
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_alloc1z(as, ir->op2, allow);
+ left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_alloc1z(as, ir->op1, allow);
+ right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Need some spare long-range jump slots, for out-of-range branches. */
+#define MIPS_SPAREJUMP 4
+
+/* Setup spare long-range jump slots per mcarea. */
+static void asm_sparejump_setup(ASMState *as)
+{
+ MCode *mxp = as->mcbot;
+ /* Assumes sizeof(MCLink) == 8. */
+ if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) {
+ lua_assert(MIPSI_NOP == 0);
+ memset(mxp+2, 0, MIPS_SPAREJUMP*8);
+ mxp += MIPS_SPAREJUMP*2;
+ lua_assert(mxp < as->mctop);
+ lj_mcode_sync(as->mcbot, mxp);
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ }
+}
+
+/* Setup exit stub after the end of each trace. */
+static void asm_exitstub_setup(ASMState *as)
+{
+ MCode *mxp = as->mctop;
+ /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
+ *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
+ *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
+ lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
+ *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
+ as->mctop = mxp;
+}
+
+/* Keep this in-sync with exitstub_trace_addr(). */
+#define asm_exitstub_addr(as) ((as)->mctop)
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
+{
+ MCode *target = asm_exitstub_addr(as);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->invmcp = NULL;
+ as->loopinv = 1;
+ as->mcp = p+1;
+ mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */
+ target = p; /* Patch target later in asm_loop_fixup. */
+ }
+ emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
+ emit_branch(as, mi, rs, rt, target);
+}
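+
+/* The XOR above turns a branch into its logical inverse so the loop branch
+** can be inverted in-place: encodings with a major opcode nibble of 1
+** (BEQ/BNE, BLEZ/BGTZ) toggle bit 26, everything else reaching this point
+** (REGIMM and FP compare branches) toggles the condition bit 16.  A hedged
+** stand-alone restatement of that rule (illustration only):
+*/
+static MIPSIns invert_cond_branch(MIPSIns mi)
+{
+ return mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Same as asm_guard(). */
+}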
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ int32_t jgl = (intptr_t)J2G(as->J);
+ if ((uint32_t)(ofs-jgl) < 65536) {
+ *ofsp = ofs-jgl-32768;
+ return RID_JGL;
+ } else {
+ *ofsp = (int16_t)ofs;
+ return ra_allock(as, ofs-(int16_t)ofs, allow);
+ }
+ }
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
+ RegSet allow, int32_t ofs)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
+ if (ir->o == IR_ADD) {
+ int32_t ofs2;
+ if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
+ ref = ir->op1;
+ ofs = ofs2;
+ }
+ } else if (ir->o == IR_STRREF) {
+ int32_t ofs2 = 65536;
+ lua_assert(ofs == 0);
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs2 = ofs + IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs2 = ofs + IR(ir->op1)->i;
+ ref = ir->op2;
+ }
+ if (!checki16(ofs2)) {
+ /* NYI: Fuse ADD with constant. */
+ Reg right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ emit_hsi(as, mi, rt, RID_TMP, ofs);
+ emit_dst(as, MIPSI_ADDU, RID_TMP, left, right);
+ return;
+ }
+ ofs = ofs2;
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_hsi(as, mi, rt, base, ofs);
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = 16;
+ Reg gpr, fpr = REGARG_FIRSTFPR;
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
+ as->cost[gpr] = REGCOST(~0u, ASMREF_L);
+ gpr = REGARG_FIRSTGPR;
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ if (ref) {
+ IRIns *ir = IR(ref);
+ if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
+ !(ci->flags & CCI_VARARG)) {
+ lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr += 2;
+ gpr += irt_isnum(ir->t) ? 2 : 1;
+ } else {
+ fpr = REGARG_LASTFPR+1;
+ if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
+ if (irt_isfp(ir->t)) {
+ RegSet of = as->freeset;
+ Reg r;
+ /* Workaround to protect argument GPRs from being used for remat. */
+ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
+ r = ra_alloc1(as, ref, RSET_FPR);
+ as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
+ if (irt_isnum(ir->t)) {
+ emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
+ emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
+ lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */
+ gpr += 2;
+ } else if (irt_isfloat(ir->t)) {
+ emit_tg(as, MIPSI_MFC1, gpr, r);
+ gpr++;
+ }
+ } else {
+ ra_leftov(as, gpr, ref);
+ gpr++;
+ }
+ } else {
+ Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, r, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ }
+ } else {
+ fpr = REGARG_LASTFPR+1;
+ if (gpr <= REGARG_LASTGPR)
+ gpr++;
+ else
+ ofs += 4;
+ }
+ checkmclim(as);
+ }
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lua_assert(!irt_ispri(ir->t));
+ if (irt_isfp(ir->t)) {
+ if ((ci->flags & CCI_CASTU64)) {
+ int32_t ofs = sps_scale(ir->s);
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
+ emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
+ }
+ if (ofs) {
+ emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
+ }
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need specific register for indirect calls. */
+ Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
+ MCode *p = as->mcp;
+ if (r == RID_CFUNCADDR)
+ *--p = MIPSI_NOP;
+ else
+ *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
+ *--p = MIPSI_JALR | MIPSF_S(r);
+ as->mcp = p;
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
+ RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR);
+ if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_FPRET);
+ emit_call(as, (void *)lj_ir_callinfo[id].func);
+ ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guard(as, MIPSI_BNE, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_tsi(as, MIPSI_LW, RID_TMP, base, -8);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guard(as, MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
+ emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, MIPSI_CVT_W_D, tmp, left);
+}
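+
+/* asm_tointg() converts the number to an integer, converts it back and
+** guards on FP equality, so only values that are exact 32 bit integers pass.
+** A hedged host-side sketch of the same check (illustration only; glosses
+** over rounding mode and the out-of-range case):
+*/
+static int sketch_toint_exact(double n, int32_t *out)
+{
+ int32_t i = (int32_t)n; /* Stands in for CVT.W.D. */
+ if ((double)i != n) return 0; /* Guard would fail: not an exact integer. */
+ *out = i;
+ return 1;
+}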
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lua_assert(irt_type(ir->t) != st);
+ lua_assert(!(irt_isint64(ir->t) ||
+ (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
+ dest, ra_alloc1(as, lref, RSET_FPR));
+ } else if (st == IRT_U32) { /* U32 to FP conversion. */
+ /* y = (x ^ 0x80000000) + 2147483648.0 */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ emit_fgh(as, irt_isfloat(ir->t) ? MIPSI_ADD_S : MIPSI_ADD_D,
+ dest, dest, tmp);
+ emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
+ dest, dest);
+ if (irt_isfloat(ir->t))
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
+ RSET_GPR);
+ else
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
+ RSET_GPR);
+ emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
+ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
+ emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
+ } else { /* Integer to FP conversion. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
+ dest, dest);
+ emit_tg(as, MIPSI_MTC1, left, dest);
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ if (irt_isu32(ir->t)) {
+ /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
+ emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
+ emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
+ tmp, tmp);
+ emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
+ tmp, left, tmp);
+ if (st == IRT_FLOAT)
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
+ RSET_GPR);
+ else
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
+ RSET_GPR);
+ } else {
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
+ tmp, left);
+ }
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if ((ir->op2 & IRCONV_SEXT)) {
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
+ } else {
+ uint32_t shift = st == IRT_I8 ? 24 : 16;
+ emit_dta(as, MIPSI_SRA, dest, dest, shift);
+ emit_dta(as, MIPSI_SLL, dest, left, shift);
+ }
+ } else {
+ emit_tsi(as, MIPSI_ANDI, dest, left,
+ (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
+ }
+ } else { /* 32/64 bit integer conversions. */
+ /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
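+
+/* The unsigned conversions above avoid 64 bit arithmetic with a sign-bit
+** trick: u32->FP flips the sign bit and adds 2^31, FP->u32 subtracts 2^31,
+** floors and flips the sign bit back.  A hedged host-side sketch of both
+** directions (illustration only; assumes <math.h> floor()):
+*/
+static double sketch_u32_to_num(uint32_t x)
+{
+ return (double)(int32_t)(x ^ 0x80000000u) + 2147483648.0;
+}
+static uint32_t sketch_num_to_u32(double n)
+{
+ return (uint32_t)(int32_t)floor(n - 2147483648.0) ^ 0x80000000u;
+}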
+
+#if LJ_HASFFI
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ IRCallID id;
+ const CCallInfo *ci;
+ IRRef args[2];
+ args[LJ_BE?0:1] = ir->op1;
+ args[LJ_BE?1:0] = (ir-1)->op1;
+ if (st == IRT_NUM || st == IRT_FLOAT) {
+ id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
+ ir--;
+ } else {
+ id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
+ }
+ ci = &lj_ir_callinfo[id];
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ RegSet drop = RSET_SCRATCH;
+ if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
+ ra_evictset(as, drop);
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1),
+ RID_SP, sps_scale(ir->s));
+}
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ else /* Otherwise force a spill and use the spill slot. */
+ emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir));
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ RegSet allow = rset_exclude(RSET_GPR, dest);
+ Reg type;
+ emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ emit_setgl(as, src, tmptv.gcr);
+ }
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ emit_setgl(as, type, tmptv.it);
+ }
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_tsi(as, MIPSI_ADDIU, dest, base, ofs);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base);
+ emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
+}
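+
+/* Address math above: array slots are 8 byte TValues, so the element address
+** is base + (index << 3).  Hedged one-liner (illustration only):
+*/
+static uint32_t sketch_aref_ofs(uint32_t idx) { return idx << 3; } /* idx * sizeof(TValue) */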
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ rset_clear(allow, tab);
+ if (irt_isnum(kt)) {
+ key = ra_alloc1(as, refkey, RSET_FPR);
+ tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ } else if (!irt_ispri(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ type = ra_allock(as, irt_toitype(irkey->t), allow);
+ rset_clear(allow, type);
+ }
+ tmp2 = ra_scratch(as, allow);
+ rset_clear(allow, tmp2);
+
+ /* Key not found in chain: load niltv. */
+ l_end = emit_label(as);
+ if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+ else
+ *--as->mcp = MIPSI_NOP;
+ /* Follow hash chain until the end. */
+ emit_move(as, dest, tmp1);
+ l_loop = --as->mcp;
+ emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (irt_isnum(kt)) {
+ emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
+ emit_tg(as, MIPSI_MFC1, tmp1, key+1);
+ emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
+ emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
+ emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
+ } else {
+ if (irt_ispri(kt)) {
+ emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
+ } else {
+ emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
+ emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
+ emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
+ }
+ }
+ emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
+ *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ Reg tmphash = tmp1;
+ if (irref_isk(refkey))
+ tmphash = ra_allock(as, khash, allow);
+ emit_dst(as, MIPSI_ADDU, dest, dest, tmp1);
+ lua_assert(sizeof(Node) == 24);
+ emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
+ emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
+ emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
+ emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
+ emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
+ if (irref_isk(refkey)) {
+ /* Nothing to do. */
+ } else if (irt_isstr(kt)) {
+ emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
+ emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
+ emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
+ emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
+ emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
+ if (irt_isnum(kt)) {
+ emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
+ } else {
+ emit_dst(as, MIPSI_OR, dest, dest, tmp1);
+ emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
+ emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
+ }
+ emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
+ emit_tg(as, MIPSI_MFC1, tmp2, key);
+ emit_tg(as, MIPSI_MFC1, tmp1, key+1);
+ } else {
+ emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
+ emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
+ emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
+ }
+ }
+ }
+}
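+
+/* Main-position math above: with sizeof(Node) == 24 (asserted), the byte
+** offset of hash slot i is formed without a multiply as (i << 5) - (i << 3).
+** Hedged restatement (illustration only):
+*/
+static uint32_t sketch_node_ofs(uint32_t i) { return (i << 5) - (i << 3); } /* i*24 */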
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ int32_t lo, hi;
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ofs > 32736) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_tsi(as, MIPSI_ADDIU, dest, node, ofs);
+ }
+ if (!irt_ispri(irkey->t)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ }
+ if (irt_isnum(irkey->t)) {
+ lo = (int32_t)ir_knum(irkey)->u32.lo;
+ hi = (int32_t)ir_knum(irkey)->u32.hi;
+ } else {
+ lo = irkey->i;
+ hi = irt_toitype(irkey->t);
+ if (!ra_hasreg(key))
+ goto nolo;
+ }
+ asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
+nolo:
+ asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
+ if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
+ if (ofs > 32736)
+ emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow));
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
+ }
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
+ emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_tsi(as, MIPSI_LW, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lua_assert(!ra_used(ir));
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ int32_t ofs = (int32_t)sizeof(GCstr);
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ checki16(ofs + IR(irr->op2)->i)) {
+ ofs += IR(irr->op2)->i;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ ofs += IR(refk)->i;
+ if (checki16(ofs))
+ emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
+ else
+ emit_dst(as, MIPSI_ADDU, dest, r,
+ ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static MIPSIns asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return MIPSI_LB;
+ case IRT_U8: return MIPSI_LBU;
+ case IRT_I16: return MIPSI_LH;
+ case IRT_U16: return MIPSI_LHU;
+ case IRT_NUM: return MIPSI_LDC1;
+ case IRT_FLOAT: return MIPSI_LWC1;
+ default: return MIPSI_LW;
+ }
+}
+
+static MIPSIns asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return MIPSI_SB;
+ case IRT_I16: case IRT_U16: return MIPSI_SH;
+ case IRT_NUM: return MIPSI_SDC1;
+ case IRT_FLOAT: return MIPSI_SWC1;
+ default: return MIPSI_SW;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ MIPSIns mi = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ lua_assert(!irt_isfp(ir->t));
+ emit_tsi(as, mi, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ MIPSIns mi = asm_fxstoreins(ir);
+ lua_assert(!irt_isfp(ir->t));
+ emit_tsi(as, mi, src, idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src), ofs);
+ }
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_TMP, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = 0;
+ if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ rset_clear(allow, idx);
+ if (irt_isnum(t)) {
+ asm_guard(as, MIPSI_BEQ, type, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM);
+ if (ra_hasreg(dest))
+ emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
+ } else {
+ asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow));
+ if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
+ }
+ emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = 0;
+ if (ir->r == RID_SINK)
+ return;
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ } else {
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ rset_clear(allow, type);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (irt_isnum(ir->t)) {
+ emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
+ } else {
+ if (ra_hasreg(src))
+ emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ Reg tmp = ra_scratch(as, RSET_FPR);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, MIPSI_CVT_W_D, tmp, tmp);
+ dest = tmp;
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, RSET_GPR);
+ emit_fg(as, MIPSI_CVT_D_W, dest, dest);
+ emit_tg(as, MIPSI_MTC1, tmp, dest);
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+dotypecheck:
+ if (irt_isnum(t)) {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
+ } else {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ Reg ktype = ra_allock(as, irt_toitype(t), allow);
+ asm_guard(as, MIPSI_BNE, RID_TMP, ktype);
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
+ }
+ if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lua_assert(sz == 4 || sz == 8);
+ if (sz == 8) {
+ ofs += 4;
+ lua_assert((ir+1)->o == IR_HIOP);
+ if (LJ_LE) ir++;
+ }
+ for (;;) {
+ Reg r = ra_alloc1z(as, ir->op2, allow);
+ emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; if (LJ_BE) ir++; else ir--;
+ }
+ }
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
+ emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
+ emit_ti(as, MIPSI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg link = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_getgl(as, link, gc.grayagain);
+ emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. */
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
+ emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_tsi(as, MIPSI_LBU, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ emit_fgh(as, mi, dest, left, right);
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_fg(as, mi, dest, left);
+}
+
+static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
+ IRRef args[2];
+ args[0] = irpp->op1;
+ args[1] = irp->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+ return 1;
+ }
+ }
+ return 0;
+}
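+
+/* The fusion above uses the identity x^y == 2^(y*log2(x)): an
+** exp2(mul(log2(x), y)) chain collapses into one pow() call.  A hedged
+** host-side restatement (illustration only; assumes <math.h> exp2/log2):
+*/
+static double sketch_pow_via_exp2(double x, double y)
+{
+ return exp2(y * log2(x));
+}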
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, MIPSI_ADD_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+ }
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, MIPSI_SUB_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+ }
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, MIPSI_MUL_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_MUL, dest, left, right);
+ }
+}
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, MIPSI_NEG_D);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+ }
+}
+
+static void asm_arithov(ASMState *as, IRIns *ir)
+{
+ Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int k = IR(ir->op2)->i;
+ if (ir->o == IR_SUBOV) k = -k;
+ if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ if (dest == left) emit_move(as, RID_TMP, left);
+ return;
+ }
+ }
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
+ right), dest));
+ asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
+ emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
+ if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */
+ emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
+ } else { /* ((dest^left) & (dest^~right)) < 0 */
+ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
+ emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
+ }
+ emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
+ emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
+ if (dest == left || dest == right)
+ emit_move(as, RID_TMP, dest == left ? left : right);
+}
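+
+/* The overflow test built above, in C terms: a signed add overflows exactly
+** when ((res ^ a) & (res ^ b)) < 0, i.e. the result's sign differs from the
+** sign of both operands.  A hedged host-side sketch (illustration only):
+*/
+static int sketch_addov(int32_t a, int32_t b, int32_t *res)
+{
+ uint32_t s = (uint32_t)a + (uint32_t)b; /* Wraps like ADDU. */
+ *res = (int32_t)s;
+ return (int32_t)((s ^ (uint32_t)a) & (s ^ (uint32_t)b)) < 0; /* 1 = overflow. */
+}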
+
+static void asm_mulov(ASMState *as, IRIns *ir)
+{
+#if LJ_DUALNUM
+#error "NYI: MULOV"
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused in single-number mode. */
+#endif
+}
+
+#if LJ_HASFFI
+static void asm_add64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0) {
+ emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
+ goto loarith;
+ } else if (checki16(k)) {
+ emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ goto loarith;
+ }
+ }
+ emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+loarith:
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0) {
+ if (dest != left)
+ emit_move(as, dest, left);
+ return;
+ } else if (checki16(k)) {
+ if (dest == left) {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ if (dest == left && dest == right) {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+}
+
+static void asm_sub64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+}
+#endif
+
+static void asm_bitnot(ASMState *as, IRIns *ir)
+{
+ Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
+ IRIns *irl = IR(ir->op1);
+ if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
+ left = ra_alloc2(as, irl, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ } else {
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ right = RID_ZERO;
+ }
+ emit_dst(as, MIPSI_NOR, dest, left, right);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
+ emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
+ } else {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
+ emit_dst(as, MIPSI_OR, dest, dest, tmp);
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
+ emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
+ emit_dta(as, MIPSI_SRL, dest, left, 8);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
+ emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
+ emit_dta(as, MIPSI_SRL, tmp, left, 24);
+ emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
+ }
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checku16(k)) {
+ emit_tsi(as, mik, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, mi, dest, left, right);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
+ emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift);
+ } else {
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */
+ }
+}
+
+static void asm_bitror(ASMState *as, IRIns *ir)
+{
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_rotr(as, dest, left, RID_TMP, shift);
+ } else {
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SRLV, dest, right, left);
+ emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
+ emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
+ }
+ }
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
+{
+ if (irt_isnum(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ emit_fg(as, MIPSI_MOVT_D, dest, right);
+ } else {
+ emit_fg(as, MIPSI_MOVF_D, dest, left);
+ if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
+ }
+ emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
+ } else {
+ emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
+ if (dest != right) emit_move(as, dest, right);
+ }
+ emit_dst(as, MIPSI_SLT, RID_TMP,
+ ismax ? left : right, ismax ? right : left);
+ }
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
+ IROp op = ir->o;
+ if (irt_isnum(ir->t)) {
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
+ } else {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (op == IR_ABC) op = IR_UGT;
+ if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) {
+ MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
+ ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
+ asm_guard(as, mi, left, 0);
+ } else {
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if ((op&2)) k++;
+ if (checki16(k)) {
+ asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
+ RID_TMP, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
+ RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
+ }
+ }
+}
+
+static void asm_compeq(ASMState *as, IRIns *ir)
+{
+ Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (irt_isnum(ir->t)) {
+ asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
+ } else {
+ asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
+ }
+}
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_comp64(ASMState *as, IRIns *ir)
+{
+ /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
+ IROp op = (ir-1)->o;
+ MCLabel l_end;
+ Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
+ righthi = (lefthi >> 8); lefthi &= 255;
+ leftlo = ra_alloc2(as, ir-1,
+ rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
+ rightlo = (leftlo >> 8); leftlo &= 255;
+ asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ l_end = emit_label(as);
+ if (lefthi != righthi)
+ emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
+ (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
+ emit_dst(as, MIPSI_SLTU, RID_TMP,
+ (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
+ if (lefthi != righthi)
+ emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
+}
+
+static void asm_comp64eq(ASMState *as, IRIns *ir)
+{
+ Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
+ emit_dst(as, MIPSI_XOR, tmp, left, right);
+ left = ra_alloc2(as, ir-1, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_HASFFI
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+ } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ asm_comp64(as, ir);
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ asm_comp64eq(as, ir);
+ return;
+ } else if ((ir-1)->o == IR_XSTORE) {
+ as->curins--; /* Handle both stores here. */
+ if ((ir-1)->r != RID_SINK) {
+ asm_xstore(as, ir, LJ_LE ? 4 : 0);
+ asm_xstore(as, ir-1, LJ_LE ? 0 : 4);
+ }
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_ADD: as->curins--; asm_add64(as, ir); break;
+ case IR_SUB: as->curins--; asm_sub64(as, ir); break;
+ case IR_NEG: as->curins--; asm_neg64(as, ir); break;
+ case IR_CALLN:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
+#endif
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
+ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
+ ExitNo oldsnap = as->snapno;
+ rset_clear(allow, pbase);
+ tmp = allow ? rset_pickbot(allow) :
+ (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
+ as->snapno = exitno;
+ asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
+ as->snapno = oldsnap;
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0);
+ else
+ ra_modified(as, tmp);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
+ emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase);
+ emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack));
+ if (pbase == RID_TMP)
+ emit_getgl(as, RID_TMP, jit_base);
+ emit_getgl(as, tmp, jit_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
+ } else {
+ Reg type;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), allow);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ }
+ emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ /* Assumes asm_snap_prep() already done. */
+ asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ tmp = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
+ emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
+ emit_getgl(as, tmp, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ p[-1] = MIPSI_NOP;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guard already inverted the cond branch. Only patch the target. */
+ p[-3] |= ((target-p+2) & 0x0000ffffu);
+ } else {
+ p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (as->loopinv) as->mctop--;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_move(as, r, RID_BASE);
+ }
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (as->loopinv) as->mctop--;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_move(as, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ int32_t spadj = as->T->spadjust;
+ MCode *p = as->mctop-1;
+ *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
+ p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */
+ as->invmcp = as->loopref ? as->mcp : NULL;
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+ case IR_GCSTEP: asm_gcstep(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_EQ: case IR_NE: asm_compeq(as, ir); break;
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+ asm_comp(as, ir);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bitnot(as, ir); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI); break;
+ case IR_BOR: asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI); break;
+ case IR_BXOR: asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL); break;
+ case IR_BSHR: asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL); break;
+ case IR_BSAR: asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA); break;
+ case IR_BROL: lua_assert(0); break;
+ case IR_BROR: asm_bitror(as, ir); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB: asm_sub(as, ir); break;
+ case IR_MUL: asm_mul(as, ir); break;
+ case IR_DIV: asm_fparith(as, ir, MIPSI_DIV_D); break;
+ case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
+ case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
+ case IR_NEG: asm_neg(as, ir); break;
+
+ case IR_ABS: asm_fpunary(as, ir, MIPSI_ABS_D); break;
+ case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
+ case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
+ case IR_MIN: asm_min_max(as, ir, 0); break;
+ case IR_MAX: asm_min_max(as, ir, 1); break;
+ case IR_FPMATH:
+ if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+ break;
+ if (ir->op2 <= IRFPM_TRUNC)
+ asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ else if (ir->op2 == IRFPM_SQRT)
+ asm_fpunary(as, ir, MIPSI_SQRT_D);
+ else
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+
+ /* Overflow-checking arithmetic ops. */
+ case IR_ADDOV: asm_arithov(as, ir); break;
+ case IR_SUBOV: asm_arithov(as, ir); break;
+ case IR_MULOV: asm_mulov(as, ir); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir, 0); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOBIT: asm_tobit(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = (int)CCI_NARGS(ci);
+ int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++) {
+ if (args[i] && irt_isfp(IR(args[i])->t) &&
+ nfpr > 0 && !(ci->flags & CCI_VARARG)) {
+ nfpr--;
+ ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
+ } else if (args[i] && irt_isnum(IR(args[i])->t)) {
+ nfpr = 0;
+ ngpr = ngpr & ~1;
+ if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
+ } else {
+ nfpr = 0;
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ asm_sparejump_setup(as);
+ asm_exitstub_setup(as);
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ MCode *cstart = NULL, *cstop = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
+ MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+ for (p++; p < pe; p++) {
+ if (*p == exitload) { /* Look for load of exit number. */
+ if (((p[-1] ^ (px-p)) & 0xffffu) == 0) { /* Look for exitstub branch. */
+ ptrdiff_t delta = target - p;
+ if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */
+ patchbranch:
+ p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
+ *p = MIPSI_NOP; /* Replace the load of the exit number. */
+ cstop = p;
+ if (!cstart) cstart = p-1;
+ } else { /* Branch out of range. Use spare jump slot in mcarea. */
+ int i;
+ for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) {
+ if (mcarea[i] == tjump) {
+ delta = mcarea+i - p;
+ goto patchbranch;
+ } else if (mcarea[i] == MIPSI_NOP) {
+ mcarea[i] = tjump;
+ cstart = mcarea+i;
+ delta = mcarea+i - p;
+ goto patchbranch;
+ }
+ }
+ /* Ignore jump slot overflow. Child trace is simply not attached. */
+ }
+ } else if (p+1 == pe) {
+ /* Patch NOP after code for inverted loop branch. Use of J is ok. */
+ lua_assert(p[1] == MIPSI_NOP);
+ p[1] = tjump;
+ *p = MIPSI_NOP; /* Replace the load of the exit number. */
+ cstop = p+2;
+ if (!cstart) cstart = p+1;
+ }
+ }
+ }
+ if (cstart) lj_mcode_sync(cstart, cstop);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
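Aside (not part of the patch): both the MIPS back-end added above and the PPC back-end removed below rely on the same ra_alloc2() convention, visible in the PPC definition further down. The call returns two allocated registers packed into one value, the left operand in the low 8 bits and the right operand in bits 8..15, which callers unpack with right = (left >> 8); left &= 255;. A minimal standalone C sketch of just that packing idiom follows; the pack_pair() helper and the concrete register numbers are made up for illustration and the Reg width is an assumption.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t Reg;  /* Assumed width; only needs to hold two 8-bit register ids. */

/* Pack a register pair the way ra_alloc2() returns it: left | (right << 8). */
static Reg pack_pair(Reg left, Reg right)
{
  return left | (right << 8);
}

int main(void)
{
  Reg left = pack_pair(4, 7), right;  /* Hypothetical allocation: left = r4, right = r7. */
  right = (left >> 8); left &= 255;   /* The unpack idiom used throughout the emitters. */
  printf("left=%u right=%u\n", (unsigned)left, (unsigned)right);  /* prints: left=4 right=7 */
  return 0;
}

Keeping both operands in one return value lets the three-operand emitters take a single Reg result and split it locally, which is why the same two-line decode appears before nearly every emit_dst()/emit_fgh() call in both files.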
diff --git a/3rdparty/lua/src/lj_asm_ppc.h b/3rdparty/lua/src/lj_asm_ppc.h
index 531b8db..651fa31 100644
--- a/3rdparty/lua/src/lj_asm_ppc.h
+++ b/3rdparty/lua/src/lj_asm_ppc.h
@@ -1,2169 +1,2166 @@
-/*
-** PPC IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Register allocator extensions --------------------------------------- */
-
-/* Allocate a register with a hint. */
-static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!ra_hashint(r) && !iscrossref(as, ref))
- ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
- r = ra_allocref(as, ref, allow);
- }
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate two source registers for three-operand instructions. */
-static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- Reg left = irl->r, right = irr->r;
- if (ra_hasreg(left)) {
- ra_noweak(as, left);
- if (ra_noreg(right))
- right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
- else
- ra_noweak(as, right);
- } else if (ra_hasreg(right)) {
- ra_noweak(as, right);
- left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
- } else if (ra_hashint(right)) {
- right = ra_allocref(as, ir->op2, allow);
- left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
- } else {
- left = ra_allocref(as, ir->op1, allow);
- right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
- }
- return left | (right << 8);
-}
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Setup exit stubs after the end of each trace. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
- ExitNo i;
- MCode *mxp = as->mctop;
- if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
- asm_mclimit(as);
- /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
- for (i = nexits-1; (int32_t)i >= 0; i--)
- *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
- *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
- mxp--;
- *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
- *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
- as->mctop = mxp;
-}
-
-static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
-{
- /* Keep this in-sync with exitstub_trace_addr(). */
- return as->mctop + exitno + 3;
-}
-
-/* Emit conditional branch to exit for guard. */
-static void asm_guardcc(ASMState *as, PPCCC cc)
-{
- MCode *target = asm_exitstub_addr(as, as->snapno);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->loopinv = 1;
- *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
- emit_condbranch(as, PPCI_BC, cc^4, p);
- return;
- }
- emit_condbranch(as, PPCI_BC, cc, target);
-}
-
-/* -- Operand fusion ------------------------------------------------------ */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if there's no conflicting instruction between curins and ref. */
-static int noconflict(ASMState *as, IRRef ref, IROp conflict)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref)
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse the array base of colocated arrays. */
-static int32_t asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
- return (int32_t)sizeof(GCtab);
- return 0;
-}
-
-/* Indicates load/store indexed is ok. */
-#define AHUREF_LSX ((int32_t)0x80000000)
-
-/* Fuse array/hash/upvalue reference into register+offset operand. */
-static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- if (ir->o == IR_AREF) {
- if (mayfuse(as, ref)) {
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, refa, allow);
- }
- }
- if (*ofsp == AHUREF_LSX) {
- Reg base = ra_alloc1(as, ir->op1, allow);
- Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- return base | (idx << 8);
- }
- }
- } else if (ir->o == IR_HREFK) {
- if (mayfuse(as, ref)) {
- int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, ir->op1, allow);
- }
- }
- } else if (ir->o == IR_UREFC) {
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
- int32_t jgl = (intptr_t)J2G(as->J);
- if ((uint32_t)(ofs-jgl) < 65536) {
- *ofsp = ofs-jgl-32768;
- return RID_JGL;
- } else {
- *ofsp = (int16_t)ofs;
- return ra_allock(as, ofs-(int16_t)ofs, allow);
- }
- }
- }
- }
- *ofsp = 0;
- return ra_alloc1(as, ref, allow);
-}
-
-/* Fuse XLOAD/XSTORE reference into load/store operand. */
-static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
- RegSet allow, int32_t ofs)
-{
- IRIns *ir = IR(ref);
- Reg base;
- if (ra_noreg(ir->r) && canfuse(as, ir)) {
- if (ir->o == IR_ADD) {
- int32_t ofs2;
- if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
- ofs = ofs2;
- ref = ir->op1;
- } else if (ofs == 0) {
- Reg right, left = ra_alloc2(as, ir, allow);
- right = (left >> 8); left &= 255;
- emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
- return;
- }
- } else if (ir->o == IR_STRREF) {
- lua_assert(ofs == 0);
- ofs = (int32_t)sizeof(GCstr);
- if (irref_isk(ir->op2)) {
- ofs += IR(ir->op2)->i;
- ref = ir->op1;
- } else if (irref_isk(ir->op1)) {
- ofs += IR(ir->op1)->i;
- ref = ir->op2;
- } else {
- /* NYI: Fuse ADD with constant. */
- Reg tmp, right, left = ra_alloc2(as, ir, allow);
- right = (left >> 8); left &= 255;
- tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
- emit_fai(as, pi, rt, tmp, ofs);
- emit_tab(as, PPCI_ADD, tmp, left, right);
- return;
- }
- if (!checki16(ofs)) {
- Reg left = ra_alloc1(as, ref, allow);
- Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
- emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
- return;
- }
- }
- }
- base = ra_alloc1(as, ref, allow);
- emit_fai(as, pi, rt, base, ofs);
-}
-
-/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
-static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
- RegSet allow)
-{
- IRIns *ira = IR(ref);
- Reg right, left;
- if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) {
- left = ra_alloc2(as, ira, allow);
- right = (left >> 8); left &= 255;
- } else {
- right = ra_alloc1(as, ref, allow);
- left = RID_R0;
- }
- emit_tab(as, pi, rt, left, right);
-}
-
-/* Fuse to multiply-add/sub instruction. */
-static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
-{
- IRRef lref = ir->op1, rref = ir->op2;
- IRIns *irm;
- if (lref != rref &&
- ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
- ra_noreg(irm->r)) ||
- (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
- (rref = lref, pi = pir, ra_noreg(irm->r))))) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg add = ra_alloc1(as, rref, RSET_FPR);
- Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
- right = (left >> 8); left &= 255;
- emit_facb(as, pi, dest, left, right, add);
- return 1;
- }
- return 0;
-}
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = 8;
- Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR;
- if ((void *)ci->func)
- emit_call(as, (void *)ci->func);
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- if (ref) {
- IRIns *ir = IR(ref);
- if (irt_isfp(ir->t)) {
- if (fpr <= REGARG_LASTFPR) {
- lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
- ra_leftov(as, fpr, ref);
- fpr++;
- } else {
- Reg r = ra_alloc1(as, ref, RSET_FPR);
- if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
- emit_spstore(as, ir, r, ofs);
- ofs += irt_isnum(ir->t) ? 8 : 4;
- }
- } else {
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
- ra_leftov(as, gpr, ref);
- gpr++;
- } else {
- Reg r = ra_alloc1(as, ref, RSET_GPR);
- emit_spstore(as, ir, r, ofs);
- ofs += 4;
- }
- }
- } else {
- if (gpr <= REGARG_LASTGPR)
- gpr++;
- else
- ofs += 4;
- }
- checkmclim(as);
- }
- if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
- emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = ((ir+1)->o == IR_HIOP);
- if ((ci->flags & CCI_NOFPRCLOBBER))
- drop &= ~RSET_FPR;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- lua_assert(!irt_ispri(ir->t));
- if (irt_isfp(ir->t)) {
- if ((ci->flags & CCI_CASTU64)) {
- /* Use spill slot or temp slots. */
- int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
- }
- emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
- emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
- } else if (hiop) {
- ra_destpair(as, ir);
- } else {
- ra_destreg(as, ir, RID_RET);
- }
- }
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- if (irref_isk(func)) { /* Call to constant address. */
- ci.func = (ASMFunction)(void *)(irf->i);
- } else { /* Need a non-argument register for indirect calls. */
- RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
- Reg freg = ra_alloc1(as, func, allow);
- *--as->mcp = PPCI_BCTRL;
- *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
- ci.func = (ASMFunction)(void *)0;
- }
- asm_gencall(as, &ci, args);
-}
-
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guardcc(as, CC_NE);
- emit_ab(as, PPCI_CMPW, RID_TMP,
- ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
- emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- RegSet allow = RSET_FPR;
- Reg tmp = ra_scratch(as, rset_clear(allow, left));
- Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
- asm_guardcc(as, CC_NE);
- emit_fab(as, PPCI_FCMPU, 0, tmp, left);
- emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
- emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
- emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
- emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
- emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- emit_lsptr(as, PPCI_LFS, (fbias & 31),
- (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
- RSET_GPR);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, tmp, left);
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_FPR;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, allow);
- Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
- Reg tmp = ra_scratch(as, rset_clear(allow, right));
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fab(as, PPCI_FADD, tmp, left, right);
-}
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
- IRRef lref = ir->op1;
- lua_assert(irt_type(ir->t) != st);
- lua_assert(!(irt_isint64(ir->t) ||
- (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- if (st == IRT_NUM) /* double -> float conversion. */
- emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
- else /* float -> double conversion is a no-op on PPC. */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- } else { /* Integer to FP conversion. */
- /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
- /* IRT_U32: Bias with 2^52, subtract 2^52. */
- RegSet allow = RSET_GPR;
- Reg left = ra_alloc1(as, lref, allow);
- Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
- Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- const float *kbias;
- if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
- emit_fab(as, PPCI_FSUB, dest, dest, fbias);
- emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
- kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000));
- if (st == IRT_U32) kbias++;
- emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias,
- rset_clear(allow, hibias));
- emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
- RID_SP, SPOFS_TMPLO);
- emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
- if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
- }
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, lref, RSET_FPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- if (irt_isu32(ir->t)) {
- /* Convert both x and x-2^31 to int and merge results. */
- Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
- emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
- emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
- emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
- emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
- emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
- emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_tai(as, PPCI_LWZ, dest,
- RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
- emit_fb(as, PPCI_FCTIWZ, tmp, left);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
- emit_fab(as, PPCI_FSUB, tmp, left, tmp);
- emit_lsptr(as, PPCI_LFS, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)),
- RSET_GPR);
- } else {
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, tmp, left);
- }
- }
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if ((ir->op2 & IRCONV_SEXT))
- emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
- else
- emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
- } else { /* 32/64 bit integer conversions. */
- /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
-}
-
-#if LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- IRCallID id;
- const CCallInfo *ci;
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = (ir-1)->op1;
- if (st == IRT_NUM || st == IRT_FLOAT) {
- id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
- ir--;
- } else {
- id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
- }
- ci = &lj_ir_callinfo[id];
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- int32_t ofs;
- RegSet drop = RSET_SCRATCH;
- if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
- ra_evictset(as, drop);
- asm_guardcc(as, CC_EQ);
- emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- /* Store the result to the spill slot or temp slots. */
- ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
- emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
-}
-
-/* Get pointer to TValue. */
-static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isnum(ir->t)) {
- if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
- ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
- else /* Otherwise force a spill and use the spill slot. */
- emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
- } else {
- /* Otherwise use g->tmptv to hold the TValue. */
- RegSet allow = rset_exclude(RSET_GPR, dest);
- Reg type;
- emit_tai(as, PPCI_ADDI, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- emit_setgl(as, src, tmptv.gcr);
- }
- type = ra_allock(as, irt_toitype(ir->t), allow);
- emit_setgl(as, type, tmptv.it);
- }
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx, base;
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- base = ra_alloc1(as, refa, RSET_GPR);
- emit_tai(as, PPCI_ADDI, dest, base, ofs);
- return;
- }
- }
- base = ra_alloc1(as, ir->op1, RSET_GPR);
- idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
- emit_slwi(as, RID_TMP, idx, 3);
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir, IROp merge)
-{
- RegSet allow = RSET_GPR;
- int destused = ra_used(ir);
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
- Reg tisnum = RID_NONE, tmpnum = RID_NONE;
- IRRef refkey = ir->op2;
- IRIns *irkey = IR(refkey);
- IRType1 kt = irkey->t;
- uint32_t khash;
- MCLabel l_end, l_loop, l_next;
-
- rset_clear(allow, tab);
- if (irt_isnum(kt)) {
- key = ra_alloc1(as, refkey, RSET_FPR);
- tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
- tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
- rset_clear(allow, tisnum);
- } else if (!irt_ispri(kt)) {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- }
- tmp2 = ra_scratch(as, allow);
- rset_clear(allow, tmp2);
-
- /* Key not found in chain: jump to exit (if merged) or load niltv. */
- l_end = emit_label(as);
- as->invmcp = NULL;
- if (merge == IR_NE)
- asm_guardcc(as, CC_EQ);
- else if (destused)
- emit_loada(as, dest, niltvg(J2G(as->J)));
-
- /* Follow hash chain until the end. */
- l_loop = --as->mcp;
- emit_ai(as, PPCI_CMPWI, dest, 0);
- emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
- l_next = emit_label(as);
-
- /* Type and value comparison. */
- if (merge == IR_EQ)
- asm_guardcc(as, CC_EQ);
- else
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
- if (irt_isnum(kt)) {
- emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
- emit_condbranch(as, PPCI_BC, CC_GE, l_next);
- emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
- emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
- } else {
- if (!irt_ispri(kt)) {
- emit_ab(as, PPCI_CMPW, tmp2, key);
- emit_condbranch(as, PPCI_BC, CC_NE, l_next);
- }
- emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
- if (!irt_ispri(kt))
- emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
- }
- emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
- *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
- (((char *)as->mcp-(char *)l_loop) & 0xffffu);
-
- /* Load main position relative to tab->node into dest. */
- khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
- } else {
- Reg tmphash = tmp1;
- if (irref_isk(refkey))
- tmphash = ra_allock(as, khash, allow);
- emit_tab(as, PPCI_ADD, dest, dest, tmp1);
- emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
- emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
- emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
- if (irref_isk(refkey)) {
- /* Nothing to do. */
- } else if (irt_isstr(kt)) {
- emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash));
- } else { /* Must match with hash*() in lj_tab.c. */
- emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
- emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
- emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
- emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
- emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
- if (irt_isnum(kt)) {
- int32_t ofs = ra_spill(as, irkey);
- emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
- emit_rotlwi(as, dest, tmp1, HASH_ROT1);
- emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
- emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
- emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
- } else {
- emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
- emit_rotlwi(as, dest, tmp1, HASH_ROT1);
- emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
- emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
- }
- }
- }
-}
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- int32_t kofs = ofs + (int32_t)offsetof(Node, key);
- Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg key = RID_NONE, type = RID_TMP, idx = node;
- RegSet allow = rset_exclude(RSET_GPR, node);
- lua_assert(ofs % sizeof(Node) == 0);
- if (ofs > 32736) {
- idx = dest;
- rset_clear(allow, dest);
- kofs = (int32_t)offsetof(Node, key);
- } else if (ra_hasreg(dest)) {
- emit_tai(as, PPCI_ADDI, dest, node, ofs);
- }
- asm_guardcc(as, CC_NE);
- if (!irt_ispri(irkey->t)) {
- key = ra_scratch(as, allow);
- rset_clear(allow, key);
- }
- rset_clear(allow, type);
- if (irt_isnum(irkey->t)) {
- emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
- asm_guardcc(as, CC_NE);
- emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
- } else {
- if (ra_hasreg(key)) {
- emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
- asm_guardcc(as, CC_NE);
- }
- emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
- }
- if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
- emit_tai(as, PPCI_LWZ, type, idx, kofs);
- if (ofs > 32736) {
- emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
- emit_tai(as, PPCI_ADDI, dest, node, ofs);
- }
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- if (ir->r == RID_SINK)
- return;
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- /* NYI: Check that UREFO is still open and not aliasing a slot. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- asm_guardcc(as, CC_NE);
- emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
- emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
- emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
- } else {
- emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
- }
- emit_tai(as, PPCI_LWZ, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- UNUSED(as); UNUSED(ir);
- lua_assert(!ra_used(ir));
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef ref = ir->op2, refk = ir->op1;
- int32_t ofs = (int32_t)sizeof(GCstr);
- Reg r;
- if (irref_isk(ref)) {
- IRRef tmp = refk; refk = ref; ref = tmp;
- } else if (!irref_isk(refk)) {
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- IRIns *irr = IR(ir->op2);
- if (ra_hasreg(irr->r)) {
- ra_noweak(as, irr->r);
- right = irr->r;
- } else if (mayfuse(as, irr->op2) &&
- irr->o == IR_ADD && irref_isk(irr->op2) &&
- checki16(ofs + IR(irr->op2)->i)) {
- ofs += IR(irr->op2)->i;
- right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
- } else {
- right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_tai(as, PPCI_ADDI, dest, dest, ofs);
- emit_tab(as, PPCI_ADD, dest, left, right);
- return;
- }
- r = ra_alloc1(as, ref, RSET_GPR);
- ofs += IR(refk)->i;
- if (checki16(ofs))
- emit_tai(as, PPCI_ADDI, dest, r, ofs);
- else
- emit_tab(as, PPCI_ADD, dest, r,
- ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static PPCIns asm_fxloadins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
- case IRT_U8: return PPCI_LBZ;
- case IRT_I16: return PPCI_LHA;
- case IRT_U16: return PPCI_LHZ;
- case IRT_NUM: return PPCI_LFD;
- case IRT_FLOAT: return PPCI_LFS;
- default: return PPCI_LWZ;
- }
-}
-
-static PPCIns asm_fxstoreins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: return PPCI_STB;
- case IRT_I16: case IRT_U16: return PPCI_STH;
- case IRT_NUM: return PPCI_STFD;
- case IRT_FLOAT: return PPCI_STFS;
- default: return PPCI_STW;
- }
-}
-
-static void asm_fload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
- PPCIns pi = asm_fxloadins(ir);
- int32_t ofs;
- if (ir->op2 == IRFL_TAB_ARRAY) {
- ofs = asm_fuseabase(as, ir->op1);
- if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
- emit_tai(as, PPCI_ADDI, dest, idx, ofs);
- return;
- }
- }
- ofs = field_ofs[ir->op2];
- lua_assert(!irt_isi8(ir->t));
- emit_tai(as, pi, dest, idx, ofs);
-}
-
-static void asm_fstore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
- IRIns *irf = IR(ir->op1);
- Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
- int32_t ofs = field_ofs[irf->op2];
- PPCIns pi = asm_fxstoreins(ir);
- emit_tai(as, pi, src, idx, ofs);
- }
-}
-
-static void asm_xload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
- if (irt_isi8(ir->t))
- emit_as(as, PPCI_EXTSB, dest, dest);
- asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
-}
-
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
-{
- IRIns *irb;
- if (ir->r == RID_SINK)
- return;
- if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
- ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
- /* Fuse BSWAP with XSTORE to stwbrx. */
- Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
- asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
- } else {
- Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
- rset_exclude(RSET_GPR, src), ofs);
- }
-}
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
- RegSet allow = RSET_GPR;
- int32_t ofs = AHUREF_LSX;
- if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- if (!irt_isnum(t)) ofs = 0;
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- if (irt_isnum(t)) {
- Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
- asm_guardcc(as, CC_GE);
- emit_ab(as, PPCI_CMPLW, type, tisnum);
- if (ra_hasreg(dest)) {
- if (ofs == AHUREF_LSX) {
- tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
- (idx&255)), (idx>>8)));
- emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
- } else {
- emit_fai(as, PPCI_LFD, dest, idx, ofs);
- }
- }
- } else {
- asm_guardcc(as, CC_NE);
- emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
- if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
- }
- if (ofs == AHUREF_LSX) {
- emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
- emit_slwi(as, tmp, (idx>>8), 3);
- } else {
- emit_tai(as, PPCI_LWZ, type, idx, ofs);
- }
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- Reg idx, src = RID_NONE, type = RID_NONE;
- int32_t ofs = AHUREF_LSX;
- if (ir->r == RID_SINK)
- return;
- if (irt_isnum(ir->t)) {
- src = ra_alloc1(as, ir->op2, RSET_FPR);
- } else {
- if (!irt_ispri(ir->t)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- ofs = 0;
- }
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- rset_clear(allow, type);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- if (irt_isnum(ir->t)) {
- if (ofs == AHUREF_LSX) {
- emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
- emit_slwi(as, RID_TMP, (idx>>8), 3);
- } else {
- emit_fai(as, PPCI_STFD, src, idx, ofs);
- }
- } else {
- if (ra_hasreg(src))
- emit_tai(as, PPCI_STW, src, idx, ofs+4);
- if (ofs == AHUREF_LSX) {
- emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
- emit_slwi(as, RID_TMP, (idx>>8), 3);
- } else {
- emit_tai(as, PPCI_STW, type, idx, ofs);
- }
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_NONE, base;
- RegSet allow = RSET_GPR;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
- lua_assert(LJ_DUALNUM ||
- !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
- dest = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, dest);
- t.irt = IRT_NUM; /* Continue with a regular number type check. */
- } else if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- if (irt_isint(t)) {
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- dest = ra_scratch(as, RSET_FPR);
- emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, dest, dest);
- t.irt = IRT_NUM; /* Check for original type. */
- } else {
- Reg tmp = ra_scratch(as, allow);
- Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
- Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- emit_fab(as, PPCI_FSUB, dest, dest, fbias);
- emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
- emit_lsptr(as, PPCI_LFS, (fbias & 31),
- (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
- rset_clear(allow, hibias));
- emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
- emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
- emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
- dest = tmp;
- t.irt = IRT_INT; /* Check for original type. */
- }
- }
- goto dotypecheck;
- }
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
-dotypecheck:
- if (irt_isnum(t)) {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
- asm_guardcc(as, CC_GE);
- emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum);
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4);
- } else {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- asm_guardcc(as, CC_NE);
- emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
- }
- if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- RegSet drop = RSET_SCRATCH;
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
-
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- if (ra_used(ir))
- ra_destreg(as, ir, RID_RET); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- int32_t ofs = sizeof(GCcdata);
- lua_assert(sz == 4 || sz == 8);
- if (sz == 8) {
- ofs += 4;
- lua_assert((ir+1)->o == IR_HIOP);
- }
- for (;;) {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_tai(as, PPCI_STW, r, RID_RET, ofs);
- rset_clear(allow, r);
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; ir++;
- }
- }
- /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
- emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
- emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
- emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
- emit_ti(as, PPCI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */
- asm_gencall(as, ci, args);
- ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
- ra_releasetmp(as, ASMREF_TMP1));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- Reg link = RID_TMP;
- MCLabel l_end = emit_label(as);
- emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
- emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
- emit_setgl(as, tab, gc.grayagain);
- lua_assert(LJ_GC_BLACK == 0x04);
- emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
- emit_getgl(as, link, gc.grayagain);
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
- emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
- emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj, val, tmp;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- obj = IR(ir->op1)->r;
- tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
- emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
- emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
- emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
- val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
- emit_tai(as, PPCI_LBZ, tmp, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
- emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
-}
-
-/* -- Arithmetic and logic operations ------------------------------------- */
-
-static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- if (pi == PPCI_FMUL)
- emit_fac(as, pi, dest, left, right);
- else
- emit_fab(as, pi, dest, left, right);
-}
-
-static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
- emit_fb(as, pi, dest, left);
-}
-
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
- IRRef args[2];
- args[0] = irpp->op1;
- args[1] = irp->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
- return 1;
- }
- }
- return 0;
-}
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
- asm_fparith(as, ir, PPCI_FADD);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- PPCIns pi;
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- pi = PPCI_ADDI;
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi = PPCI_ADDICDOT;
- }
- emit_tai(as, pi, dest, left, k);
- return;
- } else if ((k & 0xffff) == 0) {
- emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
- return;
- } else if (!as->sectref) {
- emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
- emit_tai(as, PPCI_ADDI, dest, left, k);
- return;
- }
- }
- pi = PPCI_ADD;
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, pi, dest, left, right);
- }
-}
-
-static void asm_sub(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
- asm_fparith(as, ir, PPCI_FSUB);
- } else {
- PPCIns pi = PPCI_SUBF;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left, right;
- if (irref_isk(ir->op1)) {
- int32_t k = IR(ir->op1)->i;
- if (checki16(k)) {
- right = ra_alloc1(as, ir->op2, RSET_GPR);
- emit_tai(as, PPCI_SUBFIC, dest, right, k);
- return;
- }
- }
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
- }
-}
-
-static void asm_mul(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, PPCI_FMUL);
- } else {
- PPCIns pi = PPCI_MULLW;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- emit_tai(as, PPCI_MULLI, dest, left, k);
- return;
- }
- }
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, pi, dest, left, right);
- }
-}
-
-static void asm_neg(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fpunary(as, ir, PPCI_FNEG);
- } else {
- Reg dest, left;
- PPCIns pi = PPCI_NEG;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_tab(as, pi, dest, left, 0);
- }
-}
-
-static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
-{
- Reg dest, left, right;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- }
- asm_guardcc(as, CC_SO);
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
- emit_tab(as, pi|PPCF_DOT, dest, left, right);
-}
-
-#if LJ_HASFFI
-static void asm_add64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- PPCIns pi = PPCI_ADDE;
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k == 0)
- pi = PPCI_ADDZE;
- else if (k == -1)
- pi = PPCI_ADDME;
- else
- goto needright;
- right = 0;
- } else {
- needright:
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_tab(as, pi, dest, left, right);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- emit_tai(as, PPCI_ADDIC, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, PPCI_ADDC, dest, left, right);
-}
-
-static void asm_sub64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
- PPCIns pi = PPCI_SUBFE;
- if (irref_isk(ir->op1)) {
- int32_t k = IR(ir->op1)->i;
- if (k == 0)
- pi = PPCI_SUBFZE;
- else if (k == -1)
- pi = PPCI_SUBFME;
- else
- goto needleft;
- left = 0;
- } else {
- needleft:
- left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
- }
- emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- right = ra_alloc1(as, ir->op2, RSET_GPR);
- if (irref_isk(ir->op1)) {
- int32_t k = IR(ir->op1)->i;
- if (checki16(k)) {
- emit_tai(as, PPCI_SUBFIC, dest, right, k);
- return;
- }
- }
- left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
- emit_tab(as, PPCI_SUBFC, dest, right, left);
-}
-
-static void asm_neg64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_tab(as, PPCI_SUBFZE, dest, left, 0);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_tai(as, PPCI_SUBFIC, dest, left, 0);
-}
-#endif
-
-static void asm_bitnot(ASMState *as, IRIns *ir)
-{
- Reg dest, left, right;
- PPCIns pi = PPCI_NOR;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- if (mayfuse(as, ir->op1)) {
- IRIns *irl = IR(ir->op1);
- if (irl->o == IR_BAND)
- pi ^= (PPCI_NOR ^ PPCI_NAND);
- else if (irl->o == IR_BXOR)
- pi ^= (PPCI_NOR ^ PPCI_EQV);
- else if (irl->o != IR_BOR)
- goto nofuse;
- left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
- right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
- } else {
-nofuse:
- left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- }
- emit_asb(as, pi, dest, left, right);
-}
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRIns *irx;
- if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
- ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
- /* Fuse BSWAP with XLOAD to lwbrx. */
- asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
- } else {
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg tmp = dest;
- if (tmp == left) {
- tmp = RID_TMP;
- emit_mr(as, dest, RID_TMP);
- }
- emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
- emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
- emit_rotlwi(as, tmp, left, 8);
- }
-}
-
-static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- Reg tmp = left;
- if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
- if (!checku16(k)) {
- emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
- if ((k & 0xffff) == 0) return;
- }
- emit_asi(as, pik, dest, left, k);
- return;
- }
- }
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_asb(as, pi, dest, left, right);
-}
-
-/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
-static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
-{
- IRIns *ir;
- Reg left;
- if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
- irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
- int32_t sh = (IR(ir->op2)->i & 31);
- switch (ir->o) {
- case IR_BSHL:
- if ((mask & ((1u<<sh)-1))) goto nofuse;
- break;
- case IR_BSHR:
- if ((mask & ~((~0u)>>sh))) goto nofuse;
- sh = ((32-sh)&31);
- break;
- case IR_BROL:
- break;
- default:
- goto nofuse;
- }
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
- return;
- }
-nofuse:
- left = ra_alloc1(as, ref, RSET_GPR);
- *--as->mcp = pi | PPCF_T(left);
-}
-
-static void asm_bitand(ASMState *as, IRIns *ir)
-{
- Reg dest, left, right;
- IRRef lref = ir->op1;
- PPCIns dot = 0;
- IRRef op2;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- dot = PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k) {
- /* First check for a contiguous bitmask as used by rlwinm. */
- uint32_t s1 = lj_ffs((uint32_t)k);
- uint32_t k1 = ((uint32_t)k >> s1);
- if ((k1 & (k1+1)) == 0) {
- asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
- PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
- k, lref);
- return;
- }
- if (~(uint32_t)k) {
- uint32_t s2 = lj_ffs(~(uint32_t)k);
- uint32_t k2 = (~(uint32_t)k >> s2);
- if ((k2 & (k2+1)) == 0) {
- asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
- PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
- k, lref);
- return;
- }
- }
- }
- if (checku16(k)) {
- left = ra_alloc1(as, lref, RSET_GPR);
- emit_asi(as, PPCI_ANDIDOT, dest, left, k);
- return;
- } else if ((k & 0xffff) == 0) {
- left = ra_alloc1(as, lref, RSET_GPR);
- emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
- return;
- }
- }
- op2 = ir->op2;
- if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
- dot ^= (PPCI_AND ^ PPCI_ANDC);
- op2 = IR(op2)->op1;
- }
- left = ra_hintalloc(as, lref, dest, RSET_GPR);
- right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
- emit_asb(as, PPCI_AND ^ dot, dest, left, right);
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
-{
- Reg dest, left;
- Reg dot = 0;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- dot = PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- int32_t shift = (IR(ir->op2)->i & 31);
- if (pik == 0) /* SLWI */
- emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
- else if (pik == 1) /* SRWI */
- emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
- else
- emit_asb(as, pik|dot, dest, left, shift);
- } else {
- Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_asb(as, pi|dot, dest, left, right);
- }
-}
-
-static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
-{
- if (irt_isnum(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg tmp = dest;
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- if (tmp == left || tmp == right)
- tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
- dest), left), right));
- emit_facb(as, PPCI_FSEL, dest, tmp,
- ismax ? left : right, ismax ? right : left);
- emit_fab(as, PPCI_FSUB, tmp, left, right);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg tmp1 = RID_TMP, tmp2 = dest;
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (tmp2 == left || tmp2 == right)
- tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
- dest), left), right));
- emit_tab(as, PPCI_ADD, dest, tmp2, right);
- emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
- emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
- emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
- emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
- emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
- }
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
-#define CC_TWO 0x80 /* Check two flags for FP comparison. */
-
-/* Map of comparisons to flags. ORDER IR. */
-static const uint8_t asm_compmap[IR_ABC+1] = {
- /* op int cc FP cc */
- /* LT */ CC_GE + (CC_GE<<4),
- /* GE */ CC_LT + (CC_LE<<4) + CC_TWO,
- /* LE */ CC_GT + (CC_GE<<4) + CC_TWO,
- /* GT */ CC_LE + (CC_LE<<4),
- /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
- /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
- /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
- /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
- /* EQ */ CC_NE + (CC_NE<<4),
- /* NE */ CC_EQ + (CC_EQ<<4),
- /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */
-};
-
-static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
-{
- Reg right, left = ra_alloc1(as, lref, RSET_GPR);
- if (irref_isk(rref)) {
- int32_t k = IR(rref)->i;
- if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */
- if (checki16(k)) {
- emit_tai(as, PPCI_CMPWI, cr, left, k);
- /* Signed comparison with zero and referencing previous ins? */
- if (k == 0 && lref == as->curins-1)
- as->flagmcp = as->mcp; /* Allow elimination of the compare. */
- return;
- } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */
- if (checku16(k)) {
- emit_tai(as, PPCI_CMPLWI, cr, left, k);
- return;
- } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
- emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
- emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
- return;
- }
- }
- } else { /* Unsigned comparison with constant. */
- if (checku16(k)) {
- emit_tai(as, PPCI_CMPLWI, cr, left, k);
- return;
- }
- }
- }
- right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
- emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
-}
-
-static void asm_comp(ASMState *as, IRIns *ir)
-{
- PPCCC cc = asm_compmap[ir->o];
- if (irt_isnum(ir->t)) {
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- asm_guardcc(as, (cc >> 4));
- if ((cc & CC_TWO))
- emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
- emit_fab(as, PPCI_FCMPU, 0, left, right);
- } else {
- IRRef lref = ir->op1, rref = ir->op2;
- if (irref_isk(lref) && !irref_isk(rref)) {
- /* Swap constants to the right (only for ABC). */
- IRRef tmp = lref; lref = rref; rref = tmp;
- if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */
- }
- asm_guardcc(as, cc);
- asm_intcomp_(as, lref, rref, 0, cc);
- }
-}
-
-#if LJ_HASFFI
-/* 64 bit integer comparisons. */
-static void asm_comp64(ASMState *as, IRIns *ir)
-{
- PPCCC cc = asm_compmap[(ir-1)->o];
- if ((cc&3) == (CC_EQ&3)) {
- asm_guardcc(as, cc);
- emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
- (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
- } else {
- asm_guardcc(as, CC_EQ);
- emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
- emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
- (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
- }
- /* Loword comparison sets cr1 and is unsigned, except for equality. */
- asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
- cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
- /* Hiword comparison sets cr0. */
- asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
- as->flagmcp = NULL; /* Doesn't work here. */
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_HASFFI
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
- as->curins--; /* Always skip the CONV. */
- if (usehi || uselo)
- asm_conv64(as, ir);
- return;
- } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
- asm_comp64(as, ir);
- return;
- } else if ((ir-1)->o == IR_XSTORE) {
- as->curins--; /* Handle both stores here. */
- if ((ir-1)->r != RID_SINK) {
- asm_xstore(as, ir, 0);
- asm_xstore(as, ir-1, 4);
- }
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
- case IR_ADD: as->curins--; asm_add64(as, ir); break;
- case IR_SUB: as->curins--; asm_sub64(as, ir); break;
- case IR_NEG: as->curins--; asm_neg64(as, ir); break;
- case IR_CALLN:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
- case IR_CNEWI:
- /* Nothing to do here. Handled by lo op itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
- Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
- rset_clear(allow, pbase);
- tmp = allow ? rset_pickbot(allow) :
- (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
- emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
- if (allow == RSET_EMPTY) /* Restore temp. register. */
- emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
- else
- ra_modified(as, tmp);
- emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
- emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
- emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
- if (pbase == RID_TMP)
- emit_getgl(as, RID_TMP, jit_base);
- emit_getgl(as, tmp, jit_L);
- if (allow == RSET_EMPTY) /* Spill temp. register. */
- emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
- } else {
- Reg type;
- RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- rset_clear(allow, src);
- emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s == 0) continue; /* Do not overwrite link to previous frame. */
- type = ra_allock(as, (int32_t)(*flinks--), allow);
- } else {
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- }
- emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
- emit_ai(as, PPCI_CMPWI, RID_RET, 0);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- tmp = ra_releasetmp(as, ASMREF_TMP2);
- emit_loadi(as, tmp, as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
- emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
- emit_getgl(as, tmp, gc.threshold);
- emit_getgl(as, RID_TMP, gc.total);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guardcc already inverted the cond branch and patched the final b. */
- p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
- } else {
- p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (r != RID_BASE)
- emit_mr(as, r, RID_BASE);
- }
-}
-
-/* Coalesce BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (irp->r == r) {
- rset_clear(allow, r); /* Mark same BASE register as coalesced. */
- } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
- rset_clear(allow, irp->r);
- emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */
- } else {
- emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
- }
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- MCode *p = as->mctop;
- MCode *target;
- int32_t spadj = as->T->spadjust;
- if (spadj == 0) {
- *--p = PPCI_NOP;
- *--p = PPCI_NOP;
- as->mctop = p;
- } else {
- /* Patch stack adjustment. */
- lua_assert(checki16(CFRAME_SIZE+spadj));
- p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
- p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
- }
- /* Patch exit branch. */
- target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
- p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- MCode *p = as->mctop - 1; /* Leave room for exit branch. */
- if (as->loopref) {
- as->invmcp = as->mcp = p;
- } else {
- as->mcp = p-2; /* Leave room for stack pointer adjustment. */
- as->invmcp = NULL;
- }
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_EQ: case IR_NE:
- if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
- as->curins--;
- asm_href(as, ir-1, (IROp)ir->o);
- break;
- }
- /* fallthrough */
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_ABC:
- asm_comp(as, ir);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_bitnot(as, ir); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_bitand(as, ir); break;
- case IR_BOR: asm_bitop(as, ir, PPCI_OR, PPCI_ORI); break;
- case IR_BXOR: asm_bitop(as, ir, PPCI_XOR, PPCI_XORI); break;
-
- case IR_BSHL: asm_bitshift(as, ir, PPCI_SLW, 0); break;
- case IR_BSHR: asm_bitshift(as, ir, PPCI_SRW, 1); break;
- case IR_BSAR: asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI); break;
- case IR_BROL: asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31),
- PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)); break;
- case IR_BROR: lua_assert(0); break;
-
- /* Arithmetic ops. */
- case IR_ADD: asm_add(as, ir); break;
- case IR_SUB: asm_sub(as, ir); break;
- case IR_MUL: asm_mul(as, ir); break;
- case IR_DIV: asm_fparith(as, ir, PPCI_FDIV); break;
- case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
- case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
- case IR_NEG: asm_neg(as, ir); break;
-
- case IR_ABS: asm_fpunary(as, ir, PPCI_FABS); break;
- case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
- case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
- case IR_MIN: asm_min_max(as, ir, 0); break;
- case IR_MAX: asm_min_max(as, ir, 1); break;
- case IR_FPMATH:
- if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
- break;
- if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
- asm_fpunary(as, ir, PPCI_FSQRT);
- else
- asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
-
- /* Overflow-checking arithmetic ops. */
- case IR_ADDOV: asm_arithov(as, ir, PPCI_ADDO); break;
- case IR_SUBOV: asm_arithov(as, ir, PPCI_SUBFO); break;
- case IR_MULOV: asm_arithov(as, ir, PPCI_MULLWO); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir, 0); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: asm_fload(as, ir); break;
- case IR_XLOAD: asm_xload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: asm_fstore(as, ir); break;
- case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- uint32_t i, nargs = (int)CCI_NARGS(ci);
- int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
- asm_collectargs(as, ir, ci, args);
- for (i = 0; i < nargs; i++)
- if (args[i] && irt_isfp(IR(args[i])->t)) {
- if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
- return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
-}
-
-static void asm_setup_target(ASMState *as)
-{
- asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *pe = (MCode *)((char *)p + T->szmcode);
- MCode *px = exitstub_trace_addr(T, exitno);
- MCode *cstart = NULL;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- int clearso = 0;
- for (; p < pe; p++) {
- /* Look for exitstub branch, try to replace with branch to target. */
- uint32_t ins = *p;
- if ((ins & 0xfc000000u) == 0x40000000u &&
- ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
- ptrdiff_t delta = (char *)target - (char *)p;
- if (((ins >> 16) & 3) == (CC_SO&3)) {
- clearso = sizeof(MCode);
- delta -= sizeof(MCode);
- }
- /* Many, but not all short-range branches can be patched directly. */
- if (((delta + 0x8000) >> 16) == 0) {
- *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
- ((delta & 0x8000) * (PPCF_Y/0x8000));
- if (!cstart) cstart = p;
- }
- } else if ((ins & 0xfc000000u) == PPCI_B &&
- ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
- ptrdiff_t delta = (char *)target - (char *)p;
- lua_assert(((delta + 0x02000000) >> 26) == 0);
- *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
- if (!cstart) cstart = p;
- }
- }
- { /* Always patch long-range branch in exit stub itself. */
- ptrdiff_t delta = (char *)target - (char *)px - clearso;
- lua_assert(((delta + 0x02000000) >> 26) == 0);
- *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
- }
- if (!cstart) cstart = px;
- lj_mcode_sync(cstart, px+1);
- if (clearso) { /* Extend the current trace. Ugly workaround. */
- MCode *pp = J->cur.mcode;
- J->cur.szmcode += sizeof(MCode);
- *--pp = PPCI_MCRXR; /* Clear SO flag. */
- J->cur.mcode = pp;
- lj_mcode_sync(pp, pp+1);
- }
- lj_mcode_patch(J, mcarea, 1);
-}
-
+/*
+** PPC IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_allocref(as, ir->op2, allow);
+ left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_allocref(as, ir->op1, allow);
+ right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Setup exit stubs after the end of each trace. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ MCode *mxp = as->mctop;
+ /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
+ for (i = nexits-1; (int32_t)i >= 0; i--)
+ *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
+ *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
+ mxp--;
+ *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
+ *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
+ as->mctop = mxp;
+}
+
+static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
+{
+ /* Keep this in-sync with exitstub_trace_addr(). */
+ return as->mctop + exitno + 3;
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, PPCCC cc)
+{
+ MCode *target = asm_exitstub_addr(as, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
+ emit_condbranch(as, PPCI_BC, cc^4, p);
+ return;
+ }
+ emit_condbranch(as, PPCI_BC, cc, target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Indicates load/store indexed is ok. */
+#define AHUREF_LSX ((int32_t)0x80000000)
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ if (*ofsp == AHUREF_LSX) {
+ Reg base = ra_alloc1(as, ir->op1, allow);
+ Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ return base | (idx << 8);
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ int32_t jgl = (intptr_t)J2G(as->J);
+ if ((uint32_t)(ofs-jgl) < 65536) {
+ *ofsp = ofs-jgl-32768;
+ return RID_JGL;
+ } else {
+ *ofsp = (int16_t)ofs;
+ return ra_allock(as, ofs-(int16_t)ofs, allow);
+ }
+ }
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
+ RegSet allow, int32_t ofs)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
+ if (ir->o == IR_ADD) {
+ int32_t ofs2;
+ if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
+ ofs = ofs2;
+ ref = ir->op1;
+ } else if (ofs == 0) {
+ Reg right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
+ return;
+ }
+ } else if (ir->o == IR_STRREF) {
+ lua_assert(ofs == 0);
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs += IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs += IR(ir->op1)->i;
+ ref = ir->op2;
+ } else {
+ /* NYI: Fuse ADD with constant. */
+ Reg tmp, right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
+ emit_fai(as, pi, rt, tmp, ofs);
+ emit_tab(as, PPCI_ADD, tmp, left, right);
+ return;
+ }
+ if (!checki16(ofs)) {
+ Reg left = ra_alloc1(as, ref, allow);
+ Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
+ emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_fai(as, pi, rt, base, ofs);
+}
+
+/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
+static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ira = IR(ref);
+ Reg right, left;
+ if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) {
+ left = ra_alloc2(as, ira, allow);
+ right = (left >> 8); left &= 255;
+ } else {
+ right = ra_alloc1(as, ref, allow);
+ left = RID_R0;
+ }
+ emit_tab(as, pi, rt, left, right);
+}
+
+/* Fuse to multiply-add/sub instruction. */
+static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irm;
+ if (lref != rref &&
+ ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
+ ra_noreg(irm->r)) ||
+ (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
+ (rref = lref, pi = pir, ra_noreg(irm->r))))) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg add = ra_alloc1(as, rref, RSET_FPR);
+ Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
+ right = (left >> 8); left &= 255;
+ emit_facb(as, pi, dest, left, right, add);
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = 8;
+ Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR;
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ if (ref) {
+ IRIns *ir = IR(ref);
+ if (irt_isfp(ir->t)) {
+ if (fpr <= REGARG_LASTFPR) {
+ lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_FPR);
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, r, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ } else {
+ if (gpr <= REGARG_LASTGPR) {
+ lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
+ ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs);
+ ofs += 4;
+ }
+ }
+ } else {
+ if (gpr <= REGARG_LASTGPR)
+ gpr++;
+ else
+ ofs += 4;
+ }
+ checkmclim(as);
+ }
+ if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
+ emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP);
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lua_assert(!irt_ispri(ir->t));
+ if (irt_isfp(ir->t)) {
+ if ((ci->flags & CCI_CASTU64)) {
+ /* Use spill slot or temp slots. */
+ int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
+ }
+ emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
+ emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need a non-argument register for indirect calls. */
+ RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
+ Reg freg = ra_alloc1(as, func, allow);
+ *--as->mcp = PPCI_BCTRL;
+ *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_ab(as, PPCI_CMPW, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ RegSet allow = RSET_FPR;
+ Reg tmp = ra_scratch(as, rset_clear(allow, left));
+ Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
+ asm_guardcc(as, CC_NE);
+ emit_fab(as, PPCI_FCMPU, 0, tmp, left);
+ emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
+ emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
+ emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
+ RSET_GPR);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fab(as, PPCI_FADD, tmp, left, right);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lua_assert(irt_type(ir->t) != st);
+ lua_assert(!(irt_isint64(ir->t) ||
+ (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ if (st == IRT_NUM) /* double -> float conversion. */
+ emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
+ else /* float -> double conversion is a no-op on PPC. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* Integer to FP conversion. */
+ /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
+ /* IRT_U32: Bias with 2^52, subtract 2^52. */
+ RegSet allow = RSET_GPR;
+ Reg left = ra_alloc1(as, lref, allow);
+ Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
+ Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ const float *kbias;
+ if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
+ emit_fab(as, PPCI_FSUB, dest, dest, fbias);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
+ kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000));
+ if (st == IRT_U32) kbias++;
+ emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias,
+ rset_clear(allow, hibias));
+ emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
+ RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ if (irt_isu32(ir->t)) {
+ /* Convert both x and x-2^31 to int and merge results. */
+ Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
+ emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
+ emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
+ emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
+ emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
+ emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
+ emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_tai(as, PPCI_LWZ, dest,
+ RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
+ emit_fab(as, PPCI_FSUB, tmp, left, tmp);
+ emit_lsptr(as, PPCI_LFS, (tmp & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)),
+ RSET_GPR);
+ } else {
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+ }
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if ((ir->op2 & IRCONV_SEXT))
+ emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
+ else
+ emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
+ } else { /* 32/64 bit integer conversions. */
+ /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
+
+#if LJ_HASFFI
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ IRCallID id;
+ const CCallInfo *ci;
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = (ir-1)->op1;
+ if (st == IRT_NUM || st == IRT_FLOAT) {
+ id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
+ ir--;
+ } else {
+ id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
+ }
+ ci = &lj_ir_callinfo[id];
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ int32_t ofs;
+ RegSet drop = RSET_SCRATCH;
+ if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
+ ra_evictset(as, drop);
+ asm_guardcc(as, CC_EQ);
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
+}
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ else /* Otherwise force a spill and use the spill slot. */
+ emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ RegSet allow = rset_exclude(RSET_GPR, dest);
+ Reg type;
+ emit_tai(as, PPCI_ADDI, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ emit_setgl(as, src, tmptv.gcr);
+ }
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ emit_setgl(as, type, tmptv.it);
+ }
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_tai(as, PPCI_ADDI, dest, base, ofs);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
+ emit_slwi(as, RID_TMP, idx, 3);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
+ Reg tisnum = RID_NONE, tmpnum = RID_NONE;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ rset_clear(allow, tab);
+ if (irt_isnum(kt)) {
+ key = ra_alloc1(as, refkey, RSET_FPR);
+ tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
+ rset_clear(allow, tisnum);
+ } else if (!irt_ispri(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+ tmp2 = ra_scratch(as, allow);
+ rset_clear(allow, tmp2);
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_EQ);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_ai(as, PPCI_CMPWI, dest, 0);
+ emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ if (irt_isnum(kt)) {
+ emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
+ emit_condbranch(as, PPCI_BC, CC_GE, l_next);
+ emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
+ emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
+ } else {
+ if (!irt_ispri(kt)) {
+ emit_ab(as, PPCI_CMPW, tmp2, key);
+ emit_condbranch(as, PPCI_BC, CC_NE, l_next);
+ }
+ emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
+ if (!irt_ispri(kt))
+ emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
+ }
+ emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
+ *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
+ (((char *)as->mcp-(char *)l_loop) & 0xffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ Reg tmphash = tmp1;
+ if (irref_isk(refkey))
+ tmphash = ra_allock(as, khash, allow);
+ emit_tab(as, PPCI_ADD, dest, dest, tmp1);
+ emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
+ emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
+ emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
+ if (irref_isk(refkey)) {
+ /* Nothing to do. */
+ } else if (irt_isstr(kt)) {
+ emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
+ emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
+ emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
+ emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
+ emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
+ if (irt_isnum(kt)) {
+ int32_t ofs = ra_spill(as, irkey);
+ emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
+ emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
+ emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
+ } else {
+ emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
+ emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ofs > 32736) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_tai(as, PPCI_ADDI, dest, node, ofs);
+ }
+ asm_guardcc(as, CC_NE);
+ if (!irt_ispri(irkey->t)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ }
+ rset_clear(allow, type);
+ if (irt_isnum(irkey->t)) {
+ emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
+ asm_guardcc(as, CC_NE);
+ emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ if (ra_hasreg(key)) {
+ emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
+ asm_guardcc(as, CC_NE);
+ }
+ emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
+ }
+ if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
+ emit_tai(as, PPCI_LWZ, type, idx, kofs);
+ if (ofs > 32736) {
+ emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
+ emit_tai(as, PPCI_ADDI, dest, node, ofs);
+ }
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ if (ir->r == RID_SINK)
+ return;
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
+ emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
+ emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_tai(as, PPCI_LWZ, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lua_assert(!ra_used(ir));
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ int32_t ofs = (int32_t)sizeof(GCstr);
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ checki16(ofs + IR(irr->op2)->i)) {
+ ofs += IR(irr->op2)->i;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tai(as, PPCI_ADDI, dest, dest, ofs);
+ emit_tab(as, PPCI_ADD, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ ofs += IR(refk)->i;
+ if (checki16(ofs))
+ emit_tai(as, PPCI_ADDI, dest, r, ofs);
+ else
+ emit_tab(as, PPCI_ADD, dest, r,
+ ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static PPCIns asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
+ case IRT_U8: return PPCI_LBZ;
+ case IRT_I16: return PPCI_LHA;
+ case IRT_U16: return PPCI_LHZ;
+ case IRT_NUM: return PPCI_LFD;
+ case IRT_FLOAT: return PPCI_LFS;
+ default: return PPCI_LWZ;
+ }
+}
+
+static PPCIns asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return PPCI_STB;
+ case IRT_I16: case IRT_U16: return PPCI_STH;
+ case IRT_NUM: return PPCI_STFD;
+ case IRT_FLOAT: return PPCI_STFS;
+ default: return PPCI_STW;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ PPCIns pi = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_tai(as, PPCI_ADDI, dest, idx, ofs);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ lua_assert(!irt_isi8(ir->t));
+ emit_tai(as, pi, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ PPCIns pi = asm_fxstoreins(ir);
+ emit_tai(as, pi, src, idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
+ if (irt_isi8(ir->t))
+ emit_as(as, PPCI_EXTSB, dest, dest);
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
+{
+ IRIns *irb;
+ if (ir->r == RID_SINK)
+ return;
+ if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
+ ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
+ /* Fuse BSWAP with XSTORE to stwbrx. */
+ Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
+ asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
+ } else {
+ Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src), ofs);
+ }
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = AHUREF_LSX;
+ if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ if (!irt_isnum(t)) ofs = 0;
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (irt_isnum(t)) {
+ Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
+ asm_guardcc(as, CC_GE);
+ emit_ab(as, PPCI_CMPLW, type, tisnum);
+ if (ra_hasreg(dest)) {
+ if (ofs == AHUREF_LSX) {
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
+ (idx&255)), (idx>>8)));
+ emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
+ } else {
+ emit_fai(as, PPCI_LFD, dest, idx, ofs);
+ }
+ }
+ } else {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
+ if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
+ }
+ if (ofs == AHUREF_LSX) {
+ emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
+ emit_slwi(as, tmp, (idx>>8), 3);
+ } else {
+ emit_tai(as, PPCI_LWZ, type, idx, ofs);
+ }
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = AHUREF_LSX;
+ if (ir->r == RID_SINK)
+ return;
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ } else {
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ ofs = 0;
+ }
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ rset_clear(allow, type);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (irt_isnum(ir->t)) {
+ if (ofs == AHUREF_LSX) {
+ emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
+ emit_slwi(as, RID_TMP, (idx>>8), 3);
+ } else {
+ emit_fai(as, PPCI_STFD, src, idx, ofs);
+ }
+ } else {
+ if (ra_hasreg(src))
+ emit_tai(as, PPCI_STW, src, idx, ofs+4);
+ if (ofs == AHUREF_LSX) {
+ emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
+ emit_slwi(as, RID_TMP, (idx>>8), 3);
+ } else {
+ emit_tai(as, PPCI_STW, type, idx, ofs);
+ }
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(LJ_DUALNUM ||
+ !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else if (ra_used(ir)) {
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ dest = ra_scratch(as, RSET_FPR);
+ emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, dest, dest);
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, allow);
+ Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
+ Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ emit_fab(as, PPCI_FSUB, dest, dest, fbias);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
+ rset_clear(allow, hibias));
+ emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+dotypecheck:
+ if (irt_isnum(t)) {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
+ asm_guardcc(as, CC_GE);
+ emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum);
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4);
+ } else {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
+ }
+ if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lua_assert(sz == 4 || sz == 8);
+ if (sz == 8) {
+ ofs += 4;
+ lua_assert((ir+1)->o == IR_HIOP);
+ }
+ for (;;) {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_tai(as, PPCI_STW, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir++;
+ }
+ }
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
+ emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
+ emit_ti(as, PPCI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg link = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ lua_assert(LJ_GC_BLACK == 0x04);
+ emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
+ emit_getgl(as, link, gc.grayagain);
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
+ emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
+ emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_tai(as, PPCI_LBZ, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (pi == PPCI_FMUL)
+ emit_fac(as, pi, dest, left, right);
+ else
+ emit_fab(as, pi, dest, left, right);
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_fb(as, pi, dest, left);
+}
+
+static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
+ IRRef args[2];
+ args[0] = irpp->op1;
+ args[1] = irp->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
+ asm_fparith(as, ir, PPCI_FADD);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ PPCIns pi;
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ pi = PPCI_ADDI;
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi = PPCI_ADDICDOT;
+ }
+ emit_tai(as, pi, dest, left, k);
+ return;
+ } else if ((k & 0xffff) == 0) {
+ emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
+ return;
+ } else if (!as->sectref) {
+ emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
+ emit_tai(as, PPCI_ADDI, dest, left, k);
+ return;
+ }
+ }
+ pi = PPCI_ADD;
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, left, right);
+ }
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
+ asm_fparith(as, ir, PPCI_FSUB);
+ } else {
+ PPCIns pi = PPCI_SUBF;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left, right;
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (checki16(k)) {
+ right = ra_alloc1(as, ir->op2, RSET_GPR);
+ emit_tai(as, PPCI_SUBFIC, dest, right, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
+ }
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, PPCI_FMUL);
+ } else {
+ PPCIns pi = PPCI_MULLW;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_MULLI, dest, left, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, left, right);
+ }
+}
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, PPCI_FNEG);
+ } else {
+ Reg dest, left;
+ PPCIns pi = PPCI_NEG;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_tab(as, pi, dest, left, 0);
+ }
+}
+
+static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest, left, right;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ }
+ asm_guardcc(as, CC_SO);
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
+ emit_tab(as, pi|PPCF_DOT, dest, left, right);
+}
+
+#if LJ_HASFFI
+static void asm_add64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ PPCIns pi = PPCI_ADDE;
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0)
+ pi = PPCI_ADDZE;
+ else if (k == -1)
+ pi = PPCI_ADDME;
+ else
+ goto needright;
+ right = 0;
+ } else {
+ needright:
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tab(as, pi, dest, left, right);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_ADDIC, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, PPCI_ADDC, dest, left, right);
+}
+
+static void asm_sub64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
+ PPCIns pi = PPCI_SUBFE;
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (k == 0)
+ pi = PPCI_SUBFZE;
+ else if (k == -1)
+ pi = PPCI_SUBFME;
+ else
+ goto needleft;
+ left = 0;
+ } else {
+ needleft:
+ left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+ }
+ emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ right = ra_alloc1(as, ir->op2, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_SUBFIC, dest, right, k);
+ return;
+ }
+ }
+ left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+ emit_tab(as, PPCI_SUBFC, dest, right, left);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tab(as, PPCI_SUBFZE, dest, left, 0);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tai(as, PPCI_SUBFIC, dest, left, 0);
+}
+#endif
+
+static void asm_bitnot(ASMState *as, IRIns *ir)
+{
+ Reg dest, left, right;
+ PPCIns pi = PPCI_NOR;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ if (mayfuse(as, ir->op1)) {
+ IRIns *irl = IR(ir->op1);
+ if (irl->o == IR_BAND)
+ pi ^= (PPCI_NOR ^ PPCI_NAND);
+ else if (irl->o == IR_BXOR)
+ pi ^= (PPCI_NOR ^ PPCI_EQV);
+ else if (irl->o != IR_BOR)
+ goto nofuse;
+ left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
+ right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
+ } else {
+nofuse:
+ left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ }
+ emit_asb(as, pi, dest, left, right);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRIns *irx;
+ if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
+ ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
+ /* Fuse BSWAP with XLOAD to lwbrx. */
+ asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
+ } else {
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg tmp = dest;
+ if (tmp == left) {
+ tmp = RID_TMP;
+ emit_mr(as, dest, RID_TMP);
+ }
+ emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
+ emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
+ emit_rotlwi(as, tmp, left, 8);
+ }
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ Reg tmp = left;
+ if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
+ if (!checku16(k)) {
+ emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
+ if ((k & 0xffff) == 0) return;
+ }
+ emit_asi(as, pik, dest, left, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, pi, dest, left, right);
+}
+
+/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
+static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
+{
+ IRIns *ir;
+ Reg left;
+ if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
+ irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+ int32_t sh = (IR(ir->op2)->i & 31);
+ switch (ir->o) {
+ case IR_BSHL:
+ if ((mask & ((1u<<sh)-1))) goto nofuse;
+ break;
+ case IR_BSHR:
+ if ((mask & ~((~0u)>>sh))) goto nofuse;
+ sh = ((32-sh)&31);
+ break;
+ case IR_BROL:
+ break;
+ default:
+ goto nofuse;
+ }
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
+ return;
+ }
+nofuse:
+ left = ra_alloc1(as, ref, RSET_GPR);
+ *--as->mcp = pi | PPCF_T(left);
+}
+
+static void asm_bitand(ASMState *as, IRIns *ir)
+{
+ Reg dest, left, right;
+ IRRef lref = ir->op1;
+ PPCIns dot = 0;
+ IRRef op2;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ dot = PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k) {
+ /* First check for a contiguous bitmask as used by rlwinm. */
+ uint32_t s1 = lj_ffs((uint32_t)k);
+ uint32_t k1 = ((uint32_t)k >> s1);
+ if ((k1 & (k1+1)) == 0) {
+ asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
+ PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
+ k, lref);
+ return;
+ }
+ if (~(uint32_t)k) {
+ uint32_t s2 = lj_ffs(~(uint32_t)k);
+ uint32_t k2 = (~(uint32_t)k >> s2);
+ if ((k2 & (k2+1)) == 0) {
+ asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
+ PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
+ k, lref);
+ return;
+ }
+ }
+ }
+ if (checku16(k)) {
+ left = ra_alloc1(as, lref, RSET_GPR);
+ emit_asi(as, PPCI_ANDIDOT, dest, left, k);
+ return;
+ } else if ((k & 0xffff) == 0) {
+ left = ra_alloc1(as, lref, RSET_GPR);
+ emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
+ return;
+ }
+ }
+ op2 = ir->op2;
+ if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
+ dot ^= (PPCI_AND ^ PPCI_ANDC);
+ op2 = IR(op2)->op1;
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, PPCI_AND ^ dot, dest, left, right);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
+{
+ Reg dest, left;
+ Reg dot = 0;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ dot = PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ int32_t shift = (IR(ir->op2)->i & 31);
+ if (pik == 0) /* SLWI */
+ emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
+ else if (pik == 1) /* SRWI */
+ emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
+ else
+ emit_asb(as, pik|dot, dest, left, shift);
+ } else {
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, pi|dot, dest, left, right);
+ }
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
+{
+ if (irt_isnum(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg tmp = dest;
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (tmp == left || tmp == right)
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
+ dest), left), right));
+ emit_facb(as, PPCI_FSEL, dest, tmp,
+ ismax ? left : right, ismax ? right : left);
+ emit_fab(as, PPCI_FSUB, tmp, left, right);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp1 = RID_TMP, tmp2 = dest;
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (tmp2 == left || tmp2 == right)
+ tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
+ dest), left), right));
+ emit_tab(as, PPCI_ADD, dest, tmp2, right);
+ emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
+ emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
+ emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
+ emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
+ emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
+ }
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
+#define CC_TWO 0x80 /* Check two flags for FP comparison. */
+
+/* Map of comparisons to flags. ORDER IR. */
+static const uint8_t asm_compmap[IR_ABC+1] = {
+  /* op     int cc                 FP cc */
+  /* LT  */ CC_GE               + (CC_GE<<4),
+  /* GE  */ CC_LT               + (CC_LE<<4) + CC_TWO,
+  /* LE  */ CC_GT               + (CC_GE<<4) + CC_TWO,
+  /* GT  */ CC_LE               + (CC_LE<<4),
+  /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
+  /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
+  /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
+  /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
+  /* EQ  */ CC_NE               + (CC_NE<<4),
+  /* NE  */ CC_EQ               + (CC_EQ<<4),
+  /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO  /* Same as UGT. */
+};
+
+static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
+{
+ Reg right, left = ra_alloc1(as, lref, RSET_GPR);
+ if (irref_isk(rref)) {
+ int32_t k = IR(rref)->i;
+ if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */
+ if (checki16(k)) {
+ emit_tai(as, PPCI_CMPWI, cr, left, k);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (k == 0 && lref == as->curins-1)
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+ return;
+ } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */
+ if (checku16(k)) {
+ emit_tai(as, PPCI_CMPLWI, cr, left, k);
+ return;
+ } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
+ emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
+ emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
+ return;
+ }
+ }
+ } else { /* Unsigned comparison with constant. */
+ if (checku16(k)) {
+ emit_tai(as, PPCI_CMPLWI, cr, left, k);
+ return;
+ }
+ }
+ }
+ right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
+ emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
+}
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ PPCCC cc = asm_compmap[ir->o];
+ if (irt_isnum(ir->t)) {
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ asm_guardcc(as, (cc >> 4));
+ if ((cc & CC_TWO))
+ emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
+ emit_fab(as, PPCI_FCMPU, 0, left, right);
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ if (irref_isk(lref) && !irref_isk(rref)) {
+ /* Swap constants to the right (only for ABC). */
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */
+ }
+ asm_guardcc(as, cc);
+ asm_intcomp_(as, lref, rref, 0, cc);
+ }
+}
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_comp64(ASMState *as, IRIns *ir)
+{
+ PPCCC cc = asm_compmap[(ir-1)->o];
+ if ((cc&3) == (CC_EQ&3)) {
+ asm_guardcc(as, cc);
+ emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
+ (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
+ } else {
+ asm_guardcc(as, CC_EQ);
+ emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
+ emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
+ (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
+ }
+ /* Loword comparison sets cr1 and is unsigned, except for equality. */
+ asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
+ cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
+ /* Hiword comparison sets cr0. */
+ asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
+ as->flagmcp = NULL; /* Doesn't work here. */
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_HASFFI
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+ asm_comp64(as, ir);
+ return;
+ } else if ((ir-1)->o == IR_XSTORE) {
+ as->curins--; /* Handle both stores here. */
+ if ((ir-1)->r != RID_SINK) {
+ asm_xstore(as, ir, 0);
+ asm_xstore(as, ir-1, 4);
+ }
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_ADD: as->curins--; asm_add64(as, ir); break;
+ case IR_SUB: as->curins--; asm_sub64(as, ir); break;
+ case IR_NEG: as->curins--; asm_neg64(as, ir); break;
+ case IR_CALLN:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
+#endif
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
+ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
+ rset_clear(allow, pbase);
+ tmp = allow ? rset_pickbot(allow) :
+ (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
+ emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
+ else
+ ra_modified(as, tmp);
+ emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
+ emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
+ emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
+ if (pbase == RID_TMP)
+ emit_getgl(as, RID_TMP, jit_base);
+ emit_getgl(as, tmp, jit_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
+ } else {
+ Reg type;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), allow);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ }
+ emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ tmp = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
+ emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
+ emit_getgl(as, tmp, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the cond branch and patched the final b. */
+ p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
+ } else {
+ p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_mr(as, r, RID_BASE);
+ }
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ *--p = PPCI_NOP;
+ *--p = PPCI_NOP;
+ as->mctop = p;
+ } else {
+ /* Patch stack adjustment. */
+ lua_assert(checki16(CFRAME_SIZE+spadj));
+ p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
+ p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-2; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+ case IR_GCSTEP: asm_gcstep(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_EQ: case IR_NE:
+ if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
+ as->curins--;
+ asm_href(as, ir-1, (IROp)ir->o);
+ break;
+ }
+ /* fallthrough */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+ asm_comp(as, ir);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bitnot(as, ir); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_bitand(as, ir); break;
+ case IR_BOR: asm_bitop(as, ir, PPCI_OR, PPCI_ORI); break;
+ case IR_BXOR: asm_bitop(as, ir, PPCI_XOR, PPCI_XORI); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, PPCI_SLW, 0); break;
+ case IR_BSHR: asm_bitshift(as, ir, PPCI_SRW, 1); break;
+ case IR_BSAR: asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI); break;
+ case IR_BROL: asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31),
+ PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)); break;
+ case IR_BROR: lua_assert(0); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB: asm_sub(as, ir); break;
+ case IR_MUL: asm_mul(as, ir); break;
+ case IR_DIV: asm_fparith(as, ir, PPCI_FDIV); break;
+ case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
+ case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
+ case IR_NEG: asm_neg(as, ir); break;
+
+ case IR_ABS: asm_fpunary(as, ir, PPCI_FABS); break;
+ case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
+ case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
+ case IR_MIN: asm_min_max(as, ir, 0); break;
+ case IR_MAX: asm_min_max(as, ir, 1); break;
+ case IR_FPMATH:
+ if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
+ break;
+ if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
+ asm_fpunary(as, ir, PPCI_FSQRT);
+ else
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+
+ /* Overflow-checking arithmetic ops. */
+ case IR_ADDOV: asm_arithov(as, ir, PPCI_ADDO); break;
+ case IR_SUBOV: asm_arithov(as, ir, PPCI_SUBFO); break;
+ case IR_MULOV: asm_arithov(as, ir, PPCI_MULLWO); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir, 0); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir, 0); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOBIT: asm_tobit(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = (int)CCI_NARGS(ci);
+ int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ MCode *cstart = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ int clearso = 0;
+ for (; p < pe; p++) {
+ /* Look for exitstub branch, try to replace with branch to target. */
+ uint32_t ins = *p;
+ if ((ins & 0xfc000000u) == 0x40000000u &&
+ ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if (((ins >> 16) & 3) == (CC_SO&3)) {
+ clearso = sizeof(MCode);
+ delta -= sizeof(MCode);
+ }
+ /* Many, but not all short-range branches can be patched directly. */
+ if (((delta + 0x8000) >> 16) == 0) {
+ *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
+ ((delta & 0x8000) * (PPCF_Y/0x8000));
+ if (!cstart) cstart = p;
+ }
+ } else if ((ins & 0xfc000000u) == PPCI_B &&
+ ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
+ ptrdiff_t delta = (char *)target - (char *)p;
+ lua_assert(((delta + 0x02000000) >> 26) == 0);
+ *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
+ if (!cstart) cstart = p;
+ }
+ }
+ { /* Always patch long-range branch in exit stub itself. */
+ ptrdiff_t delta = (char *)target - (char *)px - clearso;
+ lua_assert(((delta + 0x02000000) >> 26) == 0);
+ *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
+ }
+ if (!cstart) cstart = px;
+ lj_mcode_sync(cstart, px+1);
+ if (clearso) { /* Extend the current trace. Ugly workaround. */
+ MCode *pp = J->cur.mcode;
+ J->cur.szmcode += sizeof(MCode);
+ *--pp = PPCI_MCRXR; /* Clear SO flag. */
+ J->cur.mcode = pp;
+ lj_mcode_sync(pp, pp+1);
+ }
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/3rdparty/lua/src/lj_asm_x86.h b/3rdparty/lua/src/lj_asm_x86.h
index e6747b8..64441cc 100644
--- a/3rdparty/lua/src/lj_asm_x86.h
+++ b/3rdparty/lua/src/lj_asm_x86.h
@@ -1,2806 +1,2793 @@
-/*
-** x86/x64 IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Generate an exit stub group at the bottom of the reserved MCode memory. */
-static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
-{
- ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
- MCode *mxp = as->mcbot;
- MCode *mxpstart = mxp;
- if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
- asm_mclimit(as);
- /* Push low byte of exitno for each exit stub. */
- *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
- for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
- *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
- *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
- }
- /* Push the high byte of the exitno for each exit stub group. */
- *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
- /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
- *mxp++ = XI_MOVmi;
- *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
- *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- *mxp++ = 2*sizeof(void *);
- *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
- /* Jump to exit handler which fills in the ExitState. */
- *mxp++ = XI_JMP; mxp += 4;
- *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
- /* Commit the code for this group (even if assembly fails later on). */
- lj_mcode_commitbot(as->J, mxp);
- as->mcbot = mxp;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- return mxpstart;
-}
-
-/* Setup all needed exit stubs. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
- ExitNo i;
- if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
- lj_trace_err(as->J, LJ_TRERR_SNAPOV);
- for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
- if (as->J->exitstubgroup[i] == NULL)
- as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
-}
-
-/* Emit conditional branch to exit for guard.
-** It's important to emit this *after* all registers have been allocated,
-** because rematerializations may invalidate the flags.
-*/
-static void asm_guardcc(ASMState *as, int cc)
-{
- MCode *target = exitstub_addr(as->J, as->snapno);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->loopinv = 1;
- *(int32_t *)(p+1) = jmprel(p+5, target);
- target = p;
- cc ^= 1;
- if (as->realign) {
- emit_sjcc(as, cc, target);
- return;
- }
- }
- emit_jcc(as, cc, target);
-}
-
-/* -- Memory operand fusion ----------------------------------------------- */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if a reference is a signed 32 bit constant. */
-static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
-{
- if (irref_isk(ref)) {
- IRIns *ir = IR(ref);
- if (ir->o != IR_KINT64) {
- *k = ir->i;
- return 1;
- } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
- *k = (int32_t)ir_kint64(ir)->u64;
- return 1;
- }
- }
- return 0;
-}
-
-/* Check if there's no conflicting instruction between curins and ref.
-** Also avoid fusing loads if there are multiple references.
-*/
-static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref) {
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
- return 0;
- }
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse array base into memory operand. */
-static IRRef asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *irb = IR(ref);
- as->mrm.ofs = 0;
- if (irb->o == IR_FLOAD) {
- IRIns *ira = IR(irb->op1);
- lua_assert(irb->op2 == IRFL_TAB_ARRAY);
- /* We can avoid the FLOAD of t->array for colocated arrays. */
- if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
- as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */
- return irb->op1; /* Table obj. */
- }
- } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
- /* Fuse base offset (vararg load). */
- as->mrm.ofs = IR(irb->op2)->i;
- return irb->op1;
- }
- return ref; /* Otherwise use the given array base. */
-}
-
-/* Fuse array reference into memory operand. */
-static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irx;
- lua_assert(ir->o == IR_AREF);
- as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
- irx = IR(ir->op2);
- if (irref_isk(ir->op2)) {
- as->mrm.ofs += 8*irx->i;
- as->mrm.idx = RID_NONE;
- } else {
- rset_clear(allow, as->mrm.base);
- as->mrm.scale = XM_SCALE8;
- /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
- ** Doesn't help much without ABCelim, but reduces register pressure.
- */
- if (!LJ_64 && /* Has bad effects with negative index on x64. */
- mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
- irx->o == IR_ADD && irref_isk(irx->op2)) {
- as->mrm.ofs += 8*IR(irx->op2)->i;
- as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
- } else {
- as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
- }
- }
-}
-
-/* Fuse array/hash/upvalue reference into memory operand.
-** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
-** pass the final allow mask, excluding any GPRs used for other inputs.
-** In particular: 2-operand GPR instructions need to call ra_dest() first!
-*/
-static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- switch ((IROp)ir->o) {
- case IR_AREF:
- if (mayfuse(as, ref)) {
- asm_fusearef(as, ir, allow);
- return;
- }
- break;
- case IR_HREFK:
- if (mayfuse(as, ref)) {
- as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
- as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- as->mrm.idx = RID_NONE;
- return;
- }
- break;
- case IR_UREFC:
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
- as->mrm.ofs = ptr2addr(&uv->tv);
- as->mrm.base = as->mrm.idx = RID_NONE;
- return;
- }
- break;
- default:
- lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
- ir->o == IR_KKPTR);
- break;
- }
- }
- as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
- as->mrm.ofs = 0;
- as->mrm.idx = RID_NONE;
-}
-
-/* Fuse FLOAD/FREF reference into memory operand. */
-static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
-{
- lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
- as->mrm.ofs = field_ofs[ir->op2];
- as->mrm.idx = RID_NONE;
- if (irref_isk(ir->op1)) {
- as->mrm.ofs += IR(ir->op1)->i;
- as->mrm.base = RID_NONE;
- } else {
- as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
- }
-}
-
-/* Fuse string reference into memory operand. */
-static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irr;
- lua_assert(ir->o == IR_STRREF);
- as->mrm.base = as->mrm.idx = RID_NONE;
- as->mrm.scale = XM_SCALE1;
- as->mrm.ofs = sizeof(GCstr);
- if (irref_isk(ir->op1)) {
- as->mrm.ofs += IR(ir->op1)->i;
- } else {
- Reg r = ra_alloc1(as, ir->op1, allow);
- rset_clear(allow, r);
- as->mrm.base = (uint8_t)r;
- }
- irr = IR(ir->op2);
- if (irref_isk(ir->op2)) {
- as->mrm.ofs += irr->i;
- } else {
- Reg r;
- /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
- if (!LJ_64 && /* Has bad effects with negative index on x64. */
- mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
- as->mrm.ofs += IR(irr->op2)->i;
- r = ra_alloc1(as, irr->op1, allow);
- } else {
- r = ra_alloc1(as, ir->op2, allow);
- }
- if (as->mrm.base == RID_NONE)
- as->mrm.base = (uint8_t)r;
- else
- as->mrm.idx = (uint8_t)r;
- }
-}
-
-static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- as->mrm.idx = RID_NONE;
- if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
- as->mrm.ofs = ir->i;
- as->mrm.base = RID_NONE;
- } else if (ir->o == IR_STRREF) {
- asm_fusestrref(as, ir, allow);
- } else {
- as->mrm.ofs = 0;
- if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
- /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
- IRIns *irx;
- IRRef idx;
- Reg r;
- if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. */
- ref = ir->op1;
- ir = IR(ref);
- if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
- goto noadd;
- }
- as->mrm.scale = XM_SCALE1;
- idx = ir->op1;
- ref = ir->op2;
- irx = IR(idx);
- if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */
- idx = ir->op2;
- ref = ir->op1;
- irx = IR(idx);
- }
- if (canfuse(as, irx) && ra_noreg(irx->r)) {
- if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
- /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
- idx = irx->op1;
- as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
- } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
- /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
- idx = irx->op1;
- as->mrm.scale = XM_SCALE2;
- }
- }
- r = ra_alloc1(as, idx, allow);
- rset_clear(allow, r);
- as->mrm.idx = (uint8_t)r;
- }
- noadd:
- as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
- }
-}
-
-/* Fuse load into memory operand. */
-static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_hasreg(ir->r)) {
- if (allow != RSET_EMPTY) { /* Fast path. */
- ra_noweak(as, ir->r);
- return ir->r;
- }
- fusespill:
- /* Force a spill if only memory operands are allowed (asm_x87load). */
- as->mrm.base = RID_ESP;
- as->mrm.ofs = ra_spill(as, ir);
- as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- if (ir->o == IR_KNUM) {
- RegSet avail = as->freeset & ~as->modset & RSET_FPR;
- lua_assert(allow != RSET_EMPTY);
- if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
- as->mrm.ofs = ptr2addr(ir_knum(ir));
- as->mrm.base = as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- } else if (ir->o == IR_KINT64) {
- RegSet avail = as->freeset & ~as->modset & RSET_GPR;
- lua_assert(allow != RSET_EMPTY);
- if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
- as->mrm.ofs = ptr2addr(ir_kint64(ir));
- as->mrm.base = as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- } else if (mayfuse(as, ref)) {
- RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
- if (ir->o == IR_SLOAD) {
- if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
- noconflict(as, ref, IR_RETF, 0)) {
- as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
- as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
- as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- } else if (ir->o == IR_FLOAD) {
- /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
- if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
- noconflict(as, ref, IR_FSTORE, 0)) {
- asm_fusefref(as, ir, xallow);
- return RID_MRM;
- }
- } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
- if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
- asm_fuseahuref(as, ir->op1, xallow);
- return RID_MRM;
- }
- } else if (ir->o == IR_XLOAD) {
- /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
- ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
- */
- if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
- noconflict(as, ref, IR_XSTORE, 0)) {
- asm_fusexref(as, ir->op1, xallow);
- return RID_MRM;
- }
- } else if (ir->o == IR_VLOAD) {
- asm_fuseahuref(as, ir->op1, xallow);
- return RID_MRM;
- }
- }
- if (!(as->freeset & allow) && !irref_isk(ref) &&
- (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
- goto fusespill;
- return ra_allocref(as, ref, allow);
-}
-
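The `!(avail & (avail-1))` tests above use the standard clear-lowest-set-bit trick: the expression is zero exactly when the free-register set has at most one bit set, so the constant is fused as a memory operand instead of consuming one of the last free registers. A one-line sketch of the predicate:

#include <stdint.h>

/* True iff the register set has zero or one bit set,
** i.e. fewer than two registers are still free. */
static int at_most_one_reg(uint32_t rset) { return (rset & (rset - 1u)) == 0; }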
-#if LJ_64
-/* Don't fuse a 32 bit load into a 64 bit operation. */
-static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
-{
- if (is64 && !irt_is64(IR(ref)->t))
- return ra_alloc1(as, ref, allow);
- return asm_fuseload(as, ref, allow);
-}
-#else
-#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow))
-#endif
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Count the required number of stack slots for a call. */
-static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t i, nargs = CCI_NARGS(ci);
- int nslots = 0;
-#if LJ_64
- if (LJ_ABI_WIN) {
- nslots = (int)(nargs*2); /* Only matters for more than four args. */
- } else {
- int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
- for (i = 0; i < nargs; i++)
- if (args[i] && irt_isfp(IR(args[i])->t)) {
- if (nfpr > 0) nfpr--; else nslots += 2;
- } else {
- if (ngpr > 0) ngpr--; else nslots += 2;
- }
- }
-#else
- int ngpr = 0;
- if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
- ngpr = 2;
- else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
- ngpr = 1;
- for (i = 0; i < nargs; i++)
- if (args[i] && irt_isfp(IR(args[i])->t)) {
- nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
-#endif
- return nslots;
-}
-
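As a standalone illustration of just the POSIX/x64 branch above, here is the same counting rule written without the IR plumbing, assuming the usual SysV budget of 6 integer and 8 FP argument registers (what REGARG_NUMGPR/REGARG_NUMFPR are expected to be on that target):

/* Sketch only: slots are 4 bytes each, so an argument spilled to the
** stack on x64 costs two slots (8 bytes). */
static int count_slots_posix_x64(const int *arg_is_fp, int nargs)
{
  int ngpr = 6, nfpr = 8, nslots = 0, i;
  for (i = 0; i < nargs; i++) {
    if (arg_is_fp[i]) { if (nfpr > 0) nfpr--; else nslots += 2; }
    else { if (ngpr > 0) ngpr--; else nslots += 2; }
  }
  return nslots;
}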
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = STACKARG_OFS;
-#if LJ_64
- uint32_t gprs = REGARG_GPRS;
- Reg fpr = REGARG_FIRSTFPR;
-#if !LJ_ABI_WIN
- MCode *patchnfpr = NULL;
-#endif
-#else
- uint32_t gprs = 0;
- if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
- if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
- gprs = (REGARG_GPRS & 31);
- else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
- gprs = REGARG_GPRS;
- }
-#endif
- if ((void *)ci->func)
- emit_call(as, ci->func);
-#if LJ_64
- if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */
-#if LJ_ABI_WIN
- for (n = 0; n < 4 && n < nargs; n++) {
- IRIns *ir = IR(args[n]);
- if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */
- emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
- ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */
- }
-#else
- patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */
- *--as->mcp = XI_MOVrib | RID_EAX;
-#endif
- }
-#endif
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- IRIns *ir = IR(ref);
- Reg r;
-#if LJ_64 && LJ_ABI_WIN
- /* Windows/x64 argument registers are strictly positional. */
- r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
- fpr++; gprs >>= 5;
-#elif LJ_64
- /* POSIX/x64 argument registers are used in order of appearance. */
- if (irt_isfp(ir->t)) {
- r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
- } else {
- r = gprs & 31; gprs >>= 5;
- }
-#else
- if (ref && irt_isfp(ir->t)) {
- r = 0;
- } else {
- r = gprs & 31; gprs >>= 5;
- if (!ref) continue;
- }
-#endif
- if (r) { /* Argument is in a register. */
- if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
-#if LJ_64
- if (ir->o == IR_KINT64)
- emit_loadu64(as, r, ir_kint64(ir)->u64);
- else
-#endif
- emit_loadi(as, r, ir->i);
- } else {
- lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */
- if (ra_hasreg(ir->r)) {
- ra_noweak(as, ir->r);
- emit_movrr(as, ir, r, ir->r);
- } else {
- ra_allocref(as, ref, RID2RSET(r));
- }
- }
- } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */
- lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */
- if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
- /* Split stores for unaligned FP consts. */
- emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
- emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
- } else {
- r = ra_alloc1(as, ref, RSET_FPR);
- emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
- r, RID_ESP, ofs);
- }
- ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
- } else { /* Non-FP argument is on stack. */
- if (LJ_32 && ref < ASMREF_TMP1) {
- emit_movmroi(as, RID_ESP, ofs, ir->i);
- } else {
- r = ra_alloc1(as, ref, RSET_GPR);
- emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
- }
- ofs += sizeof(intptr_t);
- }
- checkmclim(as);
- }
-#if LJ_64 && !LJ_ABI_WIN
- if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
-#endif
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = (LJ_32 && (ir+1)->o == IR_HIOP);
- if ((ci->flags & CCI_NOFPRCLOBBER))
- drop &= ~RSET_FPR;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- if (irt_isfp(ir->t)) {
- int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
-#if LJ_64
- if ((ci->flags & CCI_CASTU64)) {
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */
- }
- if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
-#else
- /* Number result is in x87 st0 for x86 calling convention. */
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
- dest, RID_ESP, ofs);
- }
- if ((ci->flags & CCI_CASTU64)) {
- emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
- emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
- } else {
- emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
- irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
- }
-#endif
-#if LJ_32
- } else if (hiop) {
- ra_destpair(as, ir);
-#endif
- } else {
- lua_assert(!irt_ispri(ir->t));
- ra_destreg(as, ir, RID_RET);
- }
- } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
- emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */
- }
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-/* Return a constant function pointer or NULL for indirect calls. */
-static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
-{
-#if LJ_32
- UNUSED(as);
- if (irref_isk(func))
- return (void *)irf->i;
-#else
- if (irref_isk(func)) {
- MCode *p;
- if (irf->o == IR_KINT64)
- p = (MCode *)(void *)ir_k64(irf)->u64;
- else
- p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
- if (p - as->mcp == (int32_t)(p - as->mcp))
- return p; /* Call target is still in +-2GB range. */
- /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
- }
-#endif
- return NULL;
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- int32_t spadj = 0;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
-#if LJ_32
- /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
- if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
- spadj = 4 * asm_count_call_slots(as, &ci, args);
-#endif
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- ci.func = (ASMFunction)asm_callx_func(as, irf, func);
- if (!(void *)ci.func) {
- /* Use a (hoistable) non-scratch register for indirect calls. */
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- Reg r = ra_alloc1(as, func, allow);
- if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */
- emit_rr(as, XO_GROUP5, XOg_CALL, r);
- } else if (LJ_32) {
- emit_spsub(as, spadj);
- }
- asm_gencall(as, &ci, args);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guardcc(as, CC_NE);
- emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_guardcc(as, CC_P);
- asm_guardcc(as, CC_NE);
- emit_rr(as, XO_UCOMISD, left, tmp);
- emit_rr(as, XO_CVTSI2SD, tmp, dest);
- if (!(as->flags & JIT_F_SPLIT_XMM))
- emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
- emit_rr(as, XO_CVTTSD2SI, dest, left);
- /* Can't fuse since left is needed twice. */
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg tmp = ra_noreg(IR(ir->op1)->r) ?
- ra_alloc1(as, ir->op1, RSET_FPR) :
- ra_scratch(as, RSET_FPR);
- Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
- emit_rr(as, XO_MOVDto, tmp, dest);
- emit_mrm(as, XO_ADDSD, tmp, right);
- ra_left(as, tmp, ir->op1);
-}
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
- IRRef lref = ir->op1;
- lua_assert(irt_type(ir->t) != st);
- lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- Reg left = asm_fuseload(as, lref, RSET_FPR);
- emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
- if (left == dest) return; /* Avoid the XO_XORPS. */
- } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */
- /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
- cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
- Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- if (irt_isfloat(ir->t))
- emit_rr(as, XO_CVTSD2SS, dest, dest);
- emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */
- emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */
- emit_loadn(as, bias, k);
- emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
- return;
- } else { /* Integer to FP conversion. */
- Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
- ra_alloc1(as, lref, RSET_GPR) :
- asm_fuseloadm(as, lref, RSET_GPR, st64);
- if (LJ_64 && st == IRT_U64) {
- MCLabel l_end = emit_label(as);
- const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
- emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */
- emit_sjcc(as, CC_NS, l_end);
- emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */
- }
- emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
- dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
- }
- if (!(as->flags & JIT_F_SPLIT_XMM))
- emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- x86Op op = st == IRT_NUM ?
- ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
- ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
- if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
- /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
- /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
- Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
- ra_scratch(as, RSET_FPR);
- MCLabel l_end = emit_label(as);
- if (LJ_32)
- emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
- emit_rr(as, op, dest|REX_64, tmp);
- if (st == IRT_NUM)
- emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
- LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
- else
- emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
- LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
- emit_sjcc(as, CC_NS, l_end);
- emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */
- emit_rr(as, op, dest|REX_64, tmp);
- ra_left(as, tmp, lref);
- } else {
- Reg left = asm_fuseload(as, lref, RSET_FPR);
- if (LJ_64 && irt_isu32(ir->t))
- emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */
- emit_mrm(as, op,
- dest|((LJ_64 &&
- (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
- left);
- }
- }
- } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left, dest = ra_dest(as, ir, RSET_GPR);
- RegSet allow = RSET_GPR;
- x86Op op;
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if (st == IRT_I8) {
- op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
- } else if (st == IRT_U8) {
- op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
- } else if (st == IRT_I16) {
- op = XO_MOVSXw;
- } else {
- op = XO_MOVZXw;
- }
- left = asm_fuseload(as, lref, allow);
- /* Add extra MOV if source is already in wrong register. */
- if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
- Reg tmp = ra_scratch(as, allow);
- emit_rr(as, op, dest, tmp);
- emit_rr(as, XO_MOV, tmp, left);
- } else {
- emit_mrm(as, op, dest, left);
- }
- } else { /* 32/64 bit integer conversions. */
- if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
- } else if (irt_is64(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st64 || !(ir->op2 & IRCONV_SEXT)) {
- /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
- ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
- } else { /* 32 to 64 bit sign extension. */
- Reg left = asm_fuseload(as, lref, RSET_GPR);
- emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
- }
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st64) {
- Reg left = asm_fuseload(as, lref, RSET_GPR);
- /* This is either a 32 bit reg/reg mov which zeroes the hiword
- ** or a load of the loword from a 64 bit address.
- */
- emit_mrm(as, XO_MOV, dest, left);
- } else { /* 32/32 bit no-op (cast). */
- ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
- }
-}
-
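The U32-to-FP path above relies on the bias constant 0x43380000_00000000, i.e. 2^52+2^51: writing the 32-bit integer into the low mantissa bits of that constant yields the exact value 2^52+2^51+x, and subtracting the bias recovers x exactly. A portable C sketch of the same trick (illustrative, not part of the source):

#include <stdint.h>
#include <string.h>

static double u32_to_double_bias(uint32_t x)
{
  uint64_t bits = UINT64_C(0x4338000000000000) | x;  /* bit pattern of 2^52+2^51+x */
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d - 6755399441055744.0;  /* subtract 2^52+2^51, leaving exactly x */
}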
-#if LJ_32 && LJ_HASFFI
-/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */
-
-/* 64 bit integer to FP conversion in 32 bit mode. */
-static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
-{
- Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
- int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
- dest, RID_ESP, ofs);
- }
- emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
- irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
- if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
- /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
- MCLabel l_end = emit_label(as);
- emit_rma(as, XO_FADDq, XOg_FADDq,
- lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
- emit_sjcc(as, CC_NS, l_end);
- emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */
- } else {
- lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
- }
- emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
- /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
- emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
- emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
-}
-
-/* FP to 64 bit integer conversion in 32 bit mode. */
-static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- Reg lo, hi;
- lua_assert(st == IRT_NUM || st == IRT_FLOAT);
- lua_assert(dt == IRT_I64 || dt == IRT_U64);
- lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
- hi = ra_dest(as, ir, RSET_GPR);
- lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
- if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
- /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
- if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */
- emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
- emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
- emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
- }
- if (dt == IRT_U64) {
- /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
- MCLabel l_pop, l_end = emit_label(as);
- emit_x87op(as, XI_FPOP);
- l_pop = emit_label(as);
- emit_sjmp(as, l_end);
- emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
- if ((as->flags & JIT_F_SSE3))
- emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
- else
- emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
- emit_rma(as, XO_FADDq, XOg_FADDq,
- lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
- emit_sjcc(as, CC_NS, l_pop);
- emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */
- }
- emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
- if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */
- emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
- } else { /* Otherwise set FPU rounding mode to truncate before the store. */
- emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
- emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
- emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
- emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
- emit_loadi(as, lo, 0xc00);
- emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
- }
- if (dt == IRT_U64)
- emit_x87op(as, XI_FDUP);
- emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
- st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
- asm_fuseload(as, ir->op1, RSET_EMPTY));
-}
-#endif
-
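On the non-SSE3 path above, the FNSTCW/FLDCW sequence forces the x87 rounding-control bits to truncation for the FISTP and afterwards puts the rounding mode back to the default. Expressed with C99 <fenv.h> (a rough analogue only; the u64 range handling is omitted):

#include <fenv.h>
#include <math.h>

static long long fp_to_i64_trunc(double d)
{
  int old = fegetround();
  long long r;
  fesetround(FE_TOWARDZERO);
  r = llrint(d);            /* Rounds with the current mode, i.e. truncates. */
  fesetround(old);
  return r;
}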
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- /* Force a spill slot for the destination register (if any). */
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- RegSet drop = RSET_SCRATCH;
- if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
- rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */
- ra_evictset(as, drop);
- asm_guardcc(as, CC_E);
- emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- /* Store the result to the spill slot or temp slots. */
- emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
- RID_ESP, sps_scale(ir->s));
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRIns *irl = IR(ir->op1);
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(irl->t)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
- RID_ESP, ra_spill(as, irl));
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_fusearef(as, ir, RSET_GPR);
- if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
- emit_mrm(as, XO_LEA, dest, RID_MRM);
- else if (as->mrm.base != dest)
- emit_rr(as, XO_MOV, dest, as->mrm.base);
-}
-
-/* Merge NE(HREF, niltv) check. */
-static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
-{
- /* Assumes nothing else generates NE of HREF. */
- if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
- ra_hasreg(ir->r)) {
- MCode *p = as->mcp;
- p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
- /* Ensure no loop branch inversion happened. */
- if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
- as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */
- return p + *(int32_t *)(p-4); /* Return exit address. */
- }
- }
- return NULL;
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir)
-{
- MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */
- RegSet allow = RSET_GPR;
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = RID_NONE, tmp = RID_NONE;
- IRIns *irkey = IR(ir->op2);
- int isk = irref_isk(ir->op2);
- IRType1 kt = irkey->t;
- uint32_t khash;
- MCLabel l_end, l_loop, l_next;
-
- if (!isk) {
- rset_clear(allow, tab);
- key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
- if (!irt_isstr(kt))
- tmp = ra_scratch(as, rset_exclude(allow, key));
- }
-
- /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
- l_end = emit_label(as);
- if (nilexit && ir[1].o == IR_NE) {
- emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */
- nilexit = NULL;
- } else {
- emit_loada(as, dest, niltvg(J2G(as->J)));
- }
-
- /* Follow hash chain until the end. */
- l_loop = emit_sjcc_label(as, CC_NZ);
- emit_rr(as, XO_TEST, dest, dest);
- emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
- l_next = emit_label(as);
-
- /* Type and value comparison. */
- if (nilexit)
- emit_jcc(as, CC_E, nilexit);
- else
- emit_sjcc(as, CC_E, l_end);
- if (irt_isnum(kt)) {
- if (isk) {
- /* Assumes -0.0 is already canonicalized to +0.0. */
- emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
- (int32_t)ir_knum(irkey)->u32.lo);
- emit_sjcc(as, CC_NE, l_next);
- emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
- (int32_t)ir_knum(irkey)->u32.hi);
- } else {
- emit_sjcc(as, CC_P, l_next);
- emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
- emit_sjcc(as, CC_AE, l_next);
- /* The type check avoids NaN penalties and complaints from Valgrind. */
-#if LJ_64
- emit_u32(as, LJ_TISNUM);
- emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
-#else
- emit_i8(as, LJ_TISNUM);
- emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
-#endif
- }
-#if LJ_64
- } else if (irt_islightud(kt)) {
- emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
-#endif
- } else {
- if (!irt_ispri(kt)) {
- lua_assert(irt_isaddr(kt));
- if (isk)
- emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
- ptr2addr(ir_kgc(irkey)));
- else
- emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
- emit_sjcc(as, CC_NE, l_next);
- }
- lua_assert(!irt_isnil(kt));
- emit_i8(as, irt_toitype(kt));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
- }
- emit_sfixup(as, l_loop);
- checkmclim(as);
-
- /* Load main position relative to tab->node into dest. */
- khash = isk ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
- } else {
- emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
- if ((as->flags & JIT_F_PREFER_IMUL)) {
- emit_i8(as, sizeof(Node));
- emit_rr(as, XO_IMULi8, dest, dest);
- } else {
- emit_shifti(as, XOg_SHL, dest, 3);
- emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
- }
- if (isk) {
- emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
- emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
- } else if (irt_isstr(kt)) {
- emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
- emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
- } else { /* Must match with hashrot() in lj_tab.c. */
- emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
- emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
- emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
- emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
- emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
- emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
- emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
- emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
- if (irt_isnum(kt)) {
- emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
-#if LJ_64
- emit_shifti(as, XOg_SHR|REX_64, dest, 32);
- emit_rr(as, XO_MOV, tmp, dest);
- emit_rr(as, XO_MOVDto, key|REX_64, dest);
-#else
- emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
- emit_rr(as, XO_MOVDto, key, tmp);
-#endif
- } else {
- emit_rr(as, XO_MOV, tmp, key);
- emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
- }
- }
- }
-}
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
-#if !LJ_64
- MCLabel l_exit;
-#endif
- lua_assert(ofs % sizeof(Node) == 0);
- if (ra_hasreg(dest)) {
- if (ofs != 0) {
- if (dest == node && !(as->flags & JIT_F_LEA_AGU))
- emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
- else
- emit_rmro(as, XO_LEA, dest, node, ofs);
- } else if (dest != node) {
- emit_rr(as, XO_MOV, dest, node);
- }
- }
- asm_guardcc(as, CC_NE);
-#if LJ_64
- if (!irt_ispri(irkey->t)) {
- Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
- emit_rmro(as, XO_CMP, key|REX_64, node,
- ofs + (int32_t)offsetof(Node, key.u64));
- lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
- /* Assumes -0.0 is already canonicalized to +0.0. */
- emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
- ((uint64_t)irt_toitype(irkey->t) << 32) |
- (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
- } else {
- lua_assert(!irt_isnil(irkey->t));
- emit_i8(as, irt_toitype(irkey->t));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
- ofs + (int32_t)offsetof(Node, key.it));
- }
-#else
- l_exit = emit_label(as);
- if (irt_isnum(irkey->t)) {
- /* Assumes -0.0 is already canonicalized to +0.0. */
- emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
- ofs + (int32_t)offsetof(Node, key.u32.lo),
- (int32_t)ir_knum(irkey)->u32.lo);
- emit_sjcc(as, CC_NE, l_exit);
- emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
- ofs + (int32_t)offsetof(Node, key.u32.hi),
- (int32_t)ir_knum(irkey)->u32.hi);
- } else {
- if (!irt_ispri(irkey->t)) {
- lua_assert(irt_isgcv(irkey->t));
- emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
- ofs + (int32_t)offsetof(Node, key.gcr),
- ptr2addr(ir_kgc(irkey)));
- emit_sjcc(as, CC_NE, l_exit);
- }
- lua_assert(!irt_isnil(irkey->t));
- emit_i8(as, irt_toitype(irkey->t));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
- ofs + (int32_t)offsetof(Node, key.it));
- }
-#endif
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- IRIns *irkey;
- Reg tmp;
- if (ir->r == RID_SINK)
- return;
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- tmp = ra_releasetmp(as, ASMREF_TMP1);
- irkey = IR(ir->op2);
- if (irt_isnum(irkey->t)) {
- /* For numbers use the constant itself or a spill slot as a TValue. */
- if (irref_isk(ir->op2))
- emit_loada(as, tmp, ir_knum(irkey));
- else
- emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
- } else {
- /* Otherwise use g->tmptv to hold the TValue. */
- if (!irref_isk(ir->op2)) {
- Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
- emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
- } else if (!irt_ispri(irkey->t)) {
- emit_movmroi(as, tmp, 0, irkey->i);
- }
- if (!(LJ_64 && irt_islightud(irkey->t)))
- emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
- emit_loada(as, tmp, &J2G(as->J)->tmptv);
- }
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- /* NYI: Check that UREFO is still open and not aliasing a slot. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_rma(as, XO_MOV, dest, v);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
- asm_guardcc(as, CC_NE);
- emit_i8(as, 1);
- emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
- } else {
- emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
- }
- emit_rmro(as, XO_MOV, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_fusefref(as, ir, RSET_GPR);
- emit_mrm(as, XO_LEA, dest, RID_MRM);
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_fusestrref(as, ir, RSET_GPR);
- if (as->mrm.base == RID_NONE)
- emit_loadi(as, dest, as->mrm.ofs);
- else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
- emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
- else
- emit_mrm(as, XO_LEA, dest, RID_MRM);
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static void asm_fxload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- x86Op xo;
- if (ir->o == IR_FLOAD)
- asm_fusefref(as, ir, RSET_GPR);
- else
- asm_fusexref(as, ir->op1, RSET_GPR);
- /* ir->op2 is ignored -- unaligned loads are ok on x86. */
- switch (irt_type(ir->t)) {
- case IRT_I8: xo = XO_MOVSXb; break;
- case IRT_U8: xo = XO_MOVZXb; break;
- case IRT_I16: xo = XO_MOVSXw; break;
- case IRT_U16: xo = XO_MOVZXw; break;
- case IRT_NUM: xo = XMM_MOVRM(as); break;
- case IRT_FLOAT: xo = XO_MOVSS; break;
- default:
- if (LJ_64 && irt_is64(ir->t))
- dest |= REX_64;
- else
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
- xo = XO_MOV;
- break;
- }
- emit_mrm(as, xo, dest, RID_MRM);
-}
-
-static void asm_fxstore(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- Reg src = RID_NONE, osrc = RID_NONE;
- int32_t k = 0;
- if (ir->r == RID_SINK)
- return;
- /* The IRT_I16/IRT_U16 stores should never be simplified for constant
- ** values since mov word [mem], imm16 has a length-changing prefix.
- */
- if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
- !asm_isk32(as, ir->op2, &k)) {
- RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
- (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
- src = osrc = ra_alloc1(as, ir->op2, allow8);
- if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
- rset_clear(allow, osrc);
- src = ra_scratch(as, allow8);
- }
- rset_clear(allow, src);
- }
- if (ir->o == IR_FSTORE) {
- asm_fusefref(as, IR(ir->op1), allow);
- } else {
- asm_fusexref(as, ir->op1, allow);
- if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
- }
- if (ra_hasreg(src)) {
- x86Op xo;
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
- case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
- case IRT_NUM: xo = XO_MOVSDto; break;
- case IRT_FLOAT: xo = XO_MOVSSto; break;
-#if LJ_64
- case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */
-#endif
- default:
- if (LJ_64 && irt_is64(ir->t))
- src |= REX_64;
- else
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
- xo = XO_MOVto;
- break;
- }
- emit_mrm(as, xo, src, RID_MRM);
- if (!LJ_64 && src != osrc) {
- ra_noweak(as, osrc);
- emit_rr(as, XO_MOV, src, osrc);
- }
- } else {
- if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
- emit_i8(as, k);
- emit_mrm(as, XO_MOVmib, 0, RID_MRM);
- } else {
- lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
- irt_isaddr(ir->t));
- emit_i32(as, k);
- emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
- }
- }
-}
-
-#if LJ_64
-static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
-{
- if (ra_used(ir) || typecheck) {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (typecheck) {
- Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
- asm_guardcc(as, CC_NE);
- emit_i8(as, -2);
- emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
- emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
- emit_rr(as, XO_MOV, tmp|REX_64, dest);
- }
- return dest;
- } else {
- return RID_NONE;
- }
-}
-#endif
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
- (LJ_DUALNUM && irt_isint(ir->t)));
-#if LJ_64
- if (irt_islightud(ir->t)) {
- Reg dest = asm_load_lightud64(as, ir, 1);
- if (ra_hasreg(dest)) {
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
- }
- return;
- } else
-#endif
- if (ra_used(ir)) {
- RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
- Reg dest = ra_dest(as, ir, allow);
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
- } else {
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- }
- /* Always do the type check, even if the load result is unused. */
- as->mrm.ofs += 4;
- asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
- if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
- lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
- emit_u32(as, LJ_TISNUM);
- emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
- } else {
- emit_i8(as, irt_toitype(ir->t));
- emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
- }
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- if (ir->r == RID_SINK)
- return;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- emit_mrm(as, XO_MOVSDto, src, RID_MRM);
-#if LJ_64
- } else if (irt_islightud(ir->t)) {
- Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
- asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
- emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
-#endif
- } else {
- IRIns *irr = IR(ir->op2);
- RegSet allow = RSET_GPR;
- Reg src = RID_NONE;
- if (!irref_isk(ir->op2)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- }
- asm_fuseahuref(as, ir->op1, allow);
- if (ra_hasreg(src)) {
- emit_mrm(as, XO_MOVto, src, RID_MRM);
- } else if (!irt_ispri(irr->t)) {
- lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
- emit_i32(as, irr->i);
- emit_mrm(as, XO_MOVmi, 0, RID_MRM);
- }
- as->mrm.ofs += 4;
- emit_i32(as, (int32_t)irt_toitype(ir->t));
- emit_mrm(as, XO_MOVmi, 0, RID_MRM);
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
- IRType1 t = ir->t;
- Reg base;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
- lua_assert(LJ_DUALNUM ||
- !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
- Reg left = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
- t.irt = IRT_NUM; /* Continue with a regular number type check. */
-#if LJ_64
- } else if (irt_islightud(t)) {
- Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
- if (ra_hasreg(dest)) {
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
- }
- return;
-#endif
- } else if (ra_used(ir)) {
- RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
- Reg dest = ra_dest(as, ir, allow);
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
- emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
- } else if (irt_isnum(t)) {
- emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
- } else {
- emit_rmro(as, XO_MOV, dest, base, ofs);
- }
- } else {
- if (!(ir->op2 & IRSLOAD_TYPECHECK))
- return; /* No type check: avoid base alloc. */
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- }
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- /* Need type check, even if the load result is unused. */
- asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
- if (LJ_64 && irt_type(t) >= IRT_NUM) {
- lua_assert(irt_isinteger(t) || irt_isnum(t));
- emit_u32(as, LJ_TISNUM);
- emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
- } else {
- emit_i8(as, irt_toitype(t));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
- }
- }
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
- asm_setupresult(as, ir, ci); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
-#if LJ_64
- Reg r64 = sz == 8 ? REX_64 : 0;
- if (irref_isk(ir->op2)) {
- IRIns *irk = IR(ir->op2);
- uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
- (uint64_t)(uint32_t)irk->i;
- if (sz == 4 || checki32((int64_t)k)) {
- emit_i32(as, (int32_t)k);
- emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
- } else {
- emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
- emit_loadu64(as, RID_ECX, k);
- }
- } else {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
- }
-#else
- int32_t ofs = sizeof(GCcdata);
- if (sz == 8) {
- ofs += 4; ir++;
- lua_assert(ir->o == IR_HIOP);
- }
- do {
- if (irref_isk(ir->op2)) {
- emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
- } else {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_movtomro(as, r, RID_RET, ofs);
- rset_clear(allow, r);
- }
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; ir--;
- } while (1);
-#endif
- lua_assert(sz == 4 || sz == 8);
- }
-
- /* Combine initialization of marked, gct and ctypeid. */
- emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
- emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
- (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16)));
- emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
- emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
-
- asm_gencall(as, ci, args);
- emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- MCLabel l_end = emit_label(as);
- emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
- emit_setgl(as, tab, gc.grayagain);
- emit_getgl(as, tmp, gc.grayagain);
- emit_i8(as, ~LJ_GC_BLACK);
- emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
- emit_sjcc(as, CC_Z, l_end);
- emit_i8(as, LJ_GC_BLACK);
- emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
- obj = IR(ir->op1)->r;
- emit_sjcc(as, CC_Z, l_end);
- emit_i8(as, LJ_GC_WHITES);
- if (irref_isk(ir->op2)) {
- GCobj *vp = ir_kgc(IR(ir->op2));
- emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
- } else {
- Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
- emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
- }
- emit_sjcc(as, CC_Z, l_end);
- emit_i8(as, LJ_GC_BLACK);
- emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
-}
-
-/* -- FP/int arithmetic and logic operations ------------------------------ */
-
-/* Load reference onto x87 stack. Force a spill to memory if needed. */
-static void asm_x87load(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_KNUM) {
- cTValue *tv = ir_knum(ir);
- if (tvispzero(tv)) /* Use fldz only for +0. */
- emit_x87op(as, XI_FLDZ);
- else if (tvispone(tv))
- emit_x87op(as, XI_FLD1);
- else
- emit_rma(as, XO_FLDq, XOg_FLDq, tv);
- } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
- !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
- IRIns *iri = IR(ir->op1);
- emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
- } else {
- emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
- }
-}
-
-/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
-static int fpmjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
- IRIns *irx;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_XMM0);
- emit_call(as, lj_vm_pow_sse);
- irx = IR(irpp->op1);
- if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
- irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */
- ra_left(as, RID_XMM0, irpp->op1);
- ra_left(as, RID_XMM1, irp->op2);
- return 1;
- }
- }
- return 0;
-}
-
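The rejoin above is valid because of the identity x^y = 2^(y*log2 x) for positive x, which is exactly the EXP2/MUL/LOG2 chain the earlier split produced. In plain C terms (subject to the usual floating-point rounding differences):

#include <math.h>

/* pow(x, y) == exp2(y * log2(x)) for x > 0. */
static double pow_via_exp2(double x, double y) { return exp2(y * log2(x)); }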
-static void asm_fpmath(ASMState *as, IRIns *ir)
-{
- IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
- if (fpm == IRFPM_SQRT) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
- emit_mrm(as, XO_SQRTSD, dest, left);
- } else if (fpm <= IRFPM_TRUNC) {
- if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
- /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
- ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
- ** This is atrocious, but the alternatives are much worse.
- */
- /* Round down/up/trunc == 1001/1010/1011. */
- emit_i8(as, 0x09 + fpm);
- emit_mrm(as, XO_ROUNDSD, dest, left);
- if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
- as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
- }
- *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
- } else { /* Call helper functions for SSE2 variant. */
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_XMM0);
- emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
- fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
- ra_left(as, RID_XMM0, ir->op1);
- }
- } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) {
- /* Rejoined to pow(). */
- } else { /* Handle x87 ops. */
- int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
- }
- emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
- switch (fpm) { /* st0 = lj_vm_*(st0) */
- case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
- case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
- case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
- case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
- case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
- case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
- /* Note: the use of fyl2xp1 would be pointless here. When computing
- ** log(1.0+eps) the precision is already lost after 1.0 is added.
- ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
- */
- emit_x87op(as, XI_FYL2X); break;
- case IRFPM_OTHER:
- switch (ir->o) {
- case IR_ATAN2:
- emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
- case IR_LDEXP:
- emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
- default: lua_assert(0); break;
- }
- break;
- default: lua_assert(0); break;
- }
- asm_x87load(as, ir->op1);
- switch (fpm) {
- case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
- case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
- case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
- case IRFPM_OTHER:
- if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
- break;
- default: break;
- }
- }
-}
-
-static void asm_fppowi(ASMState *as, IRIns *ir)
-{
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_XMM0);
- emit_call(as, lj_vm_powi_sse);
- ra_left(as, RID_XMM0, ir->op1);
- ra_left(as, RID_EAX, ir->op2);
-}
-
-#if LJ_64 && LJ_HASFFI
-static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-#endif
-
-static void asm_intmod(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static int asm_swapops(ASMState *as, IRIns *ir)
-{
- IRIns *irl = IR(ir->op1);
- IRIns *irr = IR(ir->op2);
- lua_assert(ra_noreg(irr->r));
- if (!irm_iscomm(lj_ir_mode[ir->o]))
- return 0; /* Can't swap non-commutative operations. */
- if (irref_isk(ir->op2))
- return 0; /* Don't swap constants to the left. */
- if (ra_hasreg(irl->r))
- return 1; /* Swap if left already has a register. */
- if (ra_samehint(ir->r, irr->r))
- return 1; /* Swap if dest and right have matching hints. */
- if (as->curins > as->loopref) { /* In variant part? */
- if (ir->op2 < as->loopref && !irt_isphi(irr->t))
- return 0; /* Keep invariants on the right. */
- if (ir->op1 < as->loopref && !irt_isphi(irl->t))
- return 1; /* Swap invariants to the right. */
- }
- if (opisfusableload(irl->o))
- return 1; /* Swap fusable loads to the right. */
- return 0; /* Otherwise don't swap. */
-}
-
-static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
-{
- IRRef lref = ir->op1;
- IRRef rref = ir->op2;
- RegSet allow = RSET_FPR;
- Reg dest;
- Reg right = IR(rref)->r;
- if (ra_hasreg(right)) {
- rset_clear(allow, right);
- ra_noweak(as, right);
- }
- dest = ra_dest(as, ir, allow);
- if (lref == rref) {
- right = dest;
- } else if (ra_noreg(right)) {
- if (asm_swapops(as, ir)) {
- IRRef tmp = lref; lref = rref; rref = tmp;
- }
- right = asm_fuseload(as, rref, rset_clear(allow, dest));
- }
- emit_mrm(as, xo, dest, right);
- ra_left(as, dest, lref);
-}
-
-static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
-{
- IRRef lref = ir->op1;
- IRRef rref = ir->op2;
- RegSet allow = RSET_GPR;
- Reg dest, right;
- int32_t k = 0;
- if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
- MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2);
- if ((p[1] & 15) < 14) {
-      if ((p[1] & 15) >= 12) p[1] -= 4;  /* L <-> S, NL <-> NS */
- as->flagmcp = NULL;
- as->mcp = p;
- } /* else: cannot transform LE/NLE to cc without use of OF. */
- }
- right = IR(rref)->r;
- if (ra_hasreg(right)) {
- rset_clear(allow, right);
- ra_noweak(as, right);
- }
- dest = ra_dest(as, ir, allow);
- if (lref == rref) {
- right = dest;
- } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
- if (asm_swapops(as, ir)) {
- IRRef tmp = lref; lref = rref; rref = tmp;
- }
- right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
- }
- if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
- asm_guardcc(as, CC_O);
- if (xa != XOg_X_IMUL) {
- if (ra_hasreg(right))
- emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
- else
- emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
- } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
- emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
- } else { /* IMUL r, r, k. */
- /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
- Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
- x86Op xo;
- if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
- } else { emit_i32(as, k); xo = XO_IMULi; }
- emit_mrm(as, xo, REX_64IR(ir, dest), left);
- return;
- }
- ra_left(as, dest, lref);
-}
-
-/* LEA is really a 4-operand ADD with an independent destination register,
-** up to two source registers and an immediate. One register can be scaled
-** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
-** instructions.
-**
-** Currently only a few common cases are supported:
-** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
-** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
-** - Right ADD fusion: y = a+(b+k)
-** The omitted variants have already been reduced by FOLD.
-**
-** There are more fusion opportunities, like gathering shifts or joining
-** common references. But these are probably not worth the trouble, since
-** array indexing is not decomposed and already makes use of all fields
-** of the ModRM operand.
-*/
-static int asm_lea(ASMState *as, IRIns *ir)
-{
- IRIns *irl = IR(ir->op1);
- IRIns *irr = IR(ir->op2);
- RegSet allow = RSET_GPR;
- Reg dest;
- as->mrm.base = as->mrm.idx = RID_NONE;
- as->mrm.scale = XM_SCALE1;
- as->mrm.ofs = 0;
- if (ra_hasreg(irl->r)) {
- rset_clear(allow, irl->r);
- ra_noweak(as, irl->r);
- as->mrm.base = irl->r;
- if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
- /* The PHI renaming logic does a better job in some cases. */
- if (ra_hasreg(ir->r) &&
- ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
- (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
- return 0;
- if (irref_isk(ir->op2)) {
- as->mrm.ofs = irr->i;
- } else {
- rset_clear(allow, irr->r);
- ra_noweak(as, irr->r);
- as->mrm.idx = irr->r;
- }
- } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
- irref_isk(irr->op2)) {
- Reg idx = ra_alloc1(as, irr->op1, allow);
- rset_clear(allow, idx);
- as->mrm.idx = (uint8_t)idx;
- as->mrm.ofs = IR(irr->op2)->i;
- } else {
- return 0;
- }
- } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
- (irref_isk(ir->op2) || irref_isk(irl->op2))) {
- Reg idx, base = ra_alloc1(as, irl->op1, allow);
- rset_clear(allow, base);
- as->mrm.base = (uint8_t)base;
- if (irref_isk(ir->op2)) {
- as->mrm.ofs = irr->i;
- idx = ra_alloc1(as, irl->op2, allow);
- } else {
- as->mrm.ofs = IR(irl->op2)->i;
- idx = ra_alloc1(as, ir->op2, allow);
- }
- rset_clear(allow, idx);
- as->mrm.idx = (uint8_t)idx;
- } else {
- return 0;
- }
- dest = ra_dest(as, ir, allow);
- emit_mrm(as, XO_LEA, dest, RID_MRM);
- return 1; /* Success. */
-}
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_ADDSD);
- else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
- irt_is64(ir->t) || !asm_lea(as, ir))
- asm_intarith(as, ir, XOg_ADD);
-}
-
-static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
- ra_left(as, dest, ir->op1);
-}
-
-static void asm_min_max(ASMState *as, IRIns *ir, int cc)
-{
- Reg right, dest = ra_dest(as, ir, RSET_GPR);
- IRRef lref = ir->op1, rref = ir->op2;
- if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
- right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
- emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
- emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
- ra_left(as, dest, lref);
-}
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
- REX_64IR(ir, 0), dest, 0, as->mcp, 1);
- ra_left(as, dest, ir->op1);
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
-{
- IRRef rref = ir->op2;
- IRIns *irr = IR(rref);
- Reg dest;
- if (irref_isk(rref)) { /* Constant shifts. */
- int shift;
- dest = ra_dest(as, ir, RSET_GPR);
- shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
- switch (shift) {
- case 0: break;
- case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
- default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
- }
- } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
- Reg right;
- dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
- if (dest == RID_ECX) {
- dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
- emit_rr(as, XO_MOV, RID_ECX, dest);
- }
- right = irr->r;
- if (ra_noreg(right))
- right = ra_allocref(as, rref, RID2RSET(RID_ECX));
- else if (right != RID_ECX)
- ra_scratch(as, RID2RSET(RID_ECX));
- emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
- ra_noweak(as, right);
- if (right != RID_ECX)
- emit_rr(as, XO_MOV, RID_ECX, right);
- }
- ra_left(as, dest, ir->op1);
- /*
- ** Note: avoid using the flags resulting from a shift or rotate!
- ** All of them cause a partial flag stall, except for r,1 shifts
- ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
- */
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-/* Virtual flags for unordered FP comparisons. */
-#define VCC_U 0x1000 /* Unordered. */
-#define VCC_P 0x2000 /* Needs extra CC_P branch. */
-#define VCC_S 0x4000 /* Swap avoids CC_P branch. */
-#define VCC_PS (VCC_P|VCC_S)
-
-/* Map of comparisons to flags. ORDER IR. */
-#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
-static const uint16_t asm_compmap[IR_ABC+1] = {
- /* signed non-eq unsigned flags */
- /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS),
- /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0),
- /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS),
- /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0),
- /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U),
- /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS),
- /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U),
- /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS),
- /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
- /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P),
- /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
-};
-
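The VCC_P/VCC_S flags exist because UCOMISD reports an unordered result (a NaN operand) through the parity flag, and every ordered comparison involving NaN must evaluate to false. A small standalone check of that C-level requirement, which the extra CC_P branch (or the operand swap that avoids it) preserves:

#include <math.h>
#include <stdio.h>

int main(void)
{
  double a = NAN, b = 1.0;
  printf("%d %d %d %d\n", a < b, a > b, a == b, a != b);  /* prints: 0 0 0 1 */
  return 0;
}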
-/* FP and integer comparisons. */
-static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
-{
- if (irt_isnum(ir->t)) {
- IRRef lref = ir->op1;
- IRRef rref = ir->op2;
- Reg left, right;
- MCLabel l_around;
- /*
- ** An extra CC_P branch is required to preserve ordered/unordered
- ** semantics for FP comparisons. This can be avoided by swapping
- ** the operands and inverting the condition (except for EQ and UNE).
- ** So always try to swap if possible.
- **
- ** Another option would be to swap operands to achieve better memory
- ** operand fusion. But it's unlikely that this outweighs the cost
- ** of the extra branches.
- */
- if (cc & VCC_S) { /* Swap? */
- IRRef tmp = lref; lref = rref; rref = tmp;
- cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
- }
- left = ra_alloc1(as, lref, RSET_FPR);
- right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
- l_around = emit_label(as);
- asm_guardcc(as, cc >> 4);
- if (cc & VCC_P) { /* Extra CC_P branch required? */
- if (!(cc & VCC_U)) {
- asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
- } else if (l_around != as->invmcp) {
- emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
- } else {
- /* Patched to mcloop by asm_loop_fixup. */
- as->loopinv = 2;
- if (as->realign)
- emit_sjcc(as, CC_P, as->mcp);
- else
- emit_jcc(as, CC_P, as->mcp);
- }
- }
- emit_mrm(as, XO_UCOMISD, left, right);
- } else {
- IRRef lref = ir->op1, rref = ir->op2;
- IROp leftop = (IROp)(IR(lref)->o);
- Reg r64 = REX_64IR(ir, 0);
- int32_t imm = 0;
- lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
- irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
- /* Swap constants (only for ABC) and fusable loads to the right. */
- if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
- if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
- else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */
- lref = ir->op2; rref = ir->op1;
- }
- if (asm_isk32(as, rref, &imm)) {
- IRIns *irl = IR(lref);
- /* Check whether we can use test ins. Not for unsigned, since CF=0. */
- int usetest = (imm == 0 && (cc & 0xa) != 0x2);
- if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
- /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
- Reg right, left = RID_NONE;
- RegSet allow = RSET_GPR;
- if (!asm_isk32(as, irl->op2, &imm)) {
- left = ra_alloc1(as, irl->op2, allow);
- rset_clear(allow, left);
- } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */
- IRIns *irll = IR(irl->op1);
- if (opisfusableload((IROp)irll->o) &&
- (irt_isi8(irll->t) || irt_isu8(irll->t))) {
- IRType1 origt = irll->t; /* Temporarily flip types. */
- irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
- as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
- right = asm_fuseload(as, irl->op1, RSET_GPR);
- as->curins++;
- irll->t = origt;
- if (right != RID_MRM) goto test_nofuse;
- /* Fusion succeeded, emit test byte mrm, imm8. */
- asm_guardcc(as, cc);
- emit_i8(as, (imm & 0xff));
- emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
- return;
- }
- }
- as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
- right = asm_fuseloadm(as, irl->op1, allow, r64);
- as->curins++; /* Undo the above. */
- test_nofuse:
- asm_guardcc(as, cc);
- if (ra_noreg(left)) {
- emit_i32(as, imm);
- emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
- } else {
- emit_mrm(as, XO_TEST, r64 + left, right);
- }
- } else {
- Reg left;
- if (opisfusableload((IROp)irl->o) &&
- ((irt_isu8(irl->t) && checku8(imm)) ||
- ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
- (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
- /* Only the IRT_INT case is fused by asm_fuseload.
- ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
- ** are handled here.
- ** Note that cmp word [mem], imm16 should not be generated,
- ** since it has a length-changing prefix. Compares of a word
- ** against a sign-extended imm8 are ok, however.
- */
- IRType1 origt = irl->t; /* Temporarily flip types. */
- irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
- left = asm_fuseload(as, lref, RSET_GPR);
- irl->t = origt;
- if (left == RID_MRM) { /* Fusion succeeded? */
- if (irt_isu8(irl->t) || irt_isu16(irl->t))
- cc >>= 4; /* Need unsigned compare. */
- asm_guardcc(as, cc);
- emit_i8(as, imm);
- emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
- XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
- return;
- } /* Otherwise handle register case as usual. */
- } else {
- left = asm_fuseloadm(as, lref,
- irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
- }
- asm_guardcc(as, cc);
- if (usetest && left != RID_MRM) {
- /* Use test r,r instead of cmp r,0. */
- x86Op xo = XO_TEST;
- if (irt_isu8(ir->t)) {
- lua_assert(ir->o == IR_EQ || ir->o == IR_NE);
- xo = XO_TESTb;
- if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
- if (LJ_64) {
- left |= FORCE_REX;
- } else {
- emit_i32(as, 0xff);
- emit_mrm(as, XO_GROUP3, XOg_TEST, left);
- return;
- }
- }
- }
- emit_rr(as, xo, r64 + left, left);
- if (irl+1 == ir) /* Referencing previous ins? */
- as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
- } else {
- emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
- }
- }
- } else {
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
- asm_guardcc(as, cc);
- emit_mrm(as, XO_CMP, r64 + left, right);
- }
- }
-}
-
-#if LJ_32 && LJ_HASFFI
-/* 64 bit integer comparisons in 32 bit mode. */
-static void asm_comp_int64(ASMState *as, IRIns *ir)
-{
- uint32_t cc = asm_compmap[(ir-1)->o];
- RegSet allow = RSET_GPR;
- Reg lefthi = RID_NONE, leftlo = RID_NONE;
- Reg righthi = RID_NONE, rightlo = RID_NONE;
- MCLabel l_around;
- x86ModRM mrm;
-
- as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
-
- /* Allocate/fuse hiword operands. */
- if (irref_isk(ir->op2)) {
- lefthi = asm_fuseload(as, ir->op1, allow);
- } else {
- lefthi = ra_alloc1(as, ir->op1, allow);
- rset_clear(allow, lefthi);
- righthi = asm_fuseload(as, ir->op2, allow);
- if (righthi == RID_MRM) {
- if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
- if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
- } else {
- rset_clear(allow, righthi);
- }
- }
- mrm = as->mrm; /* Save state for hiword instruction. */
-
- /* Allocate/fuse loword operands. */
- if (irref_isk((ir-1)->op2)) {
- leftlo = asm_fuseload(as, (ir-1)->op1, allow);
- } else {
- leftlo = ra_alloc1(as, (ir-1)->op1, allow);
- rset_clear(allow, leftlo);
- rightlo = asm_fuseload(as, (ir-1)->op2, allow);
- }
-
- /* All register allocations must be performed _before_ this point. */
- l_around = emit_label(as);
- as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
-
- /* Loword comparison and branch. */
- asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
- if (ra_noreg(rightlo)) {
- int32_t imm = IR((ir-1)->op2)->i;
- if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
- emit_rr(as, XO_TEST, leftlo, leftlo);
- else
- emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
- } else {
- emit_mrm(as, XO_CMP, leftlo, rightlo);
- }
-
- /* Hiword comparison and branches. */
- if ((cc & 15) != CC_NE)
- emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
- if ((cc & 15) != CC_E)
- asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
- as->mrm = mrm; /* Restore state. */
- if (ra_noreg(righthi)) {
- int32_t imm = IR(ir->op2)->i;
- if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
- emit_rr(as, XO_TEST, lefthi, lefthi);
- else
- emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
- } else {
- emit_mrm(as, XO_CMP, lefthi, righthi);
- }
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_32 && LJ_HASFFI
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
- if (usehi || uselo) {
- if (irt_isfp(ir->t))
- asm_conv_fp_int64(as, ir);
- else
- asm_conv_int64_fp(as, ir);
- }
- as->curins--; /* Always skip the CONV. */
- return;
- } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
- asm_comp_int64(as, ir);
- return;
- } else if ((ir-1)->o == IR_XSTORE) {
- if ((ir-1)->r != RID_SINK)
- asm_fxstore(as, ir);
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
- case IR_ADD:
- as->flagmcp = NULL;
- as->curins--;
- asm_intarith(as, ir, XOg_ADC);
- asm_intarith(as, ir-1, XOg_ADD);
- break;
- case IR_SUB:
- as->flagmcp = NULL;
- as->curins--;
- asm_intarith(as, ir, XOg_SBB);
- asm_intarith(as, ir-1, XOg_SUB);
- break;
- case IR_NEG: {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- emit_rr(as, XO_GROUP3, XOg_NEG, dest);
- emit_i8(as, 0);
- emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
- ra_left(as, dest, ir->op1);
- as->curins--;
- asm_neg_not(as, ir-1, XOg_NEG);
- break;
- }
- case IR_CALLN:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
- case IR_CNEWI:
- /* Nothing to do here. Handled by CNEWI itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- /* Try to get an unused temp. register, otherwise spill/restore eax. */
- Reg pbase = irp ? irp->r : RID_BASE;
- Reg r = allow ? rset_pickbot(allow) : RID_EAX;
- emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
- if (allow == RSET_EMPTY) /* Restore temp. register. */
- emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
- else
- ra_modified(as, r);
- emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
- if (ra_hasreg(pbase) && pbase != r)
- emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
- else
- emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
- ptr2addr(&J2G(as->J)->jit_base));
- emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
- emit_getgl(as, r, jit_L);
- if (allow == RSET_EMPTY) /* Spill temp. register. */
- emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
- } else {
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
- (LJ_DUALNUM && irt_isinteger(ir->t)));
- if (!irref_isk(ref)) {
- Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
- emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
- } else if (!irt_ispri(ir->t)) {
- emit_movmroi(as, RID_BASE, ofs, ir->i);
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s != 0) /* Do not overwrite link to previous frame. */
- emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
- } else {
- if (!(LJ_64 && irt_islightud(ir->t)))
- emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
- }
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
- emit_rr(as, XO_TEST, RID_RET, RID_RET);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- tmp = ra_releasetmp(as, ASMREF_TMP1);
- emit_loada(as, tmp, J2G(as->J));
- emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_sjcc(as, CC_B, l_end);
- emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
- emit_getgl(as, tmp, gc.total);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- if (as->realign) { /* Realigned loops use short jumps. */
- as->realign = NULL; /* Stop another retry. */
- lua_assert(((intptr_t)target & 15) == 0);
- if (as->loopinv) { /* Inverted loop branch? */
- p -= 5;
- p[0] = XI_JMP;
- lua_assert(target - p >= -128);
- p[-1] = (MCode)(target - p); /* Patch sjcc. */
- if (as->loopinv == 2)
- p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
- } else {
- lua_assert(target - p >= -128);
- p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
- p[-2] = XI_JMPs;
- }
- } else {
- MCode *newloop;
- p[-5] = XI_JMP;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guardcc already inverted the jcc and patched the jmp. */
- p -= 5;
- newloop = target+4;
- *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
- if (as->loopinv == 2) {
- *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
- newloop = target+8;
- }
- } else { /* Otherwise just patch jmp. */
- *(int32_t *)(p-4) = (int32_t)(target - p);
- newloop = target+3;
- }
- /* Realign small loops and shorten the loop branch. */
- if (newloop >= p - 128) {
- as->realign = newloop; /* Force a retry and remember alignment. */
- as->curins = as->stopins; /* Abort asm_trace now. */
- as->T->nins = as->orignins; /* Remove any added renames. */
- }
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (r != RID_BASE)
- emit_rr(as, XO_MOV, r, RID_BASE);
- }
-}
-
-/* Coalesce or reload BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (irp->r == r) {
- rset_clear(allow, r); /* Mark same BASE register as coalesced. */
- } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
- rset_clear(allow, irp->r);
- emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */
- } else {
- emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
- }
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
- MCode *p = as->mctop;
- MCode *target, *q;
- int32_t spadj = as->T->spadjust;
- if (spadj == 0) {
- p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
- } else {
- MCode *p1;
- /* Patch stack adjustment. */
- if (checki8(spadj)) {
- p -= 3;
- p1 = p-6;
- *p1 = (MCode)spadj;
- } else {
- p1 = p-9;
- *(int32_t *)p1 = spadj;
- }
- if ((as->flags & JIT_F_LEA_AGU)) {
-#if LJ_64
- p1[-4] = 0x48;
-#endif
- p1[-3] = (MCode)XI_LEA;
- p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
- p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- } else {
-#if LJ_64
- p1[-3] = 0x48;
-#endif
- p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
- p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
- }
- }
- /* Patch exit branch. */
- target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = XI_JMP;
- /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
- for (q = as->mctop-1; q >= p; q--)
- *q = XI_NOP;
- as->mctop = p;
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- MCode *p = as->mctop;
- /* Realign and leave room for backwards loop branch or exit branch. */
- if (as->realign) {
- int i = ((int)(intptr_t)as->realign) & 15;
- /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
- while (i-- > 0)
- *--p = XI_NOP;
- as->mctop = p;
- p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
- } else {
- p -= 5; /* Space for exit branch (near jmp). */
- }
- if (as->loopref) {
- as->invmcp = as->mcp = p;
- } else {
- /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
- as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
- as->invmcp = NULL;
- }
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_EQ: case IR_NE: case IR_ABC:
- asm_comp(as, ir, asm_compmap[ir->o]);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
- case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
- case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;
-
- case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
- case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
- case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
- case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
- case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;
-
- /* Arithmetic ops. */
- case IR_ADD: asm_add(as, ir); break;
- case IR_SUB:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_SUBSD);
- else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
- asm_intarith(as, ir, XOg_SUB);
- break;
- case IR_MUL:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_MULSD);
- else
- asm_intarith(as, ir, XOg_X_IMUL);
- break;
- case IR_DIV:
-#if LJ_64 && LJ_HASFFI
- if (!irt_isnum(ir->t))
- asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
- IRCALL_lj_carith_divu64);
- else
-#endif
- asm_fparith(as, ir, XO_DIVSD);
- break;
- case IR_MOD:
-#if LJ_64 && LJ_HASFFI
- if (!irt_isint(ir->t))
- asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
- IRCALL_lj_carith_modu64);
- else
-#endif
- asm_intmod(as, ir);
- break;
-
- case IR_NEG:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_XORPS);
- else
- asm_neg_not(as, ir, XOg_NEG);
- break;
- case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;
-
- case IR_MIN:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_MINSD);
- else
- asm_min_max(as, ir, CC_G);
- break;
- case IR_MAX:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_MAXSD);
- else
- asm_min_max(as, ir, CC_L);
- break;
-
- case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
- asm_fpmath(as, ir);
- break;
- case IR_POW:
-#if LJ_64 && LJ_HASFFI
- if (!irt_isnum(ir->t))
- asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
- IRCALL_lj_carith_powu64);
- else
-#endif
- asm_fppowi(as, ir);
- break;
-
- /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
- case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
- case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
- case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- int nslots;
- asm_collectargs(as, ir, ci, args);
- nslots = asm_count_call_slots(as, ci, args);
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
-#if LJ_64
- return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
-#else
- return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
-#endif
-}
-
-/* Target-specific setup. */
-static void asm_setup_target(ASMState *as)
-{
- asm_exitstub_setup(as, as->T->nsnap);
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- MSize len = T->szmcode;
- MCode *px = exitstub_addr(J, exitno) - 6;
- MCode *pe = p+len-6;
- uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
- if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
- *(int32_t *)(p+len-4) = jmprel(p+len, target);
- /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
- for (; p < pe; p++)
- if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) {
- p += LJ_64 ? 11 : 10;
- break;
- }
- lua_assert(p < pe);
- for (; p < pe; p++) {
- if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) {
- *(int32_t *)(p+2) = jmprel(p+6, target);
- p += 5;
- }
- }
- lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
- lj_mcode_patch(J, mcarea, 1);
-}
-
+/*
+** x86/x64 IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Generate an exit stub group at the bottom of the reserved MCode memory. */
+static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
+{
+ ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
+ MCode *mxp = as->mcbot;
+ MCode *mxpstart = mxp;
+ if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
+ asm_mclimit(as);
+ /* Push low byte of exitno for each exit stub. */
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
+ for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
+ *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
+ }
+ /* Push the high byte of the exitno for each exit stub group. */
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
+ /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
+ *mxp++ = XI_MOVmi;
+ *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
+ *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ *mxp++ = 2*sizeof(void *);
+ *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
+ /* Jump to exit handler which fills in the ExitState. */
+ *mxp++ = XI_JMP; mxp += 4;
+ *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
+ /* Commit the code for this group (even if assembly fails later on). */
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ return mxpstart;
+}
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
+ lj_trace_err(as->J, LJ_TRERR_SNAPOV);
+ for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
+ if (as->J->exitstubgroup[i] == NULL)
+ as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
+}
+
+/* Emit conditional branch to exit for guard.
+** It's important to emit this *after* all registers have been allocated,
+** because rematerializations may invalidate the flags.
+*/
+static void asm_guardcc(ASMState *as, int cc)
+{
+ MCode *target = exitstub_addr(as->J, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *(int32_t *)(p+1) = jmprel(p+5, target);
+ target = p;
+ cc ^= 1;
+ if (as->realign) {
+ emit_sjcc(as, cc, target);
+ return;
+ }
+ }
+ emit_jcc(as, cc, target);
+}
+
+/* -- Memory operand fusion ----------------------------------------------- */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if a reference is a signed 32 bit constant. */
+static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
+{
+ if (irref_isk(ref)) {
+ IRIns *ir = IR(ref);
+ if (ir->o != IR_KINT64) {
+ *k = ir->i;
+ return 1;
+ } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
+ *k = (int32_t)ir_kint64(ir)->u64;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Check if there's no conflicting instruction between curins and ref.
+** Also avoid fusing loads if there are multiple references.
+*/
+static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref) {
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
+ return 0;
+ }
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse array base into memory operand. */
+static IRRef asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *irb = IR(ref);
+ as->mrm.ofs = 0;
+ if (irb->o == IR_FLOAD) {
+ IRIns *ira = IR(irb->op1);
+ lua_assert(irb->op2 == IRFL_TAB_ARRAY);
+ /* We can avoid the FLOAD of t->array for colocated arrays. */
+ if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
+ as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */
+ return irb->op1; /* Table obj. */
+ }
+ } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
+ /* Fuse base offset (vararg load). */
+ as->mrm.ofs = IR(irb->op2)->i;
+ return irb->op1;
+ }
+ return ref; /* Otherwise use the given array base. */
+}
+
+/* Fuse array reference into memory operand. */
+static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irx;
+ lua_assert(ir->o == IR_AREF);
+ as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
+ irx = IR(ir->op2);
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs += 8*irx->i;
+ as->mrm.idx = RID_NONE;
+ } else {
+ rset_clear(allow, as->mrm.base);
+ as->mrm.scale = XM_SCALE8;
+ /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
+ ** Doesn't help much without ABCelim, but reduces register pressure.
+ */
+ if (!LJ_64 && /* Has bad effects with negative index on x64. */
+ mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
+ irx->o == IR_ADD && irref_isk(irx->op2)) {
+ as->mrm.ofs += 8*IR(irx->op2)->i;
+ as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
+ } else {
+ as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
+ }
+ }
+}
+
+/* Fuse array/hash/upvalue reference into memory operand.
+** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
+** pass the final allow mask, excluding any GPRs used for other inputs.
+** In particular: 2-operand GPR instructions need to call ra_dest() first!
+*/
+static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ switch ((IROp)ir->o) {
+ case IR_AREF:
+ if (mayfuse(as, ref)) {
+ asm_fusearef(as, ir, allow);
+ return;
+ }
+ break;
+ case IR_HREFK:
+ if (mayfuse(as, ref)) {
+ as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
+ as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ as->mrm.idx = RID_NONE;
+ return;
+ }
+ break;
+ case IR_UREFC:
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
+ as->mrm.ofs = ptr2addr(&uv->tv);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ return;
+ }
+ break;
+ default:
+ lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
+ ir->o == IR_KKPTR);
+ break;
+ }
+ }
+ as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+ as->mrm.ofs = 0;
+ as->mrm.idx = RID_NONE;
+}
+
+/* Fuse FLOAD/FREF reference into memory operand. */
+static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
+{
+ lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
+ as->mrm.ofs = field_ofs[ir->op2];
+ as->mrm.idx = RID_NONE;
+ if (irref_isk(ir->op1)) {
+ as->mrm.ofs += IR(ir->op1)->i;
+ as->mrm.base = RID_NONE;
+ } else {
+ as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
+ }
+}
+
+/* Fuse string reference into memory operand. */
+static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irr;
+ lua_assert(ir->o == IR_STRREF);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ as->mrm.scale = XM_SCALE1;
+ as->mrm.ofs = sizeof(GCstr);
+ if (irref_isk(ir->op1)) {
+ as->mrm.ofs += IR(ir->op1)->i;
+ } else {
+ Reg r = ra_alloc1(as, ir->op1, allow);
+ rset_clear(allow, r);
+ as->mrm.base = (uint8_t)r;
+ }
+ irr = IR(ir->op2);
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs += irr->i;
+ } else {
+ Reg r;
+ /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
+ if (!LJ_64 && /* Has bad effects with negative index on x64. */
+ mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
+ as->mrm.ofs += IR(irr->op2)->i;
+ r = ra_alloc1(as, irr->op1, allow);
+ } else {
+ r = ra_alloc1(as, ir->op2, allow);
+ }
+ if (as->mrm.base == RID_NONE)
+ as->mrm.base = (uint8_t)r;
+ else
+ as->mrm.idx = (uint8_t)r;
+ }
+}
+
+static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ as->mrm.idx = RID_NONE;
+ if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+ as->mrm.ofs = ir->i;
+ as->mrm.base = RID_NONE;
+ } else if (ir->o == IR_STRREF) {
+ asm_fusestrref(as, ir, allow);
+ } else {
+ as->mrm.ofs = 0;
+ if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
+ /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
+ IRIns *irx;
+ IRRef idx;
+ Reg r;
+ if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. */
+ ref = ir->op1;
+ ir = IR(ref);
+ if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
+ goto noadd;
+ }
+ as->mrm.scale = XM_SCALE1;
+ idx = ir->op1;
+ ref = ir->op2;
+ irx = IR(idx);
+ if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */
+ idx = ir->op2;
+ ref = ir->op1;
+ irx = IR(idx);
+ }
+ if (canfuse(as, irx) && ra_noreg(irx->r)) {
+ if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
+ /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
+ idx = irx->op1;
+ as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
+ } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
+ /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
+ idx = irx->op1;
+ as->mrm.scale = XM_SCALE2;
+ }
+ }
+ r = ra_alloc1(as, idx, allow);
+ rset_clear(allow, r);
+ as->mrm.idx = (uint8_t)r;
+ }
+ noadd:
+ as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+ }
+}
+
+/* Fuse load into memory operand. */
+static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ if (allow != RSET_EMPTY) { /* Fast path. */
+ ra_noweak(as, ir->r);
+ return ir->r;
+ }
+ fusespill:
+ /* Force a spill if only memory operands are allowed (asm_x87load). */
+ as->mrm.base = RID_ESP;
+ as->mrm.ofs = ra_spill(as, ir);
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ if (ir->o == IR_KNUM) {
+ RegSet avail = as->freeset & ~as->modset & RSET_FPR;
+ lua_assert(allow != RSET_EMPTY);
+ if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
+ as->mrm.ofs = ptr2addr(ir_knum(ir));
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ } else if (mayfuse(as, ref)) {
+ RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
+ if (ir->o == IR_SLOAD) {
+ if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
+ noconflict(as, ref, IR_RETF, 0)) {
+ as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
+ as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_FLOAD) {
+ /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
+ if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
+ noconflict(as, ref, IR_FSTORE, 0)) {
+ asm_fusefref(as, ir, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
+ if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
+ asm_fuseahuref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_XLOAD) {
+ /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
+ ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
+ */
+ if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
+ noconflict(as, ref, IR_XSTORE, 0)) {
+ asm_fusexref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_VLOAD) {
+ asm_fuseahuref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ }
+ if (!(as->freeset & allow) &&
+ (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
+ goto fusespill;
+ return ra_allocref(as, ref, allow);
+}
+
+#if LJ_64
+/* Don't fuse a 32 bit load into a 64 bit operation. */
+static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
+{
+ if (is64 && !irt_is64(IR(ref)->t))
+ return ra_alloc1(as, ref, allow);
+ return asm_fuseload(as, ref, allow);
+}
+#else
+#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow))
+#endif
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Count the required number of stack slots for a call. */
+static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t i, nargs = CCI_NARGS(ci);
+ int nslots = 0;
+#if LJ_64
+ if (LJ_ABI_WIN) {
+ nslots = (int)(nargs*2); /* Only matters for more than four args. */
+ } else {
+ int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots += 2;
+ }
+ }
+#else
+ int ngpr = 0;
+ if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
+ ngpr = 2;
+ else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
+ ngpr = 1;
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+#endif
+ return nslots;
+}
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_NARGS(ci);
+ int32_t ofs = STACKARG_OFS;
+#if LJ_64
+ uint32_t gprs = REGARG_GPRS;
+ Reg fpr = REGARG_FIRSTFPR;
+#if !LJ_ABI_WIN
+ MCode *patchnfpr = NULL;
+#endif
+#else
+ uint32_t gprs = 0;
+ if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
+ if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
+ gprs = (REGARG_GPRS & 31);
+ else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
+ gprs = REGARG_GPRS;
+ }
+#endif
+ if ((void *)ci->func)
+ emit_call(as, ci->func);
+#if LJ_64
+ if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */
+#if LJ_ABI_WIN
+ for (n = 0; n < 4 && n < nargs; n++) {
+ IRIns *ir = IR(args[n]);
+ if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */
+ emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
+ ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */
+ }
+#else
+ patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */
+ *--as->mcp = XI_MOVrib | RID_EAX;
+#endif
+ }
+#endif
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+ Reg r;
+#if LJ_64 && LJ_ABI_WIN
+ /* Windows/x64 argument registers are strictly positional. */
+ r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
+ fpr++; gprs >>= 5;
+#elif LJ_64
+ /* POSIX/x64 argument registers are used in order of appearance. */
+ if (irt_isfp(ir->t)) {
+ r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
+ } else {
+ r = gprs & 31; gprs >>= 5;
+ }
+#else
+ if (ref && irt_isfp(ir->t)) {
+ r = 0;
+ } else {
+ r = gprs & 31; gprs >>= 5;
+ if (!ref) continue;
+ }
+#endif
+ if (r) { /* Argument is in a register. */
+ if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
+#if LJ_64
+ if (ir->o == IR_KINT64)
+ emit_loadu64(as, r, ir_kint64(ir)->u64);
+ else
+#endif
+ emit_loadi(as, r, ir->i);
+ } else {
+ lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ emit_movrr(as, ir, r, ir->r);
+ } else {
+ ra_allocref(as, ref, RID2RSET(r));
+ }
+ }
+ } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */
+ lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */
+ if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
+ /* Split stores for unaligned FP consts. */
+ emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
+ emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
+ } else {
+ r = ra_alloc1(as, ref, RSET_FPR);
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
+ r, RID_ESP, ofs);
+ }
+ ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
+ } else { /* Non-FP argument is on stack. */
+ if (LJ_32 && ref < ASMREF_TMP1) {
+ emit_movmroi(as, RID_ESP, ofs, ir->i);
+ } else {
+ r = ra_alloc1(as, ref, RSET_GPR);
+ emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
+ }
+ ofs += sizeof(intptr_t);
+ }
+ checkmclim(as);
+ }
+#if LJ_64 && !LJ_ABI_WIN
+ if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
+#endif
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = (LJ_32 && (ir+1)->o == IR_HIOP);
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ if (irt_isfp(ir->t)) {
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+#if LJ_64
+ if ((ci->flags & CCI_CASTU64)) {
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */
+ }
+ if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+#else
+ /* Number result is in x87 st0 for x86 calling convention. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
+ dest, RID_ESP, ofs);
+ }
+ if ((ci->flags & CCI_CASTU64)) {
+ emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
+ emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
+ } else {
+ emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
+ irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
+ }
+#endif
+#if LJ_32
+ } else if (hiop) {
+ ra_destpair(as, ir);
+#endif
+ } else {
+ lua_assert(!irt_ispri(ir->t));
+ ra_destreg(as, ir, RID_RET);
+ }
+ } else if (LJ_32 && irt_isfp(ir->t)) {
+ emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */
+ }
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+/* Return a constant function pointer or NULL for indirect calls. */
+static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
+{
+#if LJ_32
+ UNUSED(as);
+ if (irref_isk(func))
+ return (void *)irf->i;
+#else
+ if (irref_isk(func)) {
+ MCode *p;
+ if (irf->o == IR_KINT64)
+ p = (MCode *)(void *)ir_k64(irf)->u64;
+ else
+ p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
+ if (p - as->mcp == (int32_t)(p - as->mcp))
+ return p; /* Call target is still in +-2GB range. */
+ /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
+ }
+#endif
+ return NULL;
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ int32_t spadj = 0;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+#if LJ_32
+ /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
+ if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
+ spadj = 4 * asm_count_call_slots(as, &ci, args);
+#endif
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ ci.func = (ASMFunction)asm_callx_func(as, irf, func);
+ if (!(void *)ci.func) {
+ /* Use a (hoistable) non-scratch register for indirect calls. */
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ Reg r = ra_alloc1(as, func, allow);
+ if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */
+ emit_rr(as, XO_GROUP5, XOg_CALL, r);
+ } else if (LJ_32) {
+ emit_spsub(as, spadj);
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guardcc(as, CC_P);
+ asm_guardcc(as, CC_NE);
+ emit_rr(as, XO_UCOMISD, left, tmp);
+ emit_rr(as, XO_CVTSI2SD, tmp, dest);
+ if (!(as->flags & JIT_F_SPLIT_XMM))
+ emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
+ emit_rr(as, XO_CVTTSD2SI, dest, left);
+ /* Can't fuse since left is needed twice. */
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp = ra_noreg(IR(ir->op1)->r) ?
+ ra_alloc1(as, ir->op1, RSET_FPR) :
+ ra_scratch(as, RSET_FPR);
+ Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
+ emit_rr(as, XO_MOVDto, tmp, dest);
+ emit_mrm(as, XO_ADDSD, tmp, right);
+ ra_left(as, tmp, ir->op1);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lua_assert(irt_type(ir->t) != st);
+ lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ Reg left = asm_fuseload(as, lref, RSET_FPR);
+ emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
+ if (left == dest) return; /* Avoid the XO_XORPS. */
+ } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */
+ /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
+ cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
+ Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ if (irt_isfloat(ir->t))
+ emit_rr(as, XO_CVTSD2SS, dest, dest);
+ emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */
+ emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */
+ emit_loadn(as, bias, k);
+ emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
+ return;
+ } else { /* Integer to FP conversion. */
+ Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
+ ra_alloc1(as, lref, RSET_GPR) :
+ asm_fuseloadm(as, lref, RSET_GPR, st64);
+ if (LJ_64 && st == IRT_U64) {
+ MCLabel l_end = emit_label(as);
+ const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
+ emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */
+ }
+ emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
+ dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
+ }
+ if (!(as->flags & JIT_F_SPLIT_XMM))
+ emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lua_assert(irt_isint(ir->t) && st == IRT_NUM);
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ x86Op op = st == IRT_NUM ?
+ ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
+ ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
+ if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
+ /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
+ /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
+ Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
+ ra_scratch(as, RSET_FPR);
+ MCLabel l_end = emit_label(as);
+ if (LJ_32)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
+ emit_rr(as, op, dest|REX_64, tmp);
+ if (st == IRT_NUM)
+ emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
+ LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
+ else
+ emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
+ LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */
+ emit_rr(as, op, dest|REX_64, tmp);
+ ra_left(as, tmp, lref);
+ } else {
+ Reg left = asm_fuseload(as, lref, RSET_FPR);
+ if (LJ_64 && irt_isu32(ir->t))
+ emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */
+ emit_mrm(as, op,
+ dest|((LJ_64 &&
+ (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
+ left);
+ }
+ }
+ } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ RegSet allow = RSET_GPR;
+ x86Op op;
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
+ if (st == IRT_I8) {
+ op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
+ } else if (st == IRT_U8) {
+ op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
+ } else if (st == IRT_I16) {
+ op = XO_MOVSXw;
+ } else {
+ op = XO_MOVZXw;
+ }
+ left = asm_fuseload(as, lref, allow);
+ /* Add extra MOV if source is already in wrong register. */
+ if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
+ Reg tmp = ra_scratch(as, allow);
+ emit_rr(as, op, dest, tmp);
+ emit_rr(as, XO_MOV, tmp, left);
+ } else {
+ emit_mrm(as, op, dest, left);
+ }
+ } else { /* 32/64 bit integer conversions. */
+ if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else if (irt_is64(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st64 || !(ir->op2 & IRCONV_SEXT)) {
+ /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* 32 to 64 bit sign extension. */
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st64) {
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ /* This is either a 32 bit reg/reg mov which zeroes the hiword
+ ** or a load of the loword from a 64 bit address.
+ */
+ emit_mrm(as, XO_MOV, dest, left);
+ } else { /* 32/32 bit no-op (cast). */
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+ }
+}
+
+#if LJ_32 && LJ_HASFFI
+/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */
+
+/* 64 bit integer to FP conversion in 32 bit mode. */
+static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
+{
+ Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
+ dest, RID_ESP, ofs);
+ }
+ emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
+ irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
+ if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
+ /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
+ MCLabel l_end = emit_label(as);
+ emit_rma(as, XO_FADDq, XOg_FADDq,
+ lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */
+ } else {
+ lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
+ }
+ emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
+ /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
+ emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
+ emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
+}
+
+/* FP to 64 bit integer conversion in 32 bit mode. */
+static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ Reg lo, hi;
+ lua_assert(st == IRT_NUM || st == IRT_FLOAT);
+ lua_assert(dt == IRT_I64 || dt == IRT_U64);
+ lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
+ hi = ra_dest(as, ir, RSET_GPR);
+ lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
+ if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
+ /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
+ if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */
+ emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
+ emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
+ emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
+ }
+ if (dt == IRT_U64) {
+ /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
+ MCLabel l_pop, l_end = emit_label(as);
+ emit_x87op(as, XI_FPOP);
+ l_pop = emit_label(as);
+ emit_sjmp(as, l_end);
+ emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
+ if ((as->flags & JIT_F_SSE3))
+ emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
+ else
+ emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
+ emit_rma(as, XO_FADDq, XOg_FADDq,
+ lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
+ emit_sjcc(as, CC_NS, l_pop);
+ emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */
+ }
+ emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
+ if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */
+ emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
+ } else { /* Otherwise set FPU rounding mode to truncate before the store. */
+ emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
+ emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
+ emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
+ emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
+ emit_loadi(as, lo, 0xc00);
+ emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
+ }
+ if (dt == IRT_U64)
+ emit_x87op(as, XI_FDUP);
+ emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
+ st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
+ asm_fuseload(as, ir->op1, RSET_EMPTY));
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ /* Force a spill slot for the destination register (if any). */
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ RegSet drop = RSET_SCRATCH;
+ if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
+ rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */
+ ra_evictset(as, drop);
+ asm_guardcc(as, CC_E);
+ emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
+ RID_ESP, sps_scale(ir->s));
+}
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRRef args[2];
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (irt_isnum(irl->t)) {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
+ args[1] = ASMREF_TMP1; /* const lua_Number * */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
+ RID_ESP, ra_spill(as, irl));
+ } else {
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
+ args[1] = ir->op1; /* int32_t k */
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ }
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusearef(as, ir, RSET_GPR);
+ if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+ else if (as->mrm.base != dest)
+ emit_rr(as, XO_MOV, dest, as->mrm.base);
+}
+
+/* Merge NE(HREF, niltv) check. */
+static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
+{
+ /* Assumes nothing else generates NE of HREF. */
+ if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
+ ra_hasreg(ir->r)) {
+ MCode *p = as->mcp;
+ p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
+ /* Ensure no loop branch inversion happened. */
+ if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
+ as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */
+ return p + *(int32_t *)(p-4); /* Return exit address. */
+ }
+ }
+ return NULL;
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir)
+{
+ MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */
+ RegSet allow = RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, tmp = RID_NONE;
+ IRIns *irkey = IR(ir->op2);
+ int isk = irref_isk(ir->op2);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ if (!isk) {
+ rset_clear(allow, tab);
+ key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
+ if (!irt_isstr(kt))
+ tmp = ra_scratch(as, rset_exclude(allow, key));
+ }
+
+ /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
+ l_end = emit_label(as);
+ if (nilexit && ir[1].o == IR_NE) {
+ emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */
+ nilexit = NULL;
+ } else {
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+ }
+
+ /* Follow hash chain until the end. */
+ l_loop = emit_sjcc_label(as, CC_NZ);
+ emit_rr(as, XO_TEST, dest, dest);
+ emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (nilexit)
+ emit_jcc(as, CC_E, nilexit);
+ else
+ emit_sjcc(as, CC_E, l_end);
+ if (irt_isnum(kt)) {
+ if (isk) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
+ (int32_t)ir_knum(irkey)->u32.lo);
+ emit_sjcc(as, CC_NE, l_next);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
+ (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ emit_sjcc(as, CC_P, l_next);
+ emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
+ emit_sjcc(as, CC_AE, l_next);
+ /* The type check avoids NaN penalties and complaints from Valgrind. */
+#if LJ_64
+ emit_u32(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
+#else
+ emit_i8(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
+#endif
+ }
+#if LJ_64
+ } else if (irt_islightud(kt)) {
+ emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
+#endif
+ } else {
+ if (!irt_ispri(kt)) {
+ lua_assert(irt_isaddr(kt));
+ if (isk)
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
+ ptr2addr(ir_kgc(irkey)));
+ else
+ emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
+ emit_sjcc(as, CC_NE, l_next);
+ }
+ lua_assert(!irt_isnil(kt));
+ emit_i8(as, irt_toitype(kt));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
+ }
+ emit_sfixup(as, l_loop);
+ checkmclim(as);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = isk ? ir_khash(irkey) : 1;
+ if (khash == 0) {
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
+ } else {
+ emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
+ if ((as->flags & JIT_F_PREFER_IMUL)) {
+ emit_i8(as, sizeof(Node));
+ emit_rr(as, XO_IMULi8, dest, dest);
+ } else {
+ emit_shifti(as, XOg_SHL, dest, 3);
+ emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
+ }
+ if (isk) {
+ emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
+ } else if (irt_isstr(kt)) {
+ emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
+ } else { /* Must match with hashrot() in lj_tab.c. */
+ emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
+ emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
+ emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
+ emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
+ emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
+ emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
+ emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
+ emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
+ if (irt_isnum(kt)) {
+ emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
+#if LJ_64
+ emit_shifti(as, XOg_SHR|REX_64, dest, 32);
+ emit_rr(as, XO_MOV, tmp, dest);
+ emit_rr(as, XO_MOVDto, key|REX_64, dest);
+#else
+ emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
+ emit_rr(as, XO_MOVDto, key, tmp);
+#endif
+ } else {
+ emit_rr(as, XO_MOV, tmp, key);
+ emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+#if !LJ_64
+ MCLabel l_exit;
+#endif
+ lua_assert(ofs % sizeof(Node) == 0);
+ if (ra_hasreg(dest)) {
+ if (ofs != 0) {
+ if (dest == node && !(as->flags & JIT_F_LEA_AGU))
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
+ else
+ emit_rmro(as, XO_LEA, dest, node, ofs);
+ } else if (dest != node) {
+ emit_rr(as, XO_MOV, dest, node);
+ }
+ }
+ asm_guardcc(as, CC_NE);
+#if LJ_64
+ if (!irt_ispri(irkey->t)) {
+ Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
+ emit_rmro(as, XO_CMP, key|REX_64, node,
+ ofs + (int32_t)offsetof(Node, key.u64));
+ lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
+ ((uint64_t)irt_toitype(irkey->t) << 32) |
+ (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
+ } else {
+ lua_assert(!irt_isnil(irkey->t));
+ emit_i8(as, irt_toitype(irkey->t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+ }
+#else
+ l_exit = emit_label(as);
+ if (irt_isnum(irkey->t)) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.u32.lo),
+ (int32_t)ir_knum(irkey)->u32.lo);
+ emit_sjcc(as, CC_NE, l_exit);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.u32.hi),
+ (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ if (!irt_ispri(irkey->t)) {
+ lua_assert(irt_isgcv(irkey->t));
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.gcr),
+ ptr2addr(ir_kgc(irkey)));
+ emit_sjcc(as, CC_NE, l_exit);
+ }
+ lua_assert(!irt_isnil(irkey->t));
+ emit_i8(as, irt_toitype(irkey->t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+ }
+#endif
+}
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ IRIns *irkey;
+ Reg tmp;
+ if (ir->r == RID_SINK)
+ return;
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ irkey = IR(ir->op2);
+ if (irt_isnum(irkey->t)) {
+ /* For numbers use the constant itself or a spill slot as a TValue. */
+ if (irref_isk(ir->op2))
+ emit_loada(as, tmp, ir_knum(irkey));
+ else
+ emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ if (!irref_isk(ir->op2)) {
+ Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
+ emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
+ } else if (!irt_ispri(irkey->t)) {
+ emit_movmroi(as, tmp, 0, irkey->i);
+ }
+ if (!(LJ_64 && irt_islightud(irkey->t)))
+ emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
+ emit_loada(as, tmp, &J2G(as->J)->tmptv);
+ }
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ /* NYI: Check that UREFO is still open and not aliasing a slot. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_rma(as, XO_MOV, dest, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, 1);
+ emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
+ } else {
+ emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
+ }
+ emit_rmro(as, XO_MOV, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusefref(as, ir, RSET_GPR);
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusestrref(as, ir, RSET_GPR);
+ if (as->mrm.base == RID_NONE)
+ emit_loadi(as, dest, as->mrm.ofs);
+ else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
+ else
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static void asm_fxload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ x86Op xo;
+ if (ir->o == IR_FLOAD)
+ asm_fusefref(as, ir, RSET_GPR);
+ else
+ asm_fusexref(as, ir->op1, RSET_GPR);
+ /* ir->op2 is ignored -- unaligned loads are ok on x86. */
+ switch (irt_type(ir->t)) {
+ case IRT_I8: xo = XO_MOVSXb; break;
+ case IRT_U8: xo = XO_MOVZXb; break;
+ case IRT_I16: xo = XO_MOVSXw; break;
+ case IRT_U16: xo = XO_MOVZXw; break;
+ case IRT_NUM: xo = XMM_MOVRM(as); break;
+ case IRT_FLOAT: xo = XO_MOVSS; break;
+ default:
+ if (LJ_64 && irt_is64(ir->t))
+ dest |= REX_64;
+ else
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
+ xo = XO_MOV;
+ break;
+ }
+ emit_mrm(as, xo, dest, RID_MRM);
+}
+
+static void asm_fxstore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg src = RID_NONE, osrc = RID_NONE;
+ int32_t k = 0;
+ if (ir->r == RID_SINK)
+ return;
+ /* The IRT_I16/IRT_U16 stores should never be simplified for constant
+ ** values since mov word [mem], imm16 has a length-changing prefix.
+ */
+ if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
+ !asm_isk32(as, ir->op2, &k)) {
+ RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
+ (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
+ src = osrc = ra_alloc1(as, ir->op2, allow8);
+ if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
+ rset_clear(allow, osrc);
+ src = ra_scratch(as, allow8);
+ }
+ rset_clear(allow, src);
+ }
+ if (ir->o == IR_FSTORE) {
+ asm_fusefref(as, IR(ir->op1), allow);
+ } else {
+ asm_fusexref(as, ir->op1, allow);
+ if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
+ }
+ if (ra_hasreg(src)) {
+ x86Op xo;
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
+ case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
+ case IRT_NUM: xo = XO_MOVSDto; break;
+ case IRT_FLOAT: xo = XO_MOVSSto; break;
+#if LJ_64
+ case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */
+#endif
+ default:
+ if (LJ_64 && irt_is64(ir->t))
+ src |= REX_64;
+ else
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
+ xo = XO_MOVto;
+ break;
+ }
+ emit_mrm(as, xo, src, RID_MRM);
+ if (!LJ_64 && src != osrc) {
+ ra_noweak(as, osrc);
+ emit_rr(as, XO_MOV, src, osrc);
+ }
+ } else {
+ if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
+ emit_i8(as, k);
+ emit_mrm(as, XO_MOVmib, 0, RID_MRM);
+ } else {
+ lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
+ irt_isaddr(ir->t));
+ emit_i32(as, k);
+ emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
+ }
+ }
+}
+
+#if LJ_64
+static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
+{
+ if (ra_used(ir) || typecheck) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (typecheck) {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, -2);
+ emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
+ emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
+ emit_rr(as, XO_MOV, tmp|REX_64, dest);
+ }
+ return dest;
+ } else {
+ return RID_NONE;
+ }
+}
+#endif
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ (LJ_DUALNUM && irt_isint(ir->t)));
+#if LJ_64
+ if (irt_islightud(ir->t)) {
+ Reg dest = asm_load_lightud64(as, ir, 1);
+ if (ra_hasreg(dest)) {
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
+ }
+ return;
+ } else
+#endif
+ if (ra_used(ir)) {
+ RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
+ } else {
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ }
+ /* Always do the type check, even if the load result is unused. */
+ as->mrm.ofs += 4;
+ asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
+ if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
+ lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
+ emit_u32(as, LJ_TISNUM);
+ emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
+ } else {
+ emit_i8(as, irt_toitype(ir->t));
+ emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
+ }
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ if (ir->r == RID_SINK)
+ return;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, XO_MOVSDto, src, RID_MRM);
+#if LJ_64
+ } else if (irt_islightud(ir->t)) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
+ emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
+#endif
+ } else {
+ IRIns *irr = IR(ir->op2);
+ RegSet allow = RSET_GPR;
+ Reg src = RID_NONE;
+ if (!irref_isk(ir->op2)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ asm_fuseahuref(as, ir->op1, allow);
+ if (ra_hasreg(src)) {
+ emit_mrm(as, XO_MOVto, src, RID_MRM);
+ } else if (!irt_ispri(irr->t)) {
+ lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
+ emit_i32(as, irr->i);
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+ as->mrm.ofs += 4;
+ emit_i32(as, (int32_t)irt_toitype(ir->t));
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ IRType1 t = ir->t;
+ Reg base;
+ lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
+ lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
+ lua_assert(LJ_DUALNUM ||
+ !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ Reg left = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+#if LJ_64
+ } else if (irt_islightud(t)) {
+ Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
+ if (ra_hasreg(dest)) {
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
+ }
+ return;
+#endif
+ } else if (ra_used(ir)) {
+ RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
+ emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
+ } else if (irt_isnum(t)) {
+ emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
+ } else {
+ emit_rmro(as, XO_MOV, dest, base, ofs);
+ }
+ } else {
+ if (!(ir->op2 & IRSLOAD_TYPECHECK))
+ return; /* No type check: avoid base alloc. */
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ }
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ /* Need type check, even if the load result is unused. */
+ asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
+ if (LJ_64 && irt_type(t) >= IRT_NUM) {
+ lua_assert(irt_isinteger(t) || irt_isnum(t));
+ emit_u32(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
+ } else {
+ emit_i8(as, irt_toitype(t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
+ }
+ }
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
+ CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
+ lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[2];
+ lua_assert(sz != CTSIZE_INVALID);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+#if LJ_64
+ Reg r64 = sz == 8 ? REX_64 : 0;
+ if (irref_isk(ir->op2)) {
+ IRIns *irk = IR(ir->op2);
+ uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
+ (uint64_t)(uint32_t)irk->i;
+ if (sz == 4 || checki32((int64_t)k)) {
+ emit_i32(as, (int32_t)k);
+ emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
+ } else {
+ emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
+ emit_loadu64(as, RID_ECX, k);
+ }
+ } else {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
+ }
+#else
+ int32_t ofs = sizeof(GCcdata);
+ if (sz == 8) {
+ ofs += 4; ir++;
+ lua_assert(ir->o == IR_HIOP);
+ }
+ do {
+ if (irref_isk(ir->op2)) {
+ emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
+ } else {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_movtomro(as, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ }
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir--;
+ } while (1);
+#endif
+ lua_assert(sz == 4 || sz == 8);
+ }
+
+ /* Combine initialization of marked, gct and ctypeid. */
+ emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
+ emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
+ (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16)));
+ emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
+ emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
+
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
+}
+#else
+#define asm_cnew(as, ir) ((void)0)
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ MCLabel l_end = emit_label(as);
+ emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_getgl(as, tmp, gc.grayagain);
+ emit_i8(as, ~LJ_GC_BLACK);
+ emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_BLACK);
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
+}
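The emit_* calls above run in reverse, so at runtime asm_tbar() executes the test first. Read in execution order it corresponds roughly to the C sketch below; GC references are shown as plain pointers for brevity, and the sketch is illustrative only, not part of this patch.

  if (tab->marked & LJ_GC_BLACK) {          /* Barrier only needed for a black table. */
    tab->marked &= (uint8_t)~LJ_GC_BLACK;   /* Turn it gray again... */
    tab->gclist = g->gc.grayagain;          /* ...and link it onto the grayagain list. */
    g->gc.grayagain = (GCobj *)tab;
  }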
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj;
+ /* No need for other object barriers (yet). */
+ lua_assert(IR(ir->op1)->o == IR_UREFC);
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
+ obj = IR(ir->op1)->r;
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_WHITES);
+ if (irref_isk(ir->op2)) {
+ GCobj *vp = ir_kgc(IR(ir->op2));
+ emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
+ } else {
+ Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
+ }
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_BLACK);
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+}
+
+/* -- FP/int arithmetic and logic operations ------------------------------ */
+
+/* Load reference onto x87 stack. Force a spill to memory if needed. */
+static void asm_x87load(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_KNUM) {
+ cTValue *tv = ir_knum(ir);
+ if (tvispzero(tv)) /* Use fldz only for +0. */
+ emit_x87op(as, XI_FLDZ);
+ else if (tvispone(tv))
+ emit_x87op(as, XI_FLD1);
+ else
+ emit_rma(as, XO_FLDq, XOg_FLDq, tv);
+ } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
+ !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
+ IRIns *iri = IR(ir->op1);
+ emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
+ } else {
+ emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
+ }
+}
+
+/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
+static int fpmjoin_pow(ASMState *as, IRIns *ir)
+{
+ IRIns *irp = IR(ir->op1);
+ if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
+ IRIns *irpp = IR(irp->op1);
+ if (irpp == ir-2 && irpp->o == IR_FPMATH &&
+ irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
+ IRIns *irx;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, lj_vm_pow_sse);
+ irx = IR(irpp->op1);
+ if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
+ irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */
+ ra_left(as, RID_XMM0, irpp->op1);
+ ra_left(as, RID_XMM1, irp->op2);
+ return 1;
+ }
+ }
+ return 0;
+}
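The chain matched above is EXP2(MUL(LOG2(x), y)), i.e. the split form of pow. The identity it is folded back into, as a minimal C sketch (valid for x > 0; illustrative only, not part of this patch):

  #include <math.h>

  /* pow(x, y) recombined from the EXP2/MUL/LOG2 chain. */
  static double pow_via_exp2(double x, double y)
  {
    return exp2(y * log2(x));
  }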
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+ IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
+ if (fpm == IRFPM_SQRT) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
+ emit_mrm(as, XO_SQRTSD, dest, left);
+ } else if (fpm <= IRFPM_TRUNC) {
+ if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
+ /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
+ ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
+ ** This is atrocious, but the alternatives are much worse.
+ */
+ /* Round down/up/trunc == 1001/1010/1011. */
+ emit_i8(as, 0x09 + fpm);
+ emit_mrm(as, XO_ROUNDSD, dest, left);
+ if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
+ as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
+ }
+ *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
+ } else { /* Call helper functions for SSE2 variant. */
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
+ fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
+ ra_left(as, RID_XMM0, ir->op1);
+ }
+ } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) {
+ /* Rejoined to pow(). */
+ } else { /* Handle x87 ops. */
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
+ }
+ emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
+ switch (fpm) { /* st0 = lj_vm_*(st0) */
+ case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
+ case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
+ case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
+ case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
+ case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
+ case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
+ /* Note: the use of fyl2xp1 would be pointless here. When computing
+ ** log(1.0+eps) the precision is already lost after 1.0 is added.
+ ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
+ */
+ emit_x87op(as, XI_FYL2X); break;
+ case IRFPM_OTHER:
+ switch (ir->o) {
+ case IR_ATAN2:
+ emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
+ case IR_LDEXP:
+ emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
+ default: lua_assert(0); break;
+ }
+ break;
+ default: lua_assert(0); break;
+ }
+ asm_x87load(as, ir->op1);
+ switch (fpm) {
+ case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
+ case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
+ case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
+ case IRFPM_OTHER:
+ if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
+ break;
+ default: break;
+ }
+ }
+}
+
+static void asm_fppowi(ASMState *as, IRIns *ir)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, lj_vm_powi_sse);
+ ra_left(as, RID_XMM0, ir->op1);
+ ra_left(as, RID_EAX, ir->op2);
+}
+
+#if LJ_64 && LJ_HASFFI
+static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_intmod(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static int asm_swapops(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ lua_assert(ra_noreg(irr->r));
+ if (!irm_iscomm(lj_ir_mode[ir->o]))
+ return 0; /* Can't swap non-commutative operations. */
+ if (irref_isk(ir->op2))
+ return 0; /* Don't swap constants to the left. */
+ if (ra_hasreg(irl->r))
+ return 1; /* Swap if left already has a register. */
+ if (ra_samehint(ir->r, irr->r))
+ return 1; /* Swap if dest and right have matching hints. */
+ if (as->curins > as->loopref) { /* In variant part? */
+ if (ir->op2 < as->loopref && !irt_isphi(irr->t))
+ return 0; /* Keep invariants on the right. */
+ if (ir->op1 < as->loopref && !irt_isphi(irl->t))
+ return 1; /* Swap invariants to the right. */
+ }
+ if (opisfusableload(irl->o))
+ return 1; /* Swap fusable loads to the right. */
+ return 0; /* Otherwise don't swap. */
+}
+
+static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
+{
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ RegSet allow = RSET_FPR;
+ Reg dest;
+ Reg right = IR(rref)->r;
+ if (ra_hasreg(right)) {
+ rset_clear(allow, right);
+ ra_noweak(as, right);
+ }
+ dest = ra_dest(as, ir, allow);
+ if (lref == rref) {
+ right = dest;
+ } else if (ra_noreg(right)) {
+ if (asm_swapops(as, ir)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ right = asm_fuseload(as, rref, rset_clear(allow, dest));
+ }
+ emit_mrm(as, xo, dest, right);
+ ra_left(as, dest, lref);
+}
+
+static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
+{
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ RegSet allow = RSET_GPR;
+ Reg dest, right;
+ int32_t k = 0;
+ if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
+ as->flagmcp = NULL;
+ as->mcp += (LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2;
+ }
+ right = IR(rref)->r;
+ if (ra_hasreg(right)) {
+ rset_clear(allow, right);
+ ra_noweak(as, right);
+ }
+ dest = ra_dest(as, ir, allow);
+ if (lref == rref) {
+ right = dest;
+ } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
+ if (asm_swapops(as, ir)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
+ }
+ if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_O);
+ if (xa != XOg_X_IMUL) {
+ if (ra_hasreg(right))
+ emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
+ else
+ emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
+ } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
+ emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
+ } else { /* IMUL r, r, k. */
+ /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
+ Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
+ x86Op xo;
+ if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
+ } else { emit_i32(as, k); xo = XO_IMULi; }
+ emit_mrm(as, xo, REX_64IR(ir, dest), left);
+ return;
+ }
+ ra_left(as, dest, lref);
+}
+
+/* LEA is really a 4-operand ADD with an independent destination register,
+** up to two source registers and an immediate. One register can be scaled
+** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
+** instructions.
+**
+** Currently only a few common cases are supported:
+** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
+** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
+** - Right ADD fusion: y = a+(b+k)
+** The omitted variants have already been reduced by FOLD.
+**
+** There are more fusion opportunities, like gathering shifts or joining
+** common references. But these are probably not worth the trouble, since
+** array indexing is not decomposed and already makes use of all fields
+** of the ModRM operand.
+*/
+static int asm_lea(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ RegSet allow = RSET_GPR;
+ Reg dest;
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ as->mrm.scale = XM_SCALE1;
+ as->mrm.ofs = 0;
+ if (ra_hasreg(irl->r)) {
+ rset_clear(allow, irl->r);
+ ra_noweak(as, irl->r);
+ as->mrm.base = irl->r;
+ if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
+ /* The PHI renaming logic does a better job in some cases. */
+ if (ra_hasreg(ir->r) &&
+ ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
+ (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
+ return 0;
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs = irr->i;
+ } else {
+ rset_clear(allow, irr->r);
+ ra_noweak(as, irr->r);
+ as->mrm.idx = irr->r;
+ }
+ } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
+ irref_isk(irr->op2)) {
+ Reg idx = ra_alloc1(as, irr->op1, allow);
+ rset_clear(allow, idx);
+ as->mrm.idx = (uint8_t)idx;
+ as->mrm.ofs = IR(irr->op2)->i;
+ } else {
+ return 0;
+ }
+ } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
+ (irref_isk(ir->op2) || irref_isk(irl->op2))) {
+ Reg idx, base = ra_alloc1(as, irl->op1, allow);
+ rset_clear(allow, base);
+ as->mrm.base = (uint8_t)base;
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs = irr->i;
+ idx = ra_alloc1(as, irl->op2, allow);
+ } else {
+ as->mrm.ofs = IR(irl->op2)->i;
+ idx = ra_alloc1(as, ir->op2, allow);
+ }
+ rset_clear(allow, idx);
+ as->mrm.idx = (uint8_t)idx;
+ } else {
+ return 0;
+ }
+ dest = ra_dest(as, ir, allow);
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+ return 1; /* Success. */
+}
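As the comment above asm_lea() describes, every supported case collapses into one address computation of the form base + idx*scale + ofs. A tiny C sketch of what the emitted LEA evaluates (scale is 1, 2, 4 or 8; illustrative only, not part of this patch):

  #include <stdint.h>

  /* dest = base + idx*scale + ofs, with dest independent of both sources. */
  static int32_t lea_result(int32_t base, int32_t idx, int scale, int32_t ofs)
  {
    return base + idx*scale + ofs;
  }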
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_ADDSD);
+ else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
+ irt_is64(ir->t) || !asm_lea(as, ir))
+ asm_intarith(as, ir, XOg_ADD);
+}
+
+static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
+ ra_left(as, dest, ir->op1);
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, int cc)
+{
+ Reg right, dest = ra_dest(as, ir, RSET_GPR);
+ IRRef lref = ir->op1, rref = ir->op2;
+ if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
+ right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
+ emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
+ emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
+ ra_left(as, dest, lref);
+}
+
+static void asm_bitswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
+ REX_64IR(ir, 0), dest, 0, as->mcp, 1);
+ ra_left(as, dest, ir->op1);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
+{
+ IRRef rref = ir->op2;
+ IRIns *irr = IR(rref);
+ Reg dest;
+ if (irref_isk(rref)) { /* Constant shifts. */
+ int shift;
+ dest = ra_dest(as, ir, RSET_GPR);
+ shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
+ switch (shift) {
+ case 0: break;
+ case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
+ default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
+ }
+ } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
+ Reg right;
+ dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
+ if (dest == RID_ECX) {
+ dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
+ emit_rr(as, XO_MOV, RID_ECX, dest);
+ }
+ right = irr->r;
+ if (ra_noreg(right))
+ right = ra_allocref(as, rref, RID2RSET(RID_ECX));
+ else if (right != RID_ECX)
+ ra_scratch(as, RID2RSET(RID_ECX));
+ emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
+ ra_noweak(as, right);
+ if (right != RID_ECX)
+ emit_rr(as, XO_MOV, RID_ECX, right);
+ }
+ ra_left(as, dest, ir->op1);
+ /*
+ ** Note: avoid using the flags resulting from a shift or rotate!
+ ** All of them cause a partial flag stall, except for r,1 shifts
+ ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
+ */
+}
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Virtual flags for unordered FP comparisons. */
+#define VCC_U 0x1000 /* Unordered. */
+#define VCC_P 0x2000 /* Needs extra CC_P branch. */
+#define VCC_S 0x4000 /* Swap avoids CC_P branch. */
+#define VCC_PS (VCC_P|VCC_S)
+
+/* Map of comparisons to flags. ORDER IR. */
+#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
+static const uint16_t asm_compmap[IR_ABC+1] = {
+ /* signed non-eq unsigned flags */
+ /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS),
+ /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0),
+ /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS),
+ /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0),
+ /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U),
+ /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS),
+ /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U),
+ /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS),
+ /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
+ /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P),
+ /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
+};
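Each asm_compmap entry packs four fields as laid out by COMPFLAGS() above; note the stored condition codes are the ones the guard branch exits on (e.g. IR_LT exits on CC_GE). The users below pull the fields apart with plain shifts (cc, cc >> 4, cc >> 8 and the VCC_* bits). A hypothetical helper showing the layout (illustrative only, not part of this patch):

  #include <stdint.h>

  static void compmap_unpack(uint16_t m, unsigned *ci, unsigned *cu,
                             unsigned *cin, unsigned *cf)
  {
    *ci  = m & 15u;         /* Condition for signed compares. */
    *cu  = (m >> 4) & 15u;  /* Condition for unsigned compares. */
    *cin = (m >> 8) & 15u;  /* Non-eq condition for hiword compares. */
    *cf  = m & 0xf000u;     /* VCC_U/VCC_P/VCC_S flags for FP compares. */
  }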
+
+/* FP and integer comparisons. */
+static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
+{
+ if (irt_isnum(ir->t)) {
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ Reg left, right;
+ MCLabel l_around;
+ /*
+ ** An extra CC_P branch is required to preserve ordered/unordered
+ ** semantics for FP comparisons. This can be avoided by swapping
+ ** the operands and inverting the condition (except for EQ and UNE).
+ ** So always try to swap if possible.
+ **
+ ** Another option would be to swap operands to achieve better memory
+ ** operand fusion. But it's unlikely that this outweighs the cost
+ ** of the extra branches.
+ */
+ if (cc & VCC_S) { /* Swap? */
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
+ }
+ left = ra_alloc1(as, lref, RSET_FPR);
+ right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
+ l_around = emit_label(as);
+ asm_guardcc(as, cc >> 4);
+ if (cc & VCC_P) { /* Extra CC_P branch required? */
+ if (!(cc & VCC_U)) {
+ asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
+ } else if (l_around != as->invmcp) {
+ emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
+ } else {
+ /* Patched to mcloop by asm_loop_fixup. */
+ as->loopinv = 2;
+ if (as->realign)
+ emit_sjcc(as, CC_P, as->mcp);
+ else
+ emit_jcc(as, CC_P, as->mcp);
+ }
+ }
+ emit_mrm(as, XO_UCOMISD, left, right);
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ IROp leftop = (IROp)(IR(lref)->o);
+ Reg r64 = REX_64IR(ir, 0);
+ int32_t imm = 0;
+ lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
+ irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
+ /* Swap constants (only for ABC) and fusable loads to the right. */
+ if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
+ if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
+ else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */
+ lref = ir->op2; rref = ir->op1;
+ }
+ if (asm_isk32(as, rref, &imm)) {
+ IRIns *irl = IR(lref);
+ /* Check whether we can use test ins. Not for unsigned, since CF=0. */
+ int usetest = (imm == 0 && (cc & 0xa) != 0x2);
+ if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
+ /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
+ Reg right, left = RID_NONE;
+ RegSet allow = RSET_GPR;
+ if (!asm_isk32(as, irl->op2, &imm)) {
+ left = ra_alloc1(as, irl->op2, allow);
+ rset_clear(allow, left);
+ } else { /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */
+ IRIns *irll = IR(irl->op1);
+ if (opisfusableload((IROp)irll->o) &&
+ (irt_isi8(irll->t) || irt_isu8(irll->t))) {
+ IRType1 origt = irll->t; /* Temporarily flip types. */
+ irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
+ as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
+ right = asm_fuseload(as, irl->op1, RSET_GPR);
+ as->curins++;
+ irll->t = origt;
+ if (right != RID_MRM) goto test_nofuse;
+ /* Fusion succeeded, emit test byte mrm, imm8. */
+ asm_guardcc(as, cc);
+ emit_i8(as, (imm & 0xff));
+ emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
+ return;
+ }
+ }
+ as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
+ right = asm_fuseloadm(as, irl->op1, allow, r64);
+ as->curins++; /* Undo the above. */
+ test_nofuse:
+ asm_guardcc(as, cc);
+ if (ra_noreg(left)) {
+ emit_i32(as, imm);
+ emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
+ } else {
+ emit_mrm(as, XO_TEST, r64 + left, right);
+ }
+ } else {
+ Reg left;
+ if (opisfusableload((IROp)irl->o) &&
+ ((irt_isu8(irl->t) && checku8(imm)) ||
+ ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
+ (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
+ /* Only the IRT_INT case is fused by asm_fuseload.
+ ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
+ ** are handled here.
+ ** Note that cmp word [mem], imm16 should not be generated,
+ ** since it has a length-changing prefix. Compares of a word
+ ** against a sign-extended imm8 are ok, however.
+ */
+ IRType1 origt = irl->t; /* Temporarily flip types. */
+ irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
+ left = asm_fuseload(as, lref, RSET_GPR);
+ irl->t = origt;
+ if (left == RID_MRM) { /* Fusion succeeded? */
+ if (irt_isu8(irl->t) || irt_isu16(irl->t))
+ cc >>= 4; /* Need unsigned compare. */
+ asm_guardcc(as, cc);
+ emit_i8(as, imm);
+ emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
+ XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
+ return;
+ } /* Otherwise handle register case as usual. */
+ } else {
+ left = asm_fuseloadm(as, lref,
+ irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
+ }
+ asm_guardcc(as, cc);
+ if (usetest && left != RID_MRM) {
+ /* Use test r,r instead of cmp r,0. */
+ x86Op xo = XO_TEST;
+ if (irt_isu8(ir->t)) {
+ lua_assert(ir->o == IR_EQ || ir->o == IR_NE);
+ xo = XO_TESTb;
+ if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
+ if (LJ_64) {
+ left |= FORCE_REX;
+ } else {
+ emit_i32(as, 0xff);
+ emit_mrm(as, XO_GROUP3, XOg_TEST, left);
+ return;
+ }
+ }
+ }
+ emit_rr(as, xo, r64 + left, left);
+ if (irl+1 == ir) /* Referencing previous ins? */
+ as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
+ } else {
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
+ }
+ }
+ } else {
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
+ asm_guardcc(as, cc);
+ emit_mrm(as, XO_CMP, r64 + left, right);
+ }
+ }
+}
+
+#if LJ_32 && LJ_HASFFI
+/* 64 bit integer comparisons in 32 bit mode. */
+static void asm_comp_int64(ASMState *as, IRIns *ir)
+{
+ uint32_t cc = asm_compmap[(ir-1)->o];
+ RegSet allow = RSET_GPR;
+ Reg lefthi = RID_NONE, leftlo = RID_NONE;
+ Reg righthi = RID_NONE, rightlo = RID_NONE;
+ MCLabel l_around;
+ x86ModRM mrm;
+
+ as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
+
+ /* Allocate/fuse hiword operands. */
+ if (irref_isk(ir->op2)) {
+ lefthi = asm_fuseload(as, ir->op1, allow);
+ } else {
+ lefthi = ra_alloc1(as, ir->op1, allow);
+ rset_clear(allow, lefthi);
+ righthi = asm_fuseload(as, ir->op2, allow);
+ if (righthi == RID_MRM) {
+ if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
+ if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
+ } else {
+ rset_clear(allow, righthi);
+ }
+ }
+ mrm = as->mrm; /* Save state for hiword instruction. */
+
+ /* Allocate/fuse loword operands. */
+ if (irref_isk((ir-1)->op2)) {
+ leftlo = asm_fuseload(as, (ir-1)->op1, allow);
+ } else {
+ leftlo = ra_alloc1(as, (ir-1)->op1, allow);
+ rset_clear(allow, leftlo);
+ rightlo = asm_fuseload(as, (ir-1)->op2, allow);
+ }
+
+ /* All register allocations must be performed _before_ this point. */
+ l_around = emit_label(as);
+ as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
+
+ /* Loword comparison and branch. */
+ asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
+ if (ra_noreg(rightlo)) {
+ int32_t imm = IR((ir-1)->op2)->i;
+ if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
+ emit_rr(as, XO_TEST, leftlo, leftlo);
+ else
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
+ } else {
+ emit_mrm(as, XO_CMP, leftlo, rightlo);
+ }
+
+ /* Hiword comparison and branches. */
+ if ((cc & 15) != CC_NE)
+ emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
+ if ((cc & 15) != CC_E)
+ asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
+ as->mrm = mrm; /* Restore state. */
+ if (ra_noreg(righthi)) {
+ int32_t imm = IR(ir->op2)->i;
+ if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
+ emit_rr(as, XO_TEST, lefthi, lefthi);
+ else
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
+ } else {
+ emit_mrm(as, XO_CMP, lefthi, righthi);
+ }
+}
+#endif
+
+/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+#if LJ_32 && LJ_HASFFI
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ if (usehi || uselo) {
+ if (irt_isfp(ir->t))
+ asm_conv_fp_int64(as, ir);
+ else
+ asm_conv_int64_fp(as, ir);
+ }
+ as->curins--; /* Always skip the CONV. */
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ asm_comp_int64(as, ir);
+ return;
+ } else if ((ir-1)->o == IR_XSTORE) {
+ if ((ir-1)->r != RID_SINK)
+ asm_fxstore(as, ir);
+ return;
+ }
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_ADD:
+ as->flagmcp = NULL;
+ as->curins--;
+ asm_intarith(as, ir, XOg_ADC);
+ asm_intarith(as, ir-1, XOg_ADD);
+ break;
+ case IR_SUB:
+ as->flagmcp = NULL;
+ as->curins--;
+ asm_intarith(as, ir, XOg_SBB);
+ asm_intarith(as, ir-1, XOg_SUB);
+ break;
+ case IR_NEG: {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_rr(as, XO_GROUP3, XOg_NEG, dest);
+ emit_i8(as, 0);
+ emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
+ ra_left(as, dest, ir->op1);
+ as->curins--;
+ asm_neg_not(as, ir-1, XOg_NEG);
+ break;
+ }
+ case IR_CALLN:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by CNEWI itself. */
+ break;
+ default: lua_assert(0); break;
+ }
+#else
+ UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */
+#endif
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore eax. */
+ Reg pbase = irp ? irp->r : RID_BASE;
+ Reg r = allow ? rset_pickbot(allow) : RID_EAX;
+ emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
+ else
+ ra_modified(as, r);
+ emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
+ if (ra_hasreg(pbase) && pbase != r)
+ emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
+ else
+ emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
+ ptr2addr(&J2G(as->J)->jit_base));
+ emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
+ emit_getgl(as, r, jit_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
+ } else {
+ lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ (LJ_DUALNUM && irt_isinteger(ir->t)));
+ if (!irref_isk(ref)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
+ emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
+ } else if (!irt_ispri(ir->t)) {
+ emit_movmroi(as, RID_BASE, ofs, ir->i);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s != 0) /* Do not overwrite link to previous frame. */
+ emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
+ } else {
+ if (!(LJ_64 && irt_islightud(ir->t)))
+ emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
+ }
+ }
+ checkmclim(as);
+ }
+ lua_assert(map + nent == flinks);
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_rr(as, XO_TEST, RID_RET, RID_RET);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ emit_loada(as, tmp, J2G(as->J));
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_sjcc(as, CC_B, l_end);
+ emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
+ emit_getgl(as, tmp, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
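Since the emit_* calls above run backwards, the generated check executes the threshold compare first. Read in execution order it behaves roughly like this C sketch, where steps is as->gcsteps and exit_trace stands for the guard exit (illustrative only, not part of this patch):

  if (g->gc.total >= g->gc.threshold) {   /* Otherwise jump around the step. */
    if (lj_gc_step_jit(g, steps))         /* Nonzero in GCSatomic/GCSfinalize. */
      goto exit_trace;                    /* Leave the trace to sync GC objects. */
  }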
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->realign) { /* Realigned loops use short jumps. */
+ as->realign = NULL; /* Stop another retry. */
+ lua_assert(((intptr_t)target & 15) == 0);
+ if (as->loopinv) { /* Inverted loop branch? */
+ p -= 5;
+ p[0] = XI_JMP;
+ lua_assert(target - p >= -128);
+ p[-1] = (MCode)(target - p); /* Patch sjcc. */
+ if (as->loopinv == 2)
+ p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
+ } else {
+ lua_assert(target - p >= -128);
+ p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
+ p[-2] = XI_JMPs;
+ }
+ } else {
+ MCode *newloop;
+ p[-5] = XI_JMP;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the jcc and patched the jmp. */
+ p -= 5;
+ newloop = target+4;
+ *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
+ if (as->loopinv == 2) {
+ *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
+ newloop = target+8;
+ }
+ } else { /* Otherwise just patch jmp. */
+ *(int32_t *)(p-4) = (int32_t)(target - p);
+ newloop = target+3;
+ }
+ /* Realign small loops and shorten the loop branch. */
+ if (newloop >= p - 128) {
+ as->realign = newloop; /* Force a retry and remember alignment. */
+ as->curins = as->stopins; /* Abort asm_trace now. */
+ as->T->nins = as->orignins; /* Remove any added renames. */
+ }
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_rr(as, XO_MOV, r, RID_BASE);
+ }
+}
+
+/* Coalesce or reload BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
+ MCode *p = as->mctop;
+ MCode *target, *q;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
+ } else {
+ MCode *p1;
+ /* Patch stack adjustment. */
+ if (checki8(spadj)) {
+ p -= 3;
+ p1 = p-6;
+ *p1 = (MCode)spadj;
+ } else {
+ p1 = p-9;
+ *(int32_t *)p1 = spadj;
+ }
+ if ((as->flags & JIT_F_LEA_AGU)) {
+#if LJ_64
+ p1[-4] = 0x48;
+#endif
+ p1[-3] = (MCode)XI_LEA;
+ p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
+ p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ } else {
+#if LJ_64
+ p1[-3] = 0x48;
+#endif
+ p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
+ p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
+ }
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = XI_JMP;
+ /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
+ for (q = as->mctop-1; q >= p; q--)
+ *q = XI_NOP;
+ as->mctop = p;
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop;
+ /* Realign and leave room for backwards loop branch or exit branch. */
+ if (as->realign) {
+ int i = ((int)(intptr_t)as->realign) & 15;
+ /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
+ while (i-- > 0)
+ *--p = XI_NOP;
+ as->mctop = p;
+ p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
+ } else {
+ p -= 5; /* Space for exit branch (near jmp). */
+ }
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
+ as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
+ as->invmcp = NULL;
+ }
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+ case IR_GCSTEP: asm_gcstep(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_EQ: case IR_NE: case IR_ABC:
+ asm_comp(as, ir, asm_compmap[ir->o]);
+ break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
+ case IR_BSWAP: asm_bitswap(as, ir); break;
+
+ case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
+ case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
+ case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;
+
+ case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
+ case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
+ case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
+ case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
+ case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_SUBSD);
+ else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
+ asm_intarith(as, ir, XOg_SUB);
+ break;
+ case IR_MUL:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MULSD);
+ else
+ asm_intarith(as, ir, XOg_X_IMUL);
+ break;
+ case IR_DIV:
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isnum(ir->t))
+ asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
+ IRCALL_lj_carith_divu64);
+ else
+#endif
+ asm_fparith(as, ir, XO_DIVSD);
+ break;
+ case IR_MOD:
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isint(ir->t))
+ asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
+ IRCALL_lj_carith_modu64);
+ else
+#endif
+ asm_intmod(as, ir);
+ break;
+
+ case IR_NEG:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_XORPS);
+ else
+ asm_neg_not(as, ir, XOg_NEG);
+ break;
+ case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;
+
+ case IR_MIN:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MINSD);
+ else
+ asm_min_max(as, ir, CC_G);
+ break;
+ case IR_MAX:
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MAXSD);
+ else
+ asm_min_max(as, ir, CC_L);
+ break;
+
+ case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
+ asm_fpmath(as, ir);
+ break;
+ case IR_POW:
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isnum(ir->t))
+ asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
+ IRCALL_lj_carith_powu64);
+ else
+#endif
+ asm_fppowi(as, ir);
+ break;
+
+ /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
+ case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
+ case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
+ case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_TOBIT: asm_tobit(as, ir); break;
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ int nslots;
+ asm_collectargs(as, ir, ci, args);
+ nslots = asm_count_call_slots(as, ci, args);
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+#if LJ_64
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+#else
+ return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
+#endif
+}
+
+/* Target-specific setup. */
+static void asm_setup_target(ASMState *as)
+{
+ asm_exitstub_setup(as, as->T->nsnap);
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MSize len = T->szmcode;
+ MCode *px = exitstub_addr(J, exitno) - 6;
+ MCode *pe = p+len-6;
+ uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
+ if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
+ *(int32_t *)(p+len-4) = jmprel(p+len, target);
+ /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
+ for (; p < pe; p++)
+ if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) {
+ p += LJ_64 ? 11 : 10;
+ break;
+ }
+ lua_assert(p < pe);
+ for (; p < pe; p++) {
+ if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) {
+ *(int32_t *)(p+2) = jmprel(p+6, target);
+ p += 5;
+ }
+ }
+ lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
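
For context on lj_asm_patchexit above: x86 near jumps encode a signed 32-bit displacement relative to the end of the jump instruction, so retargeting the trailing 5-byte jmp rel32 or a 6-byte jcc rel32 (opcode 0f 8x) only means rewriting that displacement once the loop has located an instruction that points at the old exit stub. The sketch below shows just the displacement math; jmprel32 and patch_jcc are illustrative stand-ins for this note, not LuaJIT's actual definitions, and the real code's range checking is reduced to a comment.

#include <stdint.h>
#include <stddef.h>

typedef uint8_t MCode;

/* Signed 32-bit displacement for a rel32 jump: target address minus the
** address of the byte that follows the displacement field. */
static int32_t jmprel32(MCode *next_insn, MCode *target)
{
  ptrdiff_t delta = (ptrdiff_t)(target - next_insn);
  return (int32_t)delta;  /* Assumes the target lies within +-2GB. */
}

/* Retarget a 6-byte conditional jump (0f 8x + rel32) located at p. */
static void patch_jcc(MCode *p, MCode *target)
{
  *(int32_t *)(p + 2) = jmprel32(p + 6, target);
}
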
diff --git a/3rdparty/lua/src/lj_bc.c b/3rdparty/lua/src/lj_bc.c
index 2912b83..1e5869e 100644
--- a/3rdparty/lua/src/lj_bc.c
+++ b/3rdparty/lua/src/lj_bc.c
@@ -1,14 +1,14 @@
-/*
-** Bytecode instruction modes.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_bc_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_bc.h"
-
-/* Bytecode offsets and bytecode instruction modes. */
-#include "lj_bcdef.h"
-
+/*
+** Bytecode instruction modes.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bc_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+
+/* Bytecode offsets and bytecode instruction modes. */
+#include "lj_bcdef.h"
+
diff --git a/3rdparty/lua/src/lj_bc.h b/3rdparty/lua/src/lj_bc.h
index 82305d1..56e71dd 100644
--- a/3rdparty/lua/src/lj_bc.h
+++ b/3rdparty/lua/src/lj_bc.h
@@ -1,261 +1,261 @@
-/*
-** Bytecode instruction format.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_BC_H
-#define _LJ_BC_H
-
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit:
-**
-** +----+----+----+----+
-** | B | C | A | OP | Format ABC
-** +----+----+----+----+
-** | D | A | OP | Format AD
-** +--------------------
-** MSB LSB
-**
-** In-memory instructions are always stored in host byte order.
-*/
-
-/* Operand ranges and related constants. */
-#define BCMAX_A 0xff
-#define BCMAX_B 0xff
-#define BCMAX_C 0xff
-#define BCMAX_D 0xffff
-#define BCBIAS_J 0x8000
-#define NO_REG BCMAX_A
-#define NO_JMP (~(BCPos)0)
-
-/* Macros to get instruction fields. */
-#define bc_op(i) ((BCOp)((i)&0xff))
-#define bc_a(i) ((BCReg)(((i)>>8)&0xff))
-#define bc_b(i) ((BCReg)((i)>>24))
-#define bc_c(i) ((BCReg)(((i)>>16)&0xff))
-#define bc_d(i) ((BCReg)((i)>>16))
-#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J)
-
-/* Macros to set instruction fields. */
-#define setbc_byte(p, x, ofs) \
- ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x)
-#define setbc_op(p, x) setbc_byte(p, (x), 0)
-#define setbc_a(p, x) setbc_byte(p, (x), 1)
-#define setbc_b(p, x) setbc_byte(p, (x), 3)
-#define setbc_c(p, x) setbc_byte(p, (x), 2)
-#define setbc_d(p, x) \
- ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x)
-#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J))
-
-/* Macros to compose instructions. */
-#define BCINS_ABC(o, a, b, c) \
- (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16))
-#define BCINS_AD(o, a, d) \
- (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16))
-#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J))
-
-/* Bytecode instruction definition. Order matters, see below.
-**
-** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod)
-**
-** The opcode name suffixes specify the type for RB/RC or RD:
-** V = variable slot
-** S = string const
-** N = number const
-** P = primitive type (~itype)
-** B = unsigned byte literal
-** M = multiple args/results
-*/
-#define BCDEF(_) \
- /* Comparison ops. ORDER OPR. */ \
- _(ISLT, var, ___, var, lt) \
- _(ISGE, var, ___, var, lt) \
- _(ISLE, var, ___, var, le) \
- _(ISGT, var, ___, var, le) \
- \
- _(ISEQV, var, ___, var, eq) \
- _(ISNEV, var, ___, var, eq) \
- _(ISEQS, var, ___, str, eq) \
- _(ISNES, var, ___, str, eq) \
- _(ISEQN, var, ___, num, eq) \
- _(ISNEN, var, ___, num, eq) \
- _(ISEQP, var, ___, pri, eq) \
- _(ISNEP, var, ___, pri, eq) \
- \
- /* Unary test and copy ops. */ \
- _(ISTC, dst, ___, var, ___) \
- _(ISFC, dst, ___, var, ___) \
- _(IST, ___, ___, var, ___) \
- _(ISF, ___, ___, var, ___) \
- \
- /* Unary ops. */ \
- _(MOV, dst, ___, var, ___) \
- _(NOT, dst, ___, var, ___) \
- _(UNM, dst, ___, var, unm) \
- _(LEN, dst, ___, var, len) \
- \
- /* Binary ops. ORDER OPR. VV last, POW must be next. */ \
- _(ADDVN, dst, var, num, add) \
- _(SUBVN, dst, var, num, sub) \
- _(MULVN, dst, var, num, mul) \
- _(DIVVN, dst, var, num, div) \
- _(MODVN, dst, var, num, mod) \
- \
- _(ADDNV, dst, var, num, add) \
- _(SUBNV, dst, var, num, sub) \
- _(MULNV, dst, var, num, mul) \
- _(DIVNV, dst, var, num, div) \
- _(MODNV, dst, var, num, mod) \
- \
- _(ADDVV, dst, var, var, add) \
- _(SUBVV, dst, var, var, sub) \
- _(MULVV, dst, var, var, mul) \
- _(DIVVV, dst, var, var, div) \
- _(MODVV, dst, var, var, mod) \
- \
- _(POW, dst, var, var, pow) \
- _(CAT, dst, rbase, rbase, concat) \
- \
- /* Constant ops. */ \
- _(KSTR, dst, ___, str, ___) \
- _(KCDATA, dst, ___, cdata, ___) \
- _(KSHORT, dst, ___, lits, ___) \
- _(KNUM, dst, ___, num, ___) \
- _(KPRI, dst, ___, pri, ___) \
- _(KNIL, base, ___, base, ___) \
- \
- /* Upvalue and function ops. */ \
- _(UGET, dst, ___, uv, ___) \
- _(USETV, uv, ___, var, ___) \
- _(USETS, uv, ___, str, ___) \
- _(USETN, uv, ___, num, ___) \
- _(USETP, uv, ___, pri, ___) \
- _(UCLO, rbase, ___, jump, ___) \
- _(FNEW, dst, ___, func, gc) \
- \
- /* Table ops. */ \
- _(TNEW, dst, ___, lit, gc) \
- _(TDUP, dst, ___, tab, gc) \
- _(GGET, dst, ___, str, index) \
- _(GSET, var, ___, str, newindex) \
- _(TGETV, dst, var, var, index) \
- _(TGETS, dst, var, str, index) \
- _(TGETB, dst, var, lit, index) \
- _(TSETV, var, var, var, newindex) \
- _(TSETS, var, var, str, newindex) \
- _(TSETB, var, var, lit, newindex) \
- _(TSETM, base, ___, num, newindex) \
- \
- /* Calls and vararg handling. T = tail call. */ \
- _(CALLM, base, lit, lit, call) \
- _(CALL, base, lit, lit, call) \
- _(CALLMT, base, ___, lit, call) \
- _(CALLT, base, ___, lit, call) \
- _(ITERC, base, lit, lit, call) \
- _(ITERN, base, lit, lit, call) \
- _(VARG, base, lit, lit, ___) \
- _(ISNEXT, base, ___, jump, ___) \
- \
- /* Returns. */ \
- _(RETM, base, ___, lit, ___) \
- _(RET, rbase, ___, lit, ___) \
- _(RET0, rbase, ___, lit, ___) \
- _(RET1, rbase, ___, lit, ___) \
- \
- /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. */ \
- _(FORI, base, ___, jump, ___) \
- _(JFORI, base, ___, jump, ___) \
- \
- _(FORL, base, ___, jump, ___) \
- _(IFORL, base, ___, jump, ___) \
- _(JFORL, base, ___, lit, ___) \
- \
- _(ITERL, base, ___, jump, ___) \
- _(IITERL, base, ___, jump, ___) \
- _(JITERL, base, ___, lit, ___) \
- \
- _(LOOP, rbase, ___, jump, ___) \
- _(ILOOP, rbase, ___, jump, ___) \
- _(JLOOP, rbase, ___, lit, ___) \
- \
- _(JMP, rbase, ___, jump, ___) \
- \
- /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \
- _(FUNCF, rbase, ___, ___, ___) \
- _(IFUNCF, rbase, ___, ___, ___) \
- _(JFUNCF, rbase, ___, lit, ___) \
- _(FUNCV, rbase, ___, ___, ___) \
- _(IFUNCV, rbase, ___, ___, ___) \
- _(JFUNCV, rbase, ___, lit, ___) \
- _(FUNCC, rbase, ___, ___, ___) \
- _(FUNCCW, rbase, ___, ___, ___)
-
-/* Bytecode opcode numbers. */
-typedef enum {
-#define BCENUM(name, ma, mb, mc, mt) BC_##name,
-BCDEF(BCENUM)
-#undef BCENUM
- BC__MAX
-} BCOp;
-
-LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV);
-LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV);
-LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES);
-LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN);
-LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP);
-LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE);
-LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT);
-LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT);
-LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC);
-LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM);
-LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT);
-LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET);
-LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL);
-LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL);
-LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL);
-LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL);
-LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP);
-LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP);
-LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF);
-LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF);
-LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV);
-LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV);
-
-/* This solves a circular dependency problem, change as needed. */
-#define FF_next_N 4
-
-/* Stack slots used by FORI/FORL, relative to operand A. */
-enum {
- FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT
-};
-
-/* Bytecode operand modes. ORDER BCMode */
-typedef enum {
- BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */
- BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata,
- BCM_max
-} BCMode;
-#define BCM___ BCMnone
-
-#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7))
-#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15))
-#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15))
-#define bcmode_d(op) bcmode_c(op)
-#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3))
-#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11))
-
-#define BCMODE(name, ma, mb, mc, mm) \
- (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)),
-#define BCMODE_FF 0
-
-static LJ_AINLINE int bc_isret(BCOp op)
-{
- return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1);
-}
-
-LJ_DATA const uint16_t lj_bc_mode[];
-LJ_DATA const uint16_t lj_bc_ofs[];
-
-#endif
+/*
+** Bytecode instruction format.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BC_H
+#define _LJ_BC_H
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit:
+**
+** +----+----+----+----+
+** | B | C | A | OP | Format ABC
+** +----+----+----+----+
+** | D | A | OP | Format AD
+** +--------------------
+** MSB LSB
+**
+** In-memory instructions are always stored in host byte order.
+*/
+
+/* Operand ranges and related constants. */
+#define BCMAX_A 0xff
+#define BCMAX_B 0xff
+#define BCMAX_C 0xff
+#define BCMAX_D 0xffff
+#define BCBIAS_J 0x8000
+#define NO_REG BCMAX_A
+#define NO_JMP (~(BCPos)0)
+
+/* Macros to get instruction fields. */
+#define bc_op(i) ((BCOp)((i)&0xff))
+#define bc_a(i) ((BCReg)(((i)>>8)&0xff))
+#define bc_b(i) ((BCReg)((i)>>24))
+#define bc_c(i) ((BCReg)(((i)>>16)&0xff))
+#define bc_d(i) ((BCReg)((i)>>16))
+#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J)
+
+/* Macros to set instruction fields. */
+#define setbc_byte(p, x, ofs) \
+ ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x)
+#define setbc_op(p, x) setbc_byte(p, (x), 0)
+#define setbc_a(p, x) setbc_byte(p, (x), 1)
+#define setbc_b(p, x) setbc_byte(p, (x), 3)
+#define setbc_c(p, x) setbc_byte(p, (x), 2)
+#define setbc_d(p, x) \
+ ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x)
+#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J))
+
+/* Macros to compose instructions. */
+#define BCINS_ABC(o, a, b, c) \
+ (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16))
+#define BCINS_AD(o, a, d) \
+ (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16))
+#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J))
+
+/* Bytecode instruction definition. Order matters, see below.
+**
+** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod)
+**
+** The opcode name suffixes specify the type for RB/RC or RD:
+** V = variable slot
+** S = string const
+** N = number const
+** P = primitive type (~itype)
+** B = unsigned byte literal
+** M = multiple args/results
+*/
+#define BCDEF(_) \
+ /* Comparison ops. ORDER OPR. */ \
+ _(ISLT, var, ___, var, lt) \
+ _(ISGE, var, ___, var, lt) \
+ _(ISLE, var, ___, var, le) \
+ _(ISGT, var, ___, var, le) \
+ \
+ _(ISEQV, var, ___, var, eq) \
+ _(ISNEV, var, ___, var, eq) \
+ _(ISEQS, var, ___, str, eq) \
+ _(ISNES, var, ___, str, eq) \
+ _(ISEQN, var, ___, num, eq) \
+ _(ISNEN, var, ___, num, eq) \
+ _(ISEQP, var, ___, pri, eq) \
+ _(ISNEP, var, ___, pri, eq) \
+ \
+ /* Unary test and copy ops. */ \
+ _(ISTC, dst, ___, var, ___) \
+ _(ISFC, dst, ___, var, ___) \
+ _(IST, ___, ___, var, ___) \
+ _(ISF, ___, ___, var, ___) \
+ \
+ /* Unary ops. */ \
+ _(MOV, dst, ___, var, ___) \
+ _(NOT, dst, ___, var, ___) \
+ _(UNM, dst, ___, var, unm) \
+ _(LEN, dst, ___, var, len) \
+ \
+ /* Binary ops. ORDER OPR. VV last, POW must be next. */ \
+ _(ADDVN, dst, var, num, add) \
+ _(SUBVN, dst, var, num, sub) \
+ _(MULVN, dst, var, num, mul) \
+ _(DIVVN, dst, var, num, div) \
+ _(MODVN, dst, var, num, mod) \
+ \
+ _(ADDNV, dst, var, num, add) \
+ _(SUBNV, dst, var, num, sub) \
+ _(MULNV, dst, var, num, mul) \
+ _(DIVNV, dst, var, num, div) \
+ _(MODNV, dst, var, num, mod) \
+ \
+ _(ADDVV, dst, var, var, add) \
+ _(SUBVV, dst, var, var, sub) \
+ _(MULVV, dst, var, var, mul) \
+ _(DIVVV, dst, var, var, div) \
+ _(MODVV, dst, var, var, mod) \
+ \
+ _(POW, dst, var, var, pow) \
+ _(CAT, dst, rbase, rbase, concat) \
+ \
+ /* Constant ops. */ \
+ _(KSTR, dst, ___, str, ___) \
+ _(KCDATA, dst, ___, cdata, ___) \
+ _(KSHORT, dst, ___, lits, ___) \
+ _(KNUM, dst, ___, num, ___) \
+ _(KPRI, dst, ___, pri, ___) \
+ _(KNIL, base, ___, base, ___) \
+ \
+ /* Upvalue and function ops. */ \
+ _(UGET, dst, ___, uv, ___) \
+ _(USETV, uv, ___, var, ___) \
+ _(USETS, uv, ___, str, ___) \
+ _(USETN, uv, ___, num, ___) \
+ _(USETP, uv, ___, pri, ___) \
+ _(UCLO, rbase, ___, jump, ___) \
+ _(FNEW, dst, ___, func, gc) \
+ \
+ /* Table ops. */ \
+ _(TNEW, dst, ___, lit, gc) \
+ _(TDUP, dst, ___, tab, gc) \
+ _(GGET, dst, ___, str, index) \
+ _(GSET, var, ___, str, newindex) \
+ _(TGETV, dst, var, var, index) \
+ _(TGETS, dst, var, str, index) \
+ _(TGETB, dst, var, lit, index) \
+ _(TSETV, var, var, var, newindex) \
+ _(TSETS, var, var, str, newindex) \
+ _(TSETB, var, var, lit, newindex) \
+ _(TSETM, base, ___, num, newindex) \
+ \
+ /* Calls and vararg handling. T = tail call. */ \
+ _(CALLM, base, lit, lit, call) \
+ _(CALL, base, lit, lit, call) \
+ _(CALLMT, base, ___, lit, call) \
+ _(CALLT, base, ___, lit, call) \
+ _(ITERC, base, lit, lit, call) \
+ _(ITERN, base, lit, lit, call) \
+ _(VARG, base, lit, lit, ___) \
+ _(ISNEXT, base, ___, jump, ___) \
+ \
+ /* Returns. */ \
+ _(RETM, base, ___, lit, ___) \
+ _(RET, rbase, ___, lit, ___) \
+ _(RET0, rbase, ___, lit, ___) \
+ _(RET1, rbase, ___, lit, ___) \
+ \
+ /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. */ \
+ _(FORI, base, ___, jump, ___) \
+ _(JFORI, base, ___, jump, ___) \
+ \
+ _(FORL, base, ___, jump, ___) \
+ _(IFORL, base, ___, jump, ___) \
+ _(JFORL, base, ___, lit, ___) \
+ \
+ _(ITERL, base, ___, jump, ___) \
+ _(IITERL, base, ___, jump, ___) \
+ _(JITERL, base, ___, lit, ___) \
+ \
+ _(LOOP, rbase, ___, jump, ___) \
+ _(ILOOP, rbase, ___, jump, ___) \
+ _(JLOOP, rbase, ___, lit, ___) \
+ \
+ _(JMP, rbase, ___, jump, ___) \
+ \
+ /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \
+ _(FUNCF, rbase, ___, ___, ___) \
+ _(IFUNCF, rbase, ___, ___, ___) \
+ _(JFUNCF, rbase, ___, lit, ___) \
+ _(FUNCV, rbase, ___, ___, ___) \
+ _(IFUNCV, rbase, ___, ___, ___) \
+ _(JFUNCV, rbase, ___, lit, ___) \
+ _(FUNCC, rbase, ___, ___, ___) \
+ _(FUNCCW, rbase, ___, ___, ___)
+
+/* Bytecode opcode numbers. */
+typedef enum {
+#define BCENUM(name, ma, mb, mc, mt) BC_##name,
+BCDEF(BCENUM)
+#undef BCENUM
+ BC__MAX
+} BCOp;
+
+LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV);
+LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV);
+LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES);
+LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN);
+LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP);
+LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE);
+LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT);
+LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT);
+LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC);
+LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM);
+LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT);
+LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET);
+LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL);
+LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL);
+LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL);
+LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL);
+LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP);
+LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP);
+LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF);
+LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF);
+LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV);
+LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV);
+
+/* This solves a circular dependency problem, change as needed. */
+#define FF_next_N 4
+
+/* Stack slots used by FORI/FORL, relative to operand A. */
+enum {
+ FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT
+};
+
+/* Bytecode operand modes. ORDER BCMode */
+typedef enum {
+ BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */
+ BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata,
+ BCM_max
+} BCMode;
+#define BCM___ BCMnone
+
+#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7))
+#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15))
+#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15))
+#define bcmode_d(op) bcmode_c(op)
+#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3))
+#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11))
+
+#define BCMODE(name, ma, mb, mc, mm) \
+ (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)),
+#define BCMODE_FF 0
+
+static LJ_AINLINE int bc_isret(BCOp op)
+{
+ return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1);
+}
+
+LJ_DATA const uint16_t lj_bc_mode[];
+LJ_DATA const uint16_t lj_bc_ofs[];
+
+#endif
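
To make the instruction layout above concrete, the following self-contained sketch packs and unpacks an ABC-format instruction with the same shifts the BCINS_ABC and bc_* macros use: OP in the lowest byte, then A, then C, with B in the top byte, and D overlapping the B and C bytes. Plain stdint types stand in for BCIns/BCOp/BCReg so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t op = 0x16, a = 1, b = 2, c = 3;  /* Arbitrary field values. */
  /* Same packing as BCINS_ABC(o, a, b, c). */
  uint32_t ins = op | (a << 8) | (b << 24) | (c << 16);
  printf("op=%u a=%u b=%u c=%u d=%u\n",
         (unsigned)(ins & 0xff),          /* bc_op */
         (unsigned)((ins >> 8) & 0xff),   /* bc_a  */
         (unsigned)(ins >> 24),           /* bc_b  */
         (unsigned)((ins >> 16) & 0xff),  /* bc_c  */
         (unsigned)(ins >> 16));          /* bc_d spans the B and C bytes. */
  return 0;
}
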
diff --git a/3rdparty/lua/src/lj_bcdump.h b/3rdparty/lua/src/lj_bcdump.h
index 494b98b..e660156 100644
--- a/3rdparty/lua/src/lj_bcdump.h
+++ b/3rdparty/lua/src/lj_bcdump.h
@@ -1,66 +1,66 @@
-/*
-** Bytecode dump definitions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_BCDUMP_H
-#define _LJ_BCDUMP_H
-
-#include "lj_obj.h"
-#include "lj_lex.h"
-
-/* -- Bytecode dump format ------------------------------------------------ */
-
-/*
-** dump = header proto+ 0U
-** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*]
-** proto = lengthU pdata
-** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*]
-** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU
-** [debuglenU [firstlineU numlineU]]
-** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* }
-** knum = intU0 | (loU1 hiU)
-** ktab = narrayU nhashU karray* khash*
-** karray = ktabk
-** khash = ktabk ktabk
-** ktabk = ktabtypeU { intU | (loU hiU) | strB* }
-**
-** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1
-*/
-
-/* Bytecode dump header. */
-#define BCDUMP_HEAD1 0x1b
-#define BCDUMP_HEAD2 0x4c
-#define BCDUMP_HEAD3 0x4a
-
-/* If you perform *any* kind of private modifications to the bytecode itself
-** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher.
-*/
-#define BCDUMP_VERSION 1
-
-/* Compatibility flags. */
-#define BCDUMP_F_BE 0x01
-#define BCDUMP_F_STRIP 0x02
-#define BCDUMP_F_FFI 0x04
-
-#define BCDUMP_F_KNOWN (BCDUMP_F_FFI*2-1)
-
-/* Type codes for the GC constants of a prototype. Plus length for strings. */
-enum {
- BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64,
- BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR
-};
-
-/* Type codes for the keys/values of a constant table. */
-enum {
- BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE,
- BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR
-};
-
-/* -- Bytecode reader/writer ---------------------------------------------- */
-
-LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer,
- void *data, int strip);
-LJ_FUNC GCproto *lj_bcread(LexState *ls);
-
-#endif
+/*
+** Bytecode dump definitions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BCDUMP_H
+#define _LJ_BCDUMP_H
+
+#include "lj_obj.h"
+#include "lj_lex.h"
+
+/* -- Bytecode dump format ------------------------------------------------ */
+
+/*
+** dump = header proto+ 0U
+** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*]
+** proto = lengthU pdata
+** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*]
+** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU
+** [debuglenU [firstlineU numlineU]]
+** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* }
+** knum = intU0 | (loU1 hiU)
+** ktab = narrayU nhashU karray* khash*
+** karray = ktabk
+** khash = ktabk ktabk
+** ktabk = ktabtypeU { intU | (loU hiU) | strB* }
+**
+** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1
+*/
+
+/* Bytecode dump header. */
+#define BCDUMP_HEAD1 0x1b
+#define BCDUMP_HEAD2 0x4c
+#define BCDUMP_HEAD3 0x4a
+
+/* If you perform *any* kind of private modifications to the bytecode itself
+** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher.
+*/
+#define BCDUMP_VERSION 1
+
+/* Compatibility flags. */
+#define BCDUMP_F_BE 0x01
+#define BCDUMP_F_STRIP 0x02
+#define BCDUMP_F_FFI 0x04
+
+#define BCDUMP_F_KNOWN (BCDUMP_F_FFI*2-1)
+
+/* Type codes for the GC constants of a prototype. Plus length for strings. */
+enum {
+ BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64,
+ BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR
+};
+
+/* Type codes for the keys/values of a constant table. */
+enum {
+ BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE,
+ BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR
+};
+
+/* -- Bytecode reader/writer ---------------------------------------------- */
+
+LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer,
+ void *data, int strip);
+LJ_FUNC GCproto *lj_bcread(LexState *ls);
+
+#endif
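
The 'U' fields in the dump grammar above are ULEB128-encoded 32-bit values: seven payload bits per byte, least-significant group first, with the top bit set on every byte except the last (so any value below 0x80 takes a single byte). The helper pair below illustrates the encoding in isolation; it follows the same logic as bcwrite_uleb128/bcread_uleb128 in the files that follow, but works on a caller-supplied byte pointer instead of LuaJIT's buffer structures.

#include <stdint.h>
#include <stddef.h>

/* Encode v as ULEB128 into p; returns the number of bytes written (<= 5). */
static size_t uleb128_encode(uint8_t *p, uint32_t v)
{
  size_t n = 0;
  while (v >= 0x80) {
    p[n++] = (uint8_t)((v & 0x7f) | 0x80);
    v >>= 7;
  }
  p[n++] = (uint8_t)v;
  return n;
}

/* Decode a ULEB128 value starting at *pp and advance *pp past it. */
static uint32_t uleb128_decode(const uint8_t **pp)
{
  const uint8_t *p = *pp;
  uint32_t v = *p++;
  if (v >= 0x80) {
    int sh = 0;
    v &= 0x7f;
    do {
      v |= (uint32_t)(*p & 0x7f) << (sh += 7);
    } while (*p++ >= 0x80);
  }
  *pp = p;
  return v;
}
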
diff --git a/3rdparty/lua/src/lj_bcread.c b/3rdparty/lua/src/lj_bcread.c
index a9cd8a2..2b5ba85 100644
--- a/3rdparty/lua/src/lj_bcread.c
+++ b/3rdparty/lua/src/lj_bcread.c
@@ -1,476 +1,476 @@
-/*
-** Bytecode reader.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_bcread_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_bc.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#include "lj_cdata.h"
-#include "lualib.h"
-#endif
-#include "lj_lex.h"
-#include "lj_bcdump.h"
-#include "lj_state.h"
-
-/* Reuse some lexer fields for our own purposes. */
-#define bcread_flags(ls) ls->level
-#define bcread_swap(ls) \
- ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE)
-#define bcread_oldtop(L, ls) restorestack(L, ls->lastline)
-#define bcread_savetop(L, ls, top) \
- ls->lastline = (BCLine)savestack(L, (top))
-
-/* -- Input buffer handling ----------------------------------------------- */
-
-/* Throw reader error. */
-static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em)
-{
- lua_State *L = ls->L;
- const char *name = ls->chunkarg;
- if (*name == BCDUMP_HEAD1) name = "(binary)";
- else if (*name == '@' || *name == '=') name++;
- lj_str_pushf(L, "%s: %s", name, err2msg(em));
- lj_err_throw(L, LUA_ERRSYNTAX);
-}
-
-/* Resize input buffer. */
-static void bcread_resize(LexState *ls, MSize len)
-{
- if (ls->sb.sz < len) {
- MSize sz = ls->sb.sz * 2;
- while (len > sz) sz = sz * 2;
- lj_str_resizebuf(ls->L, &ls->sb, sz);
- /* Caveat: this may change ls->sb.buf which may affect ls->p. */
- }
-}
-
-/* Refill buffer if needed. */
-static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need)
-{
- lua_assert(len != 0);
- if (len > LJ_MAX_MEM || ls->current < 0)
- bcread_error(ls, LJ_ERR_BCBAD);
- do {
- const char *buf;
- size_t size;
- if (ls->n) { /* Copy remainder to buffer. */
- if (ls->sb.n) { /* Move down in buffer. */
- lua_assert(ls->p + ls->n == ls->sb.buf + ls->sb.n);
- if (ls->n != ls->sb.n)
- memmove(ls->sb.buf, ls->p, ls->n);
- } else { /* Copy from buffer provided by reader. */
- bcread_resize(ls, len);
- memcpy(ls->sb.buf, ls->p, ls->n);
- }
- ls->p = ls->sb.buf;
- }
- ls->sb.n = ls->n;
- buf = ls->rfunc(ls->L, ls->rdata, &size); /* Get more data from reader. */
- if (buf == NULL || size == 0) { /* EOF? */
- if (need) bcread_error(ls, LJ_ERR_BCBAD);
- ls->current = -1; /* Only bad if we get called again. */
- break;
- }
- if (ls->sb.n) { /* Append to buffer. */
- MSize n = ls->sb.n + (MSize)size;
- bcread_resize(ls, n < len ? len : n);
- memcpy(ls->sb.buf + ls->sb.n, buf, size);
- ls->n = ls->sb.n = n;
- ls->p = ls->sb.buf;
- } else { /* Return buffer provided by reader. */
- ls->n = (MSize)size;
- ls->p = buf;
- }
- } while (ls->n < len);
-}
-
-/* Need a certain number of bytes. */
-static LJ_AINLINE void bcread_need(LexState *ls, MSize len)
-{
- if (LJ_UNLIKELY(ls->n < len))
- bcread_fill(ls, len, 1);
-}
-
-/* Want to read up to a certain number of bytes, but may need less. */
-static LJ_AINLINE void bcread_want(LexState *ls, MSize len)
-{
- if (LJ_UNLIKELY(ls->n < len))
- bcread_fill(ls, len, 0);
-}
-
-#define bcread_dec(ls) check_exp(ls->n > 0, ls->n--)
-#define bcread_consume(ls, len) check_exp(ls->n >= (len), ls->n -= (len))
-
-/* Return memory block from buffer. */
-static uint8_t *bcread_mem(LexState *ls, MSize len)
-{
- uint8_t *p = (uint8_t *)ls->p;
- bcread_consume(ls, len);
- ls->p = (char *)p + len;
- return p;
-}
-
-/* Copy memory block from buffer. */
-static void bcread_block(LexState *ls, void *q, MSize len)
-{
- memcpy(q, bcread_mem(ls, len), len);
-}
-
-/* Read byte from buffer. */
-static LJ_AINLINE uint32_t bcread_byte(LexState *ls)
-{
- bcread_dec(ls);
- return (uint32_t)(uint8_t)*ls->p++;
-}
-
-/* Read ULEB128 value from buffer. */
-static uint32_t bcread_uleb128(LexState *ls)
-{
- const uint8_t *p = (const uint8_t *)ls->p;
- uint32_t v = *p++;
- if (LJ_UNLIKELY(v >= 0x80)) {
- int sh = 0;
- v &= 0x7f;
- do {
- v |= ((*p & 0x7f) << (sh += 7));
- bcread_dec(ls);
- } while (*p++ >= 0x80);
- }
- bcread_dec(ls);
- ls->p = (char *)p;
- return v;
-}
-
-/* Read top 32 bits of 33 bit ULEB128 value from buffer. */
-static uint32_t bcread_uleb128_33(LexState *ls)
-{
- const uint8_t *p = (const uint8_t *)ls->p;
- uint32_t v = (*p++ >> 1);
- if (LJ_UNLIKELY(v >= 0x40)) {
- int sh = -1;
- v &= 0x3f;
- do {
- v |= ((*p & 0x7f) << (sh += 7));
- bcread_dec(ls);
- } while (*p++ >= 0x80);
- }
- bcread_dec(ls);
- ls->p = (char *)p;
- return v;
-}
-
-/* -- Bytecode reader ----------------------------------------------------- */
-
-/* Read debug info of a prototype. */
-static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg)
-{
- void *lineinfo = (void *)proto_lineinfo(pt);
- bcread_block(ls, lineinfo, sizedbg);
- /* Swap lineinfo if the endianess differs. */
- if (bcread_swap(ls) && pt->numline >= 256) {
- MSize i, n = pt->sizebc-1;
- if (pt->numline < 65536) {
- uint16_t *p = (uint16_t *)lineinfo;
- for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8));
- } else {
- uint32_t *p = (uint32_t *)lineinfo;
- for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]);
- }
- }
-}
-
-/* Find pointer to varinfo. */
-static const void *bcread_varinfo(GCproto *pt)
-{
- const uint8_t *p = proto_uvinfo(pt);
- MSize n = pt->sizeuv;
- if (n) while (*p++ || --n) ;
- return p;
-}
-
-/* Read a single constant key/value of a template table. */
-static void bcread_ktabk(LexState *ls, TValue *o)
-{
- MSize tp = bcread_uleb128(ls);
- if (tp >= BCDUMP_KTAB_STR) {
- MSize len = tp - BCDUMP_KTAB_STR;
- const char *p = (const char *)bcread_mem(ls, len);
- setstrV(ls->L, o, lj_str_new(ls->L, p, len));
- } else if (tp == BCDUMP_KTAB_INT) {
- setintV(o, (int32_t)bcread_uleb128(ls));
- } else if (tp == BCDUMP_KTAB_NUM) {
- o->u32.lo = bcread_uleb128(ls);
- o->u32.hi = bcread_uleb128(ls);
- } else {
- lua_assert(tp <= BCDUMP_KTAB_TRUE);
- setitype(o, ~tp);
- }
-}
-
-/* Read a template table. */
-static GCtab *bcread_ktab(LexState *ls)
-{
- MSize narray = bcread_uleb128(ls);
- MSize nhash = bcread_uleb128(ls);
- GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash));
- if (narray) { /* Read array entries. */
- MSize i;
- TValue *o = tvref(t->array);
- for (i = 0; i < narray; i++, o++)
- bcread_ktabk(ls, o);
- }
- if (nhash) { /* Read hash entries. */
- MSize i;
- for (i = 0; i < nhash; i++) {
- TValue key;
- bcread_ktabk(ls, &key);
- lua_assert(!tvisnil(&key));
- bcread_ktabk(ls, lj_tab_set(ls->L, t, &key));
- }
- }
- return t;
-}
-
-/* Read GC constants of a prototype. */
-static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc)
-{
- MSize i;
- GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
- for (i = 0; i < sizekgc; i++, kr++) {
- MSize tp = bcread_uleb128(ls);
- if (tp >= BCDUMP_KGC_STR) {
- MSize len = tp - BCDUMP_KGC_STR;
- const char *p = (const char *)bcread_mem(ls, len);
- setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len)));
- } else if (tp == BCDUMP_KGC_TAB) {
- setgcref(*kr, obj2gco(bcread_ktab(ls)));
-#if LJ_HASFFI
- } else if (tp != BCDUMP_KGC_CHILD) {
- CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE :
- tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64;
- CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8;
- GCcdata *cd = lj_cdata_new_(ls->L, id, sz);
- TValue *p = (TValue *)cdataptr(cd);
- setgcref(*kr, obj2gco(cd));
- p[0].u32.lo = bcread_uleb128(ls);
- p[0].u32.hi = bcread_uleb128(ls);
- if (tp == BCDUMP_KGC_COMPLEX) {
- p[1].u32.lo = bcread_uleb128(ls);
- p[1].u32.hi = bcread_uleb128(ls);
- }
-#endif
- } else {
- lua_State *L = ls->L;
- lua_assert(tp == BCDUMP_KGC_CHILD);
- if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */
- bcread_error(ls, LJ_ERR_BCBAD);
- L->top--;
- setgcref(*kr, obj2gco(protoV(L->top)));
- }
- }
-}
-
-/* Read number constants of a prototype. */
-static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn)
-{
- MSize i;
- TValue *o = mref(pt->k, TValue);
- for (i = 0; i < sizekn; i++, o++) {
- int isnum = (ls->p[0] & 1);
- uint32_t lo = bcread_uleb128_33(ls);
- if (isnum) {
- o->u32.lo = lo;
- o->u32.hi = bcread_uleb128(ls);
- } else {
- setintV(o, lo);
- }
- }
-}
-
-/* Read bytecode instructions. */
-static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc)
-{
- BCIns *bc = proto_bc(pt);
- bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
- pt->framesize, 0);
- bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns));
- /* Swap bytecode instructions if the endianess differs. */
- if (bcread_swap(ls)) {
- MSize i;
- for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]);
- }
-}
-
-/* Read upvalue refs. */
-static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv)
-{
- if (sizeuv) {
- uint16_t *uv = proto_uv(pt);
- bcread_block(ls, uv, sizeuv*2);
- /* Swap upvalue refs if the endianess differs. */
- if (bcread_swap(ls)) {
- MSize i;
- for (i = 0; i < sizeuv; i++)
- uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8));
- }
- }
-}
-
-/* Read a prototype. */
-static GCproto *bcread_proto(LexState *ls)
-{
- GCproto *pt;
- MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept;
- MSize ofsk, ofsuv, ofsdbg;
- MSize sizedbg = 0;
- BCLine firstline = 0, numline = 0;
- MSize len, startn;
-
- /* Read length. */
- if (ls->n > 0 && ls->p[0] == 0) { /* Shortcut EOF. */
- ls->n--; ls->p++;
- return NULL;
- }
- bcread_want(ls, 5);
- len = bcread_uleb128(ls);
- if (!len) return NULL; /* EOF */
- bcread_need(ls, len);
- startn = ls->n;
-
- /* Read prototype header. */
- flags = bcread_byte(ls);
- numparams = bcread_byte(ls);
- framesize = bcread_byte(ls);
- sizeuv = bcread_byte(ls);
- sizekgc = bcread_uleb128(ls);
- sizekn = bcread_uleb128(ls);
- sizebc = bcread_uleb128(ls) + 1;
- if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) {
- sizedbg = bcread_uleb128(ls);
- if (sizedbg) {
- firstline = bcread_uleb128(ls);
- numline = bcread_uleb128(ls);
- }
- }
-
- /* Calculate total size of prototype including all colocated arrays. */
- sizept = (MSize)sizeof(GCproto) +
- sizebc*(MSize)sizeof(BCIns) +
- sizekgc*(MSize)sizeof(GCRef);
- sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1);
- ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue);
- ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2;
- ofsdbg = sizept; sizept += sizedbg;
-
- /* Allocate prototype object and initialize its fields. */
- pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept);
- pt->gct = ~LJ_TPROTO;
- pt->numparams = (uint8_t)numparams;
- pt->framesize = (uint8_t)framesize;
- pt->sizebc = sizebc;
- setmref(pt->k, (char *)pt + ofsk);
- setmref(pt->uv, (char *)pt + ofsuv);
- pt->sizekgc = 0; /* Set to zero until fully initialized. */
- pt->sizekn = sizekn;
- pt->sizept = sizept;
- pt->sizeuv = (uint8_t)sizeuv;
- pt->flags = (uint8_t)flags;
- pt->trace = 0;
- setgcref(pt->chunkname, obj2gco(ls->chunkname));
-
- /* Close potentially uninitialized gap between bc and kgc. */
- *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0;
-
- /* Read bytecode instructions and upvalue refs. */
- bcread_bytecode(ls, pt, sizebc);
- bcread_uv(ls, pt, sizeuv);
-
- /* Read constants. */
- bcread_kgc(ls, pt, sizekgc);
- pt->sizekgc = sizekgc;
- bcread_knum(ls, pt, sizekn);
-
- /* Read and initialize debug info. */
- pt->firstline = firstline;
- pt->numline = numline;
- if (sizedbg) {
- MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
- setmref(pt->lineinfo, (char *)pt + ofsdbg);
- setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli);
- bcread_dbg(ls, pt, sizedbg);
- setmref(pt->varinfo, bcread_varinfo(pt));
- } else {
- setmref(pt->lineinfo, NULL);
- setmref(pt->uvinfo, NULL);
- setmref(pt->varinfo, NULL);
- }
-
- if (len != startn - ls->n)
- bcread_error(ls, LJ_ERR_BCBAD);
- return pt;
-}
-
-/* Read and check header of bytecode dump. */
-static int bcread_header(LexState *ls)
-{
- uint32_t flags;
- bcread_want(ls, 3+5+5);
- if (bcread_byte(ls) != BCDUMP_HEAD2 ||
- bcread_byte(ls) != BCDUMP_HEAD3 ||
- bcread_byte(ls) != BCDUMP_VERSION) return 0;
- bcread_flags(ls) = flags = bcread_uleb128(ls);
- if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0;
- if ((flags & BCDUMP_F_FFI)) {
-#if LJ_HASFFI
- lua_State *L = ls->L;
- if (!ctype_ctsG(G(L))) {
- ptrdiff_t oldtop = savestack(L, L->top);
- luaopen_ffi(L); /* Load FFI library on-demand. */
- L->top = restorestack(L, oldtop);
- }
-#else
- return 0;
-#endif
- }
- if ((flags & BCDUMP_F_STRIP)) {
- ls->chunkname = lj_str_newz(ls->L, ls->chunkarg);
- } else {
- MSize len = bcread_uleb128(ls);
- bcread_need(ls, len);
- ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len);
- }
- return 1; /* Ok. */
-}
-
-/* Read a bytecode dump. */
-GCproto *lj_bcread(LexState *ls)
-{
- lua_State *L = ls->L;
- lua_assert(ls->current == BCDUMP_HEAD1);
- bcread_savetop(L, ls, L->top);
- lj_str_resetbuf(&ls->sb);
- /* Check for a valid bytecode dump header. */
- if (!bcread_header(ls))
- bcread_error(ls, LJ_ERR_BCFMT);
- for (;;) { /* Process all prototypes in the bytecode dump. */
- GCproto *pt = bcread_proto(ls);
- if (!pt) break;
- setprotoV(L, L->top, pt);
- incr_top(L);
- }
- if ((int32_t)ls->n > 0 || L->top-1 != bcread_oldtop(L, ls))
- bcread_error(ls, LJ_ERR_BCBAD);
- /* Pop off last prototype. */
- L->top--;
- return protoV(L->top);
-}
-
+/*
+** Bytecode reader.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bcread_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lualib.h"
+#endif
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_state.h"
+
+/* Reuse some lexer fields for our own purposes. */
+#define bcread_flags(ls) ls->level
+#define bcread_swap(ls) \
+ ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE)
+#define bcread_oldtop(L, ls) restorestack(L, ls->lastline)
+#define bcread_savetop(L, ls, top) \
+ ls->lastline = (BCLine)savestack(L, (top))
+
+/* -- Input buffer handling ----------------------------------------------- */
+
+/* Throw reader error. */
+static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em)
+{
+ lua_State *L = ls->L;
+ const char *name = ls->chunkarg;
+ if (*name == BCDUMP_HEAD1) name = "(binary)";
+ else if (*name == '@' || *name == '=') name++;
+ lj_str_pushf(L, "%s: %s", name, err2msg(em));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+}
+
+/* Resize input buffer. */
+static void bcread_resize(LexState *ls, MSize len)
+{
+ if (ls->sb.sz < len) {
+ MSize sz = ls->sb.sz * 2;
+ while (len > sz) sz = sz * 2;
+ lj_str_resizebuf(ls->L, &ls->sb, sz);
+ /* Caveat: this may change ls->sb.buf which may affect ls->p. */
+ }
+}
+
+/* Refill buffer if needed. */
+static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need)
+{
+ lua_assert(len != 0);
+ if (len > LJ_MAX_MEM || ls->current < 0)
+ bcread_error(ls, LJ_ERR_BCBAD);
+ do {
+ const char *buf;
+ size_t size;
+ if (ls->n) { /* Copy remainder to buffer. */
+ if (ls->sb.n) { /* Move down in buffer. */
+ lua_assert(ls->p + ls->n == ls->sb.buf + ls->sb.n);
+ if (ls->n != ls->sb.n)
+ memmove(ls->sb.buf, ls->p, ls->n);
+ } else { /* Copy from buffer provided by reader. */
+ bcread_resize(ls, len);
+ memcpy(ls->sb.buf, ls->p, ls->n);
+ }
+ ls->p = ls->sb.buf;
+ }
+ ls->sb.n = ls->n;
+ buf = ls->rfunc(ls->L, ls->rdata, &size); /* Get more data from reader. */
+ if (buf == NULL || size == 0) { /* EOF? */
+ if (need) bcread_error(ls, LJ_ERR_BCBAD);
+ ls->current = -1; /* Only bad if we get called again. */
+ break;
+ }
+ if (ls->sb.n) { /* Append to buffer. */
+ MSize n = ls->sb.n + (MSize)size;
+ bcread_resize(ls, n < len ? len : n);
+ memcpy(ls->sb.buf + ls->sb.n, buf, size);
+ ls->n = ls->sb.n = n;
+ ls->p = ls->sb.buf;
+ } else { /* Return buffer provided by reader. */
+ ls->n = (MSize)size;
+ ls->p = buf;
+ }
+ } while (ls->n < len);
+}
+
+/* Need a certain number of bytes. */
+static LJ_AINLINE void bcread_need(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY(ls->n < len))
+ bcread_fill(ls, len, 1);
+}
+
+/* Want to read up to a certain number of bytes, but may need less. */
+static LJ_AINLINE void bcread_want(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY(ls->n < len))
+ bcread_fill(ls, len, 0);
+}
+
+#define bcread_dec(ls) check_exp(ls->n > 0, ls->n--)
+#define bcread_consume(ls, len) check_exp(ls->n >= (len), ls->n -= (len))
+
+/* Return memory block from buffer. */
+static uint8_t *bcread_mem(LexState *ls, MSize len)
+{
+ uint8_t *p = (uint8_t *)ls->p;
+ bcread_consume(ls, len);
+ ls->p = (char *)p + len;
+ return p;
+}
+
+/* Copy memory block from buffer. */
+static void bcread_block(LexState *ls, void *q, MSize len)
+{
+ memcpy(q, bcread_mem(ls, len), len);
+}
+
+/* Read byte from buffer. */
+static LJ_AINLINE uint32_t bcread_byte(LexState *ls)
+{
+ bcread_dec(ls);
+ return (uint32_t)(uint8_t)*ls->p++;
+}
+
+/* Read ULEB128 value from buffer. */
+static uint32_t bcread_uleb128(LexState *ls)
+{
+ const uint8_t *p = (const uint8_t *)ls->p;
+ uint32_t v = *p++;
+ if (LJ_UNLIKELY(v >= 0x80)) {
+ int sh = 0;
+ v &= 0x7f;
+ do {
+ v |= ((*p & 0x7f) << (sh += 7));
+ bcread_dec(ls);
+ } while (*p++ >= 0x80);
+ }
+ bcread_dec(ls);
+ ls->p = (char *)p;
+ return v;
+}
+
+/* Read top 32 bits of 33 bit ULEB128 value from buffer. */
+static uint32_t bcread_uleb128_33(LexState *ls)
+{
+ const uint8_t *p = (const uint8_t *)ls->p;
+ uint32_t v = (*p++ >> 1);
+ if (LJ_UNLIKELY(v >= 0x40)) {
+ int sh = -1;
+ v &= 0x3f;
+ do {
+ v |= ((*p & 0x7f) << (sh += 7));
+ bcread_dec(ls);
+ } while (*p++ >= 0x80);
+ }
+ bcread_dec(ls);
+ ls->p = (char *)p;
+ return v;
+}
+
+/* -- Bytecode reader ----------------------------------------------------- */
+
+/* Read debug info of a prototype. */
+static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg)
+{
+ void *lineinfo = (void *)proto_lineinfo(pt);
+ bcread_block(ls, lineinfo, sizedbg);
+ /* Swap lineinfo if the endianess differs. */
+ if (bcread_swap(ls) && pt->numline >= 256) {
+ MSize i, n = pt->sizebc-1;
+ if (pt->numline < 65536) {
+ uint16_t *p = (uint16_t *)lineinfo;
+ for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8));
+ } else {
+ uint32_t *p = (uint32_t *)lineinfo;
+ for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]);
+ }
+ }
+}
+
+/* Find pointer to varinfo. */
+static const void *bcread_varinfo(GCproto *pt)
+{
+ const uint8_t *p = proto_uvinfo(pt);
+ MSize n = pt->sizeuv;
+ if (n) while (*p++ || --n) ;
+ return p;
+}
+
+/* Read a single constant key/value of a template table. */
+static void bcread_ktabk(LexState *ls, TValue *o)
+{
+ MSize tp = bcread_uleb128(ls);
+ if (tp >= BCDUMP_KTAB_STR) {
+ MSize len = tp - BCDUMP_KTAB_STR;
+ const char *p = (const char *)bcread_mem(ls, len);
+ setstrV(ls->L, o, lj_str_new(ls->L, p, len));
+ } else if (tp == BCDUMP_KTAB_INT) {
+ setintV(o, (int32_t)bcread_uleb128(ls));
+ } else if (tp == BCDUMP_KTAB_NUM) {
+ o->u32.lo = bcread_uleb128(ls);
+ o->u32.hi = bcread_uleb128(ls);
+ } else {
+ lua_assert(tp <= BCDUMP_KTAB_TRUE);
+ setitype(o, ~tp);
+ }
+}
+
+/* Read a template table. */
+static GCtab *bcread_ktab(LexState *ls)
+{
+ MSize narray = bcread_uleb128(ls);
+ MSize nhash = bcread_uleb128(ls);
+ GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash));
+ if (narray) { /* Read array entries. */
+ MSize i;
+ TValue *o = tvref(t->array);
+ for (i = 0; i < narray; i++, o++)
+ bcread_ktabk(ls, o);
+ }
+ if (nhash) { /* Read hash entries. */
+ MSize i;
+ for (i = 0; i < nhash; i++) {
+ TValue key;
+ bcread_ktabk(ls, &key);
+ lua_assert(!tvisnil(&key));
+ bcread_ktabk(ls, lj_tab_set(ls->L, t, &key));
+ }
+ }
+ return t;
+}
+
+/* Read GC constants of a prototype. */
+static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc)
+{
+ MSize i;
+ GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
+ for (i = 0; i < sizekgc; i++, kr++) {
+ MSize tp = bcread_uleb128(ls);
+ if (tp >= BCDUMP_KGC_STR) {
+ MSize len = tp - BCDUMP_KGC_STR;
+ const char *p = (const char *)bcread_mem(ls, len);
+ setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len)));
+ } else if (tp == BCDUMP_KGC_TAB) {
+ setgcref(*kr, obj2gco(bcread_ktab(ls)));
+#if LJ_HASFFI
+ } else if (tp != BCDUMP_KGC_CHILD) {
+ CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE :
+ tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64;
+ CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8;
+ GCcdata *cd = lj_cdata_new_(ls->L, id, sz);
+ TValue *p = (TValue *)cdataptr(cd);
+ setgcref(*kr, obj2gco(cd));
+ p[0].u32.lo = bcread_uleb128(ls);
+ p[0].u32.hi = bcread_uleb128(ls);
+ if (tp == BCDUMP_KGC_COMPLEX) {
+ p[1].u32.lo = bcread_uleb128(ls);
+ p[1].u32.hi = bcread_uleb128(ls);
+ }
+#endif
+ } else {
+ lua_State *L = ls->L;
+ lua_assert(tp == BCDUMP_KGC_CHILD);
+ if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */
+ bcread_error(ls, LJ_ERR_BCBAD);
+ L->top--;
+ setgcref(*kr, obj2gco(protoV(L->top)));
+ }
+ }
+}
+
+/* Read number constants of a prototype. */
+static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn)
+{
+ MSize i;
+ TValue *o = mref(pt->k, TValue);
+ for (i = 0; i < sizekn; i++, o++) {
+ int isnum = (ls->p[0] & 1);
+ uint32_t lo = bcread_uleb128_33(ls);
+ if (isnum) {
+ o->u32.lo = lo;
+ o->u32.hi = bcread_uleb128(ls);
+ } else {
+ setintV(o, lo);
+ }
+ }
+}
+
+/* Read bytecode instructions. */
+static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc)
+{
+ BCIns *bc = proto_bc(pt);
+ bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
+ pt->framesize, 0);
+ bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns));
+ /* Swap bytecode instructions if the endianess differs. */
+ if (bcread_swap(ls)) {
+ MSize i;
+ for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]);
+ }
+}
+
+/* Read upvalue refs. */
+static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv)
+{
+ if (sizeuv) {
+ uint16_t *uv = proto_uv(pt);
+ bcread_block(ls, uv, sizeuv*2);
+ /* Swap upvalue refs if the endianess differs. */
+ if (bcread_swap(ls)) {
+ MSize i;
+ for (i = 0; i < sizeuv; i++)
+ uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8));
+ }
+ }
+}
+
+/* Read a prototype. */
+static GCproto *bcread_proto(LexState *ls)
+{
+ GCproto *pt;
+ MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept;
+ MSize ofsk, ofsuv, ofsdbg;
+ MSize sizedbg = 0;
+ BCLine firstline = 0, numline = 0;
+ MSize len, startn;
+
+ /* Read length. */
+ if (ls->n > 0 && ls->p[0] == 0) { /* Shortcut EOF. */
+ ls->n--; ls->p++;
+ return NULL;
+ }
+ bcread_want(ls, 5);
+ len = bcread_uleb128(ls);
+ if (!len) return NULL; /* EOF */
+ bcread_need(ls, len);
+ startn = ls->n;
+
+ /* Read prototype header. */
+ flags = bcread_byte(ls);
+ numparams = bcread_byte(ls);
+ framesize = bcread_byte(ls);
+ sizeuv = bcread_byte(ls);
+ sizekgc = bcread_uleb128(ls);
+ sizekn = bcread_uleb128(ls);
+ sizebc = bcread_uleb128(ls) + 1;
+ if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) {
+ sizedbg = bcread_uleb128(ls);
+ if (sizedbg) {
+ firstline = bcread_uleb128(ls);
+ numline = bcread_uleb128(ls);
+ }
+ }
+
+ /* Calculate total size of prototype including all colocated arrays. */
+ sizept = (MSize)sizeof(GCproto) +
+ sizebc*(MSize)sizeof(BCIns) +
+ sizekgc*(MSize)sizeof(GCRef);
+ sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1);
+ ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue);
+ ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2;
+ ofsdbg = sizept; sizept += sizedbg;
+
+ /* Allocate prototype object and initialize its fields. */
+ pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept);
+ pt->gct = ~LJ_TPROTO;
+ pt->numparams = (uint8_t)numparams;
+ pt->framesize = (uint8_t)framesize;
+ pt->sizebc = sizebc;
+ setmref(pt->k, (char *)pt + ofsk);
+ setmref(pt->uv, (char *)pt + ofsuv);
+ pt->sizekgc = 0; /* Set to zero until fully initialized. */
+ pt->sizekn = sizekn;
+ pt->sizept = sizept;
+ pt->sizeuv = (uint8_t)sizeuv;
+ pt->flags = (uint8_t)flags;
+ pt->trace = 0;
+ setgcref(pt->chunkname, obj2gco(ls->chunkname));
+
+ /* Close potentially uninitialized gap between bc and kgc. */
+ *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0;
+
+ /* Read bytecode instructions and upvalue refs. */
+ bcread_bytecode(ls, pt, sizebc);
+ bcread_uv(ls, pt, sizeuv);
+
+ /* Read constants. */
+ bcread_kgc(ls, pt, sizekgc);
+ pt->sizekgc = sizekgc;
+ bcread_knum(ls, pt, sizekn);
+
+ /* Read and initialize debug info. */
+ pt->firstline = firstline;
+ pt->numline = numline;
+ if (sizedbg) {
+ MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
+ setmref(pt->lineinfo, (char *)pt + ofsdbg);
+ setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli);
+ bcread_dbg(ls, pt, sizedbg);
+ setmref(pt->varinfo, bcread_varinfo(pt));
+ } else {
+ setmref(pt->lineinfo, NULL);
+ setmref(pt->uvinfo, NULL);
+ setmref(pt->varinfo, NULL);
+ }
+
+ if (len != startn - ls->n)
+ bcread_error(ls, LJ_ERR_BCBAD);
+ return pt;
+}
+
+/* Read and check header of bytecode dump. */
+static int bcread_header(LexState *ls)
+{
+ uint32_t flags;
+ bcread_want(ls, 3+5+5);
+ if (bcread_byte(ls) != BCDUMP_HEAD2 ||
+ bcread_byte(ls) != BCDUMP_HEAD3 ||
+ bcread_byte(ls) != BCDUMP_VERSION) return 0;
+ bcread_flags(ls) = flags = bcread_uleb128(ls);
+ if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0;
+ if ((flags & BCDUMP_F_FFI)) {
+#if LJ_HASFFI
+ lua_State *L = ls->L;
+ if (!ctype_ctsG(G(L))) {
+ ptrdiff_t oldtop = savestack(L, L->top);
+ luaopen_ffi(L); /* Load FFI library on-demand. */
+ L->top = restorestack(L, oldtop);
+ }
+#else
+ return 0;
+#endif
+ }
+ if ((flags & BCDUMP_F_STRIP)) {
+ ls->chunkname = lj_str_newz(ls->L, ls->chunkarg);
+ } else {
+ MSize len = bcread_uleb128(ls);
+ bcread_need(ls, len);
+ ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len);
+ }
+ return 1; /* Ok. */
+}
+
+/* Read a bytecode dump. */
+GCproto *lj_bcread(LexState *ls)
+{
+ lua_State *L = ls->L;
+ lua_assert(ls->current == BCDUMP_HEAD1);
+ bcread_savetop(L, ls, L->top);
+ lj_str_resetbuf(&ls->sb);
+ /* Check for a valid bytecode dump header. */
+ if (!bcread_header(ls))
+ bcread_error(ls, LJ_ERR_BCFMT);
+ for (;;) { /* Process all prototypes in the bytecode dump. */
+ GCproto *pt = bcread_proto(ls);
+ if (!pt) break;
+ setprotoV(L, L->top, pt);
+ incr_top(L);
+ }
+ if ((int32_t)ls->n > 0 || L->top-1 != bcread_oldtop(L, ls))
+ bcread_error(ls, LJ_ERR_BCBAD);
+ /* Pop off last prototype. */
+ L->top--;
+ return protoV(L->top);
+}
+
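
One detail of the reader above worth spelling out is the number-constant encoding that bcread_knum undoes: each constant begins with a 33-bit ULEB128 whose lowest bit tags the kind, 0 meaning the remaining 32 bits are a signed integer and 1 meaning they are the low word of an IEEE-754 double, followed by an ordinary ULEB128 for the high word. The sketch below reassembles a constant from those already-decoded fields; it assumes a host where u32.lo holds the low half of the double (the little-endian case) and ignores the last-byte sign fix-up that bcwrite_knum applies when writing negative values.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Rebuild a number constant from the decoded dump fields. */
static double knum_value(int isnum, uint32_t lo, uint32_t hi)
{
  if (!isnum)
    return (double)(int32_t)lo;  /* Tag bit 0: plain 32-bit integer. */
  /* Tag bit 1: lo/hi are the two 32-bit halves of an IEEE-754 double. */
  uint64_t bits = ((uint64_t)hi << 32) | lo;
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d;
}

int main(void)
{
  printf("%g\n", knum_value(0, (uint32_t)-42, 0));          /* -42 */
  printf("%g\n", knum_value(1, 0x00000000u, 0x40090000u));  /* 3.125 */
  return 0;
}
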
diff --git a/3rdparty/lua/src/lj_bcwrite.c b/3rdparty/lua/src/lj_bcwrite.c
index e932a8a..4805d51 100644
--- a/3rdparty/lua/src/lj_bcwrite.c
+++ b/3rdparty/lua/src/lj_bcwrite.c
@@ -1,396 +1,396 @@
-/*
-** Bytecode writer.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_bcwrite_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_str.h"
-#include "lj_bc.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#endif
-#if LJ_HASJIT
-#include "lj_dispatch.h"
-#include "lj_jit.h"
-#endif
-#include "lj_bcdump.h"
-#include "lj_vm.h"
-
-/* Context for bytecode writer. */
-typedef struct BCWriteCtx {
- SBuf sb; /* Output buffer. */
- lua_State *L; /* Lua state. */
- GCproto *pt; /* Root prototype. */
- lua_Writer wfunc; /* Writer callback. */
- void *wdata; /* Writer callback data. */
- int strip; /* Strip debug info. */
- int status; /* Status from writer callback. */
-} BCWriteCtx;
-
-/* -- Output buffer handling ---------------------------------------------- */
-
-/* Resize buffer if needed. */
-static LJ_NOINLINE void bcwrite_resize(BCWriteCtx *ctx, MSize len)
-{
- MSize sz = ctx->sb.sz * 2;
- while (ctx->sb.n + len > sz) sz = sz * 2;
- lj_str_resizebuf(ctx->L, &ctx->sb, sz);
-}
-
-/* Need a certain amount of buffer space. */
-static LJ_AINLINE void bcwrite_need(BCWriteCtx *ctx, MSize len)
-{
- if (LJ_UNLIKELY(ctx->sb.n + len > ctx->sb.sz))
- bcwrite_resize(ctx, len);
-}
-
-/* Add memory block to buffer. */
-static void bcwrite_block(BCWriteCtx *ctx, const void *p, MSize len)
-{
- uint8_t *q = (uint8_t *)(ctx->sb.buf + ctx->sb.n);
- MSize i;
- ctx->sb.n += len;
- for (i = 0; i < len; i++) q[i] = ((uint8_t *)p)[i];
-}
-
-/* Add byte to buffer. */
-static LJ_AINLINE void bcwrite_byte(BCWriteCtx *ctx, uint8_t b)
-{
- ctx->sb.buf[ctx->sb.n++] = b;
-}
-
-/* Add ULEB128 value to buffer. */
-static void bcwrite_uleb128(BCWriteCtx *ctx, uint32_t v)
-{
- MSize n = ctx->sb.n;
- uint8_t *p = (uint8_t *)ctx->sb.buf;
- for (; v >= 0x80; v >>= 7)
- p[n++] = (uint8_t)((v & 0x7f) | 0x80);
- p[n++] = (uint8_t)v;
- ctx->sb.n = n;
-}
-
-/* -- Bytecode writer ----------------------------------------------------- */
-
-/* Write a single constant key/value of a template table. */
-static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow)
-{
- bcwrite_need(ctx, 1+10);
- if (tvisstr(o)) {
- const GCstr *str = strV(o);
- MSize len = str->len;
- bcwrite_need(ctx, 5+len);
- bcwrite_uleb128(ctx, BCDUMP_KTAB_STR+len);
- bcwrite_block(ctx, strdata(str), len);
- } else if (tvisint(o)) {
- bcwrite_byte(ctx, BCDUMP_KTAB_INT);
- bcwrite_uleb128(ctx, intV(o));
- } else if (tvisnum(o)) {
- if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */
- lua_Number num = numV(o);
- int32_t k = lj_num2int(num);
- if (num == (lua_Number)k) { /* -0 is never a constant. */
- bcwrite_byte(ctx, BCDUMP_KTAB_INT);
- bcwrite_uleb128(ctx, k);
- return;
- }
- }
- bcwrite_byte(ctx, BCDUMP_KTAB_NUM);
- bcwrite_uleb128(ctx, o->u32.lo);
- bcwrite_uleb128(ctx, o->u32.hi);
- } else {
- lua_assert(tvispri(o));
- bcwrite_byte(ctx, BCDUMP_KTAB_NIL+~itype(o));
- }
-}
-
-/* Write a template table. */
-static void bcwrite_ktab(BCWriteCtx *ctx, const GCtab *t)
-{
- MSize narray = 0, nhash = 0;
- if (t->asize > 0) { /* Determine max. length of array part. */
- ptrdiff_t i;
- TValue *array = tvref(t->array);
- for (i = (ptrdiff_t)t->asize-1; i >= 0; i--)
- if (!tvisnil(&array[i]))
- break;
- narray = (MSize)(i+1);
- }
- if (t->hmask > 0) { /* Count number of used hash slots. */
- MSize i, hmask = t->hmask;
- Node *node = noderef(t->node);
- for (i = 0; i <= hmask; i++)
- nhash += !tvisnil(&node[i].val);
- }
- /* Write number of array slots and hash slots. */
- bcwrite_uleb128(ctx, narray);
- bcwrite_uleb128(ctx, nhash);
- if (narray) { /* Write array entries (may contain nil). */
- MSize i;
- TValue *o = tvref(t->array);
- for (i = 0; i < narray; i++, o++)
- bcwrite_ktabk(ctx, o, 1);
- }
- if (nhash) { /* Write hash entries. */
- MSize i = nhash;
- Node *node = noderef(t->node) + t->hmask;
- for (;; node--)
- if (!tvisnil(&node->val)) {
- bcwrite_ktabk(ctx, &node->key, 0);
- bcwrite_ktabk(ctx, &node->val, 1);
- if (--i == 0) break;
- }
- }
-}
-
-/* Write GC constants of a prototype. */
-static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt)
-{
- MSize i, sizekgc = pt->sizekgc;
- GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
- for (i = 0; i < sizekgc; i++, kr++) {
- GCobj *o = gcref(*kr);
- MSize tp, need = 1;
- /* Determine constant type and needed size. */
- if (o->gch.gct == ~LJ_TSTR) {
- tp = BCDUMP_KGC_STR + gco2str(o)->len;
- need = 5+gco2str(o)->len;
- } else if (o->gch.gct == ~LJ_TPROTO) {
- lua_assert((pt->flags & PROTO_CHILD));
- tp = BCDUMP_KGC_CHILD;
-#if LJ_HASFFI
- } else if (o->gch.gct == ~LJ_TCDATA) {
- CTypeID id = gco2cd(o)->ctypeid;
- need = 1+4*5;
- if (id == CTID_INT64) {
- tp = BCDUMP_KGC_I64;
- } else if (id == CTID_UINT64) {
- tp = BCDUMP_KGC_U64;
- } else {
- lua_assert(id == CTID_COMPLEX_DOUBLE);
- tp = BCDUMP_KGC_COMPLEX;
- }
-#endif
- } else {
- lua_assert(o->gch.gct == ~LJ_TTAB);
- tp = BCDUMP_KGC_TAB;
- need = 1+2*5;
- }
- /* Write constant type. */
- bcwrite_need(ctx, need);
- bcwrite_uleb128(ctx, tp);
- /* Write constant data (if any). */
- if (tp >= BCDUMP_KGC_STR) {
- bcwrite_block(ctx, strdata(gco2str(o)), gco2str(o)->len);
- } else if (tp == BCDUMP_KGC_TAB) {
- bcwrite_ktab(ctx, gco2tab(o));
-#if LJ_HASFFI
- } else if (tp != BCDUMP_KGC_CHILD) {
- cTValue *p = (TValue *)cdataptr(gco2cd(o));
- bcwrite_uleb128(ctx, p[0].u32.lo);
- bcwrite_uleb128(ctx, p[0].u32.hi);
- if (tp == BCDUMP_KGC_COMPLEX) {
- bcwrite_uleb128(ctx, p[1].u32.lo);
- bcwrite_uleb128(ctx, p[1].u32.hi);
- }
-#endif
- }
- }
-}
-
-/* Write number constants of a prototype. */
-static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt)
-{
- MSize i, sizekn = pt->sizekn;
- cTValue *o = mref(pt->k, TValue);
- bcwrite_need(ctx, 10*sizekn);
- for (i = 0; i < sizekn; i++, o++) {
- int32_t k;
- if (tvisint(o)) {
- k = intV(o);
- goto save_int;
- } else {
- /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */
- if (!LJ_DUALNUM) { /* Narrow number constants to integers. */
- lua_Number num = numV(o);
- k = lj_num2int(num);
- if (num == (lua_Number)k) { /* -0 is never a constant. */
- save_int:
- bcwrite_uleb128(ctx, 2*(uint32_t)k | ((uint32_t)k & 0x80000000u));
- if (k < 0) {
- char *p = &ctx->sb.buf[ctx->sb.n-1];
- *p = (*p & 7) | ((k>>27) & 0x18);
- }
- continue;
- }
- }
- bcwrite_uleb128(ctx, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u)));
- if (o->u32.lo >= 0x80000000u) {
- char *p = &ctx->sb.buf[ctx->sb.n-1];
- *p = (*p & 7) | ((o->u32.lo>>27) & 0x18);
- }
- bcwrite_uleb128(ctx, o->u32.hi);
- }
- }
-}
-
-/* Write bytecode instructions. */
-static void bcwrite_bytecode(BCWriteCtx *ctx, GCproto *pt)
-{
- MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */
-#if LJ_HASJIT
- uint8_t *p = (uint8_t *)&ctx->sb.buf[ctx->sb.n];
-#endif
- bcwrite_block(ctx, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns));
-#if LJ_HASJIT
- /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */
- if ((pt->flags & PROTO_ILOOP) || pt->trace) {
- jit_State *J = L2J(ctx->L);
- MSize i;
- for (i = 0; i < nbc; i++, p += sizeof(BCIns)) {
- BCOp op = (BCOp)p[LJ_ENDIAN_SELECT(0, 3)];
- if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP ||
- op == BC_JFORI) {
- p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL);
- } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
- BCReg rd = p[LJ_ENDIAN_SELECT(2, 1)] + (p[LJ_ENDIAN_SELECT(3, 0)] << 8);
- BCIns ins = traceref(J, rd)->startins;
- p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_JFORL+BC_FORL);
- p[LJ_ENDIAN_SELECT(2, 1)] = bc_c(ins);
- p[LJ_ENDIAN_SELECT(3, 0)] = bc_b(ins);
- }
- }
- }
-#endif
-}
-
-/* Write prototype. */
-static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
-{
- MSize sizedbg = 0;
-
- /* Recursively write children of prototype. */
- if ((pt->flags & PROTO_CHILD)) {
- ptrdiff_t i, n = pt->sizekgc;
- GCRef *kr = mref(pt->k, GCRef) - 1;
- for (i = 0; i < n; i++, kr--) {
- GCobj *o = gcref(*kr);
- if (o->gch.gct == ~LJ_TPROTO)
- bcwrite_proto(ctx, gco2pt(o));
- }
- }
-
- /* Start writing the prototype info to a buffer. */
- lj_str_resetbuf(&ctx->sb);
- ctx->sb.n = 5; /* Leave room for final size. */
- bcwrite_need(ctx, 4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2);
-
- /* Write prototype header. */
- bcwrite_byte(ctx, (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI)));
- bcwrite_byte(ctx, pt->numparams);
- bcwrite_byte(ctx, pt->framesize);
- bcwrite_byte(ctx, pt->sizeuv);
- bcwrite_uleb128(ctx, pt->sizekgc);
- bcwrite_uleb128(ctx, pt->sizekn);
- bcwrite_uleb128(ctx, pt->sizebc-1);
- if (!ctx->strip) {
- if (proto_lineinfo(pt))
- sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt);
- bcwrite_uleb128(ctx, sizedbg);
- if (sizedbg) {
- bcwrite_uleb128(ctx, pt->firstline);
- bcwrite_uleb128(ctx, pt->numline);
- }
- }
-
- /* Write bytecode instructions and upvalue refs. */
- bcwrite_bytecode(ctx, pt);
- bcwrite_block(ctx, proto_uv(pt), pt->sizeuv*2);
-
- /* Write constants. */
- bcwrite_kgc(ctx, pt);
- bcwrite_knum(ctx, pt);
-
- /* Write debug info, if not stripped. */
- if (sizedbg) {
- bcwrite_need(ctx, sizedbg);
- bcwrite_block(ctx, proto_lineinfo(pt), sizedbg);
- }
-
- /* Pass buffer to writer function. */
- if (ctx->status == 0) {
- MSize n = ctx->sb.n - 5;
- MSize nn = (lj_fls(n)+8)*9 >> 6;
- ctx->sb.n = 5 - nn;
- bcwrite_uleb128(ctx, n); /* Fill in final size. */
- lua_assert(ctx->sb.n == 5);
- ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf+5-nn, nn+n, ctx->wdata);
- }
-}
-
-/* Write header of bytecode dump. */
-static void bcwrite_header(BCWriteCtx *ctx)
-{
- GCstr *chunkname = proto_chunkname(ctx->pt);
- const char *name = strdata(chunkname);
- MSize len = chunkname->len;
- lj_str_resetbuf(&ctx->sb);
- bcwrite_need(ctx, 5+5+len);
- bcwrite_byte(ctx, BCDUMP_HEAD1);
- bcwrite_byte(ctx, BCDUMP_HEAD2);
- bcwrite_byte(ctx, BCDUMP_HEAD3);
- bcwrite_byte(ctx, BCDUMP_VERSION);
- bcwrite_byte(ctx, (ctx->strip ? BCDUMP_F_STRIP : 0) +
- (LJ_BE ? BCDUMP_F_BE : 0) +
- ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0));
- if (!ctx->strip) {
- bcwrite_uleb128(ctx, len);
- bcwrite_block(ctx, name, len);
- }
- ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf, ctx->sb.n, ctx->wdata);
-}
-
-/* Write footer of bytecode dump. */
-static void bcwrite_footer(BCWriteCtx *ctx)
-{
- if (ctx->status == 0) {
- uint8_t zero = 0;
- ctx->status = ctx->wfunc(ctx->L, &zero, 1, ctx->wdata);
- }
-}
-
-/* Protected callback for bytecode writer. */
-static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud)
-{
- BCWriteCtx *ctx = (BCWriteCtx *)ud;
- UNUSED(dummy);
- lj_str_resizebuf(L, &ctx->sb, 1024); /* Avoids resize for most prototypes. */
- bcwrite_header(ctx);
- bcwrite_proto(ctx, ctx->pt);
- bcwrite_footer(ctx);
- return NULL;
-}
-
-/* Write bytecode for a prototype. */
-int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data,
- int strip)
-{
- BCWriteCtx ctx;
- int status;
- ctx.L = L;
- ctx.pt = pt;
- ctx.wfunc = writer;
- ctx.wdata = data;
- ctx.strip = strip;
- ctx.status = 0;
- lj_str_initbuf(&ctx.sb);
- status = lj_vm_cpcall(L, NULL, &ctx, cpwriter);
- if (status == 0) status = ctx.status;
- lj_str_freebuf(G(ctx.L), &ctx.sb);
- return status;
-}
-
+/*
+** Bytecode writer.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bcwrite_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_str.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#if LJ_HASJIT
+#include "lj_dispatch.h"
+#include "lj_jit.h"
+#endif
+#include "lj_bcdump.h"
+#include "lj_vm.h"
+
+/* Context for bytecode writer. */
+typedef struct BCWriteCtx {
+ SBuf sb; /* Output buffer. */
+ lua_State *L; /* Lua state. */
+ GCproto *pt; /* Root prototype. */
+ lua_Writer wfunc; /* Writer callback. */
+ void *wdata; /* Writer callback data. */
+ int strip; /* Strip debug info. */
+ int status; /* Status from writer callback. */
+} BCWriteCtx;
+
+/* -- Output buffer handling ---------------------------------------------- */
+
+/* Resize buffer if needed. */
+static LJ_NOINLINE void bcwrite_resize(BCWriteCtx *ctx, MSize len)
+{
+ MSize sz = ctx->sb.sz * 2;
+ while (ctx->sb.n + len > sz) sz = sz * 2;
+ lj_str_resizebuf(ctx->L, &ctx->sb, sz);
+}
+
+/* Need a certain amount of buffer space. */
+static LJ_AINLINE void bcwrite_need(BCWriteCtx *ctx, MSize len)
+{
+ if (LJ_UNLIKELY(ctx->sb.n + len > ctx->sb.sz))
+ bcwrite_resize(ctx, len);
+}
+
+/* Add memory block to buffer. */
+static void bcwrite_block(BCWriteCtx *ctx, const void *p, MSize len)
+{
+ uint8_t *q = (uint8_t *)(ctx->sb.buf + ctx->sb.n);
+ MSize i;
+ ctx->sb.n += len;
+ for (i = 0; i < len; i++) q[i] = ((uint8_t *)p)[i];
+}
+
+/* Add byte to buffer. */
+static LJ_AINLINE void bcwrite_byte(BCWriteCtx *ctx, uint8_t b)
+{
+ ctx->sb.buf[ctx->sb.n++] = b;
+}
+
+/* Add ULEB128 value to buffer. */
+static void bcwrite_uleb128(BCWriteCtx *ctx, uint32_t v)
+{
+ MSize n = ctx->sb.n;
+ uint8_t *p = (uint8_t *)ctx->sb.buf;
+ for (; v >= 0x80; v >>= 7)
+ p[n++] = (uint8_t)((v & 0x7f) | 0x80);
+ p[n++] = (uint8_t)v;
+ ctx->sb.n = n;
+}
+
+/* -- Bytecode writer ----------------------------------------------------- */
+
+/* Write a single constant key/value of a template table. */
+static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow)
+{
+ bcwrite_need(ctx, 1+10);
+ if (tvisstr(o)) {
+ const GCstr *str = strV(o);
+ MSize len = str->len;
+ bcwrite_need(ctx, 5+len);
+ bcwrite_uleb128(ctx, BCDUMP_KTAB_STR+len);
+ bcwrite_block(ctx, strdata(str), len);
+ } else if (tvisint(o)) {
+ bcwrite_byte(ctx, BCDUMP_KTAB_INT);
+ bcwrite_uleb128(ctx, intV(o));
+ } else if (tvisnum(o)) {
+ if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */
+ lua_Number num = numV(o);
+ int32_t k = lj_num2int(num);
+ if (num == (lua_Number)k) { /* -0 is never a constant. */
+ bcwrite_byte(ctx, BCDUMP_KTAB_INT);
+ bcwrite_uleb128(ctx, k);
+ return;
+ }
+ }
+ bcwrite_byte(ctx, BCDUMP_KTAB_NUM);
+ bcwrite_uleb128(ctx, o->u32.lo);
+ bcwrite_uleb128(ctx, o->u32.hi);
+ } else {
+ lua_assert(tvispri(o));
+ bcwrite_byte(ctx, BCDUMP_KTAB_NIL+~itype(o));
+ }
+}
+
+/* Write a template table. */
+static void bcwrite_ktab(BCWriteCtx *ctx, const GCtab *t)
+{
+ MSize narray = 0, nhash = 0;
+ if (t->asize > 0) { /* Determine max. length of array part. */
+ ptrdiff_t i;
+ TValue *array = tvref(t->array);
+ for (i = (ptrdiff_t)t->asize-1; i >= 0; i--)
+ if (!tvisnil(&array[i]))
+ break;
+ narray = (MSize)(i+1);
+ }
+ if (t->hmask > 0) { /* Count number of used hash slots. */
+ MSize i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (i = 0; i <= hmask; i++)
+ nhash += !tvisnil(&node[i].val);
+ }
+ /* Write number of array slots and hash slots. */
+ bcwrite_uleb128(ctx, narray);
+ bcwrite_uleb128(ctx, nhash);
+ if (narray) { /* Write array entries (may contain nil). */
+ MSize i;
+ TValue *o = tvref(t->array);
+ for (i = 0; i < narray; i++, o++)
+ bcwrite_ktabk(ctx, o, 1);
+ }
+ if (nhash) { /* Write hash entries. */
+ MSize i = nhash;
+ Node *node = noderef(t->node) + t->hmask;
+ for (;; node--)
+ if (!tvisnil(&node->val)) {
+ bcwrite_ktabk(ctx, &node->key, 0);
+ bcwrite_ktabk(ctx, &node->val, 1);
+ if (--i == 0) break;
+ }
+ }
+}
+
+/* Write GC constants of a prototype. */
+static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize i, sizekgc = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
+ for (i = 0; i < sizekgc; i++, kr++) {
+ GCobj *o = gcref(*kr);
+ MSize tp, need = 1;
+ /* Determine constant type and needed size. */
+ if (o->gch.gct == ~LJ_TSTR) {
+ tp = BCDUMP_KGC_STR + gco2str(o)->len;
+ need = 5+gco2str(o)->len;
+ } else if (o->gch.gct == ~LJ_TPROTO) {
+ lua_assert((pt->flags & PROTO_CHILD));
+ tp = BCDUMP_KGC_CHILD;
+#if LJ_HASFFI
+ } else if (o->gch.gct == ~LJ_TCDATA) {
+ CTypeID id = gco2cd(o)->ctypeid;
+ need = 1+4*5;
+ if (id == CTID_INT64) {
+ tp = BCDUMP_KGC_I64;
+ } else if (id == CTID_UINT64) {
+ tp = BCDUMP_KGC_U64;
+ } else {
+ lua_assert(id == CTID_COMPLEX_DOUBLE);
+ tp = BCDUMP_KGC_COMPLEX;
+ }
+#endif
+ } else {
+ lua_assert(o->gch.gct == ~LJ_TTAB);
+ tp = BCDUMP_KGC_TAB;
+ need = 1+2*5;
+ }
+ /* Write constant type. */
+ bcwrite_need(ctx, need);
+ bcwrite_uleb128(ctx, tp);
+ /* Write constant data (if any). */
+ if (tp >= BCDUMP_KGC_STR) {
+ bcwrite_block(ctx, strdata(gco2str(o)), gco2str(o)->len);
+ } else if (tp == BCDUMP_KGC_TAB) {
+ bcwrite_ktab(ctx, gco2tab(o));
+#if LJ_HASFFI
+ } else if (tp != BCDUMP_KGC_CHILD) {
+ cTValue *p = (TValue *)cdataptr(gco2cd(o));
+ bcwrite_uleb128(ctx, p[0].u32.lo);
+ bcwrite_uleb128(ctx, p[0].u32.hi);
+ if (tp == BCDUMP_KGC_COMPLEX) {
+ bcwrite_uleb128(ctx, p[1].u32.lo);
+ bcwrite_uleb128(ctx, p[1].u32.hi);
+ }
+#endif
+ }
+ }
+}
+
+/* Write number constants of a prototype. */
+static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize i, sizekn = pt->sizekn;
+ cTValue *o = mref(pt->k, TValue);
+ bcwrite_need(ctx, 10*sizekn);
+ for (i = 0; i < sizekn; i++, o++) {
+ int32_t k;
+ if (tvisint(o)) {
+ k = intV(o);
+ goto save_int;
+ } else {
+ /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */
+ if (!LJ_DUALNUM) { /* Narrow number constants to integers. */
+ lua_Number num = numV(o);
+ k = lj_num2int(num);
+ if (num == (lua_Number)k) { /* -0 is never a constant. */
+ save_int:
+ bcwrite_uleb128(ctx, 2*(uint32_t)k | ((uint32_t)k & 0x80000000u));
+ if (k < 0) {
+ char *p = &ctx->sb.buf[ctx->sb.n-1];
+ *p = (*p & 7) | ((k>>27) & 0x18);
+ }
+ continue;
+ }
+ }
+ bcwrite_uleb128(ctx, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u)));
+ if (o->u32.lo >= 0x80000000u) {
+ char *p = &ctx->sb.buf[ctx->sb.n-1];
+ *p = (*p & 7) | ((o->u32.lo>>27) & 0x18);
+ }
+ bcwrite_uleb128(ctx, o->u32.hi);
+ }
+ }
+}
+
+/* Write bytecode instructions. */
+static void bcwrite_bytecode(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */
+#if LJ_HASJIT
+ uint8_t *p = (uint8_t *)&ctx->sb.buf[ctx->sb.n];
+#endif
+ bcwrite_block(ctx, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns));
+#if LJ_HASJIT
+ /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */
+ if ((pt->flags & PROTO_ILOOP) || pt->trace) {
+ jit_State *J = L2J(ctx->L);
+ MSize i;
+ for (i = 0; i < nbc; i++, p += sizeof(BCIns)) {
+ BCOp op = (BCOp)p[LJ_ENDIAN_SELECT(0, 3)];
+ if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP ||
+ op == BC_JFORI) {
+ p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL);
+ } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
+ BCReg rd = p[LJ_ENDIAN_SELECT(2, 1)] + (p[LJ_ENDIAN_SELECT(3, 0)] << 8);
+ BCIns ins = traceref(J, rd)->startins;
+ p[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_JFORL+BC_FORL);
+ p[LJ_ENDIAN_SELECT(2, 1)] = bc_c(ins);
+ p[LJ_ENDIAN_SELECT(3, 0)] = bc_b(ins);
+ }
+ }
+ }
+#endif
+}
+
+/* Write prototype. */
+static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize sizedbg = 0;
+
+ /* Recursively write children of prototype. */
+ if ((pt->flags & PROTO_CHILD)) {
+ ptrdiff_t i, n = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - 1;
+ for (i = 0; i < n; i++, kr--) {
+ GCobj *o = gcref(*kr);
+ if (o->gch.gct == ~LJ_TPROTO)
+ bcwrite_proto(ctx, gco2pt(o));
+ }
+ }
+
+ /* Start writing the prototype info to a buffer. */
+ lj_str_resetbuf(&ctx->sb);
+ ctx->sb.n = 5; /* Leave room for final size. */
+ bcwrite_need(ctx, 4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2);
+
+ /* Write prototype header. */
+ bcwrite_byte(ctx, (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI)));
+ bcwrite_byte(ctx, pt->numparams);
+ bcwrite_byte(ctx, pt->framesize);
+ bcwrite_byte(ctx, pt->sizeuv);
+ bcwrite_uleb128(ctx, pt->sizekgc);
+ bcwrite_uleb128(ctx, pt->sizekn);
+ bcwrite_uleb128(ctx, pt->sizebc-1);
+ if (!ctx->strip) {
+ if (proto_lineinfo(pt))
+ sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt);
+ bcwrite_uleb128(ctx, sizedbg);
+ if (sizedbg) {
+ bcwrite_uleb128(ctx, pt->firstline);
+ bcwrite_uleb128(ctx, pt->numline);
+ }
+ }
+
+ /* Write bytecode instructions and upvalue refs. */
+ bcwrite_bytecode(ctx, pt);
+ bcwrite_block(ctx, proto_uv(pt), pt->sizeuv*2);
+
+ /* Write constants. */
+ bcwrite_kgc(ctx, pt);
+ bcwrite_knum(ctx, pt);
+
+ /* Write debug info, if not stripped. */
+ if (sizedbg) {
+ bcwrite_need(ctx, sizedbg);
+ bcwrite_block(ctx, proto_lineinfo(pt), sizedbg);
+ }
+
+ /* Pass buffer to writer function. */
+ if (ctx->status == 0) {
+ MSize n = ctx->sb.n - 5;
+ MSize nn = (lj_fls(n)+8)*9 >> 6;
+ ctx->sb.n = 5 - nn;
+ bcwrite_uleb128(ctx, n); /* Fill in final size. */
+ lua_assert(ctx->sb.n == 5);
+ ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf+5-nn, nn+n, ctx->wdata);
+ }
+}
+
+/* Write header of bytecode dump. */
+static void bcwrite_header(BCWriteCtx *ctx)
+{
+ GCstr *chunkname = proto_chunkname(ctx->pt);
+ const char *name = strdata(chunkname);
+ MSize len = chunkname->len;
+ lj_str_resetbuf(&ctx->sb);
+ bcwrite_need(ctx, 5+5+len);
+ bcwrite_byte(ctx, BCDUMP_HEAD1);
+ bcwrite_byte(ctx, BCDUMP_HEAD2);
+ bcwrite_byte(ctx, BCDUMP_HEAD3);
+ bcwrite_byte(ctx, BCDUMP_VERSION);
+ bcwrite_byte(ctx, (ctx->strip ? BCDUMP_F_STRIP : 0) +
+ (LJ_BE ? BCDUMP_F_BE : 0) +
+ ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0));
+ if (!ctx->strip) {
+ bcwrite_uleb128(ctx, len);
+ bcwrite_block(ctx, name, len);
+ }
+ ctx->status = ctx->wfunc(ctx->L, ctx->sb.buf, ctx->sb.n, ctx->wdata);
+}
+
+/* Write footer of bytecode dump. */
+static void bcwrite_footer(BCWriteCtx *ctx)
+{
+ if (ctx->status == 0) {
+ uint8_t zero = 0;
+ ctx->status = ctx->wfunc(ctx->L, &zero, 1, ctx->wdata);
+ }
+}
+
+/* Protected callback for bytecode writer. */
+static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ BCWriteCtx *ctx = (BCWriteCtx *)ud;
+ UNUSED(dummy);
+ lj_str_resizebuf(L, &ctx->sb, 1024); /* Avoids resize for most prototypes. */
+ bcwrite_header(ctx);
+ bcwrite_proto(ctx, ctx->pt);
+ bcwrite_footer(ctx);
+ return NULL;
+}
+
+/* Write bytecode for a prototype. */
+int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data,
+ int strip)
+{
+ BCWriteCtx ctx;
+ int status;
+ ctx.L = L;
+ ctx.pt = pt;
+ ctx.wfunc = writer;
+ ctx.wdata = data;
+ ctx.strip = strip;
+ ctx.status = 0;
+ lj_str_initbuf(&ctx.sb);
+ status = lj_vm_cpcall(L, NULL, &ctx, cpwriter);
+ if (status == 0) status = ctx.status;
+ lj_str_freebuf(G(ctx.L), &ctx.sb);
+ return status;
+}
+
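Illustrative aside, not part of the restored lj_bcwrite.c above: every variable-length field in the dump format (the prototype sizes, the constants, the chunk name length) is emitted through bcwrite_uleb128(), which stores 7 payload bits per byte and uses the top bit as a continuation flag. A minimal standalone sketch of the same encoding, assuming only a caller-supplied buffer with room for the worst case of 5 bytes per 32 bit value:

/* Sketch of the ULEB128 encoding used by bcwrite_uleb128(), written
** against a plain byte array instead of the SBuf inside BCWriteCtx.
*/
#include <stdint.h>
#include <stddef.h>

static size_t uleb128_encode(uint8_t *p, uint32_t v)
{
  size_t n = 0;
  while (v >= 0x80) {                       /* More than 7 bits left? */
    p[n++] = (uint8_t)((v & 0x7f) | 0x80);  /* Low 7 bits plus continuation bit. */
    v >>= 7;
  }
  p[n++] = (uint8_t)v;                      /* Final byte, top bit clear. */
  return n;                                 /* E.g. 300 encodes as 0xAC 0x02. */
}

Decoding reverses the loop: accumulate each byte's low 7 bits at successively higher bit positions until a byte with the top bit clear terminates the value.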
diff --git a/3rdparty/lua/src/lj_carith.c b/3rdparty/lua/src/lj_carith.c
index 59a2db6..afe7e68 100644
--- a/3rdparty/lua/src/lj_carith.c
+++ b/3rdparty/lua/src/lj_carith.c
@@ -1,353 +1,351 @@
-/*
-** C data arithmetic.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_ctype.h"
-#include "lj_cconv.h"
-#include "lj_cdata.h"
-#include "lj_carith.h"
-
-/* -- C data arithmetic --------------------------------------------------- */
-
-/* Binary operands of an operator converted to ctypes. */
-typedef struct CDArith {
- uint8_t *p[2];
- CType *ct[2];
-} CDArith;
-
-/* Check arguments for arithmetic metamethods. */
-static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca)
-{
- TValue *o = L->base;
- int ok = 1;
- MSize i;
- if (o+1 >= L->top)
- lj_err_argt(L, 1, LUA_TCDATA);
- for (i = 0; i < 2; i++, o++) {
- if (tviscdata(o)) {
- GCcdata *cd = cdataV(o);
- CTypeID id = (CTypeID)cd->ctypeid;
- CType *ct = ctype_raw(cts, id);
- uint8_t *p = (uint8_t *)cdataptr(cd);
- if (ctype_isptr(ct->info)) {
- p = (uint8_t *)cdata_getptr(p, ct->size);
- if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
- } else if (ctype_isfunc(ct->info)) {
- p = (uint8_t *)*(void **)p;
- ct = ctype_get(cts,
- lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
- }
- if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
- ca->ct[i] = ct;
- ca->p[i] = p;
- } else if (tvisint(o)) {
- ca->ct[i] = ctype_get(cts, CTID_INT32);
- ca->p[i] = (uint8_t *)&o->i;
- } else if (tvisnum(o)) {
- ca->ct[i] = ctype_get(cts, CTID_DOUBLE);
- ca->p[i] = (uint8_t *)&o->n;
- } else if (tvisnil(o)) {
- ca->ct[i] = ctype_get(cts, CTID_P_VOID);
- ca->p[i] = (uint8_t *)0;
- } else if (tvisstr(o)) {
- TValue *o2 = i == 0 ? o+1 : o-1;
- CType *ct = ctype_raw(cts, cdataV(o2)->ctypeid);
- ca->ct[i] = NULL;
- ca->p[i] = (uint8_t *)strVdata(o);
- ok = 0;
- if (ctype_isenum(ct->info)) {
- CTSize ofs;
- CType *cct = lj_ctype_getfield(cts, ct, strV(o), &ofs);
- if (cct && ctype_isconstval(cct->info)) {
- ca->ct[i] = ctype_child(cts, cct);
- ca->p[i] = (uint8_t *)&cct->size; /* Assumes ct does not grow. */
- ok = 1;
- } else {
- ca->ct[1-i] = ct; /* Use enum to improve error message. */
- ca->p[1-i] = NULL;
- break;
- }
- }
- } else {
- ca->ct[i] = NULL;
- ca->p[i] = (void *)(intptr_t)1; /* To make it unequal. */
- ok = 0;
- }
- }
- return ok;
-}
-
-/* Pointer arithmetic. */
-static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
-{
- CType *ctp = ca->ct[0];
- uint8_t *pp = ca->p[0];
- ptrdiff_t idx;
- CTSize sz;
- CTypeID id;
- GCcdata *cd;
- if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
- if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
- (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
- uint8_t *pp2 = ca->p[1];
- if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */
- setboolV(L->top-1, (pp == pp2));
- return 1;
- }
- if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL))
- return 0;
- if (mm == MM_sub) { /* Pointer difference. */
- intptr_t diff;
- sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
- if (sz == 0 || sz == CTSIZE_INVALID)
- return 0;
- diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz;
- /* All valid pointer differences on x64 are in (-2^47, +2^47),
- ** which fits into a double without loss of precision.
- */
- setintptrV(L->top-1, (int32_t)diff);
- return 1;
- } else if (mm == MM_lt) { /* Pointer comparison (unsigned). */
- setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2));
- return 1;
- } else {
- lua_assert(mm == MM_le);
- setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2));
- return 1;
- }
- }
- if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info)))
- return 0;
- lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1],
- (uint8_t *)&idx, ca->p[1], 0);
- if (mm == MM_sub) idx = -idx;
- } else if (mm == MM_add && ctype_isnum(ctp->info) &&
- (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
- /* Swap pointer and index. */
- ctp = ca->ct[1]; pp = ca->p[1];
- lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0],
- (uint8_t *)&idx, ca->p[0], 0);
- } else {
- return 0;
- }
- sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
- if (sz == CTSIZE_INVALID)
- return 0;
- pp += idx*(int32_t)sz; /* Compute pointer + index. */
- id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
- CTSIZE_PTR);
- cd = lj_cdata_new(cts, id, CTSIZE_PTR);
- *(uint8_t **)cdataptr(cd) = pp;
- setcdataV(L, L->top-1, cd);
- lj_gc_check(L);
- return 1;
-}
-
-/* 64 bit integer arithmetic. */
-static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
-{
- if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 &&
- ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) {
- CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) ||
- ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ?
- CTID_UINT64 : CTID_INT64;
- CType *ct = ctype_get(cts, id);
- GCcdata *cd;
- uint64_t u0, u1, *up;
- lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0);
- if (mm != MM_unm)
- lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0);
- switch (mm) {
- case MM_eq:
- setboolV(L->top-1, (u0 == u1));
- return 1;
- case MM_lt:
- setboolV(L->top-1,
- id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1));
- return 1;
- case MM_le:
- setboolV(L->top-1,
- id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1));
- return 1;
- default: break;
- }
- cd = lj_cdata_new(cts, id, 8);
- up = (uint64_t *)cdataptr(cd);
- setcdataV(L, L->top-1, cd);
- switch (mm) {
- case MM_add: *up = u0 + u1; break;
- case MM_sub: *up = u0 - u1; break;
- case MM_mul: *up = u0 * u1; break;
- case MM_div:
- if (id == CTID_INT64)
- *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1);
- else
- *up = lj_carith_divu64(u0, u1);
- break;
- case MM_mod:
- if (id == CTID_INT64)
- *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1);
- else
- *up = lj_carith_modu64(u0, u1);
- break;
- case MM_pow:
- if (id == CTID_INT64)
- *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1);
- else
- *up = lj_carith_powu64(u0, u1);
- break;
- case MM_unm: *up = (uint64_t)-(int64_t)u0; break;
- default: lua_assert(0); break;
- }
- lj_gc_check(L);
- return 1;
- }
- return 0;
-}
-
-/* Handle ctype arithmetic metamethods. */
-static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
-{
- cTValue *tv = NULL;
- if (tviscdata(L->base)) {
- CTypeID id = cdataV(L->base)->ctypeid;
- CType *ct = ctype_raw(cts, id);
- if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
- tv = lj_ctype_meta(cts, id, mm);
- }
- if (!tv && L->base+1 < L->top && tviscdata(L->base+1)) {
- CTypeID id = cdataV(L->base+1)->ctypeid;
- CType *ct = ctype_raw(cts, id);
- if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
- tv = lj_ctype_meta(cts, id, mm);
- }
- if (!tv) {
- const char *repr[2];
- int i, isenum = -1, isstr = -1;
- if (mm == MM_eq) { /* Equality checks never raise an error. */
- int eq = ca->p[0] == ca->p[1];
- setboolV(L->top-1, eq);
- setboolV(&G(L)->tmptv2, eq); /* Remember for trace recorder. */
- return 1;
- }
- for (i = 0; i < 2; i++) {
- if (ca->ct[i] && tviscdata(L->base+i)) {
- if (ctype_isenum(ca->ct[i]->info)) isenum = i;
- repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL));
- } else {
- if (tvisstr(&L->base[i])) isstr = i;
- repr[i] = lj_typename(&L->base[i]);
- }
- }
- if ((isenum ^ isstr) == 1)
- lj_err_callerv(L, LJ_ERR_FFI_BADCONV, repr[isstr], repr[isenum]);
- lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN :
- mm == MM_concat ? LJ_ERR_FFI_BADCONCAT :
- mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH,
- repr[0], repr[1]);
- }
- return lj_meta_tailcall(L, tv);
-}
-
-/* Arithmetic operators for cdata. */
-int lj_carith_op(lua_State *L, MMS mm)
-{
- CTState *cts = ctype_cts(L);
- CDArith ca;
- if (carith_checkarg(L, cts, &ca)) {
- if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) {
- copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */
- return 1;
- }
- }
- return lj_carith_meta(L, cts, &ca, mm);
-}
-
-/* -- 64 bit integer arithmetic helpers ----------------------------------- */
-
-#if LJ_32 && LJ_HASJIT
-/* Signed/unsigned 64 bit multiplication. */
-int64_t lj_carith_mul64(int64_t a, int64_t b)
-{
- return a * b;
-}
-#endif
-
-/* Unsigned 64 bit division. */
-uint64_t lj_carith_divu64(uint64_t a, uint64_t b)
-{
- if (b == 0) return U64x(80000000,00000000);
- return a / b;
-}
-
-/* Signed 64 bit division. */
-int64_t lj_carith_divi64(int64_t a, int64_t b)
-{
- if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1))
- return U64x(80000000,00000000);
- return a / b;
-}
-
-/* Unsigned 64 bit modulo. */
-uint64_t lj_carith_modu64(uint64_t a, uint64_t b)
-{
- if (b == 0) return U64x(80000000,00000000);
- return a % b;
-}
-
-/* Signed 64 bit modulo. */
-int64_t lj_carith_modi64(int64_t a, int64_t b)
-{
- if (b == 0) return U64x(80000000,00000000);
- if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0;
- return a % b;
-}
-
-/* Unsigned 64 bit x^k. */
-uint64_t lj_carith_powu64(uint64_t x, uint64_t k)
-{
- uint64_t y;
- if (k == 0)
- return 1;
- for (; (k & 1) == 0; k >>= 1) x *= x;
- y = x;
- if ((k >>= 1) != 0) {
- for (;;) {
- x *= x;
- if (k == 1) break;
- if (k & 1) y *= x;
- k >>= 1;
- }
- y *= x;
- }
- return y;
-}
-
-/* Signed 64 bit x^k. */
-int64_t lj_carith_powi64(int64_t x, int64_t k)
-{
- if (k == 0)
- return 1;
- if (k < 0) {
- if (x == 0)
- return U64x(7fffffff,ffffffff);
- else if (x == 1)
- return 1;
- else if (x == -1)
- return (k & 1) ? -1 : 1;
- else
- return 0;
- }
- return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k);
-}
-
-#endif
+/*
+** C data arithmetic.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_carith.h"
+
+/* -- C data arithmetic --------------------------------------------------- */
+
+/* Binary operands of an operator converted to ctypes. */
+typedef struct CDArith {
+ uint8_t *p[2];
+ CType *ct[2];
+} CDArith;
+
+/* Check arguments for arithmetic metamethods. */
+static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca)
+{
+ TValue *o = L->base;
+ int ok = 1;
+ MSize i;
+ if (o+1 >= L->top)
+ lj_err_argt(L, 1, LUA_TCDATA);
+ for (i = 0; i < 2; i++, o++) {
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ CTypeID id = (CTypeID)cd->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ if (ctype_isptr(ct->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ } else if (ctype_isfunc(ct->info)) {
+ p = (uint8_t *)*(void **)p;
+ ct = ctype_get(cts,
+ lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
+ }
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ ca->ct[i] = ct;
+ ca->p[i] = p;
+ } else if (tvisint(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_INT32);
+ ca->p[i] = (uint8_t *)&o->i;
+ } else if (tvisnum(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_DOUBLE);
+ ca->p[i] = (uint8_t *)&o->n;
+ } else if (tvisnil(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_P_VOID);
+ ca->p[i] = (uint8_t *)0;
+ } else if (tvisstr(o)) {
+ TValue *o2 = i == 0 ? o+1 : o-1;
+ CType *ct = ctype_raw(cts, cdataV(o2)->ctypeid);
+ ca->ct[i] = NULL;
+ ca->p[i] = NULL;
+ ok = 0;
+ if (ctype_isenum(ct->info)) {
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, ct, strV(o), &ofs);
+ if (cct && ctype_isconstval(cct->info)) {
+ ca->ct[i] = ctype_child(cts, cct);
+ ca->p[i] = (uint8_t *)&cct->size; /* Assumes ct does not grow. */
+ ok = 1;
+ } else {
+ ca->ct[1-i] = ct; /* Use enum to improve error message. */
+ ca->p[1-i] = NULL;
+ break;
+ }
+ }
+ } else {
+ ca->ct[i] = NULL;
+ ca->p[i] = NULL;
+ ok = 0;
+ }
+ }
+ return ok;
+}
+
+/* Pointer arithmetic. */
+static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ CType *ctp = ca->ct[0];
+ uint8_t *pp = ca->p[0];
+ ptrdiff_t idx;
+ CTSize sz;
+ CTypeID id;
+ GCcdata *cd;
+ if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
+ if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
+ (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
+ uint8_t *pp2 = ca->p[1];
+ if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */
+ setboolV(L->top-1, (pp == pp2));
+ return 1;
+ }
+ if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL))
+ return 0;
+ if (mm == MM_sub) { /* Pointer difference. */
+ intptr_t diff;
+ sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
+ if (sz == 0 || sz == CTSIZE_INVALID)
+ return 0;
+ diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz;
+ /* All valid pointer differences on x64 are in (-2^47, +2^47),
+ ** which fits into a double without loss of precision.
+ */
+ setintptrV(L->top-1, (int32_t)diff);
+ return 1;
+ } else if (mm == MM_lt) { /* Pointer comparison (unsigned). */
+ setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2));
+ return 1;
+ } else {
+ lua_assert(mm == MM_le);
+ setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2));
+ return 1;
+ }
+ }
+ if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info)))
+ return 0;
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1],
+ (uint8_t *)&idx, ca->p[1], 0);
+ if (mm == MM_sub) idx = -idx;
+ } else if (mm == MM_add && ctype_isnum(ctp->info) &&
+ (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
+ /* Swap pointer and index. */
+ ctp = ca->ct[1]; pp = ca->p[1];
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0],
+ (uint8_t *)&idx, ca->p[0], 0);
+ } else {
+ return 0;
+ }
+ sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
+ if (sz == CTSIZE_INVALID)
+ return 0;
+ pp += idx*(int32_t)sz; /* Compute pointer + index. */
+ id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
+ CTSIZE_PTR);
+ cd = lj_cdata_new(cts, id, CTSIZE_PTR);
+ *(uint8_t **)cdataptr(cd) = pp;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+/* 64 bit integer arithmetic. */
+static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 &&
+ ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) {
+ CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) ||
+ ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ?
+ CTID_UINT64 : CTID_INT64;
+ CType *ct = ctype_get(cts, id);
+ GCcdata *cd;
+ uint64_t u0, u1, *up;
+ lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0);
+ if (mm != MM_unm)
+ lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0);
+ switch (mm) {
+ case MM_eq:
+ setboolV(L->top-1, (u0 == u1));
+ return 1;
+ case MM_lt:
+ setboolV(L->top-1,
+ id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1));
+ return 1;
+ case MM_le:
+ setboolV(L->top-1,
+ id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1));
+ return 1;
+ default: break;
+ }
+ cd = lj_cdata_new(cts, id, 8);
+ up = (uint64_t *)cdataptr(cd);
+ setcdataV(L, L->top-1, cd);
+ switch (mm) {
+ case MM_add: *up = u0 + u1; break;
+ case MM_sub: *up = u0 - u1; break;
+ case MM_mul: *up = u0 * u1; break;
+ case MM_div:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_divu64(u0, u1);
+ break;
+ case MM_mod:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_modu64(u0, u1);
+ break;
+ case MM_pow:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_powu64(u0, u1);
+ break;
+ case MM_unm: *up = (uint64_t)-(int64_t)u0; break;
+ default: lua_assert(0); break;
+ }
+ lj_gc_check(L);
+ return 1;
+ }
+ return 0;
+}
+
+/* Handle ctype arithmetic metamethods. */
+static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ cTValue *tv = NULL;
+ if (tviscdata(L->base)) {
+ CTypeID id = cdataV(L->base)->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ }
+ if (!tv && L->base+1 < L->top && tviscdata(L->base+1)) {
+ CTypeID id = cdataV(L->base+1)->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ }
+ if (!tv) {
+ const char *repr[2];
+ int i, isenum = -1, isstr = -1;
+ if (mm == MM_eq) { /* Equality checks never raise an error. */
+ setboolV(L->top-1, 0);
+ return 1;
+ }
+ for (i = 0; i < 2; i++) {
+ if (ca->ct[i] && tviscdata(L->base+i)) {
+ if (ctype_isenum(ca->ct[i]->info)) isenum = i;
+ repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL));
+ } else {
+ if (tvisstr(&L->base[i])) isstr = i;
+ repr[i] = lj_typename(&L->base[i]);
+ }
+ }
+ if ((isenum ^ isstr) == 1)
+ lj_err_callerv(L, LJ_ERR_FFI_BADCONV, repr[isstr], repr[isenum]);
+ lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN :
+ mm == MM_concat ? LJ_ERR_FFI_BADCONCAT :
+ mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH,
+ repr[0], repr[1]);
+ }
+ return lj_meta_tailcall(L, tv);
+}
+
+/* Arithmetic operators for cdata. */
+int lj_carith_op(lua_State *L, MMS mm)
+{
+ CTState *cts = ctype_cts(L);
+ CDArith ca;
+ if (carith_checkarg(L, cts, &ca)) {
+ if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) {
+ copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */
+ return 1;
+ }
+ }
+ return lj_carith_meta(L, cts, &ca, mm);
+}
+
+/* -- 64 bit integer arithmetic helpers ----------------------------------- */
+
+#if LJ_32 && LJ_HASJIT
+/* Signed/unsigned 64 bit multiplication. */
+int64_t lj_carith_mul64(int64_t a, int64_t b)
+{
+ return a * b;
+}
+#endif
+
+/* Unsigned 64 bit division. */
+uint64_t lj_carith_divu64(uint64_t a, uint64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ return a / b;
+}
+
+/* Signed 64 bit division. */
+int64_t lj_carith_divi64(int64_t a, int64_t b)
+{
+ if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1))
+ return U64x(80000000,00000000);
+ return a / b;
+}
+
+/* Unsigned 64 bit modulo. */
+uint64_t lj_carith_modu64(uint64_t a, uint64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ return a % b;
+}
+
+/* Signed 64 bit modulo. */
+int64_t lj_carith_modi64(int64_t a, int64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0;
+ return a % b;
+}
+
+/* Unsigned 64 bit x^k. */
+uint64_t lj_carith_powu64(uint64_t x, uint64_t k)
+{
+ uint64_t y;
+ if (k == 0)
+ return 1;
+ for (; (k & 1) == 0; k >>= 1) x *= x;
+ y = x;
+ if ((k >>= 1) != 0) {
+ for (;;) {
+ x *= x;
+ if (k == 1) break;
+ if (k & 1) y *= x;
+ k >>= 1;
+ }
+ y *= x;
+ }
+ return y;
+}
+
+/* Signed 64 bit x^k. */
+int64_t lj_carith_powi64(int64_t x, int64_t k)
+{
+ if (k == 0)
+ return 1;
+ if (k < 0) {
+ if (x == 0)
+ return U64x(7fffffff,ffffffff);
+ else if (x == 1)
+ return 1;
+ else if (x == -1)
+ return (k & 1) ? -1 : 1;
+ else
+ return 0;
+ }
+ return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k);
+}
+
+#endif
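Illustrative aside, not part of the changeset: lj_carith_powu64() above implements unsigned 64 bit exponentiation by square-and-multiply, consuming one bit of the exponent per squaring. A compact restatement of the same technique (not a verbatim copy of the function), with a small self-check, assuming nothing beyond <stdint.h> and <assert.h>:

#include <stdint.h>
#include <assert.h>

/* Square-and-multiply: O(log k) multiplies instead of k-1. */
static uint64_t powu64_sketch(uint64_t x, uint64_t k)
{
  uint64_t y;
  if (k == 0) return 1;
  for (; (k & 1) == 0; k >>= 1) x *= x;  /* Fold trailing zero bits of k. */
  y = x;
  while ((k >>= 1) != 0) {
    x *= x;                              /* Square once per remaining exponent bit. */
    if (k & 1) y *= x;                   /* Multiply in every set bit. */
  }
  return y;                              /* Overflow wraps mod 2^64, as usual in C. */
}

int main(void)
{
  assert(powu64_sketch(3, 5) == 243);                 /* 3^5 */
  assert(powu64_sketch(2, 63) == (uint64_t)1 << 63);  /* 2^63 */
  assert(powu64_sketch(10, 0) == 1);                  /* x^0 == 1 */
  return 0;
}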
diff --git a/3rdparty/lua/src/lj_carith.h b/3rdparty/lua/src/lj_carith.h
index 6adfd3f..ae17df0 100644
--- a/3rdparty/lua/src/lj_carith.h
+++ b/3rdparty/lua/src/lj_carith.h
@@ -1,27 +1,27 @@
-/*
-** C data arithmetic.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CARITH_H
-#define _LJ_CARITH_H
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-LJ_FUNC int lj_carith_op(lua_State *L, MMS mm);
-
-#if LJ_32 && LJ_HASJIT
-LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k);
-#endif
-LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b);
-LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b);
-LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b);
-LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b);
-LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k);
-LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k);
-
-#endif
-
-#endif
+/*
+** C data arithmetic.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CARITH_H
+#define _LJ_CARITH_H
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+LJ_FUNC int lj_carith_op(lua_State *L, MMS mm);
+
+#if LJ_32 && LJ_HASJIT
+LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k);
+#endif
+LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b);
+LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b);
+LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b);
+LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b);
+LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k);
+LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k);
+
+#endif
+
+#endif
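Illustrative aside, not part of the changeset: the signed division and modulo helpers declared above avoid the two cases where C integer division is undefined (division by zero, and INT64_MIN divided by -1) by checking for them up front instead of letting the hardware trap: division by zero yields the sentinel U64x(80000000,00000000), and INT64_MIN with a divisor of -1 is special-cased as well (the sentinel for division, 0 for modulo). A standalone restatement of the modulo rule, assuming only <stdint.h> and <assert.h>:

#include <stdint.h>
#include <assert.h>

/* Restatement of the lj_carith_modi64() rules, with INT64_MIN spelled out. */
static int64_t modi64_sketch(int64_t a, int64_t b)
{
  if (b == 0) return INT64_MIN;             /* Same bit pattern as U64x(80000000,00000000). */
  if (a == INT64_MIN && b == -1) return 0;  /* a % -1 is 0; avoids the overflow trap. */
  return a % b;
}

int main(void)
{
  assert(modi64_sketch(7, 3) == 1);
  assert(modi64_sketch(INT64_MIN, -1) == 0);  /* Would fault on x86 without the guard. */
  assert(modi64_sketch(5, 0) == INT64_MIN);   /* Division by zero maps to the sentinel. */
  return 0;
}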
diff --git a/3rdparty/lua/src/lj_ccall.c b/3rdparty/lua/src/lj_ccall.c
index b9f1070..eb73604 100644
--- a/3rdparty/lua/src/lj_ccall.c
+++ b/3rdparty/lua/src/lj_ccall.c
@@ -1,900 +1,899 @@
-/*
-** FFI C call handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_ctype.h"
-#include "lj_cconv.h"
-#include "lj_cdata.h"
-#include "lj_ccall.h"
-#include "lj_trace.h"
-
-/* Target-specific handling of register arguments. */
-#if LJ_TARGET_X86
-/* -- x86 calling conventions --------------------------------------------- */
-
-#if LJ_ABI_WIN
-
-#define CCALL_HANDLE_STRUCTRET \
- /* Return structs bigger than 8 by reference (on stack only). */ \
- cc->retref = (sz > 8); \
- if (cc->retref) cc->stack[nsp++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
-
-#else
-
-#if LJ_TARGET_OSX
-
-#define CCALL_HANDLE_STRUCTRET \
- /* Return structs of size 1, 2, 4 or 8 in registers. */ \
- cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
- if (cc->retref) { \
- if (ngpr < maxgpr) \
- cc->gpr[ngpr++] = (GPRArg)dp; \
- else \
- cc->stack[nsp++] = (GPRArg)dp; \
- } else { /* Struct with single FP field ends up in FPR. */ \
- cc->resx87 = ccall_classify_struct(cts, ctr); \
- }
-
-#define CCALL_HANDLE_STRUCTRET2 \
- if (cc->resx87) sp = (uint8_t *)&cc->fpr[0]; \
- memcpy(dp, sp, ctr->size);
-
-#else
-
-#define CCALL_HANDLE_STRUCTRET \
- cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \
- if (ngpr < maxgpr) \
- cc->gpr[ngpr++] = (GPRArg)dp; \
- else \
- cc->stack[nsp++] = (GPRArg)dp;
-
-#endif
-
-#define CCALL_HANDLE_COMPLEXRET \
- /* Return complex float in GPRs and complex double by reference. */ \
- cc->retref = (sz > 8); \
- if (cc->retref) { \
- if (ngpr < maxgpr) \
- cc->gpr[ngpr++] = (GPRArg)dp; \
- else \
- cc->stack[nsp++] = (GPRArg)dp; \
- }
-
-#endif
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- if (!cc->retref) \
- *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
-
-#define CCALL_HANDLE_STRUCTARG \
- ngpr = maxgpr; /* Pass all structs by value on the stack. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- isfp = 1; /* Pass complex by value on stack. */
-
-#define CCALL_HANDLE_REGARG \
- if (!isfp) { /* Only non-FP values may be passed in registers. */ \
- if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
- if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
- } else if (ngpr + 1 <= maxgpr) { \
- dp = &cc->gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#elif LJ_TARGET_X64 && LJ_ABI_WIN
-/* -- Windows/x64 calling conventions ------------------------------------- */
-
-#define CCALL_HANDLE_STRUCTRET \
- /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \
- cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
- if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- if (!cc->retref) \
- *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
-
-#define CCALL_HANDLE_STRUCTARG \
- /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \
- if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \
- rp = cdataptr(lj_cdata_new(cts, did, sz)); \
- sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \
- }
-
-#define CCALL_HANDLE_COMPLEXARG \
- /* Pass complex float in a GPR and complex double by reference. */ \
- if (sz != 2*sizeof(float)) { \
- rp = cdataptr(lj_cdata_new(cts, did, sz)); \
- sz = CTSIZE_PTR; \
- }
-
-/* Windows/x64 argument registers are strictly positional (use ngpr). */
-#define CCALL_HANDLE_REGARG \
- if (isfp) { \
- if (ngpr < maxgpr) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \
- } else { \
- if (ngpr < maxgpr) { dp = &cc->gpr[ngpr++]; goto done; } \
- }
-
-#elif LJ_TARGET_X64
-/* -- POSIX/x64 calling conventions --------------------------------------- */
-
-#define CCALL_HANDLE_STRUCTRET \
- int rcl[2]; rcl[0] = rcl[1] = 0; \
- if (ccall_classify_struct(cts, ctr, rcl, 0)) { \
- cc->retref = 1; /* Return struct by reference. */ \
- cc->gpr[ngpr++] = (GPRArg)dp; \
- } else { \
- cc->retref = 0; /* Return small structs in registers. */ \
- }
-
-#define CCALL_HANDLE_STRUCTRET2 \
- int rcl[2]; rcl[0] = rcl[1] = 0; \
- ccall_classify_struct(cts, ctr, rcl, 0); \
- ccall_struct_ret(cc, rcl, dp, ctr->size);
-
-#define CCALL_HANDLE_COMPLEXRET \
- /* Complex values are returned in one or two FPRs. */ \
- cc->retref = 0;
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \
- *(int64_t *)dp = cc->fpr[0].l[0]; \
- } else { /* Copy non-contiguous complex double from FPRs. */ \
- ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \
- ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \
- }
-
-#define CCALL_HANDLE_STRUCTARG \
- int rcl[2]; rcl[0] = rcl[1] = 0; \
- if (!ccall_classify_struct(cts, d, rcl, 0)) { \
- cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \
- if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \
- nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \
- continue; \
- } /* Pass all other structs by value on stack. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */
-
-#define CCALL_HANDLE_REGARG \
- if (isfp) { /* Try to pass argument in FPRs. */ \
- int n2 = ctype_isvector(d->info) ? 1 : n; \
- if (nfpr + n2 <= CCALL_NARG_FPR) { \
- dp = &cc->fpr[nfpr]; \
- nfpr += n2; \
- goto done; \
- } \
- } else { /* Try to pass argument in GPRs. */ \
- /* Note that reordering is explicitly allowed in the x64 ABI. */ \
- if (n <= 2 && ngpr + n <= maxgpr) { \
- dp = &cc->gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#elif LJ_TARGET_ARM
-/* -- ARM calling conventions --------------------------------------------- */
-
-#if LJ_ABI_SOFTFP
-
-#define CCALL_HANDLE_STRUCTRET \
- /* Return structs of size <= 4 in a GPR. */ \
- cc->retref = !(sz <= 4); \
- if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET \
- cc->retref = 1; /* Return all complex values by reference. */ \
- cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- UNUSED(dp); /* Nothing to do. */
-
-#define CCALL_HANDLE_STRUCTARG \
- /* Pass all structs by value in registers and/or on the stack. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- /* Pass complex by value in 2 or 4 GPRs. */
-
-#define CCALL_HANDLE_REGARG_FP1
-#define CCALL_HANDLE_REGARG_FP2
-
-#else
-
-#define CCALL_HANDLE_STRUCTRET \
- cc->retref = !ccall_classify_struct(cts, ctr, ct); \
- if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_STRUCTRET2 \
- if (ccall_classify_struct(cts, ctr, ct) > 1) sp = (uint8_t *)&cc->fpr[0]; \
- memcpy(dp, sp, ctr->size);
-
-#define CCALL_HANDLE_COMPLEXRET \
- if (!(ct->info & CTF_VARARG)) cc->retref = 0; /* Return complex in FPRs. */
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- if (!(ct->info & CTF_VARARG)) memcpy(dp, &cc->fpr[0], ctr->size);
-
-#define CCALL_HANDLE_STRUCTARG \
- isfp = (ccall_classify_struct(cts, d, ct) > 1);
- /* Pass all structs by value in registers and/or on the stack. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- isfp = 1; /* Pass complex by value in FPRs or on stack. */
-
-#define CCALL_HANDLE_REGARG_FP1 \
- if (isfp && !(ct->info & CTF_VARARG)) { \
- if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
- if (nfpr + (n >> 1) <= CCALL_NARG_FPR) { \
- dp = &cc->fpr[nfpr]; \
- nfpr += (n >> 1); \
- goto done; \
- } \
- } else { \
- if (sz > 1 && fprodd != nfpr) fprodd = 0; \
- if (fprodd) { \
- if (2*nfpr+n <= 2*CCALL_NARG_FPR+1) { \
- dp = (void *)&cc->fpr[fprodd-1].f[1]; \
- nfpr += (n >> 1); \
- if ((n & 1)) fprodd = 0; else fprodd = nfpr-1; \
- goto done; \
- } \
- } else { \
- if (2*nfpr+n <= 2*CCALL_NARG_FPR) { \
- dp = (void *)&cc->fpr[nfpr]; \
- nfpr += (n >> 1); \
- if ((n & 1)) fprodd = ++nfpr; else fprodd = 0; \
- goto done; \
- } \
- } \
- } \
- fprodd = 0; /* No reordering after the first FP value is on stack. */ \
- } else {
-
-#define CCALL_HANDLE_REGARG_FP2 }
-
-#endif
-
-#define CCALL_HANDLE_REGARG \
- CCALL_HANDLE_REGARG_FP1 \
- if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
- if (ngpr < maxgpr) \
- ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
- } \
- if (ngpr < maxgpr) { \
- dp = &cc->gpr[ngpr]; \
- if (ngpr + n > maxgpr) { \
- nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
- if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
- ngpr = maxgpr; \
- } else { \
- ngpr += n; \
- } \
- goto done; \
- } CCALL_HANDLE_REGARG_FP2
-
-#define CCALL_HANDLE_RET \
- if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0];
-
-#elif LJ_TARGET_PPC
-/* -- PPC calling conventions --------------------------------------------- */
-
-#define CCALL_HANDLE_STRUCTRET \
- cc->retref = 1; /* Return all structs by reference. */ \
- cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET \
- /* Complex values are returned in 2 or 4 GPRs. */ \
- cc->retref = 0;
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
-
-#define CCALL_HANDLE_STRUCTARG \
- rp = cdataptr(lj_cdata_new(cts, did, sz)); \
- sz = CTSIZE_PTR; /* Pass all structs by reference. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- /* Pass complex by value in 2 or 4 GPRs. */
-
-#define CCALL_HANDLE_REGARG \
- if (isfp) { /* Try to pass argument in FPRs. */ \
- if (nfpr + 1 <= CCALL_NARG_FPR) { \
- dp = &cc->fpr[nfpr]; \
- nfpr += 1; \
- d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
- goto done; \
- } \
- } else { /* Try to pass argument in GPRs. */ \
- if (n > 1) { \
- lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \
- if (ctype_isinteger(d->info)) \
- ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
- else if (ngpr + n > maxgpr) \
- ngpr = maxgpr; /* Prevent reordering. */ \
- } \
- if (ngpr + n <= maxgpr) { \
- dp = &cc->gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#define CCALL_HANDLE_RET \
- if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
- ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */
-
-#elif LJ_TARGET_PPCSPE
-/* -- PPC/SPE calling conventions ----------------------------------------- */
-
-#define CCALL_HANDLE_STRUCTRET \
- cc->retref = 1; /* Return all structs by reference. */ \
- cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET \
- /* Complex values are returned in 2 or 4 GPRs. */ \
- cc->retref = 0;
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
-
-#define CCALL_HANDLE_STRUCTARG \
- rp = cdataptr(lj_cdata_new(cts, did, sz)); \
- sz = CTSIZE_PTR; /* Pass all structs by reference. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- /* Pass complex by value in 2 or 4 GPRs. */
-
-/* PPC/SPE has a softfp ABI. */
-#define CCALL_HANDLE_REGARG \
- if (n > 1) { /* Doesn't fit in a single GPR? */ \
- lua_assert(n == 2 || n == 4); /* int64_t, double or complex (float). */ \
- if (n == 2) \
- ngpr = (ngpr + 1u) & ~1u; /* Only align 64 bit value to regpair. */ \
- else if (ngpr + n > maxgpr) \
- ngpr = maxgpr; /* Prevent reordering. */ \
- } \
- if (ngpr + n <= maxgpr) { \
- dp = &cc->gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- }
-
-#elif LJ_TARGET_MIPS
-/* -- MIPS calling conventions -------------------------------------------- */
-
-#define CCALL_HANDLE_STRUCTRET \
- cc->retref = 1; /* Return all structs by reference. */ \
- cc->gpr[ngpr++] = (GPRArg)dp;
-
-#define CCALL_HANDLE_COMPLEXRET \
- /* Complex values are returned in 1 or 2 FPRs. */ \
- cc->retref = 0;
-
-#define CCALL_HANDLE_COMPLEXRET2 \
- if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
- ((float *)dp)[0] = cc->fpr[0].f; \
- ((float *)dp)[1] = cc->fpr[1].f; \
- } else { /* Copy complex double from FPRs. */ \
- ((double *)dp)[0] = cc->fpr[0].d; \
- ((double *)dp)[1] = cc->fpr[1].d; \
- }
-
-#define CCALL_HANDLE_STRUCTARG \
- /* Pass all structs by value in registers and/or on the stack. */
-
-#define CCALL_HANDLE_COMPLEXARG \
- /* Pass complex by value in 2 or 4 GPRs. */
-
-#define CCALL_HANDLE_REGARG \
- if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \
- /* Try to pass argument in FPRs. */ \
- dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \
- nfpr++; ngpr += n; \
- goto done; \
- } else { /* Try to pass argument in GPRs. */ \
- nfpr = CCALL_NARG_FPR; \
- if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \
- ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
- if (ngpr < maxgpr) { \
- dp = &cc->gpr[ngpr]; \
- if (ngpr + n > maxgpr) { \
- nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
- if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
- ngpr = maxgpr; \
- } else { \
- ngpr += n; \
- } \
- goto done; \
- } \
- }
-
-#define CCALL_HANDLE_RET \
- if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
- sp = (uint8_t *)&cc->fpr[0].f;
-
-#else
-#error "Missing calling convention definitions for this architecture"
-#endif
-
-#ifndef CCALL_HANDLE_STRUCTRET2
-#define CCALL_HANDLE_STRUCTRET2 \
- memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */
-#endif
-
-/* -- x86 OSX ABI struct classification ----------------------------------- */
-
-#if LJ_TARGET_X86 && LJ_TARGET_OSX
-
-/* Check for struct with single FP field. */
-static int ccall_classify_struct(CTState *cts, CType *ct)
-{
- CTSize sz = ct->size;
- if (!(sz == sizeof(float) || sz == sizeof(double))) return 0;
- if ((ct->info & CTF_UNION)) return 0;
- while (ct->sib) {
- ct = ctype_get(cts, ct->sib);
- if (ctype_isfield(ct->info)) {
- CType *sct = ctype_rawchild(cts, ct);
- if (ctype_isfp(sct->info)) {
- if (sct->size == sz)
- return (sz >> 2); /* Return 1 for float or 2 for double. */
- } else if (ctype_isstruct(sct->info)) {
- if (sct->size)
- return ccall_classify_struct(cts, sct);
- } else {
- break;
- }
- } else if (ctype_isbitfield(ct->info)) {
- break;
- } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
- CType *sct = ctype_rawchild(cts, ct);
- if (sct->size)
- return ccall_classify_struct(cts, sct);
- }
- }
- return 0;
-}
-
-#endif
-
-/* -- x64 struct classification ------------------------------------------- */
-
-#if LJ_TARGET_X64 && !LJ_ABI_WIN
-
-/* Register classes for x64 struct classification. */
-#define CCALL_RCL_INT 1
-#define CCALL_RCL_SSE 2
-#define CCALL_RCL_MEM 4
-/* NYI: classify vectors. */
-
-static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs);
-
-/* Classify a C type. */
-static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
-{
- if (ctype_isarray(ct->info)) {
- CType *cct = ctype_rawchild(cts, ct);
- CTSize eofs, esz = cct->size, asz = ct->size;
- for (eofs = 0; eofs < asz; eofs += esz)
- ccall_classify_ct(cts, cct, rcl, ofs+eofs);
- } else if (ctype_isstruct(ct->info)) {
- ccall_classify_struct(cts, ct, rcl, ofs);
- } else {
- int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT;
- lua_assert(ctype_hassize(ct->info));
- if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */
- rcl[(ofs >= 8)] |= cl;
- }
-}
-
-/* Recursively classify a struct based on its fields. */
-static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
-{
- if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */
- while (ct->sib) {
- CTSize fofs;
- ct = ctype_get(cts, ct->sib);
- fofs = ofs+ct->size;
- if (ctype_isfield(ct->info))
- ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs);
- else if (ctype_isbitfield(ct->info))
- rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */
- else if (ctype_isxattrib(ct->info, CTA_SUBTYPE))
- ccall_classify_struct(cts, ctype_rawchild(cts, ct), rcl, fofs);
- }
- return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */
-}
-
-/* Try to split up a small struct into registers. */
-static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl)
-{
- MSize ngpr = cc->ngpr, nfpr = cc->nfpr;
- uint32_t i;
- for (i = 0; i < 2; i++) {
- lua_assert(!(rcl[i] & CCALL_RCL_MEM));
- if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
- if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */
- cc->gpr[ngpr++] = dp[i];
- } else if ((rcl[i] & CCALL_RCL_SSE)) {
- if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */
- cc->fpr[nfpr++].l[0] = dp[i];
- }
- }
- cc->ngpr = ngpr; cc->nfpr = nfpr;
- return 0; /* Ok. */
-}
-
-/* Pass a small struct argument. */
-static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl,
- TValue *o, int narg)
-{
- GPRArg dp[2];
- dp[0] = dp[1] = 0;
- /* Convert to temp. struct. */
- lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
- if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */
- MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1;
- if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */
- cc->nsp = nsp + n;
- memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR);
- }
- return 0; /* Ok. */
-}
-
-/* Combine returned small struct. */
-static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz)
-{
- GPRArg sp[2];
- MSize ngpr = 0, nfpr = 0;
- uint32_t i;
- for (i = 0; i < 2; i++) {
- if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
- sp[i] = cc->gpr[ngpr++];
- } else if ((rcl[i] & CCALL_RCL_SSE)) {
- sp[i] = cc->fpr[nfpr++].l[0];
- }
- }
- memcpy(dp, sp, sz);
-}
-#endif
-
-/* -- ARM hard-float ABI struct classification ---------------------------- */
-
-#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
-
-/* Classify a struct based on its fields. */
-static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf)
-{
- CTSize sz = ct->size;
- unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
- if ((ctf->info & CTF_VARARG)) goto noth;
- while (ct->sib) {
- CType *sct;
- ct = ctype_get(cts, ct->sib);
- if (ctype_isfield(ct->info)) {
- sct = ctype_rawchild(cts, ct);
- if (ctype_isfp(sct->info)) {
- r |= sct->size;
- if (!isu) n++; else if (n == 0) n = 1;
- } else if (ctype_iscomplex(sct->info)) {
- r |= (sct->size >> 1);
- if (!isu) n += 2; else if (n < 2) n = 2;
- } else if (ctype_isstruct(sct->info)) {
- goto substruct;
- } else {
- goto noth;
- }
- } else if (ctype_isbitfield(ct->info)) {
- goto noth;
- } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
- sct = ctype_rawchild(cts, ct);
- substruct:
- if (sct->size > 0) {
- unsigned int s = ccall_classify_struct(cts, sct, ctf);
- if (s <= 1) goto noth;
- r |= (s & 255);
- if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8);
- }
- }
- }
- if ((r == 4 || r == 8) && n <= 4)
- return r + (n << 8);
-noth: /* Not a homogeneous float/double aggregate. */
- return (sz <= 4); /* Return structs of size <= 4 in a GPR. */
-}
-
-#endif
-
-/* -- Common C call handling ---------------------------------------------- */
-
-/* Infer the destination CTypeID for a vararg argument. */
-CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o)
-{
- if (tvisnumber(o)) {
- return CTID_DOUBLE;
- } else if (tviscdata(o)) {
- CTypeID id = cdataV(o)->ctypeid;
- CType *s = ctype_get(cts, id);
- if (ctype_isrefarray(s->info)) {
- return lj_ctype_intern(cts,
- CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR);
- } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) {
- /* NYI: how to pass a struct by value in a vararg argument? */
- return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR);
- } else if (ctype_isfp(s->info) && s->size == sizeof(float)) {
- return CTID_DOUBLE;
- } else {
- return id;
- }
- } else if (tvisstr(o)) {
- return CTID_P_CCHAR;
- } else if (tvisbool(o)) {
- return CTID_BOOL;
- } else {
- return CTID_P_VOID;
- }
-}
-
-/* Setup arguments for C call. */
-static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
- CCallState *cc)
-{
- int gcsteps = 0;
- TValue *o, *top = L->top;
- CTypeID fid;
- CType *ctr;
- MSize maxgpr, ngpr = 0, nsp = 0, narg;
-#if CCALL_NARG_FPR
- MSize nfpr = 0;
-#if LJ_TARGET_ARM
- MSize fprodd = 0;
-#endif
-#endif
-
- /* Clear unused regs to get some determinism in case of misdeclaration. */
- memset(cc->gpr, 0, sizeof(cc->gpr));
-#if CCALL_NUM_FPR
- memset(cc->fpr, 0, sizeof(cc->fpr));
-#endif
-
-#if LJ_TARGET_X86
- /* x86 has several different calling conventions. */
- cc->resx87 = 0;
- switch (ctype_cconv(ct->info)) {
- case CTCC_FASTCALL: maxgpr = 2; break;
- case CTCC_THISCALL: maxgpr = 1; break;
- default: maxgpr = 0; break;
- }
-#else
- maxgpr = CCALL_NARG_GPR;
-#endif
-
- /* Perform required setup for some result types. */
- ctr = ctype_rawchild(cts, ct);
- if (ctype_isvector(ctr->info)) {
- if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16)))
- goto err_nyi;
- } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) {
- /* Preallocate cdata object and anchor it after arguments. */
- CTSize sz = ctr->size;
- GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz);
- void *dp = cdataptr(cd);
- setcdataV(L, L->top++, cd);
- if (ctype_isstruct(ctr->info)) {
- CCALL_HANDLE_STRUCTRET
- } else {
- CCALL_HANDLE_COMPLEXRET
- }
-#if LJ_TARGET_X86
- } else if (ctype_isfp(ctr->info)) {
- cc->resx87 = ctr->size == sizeof(float) ? 1 : 2;
-#endif
- }
-
- /* Skip initial attributes. */
- fid = ct->sib;
- while (fid) {
- CType *ctf = ctype_get(cts, fid);
- if (!ctype_isattrib(ctf->info)) break;
- fid = ctf->sib;
- }
-
- /* Walk through all passed arguments. */
- for (o = L->base+1, narg = 1; o < top; o++, narg++) {
- CTypeID did;
- CType *d;
- CTSize sz;
- MSize n, isfp = 0, isva = 0;
- void *dp, *rp = NULL;
-
- if (fid) { /* Get argument type from field. */
- CType *ctf = ctype_get(cts, fid);
- fid = ctf->sib;
- lua_assert(ctype_isfield(ctf->info));
- did = ctype_cid(ctf->info);
- } else {
- if (!(ct->info & CTF_VARARG))
- lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */
- did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
- isva = 1;
- }
- d = ctype_raw(cts, did);
- sz = d->size;
-
- /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */
- if (ctype_isnum(d->info)) {
- if (sz > 8) goto err_nyi;
- if ((d->info & CTF_FP))
- isfp = 1;
- } else if (ctype_isvector(d->info)) {
- if (CCALL_VECTOR_REG && (sz == 8 || sz == 16))
- isfp = 1;
- else
- goto err_nyi;
- } else if (ctype_isstruct(d->info)) {
- CCALL_HANDLE_STRUCTARG
- } else if (ctype_iscomplex(d->info)) {
- CCALL_HANDLE_COMPLEXARG
- } else {
- sz = CTSIZE_PTR;
- }
- sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
- n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
-
- CCALL_HANDLE_REGARG /* Handle register arguments. */
-
- /* Otherwise pass argument on stack. */
- if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) {
- MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1;
- nsp = (nsp + align) & ~align; /* Align argument on stack. */
- }
- if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */
- err_nyi:
- lj_err_caller(L, LJ_ERR_FFI_NYICALL);
- }
- dp = &cc->stack[nsp];
- nsp += n;
- isva = 0;
-
- done:
- if (rp) { /* Pass by reference. */
- gcsteps++;
- *(void **)dp = rp;
- dp = rp;
- }
- lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
- /* Extend passed integers to 32 bits at least. */
- if (ctype_isinteger_or_bool(d->info) && d->size < 4) {
- if (d->info & CTF_UNSIGNED)
- *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp :
- (uint32_t)*(uint16_t *)dp;
- else
- *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp :
- (int32_t)*(int16_t *)dp;
- }
-#if LJ_TARGET_X64 && LJ_ABI_WIN
- if (isva) { /* Windows/x64 mirrors varargs in both register sets. */
- if (nfpr == ngpr)
- cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0];
- else
- cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1];
- }
-#else
- UNUSED(isva);
-#endif
-#if LJ_TARGET_X64 && !LJ_ABI_WIN
- if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) {
- cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */
- cc->fpr[nfpr-2].d[1] = 0;
- }
-#else
- UNUSED(isfp);
-#endif
- }
- if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */
-
-#if LJ_TARGET_X64 || LJ_TARGET_PPC
- cc->nfpr = nfpr; /* Required for vararg functions. */
-#endif
- cc->nsp = nsp;
- cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR;
- if (nsp > CCALL_SPS_FREE)
- cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u);
- return gcsteps;
-}
-
-/* Get results from C call. */
-static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
- CCallState *cc, int *ret)
-{
- CType *ctr = ctype_rawchild(cts, ct);
- uint8_t *sp = (uint8_t *)&cc->gpr[0];
- if (ctype_isvoid(ctr->info)) {
- *ret = 0; /* Zero results. */
- return 0; /* No additional GC step. */
- }
- *ret = 1; /* One result. */
- if (ctype_isstruct(ctr->info)) {
- /* Return cdata object which is already on top of stack. */
- if (!cc->retref) {
- void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
- CCALL_HANDLE_STRUCTRET2
- }
- return 1; /* One GC step. */
- }
- if (ctype_iscomplex(ctr->info)) {
- /* Return cdata object which is already on top of stack. */
- void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
- CCALL_HANDLE_COMPLEXRET2
- return 1; /* One GC step. */
- }
- if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR)
- sp += (CTSIZE_PTR - ctr->size);
-#if CCALL_NUM_FPR
- if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info))
- sp = (uint8_t *)&cc->fpr[0];
-#endif
-#ifdef CCALL_HANDLE_RET
- CCALL_HANDLE_RET
-#endif
- /* No reference types end up here, so there's no need for the CTypeID. */
- lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)));
- return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp);
-}
-
-/* Call C function. */
-int lj_ccall_func(lua_State *L, GCcdata *cd)
-{
- CTState *cts = ctype_cts(L);
- CType *ct = ctype_raw(cts, cd->ctypeid);
- CTSize sz = CTSIZE_PTR;
- if (ctype_isptr(ct->info)) {
- sz = ct->size;
- ct = ctype_rawchild(cts, ct);
- }
- if (ctype_isfunc(ct->info)) {
- CCallState cc;
- int gcsteps, ret;
- cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz);
- gcsteps = ccall_set_args(L, cts, ct, &cc);
- ct = (CType *)((intptr_t)ct-(intptr_t)cts->tab);
- cts->cb.slot = ~0u;
- lj_vm_ffi_call(&cc);
- if (cts->cb.slot != ~0u) { /* Blacklist function that called a callback. */
- TValue tv;
- setlightudV(&tv, (void *)cc.func);
- setboolV(lj_tab_set(L, cts->miscmap, &tv), 1);
- }
- ct = (CType *)((intptr_t)ct+(intptr_t)cts->tab); /* May be reallocated. */
- gcsteps += ccall_get_results(L, cts, ct, &cc, &ret);
-#if LJ_TARGET_X86 && LJ_ABI_WIN
- /* Automatically detect __stdcall and fix up C function declaration. */
- if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) {
- CTF_INSERT(ct->info, CCONV, CTCC_STDCALL);
- lj_trace_abort(G(L));
- }
-#endif
- while (gcsteps-- > 0)
- lj_gc_check(L);
- return ret;
- }
- return -1; /* Not a function. */
-}
-
-#endif
+/*
+** FFI C call handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_ccall.h"
+#include "lj_trace.h"
+
+/* Target-specific handling of register arguments. */
+#if LJ_TARGET_X86
+/* -- x86 calling conventions --------------------------------------------- */
+
+#if LJ_ABI_WIN
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs bigger than 8 by reference (on stack only). */ \
+ cc->retref = (sz > 8); \
+ if (cc->retref) cc->stack[nsp++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
+
+#else
+
+#if LJ_TARGET_OSX
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size 1, 2, 4 or 8 in registers. */ \
+ cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
+ if (cc->retref) { \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp; \
+ } else { /* Struct with single FP field ends up in FPR. */ \
+ cc->resx87 = ccall_classify_struct(cts, ctr); \
+ }
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ if (cc->resx87) sp = (uint8_t *)&cc->fpr[0]; \
+ memcpy(dp, sp, ctr->size);
+
+#else
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp;
+
+#endif
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Return complex float in GPRs and complex double by reference. */ \
+ cc->retref = (sz > 8); \
+ if (cc->retref) { \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp; \
+ }
+
+#endif
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!cc->retref) \
+ *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ ngpr = maxgpr; /* Pass all structs by value on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 1; /* Pass complex by value on stack. */
+
+#define CCALL_HANDLE_REGARG \
+ if (!isfp) { /* Only non-FP values may be passed in registers. */ \
+ if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
+ if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
+ } else if (ngpr + 1 <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+/* -- Windows/x64 calling conventions ------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \
+ cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!cc->retref) \
+ *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \
+ if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \
+ }
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex float in a GPR and complex double by reference. */ \
+ if (sz != 2*sizeof(float)) { \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; \
+ }
+
+/* Windows/x64 argument registers are strictly positional (use ngpr). */
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { \
+ if (ngpr < maxgpr) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \
+ } else { \
+ if (ngpr < maxgpr) { dp = &cc->gpr[ngpr++]; goto done; } \
+ }
+
+#elif LJ_TARGET_X64
+/* -- POSIX/x64 calling conventions --------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ if (ccall_classify_struct(cts, ctr, rcl, 0)) { \
+ cc->retref = 1; /* Return struct by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ } else { \
+ cc->retref = 0; /* Return small structs in registers. */ \
+ }
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ ccall_classify_struct(cts, ctr, rcl, 0); \
+ ccall_struct_ret(cc, rcl, dp, ctr->size);
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in one or two FPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \
+ *(int64_t *)dp = cc->fpr[0].l[0]; \
+ } else { /* Copy non-contiguous complex double from FPRs. */ \
+ ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \
+ ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \
+ }
+
+#define CCALL_HANDLE_STRUCTARG \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ if (!ccall_classify_struct(cts, d, rcl, 0)) { \
+ cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \
+ if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \
+ nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \
+ continue; \
+ } /* Pass all other structs by value on stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { /* Try to pass argument in FPRs. */ \
+ if (nfpr + n <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += n; \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ /* Note that reordering is explicitly allowed in the x64 ABI. */ \
+ if (n <= 2 && ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_ARM
+/* -- ARM calling conventions --------------------------------------------- */
+
+#if LJ_ABI_SOFTFP
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size <= 4 in a GPR. */ \
+ cc->retref = !(sz <= 4); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ cc->retref = 1; /* Return all complex values by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ UNUSED(dp); /* Nothing to do. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_REGARG_FP1
+#define CCALL_HANDLE_REGARG_FP2
+
+#else
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = !ccall_classify_struct(cts, ctr, ct); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ if (ccall_classify_struct(cts, ctr, ct) > 1) sp = (uint8_t *)&cc->fpr[0]; \
+ memcpy(dp, sp, ctr->size);
+
+#define CCALL_HANDLE_COMPLEXRET \
+ if (!(ct->info & CTF_VARARG)) cc->retref = 0; /* Return complex in FPRs. */
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!(ct->info & CTF_VARARG)) memcpy(dp, &cc->fpr[0], ctr->size);
+
+#define CCALL_HANDLE_STRUCTARG \
+ isfp = (ccall_classify_struct(cts, d, ct) > 1);
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 1; /* Pass complex by value in FPRs or on stack. */
+
+#define CCALL_HANDLE_REGARG_FP1 \
+ if (isfp && !(ct->info & CTF_VARARG)) { \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
+ if (nfpr + (n >> 1) <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += (n >> 1); \
+ goto done; \
+ } \
+ } else { \
+ if (sz > 1 && fprodd != nfpr) fprodd = 0; \
+ if (fprodd) { \
+ if (2*nfpr+n <= 2*CCALL_NARG_FPR+1) { \
+ dp = (void *)&cc->fpr[fprodd-1].f[1]; \
+ nfpr += (n >> 1); \
+ if ((n & 1)) fprodd = 0; else fprodd = nfpr-1; \
+ goto done; \
+ } \
+ } else { \
+ if (2*nfpr+n <= 2*CCALL_NARG_FPR) { \
+ dp = (void *)&cc->fpr[nfpr]; \
+ nfpr += (n >> 1); \
+ if ((n & 1)) fprodd = ++nfpr; else fprodd = 0; \
+ goto done; \
+ } \
+ } \
+ } \
+ fprodd = 0; /* No reordering after the first FP value is on stack. */ \
+ } else {
+
+#define CCALL_HANDLE_REGARG_FP2 }
+
+#endif
+
+#define CCALL_HANDLE_REGARG \
+ CCALL_HANDLE_REGARG_FP1 \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
+ if (ngpr < maxgpr) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ } \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ } CCALL_HANDLE_REGARG_FP2
+
+#define CCALL_HANDLE_RET \
+ if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0];
+
+#elif LJ_TARGET_PPC
+/* -- PPC calling conventions --------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 2 or 4 GPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all structs by reference. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { /* Try to pass argument in FPRs. */ \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += 1; \
+ d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ if (n > 1) { \
+ lua_assert(n == 2 || n == 4); /* int64_t or complex (float). */ \
+ if (ctype_isinteger(d->info)) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
+ else if (ngpr + n > maxgpr) \
+ ngpr = maxgpr; /* Prevent reordering. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */
+
+#elif LJ_TARGET_PPCSPE
+/* -- PPC/SPE calling conventions ----------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 2 or 4 GPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all structs by reference. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+/* PPC/SPE has a softfp ABI. */
+#define CCALL_HANDLE_REGARG \
+ if (n > 1) { /* Doesn't fit in a single GPR? */ \
+ lua_assert(n == 2 || n == 4); /* int64_t, double or complex (float). */ \
+ if (n == 2) \
+ ngpr = (ngpr + 1u) & ~1u; /* Only align 64 bit value to regpair. */ \
+ else if (ngpr + n > maxgpr) \
+ ngpr = maxgpr; /* Prevent reordering. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+
+#elif LJ_TARGET_MIPS
+/* -- MIPS calling conventions -------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 1 or 2 FPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
+ ((float *)dp)[0] = cc->fpr[0].f; \
+ ((float *)dp)[1] = cc->fpr[1].f; \
+ } else { /* Copy complex double from FPRs. */ \
+ ((double *)dp)[0] = cc->fpr[0].d; \
+ ((double *)dp)[1] = cc->fpr[1].d; \
+ }
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \
+ /* Try to pass argument in FPRs. */ \
+ dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \
+ nfpr++; ngpr += n; \
+ goto done; \
+ } else { /* Try to pass argument in GPRs. */ \
+ nfpr = CCALL_NARG_FPR; \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ } \
+ }
+
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ sp = (uint8_t *)&cc->fpr[0].f;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+#ifndef CCALL_HANDLE_STRUCTRET2
+#define CCALL_HANDLE_STRUCTRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */
+#endif
+
+/* -- x86 OSX ABI struct classification ----------------------------------- */
+
+#if LJ_TARGET_X86 && LJ_TARGET_OSX
+
+/* Check for struct with single FP field. */
+static int ccall_classify_struct(CTState *cts, CType *ct)
+{
+ CTSize sz = ct->size;
+ if (!(sz == sizeof(float) || sz == sizeof(double))) return 0;
+ if ((ct->info & CTF_UNION)) return 0;
+ while (ct->sib) {
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ CType *sct = ctype_rawchild(cts, ct);
+ if (ctype_isfp(sct->info)) {
+ if (sct->size == sz)
+ return (sz >> 2); /* Return 1 for float or 2 for double. */
+ } else if (ctype_isstruct(sct->info)) {
+ if (sct->size)
+ return ccall_classify_struct(cts, sct);
+ } else {
+ break;
+ }
+ } else if (ctype_isbitfield(ct->info)) {
+ break;
+ } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ CType *sct = ctype_rawchild(cts, ct);
+ if (sct->size)
+ return ccall_classify_struct(cts, sct);
+ }
+ }
+ return 0;
+}
+
+#endif
+
+/* -- x64 struct classification ------------------------------------------- */
+
+#if LJ_TARGET_X64 && !LJ_ABI_WIN
+
+/* Register classes for x64 struct classification. */
+#define CCALL_RCL_INT 1
+#define CCALL_RCL_SSE 2
+#define CCALL_RCL_MEM 4
+/* NYI: classify vectors. */
+
+static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs);
+
+/* Classify a C type. */
+static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
+{
+ if (ctype_isarray(ct->info)) {
+ CType *cct = ctype_rawchild(cts, ct);
+ CTSize eofs, esz = cct->size, asz = ct->size;
+ for (eofs = 0; eofs < asz; eofs += esz)
+ ccall_classify_ct(cts, cct, rcl, ofs+eofs);
+ } else if (ctype_isstruct(ct->info)) {
+ ccall_classify_struct(cts, ct, rcl, ofs);
+ } else {
+ int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT;
+ lua_assert(ctype_hassize(ct->info));
+ if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */
+ rcl[(ofs >= 8)] |= cl;
+ }
+}
+
+/* Recursively classify a struct based on its fields. */
+static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
+{
+ if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */
+ while (ct->sib) {
+ CTSize fofs;
+ ct = ctype_get(cts, ct->sib);
+ fofs = ofs+ct->size;
+ if (ctype_isfield(ct->info))
+ ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs);
+ else if (ctype_isbitfield(ct->info))
+ rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */
+ else if (ctype_isxattrib(ct->info, CTA_SUBTYPE))
+ ccall_classify_struct(cts, ctype_rawchild(cts, ct), rcl, fofs);
+ }
+ return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */
+}
+
+/* Try to split up a small struct into registers. */
+static int ccall_struct_reg(CCallState *cc, GPRArg *dp, int *rcl)
+{
+ MSize ngpr = cc->ngpr, nfpr = cc->nfpr;
+ uint32_t i;
+ for (i = 0; i < 2; i++) {
+ lua_assert(!(rcl[i] & CCALL_RCL_MEM));
+ if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
+ if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */
+ cc->gpr[ngpr++] = dp[i];
+ } else if ((rcl[i] & CCALL_RCL_SSE)) {
+ if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */
+ cc->fpr[nfpr++].l[0] = dp[i];
+ }
+ }
+ cc->ngpr = ngpr; cc->nfpr = nfpr;
+ return 0; /* Ok. */
+}
+
+/* Pass a small struct argument. */
+static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl,
+ TValue *o, int narg)
+{
+ GPRArg dp[2];
+ dp[0] = dp[1] = 0;
+ /* Convert to temp. struct. */
+ lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
+ if (ccall_struct_reg(cc, dp, rcl)) { /* Register overflow? Pass on stack. */
+ MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1;
+ if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */
+ cc->nsp = nsp + n;
+ memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR);
+ }
+ return 0; /* Ok. */
+}
+
+/* Combine returned small struct. */
+static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz)
+{
+ GPRArg sp[2];
+ MSize ngpr = 0, nfpr = 0;
+ uint32_t i;
+ for (i = 0; i < 2; i++) {
+ if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
+ sp[i] = cc->gpr[ngpr++];
+ } else if ((rcl[i] & CCALL_RCL_SSE)) {
+ sp[i] = cc->fpr[nfpr++].l[0];
+ }
+ }
+ memcpy(dp, sp, sz);
+}
+#endif
+
+/* -- ARM hard-float ABI struct classification ---------------------------- */
+
+#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
+
+/* Classify a struct based on its fields. */
+static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf)
+{
+ CTSize sz = ct->size;
+ unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
+ if ((ctf->info & CTF_VARARG)) goto noth;
+ while (ct->sib) {
+ CType *sct;
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ sct = ctype_rawchild(cts, ct);
+ if (ctype_isfp(sct->info)) {
+ r |= sct->size;
+ if (!isu) n++; else if (n == 0) n = 1;
+ } else if (ctype_iscomplex(sct->info)) {
+ r |= (sct->size >> 1);
+ if (!isu) n += 2; else if (n < 2) n = 2;
+ } else if (ctype_isstruct(sct->info)) {
+ goto substruct;
+ } else {
+ goto noth;
+ }
+ } else if (ctype_isbitfield(ct->info)) {
+ goto noth;
+ } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ sct = ctype_rawchild(cts, ct);
+ substruct:
+ if (sct->size > 0) {
+ unsigned int s = ccall_classify_struct(cts, sct, ctf);
+ if (s <= 1) goto noth;
+ r |= (s & 255);
+ if (!isu) n += (s >> 8); else if (n < (s >>8)) n = (s >> 8);
+ }
+ }
+ }
+ if ((r == 4 || r == 8) && n <= 4)
+ return r + (n << 8);
+noth: /* Not a homogeneous float/double aggregate. */
+ return (sz <= 4); /* Return structs of size <= 4 in a GPR. */
+}
+
+#endif
+
+/* -- Common C call handling ---------------------------------------------- */
+
+/* Infer the destination CTypeID for a vararg argument. */
+CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o)
+{
+ if (tvisnumber(o)) {
+ return CTID_DOUBLE;
+ } else if (tviscdata(o)) {
+ CTypeID id = cdataV(o)->ctypeid;
+ CType *s = ctype_get(cts, id);
+ if (ctype_isrefarray(s->info)) {
+ return lj_ctype_intern(cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR);
+ } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) {
+ /* NYI: how to pass a struct by value in a vararg argument? */
+ return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR);
+ } else if (ctype_isfp(s->info) && s->size == sizeof(float)) {
+ return CTID_DOUBLE;
+ } else {
+ return id;
+ }
+ } else if (tvisstr(o)) {
+ return CTID_P_CCHAR;
+ } else if (tvisbool(o)) {
+ return CTID_BOOL;
+ } else {
+ return CTID_P_VOID;
+ }
+}
+
+/* Setup arguments for C call. */
+static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
+ CCallState *cc)
+{
+ int gcsteps = 0;
+ TValue *o, *top = L->top;
+ CTypeID fid;
+ CType *ctr;
+ MSize maxgpr, ngpr = 0, nsp = 0, narg;
+#if CCALL_NARG_FPR
+ MSize nfpr = 0;
+#if LJ_TARGET_ARM
+ MSize fprodd = 0;
+#endif
+#endif
+
+ /* Clear unused regs to get some determinism in case of misdeclaration. */
+ memset(cc->gpr, 0, sizeof(cc->gpr));
+#if CCALL_NUM_FPR
+ memset(cc->fpr, 0, sizeof(cc->fpr));
+#endif
+
+#if LJ_TARGET_X86
+ /* x86 has several different calling conventions. */
+ cc->resx87 = 0;
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: maxgpr = 2; break;
+ case CTCC_THISCALL: maxgpr = 1; break;
+ default: maxgpr = 0; break;
+ }
+#else
+ maxgpr = CCALL_NARG_GPR;
+#endif
+
+ /* Perform required setup for some result types. */
+ ctr = ctype_rawchild(cts, ct);
+ if (ctype_isvector(ctr->info)) {
+ if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16)))
+ goto err_nyi;
+ } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) {
+ /* Preallocate cdata object and anchor it after arguments. */
+ CTSize sz = ctr->size;
+ GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz);
+ void *dp = cdataptr(cd);
+ setcdataV(L, L->top++, cd);
+ if (ctype_isstruct(ctr->info)) {
+ CCALL_HANDLE_STRUCTRET
+ } else {
+ CCALL_HANDLE_COMPLEXRET
+ }
+#if LJ_TARGET_X86
+ } else if (ctype_isfp(ctr->info)) {
+ cc->resx87 = ctr->size == sizeof(float) ? 1 : 2;
+#endif
+ }
+
+ /* Skip initial attributes. */
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) break;
+ fid = ctf->sib;
+ }
+
+ /* Walk through all passed arguments. */
+ for (o = L->base+1, narg = 1; o < top; o++, narg++) {
+ CTypeID did;
+ CType *d;
+ CTSize sz;
+ MSize n, isfp = 0, isva = 0;
+ void *dp, *rp = NULL;
+
+ if (fid) { /* Get argument type from field. */
+ CType *ctf = ctype_get(cts, fid);
+ fid = ctf->sib;
+ lua_assert(ctype_isfield(ctf->info));
+ did = ctype_cid(ctf->info);
+ } else {
+ if (!(ct->info & CTF_VARARG))
+ lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */
+ did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
+ isva = 1;
+ }
+ d = ctype_raw(cts, did);
+ sz = d->size;
+
+ /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */
+ if (ctype_isnum(d->info)) {
+ if (sz > 8) goto err_nyi;
+ if ((d->info & CTF_FP))
+ isfp = 1;
+ } else if (ctype_isvector(d->info)) {
+ if (CCALL_VECTOR_REG && (sz == 8 || sz == 16))
+ isfp = 1;
+ else
+ goto err_nyi;
+ } else if (ctype_isstruct(d->info)) {
+ CCALL_HANDLE_STRUCTARG
+ } else if (ctype_iscomplex(d->info)) {
+ CCALL_HANDLE_COMPLEXARG
+ } else {
+ sz = CTSIZE_PTR;
+ }
+ sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
+ n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
+
+ CCALL_HANDLE_REGARG /* Handle register arguments. */
+
+ /* Otherwise pass argument on stack. */
+ if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) {
+ MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1;
+ nsp = (nsp + align) & ~align; /* Align argument on stack. */
+ }
+ if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */
+ err_nyi:
+ lj_err_caller(L, LJ_ERR_FFI_NYICALL);
+ }
+ dp = &cc->stack[nsp];
+ nsp += n;
+ isva = 0;
+
+ done:
+ if (rp) { /* Pass by reference. */
+ gcsteps++;
+ *(void **)dp = rp;
+ dp = rp;
+ }
+ lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
+ /* Extend passed integers to 32 bits at least. */
+ if (ctype_isinteger_or_bool(d->info) && d->size < 4) {
+ if (d->info & CTF_UNSIGNED)
+ *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp :
+ (uint32_t)*(uint16_t *)dp;
+ else
+ *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp :
+ (int32_t)*(int16_t *)dp;
+ }
+#if LJ_TARGET_X64 && LJ_ABI_WIN
+ if (isva) { /* Windows/x64 mirrors varargs in both register sets. */
+ if (nfpr == ngpr)
+ cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0];
+ else
+ cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1];
+ }
+#else
+ UNUSED(isva);
+#endif
+#if LJ_TARGET_X64 && !LJ_ABI_WIN
+ if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) {
+ cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */
+ cc->fpr[nfpr-2].d[1] = 0;
+ }
+#else
+ UNUSED(isfp);
+#endif
+ }
+ if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */
+
+#if LJ_TARGET_X64 || LJ_TARGET_PPC
+ cc->nfpr = nfpr; /* Required for vararg functions. */
+#endif
+ cc->nsp = nsp;
+ cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR;
+ if (nsp > CCALL_SPS_FREE)
+ cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u);
+ return gcsteps;
+}
+
+/* Get results from C call. */
+static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
+ CCallState *cc, int *ret)
+{
+ CType *ctr = ctype_rawchild(cts, ct);
+ uint8_t *sp = (uint8_t *)&cc->gpr[0];
+ if (ctype_isvoid(ctr->info)) {
+ *ret = 0; /* Zero results. */
+ return 0; /* No additional GC step. */
+ }
+ *ret = 1; /* One result. */
+ if (ctype_isstruct(ctr->info)) {
+ /* Return cdata object which is already on top of stack. */
+ if (!cc->retref) {
+ void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
+ CCALL_HANDLE_STRUCTRET2
+ }
+ return 1; /* One GC step. */
+ }
+ if (ctype_iscomplex(ctr->info)) {
+ /* Return cdata object which is already on top of stack. */
+ void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
+ CCALL_HANDLE_COMPLEXRET2
+ return 1; /* One GC step. */
+ }
+ if (LJ_BE && ctype_isinteger_or_bool(ctr->info) && ctr->size < CTSIZE_PTR)
+ sp += (CTSIZE_PTR - ctr->size);
+#if CCALL_NUM_FPR
+ if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info))
+ sp = (uint8_t *)&cc->fpr[0];
+#endif
+#ifdef CCALL_HANDLE_RET
+ CCALL_HANDLE_RET
+#endif
+ /* No reference types end up here, so there's no need for the CTypeID. */
+ lua_assert(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)));
+ return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp);
+}
+
+/* Call C function. */
+int lj_ccall_func(lua_State *L, GCcdata *cd)
+{
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ CTSize sz = CTSIZE_PTR;
+ if (ctype_isptr(ct->info)) {
+ sz = ct->size;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isfunc(ct->info)) {
+ CCallState cc;
+ int gcsteps, ret;
+ cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz);
+ gcsteps = ccall_set_args(L, cts, ct, &cc);
+ ct = (CType *)((intptr_t)ct-(intptr_t)cts->tab);
+ cts->cb.slot = ~0u;
+ lj_vm_ffi_call(&cc);
+ if (cts->cb.slot != ~0u) { /* Blacklist function that called a callback. */
+ TValue tv;
+ setlightudV(&tv, (void *)cc.func);
+ setboolV(lj_tab_set(L, cts->miscmap, &tv), 1);
+ }
+ ct = (CType *)((intptr_t)ct+(intptr_t)cts->tab); /* May be reallocated. */
+ gcsteps += ccall_get_results(L, cts, ct, &cc, &ret);
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+ /* Automatically detect __stdcall and fix up C function declaration. */
+ if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) {
+ CTF_INSERT(ct->info, CCONV, CTCC_STDCALL);
+ lj_trace_abort(G(L));
+ }
+#endif
+ while (gcsteps-- > 0)
+ lj_gc_check(L);
+ return ret;
+ }
+ return -1; /* Not a function. */
+}
+
+#endif
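(Editorial aside, not part of the commit: the lj_ccall.c restored above infers C types for vararg slots in lj_ccall_ctid_vararg — Lua numbers become double, strings become const char *, float cdata is widened to double, booleans become bool. A minimal sketch of how that surfaces from the Lua side, assuming a stock LuaJIT build where the ffi module and the C library's printf are available:

  local ffi = require("ffi")
  ffi.cdef[[
  int printf(const char *fmt, ...);
  ]]
  -- Per lj_ccall_ctid_vararg: the Lua number is passed as a double and the
  -- Lua string as a const char * in the vararg slots.
  ffi.C.printf("pi ~= %.2f, name = %s\n", 3.14159, "LuaJIT")
  -- A float cdata is widened to double before it reaches the vararg slot,
  -- so %f still matches.
  local f = ffi.new("float", 0.5)
  ffi.C.printf("float widened to double: %f\n", f)
)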
diff --git a/3rdparty/lua/src/lj_ccall.h b/3rdparty/lua/src/lj_ccall.h
index 661188b..1afeed7 100644
--- a/3rdparty/lua/src/lj_ccall.h
+++ b/3rdparty/lua/src/lj_ccall.h
@@ -1,171 +1,171 @@
-/*
-** FFI C call handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CCALL_H
-#define _LJ_CCALL_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* -- C calling conventions ----------------------------------------------- */
-
-#if LJ_TARGET_X86ORX64
-
-#if LJ_TARGET_X86
-#define CCALL_NARG_GPR 2 /* For fastcall arguments. */
-#define CCALL_NARG_FPR 0
-#define CCALL_NRET_GPR 2
-#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */
-#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */
-#elif LJ_ABI_WIN
-#define CCALL_NARG_GPR 4
-#define CCALL_NARG_FPR 4
-#define CCALL_NRET_GPR 1
-#define CCALL_NRET_FPR 1
-#define CCALL_SPS_EXTRA 4
-#else
-#define CCALL_NARG_GPR 6
-#define CCALL_NARG_FPR 8
-#define CCALL_NRET_GPR 2
-#define CCALL_NRET_FPR 2
-#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */
-#endif
-
-#define CCALL_SPS_FREE 1
-#define CCALL_ALIGN_CALLSTATE 16
-
-typedef LJ_ALIGN(16) union FPRArg {
- double d[2];
- float f[4];
- uint8_t b[16];
- uint16_t s[8];
- int i[4];
- int64_t l[2];
-} FPRArg;
-
-typedef intptr_t GPRArg;
-
-#elif LJ_TARGET_ARM
-
-#define CCALL_NARG_GPR 4
-#define CCALL_NRET_GPR 2 /* For softfp double. */
-#if LJ_ABI_SOFTFP
-#define CCALL_NARG_FPR 0
-#define CCALL_NRET_FPR 0
-#else
-#define CCALL_NARG_FPR 8
-#define CCALL_NRET_FPR 4
-#endif
-#define CCALL_SPS_FREE 0
-
-typedef intptr_t GPRArg;
-typedef union FPRArg {
- double d;
- float f[2];
-} FPRArg;
-
-#elif LJ_TARGET_PPC
-
-#define CCALL_NARG_GPR 8
-#define CCALL_NARG_FPR 8
-#define CCALL_NRET_GPR 4 /* For complex double. */
-#define CCALL_NRET_FPR 1
-#define CCALL_SPS_EXTRA 4
-#define CCALL_SPS_FREE 0
-
-typedef intptr_t GPRArg;
-typedef double FPRArg;
-
-#elif LJ_TARGET_PPCSPE
-
-#define CCALL_NARG_GPR 8
-#define CCALL_NARG_FPR 0
-#define CCALL_NRET_GPR 4 /* For softfp complex double. */
-#define CCALL_NRET_FPR 0
-#define CCALL_SPS_FREE 0 /* NYI */
-
-typedef intptr_t GPRArg;
-
-#elif LJ_TARGET_MIPS
-
-#define CCALL_NARG_GPR 4
-#define CCALL_NARG_FPR 2
-#define CCALL_NRET_GPR 2
-#define CCALL_NRET_FPR 2
-#define CCALL_SPS_EXTRA 7
-#define CCALL_SPS_FREE 1
-
-typedef intptr_t GPRArg;
-typedef union FPRArg {
- double d;
- struct { LJ_ENDIAN_LOHI(float f; , float g;) };
-} FPRArg;
-
-#else
-#error "Missing calling convention definitions for this architecture"
-#endif
-
-#ifndef CCALL_SPS_EXTRA
-#define CCALL_SPS_EXTRA 0
-#endif
-#ifndef CCALL_VECTOR_REG
-#define CCALL_VECTOR_REG 0
-#endif
-#ifndef CCALL_ALIGN_STACKARG
-#define CCALL_ALIGN_STACKARG 1
-#endif
-#ifndef CCALL_ALIGN_CALLSTATE
-#define CCALL_ALIGN_CALLSTATE 8
-#endif
-
-#define CCALL_NUM_GPR \
- (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR)
-#define CCALL_NUM_FPR \
- (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR)
-
-/* Check against constants in lj_ctype.h. */
-LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR);
-LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR);
-
-#define CCALL_MAXSTACK 32
-
-/* -- C call state -------------------------------------------------------- */
-
-typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState {
- void (*func)(void); /* Pointer to called function. */
- uint32_t spadj; /* Stack pointer adjustment. */
- uint8_t nsp; /* Number of stack slots. */
- uint8_t retref; /* Return value by reference. */
-#if LJ_TARGET_X64
- uint8_t ngpr; /* Number of arguments in GPRs. */
- uint8_t nfpr; /* Number of arguments in FPRs. */
-#elif LJ_TARGET_X86
- uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */
-#elif LJ_TARGET_PPC
- uint8_t nfpr; /* Number of arguments in FPRs. */
-#endif
-#if LJ_32
- int32_t align1;
-#endif
-#if CCALL_NUM_FPR
- FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */
-#endif
- GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */
- GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */
-} CCallState;
-
-/* -- C call handling ----------------------------------------------------- */
-
-/* Really belongs to lj_vm.h. */
-LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc);
-
-LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o);
-LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd);
-
-#endif
-
-#endif
+/*
+** FFI C call handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCALL_H
+#define _LJ_CCALL_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* -- C calling conventions ----------------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+
+#if LJ_TARGET_X86
+#define CCALL_NARG_GPR 2 /* For fastcall arguments. */
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */
+#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */
+#elif LJ_ABI_WIN
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR 4
+#define CCALL_NRET_GPR 1
+#define CCALL_NRET_FPR 1
+#define CCALL_SPS_EXTRA 4
+#else
+#define CCALL_NARG_GPR 6
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 2
+#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */
+#endif
+
+#define CCALL_SPS_FREE 1
+#define CCALL_ALIGN_CALLSTATE 16
+
+typedef LJ_ALIGN(16) union FPRArg {
+ double d[2];
+ float f[4];
+ uint8_t b[16];
+ uint16_t s[8];
+ int i[4];
+ int64_t l[2];
+} FPRArg;
+
+typedef intptr_t GPRArg;
+
+#elif LJ_TARGET_ARM
+
+#define CCALL_NARG_GPR 4
+#define CCALL_NRET_GPR 2 /* For softfp double. */
+#if LJ_ABI_SOFTFP
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_FPR 0
+#else
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_FPR 4
+#endif
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ float f[2];
+} FPRArg;
+
+#elif LJ_TARGET_PPC
+
+#define CCALL_NARG_GPR 8
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_GPR 4 /* For complex double. */
+#define CCALL_NRET_FPR 1
+#define CCALL_SPS_EXTRA 4
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+typedef double FPRArg;
+
+#elif LJ_TARGET_PPCSPE
+
+#define CCALL_NARG_GPR 8
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 4 /* For softfp complex double. */
+#define CCALL_NRET_FPR 0
+#define CCALL_SPS_FREE 0 /* NYI */
+
+typedef intptr_t GPRArg;
+
+#elif LJ_TARGET_MIPS
+
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR 2
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 2
+#define CCALL_SPS_EXTRA 7
+#define CCALL_SPS_FREE 1
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ struct { LJ_ENDIAN_LOHI(float f; , float g;) };
+} FPRArg;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+#ifndef CCALL_SPS_EXTRA
+#define CCALL_SPS_EXTRA 0
+#endif
+#ifndef CCALL_VECTOR_REG
+#define CCALL_VECTOR_REG 0
+#endif
+#ifndef CCALL_ALIGN_STACKARG
+#define CCALL_ALIGN_STACKARG 1
+#endif
+#ifndef CCALL_ALIGN_CALLSTATE
+#define CCALL_ALIGN_CALLSTATE 8
+#endif
+
+#define CCALL_NUM_GPR \
+ (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR)
+#define CCALL_NUM_FPR \
+ (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR)
+
+/* Check against constants in lj_ctype.h. */
+LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR);
+LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR);
+
+#define CCALL_MAXSTACK 32
+
+/* -- C call state -------------------------------------------------------- */
+
+typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState {
+ void (*func)(void); /* Pointer to called function. */
+ uint32_t spadj; /* Stack pointer adjustment. */
+ uint8_t nsp; /* Number of stack slots. */
+ uint8_t retref; /* Return value by reference. */
+#if LJ_TARGET_X64
+ uint8_t ngpr; /* Number of arguments in GPRs. */
+ uint8_t nfpr; /* Number of arguments in FPRs. */
+#elif LJ_TARGET_X86
+ uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */
+#elif LJ_TARGET_PPC
+ uint8_t nfpr; /* Number of arguments in FPRs. */
+#endif
+#if LJ_32
+ int32_t align1;
+#endif
+#if CCALL_NUM_FPR
+ FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */
+#endif
+ GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */
+ GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */
+} CCallState;
+
+/* -- C call handling ----------------------------------------------------- */
+
+/* Really belongs to lj_vm.h. */
+LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc);
+
+LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o);
+LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd);
+
+#endif
+
+#endif
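(Editorial aside, not part of the commit: the lj_ccall.c/lj_ccall.h pair above decides per ABI whether a struct argument or return value travels in registers or by reference — see CCALL_HANDLE_STRUCTRET and ccall_classify_struct. A minimal sketch from the Lua side, assuming a POSIX libc; the div_t layout used here (quot before rem) matches glibc, but the C standard permits either member order, so adjust if your libc differs:

  local ffi = require("ffi")
  ffi.cdef[[
  typedef struct { int quot; int rem; } div_t;
  div_t div(int numer, int denom);
  ]]
  local r = ffi.C.div(17, 5)
  print(r.quot, r.rem)  --> 3   2
  -- div_t is 8 bytes, so on x64 ccall_classify_struct() gives it an integer
  -- register class and it is returned with cc->retref == 0; a struct larger
  -- than 16 bytes would instead be returned through a hidden pointer.
)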
diff --git a/3rdparty/lua/src/lj_ccallback.c b/3rdparty/lua/src/lj_ccallback.c
index cd13ef6..0010992 100644
--- a/3rdparty/lua/src/lj_ccallback.c
+++ b/3rdparty/lua/src/lj_ccallback.c
@@ -1,644 +1,641 @@
-/*
-** FFI C callback handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_tab.h"
-#include "lj_state.h"
-#include "lj_frame.h"
-#include "lj_ctype.h"
-#include "lj_cconv.h"
-#include "lj_ccall.h"
-#include "lj_ccallback.h"
-#include "lj_target.h"
-#include "lj_mcode.h"
-#include "lj_trace.h"
-#include "lj_vm.h"
-
-/* -- Target-specific handling of callback slots -------------------------- */
-
-#define CALLBACK_MCODE_SIZE (LJ_PAGESIZE * LJ_NUM_CBPAGE)
-
-#if LJ_OS_NOJIT
-
-/* Disabled callback support. */
-#define CALLBACK_SLOT2OFS(slot) (0*(slot))
-#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
-#define CALLBACK_MAX_SLOT 0
-
-#elif LJ_TARGET_X86ORX64
-
-#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0)
-#define CALLBACK_MCODE_GROUP (-2+1+2+5+(LJ_64 ? 6 : 5))
-
-#define CALLBACK_SLOT2OFS(slot) \
- (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot))
-
-static MSize CALLBACK_OFS2SLOT(MSize ofs)
-{
- MSize group;
- ofs -= CALLBACK_MCODE_HEAD;
- group = ofs / (32*4 + CALLBACK_MCODE_GROUP);
- return (ofs % (32*4 + CALLBACK_MCODE_GROUP))/4 + group*32;
-}
-
-#define CALLBACK_MAX_SLOT \
- (((CALLBACK_MCODE_SIZE-CALLBACK_MCODE_HEAD)/(CALLBACK_MCODE_GROUP+4*32))*32)
-
-#elif LJ_TARGET_ARM
-
-#define CALLBACK_MCODE_HEAD 32
-#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
-#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
-#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
-
-#elif LJ_TARGET_PPC
-
-#define CALLBACK_MCODE_HEAD 24
-#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
-#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
-#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
-
-#elif LJ_TARGET_MIPS
-
-#define CALLBACK_MCODE_HEAD 24
-#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
-#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
-#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
-
-#else
-
-/* Missing support for this architecture. */
-#define CALLBACK_SLOT2OFS(slot) (0*(slot))
-#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
-#define CALLBACK_MAX_SLOT 0
-
-#endif
-
-/* Convert callback slot number to callback function pointer. */
-static void *callback_slot2ptr(CTState *cts, MSize slot)
-{
- return (uint8_t *)cts->cb.mcode + CALLBACK_SLOT2OFS(slot);
-}
-
-/* Convert callback function pointer to slot number. */
-MSize lj_ccallback_ptr2slot(CTState *cts, void *p)
-{
- uintptr_t ofs = (uintptr_t)((uint8_t *)p -(uint8_t *)cts->cb.mcode);
- if (ofs < CALLBACK_MCODE_SIZE) {
- MSize slot = CALLBACK_OFS2SLOT((MSize)ofs);
- if (CALLBACK_SLOT2OFS(slot) == (MSize)ofs)
- return slot;
- }
- return ~0u; /* Not a known callback function pointer. */
-}
-
-/* Initialize machine code for callback function pointers. */
-#if LJ_OS_NOJIT
-/* Disabled callback support. */
-#define callback_mcode_init(g, p) UNUSED(p)
-#elif LJ_TARGET_X86ORX64
-static void callback_mcode_init(global_State *g, uint8_t *page)
-{
- uint8_t *p = page;
- uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback;
- MSize slot;
-#if LJ_64
- *(void **)p = target; p += 8;
-#endif
- for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
- /* mov al, slot; jmp group */
- *p++ = XI_MOVrib | RID_EAX; *p++ = (uint8_t)slot;
- if ((slot & 31) == 31 || slot == CALLBACK_MAX_SLOT-1) {
- /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */
- *p++ = XI_PUSH + RID_EBP;
- *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8);
- *p++ = XI_MOVri | RID_EBP;
- *(int32_t *)p = i32ptr(g); p += 4;
-#if LJ_64
- /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */
- *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP;
- *(int32_t *)p = (int32_t)(page-(p+4)); p += 4;
-#else
- /* jmp lj_vm_ffi_callback. */
- *p++ = XI_JMP; *(int32_t *)p = target-(p+4); p += 4;
-#endif
- } else {
- *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2);
- }
- }
- lua_assert(p - page <= CALLBACK_MCODE_SIZE);
-}
-#elif LJ_TARGET_ARM
-static void callback_mcode_init(global_State *g, uint32_t *page)
-{
- uint32_t *p = page;
- void *target = (void *)lj_vm_ffi_callback;
- MSize slot;
- /* This must match with the saveregs macro in buildvm_arm.dasc. */
- *p++ = ARMI_SUB|ARMF_D(RID_R12)|ARMF_N(RID_R12)|ARMF_M(RID_PC);
- *p++ = ARMI_PUSH|ARMF_N(RID_SP)|RSET_RANGE(RID_R4,RID_R11+1)|RID2RSET(RID_LR);
- *p++ = ARMI_SUB|ARMI_K12|ARMF_D(RID_R12)|ARMF_N(RID_R12)|CALLBACK_MCODE_HEAD;
- *p++ = ARMI_STR|ARMI_LS_P|ARMI_LS_W|ARMF_D(RID_R12)|ARMF_N(RID_SP)|(CFRAME_SIZE-4*9);
- *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_R12)|ARMF_N(RID_PC);
- *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_PC)|ARMF_N(RID_PC);
- *p++ = u32ptr(g);
- *p++ = u32ptr(target);
- for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
- *p++ = ARMI_MOV|ARMF_D(RID_R12)|ARMF_M(RID_PC);
- *p = ARMI_B | ((page-p-2) & 0x00ffffffu);
- p++;
- }
- lua_assert(p - page <= CALLBACK_MCODE_SIZE);
-}
-#elif LJ_TARGET_PPC
-static void callback_mcode_init(global_State *g, uint32_t *page)
-{
- uint32_t *p = page;
- void *target = (void *)lj_vm_ffi_callback;
- MSize slot;
- *p++ = PPCI_LIS | PPCF_T(RID_TMP) | (u32ptr(target) >> 16);
- *p++ = PPCI_LIS | PPCF_T(RID_R12) | (u32ptr(g) >> 16);
- *p++ = PPCI_ORI | PPCF_A(RID_TMP)|PPCF_T(RID_TMP) | (u32ptr(target) & 0xffff);
- *p++ = PPCI_ORI | PPCF_A(RID_R12)|PPCF_T(RID_R12) | (u32ptr(g) & 0xffff);
- *p++ = PPCI_MTCTR | PPCF_T(RID_TMP);
- *p++ = PPCI_BCTR;
- for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
- *p++ = PPCI_LI | PPCF_T(RID_R11) | slot;
- *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2);
- p++;
- }
- lua_assert(p - page <= CALLBACK_MCODE_SIZE);
-}
-#elif LJ_TARGET_MIPS
-static void callback_mcode_init(global_State *g, uint32_t *page)
-{
- uint32_t *p = page;
- void *target = (void *)lj_vm_ffi_callback;
- MSize slot;
- *p++ = MIPSI_SW | MIPSF_T(RID_R1)|MIPSF_S(RID_SP) | 0;
- *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (u32ptr(target) >> 16);
- *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (u32ptr(g) >> 16);
- *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) |(u32ptr(target)&0xffff);
- *p++ = MIPSI_JR | MIPSF_S(RID_R3);
- *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (u32ptr(g)&0xffff);
- for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
- *p = MIPSI_B | ((page-p-1) & 0x0000ffffu);
- p++;
- *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot;
- }
- lua_assert(p - page <= CALLBACK_MCODE_SIZE);
-}
-#else
-/* Missing support for this architecture. */
-#define callback_mcode_init(g, p) UNUSED(p)
-#endif
-
-/* -- Machine code management --------------------------------------------- */
-
-#if LJ_TARGET_WINDOWS
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-#elif LJ_TARGET_POSIX
-
-#include <sys/mman.h>
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#endif
-
-/* Allocate and initialize area for callback function pointers. */
-static void callback_mcode_new(CTState *cts)
-{
- size_t sz = (size_t)CALLBACK_MCODE_SIZE;
- void *p;
- if (CALLBACK_MAX_SLOT == 0)
- lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
-#if LJ_TARGET_WINDOWS
- p = VirtualAlloc(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
- if (!p)
- lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
-#elif LJ_TARGET_POSIX
- p = mmap(NULL, sz, (PROT_READ|PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS,
- -1, 0);
- if (p == MAP_FAILED)
- lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
-#else
- /* Fallback allocator. Fails if memory is not executable by default. */
- p = lj_mem_new(cts->L, sz);
-#endif
- cts->cb.mcode = p;
- callback_mcode_init(cts->g, p);
- lj_mcode_sync(p, (char *)p + sz);
-#if LJ_TARGET_WINDOWS
- {
- DWORD oprot;
- VirtualProtect(p, sz, PAGE_EXECUTE_READ, &oprot);
- }
-#elif LJ_TARGET_POSIX
- mprotect(p, sz, (PROT_READ|PROT_EXEC));
-#endif
-}
-
-/* Free area for callback function pointers. */
-void lj_ccallback_mcode_free(CTState *cts)
-{
- size_t sz = (size_t)CALLBACK_MCODE_SIZE;
- void *p = cts->cb.mcode;
- if (p == NULL) return;
-#if LJ_TARGET_WINDOWS
- VirtualFree(p, 0, MEM_RELEASE);
- UNUSED(sz);
-#elif LJ_TARGET_POSIX
- munmap(p, sz);
-#else
- lj_mem_free(cts->g, p, sz);
-#endif
-}
-
-/* -- C callback entry ---------------------------------------------------- */
-
-/* Target-specific handling of register arguments. Similar to lj_ccall.c. */
-#if LJ_TARGET_X86
-
-#define CALLBACK_HANDLE_REGARG \
- if (!isfp) { /* Only non-FP values may be passed in registers. */ \
- if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
- if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
- } else if (ngpr + 1 <= maxgpr) { \
- sp = &cts->cb.gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#elif LJ_TARGET_X64 && LJ_ABI_WIN
-
-/* Windows/x64 argument registers are strictly positional (use ngpr). */
-#define CALLBACK_HANDLE_REGARG \
- if (isfp) { \
- if (ngpr < maxgpr) { sp = &cts->cb.fpr[ngpr++]; UNUSED(nfpr); goto done; } \
- } else { \
- if (ngpr < maxgpr) { sp = &cts->cb.gpr[ngpr++]; goto done; } \
- }
-
-#elif LJ_TARGET_X64
-
-#define CALLBACK_HANDLE_REGARG \
- if (isfp) { \
- if (nfpr + n <= CCALL_NARG_FPR) { \
- sp = &cts->cb.fpr[nfpr]; \
- nfpr += n; \
- goto done; \
- } \
- } else { \
- if (ngpr + n <= maxgpr) { \
- sp = &cts->cb.gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#elif LJ_TARGET_ARM
-
-#if LJ_ABI_SOFTFP
-
-#define CALLBACK_HANDLE_REGARG_FP1 UNUSED(isfp);
-#define CALLBACK_HANDLE_REGARG_FP2
-
-#else
-
-#define CALLBACK_HANDLE_REGARG_FP1 \
- if (isfp) { \
- if (n == 1) { \
- if (fprodd) { \
- sp = &cts->cb.fpr[fprodd-1]; \
- fprodd = 0; \
- goto done; \
- } else if (nfpr + 1 <= CCALL_NARG_FPR) { \
- sp = &cts->cb.fpr[nfpr++]; \
- fprodd = nfpr; \
- goto done; \
- } \
- } else { \
- if (nfpr + 1 <= CCALL_NARG_FPR) { \
- sp = &cts->cb.fpr[nfpr++]; \
- goto done; \
- } \
- } \
- fprodd = 0; /* No reordering after the first FP value is on stack. */ \
- } else {
-
-#define CALLBACK_HANDLE_REGARG_FP2 }
-
-#endif
-
-#define CALLBACK_HANDLE_REGARG \
- CALLBACK_HANDLE_REGARG_FP1 \
- if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
- if (ngpr + n <= maxgpr) { \
- sp = &cts->cb.gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } CALLBACK_HANDLE_REGARG_FP2
-
-#elif LJ_TARGET_PPC
-
-#define CALLBACK_HANDLE_REGARG \
- if (isfp) { \
- if (nfpr + 1 <= CCALL_NARG_FPR) { \
- sp = &cts->cb.fpr[nfpr++]; \
- cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
- goto done; \
- } \
- } else { /* Try to pass argument in GPRs. */ \
- if (n > 1) { \
- lua_assert(ctype_isinteger(cta->info) && n == 2); /* int64_t. */ \
- ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
- } \
- if (ngpr + n <= maxgpr) { \
- sp = &cts->cb.gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#define CALLBACK_HANDLE_RET \
- if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
- *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */
-
-#elif LJ_TARGET_MIPS
-
-#define CALLBACK_HANDLE_REGARG \
- if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \
- sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \
- nfpr++; ngpr += n; \
- goto done; \
- } else { /* Try to pass argument in GPRs. */ \
- nfpr = CCALL_NARG_FPR; \
- if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
- if (ngpr + n <= maxgpr) { \
- sp = &cts->cb.gpr[ngpr]; \
- ngpr += n; \
- goto done; \
- } \
- }
-
-#define CALLBACK_HANDLE_RET \
- if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
- ((float *)dp)[1] = *(float *)dp;
-
-#else
-#error "Missing calling convention definitions for this architecture"
-#endif
-
-/* Convert and push callback arguments to Lua stack. */
-static void callback_conv_args(CTState *cts, lua_State *L)
-{
- TValue *o = L->top;
- intptr_t *stack = cts->cb.stack;
- MSize slot = cts->cb.slot;
- CTypeID id = 0, rid, fid;
- int gcsteps = 0;
- CType *ct;
- GCfunc *fn;
- MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR;
-#if CCALL_NARG_FPR
- MSize nfpr = 0;
-#if LJ_TARGET_ARM
- MSize fprodd = 0;
-#endif
-#endif
-
- if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) {
- ct = ctype_get(cts, id);
- rid = ctype_cid(ct->info);
- fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot));
- } else { /* Must set up frame first, before throwing the error. */
- ct = NULL;
- rid = 0;
- fn = (GCfunc *)L;
- }
- o->u32.lo = LJ_CONT_FFI_CALLBACK; /* Continuation returns from callback. */
- o->u32.hi = rid; /* Return type. x86: +(spadj<<16). */
- o++;
- setframe_gc(o, obj2gco(fn));
- setframe_ftsz(o, (int)((char *)(o+1) - (char *)L->base) + FRAME_CONT);
- L->top = L->base = ++o;
- if (!ct)
- lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK);
- if (isluafunc(fn))
- setcframe_pc(L->cframe, proto_bc(funcproto(fn))+1);
- lj_state_checkstack(L, LUA_MINSTACK); /* May throw. */
- o = L->base; /* Might have been reallocated. */
-
-#if LJ_TARGET_X86
- /* x86 has several different calling conventions. */
- switch (ctype_cconv(ct->info)) {
- case CTCC_FASTCALL: maxgpr = 2; break;
- case CTCC_THISCALL: maxgpr = 1; break;
- default: maxgpr = 0; break;
- }
-#endif
-
- fid = ct->sib;
- while (fid) {
- CType *ctf = ctype_get(cts, fid);
- if (!ctype_isattrib(ctf->info)) {
- CType *cta;
- void *sp;
- CTSize sz;
- int isfp;
- MSize n;
- lua_assert(ctype_isfield(ctf->info));
- cta = ctype_rawchild(cts, ctf);
- isfp = ctype_isfp(cta->info);
- sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
- n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
-
- CALLBACK_HANDLE_REGARG /* Handle register arguments. */
-
- /* Otherwise pass argument on stack. */
- if (CCALL_ALIGN_STACKARG && LJ_32 && sz == 8)
- nsp = (nsp + 1) & ~1u; /* Align 64 bit argument on stack. */
- sp = &stack[nsp];
- nsp += n;
-
- done:
- if (LJ_BE && cta->size < CTSIZE_PTR)
- sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size);
- gcsteps += lj_cconv_tv_ct(cts, cta, 0, o++, sp);
- }
- fid = ctf->sib;
- }
- L->top = o;
-#if LJ_TARGET_X86
- /* Store stack adjustment for returns from non-cdecl callbacks. */
- if (ctype_cconv(ct->info) != CTCC_CDECL)
- (L->base-2)->u32.hi |= (nsp << (16+2));
-#endif
- while (gcsteps-- > 0)
- lj_gc_check(L);
-}
-
-/* Convert Lua object to callback result. */
-static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
-{
- CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi);
-#if LJ_TARGET_X86
- cts->cb.gpr[2] = 0;
-#endif
- if (!ctype_isvoid(ctr->info)) {
- uint8_t *dp = (uint8_t *)&cts->cb.gpr[0];
-#if CCALL_NUM_FPR
- if (ctype_isfp(ctr->info))
- dp = (uint8_t *)&cts->cb.fpr[0];
-#endif
- lj_cconv_ct_tv(cts, ctr, dp, o, 0);
-#ifdef CALLBACK_HANDLE_RET
- CALLBACK_HANDLE_RET
-#endif
- /* Extend returned integers to (at least) 32 bits. */
- if (ctype_isinteger_or_bool(ctr->info) && ctr->size < 4) {
- if (ctr->info & CTF_UNSIGNED)
- *(uint32_t *)dp = ctr->size == 1 ? (uint32_t)*(uint8_t *)dp :
- (uint32_t)*(uint16_t *)dp;
- else
- *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp :
- (int32_t)*(int16_t *)dp;
- }
-#if LJ_TARGET_X86
- if (ctype_isfp(ctr->info))
- cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2;
-#endif
- }
-}
-
-/* Enter callback. */
-lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf)
-{
- lua_State *L = cts->L;
- global_State *g = cts->g;
- lua_assert(L != NULL);
- if (gcref(g->jit_L)) {
- setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK));
- if (g->panic) g->panic(L);
- exit(EXIT_FAILURE);
- }
- lj_trace_abort(g); /* Never record across callback. */
- /* Setup C frame. */
- cframe_prev(cf) = L->cframe;
- setcframe_L(cf, L);
- cframe_errfunc(cf) = -1;
- cframe_nres(cf) = 0;
- L->cframe = cf;
- callback_conv_args(cts, L);
- return L; /* Now call the function on this stack. */
-}
-
-/* Leave callback. */
-void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o)
-{
- lua_State *L = cts->L;
- GCfunc *fn;
- TValue *obase = L->base;
- L->base = L->top; /* Keep continuation frame for throwing errors. */
- if (o >= L->base) {
- /* PC of RET* is lost. Point to last line for result conv. errors. */
- fn = curr_func(L);
- if (isluafunc(fn)) {
- GCproto *pt = funcproto(fn);
- setcframe_pc(L->cframe, proto_bc(pt)+pt->sizebc+1);
- }
- }
- callback_conv_result(cts, L, o);
- /* Finally drop C frame and continuation frame. */
- L->cframe = cframe_prev(L->cframe);
- L->top -= 2;
- L->base = obase;
- cts->cb.slot = 0; /* Blacklist C function that called the callback. */
-}
-
-/* -- C callback management ----------------------------------------------- */
-
-/* Get an unused slot in the callback slot table. */
-static MSize callback_slot_new(CTState *cts, CType *ct)
-{
- CTypeID id = ctype_typeid(cts, ct);
- CTypeID1 *cbid = cts->cb.cbid;
- MSize top;
- for (top = cts->cb.topid; top < cts->cb.sizeid; top++)
- if (LJ_LIKELY(cbid[top] == 0))
- goto found;
-#if CALLBACK_MAX_SLOT
- if (top >= CALLBACK_MAX_SLOT)
-#endif
- lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
- if (!cts->cb.mcode)
- callback_mcode_new(cts);
- lj_mem_growvec(cts->L, cbid, cts->cb.sizeid, CALLBACK_MAX_SLOT, CTypeID1);
- cts->cb.cbid = cbid;
- memset(cbid+top, 0, (cts->cb.sizeid-top)*sizeof(CTypeID1));
-found:
- cbid[top] = id;
- cts->cb.topid = top+1;
- return top;
-}
-
-/* Check for function pointer and supported argument/result types. */
-static CType *callback_checkfunc(CTState *cts, CType *ct)
-{
- int narg = 0;
- if (!ctype_isptr(ct->info) || (LJ_64 && ct->size != CTSIZE_PTR))
- return NULL;
- ct = ctype_rawchild(cts, ct);
- if (ctype_isfunc(ct->info)) {
- CType *ctr = ctype_rawchild(cts, ct);
- CTypeID fid = ct->sib;
- if (!(ctype_isvoid(ctr->info) || ctype_isenum(ctr->info) ||
- ctype_isptr(ctr->info) || (ctype_isnum(ctr->info) && ctr->size <= 8)))
- return NULL;
- if ((ct->info & CTF_VARARG))
- return NULL;
- while (fid) {
- CType *ctf = ctype_get(cts, fid);
- if (!ctype_isattrib(ctf->info)) {
- CType *cta;
- lua_assert(ctype_isfield(ctf->info));
- cta = ctype_rawchild(cts, ctf);
- if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) ||
- (ctype_isnum(cta->info) && cta->size <= 8)) ||
- ++narg >= LUA_MINSTACK-3)
- return NULL;
- }
- fid = ctf->sib;
- }
- return ct;
- }
- return NULL;
-}
-
-/* Create a new callback and return the callback function pointer. */
-void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn)
-{
- ct = callback_checkfunc(cts, ct);
- if (ct) {
- MSize slot = callback_slot_new(cts, ct);
- GCtab *t = cts->miscmap;
- setfuncV(cts->L, lj_tab_setint(cts->L, t, (int32_t)slot), fn);
- lj_gc_anybarriert(cts->L, t);
- return callback_slot2ptr(cts, slot);
- }
- return NULL; /* Bad conversion. */
-}
-
-#endif
+/*
+** FFI C callback handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_target.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* -- Target-specific handling of callback slots -------------------------- */
+
+#define CALLBACK_MCODE_SIZE (LJ_PAGESIZE * LJ_NUM_CBPAGE)
+
+#if LJ_OS_NOJIT
+
+/* Disabled callback support. */
+#define CALLBACK_SLOT2OFS(slot) (0*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
+#define CALLBACK_MAX_SLOT 0
+
+#elif LJ_TARGET_X86ORX64
+
+#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0)
+#define CALLBACK_MCODE_GROUP (-2+1+2+5+(LJ_64 ? 6 : 5))
+
+#define CALLBACK_SLOT2OFS(slot) \
+ (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot))
+
+static MSize CALLBACK_OFS2SLOT(MSize ofs)
+{
+ MSize group;
+ ofs -= CALLBACK_MCODE_HEAD;
+ group = ofs / (32*4 + CALLBACK_MCODE_GROUP);
+ return (ofs % (32*4 + CALLBACK_MCODE_GROUP))/4 + group*32;
+}
+
+#define CALLBACK_MAX_SLOT \
+ (((CALLBACK_MCODE_SIZE-CALLBACK_MCODE_HEAD)/(CALLBACK_MCODE_GROUP+4*32))*32)
+
+#elif LJ_TARGET_ARM
+
+#define CALLBACK_MCODE_HEAD 32
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+
+#elif LJ_TARGET_PPC
+
+#define CALLBACK_MCODE_HEAD 24
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+
+#elif LJ_TARGET_MIPS
+
+#define CALLBACK_MCODE_HEAD 24
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+
+#else
+
+/* Missing support for this architecture. */
+#define CALLBACK_SLOT2OFS(slot) (0*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
+#define CALLBACK_MAX_SLOT 0
+
+#endif
+
+/* Convert callback slot number to callback function pointer. */
+static void *callback_slot2ptr(CTState *cts, MSize slot)
+{
+ return (uint8_t *)cts->cb.mcode + CALLBACK_SLOT2OFS(slot);
+}
+
+/* Convert callback function pointer to slot number. */
+MSize lj_ccallback_ptr2slot(CTState *cts, void *p)
+{
+ uintptr_t ofs = (uintptr_t)((uint8_t *)p -(uint8_t *)cts->cb.mcode);
+ if (ofs < CALLBACK_MCODE_SIZE) {
+ MSize slot = CALLBACK_OFS2SLOT((MSize)ofs);
+ if (CALLBACK_SLOT2OFS(slot) == (MSize)ofs)
+ return slot;
+ }
+ return ~0u; /* Not a known callback function pointer. */
+}
+
+/* Initialize machine code for callback function pointers. */
+#if LJ_OS_NOJIT
+/* Disabled callback support. */
+#define callback_mcode_init(g, p) UNUSED(p)
+#elif LJ_TARGET_X86ORX64
+static void callback_mcode_init(global_State *g, uint8_t *page)
+{
+ uint8_t *p = page;
+ uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback;
+ MSize slot;
+#if LJ_64
+ *(void **)p = target; p += 8;
+#endif
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ /* mov al, slot; jmp group */
+ *p++ = XI_MOVrib | RID_EAX; *p++ = (uint8_t)slot;
+ if ((slot & 31) == 31 || slot == CALLBACK_MAX_SLOT-1) {
+ /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */
+ *p++ = XI_PUSH + RID_EBP;
+ *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8);
+ *p++ = XI_MOVri | RID_EBP;
+ *(int32_t *)p = i32ptr(g); p += 4;
+#if LJ_64
+ /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */
+ *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP;
+ *(int32_t *)p = (int32_t)(page-(p+4)); p += 4;
+#else
+ /* jmp lj_vm_ffi_callback. */
+ *p++ = XI_JMP; *(int32_t *)p = target-(p+4); p += 4;
+#endif
+ } else {
+ *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2);
+ }
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#elif LJ_TARGET_ARM
+static void callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ /* This must match with the saveregs macro in buildvm_arm.dasc. */
+ *p++ = ARMI_SUB|ARMF_D(RID_R12)|ARMF_N(RID_R12)|ARMF_M(RID_PC);
+ *p++ = ARMI_PUSH|ARMF_N(RID_SP)|RSET_RANGE(RID_R4,RID_R11+1)|RID2RSET(RID_LR);
+ *p++ = ARMI_SUB|ARMI_K12|ARMF_D(RID_R12)|ARMF_N(RID_R12)|CALLBACK_MCODE_HEAD;
+ *p++ = ARMI_STR|ARMI_LS_P|ARMI_LS_W|ARMF_D(RID_R12)|ARMF_N(RID_SP)|(CFRAME_SIZE-4*9);
+ *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_R12)|ARMF_N(RID_PC);
+ *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_PC)|ARMF_N(RID_PC);
+ *p++ = u32ptr(g);
+ *p++ = u32ptr(target);
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = ARMI_MOV|ARMF_D(RID_R12)|ARMF_M(RID_PC);
+ *p = ARMI_B | ((page-p-2) & 0x00ffffffu);
+ p++;
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#elif LJ_TARGET_PPC
+static void callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ *p++ = PPCI_LIS | PPCF_T(RID_TMP) | (u32ptr(target) >> 16);
+ *p++ = PPCI_LIS | PPCF_T(RID_R12) | (u32ptr(g) >> 16);
+ *p++ = PPCI_ORI | PPCF_A(RID_TMP)|PPCF_T(RID_TMP) | (u32ptr(target) & 0xffff);
+ *p++ = PPCI_ORI | PPCF_A(RID_R12)|PPCF_T(RID_R12) | (u32ptr(g) & 0xffff);
+ *p++ = PPCI_MTCTR | PPCF_T(RID_TMP);
+ *p++ = PPCI_BCTR;
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = PPCI_LI | PPCF_T(RID_R11) | slot;
+ *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2);
+ p++;
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#elif LJ_TARGET_MIPS
+static void callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ *p++ = MIPSI_SW | MIPSF_T(RID_R1)|MIPSF_S(RID_SP) | 0;
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (u32ptr(target) >> 16);
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (u32ptr(g) >> 16);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) |(u32ptr(target)&0xffff);
+ *p++ = MIPSI_JR | MIPSF_S(RID_R3);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (u32ptr(g)&0xffff);
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p = MIPSI_B | ((page-p-1) & 0x0000ffffu);
+ p++;
+ *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot;
+ }
+ lua_assert(p - page <= CALLBACK_MCODE_SIZE);
+}
+#else
+/* Missing support for this architecture. */
+#define callback_mcode_init(g, p) UNUSED(p)
+#endif
+
+/* -- Machine code management --------------------------------------------- */
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#endif
+
+/* Allocate and initialize area for callback function pointers. */
+static void callback_mcode_new(CTState *cts)
+{
+ size_t sz = (size_t)CALLBACK_MCODE_SIZE;
+ void *p;
+ if (CALLBACK_MAX_SLOT == 0)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#if LJ_TARGET_WINDOWS
+ p = VirtualAlloc(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ if (!p)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#elif LJ_TARGET_POSIX
+ p = mmap(NULL, sz, (PROT_READ|PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS,
+ -1, 0);
+ if (p == MAP_FAILED)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#else
+ /* Fallback allocator. Fails if memory is not executable by default. */
+ p = lj_mem_new(cts->L, sz);
+#endif
+ cts->cb.mcode = p;
+ callback_mcode_init(cts->g, p);
+ lj_mcode_sync(p, (char *)p + sz);
+#if LJ_TARGET_WINDOWS
+ {
+ DWORD oprot;
+ VirtualProtect(p, sz, PAGE_EXECUTE_READ, &oprot);
+ }
+#elif LJ_TARGET_POSIX
+ mprotect(p, sz, (PROT_READ|PROT_EXEC));
+#endif
+}
+
+/* Free area for callback function pointers. */
+void lj_ccallback_mcode_free(CTState *cts)
+{
+ size_t sz = (size_t)CALLBACK_MCODE_SIZE;
+ void *p = cts->cb.mcode;
+ if (p == NULL) return;
+#if LJ_TARGET_WINDOWS
+ VirtualFree(p, 0, MEM_RELEASE);
+ UNUSED(sz);
+#elif LJ_TARGET_POSIX
+ munmap(p, sz);
+#else
+ lj_mem_free(cts->g, p, sz);
+#endif
+}
+
+/* -- C callback entry ---------------------------------------------------- */
+
+/* Target-specific handling of register arguments. Similar to lj_ccall.c. */
+#if LJ_TARGET_X86
+
+#define CALLBACK_HANDLE_REGARG \
+ if (!isfp) { /* Only non-FP values may be passed in registers. */ \
+ if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
+ if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
+ } else if (ngpr + 1 <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+
+/* Windows/x64 argument registers are strictly positional (use ngpr). */
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (ngpr < maxgpr) { sp = &cts->cb.fpr[ngpr++]; UNUSED(nfpr); goto done; } \
+ } else { \
+ if (ngpr < maxgpr) { sp = &cts->cb.gpr[ngpr++]; goto done; } \
+ }
+
+#elif LJ_TARGET_X64
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + n <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr]; \
+ nfpr += n; \
+ goto done; \
+ } \
+ } else { \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_ARM
+
+#if LJ_ABI_SOFTFP
+
+#define CALLBACK_HANDLE_REGARG_FP1 UNUSED(isfp);
+#define CALLBACK_HANDLE_REGARG_FP2
+
+#else
+
+#define CALLBACK_HANDLE_REGARG_FP1 \
+ if (isfp) { \
+ if (n == 1) { \
+ if (fprodd) { \
+ sp = &cts->cb.fpr[fprodd-1]; \
+ fprodd = 0; \
+ goto done; \
+ } else if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr++]; \
+ fprodd = nfpr; \
+ goto done; \
+ } \
+ } else { \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr++]; \
+ goto done; \
+ } \
+ } \
+ fprodd = 0; /* No reordering after the first FP value is on stack. */ \
+ } else {
+
+#define CALLBACK_HANDLE_REGARG_FP2 }
+
+#endif
+
+#define CALLBACK_HANDLE_REGARG \
+ CALLBACK_HANDLE_REGARG_FP1 \
+ if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } CALLBACK_HANDLE_REGARG_FP2
+
+#elif LJ_TARGET_PPC
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr++]; \
+ cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ if (n > 1) { \
+ lua_assert(ctype_isinteger(cta->info) && n == 2); /* int64_t. */ \
+ ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */
+
+#elif LJ_TARGET_MIPS
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \
+ sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \
+ nfpr++; ngpr += n; \
+ goto done; \
+ } else { /* Try to pass argument in GPRs. */ \
+ nfpr = CCALL_NARG_FPR; \
+ if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ((float *)dp)[1] = *(float *)dp;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+/* Convert and push callback arguments to Lua stack. */
+static void callback_conv_args(CTState *cts, lua_State *L)
+{
+ TValue *o = L->top;
+ intptr_t *stack = cts->cb.stack;
+ MSize slot = cts->cb.slot;
+ CTypeID id = 0, rid, fid;
+ CType *ct;
+ GCfunc *fn;
+ MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR;
+#if CCALL_NARG_FPR
+ MSize nfpr = 0;
+#if LJ_TARGET_ARM
+ MSize fprodd = 0;
+#endif
+#endif
+
+ if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) {
+ ct = ctype_get(cts, id);
+ rid = ctype_cid(ct->info);
+ fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot));
+ } else { /* Must set up frame first, before throwing the error. */
+ ct = NULL;
+ rid = 0;
+ fn = (GCfunc *)L;
+ }
+ o->u32.lo = LJ_CONT_FFI_CALLBACK; /* Continuation returns from callback. */
+ o->u32.hi = rid; /* Return type. x86: +(spadj<<16). */
+ o++;
+ setframe_gc(o, obj2gco(fn));
+ setframe_ftsz(o, (int)((char *)(o+1) - (char *)L->base) + FRAME_CONT);
+ L->top = L->base = ++o;
+ if (!ct)
+ lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK);
+ if (isluafunc(fn))
+ setcframe_pc(L->cframe, proto_bc(funcproto(fn))+1);
+ lj_state_checkstack(L, LUA_MINSTACK); /* May throw. */
+ o = L->base; /* Might have been reallocated. */
+
+#if LJ_TARGET_X86
+ /* x86 has several different calling conventions. */
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: maxgpr = 2; break;
+ case CTCC_THISCALL: maxgpr = 1; break;
+ default: maxgpr = 0; break;
+ }
+#endif
+
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) {
+ CType *cta;
+ void *sp;
+ CTSize sz;
+ int isfp;
+ MSize n;
+ lua_assert(ctype_isfield(ctf->info));
+ cta = ctype_rawchild(cts, ctf);
+ isfp = ctype_isfp(cta->info);
+ sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
+ n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
+
+ CALLBACK_HANDLE_REGARG /* Handle register arguments. */
+
+ /* Otherwise pass argument on stack. */
+ if (CCALL_ALIGN_STACKARG && LJ_32 && sz == 8)
+ nsp = (nsp + 1) & ~1u; /* Align 64 bit argument on stack. */
+ sp = &stack[nsp];
+ nsp += n;
+
+ done:
+ if (LJ_BE && cta->size < CTSIZE_PTR)
+ sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size);
+ lj_cconv_tv_ct(cts, cta, 0, o++, sp);
+ }
+ fid = ctf->sib;
+ }
+ L->top = o;
+#if LJ_TARGET_X86
+ /* Store stack adjustment for returns from non-cdecl callbacks. */
+ if (ctype_cconv(ct->info) != CTCC_CDECL)
+ (L->base-2)->u32.hi |= (nsp << (16+2));
+#endif
+}
+
+/* Convert Lua object to callback result. */
+static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
+{
+ CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi);
+#if LJ_TARGET_X86
+ cts->cb.gpr[2] = 0;
+#endif
+ if (!ctype_isvoid(ctr->info)) {
+ uint8_t *dp = (uint8_t *)&cts->cb.gpr[0];
+#if CCALL_NUM_FPR
+ if (ctype_isfp(ctr->info))
+ dp = (uint8_t *)&cts->cb.fpr[0];
+#endif
+ lj_cconv_ct_tv(cts, ctr, dp, o, 0);
+#ifdef CALLBACK_HANDLE_RET
+ CALLBACK_HANDLE_RET
+#endif
+ /* Extend returned integers to (at least) 32 bits. */
+ if (ctype_isinteger_or_bool(ctr->info) && ctr->size < 4) {
+ if (ctr->info & CTF_UNSIGNED)
+ *(uint32_t *)dp = ctr->size == 1 ? (uint32_t)*(uint8_t *)dp :
+ (uint32_t)*(uint16_t *)dp;
+ else
+ *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp :
+ (int32_t)*(int16_t *)dp;
+ }
+#if LJ_TARGET_X86
+ if (ctype_isfp(ctr->info))
+ cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2;
+#endif
+ }
+}
+
+/* Enter callback. */
+lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf)
+{
+ lua_State *L = cts->L;
+ global_State *g = cts->g;
+ lua_assert(L != NULL);
+ if (gcref(g->jit_L)) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK));
+ if (g->panic) g->panic(L);
+ exit(EXIT_FAILURE);
+ }
+ lj_trace_abort(g); /* Never record across callback. */
+ /* Setup C frame. */
+ cframe_prev(cf) = L->cframe;
+ setcframe_L(cf, L);
+ cframe_errfunc(cf) = -1;
+ cframe_nres(cf) = 0;
+ L->cframe = cf;
+ callback_conv_args(cts, L);
+ return L; /* Now call the function on this stack. */
+}
+
+/* Leave callback. */
+void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o)
+{
+ lua_State *L = cts->L;
+ GCfunc *fn;
+ TValue *obase = L->base;
+ L->base = L->top; /* Keep continuation frame for throwing errors. */
+ if (o >= L->base) {
+ /* PC of RET* is lost. Point to last line for result conv. errors. */
+ fn = curr_func(L);
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ setcframe_pc(L->cframe, proto_bc(pt)+pt->sizebc+1);
+ }
+ }
+ callback_conv_result(cts, L, o);
+ /* Finally drop C frame and continuation frame. */
+ L->cframe = cframe_prev(L->cframe);
+ L->top -= 2;
+ L->base = obase;
+ cts->cb.slot = 0; /* Blacklist C function that called the callback. */
+}
+
+/* -- C callback management ----------------------------------------------- */
+
+/* Get an unused slot in the callback slot table. */
+static MSize callback_slot_new(CTState *cts, CType *ct)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ CTypeID1 *cbid = cts->cb.cbid;
+ MSize top;
+ for (top = cts->cb.topid; top < cts->cb.sizeid; top++)
+ if (LJ_LIKELY(cbid[top] == 0))
+ goto found;
+#if CALLBACK_MAX_SLOT
+ if (top >= CALLBACK_MAX_SLOT)
+#endif
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+ if (!cts->cb.mcode)
+ callback_mcode_new(cts);
+ lj_mem_growvec(cts->L, cbid, cts->cb.sizeid, CALLBACK_MAX_SLOT, CTypeID1);
+ cts->cb.cbid = cbid;
+ memset(cbid+top, 0, (cts->cb.sizeid-top)*sizeof(CTypeID1));
+found:
+ cbid[top] = id;
+ cts->cb.topid = top+1;
+ return top;
+}
+
+/* Check for function pointer and supported argument/result types. */
+static CType *callback_checkfunc(CTState *cts, CType *ct)
+{
+ int narg = 0;
+ if (!ctype_isptr(ct->info) || (LJ_64 && ct->size != CTSIZE_PTR))
+ return NULL;
+ ct = ctype_rawchild(cts, ct);
+ if (ctype_isfunc(ct->info)) {
+ CType *ctr = ctype_rawchild(cts, ct);
+ CTypeID fid = ct->sib;
+ if (!(ctype_isvoid(ctr->info) || ctype_isenum(ctr->info) ||
+ ctype_isptr(ctr->info) || (ctype_isnum(ctr->info) && ctr->size <= 8)))
+ return NULL;
+ if ((ct->info & CTF_VARARG))
+ return NULL;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) {
+ CType *cta;
+ lua_assert(ctype_isfield(ctf->info));
+ cta = ctype_rawchild(cts, ctf);
+ if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) ||
+ (ctype_isnum(cta->info) && cta->size <= 8)) ||
+ ++narg >= LUA_MINSTACK-3)
+ return NULL;
+ }
+ fid = ctf->sib;
+ }
+ return ct;
+ }
+ return NULL;
+}
+
+/* Create a new callback and return the callback function pointer. */
+void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn)
+{
+ ct = callback_checkfunc(cts, ct);
+ if (ct) {
+ MSize slot = callback_slot_new(cts, ct);
+ GCtab *t = cts->miscmap;
+ setfuncV(cts->L, lj_tab_setint(cts->L, t, (int32_t)slot), fn);
+ lj_gc_anybarriert(cts->L, t);
+ return callback_slot2ptr(cts, slot);
+ }
+ return NULL; /* Bad conversion. */
+}
+
+#endif
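
For readers following the hunk above: in the x86/x64 mcode layout, every callback slot occupies 4 bytes (a "mov al, slot" plus a short jump), and each group of 32 slots appends a shared trailer of CALLBACK_MCODE_GROUP extra bytes that loads the global_State and tail-jumps to lj_vm_ffi_callback, so CALLBACK_SLOT2OFS and CALLBACK_OFS2SLOT are exact inverses. A minimal standalone sketch of that round trip, assuming the 64-bit constants implied by the macros above (8-byte head, 12-byte group trailer, one 4 KiB page); illustrative only, not part of this changeset:

#include <assert.h>
#include <stdio.h>

#define MCODE_SIZE   4096          /* LJ_PAGESIZE * LJ_NUM_CBPAGE (assumed). */
#define MCODE_HEAD   8             /* LJ_64: 8-byte target address up front. */
#define MCODE_GROUP  (-2+1+2+5+6)  /* Net extra bytes per group of 32 slots. */

static unsigned slot2ofs(unsigned slot)
{
  return MCODE_HEAD + MCODE_GROUP*(slot/32) + 4*slot;
}

static unsigned ofs2slot(unsigned ofs)
{
  unsigned group;
  ofs -= MCODE_HEAD;
  group = ofs / (32*4 + MCODE_GROUP);
  return (ofs % (32*4 + MCODE_GROUP))/4 + group*32;
}

int main(void)
{
  unsigned maxslot = ((MCODE_SIZE-MCODE_HEAD)/(MCODE_GROUP+4*32))*32;
  unsigned slot;
  for (slot = 0; slot < maxslot; slot++)  /* Same check lj_ccallback_ptr2slot relies on. */
    assert(ofs2slot(slot2ofs(slot)) == slot);
  printf("%u callback slots fit in one mcode page\n", maxslot);
  return 0;
}
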
diff --git a/3rdparty/lua/src/lj_ccallback.h b/3rdparty/lua/src/lj_ccallback.h
index 65e7777..ac11d7b 100644
--- a/3rdparty/lua/src/lj_ccallback.h
+++ b/3rdparty/lua/src/lj_ccallback.h
@@ -1,25 +1,25 @@
-/*
-** FFI C callback handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CCALLBACK_H
-#define _LJ_CCALLBACK_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* Really belongs to lj_vm.h. */
-LJ_ASMF void lj_vm_ffi_callback(void);
-
-LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p);
-LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf);
-LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o);
-LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn);
-LJ_FUNC void lj_ccallback_mcode_free(CTState *cts);
-
-#endif
-
-#endif
+/*
+** FFI C callback handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCALLBACK_H
+#define _LJ_CCALLBACK_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Really belongs to lj_vm.h. */
+LJ_ASMF void lj_vm_ffi_callback(void);
+
+LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p);
+LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf);
+LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o);
+LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn);
+LJ_FUNC void lj_ccallback_mcode_free(CTState *cts);
+
+#endif
+
+#endif
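
The header restored above only declares VM-internal entry points; from an embedding C library's point of view, the pointer produced by lj_ccallback_new() (and handed out to Lua scripts via ffi.cast) is an ordinary C function pointer into the mcode area. A minimal host-side sketch, assuming a hypothetical progress-callback API (names and signature are illustrative, not from this changeset):

#include <stddef.h>

typedef int (*progress_cb)(int done, int total);  /* illustrative signature */

static progress_cb registered_cb = NULL;

void host_set_progress_cb(progress_cb cb)  /* typically called from Lua via the FFI */
{
  registered_cb = cb;
}

int host_report(int done, int total)
{
  if (registered_cb == NULL) return 0;
  /* A plain C call: it enters the per-slot mcode stub, which loads the slot
  ** number and global_State, jumps to lj_vm_ffi_callback, and from there
  ** lj_ccallback_enter() runs the Lua function and lj_ccallback_leave()
  ** converts the returned value back into the GPR/FPR state. */
  return registered_cb(done, total);
}
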
diff --git a/3rdparty/lua/src/lj_cconv.c b/3rdparty/lua/src/lj_cconv.c
index a03b030..337382f 100644
--- a/3rdparty/lua/src/lj_cconv.c
+++ b/3rdparty/lua/src/lj_cconv.c
@@ -1,752 +1,751 @@
-/*
-** C type conversions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_err.h"
-#include "lj_tab.h"
-#include "lj_ctype.h"
-#include "lj_cdata.h"
-#include "lj_cconv.h"
-#include "lj_ccallback.h"
-
-/* -- Conversion errors --------------------------------------------------- */
-
-/* Bad conversion. */
-LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s,
- CTInfo flags)
-{
- const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
- const char *src;
- if ((flags & CCF_FROMTV))
- src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER :
- ctype_isarray(s->info) ? LUA_TSTRING : LUA_TNIL)];
- else
- src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL));
- if (CCF_GETARG(flags))
- lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
- else
- lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
-}
-
-/* Bad conversion from TValue. */
-LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o,
- CTInfo flags)
-{
- const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
- const char *src = lj_typename(o);
- if (CCF_GETARG(flags))
- lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
- else
- lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
-}
-
-/* Initializer overflow. */
-LJ_NORET static void cconv_err_initov(CTState *cts, CType *d)
-{
- const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
- lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst);
-}
-
-/* -- C type compatibility checks ----------------------------------------- */
-
-/* Get raw type and qualifiers for a child type. Resolves enums, too. */
-static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual)
-{
- ct = ctype_child(cts, ct);
- for (;;) {
- if (ctype_isattrib(ct->info)) {
- if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
- } else if (!ctype_isenum(ct->info)) {
- break;
- }
- ct = ctype_child(cts, ct);
- }
- *qual |= (ct->info & CTF_QUAL);
- return ct;
-}
-
-/* Check for compatible types when converting to a pointer.
-** Note: these checks are more relaxed than what C99 mandates.
-*/
-int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags)
-{
- if (!((flags & CCF_CAST) || d == s)) {
- CTInfo dqual = 0, squal = 0;
- d = cconv_childqual(cts, d, &dqual);
- if (!ctype_isstruct(s->info))
- s = cconv_childqual(cts, s, &squal);
- if ((flags & CCF_SAME)) {
- if (dqual != squal)
- return 0; /* Different qualifiers. */
- } else if (!(flags & CCF_IGNQUAL)) {
- if ((dqual & squal) != squal)
- return 0; /* Discarded qualifiers. */
- if (ctype_isvoid(d->info) || ctype_isvoid(s->info))
- return 1; /* Converting to/from void * is always ok. */
- }
- if (ctype_type(d->info) != ctype_type(s->info) ||
- d->size != s->size)
- return 0; /* Different type or different size. */
- if (ctype_isnum(d->info)) {
- if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP)))
- return 0; /* Different numeric types. */
- } else if (ctype_ispointer(d->info)) {
- /* Check child types for compatibility. */
- return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME);
- } else if (ctype_isstruct(d->info)) {
- if (d != s)
- return 0; /* Must be exact same type for struct/union. */
- } else if (ctype_isfunc(d->info)) {
- /* NYI: structural equality of functions. */
- }
- }
- return 1; /* Types are compatible. */
-}
-
-/* -- C type to C type conversion ----------------------------------------- */
-
-/* Convert C type to C type. Caveat: expects to get the raw CType!
-**
-** Note: This is only used by the interpreter and not optimized at all.
-** The JIT compiler will do a much better job specializing for each case.
-*/
-void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
- uint8_t *dp, uint8_t *sp, CTInfo flags)
-{
- CTSize dsize = d->size, ssize = s->size;
- CTInfo dinfo = d->info, sinfo = s->info;
- void *tmpptr;
-
- lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo));
- lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo));
-
- if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
- goto err_conv;
-
- /* Some basic sanity checks. */
- lua_assert(!ctype_isnum(dinfo) || dsize > 0);
- lua_assert(!ctype_isnum(sinfo) || ssize > 0);
- lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4);
- lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4);
- lua_assert(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize);
- lua_assert(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize);
-
- switch (cconv_idx2(dinfo, sinfo)) {
- /* Destination is a bool. */
- case CCX(B, B):
- /* Source operand is already normalized. */
- if (dsize == 1) *dp = *sp; else *(int *)dp = *sp;
- break;
- case CCX(B, I): {
- MSize i;
- uint8_t b = 0;
- for (i = 0; i < ssize; i++) b |= sp[i];
- b = (b != 0);
- if (dsize == 1) *dp = b; else *(int *)dp = b;
- break;
- }
- case CCX(B, F): {
- uint8_t b;
- if (ssize == sizeof(double)) b = (*(double *)sp != 0);
- else if (ssize == sizeof(float)) b = (*(float *)sp != 0);
- else goto err_conv; /* NYI: long double. */
- if (dsize == 1) *dp = b; else *(int *)dp = b;
- break;
- }
-
- /* Destination is an integer. */
- case CCX(I, B):
- case CCX(I, I):
- conv_I_I:
- if (dsize > ssize) { /* Zero-extend or sign-extend LSB. */
-#if LJ_LE
- uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0;
- memcpy(dp, sp, ssize);
- memset(dp + ssize, fill, dsize-ssize);
-#else
- uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0;
- memset(dp, fill, dsize-ssize);
- memcpy(dp + (dsize-ssize), sp, ssize);
-#endif
- } else { /* Copy LSB. */
-#if LJ_LE
- memcpy(dp, sp, dsize);
-#else
- memcpy(dp, sp + (ssize-dsize), dsize);
-#endif
- }
- break;
- case CCX(I, F): {
- double n; /* Always convert via double. */
- conv_I_F:
- /* Convert source to double. */
- if (ssize == sizeof(double)) n = *(double *)sp;
- else if (ssize == sizeof(float)) n = (double)*(float *)sp;
- else goto err_conv; /* NYI: long double. */
- /* Then convert double to integer. */
- /* The conversion must exactly match the semantics of JIT-compiled code! */
- if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) {
- int32_t i = (int32_t)n;
- if (dsize == 4) *(int32_t *)dp = i;
- else if (dsize == 2) *(int16_t *)dp = (int16_t)i;
- else *(int8_t *)dp = (int8_t)i;
- } else if (dsize == 4) {
- *(uint32_t *)dp = (uint32_t)n;
- } else if (dsize == 8) {
- if (!(dinfo & CTF_UNSIGNED))
- *(int64_t *)dp = (int64_t)n;
- else
- *(uint64_t *)dp = lj_num2u64(n);
- } else {
- goto err_conv; /* NYI: conversion to >64 bit integers. */
- }
- break;
- }
- case CCX(I, C):
- s = ctype_child(cts, s);
- sinfo = s->info;
- ssize = s->size;
- goto conv_I_F; /* Just convert re. */
- case CCX(I, P):
- if (!(flags & CCF_CAST)) goto err_conv;
- sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
- goto conv_I_I;
- case CCX(I, A):
- if (!(flags & CCF_CAST)) goto err_conv;
- sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
- ssize = CTSIZE_PTR;
- tmpptr = sp;
- sp = (uint8_t *)&tmpptr;
- goto conv_I_I;
-
- /* Destination is a floating-point number. */
- case CCX(F, B):
- case CCX(F, I): {
- double n; /* Always convert via double. */
- conv_F_I:
- /* First convert source to double. */
- /* The conversion must exactly match the semantics of JIT-compiled code! */
- if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) {
- int32_t i;
- if (ssize == 4) {
- i = *(int32_t *)sp;
- } else if (!(sinfo & CTF_UNSIGNED)) {
- if (ssize == 2) i = *(int16_t *)sp;
- else i = *(int8_t *)sp;
- } else {
- if (ssize == 2) i = *(uint16_t *)sp;
- else i = *(uint8_t *)sp;
- }
- n = (double)i;
- } else if (ssize == 4) {
- n = (double)*(uint32_t *)sp;
- } else if (ssize == 8) {
- if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp;
- else n = (double)*(uint64_t *)sp;
- } else {
- goto err_conv; /* NYI: conversion from >64 bit integers. */
- }
- /* Convert double to destination. */
- if (dsize == sizeof(double)) *(double *)dp = n;
- else if (dsize == sizeof(float)) *(float *)dp = (float)n;
- else goto err_conv; /* NYI: long double. */
- break;
- }
- case CCX(F, F): {
- double n; /* Always convert via double. */
- conv_F_F:
- if (ssize == dsize) goto copyval;
- /* Convert source to double. */
- if (ssize == sizeof(double)) n = *(double *)sp;
- else if (ssize == sizeof(float)) n = (double)*(float *)sp;
- else goto err_conv; /* NYI: long double. */
- /* Convert double to destination. */
- if (dsize == sizeof(double)) *(double *)dp = n;
- else if (dsize == sizeof(float)) *(float *)dp = (float)n;
- else goto err_conv; /* NYI: long double. */
- break;
- }
- case CCX(F, C):
- s = ctype_child(cts, s);
- sinfo = s->info;
- ssize = s->size;
- goto conv_F_F; /* Ignore im, and convert from re. */
-
- /* Destination is a complex number. */
- case CCX(C, I):
- d = ctype_child(cts, d);
- dinfo = d->info;
- dsize = d->size;
- memset(dp + dsize, 0, dsize); /* Clear im. */
- goto conv_F_I; /* Convert to re. */
- case CCX(C, F):
- d = ctype_child(cts, d);
- dinfo = d->info;
- dsize = d->size;
- memset(dp + dsize, 0, dsize); /* Clear im. */
- goto conv_F_F; /* Convert to re. */
-
- case CCX(C, C):
- if (dsize != ssize) { /* Different types: convert re/im separately. */
- CType *dc = ctype_child(cts, d);
- CType *sc = ctype_child(cts, s);
- lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags);
- lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags);
- return;
- }
- goto copyval; /* Otherwise this is easy. */
-
- /* Destination is a vector. */
- case CCX(V, I):
- case CCX(V, F):
- case CCX(V, C): {
- CType *dc = ctype_child(cts, d);
- CTSize esize;
- /* First convert the scalar to the first element. */
- lj_cconv_ct_ct(cts, dc, s, dp, sp, flags);
- /* Then replicate it to the other elements (splat). */
- for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) {
- dp += esize;
- memcpy(dp, sp, esize);
- }
- break;
- }
-
- case CCX(V, V):
- /* Copy same-sized vectors, even for different lengths/element-types. */
- if (dsize != ssize) goto err_conv;
- goto copyval;
-
- /* Destination is a pointer. */
- case CCX(P, I):
- if (!(flags & CCF_CAST)) goto err_conv;
- dinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
- goto conv_I_I;
-
- case CCX(P, F):
- if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv;
- /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
- dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 0 : CTF_UNSIGNED);
- goto conv_I_F;
-
- case CCX(P, P):
- if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
- cdata_setptr(dp, dsize, cdata_getptr(sp, ssize));
- break;
-
- case CCX(P, A):
- case CCX(P, S):
- if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
- cdata_setptr(dp, dsize, sp);
- break;
-
- /* Destination is an array. */
- case CCX(A, A):
- if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize ||
- d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags))
- goto err_conv;
- goto copyval;
-
- /* Destination is a struct/union. */
- case CCX(S, S):
- if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s)
- goto err_conv; /* Must be exact same type. */
-copyval: /* Copy value. */
- lua_assert(dsize == ssize);
- memcpy(dp, sp, dsize);
- break;
-
- default:
- err_conv:
- cconv_err_conv(cts, d, s, flags);
- }
-}
-
-/* -- C type to TValue conversion ----------------------------------------- */
-
-/* Convert C type to TValue. Caveat: expects to get the raw CType! */
-int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
- TValue *o, uint8_t *sp)
-{
- CTInfo sinfo = s->info;
- if (ctype_isnum(sinfo)) {
- if (!ctype_isbool(sinfo)) {
- if (ctype_isinteger(sinfo) && s->size > 4) goto copyval;
- if (LJ_DUALNUM && ctype_isinteger(sinfo)) {
- int32_t i;
- lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s,
- (uint8_t *)&i, sp, 0);
- if ((sinfo & CTF_UNSIGNED) && i < 0)
- setnumV(o, (lua_Number)(uint32_t)i);
- else
- setintV(o, i);
- } else {
- lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s,
- (uint8_t *)&o->n, sp, 0);
- /* Numbers are NOT canonicalized here! Beware of uninitialized data. */
- lua_assert(tvisnum(o));
- }
- } else {
- uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0);
- setboolV(o, b);
- setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */
- }
- return 0;
- } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
- /* Create reference. */
- setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid));
- return 1; /* Need GC step. */
- } else {
- GCcdata *cd;
- CTSize sz;
- copyval: /* Copy value. */
- sz = s->size;
- lua_assert(sz != CTSIZE_INVALID);
- /* Attributes are stripped, qualifiers are kept (but mostly ignored). */
- cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz);
- setcdataV(cts->L, o, cd);
- memcpy(cdataptr(cd), sp, sz);
- return 1; /* Need GC step. */
- }
-}
-
-/* Convert bitfield to TValue. */
-int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp)
-{
- CTInfo info = s->info;
- CTSize pos, bsz;
- uint32_t val;
- lua_assert(ctype_isbitfield(info));
- /* NYI: packed bitfields may cause misaligned reads. */
- switch (ctype_bitcsz(info)) {
- case 4: val = *(uint32_t *)sp; break;
- case 2: val = *(uint16_t *)sp; break;
- case 1: val = *(uint8_t *)sp; break;
- default: lua_assert(0); val = 0; break;
- }
- /* Check if a packed bitfield crosses a container boundary. */
- pos = ctype_bitpos(info);
- bsz = ctype_bitbsz(info);
- lua_assert(pos < 8*ctype_bitcsz(info));
- lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info));
- if (pos + bsz > 8*ctype_bitcsz(info))
- lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
- if (!(info & CTF_BOOL)) {
- CTSize shift = 32 - bsz;
- if (!(info & CTF_UNSIGNED)) {
- setintV(o, (int32_t)(val << (shift-pos)) >> shift);
- } else {
- val = (val << (shift-pos)) >> shift;
- if (!LJ_DUALNUM || (int32_t)val < 0)
- setnumV(o, (lua_Number)(uint32_t)val);
- else
- setintV(o, (int32_t)val);
- }
- } else {
- lua_assert(bsz == 1);
- setboolV(o, (val >> pos) & 1);
- }
- return 0; /* No GC step needed. */
-}
-
-/* -- TValue to C type conversion ----------------------------------------- */
-
-/* Convert table to array. */
-static void cconv_array_tab(CTState *cts, CType *d,
- uint8_t *dp, GCtab *t, CTInfo flags)
-{
- int32_t i;
- CType *dc = ctype_rawchild(cts, d); /* Array element type. */
- CTSize size = d->size, esize = dc->size, ofs = 0;
- for (i = 0; ; i++) {
- TValue *tv = (TValue *)lj_tab_getint(t, i);
- if (!tv || tvisnil(tv)) {
- if (i == 0) continue; /* Try again for 1-based tables. */
- break; /* Stop at first nil. */
- }
- if (ofs >= size)
- cconv_err_initov(cts, d);
- lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags);
- ofs += esize;
- }
- if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */
- if (ofs == esize) { /* Replicate a single element. */
- for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize);
- } else { /* Otherwise fill the remainder with zero. */
- memset(dp + ofs, 0, size - ofs);
- }
- }
-}
-
-/* Convert table to sub-struct/union. */
-static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp,
- GCtab *t, int32_t *ip, CTInfo flags)
-{
- CTypeID id = d->sib;
- while (id) {
- CType *df = ctype_get(cts, id);
- id = df->sib;
- if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
- TValue *tv;
- int32_t i = *ip, iz = i;
- if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
- if (i >= 0) {
- retry:
- tv = (TValue *)lj_tab_getint(t, i);
- if (!tv || tvisnil(tv)) {
- if (i == 0) { i = 1; goto retry; } /* 1-based tables. */
- if (iz == 0) { *ip = i = -1; goto tryname; } /* Init named fields. */
- break; /* Stop at first nil. */
- }
- *ip = i + 1;
- } else {
- tryname:
- tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name)));
- if (!tv || tvisnil(tv)) continue;
- }
- if (ctype_isfield(df->info))
- lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags);
- else
- lj_cconv_bf_tv(cts, df, dp+df->size, tv);
- if ((d->info & CTF_UNION)) break;
- } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
- cconv_substruct_tab(cts, ctype_rawchild(cts, df),
- dp+df->size, t, ip, flags);
- } /* Ignore all other entries in the chain. */
- }
-}
-
-/* Convert table to struct/union. */
-static void cconv_struct_tab(CTState *cts, CType *d,
- uint8_t *dp, GCtab *t, CTInfo flags)
-{
- int32_t i = 0;
- memset(dp, 0, d->size); /* Much simpler to clear the struct first. */
- cconv_substruct_tab(cts, d, dp, t, &i, flags);
-}
-
-/* Convert TValue to C type. Caveat: expects to get the raw CType! */
-void lj_cconv_ct_tv(CTState *cts, CType *d,
- uint8_t *dp, TValue *o, CTInfo flags)
-{
- CTypeID sid = CTID_P_VOID;
- CType *s;
- void *tmpptr;
- uint8_t tmpbool, *sp = (uint8_t *)&tmpptr;
- if (LJ_LIKELY(tvisint(o))) {
- sp = (uint8_t *)&o->i;
- sid = CTID_INT32;
- flags |= CCF_FROMTV;
- } else if (LJ_LIKELY(tvisnum(o))) {
- sp = (uint8_t *)&o->n;
- sid = CTID_DOUBLE;
- flags |= CCF_FROMTV;
- } else if (tviscdata(o)) {
- sp = cdataptr(cdataV(o));
- sid = cdataV(o)->ctypeid;
- s = ctype_get(cts, sid);
- if (ctype_isref(s->info)) { /* Resolve reference for value. */
- lua_assert(s->size == CTSIZE_PTR);
- sp = *(void **)sp;
- sid = ctype_cid(s->info);
- }
- s = ctype_raw(cts, sid);
- if (ctype_isfunc(s->info)) {
- sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR);
- } else {
- if (ctype_isenum(s->info)) s = ctype_child(cts, s);
- goto doconv;
- }
- } else if (tvisstr(o)) {
- GCstr *str = strV(o);
- if (ctype_isenum(d->info)) { /* Match string against enum constant. */
- CTSize ofs;
- CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
- if (!cct || !ctype_isconstval(cct->info))
- goto err_conv;
- lua_assert(d->size == 4);
- sp = (uint8_t *)&cct->size;
- sid = ctype_cid(cct->info);
- } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
- CType *dc = ctype_rawchild(cts, d);
- CTSize sz = str->len+1;
- if (!ctype_isinteger(dc->info) || dc->size != 1)
- goto err_conv;
- if (d->size != 0 && d->size < sz)
- sz = d->size;
- memcpy(dp, strdata(str), sz);
- return;
- } else { /* Otherwise pass it as a const char[]. */
- sp = (uint8_t *)strdata(str);
- sid = CTID_A_CCHAR;
- flags |= CCF_FROMTV;
- }
- } else if (tvistab(o)) {
- if (ctype_isarray(d->info)) {
- cconv_array_tab(cts, d, dp, tabV(o), flags);
- return;
- } else if (ctype_isstruct(d->info)) {
- cconv_struct_tab(cts, d, dp, tabV(o), flags);
- return;
- } else {
- goto err_conv;
- }
- } else if (tvisbool(o)) {
- tmpbool = boolV(o);
- sp = &tmpbool;
- sid = CTID_BOOL;
- } else if (tvisnil(o)) {
- tmpptr = (void *)0;
- flags |= CCF_FROMTV;
- } else if (tvisudata(o)) {
- GCudata *ud = udataV(o);
- tmpptr = uddata(ud);
- if (ud->udtype == UDTYPE_IO_FILE)
- tmpptr = *(void **)tmpptr;
- } else if (tvislightud(o)) {
- tmpptr = lightudV(o);
- } else if (tvisfunc(o)) {
- void *p = lj_ccallback_new(cts, d, funcV(o));
- if (p) {
- *(void **)dp = p;
- return;
- }
- goto err_conv;
- } else {
- err_conv:
- cconv_err_convtv(cts, d, o, flags);
- }
- s = ctype_get(cts, sid);
-doconv:
- if (ctype_isenum(d->info)) d = ctype_child(cts, d);
- lj_cconv_ct_ct(cts, d, s, dp, sp, flags);
-}
-
-/* Convert TValue to bitfield. */
-void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
-{
- CTInfo info = d->info;
- CTSize pos, bsz;
- uint32_t val, mask;
- lua_assert(ctype_isbitfield(info));
- if ((info & CTF_BOOL)) {
- uint8_t tmpbool;
- lua_assert(ctype_bitbsz(info) == 1);
- lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0);
- val = tmpbool;
- } else {
- CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32;
- lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0);
- }
- pos = ctype_bitpos(info);
- bsz = ctype_bitbsz(info);
- lua_assert(pos < 8*ctype_bitcsz(info));
- lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info));
- /* Check if a packed bitfield crosses a container boundary. */
- if (pos + bsz > 8*ctype_bitcsz(info))
- lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
- mask = ((1u << bsz) - 1u) << pos;
- val = (val << pos) & mask;
- /* NYI: packed bitfields may cause misaligned reads/writes. */
- switch (ctype_bitcsz(info)) {
- case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break;
- case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break;
- case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break;
- default: lua_assert(0); break;
- }
-}
-
-/* -- Initialize C type with TValues -------------------------------------- */
-
-/* Initialize an array with TValues. */
-static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
- TValue *o, MSize len)
-{
- CType *dc = ctype_rawchild(cts, d); /* Array element type. */
- CTSize ofs, esize = dc->size;
- MSize i;
- if (len*esize > sz)
- cconv_err_initov(cts, d);
- for (i = 0, ofs = 0; i < len; i++, ofs += esize)
- lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0);
- if (ofs == esize) { /* Replicate a single element. */
- for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize);
- } else { /* Otherwise fill the remainder with zero. */
- memset(dp + ofs, 0, sz - ofs);
- }
-}
-
-/* Initialize a sub-struct/union with TValues. */
-static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp,
- TValue *o, MSize len, MSize *ip)
-{
- CTypeID id = d->sib;
- while (id) {
- CType *df = ctype_get(cts, id);
- id = df->sib;
- if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
- MSize i = *ip;
- if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
- if (i >= len) break;
- *ip = i + 1;
- if (ctype_isfield(df->info))
- lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0);
- else
- lj_cconv_bf_tv(cts, df, dp+df->size, o + i);
- if ((d->info & CTF_UNION)) break;
- } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
- cconv_substruct_init(cts, ctype_rawchild(cts, df),
- dp+df->size, o, len, ip);
- if ((d->info & CTF_UNION)) break;
- } /* Ignore all other entries in the chain. */
- }
-}
-
-/* Initialize a struct/union with TValues. */
-static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
- TValue *o, MSize len)
-{
- MSize i = 0;
- memset(dp, 0, sz); /* Much simpler to clear the struct first. */
- cconv_substruct_init(cts, d, dp, o, len, &i);
- if (i < len)
- cconv_err_initov(cts, d);
-}
-
-/* Check whether to use a multi-value initializer.
-** This is true if an aggregate is to be initialized with a value.
-** Valarrays are treated as values here so ct_tv handles (V|C, I|F).
-*/
-int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o)
-{
- if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info)))
- return 0; /* Destination is not an aggregate. */
- if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info)))
- return 0; /* Initializer is not a value. */
- if (tviscdata(o) && lj_ctype_rawref(cts, cdataV(o)->ctypeid) == d)
- return 0; /* Source and destination are identical aggregates. */
- return 1; /* Otherwise the initializer is a value. */
-}
-
-/* Initialize C type with TValues. Caveat: expects to get the raw CType! */
-void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
- uint8_t *dp, TValue *o, MSize len)
-{
- if (len == 0)
- memset(dp, 0, sz);
- else if (len == 1 && !lj_cconv_multi_init(cts, d, o))
- lj_cconv_ct_tv(cts, d, dp, o, 0);
- else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. */
- cconv_array_init(cts, d, sz, dp, o, len);
- else if (ctype_isstruct(d->info))
- cconv_struct_init(cts, d, sz, dp, o, len);
- else
- cconv_err_initov(cts, d);
-}
-
-#endif
+/*
+** C type conversions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_ccallback.h"
+
+/* -- Conversion errors --------------------------------------------------- */
+
+/* Bad conversion. */
+LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s,
+ CTInfo flags)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ const char *src;
+ if ((flags & CCF_FROMTV))
+ src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER :
+ ctype_isarray(s->info) ? LUA_TSTRING : LUA_TNIL)];
+ else
+ src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL));
+ if (CCF_GETARG(flags))
+ lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
+ else
+ lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
+}
+
+/* Bad conversion from TValue. */
+LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o,
+ CTInfo flags)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ const char *src = lj_typename(o);
+ if (CCF_GETARG(flags))
+ lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
+ else
+ lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
+}
+
+/* Initializer overflow. */
+LJ_NORET static void cconv_err_initov(CTState *cts, CType *d)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst);
+}
+
+/* -- C type compatibility checks ----------------------------------------- */
+
+/* Get raw type and qualifiers for a child type. Resolves enums, too. */
+static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual)
+{
+ ct = ctype_child(cts, ct);
+ for (;;) {
+ if (ctype_isattrib(ct->info)) {
+ if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+ } else if (!ctype_isenum(ct->info)) {
+ break;
+ }
+ ct = ctype_child(cts, ct);
+ }
+ *qual |= (ct->info & CTF_QUAL);
+ return ct;
+}
+
+/* Check for compatible types when converting to a pointer.
+** Note: these checks are more relaxed than what C99 mandates.
+*/
+int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags)
+{
+ if (!((flags & CCF_CAST) || d == s)) {
+ CTInfo dqual = 0, squal = 0;
+ d = cconv_childqual(cts, d, &dqual);
+ if (!ctype_isstruct(s->info))
+ s = cconv_childqual(cts, s, &squal);
+ if ((flags & CCF_SAME)) {
+ if (dqual != squal)
+ return 0; /* Different qualifiers. */
+ } else if (!(flags & CCF_IGNQUAL)) {
+ if ((dqual & squal) != squal)
+ return 0; /* Discarded qualifiers. */
+ if (ctype_isvoid(d->info) || ctype_isvoid(s->info))
+ return 1; /* Converting to/from void * is always ok. */
+ }
+ if (ctype_type(d->info) != ctype_type(s->info) ||
+ d->size != s->size)
+ return 0; /* Different type or different size. */
+ if (ctype_isnum(d->info)) {
+ if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP)))
+ return 0; /* Different numeric types. */
+ } else if (ctype_ispointer(d->info)) {
+ /* Check child types for compatibility. */
+ return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME);
+ } else if (ctype_isstruct(d->info)) {
+ if (d != s)
+ return 0; /* Must be exact same type for struct/union. */
+ } else if (ctype_isfunc(d->info)) {
+ /* NYI: structural equality of functions. */
+ }
+ }
+ return 1; /* Types are compatible. */
+}
+
+/* -- C type to C type conversion ----------------------------------------- */
+
+/* Convert C type to C type. Caveat: expects to get the raw CType!
+**
+** Note: This is only used by the interpreter and not optimized at all.
+** The JIT compiler will do a much better job specializing for each case.
+*/
+void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
+ uint8_t *dp, uint8_t *sp, CTInfo flags)
+{
+ CTSize dsize = d->size, ssize = s->size;
+ CTInfo dinfo = d->info, sinfo = s->info;
+ void *tmpptr;
+
+ lua_assert(!ctype_isenum(dinfo) && !ctype_isenum(sinfo));
+ lua_assert(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo));
+
+ if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
+ goto err_conv;
+
+ /* Some basic sanity checks. */
+ lua_assert(!ctype_isnum(dinfo) || dsize > 0);
+ lua_assert(!ctype_isnum(sinfo) || ssize > 0);
+ lua_assert(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4);
+ lua_assert(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4);
+ lua_assert(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize);
+ lua_assert(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize);
+
+ switch (cconv_idx2(dinfo, sinfo)) {
+ /* Destination is a bool. */
+ case CCX(B, B):
+ /* Source operand is already normalized. */
+ if (dsize == 1) *dp = *sp; else *(int *)dp = *sp;
+ break;
+ case CCX(B, I): {
+ MSize i;
+ uint8_t b = 0;
+ for (i = 0; i < ssize; i++) b |= sp[i];
+ b = (b != 0);
+ if (dsize == 1) *dp = b; else *(int *)dp = b;
+ break;
+ }
+ case CCX(B, F): {
+ uint8_t b;
+ if (ssize == sizeof(double)) b = (*(double *)sp != 0);
+ else if (ssize == sizeof(float)) b = (*(float *)sp != 0);
+ else goto err_conv; /* NYI: long double. */
+ if (dsize == 1) *dp = b; else *(int *)dp = b;
+ break;
+ }
+
+ /* Destination is an integer. */
+ case CCX(I, B):
+ case CCX(I, I):
+ conv_I_I:
+ if (dsize > ssize) { /* Zero-extend or sign-extend LSB. */
+#if LJ_LE
+ uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0;
+ memcpy(dp, sp, ssize);
+ memset(dp + ssize, fill, dsize-ssize);
+#else
+ uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0;
+ memset(dp, fill, dsize-ssize);
+ memcpy(dp + (dsize-ssize), sp, ssize);
+#endif
+ } else { /* Copy LSB. */
+#if LJ_LE
+ memcpy(dp, sp, dsize);
+#else
+ memcpy(dp, sp + (ssize-dsize), dsize);
+#endif
+ }
+ break;
+ case CCX(I, F): {
+ double n; /* Always convert via double. */
+ conv_I_F:
+ /* Convert source to double. */
+ if (ssize == sizeof(double)) n = *(double *)sp;
+ else if (ssize == sizeof(float)) n = (double)*(float *)sp;
+ else goto err_conv; /* NYI: long double. */
+ /* Then convert double to integer. */
+ /* The conversion must exactly match the semantics of JIT-compiled code! */
+ if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) {
+ int32_t i = (int32_t)n;
+ if (dsize == 4) *(int32_t *)dp = i;
+ else if (dsize == 2) *(int16_t *)dp = (int16_t)i;
+ else *(int8_t *)dp = (int8_t)i;
+ } else if (dsize == 4) {
+ *(uint32_t *)dp = (uint32_t)n;
+ } else if (dsize == 8) {
+ if (!(dinfo & CTF_UNSIGNED))
+ *(int64_t *)dp = (int64_t)n;
+ else
+ *(uint64_t *)dp = lj_num2u64(n);
+ } else {
+ goto err_conv; /* NYI: conversion to >64 bit integers. */
+ }
+ break;
+ }
+ case CCX(I, C):
+ s = ctype_child(cts, s);
+ sinfo = s->info;
+ ssize = s->size;
+ goto conv_I_F; /* Just convert re. */
+ case CCX(I, P):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ goto conv_I_I;
+ case CCX(I, A):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ ssize = CTSIZE_PTR;
+ tmpptr = sp;
+ sp = (uint8_t *)&tmpptr;
+ goto conv_I_I;
+
+ /* Destination is a floating-point number. */
+ case CCX(F, B):
+ case CCX(F, I): {
+ double n; /* Always convert via double. */
+ conv_F_I:
+ /* First convert source to double. */
+ /* The conversion must exactly match the semantics of JIT-compiled code! */
+ if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) {
+ int32_t i;
+ if (ssize == 4) {
+ i = *(int32_t *)sp;
+ } else if (!(sinfo & CTF_UNSIGNED)) {
+ if (ssize == 2) i = *(int16_t *)sp;
+ else i = *(int8_t *)sp;
+ } else {
+ if (ssize == 2) i = *(uint16_t *)sp;
+ else i = *(uint8_t *)sp;
+ }
+ n = (double)i;
+ } else if (ssize == 4) {
+ n = (double)*(uint32_t *)sp;
+ } else if (ssize == 8) {
+ if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp;
+ else n = (double)*(uint64_t *)sp;
+ } else {
+ goto err_conv; /* NYI: conversion from >64 bit integers. */
+ }
+ /* Convert double to destination. */
+ if (dsize == sizeof(double)) *(double *)dp = n;
+ else if (dsize == sizeof(float)) *(float *)dp = (float)n;
+ else goto err_conv; /* NYI: long double. */
+ break;
+ }
+ case CCX(F, F): {
+ double n; /* Always convert via double. */
+ conv_F_F:
+ if (ssize == dsize) goto copyval;
+ /* Convert source to double. */
+ if (ssize == sizeof(double)) n = *(double *)sp;
+ else if (ssize == sizeof(float)) n = (double)*(float *)sp;
+ else goto err_conv; /* NYI: long double. */
+ /* Convert double to destination. */
+ if (dsize == sizeof(double)) *(double *)dp = n;
+ else if (dsize == sizeof(float)) *(float *)dp = (float)n;
+ else goto err_conv; /* NYI: long double. */
+ break;
+ }
+ case CCX(F, C):
+ s = ctype_child(cts, s);
+ sinfo = s->info;
+ ssize = s->size;
+ goto conv_F_F; /* Ignore im, and convert from re. */
+
+ /* Destination is a complex number. */
+ case CCX(C, I):
+ d = ctype_child(cts, d);
+ dinfo = d->info;
+ dsize = d->size;
+ memset(dp + dsize, 0, dsize); /* Clear im. */
+ goto conv_F_I; /* Convert to re. */
+ case CCX(C, F):
+ d = ctype_child(cts, d);
+ dinfo = d->info;
+ dsize = d->size;
+ memset(dp + dsize, 0, dsize); /* Clear im. */
+ goto conv_F_F; /* Convert to re. */
+
+ case CCX(C, C):
+ if (dsize != ssize) { /* Different types: convert re/im separately. */
+ CType *dc = ctype_child(cts, d);
+ CType *sc = ctype_child(cts, s);
+ lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags);
+ lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags);
+ return;
+ }
+ goto copyval; /* Otherwise this is easy. */
+
+ /* Destination is a vector. */
+ case CCX(V, I):
+ case CCX(V, F):
+ case CCX(V, C): {
+ CType *dc = ctype_child(cts, d);
+ CTSize esize;
+ /* First convert the scalar to the first element. */
+ lj_cconv_ct_ct(cts, dc, s, dp, sp, flags);
+ /* Then replicate it to the other elements (splat). */
+ for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) {
+ dp += esize;
+ memcpy(dp, sp, esize);
+ }
+ break;
+ }
+
+ case CCX(V, V):
+ /* Copy same-sized vectors, even for different lengths/element-types. */
+ if (dsize != ssize) goto err_conv;
+ goto copyval;
+
+ /* Destination is a pointer. */
+ case CCX(P, I):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ dinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ goto conv_I_I;
+
+ case CCX(P, F):
+ if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv;
+ /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
+ dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 0 : CTF_UNSIGNED);
+ goto conv_I_F;
+
+ case CCX(P, P):
+ if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
+ cdata_setptr(dp, dsize, cdata_getptr(sp, ssize));
+ break;
+
+ case CCX(P, A):
+ case CCX(P, S):
+ if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
+ cdata_setptr(dp, dsize, sp);
+ break;
+
+ /* Destination is an array. */
+ case CCX(A, A):
+ if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize ||
+ d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags))
+ goto err_conv;
+ goto copyval;
+
+ /* Destination is a struct/union. */
+ case CCX(S, S):
+ if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s)
+ goto err_conv; /* Must be exact same type. */
+copyval: /* Copy value. */
+ lua_assert(dsize == ssize);
+ memcpy(dp, sp, dsize);
+ break;
+
+ default:
+ err_conv:
+ cconv_err_conv(cts, d, s, flags);
+ }
+}
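
Editor's note: the CCX(V, I)/CCX(V, F)/CCX(V, C) cases above convert the scalar into element 0 and then replicate it across the whole vector. A minimal standalone sketch of that splat loop follows; the helper name and parameters are illustrative, not LuaJIT API.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative sketch: replicate element 0 of dp across a vector of
** dsize bytes with esize-byte elements, mirroring the loop used for
** the CCX(V, *) cases above.  Assumes dsize is a multiple of esize.
*/
static void splat_first_element(uint8_t *dp, size_t dsize, size_t esize)
{
  const uint8_t *sp = dp;        /* Element 0 is the source of the splat. */
  while (dsize > esize) {        /* Copy it into every following slot. */
    dp += esize;
    memcpy(dp, sp, esize);
    dsize -= esize;
  }
}
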
+
+/* -- C type to TValue conversion ----------------------------------------- */
+
+/* Convert C type to TValue. Caveat: expects to get the raw CType! */
+int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
+ TValue *o, uint8_t *sp)
+{
+ CTInfo sinfo = s->info;
+ if (ctype_isnum(sinfo)) {
+ if (!ctype_isbool(sinfo)) {
+ if (ctype_isinteger(sinfo) && s->size > 4) goto copyval;
+ if (LJ_DUALNUM && ctype_isinteger(sinfo)) {
+ int32_t i;
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s,
+ (uint8_t *)&i, sp, 0);
+ if ((sinfo & CTF_UNSIGNED) && i < 0)
+ setnumV(o, (lua_Number)(uint32_t)i);
+ else
+ setintV(o, i);
+ } else {
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s,
+ (uint8_t *)&o->n, sp, 0);
+ /* Numbers are NOT canonicalized here! Beware of uninitialized data. */
+ lua_assert(tvisnum(o));
+ }
+ } else {
+ uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0);
+ setboolV(o, b);
+ setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */
+ }
+ return 0;
+ } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
+ /* Create reference. */
+ setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid));
+ return 1; /* Need GC step. */
+ } else {
+ GCcdata *cd;
+ CTSize sz;
+ copyval: /* Copy value. */
+ sz = s->size;
+ lua_assert(sz != CTSIZE_INVALID);
+ /* Attributes are stripped, qualifiers are kept (but mostly ignored). */
+ cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz);
+ setcdataV(cts->L, o, cd);
+ memcpy(cdataptr(cd), sp, sz);
+ return 1; /* Need GC step. */
+ }
+}
+
+/* Convert bitfield to TValue. */
+int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp)
+{
+ CTInfo info = s->info;
+ CTSize pos, bsz;
+ uint32_t val;
+ lua_assert(ctype_isbitfield(info));
+ /* NYI: packed bitfields may cause misaligned reads. */
+ switch (ctype_bitcsz(info)) {
+ case 4: val = *(uint32_t *)sp; break;
+ case 2: val = *(uint16_t *)sp; break;
+ case 1: val = *(uint8_t *)sp; break;
+ default: lua_assert(0); val = 0; break;
+ }
+ /* Check if a packed bitfield crosses a container boundary. */
+ pos = ctype_bitpos(info);
+ bsz = ctype_bitbsz(info);
+ lua_assert(pos < 8*ctype_bitcsz(info));
+ lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info));
+ if (pos + bsz > 8*ctype_bitcsz(info))
+ lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
+ if (!(info & CTF_BOOL)) {
+ CTSize shift = 32 - bsz;
+ if (!(info & CTF_UNSIGNED)) {
+ setintV(o, (int32_t)(val << (shift-pos)) >> shift);
+ } else {
+ val = (val << (shift-pos)) >> shift;
+ if (!LJ_DUALNUM || (int32_t)val < 0)
+ setnumV(o, (lua_Number)(uint32_t)val);
+ else
+ setintV(o, (int32_t)val);
+ }
+ } else {
+ lua_assert(bsz == 1);
+ setboolV(o, (val >> pos) & 1);
+ }
+ return 0; /* No GC step needed. */
+}
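
Editor's note: the signed branch above extracts the bitfield by shifting it to the top of the 32-bit container and arithmetic-shifting it back down, which sign-extends in a single step. A hedged standalone sketch of the same arithmetic; names are illustrative, it assumes pos + bsz <= 32 (as asserted above) and relies on arithmetic right shift of signed values, just like the original.

#include <stdint.h>

/* Illustrative sketch: extract a signed bitfield of width bsz starting at
** bit pos from a 32-bit container.  shift = 32 - bsz moves the field to
** the top; the signed right shift then sign-extends it back down.
*/
static int32_t bf_extract_signed(uint32_t container, unsigned pos, unsigned bsz)
{
  unsigned shift = 32 - bsz;     /* Assumes 1 <= bsz and pos + bsz <= 32. */
  return (int32_t)(container << (shift - pos)) >> shift;
}
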
+
+/* -- TValue to C type conversion ----------------------------------------- */
+
+/* Convert table to array. */
+static void cconv_array_tab(CTState *cts, CType *d,
+ uint8_t *dp, GCtab *t, CTInfo flags)
+{
+ int32_t i;
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize size = d->size, esize = dc->size, ofs = 0;
+ for (i = 0; ; i++) {
+ TValue *tv = (TValue *)lj_tab_getint(t, i);
+ if (!tv || tvisnil(tv)) {
+ if (i == 0) continue; /* Try again for 1-based tables. */
+ break; /* Stop at first nil. */
+ }
+ if (ofs >= size)
+ cconv_err_initov(cts, d);
+ lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags);
+ ofs += esize;
+ }
+ if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */
+ if (ofs == esize) { /* Replicate a single element. */
+ for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize);
+ } else { /* Otherwise fill the remainder with zero. */
+ memset(dp + ofs, 0, size - ofs);
+ }
+ }
+}
+
+/* Convert table to sub-struct/union. */
+static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp,
+ GCtab *t, int32_t *ip, CTInfo flags)
+{
+ CTypeID id = d->sib;
+ while (id) {
+ CType *df = ctype_get(cts, id);
+ id = df->sib;
+ if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
+ TValue *tv;
+ int32_t i = *ip, iz = i;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ if (i >= 0) {
+ retry:
+ tv = (TValue *)lj_tab_getint(t, i);
+ if (!tv || tvisnil(tv)) {
+ if (i == 0) { i = 1; goto retry; } /* 1-based tables. */
+ if (iz == 0) { *ip = i = -1; goto tryname; } /* Init named fields. */
+ break; /* Stop at first nil. */
+ }
+ *ip = i + 1;
+ } else {
+ tryname:
+ tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name)));
+ if (!tv || tvisnil(tv)) continue;
+ }
+ if (ctype_isfield(df->info))
+ lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags);
+ else
+ lj_cconv_bf_tv(cts, df, dp+df->size, tv);
+ if ((d->info & CTF_UNION)) break;
+ } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
+ cconv_substruct_tab(cts, ctype_rawchild(cts, df),
+ dp+df->size, t, ip, flags);
+ } /* Ignore all other entries in the chain. */
+ }
+}
+
+/* Convert table to struct/union. */
+static void cconv_struct_tab(CTState *cts, CType *d,
+ uint8_t *dp, GCtab *t, CTInfo flags)
+{
+ int32_t i = 0;
+ memset(dp, 0, d->size); /* Much simpler to clear the struct first. */
+ cconv_substruct_tab(cts, d, dp, t, &i, flags);
+}
+
+/* Convert TValue to C type. Caveat: expects to get the raw CType! */
+void lj_cconv_ct_tv(CTState *cts, CType *d,
+ uint8_t *dp, TValue *o, CTInfo flags)
+{
+ CTypeID sid = CTID_P_VOID;
+ CType *s;
+ void *tmpptr;
+ uint8_t tmpbool, *sp = (uint8_t *)&tmpptr;
+ if (LJ_LIKELY(tvisint(o))) {
+ sp = (uint8_t *)&o->i;
+ sid = CTID_INT32;
+ flags |= CCF_FROMTV;
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ sp = (uint8_t *)&o->n;
+ sid = CTID_DOUBLE;
+ flags |= CCF_FROMTV;
+ } else if (tviscdata(o)) {
+ sp = cdataptr(cdataV(o));
+ sid = cdataV(o)->ctypeid;
+ s = ctype_get(cts, sid);
+ if (ctype_isref(s->info)) { /* Resolve reference for value. */
+ lua_assert(s->size == CTSIZE_PTR);
+ sp = *(void **)sp;
+ sid = ctype_cid(s->info);
+ }
+ s = ctype_raw(cts, sid);
+ if (ctype_isfunc(s->info)) {
+ sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR);
+ } else {
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ goto doconv;
+ }
+ } else if (tvisstr(o)) {
+ GCstr *str = strV(o);
+ if (ctype_isenum(d->info)) { /* Match string against enum constant. */
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
+ if (!cct || !ctype_isconstval(cct->info))
+ goto err_conv;
+ lua_assert(d->size == 4);
+ sp = (uint8_t *)&cct->size;
+ sid = ctype_cid(cct->info);
+ } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
+ CType *dc = ctype_rawchild(cts, d);
+ CTSize sz = str->len+1;
+ if (!ctype_isinteger(dc->info) || dc->size != 1)
+ goto err_conv;
+ if (d->size != 0 && d->size < sz)
+ sz = d->size;
+ memcpy(dp, strdata(str), sz);
+ return;
+ } else { /* Otherwise pass it as a const char[]. */
+ sp = (uint8_t *)strdata(str);
+ sid = CTID_A_CCHAR;
+ flags |= CCF_FROMTV;
+ }
+ } else if (tvistab(o)) {
+ if (ctype_isarray(d->info)) {
+ cconv_array_tab(cts, d, dp, tabV(o), flags);
+ return;
+ } else if (ctype_isstruct(d->info)) {
+ cconv_struct_tab(cts, d, dp, tabV(o), flags);
+ return;
+ } else {
+ goto err_conv;
+ }
+ } else if (tvisbool(o)) {
+ tmpbool = boolV(o);
+ sp = &tmpbool;
+ sid = CTID_BOOL;
+ } else if (tvisnil(o)) {
+ tmpptr = (void *)0;
+ flags |= CCF_FROMTV;
+ } else if (tvisudata(o)) {
+ GCudata *ud = udataV(o);
+ tmpptr = uddata(ud);
+ if (ud->udtype == UDTYPE_IO_FILE)
+ tmpptr = *(void **)tmpptr;
+ } else if (tvislightud(o)) {
+ tmpptr = lightudV(o);
+ } else if (tvisfunc(o)) {
+ void *p = lj_ccallback_new(cts, d, funcV(o));
+ if (p) {
+ *(void **)dp = p;
+ return;
+ }
+ goto err_conv;
+ } else {
+ err_conv:
+ cconv_err_convtv(cts, d, o, flags);
+ }
+ s = ctype_get(cts, sid);
+doconv:
+ if (ctype_isenum(d->info)) d = ctype_child(cts, d);
+ lj_cconv_ct_ct(cts, d, s, dp, sp, flags);
+}
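
Editor's note: among the branches above, the string-to-char-array case copies the string including its terminating NUL and truncates to the fixed array size when necessary. A minimal sketch of just that copy rule; the helper is illustrative, not LuaJIT API, and a destination size of 0 skips truncation, mirroring the d->size != 0 check above.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative sketch of the "copy string to char array" rule above. */
static void copy_string_to_char_array(uint8_t *dp, size_t dsize,
                                      const char *str, size_t len)
{
  size_t sz = len + 1;                      /* Include the trailing '\0'. */
  if (dsize != 0 && dsize < sz) sz = dsize; /* Truncate to a fixed array. */
  memcpy(dp, str, sz);
}
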
+
+/* Convert TValue to bitfield. */
+void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
+{
+ CTInfo info = d->info;
+ CTSize pos, bsz;
+ uint32_t val, mask;
+ lua_assert(ctype_isbitfield(info));
+ if ((info & CTF_BOOL)) {
+ uint8_t tmpbool;
+ lua_assert(ctype_bitbsz(info) == 1);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0);
+ val = tmpbool;
+ } else {
+ CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32;
+ lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0);
+ }
+ pos = ctype_bitpos(info);
+ bsz = ctype_bitbsz(info);
+ lua_assert(pos < 8*ctype_bitcsz(info));
+ lua_assert(bsz > 0 && bsz <= 8*ctype_bitcsz(info));
+ /* Check if a packed bitfield crosses a container boundary. */
+ if (pos + bsz > 8*ctype_bitcsz(info))
+ lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
+ mask = ((1u << bsz) - 1u) << pos;
+ val = (val << pos) & mask;
+ /* NYI: packed bitfields may cause misaligned reads/writes. */
+ switch (ctype_bitcsz(info)) {
+ case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break;
+ case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break;
+ case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break;
+ default: lua_assert(0); break;
+ }
+}
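
Editor's note: the store above is a read-modify-write of the container: clear the field's bits with a mask, then OR in the shifted new value. A hedged sketch for a 32-bit container; names are illustrative, and it assumes 1 <= bsz < 32 and pos + bsz <= 32 so the shifts stay well-defined.

#include <stdint.h>

/* Illustrative sketch of the masked read-modify-write used above. */
static uint32_t bf_insert(uint32_t container, uint32_t v,
                          unsigned pos, unsigned bsz)
{
  uint32_t mask = ((1u << bsz) - 1u) << pos;
  return (container & ~mask) | ((v << pos) & mask);
}
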
+
+/* -- Initialize C type with TValues -------------------------------------- */
+
+/* Initialize an array with TValues. */
+static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
+ TValue *o, MSize len)
+{
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize ofs, esize = dc->size;
+ MSize i;
+ if (len*esize > sz)
+ cconv_err_initov(cts, d);
+ for (i = 0, ofs = 0; i < len; i++, ofs += esize)
+ lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0);
+ if (ofs == esize) { /* Replicate a single element. */
+ for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize);
+ } else { /* Otherwise fill the remainder with zero. */
+ memset(dp + ofs, 0, sz - ofs);
+ }
+}
+
+/* Initialize a sub-struct/union with TValues. */
+static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp,
+ TValue *o, MSize len, MSize *ip)
+{
+ CTypeID id = d->sib;
+ while (id) {
+ CType *df = ctype_get(cts, id);
+ id = df->sib;
+ if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
+ MSize i = *ip;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ if (i >= len) break;
+ *ip = i + 1;
+ if (ctype_isfield(df->info))
+ lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0);
+ else
+ lj_cconv_bf_tv(cts, df, dp+df->size, o + i);
+ if ((d->info & CTF_UNION)) break;
+ } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
+ cconv_substruct_init(cts, ctype_rawchild(cts, df),
+ dp+df->size, o, len, ip);
+ } /* Ignore all other entries in the chain. */
+ }
+}
+
+/* Initialize a struct/union with TValues. */
+static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
+ TValue *o, MSize len)
+{
+ MSize i = 0;
+ memset(dp, 0, sz); /* Much simpler to clear the struct first. */
+ cconv_substruct_init(cts, d, dp, o, len, &i);
+ if (i < len)
+ cconv_err_initov(cts, d);
+}
+
+/* Check whether to use a multi-value initializer.
+** This is true if an aggregate is to be initialized with a value.
+** Valarrays are treated as values here so ct_tv handles (V|C, I|F).
+*/
+int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o)
+{
+ if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info)))
+ return 0; /* Destination is not an aggregate. */
+ if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info)))
+ return 0; /* Initializer is not a value. */
+ if (tviscdata(o) && lj_ctype_rawref(cts, cdataV(o)->ctypeid) == d)
+ return 0; /* Source and destination are identical aggregates. */
+ return 1; /* Otherwise the initializer is a value. */
+}
+
+/* Initialize C type with TValues. Caveat: expects to get the raw CType! */
+void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
+ uint8_t *dp, TValue *o, MSize len)
+{
+ if (len == 0)
+ memset(dp, 0, sz);
+ else if (len == 1 && !lj_cconv_multi_init(cts, d, o))
+ lj_cconv_ct_tv(cts, d, dp, o, 0);
+ else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. */
+ cconv_array_init(cts, d, sz, dp, o, len);
+ else if (ctype_isstruct(d->info))
+ cconv_struct_init(cts, d, sz, dp, o, len);
+ else
+ cconv_err_initov(cts, d);
+}
+
+#endif
diff --git a/3rdparty/lua/src/lj_cconv.h b/3rdparty/lua/src/lj_cconv.h
index 82b6f92..5c7a819 100644
--- a/3rdparty/lua/src/lj_cconv.h
+++ b/3rdparty/lua/src/lj_cconv.h
@@ -1,70 +1,70 @@
-/*
-** C type conversions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CCONV_H
-#define _LJ_CCONV_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* Compressed C type index. ORDER CCX. */
-enum {
- CCX_B, /* Bool. */
- CCX_I, /* Integer. */
- CCX_F, /* Floating-point number. */
- CCX_C, /* Complex. */
- CCX_V, /* Vector. */
- CCX_P, /* Pointer. */
- CCX_A, /* Refarray. */
- CCX_S /* Struct/union. */
-};
-
-/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */
-static LJ_AINLINE uint32_t cconv_idx(CTInfo info)
-{
- uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */
- lua_assert(ctype_type(info) <= CT_MAYCONVERT);
-#if LJ_64
- idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u);
-#else
- idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u);
-#endif
- lua_assert(idx < 8);
- return idx;
-}
-
-#define cconv_idx2(dinfo, sinfo) \
- ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo)))
-
-#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src)
-
-/* Conversion flags. */
-#define CCF_CAST 0x00000001u
-#define CCF_FROMTV 0x00000002u
-#define CCF_SAME 0x00000004u
-#define CCF_IGNQUAL 0x00000008u
-
-#define CCF_ARG_SHIFT 8
-#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT)
-#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT)
-
-LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags);
-LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
- uint8_t *dp, uint8_t *sp, CTInfo flags);
-LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
- TValue *o, uint8_t *sp);
-LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp);
-LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d,
- uint8_t *dp, TValue *o, CTInfo flags);
-LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o);
-LJ_FUNC int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o);
-LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
- uint8_t *dp, TValue *o, MSize len);
-
-#endif
-
-#endif
+/*
+** C type conversions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCONV_H
+#define _LJ_CCONV_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Compressed C type index. ORDER CCX. */
+enum {
+ CCX_B, /* Bool. */
+ CCX_I, /* Integer. */
+ CCX_F, /* Floating-point number. */
+ CCX_C, /* Complex. */
+ CCX_V, /* Vector. */
+ CCX_P, /* Pointer. */
+ CCX_A, /* Refarray. */
+ CCX_S /* Struct/union. */
+};
+
+/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */
+static LJ_AINLINE uint32_t cconv_idx(CTInfo info)
+{
+ uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */
+ lua_assert(ctype_type(info) <= CT_MAYCONVERT);
+#if LJ_64
+ idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u);
+#else
+ idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u);
+#endif
+ lua_assert(idx < 8);
+ return idx;
+}
+
+#define cconv_idx2(dinfo, sinfo) \
+ ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo)))
+
+#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src)
+
+/* Conversion flags. */
+#define CCF_CAST 0x00000001u
+#define CCF_FROMTV 0x00000002u
+#define CCF_SAME 0x00000004u
+#define CCF_IGNQUAL 0x00000008u
+
+#define CCF_ARG_SHIFT 8
+#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT)
+#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT)
+
+LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags);
+LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
+ uint8_t *dp, uint8_t *sp, CTInfo flags);
+LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
+ TValue *o, uint8_t *sp);
+LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp);
+LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d,
+ uint8_t *dp, TValue *o, CTInfo flags);
+LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o);
+LJ_FUNC int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o);
+LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
+ uint8_t *dp, TValue *o, MSize len);
+
+#endif
+
+#endif
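
Editor's note: the CCX() macro above builds the compile-time switch labels used in lj_cconv_ct_ct, and cconv_idx2() produces the same (dst << 3) + src pairing at run time from the two CTInfo words. A tiny sanity check, assuming this header is included; the function name is illustrative.

#include <assert.h>

/* Illustrative: with CCX_F == 2 and CCX_I == 1, a float destination with
** an integer source dispatches to case CCX(F, I) == (2 << 3) + 1 == 17.
*/
static void ccx_pairing_example(void)
{
  assert(CCX(F, I) == (CCX_F << 3) + CCX_I);
  assert(CCX(F, I) == 17);
}
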
diff --git a/3rdparty/lua/src/lj_cdata.c b/3rdparty/lua/src/lj_cdata.c
index 4521282..10f4809 100644
--- a/3rdparty/lua/src/lj_cdata.c
+++ b/3rdparty/lua/src/lj_cdata.c
@@ -1,285 +1,285 @@
-/*
-** C data management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_ctype.h"
-#include "lj_cconv.h"
-#include "lj_cdata.h"
-
-/* -- C data allocation --------------------------------------------------- */
-
-/* Allocate a new C data object holding a reference to another object. */
-GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id)
-{
- CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR);
- GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR);
- *(const void **)cdataptr(cd) = p;
- return cd;
-}
-
-/* Allocate variable-sized or specially aligned C data object. */
-GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, CTSize align)
-{
- global_State *g;
- MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) +
- (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0);
- char *p = lj_mem_newt(cts->L, extra + sz, char);
- uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata);
- uintptr_t almask = (1u << align) - 1u;
- GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata));
- lua_assert((char *)cd - p < 65536);
- cdatav(cd)->offset = (uint16_t)((char *)cd - p);
- cdatav(cd)->extra = extra;
- cdatav(cd)->len = sz;
- g = cts->g;
- setgcrefr(cd->nextgc, g->gc.root);
- setgcref(g->gc.root, obj2gco(cd));
- newwhite(g, obj2gco(cd));
- cd->marked |= 0x80;
- cd->gct = ~LJ_TCDATA;
- cd->ctypeid = id;
- return cd;
-}
-
-/* Free a C data object. */
-void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
-{
- if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) {
- GCobj *root;
- makewhite(g, obj2gco(cd));
- markfinalized(obj2gco(cd));
- if ((root = gcref(g->gc.mmudata)) != NULL) {
- setgcrefr(cd->nextgc, root->gch.nextgc);
- setgcref(root->gch.nextgc, obj2gco(cd));
- setgcref(g->gc.mmudata, obj2gco(cd));
- } else {
- setgcref(cd->nextgc, obj2gco(cd));
- setgcref(g->gc.mmudata, obj2gco(cd));
- }
- } else if (LJ_LIKELY(!cdataisv(cd))) {
- CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid);
- CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR;
- lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) ||
- ctype_isextern(ct->info));
- lj_mem_free(g, cd, sizeof(GCcdata) + sz);
- } else {
- lj_mem_free(g, memcdatav(cd), sizecdatav(cd));
- }
-}
-
-TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd)
-{
- global_State *g = G(L);
- GCtab *t = ctype_ctsG(g)->finalizer;
- if (gcref(t->metatable)) {
- /* Add cdata to finalizer table, if still enabled. */
- TValue *tv, tmp;
- setcdataV(L, &tmp, cd);
- lj_gc_anybarriert(L, t);
- tv = lj_tab_set(L, t, &tmp);
- cd->marked |= LJ_GC_CDATA_FIN;
- return tv;
- } else {
- /* Otherwise return dummy TValue. */
- return &g->tmptv;
- }
-}
-
-/* -- C data indexing ----------------------------------------------------- */
-
-/* Index C data by a TValue. Return CType and pointer. */
-CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp,
- CTInfo *qual)
-{
- uint8_t *p = (uint8_t *)cdataptr(cd);
- CType *ct = ctype_get(cts, cd->ctypeid);
- ptrdiff_t idx;
-
- /* Resolve reference for cdata object. */
- if (ctype_isref(ct->info)) {
- lua_assert(ct->size == CTSIZE_PTR);
- p = *(uint8_t **)p;
- ct = ctype_child(cts, ct);
- }
-
-collect_attrib:
- /* Skip attributes and collect qualifiers. */
- while (ctype_isattrib(ct->info)) {
- if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
- ct = ctype_child(cts, ct);
- }
- lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */
-
- if (tvisint(key)) {
- idx = (ptrdiff_t)intV(key);
- goto integer_key;
- } else if (tvisnum(key)) { /* Numeric key. */
- idx = LJ_64 ? (ptrdiff_t)numV(key) : (ptrdiff_t)lj_num2int(numV(key));
- integer_key:
- if (ctype_ispointer(ct->info)) {
- CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */
- if (sz == CTSIZE_INVALID)
- lj_err_caller(cts->L, LJ_ERR_FFI_INVSIZE);
- if (ctype_isptr(ct->info)) {
- p = (uint8_t *)cdata_getptr(p, ct->size);
- } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) {
- if ((ct->info & CTF_COMPLEX)) idx &= 1;
- *qual |= CTF_CONST; /* Valarray elements are constant. */
- }
- *pp = p + idx*(int32_t)sz;
- return ct;
- }
- } else if (tviscdata(key)) { /* Integer cdata key. */
- GCcdata *cdk = cdataV(key);
- CType *ctk = ctype_raw(cts, cdk->ctypeid);
- if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk);
- if (ctype_isinteger(ctk->info)) {
- lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk,
- (uint8_t *)&idx, cdataptr(cdk), 0);
- goto integer_key;
- }
- } else if (tvisstr(key)) { /* String key. */
- GCstr *name = strV(key);
- if (ctype_isstruct(ct->info)) {
- CTSize ofs;
- CType *fct = lj_ctype_getfieldq(cts, ct, name, &ofs, qual);
- if (fct) {
- *pp = p + ofs;
- return fct;
- }
- } else if (ctype_iscomplex(ct->info)) {
- if (name->len == 2) {
- *qual |= CTF_CONST; /* Complex fields are constant. */
- if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') {
- *pp = p;
- return ct;
- } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') {
- *pp = p + (ct->size >> 1);
- return ct;
- }
- }
- } else if (cd->ctypeid == CTID_CTYPEID) {
- /* Allow indexing a (pointer to) struct constructor to get constants. */
- CType *sct = ctype_raw(cts, *(CTypeID *)p);
- if (ctype_isptr(sct->info))
- sct = ctype_rawchild(cts, sct);
- if (ctype_isstruct(sct->info)) {
- CTSize ofs;
- CType *fct = lj_ctype_getfield(cts, sct, name, &ofs);
- if (fct && ctype_isconstval(fct->info))
- return fct;
- }
- ct = sct; /* Allow resolving metamethods for constructors, too. */
- }
- }
- if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
- if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) {
- p = (uint8_t *)cdata_getptr(p, ct->size);
- ct = ctype_child(cts, ct);
- goto collect_attrib;
- }
- }
- *qual |= 1; /* Lookup failed. */
- return ct; /* But return the resolved raw type. */
-}
-
-/* -- C data getters ------------------------------------------------------ */
-
-/* Get constant value and convert to TValue. */
-static void cdata_getconst(CTState *cts, TValue *o, CType *ct)
-{
- CType *ctt = ctype_child(cts, ct);
- lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4);
- /* Constants are already zero-extended/sign-extended to 32 bits. */
- if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
- setnumV(o, (lua_Number)(uint32_t)ct->size);
- else
- setintV(o, (int32_t)ct->size);
-}
-
-/* Get C data value and convert to TValue. */
-int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp)
-{
- CTypeID sid;
-
- if (ctype_isconstval(s->info)) {
- cdata_getconst(cts, o, s);
- return 0; /* No GC step needed. */
- } else if (ctype_isbitfield(s->info)) {
- return lj_cconv_tv_bf(cts, s, o, sp);
- }
-
- /* Get child type of pointer/array/field. */
- lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info));
- sid = ctype_cid(s->info);
- s = ctype_get(cts, sid);
-
- /* Resolve reference for field. */
- if (ctype_isref(s->info)) {
- lua_assert(s->size == CTSIZE_PTR);
- sp = *(uint8_t **)sp;
- sid = ctype_cid(s->info);
- s = ctype_get(cts, sid);
- }
-
- /* Skip attributes. */
- while (ctype_isattrib(s->info))
- s = ctype_child(cts, s);
-
- return lj_cconv_tv_ct(cts, s, sid, o, sp);
-}
-
-/* -- C data setters ------------------------------------------------------ */
-
-/* Convert TValue and set C data value. */
-void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual)
-{
- if (ctype_isconstval(d->info)) {
- goto err_const;
- } else if (ctype_isbitfield(d->info)) {
- if (((d->info|qual) & CTF_CONST)) goto err_const;
- lj_cconv_bf_tv(cts, d, dp, o);
- return;
- }
-
- /* Get child type of pointer/array/field. */
- lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info));
- d = ctype_child(cts, d);
-
- /* Resolve reference for field. */
- if (ctype_isref(d->info)) {
- lua_assert(d->size == CTSIZE_PTR);
- dp = *(uint8_t **)dp;
- d = ctype_child(cts, d);
- }
-
- /* Skip attributes and collect qualifiers. */
- for (;;) {
- if (ctype_isattrib(d->info)) {
- if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
- } else {
- break;
- }
- d = ctype_child(cts, d);
- }
-
- lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info));
-
- if (((d->info|qual) & CTF_CONST)) {
- err_const:
- lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST);
- }
-
- lj_cconv_ct_tv(cts, d, dp, o, 0);
-}
-
-#endif
+/*
+** C data management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+
+/* -- C data allocation --------------------------------------------------- */
+
+/* Allocate a new C data object holding a reference to another object. */
+GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id)
+{
+ CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR);
+ GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR);
+ *(const void **)cdataptr(cd) = p;
+ return cd;
+}
+
+/* Allocate variable-sized or specially aligned C data object. */
+GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz, CTSize align)
+{
+ global_State *g;
+ MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) +
+ (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0);
+ char *p = lj_mem_newt(cts->L, extra + sz, char);
+ uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata);
+ uintptr_t almask = (1u << align) - 1u;
+ GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata));
+ lua_assert((char *)cd - p < 65536);
+ cdatav(cd)->offset = (uint16_t)((char *)cd - p);
+ cdatav(cd)->extra = extra;
+ cdatav(cd)->len = sz;
+ g = cts->g;
+ setgcrefr(cd->nextgc, g->gc.root);
+ setgcref(g->gc.root, obj2gco(cd));
+ newwhite(g, obj2gco(cd));
+ cd->marked |= 0x80;
+ cd->gct = ~LJ_TCDATA;
+ cd->ctypeid = id;
+ return cd;
+}
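
Editor's note: lj_cdata_newv above over-allocates, rounds the payload address up to the requested power-of-two alignment and places the GCcdata header directly in front of the payload, remembering the offset back to the raw allocation. A minimal sketch of the rounding step; the helper is illustrative, not LuaJIT API.

#include <stdint.h>

/* Illustrative: round addr up to a (1 << align_log2)-byte boundary,
** as done for the payload address in lj_cdata_newv above.
*/
static uintptr_t align_up(uintptr_t addr, unsigned align_log2)
{
  uintptr_t almask = ((uintptr_t)1 << align_log2) - 1;
  return (addr + almask) & ~almask;
}
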
+
+/* Free a C data object. */
+void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
+{
+ if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) {
+ GCobj *root;
+ makewhite(g, obj2gco(cd));
+ markfinalized(obj2gco(cd));
+ if ((root = gcref(g->gc.mmudata)) != NULL) {
+ setgcrefr(cd->nextgc, root->gch.nextgc);
+ setgcref(root->gch.nextgc, obj2gco(cd));
+ setgcref(g->gc.mmudata, obj2gco(cd));
+ } else {
+ setgcref(cd->nextgc, obj2gco(cd));
+ setgcref(g->gc.mmudata, obj2gco(cd));
+ }
+ } else if (LJ_LIKELY(!cdataisv(cd))) {
+ CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid);
+ CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR;
+ lua_assert(ctype_hassize(ct->info) || ctype_isfunc(ct->info) ||
+ ctype_isextern(ct->info));
+ lj_mem_free(g, cd, sizeof(GCcdata) + sz);
+ } else {
+ lj_mem_free(g, memcdatav(cd), sizecdatav(cd));
+ }
+}
+
+TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd)
+{
+ global_State *g = G(L);
+ GCtab *t = ctype_ctsG(g)->finalizer;
+ if (gcref(t->metatable)) {
+ /* Add cdata to finalizer table, if still enabled. */
+ TValue *tv, tmp;
+ setcdataV(L, &tmp, cd);
+ lj_gc_anybarriert(L, t);
+ tv = lj_tab_set(L, t, &tmp);
+ cd->marked |= LJ_GC_CDATA_FIN;
+ return tv;
+ } else {
+ /* Otherwise return dummy TValue. */
+ return &g->tmptv;
+ }
+}
+
+/* -- C data indexing ----------------------------------------------------- */
+
+/* Index C data by a TValue. Return CType and pointer. */
+CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp,
+ CTInfo *qual)
+{
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ CType *ct = ctype_get(cts, cd->ctypeid);
+ ptrdiff_t idx;
+
+ /* Resolve reference for cdata object. */
+ if (ctype_isref(ct->info)) {
+ lua_assert(ct->size == CTSIZE_PTR);
+ p = *(uint8_t **)p;
+ ct = ctype_child(cts, ct);
+ }
+
+collect_attrib:
+ /* Skip attributes and collect qualifiers. */
+ while (ctype_isattrib(ct->info)) {
+ if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+ ct = ctype_child(cts, ct);
+ }
+ lua_assert(!ctype_isref(ct->info)); /* Interning rejects refs to refs. */
+
+ if (tvisint(key)) {
+ idx = (ptrdiff_t)intV(key);
+ goto integer_key;
+ } else if (tvisnum(key)) { /* Numeric key. */
+ idx = LJ_64 ? (ptrdiff_t)numV(key) : (ptrdiff_t)lj_num2int(numV(key));
+ integer_key:
+ if (ctype_ispointer(ct->info)) {
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */
+ if (sz != CTSIZE_INVALID) {
+ if (ctype_isptr(ct->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) {
+ if ((ct->info & CTF_COMPLEX)) idx &= 1;
+ *qual |= CTF_CONST; /* Valarray elements are constant. */
+ }
+ *pp = p + idx*(int32_t)sz;
+ return ct;
+ }
+ }
+ } else if (tviscdata(key)) { /* Integer cdata key. */
+ GCcdata *cdk = cdataV(key);
+ CType *ctk = ctype_raw(cts, cdk->ctypeid);
+ if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk);
+ if (ctype_isinteger(ctk->info)) {
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk,
+ (uint8_t *)&idx, cdataptr(cdk), 0);
+ goto integer_key;
+ }
+ } else if (tvisstr(key)) { /* String key. */
+ GCstr *name = strV(key);
+ if (ctype_isstruct(ct->info)) {
+ CTSize ofs;
+ CType *fct = lj_ctype_getfieldq(cts, ct, name, &ofs, qual);
+ if (fct) {
+ *pp = p + ofs;
+ return fct;
+ }
+ } else if (ctype_iscomplex(ct->info)) {
+ if (name->len == 2) {
+ *qual |= CTF_CONST; /* Complex fields are constant. */
+ if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') {
+ *pp = p;
+ return ct;
+ } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') {
+ *pp = p + (ct->size >> 1);
+ return ct;
+ }
+ }
+ } else if (cd->ctypeid == CTID_CTYPEID) {
+ /* Allow indexing a (pointer to) struct constructor to get constants. */
+ CType *sct = ctype_raw(cts, *(CTypeID *)p);
+ if (ctype_isptr(sct->info))
+ sct = ctype_rawchild(cts, sct);
+ if (ctype_isstruct(sct->info)) {
+ CTSize ofs;
+ CType *fct = lj_ctype_getfield(cts, sct, name, &ofs);
+ if (fct && ctype_isconstval(fct->info))
+ return fct;
+ }
+ ct = sct; /* Allow resolving metamethods for constructors, too. */
+ }
+ }
+ if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
+ if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ ct = ctype_child(cts, ct);
+ goto collect_attrib;
+ }
+ }
+ *qual |= 1; /* Lookup failed. */
+ return ct; /* But return the resolved raw type. */
+}
+
+/* -- C data getters ------------------------------------------------------ */
+
+/* Get constant value and convert to TValue. */
+static void cdata_getconst(CTState *cts, TValue *o, CType *ct)
+{
+ CType *ctt = ctype_child(cts, ct);
+ lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4);
+ /* Constants are already zero-extended/sign-extended to 32 bits. */
+ if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
+ setnumV(o, (lua_Number)(uint32_t)ct->size);
+ else
+ setintV(o, (int32_t)ct->size);
+}
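
Editor's note: constant values are stored as 32 bits in ct->size, so an unsigned constant whose top bit is set cannot be surfaced as a signed int32 and is returned as a Lua number instead. A hedged sketch of that decision; the helper returns a double for simplicity and is illustrative only.

#include <stdint.h>

/* Illustrative: present a stored 32-bit constant with the right sign. */
static double const_to_number(uint32_t raw, int is_unsigned)
{
  if (is_unsigned && (int32_t)raw < 0)
    return (double)raw;              /* 0x80000000u..0xffffffffu range. */
  return (double)(int32_t)raw;       /* Fits the signed int32 range. */
}
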
+
+/* Get C data value and convert to TValue. */
+int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp)
+{
+ CTypeID sid;
+
+ if (ctype_isconstval(s->info)) {
+ cdata_getconst(cts, o, s);
+ return 0; /* No GC step needed. */
+ } else if (ctype_isbitfield(s->info)) {
+ return lj_cconv_tv_bf(cts, s, o, sp);
+ }
+
+ /* Get child type of pointer/array/field. */
+ lua_assert(ctype_ispointer(s->info) || ctype_isfield(s->info));
+ sid = ctype_cid(s->info);
+ s = ctype_get(cts, sid);
+
+ /* Resolve reference for field. */
+ if (ctype_isref(s->info)) {
+ lua_assert(s->size == CTSIZE_PTR);
+ sp = *(uint8_t **)sp;
+ sid = ctype_cid(s->info);
+ s = ctype_get(cts, sid);
+ }
+
+ /* Skip attributes. */
+ while (ctype_isattrib(s->info))
+ s = ctype_child(cts, s);
+
+ return lj_cconv_tv_ct(cts, s, sid, o, sp);
+}
+
+/* -- C data setters ------------------------------------------------------ */
+
+/* Convert TValue and set C data value. */
+void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual)
+{
+ if (ctype_isconstval(d->info)) {
+ goto err_const;
+ } else if (ctype_isbitfield(d->info)) {
+ if (((d->info|qual) & CTF_CONST)) goto err_const;
+ lj_cconv_bf_tv(cts, d, dp, o);
+ return;
+ }
+
+ /* Get child type of pointer/array/field. */
+ lua_assert(ctype_ispointer(d->info) || ctype_isfield(d->info));
+ d = ctype_child(cts, d);
+
+ /* Resolve reference for field. */
+ if (ctype_isref(d->info)) {
+ lua_assert(d->size == CTSIZE_PTR);
+ dp = *(uint8_t **)dp;
+ d = ctype_child(cts, d);
+ }
+
+ /* Skip attributes and collect qualifiers. */
+ for (;;) {
+ if (ctype_isattrib(d->info)) {
+ if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
+ } else {
+ break;
+ }
+ d = ctype_child(cts, d);
+ }
+
+ lua_assert(ctype_hassize(d->info) && !ctype_isvoid(d->info));
+
+ if (((d->info|qual) & CTF_CONST)) {
+ err_const:
+ lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST);
+ }
+
+ lj_cconv_ct_tv(cts, d, dp, o, 0);
+}
+
+#endif
diff --git a/3rdparty/lua/src/lj_cdata.h b/3rdparty/lua/src/lj_cdata.h
index 80a0888..0c81b02 100644
--- a/3rdparty/lua/src/lj_cdata.h
+++ b/3rdparty/lua/src/lj_cdata.h
@@ -1,75 +1,75 @@
-/*
-** C data management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CDATA_H
-#define _LJ_CDATA_H
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* Get C data pointer. */
-static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz)
-{
- if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
- return ((void *)(uintptr_t)*(uint32_t *)p);
- } else {
- lua_assert(sz == CTSIZE_PTR);
- return *(void **)p;
- }
-}
-
-/* Set C data pointer. */
-static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v)
-{
- if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
- *(uint32_t *)p = (uint32_t)(uintptr_t)v;
- } else {
- lua_assert(sz == CTSIZE_PTR);
- *(void **)p = (void *)v;
- }
-}
-
-/* Allocate fixed-size C data object. */
-static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz)
-{
- GCcdata *cd;
-#ifdef LUA_USE_ASSERT
- CType *ct = ctype_raw(cts, id);
- lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz);
-#endif
- cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz);
- cd->gct = ~LJ_TCDATA;
- cd->ctypeid = ctype_check(cts, id);
- return cd;
-}
-
-/* Variant which works without a valid CTState. */
-static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz)
-{
- GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz);
- cd->gct = ~LJ_TCDATA;
- cd->ctypeid = id;
- return cd;
-}
-
-LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id);
-LJ_FUNC GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz,
- CTSize align);
-
-LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd);
-LJ_FUNCA TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd);
-
-LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key,
- uint8_t **pp, CTInfo *qual);
-LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp);
-LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o,
- CTInfo qual);
-
-#endif
-
-#endif
+/*
+** C data management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CDATA_H
+#define _LJ_CDATA_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Get C data pointer. */
+static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz)
+{
+ if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
+ return ((void *)(uintptr_t)*(uint32_t *)p);
+ } else {
+ lua_assert(sz == CTSIZE_PTR);
+ return *(void **)p;
+ }
+}
+
+/* Set C data pointer. */
+static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v)
+{
+ if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
+ *(uint32_t *)p = (uint32_t)(uintptr_t)v;
+ } else {
+ lua_assert(sz == CTSIZE_PTR);
+ *(void **)p = (void *)v;
+ }
+}
+
+/* Allocate fixed-size C data object. */
+static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz)
+{
+ GCcdata *cd;
+#ifdef LUA_USE_ASSERT
+ CType *ct = ctype_raw(cts, id);
+ lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz);
+#endif
+ cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz);
+ cd->gct = ~LJ_TCDATA;
+ cd->ctypeid = ctype_check(cts, id);
+ return cd;
+}
+
+/* Variant which works without a valid CTState. */
+static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz)
+{
+ GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz);
+ cd->gct = ~LJ_TCDATA;
+ cd->ctypeid = id;
+ return cd;
+}
+
+LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id);
+LJ_FUNC GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz,
+ CTSize align);
+
+LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd);
+LJ_FUNCA TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd);
+
+LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key,
+ uint8_t **pp, CTInfo *qual);
+LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp);
+LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o,
+ CTInfo qual);
+
+#endif
+
+#endif
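
Editor's note: cdata_getptr/cdata_setptr above special-case sz == 4 on 64-bit targets so that 32-bit pointer types round-trip through a uint32_t slot. A minimal sketch of that round trip; illustrative only and valid solely for addresses that actually fit in 32 bits.

#include <stdint.h>

/* Illustrative: store and reload a pointer through a 4-byte slot, as the
** sz == 4 branches of cdata_setptr/cdata_getptr do on LJ_64 targets.
*/
static void *roundtrip_ptr32(void *v)
{
  uint32_t slot = (uint32_t)(uintptr_t)v;  /* cdata_setptr, sz == 4 */
  return (void *)(uintptr_t)slot;          /* cdata_getptr, sz == 4 */
}
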
diff --git a/3rdparty/lua/src/lj_char.c b/3rdparty/lua/src/lj_char.c
index 4d0a52a..11f23ef 100644
--- a/3rdparty/lua/src/lj_char.c
+++ b/3rdparty/lua/src/lj_char.c
@@ -1,43 +1,43 @@
-/*
-** Character types.
-** Donated to the public domain.
-**
-** This is intended to replace the problematic libc single-byte NLS functions.
-** These just don't make sense anymore with UTF-8 locales becoming the norm
-** on POSIX systems. It never worked too well on Windows systems since hardly
-** anyone bothered to call setlocale().
-**
-** This table is hardcoded for ASCII. Identifiers include the characters
-** 128-255, too. This allows for the use of all non-ASCII chars as identifiers
-** in the lexer. This is a broad definition, but works well in practice
-** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*).
-**
-** If you really need proper character types for UTF-8 strings, please use
-** an add-on library such as slnunicode: http://luaforge.net/projects/sln/
-*/
-
-#define lj_char_c
-#define LUA_CORE
-
-#include "lj_char.h"
-
-LJ_DATADEF const uint8_t lj_char_bits[257] = {
- 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4,
- 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160,
- 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132,
- 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192,
- 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
-};
-
+/*
+** Character types.
+** Donated to the public domain.
+**
+** This is intended to replace the problematic libc single-byte NLS functions.
+** These just don't make sense anymore with UTF-8 locales becoming the norm
+** on POSIX systems. It never worked too well on Windows systems since hardly
+** anyone bothered to call setlocale().
+**
+** This table is hardcoded for ASCII. Identifiers include the characters
+** 128-255, too. This allows for the use of all non-ASCII chars as identifiers
+** in the lexer. This is a broad definition, but works well in practice
+** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*).
+**
+** If you really need proper character types for UTF-8 strings, please use
+** an add-on library such as slnunicode: http://luaforge.net/projects/sln/
+*/
+
+#define lj_char_c
+#define LUA_CORE
+
+#include "lj_char.h"
+
+LJ_DATADEF const uint8_t lj_char_bits[257] = {
+ 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4,
+ 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160,
+ 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132,
+ 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192,
+ 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
+};
+
diff --git a/3rdparty/lua/src/lj_char.h b/3rdparty/lua/src/lj_char.h
index ce1fa81..c3c86d3 100644
--- a/3rdparty/lua/src/lj_char.h
+++ b/3rdparty/lua/src/lj_char.h
@@ -1,42 +1,42 @@
-/*
-** Character types.
-** Donated to the public domain.
-*/
-
-#ifndef _LJ_CHAR_H
-#define _LJ_CHAR_H
-
-#include "lj_def.h"
-
-#define LJ_CHAR_CNTRL 0x01
-#define LJ_CHAR_SPACE 0x02
-#define LJ_CHAR_PUNCT 0x04
-#define LJ_CHAR_DIGIT 0x08
-#define LJ_CHAR_XDIGIT 0x10
-#define LJ_CHAR_UPPER 0x20
-#define LJ_CHAR_LOWER 0x40
-#define LJ_CHAR_IDENT 0x80
-#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER)
-#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT)
-#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT)
-
-/* Only pass -1 or 0..255 to these macros. Never pass a signed char! */
-#define lj_char_isa(c, t) ((lj_char_bits+1)[(c)] & t)
-#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL)
-#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE)
-#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT)
-#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT)
-#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT)
-#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER)
-#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER)
-#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT)
-#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA)
-#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM)
-#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH)
-
-#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1))
-#define lj_char_tolower(c) ((c) + lj_char_isupper(c))
-
-LJ_DATA const uint8_t lj_char_bits[257];
-
-#endif
+/*
+** Character types.
+** Donated to the public domain.
+*/
+
+#ifndef _LJ_CHAR_H
+#define _LJ_CHAR_H
+
+#include "lj_def.h"
+
+#define LJ_CHAR_CNTRL 0x01
+#define LJ_CHAR_SPACE 0x02
+#define LJ_CHAR_PUNCT 0x04
+#define LJ_CHAR_DIGIT 0x08
+#define LJ_CHAR_XDIGIT 0x10
+#define LJ_CHAR_UPPER 0x20
+#define LJ_CHAR_LOWER 0x40
+#define LJ_CHAR_IDENT 0x80
+#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER)
+#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT)
+#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT)
+
+/* Only pass -1 or 0..255 to these macros. Never pass a signed char! */
+#define lj_char_isa(c, t) ((lj_char_bits+1)[(c)] & t)
+#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL)
+#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE)
+#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT)
+#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT)
+#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT)
+#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER)
+#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER)
+#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT)
+#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA)
+#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM)
+#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH)
+
+#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1))
+#define lj_char_tolower(c) ((c) + lj_char_isupper(c))
+
+LJ_DATA const uint8_t lj_char_bits[257];
+
+#endif
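
Editor's note: each entry of lj_char_bits is a bitmask of LJ_CHAR_* classes, and the +1 offset in lj_char_isa() makes -1 (EOF) hit the leading zero entry, so every predicate is false for EOF. A small usage example, assuming lj_char.h is included; the function name is illustrative.

#include <assert.h>

/* Illustrative checks against the table above. */
static void char_class_example(void)
{
  assert(lj_char_isdigit('7'));
  assert(lj_char_isxdigit('A'));   /* 'A'..'F' carry LJ_CHAR_XDIGIT. */
  assert(lj_char_isident('_'));    /* '_' is IDENT|PUNCT in the table. */
  assert(!lj_char_isalnum(-1));    /* EOF maps to the extra 0 entry. */
}
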
diff --git a/3rdparty/lua/src/lj_clib.c b/3rdparty/lua/src/lj_clib.c
index c561aed..23d1f18 100644
--- a/3rdparty/lua/src/lj_clib.c
+++ b/3rdparty/lua/src/lj_clib.c
@@ -1,409 +1,412 @@
-/*
-** FFI C library loader.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_tab.h"
-#include "lj_str.h"
-#include "lj_udata.h"
-#include "lj_ctype.h"
-#include "lj_cconv.h"
-#include "lj_cdata.h"
-#include "lj_clib.h"
-
-/* -- OS-specific functions ----------------------------------------------- */
-
-#if LJ_TARGET_DLOPEN
-
-#include <dlfcn.h>
-#include <stdio.h>
-
-#if defined(RTLD_DEFAULT)
-#define CLIB_DEFHANDLE RTLD_DEFAULT
-#elif LJ_TARGET_OSX || LJ_TARGET_BSD
-#define CLIB_DEFHANDLE ((void *)(intptr_t)-2)
-#else
-#define CLIB_DEFHANDLE NULL
-#endif
-
-LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L)
-{
- lj_err_callermsg(L, dlerror());
-}
-
-#define clib_error(L, fmt, name) clib_error_(L)
-
-#if defined(__CYGWIN__)
-#define CLIB_SOPREFIX "cyg"
-#else
-#define CLIB_SOPREFIX "lib"
-#endif
-
-#if LJ_TARGET_OSX
-#define CLIB_SOEXT "%s.dylib"
-#elif defined(__CYGWIN__)
-#define CLIB_SOEXT "%s.dll"
-#else
-#define CLIB_SOEXT "%s.so"
-#endif
-
-static const char *clib_extname(lua_State *L, const char *name)
-{
- if (!strchr(name, '/')
-#ifdef __CYGWIN__
- && !strchr(name, '\\')
-#endif
- ) {
- if (!strchr(name, '.')) {
- name = lj_str_pushf(L, CLIB_SOEXT, name);
- L->top--;
-#ifdef __CYGWIN__
- } else {
- return name;
-#endif
- }
- if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] &&
- name[2] == CLIB_SOPREFIX[2])) {
- name = lj_str_pushf(L, CLIB_SOPREFIX "%s", name);
- L->top--;
- }
- }
- return name;
-}
-
-/* Check for a recognized ld script line. */
-static const char *clib_check_lds(lua_State *L, const char *buf)
-{
- char *p, *e;
- if ((!strncmp(buf, "GROUP", 5) || !strncmp(buf, "INPUT", 5)) &&
- (p = strchr(buf, '('))) {
- while (*++p == ' ') ;
- for (e = p; *e && *e != ' ' && *e != ')'; e++) ;
- return strdata(lj_str_new(L, p, e-p));
- }
- return NULL;
-}
-
-/* Quick and dirty solution to resolve shared library name from ld script. */
-static const char *clib_resolve_lds(lua_State *L, const char *name)
-{
- FILE *fp = fopen(name, "r");
- const char *p = NULL;
- if (fp) {
- char buf[256];
- if (fgets(buf, sizeof(buf), fp)) {
- if (!strncmp(buf, "/* GNU ld script", 16)) { /* ld script magic? */
- while (fgets(buf, sizeof(buf), fp)) { /* Check all lines. */
- p = clib_check_lds(L, buf);
- if (p) break;
- }
- } else { /* Otherwise check only the first line. */
- p = clib_check_lds(L, buf);
- }
- }
- fclose(fp);
- }
- return p;
-}
-
-static void *clib_loadlib(lua_State *L, const char *name, int global)
-{
- void *h = dlopen(clib_extname(L, name),
- RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
- if (!h) {
- const char *e, *err = dlerror();
- if (*err == '/' && (e = strchr(err, ':')) &&
- (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) {
- h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
- if (h) return h;
- err = dlerror();
- }
- lj_err_callermsg(L, err);
- }
- return h;
-}
-
-static void clib_unloadlib(CLibrary *cl)
-{
- if (cl->handle && cl->handle != CLIB_DEFHANDLE)
- dlclose(cl->handle);
-}
-
-static void *clib_getsym(CLibrary *cl, const char *name)
-{
- void *p = dlsym(cl->handle, name);
- return p;
-}
-
-#elif LJ_TARGET_WINDOWS
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
-#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
-#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
-BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
-#endif
-
-#define CLIB_DEFHANDLE ((void *)-1)
-
-/* Default libraries. */
-enum {
- CLIB_HANDLE_EXE,
- CLIB_HANDLE_DLL,
- CLIB_HANDLE_CRT,
- CLIB_HANDLE_KERNEL32,
- CLIB_HANDLE_USER32,
- CLIB_HANDLE_GDI32,
- CLIB_HANDLE_MAX
-};
-
-static void *clib_def_handle[CLIB_HANDLE_MAX];
-
-LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
- const char *name)
-{
- DWORD err = GetLastError();
- char buf[128];
- if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
- NULL, err, 0, buf, sizeof(buf), NULL))
- buf[0] = '\0';
- lj_err_callermsg(L, lj_str_pushf(L, fmt, name, buf));
-}
-
-static int clib_needext(const char *s)
-{
- while (*s) {
- if (*s == '/' || *s == '\\' || *s == '.') return 0;
- s++;
- }
- return 1;
-}
-
-static const char *clib_extname(lua_State *L, const char *name)
-{
- if (clib_needext(name)) {
- name = lj_str_pushf(L, "%s.dll", name);
- L->top--;
- }
- return name;
-}
-
-static void *clib_loadlib(lua_State *L, const char *name, int global)
-{
- DWORD oldwerr = GetLastError();
- void *h = (void *)LoadLibraryA(clib_extname(L, name));
- if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name);
- SetLastError(oldwerr);
- UNUSED(global);
- return h;
-}
-
-static void clib_unloadlib(CLibrary *cl)
-{
- if (cl->handle == CLIB_DEFHANDLE) {
- MSize i;
- for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) {
- void *h = clib_def_handle[i];
- if (h) {
- clib_def_handle[i] = NULL;
- FreeLibrary((HINSTANCE)h);
- }
- }
- } else if (cl->handle) {
- FreeLibrary((HINSTANCE)cl->handle);
- }
-}
-
-static void *clib_getsym(CLibrary *cl, const char *name)
-{
- void *p = NULL;
- if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */
- MSize i;
- for (i = 0; i < CLIB_HANDLE_MAX; i++) {
- HINSTANCE h = (HINSTANCE)clib_def_handle[i];
- if (!(void *)h) { /* Resolve default library handles (once). */
- switch (i) {
- case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break;
- case CLIB_HANDLE_DLL:
- GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
- (const char *)clib_def_handle, &h);
- break;
- case CLIB_HANDLE_CRT:
- GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
- (const char *)&_fmode, &h);
- break;
- case CLIB_HANDLE_KERNEL32: h = LoadLibraryA("kernel32.dll"); break;
- case CLIB_HANDLE_USER32: h = LoadLibraryA("user32.dll"); break;
- case CLIB_HANDLE_GDI32: h = LoadLibraryA("gdi32.dll"); break;
- }
- if (!h) continue;
- clib_def_handle[i] = (void *)h;
- }
- p = (void *)GetProcAddress(h, name);
- if (p) break;
- }
- } else {
- p = (void *)GetProcAddress((HINSTANCE)cl->handle, name);
- }
- return p;
-}
-
-#else
-
-#define CLIB_DEFHANDLE NULL
-
-LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
- const char *name)
-{
- lj_err_callermsg(L, lj_str_pushf(L, fmt, name, "no support for this OS"));
-}
-
-static void *clib_loadlib(lua_State *L, const char *name, int global)
-{
- lj_err_callermsg(L, "no support for loading dynamic libraries for this OS");
- UNUSED(name); UNUSED(global);
- return NULL;
-}
-
-static void clib_unloadlib(CLibrary *cl)
-{
- UNUSED(cl);
-}
-
-static void *clib_getsym(CLibrary *cl, const char *name)
-{
- UNUSED(cl); UNUSED(name);
- return NULL;
-}
-
-#endif
-
-/* -- C library indexing -------------------------------------------------- */
-
-#if LJ_TARGET_X86 && LJ_ABI_WIN
-/* Compute argument size for fastcall/stdcall functions. */
-static CTSize clib_func_argsize(CTState *cts, CType *ct)
-{
- CTSize n = 0;
- while (ct->sib) {
- CType *d;
- ct = ctype_get(cts, ct->sib);
- if (ctype_isfield(ct->info)) {
- d = ctype_rawchild(cts, ct);
- n += ((d->size + 3) & ~3);
- }
- }
- return n;
-}
-#endif
-
-/* Get redirected or mangled external symbol. */
-static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name)
-{
- if (ct->sib) {
- CType *ctf = ctype_get(cts, ct->sib);
- if (ctype_isxattrib(ctf->info, CTA_REDIR))
- return strdata(gco2str(gcref(ctf->name)));
- }
- return strdata(name);
-}
-
-/* Index a C library by name. */
-TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name)
-{
- TValue *tv = lj_tab_setstr(L, cl->cache, name);
- if (LJ_UNLIKELY(tvisnil(tv))) {
- CTState *cts = ctype_cts(L);
- CType *ct;
- CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
- if (!id)
- lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name));
- if (ctype_isconstval(ct->info)) {
- CType *ctt = ctype_child(cts, ct);
- lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4);
- if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
- setnumV(tv, (lua_Number)(uint32_t)ct->size);
- else
- setintV(tv, (int32_t)ct->size);
- } else {
- const char *sym = clib_extsym(cts, ct, name);
-#if LJ_TARGET_WINDOWS
- DWORD oldwerr = GetLastError();
-#endif
- void *p = clib_getsym(cl, sym);
- GCcdata *cd;
- lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info));
-#if LJ_TARGET_X86 && LJ_ABI_WIN
- /* Retry with decorated name for fastcall/stdcall functions. */
- if (!p && ctype_isfunc(ct->info)) {
- CTInfo cconv = ctype_cconv(ct->info);
- if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) {
- CTSize sz = clib_func_argsize(cts, ct);
- const char *symd = lj_str_pushf(L,
- cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d",
- sym, sz);
- L->top--;
- p = clib_getsym(cl, symd);
- }
- }
-#endif
- if (!p)
- clib_error(L, "cannot resolve symbol " LUA_QS ": %s", sym);
-#if LJ_TARGET_WINDOWS
- SetLastError(oldwerr);
-#endif
- cd = lj_cdata_new(cts, id, CTSIZE_PTR);
- *(void **)cdataptr(cd) = p;
- setcdataV(L, tv, cd);
- }
- }
- return tv;
-}
-
-/* -- C library management ------------------------------------------------ */
-
-/* Create a new CLibrary object and push it on the stack. */
-static CLibrary *clib_new(lua_State *L, GCtab *mt)
-{
- GCtab *t = lj_tab_new(L, 0, 0);
- GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t);
- CLibrary *cl = (CLibrary *)uddata(ud);
- cl->cache = t;
- ud->udtype = UDTYPE_FFI_CLIB;
- /* NOBARRIER: The GCudata is new (marked white). */
- setgcref(ud->metatable, obj2gco(mt));
- setudataV(L, L->top++, ud);
- return cl;
-}
-
-/* Load a C library. */
-void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global)
-{
- void *handle = clib_loadlib(L, strdata(name), global);
- CLibrary *cl = clib_new(L, mt);
- cl->handle = handle;
-}
-
-/* Unload a C library. */
-void lj_clib_unload(CLibrary *cl)
-{
- clib_unloadlib(cl);
- cl->handle = NULL;
-}
-
-/* Create the default C library object. */
-void lj_clib_default(lua_State *L, GCtab *mt)
-{
- CLibrary *cl = clib_new(L, mt);
- cl->handle = CLIB_DEFHANDLE;
-}
-
-#endif
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_str.h"
+#include "lj_udata.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_clib.h"
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+#include <stdio.h>
+
+#if defined(RTLD_DEFAULT)
+#define CLIB_DEFHANDLE RTLD_DEFAULT
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+#define CLIB_DEFHANDLE ((void *)(intptr_t)-2)
+#else
+#define CLIB_DEFHANDLE NULL
+#endif
+
+LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L)
+{
+ lj_err_callermsg(L, dlerror());
+}
+
+#define clib_error(L, fmt, name) clib_error_(L)
+
+#if defined(__CYGWIN__)
+#define CLIB_SOPREFIX "cyg"
+#else
+#define CLIB_SOPREFIX "lib"
+#endif
+
+#if LJ_TARGET_OSX
+#define CLIB_SOEXT "%s.dylib"
+#elif defined(__CYGWIN__)
+#define CLIB_SOEXT "%s.dll"
+#else
+#define CLIB_SOEXT "%s.so"
+#endif
+
+static const char *clib_extname(lua_State *L, const char *name)
+{
+ if (!strchr(name, '/')
+#ifdef __CYGWIN__
+ && !strchr(name, '\\')
+#endif
+ ) {
+ if (!strchr(name, '.')) {
+ name = lj_str_pushf(L, CLIB_SOEXT, name);
+ L->top--;
+#ifdef __CYGWIN__
+ } else {
+ return name;
+#endif
+ }
+ if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] &&
+ name[2] == CLIB_SOPREFIX[2])) {
+ name = lj_str_pushf(L, CLIB_SOPREFIX "%s", name);
+ L->top--;
+ }
+ }
+ return name;
+}
+
+/* Check for a recognized ld script line. */
+static const char *clib_check_lds(lua_State *L, const char *buf)
+{
+ char *p, *e;
+ if ((!strncmp(buf, "GROUP", 5) || !strncmp(buf, "INPUT", 5)) &&
+ (p = strchr(buf, '('))) {
+ while (*++p == ' ') ;
+ for (e = p; *e && *e != ' ' && *e != ')'; e++) ;
+ return strdata(lj_str_new(L, p, e-p));
+ }
+ return NULL;
+}
+
+/* Quick and dirty solution to resolve shared library name from ld script. */
+static const char *clib_resolve_lds(lua_State *L, const char *name)
+{
+ FILE *fp = fopen(name, "r");
+ const char *p = NULL;
+ if (fp) {
+ char buf[256];
+ if (fgets(buf, sizeof(buf), fp)) {
+ if (!strncmp(buf, "/* GNU ld script", 16)) { /* ld script magic? */
+ while (fgets(buf, sizeof(buf), fp)) { /* Check all lines. */
+ p = clib_check_lds(L, buf);
+ if (p) break;
+ }
+ } else { /* Otherwise check only the first line. */
+ p = clib_check_lds(L, buf);
+ }
+ }
+ fclose(fp);
+ }
+ return p;
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ void *h = dlopen(clib_extname(L, name),
+ RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
+ if (!h) {
+ const char *e, *err = dlerror();
+ if (*err == '/' && (e = strchr(err, ':')) &&
+ (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) {
+ h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
+ if (h) return h;
+ err = dlerror();
+ }
+ lj_err_callermsg(L, err);
+ }
+ return h;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ if (cl->handle && cl->handle != CLIB_DEFHANDLE)
+ dlclose(cl->handle);
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ void *p = dlsym(cl->handle, name);
+ return p;
+}
+
+#elif LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+#include <windows.h>
+
+#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
+#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
+BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
+#endif
+
+#define CLIB_DEFHANDLE ((void *)-1)
+
+/* Default libraries. */
+enum {
+ CLIB_HANDLE_EXE,
+ CLIB_HANDLE_DLL,
+ CLIB_HANDLE_CRT,
+ CLIB_HANDLE_KERNEL32,
+ CLIB_HANDLE_USER32,
+ CLIB_HANDLE_GDI32,
+ CLIB_HANDLE_MAX
+};
+
+static void *clib_def_handle[CLIB_HANDLE_MAX];
+
+LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
+ const char *name)
+{
+ DWORD err = GetLastError();
+ char buf[128];
+ if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, err, 0, buf, sizeof(buf), NULL))
+ buf[0] = '\0';
+ lj_err_callermsg(L, lj_str_pushf(L, fmt, name, buf));
+}
+
+static int clib_needext(const char *s)
+{
+ while (*s) {
+ if (*s == '/' || *s == '\\' || *s == '.') return 0;
+ s++;
+ }
+ return 1;
+}
+
+static const char *clib_extname(lua_State *L, const char *name)
+{
+ if (clib_needext(name)) {
+ name = lj_str_pushf(L, "%s.dll", name);
+ L->top--;
+ }
+ return name;
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ DWORD oldwerr = GetLastError();
+ void *h = (void *)LoadLibraryA(clib_extname(L, name));
+ if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name);
+ SetLastError(oldwerr);
+ UNUSED(global);
+ return h;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ if (cl->handle == CLIB_DEFHANDLE) {
+ MSize i;
+ for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) {
+ void *h = clib_def_handle[i];
+ if (h) {
+ clib_def_handle[i] = NULL;
+ FreeLibrary((HINSTANCE)h);
+ }
+ }
+  } else if (cl->handle) {
+    FreeLibrary((HINSTANCE)cl->handle);
+  }
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ void *p = NULL;
+ if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */
+ MSize i;
+ for (i = 0; i < CLIB_HANDLE_MAX; i++) {
+ HINSTANCE h = (HINSTANCE)clib_def_handle[i];
+ if (!(void *)h) { /* Resolve default library handles (once). */
+ switch (i) {
+ case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break;
+ case CLIB_HANDLE_DLL:
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)clib_def_handle, &h);
+ break;
+ case CLIB_HANDLE_CRT:
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)&_fmode, &h);
+ break;
+ case CLIB_HANDLE_KERNEL32: h = LoadLibraryA("kernel32.dll"); break;
+ case CLIB_HANDLE_USER32: h = LoadLibraryA("user32.dll"); break;
+ case CLIB_HANDLE_GDI32: h = LoadLibraryA("gdi32.dll"); break;
+ }
+ if (!h) continue;
+ clib_def_handle[i] = (void *)h;
+ }
+ p = (void *)GetProcAddress(h, name);
+ if (p) break;
+ }
+ } else {
+ p = (void *)GetProcAddress((HINSTANCE)cl->handle, name);
+ }
+ return p;
+}
+
+#else
+
+#define CLIB_DEFHANDLE NULL
+
+LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
+ const char *name)
+{
+ lj_err_callermsg(L, lj_str_pushf(L, fmt, name, "no support for this OS"));
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ lj_err_callermsg(L, "no support for loading dynamic libraries for this OS");
+ UNUSED(name); UNUSED(global);
+ return NULL;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ UNUSED(cl);
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ UNUSED(cl); UNUSED(name);
+ return NULL;
+}
+
+#endif
+
+/* -- C library indexing -------------------------------------------------- */
+
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+/* Compute argument size for fastcall/stdcall functions. */
+static CTSize clib_func_argsize(CTState *cts, CType *ct)
+{
+ CTSize n = 0;
+ while (ct->sib) {
+ CType *d;
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ d = ctype_rawchild(cts, ct);
+ n += ((d->size + 3) & ~3);
+ }
+ }
+ return n;
+}
+#endif
+
+/* Get redirected or mangled external symbol. */
+static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name)
+{
+ if (ct->sib) {
+ CType *ctf = ctype_get(cts, ct->sib);
+ if (ctype_isxattrib(ctf->info, CTA_REDIR))
+ return strdata(gco2str(gcref(ctf->name)));
+ }
+ return strdata(name);
+}
+
+/* Index a C library by name. */
+TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name)
+{
+ TValue *tv = lj_tab_setstr(L, cl->cache, name);
+ if (LJ_UNLIKELY(tvisnil(tv))) {
+ CTState *cts = ctype_cts(L);
+ CType *ct;
+ CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
+ if (!id)
+ lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name));
+ if (ctype_isconstval(ct->info)) {
+ CType *ctt = ctype_child(cts, ct);
+ lua_assert(ctype_isinteger(ctt->info) && ctt->size <= 4);
+ if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
+ setnumV(tv, (lua_Number)(uint32_t)ct->size);
+ else
+ setintV(tv, (int32_t)ct->size);
+ } else {
+ const char *sym = clib_extsym(cts, ct, name);
+#if LJ_TARGET_WINDOWS
+ DWORD oldwerr = GetLastError();
+#endif
+ void *p = clib_getsym(cl, sym);
+ GCcdata *cd;
+ lua_assert(ctype_isfunc(ct->info) || ctype_isextern(ct->info));
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+ /* Retry with decorated name for fastcall/stdcall functions. */
+ if (!p && ctype_isfunc(ct->info)) {
+ CTInfo cconv = ctype_cconv(ct->info);
+ if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) {
+ CTSize sz = clib_func_argsize(cts, ct);
+ const char *symd = lj_str_pushf(L,
+ cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d",
+ sym, sz);
+ L->top--;
+ p = clib_getsym(cl, symd);
+ }
+ }
+#endif
+ if (!p)
+ clib_error(L, "cannot resolve symbol " LUA_QS ": %s", sym);
+#if LJ_TARGET_WINDOWS
+ SetLastError(oldwerr);
+#endif
+ cd = lj_cdata_new(cts, id, CTSIZE_PTR);
+ *(void **)cdataptr(cd) = p;
+ setcdataV(L, tv, cd);
+ }
+ }
+ return tv;
+}
+
+/* -- C library management ------------------------------------------------ */
+
+/* Create a new CLibrary object and push it on the stack. */
+static CLibrary *clib_new(lua_State *L, GCtab *mt)
+{
+ GCtab *t = lj_tab_new(L, 0, 0);
+ GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t);
+ CLibrary *cl = (CLibrary *)uddata(ud);
+ cl->cache = t;
+ ud->udtype = UDTYPE_FFI_CLIB;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcref(ud->metatable, obj2gco(mt));
+ setudataV(L, L->top++, ud);
+ return cl;
+}
+
+/* Load a C library. */
+void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global)
+{
+ void *handle = clib_loadlib(L, strdata(name), global);
+ CLibrary *cl = clib_new(L, mt);
+ cl->handle = handle;
+}
+
+/* Unload a C library. */
+void lj_clib_unload(CLibrary *cl)
+{
+ clib_unloadlib(cl);
+ cl->handle = NULL;
+}
+
+/* Create the default C library object. */
+void lj_clib_default(lua_State *L, GCtab *mt)
+{
+ CLibrary *cl = clib_new(L, mt);
+ cl->handle = CLIB_DEFHANDLE;
+}
+
+#endif
diff --git a/3rdparty/lua/src/lj_clib.h b/3rdparty/lua/src/lj_clib.h
index 2ece03c..421388a 100644
--- a/3rdparty/lua/src/lj_clib.h
+++ b/3rdparty/lua/src/lj_clib.h
@@ -1,29 +1,29 @@
-/*
-** FFI C library loader.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CLIB_H
-#define _LJ_CLIB_H
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-/* Namespace for C library indexing. */
-#define CLNS_INDEX ((1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
-
-/* C library namespace. */
-typedef struct CLibrary {
- void *handle; /* Opaque handle for dynamic library loader. */
- GCtab *cache; /* Cache for resolved symbols. Anchored in ud->env. */
-} CLibrary;
-
-LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name);
-LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global);
-LJ_FUNC void lj_clib_unload(CLibrary *cl);
-LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt);
-
-#endif
-
-#endif
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CLIB_H
+#define _LJ_CLIB_H
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+/* Namespace for C library indexing. */
+#define CLNS_INDEX ((1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
+
+/* C library namespace. */
+typedef struct CLibrary {
+ void *handle; /* Opaque handle for dynamic library loader. */
+ GCtab *cache; /* Cache for resolved symbols. Anchored in ud->env. */
+} CLibrary;
+
+LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name);
+LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global);
+LJ_FUNC void lj_clib_unload(CLibrary *cl);
+LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt);
+
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_cparse.c b/3rdparty/lua/src/lj_cparse.c
index b9df88d..107c038 100644
--- a/3rdparty/lua/src/lj_cparse.c
+++ b/3rdparty/lua/src/lj_cparse.c
@@ -1,6 +1,6 @@
/*
** C declaration parser.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#include "lj_obj.h"
@@ -57,22 +57,14 @@ static LJ_AINLINE int cp_iseol(CPChar c)
return (c == '\n' || c == '\r');
}
+static LJ_AINLINE CPChar cp_get(CPState *cp);
+
/* Peek next raw character. */
static LJ_AINLINE CPChar cp_rawpeek(CPState *cp)
{
return (CPChar)(uint8_t)(*cp->p);
}
-static LJ_NOINLINE CPChar cp_get_bs(CPState *cp);
-
-/* Get next character. */
-static LJ_AINLINE CPChar cp_get(CPState *cp)
-{
- cp->c = (CPChar)(uint8_t)(*cp->p++);
- if (LJ_LIKELY(cp->c != '\\')) return cp->c;
- return cp_get_bs(cp);
-}
-
/* Transparently skip backslash-escaped line breaks. */
static LJ_NOINLINE CPChar cp_get_bs(CPState *cp)
{
@@ -85,6 +77,14 @@ static LJ_NOINLINE CPChar cp_get_bs(CPState *cp)
return cp_get(cp);
}
+/* Get next character. */
+static LJ_AINLINE CPChar cp_get(CPState *cp)
+{
+ cp->c = (CPChar)(uint8_t)(*cp->p++);
+ if (LJ_LIKELY(cp->c != '\\')) return cp->c;
+ return cp_get_bs(cp);
+}
+
/* Grow save buffer. */
static LJ_NOINLINE void cp_save_grow(CPState *cp, CPChar c)
{
@@ -1258,7 +1258,7 @@ static void cp_struct_layout(CPState *cp, CTypeID sid, CTInfo sattr)
sinfo |= (info & (CTF_QUAL|CTF_VLA)); /* Merge pseudo-qualifiers. */
/* Check for size overflow and determine alignment. */
- if (sz >= 0x20000000u || bofs + csz < bofs || (info & CTF_VLA)) {
+ if (sz >= 0x20000000u || bofs + csz < bofs) {
if (!(sz == CTSIZE_INVALID && ctype_isarray(info) &&
!(sinfo & CTF_UNION)))
cp_err(cp, LJ_ERR_FFI_INVSIZE);
diff --git a/3rdparty/lua/src/lj_cparse.h b/3rdparty/lua/src/lj_cparse.h
index e6e49e2..c097b14 100644
--- a/3rdparty/lua/src/lj_cparse.h
+++ b/3rdparty/lua/src/lj_cparse.h
@@ -1,65 +1,65 @@
-/*
-** C declaration parser.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CPARSE_H
-#define _LJ_CPARSE_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* C parser limits. */
-#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */
-#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */
-#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */
-#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */
-
-/* Flags for C parser mode. */
-#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */
-#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */
-#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */
-#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */
-#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */
-#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */
-
-typedef int CPChar; /* C parser character. Unsigned ext. from char. */
-typedef int CPToken; /* C parser token. */
-
-/* C parser internal value representation. */
-typedef struct CPValue {
- union {
- int32_t i32; /* Value for CTID_INT32. */
- uint32_t u32; /* Value for CTID_UINT32. */
- };
- CTypeID id; /* C Type ID of the value. */
-} CPValue;
-
-/* C parser state. */
-typedef struct CPState {
- CPChar c; /* Current character. */
- CPToken tok; /* Current token. */
- CPValue val; /* Token value. */
- GCstr *str; /* Interned string of identifier/keyword. */
- CType *ct; /* C type table entry. */
- const char *p; /* Current position in input buffer. */
- SBuf sb; /* String buffer for tokens. */
- lua_State *L; /* Lua state. */
- CTState *cts; /* C type state. */
- TValue *param; /* C type parameters. */
- const char *srcname; /* Current source name. */
- BCLine linenumber; /* Input line counter. */
- int depth; /* Recursive declaration depth. */
- uint32_t tmask; /* Type mask for next identifier. */
- uint32_t mode; /* C parser mode. */
- uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */
- uint8_t curpack; /* Current position in pack pragma stack. */
-} CPState;
-
-LJ_FUNC int lj_cparse(CPState *cp);
-
-#endif
-
-#endif
+/*
+** C declaration parser.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CPARSE_H
+#define _LJ_CPARSE_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* C parser limits. */
+#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */
+#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */
+#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */
+#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */
+
+/* Flags for C parser mode. */
+#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */
+#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */
+#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */
+#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */
+#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */
+#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */
+
+typedef int CPChar; /* C parser character. Unsigned ext. from char. */
+typedef int CPToken; /* C parser token. */
+
+/* C parser internal value representation. */
+typedef struct CPValue {
+ union {
+ int32_t i32; /* Value for CTID_INT32. */
+ uint32_t u32; /* Value for CTID_UINT32. */
+ };
+ CTypeID id; /* C Type ID of the value. */
+} CPValue;
+
+/* C parser state. */
+typedef struct CPState {
+ CPChar c; /* Current character. */
+ CPToken tok; /* Current token. */
+ CPValue val; /* Token value. */
+ GCstr *str; /* Interned string of identifier/keyword. */
+ CType *ct; /* C type table entry. */
+ const char *p; /* Current position in input buffer. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_State *L; /* Lua state. */
+ CTState *cts; /* C type state. */
+ TValue *param; /* C type parameters. */
+ const char *srcname; /* Current source name. */
+ BCLine linenumber; /* Input line counter. */
+ int depth; /* Recursive declaration depth. */
+ uint32_t tmask; /* Type mask for next identifier. */
+ uint32_t mode; /* C parser mode. */
+ uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */
+ uint8_t curpack; /* Current position in pack pragma stack. */
+} CPState;
+
+LJ_FUNC int lj_cparse(CPState *cp);
+
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_crecord.c b/3rdparty/lua/src/lj_crecord.c
index 49005b1..df98e6e 100644
--- a/3rdparty/lua/src/lj_crecord.c
+++ b/3rdparty/lua/src/lj_crecord.c
@@ -1,1671 +1,1653 @@
-/*
-** Trace recorder for C data operations.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_ffrecord_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT && LJ_HASFFI
-
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_frame.h"
-#include "lj_ctype.h"
-#include "lj_cdata.h"
-#include "lj_cparse.h"
-#include "lj_cconv.h"
-#include "lj_clib.h"
-#include "lj_ccall.h"
-#include "lj_ff.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_ircall.h"
-#include "lj_iropt.h"
-#include "lj_trace.h"
-#include "lj_record.h"
-#include "lj_ffrecord.h"
-#include "lj_snap.h"
-#include "lj_crecord.h"
-#include "lj_dispatch.h"
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Pass IR on to next optimization in chain (FOLD). */
-#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
-
-#define emitconv(a, dt, st, flags) \
- emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags))
-
-/* -- C type checks ------------------------------------------------------- */
-
-static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o)
-{
- GCcdata *cd;
- TRef trtypeid;
- if (!tref_iscdata(tr))
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- cd = cdataV(o);
- /* Specialize to the CTypeID. */
- trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_CTYPEID);
- emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->ctypeid));
- return cd;
-}
-
-/* Specialize to the CTypeID held by a cdata constructor. */
-static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr)
-{
- CTypeID id;
- lua_assert(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID);
- id = *(CTypeID *)cdataptr(cd);
- tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT);
- emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id));
- return id;
-}
-
-static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o)
-{
- if (tref_isstr(tr)) {
- GCstr *s = strV(o);
- CPState cp;
- CTypeID oldtop;
- /* Specialize to the string containing the C type declaration. */
- emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s));
- cp.L = J->L;
- cp.cts = ctype_ctsG(J2G(J));
- oldtop = cp.cts->top;
- cp.srcname = strdata(s);
- cp.p = strdata(s);
- cp.param = NULL;
- cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
- if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- return cp.val.id;
- } else {
- GCcdata *cd = argv2cdata(J, tr, o);
- return cd->ctypeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) :
- cd->ctypeid;
- }
-}
-
-/* Convert CType to IRType (if possible). */
-static IRType crec_ct2irt(CTState *cts, CType *ct)
-{
- if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
- if (LJ_LIKELY(ctype_isnum(ct->info))) {
- if ((ct->info & CTF_FP)) {
- if (ct->size == sizeof(double))
- return IRT_NUM;
- else if (ct->size == sizeof(float))
- return IRT_FLOAT;
- } else {
- uint32_t b = lj_fls(ct->size);
- if (b <= 3)
- return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0);
- }
- } else if (ctype_isptr(ct->info)) {
- return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
- } else if (ctype_iscomplex(ct->info)) {
- if (ct->size == 2*sizeof(double))
- return IRT_NUM;
- else if (ct->size == 2*sizeof(float))
- return IRT_FLOAT;
- }
- return IRT_CDATA;
-}
-
-/* -- Optimized memory fill and copy -------------------------------------- */
-
-/* Maximum length and unroll of inlined copy/fill. */
-#define CREC_COPY_MAXUNROLL 16
-#define CREC_COPY_MAXLEN 128
-
-#define CREC_FILL_MAXUNROLL 16
-
-/* Number of windowed registers used for optimized memory copy. */
-#if LJ_TARGET_X86
-#define CREC_COPY_REGWIN 2
-#elif LJ_TARGET_PPC || LJ_TARGET_MIPS
-#define CREC_COPY_REGWIN 8
-#else
-#define CREC_COPY_REGWIN 4
-#endif
-
-/* List of memory offsets for copy/fill. */
-typedef struct CRecMemList {
- CTSize ofs; /* Offset in bytes. */
- IRType tp; /* Type of load/store. */
- TRef trofs; /* TRef of interned offset. */
- TRef trval; /* TRef of load value. */
-} CRecMemList;
-
-/* Generate copy list for element-wise struct copy. */
-static MSize crec_copy_struct(CRecMemList *ml, CTState *cts, CType *ct)
-{
- CTypeID fid = ct->sib;
- MSize mlp = 0;
- while (fid) {
- CType *df = ctype_get(cts, fid);
- fid = df->sib;
- if (ctype_isfield(df->info)) {
- CType *cct;
- IRType tp;
- if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
- cct = ctype_rawchild(cts, df); /* Field type. */
- tp = crec_ct2irt(cts, cct);
- if (tp == IRT_CDATA) return 0; /* NYI: aggregates. */
- if (mlp >= CREC_COPY_MAXUNROLL) return 0;
- ml[mlp].ofs = df->size;
- ml[mlp].tp = tp;
- mlp++;
- if (ctype_iscomplex(cct->info)) {
- if (mlp >= CREC_COPY_MAXUNROLL) return 0;
- ml[mlp].ofs = df->size + (cct->size >> 1);
- ml[mlp].tp = tp;
- mlp++;
- }
- } else if (!ctype_isconstval(df->info)) {
- /* NYI: bitfields and sub-structures. */
- return 0;
- }
- }
- return mlp;
-}
-
-/* Generate unrolled copy list, from highest to lowest step size/alignment. */
-static MSize crec_copy_unroll(CRecMemList *ml, CTSize len, CTSize step,
- IRType tp)
-{
- CTSize ofs = 0;
- MSize mlp = 0;
- if (tp == IRT_CDATA) tp = IRT_U8 + 2*lj_fls(step);
- do {
- while (ofs + step <= len) {
- if (mlp >= CREC_COPY_MAXUNROLL) return 0;
- ml[mlp].ofs = ofs;
- ml[mlp].tp = tp;
- mlp++;
- ofs += step;
- }
- step >>= 1;
- tp -= 2;
- } while (ofs < len);
- return mlp;
-}
-
-/*
-** Emit copy list with windowed loads/stores.
-** LJ_TARGET_UNALIGNED: may emit unaligned loads/stores (not marked as such).
-*/
-static void crec_copy_emit(jit_State *J, CRecMemList *ml, MSize mlp,
- TRef trdst, TRef trsrc)
-{
- MSize i, j, rwin = 0;
- for (i = 0, j = 0; i < mlp; ) {
- TRef trofs = lj_ir_kintp(J, ml[i].ofs);
- TRef trsptr = emitir(IRT(IR_ADD, IRT_PTR), trsrc, trofs);
- ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0);
- ml[i].trofs = trofs;
- i++;
- rwin += (LJ_SOFTFP && ml[i].tp == IRT_NUM) ? 2 : 1;
- if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. */
- rwin = 0;
- for ( ; j < i; j++) {
- TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, ml[j].trofs);
- emitir(IRT(IR_XSTORE, ml[j].tp), trdptr, ml[j].trval);
- }
- }
- }
-}
-
-/* Optimized memory copy. */
-static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen,
- CType *ct)
-{
- if (tref_isk(trlen)) { /* Length must be constant. */
- CRecMemList ml[CREC_COPY_MAXUNROLL];
- MSize mlp = 0;
- CTSize step = 1, len = (CTSize)IR(tref_ref(trlen))->i;
- IRType tp = IRT_CDATA;
- int needxbar = 0;
- if (len == 0) return; /* Shortcut. */
- if (len > CREC_COPY_MAXLEN) goto fallback;
- if (ct) {
- CTState *cts = ctype_ctsG(J2G(J));
- lua_assert(ctype_isarray(ct->info) || ctype_isstruct(ct->info));
- if (ctype_isarray(ct->info)) {
- CType *cct = ctype_rawchild(cts, ct);
- tp = crec_ct2irt(cts, cct);
- if (tp == IRT_CDATA) goto rawcopy;
- step = lj_ir_type_size[tp];
- lua_assert((len & (step-1)) == 0);
- } else if ((ct->info & CTF_UNION)) {
- step = (1u << ctype_align(ct->info));
- goto rawcopy;
- } else {
- mlp = crec_copy_struct(ml, cts, ct);
- goto emitcopy;
- }
- } else {
- rawcopy:
- needxbar = 1;
- if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR)
- step = CTSIZE_PTR;
- }
- mlp = crec_copy_unroll(ml, len, step, tp);
- emitcopy:
- if (mlp) {
- crec_copy_emit(J, ml, mlp, trdst, trsrc);
- if (needxbar)
- emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
- return;
- }
- }
-fallback:
- /* Call memcpy. Always needs a barrier to disable alias analysis. */
- lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen);
- emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
-}
-
-/* Generate unrolled fill list, from highest to lowest step size/alignment. */
-static MSize crec_fill_unroll(CRecMemList *ml, CTSize len, CTSize step)
-{
- CTSize ofs = 0;
- MSize mlp = 0;
- IRType tp = IRT_U8 + 2*lj_fls(step);
- do {
- while (ofs + step <= len) {
- if (mlp >= CREC_COPY_MAXUNROLL) return 0;
- ml[mlp].ofs = ofs;
- ml[mlp].tp = tp;
- mlp++;
- ofs += step;
- }
- step >>= 1;
- tp -= 2;
- } while (ofs < len);
- return mlp;
-}
-
-/*
-** Emit stores for fill list.
-** LJ_TARGET_UNALIGNED: may emit unaligned stores (not marked as such).
-*/
-static void crec_fill_emit(jit_State *J, CRecMemList *ml, MSize mlp,
- TRef trdst, TRef trfill)
-{
- MSize i;
- for (i = 0; i < mlp; i++) {
- TRef trofs = lj_ir_kintp(J, ml[i].ofs);
- TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, trofs);
- emitir(IRT(IR_XSTORE, ml[i].tp), trdptr, trfill);
- }
-}
-
-/* Optimized memory fill. */
-static void crec_fill(jit_State *J, TRef trdst, TRef trlen, TRef trfill,
- CTSize step)
-{
- if (tref_isk(trlen)) { /* Length must be constant. */
- CRecMemList ml[CREC_FILL_MAXUNROLL];
- MSize mlp;
- CTSize len = (CTSize)IR(tref_ref(trlen))->i;
- if (len == 0) return; /* Shortcut. */
- if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR)
- step = CTSIZE_PTR;
- if (step * CREC_FILL_MAXUNROLL < len) goto fallback;
- mlp = crec_fill_unroll(ml, len, step);
- if (!mlp) goto fallback;
- if (tref_isk(trfill) || ml[0].tp != IRT_U8)
- trfill = emitconv(trfill, IRT_INT, IRT_U8, 0);
- if (ml[0].tp != IRT_U8) { /* Scatter U8 to U16/U32/U64. */
- if (CTSIZE_PTR == 8 && ml[0].tp == IRT_U64) {
- if (tref_isk(trfill)) /* Pointless on x64 with zero-extended regs. */
- trfill = emitconv(trfill, IRT_U64, IRT_U32, 0);
- trfill = emitir(IRT(IR_MUL, IRT_U64), trfill,
- lj_ir_kint64(J, U64x(01010101,01010101)));
- } else {
- trfill = emitir(IRTI(IR_MUL), trfill,
- lj_ir_kint(J, ml[0].tp == IRT_U16 ? 0x0101 : 0x01010101));
- }
- }
- crec_fill_emit(J, ml, mlp, trdst, trfill);
- } else {
-fallback:
- /* Call memset. Always needs a barrier to disable alias analysis. */
- lj_ir_call(J, IRCALL_memset, trdst, trfill, trlen); /* Note: arg order! */
- }
- emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
-}
-
-/* -- Convert C type to C type -------------------------------------------- */
-
-/*
-** This code mirrors the code in lj_cconv.c. It performs the same steps
-** for the trace recorder that lj_cconv.c does for the interpreter.
-**
-** One major difference is that we can get away with much fewer checks
-** here. E.g. checks for casts, constness or correct types can often be
-** omitted, even if they might fail. The interpreter subsequently throws
-** an error, which aborts the trace.
-**
-** All operations are specialized to their C types, so the on-trace
-** outcome must be the same as the outcome in the interpreter. If the
-** interpreter doesn't throw an error, then the trace is correct, too.
-** Care must be taken not to generate invalid (temporary) IR or to
-** trigger asserts.
-*/
-
-/* Determine whether a passed number or cdata number is non-zero. */
-static int crec_isnonzero(CType *s, void *p)
-{
- if (p == (void *)0)
- return 0;
- if (p == (void *)1)
- return 1;
- if ((s->info & CTF_FP)) {
- if (s->size == sizeof(float))
- return (*(float *)p != 0);
- else
- return (*(double *)p != 0);
- } else {
- if (s->size == 1)
- return (*(uint8_t *)p != 0);
- else if (s->size == 2)
- return (*(uint16_t *)p != 0);
- else if (s->size == 4)
- return (*(uint32_t *)p != 0);
- else
- return (*(uint64_t *)p != 0);
- }
-}
-
-static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp,
- void *svisnz)
-{
- IRType dt = crec_ct2irt(ctype_ctsG(J2G(J)), d);
- IRType st = crec_ct2irt(ctype_ctsG(J2G(J)), s);
- CTSize dsize = d->size, ssize = s->size;
- CTInfo dinfo = d->info, sinfo = s->info;
-
- if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
- goto err_conv;
-
- /*
- ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and
- ** numbers up to 8 bytes. Otherwise sp holds a pointer.
- */
-
- switch (cconv_idx2(dinfo, sinfo)) {
- /* Destination is a bool. */
- case CCX(B, B):
- goto xstore; /* Source operand is already normalized. */
- case CCX(B, I):
- case CCX(B, F):
- if (st != IRT_CDATA) {
- /* Specialize to the result of a comparison against 0. */
- TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) :
- (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) :
- lj_ir_kint(J, 0);
- int isnz = crec_isnonzero(s, svisnz);
- emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero);
- sp = lj_ir_kint(J, isnz);
- goto xstore;
- }
- goto err_nyi;
-
- /* Destination is an integer. */
- case CCX(I, B):
- case CCX(I, I):
- conv_I_I:
- if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
- /* Extend 32 to 64 bit integer. */
- if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED)))
- sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st,
- (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
- else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */
- sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0);
- else if (st == IRT_INT)
- sp = lj_opt_narrow_toint(J, sp);
- xstore:
- if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J);
- if (dp == 0) return sp;
- emitir(IRT(IR_XSTORE, dt), dp, sp);
- break;
- case CCX(I, C):
- sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
- /* fallthrough */
- case CCX(I, F):
- if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
- sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_TRUNC|IRCONV_ANY);
- goto xstore;
- case CCX(I, P):
- case CCX(I, A):
- sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
- ssize = CTSIZE_PTR;
- st = IRT_UINTP;
- if (((dsize ^ ssize) & 8) == 0) { /* Must insert no-op type conversion. */
- sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, IRT_PTR, 0);
- goto xstore;
- }
- goto conv_I_I;
-
- /* Destination is a floating-point number. */
- case CCX(F, B):
- case CCX(F, I):
- conv_F_I:
- if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
- sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, 0);
- goto xstore;
- case CCX(F, C):
- sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
- /* fallthrough */
- case CCX(F, F):
- conv_F_F:
- if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
- if (dt != st) sp = emitconv(sp, dt, st, 0);
- goto xstore;
-
- /* Destination is a complex number. */
- case CCX(C, I):
- case CCX(C, F):
- { /* Clear im. */
- TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
- emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0));
- }
- /* Convert to re. */
- if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I;
-
- case CCX(C, C):
- if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
- {
- TRef re, im, ptr;
- re = emitir(IRT(IR_XLOAD, st), sp, 0);
- ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1)));
- im = emitir(IRT(IR_XLOAD, st), ptr, 0);
- if (dt != st) {
- re = emitconv(re, dt, st, 0);
- im = emitconv(im, dt, st, 0);
- }
- emitir(IRT(IR_XSTORE, dt), dp, re);
- ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
- emitir(IRT(IR_XSTORE, dt), ptr, im);
- }
- break;
-
- /* Destination is a vector. */
- case CCX(V, I):
- case CCX(V, F):
- case CCX(V, C):
- case CCX(V, V):
- goto err_nyi;
-
- /* Destination is a pointer. */
- case CCX(P, P):
- case CCX(P, A):
- case CCX(P, S):
- /* There are only 32 bit pointers/addresses on 32 bit machines.
- ** Also ok on x64, since all 32 bit ops clear the upper part of the reg.
- */
- goto xstore;
- case CCX(P, I):
- if (st == IRT_CDATA) goto err_nyi;
- if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */
- sp = emitconv(sp, IRT_U32, st, 0);
- goto xstore;
- case CCX(P, F):
- if (st == IRT_CDATA) goto err_nyi;
- /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
- sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32,
- st, IRCONV_TRUNC|IRCONV_ANY);
- goto xstore;
-
- /* Destination is an array. */
- case CCX(A, A):
- /* Destination is a struct/union. */
- case CCX(S, S):
- if (dp == 0) goto err_conv;
- crec_copy(J, dp, sp, lj_ir_kint(J, dsize), d);
- break;
-
- default:
- err_conv:
- err_nyi:
- lj_trace_err(J, LJ_TRERR_NYICONV);
- break;
- }
- return 0;
-}
-
-/* -- Convert C type to TValue (load) ------------------------------------- */
-
-static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- IRType t = crec_ct2irt(cts, s);
- CTInfo sinfo = s->info;
- if (ctype_isnum(sinfo)) {
- TRef tr;
- if (t == IRT_CDATA)
- goto err_nyi; /* NYI: copyval of >64 bit integers. */
- tr = emitir(IRT(IR_XLOAD, t), sp, 0);
- if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */
- return emitconv(tr, IRT_NUM, t, 0);
- } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */
- sp = tr;
- lj_needsplit(J);
- } else if ((sinfo & CTF_BOOL)) {
- /* Assume not equal to zero. Fixup and emit pending guard later. */
- lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
- J->postproc = LJ_POST_FIXGUARD;
- return TREF_TRUE;
- } else {
- return tr;
- }
- } else if (ctype_isptr(sinfo) || ctype_isenum(sinfo)) {
- sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Box pointers and enums. */
- } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
- cts->L = J->L;
- sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */
- } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */
- ptrdiff_t esz = (ptrdiff_t)(s->size >> 1);
- TRef ptr, tr1, tr2, dp;
- dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL);
- tr1 = emitir(IRT(IR_XLOAD, t), sp, 0);
- ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz));
- tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0);
- ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)));
- emitir(IRT(IR_XSTORE, t), ptr, tr1);
- ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz));
- emitir(IRT(IR_XSTORE, t), ptr, tr2);
- return dp;
- } else {
- /* NYI: copyval of vectors. */
- err_nyi:
- lj_trace_err(J, LJ_TRERR_NYICONV);
- }
- /* Box pointer, ref, enum or 64 bit integer. */
- return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp);
-}
-
-/* -- Convert TValue to C type (store) ------------------------------------ */
-
-static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- CTypeID sid = CTID_P_VOID;
- void *svisnz = 0;
- CType *s;
- if (LJ_LIKELY(tref_isinteger(sp))) {
- sid = CTID_INT32;
- svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
- } else if (tref_isnum(sp)) {
- sid = CTID_DOUBLE;
- svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
- } else if (tref_isbool(sp)) {
- sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0);
- sid = CTID_BOOL;
- } else if (tref_isnil(sp)) {
- sp = lj_ir_kptr(J, NULL);
- } else if (tref_isudata(sp)) {
- GCudata *ud = udataV(sval);
- if (ud->udtype == UDTYPE_IO_FILE) {
- TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), sp, IRFL_UDATA_UDTYPE);
- emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE));
- sp = emitir(IRT(IR_FLOAD, IRT_PTR), sp, IRFL_UDATA_FILE);
- } else {
- sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCudata)));
- }
- } else if (tref_isstr(sp)) {
- if (ctype_isenum(d->info)) { /* Match string against enum constant. */
- GCstr *str = strV(sval);
- CTSize ofs;
- CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
- /* Specialize to the name of the enum constant. */
- emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str));
- if (cct && ctype_isconstval(cct->info)) {
- lua_assert(ctype_child(cts, cct)->size == 4);
- svisnz = (void *)(intptr_t)(ofs != 0);
- sp = lj_ir_kint(J, (int32_t)ofs);
- sid = ctype_cid(cct->info);
- } /* else: interpreter will throw. */
- } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
- lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */
- } else { /* Otherwise pass the string data as a const char[]. */
- /* Don't use STRREF. It folds with SNEW, which loses the trailing NUL. */
- sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr)));
- sid = CTID_A_CCHAR;
- }
- } else { /* NYI: tref_istab(sp), tref_islightud(sp). */
- IRType t;
- sid = argv2cdata(J, sp, sval)->ctypeid;
- s = ctype_raw(cts, sid);
- svisnz = cdataptr(cdataV(sval));
- t = crec_ct2irt(cts, s);
- if (ctype_isptr(s->info)) {
- sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR);
- if (ctype_isref(s->info)) {
- svisnz = *(void **)svisnz;
- s = ctype_rawchild(cts, s);
- if (ctype_isenum(s->info)) s = ctype_child(cts, s);
- t = crec_ct2irt(cts, s);
- } else {
- goto doconv;
- }
- } else if (t == IRT_I64 || t == IRT_U64) {
- sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64);
- lj_needsplit(J);
- goto doconv;
- } else if (t == IRT_INT || t == IRT_U32) {
- if (ctype_isenum(s->info)) s = ctype_child(cts, s);
- sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT);
- goto doconv;
- } else {
- sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata)));
- }
- if (ctype_isnum(s->info) && t != IRT_CDATA)
- sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Load number value. */
- goto doconv;
- }
- s = ctype_get(cts, sid);
-doconv:
- if (ctype_isenum(d->info)) d = ctype_child(cts, d);
- return crec_ct_ct(J, d, s, dp, sp, svisnz);
-}
-
-/* -- C data metamethods -------------------------------------------------- */
-
-/* This would be rather difficult in FOLD, so do it here:
-** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k)
-** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz)
-*/
-static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz)
-{
- IRIns *ir = IR(tref_ref(tr));
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) &&
- (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) {
- IRIns *irk = IR(ir->op2);
- ptrdiff_t k;
- if (LJ_64 && irk->o == IR_KINT64)
- k = (ptrdiff_t)ir_kint64(irk)->u64 * sz;
- else
- k = (ptrdiff_t)irk->i * sz;
- if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k;
- tr = ir->op1; /* Not a TRef, but the caller doesn't care. */
- }
- return tr;
-}
-
-/* Record ctype __index/__newindex metamethods. */
-static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
- RecordFFData *rd)
-{
- CTypeID id = ctype_typeid(cts, ct);
- cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index);
- if (!tv)
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- if (tvisfunc(tv)) {
- J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
- rd->nres = -1; /* Pending tailcall. */
- } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) {
- /* Specialize to result of __index lookup. */
- cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]);
- J->base[0] = lj_record_constify(J, o);
- if (!J->base[0])
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- /* Always specialize to the key. */
- emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
- } else {
- /* NYI: resolving of non-function metamethods. */
- /* NYI: non-string keys for __index table. */
- /* NYI: stores to __newindex table. */
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- }
-}
-
-void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd)
-{
- TRef idx, ptr = J->base[0];
- ptrdiff_t ofs = sizeof(GCcdata);
- GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]);
- CTState *cts = ctype_ctsG(J2G(J));
- CType *ct = ctype_raw(cts, cd->ctypeid);
- CTypeID sid = 0;
-
- /* Resolve pointer or reference for cdata object. */
- if (ctype_isptr(ct->info)) {
- IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
- if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
- ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR);
- ofs = 0;
- ptr = crec_reassoc_ofs(J, ptr, &ofs, 1);
- }
-
-again:
- idx = J->base[1];
- if (tref_isnumber(idx)) {
- idx = lj_opt_narrow_cindex(J, idx);
- if (ctype_ispointer(ct->info)) {
- CTSize sz;
- integer_key:
- if ((ct->info & CTF_COMPLEX))
- idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1));
- sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info)));
- idx = crec_reassoc_ofs(J, idx, &ofs, sz);
-#if LJ_TARGET_ARM || LJ_TARGET_PPC
- /* Hoist base add to allow fusion of index/shift into operands. */
- if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs
-#if LJ_TARGET_ARM
- && (sz == 1 || sz == 4)
-#endif
- ) {
- ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
- ofs = 0;
- }
-#endif
- idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz));
- ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr);
- }
- } else if (tref_iscdata(idx)) {
- GCcdata *cdk = cdataV(&rd->argv[1]);
- CType *ctk = ctype_raw(cts, cdk->ctypeid);
- IRType t = crec_ct2irt(cts, ctk);
- if (ctype_ispointer(ct->info) && t >= IRT_I8 && t <= IRT_U64) {
- if (ctk->size == 8) {
- idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64);
- } else if (ctk->size == 4) {
- idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT);
- } else {
- idx = emitir(IRT(IR_ADD, IRT_PTR), idx,
- lj_ir_kintp(J, sizeof(GCcdata)));
- idx = emitir(IRT(IR_XLOAD, t), idx, 0);
- }
- if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED))
- idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT);
- if (!LJ_64 && ctk->size > sizeof(intptr_t)) {
- idx = emitconv(idx, IRT_INTP, t, 0);
- lj_needsplit(J);
- }
- goto integer_key;
- }
- } else if (tref_isstr(idx)) {
- GCstr *name = strV(&rd->argv[1]);
- if (cd && cd->ctypeid == CTID_CTYPEID)
- ct = ctype_raw(cts, crec_constructor(J, cd, ptr));
- if (ctype_isstruct(ct->info)) {
- CTSize fofs;
- CType *fct;
- fct = lj_ctype_getfield(cts, ct, name, &fofs);
- if (fct) {
- /* Always specialize to the field name. */
- emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
- if (ctype_isconstval(fct->info)) {
- if (fct->size >= 0x80000000u &&
- (ctype_child(cts, fct)->info & CTF_UNSIGNED)) {
- J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size);
- return;
- }
- J->base[0] = lj_ir_kint(J, (int32_t)fct->size);
- return; /* Interpreter will throw for newindex. */
- } else if (ctype_isbitfield(fct->info)) {
- lj_trace_err(J, LJ_TRERR_NYICONV);
- } else {
- lua_assert(ctype_isfield(fct->info));
- sid = ctype_cid(fct->info);
- }
- ofs += (ptrdiff_t)fofs;
- }
- } else if (ctype_iscomplex(ct->info)) {
- if (name->len == 2 &&
- ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') ||
- (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) {
- /* Always specialize to the field name. */
- emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
- if (strdata(name)[0] == 'i') ofs += (ct->size >> 1);
- sid = ctype_cid(ct->info);
- }
- }
- }
- if (!sid) {
- if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
- CType *cct = ctype_rawchild(cts, ct);
- if (ctype_isstruct(cct->info)) {
- ct = cct;
- cd = NULL;
- if (tref_isstr(idx)) goto again;
- }
- }
- crec_index_meta(J, cts, ct, rd);
- return;
- }
-
- if (ofs)
- ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
-
- /* Resolve reference for field. */
- ct = ctype_get(cts, sid);
- if (ctype_isref(ct->info)) {
- ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0);
- sid = ctype_cid(ct->info);
- ct = ctype_get(cts, sid);
- }
-
- while (ctype_isattrib(ct->info))
- ct = ctype_child(cts, ct); /* Skip attributes. */
-
- if (rd->data == 0) { /* __index metamethod. */
- J->base[0] = crec_tv_ct(J, ct, sid, ptr);
- } else { /* __newindex metamethod. */
- rd->nres = 0;
- J->needsnap = 1;
- crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
- }
-}
-
-/* Record setting a finalizer. */
-static void crec_finalizer(jit_State *J, TRef trcd, cTValue *fin)
-{
- TRef trlo = lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd);
- TRef trhi = emitir(IRT(IR_ADD, IRT_P32), trlo, lj_ir_kint(J, 4));
- if (LJ_BE) { TRef tmp = trlo; trlo = trhi; trhi = tmp; }
- if (tvisfunc(fin)) {
- emitir(IRT(IR_XSTORE, IRT_P32), trlo, lj_ir_kfunc(J, funcV(fin)));
- emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TFUNC));
- } else if (tviscdata(fin)) {
- emitir(IRT(IR_XSTORE, IRT_P32), trlo,
- lj_ir_kgc(J, obj2gco(cdataV(fin)), IRT_CDATA));
- emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TCDATA));
- } else {
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- }
- J->needsnap = 1;
-}
-
-/* Record cdata allocation. */
-static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- CTSize sz;
- CTInfo info = lj_ctype_info(cts, id, &sz);
- CType *d = ctype_raw(cts, id);
- TRef trid;
- if (!sz || sz > 128 || (info & CTF_VLA) || ctype_align(info) > CT_MEMALIGN)
- lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: large/special allocations. */
- trid = lj_ir_kint(J, id);
- /* Use special instruction to box pointer or 32/64 bit integer. */
- if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) {
- TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) :
- ctype_isptr(info) ? lj_ir_kptr(J, NULL) :
- sz == 4 ? lj_ir_kint(J, 0) :
- (lj_needsplit(J), lj_ir_kint64(J, 0));
- J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp);
- } else {
- TRef trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, TREF_NIL);
- cTValue *fin;
- J->base[0] = trcd;
- if (J->base[1] && !J->base[2] &&
- !lj_cconv_multi_init(cts, d, &rd->argv[1])) {
- goto single_init;
- } else if (ctype_isarray(d->info)) {
- CType *dc = ctype_rawchild(cts, d); /* Array element type. */
- CTSize ofs, esize = dc->size;
- TRef sp = 0;
- TValue tv;
- TValue *sval = &tv;
- MSize i;
- tv.u64 = 0;
- if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)))
- lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init array of aggregates. */
- for (i = 1, ofs = 0; ofs < sz; ofs += esize) {
- TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
- lj_ir_kintp(J, ofs + sizeof(GCcdata)));
- if (J->base[i]) {
- sp = J->base[i];
- sval = &rd->argv[i];
- i++;
- } else if (i != 2) {
- sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL;
- }
- crec_ct_tv(J, dc, dp, sp, sval);
- }
- } else if (ctype_isstruct(d->info)) {
- CTypeID fid = d->sib;
- MSize i = 1;
- while (fid) {
- CType *df = ctype_get(cts, fid);
- fid = df->sib;
- if (ctype_isfield(df->info)) {
- CType *dc;
- TRef sp, dp;
- TValue tv;
- TValue *sval = &tv;
- setintV(&tv, 0);
- if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
- dc = ctype_rawchild(cts, df); /* Field type. */
- if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info) ||
- ctype_isenum(dc->info)))
- lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */
- if (J->base[i]) {
- sp = J->base[i];
- sval = &rd->argv[i];
- i++;
- } else {
- sp = ctype_isptr(dc->info) ? TREF_NIL : lj_ir_kint(J, 0);
- }
- dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
- lj_ir_kintp(J, df->size + sizeof(GCcdata)));
- crec_ct_tv(J, dc, dp, sp, sval);
- } else if (!ctype_isconstval(df->info)) {
- /* NYI: init bitfields and sub-structures. */
- lj_trace_err(J, LJ_TRERR_NYICONV);
- }
- }
- } else {
- TRef dp;
- single_init:
- dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata)));
- if (J->base[1]) {
- crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]);
- } else {
- TValue tv;
- tv.u64 = 0;
- crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv);
- }
- }
- /* Handle __gc metamethod. */
- fin = lj_ctype_meta(cts, id, MM_gc);
- if (fin)
- crec_finalizer(J, trcd, fin);
- }
-}
-
-/* Record argument conversions. */
-static TRef crec_call_args(jit_State *J, RecordFFData *rd,
- CTState *cts, CType *ct)
-{
- TRef args[CCI_NARGS_MAX];
- CTypeID fid;
- MSize i, n;
- TRef tr, *base;
- cTValue *o;
-#if LJ_TARGET_X86
-#if LJ_ABI_WIN
- TRef *arg0 = NULL, *arg1 = NULL;
-#endif
- int ngpr = 0;
- if (ctype_cconv(ct->info) == CTCC_THISCALL)
- ngpr = 1;
- else if (ctype_cconv(ct->info) == CTCC_FASTCALL)
- ngpr = 2;
-#endif
-
- /* Skip initial attributes. */
- fid = ct->sib;
- while (fid) {
- CType *ctf = ctype_get(cts, fid);
- if (!ctype_isattrib(ctf->info)) break;
- fid = ctf->sib;
- }
- args[0] = TREF_NIL;
- for (n = 0, base = J->base+1, o = rd->argv+1; *base; n++, base++, o++) {
- CTypeID did;
- CType *d;
-
- if (n >= CCI_NARGS_MAX)
- lj_trace_err(J, LJ_TRERR_NYICALL);
-
- if (fid) { /* Get argument type from field. */
- CType *ctf = ctype_get(cts, fid);
- fid = ctf->sib;
- lua_assert(ctype_isfield(ctf->info));
- did = ctype_cid(ctf->info);
- } else {
- if (!(ct->info & CTF_VARARG))
- lj_trace_err(J, LJ_TRERR_NYICALL); /* Too many arguments. */
- did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
- }
- d = ctype_raw(cts, did);
- if (!(ctype_isnum(d->info) || ctype_isptr(d->info) ||
- ctype_isenum(d->info)))
- lj_trace_err(J, LJ_TRERR_NYICALL);
- tr = crec_ct_tv(J, d, 0, *base, o);
- if (ctype_isinteger_or_bool(d->info)) {
- if (d->size < 4) {
- if ((d->info & CTF_UNSIGNED))
- tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_U8 : IRT_U16, 0);
- else
- tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT);
- }
- } else if (LJ_SOFTFP && ctype_isfp(d->info) && d->size > 4) {
- lj_needsplit(J);
- }
-#if LJ_TARGET_X86
- /* 64 bit args must not end up in registers for fastcall/thiscall. */
-#if LJ_ABI_WIN
- if (!ctype_isfp(d->info)) {
- /* Sigh, the Windows/x86 ABI allows reordering across 64 bit args. */
- if (tref_typerange(tr, IRT_I64, IRT_U64)) {
- if (ngpr) {
- arg0 = &args[n]; args[n++] = TREF_NIL; ngpr--;
- if (ngpr) {
- arg1 = &args[n]; args[n++] = TREF_NIL; ngpr--;
- }
- }
- } else {
- if (arg0) { *arg0 = tr; arg0 = NULL; n--; continue; }
- if (arg1) { *arg1 = tr; arg1 = NULL; n--; continue; }
- if (ngpr) ngpr--;
- }
- }
-#else
- if (!ctype_isfp(d->info) && ngpr) {
- if (tref_typerange(tr, IRT_I64, IRT_U64)) {
- /* No reordering for other x86 ABIs. Simply add alignment args. */
- do { args[n++] = TREF_NIL; } while (--ngpr);
- } else {
- ngpr--;
- }
- }
-#endif
-#endif
- args[n] = tr;
- }
- tr = args[0];
- for (i = 1; i < n; i++)
- tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]);
- return tr;
-}
-
-/* Create a snapshot for the caller, simulating a 'false' return value. */
-static void crec_snap_caller(jit_State *J)
-{
- lua_State *L = J->L;
- TValue *base = L->base, *top = L->top;
- const BCIns *pc = J->pc;
- TRef ftr = J->base[-1];
- ptrdiff_t delta;
- if (!frame_islua(base-1) || J->framedepth <= 0)
- lj_trace_err(J, LJ_TRERR_NYICALL);
- J->pc = frame_pc(base-1); delta = 1+bc_a(J->pc[-1]);
- L->top = base; L->base = base - delta;
- J->base[-1] = TREF_FALSE;
- J->base -= delta; J->baseslot -= (BCReg)delta;
- J->maxslot = (BCReg)delta; J->framedepth--;
- lj_snap_add(J);
- L->base = base; L->top = top;
- J->framedepth++; J->maxslot = 1;
- J->base += delta; J->baseslot += (BCReg)delta;
- J->base[-1] = ftr; J->pc = pc;
-}
-
-/* Record function call. */
-static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- CType *ct = ctype_raw(cts, cd->ctypeid);
- IRType tp = IRT_PTR;
- if (ctype_isptr(ct->info)) {
- tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
- ct = ctype_rawchild(cts, ct);
- }
- if (ctype_isfunc(ct->info)) {
- TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR);
- CType *ctr = ctype_rawchild(cts, ct);
- IRType t = crec_ct2irt(cts, ctr);
- TRef tr;
- TValue tv;
- /* Check for blacklisted C functions that might call a callback. */
- setlightudV(&tv,
- cdata_getptr(cdataptr(cd), (LJ_64 && tp == IRT_P64) ? 8 : 4));
- if (tvistrue(lj_tab_get(J->L, cts->miscmap, &tv)))
- lj_trace_err(J, LJ_TRERR_BLACKL);
- if (ctype_isvoid(ctr->info)) {
- t = IRT_NIL;
- rd->nres = 0;
- } else if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) ||
- ctype_isenum(ctr->info)) || t == IRT_CDATA) {
- lj_trace_err(J, LJ_TRERR_NYICALL);
- }
- if ((ct->info & CTF_VARARG)
-#if LJ_TARGET_X86
- || ctype_cconv(ct->info) != CTCC_CDECL
-#endif
- )
- func = emitir(IRT(IR_CARG, IRT_NIL), func,
- lj_ir_kint(J, ctype_typeid(cts, ct)));
- tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func);
- if (ctype_isbool(ctr->info)) {
- if (frame_islua(J->L->base-1) && bc_b(frame_pc(J->L->base-1)[-1]) == 1) {
- /* Don't check result if ignored. */
- tr = TREF_NIL;
- } else {
- crec_snap_caller(J);
-#if LJ_TARGET_X86ORX64
- /* Note: only the x86/x64 backend supports U8 and only for EQ(tr, 0). */
- lj_ir_set(J, IRTG(IR_NE, IRT_U8), tr, lj_ir_kint(J, 0));
-#else
- lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
-#endif
- J->postproc = LJ_POST_FIXGUARDSNAP;
- tr = TREF_TRUE;
- }
- } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) ||
- t == IRT_I64 || t == IRT_U64 || ctype_isenum(ctr->info)) {
- TRef trid = lj_ir_kint(J, ctype_cid(ct->info));
- tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr);
- if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
- } else if (t == IRT_FLOAT || t == IRT_U32) {
- tr = emitconv(tr, IRT_NUM, t, 0);
- } else if (t == IRT_I8 || t == IRT_I16) {
- tr = emitconv(tr, IRT_INT, t, IRCONV_SEXT);
- } else if (t == IRT_U8 || t == IRT_U16) {
- tr = emitconv(tr, IRT_INT, t, 0);
- }
- J->base[0] = tr;
- J->needsnap = 1;
- return 1;
- }
- return 0;
-}
-
-void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]);
- CTypeID id = cd->ctypeid;
- CType *ct;
- cTValue *tv;
- MMS mm = MM_call;
- if (id == CTID_CTYPEID) {
- id = crec_constructor(J, cd, J->base[0]);
- mm = MM_new;
- } else if (crec_call(J, rd, cd)) {
- return;
- }
- /* Record ctype __call/__new metamethod. */
- ct = ctype_raw(cts, id);
- tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm);
- if (tv) {
- if (tvisfunc(tv)) {
- J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
- rd->nres = -1; /* Pending tailcall. */
- return;
- }
- } else if (mm == MM_new) {
- crec_alloc(J, rd, id);
- return;
- }
- /* No metamethod or NYI: non-function metamethods. */
- lj_trace_err(J, LJ_TRERR_BADTYPE);
-}
-
-static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm)
-{
- if (ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) {
- IRType dt;
- CTypeID id;
- TRef tr;
- MSize i;
- IROp op;
- lj_needsplit(J);
- if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) ||
- ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) {
- dt = IRT_U64; id = CTID_UINT64;
- } else {
- dt = IRT_I64; id = CTID_INT64;
- if (mm < MM_add &&
- !((s[0]->info | s[1]->info) & CTF_FP) &&
- s[0]->size == 4 && s[1]->size == 4) { /* Try to narrow comparison. */
- if (!((s[0]->info ^ s[1]->info) & CTF_UNSIGNED) ||
- (tref_isk(sp[1]) && IR(tref_ref(sp[1]))->i >= 0)) {
- dt = (s[0]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT;
- goto comp;
- } else if (tref_isk(sp[0]) && IR(tref_ref(sp[0]))->i >= 0) {
- dt = (s[1]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT;
- goto comp;
- }
- }
- }
- for (i = 0; i < 2; i++) {
- IRType st = tref_type(sp[i]);
- if (st == IRT_NUM || st == IRT_FLOAT)
- sp[i] = emitconv(sp[i], dt, st, IRCONV_TRUNC|IRCONV_ANY);
- else if (!(st == IRT_I64 || st == IRT_U64))
- sp[i] = emitconv(sp[i], dt, IRT_INT,
- (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
- }
- if (mm < MM_add) {
- comp:
- /* Assume true comparison. Fixup and emit pending guard later. */
- if (mm == MM_eq) {
- op = IR_EQ;
- } else {
- op = mm == MM_lt ? IR_LT : IR_LE;
- if (dt == IRT_U32 || dt == IRT_U64)
- op += (IR_ULT-IR_LT);
- }
- lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]);
- J->postproc = LJ_POST_FIXGUARD;
- return TREF_TRUE;
- } else {
- tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]);
- }
- return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
- }
- return 0;
-}
-
-static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- CType *ctp = s[0];
- if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
- if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
- (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
- if (mm == MM_sub) { /* Pointer difference. */
- TRef tr;
- CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
- if (sz == 0 || (sz & (sz-1)) != 0)
- return 0; /* NYI: integer division. */
- tr = emitir(IRT(IR_SUB, IRT_INTP), sp[0], sp[1]);
- tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz)));
-#if LJ_64
- tr = emitconv(tr, IRT_NUM, IRT_INTP, 0);
-#endif
- return tr;
- } else { /* Pointer comparison (unsigned). */
- /* Assume true comparison. Fixup and emit pending guard later. */
- IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE;
- lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]);
- J->postproc = LJ_POST_FIXGUARD;
- return TREF_TRUE;
- }
- }
- if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info)))
- return 0;
- } else if (mm == MM_add && ctype_isnum(ctp->info) &&
- (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
- TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */
- ctp = s[1];
- } else {
- return 0;
- }
- {
- TRef tr = sp[1];
- IRType t = tref_type(tr);
- CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
- CTypeID id;
-#if LJ_64
- if (t == IRT_NUM || t == IRT_FLOAT)
- tr = emitconv(tr, IRT_INTP, t, IRCONV_TRUNC|IRCONV_ANY);
- else if (!(t == IRT_I64 || t == IRT_U64))
- tr = emitconv(tr, IRT_INTP, IRT_INT,
- ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT);
-#else
- if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) {
- tr = emitconv(tr, IRT_INTP, t,
- (t == IRT_NUM || t == IRT_FLOAT) ?
- IRCONV_TRUNC|IRCONV_ANY : 0);
- }
-#endif
- tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz));
- tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr);
- id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
- CTSIZE_PTR);
- return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
- }
-}
-
-/* Record ctype arithmetic metamethods. */
-static TRef crec_arith_meta(jit_State *J, TRef *sp, CType **s, CTState *cts,
- RecordFFData *rd)
-{
- cTValue *tv = NULL;
- if (J->base[0]) {
- if (tviscdata(&rd->argv[0])) {
- CTypeID id = argv2cdata(J, J->base[0], &rd->argv[0])->ctypeid;
- CType *ct = ctype_raw(cts, id);
- if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
- tv = lj_ctype_meta(cts, id, (MMS)rd->data);
- }
- if (!tv && J->base[1] && tviscdata(&rd->argv[1])) {
- CTypeID id = argv2cdata(J, J->base[1], &rd->argv[1])->ctypeid;
- CType *ct = ctype_raw(cts, id);
- if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
- tv = lj_ctype_meta(cts, id, (MMS)rd->data);
- }
- }
- if (tv) {
- if (tvisfunc(tv)) {
- J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
- rd->nres = -1; /* Pending tailcall. */
- return 0;
- } /* NYI: non-function metamethods. */
- } else if ((MMS)rd->data == MM_eq) { /* Fallback cdata pointer comparison. */
- if (sp[0] && sp[1] && ctype_isnum(s[0]->info) == ctype_isnum(s[1]->info)) {
- /* Assume true comparison. Fixup and emit pending guard later. */
- lj_ir_set(J, IRTG(IR_EQ, IRT_PTR), sp[0], sp[1]);
- J->postproc = LJ_POST_FIXGUARD;
- return TREF_TRUE;
- } else {
- return TREF_FALSE;
- }
- }
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- return 0;
-}
-
-void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- TRef sp[2];
- CType *s[2];
- MSize i;
- for (i = 0; i < 2; i++) {
- TRef tr = J->base[i];
- CType *ct = ctype_get(cts, CTID_DOUBLE);
- if (!tr) {
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- } else if (tref_iscdata(tr)) {
- CTypeID id = argv2cdata(J, tr, &rd->argv[i])->ctypeid;
- IRType t;
- ct = ctype_raw(cts, id);
- t = crec_ct2irt(cts, ct);
- if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */
- tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR);
- if (ctype_isref(ct->info)) {
- ct = ctype_rawchild(cts, ct);
- t = crec_ct2irt(cts, ct);
- }
- } else if (t == IRT_I64 || t == IRT_U64) {
- tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64);
- lj_needsplit(J);
- goto ok;
- } else if (t == IRT_INT || t == IRT_U32) {
- tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT);
- if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
- goto ok;
- } else if (ctype_isfunc(ct->info)) {
- tr = emitir(IRT(IR_FLOAD, IRT_PTR), tr, IRFL_CDATA_PTR);
- ct = ctype_get(cts,
- lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
- goto ok;
- } else {
- tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata)));
- }
- if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
- if (ctype_isnum(ct->info)) {
- if (t == IRT_CDATA) {
- tr = 0;
- } else {
- if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
- tr = emitir(IRT(IR_XLOAD, t), tr, 0);
- }
- }
- } else if (tref_isnil(tr)) {
- tr = lj_ir_kptr(J, NULL);
- ct = ctype_get(cts, CTID_P_VOID);
- } else if (tref_isinteger(tr)) {
- ct = ctype_get(cts, CTID_INT32);
- } else if (tref_isstr(tr)) {
- TRef tr2 = J->base[1-i];
- CTypeID id = argv2cdata(J, tr2, &rd->argv[1-i])->ctypeid;
- ct = ctype_raw(cts, id);
- if (ctype_isenum(ct->info)) { /* Match string against enum constant. */
- GCstr *str = strV(&rd->argv[i]);
- CTSize ofs;
- CType *cct = lj_ctype_getfield(cts, ct, str, &ofs);
- if (cct && ctype_isconstval(cct->info)) {
- /* Specialize to the name of the enum constant. */
- emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, str));
- ct = ctype_child(cts, cct);
- tr = lj_ir_kint(J, (int32_t)ofs);
- } else { /* Interpreter will throw or return false. */
- ct = ctype_get(cts, CTID_P_VOID);
- }
- } else if (ctype_isptr(ct->info)) {
- tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCstr)));
- } else {
- ct = ctype_get(cts, CTID_P_VOID);
- }
- } else if (!tref_isnum(tr)) {
- tr = 0;
- ct = ctype_get(cts, CTID_P_VOID);
- }
- ok:
- s[i] = ct;
- sp[i] = tr;
- }
- {
- TRef tr;
- if (!(tr = crec_arith_int64(J, sp, s, (MMS)rd->data)) &&
- !(tr = crec_arith_ptr(J, sp, s, (MMS)rd->data)) &&
- !(tr = crec_arith_meta(J, sp, s, cts, rd)))
- return;
- J->base[0] = tr;
- /* Fixup cdata comparisons, too. Avoids some cdata escapes. */
- if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1) &&
- !irt_isguard(J->guardemit)) {
- const BCIns *pc = frame_contpc(J->L->base-1) - 1;
- if (bc_op(*pc) <= BC_ISNEP) {
- setframe_pc(&J2G(J)->tmptv, pc);
- J2G(J)->tmptv.u32.lo = ((tref_istrue(tr) ^ bc_op(*pc)) & 1);
- J->postproc = LJ_POST_FIXCOMP;
- }
- }
- }
-}
-
-/* -- C library namespace metamethods ------------------------------------- */
-
-void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) &&
- udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) {
- CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0]));
- GCstr *name = strV(&rd->argv[1]);
- CType *ct;
- CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
- cTValue *tv = lj_tab_getstr(cl->cache, name);
- rd->nres = rd->data;
- if (id && tv && !tvisnil(tv)) {
- /* Specialize to the symbol name and make the result a constant. */
- emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name));
- if (ctype_isconstval(ct->info)) {
- if (ct->size >= 0x80000000u &&
- (ctype_child(cts, ct)->info & CTF_UNSIGNED))
- J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size);
- else
- J->base[0] = lj_ir_kint(J, (int32_t)ct->size);
- } else if (ctype_isextern(ct->info)) {
- CTypeID sid = ctype_cid(ct->info);
- void *sp = *(void **)cdataptr(cdataV(tv));
- TRef ptr;
- ct = ctype_raw(cts, sid);
- if (LJ_64 && !checkptr32(sp))
- ptr = lj_ir_kintp(J, (uintptr_t)sp);
- else
- ptr = lj_ir_kptr(J, sp);
- if (rd->data) {
- J->base[0] = crec_tv_ct(J, ct, sid, ptr);
- } else {
- J->needsnap = 1;
- crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
- }
- } else {
- J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA);
- }
- } else {
- lj_trace_err(J, LJ_TRERR_NOCACHE);
- }
- } /* else: interpreter will throw. */
-}
-
-/* -- FFI library functions ----------------------------------------------- */
-
-static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval)
-{
- return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval);
-}
-
-void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd)
-{
- crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0]));
-}
-
-void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd)
-{
- UNUSED(rd);
- if (J->base[0])
- lj_trace_err(J, LJ_TRERR_NYICALL);
- J->base[0] = lj_ir_call(J, IRCALL_lj_vm_errno);
-}
-
-void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- TRef tr = J->base[0];
- if (tr) {
- TRef trlen = J->base[1];
- if (!tref_isnil(trlen)) {
- trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
- tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]);
- } else {
- tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]);
- trlen = lj_ir_call(J, IRCALL_strlen, tr);
- }
- J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen);
- } /* else: interpreter will throw. */
-}
-
-void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2];
- if (trdst && trsrc && (trlen || tref_isstr(trsrc))) {
- trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
- trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]);
- if (trlen) {
- trlen = crec_toint(J, cts, trlen, &rd->argv[2]);
- } else {
- trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN);
- trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
- }
- rd->nres = 0;
- crec_copy(J, trdst, trsrc, trlen, NULL);
- } /* else: interpreter will throw. */
-}
-
-void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- TRef trdst = J->base[0], trlen = J->base[1], trfill = J->base[2];
- if (trdst && trlen) {
- CTSize step = 1;
- if (tviscdata(&rd->argv[0])) { /* Get alignment of original destination. */
- CTSize sz;
- CType *ct = ctype_raw(cts, cdataV(&rd->argv[0])->ctypeid);
- if (ctype_isptr(ct->info))
- ct = ctype_rawchild(cts, ct);
- step = (1u<<ctype_align(lj_ctype_info(cts, ctype_typeid(cts, ct), &sz)));
- }
- trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
- trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
- if (trfill)
- trfill = crec_toint(J, cts, trfill, &rd->argv[2]);
- else
- trfill = lj_ir_kint(J, 0);
- rd->nres = 0;
- crec_fill(J, trdst, trlen, trfill, step);
- } /* else: interpreter will throw. */
-}
-
-void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd)
-{
- if (tref_iscdata(J->base[0])) {
- TRef trid = lj_ir_kint(J, argv2ctype(J, J->base[0], &rd->argv[0]));
- J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA),
- lj_ir_kint(J, CTID_CTYPEID), trid);
- } else {
- setfuncV(J->L, &J->errinfo, J->fn);
- lj_trace_err_info(J, LJ_TRERR_NYIFFU);
- }
-}
-
-void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd)
-{
- argv2ctype(J, J->base[0], &rd->argv[0]);
- if (tref_iscdata(J->base[1])) {
- argv2ctype(J, J->base[1], &rd->argv[1]);
- J->postproc = LJ_POST_FIXBOOL;
- J->base[0] = TREF_TRUE;
- } else {
- J->base[0] = TREF_FALSE;
- }
-}
-
-void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd)
-{
- if (tref_isstr(J->base[0])) {
- /* Specialize to the ABI string to make the boolean result a constant. */
- emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0])));
- J->postproc = LJ_POST_FIXBOOL;
- J->base[0] = TREF_TRUE;
- } else {
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- }
-}
-
-/* Record ffi.sizeof(), ffi.alignof(), ffi.offsetof(). */
-void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd)
-{
- CTypeID id = argv2ctype(J, J->base[0], &rd->argv[0]);
- if (rd->data == FF_ffi_sizeof) {
- CType *ct = lj_ctype_rawref(ctype_ctsG(J2G(J)), id);
- if (ctype_isvltype(ct->info))
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- } else if (rd->data == FF_ffi_offsetof) { /* Specialize to the field name. */
- if (!tref_isstr(J->base[1]))
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
- rd->nres = 3; /* Just in case. */
- }
- J->postproc = LJ_POST_FIXCONST;
- J->base[0] = J->base[1] = J->base[2] = TREF_NIL;
-}
-
-void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd)
-{
- argv2cdata(J, J->base[0], &rd->argv[0]);
- crec_finalizer(J, J->base[0], &rd->argv[1]);
-}
-
-/* -- Miscellaneous library functions ------------------------------------- */
-
-void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd)
-{
- CTState *cts = ctype_ctsG(J2G(J));
- CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->ctypeid);
- if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
- if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
- if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 &&
- !(ct->size == 4 && (ct->info & CTF_UNSIGNED)))
- d = ctype_get(cts, CTID_INT32);
- else
- d = ctype_get(cts, CTID_DOUBLE);
- J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]);
- } else {
- J->base[0] = TREF_NIL;
- }
-}
-
-#undef IR
-#undef emitir
-#undef emitconv
-
-#endif
+/*
+** Trace recorder for C data operations.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_crecord_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT && LJ_HASFFI
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cparse.h"
+#include "lj_cconv.h"
+#include "lj_clib.h"
+#include "lj_ccall.h"
+#include "lj_ff.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_snap.h"
+#include "lj_crecord.h"
+#include "lj_dispatch.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
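+/* Emit a CONV; op2 packs the source type, dest type (<<5) and the flags. */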
+#define emitconv(a, dt, st, flags) \
+ emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags))
+
+/* -- C type checks ------------------------------------------------------- */
+
+static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o)
+{
+ GCcdata *cd;
+ TRef trtypeid;
+ if (!tref_iscdata(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ cd = cdataV(o);
+ /* Specialize to the CTypeID. */
+ trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_CTYPEID);
+ emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->ctypeid));
+ return cd;
+}
+
+/* Specialize to the CTypeID held by a cdata constructor. */
+static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr)
+{
+ CTypeID id;
+ lua_assert(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID);
+ id = *(CTypeID *)cdataptr(cd);
+ tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT);
+ emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id));
+ return id;
+}
+
+static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o)
+{
+ if (tref_isstr(tr)) {
+ GCstr *s = strV(o);
+ CPState cp;
+ CTypeID oldtop;
+ /* Specialize to the string containing the C type declaration. */
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s));
+ cp.L = J->L;
+ cp.cts = ctype_ctsG(J2G(J));
+ oldtop = cp.cts->top;
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.param = NULL;
+ cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
+ if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return cp.val.id;
+ } else {
+ GCcdata *cd = argv2cdata(J, tr, o);
+ return cd->ctypeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) :
+ cd->ctypeid;
+ }
+}
+
+/* Convert CType to IRType (if possible). */
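+/* Returns IRT_CDATA if the C type has no direct IR representation. */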
+static IRType crec_ct2irt(CTState *cts, CType *ct)
+{
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (LJ_LIKELY(ctype_isnum(ct->info))) {
+ if ((ct->info & CTF_FP)) {
+ if (ct->size == sizeof(double))
+ return IRT_NUM;
+ else if (ct->size == sizeof(float))
+ return IRT_FLOAT;
+ } else {
+ uint32_t b = lj_fls(ct->size);
+ if (b <= 3)
+ return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0);
+ }
+ } else if (ctype_isptr(ct->info)) {
+ return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ } else if (ctype_iscomplex(ct->info)) {
+ if (ct->size == 2*sizeof(double))
+ return IRT_NUM;
+ else if (ct->size == 2*sizeof(float))
+ return IRT_FLOAT;
+ }
+ return IRT_CDATA;
+}
+
+/* -- Optimized memory fill and copy -------------------------------------- */
+
+/* Maximum length and unroll of inlined copy/fill. */
+#define CREC_COPY_MAXUNROLL 16
+#define CREC_COPY_MAXLEN 128
+
+#define CREC_FILL_MAXUNROLL 16
+
+/* Number of windowed registers used for optimized memory copy. */
+#if LJ_TARGET_X86
+#define CREC_COPY_REGWIN 2
+#elif LJ_TARGET_PPC || LJ_TARGET_MIPS
+#define CREC_COPY_REGWIN 8
+#else
+#define CREC_COPY_REGWIN 4
+#endif
+
+/* List of memory offsets for copy/fill. */
+typedef struct CRecMemList {
+ CTSize ofs; /* Offset in bytes. */
+ IRType tp; /* Type of load/store. */
+ TRef trofs; /* TRef of interned offset. */
+ TRef trval; /* TRef of load value. */
+} CRecMemList;
+
+/* Generate copy list for element-wise struct copy. */
+static MSize crec_copy_struct(CRecMemList *ml, CTState *cts, CType *ct)
+{
+ CTypeID fid = ct->sib;
+ MSize mlp = 0;
+ while (fid) {
+ CType *df = ctype_get(cts, fid);
+ fid = df->sib;
+ if (ctype_isfield(df->info)) {
+ CType *cct;
+ IRType tp;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ cct = ctype_rawchild(cts, df); /* Field type. */
+ tp = crec_ct2irt(cts, cct);
+ if (tp == IRT_CDATA) return 0; /* NYI: aggregates. */
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = df->size;
+ ml[mlp].tp = tp;
+ mlp++;
+ if (ctype_iscomplex(cct->info)) {
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = df->size + (cct->size >> 1);
+ ml[mlp].tp = tp;
+ mlp++;
+ }
+ } else if (!ctype_isconstval(df->info)) {
+ /* NYI: bitfields and sub-structures. */
+ return 0;
+ }
+ }
+ return mlp;
+}
+
+/* Generate unrolled copy list, from highest to lowest step size/alignment. */
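+/* E.g. len=13, step=8 yields a U64 at offset 0, a U32 at 8 and a U8 at 12. */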
+static MSize crec_copy_unroll(CRecMemList *ml, CTSize len, CTSize step,
+ IRType tp)
+{
+ CTSize ofs = 0;
+ MSize mlp = 0;
+ if (tp == IRT_CDATA) tp = IRT_U8 + 2*lj_fls(step);
+ do {
+ while (ofs + step <= len) {
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = ofs;
+ ml[mlp].tp = tp;
+ mlp++;
+ ofs += step;
+ }
+ step >>= 1;
+ tp -= 2;
+ } while (ofs < len);
+ return mlp;
+}
+
+/*
+** Emit copy list with windowed loads/stores.
+** LJ_TARGET_UNALIGNED: may emit unaligned loads/stores (not marked as such).
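+** Loads are buffered and the stores are flushed in groups of CREC_COPY_REGWIN
+** refs, which bounds the number of values that are live at the same time.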
+*/
+static void crec_copy_emit(jit_State *J, CRecMemList *ml, MSize mlp,
+ TRef trdst, TRef trsrc)
+{
+ MSize i, j, rwin = 0;
+ for (i = 0, j = 0; i < mlp; ) {
+ TRef trofs = lj_ir_kintp(J, ml[i].ofs);
+ TRef trsptr = emitir(IRT(IR_ADD, IRT_PTR), trsrc, trofs);
+ ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0);
+ ml[i].trofs = trofs;
+ i++;
+ rwin += (LJ_SOFTFP && ml[i].tp == IRT_NUM) ? 2 : 1;
+ if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. */
+ rwin = 0;
+ for ( ; j < i; j++) {
+ TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, ml[j].trofs);
+ emitir(IRT(IR_XSTORE, ml[j].tp), trdptr, ml[j].trval);
+ }
+ }
+ }
+}
+
+/* Optimized memory copy. */
+static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen,
+ CType *ct)
+{
+ if (tref_isk(trlen)) { /* Length must be constant. */
+ CRecMemList ml[CREC_COPY_MAXUNROLL];
+ MSize mlp = 0;
+ CTSize step = 1, len = (CTSize)IR(tref_ref(trlen))->i;
+ IRType tp = IRT_CDATA;
+ int needxbar = 0;
+ if (len == 0) return; /* Shortcut. */
+ if (len > CREC_COPY_MAXLEN) goto fallback;
+ if (ct) {
+ CTState *cts = ctype_ctsG(J2G(J));
+ lua_assert(ctype_isarray(ct->info) || ctype_isstruct(ct->info));
+ if (ctype_isarray(ct->info)) {
+ CType *cct = ctype_rawchild(cts, ct);
+ tp = crec_ct2irt(cts, cct);
+ if (tp == IRT_CDATA) goto rawcopy;
+ step = lj_ir_type_size[tp];
+ lua_assert((len & (step-1)) == 0);
+ } else if ((ct->info & CTF_UNION)) {
+ step = (1u << ctype_align(ct->info));
+ goto rawcopy;
+ } else {
+ mlp = crec_copy_struct(ml, cts, ct);
+ goto emitcopy;
+ }
+ } else {
+ rawcopy:
+ needxbar = 1;
+ if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR)
+ step = CTSIZE_PTR;
+ }
+ mlp = crec_copy_unroll(ml, len, step, tp);
+ emitcopy:
+ if (mlp) {
+ crec_copy_emit(J, ml, mlp, trdst, trsrc);
+ if (needxbar)
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+ return;
+ }
+ }
+fallback:
+ /* Call memcpy. Always needs a barrier to disable alias analysis. */
+ lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen);
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+}
+
+/* Generate unrolled fill list, from highest to lowest step size/alignment. */
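+/* E.g. len=6, step=4 yields a U32 store at offset 0 and a U16 store at 4. */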
+static MSize crec_fill_unroll(CRecMemList *ml, CTSize len, CTSize step)
+{
+ CTSize ofs = 0;
+ MSize mlp = 0;
+ IRType tp = IRT_U8 + 2*lj_fls(step);
+ do {
+ while (ofs + step <= len) {
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = ofs;
+ ml[mlp].tp = tp;
+ mlp++;
+ ofs += step;
+ }
+ step >>= 1;
+ tp -= 2;
+ } while (ofs < len);
+ return mlp;
+}
+
+/*
+** Emit stores for fill list.
+** LJ_TARGET_UNALIGNED: may emit unaligned stores (not marked as such).
+*/
+static void crec_fill_emit(jit_State *J, CRecMemList *ml, MSize mlp,
+ TRef trdst, TRef trfill)
+{
+ MSize i;
+ for (i = 0; i < mlp; i++) {
+ TRef trofs = lj_ir_kintp(J, ml[i].ofs);
+ TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, trofs);
+ emitir(IRT(IR_XSTORE, ml[i].tp), trdptr, trfill);
+ }
+}
+
+/* Optimized memory fill. */
+static void crec_fill(jit_State *J, TRef trdst, TRef trlen, TRef trfill,
+ CTSize step)
+{
+ if (tref_isk(trlen)) { /* Length must be constant. */
+ CRecMemList ml[CREC_FILL_MAXUNROLL];
+ MSize mlp;
+ CTSize len = (CTSize)IR(tref_ref(trlen))->i;
+ if (len == 0) return; /* Shortcut. */
+ if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR)
+ step = CTSIZE_PTR;
+ if (step * CREC_FILL_MAXUNROLL < len) goto fallback;
+ mlp = crec_fill_unroll(ml, len, step);
+ if (!mlp) goto fallback;
+ if (tref_isk(trfill) || ml[0].tp != IRT_U8)
+ trfill = emitconv(trfill, IRT_INT, IRT_U8, 0);
+ if (ml[0].tp != IRT_U8) { /* Scatter U8 to U16/U32/U64. */
+ if (CTSIZE_PTR == 8 && ml[0].tp == IRT_U64) {
+ if (tref_isk(trfill)) /* Pointless on x64 with zero-extended regs. */
+ trfill = emitconv(trfill, IRT_U64, IRT_U32, 0);
+ trfill = emitir(IRT(IR_MUL, IRT_U64), trfill,
+ lj_ir_kint64(J, U64x(01010101,01010101)));
+ } else {
+ trfill = emitir(IRTI(IR_MUL), trfill,
+ lj_ir_kint(J, ml[0].tp == IRT_U16 ? 0x0101 : 0x01010101));
+ }
+ }
+ crec_fill_emit(J, ml, mlp, trdst, trfill);
+ } else {
+fallback:
+ /* Call memset. Always needs a barrier to disable alias analysis. */
+ lj_ir_call(J, IRCALL_memset, trdst, trfill, trlen); /* Note: arg order! */
+ }
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+}
+
+/* -- Convert C type to C type -------------------------------------------- */
+
+/*
+** This code mirrors the code in lj_cconv.c. It performs the same steps
+** for the trace recorder that lj_cconv.c does for the interpreter.
+**
+** One major difference is that we can get away with much fewer checks
+** here. E.g. checks for casts, constness or correct types can often be
+** omitted, even if they might fail. The interpreter subsequently throws
+** an error, which aborts the trace.
+**
+** All operations are specialized to their C types, so the on-trace
+** outcome must be the same as the outcome in the interpreter. If the
+** interpreter doesn't throw an error, then the trace is correct, too.
+** Care must be taken not to generate invalid (temporary) IR or to
+** trigger asserts.
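+**
+** Example: storing a Lua number to an int32_t destination records a
+** truncating CONV to IRT_INT followed by an XSTORE (case CCX(I, F) below).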
+*/
+
+/* Determine whether a passed number or cdata number is non-zero. */
+static int crec_isnonzero(CType *s, void *p)
+{
+ if (p == (void *)0)
+ return 0;
+ if (p == (void *)1)
+ return 1;
+ if ((s->info & CTF_FP)) {
+ if (s->size == sizeof(float))
+ return (*(float *)p != 0);
+ else
+ return (*(double *)p != 0);
+ } else {
+ if (s->size == 1)
+ return (*(uint8_t *)p != 0);
+ else if (s->size == 2)
+ return (*(uint16_t *)p != 0);
+ else if (s->size == 4)
+ return (*(uint32_t *)p != 0);
+ else
+ return (*(uint64_t *)p != 0);
+ }
+}
+
+static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp,
+ void *svisnz)
+{
+ IRType dt = crec_ct2irt(ctype_ctsG(J2G(J)), d);
+ IRType st = crec_ct2irt(ctype_ctsG(J2G(J)), s);
+ CTSize dsize = d->size, ssize = s->size;
+ CTInfo dinfo = d->info, sinfo = s->info;
+
+ if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
+ goto err_conv;
+
+ /*
+ ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and
+ ** numbers up to 8 bytes. Otherwise sp holds a pointer.
+ */
+
+ switch (cconv_idx2(dinfo, sinfo)) {
+ /* Destination is a bool. */
+ case CCX(B, B):
+ goto xstore; /* Source operand is already normalized. */
+ case CCX(B, I):
+ case CCX(B, F):
+ if (st != IRT_CDATA) {
+ /* Specialize to the result of a comparison against 0. */
+ TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) :
+ (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) :
+ lj_ir_kint(J, 0);
+ int isnz = crec_isnonzero(s, svisnz);
+ emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero);
+ sp = lj_ir_kint(J, isnz);
+ goto xstore;
+ }
+ goto err_nyi;
+
+ /* Destination is an integer. */
+ case CCX(I, B):
+ case CCX(I, I):
+ conv_I_I:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ /* Extend 32 to 64 bit integer. */
+ if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED)))
+ sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st,
+ (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
+ else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0);
+ else if (st == IRT_INT)
+ sp = lj_opt_narrow_toint(J, sp);
+ xstore:
+ if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J);
+ if (dp == 0) return sp;
+ emitir(IRT(IR_XSTORE, dt), dp, sp);
+ break;
+ case CCX(I, C):
+ sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
+ /* fallthrough */
+ case CCX(I, F):
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_TRUNC|IRCONV_ANY);
+ goto xstore;
+ case CCX(I, P):
+ case CCX(I, A):
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ ssize = CTSIZE_PTR;
+ st = IRT_UINTP;
+ if (((dsize ^ ssize) & 8) == 0) { /* Must insert no-op type conversion. */
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, IRT_PTR, 0);
+ goto xstore;
+ }
+ goto conv_I_I;
+
+ /* Destination is a floating-point number. */
+ case CCX(F, B):
+ case CCX(F, I):
+ conv_F_I:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, 0);
+ goto xstore;
+ case CCX(F, C):
+ sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
+ /* fallthrough */
+ case CCX(F, F):
+ conv_F_F:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ if (dt != st) sp = emitconv(sp, dt, st, 0);
+ goto xstore;
+
+ /* Destination is a complex number. */
+ case CCX(C, I):
+ case CCX(C, F):
+ { /* Clear im. */
+ TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
+ emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0));
+ }
+ /* Convert to re. */
+ if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I;
+
+ case CCX(C, C):
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ {
+ TRef re, im, ptr;
+ re = emitir(IRT(IR_XLOAD, st), sp, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1)));
+ im = emitir(IRT(IR_XLOAD, st), ptr, 0);
+ if (dt != st) {
+ re = emitconv(re, dt, st, 0);
+ im = emitconv(im, dt, st, 0);
+ }
+ emitir(IRT(IR_XSTORE, dt), dp, re);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
+ emitir(IRT(IR_XSTORE, dt), ptr, im);
+ }
+ break;
+
+ /* Destination is a vector. */
+ case CCX(V, I):
+ case CCX(V, F):
+ case CCX(V, C):
+ case CCX(V, V):
+ goto err_nyi;
+
+ /* Destination is a pointer. */
+ case CCX(P, P):
+ case CCX(P, A):
+ case CCX(P, S):
+ /* There are only 32 bit pointers/addresses on 32 bit machines.
+ ** Also ok on x64, since all 32 bit ops clear the upper part of the reg.
+ */
+ goto xstore;
+ case CCX(P, I):
+ if (st == IRT_CDATA) goto err_nyi;
+ if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */
+ sp = emitconv(sp, IRT_U32, st, 0);
+ goto xstore;
+ case CCX(P, F):
+ if (st == IRT_CDATA) goto err_nyi;
+ /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
+ sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32,
+ st, IRCONV_TRUNC|IRCONV_ANY);
+ goto xstore;
+
+ /* Destination is an array. */
+ case CCX(A, A):
+ /* Destination is a struct/union. */
+ case CCX(S, S):
+ if (dp == 0) goto err_conv;
+ crec_copy(J, dp, sp, lj_ir_kint(J, dsize), d);
+ break;
+
+ default:
+ err_conv:
+ err_nyi:
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ break;
+ }
+ return 0;
+}
+
+/* -- Convert C type to TValue (load) ------------------------------------- */
+
+static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ IRType t = crec_ct2irt(cts, s);
+ CTInfo sinfo = s->info;
+ if (ctype_isnum(sinfo)) {
+ TRef tr;
+ if (t == IRT_CDATA)
+ goto err_nyi; /* NYI: copyval of >64 bit integers. */
+ tr = emitir(IRT(IR_XLOAD, t), sp, 0);
+ if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */
+ return emitconv(tr, IRT_NUM, t, 0);
+ } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */
+ sp = tr;
+ lj_needsplit(J);
+ } else if ((sinfo & CTF_BOOL)) {
+ /* Assume not equal to zero. Fixup and emit pending guard later. */
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ return tr;
+ }
+ } else if (ctype_isptr(sinfo) || ctype_isenum(sinfo)) {
+ sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Box pointers and enums. */
+ } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
+ cts->L = J->L;
+ sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */
+ } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */
+ ptrdiff_t esz = (ptrdiff_t)(s->size >> 1);
+ TRef ptr, tr1, tr2, dp;
+ dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL);
+ tr1 = emitir(IRT(IR_XLOAD, t), sp, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz));
+ tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)));
+ emitir(IRT(IR_XSTORE, t), ptr, tr1);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz));
+ emitir(IRT(IR_XSTORE, t), ptr, tr2);
+ return dp;
+ } else {
+ /* NYI: copyval of vectors. */
+ err_nyi:
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ }
+ /* Box pointer, ref, enum or 64 bit integer. */
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp);
+}
+
+/* -- Convert TValue to C type (store) ------------------------------------ */
+
+static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID sid = CTID_P_VOID;
+ void *svisnz = 0;
+ CType *s;
+ if (LJ_LIKELY(tref_isinteger(sp))) {
+ sid = CTID_INT32;
+ svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
+ } else if (tref_isnum(sp)) {
+ sid = CTID_DOUBLE;
+ svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
+ } else if (tref_isbool(sp)) {
+ sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0);
+ sid = CTID_BOOL;
+ } else if (tref_isnil(sp)) {
+ sp = lj_ir_kptr(J, NULL);
+ } else if (tref_isudata(sp)) {
+ GCudata *ud = udataV(sval);
+ if (ud->udtype == UDTYPE_IO_FILE) {
+ TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), sp, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE));
+ sp = emitir(IRT(IR_FLOAD, IRT_PTR), sp, IRFL_UDATA_FILE);
+ } else {
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCudata)));
+ }
+ } else if (tref_isstr(sp)) {
+ if (ctype_isenum(d->info)) { /* Match string against enum constant. */
+ GCstr *str = strV(sval);
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
+ /* Specialize to the name of the enum constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str));
+ if (cct && ctype_isconstval(cct->info)) {
+ lua_assert(ctype_child(cts, cct)->size == 4);
+ svisnz = (void *)(intptr_t)(ofs != 0);
+ sp = lj_ir_kint(J, (int32_t)ofs);
+ sid = ctype_cid(cct->info);
+ } /* else: interpreter will throw. */
+ } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */
+ } else { /* Otherwise pass the string data as a const char[]. */
+ /* Don't use STRREF. It folds with SNEW, which loses the trailing NUL. */
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr)));
+ sid = CTID_A_CCHAR;
+ }
+ } else { /* NYI: tref_istab(sp), tref_islightud(sp). */
+ IRType t;
+ sid = argv2cdata(J, sp, sval)->ctypeid;
+ s = ctype_raw(cts, sid);
+ svisnz = cdataptr(cdataV(sval));
+ t = crec_ct2irt(cts, s);
+ if (ctype_isptr(s->info)) {
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR);
+ if (ctype_isref(s->info)) {
+ svisnz = *(void **)svisnz;
+ s = ctype_rawchild(cts, s);
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ t = crec_ct2irt(cts, s);
+ } else {
+ goto doconv;
+ }
+ } else if (t == IRT_I64 || t == IRT_U64) {
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64);
+ lj_needsplit(J);
+ goto doconv;
+ } else if (t == IRT_INT || t == IRT_U32) {
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT);
+ goto doconv;
+ } else {
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata)));
+ }
+ if (ctype_isnum(s->info) && t != IRT_CDATA)
+ sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Load number value. */
+ goto doconv;
+ }
+ s = ctype_get(cts, sid);
+doconv:
+ if (ctype_isenum(d->info)) d = ctype_child(cts, d);
+ return crec_ct_ct(J, d, s, dp, sp, svisnz);
+}
+
+/* -- C data metamethods -------------------------------------------------- */
+
+/* This would be rather difficult in FOLD, so do it here:
+** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k)
+** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz)
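+** E.g. with sz=4 the address of a[i+1] becomes (base+i*4)+(ofs+4), so it
+** shares the variable part of the address computation with a[i].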
+*/
+static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz)
+{
+ IRIns *ir = IR(tref_ref(tr));
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) &&
+ (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) {
+ IRIns *irk = IR(ir->op2);
+ ptrdiff_t k;
+ if (LJ_64 && irk->o == IR_KINT64)
+ k = (ptrdiff_t)ir_kint64(irk)->u64 * sz;
+ else
+ k = (ptrdiff_t)irk->i * sz;
+ if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k;
+ tr = ir->op1; /* Not a TRef, but the caller doesn't care. */
+ }
+ return tr;
+}
+
+/* Record ctype __index/__newindex metamethods. */
+static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
+ RecordFFData *rd)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index);
+ if (!tv)
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ if (tvisfunc(tv)) {
+ J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
+ rd->nres = -1; /* Pending tailcall. */
+ } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) {
+ /* Specialize to result of __index lookup. */
+ cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]);
+ J->base[0] = lj_record_constify(J, o);
+ if (!J->base[0])
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /* Always specialize to the key. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
+ } else {
+ /* NYI: resolving of non-function metamethods. */
+ /* NYI: non-string keys for __index table. */
+ /* NYI: stores to __newindex table. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd)
+{
+ TRef idx, ptr = J->base[0];
+ ptrdiff_t ofs = sizeof(GCcdata);
+ GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]);
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ CTypeID sid = 0;
+
+ /* Resolve pointer or reference for cdata object. */
+ if (ctype_isptr(ct->info)) {
+ IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR);
+ ofs = 0;
+ ptr = crec_reassoc_ofs(J, ptr, &ofs, 1);
+ }
+
+again:
+ idx = J->base[1];
+ if (tref_isnumber(idx)) {
+ idx = lj_opt_narrow_cindex(J, idx);
+ if (ctype_ispointer(ct->info)) {
+ CTSize sz;
+ integer_key:
+ if ((ct->info & CTF_COMPLEX))
+ idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1));
+ sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info)));
+ idx = crec_reassoc_ofs(J, idx, &ofs, sz);
+#if LJ_TARGET_ARM || LJ_TARGET_PPC
+ /* Hoist base add to allow fusion of index/shift into operands. */
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs
+#if LJ_TARGET_ARM
+ && (sz == 1 || sz == 4)
+#endif
+ ) {
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+ ofs = 0;
+ }
+#endif
+ idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz));
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr);
+ }
+ } else if (tref_iscdata(idx)) {
+ GCcdata *cdk = cdataV(&rd->argv[1]);
+ CType *ctk = ctype_raw(cts, cdk->ctypeid);
+ IRType t = crec_ct2irt(cts, ctk);
+ if (ctype_ispointer(ct->info) && t >= IRT_I8 && t <= IRT_U64) {
+ if (ctk->size == 8) {
+ idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64);
+ } else if (ctk->size == 4) {
+ idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT);
+ } else {
+ idx = emitir(IRT(IR_ADD, IRT_PTR), idx,
+ lj_ir_kintp(J, sizeof(GCcdata)));
+ idx = emitir(IRT(IR_XLOAD, t), idx, 0);
+ }
+ if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED))
+ idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT);
+ if (!LJ_64 && ctk->size > sizeof(intptr_t)) {
+ idx = emitconv(idx, IRT_INTP, t, 0);
+ lj_needsplit(J);
+ }
+ goto integer_key;
+ }
+ } else if (tref_isstr(idx)) {
+ GCstr *name = strV(&rd->argv[1]);
+ if (cd->ctypeid == CTID_CTYPEID)
+ ct = ctype_raw(cts, crec_constructor(J, cd, ptr));
+ if (ctype_isstruct(ct->info)) {
+ CTSize fofs;
+ CType *fct;
+ fct = lj_ctype_getfield(cts, ct, name, &fofs);
+ if (fct) {
+ /* Always specialize to the field name. */
+ emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
+ if (ctype_isconstval(fct->info)) {
+ if (fct->size >= 0x80000000u &&
+ (ctype_child(cts, fct)->info & CTF_UNSIGNED)) {
+ J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size);
+ return;
+ }
+ J->base[0] = lj_ir_kint(J, (int32_t)fct->size);
+ return; /* Interpreter will throw for newindex. */
+ } else if (ctype_isbitfield(fct->info)) {
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ } else {
+ lua_assert(ctype_isfield(fct->info));
+ sid = ctype_cid(fct->info);
+ }
+ ofs += (ptrdiff_t)fofs;
+ }
+ } else if (ctype_iscomplex(ct->info)) {
+ if (name->len == 2 &&
+ ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') ||
+ (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) {
+ /* Always specialize to the field name. */
+ emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
+ if (strdata(name)[0] == 'i') ofs += (ct->size >> 1);
+ sid = ctype_cid(ct->info);
+ }
+ }
+ }
+ if (!sid) {
+ if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
+ CType *cct = ctype_rawchild(cts, ct);
+ if (ctype_isstruct(cct->info)) {
+ ct = cct;
+ if (tref_isstr(idx)) goto again;
+ }
+ }
+ crec_index_meta(J, cts, ct, rd);
+ return;
+ }
+
+ if (ofs)
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+
+ /* Resolve reference for field. */
+ ct = ctype_get(cts, sid);
+ if (ctype_isref(ct->info))
+ ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0);
+
+ while (ctype_isattrib(ct->info))
+ ct = ctype_child(cts, ct); /* Skip attributes. */
+
+ if (rd->data == 0) { /* __index metamethod. */
+ J->base[0] = crec_tv_ct(J, ct, sid, ptr);
+ } else { /* __newindex metamethod. */
+ rd->nres = 0;
+ J->needsnap = 1;
+ crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
+ }
+}
+
+/* Record setting a finalizer. */
+static void crec_finalizer(jit_State *J, TRef trcd, cTValue *fin)
+{
+ TRef trlo = lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd);
+ TRef trhi = emitir(IRT(IR_ADD, IRT_P32), trlo, lj_ir_kint(J, 4));
+ if (LJ_BE) { TRef tmp = trlo; trlo = trhi; trhi = tmp; }
+ if (tvisfunc(fin)) {
+ emitir(IRT(IR_XSTORE, IRT_P32), trlo, lj_ir_kfunc(J, funcV(fin)));
+ emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TFUNC));
+ } else if (tviscdata(fin)) {
+ emitir(IRT(IR_XSTORE, IRT_P32), trlo,
+ lj_ir_kgc(J, obj2gco(cdataV(fin)), IRT_CDATA));
+ emitir(IRTI(IR_XSTORE), trhi, lj_ir_kint(J, LJ_TCDATA));
+ } else {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ J->needsnap = 1;
+}
+
+/* Record cdata allocation. */
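+/* VLAs, oversized (> 128 bytes) or over-aligned types abort the trace. */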
+static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ CType *d = ctype_raw(cts, id);
+ TRef trid;
+ if (!sz || sz > 128 || (info & CTF_VLA) || ctype_align(info) > CT_MEMALIGN)
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: large/special allocations. */
+ trid = lj_ir_kint(J, id);
+ /* Use special instruction to box pointer or 32/64 bit integer. */
+ if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) {
+ TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) :
+ ctype_isptr(info) ? lj_ir_kptr(J, NULL) :
+ sz == 4 ? lj_ir_kint(J, 0) :
+ (lj_needsplit(J), lj_ir_kint64(J, 0));
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp);
+ } else {
+ TRef trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, TREF_NIL);
+ cTValue *fin;
+ J->base[0] = trcd;
+ if (J->base[1] && !J->base[2] &&
+ !lj_cconv_multi_init(cts, d, &rd->argv[1])) {
+ goto single_init;
+ } else if (ctype_isarray(d->info)) {
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize ofs, esize = dc->size;
+ TRef sp = 0;
+ TValue tv;
+ TValue *sval = &tv;
+ MSize i;
+ tv.u64 = 0;
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)))
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init array of aggregates. */
+ for (i = 1, ofs = 0; ofs < sz; ofs += esize) {
+ TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
+ lj_ir_kintp(J, ofs + sizeof(GCcdata)));
+ if (J->base[i]) {
+ sp = J->base[i];
+ sval = &rd->argv[i];
+ i++;
+ } else if (i != 2) {
+ sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL;
+ }
+ crec_ct_tv(J, dc, dp, sp, sval);
+ }
+ } else if (ctype_isstruct(d->info)) {
+ CTypeID fid = d->sib;
+ MSize i = 1;
+ while (fid) {
+ CType *df = ctype_get(cts, fid);
+ fid = df->sib;
+ if (ctype_isfield(df->info)) {
+ CType *dc;
+ TRef sp, dp;
+ TValue tv;
+ TValue *sval = &tv;
+ setintV(&tv, 0);
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ dc = ctype_rawchild(cts, df); /* Field type. */
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info) ||
+ ctype_isenum(dc->info)))
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */
+ if (J->base[i]) {
+ sp = J->base[i];
+ sval = &rd->argv[i];
+ i++;
+ } else {
+ sp = ctype_isptr(dc->info) ? TREF_NIL : lj_ir_kint(J, 0);
+ }
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
+ lj_ir_kintp(J, df->size + sizeof(GCcdata)));
+ crec_ct_tv(J, dc, dp, sp, sval);
+ } else if (!ctype_isconstval(df->info)) {
+ /* NYI: init bitfields and sub-structures. */
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ }
+ }
+ } else {
+ TRef dp;
+ single_init:
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata)));
+ if (J->base[1]) {
+ crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]);
+ } else {
+ TValue tv;
+ tv.u64 = 0;
+ crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv);
+ }
+ }
+ /* Handle __gc metamethod. */
+ fin = lj_ctype_meta(cts, id, MM_gc);
+ if (fin)
+ crec_finalizer(J, trcd, fin);
+ }
+}
+
+/* Record argument conversions. */
+static TRef crec_call_args(jit_State *J, RecordFFData *rd,
+ CTState *cts, CType *ct)
+{
+ TRef args[CCI_NARGS_MAX];
+ CTypeID fid;
+ MSize i, n;
+ TRef tr, *base;
+ cTValue *o;
+#if LJ_TARGET_X86
+#if LJ_ABI_WIN
+ TRef *arg0 = NULL, *arg1 = NULL;
+#endif
+ int ngpr = 0;
+ if (ctype_cconv(ct->info) == CTCC_THISCALL)
+ ngpr = 1;
+ else if (ctype_cconv(ct->info) == CTCC_FASTCALL)
+ ngpr = 2;
+#endif
+
+ /* Skip initial attributes. */
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) break;
+ fid = ctf->sib;
+ }
+ args[0] = TREF_NIL;
+ for (n = 0, base = J->base+1, o = rd->argv+1; *base; n++, base++, o++) {
+ CTypeID did;
+ CType *d;
+
+ if (n >= CCI_NARGS_MAX)
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+
+ if (fid) { /* Get argument type from field. */
+ CType *ctf = ctype_get(cts, fid);
+ fid = ctf->sib;
+ lua_assert(ctype_isfield(ctf->info));
+ did = ctype_cid(ctf->info);
+ } else {
+ if (!(ct->info & CTF_VARARG))
+ lj_trace_err(J, LJ_TRERR_NYICALL); /* Too many arguments. */
+ did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
+ }
+ d = ctype_raw(cts, did);
+ if (!(ctype_isnum(d->info) || ctype_isptr(d->info) ||
+ ctype_isenum(d->info)))
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ tr = crec_ct_tv(J, d, 0, *base, o);
+ if (ctype_isinteger_or_bool(d->info)) {
+ if (d->size < 4) {
+ if ((d->info & CTF_UNSIGNED))
+ tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_U8 : IRT_U16, 0);
+ else
+ tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT);
+ }
+ } else if (LJ_SOFTFP && ctype_isfp(d->info) && d->size > 4) {
+ lj_needsplit(J);
+ }
+#if LJ_TARGET_X86
+ /* 64 bit args must not end up in registers for fastcall/thiscall. */
+#if LJ_ABI_WIN
+ if (!ctype_isfp(d->info)) {
+ /* Sigh, the Windows/x86 ABI allows reordering across 64 bit args. */
+ if (tref_typerange(tr, IRT_I64, IRT_U64)) {
+ if (ngpr) {
+ arg0 = &args[n]; args[n++] = TREF_NIL; ngpr--;
+ if (ngpr) {
+ arg1 = &args[n]; args[n++] = TREF_NIL; ngpr--;
+ }
+ }
+ } else {
+ if (arg0) { *arg0 = tr; arg0 = NULL; n--; continue; }
+ if (arg1) { *arg1 = tr; arg1 = NULL; n--; continue; }
+ if (ngpr) ngpr--;
+ }
+ }
+#else
+ if (!ctype_isfp(d->info) && ngpr) {
+ if (tref_typerange(tr, IRT_I64, IRT_U64)) {
+ /* No reordering for other x86 ABIs. Simply add alignment args. */
+ do { args[n++] = TREF_NIL; } while (--ngpr);
+ } else {
+ ngpr--;
+ }
+ }
+#endif
+#endif
+ args[n] = tr;
+ }
+ tr = args[0];
+ for (i = 1; i < n; i++)
+ tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]);
+ return tr;
+}
+
+/* Create a snapshot for the caller, simulating a 'false' return value. */
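+/* The stack and recorder state are temporarily rewound to the caller frame,
+** the snapshot is taken there, and everything is restored afterwards. */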
+static void crec_snap_caller(jit_State *J)
+{
+ lua_State *L = J->L;
+ TValue *base = L->base, *top = L->top;
+ const BCIns *pc = J->pc;
+ TRef ftr = J->base[-1];
+ ptrdiff_t delta;
+ if (!frame_islua(base-1) || J->framedepth <= 0)
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ J->pc = frame_pc(base-1); delta = 1+bc_a(J->pc[-1]);
+ L->top = base; L->base = base - delta;
+ J->base[-1] = TREF_FALSE;
+ J->base -= delta; J->baseslot -= (BCReg)delta;
+ J->maxslot = (BCReg)delta; J->framedepth--;
+ lj_snap_add(J);
+ L->base = base; L->top = top;
+ J->framedepth++; J->maxslot = 1;
+ J->base += delta; J->baseslot += (BCReg)delta;
+ J->base[-1] = ftr; J->pc = pc;
+}
+
+/* Record function call. */
+static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ IRType tp = IRT_PTR;
+ if (ctype_isptr(ct->info)) {
+ tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isfunc(ct->info)) {
+ TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR);
+ CType *ctr = ctype_rawchild(cts, ct);
+ IRType t = crec_ct2irt(cts, ctr);
+ TRef tr;
+ TValue tv;
+ /* Check for blacklisted C functions that might call a callback. */
+ setlightudV(&tv,
+ cdata_getptr(cdataptr(cd), (LJ_64 && tp == IRT_P64) ? 8 : 4));
+ if (tvistrue(lj_tab_get(J->L, cts->miscmap, &tv)))
+ lj_trace_err(J, LJ_TRERR_BLACKL);
+ if (ctype_isvoid(ctr->info)) {
+ t = IRT_NIL;
+ rd->nres = 0;
+ } else if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) ||
+ ctype_isenum(ctr->info)) || t == IRT_CDATA) {
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ }
+ if ((ct->info & CTF_VARARG)
+#if LJ_TARGET_X86
+ || ctype_cconv(ct->info) != CTCC_CDECL
+#endif
+ )
+ func = emitir(IRT(IR_CARG, IRT_NIL), func,
+ lj_ir_kint(J, ctype_typeid(cts, ct)));
+ tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func);
+ if (ctype_isbool(ctr->info)) {
+ if (frame_islua(J->L->base-1) && bc_b(frame_pc(J->L->base-1)[-1]) == 1) {
+ /* Don't check result if ignored. */
+ tr = TREF_NIL;
+ } else {
+ crec_snap_caller(J);
+#if LJ_TARGET_X86ORX64
+ /* Note: only the x86/x64 backend supports U8 and only for EQ(tr, 0). */
+ lj_ir_set(J, IRTG(IR_NE, IRT_U8), tr, lj_ir_kint(J, 0));
+#else
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+#endif
+ J->postproc = LJ_POST_FIXGUARDSNAP;
+ tr = TREF_TRUE;
+ }
+ } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) ||
+ t == IRT_I64 || t == IRT_U64 || ctype_isenum(ctr->info)) {
+ TRef trid = lj_ir_kint(J, ctype_cid(ct->info));
+ tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr);
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ } else if (t == IRT_FLOAT || t == IRT_U32) {
+ tr = emitconv(tr, IRT_NUM, t, 0);
+ } else if (t == IRT_I8 || t == IRT_I16) {
+ tr = emitconv(tr, IRT_INT, t, IRCONV_SEXT);
+ } else if (t == IRT_U8 || t == IRT_U16) {
+ tr = emitconv(tr, IRT_INT, t, 0);
+ }
+ J->base[0] = tr;
+ J->needsnap = 1;
+ return 1;
+ }
+ return 0;
+}
+
+void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]);
+ CTypeID id = cd->ctypeid;
+ CType *ct;
+ cTValue *tv;
+ MMS mm = MM_call;
+ if (id == CTID_CTYPEID) {
+ id = crec_constructor(J, cd, J->base[0]);
+ mm = MM_new;
+ } else if (crec_call(J, rd, cd)) {
+ return;
+ }
+ /* Record ctype __call/__new metamethod. */
+ ct = ctype_raw(cts, id);
+ tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm);
+ if (tv) {
+ if (tvisfunc(tv)) {
+ J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
+ rd->nres = -1; /* Pending tailcall. */
+ return;
+ }
+ } else if (mm == MM_new) {
+ crec_alloc(J, rd, id);
+ return;
+ }
+ /* No metamethod or NYI: non-function metamethods. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+}
+
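+/* Record 64 bit integer arithmetic and comparisons. */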
+static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm)
+{
+ if (ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) {
+ IRType dt;
+ CTypeID id;
+ TRef tr;
+ MSize i;
+ IROp op;
+ lj_needsplit(J);
+ if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) ||
+ ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) {
+ dt = IRT_U64; id = CTID_UINT64;
+ } else {
+ dt = IRT_I64; id = CTID_INT64;
+ if (mm < MM_add &&
+ !((s[0]->info | s[1]->info) & CTF_FP) &&
+ s[0]->size == 4 && s[1]->size == 4) { /* Try to narrow comparison. */
+ if (!((s[0]->info ^ s[1]->info) & CTF_UNSIGNED) ||
+ (tref_isk(sp[1]) && IR(tref_ref(sp[1]))->i >= 0)) {
+ dt = (s[0]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT;
+ goto comp;
+ } else if (tref_isk(sp[0]) && IR(tref_ref(sp[0]))->i >= 0) {
+ dt = (s[1]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT;
+ goto comp;
+ }
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ IRType st = tref_type(sp[i]);
+ if (st == IRT_NUM || st == IRT_FLOAT)
+ sp[i] = emitconv(sp[i], dt, st, IRCONV_TRUNC|IRCONV_ANY);
+ else if (!(st == IRT_I64 || st == IRT_U64))
+ sp[i] = emitconv(sp[i], dt, IRT_INT,
+ (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
+ }
+ if (mm < MM_add) {
+ comp:
+ /* Assume true comparison. Fixup and emit pending guard later. */
+ if (mm == MM_eq) {
+ op = IR_EQ;
+ } else {
+ op = mm == MM_lt ? IR_LT : IR_LE;
+ if (dt == IRT_U32 || dt == IRT_U64)
+ op += (IR_ULT-IR_LT);
+ }
+ lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]);
+ }
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ }
+ return 0;
+}
+
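+/* Record pointer arithmetic, pointer difference and pointer comparisons. */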
+static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ctp = s[0];
+ if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
+ if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
+ (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
+ if (mm == MM_sub) { /* Pointer difference. */
+ TRef tr;
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
+ if (sz == 0 || (sz & (sz-1)) != 0)
+ return 0; /* NYI: integer division. */
+ tr = emitir(IRT(IR_SUB, IRT_INTP), sp[0], sp[1]);
+ tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz)));
+#if LJ_64
+ tr = emitconv(tr, IRT_NUM, IRT_INTP, 0);
+#endif
+ return tr;
+ } else { /* Pointer comparison (unsigned). */
+ /* Assume true comparison. Fixup and emit pending guard later. */
+ IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE;
+ lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ }
+ }
+ if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info)))
+ return 0;
+ } else if (mm == MM_add && ctype_isnum(ctp->info) &&
+ (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
+ TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */
+ ctp = s[1];
+ } else {
+ return 0;
+ }
+ {
+ TRef tr = sp[1];
+ IRType t = tref_type(tr);
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
+ CTypeID id;
+#if LJ_64
+ if (t == IRT_NUM || t == IRT_FLOAT)
+ tr = emitconv(tr, IRT_INTP, t, IRCONV_TRUNC|IRCONV_ANY);
+ else if (!(t == IRT_I64 || t == IRT_U64))
+ tr = emitconv(tr, IRT_INTP, IRT_INT,
+ ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT);
+#else
+ if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) {
+ tr = emitconv(tr, IRT_INTP, t,
+ (t == IRT_NUM || t == IRT_FLOAT) ?
+ IRCONV_TRUNC|IRCONV_ANY : 0);
+ }
+#endif
+ tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz));
+ tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr);
+ id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
+ CTSIZE_PTR);
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ }
+}
+
+/* Record ctype arithmetic metamethods. */
+static void crec_arith_meta(jit_State *J, CTState *cts, RecordFFData *rd)
+{
+ cTValue *tv = NULL;
+ if (J->base[0]) {
+ if (tviscdata(&rd->argv[0])) {
+ CTypeID id = argv2cdata(J, J->base[0], &rd->argv[0])->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, (MMS)rd->data);
+ }
+ if (!tv && J->base[1] && tviscdata(&rd->argv[1])) {
+ CTypeID id = argv2cdata(J, J->base[1], &rd->argv[1])->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, (MMS)rd->data);
+ }
+ }
+ if (tv) {
+ if (tvisfunc(tv)) {
+ J->base[-1] = lj_ir_kfunc(J, funcV(tv)) | TREF_FRAME;
+ rd->nres = -1; /* Pending tailcall. */
+ return;
+ } /* NYI: non-function metamethods. */
+ } else if ((MMS)rd->data == MM_eq) {
+ J->base[0] = TREF_FALSE;
+ return;
+ }
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+}
+
+void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef sp[2];
+ CType *s[2];
+ MSize i;
+ for (i = 0; i < 2; i++) {
+ TRef tr = J->base[i];
+ CType *ct = ctype_get(cts, CTID_DOUBLE);
+ if (!tr) {
+ goto trymeta;
+ } else if (tref_iscdata(tr)) {
+ CTypeID id = argv2cdata(J, tr, &rd->argv[i])->ctypeid;
+ IRType t;
+ ct = ctype_raw(cts, id);
+ t = crec_ct2irt(cts, ct);
+ if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR);
+ if (ctype_isref(ct->info)) {
+ ct = ctype_rawchild(cts, ct);
+ t = crec_ct2irt(cts, ct);
+ }
+ } else if (t == IRT_I64 || t == IRT_U64) {
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64);
+ lj_needsplit(J);
+ goto ok;
+ } else if (t == IRT_INT || t == IRT_U32) {
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ goto ok;
+ } else if (ctype_isfunc(ct->info)) {
+ tr = emitir(IRT(IR_FLOAD, IRT_PTR), tr, IRFL_CDATA_PTR);
+ ct = ctype_get(cts,
+ lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
+ goto ok;
+ } else {
+ tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata)));
+ }
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info)) {
+ if (t == IRT_CDATA) goto trymeta;
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ tr = emitir(IRT(IR_XLOAD, t), tr, 0);
+ } else if (!(ctype_isptr(ct->info) || ctype_isrefarray(ct->info))) {
+ goto trymeta;
+ }
+ } else if (tref_isnil(tr)) {
+ tr = lj_ir_kptr(J, NULL);
+ ct = ctype_get(cts, CTID_P_VOID);
+ } else if (tref_isinteger(tr)) {
+ ct = ctype_get(cts, CTID_INT32);
+ } else if (tref_isstr(tr)) {
+ TRef tr2 = J->base[1-i];
+ CTypeID id = argv2cdata(J, tr2, &rd->argv[1-i])->ctypeid;
+ ct = ctype_raw(cts, id);
+ if (ctype_isenum(ct->info)) { /* Match string against enum constant. */
+ GCstr *str = strV(&rd->argv[i]);
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, ct, str, &ofs);
+ if (cct && ctype_isconstval(cct->info)) {
+ /* Specialize to the name of the enum constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, str));
+ ct = ctype_child(cts, cct);
+ tr = lj_ir_kint(J, (int32_t)ofs);
+ } /* else: interpreter will throw. */
+ } /* else: interpreter will throw. */
+ } else if (!tref_isnum(tr)) {
+ goto trymeta;
+ }
+ ok:
+ s[i] = ct;
+ sp[i] = tr;
+ }
+ {
+ TRef tr;
+ if ((tr = crec_arith_int64(J, sp, s, (MMS)rd->data)) ||
+ (tr = crec_arith_ptr(J, sp, s, (MMS)rd->data))) {
+ J->base[0] = tr;
+ /* Fixup cdata comparisons, too. Avoids some cdata escapes. */
+ if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1) &&
+ !irt_isguard(J->guardemit)) {
+ const BCIns *pc = frame_contpc(J->L->base-1) - 1;
+ if (bc_op(*pc) <= BC_ISNEP) {
+ setframe_pc(&J2G(J)->tmptv, pc);
+ J2G(J)->tmptv.u32.lo = ((tref_istrue(tr) ^ bc_op(*pc)) & 1);
+ J->postproc = LJ_POST_FIXCOMP;
+ }
+ }
+ } else {
+ trymeta:
+ crec_arith_meta(J, cts, rd);
+ }
+ }
+}
+
+/* -- C library namespace metamethods ------------------------------------- */
+
+void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) &&
+ udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) {
+ CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0]));
+ GCstr *name = strV(&rd->argv[1]);
+ CType *ct;
+ CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
+ cTValue *tv = lj_tab_getstr(cl->cache, name);
+ rd->nres = rd->data;
+ if (id && tv && !tvisnil(tv)) {
+ /* Specialize to the symbol name and make the result a constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name));
+ if (ctype_isconstval(ct->info)) {
+ if (ct->size >= 0x80000000u &&
+ (ctype_child(cts, ct)->info & CTF_UNSIGNED))
+ J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size);
+ else
+ J->base[0] = lj_ir_kint(J, (int32_t)ct->size);
+ } else if (ctype_isextern(ct->info)) {
+ CTypeID sid = ctype_cid(ct->info);
+ void *sp = *(void **)cdataptr(cdataV(tv));
+ TRef ptr;
+ ct = ctype_raw(cts, sid);
+ if (LJ_64 && !checkptr32(sp))
+ ptr = lj_ir_kintp(J, (uintptr_t)sp);
+ else
+ ptr = lj_ir_kptr(J, sp);
+ if (rd->data) {
+ J->base[0] = crec_tv_ct(J, ct, sid, ptr);
+ } else {
+ J->needsnap = 1;
+ crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
+ }
+ } else {
+ J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA);
+ }
+ } else {
+ lj_trace_err(J, LJ_TRERR_NOCACHE);
+ }
+ } /* else: interpreter will throw. */
+}
+
+/* -- FFI library functions ----------------------------------------------- */
+
+static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval)
+{
+ return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval);
+}
+
+void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd)
+{
+ crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0]));
+}
+
+void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd)
+{
+ UNUSED(rd);
+ if (J->base[0])
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ J->base[0] = lj_ir_call(J, IRCALL_lj_vm_errno);
+}
+
+void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef tr = J->base[0];
+ if (tr) {
+ TRef trlen = J->base[1];
+ if (trlen) {
+ trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]);
+ } else {
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]);
+ trlen = lj_ir_call(J, IRCALL_strlen, tr);
+ }
+ J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2];
+ if (trdst && trsrc && (trlen || tref_isstr(trsrc))) {
+ trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
+ trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]);
+ if (trlen) {
+ trlen = crec_toint(J, cts, trlen, &rd->argv[2]);
+ } else {
+ trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN);
+ trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
+ }
+ rd->nres = 0;
+ crec_copy(J, trdst, trsrc, trlen, NULL);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef trdst = J->base[0], trlen = J->base[1], trfill = J->base[2];
+ if (trdst && trlen) {
+ CTSize step = 1;
+ if (tviscdata(&rd->argv[0])) { /* Get alignment of original destination. */
+ CTSize sz;
+ CType *ct = ctype_raw(cts, cdataV(&rd->argv[0])->ctypeid);
+ if (ctype_isptr(ct->info))
+ ct = ctype_rawchild(cts, ct);
+ step = (1u<<ctype_align(lj_ctype_info(cts, ctype_typeid(cts, ct), &sz)));
+ }
+ trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
+ trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
+ if (trfill)
+ trfill = crec_toint(J, cts, trfill, &rd->argv[2]);
+ else
+ trfill = lj_ir_kint(J, 0);
+ rd->nres = 0;
+ crec_fill(J, trdst, trlen, trfill, step);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd)
+{
+ if (tref_iscdata(J->base[0])) {
+ TRef trid = lj_ir_kint(J, argv2ctype(J, J->base[0], &rd->argv[0]));
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA),
+ lj_ir_kint(J, CTID_CTYPEID), trid);
+ } else {
+ setfuncV(J->L, &J->errinfo, J->fn);
+ lj_trace_err_info(J, LJ_TRERR_NYIFFU);
+ }
+}
+
+void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd)
+{
+ argv2ctype(J, J->base[0], &rd->argv[0]);
+ if (tref_iscdata(J->base[1])) {
+ argv2ctype(J, J->base[1], &rd->argv[1]);
+ J->postproc = LJ_POST_FIXBOOL;
+ J->base[0] = TREF_TRUE;
+ } else {
+ J->base[0] = TREF_FALSE;
+ }
+}
+
+void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd)
+{
+ if (tref_isstr(J->base[0])) {
+ /* Specialize to the ABI string to make the boolean result a constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0])));
+ J->postproc = LJ_POST_FIXBOOL;
+ J->base[0] = TREF_TRUE;
+ } else {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+/* Record ffi.sizeof(), ffi.alignof(), ffi.offsetof(). */
+void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd)
+{
+ CTypeID id = argv2ctype(J, J->base[0], &rd->argv[0]);
+ if (rd->data == FF_ffi_sizeof) {
+ CType *ct = lj_ctype_rawref(ctype_ctsG(J2G(J)), id);
+ if (ctype_isvltype(ct->info))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ } else if (rd->data == FF_ffi_offsetof) { /* Specialize to the field name. */
+ if (!tref_isstr(J->base[1]))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
+ rd->nres = 3; /* Just in case. */
+ }
+ J->postproc = LJ_POST_FIXCONST;
+ J->base[0] = J->base[1] = J->base[2] = TREF_NIL;
+}
+
+void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd)
+{
+ argv2cdata(J, J->base[0], &rd->argv[0]);
+ crec_finalizer(J, J->base[0], &rd->argv[1]);
+}
+
+/* -- Miscellaneous library functions ------------------------------------- */
+
+void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->ctypeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
+ if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 &&
+ !(ct->size == 4 && (ct->info & CTF_UNSIGNED)))
+ d = ctype_get(cts, CTID_INT32);
+ else
+ d = ctype_get(cts, CTID_DOUBLE);
+ J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]);
+ } else {
+ J->base[0] = TREF_NIL;
+ }
+}
+
+#undef IR
+#undef emitir
+#undef emitconv
+
+#endif
diff --git a/3rdparty/lua/src/lj_crecord.h b/3rdparty/lua/src/lj_crecord.h
index fbd1322..dea05f7 100644
--- a/3rdparty/lua/src/lj_crecord.h
+++ b/3rdparty/lua/src/lj_crecord.h
@@ -1,31 +1,31 @@
-/*
-** Trace recorder for C data operations.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CRECORD_H
-#define _LJ_CRECORD_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-#include "lj_ffrecord.h"
-
-#if LJ_HASJIT && LJ_HASFFI
-LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd);
-#endif
-
-#endif
+/*
+** Trace recorder for C data operations.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CRECORD_H
+#define _LJ_CRECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+#include "lj_ffrecord.h"
+
+#if LJ_HASJIT && LJ_HASFFI
+LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd);
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_ctype.c b/3rdparty/lua/src/lj_ctype.c
index 3e42c28..57a0d7c 100644
--- a/3rdparty/lua/src/lj_ctype.c
+++ b/3rdparty/lua/src/lj_ctype.c
@@ -1,634 +1,634 @@
-/*
-** C type management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_ctype.h"
-#include "lj_ccallback.h"
-
-/* -- C type definitions -------------------------------------------------- */
-
-/* Predefined typedefs. */
-#define CTTDDEF(_) \
- /* Vararg handling. */ \
- _("va_list", P_VOID) \
- _("__builtin_va_list", P_VOID) \
- _("__gnuc_va_list", P_VOID) \
- /* From stddef.h. */ \
- _("ptrdiff_t", INT_PSZ) \
- _("size_t", UINT_PSZ) \
- _("wchar_t", WCHAR) \
- /* Subset of stdint.h. */ \
- _("int8_t", INT8) \
- _("int16_t", INT16) \
- _("int32_t", INT32) \
- _("int64_t", INT64) \
- _("uint8_t", UINT8) \
- _("uint16_t", UINT16) \
- _("uint32_t", UINT32) \
- _("uint64_t", UINT64) \
- _("intptr_t", INT_PSZ) \
- _("uintptr_t", UINT_PSZ) \
- /* End of typedef list. */
-
-/* Keywords (only the ones we actually care for). */
-#define CTKWDEF(_) \
- /* Type specifiers. */ \
- _("void", -1, CTOK_VOID) \
- _("_Bool", 0, CTOK_BOOL) \
- _("bool", 1, CTOK_BOOL) \
- _("char", 1, CTOK_CHAR) \
- _("int", 4, CTOK_INT) \
- _("__int8", 1, CTOK_INT) \
- _("__int16", 2, CTOK_INT) \
- _("__int32", 4, CTOK_INT) \
- _("__int64", 8, CTOK_INT) \
- _("float", 4, CTOK_FP) \
- _("double", 8, CTOK_FP) \
- _("long", 0, CTOK_LONG) \
- _("short", 0, CTOK_SHORT) \
- _("_Complex", 0, CTOK_COMPLEX) \
- _("complex", 0, CTOK_COMPLEX) \
- _("__complex", 0, CTOK_COMPLEX) \
- _("__complex__", 0, CTOK_COMPLEX) \
- _("signed", 0, CTOK_SIGNED) \
- _("__signed", 0, CTOK_SIGNED) \
- _("__signed__", 0, CTOK_SIGNED) \
- _("unsigned", 0, CTOK_UNSIGNED) \
- /* Type qualifiers. */ \
- _("const", 0, CTOK_CONST) \
- _("__const", 0, CTOK_CONST) \
- _("__const__", 0, CTOK_CONST) \
- _("volatile", 0, CTOK_VOLATILE) \
- _("__volatile", 0, CTOK_VOLATILE) \
- _("__volatile__", 0, CTOK_VOLATILE) \
- _("restrict", 0, CTOK_RESTRICT) \
- _("__restrict", 0, CTOK_RESTRICT) \
- _("__restrict__", 0, CTOK_RESTRICT) \
- _("inline", 0, CTOK_INLINE) \
- _("__inline", 0, CTOK_INLINE) \
- _("__inline__", 0, CTOK_INLINE) \
- /* Storage class specifiers. */ \
- _("typedef", 0, CTOK_TYPEDEF) \
- _("extern", 0, CTOK_EXTERN) \
- _("static", 0, CTOK_STATIC) \
- _("auto", 0, CTOK_AUTO) \
- _("register", 0, CTOK_REGISTER) \
- /* GCC Attributes. */ \
- _("__extension__", 0, CTOK_EXTENSION) \
- _("__attribute", 0, CTOK_ATTRIBUTE) \
- _("__attribute__", 0, CTOK_ATTRIBUTE) \
- _("asm", 0, CTOK_ASM) \
- _("__asm", 0, CTOK_ASM) \
- _("__asm__", 0, CTOK_ASM) \
- /* MSVC Attributes. */ \
- _("__declspec", 0, CTOK_DECLSPEC) \
- _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \
- _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \
- _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \
- _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \
- _("__ptr32", 4, CTOK_PTRSZ) \
- _("__ptr64", 8, CTOK_PTRSZ) \
- /* Other type specifiers. */ \
- _("struct", 0, CTOK_STRUCT) \
- _("union", 0, CTOK_UNION) \
- _("enum", 0, CTOK_ENUM) \
- /* Operators. */ \
- _("sizeof", 0, CTOK_SIZEOF) \
- _("__alignof", 0, CTOK_ALIGNOF) \
- _("__alignof__", 0, CTOK_ALIGNOF) \
- /* End of keyword list. */
-
-/* Type info for predefined types. Size merged in. */
-static CTInfo lj_ctype_typeinfo[] = {
-#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)),
-#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id),
-#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)),
-CTTYDEF(CTTYINFODEF)
-CTTDDEF(CTTDINFODEF)
-CTKWDEF(CTKWINFODEF)
-#undef CTTYINFODEF
-#undef CTTDINFODEF
-#undef CTKWINFODEF
- 0
-};
-
-/* Predefined type names collected in a single string. */
-static const char * const lj_ctype_typenames =
-#define CTTDNAMEDEF(name, id) name "\0"
-#define CTKWNAMEDEF(name, sz, cds) name "\0"
-CTTDDEF(CTTDNAMEDEF)
-CTKWDEF(CTKWNAMEDEF)
-#undef CTTDNAMEDEF
-#undef CTKWNAMEDEF
-;
-
-#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1)
-#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
-#define CTTYPETAB_MIN CTTYPEINFO_NUM
-#else
-#define CTTYPETAB_MIN 128
-#endif
-
-/* -- C type interning ---------------------------------------------------- */
-
-#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK)
-#define ct_hashname(name) \
- (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK)
-
-/* Create new type element. */
-CTypeID lj_ctype_new(CTState *cts, CType **ctp)
-{
- CTypeID id = cts->top;
- CType *ct;
- lua_assert(cts->L);
- if (LJ_UNLIKELY(id >= cts->sizetab)) {
- if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
-#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
- ct = lj_mem_newvec(cts->L, id+1, CType);
- memcpy(ct, cts->tab, id*sizeof(CType));
- memset(cts->tab, 0, id*sizeof(CType));
- lj_mem_freevec(cts->g, cts->tab, cts->sizetab, CType);
- cts->tab = ct;
- cts->sizetab = id+1;
-#else
- lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
-#endif
- }
- cts->top = id+1;
- *ctp = ct = &cts->tab[id];
- ct->info = 0;
- ct->size = 0;
- ct->sib = 0;
- ct->next = 0;
- setgcrefnull(ct->name);
- return id;
-}
-
-/* Intern a type element. */
-CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size)
-{
- uint32_t h = ct_hashtype(info, size);
- CTypeID id = cts->hash[h];
- lua_assert(cts->L);
- while (id) {
- CType *ct = ctype_get(cts, id);
- if (ct->info == info && ct->size == size)
- return id;
- id = ct->next;
- }
- id = cts->top;
- if (LJ_UNLIKELY(id >= cts->sizetab)) {
- if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
- lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
- }
- cts->top = id+1;
- cts->tab[id].info = info;
- cts->tab[id].size = size;
- cts->tab[id].sib = 0;
- cts->tab[id].next = cts->hash[h];
- setgcrefnull(cts->tab[id].name);
- cts->hash[h] = (CTypeID1)id;
- return id;
-}
-
-/* Add type element to hash table. */
-static void ctype_addtype(CTState *cts, CType *ct, CTypeID id)
-{
- uint32_t h = ct_hashtype(ct->info, ct->size);
- ct->next = cts->hash[h];
- cts->hash[h] = (CTypeID1)id;
-}
-
-/* Add named element to hash table. */
-void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id)
-{
- uint32_t h = ct_hashname(gcref(ct->name));
- ct->next = cts->hash[h];
- cts->hash[h] = (CTypeID1)id;
-}
-
-/* Get a C type by name, matching the type mask. */
-CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask)
-{
- CTypeID id = cts->hash[ct_hashname(name)];
- while (id) {
- CType *ct = ctype_get(cts, id);
- if (gcref(ct->name) == obj2gco(name) &&
- ((tmask >> ctype_type(ct->info)) & 1)) {
- *ctp = ct;
- return id;
- }
- id = ct->next;
- }
- *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */
- return 0;
-}
-
-/* Get a struct/union/enum/function field by name. */
-CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, CTSize *ofs,
- CTInfo *qual)
-{
- while (ct->sib) {
- ct = ctype_get(cts, ct->sib);
- if (gcref(ct->name) == obj2gco(name)) {
- *ofs = ct->size;
- return ct;
- }
- if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
- CType *fct, *cct = ctype_child(cts, ct);
- CTInfo q = 0;
- while (ctype_isattrib(cct->info)) {
- if (ctype_attrib(cct->info) == CTA_QUAL) q |= cct->size;
- cct = ctype_child(cts, cct);
- }
- fct = lj_ctype_getfieldq(cts, cct, name, ofs, qual);
- if (fct) {
- if (qual) *qual |= q;
- *ofs += ct->size;
- return fct;
- }
- }
- }
- return NULL; /* Not found. */
-}
-
-/* -- C type information -------------------------------------------------- */
-
-/* Follow references and get raw type for a C type ID. */
-CType *lj_ctype_rawref(CTState *cts, CTypeID id)
-{
- CType *ct = ctype_get(cts, id);
- while (ctype_isattrib(ct->info) || ctype_isref(ct->info))
- ct = ctype_child(cts, ct);
- return ct;
-}
-
-/* Get size for a C type ID. Does NOT support VLA/VLS. */
-CTSize lj_ctype_size(CTState *cts, CTypeID id)
-{
- CType *ct = ctype_raw(cts, id);
- return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
-}
-
-/* Get size for a variable-length C type. Does NOT support other C types. */
-CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem)
-{
- uint64_t xsz = 0;
- if (ctype_isstruct(ct->info)) {
- CTypeID arrid = 0, fid = ct->sib;
- xsz = ct->size; /* Add the struct size. */
- while (fid) {
- CType *ctf = ctype_get(cts, fid);
- if (ctype_type(ctf->info) == CT_FIELD)
- arrid = ctype_cid(ctf->info); /* Remember last field of VLS. */
- fid = ctf->sib;
- }
- ct = ctype_raw(cts, arrid);
- }
- lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */
- ct = ctype_rawchild(cts, ct); /* Get array element. */
- lua_assert(ctype_hassize(ct->info));
- /* Calculate actual size of VLA and check for overflow. */
- xsz += (uint64_t)ct->size * nelem;
- return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID;
-}
-
-/* Get type, qualifiers, size and alignment for a C type ID. */
-CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp)
-{
- CTInfo qual = 0;
- CType *ct = ctype_get(cts, id);
- for (;;) {
- CTInfo info = ct->info;
- if (ctype_isenum(info)) {
- /* Follow child. Need to look at its attributes, too. */
- } else if (ctype_isattrib(info)) {
- if (ctype_isxattrib(info, CTA_QUAL))
- qual |= ct->size;
- else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED))
- qual |= CTFP_ALIGNED + CTALIGN(ct->size);
- } else {
- if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN);
- qual |= (info & ~(CTF_ALIGN|CTMASK_CID));
- lua_assert(ctype_hassize(info) || ctype_isfunc(info));
- *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size;
- break;
- }
- ct = ctype_get(cts, ctype_cid(info));
- }
- return qual;
-}
-
-/* Get ctype metamethod. */
-cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm)
-{
- CType *ct = ctype_get(cts, id);
- cTValue *tv;
- while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) {
- id = ctype_cid(ct->info);
- ct = ctype_get(cts, id);
- }
- if (ctype_isptr(ct->info) &&
- ctype_isfunc(ctype_get(cts, ctype_cid(ct->info))->info))
- tv = lj_tab_getstr(cts->miscmap, &cts->g->strempty);
- else
- tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
- if (tv && tvistab(tv) &&
- (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv))
- return tv;
- return NULL;
-}
-
-/* -- C type representation ----------------------------------------------- */
-
-/* Fixed max. length of a C type representation. */
-#define CTREPR_MAX 512
-
-typedef struct CTRepr {
- char *pb, *pe;
- CTState *cts;
- lua_State *L;
- int needsp;
- int ok;
- char buf[CTREPR_MAX];
-} CTRepr;
-
-/* Prepend string. */
-static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len)
-{
- char *p = ctr->pb;
- if (ctr->buf + len+1 > p) { ctr->ok = 0; return; }
- if (ctr->needsp) *--p = ' ';
- ctr->needsp = 1;
- p -= len;
- while (len-- > 0) p[len] = str[len];
- ctr->pb = p;
-}
-
-#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1)
-
-/* Prepend char. */
-static void ctype_prepc(CTRepr *ctr, int c)
-{
- if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; }
- *--ctr->pb = c;
-}
-
-/* Prepend number. */
-static void ctype_prepnum(CTRepr *ctr, uint32_t n)
-{
- char *p = ctr->pb;
- if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; }
- do { *--p = (char)('0' + n % 10); } while (n /= 10);
- ctr->pb = p;
- ctr->needsp = 0;
-}
-
-/* Append char. */
-static void ctype_appc(CTRepr *ctr, int c)
-{
- if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; }
- *ctr->pe++ = c;
-}
-
-/* Append number. */
-static void ctype_appnum(CTRepr *ctr, uint32_t n)
-{
- char buf[10];
- char *p = buf+sizeof(buf);
- char *q = ctr->pe;
- if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; }
- do { *--p = (char)('0' + n % 10); } while (n /= 10);
- do { *q++ = *p++; } while (p < buf+sizeof(buf));
- ctr->pe = q;
-}
-
-/* Prepend qualifiers. */
-static void ctype_prepqual(CTRepr *ctr, CTInfo info)
-{
- if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile");
- if ((info & CTF_CONST)) ctype_preplit(ctr, "const");
-}
-
-/* Prepend named type. */
-static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t)
-{
- if (gcref(ct->name)) {
- GCstr *str = gco2str(gcref(ct->name));
- ctype_prepstr(ctr, strdata(str), str->len);
- } else {
- if (ctr->needsp) ctype_prepc(ctr, ' ');
- ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct));
- ctr->needsp = 1;
- }
- ctype_prepstr(ctr, t, (MSize)strlen(t));
- ctype_prepqual(ctr, qual);
-}
-
-static void ctype_repr(CTRepr *ctr, CTypeID id)
-{
- CType *ct = ctype_get(ctr->cts, id);
- CTInfo qual = 0;
- int ptrto = 0;
- for (;;) {
- CTInfo info = ct->info;
- CTSize size = ct->size;
- switch (ctype_type(info)) {
- case CT_NUM:
- if ((info & CTF_BOOL)) {
- ctype_preplit(ctr, "bool");
- } else if ((info & CTF_FP)) {
- if (size == sizeof(double)) ctype_preplit(ctr, "double");
- else if (size == sizeof(float)) ctype_preplit(ctr, "float");
- else ctype_preplit(ctr, "long double");
- } else if (size == 1) {
- if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char");
- else if (CTF_UCHAR) ctype_preplit(ctr, "signed char");
- else ctype_preplit(ctr, "unsigned char");
- } else if (size < 8) {
- if (size == 4) ctype_preplit(ctr, "int");
- else ctype_preplit(ctr, "short");
- if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned");
- } else {
- ctype_preplit(ctr, "_t");
- ctype_prepnum(ctr, size*8);
- ctype_preplit(ctr, "int");
- if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u');
- }
- ctype_prepqual(ctr, (qual|info));
- return;
- case CT_VOID:
- ctype_preplit(ctr, "void");
- ctype_prepqual(ctr, (qual|info));
- return;
- case CT_STRUCT:
- ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? "union" : "struct");
- return;
- case CT_ENUM:
- if (id == CTID_CTYPEID) {
- ctype_preplit(ctr, "ctype");
- return;
- }
- ctype_preptype(ctr, ct, qual, "enum");
- return;
- case CT_ATTRIB:
- if (ctype_attrib(info) == CTA_QUAL) qual |= size;
- break;
- case CT_PTR:
- if ((info & CTF_REF)) {
- ctype_prepc(ctr, '&');
- } else {
- ctype_prepqual(ctr, (qual|info));
- if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32");
- ctype_prepc(ctr, '*');
- }
- qual = 0;
- ptrto = 1;
- ctr->needsp = 1;
- break;
- case CT_ARRAY:
- if (ctype_isrefarray(info)) {
- ctr->needsp = 1;
- if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
- ctype_appc(ctr, '[');
- if (size != CTSIZE_INVALID) {
- CTSize csize = ctype_child(ctr->cts, ct)->size;
- ctype_appnum(ctr, csize ? size/csize : 0);
- } else if ((info & CTF_VLA)) {
- ctype_appc(ctr, '?');
- }
- ctype_appc(ctr, ']');
- } else if ((info & CTF_COMPLEX)) {
- if (size == 2*sizeof(float)) ctype_preplit(ctr, "float");
- ctype_preplit(ctr, "complex");
- return;
- } else {
- ctype_preplit(ctr, ")))");
- ctype_prepnum(ctr, size);
- ctype_preplit(ctr, "__attribute__((vector_size(");
- }
- break;
- case CT_FUNC:
- ctr->needsp = 1;
- if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
- ctype_appc(ctr, '(');
- ctype_appc(ctr, ')');
- break;
- default:
- lua_assert(0);
- break;
- }
- ct = ctype_get(ctr->cts, ctype_cid(info));
- }
-}
-
-/* Return a printable representation of a C type. */
-GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name)
-{
- global_State *g = G(L);
- CTRepr ctr;
- ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2];
- ctr.cts = ctype_ctsG(g);
- ctr.L = L;
- ctr.ok = 1;
- ctr.needsp = 0;
- if (name) ctype_prepstr(&ctr, strdata(name), name->len);
- ctype_repr(&ctr, id);
- if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?");
- return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb);
-}
-
-/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. */
-GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned)
-{
- char buf[1+20+3];
- char *p = buf+sizeof(buf);
- int sign = 0;
- *--p = 'L'; *--p = 'L';
- if (isunsigned) {
- *--p = 'U';
- } else if ((int64_t)n < 0) {
- n = (uint64_t)-(int64_t)n;
- sign = 1;
- }
- do { *--p = (char)('0' + n % 10); } while (n /= 10);
- if (sign) *--p = '-';
- return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p));
-}
-
-/* Convert complex to string with 'i' or 'I' suffix. */
-GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size)
-{
- char buf[2*LJ_STR_NUMBUF+2+1];
- TValue re, im;
- size_t len;
- if (size == 2*sizeof(double)) {
- re.n = *(double *)sp; im.n = ((double *)sp)[1];
- } else {
- re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1];
- }
- len = lj_str_bufnum(buf, &re);
- if (!(im.u32.hi & 0x80000000u) || im.n != im.n) buf[len++] = '+';
- len += lj_str_bufnum(buf+len, &im);
- buf[len] = buf[len-1] >= 'a' ? 'I' : 'i';
- return lj_str_new(L, buf, len+1);
-}
-
-/* -- C type state -------------------------------------------------------- */
-
-/* Initialize C type table and state. */
-CTState *lj_ctype_init(lua_State *L)
-{
- CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState);
- CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType);
- const char *name = lj_ctype_typenames;
- CTypeID id;
- memset(cts, 0, sizeof(CTState));
- cts->tab = ct;
- cts->sizetab = CTTYPETAB_MIN;
- cts->top = CTTYPEINFO_NUM;
- cts->L = NULL;
- cts->g = G(L);
- for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) {
- CTInfo info = lj_ctype_typeinfo[id];
- ct->size = (CTSize)((int32_t)(info << 16) >> 26);
- ct->info = info & 0xffff03ffu;
- ct->sib = 0;
- if (ctype_type(info) == CT_KW || ctype_istypedef(info)) {
- size_t len = strlen(name);
- GCstr *str = lj_str_new(L, name, len);
- ctype_setname(ct, str);
- name += len+1;
- lj_ctype_addname(cts, ct, id);
- } else {
- setgcrefnull(ct->name);
- ct->next = 0;
- if (!ctype_isenum(info)) ctype_addtype(cts, ct, id);
- }
- }
- setmref(G(L)->ctype_state, cts);
- return cts;
-}
-
-/* Free C type table and state. */
-void lj_ctype_freestate(global_State *g)
-{
- CTState *cts = ctype_ctsG(g);
- if (cts) {
- lj_ccallback_mcode_free(cts);
- lj_mem_freevec(g, cts->tab, cts->sizetab, CType);
- lj_mem_freevec(g, cts->cb.cbid, cts->cb.sizeid, CTypeID1);
- lj_mem_freet(g, cts);
- }
-}
-
-#endif
+/*
+** C type management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_ccallback.h"
+
+/* -- C type definitions -------------------------------------------------- */
+
+/* Predefined typedefs. */
+#define CTTDDEF(_) \
+ /* Vararg handling. */ \
+ _("va_list", P_VOID) \
+ _("__builtin_va_list", P_VOID) \
+ _("__gnuc_va_list", P_VOID) \
+ /* From stddef.h. */ \
+ _("ptrdiff_t", INT_PSZ) \
+ _("size_t", UINT_PSZ) \
+ _("wchar_t", WCHAR) \
+ /* Subset of stdint.h. */ \
+ _("int8_t", INT8) \
+ _("int16_t", INT16) \
+ _("int32_t", INT32) \
+ _("int64_t", INT64) \
+ _("uint8_t", UINT8) \
+ _("uint16_t", UINT16) \
+ _("uint32_t", UINT32) \
+ _("uint64_t", UINT64) \
+ _("intptr_t", INT_PSZ) \
+ _("uintptr_t", UINT_PSZ) \
+ /* End of typedef list. */
+
+/* Keywords (only the ones we actually care for). */
+#define CTKWDEF(_) \
+ /* Type specifiers. */ \
+ _("void", -1, CTOK_VOID) \
+ _("_Bool", 0, CTOK_BOOL) \
+ _("bool", 1, CTOK_BOOL) \
+ _("char", 1, CTOK_CHAR) \
+ _("int", 4, CTOK_INT) \
+ _("__int8", 1, CTOK_INT) \
+ _("__int16", 2, CTOK_INT) \
+ _("__int32", 4, CTOK_INT) \
+ _("__int64", 8, CTOK_INT) \
+ _("float", 4, CTOK_FP) \
+ _("double", 8, CTOK_FP) \
+ _("long", 0, CTOK_LONG) \
+ _("short", 0, CTOK_SHORT) \
+ _("_Complex", 0, CTOK_COMPLEX) \
+ _("complex", 0, CTOK_COMPLEX) \
+ _("__complex", 0, CTOK_COMPLEX) \
+ _("__complex__", 0, CTOK_COMPLEX) \
+ _("signed", 0, CTOK_SIGNED) \
+ _("__signed", 0, CTOK_SIGNED) \
+ _("__signed__", 0, CTOK_SIGNED) \
+ _("unsigned", 0, CTOK_UNSIGNED) \
+ /* Type qualifiers. */ \
+ _("const", 0, CTOK_CONST) \
+ _("__const", 0, CTOK_CONST) \
+ _("__const__", 0, CTOK_CONST) \
+ _("volatile", 0, CTOK_VOLATILE) \
+ _("__volatile", 0, CTOK_VOLATILE) \
+ _("__volatile__", 0, CTOK_VOLATILE) \
+ _("restrict", 0, CTOK_RESTRICT) \
+ _("__restrict", 0, CTOK_RESTRICT) \
+ _("__restrict__", 0, CTOK_RESTRICT) \
+ _("inline", 0, CTOK_INLINE) \
+ _("__inline", 0, CTOK_INLINE) \
+ _("__inline__", 0, CTOK_INLINE) \
+ /* Storage class specifiers. */ \
+ _("typedef", 0, CTOK_TYPEDEF) \
+ _("extern", 0, CTOK_EXTERN) \
+ _("static", 0, CTOK_STATIC) \
+ _("auto", 0, CTOK_AUTO) \
+ _("register", 0, CTOK_REGISTER) \
+ /* GCC Attributes. */ \
+ _("__extension__", 0, CTOK_EXTENSION) \
+ _("__attribute", 0, CTOK_ATTRIBUTE) \
+ _("__attribute__", 0, CTOK_ATTRIBUTE) \
+ _("asm", 0, CTOK_ASM) \
+ _("__asm", 0, CTOK_ASM) \
+ _("__asm__", 0, CTOK_ASM) \
+ /* MSVC Attributes. */ \
+ _("__declspec", 0, CTOK_DECLSPEC) \
+ _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \
+ _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \
+ _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \
+ _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \
+ _("__ptr32", 4, CTOK_PTRSZ) \
+ _("__ptr64", 8, CTOK_PTRSZ) \
+ /* Other type specifiers. */ \
+ _("struct", 0, CTOK_STRUCT) \
+ _("union", 0, CTOK_UNION) \
+ _("enum", 0, CTOK_ENUM) \
+ /* Operators. */ \
+ _("sizeof", 0, CTOK_SIZEOF) \
+ _("__alignof", 0, CTOK_ALIGNOF) \
+ _("__alignof__", 0, CTOK_ALIGNOF) \
+ /* End of keyword list. */
+
+/* Type info for predefined types. Size merged in. */
+static CTInfo lj_ctype_typeinfo[] = {
+#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)),
+#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id),
+#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)),
+CTTYDEF(CTTYINFODEF)
+CTTDDEF(CTTDINFODEF)
+CTKWDEF(CTKWINFODEF)
+#undef CTTYINFODEF
+#undef CTTDINFODEF
+#undef CTKWINFODEF
+ 0
+};
+
+/* Predefined type names collected in a single string. */
+static const char * const lj_ctype_typenames =
+#define CTTDNAMEDEF(name, id) name "\0"
+#define CTKWNAMEDEF(name, sz, cds) name "\0"
+CTTDDEF(CTTDNAMEDEF)
+CTKWDEF(CTKWNAMEDEF)
+#undef CTTDNAMEDEF
+#undef CTKWNAMEDEF
+;
+
+#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1)
+#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
+#define CTTYPETAB_MIN CTTYPEINFO_NUM
+#else
+#define CTTYPETAB_MIN 128
+#endif
+
+/* -- C type interning ---------------------------------------------------- */
+
+#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK)
+#define ct_hashname(name) \
+ (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK)
+
+/* Create new type element. */
+CTypeID lj_ctype_new(CTState *cts, CType **ctp)
+{
+ CTypeID id = cts->top;
+ CType *ct;
+ lua_assert(cts->L);
+ if (LJ_UNLIKELY(id >= cts->sizetab)) {
+ if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
+#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
+ ct = lj_mem_newvec(cts->L, id+1, CType);
+ memcpy(ct, cts->tab, id*sizeof(CType));
+ memset(cts->tab, 0, id*sizeof(CType));
+ lj_mem_freevec(cts->g, cts->tab, cts->sizetab, CType);
+ cts->tab = ct;
+ cts->sizetab = id+1;
+#else
+ lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
+#endif
+ }
+ cts->top = id+1;
+ *ctp = ct = &cts->tab[id];
+ ct->info = 0;
+ ct->size = 0;
+ ct->sib = 0;
+ ct->next = 0;
+ setgcrefnull(ct->name);
+ return id;
+}
+
+/* Intern a type element. */
+CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size)
+{
+ uint32_t h = ct_hashtype(info, size);
+ CTypeID id = cts->hash[h];
+ lua_assert(cts->L);
+ while (id) {
+ CType *ct = ctype_get(cts, id);
+ if (ct->info == info && ct->size == size)
+ return id;
+ id = ct->next;
+ }
+ id = cts->top;
+ if (LJ_UNLIKELY(id >= cts->sizetab)) {
+ if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
+ lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
+ }
+ cts->top = id+1;
+ cts->tab[id].info = info;
+ cts->tab[id].size = size;
+ cts->tab[id].sib = 0;
+ cts->tab[id].next = cts->hash[h];
+ setgcrefnull(cts->tab[id].name);
+ cts->hash[h] = (CTypeID1)id;
+ return id;
+}
+
+/* Add type element to hash table. */
+static void ctype_addtype(CTState *cts, CType *ct, CTypeID id)
+{
+ uint32_t h = ct_hashtype(ct->info, ct->size);
+ ct->next = cts->hash[h];
+ cts->hash[h] = (CTypeID1)id;
+}
+
+/* Add named element to hash table. */
+void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id)
+{
+ uint32_t h = ct_hashname(gcref(ct->name));
+ ct->next = cts->hash[h];
+ cts->hash[h] = (CTypeID1)id;
+}
+
+/* Get a C type by name, matching the type mask. */
+CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask)
+{
+ CTypeID id = cts->hash[ct_hashname(name)];
+ while (id) {
+ CType *ct = ctype_get(cts, id);
+ if (gcref(ct->name) == obj2gco(name) &&
+ ((tmask >> ctype_type(ct->info)) & 1)) {
+ *ctp = ct;
+ return id;
+ }
+ id = ct->next;
+ }
+ *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */
+ return 0;
+}
+
+/* Get a struct/union/enum/function field by name. */
+CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, CTSize *ofs,
+ CTInfo *qual)
+{
+ while (ct->sib) {
+ ct = ctype_get(cts, ct->sib);
+ if (gcref(ct->name) == obj2gco(name)) {
+ *ofs = ct->size;
+ return ct;
+ }
+ if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ CType *fct, *cct = ctype_child(cts, ct);
+ CTInfo q = 0;
+ while (ctype_isattrib(cct->info)) {
+ if (ctype_attrib(cct->info) == CTA_QUAL) q |= cct->size;
+ cct = ctype_child(cts, cct);
+ }
+ fct = lj_ctype_getfieldq(cts, cct, name, ofs, qual);
+ if (fct) {
+ if (qual) *qual |= q;
+ *ofs += ct->size;
+ return fct;
+ }
+ }
+ }
+ return NULL; /* Not found. */
+}
+
+/* -- C type information -------------------------------------------------- */
+
+/* Follow references and get raw type for a C type ID. */
+CType *lj_ctype_rawref(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_get(cts, id);
+ while (ctype_isattrib(ct->info) || ctype_isref(ct->info))
+ ct = ctype_child(cts, ct);
+ return ct;
+}
+
+/* Get size for a C type ID. Does NOT support VLA/VLS. */
+CTSize lj_ctype_size(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_raw(cts, id);
+ return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
+}
+
+/* Get size for a variable-length C type. Does NOT support other C types. */
+CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem)
+{
+ uint64_t xsz = 0;
+ if (ctype_isstruct(ct->info)) {
+ CTypeID arrid = 0, fid = ct->sib;
+ xsz = ct->size; /* Add the struct size. */
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (ctype_type(ctf->info) == CT_FIELD)
+ arrid = ctype_cid(ctf->info); /* Remember last field of VLS. */
+ fid = ctf->sib;
+ }
+ ct = ctype_raw(cts, arrid);
+ }
+ lua_assert(ctype_isvlarray(ct->info)); /* Must be a VLA. */
+ ct = ctype_rawchild(cts, ct); /* Get array element. */
+ lua_assert(ctype_hassize(ct->info));
+ /* Calculate actual size of VLA and check for overflow. */
+ xsz += (uint64_t)ct->size * nelem;
+ return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID;
+}
+
+/* Get type, qualifiers, size and alignment for a C type ID. */
+CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp)
+{
+ CTInfo qual = 0;
+ CType *ct = ctype_get(cts, id);
+ for (;;) {
+ CTInfo info = ct->info;
+ if (ctype_isenum(info)) {
+ /* Follow child. Need to look at its attributes, too. */
+ } else if (ctype_isattrib(info)) {
+ if (ctype_isxattrib(info, CTA_QUAL))
+ qual |= ct->size;
+ else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED))
+ qual |= CTFP_ALIGNED + CTALIGN(ct->size);
+ } else {
+ if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN);
+ qual |= (info & ~(CTF_ALIGN|CTMASK_CID));
+ lua_assert(ctype_hassize(info) || ctype_isfunc(info));
+ *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size;
+ break;
+ }
+ ct = ctype_get(cts, ctype_cid(info));
+ }
+ return qual;
+}
+
+/* Get ctype metamethod. */
+cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm)
+{
+ CType *ct = ctype_get(cts, id);
+ cTValue *tv;
+ while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) {
+ id = ctype_cid(ct->info);
+ ct = ctype_get(cts, id);
+ }
+ if (ctype_isptr(ct->info) &&
+ ctype_isfunc(ctype_get(cts, ctype_cid(ct->info))->info))
+ tv = lj_tab_getstr(cts->miscmap, &cts->g->strempty);
+ else
+ tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
+ if (tv && tvistab(tv) &&
+ (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv))
+ return tv;
+ return NULL;
+}
+
+/* -- C type representation ----------------------------------------------- */
+
+/* Fixed max. length of a C type representation. */
+#define CTREPR_MAX 512
+
+typedef struct CTRepr {
+ char *pb, *pe;
+ CTState *cts;
+ lua_State *L;
+ int needsp;
+ int ok;
+ char buf[CTREPR_MAX];
+} CTRepr;
+
+/* Prepend string. */
+static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len)
+{
+ char *p = ctr->pb;
+ if (ctr->buf + len+1 > p) { ctr->ok = 0; return; }
+ if (ctr->needsp) *--p = ' ';
+ ctr->needsp = 1;
+ p -= len;
+ while (len-- > 0) p[len] = str[len];
+ ctr->pb = p;
+}
+
+#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1)
+
+/* Prepend char. */
+static void ctype_prepc(CTRepr *ctr, int c)
+{
+ if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; }
+ *--ctr->pb = c;
+}
+
+/* Prepend number. */
+static void ctype_prepnum(CTRepr *ctr, uint32_t n)
+{
+ char *p = ctr->pb;
+ if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ ctr->pb = p;
+ ctr->needsp = 0;
+}
+
+/* Append char. */
+static void ctype_appc(CTRepr *ctr, int c)
+{
+ if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; }
+ *ctr->pe++ = c;
+}
+
+/* Append number. */
+static void ctype_appnum(CTRepr *ctr, uint32_t n)
+{
+ char buf[10];
+ char *p = buf+sizeof(buf);
+ char *q = ctr->pe;
+ if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ do { *q++ = *p++; } while (p < buf+sizeof(buf));
+ ctr->pe = q;
+}
+
+/* Prepend qualifiers. */
+static void ctype_prepqual(CTRepr *ctr, CTInfo info)
+{
+ if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile");
+ if ((info & CTF_CONST)) ctype_preplit(ctr, "const");
+}
+
+/* Prepend named type. */
+static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t)
+{
+ if (gcref(ct->name)) {
+ GCstr *str = gco2str(gcref(ct->name));
+ ctype_prepstr(ctr, strdata(str), str->len);
+ } else {
+ if (ctr->needsp) ctype_prepc(ctr, ' ');
+ ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct));
+ ctr->needsp = 1;
+ }
+ ctype_prepstr(ctr, t, (MSize)strlen(t));
+ ctype_prepqual(ctr, qual);
+}
+
+static void ctype_repr(CTRepr *ctr, CTypeID id)
+{
+ CType *ct = ctype_get(ctr->cts, id);
+ CTInfo qual = 0;
+ int ptrto = 0;
+ for (;;) {
+ CTInfo info = ct->info;
+ CTSize size = ct->size;
+ switch (ctype_type(info)) {
+ case CT_NUM:
+ if ((info & CTF_BOOL)) {
+ ctype_preplit(ctr, "bool");
+ } else if ((info & CTF_FP)) {
+ if (size == sizeof(double)) ctype_preplit(ctr, "double");
+ else if (size == sizeof(float)) ctype_preplit(ctr, "float");
+ else ctype_preplit(ctr, "long double");
+ } else if (size == 1) {
+ if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char");
+ else if (CTF_UCHAR) ctype_preplit(ctr, "signed char");
+ else ctype_preplit(ctr, "unsigned char");
+ } else if (size < 8) {
+ if (size == 4) ctype_preplit(ctr, "int");
+ else ctype_preplit(ctr, "short");
+ if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned");
+ } else {
+ ctype_preplit(ctr, "_t");
+ ctype_prepnum(ctr, size*8);
+ ctype_preplit(ctr, "int");
+ if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u');
+ }
+ ctype_prepqual(ctr, (qual|info));
+ return;
+ case CT_VOID:
+ ctype_preplit(ctr, "void");
+ ctype_prepqual(ctr, (qual|info));
+ return;
+ case CT_STRUCT:
+ ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? "union" : "struct");
+ return;
+ case CT_ENUM:
+ if (id == CTID_CTYPEID) {
+ ctype_preplit(ctr, "ctype");
+ return;
+ }
+ ctype_preptype(ctr, ct, qual, "enum");
+ return;
+ case CT_ATTRIB:
+ if (ctype_attrib(info) == CTA_QUAL) qual |= size;
+ break;
+ case CT_PTR:
+ if ((info & CTF_REF)) {
+ ctype_prepc(ctr, '&');
+ } else {
+ ctype_prepqual(ctr, (qual|info));
+ if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32");
+ ctype_prepc(ctr, '*');
+ }
+ qual = 0;
+ ptrto = 1;
+ ctr->needsp = 1;
+ break;
+ case CT_ARRAY:
+ if (ctype_isrefarray(info)) {
+ ctr->needsp = 1;
+ if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
+ ctype_appc(ctr, '[');
+ if (size != CTSIZE_INVALID) {
+ CTSize csize = ctype_child(ctr->cts, ct)->size;
+ ctype_appnum(ctr, csize ? size/csize : 0);
+ } else if ((info & CTF_VLA)) {
+ ctype_appc(ctr, '?');
+ }
+ ctype_appc(ctr, ']');
+ } else if ((info & CTF_COMPLEX)) {
+ if (size == 2*sizeof(float)) ctype_preplit(ctr, "float");
+ ctype_preplit(ctr, "complex");
+ return;
+ } else {
+ ctype_preplit(ctr, ")))");
+ ctype_prepnum(ctr, size);
+ ctype_preplit(ctr, "__attribute__((vector_size(");
+ }
+ break;
+ case CT_FUNC:
+ ctr->needsp = 1;
+ if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
+ ctype_appc(ctr, '(');
+ ctype_appc(ctr, ')');
+ break;
+ default:
+ lua_assert(0);
+ break;
+ }
+ ct = ctype_get(ctr->cts, ctype_cid(info));
+ }
+}
+
+/* Return a printable representation of a C type. */
+GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name)
+{
+ global_State *g = G(L);
+ CTRepr ctr;
+ ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2];
+ ctr.cts = ctype_ctsG(g);
+ ctr.L = L;
+ ctr.ok = 1;
+ ctr.needsp = 0;
+ if (name) ctype_prepstr(&ctr, strdata(name), name->len);
+ ctype_repr(&ctr, id);
+ if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?");
+ return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb);
+}
+
+/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. */
+GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned)
+{
+ char buf[1+20+3];
+ char *p = buf+sizeof(buf);
+ int sign = 0;
+ *--p = 'L'; *--p = 'L';
+ if (isunsigned) {
+ *--p = 'U';
+ } else if ((int64_t)n < 0) {
+ n = (uint64_t)-(int64_t)n;
+ sign = 1;
+ }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ if (sign) *--p = '-';
+ return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p));
+}
+
+/* Convert complex to string with 'i' or 'I' suffix. */
+GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size)
+{
+ char buf[2*LJ_STR_NUMBUF+2+1];
+ TValue re, im;
+ size_t len;
+ if (size == 2*sizeof(double)) {
+ re.n = *(double *)sp; im.n = ((double *)sp)[1];
+ } else {
+ re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1];
+ }
+ len = lj_str_bufnum(buf, &re);
+ if (!(im.u32.hi & 0x80000000u) || im.n != im.n) buf[len++] = '+';
+ len += lj_str_bufnum(buf+len, &im);
+ buf[len] = buf[len-1] >= 'a' ? 'I' : 'i';
+ return lj_str_new(L, buf, len+1);
+}
+
+/* -- C type state -------------------------------------------------------- */
+
+/* Initialize C type table and state. */
+CTState *lj_ctype_init(lua_State *L)
+{
+ CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState);
+ CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType);
+ const char *name = lj_ctype_typenames;
+ CTypeID id;
+ memset(cts, 0, sizeof(CTState));
+ cts->tab = ct;
+ cts->sizetab = CTTYPETAB_MIN;
+ cts->top = CTTYPEINFO_NUM;
+ cts->L = NULL;
+ cts->g = G(L);
+ for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) {
+ CTInfo info = lj_ctype_typeinfo[id];
+ ct->size = (CTSize)((int32_t)(info << 16) >> 26);
+ ct->info = info & 0xffff03ffu;
+ ct->sib = 0;
+ if (ctype_type(info) == CT_KW || ctype_istypedef(info)) {
+ size_t len = strlen(name);
+ GCstr *str = lj_str_new(L, name, len);
+ ctype_setname(ct, str);
+ name += len+1;
+ lj_ctype_addname(cts, ct, id);
+ } else {
+ setgcrefnull(ct->name);
+ ct->next = 0;
+ if (!ctype_isenum(info)) ctype_addtype(cts, ct, id);
+ }
+ }
+ setmref(G(L)->ctype_state, cts);
+ return cts;
+}
+
+/* Free C type table and state. */
+void lj_ctype_freestate(global_State *g)
+{
+ CTState *cts = ctype_ctsG(g);
+ if (cts) {
+ lj_ccallback_mcode_free(cts);
+ lj_mem_freevec(g, cts->tab, cts->sizetab, CType);
+ lj_mem_freevec(g, cts->cb.cbid, cts->cb.sizeid, CTypeID1);
+ lj_mem_freet(g, cts);
+ }
+}
+
+#endif
diff --git a/3rdparty/lua/src/lj_ctype.h b/3rdparty/lua/src/lj_ctype.h
index 73f2fec..c43a2ba 100644
--- a/3rdparty/lua/src/lj_ctype.h
+++ b/3rdparty/lua/src/lj_ctype.h
@@ -1,461 +1,461 @@
-/*
-** C type management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CTYPE_H
-#define _LJ_CTYPE_H
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-
-#if LJ_HASFFI
-
-/* -- C type definitions -------------------------------------------------- */
-
-/* C type numbers. Highest 4 bits of C type info. ORDER CT. */
-enum {
- /* Externally visible types. */
- CT_NUM, /* Integer or floating-point numbers. */
- CT_STRUCT, /* Struct or union. */
- CT_PTR, /* Pointer or reference. */
- CT_ARRAY, /* Array or complex type. */
- CT_MAYCONVERT = CT_ARRAY,
- CT_VOID, /* Void type. */
- CT_ENUM, /* Enumeration. */
- CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */
- CT_FUNC, /* Function. */
- CT_TYPEDEF, /* Typedef. */
- CT_ATTRIB, /* Miscellaneous attributes. */
- /* Internal element types. */
- CT_FIELD, /* Struct/union field or function parameter. */
- CT_BITFIELD, /* Struct/union bitfield. */
- CT_CONSTVAL, /* Constant value. */
- CT_EXTERN, /* External reference. */
- CT_KW /* Keyword. */
-};
-
-LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR);
-LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT);
-
-/*
-** ---------- info ------------
-** |type flags... A cid | size | sib | next | name |
-** +----------------------------+--------+-------+-------+-------+--
-** |NUM BFvcUL.. A | size | | type | |
-** |STRUCT ..vcU..V A | size | field | name? | name? |
-** |PTR ..vcR... A cid | size | | type | |
-** |ARRAY VCvc...V A cid | size | | type | |
-** |VOID ..vc.... A | size | | type | |
-** |ENUM A cid | size | const | name? | name? |
-** |FUNC ....VS.. cc cid | nargs | field | name? | name? |
-** |TYPEDEF cid | | | name | name |
-** |ATTRIB attrnum cid | attr | sib? | type? | |
-** |FIELD cid | offset | field | | name? |
-** |BITFIELD B.vcU csz bsz pos | offset | field | | name? |
-** |CONSTVAL c cid | value | const | name | name |
-** |EXTERN cid | | sib? | name | name |
-** |KW tok | size | | name | name |
-** +----------------------------+--------+-------+-------+-------+--
-** ^^ ^^--- bits used for C type conversion dispatch
-*/
-
-/* C type info flags. TFFArrrr */
-#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */
-#define CTF_FP 0x04000000u /* Floating-point: NUM. */
-#define CTF_CONST 0x02000000u /* Const qualifier. */
-#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */
-#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */
-#define CTF_LONG 0x00400000u /* Long: NUM. */
-#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */
-#define CTF_REF 0x00800000u /* Reference: PTR. */
-#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */
-#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */
-#define CTF_UNION 0x00800000u /* Union: STRUCT. */
-#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */
-#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */
-
-#define CTF_QUAL (CTF_CONST|CTF_VOLATILE)
-#define CTF_ALIGN (CTMASK_ALIGN<<CTSHIFT_ALIGN)
-#define CTF_UCHAR ((char)-1 > 0 ? CTF_UNSIGNED : 0)
-
-/* Flags used in parser. .F.Ammvf cp->attr */
-#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */
-#define CTFP_PACKED 0x00000002u /* cp->attr */
-/* ...C...f cp->fattr */
-#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */
-
-/* C type info bitfields. */
-#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */
-#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */
-#define CTSHIFT_NUM 28
-#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */
-#define CTSHIFT_ALIGN 16
-#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */
-#define CTSHIFT_ATTRIB 16
-#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */
-#define CTSHIFT_CCONV 16
-#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */
-#define CTSHIFT_REGPARM 18
-/* Bitfields only used in parser. */
-#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */
-#define CTSHIFT_VSIZEP 4
-#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */
-#define CTSHIFT_MSIZEP 8
-
-/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */
-#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */
-#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */
-#define CTMASK_BITPOS 127
-#define CTMASK_BITBSZ 127
-#define CTMASK_BITCSZ 127
-#define CTSHIFT_BITPOS 0
-#define CTSHIFT_BITBSZ 8
-#define CTSHIFT_BITCSZ 16
-
-#define CTF_INSERT(info, field, val) \
- info = (info & ~(CTMASK_##field<<CTSHIFT_##field)) | \
- (((CTSize)(val) & CTMASK_##field) << CTSHIFT_##field)
-
-/* Calling conventions. ORDER CC */
-enum { CTCC_CDECL, CTCC_THISCALL, CTCC_FASTCALL, CTCC_STDCALL };
-
-/* Attribute numbers. */
-enum {
- CTA_NONE, /* Ignored attribute. Must be zero. */
- CTA_QUAL, /* Unmerged qualifiers. */
- CTA_ALIGN, /* Alignment override. */
- CTA_SUBTYPE, /* Transparent sub-type. */
- CTA_REDIR, /* Redirected symbol name. */
- CTA_BAD, /* To catch bad IDs. */
- CTA__MAX
-};
-
-/* Special sizes. */
-#define CTSIZE_INVALID 0xffffffffu
-
-typedef uint32_t CTInfo; /* Type info. */
-typedef uint32_t CTSize; /* Type size. */
-typedef uint32_t CTypeID; /* Type ID. */
-typedef uint16_t CTypeID1; /* Minimum-sized type ID. */
-
-/* C type table element. */
-typedef struct CType {
- CTInfo info; /* Type info. */
- CTSize size; /* Type size or other info. */
- CTypeID1 sib; /* Sibling element. */
- CTypeID1 next; /* Next element in hash chain. */
- GCRef name; /* Element name (GCstr). */
-} CType;
-
-#define CTHASH_SIZE 128 /* Number of hash anchors. */
-#define CTHASH_MASK (CTHASH_SIZE-1)
-
-/* Simplify target-specific configuration. Checked in lj_ccall.h. */
-#define CCALL_MAX_GPR 8
-#define CCALL_MAX_FPR 8
-
-typedef LJ_ALIGN(8) union FPRCBArg { double d; float f[2]; } FPRCBArg;
-
-/* C callback state. Defined here, to avoid dragging in lj_ccall.h. */
-
-typedef LJ_ALIGN(8) struct CCallback {
- FPRCBArg fpr[CCALL_MAX_FPR]; /* Arguments/results in FPRs. */
- intptr_t gpr[CCALL_MAX_GPR]; /* Arguments/results in GPRs. */
- intptr_t *stack; /* Pointer to arguments on stack. */
- void *mcode; /* Machine code for callback func. pointers. */
- CTypeID1 *cbid; /* Callback type table. */
- MSize sizeid; /* Size of callback type table. */
- MSize topid; /* Highest unused callback type table slot. */
- MSize slot; /* Current callback slot. */
-} CCallback;
-
-/* C type state. */
-typedef struct CTState {
- CType *tab; /* C type table. */
- CTypeID top; /* Current top of C type table. */
- MSize sizetab; /* Size of C type table. */
- lua_State *L; /* Lua state (needed for errors and allocations). */
- global_State *g; /* Global state. */
- GCtab *finalizer; /* Map of cdata to finalizer. */
- GCtab *miscmap; /* Map of -CTypeID to metatable and cb slot to func. */
- CCallback cb; /* Temporary callback state. */
- CTypeID1 hash[CTHASH_SIZE]; /* Hash anchors for C type table. */
-} CTState;
-
-#define CTINFO(ct, flags) (((CTInfo)(ct) << CTSHIFT_NUM) + (flags))
-#define CTALIGN(al) ((CTSize)(al) << CTSHIFT_ALIGN)
-#define CTATTRIB(at) ((CTInfo)(at) << CTSHIFT_ATTRIB)
-
-#define ctype_type(info) ((info) >> CTSHIFT_NUM)
-#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID))
-#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN)
-#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB)
-#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS)
-#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ)
-#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ)
-#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP)
-#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP)
-#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV)
-
-/* Simple type checks. */
-#define ctype_isnum(info) (ctype_type((info)) == CT_NUM)
-#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID)
-#define ctype_isptr(info) (ctype_type((info)) == CT_PTR)
-#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY)
-#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT)
-#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC)
-#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM)
-#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF)
-#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB)
-#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD)
-#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD)
-#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL)
-#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN)
-#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE)
-
-/* Combined type and flag checks. */
-#define ctype_isinteger(info) \
- (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0))
-#define ctype_isinteger_or_bool(info) \
- (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0))
-#define ctype_isbool(info) \
- (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL))
-#define ctype_isfp(info) \
- (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP))
-
-#define ctype_ispointer(info) \
- ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */
-#define ctype_isref(info) \
- (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF))
-
-#define ctype_isrefarray(info) \
- (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0))
-#define ctype_isvector(info) \
- (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR))
-#define ctype_iscomplex(info) \
- (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX))
-
-#define ctype_isvltype(info) \
- (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<<CTSHIFT_NUM))) == \
- CTINFO(CT_STRUCT, CTF_VLA)) /* VL array or VL struct. */
-#define ctype_isvlarray(info) \
- (((info) & (CTMASK_NUM|CTF_VLA)) == CTINFO(CT_ARRAY, CTF_VLA))
-
-#define ctype_isxattrib(info, at) \
- (((info) & (CTMASK_NUM|CTATTRIB(CTMASK_ATTRIB))) == \
- CTINFO(CT_ATTRIB, CTATTRIB(at)))
-
-/* Target-dependent sizes and alignments. */
-#if LJ_64
-#define CTSIZE_PTR 8
-#define CTALIGN_PTR CTALIGN(3)
-#else
-#define CTSIZE_PTR 4
-#define CTALIGN_PTR CTALIGN(2)
-#endif
-
-#define CTINFO_REF(ref) \
- CTINFO(CT_PTR, (CTF_CONST|CTF_REF|CTALIGN_PTR) + (ref))
-
-#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */
-
-/* -- Predefined types ---------------------------------------------------- */
-
-/* Target-dependent types. */
-#if LJ_TARGET_PPC || LJ_TARGET_PPCSPE
-#define CTTYDEFP(_) \
- _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2))
-#else
-#define CTTYDEFP(_)
-#endif
-
-/* Common types. */
-#define CTTYDEF(_) \
- _(NONE, 0, CT_ATTRIB, CTATTRIB(CTA_BAD)) \
- _(VOID, -1, CT_VOID, CTALIGN(0)) \
- _(CVOID, -1, CT_VOID, CTF_CONST|CTALIGN(0)) \
- _(BOOL, 1, CT_NUM, CTF_BOOL|CTF_UNSIGNED|CTALIGN(0)) \
- _(CCHAR, 1, CT_NUM, CTF_CONST|CTF_UCHAR|CTALIGN(0)) \
- _(INT8, 1, CT_NUM, CTALIGN(0)) \
- _(UINT8, 1, CT_NUM, CTF_UNSIGNED|CTALIGN(0)) \
- _(INT16, 2, CT_NUM, CTALIGN(1)) \
- _(UINT16, 2, CT_NUM, CTF_UNSIGNED|CTALIGN(1)) \
- _(INT32, 4, CT_NUM, CTALIGN(2)) \
- _(UINT32, 4, CT_NUM, CTF_UNSIGNED|CTALIGN(2)) \
- _(INT64, 8, CT_NUM, CTF_LONG|CTALIGN(3)) \
- _(UINT64, 8, CT_NUM, CTF_UNSIGNED|CTF_LONG|CTALIGN(3)) \
- _(FLOAT, 4, CT_NUM, CTF_FP|CTALIGN(2)) \
- _(DOUBLE, 8, CT_NUM, CTF_FP|CTALIGN(3)) \
- _(COMPLEX_FLOAT, 8, CT_ARRAY, CTF_COMPLEX|CTALIGN(2)|CTID_FLOAT) \
- _(COMPLEX_DOUBLE, 16, CT_ARRAY, CTF_COMPLEX|CTALIGN(3)|CTID_DOUBLE) \
- _(P_VOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_VOID) \
- _(P_CVOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CVOID) \
- _(P_CCHAR, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CCHAR) \
- _(A_CCHAR, -1, CT_ARRAY, CTF_CONST|CTALIGN(0)|CTID_CCHAR) \
- _(CTYPEID, 4, CT_ENUM, CTALIGN(2)|CTID_INT32) \
- CTTYDEFP(_) \
- /* End of type list. */
-
-/* Public predefined type IDs. */
-enum {
-#define CTTYIDDEF(id, sz, ct, info) CTID_##id,
-CTTYDEF(CTTYIDDEF)
-#undef CTTYIDDEF
- /* Predefined typedefs and keywords follow. */
- CTID_MAX = 65536
-};
-
-/* Target-dependent type IDs. */
-#if LJ_64
-#define CTID_INT_PSZ CTID_INT64
-#define CTID_UINT_PSZ CTID_UINT64
-#else
-#define CTID_INT_PSZ CTID_INT32
-#define CTID_UINT_PSZ CTID_UINT32
-#endif
-
-#if LJ_ABI_WIN
-#define CTID_WCHAR CTID_UINT16
-#elif LJ_TARGET_PPC
-#define CTID_WCHAR CTID_LINT32
-#else
-#define CTID_WCHAR CTID_INT32
-#endif
-
-/* -- C tokens and keywords ----------------------------------------------- */
-
-/* C lexer keywords. */
-#define CTOKDEF(_) \
- _(IDENT, "<identifier>") _(STRING, "<string>") \
- _(INTEGER, "<integer>") _(EOF, "<eof>") \
- _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \
- _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->")
-
-/* Simple declaration specifiers. */
-#define CDSDEF(_) \
- _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \
- _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \
- _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \
- _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER)
-
-/* C keywords. */
-#define CKWDEF(_) \
- CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \
- _(DECLSPEC) _(CCDECL) _(PTRSZ) \
- _(STRUCT) _(UNION) _(ENUM) \
- _(SIZEOF) _(ALIGNOF)
-
-/* C token numbers. */
-enum {
- CTOK_OFS = 255,
-#define CTOKNUM(name, sym) CTOK_##name,
-#define CKWNUM(name) CTOK_##name,
-CTOKDEF(CTOKNUM)
-CKWDEF(CKWNUM)
-#undef CTOKNUM
-#undef CKWNUM
- CTOK_FIRSTDECL = CTOK_VOID,
- CTOK_FIRSTSCL = CTOK_TYPEDEF,
- CTOK_LASTDECLFLAG = CTOK_REGISTER,
- CTOK_LASTDECL = CTOK_ENUM
-};
-
-/* Declaration specifier flags. */
-enum {
-#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)),
-CDSDEF(CDSFLAG)
-#undef CDSFLAG
- CDF__END
-};
-
-#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER)
-
-/* -- C type management --------------------------------------------------- */
-
-#define ctype_ctsG(g) (mref((g)->ctype_state, CTState))
-
-/* Get C type state. */
-static LJ_AINLINE CTState *ctype_cts(lua_State *L)
-{
- CTState *cts = ctype_ctsG(G(L));
- cts->L = L; /* Save L for errors and allocations. */
- return cts;
-}
-
-/* Save and restore state of C type table. */
-#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts)
-#define LJ_CTYPE_RESTORE(cts) \
- ((cts)->top = savects_.top, \
- memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash)))
-
-/* Check C type ID for validity when assertions are enabled. */
-static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id)
-{
- lua_assert(id > 0 && id < cts->top); UNUSED(cts);
- return id;
-}
-
-/* Get C type for C type ID. */
-static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id)
-{
- return &cts->tab[ctype_check(cts, id)];
-}
-
-/* Get C type ID for a C type. */
-#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab))
-
-/* Get child C type. */
-static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct)
-{
- lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) ||
- ctype_isbitfield(ct->info))); /* These don't have children. */
- return ctype_get(cts, ctype_cid(ct->info));
-}
-
-/* Get raw type for a C type ID. */
-static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id)
-{
- CType *ct = ctype_get(cts, id);
- while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct);
- return ct;
-}
-
-/* Get raw type of the child of a C type. */
-static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct)
-{
- do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info));
- return ct;
-}
-
-/* Set the name of a C type table element. */
-static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s)
-{
- /* NOBARRIER: mark string as fixed -- the C type table is never collected. */
- fixstring(s);
- setgcref(ct->name, obj2gco(s));
-}
-
-LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp);
-LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size);
-LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id);
-LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name,
- uint32_t tmask);
-LJ_FUNC CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name,
- CTSize *ofs, CTInfo *qual);
-#define lj_ctype_getfield(cts, ct, name, ofs) \
- lj_ctype_getfieldq((cts), (ct), (name), (ofs), NULL)
-LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id);
-LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id);
-LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem);
-LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp);
-LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm);
-LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name);
-LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned);
-LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size);
-LJ_FUNC CTState *lj_ctype_init(lua_State *L);
-LJ_FUNC void lj_ctype_freestate(global_State *g);
-
-#endif
-
-#endif
+/*
+** C type management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CTYPE_H
+#define _LJ_CTYPE_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+
+#if LJ_HASFFI
+
+/* -- C type definitions -------------------------------------------------- */
+
+/* C type numbers. Highest 4 bits of C type info. ORDER CT. */
+enum {
+ /* Externally visible types. */
+ CT_NUM, /* Integer or floating-point numbers. */
+ CT_STRUCT, /* Struct or union. */
+ CT_PTR, /* Pointer or reference. */
+ CT_ARRAY, /* Array or complex type. */
+ CT_MAYCONVERT = CT_ARRAY,
+ CT_VOID, /* Void type. */
+ CT_ENUM, /* Enumeration. */
+ CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */
+ CT_FUNC, /* Function. */
+ CT_TYPEDEF, /* Typedef. */
+ CT_ATTRIB, /* Miscellaneous attributes. */
+ /* Internal element types. */
+ CT_FIELD, /* Struct/union field or function parameter. */
+ CT_BITFIELD, /* Struct/union bitfield. */
+ CT_CONSTVAL, /* Constant value. */
+ CT_EXTERN, /* External reference. */
+ CT_KW /* Keyword. */
+};
+
+LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR);
+LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT);
+
+/*
+** ---------- info ------------
+** |type flags... A cid | size | sib | next | name |
+** +----------------------------+--------+-------+-------+-------+--
+** |NUM BFvcUL.. A | size | | type | |
+** |STRUCT ..vcU..V A | size | field | name? | name? |
+** |PTR ..vcR... A cid | size | | type | |
+** |ARRAY VCvc...V A cid | size | | type | |
+** |VOID ..vc.... A | size | | type | |
+** |ENUM A cid | size | const | name? | name? |
+** |FUNC ....VS.. cc cid | nargs | field | name? | name? |
+** |TYPEDEF cid | | | name | name |
+** |ATTRIB attrnum cid | attr | sib? | type? | |
+** |FIELD cid | offset | field | | name? |
+** |BITFIELD B.vcU csz bsz pos | offset | field | | name? |
+** |CONSTVAL c cid | value | const | name | name |
+** |EXTERN cid | | sib? | name | name |
+** |KW tok | size | | name | name |
+** +----------------------------+--------+-------+-------+-------+--
+** ^^ ^^--- bits used for C type conversion dispatch
+*/
+
+/* C type info flags. TFFArrrr */
+#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */
+#define CTF_FP 0x04000000u /* Floating-point: NUM. */
+#define CTF_CONST 0x02000000u /* Const qualifier. */
+#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */
+#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */
+#define CTF_LONG 0x00400000u /* Long: NUM. */
+#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */
+#define CTF_REF 0x00800000u /* Reference: PTR. */
+#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */
+#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */
+#define CTF_UNION 0x00800000u /* Union: STRUCT. */
+#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */
+#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */
+
+#define CTF_QUAL (CTF_CONST|CTF_VOLATILE)
+#define CTF_ALIGN (CTMASK_ALIGN<<CTSHIFT_ALIGN)
+#define CTF_UCHAR ((char)-1 > 0 ? CTF_UNSIGNED : 0)
+
+/* Flags used in parser. .F.Ammvf cp->attr */
+#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */
+#define CTFP_PACKED 0x00000002u /* cp->attr */
+/* ...C...f cp->fattr */
+#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */
+
+/* C type info bitfields. */
+#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */
+#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */
+#define CTSHIFT_NUM 28
+#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */
+#define CTSHIFT_ALIGN 16
+#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */
+#define CTSHIFT_ATTRIB 16
+#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */
+#define CTSHIFT_CCONV 16
+#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */
+#define CTSHIFT_REGPARM 18
+/* Bitfields only used in parser. */
+#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */
+#define CTSHIFT_VSIZEP 4
+#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */
+#define CTSHIFT_MSIZEP 8
+
+/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */
+#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */
+#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */
+#define CTMASK_BITPOS 127
+#define CTMASK_BITBSZ 127
+#define CTMASK_BITCSZ 127
+#define CTSHIFT_BITPOS 0
+#define CTSHIFT_BITBSZ 8
+#define CTSHIFT_BITCSZ 16
+
+#define CTF_INSERT(info, field, val) \
+ info = (info & ~(CTMASK_##field<<CTSHIFT_##field)) | \
+ (((CTSize)(val) & CTMASK_##field) << CTSHIFT_##field)
+
+/* Calling conventions. ORDER CC */
+enum { CTCC_CDECL, CTCC_THISCALL, CTCC_FASTCALL, CTCC_STDCALL };
+
+/* Attribute numbers. */
+enum {
+ CTA_NONE, /* Ignored attribute. Must be zero. */
+ CTA_QUAL, /* Unmerged qualifiers. */
+ CTA_ALIGN, /* Alignment override. */
+ CTA_SUBTYPE, /* Transparent sub-type. */
+ CTA_REDIR, /* Redirected symbol name. */
+ CTA_BAD, /* To catch bad IDs. */
+ CTA__MAX
+};
+
+/* Special sizes. */
+#define CTSIZE_INVALID 0xffffffffu
+
+typedef uint32_t CTInfo; /* Type info. */
+typedef uint32_t CTSize; /* Type size. */
+typedef uint32_t CTypeID; /* Type ID. */
+typedef uint16_t CTypeID1; /* Minimum-sized type ID. */
+
+/* C type table element. */
+typedef struct CType {
+ CTInfo info; /* Type info. */
+ CTSize size; /* Type size or other info. */
+ CTypeID1 sib; /* Sibling element. */
+ CTypeID1 next; /* Next element in hash chain. */
+ GCRef name; /* Element name (GCstr). */
+} CType;
+
+#define CTHASH_SIZE 128 /* Number of hash anchors. */
+#define CTHASH_MASK (CTHASH_SIZE-1)
+
+/* Simplify target-specific configuration. Checked in lj_ccall.h. */
+#define CCALL_MAX_GPR 8
+#define CCALL_MAX_FPR 8
+
+typedef LJ_ALIGN(8) union FPRCBArg { double d; float f[2]; } FPRCBArg;
+
+/* C callback state. Defined here, to avoid dragging in lj_ccall.h. */
+
+typedef LJ_ALIGN(8) struct CCallback {
+ FPRCBArg fpr[CCALL_MAX_FPR]; /* Arguments/results in FPRs. */
+ intptr_t gpr[CCALL_MAX_GPR]; /* Arguments/results in GPRs. */
+ intptr_t *stack; /* Pointer to arguments on stack. */
+ void *mcode; /* Machine code for callback func. pointers. */
+ CTypeID1 *cbid; /* Callback type table. */
+ MSize sizeid; /* Size of callback type table. */
+ MSize topid; /* Highest unused callback type table slot. */
+ MSize slot; /* Current callback slot. */
+} CCallback;
+
+/* C type state. */
+typedef struct CTState {
+ CType *tab; /* C type table. */
+ CTypeID top; /* Current top of C type table. */
+ MSize sizetab; /* Size of C type table. */
+ lua_State *L; /* Lua state (needed for errors and allocations). */
+ global_State *g; /* Global state. */
+ GCtab *finalizer; /* Map of cdata to finalizer. */
+ GCtab *miscmap; /* Map of -CTypeID to metatable and cb slot to func. */
+ CCallback cb; /* Temporary callback state. */
+ CTypeID1 hash[CTHASH_SIZE]; /* Hash anchors for C type table. */
+} CTState;
+
+#define CTINFO(ct, flags) (((CTInfo)(ct) << CTSHIFT_NUM) + (flags))
+#define CTALIGN(al) ((CTSize)(al) << CTSHIFT_ALIGN)
+#define CTATTRIB(at) ((CTInfo)(at) << CTSHIFT_ATTRIB)
+
+#define ctype_type(info) ((info) >> CTSHIFT_NUM)
+#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID))
+#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN)
+#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB)
+#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS)
+#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ)
+#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ)
+#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP)
+#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP)
+#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV)
+
+/* Simple type checks. */
+#define ctype_isnum(info) (ctype_type((info)) == CT_NUM)
+#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID)
+#define ctype_isptr(info) (ctype_type((info)) == CT_PTR)
+#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY)
+#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT)
+#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC)
+#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM)
+#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF)
+#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB)
+#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD)
+#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD)
+#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL)
+#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN)
+#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE)
+
+/* Combined type and flag checks. */
+#define ctype_isinteger(info) \
+ (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0))
+#define ctype_isinteger_or_bool(info) \
+ (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0))
+#define ctype_isbool(info) \
+ (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL))
+#define ctype_isfp(info) \
+ (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP))
+
+#define ctype_ispointer(info) \
+ ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */
+#define ctype_isref(info) \
+ (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF))
+
+#define ctype_isrefarray(info) \
+ (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0))
+#define ctype_isvector(info) \
+ (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR))
+#define ctype_iscomplex(info) \
+ (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX))
+
+#define ctype_isvltype(info) \
+ (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<<CTSHIFT_NUM))) == \
+ CTINFO(CT_STRUCT, CTF_VLA)) /* VL array or VL struct. */
+#define ctype_isvlarray(info) \
+ (((info) & (CTMASK_NUM|CTF_VLA)) == CTINFO(CT_ARRAY, CTF_VLA))
+
+#define ctype_isxattrib(info, at) \
+ (((info) & (CTMASK_NUM|CTATTRIB(CTMASK_ATTRIB))) == \
+ CTINFO(CT_ATTRIB, CTATTRIB(at)))
+
+/* Target-dependent sizes and alignments. */
+#if LJ_64
+#define CTSIZE_PTR 8
+#define CTALIGN_PTR CTALIGN(3)
+#else
+#define CTSIZE_PTR 4
+#define CTALIGN_PTR CTALIGN(2)
+#endif
+
+#define CTINFO_REF(ref) \
+ CTINFO(CT_PTR, (CTF_CONST|CTF_REF|CTALIGN_PTR) + (ref))
+
+#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */
+
+/* -- Predefined types ---------------------------------------------------- */
+
+/* Target-dependent types. */
+#if LJ_TARGET_PPC || LJ_TARGET_PPCSPE
+#define CTTYDEFP(_) \
+ _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2))
+#else
+#define CTTYDEFP(_)
+#endif
+
+/* Common types. */
+#define CTTYDEF(_) \
+ _(NONE, 0, CT_ATTRIB, CTATTRIB(CTA_BAD)) \
+ _(VOID, -1, CT_VOID, CTALIGN(0)) \
+ _(CVOID, -1, CT_VOID, CTF_CONST|CTALIGN(0)) \
+ _(BOOL, 1, CT_NUM, CTF_BOOL|CTF_UNSIGNED|CTALIGN(0)) \
+ _(CCHAR, 1, CT_NUM, CTF_CONST|CTF_UCHAR|CTALIGN(0)) \
+ _(INT8, 1, CT_NUM, CTALIGN(0)) \
+ _(UINT8, 1, CT_NUM, CTF_UNSIGNED|CTALIGN(0)) \
+ _(INT16, 2, CT_NUM, CTALIGN(1)) \
+ _(UINT16, 2, CT_NUM, CTF_UNSIGNED|CTALIGN(1)) \
+ _(INT32, 4, CT_NUM, CTALIGN(2)) \
+ _(UINT32, 4, CT_NUM, CTF_UNSIGNED|CTALIGN(2)) \
+ _(INT64, 8, CT_NUM, CTF_LONG|CTALIGN(3)) \
+ _(UINT64, 8, CT_NUM, CTF_UNSIGNED|CTF_LONG|CTALIGN(3)) \
+ _(FLOAT, 4, CT_NUM, CTF_FP|CTALIGN(2)) \
+ _(DOUBLE, 8, CT_NUM, CTF_FP|CTALIGN(3)) \
+ _(COMPLEX_FLOAT, 8, CT_ARRAY, CTF_COMPLEX|CTALIGN(2)|CTID_FLOAT) \
+ _(COMPLEX_DOUBLE, 16, CT_ARRAY, CTF_COMPLEX|CTALIGN(3)|CTID_DOUBLE) \
+ _(P_VOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_VOID) \
+ _(P_CVOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CVOID) \
+ _(P_CCHAR, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CCHAR) \
+ _(A_CCHAR, -1, CT_ARRAY, CTF_CONST|CTALIGN(0)|CTID_CCHAR) \
+ _(CTYPEID, 4, CT_ENUM, CTALIGN(2)|CTID_INT32) \
+ CTTYDEFP(_) \
+ /* End of type list. */
+
+/* Public predefined type IDs. */
+enum {
+#define CTTYIDDEF(id, sz, ct, info) CTID_##id,
+CTTYDEF(CTTYIDDEF)
+#undef CTTYIDDEF
+ /* Predefined typedefs and keywords follow. */
+ CTID_MAX = 65536
+};
+
+/* Target-dependent type IDs. */
+#if LJ_64
+#define CTID_INT_PSZ CTID_INT64
+#define CTID_UINT_PSZ CTID_UINT64
+#else
+#define CTID_INT_PSZ CTID_INT32
+#define CTID_UINT_PSZ CTID_UINT32
+#endif
+
+#if LJ_ABI_WIN
+#define CTID_WCHAR CTID_UINT16
+#elif LJ_TARGET_PPC
+#define CTID_WCHAR CTID_LINT32
+#else
+#define CTID_WCHAR CTID_INT32
+#endif
+
+/* -- C tokens and keywords ----------------------------------------------- */
+
+/* C lexer keywords. */
+#define CTOKDEF(_) \
+ _(IDENT, "<identifier>") _(STRING, "<string>") \
+ _(INTEGER, "<integer>") _(EOF, "<eof>") \
+ _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \
+ _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->")
+
+/* Simple declaration specifiers. */
+#define CDSDEF(_) \
+ _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \
+ _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \
+ _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \
+ _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER)
+
+/* C keywords. */
+#define CKWDEF(_) \
+ CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \
+ _(DECLSPEC) _(CCDECL) _(PTRSZ) \
+ _(STRUCT) _(UNION) _(ENUM) \
+ _(SIZEOF) _(ALIGNOF)
+
+/* C token numbers. */
+enum {
+ CTOK_OFS = 255,
+#define CTOKNUM(name, sym) CTOK_##name,
+#define CKWNUM(name) CTOK_##name,
+CTOKDEF(CTOKNUM)
+CKWDEF(CKWNUM)
+#undef CTOKNUM
+#undef CKWNUM
+ CTOK_FIRSTDECL = CTOK_VOID,
+ CTOK_FIRSTSCL = CTOK_TYPEDEF,
+ CTOK_LASTDECLFLAG = CTOK_REGISTER,
+ CTOK_LASTDECL = CTOK_ENUM
+};
+
+/* Declaration specifier flags. */
+enum {
+#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)),
+CDSDEF(CDSFLAG)
+#undef CDSFLAG
+ CDF__END
+};
+
+#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER)
+
+/* -- C type management --------------------------------------------------- */
+
+#define ctype_ctsG(g) (mref((g)->ctype_state, CTState))
+
+/* Get C type state. */
+static LJ_AINLINE CTState *ctype_cts(lua_State *L)
+{
+ CTState *cts = ctype_ctsG(G(L));
+ cts->L = L; /* Save L for errors and allocations. */
+ return cts;
+}
+
+/* Save and restore state of C type table. */
+#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts)
+#define LJ_CTYPE_RESTORE(cts) \
+ ((cts)->top = savects_.top, \
+ memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash)))
+
+/* Check C type ID for validity when assertions are enabled. */
+static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id)
+{
+ lua_assert(id > 0 && id < cts->top); UNUSED(cts);
+ return id;
+}
+
+/* Get C type for C type ID. */
+static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id)
+{
+ return &cts->tab[ctype_check(cts, id)];
+}
+
+/* Get C type ID for a C type. */
+#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab))
+
+/* Get child C type. */
+static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct)
+{
+ lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) ||
+ ctype_isbitfield(ct->info))); /* These don't have children. */
+ return ctype_get(cts, ctype_cid(ct->info));
+}
+
+/* Get raw type for a C type ID. */
+static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_get(cts, id);
+ while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct);
+ return ct;
+}
+
+/* Get raw type of the child of a C type. */
+static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct)
+{
+ do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info));
+ return ct;
+}
+
+/* Set the name of a C type table element. */
+static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s)
+{
+ /* NOBARRIER: mark string as fixed -- the C type table is never collected. */
+ fixstring(s);
+ setgcref(ct->name, obj2gco(s));
+}
+
+LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp);
+LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size);
+LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id);
+LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name,
+ uint32_t tmask);
+LJ_FUNC CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name,
+ CTSize *ofs, CTInfo *qual);
+#define lj_ctype_getfield(cts, ct, name, ofs) \
+ lj_ctype_getfieldq((cts), (ct), (name), (ofs), NULL)
+LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id);
+LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id);
+LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem);
+LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp);
+LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm);
+LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name);
+LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned);
+LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size);
+LJ_FUNC CTState *lj_ctype_init(lua_State *L);
+LJ_FUNC void lj_ctype_freestate(global_State *g);
+
+#endif
+
+#endif
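
For readers unfamiliar with the packed info word described by the table in lj_ctype.h above, here is a minimal standalone C sketch (constants copied from the header and renamed with an SK_ prefix to make clear this is an illustration, not part of the LuaJIT sources) showing how the 4-bit type number, the alignment field and the 16-bit child type ID are combined and extracted:

/*
 * Illustrative sketch of the CTInfo bit packing (values mirror the header).
 * The type number sits in the top 4 bits, per-type flags below it, and the
 * child type ID (cid) in the low 16 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define SK_CTSHIFT_NUM   28
#define SK_CTMASK_CID    0x0000ffffu
#define SK_CTSHIFT_ALIGN 16
#define SK_CTMASK_ALIGN  15

#define SK_CTINFO(ct, flags)  (((uint32_t)(ct) << SK_CTSHIFT_NUM) + (flags))
#define SK_CTALIGN(al)        ((uint32_t)(al) << SK_CTSHIFT_ALIGN)
#define SK_ctype_type(info)   ((info) >> SK_CTSHIFT_NUM)
#define SK_ctype_cid(info)    ((info) & SK_CTMASK_CID)
#define SK_ctype_align(info)  (((info) >> SK_CTSHIFT_ALIGN) & SK_CTMASK_ALIGN)

enum { SK_CT_NUM, SK_CT_STRUCT, SK_CT_PTR, SK_CT_ARRAY };  /* ORDER CT. */

int main(void)
{
  /* "Pointer to child type ID 7, pointer-aligned (2^3 on a 64-bit target)." */
  uint32_t info = SK_CTINFO(SK_CT_PTR, SK_CTALIGN(3) + 7);
  printf("type=%u cid=%u align=2^%u\n",
         (unsigned)SK_ctype_type(info), (unsigned)SK_ctype_cid(info),
         (unsigned)SK_ctype_align(info));
  /* Expected output: type=2 cid=7 align=2^3 */
  return 0;
}

The real header layers further fields (attributes, calling convention, bitfield position/size) into the same word with the CTMASK_*/CTSHIFT_* pairs and updates them via CTF_INSERT; the sketch only covers the three fields common to all entries.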
diff --git a/3rdparty/lua/src/lj_debug.c b/3rdparty/lua/src/lj_debug.c
index ec364a9..be7fb2b 100644
--- a/3rdparty/lua/src/lj_debug.c
+++ b/3rdparty/lua/src/lj_debug.c
@@ -1,605 +1,596 @@
-/*
-** Debugging and introspection.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_debug_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_state.h"
-#include "lj_frame.h"
-#include "lj_bc.h"
-#include "lj_vm.h"
-#if LJ_HASJIT
-#include "lj_jit.h"
-#endif
-
-/* -- Frames -------------------------------------------------------------- */
-
-/* Get frame corresponding to a level. */
-cTValue *lj_debug_frame(lua_State *L, int level, int *size)
-{
- cTValue *frame, *nextframe, *bot = tvref(L->stack);
- /* Traverse frames backwards. */
- for (nextframe = frame = L->base-1; frame > bot; ) {
- if (frame_gc(frame) == obj2gco(L))
- level++; /* Skip dummy frames. See lj_meta_call(). */
- if (level-- == 0) {
- *size = (int)(nextframe - frame);
- return frame; /* Level found. */
- }
- nextframe = frame;
- if (frame_islua(frame)) {
- frame = frame_prevl(frame);
- } else {
- if (frame_isvarg(frame))
- level++; /* Skip vararg pseudo-frame. */
- frame = frame_prevd(frame);
- }
- }
- *size = level;
- return NULL; /* Level not found. */
-}
-
-/* Invalid bytecode position. */
-#define NO_BCPOS (~(BCPos)0)
-
-/* Return bytecode position for function/frame or NO_BCPOS. */
-static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
-{
- const BCIns *ins;
- GCproto *pt;
- BCPos pos;
- lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD);
- if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */
- return NO_BCPOS;
- } else if (nextframe == NULL) { /* Lua function on top. */
- void *cf = cframe_raw(L->cframe);
- if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf))
- return NO_BCPOS;
- ins = cframe_pc(cf); /* Only happens during error/hook handling. */
- } else {
- if (frame_islua(nextframe)) {
- ins = frame_pc(nextframe);
- } else if (frame_iscont(nextframe)) {
- ins = frame_contpc(nextframe);
- } else {
- /* Lua function below errfunc/gc/hook: find cframe to get the PC. */
- void *cf = cframe_raw(L->cframe);
- TValue *f = L->base-1;
- for (;;) {
- if (cf == NULL)
- return NO_BCPOS;
- while (cframe_nres(cf) < 0) {
- if (f >= restorestack(L, -cframe_nres(cf)))
- break;
- cf = cframe_raw(cframe_prev(cf));
- if (cf == NULL)
- return NO_BCPOS;
- }
- if (f < nextframe)
- break;
- if (frame_islua(f)) {
- f = frame_prevl(f);
- } else {
- if (frame_isc(f) || (LJ_HASFFI && frame_iscont(f) &&
- (f-1)->u32.lo == LJ_CONT_FFI_CALLBACK))
- cf = cframe_raw(cframe_prev(cf));
- f = frame_prevd(f);
- }
- }
- ins = cframe_pc(cf);
- }
- }
- pt = funcproto(fn);
- pos = proto_bcpos(pt, ins) - 1;
-#if LJ_HASJIT
- if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */
- GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins));
- lua_assert(bc_isret(bc_op(ins[-1])));
- pos = proto_bcpos(pt, mref(T->startpc, const BCIns));
- }
-#endif
- return pos;
-}
-
-/* -- Line numbers -------------------------------------------------------- */
-
-/* Get line number for a bytecode position. */
-BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc)
-{
- const void *lineinfo = proto_lineinfo(pt);
- if (pc <= pt->sizebc && lineinfo) {
- BCLine first = pt->firstline;
- if (pc == pt->sizebc) return first + pt->numline;
- if (pc-- == 0) return first;
- if (pt->numline < 256)
- return first + (BCLine)((const uint8_t *)lineinfo)[pc];
- else if (pt->numline < 65536)
- return first + (BCLine)((const uint16_t *)lineinfo)[pc];
- else
- return first + (BCLine)((const uint32_t *)lineinfo)[pc];
- }
- return 0;
-}
-
-/* Get line number for function/frame. */
-static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe)
-{
- BCPos pc = debug_framepc(L, fn, nextframe);
- if (pc != NO_BCPOS) {
- GCproto *pt = funcproto(fn);
- lua_assert(pc <= pt->sizebc);
- return lj_debug_line(pt, pc);
- }
- return -1;
-}
-
-/* -- Variable names ------------------------------------------------------ */
-
-/* Read ULEB128 value. */
-static uint32_t debug_read_uleb128(const uint8_t **pp)
-{
- const uint8_t *p = *pp;
- uint32_t v = *p++;
- if (LJ_UNLIKELY(v >= 0x80)) {
- int sh = 0;
- v &= 0x7f;
- do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
- }
- *pp = p;
- return v;
-}
-
-/* Get name of a local variable from slot number and PC. */
-static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot)
-{
- const uint8_t *p = proto_varinfo(pt);
- if (p) {
- BCPos lastpc = 0;
- for (;;) {
- const char *name = (const char *)p;
- uint32_t vn = *p++;
- BCPos startpc, endpc;
- if (vn < VARNAME__MAX) {
- if (vn == VARNAME_END) break; /* End of varinfo. */
- } else {
- while (*p++) ; /* Skip over variable name string. */
- }
- lastpc = startpc = lastpc + debug_read_uleb128(&p);
- if (startpc > pc) break;
- endpc = startpc + debug_read_uleb128(&p);
- if (pc < endpc && slot-- == 0) {
- if (vn < VARNAME__MAX) {
-#define VARNAMESTR(name, str) str "\0"
- name = VARNAMEDEF(VARNAMESTR);
-#undef VARNAMESTR
- if (--vn) while (*name++ || --vn) ;
- }
- return name;
- }
- }
- }
- return NULL;
-}
-
-/* Get name of local variable from 1-based slot number and function/frame. */
-static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
- const char **name, BCReg slot1)
-{
- uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
- uint32_t size = (uint32_t)ar->i_ci >> 16;
- TValue *frame = tvref(L->stack) + offset;
- TValue *nextframe = size ? frame + size : NULL;
- GCfunc *fn = frame_func(frame);
- BCPos pc = debug_framepc(L, fn, nextframe);
- if (!nextframe) nextframe = L->top;
- if ((int)slot1 < 0) { /* Negative slot number is for varargs. */
- if (pc != NO_BCPOS) {
- GCproto *pt = funcproto(fn);
- if ((pt->flags & PROTO_VARARG)) {
- slot1 = pt->numparams + (BCReg)(-(int)slot1);
- if (frame_isvarg(frame)) { /* Vararg frame has been set up? (pc!=0) */
- nextframe = frame;
- frame = frame_prevd(frame);
- }
- if (frame + slot1 < nextframe) {
- *name = "(*vararg)";
- return frame+slot1;
- }
- }
- }
- return NULL;
- }
- if (pc != NO_BCPOS &&
- (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL)
- ;
- else if (slot1 > 0 && frame + slot1 < nextframe)
- *name = "(*temporary)";
- return frame+slot1;
-}
-
-/* Get name of upvalue. */
-const char *lj_debug_uvname(GCproto *pt, uint32_t idx)
-{
- const uint8_t *p = proto_uvinfo(pt);
- lua_assert(idx < pt->sizeuv);
- if (!p) return "";
- if (idx) while (*p++ || --idx) ;
- return (const char *)p;
-}
-
-/* Get name and value of upvalue. */
-const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp)
-{
- if (tvisfunc(o)) {
- GCfunc *fn = funcV(o);
- if (isluafunc(fn)) {
- GCproto *pt = funcproto(fn);
- if (idx < pt->sizeuv) {
- *tvp = uvval(&gcref(fn->l.uvptr[idx])->uv);
- return lj_debug_uvname(pt, idx);
- }
- } else {
- if (idx < fn->c.nupvalues) {
- *tvp = &fn->c.upvalue[idx];
- return "";
- }
- }
- }
- return NULL;
-}
-
-/* Deduce name of an object from slot number and PC. */
-const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot,
- const char **name)
-{
- const char *lname;
-restart:
- lname = debug_varname(pt, proto_bcpos(pt, ip), slot);
- if (lname != NULL) { *name = lname; return "local"; }
- while (--ip > proto_bc(pt)) {
- BCIns ins = *ip;
- BCOp op = bc_op(ins);
- BCReg ra = bc_a(ins);
- if (bcmode_a(op) == BCMbase) {
- if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins)))
- return NULL;
- } else if (bcmode_a(op) == BCMdst && ra == slot) {
- switch (bc_op(ins)) {
- case BC_MOV:
- if (ra == slot) { slot = bc_d(ins); goto restart; }
- break;
- case BC_GGET:
- *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins))));
- return "global";
- case BC_TGETS:
- *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins))));
- if (ip > proto_bc(pt)) {
- BCIns insp = ip[-1];
- if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1 &&
- bc_d(insp) == bc_b(ins))
- return "method";
- }
- return "field";
- case BC_UGET:
- *name = lj_debug_uvname(pt, bc_d(ins));
- return "upvalue";
- default:
- return NULL;
- }
- }
- }
- return NULL;
-}
-
-/* Deduce function name from caller of a frame. */
-const char *lj_debug_funcname(lua_State *L, TValue *frame, const char **name)
-{
- TValue *pframe;
- GCfunc *fn;
- BCPos pc;
- if (frame <= tvref(L->stack))
- return NULL;
- if (frame_isvarg(frame))
- frame = frame_prevd(frame);
- pframe = frame_prev(frame);
- fn = frame_func(pframe);
- pc = debug_framepc(L, fn, frame);
- if (pc != NO_BCPOS) {
- GCproto *pt = funcproto(fn);
- const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)];
- MMS mm = bcmode_mm(bc_op(*ip));
- if (mm == MM_call) {
- BCReg slot = bc_a(*ip);
- if (bc_op(*ip) == BC_ITERC) slot -= 3;
- return lj_debug_slotname(pt, ip, slot, name);
- } else if (mm != MM__MAX) {
- *name = strdata(mmname_str(G(L), mm));
- return "metamethod";
- }
- }
- return NULL;
-}
-
-/* -- Source code locations ----------------------------------------------- */
-
-/* Generate shortened source name. */
-void lj_debug_shortname(char *out, GCstr *str)
-{
- const char *src = strdata(str);
- if (*src == '=') {
- strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */
- out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */
- } else if (*src == '@') { /* Output "source", or "...source". */
- size_t len = str->len-1;
- src++; /* Skip the `@' */
- if (len >= LUA_IDSIZE) {
- src += len-(LUA_IDSIZE-4); /* Get last part of file name. */
- *out++ = '.'; *out++ = '.'; *out++ = '.';
- }
- strcpy(out, src);
- } else { /* Output [string "string"]. */
- size_t len; /* Length, up to first control char. */
- for (len = 0; len < LUA_IDSIZE-12; len++)
- if (((const unsigned char *)src)[len] < ' ') break;
- strcpy(out, "[string \""); out += 9;
- if (src[len] != '\0') { /* Must truncate? */
- if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15;
- strncpy(out, src, len); out += len;
- strcpy(out, "..."); out += 3;
- } else {
- strcpy(out, src); out += len;
- }
- strcpy(out, "\"]");
- }
-}
-
-/* Add current location of a frame to error message. */
-void lj_debug_addloc(lua_State *L, const char *msg,
- cTValue *frame, cTValue *nextframe)
-{
- if (frame) {
- GCfunc *fn = frame_func(frame);
- if (isluafunc(fn)) {
- BCLine line = debug_frameline(L, fn, nextframe);
- if (line >= 0) {
- char buf[LUA_IDSIZE];
- lj_debug_shortname(buf, proto_chunkname(funcproto(fn)));
- lj_str_pushf(L, "%s:%d: %s", buf, line, msg);
- return;
- }
- }
- }
- lj_str_pushf(L, "%s", msg);
-}
-
-/* Push location string for a bytecode position to Lua stack. */
-void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc)
-{
- GCstr *name = proto_chunkname(pt);
- const char *s = strdata(name);
- MSize i, len = name->len;
- BCLine line = lj_debug_line(pt, pc);
- if (*s == '@') {
- s++; len--;
- for (i = len; i > 0; i--)
- if (s[i] == '/' || s[i] == '\\') {
- s += i+1;
- break;
- }
- lj_str_pushf(L, "%s:%d", s, line);
- } else if (len > 40) {
- lj_str_pushf(L, "%p:%d", pt, line);
- } else if (*s == '=') {
- lj_str_pushf(L, "%s:%d", s+1, line);
- } else {
- lj_str_pushf(L, "\"%s\":%d", s, line);
- }
-}
-
-/* -- Public debug API ---------------------------------------------------- */
-
-/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */
-
-LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n)
-{
- const char *name = NULL;
- if (ar) {
- TValue *o = debug_localname(L, ar, &name, (BCReg)n);
- if (name) {
- copyTV(L, L->top, o);
- incr_top(L);
- }
- } else if (tvisfunc(L->top-1) && isluafunc(funcV(L->top-1))) {
- name = debug_varname(funcproto(funcV(L->top-1)), 0, (BCReg)n-1);
- }
- return name;
-}
-
-LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n)
-{
- const char *name = NULL;
- TValue *o = debug_localname(L, ar, &name, (BCReg)n);
- if (name)
- copyTV(L, o, L->top-1);
- L->top--;
- return name;
-}
-
-int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext)
-{
- int opt_f = 0, opt_L = 0;
- TValue *frame = NULL;
- TValue *nextframe = NULL;
- GCfunc *fn;
- if (*what == '>') {
- TValue *func = L->top - 1;
- api_check(L, tvisfunc(func));
- fn = funcV(func);
- L->top--;
- what++;
- } else {
- uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
- uint32_t size = (uint32_t)ar->i_ci >> 16;
- lua_assert(offset != 0);
- frame = tvref(L->stack) + offset;
- if (size) nextframe = frame + size;
- lua_assert(frame <= tvref(L->maxstack) &&
- (!nextframe || nextframe <= tvref(L->maxstack)));
- fn = frame_func(frame);
- lua_assert(fn->c.gct == ~LJ_TFUNC);
- }
- for (; *what; what++) {
- if (*what == 'S') {
- if (isluafunc(fn)) {
- GCproto *pt = funcproto(fn);
- BCLine firstline = pt->firstline;
- GCstr *name = proto_chunkname(pt);
- ar->source = strdata(name);
- lj_debug_shortname(ar->short_src, name);
- ar->linedefined = (int)firstline;
- ar->lastlinedefined = (int)(firstline + pt->numline);
- ar->what = (firstline || !pt->numline) ? "Lua" : "main";
- } else {
- ar->source = "=[C]";
- ar->short_src[0] = '[';
- ar->short_src[1] = 'C';
- ar->short_src[2] = ']';
- ar->short_src[3] = '\0';
- ar->linedefined = -1;
- ar->lastlinedefined = -1;
- ar->what = "C";
- }
- } else if (*what == 'l') {
- ar->currentline = frame ? debug_frameline(L, fn, nextframe) : -1;
- } else if (*what == 'u') {
- ar->nups = fn->c.nupvalues;
- if (ext) {
- if (isluafunc(fn)) {
- GCproto *pt = funcproto(fn);
- ar->nparams = pt->numparams;
- ar->isvararg = !!(pt->flags & PROTO_VARARG);
- } else {
- ar->nparams = 0;
- ar->isvararg = 1;
- }
- }
- } else if (*what == 'n') {
- ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL;
- if (ar->namewhat == NULL) {
- ar->namewhat = "";
- ar->name = NULL;
- }
- } else if (*what == 'f') {
- opt_f = 1;
- } else if (*what == 'L') {
- opt_L = 1;
- } else {
- return 0; /* Bad option. */
- }
- }
- if (opt_f) {
- setfuncV(L, L->top, fn);
- incr_top(L);
- }
- if (opt_L) {
- if (isluafunc(fn)) {
- GCtab *t = lj_tab_new(L, 0, 0);
- GCproto *pt = funcproto(fn);
- const void *lineinfo = proto_lineinfo(pt);
- if (lineinfo) {
- BCLine first = pt->firstline;
- int sz = pt->numline < 256 ? 1 : pt->numline < 65536 ? 2 : 4;
- MSize i, szl = pt->sizebc-1;
- for (i = 0; i < szl; i++) {
- BCLine line = first +
- (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] :
- sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] :
- (BCLine)((const uint32_t *)lineinfo)[i]);
- setboolV(lj_tab_setint(L, t, line), 1);
- }
- }
- settabV(L, L->top, t);
- } else {
- setnilV(L->top);
- }
- incr_top(L);
- }
- return 1; /* Ok. */
-}
-
-LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar)
-{
- return lj_debug_getinfo(L, what, (lj_Debug *)ar, 0);
-}
-
-LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar)
-{
- int size;
- cTValue *frame = lj_debug_frame(L, level, &size);
- if (frame) {
- ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack));
- return 1;
- } else {
- ar->i_ci = level - size;
- return 0;
- }
-}
-
-/* Number of frames for the leading and trailing part of a traceback. */
-#define TRACEBACK_LEVELS1 12
-#define TRACEBACK_LEVELS2 10
-
-LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
- int level)
-{
- int top = (int)(L->top - L->base);
- int lim = TRACEBACK_LEVELS1;
- lua_Debug ar;
- if (msg) lua_pushfstring(L, "%s\n", msg);
- lua_pushliteral(L, "stack traceback:");
- while (lua_getstack(L1, level++, &ar)) {
- GCfunc *fn;
- if (level > lim) {
- if (!lua_getstack(L1, level + TRACEBACK_LEVELS2, &ar)) {
- level--;
- } else {
- lua_pushliteral(L, "\n\t...");
- lua_getstack(L1, -10, &ar);
- level = ar.i_ci - TRACEBACK_LEVELS2;
- }
- lim = 2147483647;
- continue;
- }
- lua_getinfo(L1, "Snlf", &ar);
- fn = funcV(L1->top-1); L1->top--;
- if (isffunc(fn) && !*ar.namewhat)
- lua_pushfstring(L, "\n\t[builtin#%d]:", fn->c.ffid);
- else
- lua_pushfstring(L, "\n\t%s:", ar.short_src);
- if (ar.currentline > 0)
- lua_pushfstring(L, "%d:", ar.currentline);
- if (*ar.namewhat) {
- lua_pushfstring(L, " in function " LUA_QS, ar.name);
- } else {
- if (*ar.what == 'm') {
- lua_pushliteral(L, " in main chunk");
- } else if (*ar.what == 'C') {
- lua_pushfstring(L, " at %p", fn->c.f);
- } else {
- lua_pushfstring(L, " in function <%s:%d>",
- ar.short_src, ar.linedefined);
- }
- }
- if ((int)(L->top - L->base) - top >= 15)
- lua_concat(L, (int)(L->top - L->base) - top);
- }
- lua_concat(L, (int)(L->top - L->base) - top);
-}
-
+/*
+** Debugging and introspection.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_debug_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+
+/* -- Frames -------------------------------------------------------------- */
+
+/* Get frame corresponding to a level. */
+cTValue *lj_debug_frame(lua_State *L, int level, int *size)
+{
+ cTValue *frame, *nextframe, *bot = tvref(L->stack);
+ /* Traverse frames backwards. */
+ for (nextframe = frame = L->base-1; frame > bot; ) {
+ if (frame_gc(frame) == obj2gco(L))
+ level++; /* Skip dummy frames. See lj_meta_call(). */
+ if (level-- == 0) {
+ *size = (int)(nextframe - frame);
+ return frame; /* Level found. */
+ }
+ nextframe = frame;
+ if (frame_islua(frame)) {
+ frame = frame_prevl(frame);
+ } else {
+ if (frame_isvarg(frame))
+ level++; /* Skip vararg pseudo-frame. */
+ frame = frame_prevd(frame);
+ }
+ }
+ *size = level;
+ return NULL; /* Level not found. */
+}
+
+/* Invalid bytecode position. */
+#define NO_BCPOS (~(BCPos)0)
+
+/* Return bytecode position for function/frame or NO_BCPOS. */
+static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
+{
+ const BCIns *ins;
+ GCproto *pt;
+ BCPos pos;
+ lua_assert(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD);
+ if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */
+ return NO_BCPOS;
+ } else if (nextframe == NULL) { /* Lua function on top. */
+ void *cf = cframe_raw(L->cframe);
+ if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf))
+ return NO_BCPOS;
+ ins = cframe_pc(cf); /* Only happens during error/hook handling. */
+ } else {
+ if (frame_islua(nextframe)) {
+ ins = frame_pc(nextframe);
+ } else if (frame_iscont(nextframe)) {
+ ins = frame_contpc(nextframe);
+ } else {
+ /* Lua function below errfunc/gc/hook: find cframe to get the PC. */
+ void *cf = cframe_raw(L->cframe);
+ TValue *f = L->base-1;
+ if (cf == NULL)
+ return NO_BCPOS;
+ while (f > nextframe) {
+ if (frame_islua(f)) {
+ f = frame_prevl(f);
+ } else {
+ if (frame_isc(f))
+ cf = cframe_raw(cframe_prev(cf));
+ f = frame_prevd(f);
+ }
+ }
+ if (cframe_prev(cf))
+ cf = cframe_raw(cframe_prev(cf));
+ ins = cframe_pc(cf);
+ }
+ }
+ pt = funcproto(fn);
+ pos = proto_bcpos(pt, ins) - 1;
+#if LJ_HASJIT
+ if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */
+ GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins));
+ lua_assert(bc_isret(bc_op(ins[-1])));
+ pos = proto_bcpos(pt, mref(T->startpc, const BCIns));
+ }
+#endif
+ return pos;
+}
+
+/* -- Line numbers -------------------------------------------------------- */
+
+/* Get line number for a bytecode position. */
+BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc)
+{
+ const void *lineinfo = proto_lineinfo(pt);
+ if (pc <= pt->sizebc && lineinfo) {
+ BCLine first = pt->firstline;
+ if (pc == pt->sizebc) return first + pt->numline;
+ if (pc-- == 0) return first;
+ if (pt->numline < 256)
+ return first + (BCLine)((const uint8_t *)lineinfo)[pc];
+ else if (pt->numline < 65536)
+ return first + (BCLine)((const uint16_t *)lineinfo)[pc];
+ else
+ return first + (BCLine)((const uint32_t *)lineinfo)[pc];
+ }
+ return 0;
+}
+
+/* Get line number for function/frame. */
+static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe)
+{
+ BCPos pc = debug_framepc(L, fn, nextframe);
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ lua_assert(pc <= pt->sizebc);
+ return lj_debug_line(pt, pc);
+ }
+ return -1;
+}
+
+/* -- Variable names ------------------------------------------------------ */
+
+/* Read ULEB128 value. */
+static uint32_t debug_read_uleb128(const uint8_t **pp)
+{
+ const uint8_t *p = *pp;
+ uint32_t v = *p++;
+ if (LJ_UNLIKELY(v >= 0x80)) {
+ int sh = 0;
+ v &= 0x7f;
+ do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
+ }
+ *pp = p;
+ return v;
+}
+
+/* Get name of a local variable from slot number and PC. */
+static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot)
+{
+ const uint8_t *p = proto_varinfo(pt);
+ if (p) {
+ BCPos lastpc = 0;
+ for (;;) {
+ const char *name = (const char *)p;
+ uint32_t vn = *p++;
+ BCPos startpc, endpc;
+ if (vn < VARNAME__MAX) {
+ if (vn == VARNAME_END) break; /* End of varinfo. */
+ } else {
+ while (*p++) ; /* Skip over variable name string. */
+ }
+ lastpc = startpc = lastpc + debug_read_uleb128(&p);
+ if (startpc > pc) break;
+ endpc = startpc + debug_read_uleb128(&p);
+ if (pc < endpc && slot-- == 0) {
+ if (vn < VARNAME__MAX) {
+#define VARNAMESTR(name, str) str "\0"
+ name = VARNAMEDEF(VARNAMESTR);
+#undef VARNAMESTR
+ if (--vn) while (*name++ || --vn) ;
+ }
+ return name;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Get name of local variable from 1-based slot number and function/frame. */
+static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
+ const char **name, BCReg slot1)
+{
+ uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
+ uint32_t size = (uint32_t)ar->i_ci >> 16;
+ TValue *frame = tvref(L->stack) + offset;
+ TValue *nextframe = size ? frame + size : NULL;
+ GCfunc *fn = frame_func(frame);
+ BCPos pc = debug_framepc(L, fn, nextframe);
+ if (!nextframe) nextframe = L->top;
+ if ((int)slot1 < 0) { /* Negative slot number is for varargs. */
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ if ((pt->flags & PROTO_VARARG)) {
+ slot1 = pt->numparams + (BCReg)(-(int)slot1);
+ if (frame_isvarg(frame)) { /* Vararg frame has been set up? (pc!=0) */
+ nextframe = frame;
+ frame = frame_prevd(frame);
+ }
+ if (frame + slot1 < nextframe) {
+ *name = "(*vararg)";
+ return frame+slot1;
+ }
+ }
+ }
+ return NULL;
+ }
+ if (pc != NO_BCPOS &&
+ (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL)
+ ;
+ else if (slot1 > 0 && frame + slot1 < nextframe)
+ *name = "(*temporary)";
+ return frame+slot1;
+}
+
+/* Get name of upvalue. */
+const char *lj_debug_uvname(GCproto *pt, uint32_t idx)
+{
+ const uint8_t *p = proto_uvinfo(pt);
+ lua_assert(idx < pt->sizeuv);
+ if (!p) return "";
+ if (idx) while (*p++ || --idx) ;
+ return (const char *)p;
+}
+
+/* Get name and value of upvalue. */
+const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp)
+{
+ if (tvisfunc(o)) {
+ GCfunc *fn = funcV(o);
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ if (idx < pt->sizeuv) {
+ *tvp = uvval(&gcref(fn->l.uvptr[idx])->uv);
+ return lj_debug_uvname(pt, idx);
+ }
+ } else {
+ if (idx < fn->c.nupvalues) {
+ *tvp = &fn->c.upvalue[idx];
+ return "";
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Deduce name of an object from slot number and PC. */
+const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot,
+ const char **name)
+{
+ const char *lname;
+restart:
+ lname = debug_varname(pt, proto_bcpos(pt, ip), slot);
+ if (lname != NULL) { *name = lname; return "local"; }
+ while (--ip > proto_bc(pt)) {
+ BCIns ins = *ip;
+ BCOp op = bc_op(ins);
+ BCReg ra = bc_a(ins);
+ if (bcmode_a(op) == BCMbase) {
+ if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins)))
+ return NULL;
+ } else if (bcmode_a(op) == BCMdst && ra == slot) {
+ switch (bc_op(ins)) {
+ case BC_MOV:
+ if (ra == slot) { slot = bc_d(ins); goto restart; }
+ break;
+ case BC_GGET:
+ *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins))));
+ return "global";
+ case BC_TGETS:
+ *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins))));
+ if (ip > proto_bc(pt)) {
+ BCIns insp = ip[-1];
+ if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1 &&
+ bc_d(insp) == bc_b(ins))
+ return "method";
+ }
+ return "field";
+ case BC_UGET:
+ *name = lj_debug_uvname(pt, bc_d(ins));
+ return "upvalue";
+ default:
+ return NULL;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Deduce function name from caller of a frame. */
+const char *lj_debug_funcname(lua_State *L, TValue *frame, const char **name)
+{
+ TValue *pframe;
+ GCfunc *fn;
+ BCPos pc;
+ if (frame <= tvref(L->stack))
+ return NULL;
+ if (frame_isvarg(frame))
+ frame = frame_prevd(frame);
+ pframe = frame_prev(frame);
+ fn = frame_func(pframe);
+ pc = debug_framepc(L, fn, frame);
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)];
+ MMS mm = bcmode_mm(bc_op(*ip));
+ if (mm == MM_call) {
+ BCReg slot = bc_a(*ip);
+ if (bc_op(*ip) == BC_ITERC) slot -= 3;
+ return lj_debug_slotname(pt, ip, slot, name);
+ } else if (mm != MM__MAX) {
+ *name = strdata(mmname_str(G(L), mm));
+ return "metamethod";
+ }
+ }
+ return NULL;
+}
+
+/* -- Source code locations ----------------------------------------------- */
+
+/* Generate shortened source name. */
+void lj_debug_shortname(char *out, GCstr *str)
+{
+ const char *src = strdata(str);
+ if (*src == '=') {
+ strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */
+ out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */
+ } else if (*src == '@') { /* Output "source", or "...source". */
+ size_t len = str->len-1;
+ src++; /* Skip the `@' */
+ if (len >= LUA_IDSIZE) {
+ src += len-(LUA_IDSIZE-4); /* Get last part of file name. */
+ *out++ = '.'; *out++ = '.'; *out++ = '.';
+ }
+ strcpy(out, src);
+ } else { /* Output [string "string"]. */
+ size_t len; /* Length, up to first control char. */
+ for (len = 0; len < LUA_IDSIZE-12; len++)
+ if (((const unsigned char *)src)[len] < ' ') break;
+ strcpy(out, "[string \""); out += 9;
+ if (src[len] != '\0') { /* Must truncate? */
+ if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15;
+ strncpy(out, src, len); out += len;
+ strcpy(out, "..."); out += 3;
+ } else {
+ strcpy(out, src); out += len;
+ }
+ strcpy(out, "\"]");
+ }
+}
+
+/* Add current location of a frame to error message. */
+void lj_debug_addloc(lua_State *L, const char *msg,
+ cTValue *frame, cTValue *nextframe)
+{
+ if (frame) {
+ GCfunc *fn = frame_func(frame);
+ if (isluafunc(fn)) {
+ BCLine line = debug_frameline(L, fn, nextframe);
+ if (line >= 0) {
+ char buf[LUA_IDSIZE];
+ lj_debug_shortname(buf, proto_chunkname(funcproto(fn)));
+ lj_str_pushf(L, "%s:%d: %s", buf, line, msg);
+ return;
+ }
+ }
+ }
+ lj_str_pushf(L, "%s", msg);
+}
+
+/* Push location string for a bytecode position to Lua stack. */
+void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc)
+{
+ GCstr *name = proto_chunkname(pt);
+ const char *s = strdata(name);
+ MSize i, len = name->len;
+ BCLine line = lj_debug_line(pt, pc);
+ if (*s == '@') {
+ s++; len--;
+ for (i = len; i > 0; i--)
+ if (s[i] == '/' || s[i] == '\\') {
+ s += i+1;
+ break;
+ }
+ lj_str_pushf(L, "%s:%d", s, line);
+ } else if (len > 40) {
+ lj_str_pushf(L, "%p:%d", pt, line);
+ } else if (*s == '=') {
+ lj_str_pushf(L, "%s:%d", s+1, line);
+ } else {
+ lj_str_pushf(L, "\"%s\":%d", s, line);
+ }
+}
+
+/* -- Public debug API ---------------------------------------------------- */
+
+/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */
+
+LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n)
+{
+ const char *name = NULL;
+ if (ar) {
+ TValue *o = debug_localname(L, ar, &name, (BCReg)n);
+ if (name) {
+ copyTV(L, L->top, o);
+ incr_top(L);
+ }
+ } else if (tvisfunc(L->top-1) && isluafunc(funcV(L->top-1))) {
+ name = debug_varname(funcproto(funcV(L->top-1)), 0, (BCReg)n-1);
+ }
+ return name;
+}
+
+LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n)
+{
+ const char *name = NULL;
+ TValue *o = debug_localname(L, ar, &name, (BCReg)n);
+ if (name)
+ copyTV(L, o, L->top-1);
+ L->top--;
+ return name;
+}
+
+int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext)
+{
+ int opt_f = 0, opt_L = 0;
+ TValue *frame = NULL;
+ TValue *nextframe = NULL;
+ GCfunc *fn;
+ if (*what == '>') {
+ TValue *func = L->top - 1;
+ api_check(L, tvisfunc(func));
+ fn = funcV(func);
+ L->top--;
+ what++;
+ } else {
+ uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
+ uint32_t size = (uint32_t)ar->i_ci >> 16;
+ lua_assert(offset != 0);
+ frame = tvref(L->stack) + offset;
+ if (size) nextframe = frame + size;
+ lua_assert(frame <= tvref(L->maxstack) &&
+ (!nextframe || nextframe <= tvref(L->maxstack)));
+ fn = frame_func(frame);
+ lua_assert(fn->c.gct == ~LJ_TFUNC);
+ }
+ for (; *what; what++) {
+ if (*what == 'S') {
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ BCLine firstline = pt->firstline;
+ GCstr *name = proto_chunkname(pt);
+ ar->source = strdata(name);
+ lj_debug_shortname(ar->short_src, name);
+ ar->linedefined = (int)firstline;
+ ar->lastlinedefined = (int)(firstline + pt->numline);
+ ar->what = firstline ? "Lua" : "main";
+ } else {
+ ar->source = "=[C]";
+ ar->short_src[0] = '[';
+ ar->short_src[1] = 'C';
+ ar->short_src[2] = ']';
+ ar->short_src[3] = '\0';
+ ar->linedefined = -1;
+ ar->lastlinedefined = -1;
+ ar->what = "C";
+ }
+ } else if (*what == 'l') {
+ ar->currentline = frame ? debug_frameline(L, fn, nextframe) : -1;
+ } else if (*what == 'u') {
+ ar->nups = fn->c.nupvalues;
+ if (ext) {
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ ar->nparams = pt->numparams;
+ ar->isvararg = !!(pt->flags & PROTO_VARARG);
+ } else {
+ ar->nparams = 0;
+ ar->isvararg = 1;
+ }
+ }
+ } else if (*what == 'n') {
+ ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL;
+ if (ar->namewhat == NULL) {
+ ar->namewhat = "";
+ ar->name = NULL;
+ }
+ } else if (*what == 'f') {
+ opt_f = 1;
+ } else if (*what == 'L') {
+ opt_L = 1;
+ } else {
+ return 0; /* Bad option. */
+ }
+ }
+ if (opt_f) {
+ setfuncV(L, L->top, fn);
+ incr_top(L);
+ }
+ if (opt_L) {
+ if (isluafunc(fn)) {
+ GCtab *t = lj_tab_new(L, 0, 0);
+ GCproto *pt = funcproto(fn);
+ const void *lineinfo = proto_lineinfo(pt);
+ if (lineinfo) {
+ BCLine first = pt->firstline;
+ int sz = pt->numline < 256 ? 1 : pt->numline < 65536 ? 2 : 4;
+ MSize i, szl = pt->sizebc-1;
+ for (i = 0; i < szl; i++) {
+ BCLine line = first +
+ (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] :
+ sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] :
+ (BCLine)((const uint32_t *)lineinfo)[i]);
+ setboolV(lj_tab_setint(L, t, line), 1);
+ }
+ }
+ settabV(L, L->top, t);
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+ }
+ return 1; /* Ok. */
+}
+
+LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar)
+{
+ return lj_debug_getinfo(L, what, (lj_Debug *)ar, 0);
+}
+
+LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar)
+{
+ int size;
+ cTValue *frame = lj_debug_frame(L, level, &size);
+ if (frame) {
+ ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack));
+ return 1;
+ } else {
+ ar->i_ci = level - size;
+ return 0;
+ }
+}
+
+/* Number of frames for the leading and trailing part of a traceback. */
+#define TRACEBACK_LEVELS1 12
+#define TRACEBACK_LEVELS2 10
+
+LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
+ int level)
+{
+ int top = (int)(L->top - L->base);
+ int lim = TRACEBACK_LEVELS1;
+ lua_Debug ar;
+ if (msg) lua_pushfstring(L, "%s\n", msg);
+ lua_pushliteral(L, "stack traceback:");
+ while (lua_getstack(L1, level++, &ar)) {
+ GCfunc *fn;
+ if (level > lim) {
+ if (!lua_getstack(L1, level + TRACEBACK_LEVELS2, &ar)) {
+ level--;
+ } else {
+ lua_pushliteral(L, "\n\t...");
+ lua_getstack(L1, -10, &ar);
+ level = ar.i_ci - TRACEBACK_LEVELS2;
+ }
+ lim = 2147483647;
+ continue;
+ }
+ lua_getinfo(L1, "Snlf", &ar);
+ fn = funcV(L1->top-1); L1->top--;
+ if (isffunc(fn) && !*ar.namewhat)
+ lua_pushfstring(L, "\n\t[builtin#%d]:", fn->c.ffid);
+ else
+ lua_pushfstring(L, "\n\t%s:", ar.short_src);
+ if (ar.currentline > 0)
+ lua_pushfstring(L, "%d:", ar.currentline);
+ if (*ar.namewhat) {
+ lua_pushfstring(L, " in function " LUA_QS, ar.name);
+ } else {
+ if (*ar.what == 'm') {
+ lua_pushliteral(L, " in main chunk");
+ } else if (*ar.what == 'C') {
+ lua_pushfstring(L, " at %p", fn->c.f);
+ } else {
+ lua_pushfstring(L, " in function <%s:%d>",
+ ar.short_src, ar.linedefined);
+ }
+ }
+ if ((int)(L->top - L->base) - top >= 15)
+ lua_concat(L, (int)(L->top - L->base) - top);
+ }
+ lua_concat(L, (int)(L->top - L->base) - top);
+}
+
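As a reference point for the variable-info format consumed by debug_varname() above, the following is a minimal standalone sketch of the same ULEB128 decoding (names carry an sk_ prefix to mark it as an illustration, not part of the sources): each byte contributes 7 payload bits, least-significant group first, and a set high bit means another byte follows.

/*
 * Illustrative ULEB128 decoder mirroring debug_read_uleb128().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t sk_read_uleb128(const uint8_t **pp)
{
  const uint8_t *p = *pp;
  uint32_t v = *p++;
  if (v >= 0x80) {              /* Multi-byte value. */
    int sh = 0;
    v &= 0x7f;
    do { v |= ((uint32_t)(*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
  }
  *pp = p;
  return v;
}

int main(void)
{
  /* 624485 encodes as E5 8E 26 in ULEB128. */
  const uint8_t buf[] = { 0xE5, 0x8E, 0x26 };
  const uint8_t *p = buf;
  printf("value=%u\n", (unsigned)sk_read_uleb128(&p));   /* Prints value=624485. */
  printf("consumed=%d bytes\n", (int)(p - buf));          /* Prints consumed=3 bytes. */
  return 0;
}

In the varinfo stream this decoding is applied twice per entry, yielding the start PC delta and the live range length that debug_varname() checks against the current bytecode position.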
diff --git a/3rdparty/lua/src/lj_debug.h b/3rdparty/lua/src/lj_debug.h
index 4a87ef6..7cf57de 100644
--- a/3rdparty/lua/src/lj_debug.h
+++ b/3rdparty/lua/src/lj_debug.h
@@ -1,61 +1,61 @@
-/*
-** Debugging and introspection.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_DEBUG_H
-#define _LJ_DEBUG_H
-
-#include "lj_obj.h"
-
-typedef struct lj_Debug {
- /* Common fields. Must be in the same order as in lua.h. */
- int event;
- const char *name;
- const char *namewhat;
- const char *what;
- const char *source;
- int currentline;
- int nups;
- int linedefined;
- int lastlinedefined;
- char short_src[LUA_IDSIZE];
- int i_ci;
- /* Extended fields. Only valid if lj_debug_getinfo() is called with ext = 1.*/
- int nparams;
- int isvararg;
-} lj_Debug;
-
-LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size);
-LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc);
-LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx);
-LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp);
-LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc,
- BCReg slot, const char **name);
-LJ_FUNC const char *lj_debug_funcname(lua_State *L, TValue *frame,
- const char **name);
-LJ_FUNC void lj_debug_shortname(char *out, GCstr *str);
-LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg,
- cTValue *frame, cTValue *nextframe);
-LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc);
-LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar,
- int ext);
-
-/* Fixed internal variable names. */
-#define VARNAMEDEF(_) \
- _(FOR_IDX, "(for index)") \
- _(FOR_STOP, "(for limit)") \
- _(FOR_STEP, "(for step)") \
- _(FOR_GEN, "(for generator)") \
- _(FOR_STATE, "(for state)") \
- _(FOR_CTL, "(for control)")
-
-enum {
- VARNAME_END,
-#define VARNAMEENUM(name, str) VARNAME_##name,
- VARNAMEDEF(VARNAMEENUM)
-#undef VARNAMEENUM
- VARNAME__MAX
-};
-
-#endif
+/*
+** Debugging and introspection.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DEBUG_H
+#define _LJ_DEBUG_H
+
+#include "lj_obj.h"
+
+typedef struct lj_Debug {
+ /* Common fields. Must be in the same order as in lua.h. */
+ int event;
+ const char *name;
+ const char *namewhat;
+ const char *what;
+ const char *source;
+ int currentline;
+ int nups;
+ int linedefined;
+ int lastlinedefined;
+ char short_src[LUA_IDSIZE];
+ int i_ci;
+ /* Extended fields. Only valid if lj_debug_getinfo() is called with ext = 1.*/
+ int nparams;
+ int isvararg;
+} lj_Debug;
+
+LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size);
+LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc);
+LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx);
+LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp);
+LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc,
+ BCReg slot, const char **name);
+LJ_FUNC const char *lj_debug_funcname(lua_State *L, TValue *frame,
+ const char **name);
+LJ_FUNC void lj_debug_shortname(char *out, GCstr *str);
+LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg,
+ cTValue *frame, cTValue *nextframe);
+LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc);
+LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar,
+ int ext);
+
+/* Fixed internal variable names. */
+#define VARNAMEDEF(_) \
+ _(FOR_IDX, "(for index)") \
+ _(FOR_STOP, "(for limit)") \
+ _(FOR_STEP, "(for step)") \
+ _(FOR_GEN, "(for generator)") \
+ _(FOR_STATE, "(for state)") \
+ _(FOR_CTL, "(for control)")
+
+enum {
+ VARNAME_END,
+#define VARNAMEENUM(name, str) VARNAME_##name,
+ VARNAMEDEF(VARNAMEENUM)
+#undef VARNAMEENUM
+ VARNAME__MAX
+};
+
+#endif
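The common fields of lj_Debug above deliberately mirror lua_Debug from lua.h, so hook code written against the public API sees exactly this layout. A small sketch of a line hook that reads those fields via lua_getinfo(); the chunk being run is only an example:

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

/* Line hook: the "Sl" options fill source/short_src/what plus currentline,
** i.e. the common lua_Debug fields listed in lj_Debug above. */
static void line_hook(lua_State *L, lua_Debug *ar)
{
  if (lua_getinfo(L, "Sl", ar))
    printf("%s:%d\n", ar->short_src, ar->currentline);
}

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  lua_sethook(L, line_hook, LUA_MASKLINE, 0);
  luaL_dostring(L, "local s = 0\nfor i = 1, 3 do s = s + i end");
  lua_close(L);
  return 0;
}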
diff --git a/3rdparty/lua/src/lj_def.h b/3rdparty/lua/src/lj_def.h
index 7aab086..250729f 100644
--- a/3rdparty/lua/src/lj_def.h
+++ b/3rdparty/lua/src/lj_def.h
@@ -1,353 +1,349 @@
-/*
-** LuaJIT common internal definitions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_DEF_H
-#define _LJ_DEF_H
-
-#include "lua.h"
-
-#if defined(_MSC_VER)
-/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */
-typedef __int8 int8_t;
-typedef __int16 int16_t;
-typedef __int32 int32_t;
-typedef __int64 int64_t;
-typedef unsigned __int8 uint8_t;
-typedef unsigned __int16 uint16_t;
-typedef unsigned __int32 uint32_t;
-typedef unsigned __int64 uint64_t;
-#ifdef _WIN64
-typedef __int64 intptr_t;
-typedef unsigned __int64 uintptr_t;
-#else
-typedef __int32 intptr_t;
-typedef unsigned __int32 uintptr_t;
-#endif
-#elif defined(__symbian__)
-/* Cough. */
-typedef signed char int8_t;
-typedef short int int16_t;
-typedef int int32_t;
-typedef long long int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned short int uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-typedef int intptr_t;
-typedef unsigned int uintptr_t;
-#else
-#include <stdint.h>
-#endif
-
-/* Needed everywhere. */
-#include <string.h>
-#include <stdlib.h>
-
-/* Various VM limits. */
-#define LJ_MAX_MEM 0x7fffff00 /* Max. total memory allocation. */
-#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */
-#define LJ_MAX_STR LJ_MAX_MEM /* Max. string length. */
-#define LJ_MAX_UDATA LJ_MAX_MEM /* Max. userdata length. */
-
-#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */
-#define LJ_MAX_HBITS 26 /* Max. hash bits. */
-#define LJ_MAX_ABITS 28 /* Max. bits of array key. */
-#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */
-#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */
-
-#define LJ_MAX_LINE LJ_MAX_MEM /* Max. source code line number. */
-#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */
-#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */
-#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */
-#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */
-#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */
-
-#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */
-#define LJ_STACK_EXTRA 5 /* Extra stack space (metamethods). */
-
-#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */
-
-/* Minimum table/buffer sizes. */
-#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */
-#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */
-#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */
-#define LJ_MIN_SBUF 32 /* Min. string buffer length. */
-#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */
-#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */
-#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */
-
-/* JIT compiler limits. */
-#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */
-#define LJ_MAX_PHI 64 /* Max. # of PHIs for a loop. */
-#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */
-
-/* Various macros. */
-#ifndef UNUSED
-#define UNUSED(x) ((void)(x)) /* to avoid warnings */
-#endif
-
-#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo)
-#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p))
-#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p))
-
-#define checki8(x) ((x) == (int32_t)(int8_t)(x))
-#define checku8(x) ((x) == (int32_t)(uint8_t)(x))
-#define checki16(x) ((x) == (int32_t)(int16_t)(x))
-#define checku16(x) ((x) == (int32_t)(uint16_t)(x))
-#define checki32(x) ((x) == (int32_t)(x))
-#define checku32(x) ((x) == (uint32_t)(x))
-#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
-
-/* Every half-decent C compiler transforms this into a rotate instruction. */
-#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1))))
-#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n)))
-
-/* A really naive Bloom filter. But sufficient for our needs. */
-typedef uintptr_t BloomFilter;
-#define BLOOM_MASK (8*sizeof(BloomFilter) - 1)
-#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK))
-#define bloomset(b, x) ((b) |= bloombit((x)))
-#define bloomtest(b, x) ((b) & bloombit((x)))
-
-#if defined(__GNUC__) || defined(__psp2__)
-
-#define LJ_NORET __attribute__((noreturn))
-#define LJ_ALIGN(n) __attribute__((aligned(n)))
-#define LJ_INLINE inline
-#define LJ_AINLINE inline __attribute__((always_inline))
-#define LJ_NOINLINE __attribute__((noinline))
-
-#if defined(__ELF__) || defined(__MACH__) || defined(__psp2__)
-#if !((defined(__sun__) && defined(__svr4__)) || defined(__CELLOS_LV2__))
-#define LJ_NOAPI extern __attribute__((visibility("hidden")))
-#endif
-#endif
-
-/* Note: it's only beneficial to use fastcall on x86 and then only for up to
-** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only
-** indirect calls and related tail-called C functions are marked as fastcall.
-*/
-#if defined(__i386__)
-#define LJ_FASTCALL __attribute__((fastcall))
-#endif
-
-#define LJ_LIKELY(x) __builtin_expect(!!(x), 1)
-#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0)
-
-#define lj_ffs(x) ((uint32_t)__builtin_ctz(x))
-/* Don't ask ... */
-#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__))
-static LJ_AINLINE uint32_t lj_fls(uint32_t x)
-{
- uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r;
-}
-#else
-#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31))
-#endif
-
-#if defined(__arm__)
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
-#if defined(__psp2__)
- return __builtin_rev(x);
-#else
- uint32_t r;
-#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\
- __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__
- __asm__("rev %0, %1" : "=r" (r) : "r" (x));
- return r;
-#else
-#ifdef __thumb__
- r = x ^ lj_ror(x, 16);
-#else
- __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x));
-#endif
- return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8);
-#endif
-#endif
-}
-
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
-}
-#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
- return (uint32_t)__builtin_bswap32((int32_t)x);
-}
-
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return (uint64_t)__builtin_bswap64((int64_t)x);
-}
-#elif defined(__i386__) || defined(__x86_64__)
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
- uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
-}
-
-#if defined(__i386__)
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
-}
-#else
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
-}
-#endif
-#else
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
- return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24);
-}
-
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return (uint64_t)lj_bswap((uint32_t)(x >> 32)) |
- ((uint64_t)lj_bswap((uint32_t)x) << 32);
-}
-#endif
-
-typedef union __attribute__((packed)) Unaligned16 {
- uint16_t u;
- uint8_t b[2];
-} Unaligned16;
-
-typedef union __attribute__((packed)) Unaligned32 {
- uint32_t u;
- uint8_t b[4];
-} Unaligned32;
-
-/* Unaligned load of uint16_t. */
-static LJ_AINLINE uint16_t lj_getu16(const void *p)
-{
- return ((const Unaligned16 *)p)->u;
-}
-
-/* Unaligned load of uint32_t. */
-static LJ_AINLINE uint32_t lj_getu32(const void *p)
-{
- return ((const Unaligned32 *)p)->u;
-}
-
-#elif defined(_MSC_VER)
-
-#define LJ_NORET __declspec(noreturn)
-#define LJ_ALIGN(n) __declspec(align(n))
-#define LJ_INLINE __inline
-#define LJ_AINLINE __forceinline
-#define LJ_NOINLINE __declspec(noinline)
-#if defined(_M_IX86)
-#define LJ_FASTCALL __fastcall
-#endif
-
-#ifdef _M_PPC
-unsigned int _CountLeadingZeros(long);
-#pragma intrinsic(_CountLeadingZeros)
-static LJ_AINLINE uint32_t lj_fls(uint32_t x)
-{
- return _CountLeadingZeros(x) ^ 31;
-}
-#else
-unsigned char _BitScanForward(uint32_t *, unsigned long);
-unsigned char _BitScanReverse(uint32_t *, unsigned long);
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-static LJ_AINLINE uint32_t lj_ffs(uint32_t x)
-{
- uint32_t r; _BitScanForward(&r, x); return r;
-}
-
-static LJ_AINLINE uint32_t lj_fls(uint32_t x)
-{
- uint32_t r; _BitScanReverse(&r, x); return r;
-}
-#endif
-
-unsigned long _byteswap_ulong(unsigned long);
-uint64_t _byteswap_uint64(uint64_t);
-#define lj_bswap(x) (_byteswap_ulong((x)))
-#define lj_bswap64(x) (_byteswap_uint64((x)))
-
-#if defined(_M_PPC) && defined(LUAJIT_NO_UNALIGNED)
-/*
-** Replacement for unaligned loads on Xbox 360. Disabled by default since it's
-** usually more costly than the occasional stall when crossing a cache-line.
-*/
-static LJ_AINLINE uint16_t lj_getu16(const void *v)
-{
- const uint8_t *p = (const uint8_t *)v;
- return (uint16_t)((p[0]<<8) | p[1]);
-}
-static LJ_AINLINE uint32_t lj_getu32(const void *v)
-{
- const uint8_t *p = (const uint8_t *)v;
- return (uint32_t)((p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]);
-}
-#else
-/* Unaligned loads are generally ok on x86/x64. */
-#define lj_getu16(p) (*(uint16_t *)(p))
-#define lj_getu32(p) (*(uint32_t *)(p))
-#endif
-
-#else
-#error "missing defines for your compiler"
-#endif
-
-/* Optional defines. */
-#ifndef LJ_FASTCALL
-#define LJ_FASTCALL
-#endif
-#ifndef LJ_NORET
-#define LJ_NORET
-#endif
-#ifndef LJ_NOAPI
-#define LJ_NOAPI extern
-#endif
-#ifndef LJ_LIKELY
-#define LJ_LIKELY(x) (x)
-#define LJ_UNLIKELY(x) (x)
-#endif
-
-/* Attributes for internal functions. */
-#define LJ_DATA LJ_NOAPI
-#define LJ_DATADEF
-#define LJ_ASMF LJ_NOAPI
-#define LJ_FUNCA LJ_NOAPI
-#if defined(ljamalg_c)
-#define LJ_FUNC static
-#else
-#define LJ_FUNC LJ_NOAPI
-#endif
-#define LJ_FUNC_NORET LJ_FUNC LJ_NORET
-#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET
-#define LJ_ASMF_NORET LJ_ASMF LJ_NORET
-
-/* Runtime assertions. */
-#ifdef lua_assert
-#define check_exp(c, e) (lua_assert(c), (e))
-#define api_check(l, e) lua_assert(e)
-#else
-#define lua_assert(c) ((void)0)
-#define check_exp(c, e) (e)
-#define api_check luai_apicheck
-#endif
-
-/* Static assertions. */
-#define LJ_ASSERT_NAME2(name, line) name ## line
-#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line)
-#ifdef __COUNTER__
-#define LJ_STATIC_ASSERT(cond) \
- extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
-#else
-#define LJ_STATIC_ASSERT(cond) \
- extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
-#endif
-
-#endif
+/*
+** LuaJIT common internal definitions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DEF_H
+#define _LJ_DEF_H
+
+#include "lua.h"
+
+#if defined(_MSC_VER)
+/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#ifdef _WIN64
+typedef __int64 intptr_t;
+typedef unsigned __int64 uintptr_t;
+#else
+typedef __int32 intptr_t;
+typedef unsigned __int32 uintptr_t;
+#endif
+#elif defined(__symbian__)
+/* Cough. */
+typedef signed char int8_t;
+typedef short int int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+typedef unsigned char uint8_t;
+typedef unsigned short int uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef int intptr_t;
+typedef unsigned int uintptr_t;
+#else
+#include <stdint.h>
+#endif
+
+/* Needed everywhere. */
+#include <string.h>
+#include <stdlib.h>
+
+/* Various VM limits. */
+#define LJ_MAX_MEM 0x7fffff00 /* Max. total memory allocation. */
+#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */
+#define LJ_MAX_STR LJ_MAX_MEM /* Max. string length. */
+#define LJ_MAX_UDATA LJ_MAX_MEM /* Max. userdata length. */
+
+#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */
+#define LJ_MAX_HBITS 26 /* Max. hash bits. */
+#define LJ_MAX_ABITS 28 /* Max. bits of array key. */
+#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */
+#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */
+
+#define LJ_MAX_LINE LJ_MAX_MEM /* Max. source code line number. */
+#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */
+#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */
+#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */
+#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */
+#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */
+
+#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */
+#define LJ_STACK_EXTRA 5 /* Extra stack space (metamethods). */
+
+#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */
+
+/* Minimum table/buffer sizes. */
+#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */
+#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */
+#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */
+#define LJ_MIN_SBUF 32 /* Min. string buffer length. */
+#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */
+#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */
+#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */
+
+/* JIT compiler limits. */
+#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */
+#define LJ_MAX_PHI 64 /* Max. # of PHIs for a loop. */
+#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */
+
+/* Various macros. */
+#ifndef UNUSED
+#define UNUSED(x) ((void)(x)) /* to avoid warnings */
+#endif
+
+#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo)
+#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p))
+#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p))
+
+#define checki8(x) ((x) == (int32_t)(int8_t)(x))
+#define checku8(x) ((x) == (int32_t)(uint8_t)(x))
+#define checki16(x) ((x) == (int32_t)(int16_t)(x))
+#define checku16(x) ((x) == (int32_t)(uint16_t)(x))
+#define checki32(x) ((x) == (int32_t)(x))
+#define checku32(x) ((x) == (uint32_t)(x))
+#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
+
+/* Every half-decent C compiler transforms this into a rotate instruction. */
+#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1))))
+#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n)))
+
+/* A really naive Bloom filter. But sufficient for our needs. */
+typedef uintptr_t BloomFilter;
+#define BLOOM_MASK (8*sizeof(BloomFilter) - 1)
+#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK))
+#define bloomset(b, x) ((b) |= bloombit((x)))
+#define bloomtest(b, x) ((b) & bloombit((x)))
+
+#if defined(__GNUC__)
+
+#define LJ_NORET __attribute__((noreturn))
+#define LJ_ALIGN(n) __attribute__((aligned(n)))
+#define LJ_INLINE inline
+#define LJ_AINLINE inline __attribute__((always_inline))
+#define LJ_NOINLINE __attribute__((noinline))
+
+#if defined(__ELF__) || defined(__MACH__)
+#if !((defined(__sun__) && defined(__svr4__)) || defined(__CELLOS_LV2__))
+#define LJ_NOAPI extern __attribute__((visibility("hidden")))
+#endif
+#endif
+
+/* Note: it's only beneficial to use fastcall on x86 and then only for up to
+** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only
+** indirect calls and related tail-called C functions are marked as fastcall.
+*/
+#if defined(__i386__)
+#define LJ_FASTCALL __attribute__((fastcall))
+#endif
+
+#define LJ_LIKELY(x) __builtin_expect(!!(x), 1)
+#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+#define lj_ffs(x) ((uint32_t)__builtin_ctz(x))
+/* Don't ask ... */
+#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__))
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r;
+}
+#else
+#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31))
+#endif
+
+#if defined(__arm__)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ uint32_t r;
+#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\
+ __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__
+ __asm__("rev %0, %1" : "=r" (r) : "r" (x));
+ return r;
+#else
+#ifdef __thumb__
+ r = x ^ lj_ror(x, 16);
+#else
+ __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x));
+#endif
+ return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8);
+#endif
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
+}
+#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ return (uint32_t)__builtin_bswap32((int32_t)x);
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return (uint64_t)__builtin_bswap64((int64_t)x);
+}
+#elif defined(__i386__) || defined(__x86_64__)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
+}
+
+#if defined(__i386__)
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
+}
+#else
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
+}
+#endif
+#else
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24);
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return (uint64_t)lj_bswap((uint32_t)(x >> 32)) |
+ ((uint64_t)lj_bswap((uint32_t)x) << 32);
+}
+#endif
+
+typedef union __attribute__((packed)) Unaligned16 {
+ uint16_t u;
+ uint8_t b[2];
+} Unaligned16;
+
+typedef union __attribute__((packed)) Unaligned32 {
+ uint32_t u;
+ uint8_t b[4];
+} Unaligned32;
+
+/* Unaligned load of uint16_t. */
+static LJ_AINLINE uint16_t lj_getu16(const void *p)
+{
+ return ((const Unaligned16 *)p)->u;
+}
+
+/* Unaligned load of uint32_t. */
+static LJ_AINLINE uint32_t lj_getu32(const void *p)
+{
+ return ((const Unaligned32 *)p)->u;
+}
+
+#elif defined(_MSC_VER)
+
+#define LJ_NORET __declspec(noreturn)
+#define LJ_ALIGN(n) __declspec(align(n))
+#define LJ_INLINE __inline
+#define LJ_AINLINE __forceinline
+#define LJ_NOINLINE __declspec(noinline)
+#if defined(_M_IX86)
+#define LJ_FASTCALL __fastcall
+#endif
+
+#ifdef _M_PPC
+unsigned int _CountLeadingZeros(long);
+#pragma intrinsic(_CountLeadingZeros)
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ return _CountLeadingZeros(x) ^ 31;
+}
+#else
+unsigned char _BitScanForward(uint32_t *, unsigned long);
+unsigned char _BitScanReverse(uint32_t *, unsigned long);
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+
+static LJ_AINLINE uint32_t lj_ffs(uint32_t x)
+{
+ uint32_t r; _BitScanForward(&r, x); return r;
+}
+
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ uint32_t r; _BitScanReverse(&r, x); return r;
+}
+#endif
+
+unsigned long _byteswap_ulong(unsigned long);
+uint64_t _byteswap_uint64(uint64_t);
+#define lj_bswap(x) (_byteswap_ulong((x)))
+#define lj_bswap64(x) (_byteswap_uint64((x)))
+
+#if defined(_M_PPC) && defined(LUAJIT_NO_UNALIGNED)
+/*
+** Replacement for unaligned loads on Xbox 360. Disabled by default since it's
+** usually more costly than the occasional stall when crossing a cache-line.
+*/
+static LJ_AINLINE uint16_t lj_getu16(const void *v)
+{
+ const uint8_t *p = (const uint8_t *)v;
+ return (uint16_t)((p[0]<<8) | p[1]);
+}
+static LJ_AINLINE uint32_t lj_getu32(const void *v)
+{
+ const uint8_t *p = (const uint8_t *)v;
+ return (uint32_t)((p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]);
+}
+#else
+/* Unaligned loads are generally ok on x86/x64. */
+#define lj_getu16(p) (*(uint16_t *)(p))
+#define lj_getu32(p) (*(uint32_t *)(p))
+#endif
+
+#else
+#error "missing defines for your compiler"
+#endif
+
+/* Optional defines. */
+#ifndef LJ_FASTCALL
+#define LJ_FASTCALL
+#endif
+#ifndef LJ_NORET
+#define LJ_NORET
+#endif
+#ifndef LJ_NOAPI
+#define LJ_NOAPI extern
+#endif
+#ifndef LJ_LIKELY
+#define LJ_LIKELY(x) (x)
+#define LJ_UNLIKELY(x) (x)
+#endif
+
+/* Attributes for internal functions. */
+#define LJ_DATA LJ_NOAPI
+#define LJ_DATADEF
+#define LJ_ASMF LJ_NOAPI
+#define LJ_FUNCA LJ_NOAPI
+#if defined(ljamalg_c)
+#define LJ_FUNC static
+#else
+#define LJ_FUNC LJ_NOAPI
+#endif
+#define LJ_FUNC_NORET LJ_FUNC LJ_NORET
+#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET
+#define LJ_ASMF_NORET LJ_ASMF LJ_NORET
+
+/* Runtime assertions. */
+#ifdef lua_assert
+#define check_exp(c, e) (lua_assert(c), (e))
+#define api_check(l, e) lua_assert(e)
+#else
+#define lua_assert(c) ((void)0)
+#define check_exp(c, e) (e)
+#define api_check luai_apicheck
+#endif
+
+/* Static assertions. */
+#define LJ_ASSERT_NAME2(name, line) name ## line
+#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line)
+#ifdef __COUNTER__
+#define LJ_STATIC_ASSERT(cond) \
+ extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
+#else
+#define LJ_STATIC_ASSERT(cond) \
+ extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
+#endif
+
+#endif
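The one-word Bloom filter near the top of lj_def.h is worth a quick illustration: set one bit per hash value, and a zero bloomtest() result proves the value was never inserted (false positives are possible, false negatives are not). A standalone sketch with the macro bodies copied from the header above; the hash values are made up:

#include <stdint.h>
#include <stdio.h>

/* Same definitions as in lj_def.h above. */
typedef uintptr_t BloomFilter;
#define BLOOM_MASK      (8*sizeof(BloomFilter) - 1)
#define bloombit(x)     ((uintptr_t)1 << ((x) & BLOOM_MASK))
#define bloomset(b, x)  ((b) |= bloombit((x)))
#define bloomtest(b, x) ((b) & bloombit((x)))

int main(void)
{
  BloomFilter bf = 0;
  bloomset(bf, 17u);
  bloomset(bf, 42u);
  printf("17 -> %s\n", bloomtest(bf, 17u) ? "maybe present" : "definitely absent");
  printf("99 -> %s\n", bloomtest(bf, 99u) ? "maybe present" : "definitely absent");
  return 0;
}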
diff --git a/3rdparty/lua/src/lj_dispatch.c b/3rdparty/lua/src/lj_dispatch.c
index 3e714d4..d57f1a6 100644
--- a/3rdparty/lua/src/lj_dispatch.c
+++ b/3rdparty/lua/src/lj_dispatch.c
@@ -1,494 +1,494 @@
-/*
-** Instruction dispatch handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_dispatch_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_err.h"
-#include "lj_func.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_debug.h"
-#include "lj_state.h"
-#include "lj_frame.h"
-#include "lj_bc.h"
-#include "lj_ff.h"
-#if LJ_HASJIT
-#include "lj_jit.h"
-#endif
-#if LJ_HASFFI
-#include "lj_ccallback.h"
-#endif
-#include "lj_trace.h"
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-#include "luajit.h"
-
-/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */
-LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC);
-
-/* -- Dispatch table management ------------------------------------------- */
-
-#if LJ_TARGET_MIPS
-#include <math.h>
-LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
- lua_State *co);
-
-#define GOTFUNC(name) (ASMFunction)name,
-static const ASMFunction dispatch_got[] = {
- GOTDEF(GOTFUNC)
-};
-#undef GOTFUNC
-#endif
-
-/* Initialize instruction dispatch table and hot counters. */
-void lj_dispatch_init(GG_State *GG)
-{
- uint32_t i;
- ASMFunction *disp = GG->dispatch;
- for (i = 0; i < GG_LEN_SDISP; i++)
- disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]);
- for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
- disp[i] = makeasmfunc(lj_bc_ofs[i]);
- /* The JIT engine is off by default. luaopen_jit() turns it on. */
- disp[BC_FORL] = disp[BC_IFORL];
- disp[BC_ITERL] = disp[BC_IITERL];
- disp[BC_LOOP] = disp[BC_ILOOP];
- disp[BC_FUNCF] = disp[BC_IFUNCF];
- disp[BC_FUNCV] = disp[BC_IFUNCV];
- GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0);
- for (i = 0; i < GG_NUM_ASMFF; i++)
- GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0);
-#if LJ_TARGET_MIPS
- memcpy(GG->got, dispatch_got, LJ_GOT__MAX*4);
-#endif
-}
-
-#if LJ_HASJIT
-/* Initialize hotcount table. */
-void lj_dispatch_init_hotcount(global_State *g)
-{
- int32_t hotloop = G2J(g)->param[JIT_P_hotloop];
- HotCount start = (HotCount)(hotloop*HOTCOUNT_LOOP - 1);
- HotCount *hotcount = G2GG(g)->hotcount;
- uint32_t i;
- for (i = 0; i < HOTCOUNT_SIZE; i++)
- hotcount[i] = start;
-}
-#endif
-
-/* Internal dispatch mode bits. */
-#define DISPMODE_JIT 0x01 /* JIT compiler on. */
-#define DISPMODE_REC 0x02 /* Recording active. */
-#define DISPMODE_INS 0x04 /* Override instruction dispatch. */
-#define DISPMODE_CALL 0x08 /* Override call dispatch. */
-#define DISPMODE_RET 0x10 /* Override return dispatch. */
-
-/* Update dispatch table depending on various flags. */
-void lj_dispatch_update(global_State *g)
-{
- uint8_t oldmode = g->dispatchmode;
- uint8_t mode = 0;
-#if LJ_HASJIT
- mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0;
- mode |= G2J(g)->state != LJ_TRACE_IDLE ?
- (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0;
-#endif
- mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0;
- mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0;
- mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0;
- if (oldmode != mode) { /* Mode changed? */
- ASMFunction *disp = G2GG(g)->dispatch;
- ASMFunction f_forl, f_iterl, f_loop, f_funcf, f_funcv;
- g->dispatchmode = mode;
-
- /* Hotcount if JIT is on, but not while recording. */
- if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) {
- f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]);
- f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]);
- f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]);
- f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]);
- f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]);
- } else { /* Otherwise use the non-hotcounting instructions. */
- f_forl = disp[GG_LEN_DDISP+BC_IFORL];
- f_iterl = disp[GG_LEN_DDISP+BC_IITERL];
- f_loop = disp[GG_LEN_DDISP+BC_ILOOP];
- f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]);
- f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]);
- }
- /* Init static counting instruction dispatch first (may be copied below). */
- disp[GG_LEN_DDISP+BC_FORL] = f_forl;
- disp[GG_LEN_DDISP+BC_ITERL] = f_iterl;
- disp[GG_LEN_DDISP+BC_LOOP] = f_loop;
-
- /* Set dynamic instruction dispatch. */
- if ((oldmode ^ mode) & (DISPMODE_REC|DISPMODE_INS)) {
- /* Need to update the whole table. */
- if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { /* No ins dispatch? */
- /* Copy static dispatch table to dynamic dispatch table. */
- memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction));
- /* Overwrite with dynamic return dispatch. */
- if ((mode & DISPMODE_RET)) {
- disp[BC_RETM] = lj_vm_rethook;
- disp[BC_RET] = lj_vm_rethook;
- disp[BC_RET0] = lj_vm_rethook;
- disp[BC_RET1] = lj_vm_rethook;
- }
- } else {
- /* The recording dispatch also checks for hooks. */
- ASMFunction f = (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook;
- uint32_t i;
- for (i = 0; i < GG_LEN_SDISP; i++)
- disp[i] = f;
- }
- } else if (!(mode & (DISPMODE_REC|DISPMODE_INS))) {
- /* Otherwise set dynamic counting ins. */
- disp[BC_FORL] = f_forl;
- disp[BC_ITERL] = f_iterl;
- disp[BC_LOOP] = f_loop;
- /* Set dynamic return dispatch. */
- if ((mode & DISPMODE_RET)) {
- disp[BC_RETM] = lj_vm_rethook;
- disp[BC_RET] = lj_vm_rethook;
- disp[BC_RET0] = lj_vm_rethook;
- disp[BC_RET1] = lj_vm_rethook;
- } else {
- disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM];
- disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET];
- disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0];
- disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1];
- }
- }
-
- /* Set dynamic call dispatch. */
- if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */
- uint32_t i;
- if ((mode & DISPMODE_CALL) == 0) { /* No call hooks? */
- for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
- disp[i] = makeasmfunc(lj_bc_ofs[i]);
- } else {
- for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
- disp[i] = lj_vm_callhook;
- }
- }
- if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */
- disp[BC_FUNCF] = f_funcf;
- disp[BC_FUNCV] = f_funcv;
- }
-
-#if LJ_HASJIT
- /* Reset hotcounts for JIT off to on transition. */
- if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT))
- lj_dispatch_init_hotcount(g);
-#endif
- }
-}
-
-/* -- JIT mode setting ---------------------------------------------------- */
-
-#if LJ_HASJIT
-/* Set JIT mode for a single prototype. */
-static void setptmode(global_State *g, GCproto *pt, int mode)
-{
- if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */
- pt->flags &= ~PROTO_NOJIT;
- lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */
- } else { /* Flush and/or disable JIT compilation. */
- if (!(mode & LUAJIT_MODE_FLUSH))
- pt->flags |= PROTO_NOJIT;
- lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */
- }
-}
-
-/* Recursively set the JIT mode for all children of a prototype. */
-static void setptmode_all(global_State *g, GCproto *pt, int mode)
-{
- ptrdiff_t i;
- if (!(pt->flags & PROTO_CHILD)) return;
- for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) {
- GCobj *o = proto_kgc(pt, i);
- if (o->gch.gct == ~LJ_TPROTO) {
- setptmode(g, gco2pt(o), mode);
- setptmode_all(g, gco2pt(o), mode);
- }
- }
-}
-#endif
-
-/* Public API function: control the JIT engine. */
-int luaJIT_setmode(lua_State *L, int idx, int mode)
-{
- global_State *g = G(L);
- int mm = mode & LUAJIT_MODE_MASK;
- lj_trace_abort(g); /* Abort recording on any state change. */
- /* Avoid pulling the rug from under our own feet. */
- if ((g->hookmask & HOOK_GC))
- lj_err_caller(L, LJ_ERR_NOGCMM);
- switch (mm) {
-#if LJ_HASJIT
- case LUAJIT_MODE_ENGINE:
- if ((mode & LUAJIT_MODE_FLUSH)) {
- lj_trace_flushall(L);
- } else {
- if (!(mode & LUAJIT_MODE_ON))
- G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
-#if LJ_TARGET_X86ORX64
- else if ((G2J(g)->flags & JIT_F_SSE2))
- G2J(g)->flags |= (uint32_t)JIT_F_ON;
- else
- return 0; /* Don't turn on JIT compiler without SSE2 support. */
-#else
- else
- G2J(g)->flags |= (uint32_t)JIT_F_ON;
-#endif
- lj_dispatch_update(g);
- }
- break;
- case LUAJIT_MODE_FUNC:
- case LUAJIT_MODE_ALLFUNC:
- case LUAJIT_MODE_ALLSUBFUNC: {
- cTValue *tv = idx == 0 ? frame_prev(L->base-1) :
- idx > 0 ? L->base + (idx-1) : L->top + idx;
- GCproto *pt;
- if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn))
- pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */
- else if (tvisproto(tv))
- pt = protoV(tv);
- else
- return 0; /* Failed. */
- if (mm != LUAJIT_MODE_ALLSUBFUNC)
- setptmode(g, pt, mode);
- if (mm != LUAJIT_MODE_FUNC)
- setptmode_all(g, pt, mode);
- break;
- }
- case LUAJIT_MODE_TRACE:
- if (!(mode & LUAJIT_MODE_FLUSH))
- return 0; /* Failed. */
- lj_trace_flush(G2J(g), idx);
- break;
-#else
- case LUAJIT_MODE_ENGINE:
- case LUAJIT_MODE_FUNC:
- case LUAJIT_MODE_ALLFUNC:
- case LUAJIT_MODE_ALLSUBFUNC:
- UNUSED(idx);
- if ((mode & LUAJIT_MODE_ON))
- return 0; /* Failed. */
- break;
-#endif
- case LUAJIT_MODE_WRAPCFUNC:
- if ((mode & LUAJIT_MODE_ON)) {
- if (idx != 0) {
- cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx;
- if (tvislightud(tv))
- g->wrapf = (lua_CFunction)lightudV(tv);
- else
- return 0; /* Failed. */
- } else {
- return 0; /* Failed. */
- }
- g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0);
- } else {
- g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0);
- }
- break;
- default:
- return 0; /* Failed. */
- }
- return 1; /* OK. */
-}
-
-/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */
-LUA_API void LUAJIT_VERSION_SYM(void)
-{
-}
-
-/* -- Hooks --------------------------------------------------------------- */
-
-/* This function can be called asynchronously (e.g. during a signal). */
-LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count)
-{
- global_State *g = G(L);
- mask &= HOOK_EVENTMASK;
- if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */
- g->hookf = func;
- g->hookcount = g->hookcstart = (int32_t)count;
- g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask);
- lj_trace_abort(g); /* Abort recording on any hook change. */
- lj_dispatch_update(g);
- return 1;
-}
-
-LUA_API lua_Hook lua_gethook(lua_State *L)
-{
- return G(L)->hookf;
-}
-
-LUA_API int lua_gethookmask(lua_State *L)
-{
- return G(L)->hookmask & HOOK_EVENTMASK;
-}
-
-LUA_API int lua_gethookcount(lua_State *L)
-{
- return (int)G(L)->hookcstart;
-}
-
-/* Call a hook. */
-static void callhook(lua_State *L, int event, BCLine line)
-{
- global_State *g = G(L);
- lua_Hook hookf = g->hookf;
- if (hookf && !hook_active(g)) {
- lua_Debug ar;
- lj_trace_abort(g); /* Abort recording on any hook call. */
- ar.event = event;
- ar.currentline = line;
- /* Top frame, nextframe = NULL. */
- ar.i_ci = (int)((L->base-1) - tvref(L->stack));
- lj_state_checkstack(L, 1+LUA_MINSTACK);
- hook_enter(g);
- hookf(L, &ar);
- lua_assert(hook_active(g));
- hook_leave(g);
- }
-}
-
-/* -- Dispatch callbacks -------------------------------------------------- */
-
-/* Calculate number of used stack slots in the current frame. */
-static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres)
-{
- BCIns ins = pc[-1];
- if (bc_op(ins) == BC_UCLO)
- ins = pc[bc_j(ins)];
- switch (bc_op(ins)) {
- case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1;
- case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1;
- case BC_TSETM: return bc_a(ins) + nres-1;
- default: return pt->framesize;
- }
-}
-
-/* Instruction dispatch. Used by instr/line/return hooks or when recording. */
-void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
-{
- ERRNO_SAVE
- GCfunc *fn = curr_func(L);
- GCproto *pt = funcproto(fn);
- void *cf = cframe_raw(L->cframe);
- const BCIns *oldpc = cframe_pc(cf);
- global_State *g = G(L);
- BCReg slots;
- setcframe_pc(cf, pc);
- slots = cur_topslot(pt, pc, cframe_multres_n(cf));
- L->top = L->base + slots; /* Fix top. */
-#if LJ_HASJIT
- {
- jit_State *J = G2J(g);
- if (J->state != LJ_TRACE_IDLE) {
-#ifdef LUA_USE_ASSERT
- ptrdiff_t delta = L->top - L->base;
-#endif
- J->L = L;
- lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
- lua_assert(L->top - L->base == delta);
- }
- }
-#endif
- if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) {
- g->hookcount = g->hookcstart;
- callhook(L, LUA_HOOKCOUNT, -1);
- L->top = L->base + slots; /* Fix top again. */
- }
- if ((g->hookmask & LUA_MASKLINE)) {
- BCPos npc = proto_bcpos(pt, pc) - 1;
- BCPos opc = proto_bcpos(pt, oldpc) - 1;
- BCLine line = lj_debug_line(pt, npc);
- if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) {
- callhook(L, LUA_HOOKLINE, line);
- L->top = L->base + slots; /* Fix top again. */
- }
- }
- if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1])))
- callhook(L, LUA_HOOKRET, -1);
- ERRNO_RESTORE
-}
-
-/* Initialize call. Ensure stack space and return # of missing parameters. */
-static int call_init(lua_State *L, GCfunc *fn)
-{
- if (isluafunc(fn)) {
- GCproto *pt = funcproto(fn);
- int numparams = pt->numparams;
- int gotparams = (int)(L->top - L->base);
- int need = pt->framesize;
- if ((pt->flags & PROTO_VARARG)) need += 1+gotparams;
- lj_state_checkstack(L, (MSize)need);
- numparams -= gotparams;
- return numparams >= 0 ? numparams : 0;
- } else {
- lj_state_checkstack(L, LUA_MINSTACK);
- return 0;
- }
-}
-
-/* Call dispatch. Used by call hooks, hot calls or when recording. */
-ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
-{
- ERRNO_SAVE
- GCfunc *fn = curr_func(L);
- BCOp op;
- global_State *g = G(L);
-#if LJ_HASJIT
- jit_State *J = G2J(g);
-#endif
- int missing = call_init(L, fn);
-#if LJ_HASJIT
- J->L = L;
- if ((uintptr_t)pc & 1) { /* Marker for hot call. */
-#ifdef LUA_USE_ASSERT
- ptrdiff_t delta = L->top - L->base;
-#endif
- pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
- lj_trace_hot(J, pc);
- lua_assert(L->top - L->base == delta);
- goto out;
- } else if (J->state != LJ_TRACE_IDLE &&
- !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
-#ifdef LUA_USE_ASSERT
- ptrdiff_t delta = L->top - L->base;
-#endif
- /* Record the FUNC* bytecodes, too. */
- lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
- lua_assert(L->top - L->base == delta);
- }
-#endif
- if ((g->hookmask & LUA_MASKCALL)) {
- int i;
- for (i = 0; i < missing; i++) /* Add missing parameters. */
- setnilV(L->top++);
- callhook(L, LUA_HOOKCALL, -1);
- /* Preserve modifications of missing parameters by lua_setlocal(). */
- while (missing-- > 0 && tvisnil(L->top - 1))
- L->top--;
- }
-#if LJ_HASJIT
-out:
-#endif
- op = bc_op(pc[-1]); /* Get FUNC* op. */
-#if LJ_HASJIT
- /* Use the non-hotcounting variants if JIT is off or while recording. */
- if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) &&
- (op == BC_FUNCF || op == BC_FUNCV))
- op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF);
-#endif
- ERRNO_RESTORE
- return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */
-}
-
+/*
+** Instruction dispatch handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_dispatch_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_func.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_debug.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ff.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+#if LJ_HASFFI
+#include "lj_ccallback.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "luajit.h"
+
+/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */
+LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC);
+
+/* -- Dispatch table management ------------------------------------------- */
+
+#if LJ_TARGET_MIPS
+#include <math.h>
+LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
+ lua_State *co);
+
+#define GOTFUNC(name) (ASMFunction)name,
+static const ASMFunction dispatch_got[] = {
+ GOTDEF(GOTFUNC)
+};
+#undef GOTFUNC
+#endif
+
+/* Initialize instruction dispatch table and hot counters. */
+void lj_dispatch_init(GG_State *GG)
+{
+ uint32_t i;
+ ASMFunction *disp = GG->dispatch;
+ for (i = 0; i < GG_LEN_SDISP; i++)
+ disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ /* The JIT engine is off by default. luaopen_jit() turns it on. */
+ disp[BC_FORL] = disp[BC_IFORL];
+ disp[BC_ITERL] = disp[BC_IITERL];
+ disp[BC_LOOP] = disp[BC_ILOOP];
+ disp[BC_FUNCF] = disp[BC_IFUNCF];
+ disp[BC_FUNCV] = disp[BC_IFUNCV];
+ GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0);
+ for (i = 0; i < GG_NUM_ASMFF; i++)
+ GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0);
+#if LJ_TARGET_MIPS
+ memcpy(GG->got, dispatch_got, LJ_GOT__MAX*4);
+#endif
+}
+
+#if LJ_HASJIT
+/* Initialize hotcount table. */
+void lj_dispatch_init_hotcount(global_State *g)
+{
+ int32_t hotloop = G2J(g)->param[JIT_P_hotloop];
+ HotCount start = (HotCount)(hotloop*HOTCOUNT_LOOP - 1);
+ HotCount *hotcount = G2GG(g)->hotcount;
+ uint32_t i;
+ for (i = 0; i < HOTCOUNT_SIZE; i++)
+ hotcount[i] = start;
+}
+#endif
+
+/* Internal dispatch mode bits. */
+#define DISPMODE_JIT 0x01 /* JIT compiler on. */
+#define DISPMODE_REC 0x02 /* Recording active. */
+#define DISPMODE_INS 0x04 /* Override instruction dispatch. */
+#define DISPMODE_CALL 0x08 /* Override call dispatch. */
+#define DISPMODE_RET 0x10 /* Override return dispatch. */
+
+/* Update dispatch table depending on various flags. */
+void lj_dispatch_update(global_State *g)
+{
+ uint8_t oldmode = g->dispatchmode;
+ uint8_t mode = 0;
+#if LJ_HASJIT
+ mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0;
+ mode |= G2J(g)->state != LJ_TRACE_IDLE ?
+ (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0;
+#endif
+ mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0;
+ mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0;
+ mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0;
+ if (oldmode != mode) { /* Mode changed? */
+ ASMFunction *disp = G2GG(g)->dispatch;
+ ASMFunction f_forl, f_iterl, f_loop, f_funcf, f_funcv;
+ g->dispatchmode = mode;
+
+ /* Hotcount if JIT is on, but not while recording. */
+ if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) {
+ f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]);
+ f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]);
+ f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]);
+ f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]);
+ f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]);
+ } else { /* Otherwise use the non-hotcounting instructions. */
+ f_forl = disp[GG_LEN_DDISP+BC_IFORL];
+ f_iterl = disp[GG_LEN_DDISP+BC_IITERL];
+ f_loop = disp[GG_LEN_DDISP+BC_ILOOP];
+ f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]);
+ f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]);
+ }
+ /* Init static counting instruction dispatch first (may be copied below). */
+ disp[GG_LEN_DDISP+BC_FORL] = f_forl;
+ disp[GG_LEN_DDISP+BC_ITERL] = f_iterl;
+ disp[GG_LEN_DDISP+BC_LOOP] = f_loop;
+
+ /* Set dynamic instruction dispatch. */
+ if ((oldmode ^ mode) & (DISPMODE_REC|DISPMODE_INS)) {
+ /* Need to update the whole table. */
+ if (!(mode & (DISPMODE_REC|DISPMODE_INS))) { /* No ins dispatch? */
+ /* Copy static dispatch table to dynamic dispatch table. */
+ memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction));
+ /* Overwrite with dynamic return dispatch. */
+ if ((mode & DISPMODE_RET)) {
+ disp[BC_RETM] = lj_vm_rethook;
+ disp[BC_RET] = lj_vm_rethook;
+ disp[BC_RET0] = lj_vm_rethook;
+ disp[BC_RET1] = lj_vm_rethook;
+ }
+ } else {
+ /* The recording dispatch also checks for hooks. */
+ ASMFunction f = (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook;
+ uint32_t i;
+ for (i = 0; i < GG_LEN_SDISP; i++)
+ disp[i] = f;
+ }
+ } else if (!(mode & (DISPMODE_REC|DISPMODE_INS))) {
+ /* Otherwise set dynamic counting ins. */
+ disp[BC_FORL] = f_forl;
+ disp[BC_ITERL] = f_iterl;
+ disp[BC_LOOP] = f_loop;
+ /* Set dynamic return dispatch. */
+ if ((mode & DISPMODE_RET)) {
+ disp[BC_RETM] = lj_vm_rethook;
+ disp[BC_RET] = lj_vm_rethook;
+ disp[BC_RET0] = lj_vm_rethook;
+ disp[BC_RET1] = lj_vm_rethook;
+ } else {
+ disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM];
+ disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET];
+ disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0];
+ disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1];
+ }
+ }
+
+ /* Set dynamic call dispatch. */
+ if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */
+ uint32_t i;
+ if ((mode & DISPMODE_CALL) == 0) { /* No call hooks? */
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ } else {
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = lj_vm_callhook;
+ }
+ }
+ if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */
+ disp[BC_FUNCF] = f_funcf;
+ disp[BC_FUNCV] = f_funcv;
+ }
+
+#if LJ_HASJIT
+ /* Reset hotcounts for JIT off to on transition. */
+ if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT))
+ lj_dispatch_init_hotcount(g);
+#endif
+ }
+}
+
+/* -- JIT mode setting ---------------------------------------------------- */
+
+#if LJ_HASJIT
+/* Set JIT mode for a single prototype. */
+static void setptmode(global_State *g, GCproto *pt, int mode)
+{
+ if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */
+ pt->flags &= ~PROTO_NOJIT;
+ lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */
+ } else { /* Flush and/or disable JIT compilation. */
+ if (!(mode & LUAJIT_MODE_FLUSH))
+ pt->flags |= PROTO_NOJIT;
+ lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */
+ }
+}
+
+/* Recursively set the JIT mode for all children of a prototype. */
+static void setptmode_all(global_State *g, GCproto *pt, int mode)
+{
+ ptrdiff_t i;
+ if (!(pt->flags & PROTO_CHILD)) return;
+ for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) {
+ GCobj *o = proto_kgc(pt, i);
+ if (o->gch.gct == ~LJ_TPROTO) {
+ setptmode(g, gco2pt(o), mode);
+ setptmode_all(g, gco2pt(o), mode);
+ }
+ }
+}
+#endif
+
+/* Public API function: control the JIT engine. */
+int luaJIT_setmode(lua_State *L, int idx, int mode)
+{
+ global_State *g = G(L);
+ int mm = mode & LUAJIT_MODE_MASK;
+ lj_trace_abort(g); /* Abort recording on any state change. */
+ /* Avoid pulling the rug from under our own feet. */
+ if ((g->hookmask & HOOK_GC))
+ lj_err_caller(L, LJ_ERR_NOGCMM);
+ switch (mm) {
+#if LJ_HASJIT
+ case LUAJIT_MODE_ENGINE:
+ if ((mode & LUAJIT_MODE_FLUSH)) {
+ lj_trace_flushall(L);
+ } else {
+ if (!(mode & LUAJIT_MODE_ON))
+ G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
+#if LJ_TARGET_X86ORX64
+ else if ((G2J(g)->flags & JIT_F_SSE2))
+ G2J(g)->flags |= (uint32_t)JIT_F_ON;
+ else
+ return 0; /* Don't turn on JIT compiler without SSE2 support. */
+#else
+ else
+ G2J(g)->flags |= (uint32_t)JIT_F_ON;
+#endif
+ lj_dispatch_update(g);
+ }
+ break;
+ case LUAJIT_MODE_FUNC:
+ case LUAJIT_MODE_ALLFUNC:
+ case LUAJIT_MODE_ALLSUBFUNC: {
+ cTValue *tv = idx == 0 ? frame_prev(L->base-1) :
+ idx > 0 ? L->base + (idx-1) : L->top + idx;
+ GCproto *pt;
+ if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn))
+ pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */
+ else if (tvisproto(tv))
+ pt = protoV(tv);
+ else
+ return 0; /* Failed. */
+ if (mm != LUAJIT_MODE_ALLSUBFUNC)
+ setptmode(g, pt, mode);
+ if (mm != LUAJIT_MODE_FUNC)
+ setptmode_all(g, pt, mode);
+ break;
+ }
+ case LUAJIT_MODE_TRACE:
+ if (!(mode & LUAJIT_MODE_FLUSH))
+ return 0; /* Failed. */
+ lj_trace_flush(G2J(g), idx);
+ break;
+#else
+ case LUAJIT_MODE_ENGINE:
+ case LUAJIT_MODE_FUNC:
+ case LUAJIT_MODE_ALLFUNC:
+ case LUAJIT_MODE_ALLSUBFUNC:
+ UNUSED(idx);
+ if ((mode & LUAJIT_MODE_ON))
+ return 0; /* Failed. */
+ break;
+#endif
+ case LUAJIT_MODE_WRAPCFUNC:
+ if ((mode & LUAJIT_MODE_ON)) {
+ if (idx != 0) {
+ cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx;
+ if (tvislightud(tv))
+ g->wrapf = (lua_CFunction)lightudV(tv);
+ else
+ return 0; /* Failed. */
+ } else {
+ return 0; /* Failed. */
+ }
+ g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0);
+ } else {
+ g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0);
+ }
+ break;
+ default:
+ return 0; /* Failed. */
+ }
+ return 1; /* OK. */
+}
+
+/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */
+LUA_API void LUAJIT_VERSION_SYM(void)
+{
+}
+
+/* -- Hooks --------------------------------------------------------------- */
+
+/* This function can be called asynchronously (e.g. during a signal). */
+LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count)
+{
+ global_State *g = G(L);
+ mask &= HOOK_EVENTMASK;
+ if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */
+ g->hookf = func;
+ g->hookcount = g->hookcstart = (int32_t)count;
+ g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask);
+ lj_trace_abort(g); /* Abort recording on any hook change. */
+ lj_dispatch_update(g);
+ return 1;
+}
+
+LUA_API lua_Hook lua_gethook(lua_State *L)
+{
+ return G(L)->hookf;
+}
+
+LUA_API int lua_gethookmask(lua_State *L)
+{
+ return G(L)->hookmask & HOOK_EVENTMASK;
+}
+
+LUA_API int lua_gethookcount(lua_State *L)
+{
+ return (int)G(L)->hookcstart;
+}
+
+/* Call a hook. */
+static void callhook(lua_State *L, int event, BCLine line)
+{
+ global_State *g = G(L);
+ lua_Hook hookf = g->hookf;
+ if (hookf && !hook_active(g)) {
+ lua_Debug ar;
+ lj_trace_abort(g); /* Abort recording on any hook call. */
+ ar.event = event;
+ ar.currentline = line;
+ /* Top frame, nextframe = NULL. */
+ ar.i_ci = (int)((L->base-1) - tvref(L->stack));
+ lj_state_checkstack(L, 1+LUA_MINSTACK);
+ hook_enter(g);
+ hookf(L, &ar);
+ lua_assert(hook_active(g));
+ hook_leave(g);
+ }
+}
+
+/* -- Dispatch callbacks -------------------------------------------------- */
+
+/* Calculate number of used stack slots in the current frame. */
+static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres)
+{
+ BCIns ins = pc[-1];
+ if (bc_op(ins) == BC_UCLO)
+ ins = pc[bc_j(ins)];
+ switch (bc_op(ins)) {
+ case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1;
+ case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1;
+ case BC_TSETM: return bc_a(ins) + nres-1;
+ default: return pt->framesize;
+ }
+}
+
+/* Instruction dispatch. Used by instr/line/return hooks or when recording. */
+void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ GCproto *pt = funcproto(fn);
+ void *cf = cframe_raw(L->cframe);
+ const BCIns *oldpc = cframe_pc(cf);
+ global_State *g = G(L);
+ BCReg slots;
+ setcframe_pc(cf, pc);
+ slots = cur_topslot(pt, pc, cframe_multres_n(cf));
+ L->top = L->base + slots; /* Fix top. */
+#if LJ_HASJIT
+ {
+ jit_State *J = G2J(g);
+ if (J->state != LJ_TRACE_IDLE) {
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ J->L = L;
+ lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
+ lua_assert(L->top - L->base == delta);
+ }
+ }
+#endif
+ if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) {
+ g->hookcount = g->hookcstart;
+ callhook(L, LUA_HOOKCOUNT, -1);
+ L->top = L->base + slots; /* Fix top again. */
+ }
+ if ((g->hookmask & LUA_MASKLINE)) {
+ BCPos npc = proto_bcpos(pt, pc) - 1;
+ BCPos opc = proto_bcpos(pt, oldpc) - 1;
+ BCLine line = lj_debug_line(pt, npc);
+ if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) {
+ callhook(L, LUA_HOOKLINE, line);
+ L->top = L->base + slots; /* Fix top again. */
+ }
+ }
+ if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1])))
+ callhook(L, LUA_HOOKRET, -1);
+ ERRNO_RESTORE
+}
+
+/* Initialize call. Ensure stack space and return # of missing parameters. */
+static int call_init(lua_State *L, GCfunc *fn)
+{
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ int numparams = pt->numparams;
+ int gotparams = (int)(L->top - L->base);
+ int need = pt->framesize;
+ if ((pt->flags & PROTO_VARARG)) need += 1+gotparams;
+ lj_state_checkstack(L, (MSize)need);
+ numparams -= gotparams;
+ return numparams >= 0 ? numparams : 0;
+ } else {
+ lj_state_checkstack(L, LUA_MINSTACK);
+ return 0;
+ }
+}
+
+/* Call dispatch. Used by call hooks, hot calls or when recording. */
+ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ BCOp op;
+ global_State *g = G(L);
+#if LJ_HASJIT
+ jit_State *J = G2J(g);
+#endif
+ int missing = call_init(L, fn);
+#if LJ_HASJIT
+ J->L = L;
+ if ((uintptr_t)pc & 1) { /* Marker for hot call. */
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
+ lj_trace_hot(J, pc);
+ lua_assert(L->top - L->base == delta);
+ goto out;
+ } else if (J->state != LJ_TRACE_IDLE &&
+ !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ /* Record the FUNC* bytecodes, too. */
+ lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
+ lua_assert(L->top - L->base == delta);
+ }
+#endif
+ if ((g->hookmask & LUA_MASKCALL)) {
+ int i;
+ for (i = 0; i < missing; i++) /* Add missing parameters. */
+ setnilV(L->top++);
+ callhook(L, LUA_HOOKCALL, -1);
+ /* Preserve modifications of missing parameters by lua_setlocal(). */
+ while (missing-- > 0 && tvisnil(L->top - 1))
+ L->top--;
+ }
+#if LJ_HASJIT
+out:
+#endif
+ op = bc_op(pc[-1]); /* Get FUNC* op. */
+#if LJ_HASJIT
+ /* Use the non-hotcounting variants if JIT is off or while recording. */
+ if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) &&
+ (op == BC_FUNCF || op == BC_FUNCV))
+ op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF);
+#endif
+ ERRNO_RESTORE
+ return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */
+}
+
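luaJIT_setmode() in the hunk above is the public knob that drives lj_dispatch_update(): turning the engine off swaps the hot-counting FORL/ITERL/LOOP/FUNC* dispatch entries for their interpreter-only variants. A sketch of toggling it from host code; it assumes luajit.h from this tree, and a return of 0 means the request was refused (e.g. enabling the JIT without SSE2 on x86):

#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#include <luajit.h>

int main(void)
{
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  /* Run a hot loop with the JIT compiler disabled, then re-enable it. */
  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_OFF))
    fprintf(stderr, "could not disable the JIT engine\n");
  luaL_dostring(L, "local s = 0 for i = 1, 1e6 do s = s + i end");
  if (!luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE | LUAJIT_MODE_ON))
    fprintf(stderr, "could not enable the JIT engine\n");
  lua_close(L);
  return 0;
}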
diff --git a/3rdparty/lua/src/lj_dispatch.h b/3rdparty/lua/src/lj_dispatch.h
index 2658d5b..a56b626 100644
--- a/3rdparty/lua/src/lj_dispatch.h
+++ b/3rdparty/lua/src/lj_dispatch.h
@@ -1,131 +1,131 @@
-/*
-** Instruction dispatch handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_DISPATCH_H
-#define _LJ_DISPATCH_H
-
-#include "lj_obj.h"
-#include "lj_bc.h"
-#if LJ_HASJIT
-#include "lj_jit.h"
-#endif
-
-#if LJ_TARGET_MIPS
-/* Need our own global offset table for the dreaded MIPS calling conventions. */
-#if LJ_HASJIT
-#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot)
-#else
-#define JITGOTDEF(_)
-#endif
-#if LJ_HASFFI
-#define FFIGOTDEF(_) \
- _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave)
-#else
-#define FFIGOTDEF(_)
-#endif
-#define GOTDEF(_) \
- _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
- _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
- _(pow) _(fmod) _(ldexp) \
- _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_err_throw) \
- _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
- _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \
- _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \
- _(lj_meta_for) _(lj_meta_len) _(lj_meta_tget) _(lj_meta_tset) \
- _(lj_state_growstack) _(lj_str_fromnum) _(lj_str_fromnumber) _(lj_str_new) \
- _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) _(lj_tab_new) \
- _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \
- JITGOTDEF(_) FFIGOTDEF(_)
-
-enum {
-#define GOTENUM(name) LJ_GOT_##name,
-GOTDEF(GOTENUM)
-#undef GOTENUM
- LJ_GOT__MAX
-};
-#endif
-
-/* Type of hot counter. Must match the code in the assembler VM. */
-/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */
-typedef uint16_t HotCount;
-
-/* Number of hot counter hash table entries (must be a power of two). */
-#define HOTCOUNT_SIZE 64
-#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount))
-
-/* Hotcount decrements. */
-#define HOTCOUNT_LOOP 2
-#define HOTCOUNT_CALL 1
-
-/* This solves a circular dependency problem -- bump as needed. Sigh. */
-#define GG_NUM_ASMFF 62
-
-#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF)
-#define GG_LEN_SDISP BC_FUNCF
-#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP)
-
-/* Global state, main thread and extra fields are allocated together. */
-typedef struct GG_State {
- lua_State L; /* Main thread. */
- global_State g; /* Global state. */
-#if LJ_TARGET_MIPS
- ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */
-#endif
-#if LJ_HASJIT
- jit_State J; /* JIT state. */
- HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */
-#endif
- ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */
- BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */
-} GG_State;
-
-#define GG_OFS(field) ((int)offsetof(GG_State, field))
-#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g)))
-#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J)))
-#define L2GG(L) (G2GG(G(L)))
-#define J2G(J) (&J2GG(J)->g)
-#define G2J(gl) (&G2GG(gl)->J)
-#define L2J(L) (&L2GG(L)->J)
-#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g))
-#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch))
-#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch))
-#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch))
-#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction))
-
-#define hotcount_get(gg, pc) \
- (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)]
-#define hotcount_set(gg, pc, val) \
- (hotcount_get((gg), (pc)) = (HotCount)(val))
-
-/* Dispatch table management. */
-LJ_FUNC void lj_dispatch_init(GG_State *GG);
-#if LJ_HASJIT
-LJ_FUNC void lj_dispatch_init_hotcount(global_State *g);
-#endif
-LJ_FUNC void lj_dispatch_update(global_State *g);
-
-/* Instruction dispatch callback for hooks or when recording. */
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
-LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc);
-
-#if LJ_HASFFI && !defined(_BUILDVM_H)
-/* Save/restore errno and GetLastError() around hooks, exits and recording. */
-#include <errno.h>
-#if LJ_TARGET_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError();
-#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr);
-#else
-#define ERRNO_SAVE int olderr = errno;
-#define ERRNO_RESTORE errno = olderr;
-#endif
-#else
-#define ERRNO_SAVE
-#define ERRNO_RESTORE
-#endif
-
-#endif
+/*
+** Instruction dispatch handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DISPATCH_H
+#define _LJ_DISPATCH_H
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+
+#if LJ_TARGET_MIPS
+/* Need our own global offset table for the dreaded MIPS calling conventions. */
+#if LJ_HASJIT
+#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot)
+#else
+#define JITGOTDEF(_)
+#endif
+#if LJ_HASFFI
+#define FFIGOTDEF(_) \
+ _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave)
+#else
+#define FFIGOTDEF(_)
+#endif
+#define GOTDEF(_) \
+ _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
+ _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
+ _(pow) _(fmod) _(ldexp) \
+ _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_err_throw) \
+ _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
+ _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \
+ _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \
+ _(lj_meta_for) _(lj_meta_len) _(lj_meta_tget) _(lj_meta_tset) \
+ _(lj_state_growstack) _(lj_str_fromnum) _(lj_str_fromnumber) _(lj_str_new) \
+ _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) _(lj_tab_new) \
+ _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \
+ JITGOTDEF(_) FFIGOTDEF(_)
+
+enum {
+#define GOTENUM(name) LJ_GOT_##name,
+GOTDEF(GOTENUM)
+#undef GOTENUM
+ LJ_GOT__MAX
+};
+#endif
+
+/* Type of hot counter. Must match the code in the assembler VM. */
+/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */
+typedef uint16_t HotCount;
+
+/* Number of hot counter hash table entries (must be a power of two). */
+#define HOTCOUNT_SIZE 64
+#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount))
+
+/* Hotcount decrements. */
+#define HOTCOUNT_LOOP 2
+#define HOTCOUNT_CALL 1
+
+/* This solves a circular dependency problem -- bump as needed. Sigh. */
+#define GG_NUM_ASMFF 62
+
+#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF)
+#define GG_LEN_SDISP BC_FUNCF
+#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP)
+
+/* Global state, main thread and extra fields are allocated together. */
+typedef struct GG_State {
+ lua_State L; /* Main thread. */
+ global_State g; /* Global state. */
+#if LJ_TARGET_MIPS
+ ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */
+#endif
+#if LJ_HASJIT
+ jit_State J; /* JIT state. */
+ HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */
+#endif
+ ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */
+ BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */
+} GG_State;
+
+#define GG_OFS(field) ((int)offsetof(GG_State, field))
+#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g)))
+#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J)))
+#define L2GG(L) (G2GG(G(L)))
+#define J2G(J) (&J2GG(J)->g)
+#define G2J(gl) (&G2GG(gl)->J)
+#define L2J(L) (&L2GG(L)->J)
+#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g))
+#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch))
+#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch))
+#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch))
+#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction))
+
+#define hotcount_get(gg, pc) \
+ (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)]
+#define hotcount_set(gg, pc, val) \
+ (hotcount_get((gg), (pc)) = (HotCount)(val))
+
+/* Dispatch table management. */
+LJ_FUNC void lj_dispatch_init(GG_State *GG);
+#if LJ_HASJIT
+LJ_FUNC void lj_dispatch_init_hotcount(global_State *g);
+#endif
+LJ_FUNC void lj_dispatch_update(global_State *g);
+
+/* Instruction dispatch callback for hooks or when recording. */
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
+LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc);
+
+#if LJ_HASFFI && !defined(_BUILDVM_H)
+/* Save/restore errno and GetLastError() around hooks, exits and recording. */
+#include <errno.h>
+#if LJ_TARGET_WINDOWS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError();
+#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr);
+#else
+#define ERRNO_SAVE int olderr = errno;
+#define ERRNO_RESTORE errno = olderr;
+#endif
+#else
+#define ERRNO_SAVE
+#define ERRNO_RESTORE
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_emit_arm.h b/3rdparty/lua/src/lj_emit_arm.h
index 40f0193..b76a9a4 100644
--- a/3rdparty/lua/src/lj_emit_arm.h
+++ b/3rdparty/lua/src/lj_emit_arm.h
@@ -1,356 +1,356 @@
-/*
-** ARM instruction emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Constant encoding --------------------------------------------------- */
-
-static uint8_t emit_invai[16] = {
- /* AND */ (ARMI_AND^ARMI_BIC) >> 21,
- /* EOR */ 0,
- /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21,
- /* RSB */ 0,
- /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21,
- /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21,
- /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21,
- /* RSC */ 0,
- /* TST */ 0,
- /* TEQ */ 0,
- /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21,
- /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21,
- /* ORR */ 0,
- /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21,
- /* BIC */ (ARMI_BIC^ARMI_AND) >> 21,
- /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21
-};
-
-/* Encode constant in K12 format for data processing instructions. */
-static uint32_t emit_isk12(ARMIns ai, int32_t n)
-{
- uint32_t invai, i, m = (uint32_t)n;
- /* K12: unsigned 8 bit value, rotated in steps of two bits. */
- for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
- if (m <= 255) return ARMI_K12|m|i;
- /* Otherwise try negation/complement with the inverse instruction. */
- invai = emit_invai[((ai >> 21) & 15)];
- if (!invai) return 0; /* Failed. No inverse instruction. */
- m = ~(uint32_t)n;
- if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) ||
- invai == (ARMI_CMP^ARMI_CMN) >> 21) m++;
- for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
- if (m <= 255) return ARMI_K12|(invai<<21)|m|i;
- return 0; /* Failed. */
-}
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm)
-{
- *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm);
-}
-
-static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm)
-{
- *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm);
-}
-
-static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn)
-{
- *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn);
-}
-
-static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm)
-{
- *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm);
-}
-
-static void emit_d(ASMState *as, ARMIns ai, Reg rd)
-{
- *--as->mcp = ai | ARMF_D(rd);
-}
-
-static void emit_n(ASMState *as, ARMIns ai, Reg rn)
-{
- *--as->mcp = ai | ARMF_N(rn);
-}
-
-static void emit_m(ASMState *as, ARMIns ai, Reg rm)
-{
- *--as->mcp = ai | ARMF_M(rm);
-}
-
-static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
-{
- lua_assert(ofs >= -255 && ofs <= 255);
- if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
- *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) |
- ((ofs & 0xf0) << 4) | (ofs & 0x0f);
-}
-
-static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
-{
- lua_assert(ofs >= -4095 && ofs <= 4095);
- /* Combine LDR/STR pairs to LDRD/STRD. */
- if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) &&
- (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn &&
- (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) &&
- as->mcp != as->mcloop) {
- as->mcp++;
- emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4);
- return;
- }
- if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
- *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs;
-}
-
-#if !LJ_SOFTFP
-static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
-{
- lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0);
- if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
- *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2);
-}
-#endif
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Prefer spills of BASE/L. */
-#define emit_canremat(ref) ((ref) < ASMREF_L)
-
-/* Try to find a one step delta relative to another constant. */
-static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != d);
- if (emit_canremat(ref)) {
- int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
- uint32_t k = emit_isk12(ARMI_ADD, delta);
- if (k) {
- if (k == ARMI_K12)
- emit_dm(as, ARMI_MOV, d, r);
- else
- emit_dn(as, ARMI_ADD^k, d, r);
- return 1;
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Try to find a two step delta relative to another constant. */
-static int emit_kdelta2(ASMState *as, Reg d, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != d);
- if (emit_canremat(ref)) {
- int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i;
- if (other) {
- int32_t delta = i - other;
- uint32_t sh, inv = 0, k2, k;
- if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; }
- sh = lj_ffs(delta) & ~1;
- k2 = emit_isk12(0, delta & (255 << sh));
- k = emit_isk12(0, delta & ~(255 << sh));
- if (k) {
- emit_dn(as, ARMI_ADD^k2^inv, d, d);
- emit_dn(as, ARMI_ADD^k^inv, d, r);
- return 1;
- }
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Load a 32 bit constant into a GPR. */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- uint32_t k = emit_isk12(ARMI_MOV, i);
- lua_assert(rset_test(as->freeset, r) || r == RID_TMP);
- if (k) {
- /* Standard K12 constant. */
- emit_d(as, ARMI_MOV^k, r);
- } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) {
- /* 16 bit loword constant for ARMv6T2. */
- emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
- } else if (emit_kdelta1(as, r, i)) {
- /* One step delta relative to another constant. */
- } else if ((as->flags & JIT_F_ARMV6T2)) {
- /* 32 bit hiword/loword constant for ARMv6T2. */
- emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r);
- emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
- } else if (emit_kdelta2(as, r, i)) {
- /* Two step delta relative to another constant. */
- } else {
- /* Otherwise construct the constant with up to 4 instructions. */
- /* NYI: use mvn+bic, use pc-relative loads. */
- for (;;) {
- uint32_t sh = lj_ffs(i) & ~1;
- int32_t m = i & (255 << sh);
- i &= ~(255 << sh);
- if (i == 0) {
- emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r);
- break;
- }
- emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r);
- }
- }
-}
-
-#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
-
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
-
-/* Get/set from constant pointer. */
-static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
-{
- int32_t i = i32ptr(p);
- emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)),
- (i & 4095));
-}
-
-#if !LJ_SOFTFP
-/* Load a number constant into an FPR. */
-static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
-{
- int32_t i;
- if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) {
- uint32_t hi = tv->u32.hi;
- uint32_t b = ((hi >> 22) & 0x1ff);
- if (!(hi & 0xffff) && (b == 0x100 || b == 0x0ff)) {
- *--as->mcp = ARMI_VMOVI_D | ARMF_D(r & 15) |
- ((tv->u32.hi >> 12) & 0x00080000) |
- ((tv->u32.hi >> 4) & 0x00070000) |
- ((tv->u32.hi >> 16) & 0x0000000f);
- return;
- }
- }
- i = i32ptr(tv);
- emit_vlso(as, ARMI_VLDR_D, r,
- ra_allock(as, (i & ~1020), RSET_GPR), (i & 1020));
-}
-#endif
-
-/* Get/set global_State fields. */
-#define emit_getgl(as, r, field) \
- emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field)
-#define emit_setgl(as, r, field) \
- emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field)
-
-/* Trace number is determined from pc of exit instruction. */
-#define emit_setvmstate(as, i) UNUSED(i)
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for internal jumps. */
-typedef MCode *MCLabel;
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-static void emit_branch(ASMState *as, ARMIns ai, MCode *target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = (target - p) - 1;
- lua_assert(((delta + 0x00800000) >> 24) == 0);
- *--p = ai | ((uint32_t)delta & 0x00ffffffu);
- as->mcp = p;
-}
-
-#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target))
-
-static void emit_call(ASMState *as, void *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = ((char *)target - (char *)p) - 8;
- if ((((delta>>2) + 0x00800000) >> 24) == 0) {
- if ((delta & 1))
- *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 27);
- else
- *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu);
- } else { /* Target out of range: need indirect call. But don't use R0-R3. */
- Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1));
- *p = ARMI_BLXr | ARMF_M(r);
- }
-}
-
-/* -- Emit generic operations --------------------------------------------- */
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
-#if LJ_SOFTFP
- lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
-#else
- if (dst >= RID_MAX_GPR) {
- emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S,
- (dst & 15), (src & 15));
- return;
- }
-#endif
- if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
- MCode ins = *as->mcp, swp = (src^dst);
- if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) {
- if (!((ins ^ (dst << 16)) & 0x000f0000))
- *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */
- if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000))
- *as->mcp = ins ^ (swp << 12); /* Swap D in store. */
- }
- }
- emit_dm(as, ARMI_MOV, dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
-#if LJ_SOFTFP
- lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
-#else
- if (r >= RID_MAX_GPR)
- emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, RID_SP, ofs);
- else
-#endif
- emit_lso(as, ARMI_LDR, r, RID_SP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
-#if LJ_SOFTFP
- lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
-#else
- if (r >= RID_MAX_GPR)
- emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, RID_SP, ofs);
- else
-#endif
- emit_lso(as, ARMI_STR, r, RID_SP, ofs);
-}
-
-/* Emit an arithmetic/logic operation with a constant operand. */
-static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src,
- int32_t i, RegSet allow)
-{
- uint32_t k = emit_isk12(ai, i);
- if (k)
- emit_dn(as, ai^k, dest, src);
- else
- emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs)
- emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r));
-}
-
-#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
-
+/*
+** ARM instruction emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Constant encoding --------------------------------------------------- */
+
+static uint8_t emit_invai[16] = {
+ /* AND */ (ARMI_AND^ARMI_BIC) >> 21,
+ /* EOR */ 0,
+ /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21,
+ /* RSB */ 0,
+ /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21,
+ /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21,
+ /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21,
+ /* RSC */ 0,
+ /* TST */ 0,
+ /* TEQ */ 0,
+ /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21,
+ /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21,
+ /* ORR */ 0,
+ /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21,
+ /* BIC */ (ARMI_BIC^ARMI_AND) >> 21,
+ /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21
+};
+
+/* Encode constant in K12 format for data processing instructions. */
+static uint32_t emit_isk12(ARMIns ai, int32_t n)
+{
+ uint32_t invai, i, m = (uint32_t)n;
+ /* K12: unsigned 8 bit value, rotated in steps of two bits. */
+ for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
+ if (m <= 255) return ARMI_K12|m|i;
+ /* Otherwise try negation/complement with the inverse instruction. */
+ invai = emit_invai[((ai >> 21) & 15)];
+ if (!invai) return 0; /* Failed. No inverse instruction. */
+ m = ~(uint32_t)n;
+ if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) ||
+ invai == (ARMI_CMP^ARMI_CMN) >> 21) m++;
+ for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
+ if (m <= 255) return ARMI_K12|(invai<<21)|m|i;
+ return 0; /* Failed. */
+}
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm);
+}
+
+static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm);
+}
+
+static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn);
+}
+
+static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm);
+}
+
+static void emit_d(ASMState *as, ARMIns ai, Reg rd)
+{
+ *--as->mcp = ai | ARMF_D(rd);
+}
+
+static void emit_n(ASMState *as, ARMIns ai, Reg rn)
+{
+ *--as->mcp = ai | ARMF_N(rn);
+}
+
+static void emit_m(ASMState *as, ARMIns ai, Reg rm)
+{
+ *--as->mcp = ai | ARMF_M(rm);
+}
+
+static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lua_assert(ofs >= -255 && ofs <= 255);
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) |
+ ((ofs & 0xf0) << 4) | (ofs & 0x0f);
+}
+
+static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lua_assert(ofs >= -4095 && ofs <= 4095);
+ /* Combine LDR/STR pairs to LDRD/STRD. */
+ if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) &&
+ (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn &&
+ (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) &&
+ as->mcp != as->mcloop) {
+ as->mcp++;
+ emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4);
+ return;
+ }
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs;
+}
+
+#if !LJ_SOFTFP
+static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0);
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2);
+}
+#endif
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer spills of BASE/L. */
+#define emit_canremat(ref) ((ref) < ASMREF_L)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != d);
+ if (emit_canremat(ref)) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ uint32_t k = emit_isk12(ARMI_ADD, delta);
+ if (k) {
+ if (k == ARMI_K12)
+ emit_dm(as, ARMI_MOV, d, r);
+ else
+ emit_dn(as, ARMI_ADD^k, d, r);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Try to find a two step delta relative to another constant. */
+static int emit_kdelta2(ASMState *as, Reg d, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != d);
+ if (emit_canremat(ref)) {
+ int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i;
+ if (other) {
+ int32_t delta = i - other;
+ uint32_t sh, inv = 0, k2, k;
+ if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; }
+ sh = lj_ffs(delta) & ~1;
+ k2 = emit_isk12(0, delta & (255 << sh));
+ k = emit_isk12(0, delta & ~(255 << sh));
+ if (k) {
+ emit_dn(as, ARMI_ADD^k2^inv, d, d);
+ emit_dn(as, ARMI_ADD^k^inv, d, r);
+ return 1;
+ }
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ uint32_t k = emit_isk12(ARMI_MOV, i);
+ lua_assert(rset_test(as->freeset, r) || r == RID_TMP);
+ if (k) {
+ /* Standard K12 constant. */
+ emit_d(as, ARMI_MOV^k, r);
+ } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) {
+ /* 16 bit loword constant for ARMv6T2. */
+ emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
+ } else if (emit_kdelta1(as, r, i)) {
+ /* One step delta relative to another constant. */
+ } else if ((as->flags & JIT_F_ARMV6T2)) {
+ /* 32 bit hiword/loword constant for ARMv6T2. */
+ emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r);
+ emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
+ } else if (emit_kdelta2(as, r, i)) {
+ /* Two step delta relative to another constant. */
+ } else {
+ /* Otherwise construct the constant with up to 4 instructions. */
+ /* NYI: use mvn+bic, use pc-relative loads. */
+ for (;;) {
+ uint32_t sh = lj_ffs(i) & ~1;
+ int32_t m = i & (255 << sh);
+ i &= ~(255 << sh);
+ if (i == 0) {
+ emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r);
+ break;
+ }
+ emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r);
+ }
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
+{
+ int32_t i = i32ptr(p);
+ emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)),
+ (i & 4095));
+}
+
+#if !LJ_SOFTFP
+/* Load a number constant into an FPR. */
+static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
+{
+ int32_t i;
+ if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) {
+ uint32_t hi = tv->u32.hi;
+ uint32_t b = ((hi >> 22) & 0x1ff);
+ if (!(hi & 0xffff) && (b == 0x100 || b == 0x0ff)) {
+ *--as->mcp = ARMI_VMOVI_D | ARMF_D(r & 15) |
+ ((tv->u32.hi >> 12) & 0x00080000) |
+ ((tv->u32.hi >> 4) & 0x00070000) |
+ ((tv->u32.hi >> 16) & 0x0000000f);
+ return;
+ }
+ }
+ i = i32ptr(tv);
+ emit_vlso(as, ARMI_VLDR_D, r,
+ ra_allock(as, (i & ~1020), RSET_GPR), (i & 1020));
+}
+#endif
+
+/* Get/set global_State fields. */
+#define emit_getgl(as, r, field) \
+ emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field)
+#define emit_setgl(as, r, field) \
+ emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field)
+
+/* Trace number is determined from pc of exit instruction. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_branch(ASMState *as, ARMIns ai, MCode *target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = (target - p) - 1;
+ lua_assert(((delta + 0x00800000) >> 24) == 0);
+ *--p = ai | ((uint32_t)delta & 0x00ffffffu);
+ as->mcp = p;
+}
+
+#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target))
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = ((char *)target - (char *)p) - 8;
+ if ((((delta>>2) + 0x00800000) >> 24) == 0) {
+ if ((delta & 1))
+ *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 27);
+ else
+ *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu);
+ } else { /* Target out of range: need indirect call. But don't use R0-R3. */
+ Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1));
+ *p = ARMI_BLXr | ARMF_M(r);
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+#if LJ_SOFTFP
+ lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
+#else
+ if (dst >= RID_MAX_GPR) {
+ emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S,
+ (dst & 15), (src & 15));
+ return;
+ }
+#endif
+ if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
+ MCode ins = *as->mcp, swp = (src^dst);
+ if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) {
+ if (!((ins ^ (dst << 16)) & 0x000f0000))
+ *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */
+ if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000))
+ *as->mcp = ins ^ (swp << 12); /* Swap D in store. */
+ }
+ }
+ emit_dm(as, ARMI_MOV, dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+#if LJ_SOFTFP
+ lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
+#else
+ if (r >= RID_MAX_GPR)
+ emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, RID_SP, ofs);
+ else
+#endif
+ emit_lso(as, ARMI_LDR, r, RID_SP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+#if LJ_SOFTFP
+ lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
+#else
+ if (r >= RID_MAX_GPR)
+ emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, RID_SP, ofs);
+ else
+#endif
+ emit_lso(as, ARMI_STR, r, RID_SP, ofs);
+}
+
+/* Emit an arithmetic/logic operation with a constant operand. */
+static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src,
+ int32_t i, RegSet allow)
+{
+ uint32_t k = emit_isk12(ai, i);
+ if (k)
+ emit_dn(as, ai^k, dest, src);
+ else
+ emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs)
+ emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r));
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
diff --git a/3rdparty/lua/src/lj_emit_mips.h b/3rdparty/lua/src/lj_emit_mips.h
index f72930d..74821b8 100644
--- a/3rdparty/lua/src/lj_emit_mips.h
+++ b/3rdparty/lua/src/lj_emit_mips.h
@@ -1,211 +1,211 @@
-/*
-** MIPS instruction emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt)
-{
- *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt);
-}
-
-static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a)
-{
- *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a);
-}
-
-#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0)
-#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt))
-
-static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i)
-{
- *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff);
-}
-
-#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i))
-#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i))
-
-static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh)
-{
- *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31);
-}
-
-#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0)
-
-static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
-{
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dta(as, MIPSI_ROTR, dest, src, shift);
- } else {
- emit_dst(as, MIPSI_OR, dest, dest, tmp);
- emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31);
- emit_dta(as, MIPSI_SRL, tmp, src, shift);
- }
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref) ((ref) <= REF_BASE)
-
-/* Try to find a one step delta relative to another constant. */
-static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != t);
- if (ref < ASMREF_L) {
- int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
- if (checki16(delta)) {
- emit_tsi(as, MIPSI_ADDIU, t, r, delta);
- return 1;
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Load a 32 bit constant into a GPR. */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- if (checki16(i)) {
- emit_ti(as, MIPSI_LI, r, i);
- } else {
- if ((i & 0xffff)) {
- int32_t jgl = i32ptr(J2G(as->J));
- if ((uint32_t)(i-jgl) < 65536) {
- emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768);
- return;
- } else if (emit_kdelta1(as, r, i)) {
- return;
- } else if ((i >> 16) == 0) {
- emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i);
- return;
- }
- emit_tsi(as, MIPSI_ORI, r, r, i);
- }
- emit_ti(as, MIPSI_LUI, r, (i >> 16));
- }
-}
-
-#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
-
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
-static void ra_allockreg(ASMState *as, int32_t k, Reg r);
-
-/* Get/set from constant pointer. */
-static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
-{
- int32_t jgl = i32ptr(J2G(as->J));
- int32_t i = i32ptr(p);
- Reg base;
- if ((uint32_t)(i-jgl) < 65536) {
- i = i-jgl-32768;
- base = RID_JGL;
- } else {
- base = ra_allock(as, i-(int16_t)i, allow);
- }
- emit_tsi(as, mi, r, base, i);
-}
-
-#define emit_loadn(as, r, tv) \
- emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR)
-
-/* Get/set global_State fields. */
-static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
-{
- emit_tsi(as, mi, r, RID_JGL, ofs-32768);
-}
-
-#define emit_getgl(as, r, field) \
- emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field))
-#define emit_setgl(as, r, field) \
- emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field))
-
-/* Trace number is determined from per-trace exit stubs. */
-#define emit_setvmstate(as, i) UNUSED(i)
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for internal jumps. */
-typedef MCode *MCLabel;
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = target - p;
- lua_assert(((delta + 0x8000) >> 16) == 0);
- *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu);
- as->mcp = p;
-}
-
-static void emit_jmp(ASMState *as, MCode *target)
-{
- *--as->mcp = MIPSI_NOP;
- emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target));
-}
-
-static void emit_call(ASMState *as, void *target)
-{
- MCode *p = as->mcp;
- *--p = MIPSI_NOP;
- if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0)
- *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu);
- else /* Target out of range: need indirect call. */
- *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR);
- as->mcp = p;
- ra_allockreg(as, i32ptr(target), RID_CFUNCADDR);
-}
-
-/* -- Emit generic operations --------------------------------------------- */
-
-#define emit_move(as, dst, src) \
- emit_ds(as, MIPSI_MOVE, (dst), (src))
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
- if (dst < RID_MAX_GPR)
- emit_move(as, dst, src);
- else
- emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tsi(as, MIPSI_LW, r, RID_SP, ofs);
- else
- emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1,
- (r & 31), RID_SP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tsi(as, MIPSI_SW, r, RID_SP, ofs);
- else
- emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1,
- (r&31), RID_SP, ofs);
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs) {
- lua_assert(checki16(ofs));
- emit_tsi(as, MIPSI_ADDIU, r, r, ofs);
- }
-}
-
-#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
-
+/*
+** MIPS instruction emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt)
+{
+ *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt);
+}
+
+static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a)
+{
+ *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a);
+}
+
+#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0)
+#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt))
+
+static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i)
+{
+ *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff);
+}
+
+#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i))
+#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i))
+
+static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh)
+{
+ *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31);
+}
+
+#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0)
+
+static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
+{
+ if ((as->flags & JIT_F_MIPS32R2)) {
+ emit_dta(as, MIPSI_ROTR, dest, src, shift);
+ } else {
+ emit_dst(as, MIPSI_OR, dest, dest, tmp);
+ emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31);
+ emit_dta(as, MIPSI_SRL, tmp, src, shift);
+ }
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != t);
+ if (ref < ASMREF_L) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ if (checki16(delta)) {
+ emit_tsi(as, MIPSI_ADDIU, t, r, delta);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ if (checki16(i)) {
+ emit_ti(as, MIPSI_LI, r, i);
+ } else {
+ if ((i & 0xffff)) {
+ int32_t jgl = i32ptr(J2G(as->J));
+ if ((uint32_t)(i-jgl) < 65536) {
+ emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768);
+ return;
+ } else if (emit_kdelta1(as, r, i)) {
+ return;
+ } else if ((i >> 16) == 0) {
+ emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i);
+ return;
+ }
+ emit_tsi(as, MIPSI_ORI, r, r, i);
+ }
+ emit_ti(as, MIPSI_LUI, r, (i >> 16));
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
+static void ra_allockreg(ASMState *as, int32_t k, Reg r);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
+{
+ int32_t jgl = i32ptr(J2G(as->J));
+ int32_t i = i32ptr(p);
+ Reg base;
+ if ((uint32_t)(i-jgl) < 65536) {
+ i = i-jgl-32768;
+ base = RID_JGL;
+ } else {
+ base = ra_allock(as, i-(int16_t)i, allow);
+ }
+ emit_tsi(as, mi, r, base, i);
+}
+
+#define emit_loadn(as, r, tv) \
+ emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR)
+
+/* Get/set global_State fields. */
+static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
+{
+ emit_tsi(as, mi, r, RID_JGL, ofs-32768);
+}
+
+#define emit_getgl(as, r, field) \
+ emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field))
+#define emit_setgl(as, r, field) \
+ emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field))
+
+/* Trace number is determined from per-trace exit stubs. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lua_assert(((delta + 0x8000) >> 16) == 0);
+ *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu);
+ as->mcp = p;
+}
+
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ *--as->mcp = MIPSI_NOP;
+ emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target));
+}
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = as->mcp;
+ *--p = MIPSI_NOP;
+ if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0)
+ *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu);
+ else /* Target out of range: need indirect call. */
+ *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR);
+ as->mcp = p;
+ ra_allockreg(as, i32ptr(target), RID_CFUNCADDR);
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+#define emit_move(as, dst, src) \
+ emit_ds(as, MIPSI_MOVE, (dst), (src))
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ if (dst < RID_MAX_GPR)
+ emit_move(as, dst, src);
+ else
+ emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tsi(as, MIPSI_LW, r, RID_SP, ofs);
+ else
+ emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1,
+ (r & 31), RID_SP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tsi(as, MIPSI_SW, r, RID_SP, ofs);
+ else
+ emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1,
+ (r&31), RID_SP, ofs);
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ lua_assert(checki16(ofs));
+ emit_tsi(as, MIPSI_ADDIU, r, r, ofs);
+ }
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
diff --git a/3rdparty/lua/src/lj_emit_ppc.h b/3rdparty/lua/src/lj_emit_ppc.h
index a402080..a589f3a 100644
--- a/3rdparty/lua/src/lj_emit_ppc.h
+++ b/3rdparty/lua/src/lj_emit_ppc.h
@@ -1,238 +1,238 @@
-/*
-** PPC instruction emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb)
-{
- *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb);
-}
-
-#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb))
-#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0)
-#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb))
-
-static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i)
-{
- *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff);
-}
-
-#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i))
-#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i))
-#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i))
-
-#define emit_fab(as, pi, rf, ra, rb) \
- emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31)
-#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31)
-#define emit_fac(as, pi, rf, ra, rc) \
- emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0)
-#define emit_facb(as, pi, rf, ra, rc, rb) \
- emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31)
-#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i))
-
-static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs,
- int32_t n, int32_t b, int32_t e)
-{
- *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) |
- PPCF_MB(b) | PPCF_ME(e);
-}
-
-static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n)
-{
- lua_assert(n >= 0 && n < 32);
- emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n);
-}
-
-static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
-{
- lua_assert(n >= 0 && n < 32);
- emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31);
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref) ((ref) <= REF_BASE)
-
-/* Try to find a one step delta relative to another constant. */
-static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != t);
- if (ref < ASMREF_L) {
- int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
- if (checki16(delta)) {
- emit_tai(as, PPCI_ADDI, t, r, delta);
- return 1;
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Load a 32 bit constant into a GPR. */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- if (checki16(i)) {
- emit_ti(as, PPCI_LI, r, i);
- } else {
- if ((i & 0xffff)) {
- int32_t jgl = i32ptr(J2G(as->J));
- if ((uint32_t)(i-jgl) < 65536) {
- emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768);
- return;
- } else if (emit_kdelta1(as, r, i)) {
- return;
- }
- emit_asi(as, PPCI_ORI, r, r, i);
- }
- emit_ti(as, PPCI_LIS, r, (i >> 16));
- }
-}
-
-#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
-
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
-
-/* Get/set from constant pointer. */
-static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
-{
- int32_t jgl = i32ptr(J2G(as->J));
- int32_t i = i32ptr(p);
- Reg base;
- if ((uint32_t)(i-jgl) < 65536) {
- i = i-jgl-32768;
- base = RID_JGL;
- } else {
- base = ra_allock(as, i-(int16_t)i, allow);
- }
- emit_tai(as, pi, r, base, i);
-}
-
-#define emit_loadn(as, r, tv) \
- emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR)
-
-/* Get/set global_State fields. */
-static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs)
-{
- emit_tai(as, pi, r, RID_JGL, ofs-32768);
-}
-
-#define emit_getgl(as, r, field) \
- emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field))
-#define emit_setgl(as, r, field) \
- emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field))
-
-/* Trace number is determined from per-trace exit stubs. */
-#define emit_setvmstate(as, i) UNUSED(i)
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for internal jumps. */
-typedef MCode *MCLabel;
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = (char *)target - (char *)p;
- lua_assert(((delta + 0x8000) >> 16) == 0);
- pi ^= (delta & 0x8000) * (PPCF_Y/0x8000);
- *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu);
-}
-
-static void emit_jmp(ASMState *as, MCode *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = (char *)target - (char *)p;
- *p = PPCI_B | (delta & 0x03fffffcu);
-}
-
-static void emit_call(ASMState *as, void *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = (char *)target - (char *)p;
- if ((((delta>>2) + 0x00800000) >> 24) == 0) {
- *p = PPCI_BL | (delta & 0x03fffffcu);
- } else { /* Target out of range: need indirect call. Don't use arg reg. */
- RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
- Reg r = ra_allock(as, i32ptr(target), allow);
- *p = PPCI_BCTRL;
- p[-1] = PPCI_MTCTR | PPCF_T(r);
- as->mcp = p-1;
- }
-}
-
-/* -- Emit generic operations --------------------------------------------- */
-
-#define emit_mr(as, dst, src) \
- emit_asb(as, PPCI_MR, (dst), (src), (src))
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
- UNUSED(ir);
- if (dst < RID_MAX_GPR)
- emit_mr(as, dst, src);
- else
- emit_fb(as, PPCI_FMR, dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tai(as, PPCI_LWZ, r, RID_SP, ofs);
- else
- emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, RID_SP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tai(as, PPCI_STW, r, RID_SP, ofs);
- else
- emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, RID_SP, ofs);
-}
-
-/* Emit a compare (for equality) with a constant operand. */
-static void emit_cmpi(ASMState *as, Reg r, int32_t k)
-{
- if (checki16(k)) {
- emit_ai(as, PPCI_CMPWI, r, k);
- } else if (checku16(k)) {
- emit_ai(as, PPCI_CMPLWI, r, k);
- } else {
- emit_ai(as, PPCI_CMPLWI, RID_TMP, k);
- emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16));
- }
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs) {
- emit_tai(as, PPCI_ADDI, r, r, ofs);
- if (!checki16(ofs))
- emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16);
- }
-}
-
-static void emit_spsub(ASMState *as, int32_t ofs)
-{
- if (ofs) {
- emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs);
- emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP,
- CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0));
- }
-}
-
+/*
+** PPC instruction emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb)
+{
+ *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb);
+}
+
+#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb))
+#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0)
+#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb))
+
+static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i)
+{
+ *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff);
+}
+
+#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i))
+#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i))
+#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i))
+
+#define emit_fab(as, pi, rf, ra, rb) \
+ emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31)
+#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31)
+#define emit_fac(as, pi, rf, ra, rc) \
+ emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0)
+#define emit_facb(as, pi, rf, ra, rc, rb) \
+ emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31)
+#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i))
+
+static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs,
+ int32_t n, int32_t b, int32_t e)
+{
+ *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) |
+ PPCF_MB(b) | PPCF_ME(e);
+}
+
+static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n)
+{
+ lua_assert(n >= 0 && n < 32);
+ emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n);
+}
+
+static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
+{
+ lua_assert(n >= 0 && n < 32);
+ emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31);
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lua_assert(r != t);
+ if (ref < ASMREF_L) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ if (checki16(delta)) {
+ emit_tai(as, PPCI_ADDI, t, r, delta);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ if (checki16(i)) {
+ emit_ti(as, PPCI_LI, r, i);
+ } else {
+ if ((i & 0xffff)) {
+ int32_t jgl = i32ptr(J2G(as->J));
+ if ((uint32_t)(i-jgl) < 65536) {
+ emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768);
+ return;
+ } else if (emit_kdelta1(as, r, i)) {
+ return;
+ }
+ emit_asi(as, PPCI_ORI, r, r, i);
+ }
+ emit_ti(as, PPCI_LIS, r, (i >> 16));
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
+{
+ int32_t jgl = i32ptr(J2G(as->J));
+ int32_t i = i32ptr(p);
+ Reg base;
+ if ((uint32_t)(i-jgl) < 65536) {
+ i = i-jgl-32768;
+ base = RID_JGL;
+ } else {
+ base = ra_allock(as, i-(int16_t)i, allow);
+ }
+ emit_tai(as, pi, r, base, i);
+}
+
+#define emit_loadn(as, r, tv) \
+ emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR)
+
+/* Get/set global_State fields. */
+static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs)
+{
+ emit_tai(as, pi, r, RID_JGL, ofs-32768);
+}
+
+#define emit_getgl(as, r, field) \
+ emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field))
+#define emit_setgl(as, r, field) \
+ emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field))
+
+/* Trace number is determined from per-trace exit stubs. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ lua_assert(((delta + 0x8000) >> 16) == 0);
+ pi ^= (delta & 0x8000) * (PPCF_Y/0x8000);
+ *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu);
+}
+
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ *p = PPCI_B | (delta & 0x03fffffcu);
+}
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if ((((delta>>2) + 0x00800000) >> 24) == 0) {
+ *p = PPCI_BL | (delta & 0x03fffffcu);
+ } else { /* Target out of range: need indirect call. Don't use arg reg. */
+ RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
+ Reg r = ra_allock(as, i32ptr(target), allow);
+ *p = PPCI_BCTRL;
+ p[-1] = PPCI_MTCTR | PPCF_T(r);
+ as->mcp = p-1;
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+#define emit_mr(as, dst, src) \
+ emit_asb(as, PPCI_MR, (dst), (src), (src))
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ UNUSED(ir);
+ if (dst < RID_MAX_GPR)
+ emit_mr(as, dst, src);
+ else
+ emit_fb(as, PPCI_FMR, dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tai(as, PPCI_LWZ, r, RID_SP, ofs);
+ else
+ emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, RID_SP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tai(as, PPCI_STW, r, RID_SP, ofs);
+ else
+ emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, RID_SP, ofs);
+}
+
+/* Emit a compare (for equality) with a constant operand. */
+static void emit_cmpi(ASMState *as, Reg r, int32_t k)
+{
+ if (checki16(k)) {
+ emit_ai(as, PPCI_CMPWI, r, k);
+ } else if (checku16(k)) {
+ emit_ai(as, PPCI_CMPLWI, r, k);
+ } else {
+ emit_ai(as, PPCI_CMPLWI, RID_TMP, k);
+ emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16));
+ }
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ emit_tai(as, PPCI_ADDI, r, r, ofs);
+ if (!checki16(ofs))
+ emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16);
+ }
+}
+
+static void emit_spsub(ASMState *as, int32_t ofs)
+{
+ if (ofs) {
+ emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs);
+ emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP,
+ CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0));
+ }
+}
+
diff --git a/3rdparty/lua/src/lj_emit_x86.h b/3rdparty/lua/src/lj_emit_x86.h
index 70b36ef..bd184a3 100644
--- a/3rdparty/lua/src/lj_emit_x86.h
+++ b/3rdparty/lua/src/lj_emit_x86.h
@@ -1,466 +1,466 @@
-/*
-** x86/x64 instruction emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))
-
-#if LJ_64
-#define REXRB(p, rr, rb) \
- { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
- if (rex != 0x40) *--(p) = rex; }
-#define FORCE_REX 0x200
-#define REX_64 (FORCE_REX|0x080000)
-#else
-#define REXRB(p, rr, rb) ((void)0)
-#define FORCE_REX 0
-#define REX_64 0
-#endif
-
-#define emit_i8(as, i) (*--as->mcp = (MCode)(i))
-#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
-#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)
-
-#define emit_x87op(as, xo) \
- (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)
-
-/* op */
-static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
- MCode *p, int delta)
-{
- int n = (int8_t)xo;
-#if defined(__GNUC__)
- if (__builtin_constant_p(xo) && n == -2)
- p[delta-2] = (MCode)(xo >> 24);
- else if (__builtin_constant_p(xo) && n == -3)
- *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
- else
-#endif
- *(uint32_t *)(p+delta-5) = (uint32_t)xo;
- p += n + delta;
-#if LJ_64
- {
- uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
- if (rex != 0x40) {
- rex |= (rr >> 16);
- if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
- else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
- *--p = (MCode)rex;
- }
- }
-#else
- UNUSED(rr); UNUSED(rb); UNUSED(rx);
-#endif
- return p;
-}
-
-/* op + modrm */
-#define emit_opm(xo, mode, rr, rb, p, delta) \
- (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
- emit_op((xo), (rr), (rb), 0, (p), (delta)))
-
-/* op + modrm + sib */
-#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
- (p[-1] = MODRM((scale), (rx), (rb)), \
- p[-2] = MODRM((mode), (rr), RID_ESP), \
- emit_op((xo), (rr), (rb), (rx), (p), -1))
-
-/* op r1, r2 */
-static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
-{
- MCode *p = as->mcp;
- as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
-}
-
-#if LJ_64 && defined(LUA_USE_ASSERT)
-/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
-static int32_t ptr2addr(const void *p)
-{
- lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
- return i32ptr(p);
-}
-#else
-#define ptr2addr(p) (i32ptr((p)))
-#endif
-
-/* op r, [addr] */
-static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
-{
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = ptr2addr(addr);
-#if LJ_64
- p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
- as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
-#else
- as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
-#endif
-}
-
-/* op r, [base+ofs] */
-static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
-{
- MCode *p = as->mcp;
- x86Mode mode;
- if (ra_hasreg(rb)) {
- if (ofs == 0 && (rb&7) != RID_EBP) {
- mode = XM_OFS0;
- } else if (checki8(ofs)) {
- *--p = (MCode)ofs;
- mode = XM_OFS8;
- } else {
- p -= 4;
- *(int32_t *)p = ofs;
- mode = XM_OFS32;
- }
- if ((rb&7) == RID_ESP)
- *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- } else {
- *(int32_t *)(p-4) = ofs;
-#if LJ_64
- p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
- p -= 5;
- rb = RID_ESP;
-#else
- p -= 4;
- rb = RID_EBP;
-#endif
- mode = XM_OFS0;
- }
- as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
-}
-
-/* op r, [base+idx*scale+ofs] */
-static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
- x86Mode scale, int32_t ofs)
-{
- MCode *p = as->mcp;
- x86Mode mode;
- if (ofs == 0 && (rb&7) != RID_EBP) {
- mode = XM_OFS0;
- } else if (checki8(ofs)) {
- mode = XM_OFS8;
- *--p = (MCode)ofs;
- } else {
- mode = XM_OFS32;
- p -= 4;
- *(int32_t *)p = ofs;
- }
- as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
-}
-
-/* op r, i */
-static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
-{
- MCode *p = as->mcp;
- x86Op xo;
- if (checki8(i)) {
- *--p = (MCode)i;
- xo = XG_TOXOi8(xg);
- } else {
- p -= 4;
- *(int32_t *)p = i;
- xo = XG_TOXOi(xg);
- }
- as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
-}
-
-/* op [base+ofs], i */
-static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
- int32_t i)
-{
- x86Op xo;
- if (checki8(i)) {
- emit_i8(as, i);
- xo = XG_TOXOi8(xg);
- } else {
- emit_i32(as, i);
- xo = XG_TOXOi(xg);
- }
- emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
-}
-
-#define emit_shifti(as, xg, r, i) \
- (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))
-
-/* op r, rm/mrm */
-static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
-{
- MCode *p = as->mcp;
- x86Mode mode = XM_REG;
- if (rb == RID_MRM) {
- rb = as->mrm.base;
- if (rb == RID_NONE) {
- rb = RID_EBP;
- mode = XM_OFS0;
- p -= 4;
- *(int32_t *)p = as->mrm.ofs;
- if (as->mrm.idx != RID_NONE)
- goto mrmidx;
-#if LJ_64
- *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
- rb = RID_ESP;
-#endif
- } else {
- if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
- mode = XM_OFS0;
- } else if (checki8(as->mrm.ofs)) {
- *--p = (MCode)as->mrm.ofs;
- mode = XM_OFS8;
- } else {
- p -= 4;
- *(int32_t *)p = as->mrm.ofs;
- mode = XM_OFS32;
- }
- if (as->mrm.idx != RID_NONE) {
- mrmidx:
- as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
- return;
- }
- if ((rb&7) == RID_ESP)
- *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- }
- }
- as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
-}
-
-/* op rm/mrm, i */
-static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
-{
- x86Op xo;
- if (checki8(i)) {
- emit_i8(as, i);
- xo = XG_TOXOi8(xg);
- } else {
- emit_i32(as, i);
- xo = XG_TOXOi(xg);
- }
- emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Instruction selection for XMM moves. */
-#define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
-#define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)
-
-/* mov [base+ofs], i */
-static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
-{
- emit_i32(as, i);
- emit_rmro(as, XO_MOVmi, 0, base, ofs);
-}
-
-/* mov [base+ofs], r */
-#define emit_movtomro(as, r, base, ofs) \
- emit_rmro(as, XO_MOVto, (r), (base), (ofs))
-
-/* Get/set global_State fields. */
-#define emit_opgl(as, xo, r, field) \
- emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
-#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field)
-#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field)
-
-#define emit_setvmstate(as, i) \
- (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))
-
-/* mov r, i / xor r, r */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */
- if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
- (as->curins+1 < as->T->nins &&
- IR(as->curins+1)->o == IR_HIOP)))) {
- emit_rr(as, XO_ARITH(XOg_XOR), r, r);
- } else {
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = i;
- p[-5] = (MCode)(XI_MOVri+(r&7));
- p -= 5;
- REXRB(p, 0, r);
- as->mcp = p;
- }
-}
-
-/* mov r, addr */
-#define emit_loada(as, r, addr) \
- emit_loadi(as, (r), ptr2addr((addr)))
-
-#if LJ_64
-/* mov r, imm64 or shorter 32 bit extended load. */
-static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
-{
- if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */
- emit_loadi(as, r, (int32_t)u64);
- } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = (int32_t)u64;
- as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
- } else { /* Full-size 64 bit load. */
- MCode *p = as->mcp;
- *(uint64_t *)(p-8) = u64;
- p[-9] = (MCode)(XI_MOVri+(r&7));
- p[-10] = 0x48 + ((r>>3)&1);
- p -= 10;
- as->mcp = p;
- }
-}
-#endif
-
-/* movsd r, [&tv->n] / xorps r, r */
-static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
-{
- if (tvispzero(tv)) /* Use xor only for +0. */
- emit_rr(as, XO_XORPS, r, r);
- else
- emit_rma(as, XMM_MOVRM(as), r, &tv->n);
-}
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for short jumps. */
-typedef MCode *MCLabel;
-
-#if LJ_32 && LJ_HASFFI
-/* jmp short target */
-static void emit_sjmp(ASMState *as, MCLabel target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = target - p;
- lua_assert(delta == (int8_t)delta);
- p[-1] = (MCode)(int8_t)delta;
- p[-2] = XI_JMPs;
- as->mcp = p - 2;
-}
-#endif
-
-/* jcc short target */
-static void emit_sjcc(ASMState *as, int cc, MCLabel target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = target - p;
- lua_assert(delta == (int8_t)delta);
- p[-1] = (MCode)(int8_t)delta;
- p[-2] = (MCode)(XI_JCCs+(cc&15));
- as->mcp = p - 2;
-}
-
-/* jcc short (pending target) */
-static MCLabel emit_sjcc_label(ASMState *as, int cc)
-{
- MCode *p = as->mcp;
- p[-1] = 0;
- p[-2] = (MCode)(XI_JCCs+(cc&15));
- as->mcp = p - 2;
- return p;
-}
-
-/* Fixup jcc short target. */
-static void emit_sfixup(ASMState *as, MCLabel source)
-{
- source[-1] = (MCode)(as->mcp-source);
-}
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-/* Compute relative 32 bit offset for jump and call instructions. */
-static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
-{
- ptrdiff_t delta = target - p;
- lua_assert(delta == (int32_t)delta);
- return (int32_t)delta;
-}
-
-/* jcc target */
-static void emit_jcc(ASMState *as, int cc, MCode *target)
-{
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = (MCode)(XI_JCCn+(cc&15));
- p[-6] = 0x0f;
- as->mcp = p - 6;
-}
-
-/* jmp target */
-static void emit_jmp(ASMState *as, MCode *target)
-{
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = XI_JMP;
- as->mcp = p - 5;
-}
-
-/* call target */
-static void emit_call_(ASMState *as, MCode *target)
-{
- MCode *p = as->mcp;
-#if LJ_64
- if (target-p != (int32_t)(target-p)) {
- /* Assumes RID_RET is never an argument to calls and always clobbered. */
- emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
- emit_loadu64(as, RID_RET, (uint64_t)target);
- return;
- }
-#endif
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = XI_CALL;
- as->mcp = p - 5;
-}
-
-#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f))
-
-/* -- Emit generic operations --------------------------------------------- */
-
-/* Use 64 bit operations to handle 64 bit IR types. */
-#if LJ_64
-#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
-#else
-#define REX_64IR(ir, r) (r)
-#endif
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
- UNUSED(ir);
- if (dst < RID_MAX_GPR)
- emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
- else
- emit_rr(as, XMM_MOVRR(as), dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs);
- else
- emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs);
- else
- emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs);
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs) {
- if ((as->flags & JIT_F_LEA_AGU))
- emit_rmro(as, XO_LEA, r, r, ofs);
- else
- emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
- }
-}
-
-#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs))
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref) ((ref) <= REF_BASE)
-
+/*
+** x86/x64 instruction emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))
+
+#if LJ_64
+#define REXRB(p, rr, rb) \
+ { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
+ if (rex != 0x40) *--(p) = rex; }
+#define FORCE_REX 0x200
+#define REX_64 (FORCE_REX|0x080000)
+#else
+#define REXRB(p, rr, rb) ((void)0)
+#define FORCE_REX 0
+#define REX_64 0
+#endif
+
+#define emit_i8(as, i) (*--as->mcp = (MCode)(i))
+#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
+#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)
+
+#define emit_x87op(as, xo) \
+ (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)
+
+/* op */
+static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
+ MCode *p, int delta)
+{
+ int n = (int8_t)xo;
+#if defined(__GNUC__)
+ if (__builtin_constant_p(xo) && n == -2)
+ p[delta-2] = (MCode)(xo >> 24);
+ else if (__builtin_constant_p(xo) && n == -3)
+ *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
+ else
+#endif
+ *(uint32_t *)(p+delta-5) = (uint32_t)xo;
+ p += n + delta;
+#if LJ_64
+ {
+ uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
+ if (rex != 0x40) {
+ rex |= (rr >> 16);
+ if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
+ else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
+ *--p = (MCode)rex;
+ }
+ }
+#else
+ UNUSED(rr); UNUSED(rb); UNUSED(rx);
+#endif
+ return p;
+}
+
+/* op + modrm */
+#define emit_opm(xo, mode, rr, rb, p, delta) \
+ (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
+ emit_op((xo), (rr), (rb), 0, (p), (delta)))
+
+/* op + modrm + sib */
+#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
+ (p[-1] = MODRM((scale), (rx), (rb)), \
+ p[-2] = MODRM((mode), (rr), RID_ESP), \
+ emit_op((xo), (rr), (rb), (rx), (p), -1))
+
+/* op r1, r2 */
+static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
+{
+ MCode *p = as->mcp;
+ as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
+}
+
+#if LJ_64 && defined(LUA_USE_ASSERT)
+/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
+static int32_t ptr2addr(const void *p)
+{
+ lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
+ return i32ptr(p);
+}
+#else
+#define ptr2addr(p) (i32ptr((p)))
+#endif
+
+/* op r, [addr] */
+static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = ptr2addr(addr);
+#if LJ_64
+ p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
+#else
+ as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
+#endif
+}
+
+/* op r, [base+ofs] */
+static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
+{
+ MCode *p = as->mcp;
+ x86Mode mode;
+ if (ra_hasreg(rb)) {
+ if (ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(ofs)) {
+ *--p = (MCode)ofs;
+ mode = XM_OFS8;
+ } else {
+ p -= 4;
+ *(int32_t *)p = ofs;
+ mode = XM_OFS32;
+ }
+ if ((rb&7) == RID_ESP)
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ } else {
+ *(int32_t *)(p-4) = ofs;
+#if LJ_64
+ p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ p -= 5;
+ rb = RID_ESP;
+#else
+ p -= 4;
+ rb = RID_EBP;
+#endif
+ mode = XM_OFS0;
+ }
+ as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
+}
+
+/* op r, [base+idx*scale+ofs] */
+static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
+ x86Mode scale, int32_t ofs)
+{
+ MCode *p = as->mcp;
+ x86Mode mode;
+ if (ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(ofs)) {
+ mode = XM_OFS8;
+ *--p = (MCode)ofs;
+ } else {
+ mode = XM_OFS32;
+ p -= 4;
+ *(int32_t *)p = ofs;
+ }
+ as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
+}
+
+/* op r, i */
+static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
+{
+ MCode *p = as->mcp;
+ x86Op xo;
+ if (checki8(i)) {
+ *--p = (MCode)i;
+ xo = XG_TOXOi8(xg);
+ } else {
+ p -= 4;
+ *(int32_t *)p = i;
+ xo = XG_TOXOi(xg);
+ }
+ as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
+}
+
+/* op [base+ofs], i */
+static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
+ int32_t i)
+{
+ x86Op xo;
+ if (checki8(i)) {
+ emit_i8(as, i);
+ xo = XG_TOXOi8(xg);
+ } else {
+ emit_i32(as, i);
+ xo = XG_TOXOi(xg);
+ }
+ emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
+}
+
+#define emit_shifti(as, xg, r, i) \
+ (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))
+
+/* op r, rm/mrm */
+static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
+{
+ MCode *p = as->mcp;
+ x86Mode mode = XM_REG;
+ if (rb == RID_MRM) {
+ rb = as->mrm.base;
+ if (rb == RID_NONE) {
+ rb = RID_EBP;
+ mode = XM_OFS0;
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ if (as->mrm.idx != RID_NONE)
+ goto mrmidx;
+#if LJ_64
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ rb = RID_ESP;
+#endif
+ } else {
+ if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(as->mrm.ofs)) {
+ *--p = (MCode)as->mrm.ofs;
+ mode = XM_OFS8;
+ } else {
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ mode = XM_OFS32;
+ }
+ if (as->mrm.idx != RID_NONE) {
+ mrmidx:
+ as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
+ return;
+ }
+ if ((rb&7) == RID_ESP)
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ }
+ }
+ as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
+}
+
+/* op rm/mrm, i */
+static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
+{
+ x86Op xo;
+ if (checki8(i)) {
+ emit_i8(as, i);
+ xo = XG_TOXOi8(xg);
+ } else {
+ emit_i32(as, i);
+ xo = XG_TOXOi(xg);
+ }
+ emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Instruction selection for XMM moves. */
+#define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
+#define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)
+
+/* mov [base+ofs], i */
+static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
+{
+ emit_i32(as, i);
+ emit_rmro(as, XO_MOVmi, 0, base, ofs);
+}
+
+/* mov [base+ofs], r */
+#define emit_movtomro(as, r, base, ofs) \
+ emit_rmro(as, XO_MOVto, (r), (base), (ofs))
+
+/* Get/set global_State fields. */
+#define emit_opgl(as, xo, r, field) \
+ emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
+#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field)
+#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field)
+
+#define emit_setvmstate(as, i) \
+ (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))
+
+/* mov r, i / xor r, r */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */
+ if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
+ (as->curins+1 < as->T->nins &&
+ IR(as->curins+1)->o == IR_HIOP)))) {
+ emit_rr(as, XO_ARITH(XOg_XOR), r, r);
+ } else {
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = i;
+ p[-5] = (MCode)(XI_MOVri+(r&7));
+ p -= 5;
+ REXRB(p, 0, r);
+ as->mcp = p;
+ }
+}
+
+/* mov r, addr */
+#define emit_loada(as, r, addr) \
+ emit_loadi(as, (r), ptr2addr((addr)))
+
+#if LJ_64
+/* mov r, imm64 or shorter 32 bit extended load. */
+static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
+{
+ if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */
+ emit_loadi(as, r, (int32_t)u64);
+ } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = (int32_t)u64;
+ as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
+ } else { /* Full-size 64 bit load. */
+ MCode *p = as->mcp;
+ *(uint64_t *)(p-8) = u64;
+ p[-9] = (MCode)(XI_MOVri+(r&7));
+ p[-10] = 0x48 + ((r>>3)&1);
+ p -= 10;
+ as->mcp = p;
+ }
+}
+#endif
+
+/* movsd r, [&tv->n] / xorps r, r */
+static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
+{
+ if (tvispzero(tv)) /* Use xor only for +0. */
+ emit_rr(as, XO_XORPS, r, r);
+ else
+ emit_rma(as, XMM_MOVRM(as), r, &tv->n);
+}
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for short jumps. */
+typedef MCode *MCLabel;
+
+#if LJ_32 && LJ_HASFFI
+/* jmp short target */
+static void emit_sjmp(ASMState *as, MCLabel target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lua_assert(delta == (int8_t)delta);
+ p[-1] = (MCode)(int8_t)delta;
+ p[-2] = XI_JMPs;
+ as->mcp = p - 2;
+}
+#endif
+
+/* jcc short target */
+static void emit_sjcc(ASMState *as, int cc, MCLabel target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lua_assert(delta == (int8_t)delta);
+ p[-1] = (MCode)(int8_t)delta;
+ p[-2] = (MCode)(XI_JCCs+(cc&15));
+ as->mcp = p - 2;
+}
+
+/* jcc short (pending target) */
+static MCLabel emit_sjcc_label(ASMState *as, int cc)
+{
+ MCode *p = as->mcp;
+ p[-1] = 0;
+ p[-2] = (MCode)(XI_JCCs+(cc&15));
+ as->mcp = p - 2;
+ return p;
+}
+
+/* Fixup jcc short target. */
+static void emit_sfixup(ASMState *as, MCLabel source)
+{
+ source[-1] = (MCode)(as->mcp-source);
+}
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+/* Compute relative 32 bit offset for jump and call instructions. */
+static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
+{
+ ptrdiff_t delta = target - p;
+ lua_assert(delta == (int32_t)delta);
+ return (int32_t)delta;
+}
+
+/* jcc target */
+static void emit_jcc(ASMState *as, int cc, MCode *target)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = (MCode)(XI_JCCn+(cc&15));
+ p[-6] = 0x0f;
+ as->mcp = p - 6;
+}
+
+/* jmp target */
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = XI_JMP;
+ as->mcp = p - 5;
+}
+
+/* call target */
+static void emit_call_(ASMState *as, MCode *target)
+{
+ MCode *p = as->mcp;
+#if LJ_64
+ if (target-p != (int32_t)(target-p)) {
+ /* Assumes RID_RET is never an argument to calls and always clobbered. */
+ emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
+ emit_loadu64(as, RID_RET, (uint64_t)target);
+ return;
+ }
+#endif
+ *(int32_t *)(p-4) = jmprel(p, target);
+ p[-5] = XI_CALL;
+ as->mcp = p - 5;
+}
+
+#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f))
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Use 64 bit operations to handle 64 bit IR types. */
+#if LJ_64
+#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
+#else
+#define REX_64IR(ir, r) (r)
+#endif
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ UNUSED(ir);
+ if (dst < RID_MAX_GPR)
+ emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
+ else
+ emit_rr(as, XMM_MOVRR(as), dst, src);
+}
+
+/* Generic load of register from stack slot. */
+static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs);
+ else
+ emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs);
+}
+
+/* Generic store of register to stack slot. */
+static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs);
+ else
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs);
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ if ((as->flags & JIT_F_LEA_AGU))
+ emit_rmro(as, XO_LEA, r, r, ofs);
+ else
+ emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
+ }
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs))
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
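
Like the PPC emitter above, this file generates machine code backwards: as->mcp points at the lowest byte emitted so far and every helper stores its bytes in front of it before moving the pointer down, which is why emit_loadi() writes the 32-bit immediate at p-4 and only then drops the opcode byte at p-5. A self-contained sketch of that buffer discipline for a single "mov r32, imm32" (the Emitter struct and helper names are invented and a little-endian host is assumed; this is not the LuaJIT layout):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct {
  uint8_t buf[64];
  uint8_t *mcp;   /* Emission position, grows downwards. */
} Emitter;

/* mov r32, imm32 is opcode 0xB8+reg followed by the immediate, but it is
** written back to front: immediate first, opcode prepended afterwards.
*/
static void emit_loadi32(Emitter *em, int reg, int32_t i)
{
  uint8_t *p = em->mcp;
  memcpy(p - 4, &i, 4);                    /* immediate at p-4..p-1 */
  p[-5] = (uint8_t)(0xB8 + (reg & 7));     /* opcode in front of it */
  em->mcp = p - 5;
}

int main(void)
{
  Emitter em;
  uint8_t *p;
  em.mcp = em.buf + sizeof(em.buf);
  emit_loadi32(&em, 0, 0x12345678);        /* mov eax, 0x12345678 */
  for (p = em.mcp; p < em.buf + sizeof(em.buf); p++)
    printf("%02x ", (unsigned)*p);         /* expect: b8 78 56 34 12 */
  printf("\n");
  return 0;
}

Because every instruction is prepended, the finished buffer reads in normal execution order even though it was filled from the end.
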
diff --git a/3rdparty/lua/src/lj_err.c b/3rdparty/lua/src/lj_err.c
index 081bfde..42cd12b 100644
--- a/3rdparty/lua/src/lj_err.c
+++ b/3rdparty/lua/src/lj_err.c
@@ -1,6 +1,6 @@
/*
** Error handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_err_c
@@ -196,7 +196,7 @@ static void *err_unwind(lua_State *L, void *stopcf, int errcode)
typedef struct _Unwind_Exception
{
uint64_t exclass;
- void (*excleanup)(int, struct _Unwind_Exception *);
+ void (*excleanup)(int, struct _Unwind_Exception *);
uintptr_t p1, p2;
} __attribute__((__aligned__)) _Unwind_Exception;
@@ -378,7 +378,7 @@ typedef struct UndocumentedDispatcherContext {
ULONG64 EstablisherFrame;
ULONG64 TargetIp;
PCONTEXT ContextRecord;
- void (*LanguageHandler)(void);
+ PEXCEPTION_ROUTINE LanguageHandler;
PVOID HandlerData;
PUNWIND_HISTORY_TABLE HistoryTable;
ULONG ScopeIndex;
@@ -499,7 +499,8 @@ static ptrdiff_t finderrfunc(lua_State *L)
{
cTValue *frame = L->base-1, *bot = tvref(L->stack);
void *cf = L->cframe;
- while (frame > bot && cf) {
+ while (frame > bot) {
+ lua_assert(cf != NULL);
while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */
if (frame >= restorestack(L, -cframe_nres(cf)))
break;
@@ -517,14 +518,12 @@ static ptrdiff_t finderrfunc(lua_State *L)
case FRAME_C:
cf = cframe_prev(cf);
/* fallthrough */
- case FRAME_VARG:
- frame = frame_prevd(frame);
- break;
case FRAME_CONT:
#if LJ_HASFFI
if ((frame-1)->u32.lo == LJ_CONT_FFI_CALLBACK)
cf = cframe_prev(cf);
#endif
+ case FRAME_VARG:
frame = frame_prevd(frame);
break;
case FRAME_CP:
@@ -726,23 +725,9 @@ LJ_NOINLINE void lj_err_arg(lua_State *L, int narg, ErrMsg em)
/* Typecheck error for arguments. */
LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname)
{
- const char *tname, *msg;
- if (narg <= LUA_REGISTRYINDEX) {
- if (narg >= LUA_GLOBALSINDEX) {
- tname = lj_obj_itypename[~LJ_TTAB];
- } else {
- GCfunc *fn = curr_func(L);
- int idx = LUA_GLOBALSINDEX - narg;
- if (idx <= fn->c.nupvalues)
- tname = lj_typename(&fn->c.upvalue[idx-1]);
- else
- tname = lj_obj_typename[0];
- }
- } else {
- TValue *o = narg < 0 ? L->top + narg : L->base + narg-1;
- tname = o < L->top ? lj_typename(o) : lj_obj_typename[0];
- }
- msg = lj_str_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname);
+ TValue *o = narg < 0 ? L->top + narg : L->base + narg-1;
+ const char *tname = o < L->top ? lj_typename(o) : lj_obj_typename[0];
+ const char *msg = lj_str_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname);
err_argmsg(L, narg, msg);
}
diff --git a/3rdparty/lua/src/lj_err.h b/3rdparty/lua/src/lj_err.h
index 9615198..dbea409 100644
--- a/3rdparty/lua/src/lj_err.h
+++ b/3rdparty/lua/src/lj_err.h
@@ -1,41 +1,41 @@
-/*
-** Error handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_ERR_H
-#define _LJ_ERR_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-
-typedef enum {
-#define ERRDEF(name, msg) \
- LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1,
-#include "lj_errmsg.h"
- LJ_ERR__MAX
-} ErrMsg;
-
-LJ_DATA const char *lj_err_allmsg;
-#define err2msg(em) (lj_err_allmsg+(int)(em))
-
-LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em);
-LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode);
-LJ_FUNC_NORET void lj_err_mem(lua_State *L);
-LJ_FUNC_NORET void lj_err_run(lua_State *L);
-LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em);
-LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
- BCLine line, ErrMsg em, va_list argp);
-LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm);
-LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2);
-LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o);
-LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg);
-LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...);
-LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em);
-LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em);
-LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...);
-LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname);
-LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt);
-
-#endif
+/*
+** Error handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ERR_H
+#define _LJ_ERR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+typedef enum {
+#define ERRDEF(name, msg) \
+ LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1,
+#include "lj_errmsg.h"
+ LJ_ERR__MAX
+} ErrMsg;
+
+LJ_DATA const char *lj_err_allmsg;
+#define err2msg(em) (lj_err_allmsg+(int)(em))
+
+LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em);
+LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode);
+LJ_FUNC_NORET void lj_err_mem(lua_State *L);
+LJ_FUNC_NORET void lj_err_run(lua_State *L);
+LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em);
+LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
+ BCLine line, ErrMsg em, va_list argp);
+LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm);
+LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2);
+LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o);
+LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg);
+LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...);
+LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em);
+LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em);
+LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...);
+LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname);
+LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt);
+
+#endif
diff --git a/3rdparty/lua/src/lj_errmsg.h b/3rdparty/lua/src/lj_errmsg.h
index 9da3496..fd46acd 100644
--- a/3rdparty/lua/src/lj_errmsg.h
+++ b/3rdparty/lua/src/lj_errmsg.h
@@ -1,193 +1,192 @@
-/*
-** VM error messages.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* This file may be included multiple times with different ERRDEF macros. */
-
-/* Basic error handling. */
-ERRDEF(ERRMEM, "not enough memory")
-ERRDEF(ERRERR, "error in error handling")
-ERRDEF(ERRCPP, "C++ exception")
-
-/* Allocations. */
-ERRDEF(STROV, "string length overflow")
-ERRDEF(UDATAOV, "userdata length overflow")
-ERRDEF(STKOV, "stack overflow")
-ERRDEF(STKOVM, "stack overflow (%s)")
-ERRDEF(TABOV, "table overflow")
-
-/* Table indexing. */
-ERRDEF(NANIDX, "table index is NaN")
-ERRDEF(NILIDX, "table index is nil")
-ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next"))
-
-/* Metamethod resolving. */
-ERRDEF(BADCALL, "attempt to call a %s value")
-ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)")
-ERRDEF(BADOPRV, "attempt to %s a %s value")
-ERRDEF(BADCMPT, "attempt to compare %s with %s")
-ERRDEF(BADCMPV, "attempt to compare two %s values")
-ERRDEF(GETLOOP, "loop in gettable")
-ERRDEF(SETLOOP, "loop in settable")
-ERRDEF(OPCALL, "call")
-ERRDEF(OPINDEX, "index")
-ERRDEF(OPARITH, "perform arithmetic on")
-ERRDEF(OPCAT, "concatenate")
-ERRDEF(OPLEN, "get length of")
-
-/* Type checks. */
-ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)")
-ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)")
-ERRDEF(BADTYPE, "%s expected, got %s")
-ERRDEF(BADVAL, "invalid value")
-ERRDEF(NOVAL, "value expected")
-ERRDEF(NOCORO, "coroutine expected")
-ERRDEF(NOTABN, "nil or table expected")
-ERRDEF(NOLFUNC, "Lua function expected")
-ERRDEF(NOFUNCL, "function or level expected")
-ERRDEF(NOSFT, "string/function/table expected")
-ERRDEF(NOPROXY, "boolean or proxy expected")
-ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number")
-ERRDEF(FORLIM, LUA_QL("for") " limit must be a number")
-ERRDEF(FORSTEP, LUA_QL("for") " step must be a number")
-
-/* C API checks. */
-ERRDEF(NOENV, "no calling environment")
-ERRDEF(CYIELD, "attempt to yield across C-call boundary")
-ERRDEF(BADLU, "bad light userdata pointer")
-ERRDEF(NOGCMM, "bad action while in __gc metamethod")
-#if LJ_TARGET_WINDOWS
-ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)")
-#endif
-
-/* Standard library function errors. */
-ERRDEF(ASSERT, "assertion failed!")
-ERRDEF(PROTMT, "cannot change a protected metatable")
-ERRDEF(UNPACK, "too many results to unpack")
-ERRDEF(RDRSTR, "reader function must return a string")
-ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print"))
-ERRDEF(IDXRNG, "index out of range")
-ERRDEF(BASERNG, "base out of range")
-ERRDEF(LVLRNG, "level out of range")
-ERRDEF(INVLVL, "invalid level")
-ERRDEF(INVOPT, "invalid option")
-ERRDEF(INVOPTM, "invalid option " LUA_QS)
-ERRDEF(INVFMT, "invalid format")
-ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object")
-ERRDEF(CORUN, "cannot resume running coroutine")
-ERRDEF(CODEAD, "cannot resume dead coroutine")
-ERRDEF(COSUSP, "cannot resume non-suspended coroutine")
-ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert"))
-ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat"))
-ERRDEF(TABSORT, "invalid order function for sorting")
-ERRDEF(IOCLFL, "attempt to use a closed file")
-ERRDEF(IOSTDCL, "standard file is closed")
-ERRDEF(OSUNIQF, "unable to generate a unique filename")
-ERRDEF(OSDATEF, "field " LUA_QS " missing in date table")
-ERRDEF(STRDUMP, "unable to dump given function")
-ERRDEF(STRSLC, "string slice too long")
-ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern")
-ERRDEF(STRPATC, "invalid pattern capture")
-ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")")
-ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")")
-ERRDEF(STRPATU, "unbalanced pattern")
-ERRDEF(STRPATX, "pattern too complex")
-ERRDEF(STRCAPI, "invalid capture index")
-ERRDEF(STRCAPN, "too many captures")
-ERRDEF(STRCAPU, "unfinished capture")
-ERRDEF(STRFMTO, "invalid option " LUA_QL("%%%c") " to " LUA_QL("format"))
-ERRDEF(STRFMTR, "invalid format (repeated flags)")
-ERRDEF(STRFMTW, "invalid format (width or precision too long)")
-ERRDEF(STRGSRV, "invalid replacement value (a %s)")
-ERRDEF(BADMODN, "name conflict for module " LUA_QS)
-#if LJ_HASJIT
-ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?")
-#if LJ_TARGET_X86ORX64
-ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2")
-#else
-ERRDEF(NOJIT, "JIT compiler disabled")
-#endif
-#elif defined(LJ_ARCH_NOJIT)
-ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)")
-#else
-ERRDEF(NOJIT, "JIT compiler permanently disabled by build option")
-#endif
-ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS)
-
-/* Lexer/parser errors. */
-ERRDEF(XMODE, "attempt to load chunk with wrong mode")
-ERRDEF(XNEAR, "%s near " LUA_QS)
-ERRDEF(XELEM, "lexical element too long")
-ERRDEF(XLINES, "chunk has too many lines")
-ERRDEF(XLEVELS, "chunk has too many syntax levels")
-ERRDEF(XNUMBER, "malformed number")
-ERRDEF(XLSTR, "unfinished long string")
-ERRDEF(XLCOM, "unfinished long comment")
-ERRDEF(XSTR, "unfinished string")
-ERRDEF(XESC, "invalid escape sequence")
-ERRDEF(XLDELIM, "invalid long string delimiter")
-ERRDEF(XTOKEN, LUA_QS " expected")
-ERRDEF(XJUMP, "control structure too long")
-ERRDEF(XSLOTS, "function or expression too complex")
-ERRDEF(XLIMC, "chunk has more than %d local variables")
-ERRDEF(XLIMM, "main function has more than %d %s")
-ERRDEF(XLIMF, "function at line %d has more than %d %s")
-ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)")
-ERRDEF(XFIXUP, "function too long for return fixup")
-ERRDEF(XPARAM, "<name> or " LUA_QL("...") " expected")
-#if !LJ_52
-ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)")
-#endif
-ERRDEF(XFUNARG, "function arguments expected")
-ERRDEF(XSYMBOL, "unexpected symbol")
-ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function")
-ERRDEF(XSYNTAX, "syntax error")
-ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected")
-ERRDEF(XBREAK, "no loop to break")
-ERRDEF(XLUNDEF, "undefined label " LUA_QS)
-ERRDEF(XLDUP, "duplicate label " LUA_QS)
-ERRDEF(XGSCOPE, "<goto %s> jumps into the scope of local " LUA_QS)
-
-/* Bytecode reader errors. */
-ERRDEF(BCFMT, "cannot load incompatible bytecode")
-ERRDEF(BCBAD, "cannot load malformed bytecode")
-
-#if LJ_HASFFI
-/* FFI errors. */
-ERRDEF(FFI_INVTYPE, "invalid C type")
-ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large")
-ERRDEF(FFI_BADSCL, "bad storage class")
-ERRDEF(FFI_DECLSPEC, "declaration specifier expected")
-ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS)
-ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS)
-ERRDEF(FFI_NUMPARAM, "wrong number of type parameters")
-ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS)
-ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS)
-ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS)
-ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS)
-ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS)
-ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS)
-ERRDEF(FFI_BADCALL, LUA_QS " is not callable")
-ERRDEF(FFI_NUMARG, "wrong number of arguments for function call")
-ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS)
-ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed")
-ERRDEF(FFI_BADIDXW, LUA_QS " cannot be indexed with " LUA_QS)
-ERRDEF(FFI_BADMM, LUA_QS " has no " LUA_QS " metamethod")
-ERRDEF(FFI_WRCONST, "attempt to write to constant location")
-ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS)
-ERRDEF(FFI_BADCBACK, "bad callback")
-#if LJ_OS_NOJIT
-ERRDEF(FFI_CBACKOV, "no support for callbacks on this OS")
-#else
-ERRDEF(FFI_CBACKOV, "too many callbacks")
-#endif
-ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields")
-ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)")
-#endif
-
-#undef ERRDEF
-
-/* Detecting unused error messages:
- awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh
-*/
+/*
+** VM error messages.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* This file may be included multiple times with different ERRDEF macros. */
+
+/* Basic error handling. */
+ERRDEF(ERRMEM, "not enough memory")
+ERRDEF(ERRERR, "error in error handling")
+ERRDEF(ERRCPP, "C++ exception")
+
+/* Allocations. */
+ERRDEF(STROV, "string length overflow")
+ERRDEF(UDATAOV, "userdata length overflow")
+ERRDEF(STKOV, "stack overflow")
+ERRDEF(STKOVM, "stack overflow (%s)")
+ERRDEF(TABOV, "table overflow")
+
+/* Table indexing. */
+ERRDEF(NANIDX, "table index is NaN")
+ERRDEF(NILIDX, "table index is nil")
+ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next"))
+
+/* Metamethod resolving. */
+ERRDEF(BADCALL, "attempt to call a %s value")
+ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)")
+ERRDEF(BADOPRV, "attempt to %s a %s value")
+ERRDEF(BADCMPT, "attempt to compare %s with %s")
+ERRDEF(BADCMPV, "attempt to compare two %s values")
+ERRDEF(GETLOOP, "loop in gettable")
+ERRDEF(SETLOOP, "loop in settable")
+ERRDEF(OPCALL, "call")
+ERRDEF(OPINDEX, "index")
+ERRDEF(OPARITH, "perform arithmetic on")
+ERRDEF(OPCAT, "concatenate")
+ERRDEF(OPLEN, "get length of")
+
+/* Type checks. */
+ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)")
+ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)")
+ERRDEF(BADTYPE, "%s expected, got %s")
+ERRDEF(BADVAL, "invalid value")
+ERRDEF(NOVAL, "value expected")
+ERRDEF(NOCORO, "coroutine expected")
+ERRDEF(NOTABN, "nil or table expected")
+ERRDEF(NOLFUNC, "Lua function expected")
+ERRDEF(NOFUNCL, "function or level expected")
+ERRDEF(NOSFT, "string/function/table expected")
+ERRDEF(NOPROXY, "boolean or proxy expected")
+ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number")
+ERRDEF(FORLIM, LUA_QL("for") " limit must be a number")
+ERRDEF(FORSTEP, LUA_QL("for") " step must be a number")
+
+/* C API checks. */
+ERRDEF(NOENV, "no calling environment")
+ERRDEF(CYIELD, "attempt to yield across C-call boundary")
+ERRDEF(BADLU, "bad light userdata pointer")
+ERRDEF(NOGCMM, "bad action while in __gc metamethod")
+#if LJ_TARGET_WINDOWS
+ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)")
+#endif
+
+/* Standard library function errors. */
+ERRDEF(ASSERT, "assertion failed!")
+ERRDEF(PROTMT, "cannot change a protected metatable")
+ERRDEF(UNPACK, "too many results to unpack")
+ERRDEF(RDRSTR, "reader function must return a string")
+ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print"))
+ERRDEF(IDXRNG, "index out of range")
+ERRDEF(BASERNG, "base out of range")
+ERRDEF(LVLRNG, "level out of range")
+ERRDEF(INVLVL, "invalid level")
+ERRDEF(INVOPT, "invalid option")
+ERRDEF(INVOPTM, "invalid option " LUA_QS)
+ERRDEF(INVFMT, "invalid format")
+ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object")
+ERRDEF(CORUN, "cannot resume running coroutine")
+ERRDEF(CODEAD, "cannot resume dead coroutine")
+ERRDEF(COSUSP, "cannot resume non-suspended coroutine")
+ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert"))
+ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat"))
+ERRDEF(TABSORT, "invalid order function for sorting")
+ERRDEF(IOCLFL, "attempt to use a closed file")
+ERRDEF(IOSTDCL, "standard file is closed")
+ERRDEF(OSUNIQF, "unable to generate a unique filename")
+ERRDEF(OSDATEF, "field " LUA_QS " missing in date table")
+ERRDEF(STRDUMP, "unable to dump given function")
+ERRDEF(STRSLC, "string slice too long")
+ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern")
+ERRDEF(STRPATC, "invalid pattern capture")
+ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")")
+ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")")
+ERRDEF(STRPATU, "unbalanced pattern")
+ERRDEF(STRPATX, "pattern too complex")
+ERRDEF(STRCAPI, "invalid capture index")
+ERRDEF(STRCAPN, "too many captures")
+ERRDEF(STRCAPU, "unfinished capture")
+ERRDEF(STRFMTO, "invalid option " LUA_QL("%%%c") " to " LUA_QL("format"))
+ERRDEF(STRFMTR, "invalid format (repeated flags)")
+ERRDEF(STRFMTW, "invalid format (width or precision too long)")
+ERRDEF(STRGSRV, "invalid replacement value (a %s)")
+ERRDEF(BADMODN, "name conflict for module " LUA_QS)
+#if LJ_HASJIT
+#if LJ_TARGET_X86ORX64
+ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2")
+#else
+ERRDEF(NOJIT, "JIT compiler disabled")
+#endif
+#elif defined(LJ_ARCH_NOJIT)
+ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)")
+#else
+ERRDEF(NOJIT, "JIT compiler permanently disabled by build option")
+#endif
+ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS)
+
+/* Lexer/parser errors. */
+ERRDEF(XMODE, "attempt to load chunk with wrong mode")
+ERRDEF(XNEAR, "%s near " LUA_QS)
+ERRDEF(XELEM, "lexical element too long")
+ERRDEF(XLINES, "chunk has too many lines")
+ERRDEF(XLEVELS, "chunk has too many syntax levels")
+ERRDEF(XNUMBER, "malformed number")
+ERRDEF(XLSTR, "unfinished long string")
+ERRDEF(XLCOM, "unfinished long comment")
+ERRDEF(XSTR, "unfinished string")
+ERRDEF(XESC, "invalid escape sequence")
+ERRDEF(XLDELIM, "invalid long string delimiter")
+ERRDEF(XTOKEN, LUA_QS " expected")
+ERRDEF(XJUMP, "control structure too long")
+ERRDEF(XSLOTS, "function or expression too complex")
+ERRDEF(XLIMC, "chunk has more than %d local variables")
+ERRDEF(XLIMM, "main function has more than %d %s")
+ERRDEF(XLIMF, "function at line %d has more than %d %s")
+ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)")
+ERRDEF(XFIXUP, "function too long for return fixup")
+ERRDEF(XPARAM, "<name> or " LUA_QL("...") " expected")
+#if !LJ_52
+ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)")
+#endif
+ERRDEF(XFUNARG, "function arguments expected")
+ERRDEF(XSYMBOL, "unexpected symbol")
+ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function")
+ERRDEF(XSYNTAX, "syntax error")
+ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected")
+ERRDEF(XBREAK, "no loop to break")
+ERRDEF(XLUNDEF, "undefined label " LUA_QS)
+ERRDEF(XLDUP, "duplicate label " LUA_QS)
+ERRDEF(XGSCOPE, "<goto %s> jumps into the scope of local " LUA_QS)
+
+/* Bytecode reader errors. */
+ERRDEF(BCFMT, "cannot load incompatible bytecode")
+ERRDEF(BCBAD, "cannot load malformed bytecode")
+
+#if LJ_HASFFI
+/* FFI errors. */
+ERRDEF(FFI_INVTYPE, "invalid C type")
+ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large")
+ERRDEF(FFI_BADSCL, "bad storage class")
+ERRDEF(FFI_DECLSPEC, "declaration specifier expected")
+ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS)
+ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS)
+ERRDEF(FFI_NUMPARAM, "wrong number of type parameters")
+ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS)
+ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS)
+ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS)
+ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS)
+ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS)
+ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS)
+ERRDEF(FFI_BADCALL, LUA_QS " is not callable")
+ERRDEF(FFI_NUMARG, "wrong number of arguments for function call")
+ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS)
+ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed")
+ERRDEF(FFI_BADIDXW, LUA_QS " cannot be indexed with " LUA_QS)
+ERRDEF(FFI_BADMM, LUA_QS " has no " LUA_QS " metamethod")
+ERRDEF(FFI_WRCONST, "attempt to write to constant location")
+ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS)
+ERRDEF(FFI_BADCBACK, "bad callback")
+#if LJ_OS_NOJIT
+ERRDEF(FFI_CBACKOV, "no support for callbacks on this OS")
+#else
+ERRDEF(FFI_CBACKOV, "too many callbacks")
+#endif
+ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields")
+ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)")
+#endif
+
+#undef ERRDEF
+
+/* Detecting unused error messages:
+ awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh
+*/
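
lj_errmsg.h is an X-macro list: lj_err.h includes it once with an ERRDEF that turns every entry into a pair of enum constants whose values are byte offsets into one concatenated message string, and err2msg() then resolves a message as lj_err_allmsg plus that offset; the string itself is presumably produced by including the list again with a string-building ERRDEF. A compilable sketch of the scheme, with a two-entry list inlined instead of a separate header (all MY_/my_ names and the sample messages are invented for illustration):

#include <stdio.h>

#define MYERRLIST(_) \
  _(ERRMEM, "not enough memory") \
  _(ASSERT, "assertion failed!")

/* Pass 1: enum constants are byte offsets into the concatenated string. */
typedef enum {
#define ERRDEF(name, msg) \
  MY_ERR_##name, MY_ERR_##name##_ = MY_ERR_##name + sizeof(msg)-1,
MYERRLIST(ERRDEF)
#undef ERRDEF
  MY_ERR__MAX
} MyErrMsg;

/* Pass 2: the messages themselves, separated by the embedded "\0". */
static const char my_err_allmsg[] =
#define ERRDEF(name, msg) msg "\0"
MYERRLIST(ERRDEF)
#undef ERRDEF
;

#define my_err2msg(em) (my_err_allmsg + (int)(em))

int main(void)
{
  printf("%s\n", my_err2msg(MY_ERR_ERRMEM));  /* not enough memory */
  printf("%s\n", my_err2msg(MY_ERR_ASSERT));  /* assertion failed! */
  return 0;
}

The extra MY_ERR_name_ enumerator advances the implicit counter past the message text, and the +1 step to the next enumerator accounts for the "\0" separator, so each offset constant lands exactly on the first byte of its message.
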
diff --git a/3rdparty/lua/src/lj_ff.h b/3rdparty/lua/src/lj_ff.h
index a5301d2..b4a4301 100644
--- a/3rdparty/lua/src/lj_ff.h
+++ b/3rdparty/lua/src/lj_ff.h
@@ -1,18 +1,18 @@
-/*
-** Fast function IDs.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FF_H
-#define _LJ_FF_H
-
-/* Fast function ID. */
-typedef enum {
- FF_LUA_ = FF_LUA, /* Lua function (must be 0). */
- FF_C_ = FF_C, /* Regular C function (must be 1). */
-#define FFDEF(name) FF_##name,
-#include "lj_ffdef.h"
- FF__MAX
-} FastFunc;
-
-#endif
+/*
+** Fast function IDs.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FF_H
+#define _LJ_FF_H
+
+/* Fast function ID. */
+typedef enum {
+ FF_LUA_ = FF_LUA, /* Lua function (must be 0). */
+ FF_C_ = FF_C, /* Regular C function (must be 1). */
+#define FFDEF(name) FF_##name,
+#include "lj_ffdef.h"
+ FF__MAX
+} FastFunc;
+
+#endif
diff --git a/3rdparty/lua/src/lj_ffrecord.c b/3rdparty/lua/src/lj_ffrecord.c
index 69f71ab..35e2e88 100644
--- a/3rdparty/lua/src/lj_ffrecord.c
+++ b/3rdparty/lua/src/lj_ffrecord.c
@@ -1,6 +1,6 @@
/*
** Fast function call recorder.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_ffrecord_c
@@ -657,19 +657,20 @@ static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
end = argv2int(J, &rd->argv[2]);
}
} else { /* string.byte(str, [,start [,end]]) */
- if (tref_isnil(J->base[1])) {
- start = 1;
- trstart = lj_ir_kint(J, 1);
- } else {
+ if (!tref_isnil(J->base[1])) {
start = argv2int(J, &rd->argv[1]);
trstart = lj_opt_narrow_toint(J, J->base[1]);
- }
- if (J->base[1] && !tref_isnil(J->base[2])) {
- trend = lj_opt_narrow_toint(J, J->base[2]);
- end = argv2int(J, &rd->argv[2]);
+ trend = J->base[2];
+ if (tref_isnil(trend)) {
+ trend = trstart;
+ end = start;
+ } else {
+ trend = lj_opt_narrow_toint(J, trend);
+ end = argv2int(J, &rd->argv[2]);
+ }
} else {
- trend = trstart;
- end = start;
+ trend = trstart = lj_ir_kint(J, 1);
+ end = start = 1;
}
}
if (end < 0) {
diff --git a/3rdparty/lua/src/lj_ffrecord.h b/3rdparty/lua/src/lj_ffrecord.h
index 467065b..9a30f35 100644
--- a/3rdparty/lua/src/lj_ffrecord.h
+++ b/3rdparty/lua/src/lj_ffrecord.h
@@ -1,24 +1,24 @@
-/*
-** Fast function call recorder.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FFRECORD_H
-#define _LJ_FFRECORD_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-/* Data used by handlers to record a fast function. */
-typedef struct RecordFFData {
- TValue *argv; /* Runtime argument values. */
- ptrdiff_t nres; /* Number of returned results (defaults to 1). */
- uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */
-} RecordFFData;
-
-LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv);
-LJ_FUNC void lj_ffrecord_func(jit_State *J);
-#endif
-
-#endif
+/*
+** Fast function call recorder.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FFRECORD_H
+#define _LJ_FFRECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* Data used by handlers to record a fast function. */
+typedef struct RecordFFData {
+ TValue *argv; /* Runtime argument values. */
+ ptrdiff_t nres; /* Number of returned results (defaults to 1). */
+ uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */
+} RecordFFData;
+
+LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv);
+LJ_FUNC void lj_ffrecord_func(jit_State *J);
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_frame.h b/3rdparty/lua/src/lj_frame.h
index cd57be2..60ce1a9 100644
--- a/3rdparty/lua/src/lj_frame.h
+++ b/3rdparty/lua/src/lj_frame.h
@@ -1,6 +1,6 @@
/*
** Stack frames.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#ifndef _LJ_FRAME_H
diff --git a/3rdparty/lua/src/lj_func.c b/3rdparty/lua/src/lj_func.c
index d2e6dba..83f2d0b 100644
--- a/3rdparty/lua/src/lj_func.c
+++ b/3rdparty/lua/src/lj_func.c
@@ -1,185 +1,185 @@
-/*
-** Function handling (prototypes, functions and upvalues).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_func_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_func.h"
-#include "lj_trace.h"
-#include "lj_vm.h"
-
-/* -- Prototypes ---------------------------------------------------------- */
-
-void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt)
-{
- lj_mem_free(g, pt, pt->sizept);
-}
-
-/* -- Upvalues ------------------------------------------------------------ */
-
-static void unlinkuv(GCupval *uv)
-{
- lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
- setgcrefr(uvnext(uv)->prev, uv->prev);
- setgcrefr(uvprev(uv)->next, uv->next);
-}
-
-/* Find existing open upvalue for a stack slot or create a new one. */
-static GCupval *func_finduv(lua_State *L, TValue *slot)
-{
- global_State *g = G(L);
- GCRef *pp = &L->openupval;
- GCupval *p;
- GCupval *uv;
- /* Search the sorted list of open upvalues. */
- while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) {
- lua_assert(!p->closed && uvval(p) != &p->tv);
- if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */
- if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */
- flipwhite(obj2gco(p));
- return p;
- }
- pp = &p->nextgc;
- }
- /* No matching upvalue found. Create a new one. */
- uv = lj_mem_newt(L, sizeof(GCupval), GCupval);
- newwhite(g, uv);
- uv->gct = ~LJ_TUPVAL;
- uv->closed = 0; /* Still open. */
- setmref(uv->v, slot); /* Pointing to the stack slot. */
- /* NOBARRIER: The GCupval is new (marked white) and open. */
- setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */
- setgcref(*pp, obj2gco(uv));
- setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */
- setgcrefr(uv->next, g->uvhead.next);
- setgcref(uvnext(uv)->prev, obj2gco(uv));
- setgcref(g->uvhead.next, obj2gco(uv));
- lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
- return uv;
-}
-
-/* Create an empty and closed upvalue. */
-static GCupval *func_emptyuv(lua_State *L)
-{
- GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval));
- uv->gct = ~LJ_TUPVAL;
- uv->closed = 1;
- setnilV(&uv->tv);
- setmref(uv->v, &uv->tv);
- return uv;
-}
-
-/* Close all open upvalues pointing to some stack level or above. */
-void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level)
-{
- GCupval *uv;
- global_State *g = G(L);
- while (gcref(L->openupval) != NULL &&
- uvval((uv = gco2uv(gcref(L->openupval)))) >= level) {
- GCobj *o = obj2gco(uv);
- lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv);
- setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */
- if (isdead(g, o)) {
- lj_func_freeuv(g, uv);
- } else {
- unlinkuv(uv);
- lj_gc_closeuv(g, uv);
- }
- }
-}
-
-void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv)
-{
- if (!uv->closed)
- unlinkuv(uv);
- lj_mem_freet(g, uv);
-}
-
-/* -- Functions (closures) ------------------------------------------------ */
-
-GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env)
-{
- GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems));
- fn->c.gct = ~LJ_TFUNC;
- fn->c.ffid = FF_C;
- fn->c.nupvalues = (uint8_t)nelems;
- /* NOBARRIER: The GCfunc is new (marked white). */
- setmref(fn->c.pc, &G(L)->bc_cfunc_ext);
- setgcref(fn->c.env, obj2gco(env));
- return fn;
-}
-
-static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env)
-{
- uint32_t count;
- GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv));
- fn->l.gct = ~LJ_TFUNC;
- fn->l.ffid = FF_LUA;
- fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */
- /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */
- setmref(fn->l.pc, proto_bc(pt));
- setgcref(fn->l.env, obj2gco(env));
- /* Saturating 3 bit counter (0..7) for created closures. */
- count = (uint32_t)pt->flags + PROTO_CLCOUNT;
- pt->flags = (uint8_t)(count - ((count >> PROTO_CLC_BITS) & PROTO_CLCOUNT));
- return fn;
-}
-
-/* Create a new Lua function with empty upvalues. */
-GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env)
-{
- GCfunc *fn = func_newL(L, pt, env);
- MSize i, nuv = pt->sizeuv;
- /* NOBARRIER: The GCfunc is new (marked white). */
- for (i = 0; i < nuv; i++) {
- GCupval *uv = func_emptyuv(L);
- uv->dhash = (uint32_t)(uintptr_t)pt ^ ((uint32_t)proto_uv(pt)[i] << 24);
- setgcref(fn->l.uvptr[i], obj2gco(uv));
- }
- fn->l.nupvalues = (uint8_t)nuv;
- return fn;
-}
-
-/* Do a GC check and create a new Lua function with inherited upvalues. */
-GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent)
-{
- GCfunc *fn;
- GCRef *puv;
- MSize i, nuv;
- TValue *base;
- lj_gc_check_fixtop(L);
- fn = func_newL(L, pt, tabref(parent->env));
- /* NOBARRIER: The GCfunc is new (marked white). */
- puv = parent->uvptr;
- nuv = pt->sizeuv;
- base = L->base;
- for (i = 0; i < nuv; i++) {
- uint32_t v = proto_uv(pt)[i];
- GCupval *uv;
- if ((v & PROTO_UV_LOCAL)) {
- uv = func_finduv(L, base + (v & 0xff));
- uv->immutable = ((v / PROTO_UV_IMMUTABLE) & 1);
- uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24);
- } else {
- uv = &gcref(puv[v])->uv;
- }
- setgcref(fn->l.uvptr[i], obj2gco(uv));
- }
- fn->l.nupvalues = (uint8_t)nuv;
- return fn;
-}
-
-void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn)
-{
- MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
- sizeCfunc((MSize)fn->c.nupvalues);
- lj_mem_free(g, fn, size);
-}
-
+/*
+** Function handling (prototypes, functions and upvalues).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_func_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_func.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* -- Prototypes ---------------------------------------------------------- */
+
+void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt)
+{
+ lj_mem_free(g, pt, pt->sizept);
+}
+
+/* -- Upvalues ------------------------------------------------------------ */
+
+static void unlinkuv(GCupval *uv)
+{
+ lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
+ setgcrefr(uvnext(uv)->prev, uv->prev);
+ setgcrefr(uvprev(uv)->next, uv->next);
+}
+
+/* Find existing open upvalue for a stack slot or create a new one. */
+static GCupval *func_finduv(lua_State *L, TValue *slot)
+{
+ global_State *g = G(L);
+ GCRef *pp = &L->openupval;
+ GCupval *p;
+ GCupval *uv;
+ /* Search the sorted list of open upvalues. */
+ while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) {
+ lua_assert(!p->closed && uvval(p) != &p->tv);
+ if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */
+ if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */
+ flipwhite(obj2gco(p));
+ return p;
+ }
+ pp = &p->nextgc;
+ }
+ /* No matching upvalue found. Create a new one. */
+ uv = lj_mem_newt(L, sizeof(GCupval), GCupval);
+ newwhite(g, uv);
+ uv->gct = ~LJ_TUPVAL;
+ uv->closed = 0; /* Still open. */
+ setmref(uv->v, slot); /* Pointing to the stack slot. */
+ /* NOBARRIER: The GCupval is new (marked white) and open. */
+ setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */
+ setgcref(*pp, obj2gco(uv));
+ setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */
+ setgcrefr(uv->next, g->uvhead.next);
+ setgcref(uvnext(uv)->prev, obj2gco(uv));
+ setgcref(g->uvhead.next, obj2gco(uv));
+ lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
+ return uv;
+}
+
+/* Create an empty and closed upvalue. */
+static GCupval *func_emptyuv(lua_State *L)
+{
+ GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval));
+ uv->gct = ~LJ_TUPVAL;
+ uv->closed = 1;
+ setnilV(&uv->tv);
+ setmref(uv->v, &uv->tv);
+ return uv;
+}
+
+/* Close all open upvalues pointing to some stack level or above. */
+void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level)
+{
+ GCupval *uv;
+ global_State *g = G(L);
+ while (gcref(L->openupval) != NULL &&
+ uvval((uv = gco2uv(gcref(L->openupval)))) >= level) {
+ GCobj *o = obj2gco(uv);
+ lua_assert(!isblack(o) && !uv->closed && uvval(uv) != &uv->tv);
+ setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */
+ if (isdead(g, o)) {
+ lj_func_freeuv(g, uv);
+ } else {
+ unlinkuv(uv);
+ lj_gc_closeuv(g, uv);
+ }
+ }
+}
+
+void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv)
+{
+ if (!uv->closed)
+ unlinkuv(uv);
+ lj_mem_freet(g, uv);
+}
+
+/* -- Functions (closures) ------------------------------------------------ */
+
+GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env)
+{
+ GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems));
+ fn->c.gct = ~LJ_TFUNC;
+ fn->c.ffid = FF_C;
+ fn->c.nupvalues = (uint8_t)nelems;
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ setmref(fn->c.pc, &G(L)->bc_cfunc_ext);
+ setgcref(fn->c.env, obj2gco(env));
+ return fn;
+}
+
+static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env)
+{
+ uint32_t count;
+ GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv));
+ fn->l.gct = ~LJ_TFUNC;
+ fn->l.ffid = FF_LUA;
+ fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */
+ /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */
+ setmref(fn->l.pc, proto_bc(pt));
+ setgcref(fn->l.env, obj2gco(env));
+ /* Saturating 3 bit counter (0..7) for created closures. */
+ count = (uint32_t)pt->flags + PROTO_CLCOUNT;
+ pt->flags = (uint8_t)(count - ((count >> PROTO_CLC_BITS) & PROTO_CLCOUNT));
+ return fn;
+}
+
+/* Create a new Lua function with empty upvalues. */
+GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env)
+{
+ GCfunc *fn = func_newL(L, pt, env);
+ MSize i, nuv = pt->sizeuv;
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ for (i = 0; i < nuv; i++) {
+ GCupval *uv = func_emptyuv(L);
+ uv->dhash = (uint32_t)(uintptr_t)pt ^ ((uint32_t)proto_uv(pt)[i] << 24);
+ setgcref(fn->l.uvptr[i], obj2gco(uv));
+ }
+ fn->l.nupvalues = (uint8_t)nuv;
+ return fn;
+}
+
+/* Do a GC check and create a new Lua function with inherited upvalues. */
+GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent)
+{
+ GCfunc *fn;
+ GCRef *puv;
+ MSize i, nuv;
+ TValue *base;
+ lj_gc_check_fixtop(L);
+ fn = func_newL(L, pt, tabref(parent->env));
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ puv = parent->uvptr;
+ nuv = pt->sizeuv;
+ base = L->base;
+ for (i = 0; i < nuv; i++) {
+ uint32_t v = proto_uv(pt)[i];
+ GCupval *uv;
+ if ((v & PROTO_UV_LOCAL)) {
+ uv = func_finduv(L, base + (v & 0xff));
+ uv->immutable = ((v / PROTO_UV_IMMUTABLE) & 1);
+ uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24);
+ } else {
+ uv = &gcref(puv[v])->uv;
+ }
+ setgcref(fn->l.uvptr[i], obj2gco(uv));
+ }
+ fn->l.nupvalues = (uint8_t)nuv;
+ return fn;
+}
+
+void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn)
+{
+ MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
+ sizeCfunc((MSize)fn->c.nupvalues);
+ lj_mem_free(g, fn, size);
+}
+
diff --git a/3rdparty/lua/src/lj_func.h b/3rdparty/lua/src/lj_func.h
index 92d25f0..73280a8 100644
--- a/3rdparty/lua/src/lj_func.h
+++ b/3rdparty/lua/src/lj_func.h
@@ -1,24 +1,24 @@
-/*
-** Function handling (prototypes, functions and upvalues).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FUNC_H
-#define _LJ_FUNC_H
-
-#include "lj_obj.h"
-
-/* Prototypes. */
-LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt);
-
-/* Upvalues. */
-LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level);
-LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv);
-
-/* Functions (closures). */
-LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env);
-LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env);
-LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent);
-LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c);
-
-#endif
+/*
+** Function handling (prototypes, functions and upvalues).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FUNC_H
+#define _LJ_FUNC_H
+
+#include "lj_obj.h"
+
+/* Prototypes. */
+LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt);
+
+/* Upvalues. */
+LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level);
+LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv);
+
+/* Functions (closures). */
+LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env);
+LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env);
+LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent);
+LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c);
+
+#endif
diff --git a/3rdparty/lua/src/lj_gc.c b/3rdparty/lua/src/lj_gc.c
index a9880aa..79f8b72 100644
--- a/3rdparty/lua/src/lj_gc.c
+++ b/3rdparty/lua/src/lj_gc.c
@@ -1,849 +1,839 @@
-/*
-** Garbage collector.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_gc_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_func.h"
-#include "lj_udata.h"
-#include "lj_meta.h"
-#include "lj_state.h"
-#include "lj_frame.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#include "lj_cdata.h"
-#endif
-#include "lj_trace.h"
-#include "lj_vm.h"
-
-#define GCSTEPSIZE 1024u
-#define GCSWEEPMAX 40
-#define GCSWEEPCOST 10
-#define GCFINALIZECOST 100
-
-/* Macros to set GCobj colors and flags. */
-#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
-#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
-#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
-
-/* -- Mark phase ---------------------------------------------------------- */
-
-/* Mark a TValue (if needed). */
-#define gc_marktv(g, tv) \
- { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
- if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
-
-/* Mark a GCobj (if needed). */
-#define gc_markobj(g, o) \
- { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
-
-/* Mark a string object. */
-#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
-
-/* Mark a white GCobj. */
-static void gc_mark(global_State *g, GCobj *o)
-{
- int gct = o->gch.gct;
- lua_assert(iswhite(o) && !isdead(g, o));
- white2gray(o);
- if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
- GCtab *mt = tabref(gco2ud(o)->metatable);
- gray2black(o); /* Userdata are never gray. */
- if (mt) gc_markobj(g, mt);
- gc_markobj(g, tabref(gco2ud(o)->env));
- } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
- GCupval *uv = gco2uv(o);
- gc_marktv(g, uvval(uv));
- if (uv->closed)
- gray2black(o); /* Closed upvalues are never gray. */
- } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
- lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
- gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
- setgcrefr(o->gch.gclist, g->gc.gray);
- setgcref(g->gc.gray, o);
- }
-}
-
-/* Mark GC roots. */
-static void gc_mark_gcroot(global_State *g)
-{
- ptrdiff_t i;
- for (i = 0; i < GCROOT_MAX; i++)
- if (gcref(g->gcroot[i]) != NULL)
- gc_markobj(g, gcref(g->gcroot[i]));
-}
-
-/* Start a GC cycle and mark the root set. */
-static void gc_mark_start(global_State *g)
-{
- setgcrefnull(g->gc.gray);
- setgcrefnull(g->gc.grayagain);
- setgcrefnull(g->gc.weak);
- gc_markobj(g, mainthread(g));
- gc_markobj(g, tabref(mainthread(g)->env));
- gc_marktv(g, &g->registrytv);
- gc_mark_gcroot(g);
- g->gc.state = GCSpropagate;
-}
-
-/* Mark open upvalues. */
-static void gc_mark_uv(global_State *g)
-{
- GCupval *uv;
- for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
- lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
- if (isgray(obj2gco(uv)))
- gc_marktv(g, uvval(uv));
- }
-}
-
-/* Mark userdata in mmudata list. */
-static void gc_mark_mmudata(global_State *g)
-{
- GCobj *root = gcref(g->gc.mmudata);
- GCobj *u = root;
- if (u) {
- do {
- u = gcnext(u);
- makewhite(g, u); /* Could be from previous GC. */
- gc_mark(g, u);
- } while (u != root);
- }
-}
-
-/* Separate userdata objects to be finalized to mmudata list. */
-size_t lj_gc_separateudata(global_State *g, int all)
-{
- size_t m = 0;
- GCRef *p = &mainthread(g)->nextgc;
- GCobj *o;
- while ((o = gcref(*p)) != NULL) {
- if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
- p = &o->gch.nextgc; /* Nothing to do. */
- } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
- markfinalized(o); /* Done, as there's no __gc metamethod. */
- p = &o->gch.nextgc;
- } else { /* Otherwise move userdata to be finalized to mmudata list. */
- m += sizeudata(gco2ud(o));
- markfinalized(o);
- *p = o->gch.nextgc;
- if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
- GCobj *root = gcref(g->gc.mmudata);
- setgcrefr(o->gch.nextgc, root->gch.nextgc);
- setgcref(root->gch.nextgc, o);
- setgcref(g->gc.mmudata, o);
- } else { /* Create circular list. */
- setgcref(o->gch.nextgc, o);
- setgcref(g->gc.mmudata, o);
- }
- }
- }
- return m;
-}
-
-/* -- Propagation phase --------------------------------------------------- */
-
-/* Traverse a table. */
-static int gc_traverse_tab(global_State *g, GCtab *t)
-{
- int weak = 0;
- cTValue *mode;
- GCtab *mt = tabref(t->metatable);
- if (mt)
- gc_markobj(g, mt);
- mode = lj_meta_fastg(g, mt, MM_mode);
- if (mode && tvisstr(mode)) { /* Valid __mode field? */
- const char *modestr = strVdata(mode);
- int c;
- while ((c = *modestr++)) {
- if (c == 'k') weak |= LJ_GC_WEAKKEY;
- else if (c == 'v') weak |= LJ_GC_WEAKVAL;
- else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
- }
- if (weak > 0) { /* Weak tables are cleared in the atomic phase. */
- t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
- setgcrefr(t->gclist, g->gc.weak);
- setgcref(g->gc.weak, obj2gco(t));
- }
- }
- if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
- return 1;
- if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
- MSize i, asize = t->asize;
- for (i = 0; i < asize; i++)
- gc_marktv(g, arrayslot(t, i));
- }
- if (t->hmask > 0) { /* Mark hash part. */
- Node *node = noderef(t->node);
- MSize i, hmask = t->hmask;
- for (i = 0; i <= hmask; i++) {
- Node *n = &node[i];
- if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
- lua_assert(!tvisnil(&n->key));
- if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
- if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
- }
- }
- }
- return weak;
-}
-
-/* Traverse a function. */
-static void gc_traverse_func(global_State *g, GCfunc *fn)
-{
- gc_markobj(g, tabref(fn->c.env));
- if (isluafunc(fn)) {
- uint32_t i;
- lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
- gc_markobj(g, funcproto(fn));
- for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
- gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
- } else {
- uint32_t i;
- for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
- gc_marktv(g, &fn->c.upvalue[i]);
- }
-}
-
-#if LJ_HASJIT
-/* Mark a trace. */
-static void gc_marktrace(global_State *g, TraceNo traceno)
-{
- GCobj *o = obj2gco(traceref(G2J(g), traceno));
- lua_assert(traceno != G2J(g)->cur.traceno);
- if (iswhite(o)) {
- white2gray(o);
- setgcrefr(o->gch.gclist, g->gc.gray);
- setgcref(g->gc.gray, o);
- }
-}
-
-/* Traverse a trace. */
-static void gc_traverse_trace(global_State *g, GCtrace *T)
-{
- IRRef ref;
- if (T->traceno == 0) return;
- for (ref = T->nk; ref < REF_TRUE; ref++) {
- IRIns *ir = &T->ir[ref];
- if (ir->o == IR_KGC)
- gc_markobj(g, ir_kgc(ir));
- }
- if (T->link) gc_marktrace(g, T->link);
- if (T->nextroot) gc_marktrace(g, T->nextroot);
- if (T->nextside) gc_marktrace(g, T->nextside);
- gc_markobj(g, gcref(T->startpt));
-}
-
-/* The current trace is a GC root while not anchored in the prototype (yet). */
-#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
-#else
-#define gc_traverse_curtrace(g) UNUSED(g)
-#endif
-
-/* Traverse a prototype. */
-static void gc_traverse_proto(global_State *g, GCproto *pt)
-{
- ptrdiff_t i;
- gc_mark_str(proto_chunkname(pt));
- for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
- gc_markobj(g, proto_kgc(pt, i));
-#if LJ_HASJIT
- if (pt->trace) gc_marktrace(g, pt->trace);
-#endif
-}
-
-/* Traverse the frame structure of a stack. */
-static MSize gc_traverse_frames(global_State *g, lua_State *th)
-{
- TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
- /* Note: extra vararg frame not skipped, marks function twice (harmless). */
- for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
- GCfunc *fn = frame_func(frame);
- TValue *ftop = frame;
- if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
- if (ftop > top) top = ftop;
- gc_markobj(g, fn); /* Need to mark hidden function (or L). */
- }
- top++; /* Correct bias of -1 (frame == base-1). */
- if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
- return (MSize)(top - bot); /* Return minimum needed stack size. */
-}
-
-/* Traverse a thread object. */
-static void gc_traverse_thread(global_State *g, lua_State *th)
-{
- TValue *o, *top = th->top;
- for (o = tvref(th->stack)+1; o < top; o++)
- gc_marktv(g, o);
- if (g->gc.state == GCSatomic) {
- top = tvref(th->stack) + th->stacksize;
- for (; o < top; o++) /* Clear unmarked slots. */
- setnilV(o);
- }
- gc_markobj(g, tabref(th->env));
- lj_state_shrinkstack(th, gc_traverse_frames(g, th));
-}
-
-/* Propagate one gray object. Traverse it and turn it black. */
-static size_t propagatemark(global_State *g)
-{
- GCobj *o = gcref(g->gc.gray);
- int gct = o->gch.gct;
- lua_assert(isgray(o));
- gray2black(o);
- setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
- if (LJ_LIKELY(gct == ~LJ_TTAB)) {
- GCtab *t = gco2tab(o);
- if (gc_traverse_tab(g, t) > 0)
- black2gray(o); /* Keep weak tables gray. */
- return sizeof(GCtab) + sizeof(TValue) * t->asize +
- sizeof(Node) * (t->hmask + 1);
- } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
- GCfunc *fn = gco2func(o);
- gc_traverse_func(g, fn);
- return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
- sizeCfunc((MSize)fn->c.nupvalues);
- } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
- GCproto *pt = gco2pt(o);
- gc_traverse_proto(g, pt);
- return pt->sizept;
- } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
- lua_State *th = gco2th(o);
- setgcrefr(th->gclist, g->gc.grayagain);
- setgcref(g->gc.grayagain, o);
- black2gray(o); /* Threads are never black. */
- gc_traverse_thread(g, th);
- return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
- } else {
-#if LJ_HASJIT
- GCtrace *T = gco2trace(o);
- gc_traverse_trace(g, T);
- return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
- T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
-#else
- lua_assert(0);
- return 0;
-#endif
- }
-}
-
-/* Propagate all gray objects. */
-static size_t gc_propagate_gray(global_State *g)
-{
- size_t m = 0;
- while (gcref(g->gc.gray) != NULL)
- m += propagatemark(g);
- return m;
-}
-
-/* -- Sweep phase --------------------------------------------------------- */
-
-/* Try to shrink some common data structures. */
-static void gc_shrink(global_State *g, lua_State *L)
-{
- if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
- lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
- if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
- lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */
-}
-
-/* Type of GC free functions. */
-typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
-
-/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
-static const GCFreeFunc gc_freefunc[] = {
- (GCFreeFunc)lj_str_free,
- (GCFreeFunc)lj_func_freeuv,
- (GCFreeFunc)lj_state_free,
- (GCFreeFunc)lj_func_freeproto,
- (GCFreeFunc)lj_func_free,
-#if LJ_HASJIT
- (GCFreeFunc)lj_trace_free,
-#else
- (GCFreeFunc)0,
-#endif
-#if LJ_HASFFI
- (GCFreeFunc)lj_cdata_free,
-#else
- (GCFreeFunc)0,
-#endif
- (GCFreeFunc)lj_tab_free,
- (GCFreeFunc)lj_udata_free
-};
-
-/* Full sweep of a GC list. */
-#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM)
-
-/* Partial sweep of a GC list. */
-static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
-{
- /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
- int ow = otherwhite(g);
- GCobj *o;
- while ((o = gcref(*p)) != NULL && lim-- > 0) {
- if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
- gc_fullsweep(g, &gco2th(o)->openupval);
- if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
- lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
- makewhite(g, o); /* Value is alive, change to the current white. */
- p = &o->gch.nextgc;
- } else { /* Otherwise value is dead, free it. */
- lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
- setgcrefr(*p, o->gch.nextgc);
- if (o == gcref(g->gc.root))
- setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
- gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
- }
- }
- return p;
-}
-
-/* Check whether we can clear a key or a value slot from a table. */
-static int gc_mayclear(cTValue *o, int val)
-{
- if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
- if (tvisstr(o)) { /* But strings cannot be used as weak references. */
- gc_mark_str(strV(o)); /* And need to be marked. */
- return 0;
- }
- if (iswhite(gcV(o)))
- return 1; /* Object is about to be collected. */
- if (tvisudata(o) && val && isfinalized(udataV(o)))
- return 1; /* Finalized userdata is dropped only from values. */
- }
- return 0; /* Cannot clear. */
-}
-
-/* Clear collected entries from weak tables. */
-static void gc_clearweak(GCobj *o)
-{
- while (o) {
- GCtab *t = gco2tab(o);
- lua_assert((t->marked & LJ_GC_WEAK));
- if ((t->marked & LJ_GC_WEAKVAL)) {
- MSize i, asize = t->asize;
- for (i = 0; i < asize; i++) {
- /* Clear array slot when value is about to be collected. */
- TValue *tv = arrayslot(t, i);
- if (gc_mayclear(tv, 1))
- setnilV(tv);
- }
- }
- if (t->hmask > 0) {
- Node *node = noderef(t->node);
- MSize i, hmask = t->hmask;
- for (i = 0; i <= hmask; i++) {
- Node *n = &node[i];
- /* Clear hash slot when key or value is about to be collected. */
- if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
- gc_mayclear(&n->val, 1)))
- setnilV(&n->val);
- }
- }
- o = gcref(t->gclist);
- }
-}
-
-/* Call a userdata or cdata finalizer. */
-static void gc_call_finalizer(global_State *g, lua_State *L,
- cTValue *mo, GCobj *o)
-{
- /* Save and restore lots of state around the __gc callback. */
- uint8_t oldh = hook_save(g);
- MSize oldt = g->gc.threshold;
- int errcode;
- TValue *top;
- lj_trace_abort(g);
- top = L->top;
- L->top = top+2;
- hook_entergc(g); /* Disable hooks and new traces during __gc. */
- g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
- copyTV(L, top, mo);
- setgcV(L, top+1, o, ~o->gch.gct);
- errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|o| -> | */
- hook_restore(g, oldh);
- g->gc.threshold = oldt; /* Restore GC threshold. */
- if (errcode)
- lj_err_throw(L, errcode); /* Propagate errors. */
-}
-
-/* Finalize one userdata or cdata object from the mmudata list. */
-static void gc_finalize(lua_State *L)
-{
- global_State *g = G(L);
- GCobj *o = gcnext(gcref(g->gc.mmudata));
- cTValue *mo;
- lua_assert(gcref(g->jit_L) == NULL); /* Must not be called on trace. */
- /* Unchain from list of userdata to be finalized. */
- if (o == gcref(g->gc.mmudata))
- setgcrefnull(g->gc.mmudata);
- else
- setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
-#if LJ_HASFFI
- if (o->gch.gct == ~LJ_TCDATA) {
- TValue tmp, *tv;
- /* Add cdata back to the GC list and make it white. */
- setgcrefr(o->gch.nextgc, g->gc.root);
- setgcref(g->gc.root, o);
- makewhite(g, o);
- o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
- /* Resolve finalizer. */
- setcdataV(L, &tmp, gco2cd(o));
- tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
- if (!tvisnil(tv)) {
- g->gc.nocdatafin = 0;
- copyTV(L, &tmp, tv);
- setnilV(tv); /* Clear entry in finalizer table. */
- gc_call_finalizer(g, L, &tmp, o);
- }
- return;
- }
-#endif
- /* Add userdata back to the main userdata list and make it white. */
- setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
- setgcref(mainthread(g)->nextgc, o);
- makewhite(g, o);
- /* Resolve the __gc metamethod. */
- mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
- if (mo)
- gc_call_finalizer(g, L, mo, o);
-}
-
-/* Finalize all userdata objects from mmudata list. */
-void lj_gc_finalize_udata(lua_State *L)
-{
- while (gcref(G(L)->gc.mmudata) != NULL)
- gc_finalize(L);
-}
-
-#if LJ_HASFFI
-/* Finalize all cdata objects from finalizer table. */
-void lj_gc_finalize_cdata(lua_State *L)
-{
- global_State *g = G(L);
- CTState *cts = ctype_ctsG(g);
- if (cts) {
- GCtab *t = cts->finalizer;
- Node *node = noderef(t->node);
- ptrdiff_t i;
- setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */
- for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
- if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
- GCobj *o = gcV(&node[i].key);
- TValue tmp;
- makewhite(g, o);
- o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
- copyTV(L, &tmp, &node[i].val);
- setnilV(&node[i].val);
- gc_call_finalizer(g, L, &tmp, o);
- }
- }
-}
-#endif
-
-/* Free all remaining GC objects. */
-void lj_gc_freeall(global_State *g)
-{
- MSize i, strmask;
- /* Free everything, except super-fixed objects (the main thread). */
- g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
- gc_fullsweep(g, &g->gc.root);
- strmask = g->strmask;
- for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
- gc_fullsweep(g, &g->strhash[i]);
-}
-
-/* -- Collector ----------------------------------------------------------- */
-
-/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
-static void atomic(global_State *g, lua_State *L)
-{
- size_t udsize;
-
- gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
- gc_propagate_gray(g); /* Propagate any left-overs. */
-
- setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
- setgcrefnull(g->gc.weak);
- lua_assert(!iswhite(obj2gco(mainthread(g))));
- gc_markobj(g, L); /* Mark running thread. */
- gc_traverse_curtrace(g); /* Traverse current trace. */
- gc_mark_gcroot(g); /* Mark GC roots (again). */
- gc_propagate_gray(g); /* Propagate all of the above. */
-
- setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
- setgcrefnull(g->gc.grayagain);
- gc_propagate_gray(g); /* Propagate it. */
-
- udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
- gc_mark_mmudata(g); /* Mark them. */
- udsize += gc_propagate_gray(g); /* And propagate the marks. */
-
- /* All marking done, clear weak tables. */
- gc_clearweak(gcref(g->gc.weak));
-
- /* Prepare for sweep phase. */
- g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
- g->strempty.marked = g->gc.currentwhite;
- setmref(g->gc.sweep, &g->gc.root);
- g->gc.estimate = g->gc.total - (MSize)udsize; /* Initial estimate. */
-}
-
-/* GC state machine. Returns a cost estimate for each step performed. */
-static size_t gc_onestep(lua_State *L)
-{
- global_State *g = G(L);
- switch (g->gc.state) {
- case GCSpause:
- gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
- return 0;
- case GCSpropagate:
- if (gcref(g->gc.gray) != NULL)
- return propagatemark(g); /* Propagate one gray object. */
- g->gc.state = GCSatomic; /* End of mark phase. */
- return 0;
- case GCSatomic:
- if (gcref(g->jit_L)) /* Don't run atomic phase on trace. */
- return LJ_MAX_MEM;
- atomic(g, L);
- g->gc.state = GCSsweepstring; /* Start of sweep phase. */
- g->gc.sweepstr = 0;
- return 0;
- case GCSsweepstring: {
- MSize old = g->gc.total;
- gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */
- if (g->gc.sweepstr > g->strmask)
- g->gc.state = GCSsweep; /* All string hash chains swept. */
- lua_assert(old >= g->gc.total);
- g->gc.estimate -= old - g->gc.total;
- return GCSWEEPCOST;
- }
- case GCSsweep: {
- MSize old = g->gc.total;
- setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
- lua_assert(old >= g->gc.total);
- g->gc.estimate -= old - g->gc.total;
- if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
- gc_shrink(g, L);
- if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
- g->gc.state = GCSfinalize;
-#if LJ_HASFFI
- g->gc.nocdatafin = 1;
-#endif
- } else { /* Otherwise skip this phase to help the JIT. */
- g->gc.state = GCSpause; /* End of GC cycle. */
- g->gc.debt = 0;
- }
- }
- return GCSWEEPMAX*GCSWEEPCOST;
- }
- case GCSfinalize:
- if (gcref(g->gc.mmudata) != NULL) {
- if (gcref(g->jit_L)) /* Don't call finalizers on trace. */
- return LJ_MAX_MEM;
- gc_finalize(L); /* Finalize one userdata object. */
- if (g->gc.estimate > GCFINALIZECOST)
- g->gc.estimate -= GCFINALIZECOST;
- return GCFINALIZECOST;
- }
-#if LJ_HASFFI
- if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
-#endif
- g->gc.state = GCSpause; /* End of GC cycle. */
- g->gc.debt = 0;
- return 0;
- default:
- lua_assert(0);
- return 0;
- }
-}
-
-/* Perform a limited amount of incremental GC steps. */
-int LJ_FASTCALL lj_gc_step(lua_State *L)
-{
- global_State *g = G(L);
- MSize lim;
- int32_t ostate = g->vmstate;
- setvmstate(g, GC);
- lim = (GCSTEPSIZE/100) * g->gc.stepmul;
- if (lim == 0)
- lim = LJ_MAX_MEM;
- if (g->gc.total > g->gc.threshold)
- g->gc.debt += g->gc.total - g->gc.threshold;
- do {
- lim -= (MSize)gc_onestep(L);
- if (g->gc.state == GCSpause) {
- g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
- g->vmstate = ostate;
- return 1; /* Finished a GC cycle. */
- }
- } while ((int32_t)lim > 0);
- if (g->gc.debt < GCSTEPSIZE) {
- g->gc.threshold = g->gc.total + GCSTEPSIZE;
- g->vmstate = ostate;
- return -1;
- } else {
- g->gc.debt -= GCSTEPSIZE;
- g->gc.threshold = g->gc.total;
- g->vmstate = ostate;
- return 0;
- }
-}
-
-/* Ditto, but fix the stack top first. */
-void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
-{
- if (curr_funcisL(L)) L->top = curr_topL(L);
- lj_gc_step(L);
-}
-
-#if LJ_HASJIT
-/* Perform multiple GC steps. Called from JIT-compiled code. */
-int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
-{
- lua_State *L = gco2th(gcref(g->jit_L));
- L->base = mref(G(L)->jit_base, TValue);
- L->top = curr_topL(L);
- while (steps-- > 0 && lj_gc_step(L) == 0)
- ;
- /* Return 1 to force a trace exit. */
- return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
-}
-#endif
-
-/* Perform a full GC cycle. */
-void lj_gc_fullgc(lua_State *L)
-{
- global_State *g = G(L);
- int32_t ostate = g->vmstate;
- setvmstate(g, GC);
- if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
- setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */
- setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
- setgcrefnull(g->gc.grayagain);
- setgcrefnull(g->gc.weak);
- g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
- g->gc.sweepstr = 0;
- }
- while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
- gc_onestep(L); /* Finish sweep. */
- lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
- /* Now perform a full GC. */
- g->gc.state = GCSpause;
- do { gc_onestep(L); } while (g->gc.state != GCSpause);
- g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
- g->vmstate = ostate;
-}
-
-/* -- Write barriers ------------------------------------------------------ */
-
-/* Move the GC propagation frontier forward. */
-void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
-{
- lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
- lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
- lua_assert(o->gch.gct != ~LJ_TTAB);
- /* Preserve invariant during propagation. Otherwise it doesn't matter. */
- if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
- gc_mark(g, v); /* Move frontier forward. */
- else
- makewhite(g, o); /* Make it white to avoid the following barrier. */
-}
-
-/* Specialized barrier for closed upvalue. Pass &uv->tv. */
-void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
-{
-#define TV2MARKED(x) \
- (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
- if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
- gc_mark(g, gcV(tv));
- else
- TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
-#undef TV2MARKED
-}
-
-/* Close upvalue. Also needs a write barrier. */
-void lj_gc_closeuv(global_State *g, GCupval *uv)
-{
- GCobj *o = obj2gco(uv);
- /* Copy stack slot to upvalue itself and point to the copy. */
- copyTV(mainthread(g), &uv->tv, uvval(uv));
- setmref(uv->v, &uv->tv);
- uv->closed = 1;
- setgcrefr(o->gch.nextgc, g->gc.root);
- setgcref(g->gc.root, o);
- if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
- if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
- gray2black(o); /* Make it black and preserve invariant. */
- if (tviswhite(&uv->tv))
- lj_gc_barrierf(g, o, gcV(&uv->tv));
- } else {
- makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
- lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
- }
- }
-}
-
-#if LJ_HASJIT
-/* Mark a trace if it's saved during the propagation phase. */
-void lj_gc_barriertrace(global_State *g, uint32_t traceno)
-{
- if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
- gc_marktrace(g, traceno);
-}
-#endif
-
-/* -- Allocator ----------------------------------------------------------- */
-
-/* Call pluggable memory allocator to allocate or resize a fragment. */
-void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
-{
- global_State *g = G(L);
- lua_assert((osz == 0) == (p == NULL));
- p = g->allocf(g->allocd, p, osz, nsz);
- if (p == NULL && nsz > 0)
- lj_err_mem(L);
- lua_assert((nsz == 0) == (p == NULL));
- lua_assert(checkptr32(p));
- g->gc.total = (g->gc.total - osz) + nsz;
- return p;
-}
-
-/* Allocate new GC object and link it to the root set. */
-void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
-{
- global_State *g = G(L);
- GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
- if (o == NULL)
- lj_err_mem(L);
- lua_assert(checkptr32(o));
- g->gc.total += size;
- setgcrefr(o->gch.nextgc, g->gc.root);
- setgcref(g->gc.root, o);
- newwhite(g, o);
- return o;
-}
-
-/* Resize growable vector. */
-void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
-{
- MSize sz = (*szp) << 1;
- if (sz < LJ_MIN_VECSZ)
- sz = LJ_MIN_VECSZ;
- if (sz > lim)
- sz = lim;
- p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
- *szp = sz;
- return p;
-}
-
+/*
+** Garbage collector.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_gc_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+#define GCSTEPSIZE 1024u
+#define GCSWEEPMAX 40
+#define GCSWEEPCOST 10
+#define GCFINALIZECOST 100
+
+/* Macros to set GCobj colors and flags. */
+#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
+#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
+#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
+
+/* -- Mark phase ---------------------------------------------------------- */
+
+/* Mark a TValue (if needed). */
+#define gc_marktv(g, tv) \
+ { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
+ if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
+
+/* Mark a GCobj (if needed). */
+#define gc_markobj(g, o) \
+ { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
+
+/* Mark a string object. */
+#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
+
+/* Mark a white GCobj. */
+static void gc_mark(global_State *g, GCobj *o)
+{
+ int gct = o->gch.gct;
+ lua_assert(iswhite(o) && !isdead(g, o));
+ white2gray(o);
+ if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
+ GCtab *mt = tabref(gco2ud(o)->metatable);
+ gray2black(o); /* Userdata are never gray. */
+ if (mt) gc_markobj(g, mt);
+ gc_markobj(g, tabref(gco2ud(o)->env));
+ } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
+ GCupval *uv = gco2uv(o);
+ gc_marktv(g, uvval(uv));
+ if (uv->closed)
+ gray2black(o); /* Closed upvalues are never gray. */
+ } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
+ lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
+ gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
+ setgcrefr(o->gch.gclist, g->gc.gray);
+ setgcref(g->gc.gray, o);
+ }
+}
+
+/* Mark GC roots. */
+static void gc_mark_gcroot(global_State *g)
+{
+ ptrdiff_t i;
+ for (i = 0; i < GCROOT_MAX; i++)
+ if (gcref(g->gcroot[i]) != NULL)
+ gc_markobj(g, gcref(g->gcroot[i]));
+}
+
+/* Start a GC cycle and mark the root set. */
+static void gc_mark_start(global_State *g)
+{
+ setgcrefnull(g->gc.gray);
+ setgcrefnull(g->gc.grayagain);
+ setgcrefnull(g->gc.weak);
+ gc_markobj(g, mainthread(g));
+ gc_markobj(g, tabref(mainthread(g)->env));
+ gc_marktv(g, &g->registrytv);
+ gc_mark_gcroot(g);
+ g->gc.state = GCSpropagate;
+}
+
+/* Mark open upvalues. */
+static void gc_mark_uv(global_State *g)
+{
+ GCupval *uv;
+ for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
+ lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
+ if (isgray(obj2gco(uv)))
+ gc_marktv(g, uvval(uv));
+ }
+}
+
+/* Mark userdata in mmudata list. */
+static void gc_mark_mmudata(global_State *g)
+{
+ GCobj *root = gcref(g->gc.mmudata);
+ GCobj *u = root;
+ if (u) {
+ do {
+ u = gcnext(u);
+ makewhite(g, u); /* Could be from previous GC. */
+ gc_mark(g, u);
+ } while (u != root);
+ }
+}
+
+/* Separate userdata objects to be finalized to mmudata list. */
+size_t lj_gc_separateudata(global_State *g, int all)
+{
+ size_t m = 0;
+ GCRef *p = &mainthread(g)->nextgc;
+ GCobj *o;
+ while ((o = gcref(*p)) != NULL) {
+ if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
+ p = &o->gch.nextgc; /* Nothing to do. */
+ } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
+ markfinalized(o); /* Done, as there's no __gc metamethod. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise move userdata to be finalized to mmudata list. */
+ m += sizeudata(gco2ud(o));
+ markfinalized(o);
+ *p = o->gch.nextgc;
+ if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
+ GCobj *root = gcref(g->gc.mmudata);
+ setgcrefr(o->gch.nextgc, root->gch.nextgc);
+ setgcref(root->gch.nextgc, o);
+ setgcref(g->gc.mmudata, o);
+ } else { /* Create circular list. */
+ setgcref(o->gch.nextgc, o);
+ setgcref(g->gc.mmudata, o);
+ }
+ }
+ }
+ return m;
+}
+
+/* -- Propagation phase --------------------------------------------------- */
+
+/* Traverse a table. */
+static int gc_traverse_tab(global_State *g, GCtab *t)
+{
+ int weak = 0;
+ cTValue *mode;
+ GCtab *mt = tabref(t->metatable);
+ if (mt)
+ gc_markobj(g, mt);
+ mode = lj_meta_fastg(g, mt, MM_mode);
+ if (mode && tvisstr(mode)) { /* Valid __mode field? */
+ const char *modestr = strVdata(mode);
+ int c;
+ while ((c = *modestr++)) {
+ if (c == 'k') weak |= LJ_GC_WEAKKEY;
+ else if (c == 'v') weak |= LJ_GC_WEAKVAL;
+ else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
+ }
+ if (weak > 0) { /* Weak tables are cleared in the atomic phase. */
+ t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
+ setgcrefr(t->gclist, g->gc.weak);
+ setgcref(g->gc.weak, obj2gco(t));
+ }
+ }
+ if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
+ return 1;
+ if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
+ MSize i, asize = t->asize;
+ for (i = 0; i < asize; i++)
+ gc_marktv(g, arrayslot(t, i));
+ }
+ if (t->hmask > 0) { /* Mark hash part. */
+ Node *node = noderef(t->node);
+ MSize i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
+ lua_assert(!tvisnil(&n->key));
+ if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
+ if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
+ }
+ }
+ }
+ return weak;
+}
+
+/* Traverse a function. */
+static void gc_traverse_func(global_State *g, GCfunc *fn)
+{
+ gc_markobj(g, tabref(fn->c.env));
+ if (isluafunc(fn)) {
+ uint32_t i;
+ lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
+ gc_markobj(g, funcproto(fn));
+ for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
+ gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
+ } else {
+ uint32_t i;
+ for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
+ gc_marktv(g, &fn->c.upvalue[i]);
+ }
+}
+
+#if LJ_HASJIT
+/* Mark a trace. */
+static void gc_marktrace(global_State *g, TraceNo traceno)
+{
+ GCobj *o = obj2gco(traceref(G2J(g), traceno));
+ lua_assert(traceno != G2J(g)->cur.traceno);
+ if (iswhite(o)) {
+ white2gray(o);
+ setgcrefr(o->gch.gclist, g->gc.gray);
+ setgcref(g->gc.gray, o);
+ }
+}
+
+/* Traverse a trace. */
+static void gc_traverse_trace(global_State *g, GCtrace *T)
+{
+ IRRef ref;
+ if (T->traceno == 0) return;
+ for (ref = T->nk; ref < REF_TRUE; ref++) {
+ IRIns *ir = &T->ir[ref];
+ if (ir->o == IR_KGC)
+ gc_markobj(g, ir_kgc(ir));
+ }
+ if (T->link) gc_marktrace(g, T->link);
+ if (T->nextroot) gc_marktrace(g, T->nextroot);
+ if (T->nextside) gc_marktrace(g, T->nextside);
+ gc_markobj(g, gcref(T->startpt));
+}
+
+/* The current trace is a GC root while not anchored in the prototype (yet). */
+#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
+#else
+#define gc_traverse_curtrace(g) UNUSED(g)
+#endif
+
+/* Traverse a prototype. */
+static void gc_traverse_proto(global_State *g, GCproto *pt)
+{
+ ptrdiff_t i;
+ gc_mark_str(proto_chunkname(pt));
+ for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
+ gc_markobj(g, proto_kgc(pt, i));
+#if LJ_HASJIT
+ if (pt->trace) gc_marktrace(g, pt->trace);
+#endif
+}
+
+/* Traverse the frame structure of a stack. */
+static MSize gc_traverse_frames(global_State *g, lua_State *th)
+{
+ TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
+ /* Note: extra vararg frame not skipped, marks function twice (harmless). */
+ for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
+ GCfunc *fn = frame_func(frame);
+ TValue *ftop = frame;
+ if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
+ if (ftop > top) top = ftop;
+ gc_markobj(g, fn); /* Need to mark hidden function (or L). */
+ }
+ top++; /* Correct bias of -1 (frame == base-1). */
+ if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
+ return (MSize)(top - bot); /* Return minimum needed stack size. */
+}
+
+/* Traverse a thread object. */
+static void gc_traverse_thread(global_State *g, lua_State *th)
+{
+ TValue *o, *top = th->top;
+ for (o = tvref(th->stack)+1; o < top; o++)
+ gc_marktv(g, o);
+ if (g->gc.state == GCSatomic) {
+ top = tvref(th->stack) + th->stacksize;
+ for (; o < top; o++) /* Clear unmarked slots. */
+ setnilV(o);
+ }
+ gc_markobj(g, tabref(th->env));
+ lj_state_shrinkstack(th, gc_traverse_frames(g, th));
+}
+
+/* Propagate one gray object. Traverse it and turn it black. */
+static size_t propagatemark(global_State *g)
+{
+ GCobj *o = gcref(g->gc.gray);
+ int gct = o->gch.gct;
+ lua_assert(isgray(o));
+ gray2black(o);
+ setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
+ if (LJ_LIKELY(gct == ~LJ_TTAB)) {
+ GCtab *t = gco2tab(o);
+ if (gc_traverse_tab(g, t) > 0)
+ black2gray(o); /* Keep weak tables gray. */
+ return sizeof(GCtab) + sizeof(TValue) * t->asize +
+ sizeof(Node) * (t->hmask + 1);
+ } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
+ GCfunc *fn = gco2func(o);
+ gc_traverse_func(g, fn);
+ return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
+ sizeCfunc((MSize)fn->c.nupvalues);
+ } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
+ GCproto *pt = gco2pt(o);
+ gc_traverse_proto(g, pt);
+ return pt->sizept;
+ } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
+ lua_State *th = gco2th(o);
+ setgcrefr(th->gclist, g->gc.grayagain);
+ setgcref(g->gc.grayagain, o);
+ black2gray(o); /* Threads are never black. */
+ gc_traverse_thread(g, th);
+ return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
+ } else {
+#if LJ_HASJIT
+ GCtrace *T = gco2trace(o);
+ gc_traverse_trace(g, T);
+ return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
+ T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
+#else
+ lua_assert(0);
+ return 0;
+#endif
+ }
+}
+
+/* Propagate all gray objects. */
+static size_t gc_propagate_gray(global_State *g)
+{
+ size_t m = 0;
+ while (gcref(g->gc.gray) != NULL)
+ m += propagatemark(g);
+ return m;
+}
+
+/* -- Sweep phase --------------------------------------------------------- */
+
+/* Try to shrink some common data structures. */
+static void gc_shrink(global_State *g, lua_State *L)
+{
+ if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
+ lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
+ if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
+ lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */
+}
+
+/* Type of GC free functions. */
+typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
+
+/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
+static const GCFreeFunc gc_freefunc[] = {
+ (GCFreeFunc)lj_str_free,
+ (GCFreeFunc)lj_func_freeuv,
+ (GCFreeFunc)lj_state_free,
+ (GCFreeFunc)lj_func_freeproto,
+ (GCFreeFunc)lj_func_free,
+#if LJ_HASJIT
+ (GCFreeFunc)lj_trace_free,
+#else
+ (GCFreeFunc)0,
+#endif
+#if LJ_HASFFI
+ (GCFreeFunc)lj_cdata_free,
+#else
+ (GCFreeFunc)0,
+#endif
+ (GCFreeFunc)lj_tab_free,
+ (GCFreeFunc)lj_udata_free
+};
+
+/* Full sweep of a GC list. */
+#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM)
+
+/* Partial sweep of a GC list. */
+static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
+{
+ /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
+ int ow = otherwhite(g);
+ GCobj *o;
+ while ((o = gcref(*p)) != NULL && lim-- > 0) {
+ if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
+ gc_fullsweep(g, &gco2th(o)->openupval);
+ if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
+ lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
+ makewhite(g, o); /* Value is alive, change to the current white. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise value is dead, free it. */
+ lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
+ setgcrefr(*p, o->gch.nextgc);
+ if (o == gcref(g->gc.root))
+ setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
+ gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
+ }
+ }
+ return p;
+}
+
+/* Check whether we can clear a key or a value slot from a table. */
+static int gc_mayclear(cTValue *o, int val)
+{
+ if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
+ if (tvisstr(o)) { /* But strings cannot be used as weak references. */
+ gc_mark_str(strV(o)); /* And need to be marked. */
+ return 0;
+ }
+ if (iswhite(gcV(o)))
+ return 1; /* Object is about to be collected. */
+ if (tvisudata(o) && val && isfinalized(udataV(o)))
+ return 1; /* Finalized userdata is dropped only from values. */
+ }
+ return 0; /* Cannot clear. */
+}
+
+/* Clear collected entries from weak tables. */
+static void gc_clearweak(GCobj *o)
+{
+ while (o) {
+ GCtab *t = gco2tab(o);
+ lua_assert((t->marked & LJ_GC_WEAK));
+ if ((t->marked & LJ_GC_WEAKVAL)) {
+ MSize i, asize = t->asize;
+ for (i = 0; i < asize; i++) {
+ /* Clear array slot when value is about to be collected. */
+ TValue *tv = arrayslot(t, i);
+ if (gc_mayclear(tv, 1))
+ setnilV(tv);
+ }
+ }
+ if (t->hmask > 0) {
+ Node *node = noderef(t->node);
+ MSize i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ /* Clear hash slot when key or value is about to be collected. */
+ if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
+ gc_mayclear(&n->val, 1)))
+ setnilV(&n->val);
+ }
+ }
+ o = gcref(t->gclist);
+ }
+}
+
+/* Call a userdata or cdata finalizer. */
+static void gc_call_finalizer(global_State *g, lua_State *L,
+ cTValue *mo, GCobj *o)
+{
+ /* Save and restore lots of state around the __gc callback. */
+ uint8_t oldh = hook_save(g);
+ MSize oldt = g->gc.threshold;
+ int errcode;
+ TValue *top;
+ lj_trace_abort(g);
+ top = L->top;
+ L->top = top+2;
+ hook_entergc(g); /* Disable hooks and new traces during __gc. */
+ g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
+ copyTV(L, top, mo);
+ setgcV(L, top+1, o, ~o->gch.gct);
+ errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|o| -> | */
+ hook_restore(g, oldh);
+ g->gc.threshold = oldt; /* Restore GC threshold. */
+ if (errcode)
+ lj_err_throw(L, errcode); /* Propagate errors. */
+}
+
+/* Finalize one userdata or cdata object from the mmudata list. */
+static void gc_finalize(lua_State *L)
+{
+ global_State *g = G(L);
+ GCobj *o = gcnext(gcref(g->gc.mmudata));
+ cTValue *mo;
+ lua_assert(gcref(g->jit_L) == NULL); /* Must not be called on trace. */
+ /* Unchain from list of userdata to be finalized. */
+ if (o == gcref(g->gc.mmudata))
+ setgcrefnull(g->gc.mmudata);
+ else
+ setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
+#if LJ_HASFFI
+ if (o->gch.gct == ~LJ_TCDATA) {
+ TValue tmp, *tv;
+ /* Add cdata back to the GC list and make it white. */
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ makewhite(g, o);
+ o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
+ /* Resolve finalizer. */
+ setcdataV(L, &tmp, gco2cd(o));
+ tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
+ if (!tvisnil(tv)) {
+ copyTV(L, &tmp, tv);
+ setnilV(tv); /* Clear entry in finalizer table. */
+ gc_call_finalizer(g, L, &tmp, o);
+ }
+ return;
+ }
+#endif
+ /* Add userdata back to the main userdata list and make it white. */
+ setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
+ setgcref(mainthread(g)->nextgc, o);
+ makewhite(g, o);
+ /* Resolve the __gc metamethod. */
+ mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
+ if (mo)
+ gc_call_finalizer(g, L, mo, o);
+}
+
+/* Finalize all userdata objects from mmudata list. */
+void lj_gc_finalize_udata(lua_State *L)
+{
+ while (gcref(G(L)->gc.mmudata) != NULL)
+ gc_finalize(L);
+}
+
+#if LJ_HASFFI
+/* Finalize all cdata objects from finalizer table. */
+void lj_gc_finalize_cdata(lua_State *L)
+{
+ global_State *g = G(L);
+ CTState *cts = ctype_ctsG(g);
+ if (cts) {
+ GCtab *t = cts->finalizer;
+ Node *node = noderef(t->node);
+ ptrdiff_t i;
+ setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */
+ for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+ if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
+ GCobj *o = gcV(&node[i].key);
+ TValue tmp;
+ makewhite(g, o);
+ o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
+ copyTV(L, &tmp, &node[i].val);
+ setnilV(&node[i].val);
+ gc_call_finalizer(g, L, &tmp, o);
+ }
+ }
+}
+#endif
+
+/* Free all remaining GC objects. */
+void lj_gc_freeall(global_State *g)
+{
+ MSize i, strmask;
+ /* Free everything, except super-fixed objects (the main thread). */
+ g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
+ gc_fullsweep(g, &g->gc.root);
+ strmask = g->strmask;
+ for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
+ gc_fullsweep(g, &g->strhash[i]);
+}
+
+/* -- Collector ----------------------------------------------------------- */
+
+/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
+static void atomic(global_State *g, lua_State *L)
+{
+ size_t udsize;
+
+ gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
+ gc_propagate_gray(g); /* Propagate any left-overs. */
+
+ setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
+ setgcrefnull(g->gc.weak);
+ lua_assert(!iswhite(obj2gco(mainthread(g))));
+ gc_markobj(g, L); /* Mark running thread. */
+ gc_traverse_curtrace(g); /* Traverse current trace. */
+ gc_mark_gcroot(g); /* Mark GC roots (again). */
+ gc_propagate_gray(g); /* Propagate all of the above. */
+
+ setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
+ setgcrefnull(g->gc.grayagain);
+ gc_propagate_gray(g); /* Propagate it. */
+
+ udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
+ gc_mark_mmudata(g); /* Mark them. */
+ udsize += gc_propagate_gray(g); /* And propagate the marks. */
+
+ /* All marking done, clear weak tables. */
+ gc_clearweak(gcref(g->gc.weak));
+
+ /* Prepare for sweep phase. */
+ g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
+ g->strempty.marked = g->gc.currentwhite;
+ setmref(g->gc.sweep, &g->gc.root);
+ g->gc.estimate = g->gc.total - (MSize)udsize; /* Initial estimate. */
+}
+
+/* GC state machine. Returns a cost estimate for each step performed. */
+static size_t gc_onestep(lua_State *L)
+{
+ global_State *g = G(L);
+ switch (g->gc.state) {
+ case GCSpause:
+ gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
+ return 0;
+ case GCSpropagate:
+ if (gcref(g->gc.gray) != NULL)
+ return propagatemark(g); /* Propagate one gray object. */
+ g->gc.state = GCSatomic; /* End of mark phase. */
+ return 0;
+ case GCSatomic:
+ if (gcref(g->jit_L)) /* Don't run atomic phase on trace. */
+ return LJ_MAX_MEM;
+ atomic(g, L);
+ g->gc.state = GCSsweepstring; /* Start of sweep phase. */
+ g->gc.sweepstr = 0;
+ return 0;
+ case GCSsweepstring: {
+ MSize old = g->gc.total;
+ gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */
+ if (g->gc.sweepstr > g->strmask)
+ g->gc.state = GCSsweep; /* All string hash chains swept. */
+ lua_assert(old >= g->gc.total);
+ g->gc.estimate -= old - g->gc.total;
+ return GCSWEEPCOST;
+ }
+ case GCSsweep: {
+ MSize old = g->gc.total;
+ setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
+ if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
+ gc_shrink(g, L);
+ if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
+ g->gc.state = GCSfinalize;
+ } else { /* Otherwise skip this phase to help the JIT. */
+ g->gc.state = GCSpause; /* End of GC cycle. */
+ g->gc.debt = 0;
+ }
+ }
+ lua_assert(old >= g->gc.total);
+ g->gc.estimate -= old - g->gc.total;
+ return GCSWEEPMAX*GCSWEEPCOST;
+ }
+ case GCSfinalize:
+ if (gcref(g->gc.mmudata) != NULL) {
+ if (gcref(g->jit_L)) /* Don't call finalizers on trace. */
+ return LJ_MAX_MEM;
+ gc_finalize(L); /* Finalize one userdata object. */
+ if (g->gc.estimate > GCFINALIZECOST)
+ g->gc.estimate -= GCFINALIZECOST;
+ return GCFINALIZECOST;
+ }
+ g->gc.state = GCSpause; /* End of GC cycle. */
+ g->gc.debt = 0;
+ return 0;
+ default:
+ lua_assert(0);
+ return 0;
+ }
+}
+
+/* Perform a limited amount of incremental GC steps. */
+int LJ_FASTCALL lj_gc_step(lua_State *L)
+{
+ global_State *g = G(L);
+ MSize lim;
+ int32_t ostate = g->vmstate;
+ setvmstate(g, GC);
+ lim = (GCSTEPSIZE/100) * g->gc.stepmul;
+ if (lim == 0)
+ lim = LJ_MAX_MEM;
+ g->gc.debt += g->gc.total - g->gc.threshold;
+ do {
+ lim -= (MSize)gc_onestep(L);
+ if (g->gc.state == GCSpause) {
+ g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
+ g->vmstate = ostate;
+ return 1; /* Finished a GC cycle. */
+ }
+ } while ((int32_t)lim > 0);
+ if (g->gc.debt < GCSTEPSIZE) {
+ g->gc.threshold = g->gc.total + GCSTEPSIZE;
+ } else {
+ g->gc.debt -= GCSTEPSIZE;
+ g->gc.threshold = g->gc.total;
+ }
+ g->vmstate = ostate;
+ return 0;
+}
+
+/* Ditto, but fix the stack top first. */
+void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
+{
+ if (curr_funcisL(L)) L->top = curr_topL(L);
+ lj_gc_step(L);
+}
+
+#if LJ_HASJIT
+/* Perform multiple GC steps. Called from JIT-compiled code. */
+int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
+{
+ lua_State *L = gco2th(gcref(g->jit_L));
+ L->base = mref(G(L)->jit_base, TValue);
+ L->top = curr_topL(L);
+ while (steps-- > 0 && lj_gc_step(L) == 0)
+ ;
+ /* Return 1 to force a trace exit. */
+ return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
+}
+#endif
+
+/* Perform a full GC cycle. */
+void lj_gc_fullgc(lua_State *L)
+{
+ global_State *g = G(L);
+ int32_t ostate = g->vmstate;
+ setvmstate(g, GC);
+ if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
+ setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */
+ setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
+ setgcrefnull(g->gc.grayagain);
+ setgcrefnull(g->gc.weak);
+ g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
+ g->gc.sweepstr = 0;
+ }
+ while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
+ gc_onestep(L); /* Finish sweep. */
+ lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
+ /* Now perform a full GC. */
+ g->gc.state = GCSpause;
+ do { gc_onestep(L); } while (g->gc.state != GCSpause);
+ g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
+ g->vmstate = ostate;
+}
+
+/* -- Write barriers ------------------------------------------------------ */
+
+/* Move the GC propagation frontier forward. */
+void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
+{
+ lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
+ lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
+ lua_assert(o->gch.gct != ~LJ_TTAB);
+ /* Preserve invariant during propagation. Otherwise it doesn't matter. */
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_mark(g, v); /* Move frontier forward. */
+ else
+ makewhite(g, o); /* Make it white to avoid the following barrier. */
+}
+
+/* Specialized barrier for closed upvalue. Pass &uv->tv. */
+void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
+{
+#define TV2MARKED(x) \
+ (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_mark(g, gcV(tv));
+ else
+ TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
+#undef TV2MARKED
+}
+
+/* Close upvalue. Also needs a write barrier. */
+void lj_gc_closeuv(global_State *g, GCupval *uv)
+{
+ GCobj *o = obj2gco(uv);
+ /* Copy stack slot to upvalue itself and point to the copy. */
+ copyTV(mainthread(g), &uv->tv, uvval(uv));
+ setmref(uv->v, &uv->tv);
+ uv->closed = 1;
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
+ gray2black(o); /* Make it black and preserve invariant. */
+ if (tviswhite(&uv->tv))
+ lj_gc_barrierf(g, o, gcV(&uv->tv));
+ } else {
+ makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
+ lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
+ }
+ }
+}
+
+#if LJ_HASJIT
+/* Mark a trace if it's saved during the propagation phase. */
+void lj_gc_barriertrace(global_State *g, uint32_t traceno)
+{
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_marktrace(g, traceno);
+}
+#endif
+
+/* -- Allocator ----------------------------------------------------------- */
+
+/* Call pluggable memory allocator to allocate or resize a fragment. */
+void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
+{
+ global_State *g = G(L);
+ lua_assert((osz == 0) == (p == NULL));
+ p = g->allocf(g->allocd, p, osz, nsz);
+ if (p == NULL && nsz > 0)
+ lj_err_mem(L);
+ lua_assert((nsz == 0) == (p == NULL));
+ lua_assert(checkptr32(p));
+ g->gc.total = (g->gc.total - osz) + nsz;
+ return p;
+}
+
+/* Allocate new GC object and link it to the root set. */
+void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
+{
+ global_State *g = G(L);
+ GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
+ if (o == NULL)
+ lj_err_mem(L);
+ lua_assert(checkptr32(o));
+ g->gc.total += size;
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ newwhite(g, o);
+ return o;
+}
+
+/* Resize growable vector. */
+void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
+{
+ MSize sz = (*szp) << 1;
+ if (sz < LJ_MIN_VECSZ)
+ sz = LJ_MIN_VECSZ;
+ if (sz > lim)
+ sz = lim;
+ p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
+ *szp = sz;
+ return p;
+}
+
diff --git a/3rdparty/lua/src/lj_gc.h b/3rdparty/lua/src/lj_gc.h
index f1ce275..22f7fea 100644
--- a/3rdparty/lua/src/lj_gc.h
+++ b/3rdparty/lua/src/lj_gc.h
@@ -1,134 +1,134 @@
-/*
-** Garbage collector.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_GC_H
-#define _LJ_GC_H
-
-#include "lj_obj.h"
-
-/* Garbage collector states. Order matters. */
-enum {
- GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize
-};
-
-/* Bitmasks for marked field of GCobj. */
-#define LJ_GC_WHITE0 0x01
-#define LJ_GC_WHITE1 0x02
-#define LJ_GC_BLACK 0x04
-#define LJ_GC_FINALIZED 0x08
-#define LJ_GC_WEAKKEY 0x08
-#define LJ_GC_WEAKVAL 0x10
-#define LJ_GC_CDATA_FIN 0x10
-#define LJ_GC_FIXED 0x20
-#define LJ_GC_SFIXED 0x40
-
-#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1)
-#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK)
-#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL)
-
-/* Macros to test and set GCobj colors. */
-#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES)
-#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK)
-#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES)))
-#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x)))
-#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES)
-#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES)
-
-#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES)
-#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g))
-#define makewhite(g, x) \
- ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g))
-#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES)
-#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK)
-#define fixstring(s) ((s)->marked |= LJ_GC_FIXED)
-#define markfinalized(x) ((x)->gch.marked |= LJ_GC_FINALIZED)
-
-/* Collector. */
-LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all);
-LJ_FUNC void lj_gc_finalize_udata(lua_State *L);
-#if LJ_HASFFI
-LJ_FUNC void lj_gc_finalize_cdata(lua_State *L);
-#else
-#define lj_gc_finalize_cdata(L) UNUSED(L)
-#endif
-LJ_FUNC void lj_gc_freeall(global_State *g);
-LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L);
-LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L);
-#if LJ_HASJIT
-LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps);
-#endif
-LJ_FUNC void lj_gc_fullgc(lua_State *L);
-
-/* GC check: drive collector forward if the GC threshold has been reached. */
-#define lj_gc_check(L) \
- { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
- lj_gc_step(L); }
-#define lj_gc_check_fixtop(L) \
- { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
- lj_gc_step_fixtop(L); }
-
-/* Write barriers. */
-LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v);
-LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv);
-LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv);
-#if LJ_HASJIT
-LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno);
-#endif
-
-/* Move the GC propagation frontier back for tables (make it gray again). */
-static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
-{
- GCobj *o = obj2gco(t);
- lua_assert(isblack(o) && !isdead(g, o));
- lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
- black2gray(o);
- setgcrefr(t->gclist, g->gc.grayagain);
- setgcref(g->gc.grayagain, o);
-}
-
-/* Barrier for stores to table objects. TValue and GCobj variant. */
-#define lj_gc_anybarriert(L, t) \
- { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); }
-#define lj_gc_barriert(L, t, tv) \
- { if (tviswhite(tv) && isblack(obj2gco(t))) \
- lj_gc_barrierback(G(L), (t)); }
-#define lj_gc_objbarriert(L, t, o) \
- { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \
- lj_gc_barrierback(G(L), (t)); }
-
-/* Barrier for stores to any other object. TValue and GCobj variant. */
-#define lj_gc_barrier(L, p, tv) \
- { if (tviswhite(tv) && isblack(obj2gco(p))) \
- lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); }
-#define lj_gc_objbarrier(L, p, o) \
- { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \
- lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); }
-
-/* Allocator. */
-LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz);
-LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size);
-LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
- MSize *szp, MSize lim, MSize esz);
-
-#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s))
-
-static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize)
-{
- g->gc.total -= (MSize)osize;
- g->allocf(g->allocd, p, osize, 0);
-}
-
-#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (MSize)((n)*sizeof(t))))
-#define lj_mem_reallocvec(L, p, on, n, t) \
- ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (MSize)((n)*sizeof(t))))
-#define lj_mem_growvec(L, p, n, m, t) \
- ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t)))
-#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t))
-
-#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t)))
-#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s)))
-#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p)))
-
-#endif
+/*
+** Garbage collector.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_GC_H
+#define _LJ_GC_H
+
+#include "lj_obj.h"
+
+/* Garbage collector states. Order matters. */
+enum {
+ GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize
+};
+
+/* Bitmasks for marked field of GCobj. */
+#define LJ_GC_WHITE0 0x01
+#define LJ_GC_WHITE1 0x02
+#define LJ_GC_BLACK 0x04
+#define LJ_GC_FINALIZED 0x08
+#define LJ_GC_WEAKKEY 0x08
+#define LJ_GC_WEAKVAL 0x10
+#define LJ_GC_CDATA_FIN 0x10
+#define LJ_GC_FIXED 0x20
+#define LJ_GC_SFIXED 0x40
+
+#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1)
+#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK)
+#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL)
+
+/* Macros to test and set GCobj colors. */
+#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES)
+#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK)
+#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES)))
+#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x)))
+#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES)
+#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES)
+
+#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES)
+#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g))
+#define makewhite(g, x) \
+ ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g))
+#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES)
+#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK)
+#define fixstring(s) ((s)->marked |= LJ_GC_FIXED)
+#define markfinalized(x) ((x)->gch.marked |= LJ_GC_FINALIZED)
+
+/* Collector. */
+LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all);
+LJ_FUNC void lj_gc_finalize_udata(lua_State *L);
+#if LJ_HASFFI
+LJ_FUNC void lj_gc_finalize_cdata(lua_State *L);
+#else
+#define lj_gc_finalize_cdata(L) UNUSED(L)
+#endif
+LJ_FUNC void lj_gc_freeall(global_State *g);
+LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L);
+LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L);
+#if LJ_HASJIT
+LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps);
+#endif
+LJ_FUNC void lj_gc_fullgc(lua_State *L);
+
+/* GC check: drive collector forward if the GC threshold has been reached. */
+#define lj_gc_check(L) \
+ { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
+ lj_gc_step(L); }
+#define lj_gc_check_fixtop(L) \
+ { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
+ lj_gc_step_fixtop(L); }
+
+/* Write barriers. */
+LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v);
+LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv);
+LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv);
+#if LJ_HASJIT
+LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno);
+#endif
+
+/* Move the GC propagation frontier back for tables (make it gray again). */
+static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
+{
+ GCobj *o = obj2gco(t);
+ lua_assert(isblack(o) && !isdead(g, o));
+ lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
+ black2gray(o);
+ setgcrefr(t->gclist, g->gc.grayagain);
+ setgcref(g->gc.grayagain, o);
+}
+
+/* Barrier for stores to table objects. TValue and GCobj variant. */
+#define lj_gc_anybarriert(L, t) \
+ { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); }
+#define lj_gc_barriert(L, t, tv) \
+ { if (tviswhite(tv) && isblack(obj2gco(t))) \
+ lj_gc_barrierback(G(L), (t)); }
+#define lj_gc_objbarriert(L, t, o) \
+ { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \
+ lj_gc_barrierback(G(L), (t)); }
+
+/* Barrier for stores to any other object. TValue and GCobj variant. */
+#define lj_gc_barrier(L, p, tv) \
+ { if (tviswhite(tv) && isblack(obj2gco(p))) \
+ lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); }
+#define lj_gc_objbarrier(L, p, o) \
+ { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \
+ lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); }
+
+/* Allocator. */
+LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz);
+LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size);
+LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
+ MSize *szp, MSize lim, MSize esz);
+
+#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s))
+
+static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize)
+{
+ g->gc.total -= (MSize)osize;
+ g->allocf(g->allocd, p, osize, 0);
+}
+
+#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (MSize)((n)*sizeof(t))))
+#define lj_mem_reallocvec(L, p, on, n, t) \
+ ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (MSize)((n)*sizeof(t))))
+#define lj_mem_growvec(L, p, n, m, t) \
+ ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t)))
+#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t))
+
+#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t)))
+#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s)))
+#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p)))
+
+#endif
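/* Illustrative sketch of the two-white scheme behind LJ_GC_WHITE0/WHITE1 and
** isdead() above: the GC flips the "current" white each cycle, so objects
** still carrying the other white were not reached in this cycle and may be
** swept. The mask values mirror the header; the Obj type is hypothetical.
*/
#include <stdint.h>
#include <stdio.h>

#define WHITE0 0x01
#define WHITE1 0x02
#define WHITES (WHITE0 | WHITE1)

typedef struct { uint8_t marked; } Obj;

int main(void)
{
  uint8_t currentwhite = WHITE0;
  Obj o = { WHITE0 };                    /* allocated with the current white */
  currentwhite ^= WHITES;                /* white flip in the atomic phase */
  uint8_t otherwhite = currentwhite ^ WHITES;
  int dead = (o.marked & otherwhite & WHITES) != 0;  /* same test as isdead() */
  printf("object is %s\n", dead ? "sweepable" : "live");
  return 0;
}
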
diff --git a/3rdparty/lua/src/lj_gdbjit.c b/3rdparty/lua/src/lj_gdbjit.c
index 6617220..284195a 100644
--- a/3rdparty/lua/src/lj_gdbjit.c
+++ b/3rdparty/lua/src/lj_gdbjit.c
@@ -1,795 +1,793 @@
-/*
-** Client for the GDB JIT API.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_gdbjit_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_frame.h"
-#include "lj_jit.h"
-#include "lj_dispatch.h"
-
-/* This is not compiled in by default.
-** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything.
-*/
-#ifdef LUAJIT_USE_GDBJIT
-
-/* The GDB JIT API allows JIT compilers to pass debug information about
-** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher
-** to see it in action.
-**
-** This is a passive API, so it works even when not running under GDB
-** or when attaching to an already running process. Alas, this implies
-** enabling it always has a non-negligible overhead -- do not use in
-** release mode!
-**
-** The LuaJIT GDB JIT client is rather minimal at the moment. It gives
-** each trace a symbol name and adds a source location and frame unwind
-** information. Obviously LuaJIT itself and any embedding C application
-** should be compiled with debug symbols, too (see the Makefile).
-**
-** Traces are named TRACE_1, TRACE_2, ... these correspond to the trace
-** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc.
-** to set breakpoints on specific traces (even ahead of their creation).
-**
-** The source location for each trace allows listing the corresponding
-** source lines with the GDB command "list" (but only if the Lua source
-** has been loaded from a file). Currently this is always set to the
-** location where the trace has been started.
-**
-** Frame unwind information can be inspected with the GDB command
-** "info frame". This also allows proper backtraces across JIT-compiled
-** code with the GDB command "bt".
-**
-** You probably want to add the following settings to a .gdbinit file
-** (or add them to ~/.gdbinit):
-** set disassembly-flavor intel
-** set breakpoint pending on
-**
-** Here's a sample GDB session:
-** ------------------------------------------------------------------------
-
-$ cat >x.lua
-for outer=1,100 do
- for inner=1,100 do end
-end
-^D
-
-$ luajit -jv x.lua
-[TRACE 1 x.lua:2]
-[TRACE 2 (1/3) x.lua:1 -> 1]
-
-$ gdb --quiet --args luajit x.lua
-(gdb) tbreak TRACE_1
-Function "TRACE_1" not defined.
-Temporary breakpoint 1 (TRACE_1) pending.
-(gdb) run
-Starting program: luajit x.lua
-
-Temporary breakpoint 1, TRACE_1 () at x.lua:2
-2 for inner=1,100 do end
-(gdb) list
-1 for outer=1,100 do
-2 for inner=1,100 do end
-3 end
-(gdb) bt
-#0 TRACE_1 () at x.lua:2
-#1 0x08053690 in lua_pcall [...]
-[...]
-#7 0x0806ff90 in main [...]
-(gdb) disass TRACE_1
-Dump of assembler code for function TRACE_1:
-0xf7fd9fba <TRACE_1+0>: mov DWORD PTR ds:0xf7e0e2a0,0x1
-0xf7fd9fc4 <TRACE_1+10>: movsd xmm7,QWORD PTR [edx+0x20]
-[...]
-0xf7fd9ff8 <TRACE_1+62>: jmp 0xf7fd2014
-End of assembler dump.
-(gdb) tbreak TRACE_2
-Function "TRACE_2" not defined.
-Temporary breakpoint 2 (TRACE_2) pending.
-(gdb) cont
-Continuing.
-
-Temporary breakpoint 2, TRACE_2 () at x.lua:1
-1 for outer=1,100 do
-(gdb) info frame
-Stack level 0, frame at 0xffffd7c0:
- eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690
- called by frame at 0xffffd7e0
- source language unknown.
- Arglist at 0xffffd78c, args:
- Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0
- Saved registers:
- ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4,
- eip at 0xffffd7bc
-(gdb)
-
-** ------------------------------------------------------------------------
-*/
-
-/* -- GDB JIT API --------------------------------------------------------- */
-
-/* GDB JIT actions. */
-enum {
- GDBJIT_NOACTION = 0,
- GDBJIT_REGISTER,
- GDBJIT_UNREGISTER
-};
-
-/* GDB JIT entry. */
-typedef struct GDBJITentry {
- struct GDBJITentry *next_entry;
- struct GDBJITentry *prev_entry;
- const char *symfile_addr;
- uint64_t symfile_size;
-} GDBJITentry;
-
-/* GDB JIT descriptor. */
-typedef struct GDBJITdesc {
- uint32_t version;
- uint32_t action_flag;
- GDBJITentry *relevant_entry;
- GDBJITentry *first_entry;
-} GDBJITdesc;
-
-GDBJITdesc __jit_debug_descriptor = {
- 1, GDBJIT_NOACTION, NULL, NULL
-};
-
-/* GDB sets a breakpoint at this function. */
-void LJ_NOINLINE __jit_debug_register_code()
-{
- __asm__ __volatile__("");
-};
-
-/* -- In-memory ELF object definitions ------------------------------------ */
-
-/* ELF definitions. */
-typedef struct ELFheader {
- uint8_t emagic[4];
- uint8_t eclass;
- uint8_t eendian;
- uint8_t eversion;
- uint8_t eosabi;
- uint8_t eabiversion;
- uint8_t epad[7];
- uint16_t type;
- uint16_t machine;
- uint32_t version;
- uintptr_t entry;
- uintptr_t phofs;
- uintptr_t shofs;
- uint32_t flags;
- uint16_t ehsize;
- uint16_t phentsize;
- uint16_t phnum;
- uint16_t shentsize;
- uint16_t shnum;
- uint16_t shstridx;
-} ELFheader;
-
-typedef struct ELFsectheader {
- uint32_t name;
- uint32_t type;
- uintptr_t flags;
- uintptr_t addr;
- uintptr_t ofs;
- uintptr_t size;
- uint32_t link;
- uint32_t info;
- uintptr_t align;
- uintptr_t entsize;
-} ELFsectheader;
-
-#define ELFSECT_IDX_ABS 0xfff1
-
-enum {
- ELFSECT_TYPE_PROGBITS = 1,
- ELFSECT_TYPE_SYMTAB = 2,
- ELFSECT_TYPE_STRTAB = 3,
- ELFSECT_TYPE_NOBITS = 8
-};
-
-#define ELFSECT_FLAGS_WRITE 1
-#define ELFSECT_FLAGS_ALLOC 2
-#define ELFSECT_FLAGS_EXEC 4
-
-typedef struct ELFsymbol {
-#if LJ_64
- uint32_t name;
- uint8_t info;
- uint8_t other;
- uint16_t sectidx;
- uintptr_t value;
- uint64_t size;
-#else
- uint32_t name;
- uintptr_t value;
- uint32_t size;
- uint8_t info;
- uint8_t other;
- uint16_t sectidx;
-#endif
-} ELFsymbol;
-
-enum {
- ELFSYM_TYPE_FUNC = 2,
- ELFSYM_TYPE_FILE = 4,
- ELFSYM_BIND_LOCAL = 0 << 4,
- ELFSYM_BIND_GLOBAL = 1 << 4,
-};
-
-/* DWARF definitions. */
-#define DW_CIE_VERSION 1
-
-enum {
- DW_CFA_nop = 0x0,
- DW_CFA_offset_extended = 0x5,
- DW_CFA_def_cfa = 0xc,
- DW_CFA_def_cfa_offset = 0xe,
- DW_CFA_offset_extended_sf = 0x11,
- DW_CFA_advance_loc = 0x40,
- DW_CFA_offset = 0x80
-};
-
-enum {
- DW_EH_PE_udata4 = 3,
- DW_EH_PE_textrel = 0x20
-};
-
-enum {
- DW_TAG_compile_unit = 0x11
-};
-
-enum {
- DW_children_no = 0,
- DW_children_yes = 1
-};
-
-enum {
- DW_AT_name = 0x03,
- DW_AT_stmt_list = 0x10,
- DW_AT_low_pc = 0x11,
- DW_AT_high_pc = 0x12
-};
-
-enum {
- DW_FORM_addr = 0x01,
- DW_FORM_data4 = 0x06,
- DW_FORM_string = 0x08
-};
-
-enum {
- DW_LNS_extended_op = 0,
- DW_LNS_copy = 1,
- DW_LNS_advance_pc = 2,
- DW_LNS_advance_line = 3
-};
-
-enum {
- DW_LNE_end_sequence = 1,
- DW_LNE_set_address = 2
-};
-
-enum {
-#if LJ_TARGET_X86
- DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX,
- DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI,
- DW_REG_RA,
-#elif LJ_TARGET_X64
- /* Yes, the order is strange, but correct. */
- DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX,
- DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP,
- DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11,
- DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15,
- DW_REG_RA,
-#elif LJ_TARGET_ARM
- DW_REG_SP = 13,
- DW_REG_RA = 14,
-#elif LJ_TARGET_PPC
- DW_REG_SP = 1,
- DW_REG_RA = 65,
- DW_REG_CR = 70,
-#elif LJ_TARGET_MIPS
- DW_REG_SP = 29,
- DW_REG_RA = 31,
-#else
-#error "Unsupported target architecture"
-#endif
-};
-
-/* Minimal list of sections for the in-memory ELF object. */
-enum {
- GDBJIT_SECT_NULL,
- GDBJIT_SECT_text,
- GDBJIT_SECT_eh_frame,
- GDBJIT_SECT_shstrtab,
- GDBJIT_SECT_strtab,
- GDBJIT_SECT_symtab,
- GDBJIT_SECT_debug_info,
- GDBJIT_SECT_debug_abbrev,
- GDBJIT_SECT_debug_line,
- GDBJIT_SECT__MAX
-};
-
-enum {
- GDBJIT_SYM_UNDEF,
- GDBJIT_SYM_FILE,
- GDBJIT_SYM_FUNC,
- GDBJIT_SYM__MAX
-};
-
-/* In-memory ELF object. */
-typedef struct GDBJITobj {
- ELFheader hdr; /* ELF header. */
- ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */
- ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */
- uint8_t space[4096]; /* Space for various section data. */
-} GDBJITobj;
-
-/* Combined structure for GDB JIT entry and ELF object. */
-typedef struct GDBJITentryobj {
- GDBJITentry entry;
- size_t sz;
- GDBJITobj obj;
-} GDBJITentryobj;
-
-/* Template for in-memory ELF header. */
-static const ELFheader elfhdr_template = {
- .emagic = { 0x7f, 'E', 'L', 'F' },
- .eclass = LJ_64 ? 2 : 1,
- .eendian = LJ_ENDIAN_SELECT(1, 2),
- .eversion = 1,
-#if LJ_TARGET_LINUX
- .eosabi = 0, /* Nope, it's not 3. */
-#elif defined(__FreeBSD__)
- .eosabi = 9,
-#elif defined(__NetBSD__)
- .eosabi = 2,
-#elif defined(__OpenBSD__)
- .eosabi = 12,
-#elif defined(__DragonFly__)
- .eosabi = 0,
-#elif (defined(__sun__) && defined(__svr4__))
- .eosabi = 6,
-#else
- .eosabi = 0,
-#endif
- .eabiversion = 0,
- .epad = { 0, 0, 0, 0, 0, 0, 0 },
- .type = 1,
-#if LJ_TARGET_X86
- .machine = 3,
-#elif LJ_TARGET_X64
- .machine = 62,
-#elif LJ_TARGET_ARM
- .machine = 40,
-#elif LJ_TARGET_PPC
- .machine = 20,
-#elif LJ_TARGET_MIPS
- .machine = 8,
-#else
-#error "Unsupported target architecture"
-#endif
- .version = 1,
- .entry = 0,
- .phofs = 0,
- .shofs = offsetof(GDBJITobj, sect),
- .flags = 0,
- .ehsize = sizeof(ELFheader),
- .phentsize = 0,
- .phnum = 0,
- .shentsize = sizeof(ELFsectheader),
- .shnum = GDBJIT_SECT__MAX,
- .shstridx = GDBJIT_SECT_shstrtab
-};
-
-/* -- In-memory ELF object generation ------------------------------------- */
-
-/* Context for generating the ELF object for the GDB JIT API. */
-typedef struct GDBJITctx {
- uint8_t *p; /* Pointer to next address in obj.space. */
- uint8_t *startp; /* Pointer to start address in obj.space. */
- GCtrace *T; /* Generate symbols for this trace. */
- uintptr_t mcaddr; /* Machine code address. */
- MSize szmcode; /* Size of machine code. */
- MSize spadjp; /* Stack adjustment for parent trace or interpreter. */
- MSize spadj; /* Stack adjustment for trace itself. */
- BCLine lineno; /* Starting line number. */
- const char *filename; /* Starting file name. */
- size_t objsize; /* Final size of ELF object. */
- GDBJITobj obj; /* In-memory ELF object. */
-} GDBJITctx;
-
-/* Add a zero-terminated string. */
-static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str)
-{
- uint8_t *p = ctx->p;
- uint32_t ofs = (uint32_t)(p - ctx->startp);
- do {
- *p++ = (uint8_t)*str;
- } while (*str++);
- ctx->p = p;
- return ofs;
-}
-
-/* Append a decimal number. */
-static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n)
-{
- if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); }
- *ctx->p++ = '0' + n;
-}
-
-/* Add a ULEB128 value. */
-static void gdbjit_uleb128(GDBJITctx *ctx, uint32_t v)
-{
- uint8_t *p = ctx->p;
- for (; v >= 0x80; v >>= 7)
- *p++ = (uint8_t)((v & 0x7f) | 0x80);
- *p++ = (uint8_t)v;
- ctx->p = p;
-}
-
-/* Add a SLEB128 value. */
-static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v)
-{
- uint8_t *p = ctx->p;
- for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7)
- *p++ = (uint8_t)((v & 0x7f) | 0x80);
- *p++ = (uint8_t)(v & 0x7f);
- ctx->p = p;
-}
-
-/* Shortcuts to generate DWARF structures. */
-#define DB(x) (*p++ = (x))
-#define DI8(x) (*(int8_t *)p = (x), p++)
-#define DU16(x) (*(uint16_t *)p = (x), p += 2)
-#define DU32(x) (*(uint32_t *)p = (x), p += 4)
-#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t))
-#define DUV(x) (ctx->p = p, gdbjit_uleb128(ctx, (x)), p = ctx->p)
-#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p)
-#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p)
-#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop
-#define DSECT(name, stmt) \
- { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \
- *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); } \
-
-/* Initialize ELF section headers. */
-static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx)
-{
- ELFsectheader *sect;
-
- *ctx->p++ = '\0'; /* Empty string at start of string table. */
-
-#define SECTDEF(id, tp, al) \
- sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \
- sect->name = gdbjit_strz(ctx, "." #id); \
- sect->type = ELFSECT_TYPE_##tp; \
- sect->align = (al)
-
- SECTDEF(text, NOBITS, 16);
- sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC;
- sect->addr = ctx->mcaddr;
- sect->ofs = 0;
- sect->size = ctx->szmcode;
-
- SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t));
- sect->flags = ELFSECT_FLAGS_ALLOC;
-
- SECTDEF(shstrtab, STRTAB, 1);
- SECTDEF(strtab, STRTAB, 1);
-
- SECTDEF(symtab, SYMTAB, sizeof(uintptr_t));
- sect->ofs = offsetof(GDBJITobj, sym);
- sect->size = sizeof(ctx->obj.sym);
- sect->link = GDBJIT_SECT_strtab;
- sect->entsize = sizeof(ELFsymbol);
- sect->info = GDBJIT_SYM_FUNC;
-
- SECTDEF(debug_info, PROGBITS, 1);
- SECTDEF(debug_abbrev, PROGBITS, 1);
- SECTDEF(debug_line, PROGBITS, 1);
-
-#undef SECTDEF
-}
-
-/* Initialize symbol table. */
-static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx)
-{
- ELFsymbol *sym;
-
- *ctx->p++ = '\0'; /* Empty string at start of string table. */
-
- sym = &ctx->obj.sym[GDBJIT_SYM_FILE];
- sym->name = gdbjit_strz(ctx, "JIT mcode");
- sym->sectidx = ELFSECT_IDX_ABS;
- sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL;
-
- sym = &ctx->obj.sym[GDBJIT_SYM_FUNC];
- sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--;
- gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0';
- sym->sectidx = GDBJIT_SECT_text;
- sym->value = 0;
- sym->size = ctx->szmcode;
- sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL;
-}
-
-/* Initialize .eh_frame section. */
-static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx)
-{
- uint8_t *p = ctx->p;
- uint8_t *framep = p;
-
- /* Emit DWARF EH CIE. */
- DSECT(CIE,
- DU32(0); /* Offset to CIE itself. */
- DB(DW_CIE_VERSION);
- DSTR("zR"); /* Augmentation. */
- DUV(1); /* Code alignment factor. */
- DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */
- DB(DW_REG_RA); /* Return address register. */
- DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */
- DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t));
-#if LJ_TARGET_PPC
- DB(DW_CFA_offset_extended_sf); DB(DW_REG_RA); DSV(-1);
-#else
- DB(DW_CFA_offset|DW_REG_RA); DUV(1);
-#endif
- DALIGNNOP(sizeof(uintptr_t));
- )
-
- /* Emit DWARF EH FDE. */
- DSECT(FDE,
- DU32((uint32_t)(p-framep)); /* Offset to CIE. */
- DU32(0); /* Machine code offset relative to .text. */
- DU32(ctx->szmcode); /* Machine code length. */
- DB(0); /* Augmentation data. */
- /* Registers saved in CFRAME. */
-#if LJ_TARGET_X86
- DB(DW_CFA_offset|DW_REG_BP); DUV(2);
- DB(DW_CFA_offset|DW_REG_DI); DUV(3);
- DB(DW_CFA_offset|DW_REG_SI); DUV(4);
- DB(DW_CFA_offset|DW_REG_BX); DUV(5);
-#elif LJ_TARGET_X64
- DB(DW_CFA_offset|DW_REG_BP); DUV(2);
- DB(DW_CFA_offset|DW_REG_BX); DUV(3);
- DB(DW_CFA_offset|DW_REG_15); DUV(4);
- DB(DW_CFA_offset|DW_REG_14); DUV(5);
- /* Extra registers saved for JIT-compiled code. */
- DB(DW_CFA_offset|DW_REG_13); DUV(9);
- DB(DW_CFA_offset|DW_REG_12); DUV(10);
-#elif LJ_TARGET_ARM
- {
- int i;
- for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); }
- }
-#elif LJ_TARGET_PPC
- {
- int i;
- DB(DW_CFA_offset_extended); DB(DW_REG_CR); DUV(55);
- for (i = 14; i <= 31; i++) {
- DB(DW_CFA_offset|i); DUV(37+(31-i));
- DB(DW_CFA_offset|32|i); DUV(2+2*(31-i));
- }
- }
-#elif LJ_TARGET_MIPS
- {
- int i;
- DB(DW_CFA_offset|30); DUV(2);
- for (i = 23; i >= 16; i--) { DB(DW_CFA_offset|i); DUV(26-i); }
- for (i = 30; i >= 20; i -= 2) { DB(DW_CFA_offset|32|i); DUV(42-i); }
- }
-#else
-#error "Unsupported target architecture"
-#endif
- if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */
- DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp);
- DB(DW_CFA_advance_loc|1); /* Only an approximation. */
- }
- DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */
- DALIGNNOP(sizeof(uintptr_t));
- )
-
- ctx->p = p;
-}
-
-/* Initialize .debug_info section. */
-static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx)
-{
- uint8_t *p = ctx->p;
-
- DSECT(info,
- DU16(2); /* DWARF version. */
- DU32(0); /* Abbrev offset. */
- DB(sizeof(uintptr_t)); /* Pointer size. */
-
- DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */
- DSTR(ctx->filename); /* DW_AT_name. */
- DADDR(ctx->mcaddr); /* DW_AT_low_pc. */
- DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */
- DU32(0); /* DW_AT_stmt_list. */
- )
-
- ctx->p = p;
-}
-
-/* Initialize .debug_abbrev section. */
-static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx)
-{
- uint8_t *p = ctx->p;
-
- /* Abbrev #1: DW_TAG_compile_unit. */
- DUV(1); DUV(DW_TAG_compile_unit);
- DB(DW_children_no);
- DUV(DW_AT_name); DUV(DW_FORM_string);
- DUV(DW_AT_low_pc); DUV(DW_FORM_addr);
- DUV(DW_AT_high_pc); DUV(DW_FORM_addr);
- DUV(DW_AT_stmt_list); DUV(DW_FORM_data4);
- DB(0); DB(0);
-
- ctx->p = p;
-}
-
-#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op)))
-
-/* Initialize .debug_line section. */
-static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx)
-{
- uint8_t *p = ctx->p;
-
- DSECT(line,
- DU16(2); /* DWARF version. */
- DSECT(header,
- DB(1); /* Minimum instruction length. */
- DB(1); /* is_stmt. */
- DI8(0); /* Line base for special opcodes. */
- DB(2); /* Line range for special opcodes. */
- DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */
- DB(0); DB(1); DB(1); /* Standard opcode lengths. */
- /* Directory table. */
- DB(0);
- /* File name table. */
- DSTR(ctx->filename); DUV(0); DUV(0); DUV(0);
- DB(0);
- )
-
- DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr);
- if (ctx->lineno) {
- DB(DW_LNS_advance_line); DSV(ctx->lineno-1);
- }
- DB(DW_LNS_copy);
- DB(DW_LNS_advance_pc); DUV(ctx->szmcode);
- DLNE(DW_LNE_end_sequence, 0);
- )
-
- ctx->p = p;
-}
-
-#undef DLNE
-
-/* Undef shortcuts. */
-#undef DB
-#undef DI8
-#undef DU16
-#undef DU32
-#undef DADDR
-#undef DUV
-#undef DSV
-#undef DSTR
-#undef DALIGNNOP
-#undef DSECT
-
-/* Type of a section initializer callback. */
-typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx);
-
-/* Call section initializer and set the section offset and size. */
-static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf)
-{
- ctx->startp = ctx->p;
- ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj);
- initf(ctx);
- ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp);
-}
-
-#define SECTALIGN(p, a) \
- ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1)))
-
-/* Build in-memory ELF object. */
-static void gdbjit_buildobj(GDBJITctx *ctx)
-{
- GDBJITobj *obj = &ctx->obj;
- /* Fill in ELF header and clear structures. */
- memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader));
- memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX);
- memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX);
- /* Initialize sections. */
- ctx->p = obj->space;
- gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr);
- gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab);
- gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo);
- gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev);
- gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline);
- SECTALIGN(ctx->p, sizeof(uintptr_t));
- gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe);
- ctx->objsize = (size_t)((char *)ctx->p - (char *)obj);
- lua_assert(ctx->objsize < sizeof(GDBJITobj));
-}
-
-#undef SECTALIGN
-
-/* -- Interface to GDB JIT API -------------------------------------------- */
-
-/* Add new entry to GDB JIT symbol chain. */
-static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
-{
- /* Allocate memory for GDB JIT entry and ELF object. */
- MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize);
- GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj);
- memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */
- eo->sz = sz;
- ctx->T->gdbjit_entry = (void *)eo;
- /* Link new entry to chain and register it. */
- eo->entry.prev_entry = NULL;
- eo->entry.next_entry = __jit_debug_descriptor.first_entry;
- if (eo->entry.next_entry)
- eo->entry.next_entry->prev_entry = &eo->entry;
- eo->entry.symfile_addr = (const char *)&eo->obj;
- eo->entry.symfile_size = ctx->objsize;
- __jit_debug_descriptor.first_entry = &eo->entry;
- __jit_debug_descriptor.relevant_entry = &eo->entry;
- __jit_debug_descriptor.action_flag = GDBJIT_REGISTER;
- __jit_debug_register_code();
-}
-
-/* Add debug info for newly compiled trace and notify GDB. */
-void lj_gdbjit_addtrace(jit_State *J, GCtrace *T)
-{
- GDBJITctx ctx;
- GCproto *pt = &gcref(T->startpt)->pt;
- TraceNo parent = T->ir[REF_BASE].op1;
- const BCIns *startpc = mref(T->startpc, const BCIns);
- ctx.T = T;
- ctx.mcaddr = (uintptr_t)T->mcode;
- ctx.szmcode = T->szmcode;
- ctx.spadjp = CFRAME_SIZE_JIT +
- (MSize)(parent ? traceref(J, parent)->spadjust : 0);
- ctx.spadj = CFRAME_SIZE_JIT + T->spadjust;
- lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc);
- ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
- ctx.filename = proto_chunknamestr(pt);
- if (*ctx.filename == '@' || *ctx.filename == '=')
- ctx.filename++;
- else
- ctx.filename = "(string)";
- gdbjit_buildobj(&ctx);
- gdbjit_newentry(J->L, &ctx);
-}
-
-/* Delete debug info for trace and notify GDB. */
-void lj_gdbjit_deltrace(jit_State *J, GCtrace *T)
-{
- GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry;
- if (eo) {
- if (eo->entry.prev_entry)
- eo->entry.prev_entry->next_entry = eo->entry.next_entry;
- else
- __jit_debug_descriptor.first_entry = eo->entry.next_entry;
- if (eo->entry.next_entry)
- eo->entry.next_entry->prev_entry = eo->entry.prev_entry;
- __jit_debug_descriptor.relevant_entry = &eo->entry;
- __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER;
- __jit_debug_register_code();
- lj_mem_free(J2G(J), eo, eo->sz);
- }
-}
-
-#endif
-#endif
+/*
+** Client for the GDB JIT API.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_gdbjit_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_frame.h"
+#include "lj_jit.h"
+#include "lj_dispatch.h"
+
+/* This is not compiled in by default.
+** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything.
+*/
+#ifdef LUAJIT_USE_GDBJIT
+
+/* The GDB JIT API allows JIT compilers to pass debug information about
+** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher
+** to see it in action.
+**
+** This is a passive API, so it works even when not running under GDB
+** or when attaching to an already running process. Alas, this implies
+** enabling it always has a non-negligible overhead -- do not use in
+** release mode!
+**
+** The LuaJIT GDB JIT client is rather minimal at the moment. It gives
+** each trace a symbol name and adds a source location and frame unwind
+** information. Obviously LuaJIT itself and any embedding C application
+** should be compiled with debug symbols, too (see the Makefile).
+**
+** Traces are named TRACE_1, TRACE_2, ... these correspond to the trace
+** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc.
+** to set breakpoints on specific traces (even ahead of their creation).
+**
+** The source location for each trace allows listing the corresponding
+** source lines with the GDB command "list" (but only if the Lua source
+** has been loaded from a file). Currently this is always set to the
+** location where the trace has been started.
+**
+** Frame unwind information can be inspected with the GDB command
+** "info frame". This also allows proper backtraces across JIT-compiled
+** code with the GDB command "bt".
+**
+** You probably want to add the following settings to a .gdbinit file
+** (or add them to ~/.gdbinit):
+** set disassembly-flavor intel
+** set breakpoint pending on
+**
+** Here's a sample GDB session:
+** ------------------------------------------------------------------------
+
+$ cat >x.lua
+for outer=1,100 do
+ for inner=1,100 do end
+end
+^D
+
+$ luajit -jv x.lua
+[TRACE 1 x.lua:2]
+[TRACE 2 (1/3) x.lua:1 -> 1]
+
+$ gdb --quiet --args luajit x.lua
+(gdb) tbreak TRACE_1
+Function "TRACE_1" not defined.
+Temporary breakpoint 1 (TRACE_1) pending.
+(gdb) run
+Starting program: luajit x.lua
+
+Temporary breakpoint 1, TRACE_1 () at x.lua:2
+2 for inner=1,100 do end
+(gdb) list
+1 for outer=1,100 do
+2 for inner=1,100 do end
+3 end
+(gdb) bt
+#0 TRACE_1 () at x.lua:2
+#1 0x08053690 in lua_pcall [...]
+[...]
+#7 0x0806ff90 in main [...]
+(gdb) disass TRACE_1
+Dump of assembler code for function TRACE_1:
+0xf7fd9fba <TRACE_1+0>: mov DWORD PTR ds:0xf7e0e2a0,0x1
+0xf7fd9fc4 <TRACE_1+10>: movsd xmm7,QWORD PTR [edx+0x20]
+[...]
+0xf7fd9ff8 <TRACE_1+62>: jmp 0xf7fd2014
+End of assembler dump.
+(gdb) tbreak TRACE_2
+Function "TRACE_2" not defined.
+Temporary breakpoint 2 (TRACE_2) pending.
+(gdb) cont
+Continuing.
+
+Temporary breakpoint 2, TRACE_2 () at x.lua:1
+1 for outer=1,100 do
+(gdb) info frame
+Stack level 0, frame at 0xffffd7c0:
+ eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690
+ called by frame at 0xffffd7e0
+ source language unknown.
+ Arglist at 0xffffd78c, args:
+ Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0
+ Saved registers:
+ ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4,
+ eip at 0xffffd7bc
+(gdb)
+
+** ------------------------------------------------------------------------
+*/
+
+/* -- GDB JIT API --------------------------------------------------------- */
+
+/* GDB JIT actions. */
+enum {
+ GDBJIT_NOACTION = 0,
+ GDBJIT_REGISTER,
+ GDBJIT_UNREGISTER
+};
+
+/* GDB JIT entry. */
+typedef struct GDBJITentry {
+ struct GDBJITentry *next_entry;
+ struct GDBJITentry *prev_entry;
+ const char *symfile_addr;
+ uint64_t symfile_size;
+} GDBJITentry;
+
+/* GDB JIT descriptor. */
+typedef struct GDBJITdesc {
+ uint32_t version;
+ uint32_t action_flag;
+ GDBJITentry *relevant_entry;
+ GDBJITentry *first_entry;
+} GDBJITdesc;
+
+GDBJITdesc __jit_debug_descriptor = {
+ 1, GDBJIT_NOACTION, NULL, NULL
+};
+
+/* GDB sets a breakpoint at this function. */
+void LJ_NOINLINE __jit_debug_register_code()
+{
+ __asm__ __volatile__("");
+};
+
+/* -- In-memory ELF object definitions ------------------------------------ */
+
+/* ELF definitions. */
+typedef struct ELFheader {
+ uint8_t emagic[4];
+ uint8_t eclass;
+ uint8_t eendian;
+ uint8_t eversion;
+ uint8_t eosabi;
+ uint8_t eabiversion;
+ uint8_t epad[7];
+ uint16_t type;
+ uint16_t machine;
+ uint32_t version;
+ uintptr_t entry;
+ uintptr_t phofs;
+ uintptr_t shofs;
+ uint32_t flags;
+ uint16_t ehsize;
+ uint16_t phentsize;
+ uint16_t phnum;
+ uint16_t shentsize;
+ uint16_t shnum;
+ uint16_t shstridx;
+} ELFheader;
+
+typedef struct ELFsectheader {
+ uint32_t name;
+ uint32_t type;
+ uintptr_t flags;
+ uintptr_t addr;
+ uintptr_t ofs;
+ uintptr_t size;
+ uint32_t link;
+ uint32_t info;
+ uintptr_t align;
+ uintptr_t entsize;
+} ELFsectheader;
+
+#define ELFSECT_IDX_ABS 0xfff1
+
+enum {
+ ELFSECT_TYPE_PROGBITS = 1,
+ ELFSECT_TYPE_SYMTAB = 2,
+ ELFSECT_TYPE_STRTAB = 3,
+ ELFSECT_TYPE_NOBITS = 8
+};
+
+#define ELFSECT_FLAGS_WRITE 1
+#define ELFSECT_FLAGS_ALLOC 2
+#define ELFSECT_FLAGS_EXEC 4
+
+typedef struct ELFsymbol {
+#if LJ_64
+ uint32_t name;
+ uint8_t info;
+ uint8_t other;
+ uint16_t sectidx;
+ uintptr_t value;
+ uint64_t size;
+#else
+ uint32_t name;
+ uintptr_t value;
+ uint32_t size;
+ uint8_t info;
+ uint8_t other;
+ uint16_t sectidx;
+#endif
+} ELFsymbol;
+
+enum {
+ ELFSYM_TYPE_FUNC = 2,
+ ELFSYM_TYPE_FILE = 4,
+ ELFSYM_BIND_LOCAL = 0 << 4,
+ ELFSYM_BIND_GLOBAL = 1 << 4,
+};
+
+/* DWARF definitions. */
+#define DW_CIE_VERSION 1
+
+enum {
+ DW_CFA_nop = 0x0,
+ DW_CFA_offset_extended = 0x5,
+ DW_CFA_def_cfa = 0xc,
+ DW_CFA_def_cfa_offset = 0xe,
+ DW_CFA_offset_extended_sf = 0x11,
+ DW_CFA_advance_loc = 0x40,
+ DW_CFA_offset = 0x80
+};
+
+enum {
+ DW_EH_PE_udata4 = 3,
+ DW_EH_PE_textrel = 0x20
+};
+
+enum {
+ DW_TAG_compile_unit = 0x11
+};
+
+enum {
+ DW_children_no = 0,
+ DW_children_yes = 1
+};
+
+enum {
+ DW_AT_name = 0x03,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12
+};
+
+enum {
+ DW_FORM_addr = 0x01,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_string = 0x08
+};
+
+enum {
+ DW_LNS_extended_op = 0,
+ DW_LNS_copy = 1,
+ DW_LNS_advance_pc = 2,
+ DW_LNS_advance_line = 3
+};
+
+enum {
+ DW_LNE_end_sequence = 1,
+ DW_LNE_set_address = 2
+};
+
+enum {
+#if LJ_TARGET_X86
+ DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX,
+ DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI,
+ DW_REG_RA,
+#elif LJ_TARGET_X64
+ /* Yes, the order is strange, but correct. */
+ DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX,
+ DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP,
+ DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11,
+ DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15,
+ DW_REG_RA,
+#elif LJ_TARGET_ARM
+ DW_REG_SP = 13,
+ DW_REG_RA = 14,
+#elif LJ_TARGET_PPC
+ DW_REG_SP = 1,
+ DW_REG_RA = 65,
+ DW_REG_CR = 70,
+#elif LJ_TARGET_MIPS
+ DW_REG_SP = 29,
+ DW_REG_RA = 31,
+#else
+#error "Unsupported target architecture"
+#endif
+};
+
+/* Minimal list of sections for the in-memory ELF object. */
+enum {
+ GDBJIT_SECT_NULL,
+ GDBJIT_SECT_text,
+ GDBJIT_SECT_eh_frame,
+ GDBJIT_SECT_shstrtab,
+ GDBJIT_SECT_strtab,
+ GDBJIT_SECT_symtab,
+ GDBJIT_SECT_debug_info,
+ GDBJIT_SECT_debug_abbrev,
+ GDBJIT_SECT_debug_line,
+ GDBJIT_SECT__MAX
+};
+
+enum {
+ GDBJIT_SYM_UNDEF,
+ GDBJIT_SYM_FILE,
+ GDBJIT_SYM_FUNC,
+ GDBJIT_SYM__MAX
+};
+
+/* In-memory ELF object. */
+typedef struct GDBJITobj {
+ ELFheader hdr; /* ELF header. */
+ ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */
+ ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */
+ uint8_t space[4096]; /* Space for various section data. */
+} GDBJITobj;
+
+/* Combined structure for GDB JIT entry and ELF object. */
+typedef struct GDBJITentryobj {
+ GDBJITentry entry;
+ size_t sz;
+ GDBJITobj obj;
+} GDBJITentryobj;
+
+/* Template for in-memory ELF header. */
+static const ELFheader elfhdr_template = {
+ .emagic = { 0x7f, 'E', 'L', 'F' },
+ .eclass = LJ_64 ? 2 : 1,
+ .eendian = LJ_ENDIAN_SELECT(1, 2),
+ .eversion = 1,
+#if LJ_TARGET_LINUX
+ .eosabi = 0, /* Nope, it's not 3. */
+#elif defined(__FreeBSD__)
+ .eosabi = 9,
+#elif defined(__NetBSD__)
+ .eosabi = 2,
+#elif defined(__OpenBSD__)
+ .eosabi = 12,
+#elif (defined(__sun__) && defined(__svr4__))
+ .eosabi = 6,
+#else
+ .eosabi = 0,
+#endif
+ .eabiversion = 0,
+ .epad = { 0, 0, 0, 0, 0, 0, 0 },
+ .type = 1,
+#if LJ_TARGET_X86
+ .machine = 3,
+#elif LJ_TARGET_X64
+ .machine = 62,
+#elif LJ_TARGET_ARM
+ .machine = 40,
+#elif LJ_TARGET_PPC
+ .machine = 20,
+#elif LJ_TARGET_MIPS
+ .machine = 8,
+#else
+#error "Unsupported target architecture"
+#endif
+ .version = 1,
+ .entry = 0,
+ .phofs = 0,
+ .shofs = offsetof(GDBJITobj, sect),
+ .flags = 0,
+ .ehsize = sizeof(ELFheader),
+ .phentsize = 0,
+ .phnum = 0,
+ .shentsize = sizeof(ELFsectheader),
+ .shnum = GDBJIT_SECT__MAX,
+ .shstridx = GDBJIT_SECT_shstrtab
+};
+
+/* -- In-memory ELF object generation ------------------------------------- */
+
+/* Context for generating the ELF object for the GDB JIT API. */
+typedef struct GDBJITctx {
+ uint8_t *p; /* Pointer to next address in obj.space. */
+ uint8_t *startp; /* Pointer to start address in obj.space. */
+ GCtrace *T; /* Generate symbols for this trace. */
+ uintptr_t mcaddr; /* Machine code address. */
+ MSize szmcode; /* Size of machine code. */
+ MSize spadjp; /* Stack adjustment for parent trace or interpreter. */
+ MSize spadj; /* Stack adjustment for trace itself. */
+ BCLine lineno; /* Starting line number. */
+ const char *filename; /* Starting file name. */
+ size_t objsize; /* Final size of ELF object. */
+ GDBJITobj obj; /* In-memory ELF object. */
+} GDBJITctx;
+
+/* Add a zero-terminated string. */
+static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str)
+{
+ uint8_t *p = ctx->p;
+ uint32_t ofs = (uint32_t)(p - ctx->startp);
+ do {
+ *p++ = (uint8_t)*str;
+ } while (*str++);
+ ctx->p = p;
+ return ofs;
+}
+
+/* Append a decimal number. */
+static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n)
+{
+ if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); }
+ *ctx->p++ = '0' + n;
+}
+
+/* Add a ULEB128 value. */
+static void gdbjit_uleb128(GDBJITctx *ctx, uint32_t v)
+{
+ uint8_t *p = ctx->p;
+ for (; v >= 0x80; v >>= 7)
+ *p++ = (uint8_t)((v & 0x7f) | 0x80);
+ *p++ = (uint8_t)v;
+ ctx->p = p;
+}
+
+/* Add a SLEB128 value. */
+static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v)
+{
+ uint8_t *p = ctx->p;
+ for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7)
+ *p++ = (uint8_t)((v & 0x7f) | 0x80);
+ *p++ = (uint8_t)(v & 0x7f);
+ ctx->p = p;
+}
+
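/* Companion sketch to gdbjit_uleb128() above: decoding a ULEB128 value.
** Purely illustrative; LuaJIT only needs the encoder, never a decoder.
*/
#include <stdint.h>
#include <stdio.h>

static uint32_t uleb128_decode(const uint8_t **pp)
{
  const uint8_t *p = *pp;
  uint32_t v = 0;
  int shift = 0;
  uint8_t b;
  do {
    b = *p++;
    v |= (uint32_t)(b & 0x7f) << shift;   /* 7 payload bits per byte */
    shift += 7;
  } while (b & 0x80);                     /* high bit set: more bytes follow */
  *pp = p;
  return v;
}

int main(void)
{
  const uint8_t buf[] = { 0xe5, 0x8e, 0x26 };  /* encodes 624485 */
  const uint8_t *p = buf;
  printf("%u\n", (unsigned)uleb128_decode(&p));
  return 0;
}
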
+/* Shortcuts to generate DWARF structures. */
+#define DB(x) (*p++ = (x))
+#define DI8(x) (*(int8_t *)p = (x), p++)
+#define DU16(x) (*(uint16_t *)p = (x), p += 2)
+#define DU32(x) (*(uint32_t *)p = (x), p += 4)
+#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t))
+#define DUV(x) (ctx->p = p, gdbjit_uleb128(ctx, (x)), p = ctx->p)
+#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p)
+#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p)
+#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop
+#define DSECT(name, stmt) \
+ { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \
+ *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); } \
+
+/* Initialize ELF section headers. */
+static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx)
+{
+ ELFsectheader *sect;
+
+ *ctx->p++ = '\0'; /* Empty string at start of string table. */
+
+#define SECTDEF(id, tp, al) \
+ sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \
+ sect->name = gdbjit_strz(ctx, "." #id); \
+ sect->type = ELFSECT_TYPE_##tp; \
+ sect->align = (al)
+
+ SECTDEF(text, NOBITS, 16);
+ sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC;
+ sect->addr = ctx->mcaddr;
+ sect->ofs = 0;
+ sect->size = ctx->szmcode;
+
+ SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t));
+ sect->flags = ELFSECT_FLAGS_ALLOC;
+
+ SECTDEF(shstrtab, STRTAB, 1);
+ SECTDEF(strtab, STRTAB, 1);
+
+ SECTDEF(symtab, SYMTAB, sizeof(uintptr_t));
+ sect->ofs = offsetof(GDBJITobj, sym);
+ sect->size = sizeof(ctx->obj.sym);
+ sect->link = GDBJIT_SECT_strtab;
+ sect->entsize = sizeof(ELFsymbol);
+ sect->info = GDBJIT_SYM_FUNC;
+
+ SECTDEF(debug_info, PROGBITS, 1);
+ SECTDEF(debug_abbrev, PROGBITS, 1);
+ SECTDEF(debug_line, PROGBITS, 1);
+
+#undef SECTDEF
+}
+
+/* Initialize symbol table. */
+static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx)
+{
+ ELFsymbol *sym;
+
+ *ctx->p++ = '\0'; /* Empty string at start of string table. */
+
+ sym = &ctx->obj.sym[GDBJIT_SYM_FILE];
+ sym->name = gdbjit_strz(ctx, "JIT mcode");
+ sym->sectidx = ELFSECT_IDX_ABS;
+ sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL;
+
+ sym = &ctx->obj.sym[GDBJIT_SYM_FUNC];
+ sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--;
+ gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0';
+ sym->sectidx = GDBJIT_SECT_text;
+ sym->value = 0;
+ sym->size = ctx->szmcode;
+ sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL;
+}
+
+/* Initialize .eh_frame section. */
+static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+ uint8_t *framep = p;
+
+ /* Emit DWARF EH CIE. */
+ DSECT(CIE,
+ DU32(0); /* Offset to CIE itself. */
+ DB(DW_CIE_VERSION);
+ DSTR("zR"); /* Augmentation. */
+ DUV(1); /* Code alignment factor. */
+ DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */
+ DB(DW_REG_RA); /* Return address register. */
+ DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */
+ DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t));
+#if LJ_TARGET_PPC
+ DB(DW_CFA_offset_extended_sf); DB(DW_REG_RA); DSV(-1);
+#else
+ DB(DW_CFA_offset|DW_REG_RA); DUV(1);
+#endif
+ DALIGNNOP(sizeof(uintptr_t));
+ )
+
+ /* Emit DWARF EH FDE. */
+ DSECT(FDE,
+ DU32((uint32_t)(p-framep)); /* Offset to CIE. */
+ DU32(0); /* Machine code offset relative to .text. */
+ DU32(ctx->szmcode); /* Machine code length. */
+ DB(0); /* Augmentation data. */
+ /* Registers saved in CFRAME. */
+#if LJ_TARGET_X86
+ DB(DW_CFA_offset|DW_REG_BP); DUV(2);
+ DB(DW_CFA_offset|DW_REG_DI); DUV(3);
+ DB(DW_CFA_offset|DW_REG_SI); DUV(4);
+ DB(DW_CFA_offset|DW_REG_BX); DUV(5);
+#elif LJ_TARGET_X64
+ DB(DW_CFA_offset|DW_REG_BP); DUV(2);
+ DB(DW_CFA_offset|DW_REG_BX); DUV(3);
+ DB(DW_CFA_offset|DW_REG_15); DUV(4);
+ DB(DW_CFA_offset|DW_REG_14); DUV(5);
+ /* Extra registers saved for JIT-compiled code. */
+ DB(DW_CFA_offset|DW_REG_13); DUV(9);
+ DB(DW_CFA_offset|DW_REG_12); DUV(10);
+#elif LJ_TARGET_ARM
+ {
+ int i;
+ for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); }
+ }
+#elif LJ_TARGET_PPC
+ {
+ int i;
+ DB(DW_CFA_offset_extended); DB(DW_REG_CR); DUV(55);
+ for (i = 14; i <= 31; i++) {
+ DB(DW_CFA_offset|i); DUV(37+(31-i));
+ DB(DW_CFA_offset|32|i); DUV(2+2*(31-i));
+ }
+ }
+#elif LJ_TARGET_MIPS
+ {
+ int i;
+ DB(DW_CFA_offset|30); DUV(2);
+ for (i = 23; i >= 16; i--) { DB(DW_CFA_offset|i); DUV(26-i); }
+ for (i = 30; i >= 20; i -= 2) { DB(DW_CFA_offset|32|i); DUV(42-i); }
+ }
+#else
+#error "Unsupported target architecture"
+#endif
+ if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */
+ DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp);
+ DB(DW_CFA_advance_loc|1); /* Only an approximation. */
+ }
+ DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */
+ DALIGNNOP(sizeof(uintptr_t));
+ )
+
+ ctx->p = p;
+}
+
+/* Initialize .debug_info section. */
+static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ DSECT(info,
+ DU16(2); /* DWARF version. */
+ DU32(0); /* Abbrev offset. */
+ DB(sizeof(uintptr_t)); /* Pointer size. */
+
+ DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */
+ DSTR(ctx->filename); /* DW_AT_name. */
+ DADDR(ctx->mcaddr); /* DW_AT_low_pc. */
+ DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */
+ DU32(0); /* DW_AT_stmt_list. */
+ )
+
+ ctx->p = p;
+}
+
+/* Initialize .debug_abbrev section. */
+static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ /* Abbrev #1: DW_TAG_compile_unit. */
+ DUV(1); DUV(DW_TAG_compile_unit);
+ DB(DW_children_no);
+ DUV(DW_AT_name); DUV(DW_FORM_string);
+ DUV(DW_AT_low_pc); DUV(DW_FORM_addr);
+ DUV(DW_AT_high_pc); DUV(DW_FORM_addr);
+ DUV(DW_AT_stmt_list); DUV(DW_FORM_data4);
+ DB(0); DB(0);
+
+ ctx->p = p;
+}
+
+#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op)))
+
+/* Initialize .debug_line section. */
+static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ DSECT(line,
+ DU16(2); /* DWARF version. */
+ DSECT(header,
+ DB(1); /* Minimum instruction length. */
+ DB(1); /* is_stmt. */
+ DI8(0); /* Line base for special opcodes. */
+ DB(2); /* Line range for special opcodes. */
+ DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */
+ DB(0); DB(1); DB(1); /* Standard opcode lengths. */
+ /* Directory table. */
+ DB(0);
+ /* File name table. */
+ DSTR(ctx->filename); DUV(0); DUV(0); DUV(0);
+ DB(0);
+ )
+
+ DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr);
+ if (ctx->lineno) {
+ DB(DW_LNS_advance_line); DSV(ctx->lineno-1);
+ }
+ DB(DW_LNS_copy);
+ DB(DW_LNS_advance_pc); DUV(ctx->szmcode);
+ DLNE(DW_LNE_end_sequence, 0);
+ )
+
+ ctx->p = p;
+}
+
+#undef DLNE
+
+/* Undef shortcuts. */
+#undef DB
+#undef DI8
+#undef DU16
+#undef DU32
+#undef DADDR
+#undef DUV
+#undef DSV
+#undef DSTR
+#undef DALIGNNOP
+#undef DSECT
+
+/* Type of a section initializer callback. */
+typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx);
+
+/* Call section initializer and set the section offset and size. */
+static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf)
+{
+ ctx->startp = ctx->p;
+ ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj);
+ initf(ctx);
+ ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp);
+}
+
+#define SECTALIGN(p, a) \
+ ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1)))
+
+/* Build in-memory ELF object. */
+static void gdbjit_buildobj(GDBJITctx *ctx)
+{
+ GDBJITobj *obj = &ctx->obj;
+ /* Fill in ELF header and clear structures. */
+ memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader));
+ memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX);
+ memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX);
+ /* Initialize sections. */
+ ctx->p = obj->space;
+ gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr);
+ gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline);
+ SECTALIGN(ctx->p, sizeof(uintptr_t));
+ gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe);
+ ctx->objsize = (size_t)((char *)ctx->p - (char *)obj);
+ lua_assert(ctx->objsize < sizeof(GDBJITobj));
+}
+
+#undef SECTALIGN
+
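/* Illustrative sketch of the align-up arithmetic behind SECTALIGN above:
** round an offset up to the next multiple of a power-of-two alignment.
*/
#include <stdint.h>
#include <stdio.h>

static uintptr_t align_up(uintptr_t x, uintptr_t a)  /* a: power of two */
{
  return (x + (a - 1)) & ~(a - 1);
}

int main(void)
{
  printf("%lu\n", (unsigned long)align_up(13, 8));   /* prints 16 */
  return 0;
}
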
+/* -- Interface to GDB JIT API -------------------------------------------- */
+
+/* Add new entry to GDB JIT symbol chain. */
+static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
+{
+ /* Allocate memory for GDB JIT entry and ELF object. */
+ MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize);
+ GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj);
+ memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */
+ eo->sz = sz;
+ ctx->T->gdbjit_entry = (void *)eo;
+ /* Link new entry to chain and register it. */
+ eo->entry.prev_entry = NULL;
+ eo->entry.next_entry = __jit_debug_descriptor.first_entry;
+ if (eo->entry.next_entry)
+ eo->entry.next_entry->prev_entry = &eo->entry;
+ eo->entry.symfile_addr = (const char *)&eo->obj;
+ eo->entry.symfile_size = ctx->objsize;
+ __jit_debug_descriptor.first_entry = &eo->entry;
+ __jit_debug_descriptor.relevant_entry = &eo->entry;
+ __jit_debug_descriptor.action_flag = GDBJIT_REGISTER;
+ __jit_debug_register_code();
+}
+
+/* Add debug info for newly compiled trace and notify GDB. */
+void lj_gdbjit_addtrace(jit_State *J, GCtrace *T)
+{
+ GDBJITctx ctx;
+ GCproto *pt = &gcref(T->startpt)->pt;
+ TraceNo parent = T->ir[REF_BASE].op1;
+ const BCIns *startpc = mref(T->startpc, const BCIns);
+ ctx.T = T;
+ ctx.mcaddr = (uintptr_t)T->mcode;
+ ctx.szmcode = T->szmcode;
+ ctx.spadjp = CFRAME_SIZE_JIT +
+ (MSize)(parent ? traceref(J, parent)->spadjust : 0);
+ ctx.spadj = CFRAME_SIZE_JIT + T->spadjust;
+ lua_assert(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc);
+ ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
+ ctx.filename = proto_chunknamestr(pt);
+ if (*ctx.filename == '@' || *ctx.filename == '=')
+ ctx.filename++;
+ else
+ ctx.filename = "(string)";
+ gdbjit_buildobj(&ctx);
+ gdbjit_newentry(J->L, &ctx);
+}
+
+/* Delete debug info for trace and notify GDB. */
+void lj_gdbjit_deltrace(jit_State *J, GCtrace *T)
+{
+ GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry;
+ if (eo) {
+ if (eo->entry.prev_entry)
+ eo->entry.prev_entry->next_entry = eo->entry.next_entry;
+ else
+ __jit_debug_descriptor.first_entry = eo->entry.next_entry;
+ if (eo->entry.next_entry)
+ eo->entry.next_entry->prev_entry = eo->entry.prev_entry;
+ __jit_debug_descriptor.relevant_entry = &eo->entry;
+ __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER;
+ __jit_debug_register_code();
+ lj_mem_free(J2G(J), eo, eo->sz);
+ }
+}
+
+#endif
+#endif
diff --git a/3rdparty/lua/src/lj_gdbjit.h b/3rdparty/lua/src/lj_gdbjit.h
index 2822e7d..481cb22 100644
--- a/3rdparty/lua/src/lj_gdbjit.h
+++ b/3rdparty/lua/src/lj_gdbjit.h
@@ -1,22 +1,22 @@
-/*
-** Client for the GDB JIT API.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_GDBJIT_H
-#define _LJ_GDBJIT_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT)
-
-LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T);
-LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T);
-
-#else
-#define lj_gdbjit_addtrace(J, T) UNUSED(T)
-#define lj_gdbjit_deltrace(J, T) UNUSED(T)
-#endif
-
-#endif
+/*
+** Client for the GDB JIT API.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_GDBJIT_H
+#define _LJ_GDBJIT_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT)
+
+LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T);
+LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T);
+
+#else
+#define lj_gdbjit_addtrace(J, T) UNUSED(T)
+#define lj_gdbjit_deltrace(J, T) UNUSED(T)
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_ir.c b/3rdparty/lua/src/lj_ir.c
index 003e413..e1a5910 100644
--- a/3rdparty/lua/src/lj_ir.c
+++ b/3rdparty/lua/src/lj_ir.c
@@ -1,501 +1,501 @@
-/*
-** SSA IR (Intermediate Representation) emitter.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_ir_c
-#define LUA_CORE
-
-/* For pointers to libc/libm functions. */
-#include <stdio.h>
-#include <math.h>
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_gc.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_ircall.h"
-#include "lj_iropt.h"
-#include "lj_trace.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#include "lj_cdata.h"
-#include "lj_carith.h"
-#endif
-#include "lj_vm.h"
-#include "lj_strscan.h"
-#include "lj_lib.h"
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-#define fins (&J->fold.ins)
-
-/* Pass IR on to next optimization in chain (FOLD). */
-#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
-
-/* -- IR tables ----------------------------------------------------------- */
-
-/* IR instruction modes. */
-LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = {
-IRDEF(IRMODE)
- 0
-};
-
-/* IR type sizes. */
-LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = {
-#define IRTSIZE(name, size) size,
-IRTDEF(IRTSIZE)
-#undef IRTSIZE
- 0
-};
-
-/* C call info for CALL* instructions. */
-LJ_DATADEF const CCallInfo lj_ir_callinfo[] = {
-#define IRCALLCI(cond, name, nargs, kind, type, flags) \
- { (ASMFunction)IRCALLCOND_##cond(name), \
- (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
-IRCALLDEF(IRCALLCI)
-#undef IRCALLCI
- { NULL, 0 }
-};
-
-/* -- IR emitter ---------------------------------------------------------- */
-
-/* Grow IR buffer at the top. */
-void LJ_FASTCALL lj_ir_growtop(jit_State *J)
-{
- IRIns *baseir = J->irbuf + J->irbotlim;
- MSize szins = J->irtoplim - J->irbotlim;
- if (szins) {
- baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns),
- 2*szins*sizeof(IRIns));
- J->irtoplim = J->irbotlim + 2*szins;
- } else {
- baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns));
- J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4;
- J->irtoplim = J->irbotlim + LJ_MIN_IRSZ;
- }
- J->cur.ir = J->irbuf = baseir - J->irbotlim;
-}
-
-/* Grow IR buffer at the bottom or shift it up. */
-static void lj_ir_growbot(jit_State *J)
-{
- IRIns *baseir = J->irbuf + J->irbotlim;
- MSize szins = J->irtoplim - J->irbotlim;
- lua_assert(szins != 0);
- lua_assert(J->cur.nk == J->irbotlim);
- if (J->cur.nins + (szins >> 1) < J->irtoplim) {
- /* More than half of the buffer is free on top: shift up by a quarter. */
- MSize ofs = szins >> 2;
- memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
- J->irbotlim -= ofs;
- J->irtoplim -= ofs;
- J->cur.ir = J->irbuf = baseir - J->irbotlim;
- } else {
- /* Double the buffer size, but split the growth amongst top/bottom. */
- IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns);
- MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */
- memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
- lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns));
- J->irbotlim -= ofs;
- J->irtoplim = J->irbotlim + 2*szins;
- J->cur.ir = J->irbuf = newbase - J->irbotlim;
- }
-}
-
-/* Emit IR without any optimizations. */
-TRef LJ_FASTCALL lj_ir_emit(jit_State *J)
-{
- IRRef ref = lj_ir_nextins(J);
- IRIns *ir = IR(ref);
- IROp op = fins->o;
- ir->prev = J->chain[op];
- J->chain[op] = (IRRef1)ref;
- ir->o = op;
- ir->op1 = fins->op1;
- ir->op2 = fins->op2;
- J->guardemit.irt |= fins->t.irt;
- return TREF(ref, irt_t((ir->t = fins->t)));
-}
-
-/* Emit call to a C function. */
-TRef lj_ir_call(jit_State *J, IRCallID id, ...)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- uint32_t n = CCI_NARGS(ci);
- TRef tr = TREF_NIL;
- va_list argp;
- va_start(argp, id);
- if ((ci->flags & CCI_L)) n--;
- if (n > 0)
- tr = va_arg(argp, IRRef);
- while (n-- > 1)
- tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef));
- va_end(argp);
- if (CCI_OP(ci) == IR_CALLS)
- J->needsnap = 1; /* Need snapshot after call with side effect. */
- return emitir(CCI_OPTYPE(ci), tr, id);
-}
-
-/* -- Interning of constants ---------------------------------------------- */
-
-/*
-** IR instructions for constants are kept between J->cur.nk >= ref < REF_BIAS.
-** They are chained like all other instructions, but grow downwards.
-** The are interned (like strings in the VM) to facilitate reference
-** comparisons. The same constant must get the same reference.
-*/
-
-/* Get ref of next IR constant and optionally grow IR.
-** Note: this may invalidate all IRIns *!
-*/
-static LJ_AINLINE IRRef ir_nextk(jit_State *J)
-{
- IRRef ref = J->cur.nk;
- if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J);
- J->cur.nk = --ref;
- return ref;
-}
-
-/* Intern int32_t constant. */
-TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k)
-{
- IRIns *ir, *cir = J->cur.ir;
- IRRef ref;
- for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev)
- if (cir[ref].i == k)
- goto found;
- ref = ir_nextk(J);
- ir = IR(ref);
- ir->i = k;
- ir->t.irt = IRT_INT;
- ir->o = IR_KINT;
- ir->prev = J->chain[IR_KINT];
- J->chain[IR_KINT] = (IRRef1)ref;
-found:
- return TREF(ref, IRT_INT);
-}
-
-/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
-** 64 bit constant. The constants themselves are stored in a chained array
-** and shared across traces.
-**
-** Rationale for choosing this data structure:
-** - The address of the constants is embedded in the generated machine code
-** and must never move. A resizable array or hash table wouldn't work.
-** - Most apps need very few non-32 bit integer constants (less than a dozen).
-** - Linear search is hard to beat in terms of speed and low complexity.
-*/
-typedef struct K64Array {
- MRef next; /* Pointer to next list. */
- MSize numk; /* Number of used elements in this array. */
- TValue k[LJ_MIN_K64SZ]; /* Array of constants. */
-} K64Array;
-
-/* Free all chained arrays. */
-void lj_ir_k64_freeall(jit_State *J)
-{
- K64Array *k;
- for (k = mref(J->k64, K64Array); k; ) {
- K64Array *next = mref(k->next, K64Array);
- lj_mem_free(J2G(J), k, sizeof(K64Array));
- k = next;
- }
-}
-
-/* Find 64 bit constant in chained array or add it. */
-cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
-{
- K64Array *k, *kp = NULL;
- TValue *ntv;
- MSize idx;
- /* Search for the constant in the whole chain of arrays. */
- for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) {
- kp = k; /* Remember previous element in list. */
- for (idx = 0; idx < k->numk; idx++) { /* Search one array. */
- TValue *tv = &k->k[idx];
- if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */
- return tv;
- }
- }
- /* Constant was not found, need to add it. */
- if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */
- K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
- setmref(kn->next, NULL);
- kn->numk = 0;
- if (kp)
- setmref(kp->next, kn); /* Chain to the end of the list. */
- else
- setmref(J->k64, kn); /* Link first array. */
- kp = kn;
- }
- ntv = &kp->k[kp->numk++]; /* Add to current array. */
- ntv->u64 = u64;
- return ntv;
-}
-
-/* Intern 64 bit constant, given by its address. */
-TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv)
-{
- IRIns *ir, *cir = J->cur.ir;
- IRRef ref;
- IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64;
- for (ref = J->chain[op]; ref; ref = cir[ref].prev)
- if (ir_k64(&cir[ref]) == tv)
- goto found;
- ref = ir_nextk(J);
- ir = IR(ref);
- lua_assert(checkptr32(tv));
- setmref(ir->ptr, tv);
- ir->t.irt = t;
- ir->o = op;
- ir->prev = J->chain[op];
- J->chain[op] = (IRRef1)ref;
-found:
- return TREF(ref, t);
-}
-
-/* Intern FP constant, given by its 64 bit pattern. */
-TRef lj_ir_knum_u64(jit_State *J, uint64_t u64)
-{
- return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64));
-}
-
-/* Intern 64 bit integer constant. */
-TRef lj_ir_kint64(jit_State *J, uint64_t u64)
-{
- return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64));
-}
-
-/* Check whether a number is int and return it. -0 is NOT considered an int. */
-static int numistrueint(lua_Number n, int32_t *kp)
-{
- int32_t k = lj_num2int(n);
- if (n == (lua_Number)k) {
- if (kp) *kp = k;
- if (k == 0) { /* Special check for -0. */
- TValue tv;
- setnumV(&tv, n);
- if (tv.u32.hi != 0)
- return 0;
- }
- return 1;
- }
- return 0;
-}
-
-/* Intern number as int32_t constant if possible, otherwise as FP constant. */
-TRef lj_ir_knumint(jit_State *J, lua_Number n)
-{
- int32_t k;
- if (numistrueint(n, &k))
- return lj_ir_kint(J, k);
- else
- return lj_ir_knum(J, n);
-}
-
-/* Intern GC object "constant". */
-TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t)
-{
- IRIns *ir, *cir = J->cur.ir;
- IRRef ref;
- lua_assert(!isdead(J2G(J), o));
- for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev)
- if (ir_kgc(&cir[ref]) == o)
- goto found;
- ref = ir_nextk(J);
- ir = IR(ref);
- /* NOBARRIER: Current trace is a GC root. */
- setgcref(ir->gcr, o);
- ir->t.irt = (uint8_t)t;
- ir->o = IR_KGC;
- ir->prev = J->chain[IR_KGC];
- J->chain[IR_KGC] = (IRRef1)ref;
-found:
- return TREF(ref, t);
-}
-
-/* Intern 32 bit pointer constant. */
-TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr)
-{
- IRIns *ir, *cir = J->cur.ir;
- IRRef ref;
- lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr);
- for (ref = J->chain[op]; ref; ref = cir[ref].prev)
- if (mref(cir[ref].ptr, void) == ptr)
- goto found;
- ref = ir_nextk(J);
- ir = IR(ref);
- setmref(ir->ptr, ptr);
- ir->t.irt = IRT_P32;
- ir->o = op;
- ir->prev = J->chain[op];
- J->chain[op] = (IRRef1)ref;
-found:
- return TREF(ref, IRT_P32);
-}
-
-/* Intern typed NULL constant. */
-TRef lj_ir_knull(jit_State *J, IRType t)
-{
- IRIns *ir, *cir = J->cur.ir;
- IRRef ref;
- for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev)
- if (irt_t(cir[ref].t) == t)
- goto found;
- ref = ir_nextk(J);
- ir = IR(ref);
- ir->i = 0;
- ir->t.irt = (uint8_t)t;
- ir->o = IR_KNULL;
- ir->prev = J->chain[IR_KNULL];
- J->chain[IR_KNULL] = (IRRef1)ref;
-found:
- return TREF(ref, t);
-}
-
-/* Intern key slot. */
-TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
-{
- IRIns *ir, *cir = J->cur.ir;
- IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);
- IRRef ref;
- /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
- lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot);
- for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
- if (cir[ref].op12 == op12)
- goto found;
- ref = ir_nextk(J);
- ir = IR(ref);
- ir->op12 = op12;
- ir->t.irt = IRT_P32;
- ir->o = IR_KSLOT;
- ir->prev = J->chain[IR_KSLOT];
- J->chain[IR_KSLOT] = (IRRef1)ref;
-found:
- return TREF(ref, IRT_P32);
-}
-
-/* -- Access to IR constants ---------------------------------------------- */
-
-/* Copy value of IR constant. */
-void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
-{
- UNUSED(L);
- lua_assert(ir->o != IR_KSLOT); /* Common mistake. */
- switch (ir->o) {
- case IR_KPRI: setitype(tv, irt_toitype(ir->t)); break;
- case IR_KINT: setintV(tv, ir->i); break;
- case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
- case IR_KPTR: case IR_KKPTR: case IR_KNULL:
- setlightudV(tv, mref(ir->ptr, void));
- break;
- case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
-#if LJ_HASFFI
- case IR_KINT64: {
- GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8);
- *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64;
- setcdataV(L, tv, cd);
- break;
- }
-#endif
- default: lua_assert(0); break;
- }
-}
-
-/* -- Convert IR operand types -------------------------------------------- */
-
-/* Convert from string to number. */
-TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr)
-{
- if (!tref_isnumber(tr)) {
- if (tref_isstr(tr))
- tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
- else
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- }
- return tr;
-}
-
-/* Convert from integer or string to number. */
-TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr)
-{
- if (!tref_isnum(tr)) {
- if (tref_isinteger(tr))
- tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
- else if (tref_isstr(tr))
- tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
- else
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- }
- return tr;
-}
-
-/* Convert from integer or number to string. */
-TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr)
-{
- if (!tref_isstr(tr)) {
- if (!tref_isnumber(tr))
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0);
- }
- return tr;
-}
-
-/* -- Miscellaneous IR ops ------------------------------------------------ */
-
-/* Evaluate numeric comparison. */
-int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
-{
- switch (op) {
- case IR_EQ: return (a == b);
- case IR_NE: return (a != b);
- case IR_LT: return (a < b);
- case IR_GE: return (a >= b);
- case IR_LE: return (a <= b);
- case IR_GT: return (a > b);
- case IR_ULT: return !(a >= b);
- case IR_UGE: return !(a < b);
- case IR_ULE: return !(a > b);
- case IR_UGT: return !(a <= b);
- default: lua_assert(0); return 0;
- }
-}
-
-/* Evaluate string comparison. */
-int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
-{
- int res = lj_str_cmp(a, b);
- switch (op) {
- case IR_LT: return (res < 0);
- case IR_GE: return (res >= 0);
- case IR_LE: return (res <= 0);
- case IR_GT: return (res > 0);
- default: lua_assert(0); return 0;
- }
-}
-
-/* Rollback IR to previous state. */
-void lj_ir_rollback(jit_State *J, IRRef ref)
-{
- IRRef nins = J->cur.nins;
- while (nins > ref) {
- IRIns *ir;
- nins--;
- ir = IR(nins);
- J->chain[ir->o] = ir->prev;
- }
- J->cur.nins = nins;
-}
-
-#undef IR
-#undef fins
-#undef emitir
-
-#endif
+/*
+** SSA IR (Intermediate Representation) emitter.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ir_c
+#define LUA_CORE
+
+/* For pointers to libc/libm functions. */
+#include <stdio.h>
+#include <math.h>
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_carith.h"
+#endif
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_lib.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* -- IR tables ----------------------------------------------------------- */
+
+/* IR instruction modes. */
+LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = {
+IRDEF(IRMODE)
+ 0
+};
+
+/* IR type sizes. */
+LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = {
+#define IRTSIZE(name, size) size,
+IRTDEF(IRTSIZE)
+#undef IRTSIZE
+ 0
+};
+
+/* C call info for CALL* instructions. */
+LJ_DATADEF const CCallInfo lj_ir_callinfo[] = {
+#define IRCALLCI(cond, name, nargs, kind, type, flags) \
+ { (ASMFunction)IRCALLCOND_##cond(name), \
+ (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
+IRCALLDEF(IRCALLCI)
+#undef IRCALLCI
+ { NULL, 0 }
+};
+
+/* -- IR emitter ---------------------------------------------------------- */
+
+/* Grow IR buffer at the top. */
+void LJ_FASTCALL lj_ir_growtop(jit_State *J)
+{
+ IRIns *baseir = J->irbuf + J->irbotlim;
+ MSize szins = J->irtoplim - J->irbotlim;
+ if (szins) {
+ baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns),
+ 2*szins*sizeof(IRIns));
+ J->irtoplim = J->irbotlim + 2*szins;
+ } else {
+ baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns));
+ J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4;
+ J->irtoplim = J->irbotlim + LJ_MIN_IRSZ;
+ }
+ J->cur.ir = J->irbuf = baseir - J->irbotlim;
+}
+
+/* Grow IR buffer at the bottom or shift it up. */
+static void lj_ir_growbot(jit_State *J)
+{
+ IRIns *baseir = J->irbuf + J->irbotlim;
+ MSize szins = J->irtoplim - J->irbotlim;
+ lua_assert(szins != 0);
+ lua_assert(J->cur.nk == J->irbotlim);
+ if (J->cur.nins + (szins >> 1) < J->irtoplim) {
+ /* More than half of the buffer is free on top: shift up by a quarter. */
+ MSize ofs = szins >> 2;
+ memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
+ J->irbotlim -= ofs;
+ J->irtoplim -= ofs;
+ J->cur.ir = J->irbuf = baseir - J->irbotlim;
+ } else {
+ /* Double the buffer size, but split the growth amongst top/bottom. */
+ IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns);
+ MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */
+ memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
+ lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns));
+ J->irbotlim -= ofs;
+ J->irtoplim = J->irbotlim + 2*szins;
+ J->cur.ir = J->irbuf = newbase - J->irbotlim;
+ }
+}
+
+/* Emit IR without any optimizations. */
+TRef LJ_FASTCALL lj_ir_emit(jit_State *J)
+{
+ IRRef ref = lj_ir_nextins(J);
+ IRIns *ir = IR(ref);
+ IROp op = fins->o;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+ ir->o = op;
+ ir->op1 = fins->op1;
+ ir->op2 = fins->op2;
+ J->guardemit.irt |= fins->t.irt;
+ return TREF(ref, irt_t((ir->t = fins->t)));
+}
+
+/* Emit call to a C function. */
+TRef lj_ir_call(jit_State *J, IRCallID id, ...)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ uint32_t n = CCI_NARGS(ci);
+ TRef tr = TREF_NIL;
+ va_list argp;
+ va_start(argp, id);
+ if ((ci->flags & CCI_L)) n--;
+ if (n > 0)
+ tr = va_arg(argp, IRRef);
+ while (n-- > 1)
+ tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef));
+ va_end(argp);
+ if (CCI_OP(ci) == IR_CALLS)
+ J->needsnap = 1; /* Need snapshot after call with side effect. */
+ return emitir(CCI_OPTYPE(ci), tr, id);
+}
+
+/* -- Interning of constants ---------------------------------------------- */
+
+/*
+** IR instructions for constants are kept between J->cur.nk >= ref < REF_BIAS.
+** They are chained like all other instructions, but grow downwards.
+** They are interned (like strings in the VM) to facilitate reference
+** comparisons. The same constant must get the same reference.
+*/
+
+/* Get ref of next IR constant and optionally grow IR.
+** Note: this may invalidate all IRIns *!
+*/
+static LJ_AINLINE IRRef ir_nextk(jit_State *J)
+{
+ IRRef ref = J->cur.nk;
+ if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J);
+ J->cur.nk = --ref;
+ return ref;
+}
+
+/* Intern int32_t constant. */
+TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev)
+ if (cir[ref].i == k)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->i = k;
+ ir->t.irt = IRT_INT;
+ ir->o = IR_KINT;
+ ir->prev = J->chain[IR_KINT];
+ J->chain[IR_KINT] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_INT);
+}
+
+/* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
+** 64 bit constant. The constants themselves are stored in a chained array
+** and shared across traces.
+**
+** Rationale for choosing this data structure:
+** - The address of the constants is embedded in the generated machine code
+** and must never move. A resizable array or hash table wouldn't work.
+** - Most apps need very few non-32 bit integer constants (less than a dozen).
+** - Linear search is hard to beat in terms of speed and low complexity.
+*/
+typedef struct K64Array {
+ MRef next; /* Pointer to next list. */
+ MSize numk; /* Number of used elements in this array. */
+ TValue k[LJ_MIN_K64SZ]; /* Array of constants. */
+} K64Array;
+
+/* Free all chained arrays. */
+void lj_ir_k64_freeall(jit_State *J)
+{
+ K64Array *k;
+ for (k = mref(J->k64, K64Array); k; ) {
+ K64Array *next = mref(k->next, K64Array);
+ lj_mem_free(J2G(J), k, sizeof(K64Array));
+ k = next;
+ }
+}
+
+/* Find 64 bit constant in chained array or add it. */
+cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
+{
+ K64Array *k, *kp = NULL;
+ TValue *ntv;
+ MSize idx;
+ /* Search for the constant in the whole chain of arrays. */
+ for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) {
+ kp = k; /* Remember previous element in list. */
+ for (idx = 0; idx < k->numk; idx++) { /* Search one array. */
+ TValue *tv = &k->k[idx];
+ if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */
+ return tv;
+ }
+ }
+ /* Constant was not found, need to add it. */
+ if (!(kp && kp->numk < LJ_MIN_K64SZ)) { /* Allocate a new array. */
+ K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
+ setmref(kn->next, NULL);
+ kn->numk = 0;
+ if (kp)
+ setmref(kp->next, kn); /* Chain to the end of the list. */
+ else
+ setmref(J->k64, kn); /* Link first array. */
+ kp = kn;
+ }
+ ntv = &kp->k[kp->numk++]; /* Add to current array. */
+ ntv->u64 = u64;
+ return ntv;
+}
+
+/* Intern 64 bit constant, given by its address. */
+TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64;
+ for (ref = J->chain[op]; ref; ref = cir[ref].prev)
+ if (ir_k64(&cir[ref]) == tv)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ lua_assert(checkptr32(tv));
+ setmref(ir->ptr, tv);
+ ir->t.irt = t;
+ ir->o = op;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern FP constant, given by its 64 bit pattern. */
+TRef lj_ir_knum_u64(jit_State *J, uint64_t u64)
+{
+ return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64));
+}
+
+/* Intern 64 bit integer constant. */
+TRef lj_ir_kint64(jit_State *J, uint64_t u64)
+{
+ return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64));
+}
+
+/* Check whether a number is int and return it. -0 is NOT considered an int. */
+static int numistrueint(lua_Number n, int32_t *kp)
+{
+ int32_t k = lj_num2int(n);
+ if (n == (lua_Number)k) {
+ if (kp) *kp = k;
+ if (k == 0) { /* Special check for -0. */
+ TValue tv;
+ setnumV(&tv, n);
+ if (tv.u32.hi != 0)
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Intern number as int32_t constant if possible, otherwise as FP constant. */
+TRef lj_ir_knumint(jit_State *J, lua_Number n)
+{
+ int32_t k;
+ if (numistrueint(n, &k))
+ return lj_ir_kint(J, k);
+ else
+ return lj_ir_knum(J, n);
+}
+
+/* Intern GC object "constant". */
+TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ lua_assert(!isdead(J2G(J), o));
+ for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev)
+ if (ir_kgc(&cir[ref]) == o)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ /* NOBARRIER: Current trace is a GC root. */
+ setgcref(ir->gcr, o);
+ ir->t.irt = (uint8_t)t;
+ ir->o = IR_KGC;
+ ir->prev = J->chain[IR_KGC];
+ J->chain[IR_KGC] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern 32 bit pointer constant. */
+TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr);
+ for (ref = J->chain[op]; ref; ref = cir[ref].prev)
+ if (mref(cir[ref].ptr, void) == ptr)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ setmref(ir->ptr, ptr);
+ ir->t.irt = IRT_P32;
+ ir->o = op;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_P32);
+}
+
+/* Intern typed NULL constant. */
+TRef lj_ir_knull(jit_State *J, IRType t)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev)
+ if (irt_t(cir[ref].t) == t)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->i = 0;
+ ir->t.irt = (uint8_t)t;
+ ir->o = IR_KNULL;
+ ir->prev = J->chain[IR_KNULL];
+ J->chain[IR_KNULL] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern key slot. */
+TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);
+ IRRef ref;
+ /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
+ lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot);
+ for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
+ if (cir[ref].op12 == op12)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->op12 = op12;
+ ir->t.irt = IRT_P32;
+ ir->o = IR_KSLOT;
+ ir->prev = J->chain[IR_KSLOT];
+ J->chain[IR_KSLOT] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_P32);
+}
+
+/* -- Access to IR constants ---------------------------------------------- */
+
+/* Copy value of IR constant. */
+void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
+{
+ UNUSED(L);
+ lua_assert(ir->o != IR_KSLOT); /* Common mistake. */
+ switch (ir->o) {
+ case IR_KPRI: setitype(tv, irt_toitype(ir->t)); break;
+ case IR_KINT: setintV(tv, ir->i); break;
+ case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
+ case IR_KPTR: case IR_KKPTR: case IR_KNULL:
+ setlightudV(tv, mref(ir->ptr, void));
+ break;
+ case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
+#if LJ_HASFFI
+ case IR_KINT64: {
+ GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8);
+ *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64;
+ setcdataV(L, tv, cd);
+ break;
+ }
+#endif
+ default: lua_assert(0); break;
+ }
+}
+
+/* -- Convert IR operand types -------------------------------------------- */
+
+/* Convert from string to number. */
+TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr)
+{
+ if (!tref_isnumber(tr)) {
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+/* Convert from integer or string to number. */
+TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr)
+{
+ if (!tref_isnum(tr)) {
+ if (tref_isinteger(tr))
+ tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ else if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+/* Convert from integer or number to string. */
+TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr)
+{
+ if (!tref_isstr(tr)) {
+ if (!tref_isnumber(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0);
+ }
+ return tr;
+}
+
+/* -- Miscellaneous IR ops ------------------------------------------------ */
+
+/* Evaluate numeric comparison. */
+int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
+{
+ switch (op) {
+ case IR_EQ: return (a == b);
+ case IR_NE: return (a != b);
+ case IR_LT: return (a < b);
+ case IR_GE: return (a >= b);
+ case IR_LE: return (a <= b);
+ case IR_GT: return (a > b);
+ case IR_ULT: return !(a >= b);
+ case IR_UGE: return !(a < b);
+ case IR_ULE: return !(a > b);
+ case IR_UGT: return !(a <= b);
+ default: lua_assert(0); return 0;
+ }
+}
+
+/* Evaluate string comparison. */
+int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
+{
+ int res = lj_str_cmp(a, b);
+ switch (op) {
+ case IR_LT: return (res < 0);
+ case IR_GE: return (res >= 0);
+ case IR_LE: return (res <= 0);
+ case IR_GT: return (res > 0);
+ default: lua_assert(0); return 0;
+ }
+}
+
+/* Rollback IR to previous state. */
+void lj_ir_rollback(jit_State *J, IRRef ref)
+{
+ IRRef nins = J->cur.nins;
+ while (nins > ref) {
+ IRIns *ir;
+ nins--;
+ ir = IR(nins);
+ J->chain[ir->o] = ir->prev;
+ }
+ J->cur.nins = nins;
+}
+
+#undef IR
+#undef fins
+#undef emitir
+
+#endif
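/*
** [Editorial sketch, not part of the patch or of LuaJIT] The interning
** pattern used by lj_ir_kint(), lj_ir_kgc() and the other constant
** constructors in lj_ir.c above, reduced to a standalone toy: constants
** live in a per-opcode chain, a linear walk over the `prev` links looks
** for an existing slot, and only on a miss is a new slot taken, so the
** same constant always yields the same reference. All names below
** (ToyIns, toy_intern_int, TOY_MAX) are invented for illustration; the
** real buffer additionally grows constants downwards from REF_BIAS,
** which is omitted here.
*/
#include <stdio.h>
#include <stdint.h>

#define TOY_MAX 64

typedef struct ToyIns {
  int32_t i;       /* Constant payload (cf. IRIns.i). */
  uint16_t prev;   /* Previous slot in the same chain (cf. IRIns.prev). */
} ToyIns;

static ToyIns buf[TOY_MAX];
static uint16_t chain_kint = 0;  /* Head of the KINT chain; 0 == empty. */
static uint16_t nk = 1;          /* Next free slot; slot 0 stays unused. */

/* Return an existing slot holding k, or intern k into a new slot. */
static uint16_t toy_intern_int(int32_t k)
{
  uint16_t ref;
  for (ref = chain_kint; ref; ref = buf[ref].prev)
    if (buf[ref].i == k)
      return ref;                /* Same constant -> same reference. */
  ref = nk++;                    /* Toy code: no overflow check against TOY_MAX. */
  buf[ref].i = k;
  buf[ref].prev = chain_kint;    /* Link the new slot into the chain. */
  chain_kint = ref;
  return ref;
}

int main(void)
{
  uint16_t a = toy_intern_int(42);
  uint16_t b = toy_intern_int(7);
  uint16_t c = toy_intern_int(42);
  printf("a=%u b=%u c=%u (a==c: %d)\n",
         (unsigned)a, (unsigned)b, (unsigned)c, a == c);
  return 0;
}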
diff --git a/3rdparty/lua/src/lj_ir.h b/3rdparty/lua/src/lj_ir.h
index 3ed3423..a982432 100644
--- a/3rdparty/lua/src/lj_ir.h
+++ b/3rdparty/lua/src/lj_ir.h
@@ -1,551 +1,551 @@
-/*
-** SSA IR (Intermediate Representation) format.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_IR_H
-#define _LJ_IR_H
-
-#include "lj_obj.h"
-
-/* -- IR instructions ----------------------------------------------------- */
-
-/* IR instruction definition. Order matters, see below. ORDER IR */
-#define IRDEF(_) \
- /* Guarded assertions. */ \
- /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \
- _(LT, N , ref, ref) \
- _(GE, N , ref, ref) \
- _(LE, N , ref, ref) \
- _(GT, N , ref, ref) \
- \
- _(ULT, N , ref, ref) \
- _(UGE, N , ref, ref) \
- _(ULE, N , ref, ref) \
- _(UGT, N , ref, ref) \
- \
- _(EQ, C , ref, ref) \
- _(NE, C , ref, ref) \
- \
- _(ABC, N , ref, ref) \
- _(RETF, S , ref, ref) \
- \
- /* Miscellaneous ops. */ \
- _(NOP, N , ___, ___) \
- _(BASE, N , lit, lit) \
- _(PVAL, N , lit, ___) \
- _(GCSTEP, S , ___, ___) \
- _(HIOP, S , ref, ref) \
- _(LOOP, S , ___, ___) \
- _(USE, S , ref, ___) \
- _(PHI, S , ref, ref) \
- _(RENAME, S , ref, lit) \
- \
- /* Constants. */ \
- _(KPRI, N , ___, ___) \
- _(KINT, N , cst, ___) \
- _(KGC, N , cst, ___) \
- _(KPTR, N , cst, ___) \
- _(KKPTR, N , cst, ___) \
- _(KNULL, N , cst, ___) \
- _(KNUM, N , cst, ___) \
- _(KINT64, N , cst, ___) \
- _(KSLOT, N , ref, lit) \
- \
- /* Bit ops. */ \
- _(BNOT, N , ref, ___) \
- _(BSWAP, N , ref, ___) \
- _(BAND, C , ref, ref) \
- _(BOR, C , ref, ref) \
- _(BXOR, C , ref, ref) \
- _(BSHL, N , ref, ref) \
- _(BSHR, N , ref, ref) \
- _(BSAR, N , ref, ref) \
- _(BROL, N , ref, ref) \
- _(BROR, N , ref, ref) \
- \
- /* Arithmetic ops. ORDER ARITH */ \
- _(ADD, C , ref, ref) \
- _(SUB, N , ref, ref) \
- _(MUL, C , ref, ref) \
- _(DIV, N , ref, ref) \
- _(MOD, N , ref, ref) \
- _(POW, N , ref, ref) \
- _(NEG, N , ref, ref) \
- \
- _(ABS, N , ref, ref) \
- _(ATAN2, N , ref, ref) \
- _(LDEXP, N , ref, ref) \
- _(MIN, C , ref, ref) \
- _(MAX, C , ref, ref) \
- _(FPMATH, N , ref, lit) \
- \
- /* Overflow-checking arithmetic ops. */ \
- _(ADDOV, CW, ref, ref) \
- _(SUBOV, NW, ref, ref) \
- _(MULOV, CW, ref, ref) \
- \
- /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \
- \
- /* Memory references. */ \
- _(AREF, R , ref, ref) \
- _(HREFK, R , ref, ref) \
- _(HREF, L , ref, ref) \
- _(NEWREF, S , ref, ref) \
- _(UREFO, LW, ref, lit) \
- _(UREFC, LW, ref, lit) \
- _(FREF, R , ref, lit) \
- _(STRREF, N , ref, ref) \
- \
- /* Loads and Stores. These must be in the same order. */ \
- _(ALOAD, L , ref, ___) \
- _(HLOAD, L , ref, ___) \
- _(ULOAD, L , ref, ___) \
- _(FLOAD, L , ref, lit) \
- _(XLOAD, L , ref, lit) \
- _(SLOAD, L , lit, lit) \
- _(VLOAD, L , ref, ___) \
- \
- _(ASTORE, S , ref, ref) \
- _(HSTORE, S , ref, ref) \
- _(USTORE, S , ref, ref) \
- _(FSTORE, S , ref, ref) \
- _(XSTORE, S , ref, ref) \
- \
- /* Allocations. */ \
- _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \
- _(XSNEW, A , ref, ref) \
- _(TNEW, AW, lit, lit) \
- _(TDUP, AW, ref, ___) \
- _(CNEW, AW, ref, ref) \
- _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
- \
- /* Barriers. */ \
- _(TBAR, S , ref, ___) \
- _(OBAR, S , ref, ref) \
- _(XBAR, S , ___, ___) \
- \
- /* Type conversions. */ \
- _(CONV, NW, ref, lit) \
- _(TOBIT, N , ref, ref) \
- _(TOSTR, N , ref, ___) \
- _(STRTO, N , ref, ___) \
- \
- /* Calls. */ \
- _(CALLN, N , ref, lit) \
- _(CALLL, L , ref, lit) \
- _(CALLS, S , ref, lit) \
- _(CALLXS, S , ref, ref) \
- _(CARG, N , ref, ref) \
- \
- /* End of list. */
-
-/* IR opcodes (max. 256). */
-typedef enum {
-#define IRENUM(name, m, m1, m2) IR_##name,
-IRDEF(IRENUM)
-#undef IRENUM
- IR__MAX
-} IROp;
-
-/* Stored opcode. */
-typedef uint8_t IROp1;
-
-LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE);
-LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE);
-LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT);
-LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT);
-LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT);
-
-/* Delta between xLOAD and xSTORE. */
-#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD)
-
-LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE);
-LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE);
-LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE);
-LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
-
-/* -- Named IR literals --------------------------------------------------- */
-
-/* FPMATH sub-functions. ORDER FPM. */
-#define IRFPMDEF(_) \
- _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
- _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \
- _(SIN) _(COS) _(TAN) \
- _(OTHER)
-
-typedef enum {
-#define FPMENUM(name) IRFPM_##name,
-IRFPMDEF(FPMENUM)
-#undef FPMENUM
- IRFPM__MAX
-} IRFPMathOp;
-
-/* FLOAD fields. */
-#define IRFLDEF(_) \
- _(STR_LEN, offsetof(GCstr, len)) \
- _(FUNC_ENV, offsetof(GCfunc, l.env)) \
- _(FUNC_PC, offsetof(GCfunc, l.pc)) \
- _(TAB_META, offsetof(GCtab, metatable)) \
- _(TAB_ARRAY, offsetof(GCtab, array)) \
- _(TAB_NODE, offsetof(GCtab, node)) \
- _(TAB_ASIZE, offsetof(GCtab, asize)) \
- _(TAB_HMASK, offsetof(GCtab, hmask)) \
- _(TAB_NOMM, offsetof(GCtab, nomm)) \
- _(UDATA_META, offsetof(GCudata, metatable)) \
- _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \
- _(UDATA_FILE, sizeof(GCudata)) \
- _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \
- _(CDATA_PTR, sizeof(GCcdata)) \
- _(CDATA_INT, sizeof(GCcdata)) \
- _(CDATA_INT64, sizeof(GCcdata)) \
- _(CDATA_INT64_4, sizeof(GCcdata) + 4)
-
-typedef enum {
-#define FLENUM(name, ofs) IRFL_##name,
-IRFLDEF(FLENUM)
-#undef FLENUM
- IRFL__MAX
-} IRFieldID;
-
-/* SLOAD mode bits, stored in op2. */
-#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
-#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */
-#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
-#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
-#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
-#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */
-
-/* XLOAD mode, stored in op2. */
-#define IRXLOAD_READONLY 1 /* Load from read-only data. */
-#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */
-#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */
-
-/* CONV mode, stored in op2. */
-#define IRCONV_SRCMASK 0x001f /* Source IRType. */
-#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
-#define IRCONV_DSH 5
-#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
-#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
-#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */
-#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
-#define IRCONV_MODEMASK 0x0fff
-#define IRCONV_CONVMASK 0xf000
-#define IRCONV_CSH 12
-/* Number to integer conversion mode. Ordered by strength of the checks. */
-#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */
-#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */
-#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
-#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
-
-/* -- IR operands --------------------------------------------------------- */
-
-/* IR operand mode (2 bit). */
-typedef enum {
- IRMref, /* IR reference. */
- IRMlit, /* 16 bit unsigned literal. */
- IRMcst, /* Constant literal: i, gcr or ptr. */
- IRMnone /* Unused operand. */
-} IRMode;
-#define IRM___ IRMnone
-
-/* Mode bits: Commutative, {Normal/Ref, Alloc, Load, Store}, Non-weak guard. */
-#define IRM_C 0x10
-
-#define IRM_N 0x00
-#define IRM_R IRM_N
-#define IRM_A 0x20
-#define IRM_L 0x40
-#define IRM_S 0x60
-
-#define IRM_W 0x80
-
-#define IRM_NW (IRM_N|IRM_W)
-#define IRM_CW (IRM_C|IRM_W)
-#define IRM_AW (IRM_A|IRM_W)
-#define IRM_LW (IRM_L|IRM_W)
-
-#define irm_op1(m) ((IRMode)((m)&3))
-#define irm_op2(m) ((IRMode)(((m)>>2)&3))
-#define irm_iscomm(m) ((m) & IRM_C)
-#define irm_kind(m) ((m) & IRM_S)
-
-#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W),
-
-LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
-
-/* -- IR instruction types ------------------------------------------------ */
-
-/* Map of itypes to non-negative numbers. ORDER LJ_T.
-** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
-** IRT_P32 and IRT_P64, which never escape the IR.
-** The various integers are only used in the IR and can only escape to
-** a TValue after implicit or explicit conversion. Their types must be
-** contiguous and next to IRT_NUM (see the typerange macros below).
-*/
-#define IRTDEF(_) \
- _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \
- _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \
- _(TAB, 4) _(UDATA, 4) \
- _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \
- _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \
- _(SOFTFP, 4) /* There is room for 9 more types. */
-
-/* IR result type and flags (8 bit). */
-typedef enum {
-#define IRTENUM(name, size) IRT_##name,
-IRTDEF(IRTENUM)
-#undef IRTENUM
- IRT__MAX,
-
- /* Native pointer type and the corresponding integer type. */
- IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
- IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
- IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
-
- /* Additional flags. */
- IRT_MARK = 0x20, /* Marker for misc. purposes. */
- IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */
- IRT_GUARD = 0x80, /* Instruction is a guard. */
-
- /* Masks. */
- IRT_TYPE = 0x1f,
- IRT_T = 0xff
-} IRType;
-
-#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE)
-
-/* Stored IRType. */
-typedef struct IRType1 { uint8_t irt; } IRType1;
-
-#define IRT(o, t) ((uint32_t)(((o)<<8) | (t)))
-#define IRTI(o) (IRT((o), IRT_INT))
-#define IRTN(o) (IRT((o), IRT_NUM))
-#define IRTG(o, t) (IRT((o), IRT_GUARD|(t)))
-#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT))
-
-#define irt_t(t) ((IRType)(t).irt)
-#define irt_type(t) ((IRType)((t).irt & IRT_TYPE))
-#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0)
-#define irt_typerange(t, first, last) \
- ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first))
-
-#define irt_isnil(t) (irt_type(t) == IRT_NIL)
-#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE)
-#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD)
-#define irt_isstr(t) (irt_type(t) == IRT_STR)
-#define irt_istab(t) (irt_type(t) == IRT_TAB)
-#define irt_iscdata(t) (irt_type(t) == IRT_CDATA)
-#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT)
-#define irt_isnum(t) (irt_type(t) == IRT_NUM)
-#define irt_isint(t) (irt_type(t) == IRT_INT)
-#define irt_isi8(t) (irt_type(t) == IRT_I8)
-#define irt_isu8(t) (irt_type(t) == IRT_U8)
-#define irt_isi16(t) (irt_type(t) == IRT_I16)
-#define irt_isu16(t) (irt_type(t) == IRT_U16)
-#define irt_isu32(t) (irt_type(t) == IRT_U32)
-#define irt_isi64(t) (irt_type(t) == IRT_I64)
-#define irt_isu64(t) (irt_type(t) == IRT_U64)
-
-#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t))
-#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
-#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
-#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
-#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
-
-#if LJ_64
-#define IRT_IS64 \
- ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
-#else
-#define IRT_IS64 \
- ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64))
-#endif
-
-#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
-#define irt_is64orfp(t) (((IRT_IS64|(1u<<IRT_FLOAT))>>irt_type(t)) & 1)
-
-#define irt_size(t) (lj_ir_type_size[irt_t((t))])
-
-LJ_DATA const uint8_t lj_ir_type_size[];
-
-static LJ_AINLINE IRType itype2irt(const TValue *tv)
-{
- if (tvisint(tv))
- return IRT_INT;
- else if (tvisnum(tv))
- return IRT_NUM;
-#if LJ_64
- else if (tvislightud(tv))
- return IRT_LIGHTUD;
-#endif
- else
- return (IRType)~itype(tv);
-}
-
-static LJ_AINLINE uint32_t irt_toitype_(IRType t)
-{
- lua_assert(!LJ_64 || t != IRT_LIGHTUD);
- if (LJ_DUALNUM && t > IRT_NUM) {
- return LJ_TISNUM;
- } else {
- lua_assert(t <= IRT_NUM);
- return ~(uint32_t)t;
- }
-}
-
-#define irt_toitype(t) irt_toitype_(irt_type((t)))
-
-#define irt_isguard(t) ((t).irt & IRT_GUARD)
-#define irt_ismarked(t) ((t).irt & IRT_MARK)
-#define irt_setmark(t) ((t).irt |= IRT_MARK)
-#define irt_clearmark(t) ((t).irt &= ~IRT_MARK)
-#define irt_isphi(t) ((t).irt & IRT_ISPHI)
-#define irt_setphi(t) ((t).irt |= IRT_ISPHI)
-#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI)
-
-/* Stored combined IR opcode and type. */
-typedef uint16_t IROpT;
-
-/* -- IR references ------------------------------------------------------- */
-
-/* IR references. */
-typedef uint16_t IRRef1; /* One stored reference. */
-typedef uint32_t IRRef2; /* Two stored references. */
-typedef uint32_t IRRef; /* Used to pass around references. */
-
-/* Fixed references. */
-enum {
- REF_BIAS = 0x8000,
- REF_TRUE = REF_BIAS-3,
- REF_FALSE = REF_BIAS-2,
- REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */
- REF_BASE = REF_BIAS, /* /--- IR grows upwards. */
- REF_FIRST = REF_BIAS+1,
- REF_DROP = 0xffff
-};
-
-/* Note: IRMlit operands must be < REF_BIAS, too!
-** This allows for fast and uniform manipulation of all operands
-** without looking up the operand mode in lj_ir_mode:
-** - CSE calculates the maximum reference of two operands.
-** This must work with mixed reference/literal operands, too.
-** - DCE marking only checks for operand >= REF_BIAS.
-** - LOOP needs to substitute reference operands.
-** Constant references and literals must not be modified.
-*/
-
-#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16))
-
-#define irref_isk(ref) ((ref) < REF_BIAS)
-
-/* Tagged IR references (32 bit).
-**
-** +-------+-------+---------------+
-** | irt | flags | ref |
-** +-------+-------+---------------+
-**
-** The tag holds a copy of the IRType and speeds up IR type checks.
-*/
-typedef uint32_t TRef;
-
-#define TREF_REFMASK 0x0000ffff
-#define TREF_FRAME 0x00010000
-#define TREF_CONT 0x00020000
-
-#define TREF(ref, t) ((TRef)((ref) + ((t)<<24)))
-
-#define tref_ref(tr) ((IRRef1)(tr))
-#define tref_t(tr) ((IRType)((tr)>>24))
-#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE))
-#define tref_typerange(tr, first, last) \
- ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first))
-
-#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24))
-#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
-#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
-#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
-#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
-#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
-#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
-#define tref_istab(tr) (tref_istype((tr), IRT_TAB))
-#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA))
-#define tref_isnum(tr) (tref_istype((tr), IRT_NUM))
-#define tref_isint(tr) (tref_istype((tr), IRT_INT))
-
-#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
-#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
-#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
-#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
-#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
-#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
-#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))
-
-#define tref_isk(tr) (irref_isk(tref_ref((tr))))
-#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2))))
-
-#define TREF_PRI(t) (TREF(REF_NIL-(t), (t)))
-#define TREF_NIL (TREF_PRI(IRT_NIL))
-#define TREF_FALSE (TREF_PRI(IRT_FALSE))
-#define TREF_TRUE (TREF_PRI(IRT_TRUE))
-
-/* -- IR format ----------------------------------------------------------- */
-
-/* IR instruction format (64 bit).
-**
-** 16 16 8 8 8 8
-** +-------+-------+---+---+---+---+
-** | op1 | op2 | t | o | r | s |
-** +-------+-------+---+---+---+---+
-** | op12/i/gco | ot | prev | (alternative fields in union)
-** +---------------+-------+-------+
-** 32 16 16
-**
-** prev is only valid prior to register allocation and then reused for r + s.
-*/
-
-typedef union IRIns {
- struct {
- LJ_ENDIAN_LOHI(
- IRRef1 op1; /* IR operand 1. */
- , IRRef1 op2; /* IR operand 2. */
- )
- IROpT ot; /* IR opcode and type (overlaps t and o). */
- IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */
- };
- struct {
- IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */
- LJ_ENDIAN_LOHI(
- IRType1 t; /* IR type. */
- , IROp1 o; /* IR opcode. */
- )
- LJ_ENDIAN_LOHI(
- uint8_t r; /* Register allocation (overlaps prev). */
- , uint8_t s; /* Spill slot allocation (overlaps prev). */
- )
- };
- int32_t i; /* 32 bit signed integer literal (overlaps op12). */
- GCRef gcr; /* GCobj constant (overlaps op12). */
- MRef ptr; /* Pointer constant (overlaps op12). */
-} IRIns;
-
-#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr))
-#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
-#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
-#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
-#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
-#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue))
-#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
-#define ir_k64(ir) \
- check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
-#define ir_kptr(ir) \
- check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void))
-
-/* A store or any other op with a non-weak guard has a side-effect. */
-static LJ_AINLINE int ir_sideeff(IRIns *ir)
-{
- return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S);
-}
-
-LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W);
-
-#endif
+/*
+** SSA IR (Intermediate Representation) format.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IR_H
+#define _LJ_IR_H
+
+#include "lj_obj.h"
+
+/* -- IR instructions ----------------------------------------------------- */
+
+/* IR instruction definition. Order matters, see below. ORDER IR */
+#define IRDEF(_) \
+ /* Guarded assertions. */ \
+ /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \
+ _(LT, N , ref, ref) \
+ _(GE, N , ref, ref) \
+ _(LE, N , ref, ref) \
+ _(GT, N , ref, ref) \
+ \
+ _(ULT, N , ref, ref) \
+ _(UGE, N , ref, ref) \
+ _(ULE, N , ref, ref) \
+ _(UGT, N , ref, ref) \
+ \
+ _(EQ, C , ref, ref) \
+ _(NE, C , ref, ref) \
+ \
+ _(ABC, N , ref, ref) \
+ _(RETF, S , ref, ref) \
+ \
+ /* Miscellaneous ops. */ \
+ _(NOP, N , ___, ___) \
+ _(BASE, N , lit, lit) \
+ _(PVAL, N , lit, ___) \
+ _(GCSTEP, S , ___, ___) \
+ _(HIOP, S , ref, ref) \
+ _(LOOP, S , ___, ___) \
+ _(USE, S , ref, ___) \
+ _(PHI, S , ref, ref) \
+ _(RENAME, S , ref, lit) \
+ \
+ /* Constants. */ \
+ _(KPRI, N , ___, ___) \
+ _(KINT, N , cst, ___) \
+ _(KGC, N , cst, ___) \
+ _(KPTR, N , cst, ___) \
+ _(KKPTR, N , cst, ___) \
+ _(KNULL, N , cst, ___) \
+ _(KNUM, N , cst, ___) \
+ _(KINT64, N , cst, ___) \
+ _(KSLOT, N , ref, lit) \
+ \
+ /* Bit ops. */ \
+ _(BNOT, N , ref, ___) \
+ _(BSWAP, N , ref, ___) \
+ _(BAND, C , ref, ref) \
+ _(BOR, C , ref, ref) \
+ _(BXOR, C , ref, ref) \
+ _(BSHL, N , ref, ref) \
+ _(BSHR, N , ref, ref) \
+ _(BSAR, N , ref, ref) \
+ _(BROL, N , ref, ref) \
+ _(BROR, N , ref, ref) \
+ \
+ /* Arithmetic ops. ORDER ARITH */ \
+ _(ADD, C , ref, ref) \
+ _(SUB, N , ref, ref) \
+ _(MUL, C , ref, ref) \
+ _(DIV, N , ref, ref) \
+ _(MOD, N , ref, ref) \
+ _(POW, N , ref, ref) \
+ _(NEG, N , ref, ref) \
+ \
+ _(ABS, N , ref, ref) \
+ _(ATAN2, N , ref, ref) \
+ _(LDEXP, N , ref, ref) \
+ _(MIN, C , ref, ref) \
+ _(MAX, C , ref, ref) \
+ _(FPMATH, N , ref, lit) \
+ \
+ /* Overflow-checking arithmetic ops. */ \
+ _(ADDOV, CW, ref, ref) \
+ _(SUBOV, NW, ref, ref) \
+ _(MULOV, CW, ref, ref) \
+ \
+ /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \
+ \
+ /* Memory references. */ \
+ _(AREF, R , ref, ref) \
+ _(HREFK, R , ref, ref) \
+ _(HREF, L , ref, ref) \
+ _(NEWREF, S , ref, ref) \
+ _(UREFO, LW, ref, lit) \
+ _(UREFC, LW, ref, lit) \
+ _(FREF, R , ref, lit) \
+ _(STRREF, N , ref, ref) \
+ \
+ /* Loads and Stores. These must be in the same order. */ \
+ _(ALOAD, L , ref, ___) \
+ _(HLOAD, L , ref, ___) \
+ _(ULOAD, L , ref, ___) \
+ _(FLOAD, L , ref, lit) \
+ _(XLOAD, L , ref, lit) \
+ _(SLOAD, L , lit, lit) \
+ _(VLOAD, L , ref, ___) \
+ \
+ _(ASTORE, S , ref, ref) \
+ _(HSTORE, S , ref, ref) \
+ _(USTORE, S , ref, ref) \
+ _(FSTORE, S , ref, ref) \
+ _(XSTORE, S , ref, ref) \
+ \
+ /* Allocations. */ \
+ _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \
+ _(XSNEW, A , ref, ref) \
+ _(TNEW, AW, lit, lit) \
+ _(TDUP, AW, ref, ___) \
+ _(CNEW, AW, ref, ref) \
+ _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
+ \
+ /* Barriers. */ \
+ _(TBAR, S , ref, ___) \
+ _(OBAR, S , ref, ref) \
+ _(XBAR, S , ___, ___) \
+ \
+ /* Type conversions. */ \
+ _(CONV, NW, ref, lit) \
+ _(TOBIT, N , ref, ref) \
+ _(TOSTR, N , ref, ___) \
+ _(STRTO, N , ref, ___) \
+ \
+ /* Calls. */ \
+ _(CALLN, N , ref, lit) \
+ _(CALLL, L , ref, lit) \
+ _(CALLS, S , ref, lit) \
+ _(CALLXS, S , ref, ref) \
+ _(CARG, N , ref, ref) \
+ \
+ /* End of list. */
+
+/* IR opcodes (max. 256). */
+typedef enum {
+#define IRENUM(name, m, m1, m2) IR_##name,
+IRDEF(IRENUM)
+#undef IRENUM
+ IR__MAX
+} IROp;
+
+/* Stored opcode. */
+typedef uint8_t IROp1;
+
+LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE);
+LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE);
+LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT);
+
+/* Delta between xLOAD and xSTORE. */
+#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD)
+
+LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE);
+LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE);
+LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE);
+LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
+
+/* -- Named IR literals --------------------------------------------------- */
+
+/* FPMATH sub-functions. ORDER FPM. */
+#define IRFPMDEF(_) \
+ _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
+ _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \
+ _(SIN) _(COS) _(TAN) \
+ _(OTHER)
+
+typedef enum {
+#define FPMENUM(name) IRFPM_##name,
+IRFPMDEF(FPMENUM)
+#undef FPMENUM
+ IRFPM__MAX
+} IRFPMathOp;
+
+/* FLOAD fields. */
+#define IRFLDEF(_) \
+ _(STR_LEN, offsetof(GCstr, len)) \
+ _(FUNC_ENV, offsetof(GCfunc, l.env)) \
+ _(FUNC_PC, offsetof(GCfunc, l.pc)) \
+ _(TAB_META, offsetof(GCtab, metatable)) \
+ _(TAB_ARRAY, offsetof(GCtab, array)) \
+ _(TAB_NODE, offsetof(GCtab, node)) \
+ _(TAB_ASIZE, offsetof(GCtab, asize)) \
+ _(TAB_HMASK, offsetof(GCtab, hmask)) \
+ _(TAB_NOMM, offsetof(GCtab, nomm)) \
+ _(UDATA_META, offsetof(GCudata, metatable)) \
+ _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \
+ _(UDATA_FILE, sizeof(GCudata)) \
+ _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \
+ _(CDATA_PTR, sizeof(GCcdata)) \
+ _(CDATA_INT, sizeof(GCcdata)) \
+ _(CDATA_INT64, sizeof(GCcdata)) \
+ _(CDATA_INT64_4, sizeof(GCcdata) + 4)
+
+typedef enum {
+#define FLENUM(name, ofs) IRFL_##name,
+IRFLDEF(FLENUM)
+#undef FLENUM
+ IRFL__MAX
+} IRFieldID;
+
+/* SLOAD mode bits, stored in op2. */
+#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
+#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */
+#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
+#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
+#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
+#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */
+
+/* XLOAD mode, stored in op2. */
+#define IRXLOAD_READONLY 1 /* Load from read-only data. */
+#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */
+#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */
+
+/* CONV mode, stored in op2. */
+#define IRCONV_SRCMASK 0x001f /* Source IRType. */
+#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
+#define IRCONV_DSH 5
+#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
+#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
+#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */
+#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
+#define IRCONV_MODEMASK 0x0fff
+#define IRCONV_CONVMASK 0xf000
+#define IRCONV_CSH 12
+/* Number to integer conversion mode. Ordered by strength of the checks. */
+#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */
+#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */
+#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
+#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
+
+/* -- IR operands --------------------------------------------------------- */
+
+/* IR operand mode (2 bit). */
+typedef enum {
+ IRMref, /* IR reference. */
+ IRMlit, /* 16 bit unsigned literal. */
+ IRMcst, /* Constant literal: i, gcr or ptr. */
+ IRMnone /* Unused operand. */
+} IRMode;
+#define IRM___ IRMnone
+
+/* Mode bits: Commutative, {Normal/Ref, Alloc, Load, Store}, Non-weak guard. */
+#define IRM_C 0x10
+
+#define IRM_N 0x00
+#define IRM_R IRM_N
+#define IRM_A 0x20
+#define IRM_L 0x40
+#define IRM_S 0x60
+
+#define IRM_W 0x80
+
+#define IRM_NW (IRM_N|IRM_W)
+#define IRM_CW (IRM_C|IRM_W)
+#define IRM_AW (IRM_A|IRM_W)
+#define IRM_LW (IRM_L|IRM_W)
+
+#define irm_op1(m) ((IRMode)((m)&3))
+#define irm_op2(m) ((IRMode)(((m)>>2)&3))
+#define irm_iscomm(m) ((m) & IRM_C)
+#define irm_kind(m) ((m) & IRM_S)
+
+#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W),
+
+LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
+
+/* -- IR instruction types ------------------------------------------------ */
+
+/* Map of itypes to non-negative numbers. ORDER LJ_T.
+** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
+** IRT_P32 and IRT_P64, which never escape the IR.
+** The various integers are only used in the IR and can only escape to
+** a TValue after implicit or explicit conversion. Their types must be
+** contiguous and next to IRT_NUM (see the typerange macros below).
+*/
+#define IRTDEF(_) \
+ _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \
+ _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \
+ _(TAB, 4) _(UDATA, 4) \
+ _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \
+ _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \
+ _(SOFTFP, 4) /* There is room for 9 more types. */
+
+/* IR result type and flags (8 bit). */
+typedef enum {
+#define IRTENUM(name, size) IRT_##name,
+IRTDEF(IRTENUM)
+#undef IRTENUM
+ IRT__MAX,
+
+ /* Native pointer type and the corresponding integer type. */
+ IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
+ IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
+ IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
+
+ /* Additional flags. */
+ IRT_MARK = 0x20, /* Marker for misc. purposes. */
+ IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */
+ IRT_GUARD = 0x80, /* Instruction is a guard. */
+
+ /* Masks. */
+ IRT_TYPE = 0x1f,
+ IRT_T = 0xff
+} IRType;
+
+#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE)
+
+/* Stored IRType. */
+typedef struct IRType1 { uint8_t irt; } IRType1;
+
+#define IRT(o, t) ((uint32_t)(((o)<<8) | (t)))
+#define IRTI(o) (IRT((o), IRT_INT))
+#define IRTN(o) (IRT((o), IRT_NUM))
+#define IRTG(o, t) (IRT((o), IRT_GUARD|(t)))
+#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT))
+
+#define irt_t(t) ((IRType)(t).irt)
+#define irt_type(t) ((IRType)((t).irt & IRT_TYPE))
+#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0)
+#define irt_typerange(t, first, last) \
+ ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first))
+
+#define irt_isnil(t) (irt_type(t) == IRT_NIL)
+#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE)
+#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD)
+#define irt_isstr(t) (irt_type(t) == IRT_STR)
+#define irt_istab(t) (irt_type(t) == IRT_TAB)
+#define irt_iscdata(t) (irt_type(t) == IRT_CDATA)
+#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT)
+#define irt_isnum(t) (irt_type(t) == IRT_NUM)
+#define irt_isint(t) (irt_type(t) == IRT_INT)
+#define irt_isi8(t) (irt_type(t) == IRT_I8)
+#define irt_isu8(t) (irt_type(t) == IRT_U8)
+#define irt_isi16(t) (irt_type(t) == IRT_I16)
+#define irt_isu16(t) (irt_type(t) == IRT_U16)
+#define irt_isu32(t) (irt_type(t) == IRT_U32)
+#define irt_isi64(t) (irt_type(t) == IRT_I64)
+#define irt_isu64(t) (irt_type(t) == IRT_U64)
+
+#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t))
+#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
+#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
+#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
+#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
+
+#if LJ_64
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
+#else
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64))
+#endif
+
+#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
+#define irt_is64orfp(t) (((IRT_IS64|(1u<<IRT_FLOAT))>>irt_type(t)) & 1)
+
+#define irt_size(t) (lj_ir_type_size[irt_t((t))])
+
+LJ_DATA const uint8_t lj_ir_type_size[];
+
+static LJ_AINLINE IRType itype2irt(const TValue *tv)
+{
+ if (tvisint(tv))
+ return IRT_INT;
+ else if (tvisnum(tv))
+ return IRT_NUM;
+#if LJ_64
+ else if (tvislightud(tv))
+ return IRT_LIGHTUD;
+#endif
+ else
+ return (IRType)~itype(tv);
+}
+
+static LJ_AINLINE uint32_t irt_toitype_(IRType t)
+{
+ lua_assert(!LJ_64 || t != IRT_LIGHTUD);
+ if (LJ_DUALNUM && t > IRT_NUM) {
+ return LJ_TISNUM;
+ } else {
+ lua_assert(t <= IRT_NUM);
+ return ~(uint32_t)t;
+ }
+}
+
+#define irt_toitype(t) irt_toitype_(irt_type((t)))
+
+#define irt_isguard(t) ((t).irt & IRT_GUARD)
+#define irt_ismarked(t) ((t).irt & IRT_MARK)
+#define irt_setmark(t) ((t).irt |= IRT_MARK)
+#define irt_clearmark(t) ((t).irt &= ~IRT_MARK)
+#define irt_isphi(t) ((t).irt & IRT_ISPHI)
+#define irt_setphi(t) ((t).irt |= IRT_ISPHI)
+#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI)
+
+/* Stored combined IR opcode and type. */
+typedef uint16_t IROpT;
+
+/* -- IR references ------------------------------------------------------- */
+
+/* IR references. */
+typedef uint16_t IRRef1; /* One stored reference. */
+typedef uint32_t IRRef2; /* Two stored references. */
+typedef uint32_t IRRef; /* Used to pass around references. */
+
+/* Fixed references. */
+enum {
+ REF_BIAS = 0x8000,
+ REF_TRUE = REF_BIAS-3,
+ REF_FALSE = REF_BIAS-2,
+ REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */
+ REF_BASE = REF_BIAS, /* /--- IR grows upwards. */
+ REF_FIRST = REF_BIAS+1,
+ REF_DROP = 0xffff
+};
+
+/* Note: IRMlit operands must be < REF_BIAS, too!
+** This allows for fast and uniform manipulation of all operands
+** without looking up the operand mode in lj_ir_mode:
+** - CSE calculates the maximum reference of two operands.
+** This must work with mixed reference/literal operands, too.
+** - DCE marking only checks for operand >= REF_BIAS.
+** - LOOP needs to substitute reference operands.
+** Constant references and literals must not be modified.
+*/
+
+#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16))
+
+#define irref_isk(ref) ((ref) < REF_BIAS)
+
+/* Tagged IR references (32 bit).
+**
+** +-------+-------+---------------+
+** | irt | flags | ref |
+** +-------+-------+---------------+
+**
+** The tag holds a copy of the IRType and speeds up IR type checks.
+*/
+typedef uint32_t TRef;
+
+#define TREF_REFMASK 0x0000ffff
+#define TREF_FRAME 0x00010000
+#define TREF_CONT 0x00020000
+
+#define TREF(ref, t) ((TRef)((ref) + ((t)<<24)))
+
+#define tref_ref(tr) ((IRRef1)(tr))
+#define tref_t(tr) ((IRType)((tr)>>24))
+#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE))
+#define tref_typerange(tr, first, last) \
+ ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first))
+
+#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24))
+#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
+#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
+#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
+#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
+#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
+#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
+#define tref_istab(tr) (tref_istype((tr), IRT_TAB))
+#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA))
+#define tref_isnum(tr) (tref_istype((tr), IRT_NUM))
+#define tref_isint(tr) (tref_istype((tr), IRT_INT))
+
+#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
+#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
+#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
+#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
+#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
+#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
+#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))
+
+#define tref_isk(tr) (irref_isk(tref_ref((tr))))
+#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2))))
+
+#define TREF_PRI(t) (TREF(REF_NIL-(t), (t)))
+#define TREF_NIL (TREF_PRI(IRT_NIL))
+#define TREF_FALSE (TREF_PRI(IRT_FALSE))
+#define TREF_TRUE (TREF_PRI(IRT_TRUE))
+
+/* -- IR format ----------------------------------------------------------- */
+
+/* IR instruction format (64 bit).
+**
+** 16 16 8 8 8 8
+** +-------+-------+---+---+---+---+
+** | op1 | op2 | t | o | r | s |
+** +-------+-------+---+---+---+---+
+** | op12/i/gco | ot | prev | (alternative fields in union)
+** +---------------+-------+-------+
+** 32 16 16
+**
+** prev is only valid prior to register allocation and then reused for r + s.
+*/
+
+typedef union IRIns {
+ struct {
+ LJ_ENDIAN_LOHI(
+ IRRef1 op1; /* IR operand 1. */
+ , IRRef1 op2; /* IR operand 2. */
+ )
+ IROpT ot; /* IR opcode and type (overlaps t and o). */
+ IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */
+ };
+ struct {
+ IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */
+ LJ_ENDIAN_LOHI(
+ IRType1 t; /* IR type. */
+ , IROp1 o; /* IR opcode. */
+ )
+ LJ_ENDIAN_LOHI(
+ uint8_t r; /* Register allocation (overlaps prev). */
+ , uint8_t s; /* Spill slot allocation (overlaps prev). */
+ )
+ };
+ int32_t i; /* 32 bit signed integer literal (overlaps op12). */
+ GCRef gcr; /* GCobj constant (overlaps op12). */
+ MRef ptr; /* Pointer constant (overlaps op12). */
+} IRIns;
+
+#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr))
+#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
+#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
+#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
+#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
+#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue))
+#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
+#define ir_k64(ir) \
+ check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
+#define ir_kptr(ir) \
+ check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void))
+
+/* A store or any other op with a non-weak guard has a side-effect. */
+static LJ_AINLINE int ir_sideeff(IRIns *ir)
+{
+ return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S);
+}
+
+LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W);
+
+#endif
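
The hunk above ends the tagged-reference machinery of lj_ir.h. As a quick illustration (a standalone sketch, not part of the diff or of LuaJIT itself), the snippet below packs and unpacks a TRef the way the TREF/tref_ref/tref_t macros do and checks the REF_BIAS split between constants and instructions; the IRT_INT value used here is a stand-in, since the real IRType enum is defined earlier in lj_ir.h.

/* Standalone sketch: 16 bit IR reference + 8 bit type tag in one 32 bit word. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { REF_BIAS = 0x8000, REF_NIL = REF_BIAS-1, REF_FIRST = REF_BIAS+1 };
enum { IRT_INT = 19 };                 /* Illustrative type code only. */

#define TREF(ref, t)   ((uint32_t)((ref) + ((uint32_t)(t) << 24)))
#define tref_ref(tr)   ((uint16_t)(tr))
#define tref_t(tr)     ((uint32_t)(tr) >> 24)
#define irref_isk(ref) ((ref) < REF_BIAS)

int main(void)
{
  uint32_t tr = TREF(REF_FIRST, IRT_INT);   /* First emitted instruction. */
  assert(tref_ref(tr) == REF_FIRST && tref_t(tr) == IRT_INT);
  assert(!irref_isk(tref_ref(tr)));         /* Instructions grow upwards. */
  assert(irref_isk(REF_NIL));               /* Constants grow downwards. */
  printf("ref=0x%04x type=%u\n", tref_ref(tr), (unsigned)tref_t(tr));
  return 0;
}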
diff --git a/3rdparty/lua/src/lj_ircall.h b/3rdparty/lua/src/lj_ircall.h
index 13877ce..7fcc532 100644
--- a/3rdparty/lua/src/lj_ircall.h
+++ b/3rdparty/lua/src/lj_ircall.h
@@ -1,277 +1,271 @@
-/*
-** IR CALL* instruction definitions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_IRCALL_H
-#define _LJ_IRCALL_H
-
-#include "lj_obj.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-
-/* C call info for CALL* instructions. */
-typedef struct CCallInfo {
- ASMFunction func; /* Function pointer. */
- uint32_t flags; /* Number of arguments and flags. */
-} CCallInfo;
-
-#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */
-#define CCI_NARGS_MAX 32 /* Max. # of args. */
-
-#define CCI_OTSHIFT 16
-#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
-#define CCI_OPSHIFT 24
-#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
-
-#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
-#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
-#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
-#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
-#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL)
-#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL)
-
-/* C call info flags. */
-#define CCI_L 0x0100 /* Implicit L arg. */
-#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */
-#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */
-#define CCI_VARARG 0x0800 /* Vararg function. */
-
-#define CCI_CC_MASK 0x3000 /* Calling convention mask. */
-#define CCI_CC_SHIFT 12
-/* ORDER CC */
-#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */
-#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */
-#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
-#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
-
-/* Helpers for conditional function definitions. */
-#define IRCALLCOND_ANY(x) x
-
-#if LJ_TARGET_X86ORX64
-#define IRCALLCOND_FPMATH(x) NULL
-#else
-#define IRCALLCOND_FPMATH(x) x
-#endif
-
-#if LJ_SOFTFP
-#define IRCALLCOND_SOFTFP(x) x
-#if LJ_HASFFI
-#define IRCALLCOND_SOFTFP_FFI(x) x
-#else
-#define IRCALLCOND_SOFTFP_FFI(x) NULL
-#endif
-#else
-#define IRCALLCOND_SOFTFP(x) NULL
-#define IRCALLCOND_SOFTFP_FFI(x) NULL
-#endif
-
-#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS)
-
-#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
-#define IRCALLCOND_FP64_FFI(x) x
-#else
-#define IRCALLCOND_FP64_FFI(x) NULL
-#endif
-
-#if LJ_HASFFI
-#define IRCALLCOND_FFI(x) x
-#if LJ_32
-#define IRCALLCOND_FFI32(x) x
-#else
-#define IRCALLCOND_FFI32(x) NULL
-#endif
-#else
-#define IRCALLCOND_FFI(x) NULL
-#define IRCALLCOND_FFI32(x) NULL
-#endif
-
-#if LJ_TARGET_X86
-#define CCI_RANDFPR 0 /* Clang on OSX/x86 is overzealous. */
-#else
-#define CCI_RANDFPR CCI_NOFPRCLOBBER
-#endif
-
-#if LJ_SOFTFP
-#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */
-#else
-#define ARG1_FP 1
-#endif
-
-#if LJ_32
-#define ARG2_64 4 /* Treat as 4 32 bit arguments. */
-#else
-#define ARG2_64 2
-#endif
-
-/* Function definitions for CALL* instructions. */
-#define IRCALLDEF(_) \
- _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
- _(ANY, lj_str_new, 3, S, STR, CCI_L) \
- _(ANY, lj_strscan_num, 2, FN, INT, 0) \
- _(ANY, lj_str_fromint, 2, FN, STR, CCI_L) \
- _(ANY, lj_str_fromnum, 2, FN, STR, CCI_L) \
- _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \
- _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \
- _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \
- _(ANY, lj_tab_len, 1, FL, INT, 0) \
- _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
- _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
- _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \
- _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_RANDFPR)\
- _(ANY, lj_vm_modi, 2, FN, INT, 0) \
- _(ANY, sinh, ARG1_FP, N, NUM, 0) \
- _(ANY, cosh, ARG1_FP, N, NUM, 0) \
- _(ANY, tanh, ARG1_FP, N, NUM, 0) \
- _(ANY, fputc, 2, S, INT, 0) \
- _(ANY, fwrite, 4, S, INT, 0) \
- _(ANY, fflush, 1, S, INT, 0) \
- /* ORDER FPM */ \
- _(FPMATH, lj_vm_floor, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_ceil, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_trunc, ARG1_FP, N, NUM, 0) \
- _(FPMATH, sqrt, ARG1_FP, N, NUM, 0) \
- _(FPMATH, exp, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_exp2, ARG1_FP, N, NUM, 0) \
- _(FPMATH, log, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_log2, ARG1_FP, N, NUM, 0) \
- _(FPMATH, log10, ARG1_FP, N, NUM, 0) \
- _(FPMATH, sin, ARG1_FP, N, NUM, 0) \
- _(FPMATH, cos, ARG1_FP, N, NUM, 0) \
- _(FPMATH, tan, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_powi, ARG1_FP+1, N, NUM, 0) \
- _(FPMATH, pow, ARG1_FP*2, N, NUM, 0) \
- _(FPMATH, atan2, ARG1_FP*2, N, NUM, 0) \
- _(FPMATH, ldexp, ARG1_FP+1, N, NUM, 0) \
- _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \
- _(SOFTFP, softfp_add, 4, N, NUM, 0) \
- _(SOFTFP, softfp_sub, 4, N, NUM, 0) \
- _(SOFTFP, softfp_mul, 4, N, NUM, 0) \
- _(SOFTFP, softfp_div, 4, N, NUM, 0) \
- _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \
- _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
- _(SOFTFP, softfp_d2i, 2, N, INT, 0) \
- _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
- _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
- _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \
- _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \
- _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
- _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
- _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
- _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
- _(FP64_FFI, fp64_l2d, 2, N, NUM, 0) \
- _(FP64_FFI, fp64_ul2d, 2, N, NUM, 0) \
- _(FP64_FFI, fp64_l2f, 2, N, FLOAT, 0) \
- _(FP64_FFI, fp64_ul2f, 2, N, FLOAT, 0) \
- _(FP64_FFI, fp64_d2l, ARG1_FP, N, I64, 0) \
- _(FP64_FFI, fp64_d2ul, ARG1_FP, N, U64, 0) \
- _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
- _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
- _(FFI, lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_cdata_setfin, 2, FN, P32, CCI_L) \
- _(FFI, strlen, 1, L, INTP, 0) \
- _(FFI, memcpy, 3, S, PTR, 0) \
- _(FFI, memset, 3, S, PTR, 0) \
- _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
- _(FFI32, lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER)
- \
- /* End of list. */
-
-typedef enum {
-#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name,
-IRCALLDEF(IRCALLENUM)
-#undef IRCALLENUM
- IRCALL__MAX
-} IRCallID;
-
-LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...);
-
-LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
-
-/* Soft-float declarations. */
-#if LJ_SOFTFP
-#if LJ_TARGET_ARM
-#define softfp_add __aeabi_dadd
-#define softfp_sub __aeabi_dsub
-#define softfp_mul __aeabi_dmul
-#define softfp_div __aeabi_ddiv
-#define softfp_cmp __aeabi_cdcmple
-#define softfp_i2d __aeabi_i2d
-#define softfp_d2i __aeabi_d2iz
-#define softfp_ui2d __aeabi_ui2d
-#define softfp_f2d __aeabi_f2d
-#define softfp_d2ui __aeabi_d2uiz
-#define softfp_d2f __aeabi_d2f
-#define softfp_i2f __aeabi_i2f
-#define softfp_ui2f __aeabi_ui2f
-#define softfp_f2i __aeabi_f2iz
-#define softfp_f2ui __aeabi_f2uiz
-#define fp64_l2d __aeabi_l2d
-#define fp64_ul2d __aeabi_ul2d
-#define fp64_l2f __aeabi_l2f
-#define fp64_ul2f __aeabi_ul2f
-#if LJ_TARGET_IOS
-#define fp64_d2l __fixdfdi
-#define fp64_d2ul __fixunsdfdi
-#define fp64_f2l __fixsfdi
-#define fp64_f2ul __fixunssfdi
-#else
-#define fp64_d2l __aeabi_d2lz
-#define fp64_d2ul __aeabi_d2ulz
-#define fp64_f2l __aeabi_f2lz
-#define fp64_f2ul __aeabi_f2ulz
-#endif
-#else
-#error "Missing soft-float definitions for target architecture"
-#endif
-extern double softfp_add(double a, double b);
-extern double softfp_sub(double a, double b);
-extern double softfp_mul(double a, double b);
-extern double softfp_div(double a, double b);
-extern void softfp_cmp(double a, double b);
-extern double softfp_i2d(int32_t a);
-extern int32_t softfp_d2i(double a);
-#if LJ_HASFFI
-extern double softfp_ui2d(uint32_t a);
-extern double softfp_f2d(float a);
-extern uint32_t softfp_d2ui(double a);
-extern float softfp_d2f(double a);
-extern float softfp_i2f(int32_t a);
-extern float softfp_ui2f(uint32_t a);
-extern int32_t softfp_f2i(float a);
-extern uint32_t softfp_f2ui(float a);
-#endif
-#endif
-
-#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP)
-#ifdef __GNUC__
-#define fp64_l2d __floatdidf
-#define fp64_ul2d __floatundidf
-#define fp64_l2f __floatdisf
-#define fp64_ul2f __floatundisf
-#define fp64_d2l __fixdfdi
-#define fp64_d2ul __fixunsdfdi
-#define fp64_f2l __fixsfdi
-#define fp64_f2ul __fixunssfdi
-#else
-#error "Missing fp64 helper definitions for this compiler"
-#endif
-#endif
-
-#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
-extern double fp64_l2d(int64_t a);
-extern double fp64_ul2d(uint64_t a);
-extern float fp64_l2f(int64_t a);
-extern float fp64_ul2f(uint64_t a);
-extern int64_t fp64_d2l(double a);
-extern uint64_t fp64_d2ul(double a);
-extern int64_t fp64_f2l(float a);
-extern uint64_t fp64_f2ul(float a);
-#endif
-
-#endif
+/*
+** IR CALL* instruction definitions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IRCALL_H
+#define _LJ_IRCALL_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+
+/* C call info for CALL* instructions. */
+typedef struct CCallInfo {
+ ASMFunction func; /* Function pointer. */
+ uint32_t flags; /* Number of arguments and flags. */
+} CCallInfo;
+
+#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */
+#define CCI_NARGS_MAX 32 /* Max. # of args. */
+
+#define CCI_OTSHIFT 16
+#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
+#define CCI_OPSHIFT 24
+#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
+
+#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
+#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
+#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
+#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
+#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL)
+#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL)
+
+/* C call info flags. */
+#define CCI_L 0x0100 /* Implicit L arg. */
+#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */
+#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */
+#define CCI_VARARG 0x0800 /* Vararg function. */
+
+#define CCI_CC_MASK 0x3000 /* Calling convention mask. */
+#define CCI_CC_SHIFT 12
+/* ORDER CC */
+#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */
+#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */
+#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
+#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
+
+/* Helpers for conditional function definitions. */
+#define IRCALLCOND_ANY(x) x
+
+#if LJ_TARGET_X86ORX64
+#define IRCALLCOND_FPMATH(x) NULL
+#else
+#define IRCALLCOND_FPMATH(x) x
+#endif
+
+#if LJ_SOFTFP
+#define IRCALLCOND_SOFTFP(x) x
+#if LJ_HASFFI
+#define IRCALLCOND_SOFTFP_FFI(x) x
+#else
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+#else
+#define IRCALLCOND_SOFTFP(x) NULL
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+
+#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS)
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+#define IRCALLCOND_FP64_FFI(x) x
+#else
+#define IRCALLCOND_FP64_FFI(x) NULL
+#endif
+
+#if LJ_HASFFI
+#define IRCALLCOND_FFI(x) x
+#if LJ_32
+#define IRCALLCOND_FFI32(x) x
+#else
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+#else
+#define IRCALLCOND_FFI(x) NULL
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+
+#if LJ_SOFTFP
+#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */
+#else
+#define ARG1_FP 1
+#endif
+
+#if LJ_32
+#define ARG2_64 4 /* Treat as 4 32 bit arguments. */
+#else
+#define ARG2_64 2
+#endif
+
+/* Function definitions for CALL* instructions. */
+#define IRCALLDEF(_) \
+ _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
+ _(ANY, lj_str_new, 3, S, STR, CCI_L) \
+ _(ANY, lj_strscan_num, 2, FN, INT, 0) \
+ _(ANY, lj_str_fromint, 2, FN, STR, CCI_L) \
+ _(ANY, lj_str_fromnum, 2, FN, STR, CCI_L) \
+ _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \
+ _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \
+ _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \
+ _(ANY, lj_tab_len, 1, FL, INT, 0) \
+ _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
+ _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
+ _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \
+ _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64|CCI_NOFPRCLOBBER) \
+ _(ANY, lj_vm_modi, 2, FN, INT, 0) \
+ _(ANY, sinh, ARG1_FP, N, NUM, 0) \
+ _(ANY, cosh, ARG1_FP, N, NUM, 0) \
+ _(ANY, tanh, ARG1_FP, N, NUM, 0) \
+ _(ANY, fputc, 2, S, INT, 0) \
+ _(ANY, fwrite, 4, S, INT, 0) \
+ _(ANY, fflush, 1, S, INT, 0) \
+ /* ORDER FPM */ \
+ _(FPMATH, lj_vm_floor, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_ceil, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_trunc, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, sqrt, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, exp, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_exp2, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, log, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_log2, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, log10, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, sin, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, cos, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, tan, ARG1_FP, N, NUM, 0) \
+ _(FPMATH, lj_vm_powi, ARG1_FP+1, N, NUM, 0) \
+ _(FPMATH, pow, ARG1_FP*2, N, NUM, 0) \
+ _(FPMATH, atan2, ARG1_FP*2, N, NUM, 0) \
+ _(FPMATH, ldexp, ARG1_FP+1, N, NUM, 0) \
+ _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \
+ _(SOFTFP, softfp_add, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_sub, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_mul, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_div, 4, N, NUM, 0) \
+ _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \
+ _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
+ _(SOFTFP, softfp_d2i, 2, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
+ _(FP64_FFI, fp64_l2d, 2, N, NUM, 0) \
+ _(FP64_FFI, fp64_ul2d, 2, N, NUM, 0) \
+ _(FP64_FFI, fp64_l2f, 2, N, FLOAT, 0) \
+ _(FP64_FFI, fp64_ul2f, 2, N, FLOAT, 0) \
+ _(FP64_FFI, fp64_d2l, ARG1_FP, N, I64, 0) \
+ _(FP64_FFI, fp64_d2ul, ARG1_FP, N, U64, 0) \
+ _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
+ _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
+ _(FFI, lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
+ _(FFI, lj_cdata_setfin, 2, FN, P32, CCI_L) \
+ _(FFI, strlen, 1, L, INTP, 0) \
+ _(FFI, memcpy, 3, S, PTR, 0) \
+ _(FFI, memset, 3, S, PTR, 0) \
+ _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER)
+ \
+ /* End of list. */
+
+typedef enum {
+#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name,
+IRCALLDEF(IRCALLENUM)
+#undef IRCALLENUM
+ IRCALL__MAX
+} IRCallID;
+
+LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...);
+
+LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
+
+/* Soft-float declarations. */
+#if LJ_SOFTFP
+#if LJ_TARGET_ARM
+#define softfp_add __aeabi_dadd
+#define softfp_sub __aeabi_dsub
+#define softfp_mul __aeabi_dmul
+#define softfp_div __aeabi_ddiv
+#define softfp_cmp __aeabi_cdcmple
+#define softfp_i2d __aeabi_i2d
+#define softfp_d2i __aeabi_d2iz
+#define softfp_ui2d __aeabi_ui2d
+#define softfp_f2d __aeabi_f2d
+#define softfp_d2ui __aeabi_d2uiz
+#define softfp_d2f __aeabi_d2f
+#define softfp_i2f __aeabi_i2f
+#define softfp_ui2f __aeabi_ui2f
+#define softfp_f2i __aeabi_f2iz
+#define softfp_f2ui __aeabi_f2uiz
+#define fp64_l2d __aeabi_l2d
+#define fp64_ul2d __aeabi_ul2d
+#define fp64_l2f __aeabi_l2f
+#define fp64_ul2f __aeabi_ul2f
+#if LJ_TARGET_IOS
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#define fp64_d2l __aeabi_d2lz
+#define fp64_d2ul __aeabi_d2ulz
+#define fp64_f2l __aeabi_f2lz
+#define fp64_f2ul __aeabi_f2ulz
+#endif
+#else
+#error "Missing soft-float definitions for target architecture"
+#endif
+extern double softfp_add(double a, double b);
+extern double softfp_sub(double a, double b);
+extern double softfp_mul(double a, double b);
+extern double softfp_div(double a, double b);
+extern void softfp_cmp(double a, double b);
+extern double softfp_i2d(int32_t a);
+extern int32_t softfp_d2i(double a);
+#if LJ_HASFFI
+extern double softfp_ui2d(uint32_t a);
+extern double softfp_f2d(float a);
+extern uint32_t softfp_d2ui(double a);
+extern float softfp_d2f(double a);
+extern float softfp_i2f(int32_t a);
+extern float softfp_ui2f(uint32_t a);
+extern int32_t softfp_f2i(float a);
+extern uint32_t softfp_f2ui(float a);
+#endif
+#endif
+
+#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP)
+#ifdef __GNUC__
+#define fp64_l2d __floatdidf
+#define fp64_ul2d __floatundidf
+#define fp64_l2f __floatdisf
+#define fp64_ul2f __floatundisf
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#error "Missing fp64 helper definitions for this compiler"
+#endif
+#endif
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+extern double fp64_l2d(int64_t a);
+extern double fp64_ul2d(uint64_t a);
+extern float fp64_l2f(int64_t a);
+extern float fp64_ul2f(uint64_t a);
+extern int64_t fp64_d2l(double a);
+extern uint64_t fp64_d2ul(double a);
+extern int64_t fp64_f2l(float a);
+extern uint64_t fp64_f2ul(float a);
+#endif
+
+#endif
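
Before the next file, a standalone sketch (not part of the diff) of how the CCallInfo flags word above is decoded: the argument count sits in the low byte and the CALL* opcode in the top byte, which is exactly what CCI_NARGS() and CCI_OP() extract. The opcode value below is a placeholder, since the real IR_CALLN number comes from lj_ir.h and the actual population of lj_ir_callinfo[] lives in lj_ir.c.

/* Standalone sketch: pack and decode a CCallInfo flags word. */
#include <assert.h>
#include <stdint.h>

typedef struct CCallInfo { void (*func)(void); uint32_t flags; } CCallInfo;

#define CCI_NARGS(ci)    ((ci)->flags & 0xff)
#define CCI_OPSHIFT      24
#define CCI_OP(ci)       ((ci)->flags >> CCI_OPSHIFT)
#define CCI_NOFPRCLOBBER 0x0400

enum { IR_CALLN = 0x5d };              /* Illustrative opcode value only. */
#define CCI_CALL_N       ((uint32_t)IR_CALLN << CCI_OPSHIFT)

int main(void)
{
  /* Roughly how an entry with two args, plain CALLN kind and the
  ** CCI_NOFPRCLOBBER flag would be packed into the flags word. */
  CCallInfo ci = { 0, 2u | CCI_CALL_N | CCI_NOFPRCLOBBER };
  assert(CCI_NARGS(&ci) == 2);
  assert(CCI_OP(&ci) == IR_CALLN);
  return 0;
}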
diff --git a/3rdparty/lua/src/lj_iropt.h b/3rdparty/lua/src/lj_iropt.h
index 9a1f43f..a74f018 100644
--- a/3rdparty/lua/src/lj_iropt.h
+++ b/3rdparty/lua/src/lj_iropt.h
@@ -1,161 +1,161 @@
-/*
-** Common header for IR emitter and optimizations.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_IROPT_H
-#define _LJ_IROPT_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-/* IR emitter. */
-LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);
-
-/* Save current IR in J->fold.ins, but do not emit it (yet). */
-static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
-{
- J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
-}
-
-#define lj_ir_set(J, ot, a, b) \
- lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))
-
-/* Get ref of next IR instruction and optionally grow IR.
-** Note: this may invalidate all IRIns*!
-*/
-static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
-{
- IRRef ref = J->cur.nins;
- if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
- J->cur.nins = ref + 1;
- return ref;
-}
-
-/* Interning of constants. */
-LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
-LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
-LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv);
-LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
-LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
-LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
-LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
-LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
-LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
-LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
-LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
-
-#if LJ_64
-#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
-#else
-#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k))
-#endif
-
-static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
-{
- TValue tv;
- tv.n = n;
- return lj_ir_knum_u64(J, tv.u64);
-}
-
-#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR)
-#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
-#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
-#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr))
-#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr))
-
-/* Special FP constants. */
-#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000))
-#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000))
-#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
-
-/* Special 128 bit SIMD constants. */
-#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS))
-#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG))
-
-/* Access to constants. */
-LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
-
-/* Convert IR operand types. */
-LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr);
-LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
-LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
-
-/* Miscellaneous IR ops. */
-LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
-LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
-LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);
-
-/* Emit IR instructions with on-the-fly optimizations. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim);
-
-/* Special return values for the fold functions. */
-enum {
- NEXTFOLD, /* Couldn't fold, pass on. */
- RETRYFOLD, /* Retry fold with modified fins. */
- KINTFOLD, /* Return ref for int constant in fins->i. */
- FAILFOLD, /* Guard would always fail. */
- DROPFOLD, /* Guard eliminated. */
- MAX_FOLD
-};
-
-#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
-#define INT64FOLD(k) (lj_ir_kint64(J, (k)))
-#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond))
-#define LEFTFOLD (J->fold.ins.op1)
-#define RIGHTFOLD (J->fold.ins.op2)
-#define CSEFOLD (lj_opt_cse(J))
-#define EMITFOLD (lj_ir_emit(J))
-
-/* Load/store forwarding. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J);
-LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
-LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
-LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
-
-/* Dead-store elimination. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J);
-
-/* Narrowing. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr);
-#if LJ_HASFFI
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key);
-#endif
-LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
- TValue *vb, TValue *vc, IROp op);
-LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
-LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc);
-LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc);
-LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
-
-/* Optimization passes. */
-LJ_FUNC void lj_opt_dce(jit_State *J);
-LJ_FUNC int lj_opt_loop(jit_State *J);
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
-LJ_FUNC void lj_opt_split(jit_State *J);
-#else
-#define lj_opt_split(J) UNUSED(J)
-#endif
-LJ_FUNC void lj_opt_sink(jit_State *J);
-
-#endif
-
-#endif
+/*
+** Common header for IR emitter and optimizations.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IROPT_H
+#define _LJ_IROPT_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* IR emitter. */
+LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);
+
+/* Save current IR in J->fold.ins, but do not emit it (yet). */
+static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
+{
+ J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
+}
+
+#define lj_ir_set(J, ot, a, b) \
+ lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))
+
+/* Get ref of next IR instruction and optionally grow IR.
+** Note: this may invalidate all IRIns*!
+*/
+static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
+{
+ IRRef ref = J->cur.nins;
+ if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
+ J->cur.nins = ref + 1;
+ return ref;
+}
+
+/* Interning of constants. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
+LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
+LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv);
+LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
+LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
+LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
+LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
+LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
+
+#if LJ_64
+#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
+#else
+#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k))
+#endif
+
+static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
+{
+ TValue tv;
+ tv.n = n;
+ return lj_ir_knum_u64(J, tv.u64);
+}
+
+#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR)
+#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
+#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
+#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr))
+#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr))
+
+/* Special FP constants. */
+#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000))
+#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000))
+#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
+
+/* Special 128 bit SIMD constants. */
+#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS))
+#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG))
+
+/* Access to constants. */
+LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
+
+/* Convert IR operand types. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
+
+/* Miscellaneous IR ops. */
+LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
+LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
+LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);
+
+/* Emit IR instructions with on-the-fly optimizations. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim);
+
+/* Special return values for the fold functions. */
+enum {
+ NEXTFOLD, /* Couldn't fold, pass on. */
+ RETRYFOLD, /* Retry fold with modified fins. */
+ KINTFOLD, /* Return ref for int constant in fins->i. */
+ FAILFOLD, /* Guard would always fail. */
+ DROPFOLD, /* Guard eliminated. */
+ MAX_FOLD
+};
+
+#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
+#define INT64FOLD(k) (lj_ir_kint64(J, (k)))
+#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond))
+#define LEFTFOLD (J->fold.ins.op1)
+#define RIGHTFOLD (J->fold.ins.op2)
+#define CSEFOLD (lj_opt_cse(J))
+#define EMITFOLD (lj_ir_emit(J))
+
+/* Load/store forwarding. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
+LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
+
+/* Dead-store elimination. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J);
+
+/* Narrowing. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr);
+#if LJ_HASFFI
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key);
+#endif
+LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+ TValue *vb, TValue *vc, IROp op);
+LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc);
+LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
+
+/* Optimization passes. */
+LJ_FUNC void lj_opt_dce(jit_State *J);
+LJ_FUNC int lj_opt_loop(jit_State *J);
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+LJ_FUNC void lj_opt_split(jit_State *J);
+#else
+#define lj_opt_split(J) UNUSED(J)
+#endif
+LJ_FUNC void lj_opt_sink(jit_State *J);
+
+#endif
+
+#endif
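
One more standalone sketch (again not part of the diff): the special FP constants lj_ir_knum_one() and lj_ir_knum_tobit() above intern fixed 64 bit patterns, which are simply the IEEE-754 images of 1.0 and 2^52+2^51. The check below reproduces the number-to-bits reinterpretation that the lj_ir_knum() inline performs through its TValue union, here done portably via memcpy.

/* Standalone sketch: verify the knum_one and knum_tobit bit patterns. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint64_t num_to_u64(double n)
{
  uint64_t u;                          /* memcpy is the portable type pun. */
  memcpy(&u, &n, sizeof(u));
  return u;
}

int main(void)
{
  assert(num_to_u64(1.0) == 0x3ff0000000000000ULL);           /* knum_one */
  assert(num_to_u64(4503599627370496.0 + 2251799813685248.0)  /* 2^52+2^51 */
         == 0x4338000000000000ULL);                           /* knum_tobit */
  return 0;
}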
diff --git a/3rdparty/lua/src/lj_jit.h b/3rdparty/lua/src/lj_jit.h
index c67204e..c0b1c41 100644
--- a/3rdparty/lua/src/lj_jit.h
+++ b/3rdparty/lua/src/lj_jit.h
@@ -1,417 +1,416 @@
-/*
-** Common definitions for the JIT compiler.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_JIT_H
-#define _LJ_JIT_H
-
-#include "lj_obj.h"
-#include "lj_ir.h"
-
-/* JIT engine flags. */
-#define JIT_F_ON 0x00000001
-
-/* CPU-specific JIT engine flags. */
-#if LJ_TARGET_X86ORX64
-#define JIT_F_CMOV 0x00000010
-#define JIT_F_SSE2 0x00000020
-#define JIT_F_SSE3 0x00000040
-#define JIT_F_SSE4_1 0x00000080
-#define JIT_F_P4 0x00000100
-#define JIT_F_PREFER_IMUL 0x00000200
-#define JIT_F_SPLIT_XMM 0x00000400
-#define JIT_F_LEA_AGU 0x00000800
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_CMOV
-#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
-#elif LJ_TARGET_ARM
-#define JIT_F_ARMV6_ 0x00000010
-#define JIT_F_ARMV6T2_ 0x00000020
-#define JIT_F_ARMV7 0x00000040
-#define JIT_F_VFPV2 0x00000080
-#define JIT_F_VFPV3 0x00000100
-
-#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7)
-#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7)
-#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_ARMV6_
-#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3"
-#elif LJ_TARGET_PPC
-#define JIT_F_SQRT 0x00000010
-#define JIT_F_ROUND 0x00000020
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_SQRT
-#define JIT_F_CPUSTRING "\4SQRT\5ROUND"
-#elif LJ_TARGET_MIPS
-#define JIT_F_MIPS32R2 0x00000010
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_MIPS32R2
-#define JIT_F_CPUSTRING "\010MIPS32R2"
-#else
-#define JIT_F_CPU_FIRST 0
-#define JIT_F_CPUSTRING ""
-#endif
-
-/* Optimization flags. */
-#define JIT_F_OPT_MASK 0x0fff0000
-
-#define JIT_F_OPT_FOLD 0x00010000
-#define JIT_F_OPT_CSE 0x00020000
-#define JIT_F_OPT_DCE 0x00040000
-#define JIT_F_OPT_FWD 0x00080000
-#define JIT_F_OPT_DSE 0x00100000
-#define JIT_F_OPT_NARROW 0x00200000
-#define JIT_F_OPT_LOOP 0x00400000
-#define JIT_F_OPT_ABC 0x00800000
-#define JIT_F_OPT_SINK 0x01000000
-#define JIT_F_OPT_FUSE 0x02000000
-
-/* Optimizations names for -O. Must match the order above. */
-#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD
-#define JIT_F_OPTSTRING \
- "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
-
-/* Optimization levels set a fixed combination of flags. */
-#define JIT_F_OPT_0 0
-#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
-#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
-#define JIT_F_OPT_3 (JIT_F_OPT_2|\
- JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
-#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
-
-#if LJ_TARGET_WINDOWS || LJ_64
-/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
-#define JIT_P_sizemcode_DEFAULT 64
-#else
-/* Could go as low as 4K, but the mmap() overhead would be rather high. */
-#define JIT_P_sizemcode_DEFAULT 32
-#endif
-
-/* Optimization parameters and their defaults. Length is a char in octal! */
-#define JIT_PARAMDEF(_) \
- _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
- _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
- _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
- _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
- _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
- \
- _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
- _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
- _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
- \
- _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \
- _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
- _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
- _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
- \
- /* Size of each machine code area (in KBytes). */ \
- _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
- /* Max. total size of all machine code areas (in KBytes). */ \
- _(\010, maxmcode, 512) \
- /* End of list. */
-
-enum {
-#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
-JIT_PARAMDEF(JIT_PARAMENUM)
-#undef JIT_PARAMENUM
- JIT_P__MAX
-};
-
-#define JIT_PARAMSTR(len, name, value) #len #name
-#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
-
-/* Trace compiler state. */
-typedef enum {
- LJ_TRACE_IDLE, /* Trace compiler idle. */
- LJ_TRACE_ACTIVE = 0x10,
- LJ_TRACE_RECORD, /* Bytecode recording active. */
- LJ_TRACE_START, /* New trace started. */
- LJ_TRACE_END, /* End of trace. */
- LJ_TRACE_ASM, /* Assemble trace. */
- LJ_TRACE_ERR /* Trace aborted with error. */
-} TraceState;
-
-/* Post-processing action. */
-typedef enum {
- LJ_POST_NONE, /* No action. */
- LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
- LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
- LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
- LJ_POST_FIXBOOL, /* Fixup boolean result. */
- LJ_POST_FIXCONST, /* Fixup constant results. */
- LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
-} PostProc;
-
-/* Machine code type. */
-#if LJ_TARGET_X86ORX64
-typedef uint8_t MCode;
-#else
-typedef uint32_t MCode;
-#endif
-
-/* Stack snapshot header. */
-typedef struct SnapShot {
- uint16_t mapofs; /* Offset into snapshot map. */
- IRRef1 ref; /* First IR ref for this snapshot. */
- uint8_t nslots; /* Number of valid slots. */
- uint8_t topslot; /* Maximum frame extent. */
- uint8_t nent; /* Number of compressed entries. */
- uint8_t count; /* Count of taken exits for this snapshot. */
-} SnapShot;
-
-#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */
-
-/* Compressed snapshot entry. */
-typedef uint32_t SnapEntry;
-
-#define SNAP_FRAME 0x010000 /* Frame slot. */
-#define SNAP_CONT 0x020000 /* Continuation slot. */
-#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
-#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
-LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
-LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
-
-#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
-#define SNAP_TR(slot, tr) \
- (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
-#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
-#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
-#define snap_ref(sn) ((sn) & 0xffff)
-#define snap_slot(sn) ((BCReg)((sn) >> 24))
-#define snap_isframe(sn) ((sn) & SNAP_FRAME)
-#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn))
-#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
-
-/* Snapshot and exit numbers. */
-typedef uint32_t SnapNo;
-typedef uint32_t ExitNo;
-
-/* Trace number. */
-typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
-typedef uint16_t TraceNo1; /* Stored trace number. */
-
-/* Type of link. ORDER LJ_TRLINK */
-typedef enum {
- LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
- LJ_TRLINK_ROOT, /* Link to other root trace. */
- LJ_TRLINK_LOOP, /* Loop to same trace. */
- LJ_TRLINK_TAILREC, /* Tail-recursion. */
- LJ_TRLINK_UPREC, /* Up-recursion. */
- LJ_TRLINK_DOWNREC, /* Down-recursion. */
- LJ_TRLINK_INTERP, /* Fallback to interpreter. */
- LJ_TRLINK_RETURN /* Return to interpreter. */
-} TraceLink;
-
-/* Trace object. */
-typedef struct GCtrace {
- GCHeader;
- uint8_t topslot; /* Top stack slot already checked to be allocated. */
- uint8_t linktype; /* Type of link. */
- IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
- GCRef gclist;
- IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
- IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
- uint16_t nsnap; /* Number of snapshots. */
- uint16_t nsnapmap; /* Number of snapshot map elements. */
- SnapShot *snap; /* Snapshot array. */
- SnapEntry *snapmap; /* Snapshot map. */
- GCRef startpt; /* Starting prototype. */
- MRef startpc; /* Bytecode PC of starting instruction. */
- BCIns startins; /* Original bytecode of starting instruction. */
- MSize szmcode; /* Size of machine code. */
- MCode *mcode; /* Start of machine code. */
- MSize mcloop; /* Offset of loop start in machine code. */
- uint16_t nchild; /* Number of child traces (root trace only). */
- uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
- TraceNo1 traceno; /* Trace number. */
- TraceNo1 link; /* Linked trace (or self for loops). */
- TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
- TraceNo1 nextroot; /* Next root trace for same prototype. */
- TraceNo1 nextside; /* Next side trace of same root trace. */
- uint8_t sinktags; /* Trace has SINK tags. */
- uint8_t unused1;
-#ifdef LUAJIT_USE_GDBJIT
- void *gdbjit_entry; /* GDB JIT entry. */
-#endif
-} GCtrace;
-
-#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
-#define traceref(J, n) \
- check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
-
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
-
-static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
-{
- if (snap+1 == &T->snap[T->nsnap])
- return T->nsnapmap;
- else
- return (snap+1)->mapofs;
-}
-
-/* Round-robin penalty cache for bytecodes leading to aborted traces. */
-typedef struct HotPenalty {
- MRef pc; /* Starting bytecode PC. */
- uint16_t val; /* Penalty value, i.e. hotcount start. */
- uint16_t reason; /* Abort reason (really TraceErr). */
-} HotPenalty;
-
-#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */
-#define PENALTY_MIN (36*2) /* Minimum penalty value. */
-#define PENALTY_MAX 60000 /* Maximum penalty value. */
-#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */
-
-/* Round-robin backpropagation cache for narrowing conversions. */
-typedef struct BPropEntry {
- IRRef1 key; /* Key: original reference. */
- IRRef1 val; /* Value: reference after conversion. */
- IRRef mode; /* Mode for this entry (currently IRCONV_*). */
-} BPropEntry;
-
-/* Number of slots for the backpropagation cache. Must be a power of 2. */
-#define BPROP_SLOTS 16
-
-/* Scalar evolution analysis cache. */
-typedef struct ScEvEntry {
- MRef pc; /* Bytecode PC of FORI. */
- IRRef1 idx; /* Index reference. */
- IRRef1 start; /* Constant start reference. */
- IRRef1 stop; /* Constant stop reference. */
- IRRef1 step; /* Constant step reference. */
- IRType1 t; /* Scalar type. */
- uint8_t dir; /* Direction. 1: +, 0: -. */
-} ScEvEntry;
-
-/* 128 bit SIMD constants. */
-enum {
- LJ_KSIMD_ABS,
- LJ_KSIMD_NEG,
- LJ_KSIMD__MAX
-};
-
-/* Get 16 byte aligned pointer to SIMD constant. */
-#define LJ_KSIMD(J, n) \
- ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
-
-/* Set/reset flag to activate the SPLIT pass for the current trace. */
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
-#define lj_needsplit(J) (J->needsplit = 1)
-#define lj_resetsplit(J) (J->needsplit = 0)
-#else
-#define lj_needsplit(J) UNUSED(J)
-#define lj_resetsplit(J) UNUSED(J)
-#endif
-
-/* Fold state is used to fold instructions on-the-fly. */
-typedef struct FoldState {
- IRIns ins; /* Currently emitted instruction. */
- IRIns left; /* Instruction referenced by left operand. */
- IRIns right; /* Instruction referenced by right operand. */
-} FoldState;
-
-/* JIT compiler state. */
-typedef struct jit_State {
- GCtrace cur; /* Current trace. */
-
- lua_State *L; /* Current Lua state. */
- const BCIns *pc; /* Current PC. */
- GCfunc *fn; /* Current function. */
- GCproto *pt; /* Current prototype. */
- TRef *base; /* Current frame base, points into J->slots. */
-
- uint32_t flags; /* JIT engine flags. */
- BCReg maxslot; /* Relative to baseslot. */
- BCReg baseslot; /* Current frame base, offset into J->slots. */
-
- uint8_t mergesnap; /* Allowed to merge with next snapshot. */
- uint8_t needsnap; /* Need snapshot before recording next bytecode. */
- IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
- uint8_t bcskip; /* Number of bytecode instructions to skip. */
-
- FoldState fold; /* Fold state. */
-
- const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
- MSize bc_extent; /* Extent of the range. */
-
- TraceState state; /* Trace compiler state. */
-
- int32_t instunroll; /* Unroll counter for instable loops. */
- int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
- int32_t tailcalled; /* Number of successive tailcalls. */
- int32_t framedepth; /* Current frame depth. */
- int32_t retdepth; /* Return frame depth (count of RETF). */
-
- MRef k64; /* Pointer to chained array of 64 bit constants. */
- TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
-
- IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
- IRRef irtoplim; /* Upper limit of instuction buffer (biased). */
- IRRef irbotlim; /* Lower limit of instuction buffer (biased). */
- IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */
-
- MSize sizesnap; /* Size of temp. snapshot buffer. */
- SnapShot *snapbuf; /* Temp. snapshot buffer. */
- SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
- MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
-
- PostProc postproc; /* Required post-processing after execution. */
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
- int needsplit; /* Need SPLIT pass. */
-#endif
-
- GCRef *trace; /* Array of traces. */
- TraceNo freetrace; /* Start of scan for next free trace. */
- MSize sizetrace; /* Size of trace array. */
-
- IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
- TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
-
- int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
-
- MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
-
- HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
- uint32_t penaltyslot; /* Round-robin index into penalty slots. */
- uint32_t prngstate; /* PRNG state. */
-
- BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
- uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
-
- ScEvEntry scev; /* Scalar evolution analysis cache slots. */
-
- const BCIns *startpc; /* Bytecode PC of starting instruction. */
- TraceNo parent; /* Parent of current side trace (0 for root traces). */
- ExitNo exitno; /* Exit number in parent of current side trace. */
-
- BCIns *patchpc; /* PC for pending re-patch. */
- BCIns patchins; /* Instruction for pending re-patch. */
-
- int mcprot; /* Protection of current mcode area. */
- MCode *mcarea; /* Base of current mcode area. */
- MCode *mctop; /* Top of current mcode area. */
- MCode *mcbot; /* Bottom of current mcode area. */
- size_t szmcarea; /* Size of current mcode area. */
- size_t szallmcarea; /* Total size of all allocated mcode areas. */
-
- TValue errinfo; /* Additional info element for trace errors. */
-}
-#if LJ_TARGET_ARM
-LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */
-#endif
-jit_State;
-
-/* Trivial PRNG e.g. used for penalty randomization. */
-static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
-{
- /* Yes, this LCG is very weak, but that doesn't matter for our use case. */
- J->prngstate = J->prngstate * 1103515245 + 12345;
- return J->prngstate >> (32-bits);
-}
-
-#endif
+/*
+** Common definitions for the JIT compiler.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_JIT_H
+#define _LJ_JIT_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+
+/* JIT engine flags. */
+#define JIT_F_ON 0x00000001
+
+/* CPU-specific JIT engine flags. */
+#if LJ_TARGET_X86ORX64
+#define JIT_F_CMOV 0x00000010
+#define JIT_F_SSE2 0x00000020
+#define JIT_F_SSE3 0x00000040
+#define JIT_F_SSE4_1 0x00000080
+#define JIT_F_P4 0x00000100
+#define JIT_F_PREFER_IMUL 0x00000200
+#define JIT_F_SPLIT_XMM 0x00000400
+#define JIT_F_LEA_AGU 0x00000800
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_CMOV
+#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
+#elif LJ_TARGET_ARM
+#define JIT_F_ARMV6_ 0x00000010
+#define JIT_F_ARMV6T2_ 0x00000020
+#define JIT_F_ARMV7 0x00000040
+#define JIT_F_VFPV2 0x00000080
+#define JIT_F_VFPV3 0x00000100
+
+#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7)
+#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7)
+#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_ARMV6_
+#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3"
+#elif LJ_TARGET_PPC
+#define JIT_F_SQRT 0x00000010
+#define JIT_F_ROUND 0x00000020
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_SQRT
+#define JIT_F_CPUSTRING "\4SQRT\5ROUND"
+#elif LJ_TARGET_MIPS
+#define JIT_F_MIPS32R2 0x00000010
+
+/* Names for the CPU-specific flags. Must match the order above. */
+#define JIT_F_CPU_FIRST JIT_F_MIPS32R2
+#define JIT_F_CPUSTRING "\010MIPS32R2"
+#else
+#define JIT_F_CPU_FIRST 0
+#define JIT_F_CPUSTRING ""
+#endif
+
+/* Optimization flags. */
+#define JIT_F_OPT_MASK 0x0fff0000
+
+#define JIT_F_OPT_FOLD 0x00010000
+#define JIT_F_OPT_CSE 0x00020000
+#define JIT_F_OPT_DCE 0x00040000
+#define JIT_F_OPT_FWD 0x00080000
+#define JIT_F_OPT_DSE 0x00100000
+#define JIT_F_OPT_NARROW 0x00200000
+#define JIT_F_OPT_LOOP 0x00400000
+#define JIT_F_OPT_ABC 0x00800000
+#define JIT_F_OPT_SINK 0x01000000
+#define JIT_F_OPT_FUSE 0x02000000
+
+/* Optimizations names for -O. Must match the order above. */
+#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD
+#define JIT_F_OPTSTRING \
+ "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
+
+/* Optimization levels set a fixed combination of flags. */
+#define JIT_F_OPT_0 0
+#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
+#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
+#define JIT_F_OPT_3 (JIT_F_OPT_2|\
+ JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
+#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
+
+#if LJ_TARGET_WINDOWS || LJ_64
+/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
+#define JIT_P_sizemcode_DEFAULT 64
+#else
+/* Could go as low as 4K, but the mmap() overhead would be rather high. */
+#define JIT_P_sizemcode_DEFAULT 32
+#endif
+
+/* Optimization parameters and their defaults. Length is a char in octal! */
+#define JIT_PARAMDEF(_) \
+ _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
+ _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
+ _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
+ _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
+ _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
+ \
+ _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
+ _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
+ _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
+ \
+ _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \
+ _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
+ _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
+ _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
+ \
+ /* Size of each machine code area (in KBytes). */ \
+ _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
+ /* Max. total size of all machine code areas (in KBytes). */ \
+ _(\010, maxmcode, 512) \
+ /* End of list. */
+
+enum {
+#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
+JIT_PARAMDEF(JIT_PARAMENUM)
+#undef JIT_PARAMENUM
+ JIT_P__MAX
+};
+
+#define JIT_PARAMSTR(len, name, value) #len #name
+#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
+
+/* Trace compiler state. */
+typedef enum {
+ LJ_TRACE_IDLE, /* Trace compiler idle. */
+ LJ_TRACE_ACTIVE = 0x10,
+ LJ_TRACE_RECORD, /* Bytecode recording active. */
+ LJ_TRACE_START, /* New trace started. */
+ LJ_TRACE_END, /* End of trace. */
+ LJ_TRACE_ASM, /* Assemble trace. */
+ LJ_TRACE_ERR /* Trace aborted with error. */
+} TraceState;
+
+/* Post-processing action. */
+typedef enum {
+ LJ_POST_NONE, /* No action. */
+ LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
+ LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
+ LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
+ LJ_POST_FIXBOOL, /* Fixup boolean result. */
+ LJ_POST_FIXCONST, /* Fixup constant results. */
+ LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
+} PostProc;
+
+/* Machine code type. */
+#if LJ_TARGET_X86ORX64
+typedef uint8_t MCode;
+#else
+typedef uint32_t MCode;
+#endif
+
+/* Stack snapshot header. */
+typedef struct SnapShot {
+ uint16_t mapofs; /* Offset into snapshot map. */
+ IRRef1 ref; /* First IR ref for this snapshot. */
+ uint8_t nslots; /* Number of valid slots. */
+ uint8_t topslot; /* Maximum frame extent. */
+ uint8_t nent; /* Number of compressed entries. */
+ uint8_t count; /* Count of taken exits for this snapshot. */
+} SnapShot;
+
+#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */
+
+/* Compressed snapshot entry. */
+typedef uint32_t SnapEntry;
+
+#define SNAP_FRAME 0x010000 /* Frame slot. */
+#define SNAP_CONT 0x020000 /* Continuation slot. */
+#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
+#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
+LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
+LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
+
+#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
+#define SNAP_TR(slot, tr) \
+ (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
+#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
+#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
+#define snap_ref(sn) ((sn) & 0xffff)
+#define snap_slot(sn) ((BCReg)((sn) >> 24))
+#define snap_isframe(sn) ((sn) & SNAP_FRAME)
+#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn))
+#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
+
+/* Snapshot and exit numbers. */
+typedef uint32_t SnapNo;
+typedef uint32_t ExitNo;
+
+/* Trace number. */
+typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
+typedef uint16_t TraceNo1; /* Stored trace number. */
+
+/* Type of link. ORDER LJ_TRLINK */
+typedef enum {
+ LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
+ LJ_TRLINK_ROOT, /* Link to other root trace. */
+ LJ_TRLINK_LOOP, /* Loop to same trace. */
+ LJ_TRLINK_TAILREC, /* Tail-recursion. */
+ LJ_TRLINK_UPREC, /* Up-recursion. */
+ LJ_TRLINK_DOWNREC, /* Down-recursion. */
+ LJ_TRLINK_INTERP, /* Fallback to interpreter. */
+ LJ_TRLINK_RETURN /* Return to interpreter. */
+} TraceLink;
+
+/* Trace object. */
+typedef struct GCtrace {
+ GCHeader;
+ uint8_t topslot; /* Top stack slot already checked to be allocated. */
+ uint8_t linktype; /* Type of link. */
+ IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
+ GCRef gclist;
+ IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
+ IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
+ uint16_t nsnap; /* Number of snapshots. */
+ uint16_t nsnapmap; /* Number of snapshot map elements. */
+ SnapShot *snap; /* Snapshot array. */
+ SnapEntry *snapmap; /* Snapshot map. */
+ GCRef startpt; /* Starting prototype. */
+ MRef startpc; /* Bytecode PC of starting instruction. */
+ BCIns startins; /* Original bytecode of starting instruction. */
+ MSize szmcode; /* Size of machine code. */
+ MCode *mcode; /* Start of machine code. */
+ MSize mcloop; /* Offset of loop start in machine code. */
+ uint16_t nchild; /* Number of child traces (root trace only). */
+ uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
+ TraceNo1 traceno; /* Trace number. */
+ TraceNo1 link; /* Linked trace (or self for loops). */
+ TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
+ TraceNo1 nextroot; /* Next root trace for same prototype. */
+ TraceNo1 nextside; /* Next side trace of same root trace. */
+ uint8_t sinktags; /* Trace has SINK tags. */
+ uint8_t unused1;
+#ifdef LUAJIT_USE_GDBJIT
+ void *gdbjit_entry; /* GDB JIT entry. */
+#endif
+} GCtrace;
+
+#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
+#define traceref(J, n) \
+ check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
+
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
+
+static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
+{
+ if (snap+1 == &T->snap[T->nsnap])
+ return T->nsnapmap;
+ else
+ return (snap+1)->mapofs;
+}
+
+/* Round-robin penalty cache for bytecodes leading to aborted traces. */
+typedef struct HotPenalty {
+ MRef pc; /* Starting bytecode PC. */
+ uint16_t val; /* Penalty value, i.e. hotcount start. */
+ uint16_t reason; /* Abort reason (really TraceErr). */
+} HotPenalty;
+
+#define PENALTY_SLOTS	64	/* Penalty cache slots. Must be a power of 2. */
+#define PENALTY_MIN (36*2) /* Minimum penalty value. */
+#define PENALTY_MAX 60000 /* Maximum penalty value. */
+#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */
+
+/* Round-robin backpropagation cache for narrowing conversions. */
+typedef struct BPropEntry {
+ IRRef1 key; /* Key: original reference. */
+ IRRef1 val; /* Value: reference after conversion. */
+ IRRef mode; /* Mode for this entry (currently IRCONV_*). */
+} BPropEntry;
+
+/* Number of slots for the backpropagation cache. Must be a power of 2. */
+#define BPROP_SLOTS 16
+
+/* Scalar evolution analysis cache. */
+typedef struct ScEvEntry {
+ IRRef1 idx; /* Index reference. */
+ IRRef1 start; /* Constant start reference. */
+ IRRef1 stop; /* Constant stop reference. */
+ IRRef1 step; /* Constant step reference. */
+ IRType1 t; /* Scalar type. */
+ uint8_t dir; /* Direction. 1: +, 0: -. */
+} ScEvEntry;
+
+/* 128 bit SIMD constants. */
+enum {
+ LJ_KSIMD_ABS,
+ LJ_KSIMD_NEG,
+ LJ_KSIMD__MAX
+};
+
+/* Get 16 byte aligned pointer to SIMD constant. */
+#define LJ_KSIMD(J, n) \
+ ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
+
+/* Set/reset flag to activate the SPLIT pass for the current trace. */
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+#define lj_needsplit(J) (J->needsplit = 1)
+#define lj_resetsplit(J) (J->needsplit = 0)
+#else
+#define lj_needsplit(J) UNUSED(J)
+#define lj_resetsplit(J) UNUSED(J)
+#endif
+
+/* Fold state is used to fold instructions on-the-fly. */
+typedef struct FoldState {
+ IRIns ins; /* Currently emitted instruction. */
+ IRIns left; /* Instruction referenced by left operand. */
+ IRIns right; /* Instruction referenced by right operand. */
+} FoldState;
+
+/* JIT compiler state. */
+typedef struct jit_State {
+ GCtrace cur; /* Current trace. */
+
+ lua_State *L; /* Current Lua state. */
+ const BCIns *pc; /* Current PC. */
+ GCfunc *fn; /* Current function. */
+ GCproto *pt; /* Current prototype. */
+ TRef *base; /* Current frame base, points into J->slots. */
+
+ uint32_t flags; /* JIT engine flags. */
+ BCReg maxslot; /* Relative to baseslot. */
+ BCReg baseslot; /* Current frame base, offset into J->slots. */
+
+ uint8_t mergesnap; /* Allowed to merge with next snapshot. */
+ uint8_t needsnap; /* Need snapshot before recording next bytecode. */
+ IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
+ uint8_t bcskip; /* Number of bytecode instructions to skip. */
+
+ FoldState fold; /* Fold state. */
+
+ const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
+ MSize bc_extent; /* Extent of the range. */
+
+ TraceState state; /* Trace compiler state. */
+
+  int32_t instunroll;	/* Unroll counter for unstable loops. */
+ int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
+ int32_t tailcalled; /* Number of successive tailcalls. */
+ int32_t framedepth; /* Current frame depth. */
+ int32_t retdepth; /* Return frame depth (count of RETF). */
+
+ MRef k64; /* Pointer to chained array of 64 bit constants. */
+ TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
+
+ IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
+  IRRef irtoplim;	/* Upper limit of instruction buffer (biased). */
+  IRRef irbotlim;	/* Lower limit of instruction buffer (biased). */
+ IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */
+
+ MSize sizesnap; /* Size of temp. snapshot buffer. */
+ SnapShot *snapbuf; /* Temp. snapshot buffer. */
+ SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
+ MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
+
+ PostProc postproc; /* Required post-processing after execution. */
+#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
+ int needsplit; /* Need SPLIT pass. */
+#endif
+
+ GCRef *trace; /* Array of traces. */
+ TraceNo freetrace; /* Start of scan for next free trace. */
+ MSize sizetrace; /* Size of trace array. */
+
+ IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
+ TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
+
+ int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
+
+ MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
+
+ HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
+ uint32_t penaltyslot; /* Round-robin index into penalty slots. */
+ uint32_t prngstate; /* PRNG state. */
+
+ BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
+ uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
+
+ ScEvEntry scev; /* Scalar evolution analysis cache slots. */
+
+ const BCIns *startpc; /* Bytecode PC of starting instruction. */
+ TraceNo parent; /* Parent of current side trace (0 for root traces). */
+ ExitNo exitno; /* Exit number in parent of current side trace. */
+
+ BCIns *patchpc; /* PC for pending re-patch. */
+ BCIns patchins; /* Instruction for pending re-patch. */
+
+ int mcprot; /* Protection of current mcode area. */
+ MCode *mcarea; /* Base of current mcode area. */
+ MCode *mctop; /* Top of current mcode area. */
+ MCode *mcbot; /* Bottom of current mcode area. */
+ size_t szmcarea; /* Size of current mcode area. */
+ size_t szallmcarea; /* Total size of all allocated mcode areas. */
+
+ TValue errinfo; /* Additional info element for trace errors. */
+}
+#if LJ_TARGET_ARM
+LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */
+#endif
+jit_State;
+
+/* Trivial PRNG e.g. used for penalty randomization. */
+static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
+{
+ /* Yes, this LCG is very weak, but that doesn't matter for our use case. */
+ J->prngstate = J->prngstate * 1103515245 + 12345;
+ return J->prngstate >> (32-bits);
+}
+
+#endif
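
The SnapEntry packing above can be looked at in isolation: the slot number sits in the top 8 bits, the flag bits (SNAP_FRAME etc.) in the middle, and the 16-bit IR reference in the low bits. Below is a minimal standalone sketch in plain C, independent of the LuaJIT headers; the concrete slot and reference values are made up for illustration, and the (BCReg) cast is dropped to keep it self-contained.

/* Standalone sketch of the SnapEntry packing macros shown above. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t SnapEntry;
#define SNAP_FRAME   0x010000
#define SNAP(slot, flags, ref)  (((SnapEntry)(slot) << 24) + (flags) + (ref))
#define snap_ref(sn)            ((sn) & 0xffff)
#define snap_slot(sn)           ((sn) >> 24)
#define snap_isframe(sn)        ((sn) & SNAP_FRAME)

int main(void)
{
  SnapEntry sn = SNAP(5, SNAP_FRAME, 0x8123);  /* Slot 5, frame flag, IR ref. */
  printf("slot=%u ref=0x%x frame=%d\n",
         (unsigned)snap_slot(sn), (unsigned)snap_ref(sn), snap_isframe(sn) != 0);
  return 0;  /* Prints: slot=5 ref=0x8123 frame=1 */
}
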
diff --git a/3rdparty/lua/src/lj_lex.c b/3rdparty/lua/src/lj_lex.c
index 7bfe7ab..9f2b06f 100644
--- a/3rdparty/lua/src/lj_lex.c
+++ b/3rdparty/lua/src/lj_lex.c
@@ -1,482 +1,481 @@
-/*
-** Lexical analyzer.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_lex_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#if LJ_HASFFI
-#include "lj_tab.h"
-#include "lj_ctype.h"
-#include "lj_cdata.h"
-#include "lualib.h"
-#endif
-#include "lj_state.h"
-#include "lj_lex.h"
-#include "lj_parse.h"
-#include "lj_char.h"
-#include "lj_strscan.h"
-
-/* Lua lexer token names. */
-static const char *const tokennames[] = {
-#define TKSTR1(name) #name,
-#define TKSTR2(name, sym) #sym,
-TKDEF(TKSTR1, TKSTR2)
-#undef TKSTR1
-#undef TKSTR2
- NULL
-};
-
-/* -- Buffer handling ----------------------------------------------------- */
-
-#define char2int(c) ((int)(uint8_t)(c))
-#define next(ls) \
- (ls->current = (ls->n--) > 0 ? char2int(*ls->p++) : fillbuf(ls))
-#define save_and_next(ls) (save(ls, ls->current), next(ls))
-#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
-#define END_OF_STREAM (-1)
-
-static int fillbuf(LexState *ls)
-{
- size_t sz;
- const char *buf = ls->rfunc(ls->L, ls->rdata, &sz);
- if (buf == NULL || sz == 0) return END_OF_STREAM;
- ls->n = (MSize)sz - 1;
- ls->p = buf;
- return char2int(*(ls->p++));
-}
-
-static LJ_NOINLINE void save_grow(LexState *ls, int c)
-{
- MSize newsize;
- if (ls->sb.sz >= LJ_MAX_STR/2)
- lj_lex_error(ls, 0, LJ_ERR_XELEM);
- newsize = ls->sb.sz * 2;
- lj_str_resizebuf(ls->L, &ls->sb, newsize);
- ls->sb.buf[ls->sb.n++] = (char)c;
-}
-
-static LJ_AINLINE void save(LexState *ls, int c)
-{
- if (LJ_UNLIKELY(ls->sb.n + 1 > ls->sb.sz))
- save_grow(ls, c);
- else
- ls->sb.buf[ls->sb.n++] = (char)c;
-}
-
-static void inclinenumber(LexState *ls)
-{
- int old = ls->current;
- lua_assert(currIsNewline(ls));
- next(ls); /* skip `\n' or `\r' */
- if (currIsNewline(ls) && ls->current != old)
- next(ls); /* skip `\n\r' or `\r\n' */
- if (++ls->linenumber >= LJ_MAX_LINE)
- lj_lex_error(ls, ls->token, LJ_ERR_XLINES);
-}
-
-/* -- Scanner for terminals ----------------------------------------------- */
-
-/* Parse a number literal. */
-static void lex_number(LexState *ls, TValue *tv)
-{
- StrScanFmt fmt;
- int c, xp = 'e';
- lua_assert(lj_char_isdigit(ls->current));
- if ((c = ls->current) == '0') {
- save_and_next(ls);
- if ((ls->current | 0x20) == 'x') xp = 'p';
- }
- while (lj_char_isident(ls->current) || ls->current == '.' ||
- ((ls->current == '-' || ls->current == '+') && (c | 0x20) == xp)) {
- c = ls->current;
- save_and_next(ls);
- }
- save(ls, '\0');
- fmt = lj_strscan_scan((const uint8_t *)ls->sb.buf, tv,
- (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) |
- (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0));
- if (LJ_DUALNUM && fmt == STRSCAN_INT) {
- setitype(tv, LJ_TISNUM);
- } else if (fmt == STRSCAN_NUM) {
- /* Already in correct format. */
-#if LJ_HASFFI
- } else if (fmt != STRSCAN_ERROR) {
- lua_State *L = ls->L;
- GCcdata *cd;
- lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG);
- if (!ctype_ctsG(G(L))) {
- ptrdiff_t oldtop = savestack(L, L->top);
- luaopen_ffi(L); /* Load FFI library on-demand. */
- L->top = restorestack(L, oldtop);
- }
- if (fmt == STRSCAN_IMAG) {
- cd = lj_cdata_new_(L, CTID_COMPLEX_DOUBLE, 2*sizeof(double));
- ((double *)cdataptr(cd))[0] = 0;
- ((double *)cdataptr(cd))[1] = numV(tv);
- } else {
- cd = lj_cdata_new_(L, fmt==STRSCAN_I64 ? CTID_INT64 : CTID_UINT64, 8);
- *(uint64_t *)cdataptr(cd) = tv->u64;
- }
- lj_parse_keepcdata(ls, tv, cd);
-#endif
- } else {
- lua_assert(fmt == STRSCAN_ERROR);
- lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER);
- }
-}
-
-static int skip_sep(LexState *ls)
-{
- int count = 0;
- int s = ls->current;
- lua_assert(s == '[' || s == ']');
- save_and_next(ls);
- while (ls->current == '=') {
- save_and_next(ls);
- count++;
- }
- return (ls->current == s) ? count : (-count) - 1;
-}
-
-static void read_long_string(LexState *ls, TValue *tv, int sep)
-{
- save_and_next(ls); /* skip 2nd `[' */
- if (currIsNewline(ls)) /* string starts with a newline? */
- inclinenumber(ls); /* skip it */
- for (;;) {
- switch (ls->current) {
- case END_OF_STREAM:
- lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM);
- break;
- case ']':
- if (skip_sep(ls) == sep) {
- save_and_next(ls); /* skip 2nd `]' */
- goto endloop;
- }
- break;
- case '\n':
- case '\r':
- save(ls, '\n');
- inclinenumber(ls);
- if (!tv) lj_str_resetbuf(&ls->sb); /* avoid wasting space */
- break;
- default:
- if (tv) save_and_next(ls);
- else next(ls);
- break;
- }
- } endloop:
- if (tv) {
- GCstr *str = lj_parse_keepstr(ls, ls->sb.buf + (2 + (MSize)sep),
- ls->sb.n - 2*(2 + (MSize)sep));
- setstrV(ls->L, tv, str);
- }
-}
-
-static void read_string(LexState *ls, int delim, TValue *tv)
-{
- save_and_next(ls);
- while (ls->current != delim) {
- switch (ls->current) {
- case END_OF_STREAM:
- lj_lex_error(ls, TK_eof, LJ_ERR_XSTR);
- continue;
- case '\n':
- case '\r':
- lj_lex_error(ls, TK_string, LJ_ERR_XSTR);
- continue;
- case '\\': {
- int c = next(ls); /* Skip the '\\'. */
- switch (c) {
- case 'a': c = '\a'; break;
- case 'b': c = '\b'; break;
- case 'f': c = '\f'; break;
- case 'n': c = '\n'; break;
- case 'r': c = '\r'; break;
- case 't': c = '\t'; break;
- case 'v': c = '\v'; break;
- case 'x': /* Hexadecimal escape '\xXX'. */
- c = (next(ls) & 15u) << 4;
- if (!lj_char_isdigit(ls->current)) {
- if (!lj_char_isxdigit(ls->current)) goto err_xesc;
- c += 9 << 4;
- }
- c += (next(ls) & 15u);
- if (!lj_char_isdigit(ls->current)) {
- if (!lj_char_isxdigit(ls->current)) goto err_xesc;
- c += 9;
- }
- break;
- case 'z': /* Skip whitespace. */
- next(ls);
- while (lj_char_isspace(ls->current))
- if (currIsNewline(ls)) inclinenumber(ls); else next(ls);
- continue;
- case '\n': case '\r': save(ls, '\n'); inclinenumber(ls); continue;
- case '\\': case '\"': case '\'': break;
- case END_OF_STREAM: continue;
- default:
- if (!lj_char_isdigit(c))
- goto err_xesc;
- c -= '0'; /* Decimal escape '\ddd'. */
- if (lj_char_isdigit(next(ls))) {
- c = c*10 + (ls->current - '0');
- if (lj_char_isdigit(next(ls))) {
- c = c*10 + (ls->current - '0');
- if (c > 255) {
- err_xesc:
- lj_lex_error(ls, TK_string, LJ_ERR_XESC);
- }
- next(ls);
- }
- }
- save(ls, c);
- continue;
- }
- save(ls, c);
- next(ls);
- continue;
- }
- default:
- save_and_next(ls);
- break;
- }
- }
- save_and_next(ls); /* skip delimiter */
- setstrV(ls->L, tv, lj_parse_keepstr(ls, ls->sb.buf + 1, ls->sb.n - 2));
-}
-
-/* -- Main lexical scanner ------------------------------------------------ */
-
-static int llex(LexState *ls, TValue *tv)
-{
- lj_str_resetbuf(&ls->sb);
- for (;;) {
- if (lj_char_isident(ls->current)) {
- GCstr *s;
- if (lj_char_isdigit(ls->current)) { /* Numeric literal. */
- lex_number(ls, tv);
- return TK_number;
- }
- /* Identifier or reserved word. */
- do {
- save_and_next(ls);
- } while (lj_char_isident(ls->current));
- s = lj_parse_keepstr(ls, ls->sb.buf, ls->sb.n);
- setstrV(ls->L, tv, s);
- if (s->reserved > 0) /* Reserved word? */
- return TK_OFS + s->reserved;
- return TK_name;
- }
- switch (ls->current) {
- case '\n':
- case '\r':
- inclinenumber(ls);
- continue;
- case ' ':
- case '\t':
- case '\v':
- case '\f':
- next(ls);
- continue;
- case '-':
- next(ls);
- if (ls->current != '-') return '-';
- /* else is a comment */
- next(ls);
- if (ls->current == '[') {
- int sep = skip_sep(ls);
- lj_str_resetbuf(&ls->sb); /* `skip_sep' may dirty the buffer */
- if (sep >= 0) {
- read_long_string(ls, NULL, sep); /* long comment */
- lj_str_resetbuf(&ls->sb);
- continue;
- }
- }
- /* else short comment */
- while (!currIsNewline(ls) && ls->current != END_OF_STREAM)
- next(ls);
- continue;
- case '[': {
- int sep = skip_sep(ls);
- if (sep >= 0) {
- read_long_string(ls, tv, sep);
- return TK_string;
- } else if (sep == -1) {
- return '[';
- } else {
- lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM);
- continue;
- }
- }
- case '=':
- next(ls);
- if (ls->current != '=') return '='; else { next(ls); return TK_eq; }
- case '<':
- next(ls);
- if (ls->current != '=') return '<'; else { next(ls); return TK_le; }
- case '>':
- next(ls);
- if (ls->current != '=') return '>'; else { next(ls); return TK_ge; }
- case '~':
- next(ls);
- if (ls->current != '=') return '~'; else { next(ls); return TK_ne; }
- case ':':
- next(ls);
- if (ls->current != ':') return ':'; else { next(ls); return TK_label; }
- case '"':
- case '\'':
- read_string(ls, ls->current, tv);
- return TK_string;
- case '.':
- save_and_next(ls);
- if (ls->current == '.') {
- next(ls);
- if (ls->current == '.') {
- next(ls);
- return TK_dots; /* ... */
- }
- return TK_concat; /* .. */
- } else if (!lj_char_isdigit(ls->current)) {
- return '.';
- } else {
- lex_number(ls, tv);
- return TK_number;
- }
- case END_OF_STREAM:
- return TK_eof;
- default: {
- int c = ls->current;
- next(ls);
- return c; /* Single-char tokens (+ - / ...). */
- }
- }
- }
-}
-
-/* -- Lexer API ----------------------------------------------------------- */
-
-/* Setup lexer state. */
-int lj_lex_setup(lua_State *L, LexState *ls)
-{
- int header = 0;
- ls->L = L;
- ls->fs = NULL;
- ls->n = 0;
- ls->p = NULL;
- ls->vstack = NULL;
- ls->sizevstack = 0;
- ls->vtop = 0;
- ls->bcstack = NULL;
- ls->sizebcstack = 0;
- ls->token = 0;
- ls->lookahead = TK_eof; /* No look-ahead token. */
- ls->linenumber = 1;
- ls->lastline = 1;
- lj_str_resizebuf(ls->L, &ls->sb, LJ_MIN_SBUF);
- next(ls); /* Read-ahead first char. */
- if (ls->current == 0xef && ls->n >= 2 && char2int(ls->p[0]) == 0xbb &&
- char2int(ls->p[1]) == 0xbf) { /* Skip UTF-8 BOM (if buffered). */
- ls->n -= 2;
- ls->p += 2;
- next(ls);
- header = 1;
- }
- if (ls->current == '#') { /* Skip POSIX #! header line. */
- do {
- next(ls);
- if (ls->current == END_OF_STREAM) return 0;
- } while (!currIsNewline(ls));
- inclinenumber(ls);
- header = 1;
- }
- if (ls->current == LUA_SIGNATURE[0]) { /* Bytecode dump. */
- if (header) {
- /*
- ** Loading bytecode with an extra header is disabled for security
- ** reasons. This may circumvent the usual check for bytecode vs.
- ** Lua code by looking at the first char. Since this is a potential
- ** security violation no attempt is made to echo the chunkname either.
- */
- setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCBAD));
- lj_err_throw(L, LUA_ERRSYNTAX);
- }
- return 1;
- }
- return 0;
-}
-
-/* Cleanup lexer state. */
-void lj_lex_cleanup(lua_State *L, LexState *ls)
-{
- global_State *g = G(L);
- lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine);
- lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo);
- lj_str_freebuf(g, &ls->sb);
-}
-
-void lj_lex_next(LexState *ls)
-{
- ls->lastline = ls->linenumber;
- if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */
- ls->token = llex(ls, &ls->tokenval); /* Get next token. */
- } else { /* Otherwise return lookahead token. */
- ls->token = ls->lookahead;
- ls->lookahead = TK_eof;
- ls->tokenval = ls->lookaheadval;
- }
-}
-
-LexToken lj_lex_lookahead(LexState *ls)
-{
- lua_assert(ls->lookahead == TK_eof);
- ls->lookahead = llex(ls, &ls->lookaheadval);
- return ls->lookahead;
-}
-
-const char *lj_lex_token2str(LexState *ls, LexToken token)
-{
- if (token > TK_OFS)
- return tokennames[token-TK_OFS-1];
- else if (!lj_char_iscntrl(token))
- return lj_str_pushf(ls->L, "%c", token);
- else
- return lj_str_pushf(ls->L, "char(%d)", token);
-}
-
-void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...)
-{
- const char *tok;
- va_list argp;
- if (token == 0) {
- tok = NULL;
- } else if (token == TK_name || token == TK_string || token == TK_number) {
- save(ls, '\0');
- tok = ls->sb.buf;
- } else {
- tok = lj_lex_token2str(ls, token);
- }
- va_start(argp, em);
- lj_err_lex(ls->L, ls->chunkname, tok, ls->linenumber, em, argp);
- va_end(argp);
-}
-
-void lj_lex_init(lua_State *L)
-{
- uint32_t i;
- for (i = 0; i < TK_RESERVED; i++) {
- GCstr *s = lj_str_newz(L, tokennames[i]);
- fixstring(s); /* Reserved words are never collected. */
- s->reserved = (uint8_t)(i+1);
- }
-}
-
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_lex_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#if LJ_HASFFI
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lualib.h"
+#endif
+#include "lj_state.h"
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+
+/* Lua lexer token names. */
+static const char *const tokennames[] = {
+#define TKSTR1(name) #name,
+#define TKSTR2(name, sym) #sym,
+TKDEF(TKSTR1, TKSTR2)
+#undef TKSTR1
+#undef TKSTR2
+ NULL
+};
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define char2int(c) ((int)(uint8_t)(c))
+#define next(ls) \
+ (ls->current = (ls->n--) > 0 ? char2int(*ls->p++) : fillbuf(ls))
+#define save_and_next(ls) (save(ls, ls->current), next(ls))
+#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
+#define END_OF_STREAM (-1)
+
+static int fillbuf(LexState *ls)
+{
+ size_t sz;
+ const char *buf = ls->rfunc(ls->L, ls->rdata, &sz);
+ if (buf == NULL || sz == 0) return END_OF_STREAM;
+ ls->n = (MSize)sz - 1;
+ ls->p = buf;
+ return char2int(*(ls->p++));
+}
+
+static LJ_NOINLINE void save_grow(LexState *ls, int c)
+{
+ MSize newsize;
+ if (ls->sb.sz >= LJ_MAX_STR/2)
+ lj_lex_error(ls, 0, LJ_ERR_XELEM);
+ newsize = ls->sb.sz * 2;
+ lj_str_resizebuf(ls->L, &ls->sb, newsize);
+ ls->sb.buf[ls->sb.n++] = (char)c;
+}
+
+static LJ_AINLINE void save(LexState *ls, int c)
+{
+ if (LJ_UNLIKELY(ls->sb.n + 1 > ls->sb.sz))
+ save_grow(ls, c);
+ else
+ ls->sb.buf[ls->sb.n++] = (char)c;
+}
+
+static void inclinenumber(LexState *ls)
+{
+ int old = ls->current;
+ lua_assert(currIsNewline(ls));
+ next(ls); /* skip `\n' or `\r' */
+ if (currIsNewline(ls) && ls->current != old)
+ next(ls); /* skip `\n\r' or `\r\n' */
+ if (++ls->linenumber >= LJ_MAX_LINE)
+ lj_lex_error(ls, ls->token, LJ_ERR_XLINES);
+}
+
+/* -- Scanner for terminals ----------------------------------------------- */
+
+/* Parse a number literal. */
+static void lex_number(LexState *ls, TValue *tv)
+{
+ StrScanFmt fmt;
+ int c, xp = 'e';
+ lua_assert(lj_char_isdigit(ls->current));
+ if ((c = ls->current) == '0') {
+ save_and_next(ls);
+ if ((ls->current | 0x20) == 'x') xp = 'p';
+ }
+ while (lj_char_isident(ls->current) || ls->current == '.' ||
+ ((ls->current == '-' || ls->current == '+') && (c | 0x20) == xp)) {
+ c = ls->current;
+ save_and_next(ls);
+ }
+ save(ls, '\0');
+ fmt = lj_strscan_scan((const uint8_t *)ls->sb.buf, tv,
+ (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) |
+ (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0));
+ if (LJ_DUALNUM && fmt == STRSCAN_INT) {
+ setitype(tv, LJ_TISNUM);
+ } else if (fmt == STRSCAN_NUM) {
+ /* Already in correct format. */
+#if LJ_HASFFI
+ } else if (fmt != STRSCAN_ERROR) {
+ lua_State *L = ls->L;
+ GCcdata *cd;
+ lua_assert(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG);
+ if (!ctype_ctsG(G(L))) {
+ ptrdiff_t oldtop = savestack(L, L->top);
+ luaopen_ffi(L); /* Load FFI library on-demand. */
+ L->top = restorestack(L, oldtop);
+ }
+ if (fmt == STRSCAN_IMAG) {
+ cd = lj_cdata_new_(L, CTID_COMPLEX_DOUBLE, 2*sizeof(double));
+ ((double *)cdataptr(cd))[0] = 0;
+ ((double *)cdataptr(cd))[1] = numV(tv);
+ } else {
+ cd = lj_cdata_new_(L, fmt==STRSCAN_I64 ? CTID_INT64 : CTID_UINT64, 8);
+ *(uint64_t *)cdataptr(cd) = tv->u64;
+ }
+ lj_parse_keepcdata(ls, tv, cd);
+#endif
+ } else {
+ lua_assert(fmt == STRSCAN_ERROR);
+ lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER);
+ }
+}
+
+static int skip_sep(LexState *ls)
+{
+ int count = 0;
+ int s = ls->current;
+ lua_assert(s == '[' || s == ']');
+ save_and_next(ls);
+ while (ls->current == '=') {
+ save_and_next(ls);
+ count++;
+ }
+ return (ls->current == s) ? count : (-count) - 1;
+}
+
+static void read_long_string(LexState *ls, TValue *tv, int sep)
+{
+ save_and_next(ls); /* skip 2nd `[' */
+ if (currIsNewline(ls)) /* string starts with a newline? */
+ inclinenumber(ls); /* skip it */
+ for (;;) {
+ switch (ls->current) {
+ case END_OF_STREAM:
+ lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM);
+ break;
+ case ']':
+ if (skip_sep(ls) == sep) {
+ save_and_next(ls); /* skip 2nd `]' */
+ goto endloop;
+ }
+ break;
+ case '\n':
+ case '\r':
+ save(ls, '\n');
+ inclinenumber(ls);
+ if (!tv) lj_str_resetbuf(&ls->sb); /* avoid wasting space */
+ break;
+ default:
+ if (tv) save_and_next(ls);
+ else next(ls);
+ break;
+ }
+ } endloop:
+ if (tv) {
+ GCstr *str = lj_parse_keepstr(ls, ls->sb.buf + (2 + (MSize)sep),
+ ls->sb.n - 2*(2 + (MSize)sep));
+ setstrV(ls->L, tv, str);
+ }
+}
+
+static void read_string(LexState *ls, int delim, TValue *tv)
+{
+ save_and_next(ls);
+ while (ls->current != delim) {
+ switch (ls->current) {
+ case END_OF_STREAM:
+ lj_lex_error(ls, TK_eof, LJ_ERR_XSTR);
+ continue;
+ case '\n':
+ case '\r':
+ lj_lex_error(ls, TK_string, LJ_ERR_XSTR);
+ continue;
+ case '\\': {
+ int c = next(ls); /* Skip the '\\'. */
+ switch (c) {
+ case 'a': c = '\a'; break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+ case 'x': /* Hexadecimal escape '\xXX'. */
+ c = (next(ls) & 15u) << 4;
+ if (!lj_char_isdigit(ls->current)) {
+ if (!lj_char_isxdigit(ls->current)) goto err_xesc;
+ c += 9 << 4;
+ }
+ c += (next(ls) & 15u);
+ if (!lj_char_isdigit(ls->current)) {
+ if (!lj_char_isxdigit(ls->current)) goto err_xesc;
+ c += 9;
+ }
+ break;
+ case 'z': /* Skip whitespace. */
+ next(ls);
+ while (lj_char_isspace(ls->current))
+ if (currIsNewline(ls)) inclinenumber(ls); else next(ls);
+ continue;
+ case '\n': case '\r': save(ls, '\n'); inclinenumber(ls); continue;
+ case '\\': case '\"': case '\'': break;
+ case END_OF_STREAM: continue;
+ default:
+ if (!lj_char_isdigit(c))
+ goto err_xesc;
+ c -= '0'; /* Decimal escape '\ddd'. */
+ if (lj_char_isdigit(next(ls))) {
+ c = c*10 + (ls->current - '0');
+ if (lj_char_isdigit(next(ls))) {
+ c = c*10 + (ls->current - '0');
+ if (c > 255) {
+ err_xesc:
+ lj_lex_error(ls, TK_string, LJ_ERR_XESC);
+ }
+ next(ls);
+ }
+ }
+ save(ls, c);
+ continue;
+ }
+ save(ls, c);
+ next(ls);
+ continue;
+ }
+ default:
+ save_and_next(ls);
+ break;
+ }
+ }
+ save_and_next(ls); /* skip delimiter */
+ setstrV(ls->L, tv, lj_parse_keepstr(ls, ls->sb.buf + 1, ls->sb.n - 2));
+}
+
+/* -- Main lexical scanner ------------------------------------------------ */
+
+static int llex(LexState *ls, TValue *tv)
+{
+ lj_str_resetbuf(&ls->sb);
+ for (;;) {
+ if (lj_char_isident(ls->current)) {
+ GCstr *s;
+ if (lj_char_isdigit(ls->current)) { /* Numeric literal. */
+ lex_number(ls, tv);
+ return TK_number;
+ }
+ /* Identifier or reserved word. */
+ do {
+ save_and_next(ls);
+ } while (lj_char_isident(ls->current));
+ s = lj_parse_keepstr(ls, ls->sb.buf, ls->sb.n);
+ setstrV(ls->L, tv, s);
+ if (s->reserved > 0) /* Reserved word? */
+ return TK_OFS + s->reserved;
+ return TK_name;
+ }
+ switch (ls->current) {
+ case '\n':
+ case '\r':
+ inclinenumber(ls);
+ continue;
+ case ' ':
+ case '\t':
+ case '\v':
+ case '\f':
+ next(ls);
+ continue;
+ case '-':
+ next(ls);
+ if (ls->current != '-') return '-';
+ /* else is a comment */
+ next(ls);
+ if (ls->current == '[') {
+ int sep = skip_sep(ls);
+ lj_str_resetbuf(&ls->sb); /* `skip_sep' may dirty the buffer */
+ if (sep >= 0) {
+ read_long_string(ls, NULL, sep); /* long comment */
+ lj_str_resetbuf(&ls->sb);
+ continue;
+ }
+ }
+ /* else short comment */
+ while (!currIsNewline(ls) && ls->current != END_OF_STREAM)
+ next(ls);
+ continue;
+ case '[': {
+ int sep = skip_sep(ls);
+ if (sep >= 0) {
+ read_long_string(ls, tv, sep);
+ return TK_string;
+ } else if (sep == -1) {
+ return '[';
+ } else {
+ lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM);
+ continue;
+ }
+ }
+ case '=':
+ next(ls);
+ if (ls->current != '=') return '='; else { next(ls); return TK_eq; }
+ case '<':
+ next(ls);
+ if (ls->current != '=') return '<'; else { next(ls); return TK_le; }
+ case '>':
+ next(ls);
+ if (ls->current != '=') return '>'; else { next(ls); return TK_ge; }
+ case '~':
+ next(ls);
+ if (ls->current != '=') return '~'; else { next(ls); return TK_ne; }
+ case ':':
+ next(ls);
+ if (ls->current != ':') return ':'; else { next(ls); return TK_label; }
+ case '"':
+ case '\'':
+ read_string(ls, ls->current, tv);
+ return TK_string;
+ case '.':
+ save_and_next(ls);
+ if (ls->current == '.') {
+ next(ls);
+ if (ls->current == '.') {
+ next(ls);
+ return TK_dots; /* ... */
+ }
+ return TK_concat; /* .. */
+ } else if (!lj_char_isdigit(ls->current)) {
+ return '.';
+ } else {
+ lex_number(ls, tv);
+ return TK_number;
+ }
+ case END_OF_STREAM:
+ return TK_eof;
+ default: {
+ int c = ls->current;
+ next(ls);
+ return c; /* Single-char tokens (+ - / ...). */
+ }
+ }
+ }
+}
+
+/* -- Lexer API ----------------------------------------------------------- */
+
+/* Setup lexer state. */
+int lj_lex_setup(lua_State *L, LexState *ls)
+{
+ int header = 0;
+ ls->L = L;
+ ls->fs = NULL;
+ ls->n = 0;
+ ls->p = NULL;
+ ls->vstack = NULL;
+ ls->sizevstack = 0;
+ ls->vtop = 0;
+ ls->bcstack = NULL;
+ ls->sizebcstack = 0;
+ ls->lookahead = TK_eof; /* No look-ahead token. */
+ ls->linenumber = 1;
+ ls->lastline = 1;
+ lj_str_resizebuf(ls->L, &ls->sb, LJ_MIN_SBUF);
+ next(ls); /* Read-ahead first char. */
+ if (ls->current == 0xef && ls->n >= 2 && char2int(ls->p[0]) == 0xbb &&
+ char2int(ls->p[1]) == 0xbf) { /* Skip UTF-8 BOM (if buffered). */
+ ls->n -= 2;
+ ls->p += 2;
+ next(ls);
+ header = 1;
+ }
+ if (ls->current == '#') { /* Skip POSIX #! header line. */
+ do {
+ next(ls);
+ if (ls->current == END_OF_STREAM) return 0;
+ } while (!currIsNewline(ls));
+ inclinenumber(ls);
+ header = 1;
+ }
+ if (ls->current == LUA_SIGNATURE[0]) { /* Bytecode dump. */
+ if (header) {
+ /*
+ ** Loading bytecode with an extra header is disabled for security
+ ** reasons. This may circumvent the usual check for bytecode vs.
+ ** Lua code by looking at the first char. Since this is a potential
+ ** security violation no attempt is made to echo the chunkname either.
+ */
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCBAD));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Cleanup lexer state. */
+void lj_lex_cleanup(lua_State *L, LexState *ls)
+{
+ global_State *g = G(L);
+ lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine);
+ lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo);
+ lj_str_freebuf(g, &ls->sb);
+}
+
+void lj_lex_next(LexState *ls)
+{
+ ls->lastline = ls->linenumber;
+ if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */
+ ls->token = llex(ls, &ls->tokenval); /* Get next token. */
+ } else { /* Otherwise return lookahead token. */
+ ls->token = ls->lookahead;
+ ls->lookahead = TK_eof;
+ ls->tokenval = ls->lookaheadval;
+ }
+}
+
+LexToken lj_lex_lookahead(LexState *ls)
+{
+ lua_assert(ls->lookahead == TK_eof);
+ ls->lookahead = llex(ls, &ls->lookaheadval);
+ return ls->lookahead;
+}
+
+const char *lj_lex_token2str(LexState *ls, LexToken token)
+{
+ if (token > TK_OFS)
+ return tokennames[token-TK_OFS-1];
+ else if (!lj_char_iscntrl(token))
+ return lj_str_pushf(ls->L, "%c", token);
+ else
+ return lj_str_pushf(ls->L, "char(%d)", token);
+}
+
+void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...)
+{
+ const char *tok;
+ va_list argp;
+ if (token == 0) {
+ tok = NULL;
+ } else if (token == TK_name || token == TK_string || token == TK_number) {
+ save(ls, '\0');
+ tok = ls->sb.buf;
+ } else {
+ tok = lj_lex_token2str(ls, token);
+ }
+ va_start(argp, em);
+ lj_err_lex(ls->L, ls->chunkname, tok, ls->linenumber, em, argp);
+ va_end(argp);
+}
+
+void lj_lex_init(lua_State *L)
+{
+ uint32_t i;
+ for (i = 0; i < TK_RESERVED; i++) {
+ GCstr *s = lj_str_newz(L, tokennames[i]);
+ fixstring(s); /* Reserved words are never collected. */
+ s->reserved = (uint8_t)(i+1);
+ }
+}
+
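The separator handling in skip_sep() above is the core of long-string and long-comment scanning: it counts the '=' characters between the brackets and encodes a malformed delimiter as a negative value. The following is a standalone sketch of that convention, operating on a plain C string rather than the real LexState stream, with made-up inputs.

/* Standalone sketch of the long-bracket level counting done by skip_sep(). */
#include <stdio.h>

static int bracket_level(const char *p)
{
  int count = 0;
  char s = *p++;                       /* Opening '[' or closing ']'. */
  while (*p == '=') { p++; count++; }  /* Count '=' separators. */
  return (*p == s) ? count : (-count) - 1;
}

int main(void)
{
  printf("%d %d %d\n",
         bracket_level("[==["),   /* 2  : level-2 long bracket.  */
         bracket_level("]]"),     /* 0  : plain long bracket.    */
         bracket_level("[=x"));   /* -2 : malformed separator.   */
  return 0;
}
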
diff --git a/3rdparty/lua/src/lj_lex.h b/3rdparty/lua/src/lj_lex.h
index 7fdb676..6e18e4b 100644
--- a/3rdparty/lua/src/lj_lex.h
+++ b/3rdparty/lua/src/lj_lex.h
@@ -1,85 +1,85 @@
-/*
-** Lexical analyzer.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_LEX_H
-#define _LJ_LEX_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-#include "lj_err.h"
-
-/* Lua lexer tokens. */
-#define TKDEF(_, __) \
- _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \
- _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \
- _(repeat) _(return) _(then) _(true) _(until) _(while) \
- __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \
- __(label, ::) __(number, <number>) __(name, <name>) __(string, <string>) \
- __(eof, <eof>)
-
-enum {
- TK_OFS = 256,
-#define TKENUM1(name) TK_##name,
-#define TKENUM2(name, sym) TK_##name,
-TKDEF(TKENUM1, TKENUM2)
-#undef TKENUM1
-#undef TKENUM2
- TK_RESERVED = TK_while - TK_OFS
-};
-
-typedef int LexToken;
-
-/* Combined bytecode ins/line. Only used during bytecode generation. */
-typedef struct BCInsLine {
- BCIns ins; /* Bytecode instruction. */
- BCLine line; /* Line number for this bytecode. */
-} BCInsLine;
-
-/* Info for local variables. Only used during bytecode generation. */
-typedef struct VarInfo {
- GCRef name; /* Local variable name or goto/label name. */
- BCPos startpc; /* First point where the local variable is active. */
- BCPos endpc; /* First point where the local variable is dead. */
- uint8_t slot; /* Variable slot. */
- uint8_t info; /* Variable/goto/label info. */
-} VarInfo;
-
-/* Lua lexer state. */
-typedef struct LexState {
- struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
- struct lua_State *L; /* Lua state. */
- TValue tokenval; /* Current token value. */
- TValue lookaheadval; /* Lookahead token value. */
- int current; /* Current character (charint). */
- LexToken token; /* Current token. */
- LexToken lookahead; /* Lookahead token. */
- MSize n; /* Bytes left in input buffer. */
- const char *p; /* Current position in input buffer. */
- SBuf sb; /* String buffer for tokens. */
- lua_Reader rfunc; /* Reader callback. */
- void *rdata; /* Reader callback data. */
- BCLine linenumber; /* Input line counter. */
- BCLine lastline; /* Line of last token. */
- GCstr *chunkname; /* Current chunk name (interned string). */
- const char *chunkarg; /* Chunk name argument. */
- const char *mode; /* Allow loading bytecode (b) and/or source text (t). */
- VarInfo *vstack; /* Stack for names and extents of local variables. */
- MSize sizevstack; /* Size of variable stack. */
- MSize vtop; /* Top of variable stack. */
- BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */
- MSize sizebcstack; /* Size of bytecode stack. */
- uint32_t level; /* Syntactical nesting level. */
-} LexState;
-
-LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
-LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
-LJ_FUNC void lj_lex_next(LexState *ls);
-LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
-LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token);
-LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...);
-LJ_FUNC void lj_lex_init(lua_State *L);
-
-#endif
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LEX_H
+#define _LJ_LEX_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_err.h"
+
+/* Lua lexer tokens. */
+#define TKDEF(_, __) \
+ _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \
+ _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \
+ _(repeat) _(return) _(then) _(true) _(until) _(while) \
+ __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \
+ __(label, ::) __(number, <number>) __(name, <name>) __(string, <string>) \
+ __(eof, <eof>)
+
+enum {
+ TK_OFS = 256,
+#define TKENUM1(name) TK_##name,
+#define TKENUM2(name, sym) TK_##name,
+TKDEF(TKENUM1, TKENUM2)
+#undef TKENUM1
+#undef TKENUM2
+ TK_RESERVED = TK_while - TK_OFS
+};
+
+typedef int LexToken;
+
+/* Combined bytecode ins/line. Only used during bytecode generation. */
+typedef struct BCInsLine {
+ BCIns ins; /* Bytecode instruction. */
+ BCLine line; /* Line number for this bytecode. */
+} BCInsLine;
+
+/* Info for local variables. Only used during bytecode generation. */
+typedef struct VarInfo {
+ GCRef name; /* Local variable name or goto/label name. */
+ BCPos startpc; /* First point where the local variable is active. */
+ BCPos endpc; /* First point where the local variable is dead. */
+ uint8_t slot; /* Variable slot. */
+ uint8_t info; /* Variable/goto/label info. */
+} VarInfo;
+
+/* Lua lexer state. */
+typedef struct LexState {
+ struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
+ struct lua_State *L; /* Lua state. */
+ TValue tokenval; /* Current token value. */
+ TValue lookaheadval; /* Lookahead token value. */
+ int current; /* Current character (charint). */
+ LexToken token; /* Current token. */
+ LexToken lookahead; /* Lookahead token. */
+ MSize n; /* Bytes left in input buffer. */
+ const char *p; /* Current position in input buffer. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_Reader rfunc; /* Reader callback. */
+ void *rdata; /* Reader callback data. */
+ BCLine linenumber; /* Input line counter. */
+ BCLine lastline; /* Line of last token. */
+ GCstr *chunkname; /* Current chunk name (interned string). */
+ const char *chunkarg; /* Chunk name argument. */
+ const char *mode; /* Allow loading bytecode (b) and/or source text (t). */
+ VarInfo *vstack; /* Stack for names and extents of local variables. */
+ MSize sizevstack; /* Size of variable stack. */
+ MSize vtop; /* Top of variable stack. */
+ BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */
+ MSize sizebcstack; /* Size of bytecode stack. */
+ uint32_t level; /* Syntactical nesting level. */
+} LexState;
+
+LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_next(LexState *ls);
+LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
+LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token);
+LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...);
+LJ_FUNC void lj_lex_init(lua_State *L);
+
+#endif
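
TKDEF() above is an X-macro: the same token list is expanded once into the TK_* enum in lj_lex.h and once into the tokennames[] string table in lj_lex.c, which is what keeps the tokennames[token-TK_OFS-1] lookup in lj_lex_token2str() in sync with the enum. A toy-sized sketch of the pattern follows; the token list is illustrative only, not the real one.

/* Toy X-macro sketch mirroring the TKDEF/TKENUM/TKSTR expansion above. */
#include <stdio.h>

#define TOYDEF(_, __) \
  _(and) _(break) __(eq, ==) __(eof, <eof>)

enum {
  TOY_OFS = 256,
#define TOYENUM1(name)       TOY_##name,
#define TOYENUM2(name, sym)  TOY_##name,
  TOYDEF(TOYENUM1, TOYENUM2)
#undef TOYENUM1
#undef TOYENUM2
  TOY__MAX
};

static const char *const toynames[] = {
#define TOYSTR1(name)       #name,
#define TOYSTR2(name, sym)  #sym,
  TOYDEF(TOYSTR1, TOYSTR2)
#undef TOYSTR1
#undef TOYSTR2
  NULL
};

int main(void)
{
  /* Token number minus the offset indexes the parallel name table. */
  printf("%s %s\n", toynames[TOY_eq - TOY_OFS - 1],
                    toynames[TOY_eof - TOY_OFS - 1]);
  return 0;  /* Prints: == <eof> */
}
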
diff --git a/3rdparty/lua/src/lj_lib.c b/3rdparty/lua/src/lj_lib.c
index e45bdce..331eaa6 100644
--- a/3rdparty/lua/src/lj_lib.c
+++ b/3rdparty/lua/src/lj_lib.c
@@ -1,258 +1,258 @@
-/*
-** Library function support.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_lib_c
-#define LUA_CORE
-
-#include "lauxlib.h"
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_func.h"
-#include "lj_bc.h"
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-#include "lj_strscan.h"
-#include "lj_lib.h"
-
-/* -- Library initialization ---------------------------------------------- */
-
-static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize)
-{
- if (libname) {
- luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
- lua_getfield(L, -1, libname);
- if (!tvistab(L->top-1)) {
- L->top--;
- if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL)
- lj_err_callerv(L, LJ_ERR_BADMODN, libname);
- settabV(L, L->top, tabV(L->top-1));
- L->top++;
- lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
- }
- L->top--;
- settabV(L, L->top-1, tabV(L->top));
- } else {
- lua_createtable(L, 0, hsize);
- }
- return tabV(L->top-1);
-}
-
-void lj_lib_register(lua_State *L, const char *libname,
- const uint8_t *p, const lua_CFunction *cf)
-{
- GCtab *env = tabref(L->env);
- GCfunc *ofn = NULL;
- int ffid = *p++;
- BCIns *bcff = &L2GG(L)->bcff[*p++];
- GCtab *tab = lib_create_table(L, libname, *p++);
- ptrdiff_t tpos = L->top - L->base;
-
- /* Avoid barriers further down. */
- lj_gc_anybarriert(L, tab);
- tab->nomm = 0;
-
- for (;;) {
- uint32_t tag = *p++;
- MSize len = tag & LIBINIT_LENMASK;
- tag &= LIBINIT_TAGMASK;
- if (tag != LIBINIT_STRING) {
- const char *name;
- MSize nuv = (MSize)(L->top - L->base - tpos);
- GCfunc *fn = lj_func_newC(L, nuv, env);
- if (nuv) {
- L->top = L->base + tpos;
- memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv);
- }
- fn->c.ffid = (uint8_t)(ffid++);
- name = (const char *)p;
- p += len;
- if (tag == LIBINIT_CF)
- setmref(fn->c.pc, &G(L)->bc_cfunc_int);
- else
- setmref(fn->c.pc, bcff++);
- if (tag == LIBINIT_ASM_)
- fn->c.f = ofn->c.f; /* Copy handler from previous function. */
- else
- fn->c.f = *cf++; /* Get cf or handler from C function table. */
- if (len) {
- /* NOBARRIER: See above for common barrier. */
- setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn);
- }
- ofn = fn;
- } else {
- switch (tag | len) {
- case LIBINIT_SET:
- L->top -= 2;
- if (tvisstr(L->top+1) && strV(L->top+1)->len == 0)
- env = tabV(L->top);
- else /* NOBARRIER: See above for common barrier. */
- copyTV(L, lj_tab_set(L, tab, L->top+1), L->top);
- break;
- case LIBINIT_NUMBER:
- memcpy(&L->top->n, p, sizeof(double));
- L->top++;
- p += sizeof(double);
- break;
- case LIBINIT_COPY:
- copyTV(L, L->top, L->top - *p++);
- L->top++;
- break;
- case LIBINIT_LASTCL:
- setfuncV(L, L->top++, ofn);
- break;
- case LIBINIT_FFID:
- ffid++;
- break;
- case LIBINIT_END:
- return;
- default:
- setstrV(L, L->top++, lj_str_new(L, (const char *)p, len));
- p += len;
- break;
- }
- }
- }
-}
-
-/* -- Type checks --------------------------------------------------------- */
-
-TValue *lj_lib_checkany(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (o >= L->top)
- lj_err_arg(L, narg, LJ_ERR_NOVAL);
- return o;
-}
-
-GCstr *lj_lib_checkstr(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (o < L->top) {
- if (LJ_LIKELY(tvisstr(o))) {
- return strV(o);
- } else if (tvisnumber(o)) {
- GCstr *s = lj_str_fromnumber(L, o);
- setstrV(L, o, s);
- return s;
- }
- }
- lj_err_argt(L, narg, LUA_TSTRING);
- return NULL; /* unreachable */
-}
-
-GCstr *lj_lib_optstr(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL;
-}
-
-#if LJ_DUALNUM
-void lj_lib_checknumber(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top && lj_strscan_numberobj(o)))
- lj_err_argt(L, narg, LUA_TNUMBER);
-}
-#endif
-
-lua_Number lj_lib_checknum(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top &&
- (tvisnumber(o) || (tvisstr(o) && lj_strscan_num(strV(o), o)))))
- lj_err_argt(L, narg, LUA_TNUMBER);
- if (LJ_UNLIKELY(tvisint(o))) {
- lua_Number n = (lua_Number)intV(o);
- setnumV(o, n);
- return n;
- } else {
- return numV(o);
- }
-}
-
-int32_t lj_lib_checkint(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top && lj_strscan_numberobj(o)))
- lj_err_argt(L, narg, LUA_TNUMBER);
- if (LJ_LIKELY(tvisint(o))) {
- return intV(o);
- } else {
- int32_t i = lj_num2int(numV(o));
- if (LJ_DUALNUM) setintV(o, i);
- return i;
- }
-}
-
-int32_t lj_lib_optint(lua_State *L, int narg, int32_t def)
-{
- TValue *o = L->base + narg-1;
- return (o < L->top && !tvisnil(o)) ? lj_lib_checkint(L, narg) : def;
-}
-
-int32_t lj_lib_checkbit(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top && lj_strscan_numberobj(o)))
- lj_err_argt(L, narg, LUA_TNUMBER);
- if (LJ_LIKELY(tvisint(o))) {
- return intV(o);
- } else {
- int32_t i = lj_num2bit(numV(o));
- if (LJ_DUALNUM) setintV(o, i);
- return i;
- }
-}
-
-GCfunc *lj_lib_checkfunc(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top && tvisfunc(o)))
- lj_err_argt(L, narg, LUA_TFUNCTION);
- return funcV(o);
-}
-
-GCtab *lj_lib_checktab(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (!(o < L->top && tvistab(o)))
- lj_err_argt(L, narg, LUA_TTABLE);
- return tabV(o);
-}
-
-GCtab *lj_lib_checktabornil(lua_State *L, int narg)
-{
- TValue *o = L->base + narg-1;
- if (o < L->top) {
- if (tvistab(o))
- return tabV(o);
- else if (tvisnil(o))
- return NULL;
- }
- lj_err_arg(L, narg, LJ_ERR_NOTABN);
- return NULL; /* unreachable */
-}
-
-int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst)
-{
- GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg);
- if (s) {
- const char *opt = strdata(s);
- MSize len = s->len;
- int i;
- for (i = 0; *(const uint8_t *)lst; i++) {
- if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0)
- return i;
- lst += 1+*(const uint8_t *)lst;
- }
- lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt);
- }
- return def;
-}
-
+/*
+** Library function support.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_lib_c
+#define LUA_CORE
+
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_bc.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_lib.h"
+
+/* -- Library initialization ---------------------------------------------- */
+
+static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize)
+{
+ if (libname) {
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, libname);
+ if (!tvistab(L->top-1)) {
+ L->top--;
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, libname);
+ settabV(L, L->top, tabV(L->top-1));
+ L->top++;
+ lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
+ }
+ L->top--;
+ settabV(L, L->top-1, tabV(L->top));
+ } else {
+ lua_createtable(L, 0, hsize);
+ }
+ return tabV(L->top-1);
+}
+
+void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *p, const lua_CFunction *cf)
+{
+ GCtab *env = tabref(L->env);
+ GCfunc *ofn = NULL;
+ int ffid = *p++;
+ BCIns *bcff = &L2GG(L)->bcff[*p++];
+ GCtab *tab = lib_create_table(L, libname, *p++);
+ ptrdiff_t tpos = L->top - L->base;
+
+ /* Avoid barriers further down. */
+ lj_gc_anybarriert(L, tab);
+ tab->nomm = 0;
+
+ for (;;) {
+ uint32_t tag = *p++;
+ MSize len = tag & LIBINIT_LENMASK;
+ tag &= LIBINIT_TAGMASK;
+ if (tag != LIBINIT_STRING) {
+ const char *name;
+ MSize nuv = (MSize)(L->top - L->base - tpos);
+ GCfunc *fn = lj_func_newC(L, nuv, env);
+ if (nuv) {
+ L->top = L->base + tpos;
+ memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv);
+ }
+ fn->c.ffid = (uint8_t)(ffid++);
+ name = (const char *)p;
+ p += len;
+ if (tag == LIBINIT_CF)
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+ else
+ setmref(fn->c.pc, bcff++);
+ if (tag == LIBINIT_ASM_)
+ fn->c.f = ofn->c.f; /* Copy handler from previous function. */
+ else
+ fn->c.f = *cf++; /* Get cf or handler from C function table. */
+ if (len) {
+ /* NOBARRIER: See above for common barrier. */
+ setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn);
+ }
+ ofn = fn;
+ } else {
+ switch (tag | len) {
+ case LIBINIT_SET:
+ L->top -= 2;
+ if (tvisstr(L->top+1) && strV(L->top+1)->len == 0)
+ env = tabV(L->top);
+ else /* NOBARRIER: See above for common barrier. */
+ copyTV(L, lj_tab_set(L, tab, L->top+1), L->top);
+ break;
+ case LIBINIT_NUMBER:
+ memcpy(&L->top->n, p, sizeof(double));
+ L->top++;
+ p += sizeof(double);
+ break;
+ case LIBINIT_COPY:
+ copyTV(L, L->top, L->top - *p++);
+ L->top++;
+ break;
+ case LIBINIT_LASTCL:
+ setfuncV(L, L->top++, ofn);
+ break;
+ case LIBINIT_FFID:
+ ffid++;
+ break;
+ case LIBINIT_END:
+ return;
+ default:
+ setstrV(L, L->top++, lj_str_new(L, (const char *)p, len));
+ p += len;
+ break;
+ }
+ }
+ }
+}
+
+/* -- Type checks --------------------------------------------------------- */
+
+TValue *lj_lib_checkany(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ return o;
+}
+
+GCstr *lj_lib_checkstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (LJ_LIKELY(tvisstr(o))) {
+ return strV(o);
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_str_fromnumber(L, o);
+ setstrV(L, o, s);
+ return s;
+ }
+ }
+ lj_err_argt(L, narg, LUA_TSTRING);
+ return NULL; /* unreachable */
+}
+
+GCstr *lj_lib_optstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL;
+}
+
+#if LJ_DUALNUM
+void lj_lib_checknumber(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+}
+#endif
+
+lua_Number lj_lib_checknum(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_strscan_num(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_UNLIKELY(tvisint(o))) {
+ lua_Number n = (lua_Number)intV(o);
+ setnumV(o, n);
+ return n;
+ } else {
+ return numV(o);
+ }
+}
+
+int32_t lj_lib_checkint(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2int(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+
+int32_t lj_lib_optint(lua_State *L, int narg, int32_t def)
+{
+ TValue *o = L->base + narg-1;
+ return (o < L->top && !tvisnil(o)) ? lj_lib_checkint(L, narg) : def;
+}
+
+int32_t lj_lib_checkbit(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2bit(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+
+GCfunc *lj_lib_checkfunc(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvisfunc(o)))
+ lj_err_argt(L, narg, LUA_TFUNCTION);
+ return funcV(o);
+}
+
+GCtab *lj_lib_checktab(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvistab(o)))
+ lj_err_argt(L, narg, LUA_TTABLE);
+ return tabV(o);
+}
+
+GCtab *lj_lib_checktabornil(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (tvistab(o))
+ return tabV(o);
+ else if (tvisnil(o))
+ return NULL;
+ }
+ lj_err_arg(L, narg, LJ_ERR_NOTABN);
+ return NULL; /* unreachable */
+}
+
+int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst)
+{
+ GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg);
+ if (s) {
+ const char *opt = strdata(s);
+ MSize len = s->len;
+ int i;
+ for (i = 0; *(const uint8_t *)lst; i++) {
+ if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0)
+ return i;
+ lst += 1+*(const uint8_t *)lst;
+ }
+ lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt);
+ }
+ return def;
+}
+
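The lst argument walked by lj_lib_checkopt() above is a list of length-prefixed strings terminated by a zero length byte. Below is a standalone sketch of that encoding in plain C; the option names are made up, and unlike the real helper it returns -1 instead of raising an argument error for an unknown option.

/* Standalone sketch of the length-prefixed option list used by lj_lib_checkopt(). */
#include <stdio.h>
#include <string.h>

static int find_opt(const char *opt, const char *lst)
{
  size_t len = strlen(opt);
  int i;
  for (i = 0; *(const unsigned char *)lst; i++) {
    if (*(const unsigned char *)lst == len && memcmp(opt, lst+1, len) == 0)
      return i;                              /* Index of matching option. */
    lst += 1 + *(const unsigned char *)lst;  /* Skip to next entry. */
  }
  return -1;                                 /* Not found. */
}

int main(void)
{
  static const char opts[] = "\2on\3off\4both";   /* "on", "off", "both". */
  printf("%d %d %d\n", find_opt("on", opts), find_opt("both", opts),
         find_opt("bogus", opts));
  return 0;  /* Prints: 0 2 -1 */
}
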
diff --git a/3rdparty/lua/src/lj_lib.h b/3rdparty/lua/src/lj_lib.h
index 57986fb..2fe6d2a 100644
--- a/3rdparty/lua/src/lj_lib.h
+++ b/3rdparty/lua/src/lj_lib.h
@@ -1,112 +1,112 @@
-/*
-** Library function support.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_LIB_H
-#define _LJ_LIB_H
-
-#include "lj_obj.h"
-
-/*
-** A fallback handler is called by the assembler VM if the fast path fails:
-**
-** - too few arguments: unrecoverable.
-** - wrong argument type: recoverable, if coercion succeeds.
-** - bad argument value: unrecoverable.
-** - stack overflow: recoverable, if stack reallocation succeeds.
-** - extra handling: recoverable.
-**
-** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(),
-** lj_err_caller() or lj_err_callermsg().
-** The recoverable cases return 0 or the number of results + 1.
-** The assembler VM retries the fast path only if 0 is returned.
-** This time the fallback must not be called again or it gets stuck in a loop.
-*/
-
-/* Return values from fallback handler. */
-#define FFH_RETRY 0
-#define FFH_UNREACHABLE FFH_RETRY
-#define FFH_RES(n) ((n)+1)
-#define FFH_TAILCALL (-1)
-
-LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg);
-LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg);
-LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg);
-#if LJ_DUALNUM
-LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
-#else
-#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg))
-#endif
-LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
-LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
-LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
-LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg);
-LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
-LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
-LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
-LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
-
-/* Avoid including lj_frame.h. */
-#define lj_lib_upvalue(L, n) \
- (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
-
-#if LJ_TARGET_WINDOWS
-#define lj_lib_checkfpu(L) \
- do { setnumV(L->top++, (lua_Number)1437217655); \
- if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \
- L->top--; } while (0)
-#else
-#define lj_lib_checkfpu(L) UNUSED(L)
-#endif
-
-/* Push internal function on the stack. */
-static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
- int id, int n)
-{
- GCfunc *fn;
- lua_pushcclosure(L, f, n);
- fn = funcV(L->top-1);
- fn->c.ffid = (uint8_t)id;
- setmref(fn->c.pc, &G(L)->bc_cfunc_int);
-}
-
-#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
-
-/* Library function declarations. Scanned by buildvm. */
-#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
-#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
-#define LJLIB_ASM_(name)
-#define LJLIB_SET(name)
-#define LJLIB_PUSH(arg)
-#define LJLIB_REC(handler)
-#define LJLIB_NOREGUV
-#define LJLIB_NOREG
-
-#define LJ_LIB_REG(L, regname, name) \
- lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name)
-
-LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
- const uint8_t *init, const lua_CFunction *cf);
-
-/* Library init data tags. */
-#define LIBINIT_LENMASK 0x3f
-#define LIBINIT_TAGMASK 0xc0
-#define LIBINIT_CF 0x00
-#define LIBINIT_ASM 0x40
-#define LIBINIT_ASM_ 0x80
-#define LIBINIT_STRING 0xc0
-#define LIBINIT_MAXSTR 0x39
-#define LIBINIT_SET 0xfa
-#define LIBINIT_NUMBER 0xfb
-#define LIBINIT_COPY 0xfc
-#define LIBINIT_LASTCL 0xfd
-#define LIBINIT_FFID 0xfe
-#define LIBINIT_END 0xff
-
-/* Exported library functions. */
-
-typedef struct RandomState RandomState;
-LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs);
-
-#endif
+/*
+** Library function support.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LIB_H
+#define _LJ_LIB_H
+
+#include "lj_obj.h"
+
+/*
+** A fallback handler is called by the assembler VM if the fast path fails:
+**
+** - too few arguments: unrecoverable.
+** - wrong argument type: recoverable, if coercion succeeds.
+** - bad argument value: unrecoverable.
+** - stack overflow: recoverable, if stack reallocation succeeds.
+** - extra handling: recoverable.
+**
+** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(),
+** lj_err_caller() or lj_err_callermsg().
+** The recoverable cases return 0 or the number of results + 1.
+** The assembler VM retries the fast path only if 0 is returned.
+** This time the fallback must not be called again or it gets stuck in a loop.
+*/
+
+/* Return values from fallback handler. */
+#define FFH_RETRY 0
+#define FFH_UNREACHABLE FFH_RETRY
+#define FFH_RES(n) ((n)+1)
+#define FFH_TAILCALL (-1)
+
+LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg);
+#if LJ_DUALNUM
+LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
+#else
+#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg))
+#endif
+LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
+LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg);
+LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
+LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
+
+/* Avoid including lj_frame.h. */
+#define lj_lib_upvalue(L, n) \
+ (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
+
+#if LJ_TARGET_WINDOWS
+#define lj_lib_checkfpu(L) \
+ do { setnumV(L->top++, (lua_Number)1437217655); \
+ if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \
+ L->top--; } while (0)
+#else
+#define lj_lib_checkfpu(L) UNUSED(L)
+#endif
+
+/* Push internal function on the stack. */
+static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
+ int id, int n)
+{
+ GCfunc *fn;
+ lua_pushcclosure(L, f, n);
+ fn = funcV(L->top-1);
+ fn->c.ffid = (uint8_t)id;
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+}
+
+#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
+
+/* Library function declarations. Scanned by buildvm. */
+#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
+#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
+#define LJLIB_ASM_(name)
+#define LJLIB_SET(name)
+#define LJLIB_PUSH(arg)
+#define LJLIB_REC(handler)
+#define LJLIB_NOREGUV
+#define LJLIB_NOREG
+
+#define LJ_LIB_REG(L, regname, name) \
+ lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name)
+
+LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *init, const lua_CFunction *cf);
+
+/* Library init data tags. */
+#define LIBINIT_LENMASK 0x3f
+#define LIBINIT_TAGMASK 0xc0
+#define LIBINIT_CF 0x00
+#define LIBINIT_ASM 0x40
+#define LIBINIT_ASM_ 0x80
+#define LIBINIT_STRING 0xc0
+#define LIBINIT_MAXSTR 0x39
+#define LIBINIT_SET 0xfa
+#define LIBINIT_NUMBER 0xfb
+#define LIBINIT_COPY 0xfc
+#define LIBINIT_LASTCL 0xfd
+#define LIBINIT_FFID 0xfe
+#define LIBINIT_END 0xff
+
+/* Exported library functions. */
+
+typedef struct RandomState RandomState;
+LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs);
+
+#endif
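
For illustration only (this sketch is not part of the changeset): the restored header is what buildvm scans. Each LJLIB_CF/LJLIB_ASM declaration in a lib_*.c module becomes an entry in the generated lj_lib_init_<name>/lj_lib_cf_<name> tables, and LJ_LIB_REG hands those tables to lj_lib_register(). A minimal sketch of the consuming side, assuming a hypothetical module named "demo"; the generated tables come from buildvm, not from this code:

#include "lua.h"
#include "lualib.h"
#include "lj_lib.h"

/* Hypothetical module "demo": buildvm would scan this file and emit
** lj_lib_init_demo[] and lj_lib_cf_demo[] from the LJLIB_* declarations. */
LJLIB_CF(demo_answer)      /* Expands to: static int lj_cf_demo_answer(lua_State *L) */
{
  lua_pushinteger(L, 42);  /* Plain C library functions return a result count. */
  return 1;
}

#include "lj_libdef.h"     /* Tables generated by buildvm from the declarations above. */

LUALIB_API int luaopen_demo(lua_State *L)
{
  LJ_LIB_REG(L, "demo", demo);  /* -> lj_lib_register(L, "demo", lj_lib_init_demo, lj_lib_cf_demo) */
  return 1;
}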
diff --git a/3rdparty/lua/src/lj_load.c b/3rdparty/lua/src/lj_load.c
index 5490a4f..9d89267 100644
--- a/3rdparty/lua/src/lj_load.c
+++ b/3rdparty/lua/src/lj_load.c
@@ -1,168 +1,168 @@
-/*
-** Load and dump code.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include <errno.h>
-#include <stdio.h>
-
-#define lj_load_c
-#define LUA_CORE
-
-#include "lua.h"
-#include "lauxlib.h"
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_func.h"
-#include "lj_frame.h"
-#include "lj_vm.h"
-#include "lj_lex.h"
-#include "lj_bcdump.h"
-#include "lj_parse.h"
-
-/* -- Load Lua source code and bytecode ----------------------------------- */
-
-static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud)
-{
- LexState *ls = (LexState *)ud;
- GCproto *pt;
- GCfunc *fn;
- int bc;
- UNUSED(dummy);
- cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
- bc = lj_lex_setup(L, ls);
- if (ls->mode && !strchr(ls->mode, bc ? 'b' : 't')) {
- setstrV(L, L->top++, lj_err_str(L, LJ_ERR_XMODE));
- lj_err_throw(L, LUA_ERRSYNTAX);
- }
- pt = bc ? lj_bcread(ls) : lj_parse(ls);
- fn = lj_func_newL_empty(L, pt, tabref(L->env));
- /* Don't combine above/below into one statement. */
- setfuncV(L, L->top++, fn);
- return NULL;
-}
-
-LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data,
- const char *chunkname, const char *mode)
-{
- LexState ls;
- int status;
- ls.rfunc = reader;
- ls.rdata = data;
- ls.chunkarg = chunkname ? chunkname : "?";
- ls.mode = mode;
- lj_str_initbuf(&ls.sb);
- status = lj_vm_cpcall(L, NULL, &ls, cpparser);
- lj_lex_cleanup(L, &ls);
- lj_gc_check(L);
- return status;
-}
-
-LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data,
- const char *chunkname)
-{
- return lua_loadx(L, reader, data, chunkname, NULL);
-}
-
-typedef struct FileReaderCtx {
- FILE *fp;
- char buf[LUAL_BUFFERSIZE];
-} FileReaderCtx;
-
-static const char *reader_file(lua_State *L, void *ud, size_t *size)
-{
- FileReaderCtx *ctx = (FileReaderCtx *)ud;
- UNUSED(L);
- if (feof(ctx->fp)) return NULL;
- *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp);
- return *size > 0 ? ctx->buf : NULL;
-}
-
-LUALIB_API int luaL_loadfilex(lua_State *L, const char *filename,
- const char *mode)
-{
- FileReaderCtx ctx;
- int status;
- const char *chunkname;
- if (filename) {
- ctx.fp = fopen(filename, "rb");
- if (ctx.fp == NULL) {
- lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno));
- return LUA_ERRFILE;
- }
- chunkname = lua_pushfstring(L, "@%s", filename);
- } else {
- ctx.fp = stdin;
- chunkname = "=stdin";
- }
- status = lua_loadx(L, reader_file, &ctx, chunkname, mode);
- if (ferror(ctx.fp)) {
- L->top -= filename ? 2 : 1;
- lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno));
- if (filename)
- fclose(ctx.fp);
- return LUA_ERRFILE;
- }
- if (filename) {
- L->top--;
- copyTV(L, L->top-1, L->top);
- fclose(ctx.fp);
- }
- return status;
-}
-
-LUALIB_API int luaL_loadfile(lua_State *L, const char *filename)
-{
- return luaL_loadfilex(L, filename, NULL);
-}
-
-typedef struct StringReaderCtx {
- const char *str;
- size_t size;
-} StringReaderCtx;
-
-static const char *reader_string(lua_State *L, void *ud, size_t *size)
-{
- StringReaderCtx *ctx = (StringReaderCtx *)ud;
- UNUSED(L);
- if (ctx->size == 0) return NULL;
- *size = ctx->size;
- ctx->size = 0;
- return ctx->str;
-}
-
-LUALIB_API int luaL_loadbufferx(lua_State *L, const char *buf, size_t size,
- const char *name, const char *mode)
-{
- StringReaderCtx ctx;
- ctx.str = buf;
- ctx.size = size;
- return lua_loadx(L, reader_string, &ctx, name, mode);
-}
-
-LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size,
- const char *name)
-{
- return luaL_loadbufferx(L, buf, size, name, NULL);
-}
-
-LUALIB_API int luaL_loadstring(lua_State *L, const char *s)
-{
- return luaL_loadbuffer(L, s, strlen(s), s);
-}
-
-/* -- Dump bytecode ------------------------------------------------------- */
-
-LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data)
-{
- cTValue *o = L->top-1;
- api_check(L, L->top > L->base);
- if (tvisfunc(o) && isluafunc(funcV(o)))
- return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0);
- else
- return 1;
-}
-
+/*
+** Load and dump code.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+
+#define lj_load_c
+#define LUA_CORE
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_func.h"
+#include "lj_frame.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_parse.h"
+
+/* -- Load Lua source code and bytecode ----------------------------------- */
+
+static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ LexState *ls = (LexState *)ud;
+ GCproto *pt;
+ GCfunc *fn;
+ int bc;
+ UNUSED(dummy);
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ bc = lj_lex_setup(L, ls);
+ if (ls->mode && !strchr(ls->mode, bc ? 'b' : 't')) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_XMODE));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+ }
+ pt = bc ? lj_bcread(ls) : lj_parse(ls);
+ fn = lj_func_newL_empty(L, pt, tabref(L->env));
+ /* Don't combine above/below into one statement. */
+ setfuncV(L, L->top++, fn);
+ return NULL;
+}
+
+LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data,
+ const char *chunkname, const char *mode)
+{
+ LexState ls;
+ int status;
+ ls.rfunc = reader;
+ ls.rdata = data;
+ ls.chunkarg = chunkname ? chunkname : "?";
+ ls.mode = mode;
+ lj_str_initbuf(&ls.sb);
+ status = lj_vm_cpcall(L, NULL, &ls, cpparser);
+ lj_lex_cleanup(L, &ls);
+ lj_gc_check(L);
+ return status;
+}
+
+LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data,
+ const char *chunkname)
+{
+ return lua_loadx(L, reader, data, chunkname, NULL);
+}
+
+typedef struct FileReaderCtx {
+ FILE *fp;
+ char buf[LUAL_BUFFERSIZE];
+} FileReaderCtx;
+
+static const char *reader_file(lua_State *L, void *ud, size_t *size)
+{
+ FileReaderCtx *ctx = (FileReaderCtx *)ud;
+ UNUSED(L);
+ if (feof(ctx->fp)) return NULL;
+ *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp);
+ return *size > 0 ? ctx->buf : NULL;
+}
+
+LUALIB_API int luaL_loadfilex(lua_State *L, const char *filename,
+ const char *mode)
+{
+ FileReaderCtx ctx;
+ int status;
+ const char *chunkname;
+ if (filename) {
+ ctx.fp = fopen(filename, "rb");
+ if (ctx.fp == NULL) {
+ lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno));
+ return LUA_ERRFILE;
+ }
+ chunkname = lua_pushfstring(L, "@%s", filename);
+ } else {
+ ctx.fp = stdin;
+ chunkname = "=stdin";
+ }
+ status = lua_loadx(L, reader_file, &ctx, chunkname, mode);
+ if (ferror(ctx.fp)) {
+ L->top -= filename ? 2 : 1;
+ lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno));
+ if (filename)
+ fclose(ctx.fp);
+ return LUA_ERRFILE;
+ }
+ if (filename) {
+ L->top--;
+ copyTV(L, L->top-1, L->top);
+ fclose(ctx.fp);
+ }
+ return status;
+}
+
+LUALIB_API int luaL_loadfile(lua_State *L, const char *filename)
+{
+ return luaL_loadfilex(L, filename, NULL);
+}
+
+typedef struct StringReaderCtx {
+ const char *str;
+ size_t size;
+} StringReaderCtx;
+
+static const char *reader_string(lua_State *L, void *ud, size_t *size)
+{
+ StringReaderCtx *ctx = (StringReaderCtx *)ud;
+ UNUSED(L);
+ if (ctx->size == 0) return NULL;
+ *size = ctx->size;
+ ctx->size = 0;
+ return ctx->str;
+}
+
+LUALIB_API int luaL_loadbufferx(lua_State *L, const char *buf, size_t size,
+ const char *name, const char *mode)
+{
+ StringReaderCtx ctx;
+ ctx.str = buf;
+ ctx.size = size;
+ return lua_loadx(L, reader_string, &ctx, name, mode);
+}
+
+LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size,
+ const char *name)
+{
+ return luaL_loadbufferx(L, buf, size, name, NULL);
+}
+
+LUALIB_API int luaL_loadstring(lua_State *L, const char *s)
+{
+ return luaL_loadbuffer(L, s, strlen(s), s);
+}
+
+/* -- Dump bytecode ------------------------------------------------------- */
+
+LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data)
+{
+ cTValue *o = L->top-1;
+ api_check(L, L->top > L->base);
+ if (tvisfunc(o) && isluafunc(funcV(o)))
+ return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0);
+ else
+ return 1;
+}
+
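
As a usage sketch (not part of the changeset): the mode check in cpparser() above is what makes the 'x' variants of the load functions useful, since passing "t" rejects precompiled bytecode and "b" rejects source text. A small example of loading a text-only chunk through the public API, with error handling kept minimal:

#include <string.h>
#include "lua.h"
#include "lauxlib.h"

/* Load a buffer as Lua source only ('t' mode) and run it; returns a Lua
** status code, with any error message left on the stack. */
static int run_text_chunk(lua_State *L, const char *src)
{
  int status = luaL_loadbufferx(L, src, strlen(src), "=chunk", "t");
  if (status != 0) return status;           /* e.g. LUA_ERRSYNTAX if bytecode was rejected. */
  return lua_pcall(L, 0, LUA_MULTRET, 0);   /* Run with no arguments, keep all results. */
}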
diff --git a/3rdparty/lua/src/lj_mcode.c b/3rdparty/lua/src/lj_mcode.c
index bfd2aca..cb79e8c 100644
--- a/3rdparty/lua/src/lj_mcode.c
+++ b/3rdparty/lua/src/lj_mcode.c
@@ -1,386 +1,360 @@
-/*
-** Machine code management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_mcode_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#if LJ_HASJIT
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_jit.h"
-#include "lj_mcode.h"
-#include "lj_trace.h"
-#include "lj_dispatch.h"
-#endif
-#if LJ_HASJIT || LJ_HASFFI
-#include "lj_vm.h"
-#endif
-
-/* -- OS-specific functions ----------------------------------------------- */
-
-#if LJ_HASJIT || LJ_HASFFI
-
-/* Define this if you want to run LuaJIT with Valgrind. */
-#ifdef LUAJIT_USE_VALGRIND
-#include <valgrind/valgrind.h>
-#endif
-
-#if LJ_TARGET_IOS
-void sys_icache_invalidate(void *start, size_t len);
-#endif
-
-/* Synchronize data/instruction cache. */
-void lj_mcode_sync(void *start, void *end)
-{
-#ifdef LUAJIT_USE_VALGRIND
- VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
-#endif
-#if LJ_TARGET_X86ORX64
- UNUSED(start); UNUSED(end);
-#elif LJ_TARGET_IOS
- sys_icache_invalidate(start, (char *)end-(char *)start);
-#elif LJ_TARGET_PPC
- lj_vm_cachesync(start, end);
-#elif defined(__GNUC__)
- __clear_cache(start, end);
-#else
-#error "Missing builtin to flush instruction cache"
-#endif
-}
-
-#endif
-
-#if LJ_HASJIT
-
-#if LJ_TARGET_WINDOWS
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-#define MCPROT_RW PAGE_READWRITE
-#define MCPROT_RX PAGE_EXECUTE_READ
-#define MCPROT_RWX PAGE_EXECUTE_READWRITE
-
-static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
-{
- void *p = VirtualAlloc((void *)hint, sz,
- MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
- if (!p && !hint)
- lj_trace_err(J, LJ_TRERR_MCODEAL);
- return p;
-}
-
-static void mcode_free(jit_State *J, void *p, size_t sz)
-{
- UNUSED(J); UNUSED(sz);
- VirtualFree(p, 0, MEM_RELEASE);
-}
-
-static int mcode_setprot(void *p, size_t sz, DWORD prot)
-{
- DWORD oprot;
- return !VirtualProtect(p, sz, prot, &oprot);
-}
-
-#elif LJ_TARGET_POSIX
-
-#include <sys/mman.h>
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#define MCPROT_RW (PROT_READ|PROT_WRITE)
-#define MCPROT_RX (PROT_READ|PROT_EXEC)
-#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)
-
-static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
-{
- void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED) {
- if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
- p = NULL;
- }
- return p;
-}
-
-static void mcode_free(jit_State *J, void *p, size_t sz)
-{
- UNUSED(J);
- munmap(p, sz);
-}
-
-static int mcode_setprot(void *p, size_t sz, int prot)
-{
- return mprotect(p, sz, prot);
-}
-
-#elif LJ_64
-
-#error "Missing OS support for explicit placement of executable memory"
-
-#else
-
-/* Fallback allocator. This will fail if memory is not executable by default. */
-#define LUAJIT_UNPROTECT_MCODE
-#define MCPROT_RW 0
-#define MCPROT_RX 0
-#define MCPROT_RWX 0
-
-static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
-{
- UNUSED(hint); UNUSED(prot);
- return lj_mem_new(J->L, sz);
-}
-
-static void mcode_free(jit_State *J, void *p, size_t sz)
-{
- lj_mem_free(J2G(J), p, sz);
-}
-
-#endif
-
-/* -- MCode area protection ----------------------------------------------- */
-
-/* Define this ONLY if page protection twiddling becomes a bottleneck. */
-#ifdef LUAJIT_UNPROTECT_MCODE
-
-/* It's generally considered to be a potential security risk to have
-** pages with simultaneous write *and* execute access in a process.
-**
-** Do not even think about using this mode for server processes or
-** apps handling untrusted external data (such as a browser).
-**
-** The security risk is not in LuaJIT itself -- but if an adversary finds
-** any *other* flaw in your C application logic, then any RWX memory page
-** simplifies writing an exploit considerably.
-*/
-#define MCPROT_GEN MCPROT_RWX
-#define MCPROT_RUN MCPROT_RWX
-
-static void mcode_protect(jit_State *J, int prot)
-{
- UNUSED(J); UNUSED(prot);
-}
-
-#else
-
-/* This is the default behaviour and much safer:
-**
-** Most of the time the memory pages holding machine code are executable,
-** but NONE of them is writable.
-**
-** The current memory area is marked read-write (but NOT executable) only
-** during the short time window while the assembler generates machine code.
-*/
-#define MCPROT_GEN MCPROT_RW
-#define MCPROT_RUN MCPROT_RX
-
-/* Protection twiddling failed. Probably due to kernel security. */
-static LJ_NOINLINE void mcode_protfail(jit_State *J)
-{
- lua_CFunction panic = J2G(J)->panic;
- if (panic) {
- lua_State *L = J->L;
- setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
- panic(L);
- }
-}
-
-/* Change protection of MCode area. */
-static void mcode_protect(jit_State *J, int prot)
-{
- if (J->mcprot != prot) {
- if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
- mcode_protfail(J);
- J->mcprot = prot;
- }
-}
-
-#endif
-
-/* -- MCode area allocation ----------------------------------------------- */
-
-#if LJ_TARGET_X64
-#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
-#else
-#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
-#endif
-
-#ifdef LJ_TARGET_JUMPRANGE
-
-/* Get memory within relative jump distance of our code in 64 bit mode. */
-static void *mcode_alloc(jit_State *J, size_t sz)
-{
- /* Target an address in the static assembler code (64K aligned).
- ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
- ** Use half the jump range so every address in the range can reach any other.
- */
-#if LJ_TARGET_MIPS
- /* Use the middle of the 256MB-aligned region. */
- uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
- 0x08000000u;
-#else
- uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
-#endif
- const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
- /* First try a contiguous area below the last one. */
- uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
- int i;
- for (i = 0; i < 32; i++) { /* 32 attempts ought to be enough ... */
- if (mcode_validptr(hint)) {
- void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
-
- if (mcode_validptr(p) &&
- ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
- return p;
- if (p) mcode_free(J, p, sz); /* Free badly placed area. */
- }
- /* Next try probing pseudo-random addresses. */
- do {
- hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16; /* 64K aligned. */
- } while (!(hint + sz < range));
- hint = target + hint - (range>>1);
- }
- lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */
- return NULL;
-}
-
-#else
-
-/* All memory addresses are reachable by relative jumps. */
-static void *mcode_alloc(jit_State *J, size_t sz)
-{
-#ifdef __OpenBSD__
- /* Allow better executable memory allocation for OpenBSD W^X mode. */
- void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
- if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
- mcode_free(J, p, sz);
- return NULL;
- }
- return p;
-#else
- return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
-#endif
-}
-
-#endif
-
-/* -- MCode area management ----------------------------------------------- */
-
-/* Linked list of MCode areas. */
-typedef struct MCLink {
- MCode *next; /* Next area. */
- size_t size; /* Size of current area. */
-} MCLink;
-
-/* Allocate a new MCode area. */
-static void mcode_allocarea(jit_State *J)
-{
- MCode *oldarea = J->mcarea;
- size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
- sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
- J->mcarea = (MCode *)mcode_alloc(J, sz);
- J->szmcarea = sz;
- J->mcprot = MCPROT_GEN;
- J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
- J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
- ((MCLink *)J->mcarea)->next = oldarea;
- ((MCLink *)J->mcarea)->size = sz;
- J->szallmcarea += sz;
-}
-
-/* Free all MCode areas. */
-void lj_mcode_free(jit_State *J)
-{
- MCode *mc = J->mcarea;
- J->mcarea = NULL;
- J->szallmcarea = 0;
- while (mc) {
- MCode *next = ((MCLink *)mc)->next;
- mcode_free(J, mc, ((MCLink *)mc)->size);
- mc = next;
- }
-}
-
-/* -- MCode transactions -------------------------------------------------- */
-
-/* Reserve the remainder of the current MCode area. */
-MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
-{
- if (!J->mcarea)
- mcode_allocarea(J);
- else
- mcode_protect(J, MCPROT_GEN);
- *lim = J->mcbot;
- return J->mctop;
-}
-
-/* Commit the top part of the current MCode area. */
-void lj_mcode_commit(jit_State *J, MCode *top)
-{
- J->mctop = top;
- mcode_protect(J, MCPROT_RUN);
-}
-
-/* Abort the reservation. */
-void lj_mcode_abort(jit_State *J)
-{
- if (J->mcarea)
- mcode_protect(J, MCPROT_RUN);
-}
-
-/* Set/reset protection to allow patching of MCode areas. */
-MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
-{
-#ifdef LUAJIT_UNPROTECT_MCODE
- UNUSED(J); UNUSED(ptr); UNUSED(finish);
- return NULL;
-#else
- if (finish) {
- if (J->mcarea == ptr)
- mcode_protect(J, MCPROT_RUN);
- else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
- mcode_protfail(J);
- return NULL;
- } else {
- MCode *mc = J->mcarea;
- /* Try current area first to use the protection cache. */
- if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
- mcode_protect(J, MCPROT_GEN);
- return mc;
- }
- /* Otherwise search through the list of MCode areas. */
- for (;;) {
- mc = ((MCLink *)mc)->next;
- lua_assert(mc != NULL);
- if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
- if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
- mcode_protfail(J);
- return mc;
- }
- }
- }
-#endif
-}
-
-/* Limit of MCode reservation reached. */
-void lj_mcode_limiterr(jit_State *J, size_t need)
-{
- size_t sizemcode, maxmcode;
- lj_mcode_abort(J);
- sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
- sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
- maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
- if ((size_t)need > sizemcode)
- lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */
- if (J->szallmcarea + sizemcode > maxmcode)
- lj_trace_err(J, LJ_TRERR_MCODEAL);
- mcode_allocarea(J);
- lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */
-}
-
-#endif
+/*
+** Machine code management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_mcode_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#if LJ_HASJIT
+#include "lj_gc.h"
+#include "lj_jit.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#endif
+#if LJ_HASJIT || LJ_HASFFI
+#include "lj_vm.h"
+#endif
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_HASJIT || LJ_HASFFI
+
+/* Define this if you want to run LuaJIT with Valgrind. */
+#ifdef LUAJIT_USE_VALGRIND
+#include <valgrind/valgrind.h>
+#endif
+
+#if LJ_TARGET_IOS
+void sys_icache_invalidate(void *start, size_t len);
+#endif
+
+/* Synchronize data/instruction cache. */
+void lj_mcode_sync(void *start, void *end)
+{
+#ifdef LUAJIT_USE_VALGRIND
+ VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
+#endif
+#if LJ_TARGET_X86ORX64
+ UNUSED(start); UNUSED(end);
+#elif LJ_TARGET_IOS
+ sys_icache_invalidate(start, (char *)end-(char *)start);
+#elif LJ_TARGET_PPC
+ lj_vm_cachesync(start, end);
+#elif defined(__GNUC__)
+ __clear_cache(start, end);
+#else
+#error "Missing builtin to flush instruction cache"
+#endif
+}
+
+#endif
+
+#if LJ_HASJIT
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#define MCPROT_RW PAGE_READWRITE
+#define MCPROT_RX PAGE_EXECUTE_READ
+#define MCPROT_RWX PAGE_EXECUTE_READWRITE
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
+{
+ void *p = VirtualAlloc((void *)hint, sz,
+ MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
+ if (!p && !hint)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ UNUSED(J); UNUSED(sz);
+ VirtualFree(p, 0, MEM_RELEASE);
+}
+
+static void mcode_setprot(void *p, size_t sz, DWORD prot)
+{
+ DWORD oprot;
+ VirtualProtect(p, sz, prot, &oprot);
+}
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#define MCPROT_RW (PROT_READ|PROT_WRITE)
+#define MCPROT_RX (PROT_READ|PROT_EXEC)
+#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+ void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED) {
+ if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
+ p = NULL;
+ }
+ return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ UNUSED(J);
+ munmap(p, sz);
+}
+
+static void mcode_setprot(void *p, size_t sz, int prot)
+{
+ mprotect(p, sz, prot);
+}
+
+#elif LJ_64
+
+#error "Missing OS support for explicit placement of executable memory"
+
+#else
+
+/* Fallback allocator. This will fail if memory is not executable by default. */
+#define LUAJIT_UNPROTECT_MCODE
+#define MCPROT_RW 0
+#define MCPROT_RX 0
+#define MCPROT_RWX 0
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+ UNUSED(hint); UNUSED(prot);
+ return lj_mem_new(J->L, sz);
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ lj_mem_free(J2G(J), p, sz);
+}
+
+#define mcode_setprot(p, sz, prot) UNUSED(p)
+
+#endif
+
+/* -- MCode area protection ----------------------------------------------- */
+
+/* Define this ONLY if the page protection twiddling becomes a bottleneck. */
+#ifdef LUAJIT_UNPROTECT_MCODE
+
+/* It's generally considered to be a potential security risk to have
+** pages with simultaneous write *and* execute access in a process.
+**
+** Do not even think about using this mode for server processes or
+** apps handling untrusted external data (such as a browser).
+**
+** The security risk is not in LuaJIT itself -- but if an adversary finds
+** any *other* flaw in your C application logic, then any RWX memory page
+** simplifies writing an exploit considerably.
+*/
+#define MCPROT_GEN MCPROT_RWX
+#define MCPROT_RUN MCPROT_RWX
+
+static void mcode_protect(jit_State *J, int prot)
+{
+ UNUSED(J); UNUSED(prot);
+}
+
+#else
+
+/* This is the default behaviour and much safer:
+**
+** Most of the time the memory pages holding machine code are executable,
+** but NONE of them is writable.
+**
+** The current memory area is marked read-write (but NOT executable) only
+** during the short time window while the assembler generates machine code.
+*/
+#define MCPROT_GEN MCPROT_RW
+#define MCPROT_RUN MCPROT_RX
+
+/* Change protection of MCode area. */
+static void mcode_protect(jit_State *J, int prot)
+{
+ if (J->mcprot != prot) {
+ mcode_setprot(J->mcarea, J->szmcarea, prot);
+ J->mcprot = prot;
+ }
+}
+
+#endif
+
+/* -- MCode area allocation ----------------------------------------------- */
+
+#if LJ_TARGET_X64
+#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
+#else
+#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
+#endif
+
+#ifdef LJ_TARGET_JUMPRANGE
+
+/* Get memory within relative jump distance of our code in 64 bit mode. */
+static void *mcode_alloc(jit_State *J, size_t sz)
+{
+ /* Target an address in the static assembler code (64K aligned).
+ ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
+ ** Use half the jump range so every address in the range can reach any other.
+ */
+#if LJ_TARGET_MIPS
+ /* Use the middle of the 256MB-aligned region. */
+ uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
+ 0x08000000u;
+#else
+ uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
+#endif
+ const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
+ /* First try a contiguous area below the last one. */
+ uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
+ int i;
+ for (i = 0; i < 32; i++) { /* 32 attempts ought to be enough ... */
+ if (mcode_validptr(hint)) {
+ void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
+
+ if (mcode_validptr(p) &&
+ ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
+ return p;
+ if (p) mcode_free(J, p, sz); /* Free badly placed area. */
+ }
+ /* Next try probing pseudo-random addresses. */
+ do {
+ hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16; /* 64K aligned. */
+ } while (!(hint + sz < range));
+ hint = target + hint - (range>>1);
+ }
+ lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */
+ return NULL;
+}
+
+#else
+
+/* All memory addresses are reachable by relative jumps. */
+#define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN)
+
+#endif
+
+/* -- MCode area management ----------------------------------------------- */
+
+/* Linked list of MCode areas. */
+typedef struct MCLink {
+ MCode *next; /* Next area. */
+ size_t size; /* Size of current area. */
+} MCLink;
+
+/* Allocate a new MCode area. */
+static void mcode_allocarea(jit_State *J)
+{
+ MCode *oldarea = J->mcarea;
+ size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
+ sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+ J->mcarea = (MCode *)mcode_alloc(J, sz);
+ J->szmcarea = sz;
+ J->mcprot = MCPROT_GEN;
+ J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
+ J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
+ ((MCLink *)J->mcarea)->next = oldarea;
+ ((MCLink *)J->mcarea)->size = sz;
+ J->szallmcarea += sz;
+}
+
+/* Free all MCode areas. */
+void lj_mcode_free(jit_State *J)
+{
+ MCode *mc = J->mcarea;
+ J->mcarea = NULL;
+ J->szallmcarea = 0;
+ while (mc) {
+ MCode *next = ((MCLink *)mc)->next;
+ mcode_free(J, mc, ((MCLink *)mc)->size);
+ mc = next;
+ }
+}
+
+/* -- MCode transactions -------------------------------------------------- */
+
+/* Reserve the remainder of the current MCode area. */
+MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
+{
+ if (!J->mcarea)
+ mcode_allocarea(J);
+ else
+ mcode_protect(J, MCPROT_GEN);
+ *lim = J->mcbot;
+ return J->mctop;
+}
+
+/* Commit the top part of the current MCode area. */
+void lj_mcode_commit(jit_State *J, MCode *top)
+{
+ J->mctop = top;
+ mcode_protect(J, MCPROT_RUN);
+}
+
+/* Abort the reservation. */
+void lj_mcode_abort(jit_State *J)
+{
+ mcode_protect(J, MCPROT_RUN);
+}
+
+/* Set/reset protection to allow patching of MCode areas. */
+MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
+{
+#ifdef LUAJIT_UNPROTECT_MCODE
+ UNUSED(J); UNUSED(ptr); UNUSED(finish);
+ return NULL;
+#else
+ if (finish) {
+ if (J->mcarea == ptr)
+ mcode_protect(J, MCPROT_RUN);
+ else
+ mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN);
+ return NULL;
+ } else {
+ MCode *mc = J->mcarea;
+ /* Try current area first to use the protection cache. */
+ if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
+ mcode_protect(J, MCPROT_GEN);
+ return mc;
+ }
+ /* Otherwise search through the list of MCode areas. */
+ for (;;) {
+ mc = ((MCLink *)mc)->next;
+ lua_assert(mc != NULL);
+ if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
+ mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN);
+ return mc;
+ }
+ }
+ }
+#endif
+}
+
+/* Limit of MCode reservation reached. */
+void lj_mcode_limiterr(jit_State *J, size_t need)
+{
+ size_t sizemcode, maxmcode;
+ lj_mcode_abort(J);
+ sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
+ sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+ maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
+ if ((size_t)need > sizemcode)
+ lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */
+ if (J->szallmcarea + sizemcode > maxmcode)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ mcode_allocarea(J);
+ lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */
+}
+
+#endif
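
For illustration only (not part of the changeset): the transaction functions above flip the current area between MCPROT_GEN (writable) and MCPROT_RUN (executable). The assembler's use of them follows the pattern below; emit_trace() is a hypothetical stand-in for the real code generator, which emits downwards from the top of the area toward the returned limit:

/* J is the jit_State; machine-code emission happens between reserve and commit. */
MCode *lim;
MCode *top = lj_mcode_reserve(J, &lim);  /* Area is now MCPROT_GEN (read-write). */
MCode *start = emit_trace(J, top, lim);  /* Hypothetical emitter, grows down toward lim. */
if (start) {
  lj_mcode_commit(J, start);             /* Sets J->mctop and re-protects to MCPROT_RUN. */
} else {
  lj_mcode_abort(J);                     /* Nothing kept; just restore MCPROT_RUN. */
}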
diff --git a/3rdparty/lua/src/lj_mcode.h b/3rdparty/lua/src/lj_mcode.h
index 3373cbc..2ba371b 100644
--- a/3rdparty/lua/src/lj_mcode.h
+++ b/3rdparty/lua/src/lj_mcode.h
@@ -1,30 +1,30 @@
-/*
-** Machine code management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_MCODE_H
-#define _LJ_MCODE_H
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT || LJ_HASFFI
-LJ_FUNC void lj_mcode_sync(void *start, void *end);
-#endif
-
-#if LJ_HASJIT
-
-#include "lj_jit.h"
-
-LJ_FUNC void lj_mcode_free(jit_State *J);
-LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim);
-LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m);
-LJ_FUNC void lj_mcode_abort(jit_State *J);
-LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish);
-LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need);
-
-#define lj_mcode_commitbot(J, m) (J->mcbot = (m))
-
-#endif
-
-#endif
+/*
+** Machine code management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_MCODE_H
+#define _LJ_MCODE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT || LJ_HASFFI
+LJ_FUNC void lj_mcode_sync(void *start, void *end);
+#endif
+
+#if LJ_HASJIT
+
+#include "lj_jit.h"
+
+LJ_FUNC void lj_mcode_free(jit_State *J);
+LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim);
+LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m);
+LJ_FUNC void lj_mcode_abort(jit_State *J);
+LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish);
+LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need);
+
+#define lj_mcode_commitbot(J, m) (J->mcbot = (m))
+
+#endif
+
+#endif
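
A hedged sketch (not part of the changeset): lj_mcode_patch() is how already-committed code is made writable again, and its calls are paired. The first call (finish=0) drops the enclosing area to MCPROT_GEN and returns its start; the second (finish=1) restores MCPROT_RUN. The store at p and the flushed size are illustrative assumptions:

MCode *mcarea = lj_mcode_patch(J, p, 0);  /* Unprotect the area containing p. */
/* ... overwrite the branch or exit stub at p here (target-specific stores) ... */
lj_mcode_sync(p, p + 4);                  /* Flush the I-cache for the patched range (size illustrative). */
lj_mcode_patch(J, mcarea, 1);             /* Re-protect the whole area. */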
diff --git a/3rdparty/lua/src/lj_meta.c b/3rdparty/lua/src/lj_meta.c
index 7f0c40d..441d571 100644
--- a/3rdparty/lua/src/lj_meta.c
+++ b/3rdparty/lua/src/lj_meta.c
@@ -1,466 +1,466 @@
-/*
-** Metamethod handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_meta_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_frame.h"
-#include "lj_bc.h"
-#include "lj_vm.h"
-#include "lj_strscan.h"
-
-/* -- Metamethod handling ------------------------------------------------- */
-
-/* String interning of metamethod names for fast indexing. */
-void lj_meta_init(lua_State *L)
-{
-#define MMNAME(name) "__" #name
- const char *metanames = MMDEF(MMNAME);
-#undef MMNAME
- global_State *g = G(L);
- const char *p, *q;
- uint32_t mm;
- for (mm = 0, p = metanames; *p; mm++, p = q) {
- GCstr *s;
- for (q = p+2; *q && *q != '_'; q++) ;
- s = lj_str_new(L, p, (size_t)(q-p));
- /* NOBARRIER: g->gcroot[] is a GC root. */
- setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s));
- }
-}
-
-/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */
-cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name)
-{
- cTValue *mo = lj_tab_getstr(mt, name);
- lua_assert(mm <= MM_FAST);
- if (!mo || tvisnil(mo)) { /* No metamethod? */
- mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */
- return NULL;
- }
- return mo;
-}
-
-/* Lookup metamethod for object. */
-cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm)
-{
- GCtab *mt;
- if (tvistab(o))
- mt = tabref(tabV(o)->metatable);
- else if (tvisudata(o))
- mt = tabref(udataV(o)->metatable);
- else
- mt = tabref(basemt_obj(G(L), o));
- if (mt) {
- cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm));
- if (mo)
- return mo;
- }
- return niltv(L);
-}
-
-#if LJ_HASFFI
-/* Tailcall from C function. */
-int lj_meta_tailcall(lua_State *L, cTValue *tv)
-{
- TValue *base = L->base;
- TValue *top = L->top;
- const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */
- copyTV(L, base-1, tv); /* Replace frame with new object. */
- top->u32.lo = LJ_CONT_TAILCALL;
- setframe_pc(top, pc);
- setframe_gc(top+1, obj2gco(L)); /* Dummy frame object. */
- setframe_ftsz(top+1, (int)((char *)(top+2) - (char *)base) + FRAME_CONT);
- L->base = L->top = top+2;
- /*
- ** before: [old_mo|PC] [... ...]
- ** ^base ^top
- ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta]
- ** ^base/top
- ** tailcall: [new_mo|PC] [... ...]
- ** ^base ^top
- */
- return 0;
-}
-#endif
-
-/* Setup call to metamethod to be run by Assembler VM. */
-static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo,
- cTValue *a, cTValue *b)
-{
- /*
- ** |-- framesize -> top top+1 top+2 top+3
- ** before: [func slots ...]
- ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b]
- ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b]
- ** ^-- func base ^-- mm base
- ** after mm: [func slots ...] [result]
- ** ^-- copy to base[PC_RA] --/ for lj_cont_ra
- ** istruecond + branch for lj_cont_cond*
- ** ignore for lj_cont_nop
- ** next PC: [func slots ...]
- */
- TValue *top = L->top;
- if (curr_funcisL(L)) top = curr_topL(L);
- setcont(top, cont); /* Assembler VM stores PC in upper word. */
- copyTV(L, top+1, mo); /* Store metamethod and two arguments. */
- copyTV(L, top+2, a);
- copyTV(L, top+3, b);
- return top+2; /* Return new base. */
-}
-
-/* -- C helpers for some instructions, called from assembler VM ----------- */
-
-/* Helper for TGET*. __index chain and metamethod. */
-cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k)
-{
- int loop;
- for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
- cTValue *mo;
- if (LJ_LIKELY(tvistab(o))) {
- GCtab *t = tabV(o);
- cTValue *tv = lj_tab_get(L, t, k);
- if (!tvisnil(tv) ||
- !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index)))
- return tv;
- } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) {
- lj_err_optype(L, o, LJ_ERR_OPINDEX);
- return NULL; /* unreachable */
- }
- if (tvisfunc(mo)) {
- L->top = mmcall(L, lj_cont_ra, mo, o, k);
- return NULL; /* Trigger metamethod call. */
- }
- o = mo;
- }
- lj_err_msg(L, LJ_ERR_GETLOOP);
- return NULL; /* unreachable */
-}
-
-/* Helper for TSET*. __newindex chain and metamethod. */
-TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k)
-{
- TValue tmp;
- int loop;
- for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
- cTValue *mo;
- if (LJ_LIKELY(tvistab(o))) {
- GCtab *t = tabV(o);
- cTValue *tv = lj_tab_get(L, t, k);
- if (LJ_LIKELY(!tvisnil(tv))) {
- t->nomm = 0; /* Invalidate negative metamethod cache. */
- lj_gc_anybarriert(L, t);
- return (TValue *)tv;
- } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) {
- t->nomm = 0; /* Invalidate negative metamethod cache. */
- lj_gc_anybarriert(L, t);
- if (tv != niltv(L))
- return (TValue *)tv;
- if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX);
- else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; }
- else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX);
- return lj_tab_newkey(L, t, k);
- }
- } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) {
- lj_err_optype(L, o, LJ_ERR_OPINDEX);
- return NULL; /* unreachable */
- }
- if (tvisfunc(mo)) {
- L->top = mmcall(L, lj_cont_nop, mo, o, k);
- /* L->top+2 = v filled in by caller. */
- return NULL; /* Trigger metamethod call. */
- }
- copyTV(L, &tmp, mo);
- o = &tmp;
- }
- lj_err_msg(L, LJ_ERR_SETLOOP);
- return NULL; /* unreachable */
-}
-
-static cTValue *str2num(cTValue *o, TValue *n)
-{
- if (tvisnum(o))
- return o;
- else if (tvisint(o))
- return (setnumV(n, (lua_Number)intV(o)), n);
- else if (tvisstr(o) && lj_strscan_num(strV(o), n))
- return n;
- else
- return NULL;
-}
-
-/* Helper for arithmetic instructions. Coercion, metamethod. */
-TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc,
- BCReg op)
-{
- MMS mm = bcmode_mm(op);
- TValue tempb, tempc;
- cTValue *b, *c;
- if ((b = str2num(rb, &tempb)) != NULL &&
- (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */
- setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add));
- return NULL;
- } else {
- cTValue *mo = lj_meta_lookup(L, rb, mm);
- if (tvisnil(mo)) {
- mo = lj_meta_lookup(L, rc, mm);
- if (tvisnil(mo)) {
- if (str2num(rb, &tempb) == NULL) rc = rb;
- lj_err_optype(L, rc, LJ_ERR_OPARITH);
- return NULL; /* unreachable */
- }
- }
- return mmcall(L, lj_cont_ra, mo, rb, rc);
- }
-}
-
-/* In-place coercion of a number to a string. */
-static LJ_AINLINE int tostring(lua_State *L, TValue *o)
-{
- if (tvisstr(o)) {
- return 1;
- } else if (tvisnumber(o)) {
- setstrV(L, o, lj_str_fromnumber(L, o));
- return 1;
- } else {
- return 0;
- }
-}
-
-/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */
-TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
-{
- int fromc = 0;
- if (left < 0) { left = -left; fromc = 1; }
- do {
- int n = 1;
- if (!(tvisstr(top-1) || tvisnumber(top-1)) || !tostring(L, top)) {
- cTValue *mo = lj_meta_lookup(L, top-1, MM_concat);
- if (tvisnil(mo)) {
- mo = lj_meta_lookup(L, top, MM_concat);
- if (tvisnil(mo)) {
- if (tvisstr(top-1) || tvisnumber(top-1)) top++;
- lj_err_optype(L, top-1, LJ_ERR_OPCAT);
- return NULL; /* unreachable */
- }
- }
- /* One of the top two elements is not a string, call __cat metamethod:
- **
- ** before: [...][CAT stack .........................]
- ** top-1 top top+1 top+2
- ** pick two: [...][CAT stack ...] [o1] [o2]
- ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2]
- ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2]
- ** ^-- func base ^-- mm base
- ** after mm: [...][CAT stack ...] <--push-- [result]
- ** next step: [...][CAT stack .............]
- */
- copyTV(L, top+2, top); /* Careful with the order of stack copies! */
- copyTV(L, top+1, top-1);
- copyTV(L, top, mo);
- setcont(top-1, lj_cont_cat);
- return top+1; /* Trigger metamethod call. */
- } else if (strV(top)->len == 0) { /* Shortcut. */
- (void)tostring(L, top-1);
- } else {
- /* Pick as many strings as possible from the top and concatenate them:
- **
- ** before: [...][CAT stack ...........................]
- ** pick str: [...][CAT stack ...] [...... strings ......]
- ** concat: [...][CAT stack ...] [result]
- ** next step: [...][CAT stack ............]
- */
- MSize tlen = strV(top)->len;
- char *buffer;
- int i;
- for (n = 1; n <= left && tostring(L, top-n); n++) {
- MSize len = strV(top-n)->len;
- if (len >= LJ_MAX_STR - tlen)
- lj_err_msg(L, LJ_ERR_STROV);
- tlen += len;
- }
- buffer = lj_str_needbuf(L, &G(L)->tmpbuf, tlen);
- n--;
- tlen = 0;
- for (i = n; i >= 0; i--) {
- MSize len = strV(top-i)->len;
- memcpy(buffer + tlen, strVdata(top-i), len);
- tlen += len;
- }
- setstrV(L, top-n, lj_str_new(L, buffer, tlen));
- }
- left -= n;
- top -= n;
- } while (left >= 1);
- if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) {
- if (!fromc) L->top = curr_topL(L);
- lj_gc_step(L);
- }
- return NULL;
-}
-
-/* Helper for LEN. __len metamethod. */
-TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o)
-{
- cTValue *mo = lj_meta_lookup(L, o, MM_len);
- if (tvisnil(mo)) {
- if (LJ_52 && tvistab(o))
- tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<<MM_len);
- else
- lj_err_optype(L, o, LJ_ERR_OPLEN);
- return NULL;
- }
- return mmcall(L, lj_cont_ra, mo, o, LJ_52 ? o : niltv(L));
-}
-
-/* Helper for equality comparisons. __eq metamethod. */
-TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne)
-{
- /* Field metatable must be at same offset for GCtab and GCudata! */
- cTValue *mo = lj_meta_fast(L, tabref(o1->gch.metatable), MM_eq);
- if (mo) {
- TValue *top;
- uint32_t it;
- if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) {
- cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq);
- if (mo2 == NULL || !lj_obj_equal(mo, mo2))
- return (TValue *)(intptr_t)ne;
- }
- top = curr_top(L);
- setcont(top, ne ? lj_cont_condf : lj_cont_condt);
- copyTV(L, top+1, mo);
- it = ~(uint32_t)o1->gch.gct;
- setgcV(L, top+2, o1, it);
- setgcV(L, top+3, o2, it);
- return top+2; /* Trigger metamethod call. */
- }
- return (TValue *)(intptr_t)ne;
-}
-
-#if LJ_HASFFI
-TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins)
-{
- ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt;
- int op = (int)bc_op(ins) & ~1;
- TValue tv;
- cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)];
- cTValue *o1mm = o1;
- if (op == BC_ISEQV) {
- o2 = &L->base[bc_d(ins)];
- if (!tviscdata(o1mm)) o1mm = o2;
- } else if (op == BC_ISEQS) {
- setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins))));
- o2 = &tv;
- } else if (op == BC_ISEQN) {
- o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)];
- } else {
- lua_assert(op == BC_ISEQP);
- setitype(&tv, ~bc_d(ins));
- o2 = &tv;
- }
- mo = lj_meta_lookup(L, o1mm, MM_eq);
- if (LJ_LIKELY(!tvisnil(mo)))
- return mmcall(L, cont, mo, o1, o2);
- else
- return (TValue *)(intptr_t)(bc_op(ins) & 1);
-}
-#endif
-
-/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */
-TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op)
-{
- if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) {
- ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
- MMS mm = (op & 2) ? MM_le : MM_lt;
- cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm);
- if (LJ_UNLIKELY(tvisnil(mo))) goto err;
- return mmcall(L, cont, mo, o1, o2);
- } else if (LJ_52 || itype(o1) == itype(o2)) {
- /* Never called with two numbers. */
- if (tvisstr(o1) && tvisstr(o2)) {
- int32_t res = lj_str_cmp(strV(o1), strV(o2));
- return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1));
- } else {
- trymt:
- while (1) {
- ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
- MMS mm = (op & 2) ? MM_le : MM_lt;
- cTValue *mo = lj_meta_lookup(L, o1, mm);
-#if LJ_52
- if (tvisnil(mo) && tvisnil((mo = lj_meta_lookup(L, o2, mm))))
-#else
- cTValue *mo2 = lj_meta_lookup(L, o2, mm);
- if (tvisnil(mo) || !lj_obj_equal(mo, mo2))
-#endif
- {
- if (op & 2) { /* MM_le not found: retry with MM_lt. */
- cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */
- op ^= 3; /* Use LT and flip condition. */
- continue;
- }
- goto err;
- }
- return mmcall(L, cont, mo, o1, o2);
- }
- }
- } else if (tvisbool(o1) && tvisbool(o2)) {
- goto trymt;
- } else {
- err:
- lj_err_comp(L, o1, o2);
- return NULL;
- }
-}
-
-/* Helper for calls. __call metamethod. */
-void lj_meta_call(lua_State *L, TValue *func, TValue *top)
-{
- cTValue *mo = lj_meta_lookup(L, func, MM_call);
- TValue *p;
- if (!tvisfunc(mo))
- lj_err_optype_call(L, func);
- for (p = top; p > func; p--) copyTV(L, p, p-1);
- copyTV(L, func, mo);
-}
-
-/* Helper for FORI. Coercion. */
-void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o)
-{
- if (!lj_strscan_numberobj(o)) lj_err_msg(L, LJ_ERR_FORINIT);
- if (!lj_strscan_numberobj(o+1)) lj_err_msg(L, LJ_ERR_FORLIM);
- if (!lj_strscan_numberobj(o+2)) lj_err_msg(L, LJ_ERR_FORSTEP);
- if (LJ_DUALNUM) {
- /* Ensure all slots are integers or all slots are numbers. */
- int32_t k[3];
- int nint = 0;
- ptrdiff_t i;
- for (i = 0; i <= 2; i++) {
- if (tvisint(o+i)) {
- k[i] = intV(o+i); nint++;
- } else {
- k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i));
- }
- }
- if (nint == 3) { /* Narrow to integers. */
- setintV(o, k[0]);
- setintV(o+1, k[1]);
- setintV(o+2, k[2]);
- } else if (nint != 0) { /* Widen to numbers. */
- if (tvisint(o)) setnumV(o, (lua_Number)intV(o));
- if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1));
- if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2));
- }
- }
-}
-
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_meta_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* String interning of metamethod names for fast indexing. */
+void lj_meta_init(lua_State *L)
+{
+#define MMNAME(name) "__" #name
+ const char *metanames = MMDEF(MMNAME);
+#undef MMNAME
+ global_State *g = G(L);
+ const char *p, *q;
+ uint32_t mm;
+ for (mm = 0, p = metanames; *p; mm++, p = q) {
+ GCstr *s;
+ for (q = p+2; *q && *q != '_'; q++) ;
+ s = lj_str_new(L, p, (size_t)(q-p));
+ /* NOBARRIER: g->gcroot[] is a GC root. */
+ setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s));
+ }
+}
+
+/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */
+cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name)
+{
+ cTValue *mo = lj_tab_getstr(mt, name);
+ lua_assert(mm <= MM_FAST);
+ if (!mo || tvisnil(mo)) { /* No metamethod? */
+ mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */
+ return NULL;
+ }
+ return mo;
+}
+
+/* Lookup metamethod for object. */
+cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm)
+{
+ GCtab *mt;
+ if (tvistab(o))
+ mt = tabref(tabV(o)->metatable);
+ else if (tvisudata(o))
+ mt = tabref(udataV(o)->metatable);
+ else
+ mt = tabref(basemt_obj(G(L), o));
+ if (mt) {
+ cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm));
+ if (mo)
+ return mo;
+ }
+ return niltv(L);
+}
+
+#if LJ_HASFFI
+/* Tailcall from C function. */
+int lj_meta_tailcall(lua_State *L, cTValue *tv)
+{
+ TValue *base = L->base;
+ TValue *top = L->top;
+ const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */
+ copyTV(L, base-1, tv); /* Replace frame with new object. */
+ top->u32.lo = LJ_CONT_TAILCALL;
+ setframe_pc(top, pc);
+ setframe_gc(top+1, obj2gco(L)); /* Dummy frame object. */
+ setframe_ftsz(top+1, (int)((char *)(top+2) - (char *)base) + FRAME_CONT);
+ L->base = L->top = top+2;
+ /*
+ ** before: [old_mo|PC] [... ...]
+ ** ^base ^top
+ ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta]
+ ** ^base/top
+ ** tailcall: [new_mo|PC] [... ...]
+ ** ^base ^top
+ */
+ return 0;
+}
+#endif
+
+/* Setup call to metamethod to be run by Assembler VM. */
+static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo,
+ cTValue *a, cTValue *b)
+{
+ /*
+ ** |-- framesize -> top top+1 top+2 top+3
+ ** before: [func slots ...]
+ ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b]
+ ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b]
+ ** ^-- func base ^-- mm base
+ ** after mm: [func slots ...] [result]
+ ** ^-- copy to base[PC_RA] --/ for lj_cont_ra
+ ** istruecond + branch for lj_cont_cond*
+ ** ignore for lj_cont_nop
+ ** next PC: [func slots ...]
+ */
+ TValue *top = L->top;
+ if (curr_funcisL(L)) top = curr_topL(L);
+ setcont(top, cont); /* Assembler VM stores PC in upper word. */
+ copyTV(L, top+1, mo); /* Store metamethod and two arguments. */
+ copyTV(L, top+2, a);
+ copyTV(L, top+3, b);
+ return top+2; /* Return new base. */
+}
+
+/* -- C helpers for some instructions, called from assembler VM ----------- */
+
+/* Helper for TGET*. __index chain and metamethod. */
+cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k)
+{
+ int loop;
+ for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+ cTValue *mo;
+ if (LJ_LIKELY(tvistab(o))) {
+ GCtab *t = tabV(o);
+ cTValue *tv = lj_tab_get(L, t, k);
+ if (!tvisnil(tv) ||
+ !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index)))
+ return tv;
+ } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) {
+ lj_err_optype(L, o, LJ_ERR_OPINDEX);
+ return NULL; /* unreachable */
+ }
+ if (tvisfunc(mo)) {
+ L->top = mmcall(L, lj_cont_ra, mo, o, k);
+ return NULL; /* Trigger metamethod call. */
+ }
+ o = mo;
+ }
+ lj_err_msg(L, LJ_ERR_GETLOOP);
+ return NULL; /* unreachable */
+}
+
+/* Helper for TSET*. __newindex chain and metamethod. */
+TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k)
+{
+ TValue tmp;
+ int loop;
+ for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+ cTValue *mo;
+ if (LJ_LIKELY(tvistab(o))) {
+ GCtab *t = tabV(o);
+ cTValue *tv = lj_tab_get(L, t, k);
+ if (LJ_LIKELY(!tvisnil(tv))) {
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ lj_gc_anybarriert(L, t);
+ return (TValue *)tv;
+ } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) {
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ lj_gc_anybarriert(L, t);
+ if (tv != niltv(L))
+ return (TValue *)tv;
+ if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX);
+ else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; }
+ else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX);
+ return lj_tab_newkey(L, t, k);
+ }
+ } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) {
+ lj_err_optype(L, o, LJ_ERR_OPINDEX);
+ return NULL; /* unreachable */
+ }
+ if (tvisfunc(mo)) {
+ L->top = mmcall(L, lj_cont_nop, mo, o, k);
+ /* L->top+2 = v filled in by caller. */
+ return NULL; /* Trigger metamethod call. */
+ }
+ copyTV(L, &tmp, mo);
+ o = &tmp;
+ }
+ lj_err_msg(L, LJ_ERR_SETLOOP);
+ return NULL; /* unreachable */
+}
+
+static cTValue *str2num(cTValue *o, TValue *n)
+{
+ if (tvisnum(o))
+ return o;
+ else if (tvisint(o))
+ return (setnumV(n, (lua_Number)intV(o)), n);
+ else if (tvisstr(o) && lj_strscan_num(strV(o), n))
+ return n;
+ else
+ return NULL;
+}
+
+/* Helper for arithmetic instructions. Coercion, metamethod. */
+TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc,
+ BCReg op)
+{
+ MMS mm = bcmode_mm(op);
+ TValue tempb, tempc;
+ cTValue *b, *c;
+ if ((b = str2num(rb, &tempb)) != NULL &&
+ (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */
+ setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add));
+ return NULL;
+ } else {
+ cTValue *mo = lj_meta_lookup(L, rb, mm);
+ if (tvisnil(mo)) {
+ mo = lj_meta_lookup(L, rc, mm);
+ if (tvisnil(mo)) {
+ if (str2num(rb, &tempb) == NULL) rc = rb;
+ lj_err_optype(L, rc, LJ_ERR_OPARITH);
+ return NULL; /* unreachable */
+ }
+ }
+ return mmcall(L, lj_cont_ra, mo, rb, rc);
+ }
+}
+
+/* In-place coercion of a number to a string. */
+static LJ_AINLINE int tostring(lua_State *L, TValue *o)
+{
+ if (tvisstr(o)) {
+ return 1;
+ } else if (tvisnumber(o)) {
+ setstrV(L, o, lj_str_fromnumber(L, o));
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */
+TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
+{
+ int fromc = 0;
+ if (left < 0) { left = -left; fromc = 1; }
+ do {
+ int n = 1;
+ if (!(tvisstr(top-1) || tvisnumber(top-1)) || !tostring(L, top)) {
+ cTValue *mo = lj_meta_lookup(L, top-1, MM_concat);
+ if (tvisnil(mo)) {
+ mo = lj_meta_lookup(L, top, MM_concat);
+ if (tvisnil(mo)) {
+ if (tvisstr(top-1) || tvisnumber(top-1)) top++;
+ lj_err_optype(L, top-1, LJ_ERR_OPCAT);
+ return NULL; /* unreachable */
+ }
+ }
+ /* One of the top two elements is not a string, call __cat metamethod:
+ **
+ ** before: [...][CAT stack .........................]
+ ** top-1 top top+1 top+2
+ ** pick two: [...][CAT stack ...] [o1] [o2]
+ ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2]
+ ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2]
+ ** ^-- func base ^-- mm base
+ ** after mm: [...][CAT stack ...] <--push-- [result]
+ ** next step: [...][CAT stack .............]
+ */
+ copyTV(L, top+2, top); /* Careful with the order of stack copies! */
+ copyTV(L, top+1, top-1);
+ copyTV(L, top, mo);
+ setcont(top-1, lj_cont_cat);
+ return top+1; /* Trigger metamethod call. */
+ } else if (strV(top)->len == 0) { /* Shortcut. */
+ (void)tostring(L, top-1);
+ } else {
+ /* Pick as many strings as possible from the top and concatenate them:
+ **
+ ** before: [...][CAT stack ...........................]
+ ** pick str: [...][CAT stack ...] [...... strings ......]
+ ** concat: [...][CAT stack ...] [result]
+ ** next step: [...][CAT stack ............]
+ */
+ MSize tlen = strV(top)->len;
+ char *buffer;
+ int i;
+ for (n = 1; n <= left && tostring(L, top-n); n++) {
+ MSize len = strV(top-n)->len;
+ if (len >= LJ_MAX_STR - tlen)
+ lj_err_msg(L, LJ_ERR_STROV);
+ tlen += len;
+ }
+ buffer = lj_str_needbuf(L, &G(L)->tmpbuf, tlen);
+ n--;
+ tlen = 0;
+ for (i = n; i >= 0; i--) {
+ MSize len = strV(top-i)->len;
+ memcpy(buffer + tlen, strVdata(top-i), len);
+ tlen += len;
+ }
+ setstrV(L, top-n, lj_str_new(L, buffer, tlen));
+ }
+ left -= n;
+ top -= n;
+ } while (left >= 1);
+ if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) {
+ if (!fromc) L->top = curr_topL(L);
+ lj_gc_step(L);
+ }
+ return NULL;
+}
+
+/* Helper for LEN. __len metamethod. */
+TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o)
+{
+ cTValue *mo = lj_meta_lookup(L, o, MM_len);
+ if (tvisnil(mo)) {
+ if (LJ_52 && tvistab(o))
+ tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<<MM_len);
+ else
+ lj_err_optype(L, o, LJ_ERR_OPLEN);
+ return NULL;
+ }
+ return mmcall(L, lj_cont_ra, mo, o, LJ_52 ? o : niltv(L));
+}
+
+/* Helper for equality comparisons. __eq metamethod. */
+TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne)
+{
+ /* Field metatable must be at same offset for GCtab and GCudata! */
+ cTValue *mo = lj_meta_fast(L, tabref(o1->gch.metatable), MM_eq);
+ if (mo) {
+ TValue *top;
+ uint32_t it;
+ if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) {
+ cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq);
+ if (mo2 == NULL || !lj_obj_equal(mo, mo2))
+ return (TValue *)(intptr_t)ne;
+ }
+ top = curr_top(L);
+ setcont(top, ne ? lj_cont_condf : lj_cont_condt);
+ copyTV(L, top+1, mo);
+ it = ~(uint32_t)o1->gch.gct;
+ setgcV(L, top+2, o1, it);
+ setgcV(L, top+3, o2, it);
+ return top+2; /* Trigger metamethod call. */
+ }
+ return (TValue *)(intptr_t)ne;
+}
+
+#if LJ_HASFFI
+TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins)
+{
+ ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt;
+ int op = (int)bc_op(ins) & ~1;
+ TValue tv;
+ cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)];
+ cTValue *o1mm = o1;
+ if (op == BC_ISEQV) {
+ o2 = &L->base[bc_d(ins)];
+ if (!tviscdata(o1mm)) o1mm = o2;
+ } else if (op == BC_ISEQS) {
+ setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins))));
+ o2 = &tv;
+ } else if (op == BC_ISEQN) {
+ o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)];
+ } else {
+ lua_assert(op == BC_ISEQP);
+ setitype(&tv, ~bc_d(ins));
+ o2 = &tv;
+ }
+ mo = lj_meta_lookup(L, o1mm, MM_eq);
+ if (LJ_LIKELY(!tvisnil(mo)))
+ return mmcall(L, cont, mo, o1, o2);
+ else
+ return (TValue *)(intptr_t)(bc_op(ins) & 1);
+}
+#endif
+
+/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */
+TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op)
+{
+ if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) {
+ ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+ MMS mm = (op & 2) ? MM_le : MM_lt;
+ cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm);
+ if (LJ_UNLIKELY(tvisnil(mo))) goto err;
+ return mmcall(L, cont, mo, o1, o2);
+ } else if (LJ_52 || itype(o1) == itype(o2)) {
+ /* Never called with two numbers. */
+ if (tvisstr(o1) && tvisstr(o2)) {
+ int32_t res = lj_str_cmp(strV(o1), strV(o2));
+ return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1));
+ } else {
+ trymt:
+ while (1) {
+ ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+ MMS mm = (op & 2) ? MM_le : MM_lt;
+ cTValue *mo = lj_meta_lookup(L, o1, mm);
+#if LJ_52
+ if (tvisnil(mo) && tvisnil((mo = lj_meta_lookup(L, o2, mm))))
+#else
+ cTValue *mo2 = lj_meta_lookup(L, o2, mm);
+ if (tvisnil(mo) || !lj_obj_equal(mo, mo2))
+#endif
+ {
+ if (op & 2) { /* MM_le not found: retry with MM_lt. */
+ cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */
+ op ^= 3; /* Use LT and flip condition. */
+ continue;
+ }
+ goto err;
+ }
+ return mmcall(L, cont, mo, o1, o2);
+ }
+ }
+ } else if (tvisbool(o1) && tvisbool(o2)) {
+ goto trymt;
+ } else {
+ err:
+ lj_err_comp(L, o1, o2);
+ return NULL;
+ }
+}
+
+/* Helper for calls. __call metamethod. */
+void lj_meta_call(lua_State *L, TValue *func, TValue *top)
+{
+ cTValue *mo = lj_meta_lookup(L, func, MM_call);
+ TValue *p;
+ if (!tvisfunc(mo))
+ lj_err_optype_call(L, func);
+ for (p = top; p > func; p--) copyTV(L, p, p-1);
+ copyTV(L, func, mo);
+}
+
+/* Helper for FORI. Coercion. */
+void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o)
+{
+ if (!lj_strscan_numberobj(o)) lj_err_msg(L, LJ_ERR_FORINIT);
+ if (!lj_strscan_numberobj(o+1)) lj_err_msg(L, LJ_ERR_FORLIM);
+ if (!lj_strscan_numberobj(o+2)) lj_err_msg(L, LJ_ERR_FORSTEP);
+ if (LJ_DUALNUM) {
+ /* Ensure all slots are integers or all slots are numbers. */
+ int32_t k[3];
+ int nint = 0;
+ ptrdiff_t i;
+ for (i = 0; i <= 2; i++) {
+ if (tvisint(o+i)) {
+ k[i] = intV(o+i); nint++;
+ } else {
+ k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i));
+ }
+ }
+ if (nint == 3) { /* Narrow to integers. */
+ setintV(o, k[0]);
+ setintV(o+1, k[1]);
+ setintV(o+2, k[2]);
+ } else if (nint != 0) { /* Widen to numbers. */
+ if (tvisint(o)) setnumV(o, (lua_Number)intV(o));
+ if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1));
+ if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2));
+ }
+ }
+}
+
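Note on the lj_meta_for() hunk above: under LJ_DUALNUM the FORI helper only narrows the loop to integers when all three slots (start, limit, step) are exactly representable as int32_t; if only some of them are, every slot is widened back to a number. Below is a minimal standalone sketch of that rule against plain C doubles; for_slots_narrow and its names are hypothetical and not part of the LuaJIT sources.

#include <stdint.h>

/* Hypothetical sketch: mirrors the "narrow only if all three FORI slots are
** exact int32 values" rule from lj_meta_for() under LJ_DUALNUM. */
static int for_slots_narrow(const double slots[3], int32_t out[3])
{
  int i;
  for (i = 0; i < 3; i++) {
    double n = slots[i];
    if (!(n >= -2147483648.0 && n < 2147483648.0))
      return 0;                      /* Out of int32 range: keep numbers. */
    out[i] = (int32_t)n;
    if ((double)out[i] != n)
      return 0;                      /* Fractional part lost: keep numbers. */
  }
  return 1;  /* start/limit/step all narrowed: run the integer loop. */
}

Narrowing only when the round trip is lossless is what keeps the integer loop bit-exact with the plain number loop.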
diff --git a/3rdparty/lua/src/lj_meta.h b/3rdparty/lua/src/lj_meta.h
index 688b760..6af5e51 100644
--- a/3rdparty/lua/src/lj_meta.h
+++ b/3rdparty/lua/src/lj_meta.h
@@ -1,37 +1,37 @@
-/*
-** Metamethod handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_META_H
-#define _LJ_META_H
-
-#include "lj_obj.h"
-
-/* Metamethod handling */
-LJ_FUNC void lj_meta_init(lua_State *L);
-LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name);
-LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm);
-#if LJ_HASFFI
-LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv);
-#endif
-
-#define lj_meta_fastg(g, mt, mm) \
- ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \
- lj_meta_cache(mt, mm, mmname_str(g, mm)))
-#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm)
-
-/* C helpers for some instructions, called from assembler VM. */
-LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k);
-LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k);
-LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb,
- cTValue *rc, BCReg op);
-LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left);
-LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
-LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
-LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
-LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
-LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
-LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
-
-#endif
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_META_H
+#define _LJ_META_H
+
+#include "lj_obj.h"
+
+/* Metamethod handling */
+LJ_FUNC void lj_meta_init(lua_State *L);
+LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name);
+LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm);
+#if LJ_HASFFI
+LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv);
+#endif
+
+#define lj_meta_fastg(g, mt, mm) \
+ ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \
+ lj_meta_cache(mt, mm, mmname_str(g, mm)))
+#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm)
+
+/* C helpers for some instructions, called from assembler VM. */
+LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb,
+ cTValue *rc, BCReg op);
+LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
+LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
+LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
+LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
+LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
+
+#endif
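The lj_meta_fastg()/lj_meta_fast() macros re-declared above implement the negative cache for the fast metamethods (index..len): a set bit in the table's nomm byte means "this metamethod is known to be absent", so the string-keyed metatable lookup is skipped entirely. A minimal sketch of that check follows, with hypothetical names (MyTab, my_meta_maybe_present) standing in for GCtab and lj_meta_fast.

#include <stddef.h>
#include <stdint.h>

typedef struct MyTab {
  uint8_t nomm;  /* Bit mm set => fast metamethod mm is known to be absent. */
} MyTab;

/* Hypothetical helper: returns 0 when the lookup can be skipped outright,
** 1 when the real metatable hash lookup still has to run. */
static int my_meta_maybe_present(const MyTab *mt, unsigned mm)
{
  if (mt == NULL)
    return 0;                   /* No metatable at all. */
  if (mt->nomm & (1u << mm))
    return 0;                   /* Cached miss: skip the hash lookup. */
  return 1;                     /* Unknown: fall through to lj_meta_cache(). */
}

In the real code a miss in lj_meta_cache() sets the corresponding nomm bit, so repeated lookups on metamethod-free tables stay O(1).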
diff --git a/3rdparty/lua/src/lj_obj.c b/3rdparty/lua/src/lj_obj.c
index d7907d6..322b7be 100644
--- a/3rdparty/lua/src/lj_obj.c
+++ b/3rdparty/lua/src/lj_obj.c
@@ -1,35 +1,35 @@
-/*
-** Miscellaneous object handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_obj_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-/* Object type names. */
-LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */
- "no value", "nil", "boolean", "userdata", "number", "string",
- "table", "function", "userdata", "thread", "proto", "cdata"
-};
-
-LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */
- "nil", "boolean", "boolean", "userdata", "string", "upval", "thread",
- "proto", "function", "trace", "cdata", "table", "userdata", "number"
-};
-
-/* Compare two objects without calling metamethods. */
-int lj_obj_equal(cTValue *o1, cTValue *o2)
-{
- if (itype(o1) == itype(o2)) {
- if (tvispri(o1))
- return 1;
- if (!tvisnum(o1))
- return gcrefeq(o1->gcr, o2->gcr);
- } else if (!tvisnumber(o1) || !tvisnumber(o2)) {
- return 0;
- }
- return numberVnum(o1) == numberVnum(o2);
-}
-
+/*
+** Miscellaneous object handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_obj_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+/* Object type names. */
+LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */
+ "no value", "nil", "boolean", "userdata", "number", "string",
+ "table", "function", "userdata", "thread", "proto", "cdata"
+};
+
+LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */
+ "nil", "boolean", "boolean", "userdata", "string", "upval", "thread",
+ "proto", "function", "trace", "cdata", "table", "userdata", "number"
+};
+
+/* Compare two objects without calling metamethods. */
+int lj_obj_equal(cTValue *o1, cTValue *o2)
+{
+ if (itype(o1) == itype(o2)) {
+ if (tvispri(o1))
+ return 1;
+ if (!tvisnum(o1))
+ return gcrefeq(o1->gcr, o2->gcr);
+ } else if (!tvisnumber(o1) || !tvisnumber(o2)) {
+ return 0;
+ }
+ return numberVnum(o1) == numberVnum(o2);
+}
+
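lj_obj_equal() above is the raw (metamethod-free) equality used by the VM: identical tags compare primitives as equal and GC objects by reference, while numbers, including the mixed int/double case, fall through to a plain numeric comparison. A small, self-contained illustration of what that last comparison implies, using a hypothetical raw_num_equal helper rather than any LuaJIT API:

#include <assert.h>
#include <math.h>

/* Hypothetical stand-in for the final numberVnum(o1) == numberVnum(o2) step. */
static int raw_num_equal(double a, double b) { return a == b; }

int main(void)
{
  assert(raw_num_equal((double)3, 3.0));  /* int-tagged 3 == number 3.0 */
  assert(raw_num_equal(0.0, -0.0));       /* +0 and -0 are raw-equal */
  assert(!raw_num_equal(NAN, NAN));       /* NaN is never raw-equal to itself */
  return 0;
}

This follows standard IEEE-754 comparison semantics, which is also why rawequal(0/0, 0/0) is false at the Lua level.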
diff --git a/3rdparty/lua/src/lj_obj.h b/3rdparty/lua/src/lj_obj.h
index 3fc14de..b967819 100644
--- a/3rdparty/lua/src/lj_obj.h
+++ b/3rdparty/lua/src/lj_obj.h
@@ -1,856 +1,856 @@
-/*
-** LuaJIT VM tags, values and objects.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#ifndef _LJ_OBJ_H
-#define _LJ_OBJ_H
-
-#include "lua.h"
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* -- Memory references (32 bit address space) ---------------------------- */
-
-/* Memory size. */
-typedef uint32_t MSize;
-
-/* Memory reference */
-typedef struct MRef {
- uint32_t ptr32; /* Pseudo 32 bit pointer. */
-} MRef;
-
-#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32)
-
-#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
-#define setmrefr(r, v) ((r).ptr32 = (v).ptr32)
-
-/* -- GC object references (32 bit address space) ------------------------- */
-
-/* GCobj reference */
-typedef struct GCRef {
- uint32_t gcptr32; /* Pseudo 32 bit pointer. */
-} GCRef;
-
-/* Common GC header for all collectable objects. */
-#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct
-/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
-
-#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32)
-#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32)
-#define gcrefu(r) ((r).gcptr32)
-#define gcrefi(r) ((int32_t)(r).gcptr32)
-#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32)
-#define gcnext(gc) (gcref((gc)->gch.nextgc))
-
-#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
-#define setgcrefi(r, i) ((r).gcptr32 = (uint32_t)(i))
-#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p))
-#define setgcrefnull(r) ((r).gcptr32 = 0)
-#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32)
-
-/* IMPORTANT NOTE:
-**
-** All uses of the setgcref* macros MUST be accompanied with a write barrier.
-**
-** This is to ensure the integrity of the incremental GC. The invariant
-** to preserve is that a black object never points to a white object.
-** I.e. never store a white object into a field of a black object.
-**
-** It's ok to LEAVE OUT the write barrier ONLY in the following cases:
-** - The source is not a GC object (NULL).
-** - The target is a GC root. I.e. everything in global_State.
-** - The target is a lua_State field (threads are never black).
-** - The target is a stack slot, see setgcV et al.
-** - The target is an open upvalue, i.e. pointing to a stack slot.
-** - The target is a newly created object (i.e. marked white). But make
-** sure nothing invokes the GC inbetween.
-** - The target and the source are the same object (self-reference).
-** - The target already contains the object (e.g. moving elements around).
-**
-** The most common case is a store to a stack slot. All other cases where
-** a barrier has been omitted are annotated with a NOBARRIER comment.
-**
-** The same logic applies for stores to table slots (array part or hash
-** part). ALL uses of lj_tab_set* require a barrier for the stored value
-** *and* the stored key, based on the above rules. In practice this means
-** a barrier is needed if *either* of the key or value are a GC object.
-**
-** It's ok to LEAVE OUT the write barrier in the following special cases:
-** - The stored value is nil. The key doesn't matter because it's either
-** not resurrected or lj_tab_newkey() will take care of the key barrier.
-** - The key doesn't matter if the *previously* stored value is guaranteed
-** to be non-nil (because the key is kept alive in the table).
-** - The key doesn't matter if it's guaranteed not to be part of the table,
-** since lj_tab_newkey() takes care of the key barrier. This applies
-** trivially to new tables, but watch out for resurrected keys. Storing
-** a nil value leaves the key in the table!
-**
-** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used
-** by the interpreter for all table stores.
-**
-** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark
-** dead keys in tables. The reference is left in, but it's guaranteed to
-** be never dereferenced as long as the value is nil. It's ok if the key is
-** freed or if any object subsequently gets the same address.
-**
-** Not destroying dead keys helps to keep key hash slots stable. This avoids
-** specialization back-off for HREFK when a value flips between nil and
-** non-nil and the GC gets in the way. It also allows safely hoisting
-** HREF/HREFK across GC steps. Dead keys are only removed if a table is
-** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize.
-**
-** The trade-off is that a write barrier for tables must take the key into
-** account, too. Implicitly resurrecting the key by storing a non-nil value
-** may invalidate the incremental GC invariant.
-*/
-
-/* -- Common type definitions --------------------------------------------- */
-
-/* Types for handling bytecodes. Need this here, details in lj_bc.h. */
-typedef uint32_t BCIns; /* Bytecode instruction. */
-typedef uint32_t BCPos; /* Bytecode position. */
-typedef uint32_t BCReg; /* Bytecode register. */
-typedef int32_t BCLine; /* Bytecode line number. */
-
-/* Internal assembler functions. Never call these directly from C. */
-typedef void (*ASMFunction)(void);
-
-/* Resizable string buffer. Need this here, details in lj_str.h. */
-typedef struct SBuf {
- char *buf; /* String buffer base. */
- MSize n; /* String buffer length. */
- MSize sz; /* String buffer size. */
-} SBuf;
-
-/* -- Tags and values ----------------------------------------------------- */
-
-/* Frame link. */
-typedef union {
- int32_t ftsz; /* Frame type and size of previous frame. */
- MRef pcr; /* Overlaps PC for Lua frames. */
-} FrameLink;
-
-/* Tagged value. */
-typedef LJ_ALIGN(8) union TValue {
- uint64_t u64; /* 64 bit pattern overlaps number. */
- lua_Number n; /* Number object overlaps split tag/value object. */
- struct {
- LJ_ENDIAN_LOHI(
- union {
- GCRef gcr; /* GCobj reference (if any). */
- int32_t i; /* Integer value. */
- };
- , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
- )
- };
- struct {
- LJ_ENDIAN_LOHI(
- GCRef func; /* Function for next frame (or dummy L). */
- , FrameLink tp; /* Link to previous frame. */
- )
- } fr;
- struct {
- LJ_ENDIAN_LOHI(
- uint32_t lo; /* Lower 32 bits of number. */
- , uint32_t hi; /* Upper 32 bits of number. */
- )
- } u32;
-} TValue;
-
-typedef const TValue cTValue;
-
-#define tvref(r) (mref(r, TValue))
-
-/* More external and GCobj tags for internal objects. */
-#define LAST_TT LUA_TTHREAD
-#define LUA_TPROTO (LAST_TT+1)
-#define LUA_TCDATA (LAST_TT+2)
-
-/* Internal object tags.
-**
-** Internal tags overlap the MSW of a number object (must be a double).
-** Interpreted as a double these are special NaNs. The FPU only generates
-** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
-** for use as internal tags. Small negative numbers are used to shorten the
-** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate).
-**
-** ---MSW---.---LSW---
-** primitive types | itype | |
-** lightuserdata | itype | void * | (32 bit platforms)
-** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers)
-** GC objects | itype | GCRef |
-** int (LJ_DUALNUM)| itype | int |
-** number -------double------
-**
-** ORDER LJ_T
-** Primitive types nil/false/true must be first, lightuserdata next.
-** GC objects are at the end, table/userdata must be lowest.
-** Also check lj_ir.h for similar ordering constraints.
-*/
-#define LJ_TNIL (~0u)
-#define LJ_TFALSE (~1u)
-#define LJ_TTRUE (~2u)
-#define LJ_TLIGHTUD (~3u)
-#define LJ_TSTR (~4u)
-#define LJ_TUPVAL (~5u)
-#define LJ_TTHREAD (~6u)
-#define LJ_TPROTO (~7u)
-#define LJ_TFUNC (~8u)
-#define LJ_TTRACE (~9u)
-#define LJ_TCDATA (~10u)
-#define LJ_TTAB (~11u)
-#define LJ_TUDATA (~12u)
-/* This is just the canonical number type used in some places. */
-#define LJ_TNUMX (~13u)
-
-/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */
-#if LJ_64
-#define LJ_TISNUM 0xfffeffffu
-#else
-#define LJ_TISNUM LJ_TNUMX
-#endif
-#define LJ_TISTRUECOND LJ_TFALSE
-#define LJ_TISPRI LJ_TTRUE
-#define LJ_TISGCV (LJ_TSTR+1)
-#define LJ_TISTABUD LJ_TTAB
-
-/* -- String object ------------------------------------------------------- */
-
-/* String object header. String payload follows. */
-typedef struct GCstr {
- GCHeader;
- uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */
- uint8_t unused;
- MSize hash; /* Hash of string. */
- MSize len; /* Size of string. */
-} GCstr;
-
-#define strref(r) (&gcref((r))->str)
-#define strdata(s) ((const char *)((s)+1))
-#define strdatawr(s) ((char *)((s)+1))
-#define strVdata(o) strdata(strV(o))
-#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1)
-
-/* -- Userdata object ----------------------------------------------------- */
-
-/* Userdata object. Payload follows. */
-typedef struct GCudata {
- GCHeader;
- uint8_t udtype; /* Userdata type. */
- uint8_t unused2;
- GCRef env; /* Should be at same offset in GCfunc. */
- MSize len; /* Size of payload. */
- GCRef metatable; /* Must be at same offset in GCtab. */
- uint32_t align1; /* To force 8 byte alignment of the payload. */
-} GCudata;
-
-/* Userdata types. */
-enum {
- UDTYPE_USERDATA, /* Regular userdata. */
- UDTYPE_IO_FILE, /* I/O library FILE. */
- UDTYPE_FFI_CLIB, /* FFI C library namespace. */
- UDTYPE__MAX
-};
-
-#define uddata(u) ((void *)((u)+1))
-#define sizeudata(u) (sizeof(struct GCudata)+(u)->len)
-
-/* -- C data object ------------------------------------------------------- */
-
-/* C data object. Payload follows. */
-typedef struct GCcdata {
- GCHeader;
- uint16_t ctypeid; /* C type ID. */
-} GCcdata;
-
-/* Prepended to variable-sized or realigned C data objects. */
-typedef struct GCcdataVar {
- uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */
- uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). */
- MSize len; /* Size of payload. */
-} GCcdataVar;
-
-#define cdataptr(cd) ((void *)((cd)+1))
-#define cdataisv(cd) ((cd)->marked & 0x80)
-#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar)))
-#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len)
-#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra)
-#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset))
-
-/* -- Prototype object ---------------------------------------------------- */
-
-#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef))
-#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1))
-
-typedef struct GCproto {
- GCHeader;
- uint8_t numparams; /* Number of parameters. */
- uint8_t framesize; /* Fixed frame size. */
- MSize sizebc; /* Number of bytecode instructions. */
- GCRef gclist;
- MRef k; /* Split constant array (points to the middle). */
- MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */
- MSize sizekgc; /* Number of collectable constants. */
- MSize sizekn; /* Number of lua_Number constants. */
- MSize sizept; /* Total size including colocated arrays. */
- uint8_t sizeuv; /* Number of upvalues. */
- uint8_t flags; /* Miscellaneous flags (see below). */
- uint16_t trace; /* Anchor for chain of root traces. */
- /* ------ The following fields are for debugging/tracebacks only ------ */
- GCRef chunkname; /* Name of the chunk this function was defined in. */
- BCLine firstline; /* First line of the function definition. */
- BCLine numline; /* Number of lines for the function definition. */
- MRef lineinfo; /* Compressed map from bytecode ins. to source line. */
- MRef uvinfo; /* Upvalue names. */
- MRef varinfo; /* Names and compressed extents of local variables. */
-} GCproto;
-
-/* Flags for prototype. */
-#define PROTO_CHILD 0x01 /* Has child prototypes. */
-#define PROTO_VARARG 0x02 /* Vararg function. */
-#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */
-#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */
-#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */
-/* Only used during parsing. */
-#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */
-#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */
-/* Top bits used for counting created closures. */
-#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */
-#define PROTO_CLC_BITS 3
-#define PROTO_CLC_POLY (3*PROTO_CLCOUNT) /* Polymorphic threshold. */
-
-#define PROTO_UV_LOCAL 0x8000 /* Upvalue for local slot. */
-#define PROTO_UV_IMMUTABLE 0x4000 /* Immutable upvalue. */
-
-#define proto_kgc(pt, idx) \
- check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \
- gcref(mref((pt)->k, GCRef)[(idx)]))
-#define proto_knumtv(pt, idx) \
- check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)])
-#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto)))
-#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt)))
-#define proto_uv(pt) (mref((pt)->uv, uint16_t))
-
-#define proto_chunkname(pt) (strref((pt)->chunkname))
-#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt))))
-#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void))
-#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t))
-#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t))
-
-/* -- Upvalue object ------------------------------------------------------ */
-
-typedef struct GCupval {
- GCHeader;
- uint8_t closed; /* Set if closed (i.e. uv->v == &uv->u.value). */
- uint8_t immutable; /* Immutable value. */
- union {
- TValue tv; /* If closed: the value itself. */
- struct { /* If open: double linked list, anchored at thread. */
- GCRef prev;
- GCRef next;
- };
- };
- MRef v; /* Points to stack slot (open) or above (closed). */
- uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */
-} GCupval;
-
-#define uvprev(uv_) (&gcref((uv_)->prev)->uv)
-#define uvnext(uv_) (&gcref((uv_)->next)->uv)
-#define uvval(uv_) (mref((uv_)->v, TValue))
-
-/* -- Function object (closures) ------------------------------------------ */
-
-/* Common header for functions. env should be at same offset in GCudata. */
-#define GCfuncHeader \
- GCHeader; uint8_t ffid; uint8_t nupvalues; \
- GCRef env; GCRef gclist; MRef pc
-
-typedef struct GCfuncC {
- GCfuncHeader;
- lua_CFunction f; /* C function to be called. */
- TValue upvalue[1]; /* Array of upvalues (TValue). */
-} GCfuncC;
-
-typedef struct GCfuncL {
- GCfuncHeader;
- GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */
-} GCfuncL;
-
-typedef union GCfunc {
- GCfuncC c;
- GCfuncL l;
-} GCfunc;
-
-#define FF_LUA 0
-#define FF_C 1
-#define isluafunc(fn) ((fn)->c.ffid == FF_LUA)
-#define iscfunc(fn) ((fn)->c.ffid == FF_C)
-#define isffunc(fn) ((fn)->c.ffid > FF_C)
-#define funcproto(fn) \
- check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto)))
-#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n))
-#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n))
-
-/* -- Table object -------------------------------------------------------- */
-
-/* Hash node. */
-typedef struct Node {
- TValue val; /* Value object. Must be first field. */
- TValue key; /* Key object. */
- MRef next; /* Hash chain. */
- MRef freetop; /* Top of free elements (stored in t->node[0]). */
-} Node;
-
-LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
-
-typedef struct GCtab {
- GCHeader;
- uint8_t nomm; /* Negative cache for fast metamethods. */
- int8_t colo; /* Array colocation. */
- MRef array; /* Array part. */
- GCRef gclist;
- GCRef metatable; /* Must be at same offset in GCudata. */
- MRef node; /* Hash part. */
- uint32_t asize; /* Size of array part (keys [0, asize-1]). */
- uint32_t hmask; /* Hash part mask (size of hash part - 1). */
-} GCtab;
-
-#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab))
-#define tabref(r) (&gcref((r))->tab)
-#define noderef(r) (mref((r), Node))
-#define nextnode(n) (mref((n)->next, Node))
-
-/* -- State objects ------------------------------------------------------- */
-
-/* VM states. */
-enum {
- LJ_VMST_INTERP, /* Interpreter. */
- LJ_VMST_C, /* C function. */
- LJ_VMST_GC, /* Garbage collector. */
- LJ_VMST_EXIT, /* Trace exit handler. */
- LJ_VMST_RECORD, /* Trace recorder. */
- LJ_VMST_OPT, /* Optimizer. */
- LJ_VMST_ASM, /* Assembler. */
- LJ_VMST__MAX
-};
-
-#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st)
-
-/* Metamethods. ORDER MM */
-#ifdef LJ_HASFFI
-#define MMDEF_FFI(_) _(new)
-#else
-#define MMDEF_FFI(_)
-#endif
-
-#if LJ_52 || LJ_HASFFI
-#define MMDEF_PAIRS(_) _(pairs) _(ipairs)
-#else
-#define MMDEF_PAIRS(_)
-#define MM_pairs 255
-#define MM_ipairs 255
-#endif
-
-#define MMDEF(_) \
- _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \
- /* Only the above (fast) metamethods are negative cached (max. 8). */ \
- _(lt) _(le) _(concat) _(call) \
- /* The following must be in ORDER ARITH. */ \
- _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \
- /* The following are used in the standard libraries. */ \
- _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_)
-
-typedef enum {
-#define MMENUM(name) MM_##name,
-MMDEF(MMENUM)
-#undef MMENUM
- MM__MAX,
- MM____ = MM__MAX,
- MM_FAST = MM_len
-} MMS;
-
-/* GC root IDs. */
-typedef enum {
- GCROOT_MMNAME, /* Metamethod names. */
- GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1,
- GCROOT_BASEMT, /* Metatables for base types. */
- GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX,
- GCROOT_IO_INPUT, /* Userdata for default I/O input file. */
- GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */
- GCROOT_MAX
-} GCRootID;
-
-#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)])
-#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
-#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
-
-typedef struct GCState {
- MSize total; /* Memory currently allocated. */
- MSize threshold; /* Memory threshold. */
- uint8_t currentwhite; /* Current white color. */
- uint8_t state; /* GC state. */
- uint8_t nocdatafin; /* No cdata finalizer called. */
- uint8_t unused2;
- MSize sweepstr; /* Sweep position in string table. */
- GCRef root; /* List of all collectable objects. */
- MRef sweep; /* Sweep position in root list. */
- GCRef gray; /* List of gray objects. */
- GCRef grayagain; /* List of objects for atomic traversal. */
- GCRef weak; /* List of weak tables (to be cleared). */
- GCRef mmudata; /* List of userdata (to be finalized). */
- MSize stepmul; /* Incremental GC step granularity. */
- MSize debt; /* Debt (how much GC is behind schedule). */
- MSize estimate; /* Estimate of memory actually in use. */
- MSize pause; /* Pause between successive GC cycles. */
-} GCState;
-
-/* Global state, shared by all threads of a Lua universe. */
-typedef struct global_State {
- GCRef *strhash; /* String hash table (hash chain anchors). */
- MSize strmask; /* String hash mask (size of hash table - 1). */
- MSize strnum; /* Number of strings in hash table. */
- lua_Alloc allocf; /* Memory allocator. */
- void *allocd; /* Memory allocator data. */
- GCState gc; /* Garbage collector. */
- SBuf tmpbuf; /* Temporary buffer for string concatenation. */
- Node nilnode; /* Fallback 1-element hash part (nil key and value). */
- GCstr strempty; /* Empty string. */
- uint8_t stremptyz; /* Zero terminator of empty string. */
- uint8_t hookmask; /* Hook mask. */
- uint8_t dispatchmode; /* Dispatch mode. */
- uint8_t vmevmask; /* VM event mask. */
- GCRef mainthref; /* Link to main thread. */
- TValue registrytv; /* Anchor for registry. */
- TValue tmptv, tmptv2; /* Temporary TValues. */
- GCupval uvhead; /* Head of double-linked list of all open upvalues. */
- int32_t hookcount; /* Instruction hook countdown. */
- int32_t hookcstart; /* Start count for instruction hook counter. */
- lua_Hook hookf; /* Hook function. */
- lua_CFunction wrapf; /* Wrapper for C function calls. */
- lua_CFunction panic; /* Called as a last resort for errors. */
- volatile int32_t vmstate; /* VM state or current JIT code trace number. */
- BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */
- BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */
- GCRef jit_L; /* Current JIT code lua_State or NULL. */
- MRef jit_base; /* Current JIT code L->base. */
- MRef ctype_state; /* Pointer to C type state. */
- GCRef gcroot[GCROOT_MAX]; /* GC roots. */
-} global_State;
-
-#define mainthread(g) (&gcref(g->mainthref)->th)
-#define niltv(L) \
- check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val)
-#define niltvg(g) \
- check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val)
-
-/* Hook management. Hook event masks are defined in lua.h. */
-#define HOOK_EVENTMASK 0x0f
-#define HOOK_ACTIVE 0x10
-#define HOOK_ACTIVE_SHIFT 4
-#define HOOK_VMEVENT 0x20
-#define HOOK_GC 0x40
-#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE)
-#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE)
-#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC))
-#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
-#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE)
-#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK)
-#define hook_restore(g, h) \
- ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h))
-
-/* Per-thread state object. */
-struct lua_State {
- GCHeader;
- uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */
- uint8_t status; /* Thread status. */
- MRef glref; /* Link to global state. */
- GCRef gclist; /* GC chain. */
- TValue *base; /* Base of currently executing function. */
- TValue *top; /* First free slot in the stack. */
- MRef maxstack; /* Last free slot in the stack. */
- MRef stack; /* Stack base. */
- GCRef openupval; /* List of open upvalues in the stack. */
- GCRef env; /* Thread environment (table of globals). */
- void *cframe; /* End of C stack frame chain. */
- MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */
-};
-
-#define G(L) (mref(L->glref, global_State))
-#define registry(L) (&G(L)->registrytv)
-
-/* Macros to access the currently executing (Lua) function. */
-#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn)
-#define curr_funcisL(L) (isluafunc(curr_func(L)))
-#define curr_proto(L) (funcproto(curr_func(L)))
-#define curr_topL(L) (L->base + curr_proto(L)->framesize)
-#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top)
-
-/* -- GC object definition and conversions -------------------------------- */
-
-/* GC header for generic access to common fields of GC objects. */
-typedef struct GChead {
- GCHeader;
- uint8_t unused1;
- uint8_t unused2;
- GCRef env;
- GCRef gclist;
- GCRef metatable;
-} GChead;
-
-/* The env field SHOULD be at the same offset for all GC objects. */
-LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env));
-LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env));
-
-/* The metatable field MUST be at the same offset for all GC objects. */
-LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable));
-LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable));
-
-/* The gclist field MUST be at the same offset for all GC objects. */
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist));
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist));
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist));
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist));
-
-typedef union GCobj {
- GChead gch;
- GCstr str;
- GCupval uv;
- lua_State th;
- GCproto pt;
- GCfunc fn;
- GCcdata cd;
- GCtab tab;
- GCudata ud;
-} GCobj;
-
-/* Macros to convert a GCobj pointer into a specific value. */
-#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str)
-#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv)
-#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th)
-#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt)
-#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn)
-#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd)
-#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab)
-#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud)
-
-/* Macro to convert any collectable object into a GCobj pointer. */
-#define obj2gco(v) ((GCobj *)(v))
-
-/* -- TValue getters/setters ---------------------------------------------- */
-
-#ifdef LUA_USE_ASSERT
-#include "lj_gc.h"
-#endif
-
-/* Macros to test types. */
-#define itype(o) ((o)->it)
-#define tvisnil(o) (itype(o) == LJ_TNIL)
-#define tvisfalse(o) (itype(o) == LJ_TFALSE)
-#define tvistrue(o) (itype(o) == LJ_TTRUE)
-#define tvisbool(o) (tvisfalse(o) || tvistrue(o))
-#if LJ_64
-#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2)
-#else
-#define tvislightud(o) (itype(o) == LJ_TLIGHTUD)
-#endif
-#define tvisstr(o) (itype(o) == LJ_TSTR)
-#define tvisfunc(o) (itype(o) == LJ_TFUNC)
-#define tvisthread(o) (itype(o) == LJ_TTHREAD)
-#define tvisproto(o) (itype(o) == LJ_TPROTO)
-#define tviscdata(o) (itype(o) == LJ_TCDATA)
-#define tvistab(o) (itype(o) == LJ_TTAB)
-#define tvisudata(o) (itype(o) == LJ_TUDATA)
-#define tvisnumber(o) (itype(o) <= LJ_TISNUM)
-#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM)
-#define tvisnum(o) (itype(o) < LJ_TISNUM)
-
-#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND)
-#define tvispri(o) (itype(o) >= LJ_TISPRI)
-#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */
-#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV))
-
-/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */
-#define tvisnan(o) ((o)->n != (o)->n)
-#if LJ_64
-#define tviszero(o) (((o)->u64 << 1) == 0)
-#else
-#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0)
-#endif
-#define tvispzero(o) ((o)->u64 == 0)
-#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000))
-#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000))
-#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64)
-
-/* Macros to convert type ids. */
-#if LJ_64
-#define itypemap(o) \
- (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
-#else
-#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o))
-#endif
-
-/* Macros to get tagged values. */
-#define gcval(o) (gcref((o)->gcr))
-#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - (o)->it))
-#if LJ_64
-#define lightudV(o) \
- check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff)))
-#else
-#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void))
-#endif
-#define gcV(o) check_exp(tvisgcv(o), gcval(o))
-#define strV(o) check_exp(tvisstr(o), &gcval(o)->str)
-#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn)
-#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th)
-#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt)
-#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd)
-#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab)
-#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud)
-#define numV(o) check_exp(tvisnum(o), (o)->n)
-#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i)
-
-/* Macros to set tagged values. */
-#define setitype(o, i) ((o)->it = (i))
-#define setnilV(o) ((o)->it = LJ_TNIL)
-#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x))
-
-static LJ_AINLINE void setlightudV(TValue *o, void *p)
-{
-#if LJ_64
- o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
-#else
- setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
-#endif
-}
-
-#if LJ_64
-#define checklightudptr(L, p) \
- (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p))
-#define setcont(o, f) \
- ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
-#else
-#define checklightudptr(L, p) (p)
-#define setcont(o, f) setlightudV((o), (void *)(f))
-#endif
-
-#define tvchecklive(L, o) \
- UNUSED(L), lua_assert(!tvisgcv(o) || \
- ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o))))
-
-static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype)
-{
- setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o);
-}
-
-#define define_setV(name, type, tag) \
-static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \
-{ \
- setgcV(L, o, obj2gco(v), tag); \
-}
-define_setV(setstrV, GCstr, LJ_TSTR)
-define_setV(setthreadV, lua_State, LJ_TTHREAD)
-define_setV(setprotoV, GCproto, LJ_TPROTO)
-define_setV(setfuncV, GCfunc, LJ_TFUNC)
-define_setV(setcdataV, GCcdata, LJ_TCDATA)
-define_setV(settabV, GCtab, LJ_TTAB)
-define_setV(setudataV, GCudata, LJ_TUDATA)
-
-#define setnumV(o, x) ((o)->n = (x))
-#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000))
-#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000))
-#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000))
-
-static LJ_AINLINE void setintV(TValue *o, int32_t i)
-{
-#if LJ_DUALNUM
- o->i = (uint32_t)i; setitype(o, LJ_TISNUM);
-#else
- o->n = (lua_Number)i;
-#endif
-}
-
-static LJ_AINLINE void setint64V(TValue *o, int64_t i)
-{
- if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i))
- setintV(o, (int32_t)i);
- else
- setnumV(o, (lua_Number)i);
-}
-
-#if LJ_64
-#define setintptrV(o, i) setint64V((o), (i))
-#else
-#define setintptrV(o, i) setintV((o), (i))
-#endif
-
-/* Copy tagged values. */
-static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
-{
- *o1 = *o2; tvchecklive(L, o1);
-}
-
-/* -- Number to integer conversion ---------------------------------------- */
-
-#if LJ_SOFTFP
-LJ_ASMF int32_t lj_vm_tobit(double x);
-#endif
-
-static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
-{
-#if LJ_SOFTFP
- return lj_vm_tobit(n);
-#else
- TValue o;
- o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */
- return (int32_t)o.u32.lo;
-#endif
-}
-
-#if LJ_TARGET_X86 && !defined(__SSE2__)
-#define lj_num2int(n) lj_num2bit((n))
-#else
-#define lj_num2int(n) ((int32_t)(n))
-#endif
-
-static LJ_AINLINE uint64_t lj_num2u64(lua_Number n)
-{
-#ifdef _MSC_VER
- if (n >= 9223372036854775808.0) /* They think it's a feature. */
- return (uint64_t)(int64_t)(n - 18446744073709551616.0);
- else
-#endif
- return (uint64_t)n;
-}
-
-static LJ_AINLINE int32_t numberVint(cTValue *o)
-{
- if (LJ_LIKELY(tvisint(o)))
- return intV(o);
- else
- return lj_num2int(numV(o));
-}
-
-static LJ_AINLINE lua_Number numberVnum(cTValue *o)
-{
- if (LJ_UNLIKELY(tvisint(o)))
- return (lua_Number)intV(o);
- else
- return numV(o);
-}
-
-/* -- Miscellaneous object handling --------------------------------------- */
-
-/* Names and maps for internal and external object tags. */
-LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1];
-LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
-
-#define lj_typename(o) (lj_obj_itypename[itypemap(o)])
-
-/* Compare two objects without calling metamethods. */
-LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2);
-
-#endif
+/*
+** LuaJIT VM tags, values and objects.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#ifndef _LJ_OBJ_H
+#define _LJ_OBJ_H
+
+#include "lua.h"
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Memory references (32 bit address space) ---------------------------- */
+
+/* Memory size. */
+typedef uint32_t MSize;
+
+/* Memory reference */
+typedef struct MRef {
+ uint32_t ptr32; /* Pseudo 32 bit pointer. */
+} MRef;
+
+#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32)
+
+#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
+#define setmrefr(r, v) ((r).ptr32 = (v).ptr32)
+
+/* -- GC object references (32 bit address space) ------------------------- */
+
+/* GCobj reference */
+typedef struct GCRef {
+ uint32_t gcptr32; /* Pseudo 32 bit pointer. */
+} GCRef;
+
+/* Common GC header for all collectable objects. */
+#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct
+/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
+
+#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32)
+#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32)
+#define gcrefu(r) ((r).gcptr32)
+#define gcrefi(r) ((int32_t)(r).gcptr32)
+#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32)
+#define gcnext(gc) (gcref((gc)->gch.nextgc))
+
+#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
+#define setgcrefi(r, i) ((r).gcptr32 = (uint32_t)(i))
+#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p))
+#define setgcrefnull(r) ((r).gcptr32 = 0)
+#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32)
+
+/* IMPORTANT NOTE:
+**
+** All uses of the setgcref* macros MUST be accompanied with a write barrier.
+**
+** This is to ensure the integrity of the incremental GC. The invariant
+** to preserve is that a black object never points to a white object.
+** I.e. never store a white object into a field of a black object.
+**
+** It's ok to LEAVE OUT the write barrier ONLY in the following cases:
+** - The source is not a GC object (NULL).
+** - The target is a GC root. I.e. everything in global_State.
+** - The target is a lua_State field (threads are never black).
+** - The target is a stack slot, see setgcV et al.
+** - The target is an open upvalue, i.e. pointing to a stack slot.
+** - The target is a newly created object (i.e. marked white). But make
+** sure nothing invokes the GC inbetween.
+** - The target and the source are the same object (self-reference).
+** - The target already contains the object (e.g. moving elements around).
+**
+** The most common case is a store to a stack slot. All other cases where
+** a barrier has been omitted are annotated with a NOBARRIER comment.
+**
+** The same logic applies for stores to table slots (array part or hash
+** part). ALL uses of lj_tab_set* require a barrier for the stored value
+** *and* the stored key, based on the above rules. In practice this means
+** a barrier is needed if *either* of the key or value are a GC object.
+**
+** It's ok to LEAVE OUT the write barrier in the following special cases:
+** - The stored value is nil. The key doesn't matter because it's either
+** not resurrected or lj_tab_newkey() will take care of the key barrier.
+** - The key doesn't matter if the *previously* stored value is guaranteed
+** to be non-nil (because the key is kept alive in the table).
+** - The key doesn't matter if it's guaranteed not to be part of the table,
+** since lj_tab_newkey() takes care of the key barrier. This applies
+** trivially to new tables, but watch out for resurrected keys. Storing
+** a nil value leaves the key in the table!
+**
+** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used
+** by the interpreter for all table stores.
+**
+** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark
+** dead keys in tables. The reference is left in, but it's guaranteed to
+** be never dereferenced as long as the value is nil. It's ok if the key is
+** freed or if any object subsequently gets the same address.
+**
+** Not destroying dead keys helps to keep key hash slots stable. This avoids
+** specialization back-off for HREFK when a value flips between nil and
+** non-nil and the GC gets in the way. It also allows safely hoisting
+** HREF/HREFK across GC steps. Dead keys are only removed if a table is
+** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize.
+**
+** The trade-off is that a write barrier for tables must take the key into
+** account, too. Implicitly resurrecting the key by storing a non-nil value
+** may invalidate the incremental GC invariant.
+*/
+
+/* -- Common type definitions --------------------------------------------- */
+
+/* Types for handling bytecodes. Need this here, details in lj_bc.h. */
+typedef uint32_t BCIns; /* Bytecode instruction. */
+typedef uint32_t BCPos; /* Bytecode position. */
+typedef uint32_t BCReg; /* Bytecode register. */
+typedef int32_t BCLine; /* Bytecode line number. */
+
+/* Internal assembler functions. Never call these directly from C. */
+typedef void (*ASMFunction)(void);
+
+/* Resizable string buffer. Need this here, details in lj_str.h. */
+typedef struct SBuf {
+ char *buf; /* String buffer base. */
+ MSize n; /* String buffer length. */
+ MSize sz; /* String buffer size. */
+} SBuf;
+
+/* -- Tags and values ----------------------------------------------------- */
+
+/* Frame link. */
+typedef union {
+ int32_t ftsz; /* Frame type and size of previous frame. */
+ MRef pcr; /* Overlaps PC for Lua frames. */
+} FrameLink;
+
+/* Tagged value. */
+typedef LJ_ALIGN(8) union TValue {
+ uint64_t u64; /* 64 bit pattern overlaps number. */
+ lua_Number n; /* Number object overlaps split tag/value object. */
+ struct {
+ LJ_ENDIAN_LOHI(
+ union {
+ GCRef gcr; /* GCobj reference (if any). */
+ int32_t i; /* Integer value. */
+ };
+ , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
+ )
+ };
+ struct {
+ LJ_ENDIAN_LOHI(
+ GCRef func; /* Function for next frame (or dummy L). */
+ , FrameLink tp; /* Link to previous frame. */
+ )
+ } fr;
+ struct {
+ LJ_ENDIAN_LOHI(
+ uint32_t lo; /* Lower 32 bits of number. */
+ , uint32_t hi; /* Upper 32 bits of number. */
+ )
+ } u32;
+} TValue;
+
+typedef const TValue cTValue;
+
+#define tvref(r) (mref(r, TValue))
+
+/* More external and GCobj tags for internal objects. */
+#define LAST_TT LUA_TTHREAD
+#define LUA_TPROTO (LAST_TT+1)
+#define LUA_TCDATA (LAST_TT+2)
+
+/* Internal object tags.
+**
+** Internal tags overlap the MSW of a number object (must be a double).
+** Interpreted as a double these are special NaNs. The FPU only generates
+** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
+** for use as internal tags. Small negative numbers are used to shorten the
+** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate).
+**
+** ---MSW---.---LSW---
+** primitive types | itype | |
+** lightuserdata | itype | void * | (32 bit platforms)
+** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers)
+** GC objects | itype | GCRef |
+** int (LJ_DUALNUM)| itype | int |
+** number -------double------
+**
+** ORDER LJ_T
+** Primitive types nil/false/true must be first, lightuserdata next.
+** GC objects are at the end, table/userdata must be lowest.
+** Also check lj_ir.h for similar ordering constraints.
+*/
+#define LJ_TNIL (~0u)
+#define LJ_TFALSE (~1u)
+#define LJ_TTRUE (~2u)
+#define LJ_TLIGHTUD (~3u)
+#define LJ_TSTR (~4u)
+#define LJ_TUPVAL (~5u)
+#define LJ_TTHREAD (~6u)
+#define LJ_TPROTO (~7u)
+#define LJ_TFUNC (~8u)
+#define LJ_TTRACE (~9u)
+#define LJ_TCDATA (~10u)
+#define LJ_TTAB (~11u)
+#define LJ_TUDATA (~12u)
+/* This is just the canonical number type used in some places. */
+#define LJ_TNUMX (~13u)
+
+/* Integers have itype == LJ_TISNUM doubles have itype < LJ_TISNUM */
+#if LJ_64
+#define LJ_TISNUM 0xfffeffffu
+#else
+#define LJ_TISNUM LJ_TNUMX
+#endif
+#define LJ_TISTRUECOND LJ_TFALSE
+#define LJ_TISPRI LJ_TTRUE
+#define LJ_TISGCV (LJ_TSTR+1)
+#define LJ_TISTABUD LJ_TTAB
+
+/* -- String object ------------------------------------------------------- */
+
+/* String object header. String payload follows. */
+typedef struct GCstr {
+ GCHeader;
+ uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */
+ uint8_t unused;
+ MSize hash; /* Hash of string. */
+ MSize len; /* Size of string. */
+} GCstr;
+
+#define strref(r) (&gcref((r))->str)
+#define strdata(s) ((const char *)((s)+1))
+#define strdatawr(s) ((char *)((s)+1))
+#define strVdata(o) strdata(strV(o))
+#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1)
+
+/* -- Userdata object ----------------------------------------------------- */
+
+/* Userdata object. Payload follows. */
+typedef struct GCudata {
+ GCHeader;
+ uint8_t udtype; /* Userdata type. */
+ uint8_t unused2;
+ GCRef env; /* Should be at same offset in GCfunc. */
+ MSize len; /* Size of payload. */
+ GCRef metatable; /* Must be at same offset in GCtab. */
+ uint32_t align1; /* To force 8 byte alignment of the payload. */
+} GCudata;
+
+/* Userdata types. */
+enum {
+ UDTYPE_USERDATA, /* Regular userdata. */
+ UDTYPE_IO_FILE, /* I/O library FILE. */
+ UDTYPE_FFI_CLIB, /* FFI C library namespace. */
+ UDTYPE__MAX
+};
+
+#define uddata(u) ((void *)((u)+1))
+#define sizeudata(u) (sizeof(struct GCudata)+(u)->len)
+
+/* -- C data object ------------------------------------------------------- */
+
+/* C data object. Payload follows. */
+typedef struct GCcdata {
+ GCHeader;
+ uint16_t ctypeid; /* C type ID. */
+} GCcdata;
+
+/* Prepended to variable-sized or realigned C data objects. */
+typedef struct GCcdataVar {
+ uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */
+ uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). */
+ MSize len; /* Size of payload. */
+} GCcdataVar;
+
+#define cdataptr(cd) ((void *)((cd)+1))
+#define cdataisv(cd) ((cd)->marked & 0x80)
+#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar)))
+#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len)
+#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra)
+#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset))
+
+/* -- Prototype object ---------------------------------------------------- */
+
+#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef))
+#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1))
+
+typedef struct GCproto {
+ GCHeader;
+ uint8_t numparams; /* Number of parameters. */
+ uint8_t framesize; /* Fixed frame size. */
+ MSize sizebc; /* Number of bytecode instructions. */
+ GCRef gclist;
+ MRef k; /* Split constant array (points to the middle). */
+ MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */
+ MSize sizekgc; /* Number of collectable constants. */
+ MSize sizekn; /* Number of lua_Number constants. */
+ MSize sizept; /* Total size including colocated arrays. */
+ uint8_t sizeuv; /* Number of upvalues. */
+ uint8_t flags; /* Miscellaneous flags (see below). */
+ uint16_t trace; /* Anchor for chain of root traces. */
+ /* ------ The following fields are for debugging/tracebacks only ------ */
+ GCRef chunkname; /* Name of the chunk this function was defined in. */
+ BCLine firstline; /* First line of the function definition. */
+ BCLine numline; /* Number of lines for the function definition. */
+ MRef lineinfo; /* Compressed map from bytecode ins. to source line. */
+ MRef uvinfo; /* Upvalue names. */
+ MRef varinfo; /* Names and compressed extents of local variables. */
+} GCproto;
+
+/* Flags for prototype. */
+#define PROTO_CHILD 0x01 /* Has child prototypes. */
+#define PROTO_VARARG 0x02 /* Vararg function. */
+#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */
+#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */
+#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */
+/* Only used during parsing. */
+#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */
+#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */
+/* Top bits used for counting created closures. */
+#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */
+#define PROTO_CLC_BITS 3
+#define PROTO_CLC_POLY (3*PROTO_CLCOUNT) /* Polymorphic threshold. */
+
+#define PROTO_UV_LOCAL 0x8000 /* Upvalue for local slot. */
+#define PROTO_UV_IMMUTABLE 0x4000 /* Immutable upvalue. */
+
+#define proto_kgc(pt, idx) \
+ check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \
+ gcref(mref((pt)->k, GCRef)[(idx)]))
+#define proto_knumtv(pt, idx) \
+ check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)])
+#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto)))
+#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt)))
+#define proto_uv(pt) (mref((pt)->uv, uint16_t))
+
+#define proto_chunkname(pt) (strref((pt)->chunkname))
+#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt))))
+#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void))
+#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t))
+#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t))
+
+/* -- Upvalue object ------------------------------------------------------ */
+
+typedef struct GCupval {
+ GCHeader;
+ uint8_t closed; /* Set if closed (i.e. uv->v == &uv->u.value). */
+ uint8_t immutable; /* Immutable value. */
+ union {
+ TValue tv; /* If closed: the value itself. */
+ struct { /* If open: double linked list, anchored at thread. */
+ GCRef prev;
+ GCRef next;
+ };
+ };
+ MRef v; /* Points to stack slot (open) or above (closed). */
+ uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */
+} GCupval;
+
+#define uvprev(uv_) (&gcref((uv_)->prev)->uv)
+#define uvnext(uv_) (&gcref((uv_)->next)->uv)
+#define uvval(uv_) (mref((uv_)->v, TValue))
+
+/* -- Function object (closures) ------------------------------------------ */
+
+/* Common header for functions. env should be at same offset in GCudata. */
+#define GCfuncHeader \
+ GCHeader; uint8_t ffid; uint8_t nupvalues; \
+ GCRef env; GCRef gclist; MRef pc
+
+typedef struct GCfuncC {
+ GCfuncHeader;
+ lua_CFunction f; /* C function to be called. */
+ TValue upvalue[1]; /* Array of upvalues (TValue). */
+} GCfuncC;
+
+typedef struct GCfuncL {
+ GCfuncHeader;
+ GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */
+} GCfuncL;
+
+typedef union GCfunc {
+ GCfuncC c;
+ GCfuncL l;
+} GCfunc;
+
+#define FF_LUA 0
+#define FF_C 1
+#define isluafunc(fn) ((fn)->c.ffid == FF_LUA)
+#define iscfunc(fn) ((fn)->c.ffid == FF_C)
+#define isffunc(fn) ((fn)->c.ffid > FF_C)
+#define funcproto(fn) \
+ check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto)))
+#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n))
+#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n))
+
+/* -- Table object -------------------------------------------------------- */
+
+/* Hash node. */
+typedef struct Node {
+ TValue val; /* Value object. Must be first field. */
+ TValue key; /* Key object. */
+ MRef next; /* Hash chain. */
+ MRef freetop; /* Top of free elements (stored in t->node[0]). */
+} Node;
+
+LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
+
+typedef struct GCtab {
+ GCHeader;
+ uint8_t nomm; /* Negative cache for fast metamethods. */
+ int8_t colo; /* Array colocation. */
+ MRef array; /* Array part. */
+ GCRef gclist;
+ GCRef metatable; /* Must be at same offset in GCudata. */
+ MRef node; /* Hash part. */
+ uint32_t asize; /* Size of array part (keys [0, asize-1]). */
+ uint32_t hmask; /* Hash part mask (size of hash part - 1). */
+} GCtab;
+
+#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab))
+#define tabref(r) (&gcref((r))->tab)
+#define noderef(r) (mref((r), Node))
+#define nextnode(n) (mref((n)->next, Node))
+
+/* -- State objects ------------------------------------------------------- */
+
+/* VM states. */
+enum {
+ LJ_VMST_INTERP, /* Interpreter. */
+ LJ_VMST_C, /* C function. */
+ LJ_VMST_GC, /* Garbage collector. */
+ LJ_VMST_EXIT, /* Trace exit handler. */
+ LJ_VMST_RECORD, /* Trace recorder. */
+ LJ_VMST_OPT, /* Optimizer. */
+ LJ_VMST_ASM, /* Assembler. */
+ LJ_VMST__MAX
+};
+
+#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st)
+
+/* Metamethods. ORDER MM */
+#ifdef LJ_HASFFI
+#define MMDEF_FFI(_) _(new)
+#else
+#define MMDEF_FFI(_)
+#endif
+
+#if LJ_52 || LJ_HASFFI
+#define MMDEF_PAIRS(_) _(pairs) _(ipairs)
+#else
+#define MMDEF_PAIRS(_)
+#define MM_pairs 255
+#define MM_ipairs 255
+#endif
+
+#define MMDEF(_) \
+ _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \
+ /* Only the above (fast) metamethods are negative cached (max. 8). */ \
+ _(lt) _(le) _(concat) _(call) \
+ /* The following must be in ORDER ARITH. */ \
+ _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \
+ /* The following are used in the standard libraries. */ \
+ _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_)
+
+typedef enum {
+#define MMENUM(name) MM_##name,
+MMDEF(MMENUM)
+#undef MMENUM
+ MM__MAX,
+ MM____ = MM__MAX,
+ MM_FAST = MM_len
+} MMS;
+
+/* GC root IDs. */
+typedef enum {
+ GCROOT_MMNAME, /* Metamethod names. */
+ GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1,
+ GCROOT_BASEMT, /* Metatables for base types. */
+ GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX,
+ GCROOT_IO_INPUT, /* Userdata for default I/O input file. */
+ GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */
+ GCROOT_MAX
+} GCRootID;
+
+#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)])
+#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
+#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
+
+typedef struct GCState {
+ MSize total; /* Memory currently allocated. */
+ MSize threshold; /* Memory threshold. */
+ uint8_t currentwhite; /* Current white color. */
+ uint8_t state; /* GC state. */
+ uint8_t unused1;
+ uint8_t unused2;
+ MSize sweepstr; /* Sweep position in string table. */
+ GCRef root; /* List of all collectable objects. */
+ MRef sweep; /* Sweep position in root list. */
+ GCRef gray; /* List of gray objects. */
+ GCRef grayagain; /* List of objects for atomic traversal. */
+ GCRef weak; /* List of weak tables (to be cleared). */
+ GCRef mmudata; /* List of userdata (to be finalized). */
+ MSize stepmul; /* Incremental GC step granularity. */
+ MSize debt; /* Debt (how much GC is behind schedule). */
+ MSize estimate; /* Estimate of memory actually in use. */
+ MSize pause; /* Pause between successive GC cycles. */
+} GCState;
+
+/* Global state, shared by all threads of a Lua universe. */
+typedef struct global_State {
+ GCRef *strhash; /* String hash table (hash chain anchors). */
+ MSize strmask; /* String hash mask (size of hash table - 1). */
+ MSize strnum; /* Number of strings in hash table. */
+ lua_Alloc allocf; /* Memory allocator. */
+ void *allocd; /* Memory allocator data. */
+ GCState gc; /* Garbage collector. */
+ SBuf tmpbuf; /* Temporary buffer for string concatenation. */
+ Node nilnode; /* Fallback 1-element hash part (nil key and value). */
+ GCstr strempty; /* Empty string. */
+ uint8_t stremptyz; /* Zero terminator of empty string. */
+ uint8_t hookmask; /* Hook mask. */
+ uint8_t dispatchmode; /* Dispatch mode. */
+ uint8_t vmevmask; /* VM event mask. */
+ GCRef mainthref; /* Link to main thread. */
+ TValue registrytv; /* Anchor for registry. */
+ TValue tmptv, tmptv2; /* Temporary TValues. */
+ GCupval uvhead; /* Head of double-linked list of all open upvalues. */
+ int32_t hookcount; /* Instruction hook countdown. */
+ int32_t hookcstart; /* Start count for instruction hook counter. */
+ lua_Hook hookf; /* Hook function. */
+ lua_CFunction wrapf; /* Wrapper for C function calls. */
+ lua_CFunction panic; /* Called as a last resort for errors. */
+ volatile int32_t vmstate; /* VM state or current JIT code trace number. */
+ BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */
+ BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */
+ GCRef jit_L; /* Current JIT code lua_State or NULL. */
+ MRef jit_base; /* Current JIT code L->base. */
+ MRef ctype_state; /* Pointer to C type state. */
+ GCRef gcroot[GCROOT_MAX]; /* GC roots. */
+} global_State;
+
+#define mainthread(g) (&gcref(g->mainthref)->th)
+#define niltv(L) \
+ check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val)
+#define niltvg(g) \
+ check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val)
+
+/* Hook management. Hook event masks are defined in lua.h. */
+#define HOOK_EVENTMASK 0x0f
+#define HOOK_ACTIVE 0x10
+#define HOOK_ACTIVE_SHIFT 4
+#define HOOK_VMEVENT 0x20
+#define HOOK_GC 0x40
+#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE)
+#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE)
+#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC))
+#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
+#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE)
+#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK)
+#define hook_restore(g, h) \
+ ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h))
+
+/* Per-thread state object. */
+struct lua_State {
+ GCHeader;
+ uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */
+ uint8_t status; /* Thread status. */
+ MRef glref; /* Link to global state. */
+ GCRef gclist; /* GC chain. */
+ TValue *base; /* Base of currently executing function. */
+ TValue *top; /* First free slot in the stack. */
+ MRef maxstack; /* Last free slot in the stack. */
+ MRef stack; /* Stack base. */
+ GCRef openupval; /* List of open upvalues in the stack. */
+ GCRef env; /* Thread environment (table of globals). */
+ void *cframe; /* End of C stack frame chain. */
+ MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */
+};
+
+#define G(L) (mref(L->glref, global_State))
+#define registry(L) (&G(L)->registrytv)
+
+/* Macros to access the currently executing (Lua) function. */
+#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn)
+#define curr_funcisL(L) (isluafunc(curr_func(L)))
+#define curr_proto(L) (funcproto(curr_func(L)))
+#define curr_topL(L) (L->base + curr_proto(L)->framesize)
+#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top)
+
+/* -- GC object definition and conversions -------------------------------- */
+
+/* GC header for generic access to common fields of GC objects. */
+typedef struct GChead {
+ GCHeader;
+ uint8_t unused1;
+ uint8_t unused2;
+ GCRef env;
+ GCRef gclist;
+ GCRef metatable;
+} GChead;
+
+/* The env field SHOULD be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env));
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env));
+
+/* The metatable field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable));
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable));
+
+/* The gclist field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist));
+
+typedef union GCobj {
+ GChead gch;
+ GCstr str;
+ GCupval uv;
+ lua_State th;
+ GCproto pt;
+ GCfunc fn;
+ GCcdata cd;
+ GCtab tab;
+ GCudata ud;
+} GCobj;
+
+/* Macros to convert a GCobj pointer into a specific value. */
+#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str)
+#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv)
+#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th)
+#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt)
+#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn)
+#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd)
+#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab)
+#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud)
+
+/* Macro to convert any collectable object into a GCobj pointer. */
+#define obj2gco(v) ((GCobj *)(v))
+
+/* -- TValue getters/setters ---------------------------------------------- */
+
+#ifdef LUA_USE_ASSERT
+#include "lj_gc.h"
+#endif
+
+/* Macros to test types. */
+#define itype(o) ((o)->it)
+#define tvisnil(o) (itype(o) == LJ_TNIL)
+#define tvisfalse(o) (itype(o) == LJ_TFALSE)
+#define tvistrue(o) (itype(o) == LJ_TTRUE)
+#define tvisbool(o) (tvisfalse(o) || tvistrue(o))
+#if LJ_64
+#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2)
+#else
+#define tvislightud(o) (itype(o) == LJ_TLIGHTUD)
+#endif
+#define tvisstr(o) (itype(o) == LJ_TSTR)
+#define tvisfunc(o) (itype(o) == LJ_TFUNC)
+#define tvisthread(o) (itype(o) == LJ_TTHREAD)
+#define tvisproto(o) (itype(o) == LJ_TPROTO)
+#define tviscdata(o) (itype(o) == LJ_TCDATA)
+#define tvistab(o) (itype(o) == LJ_TTAB)
+#define tvisudata(o) (itype(o) == LJ_TUDATA)
+#define tvisnumber(o) (itype(o) <= LJ_TISNUM)
+#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM)
+#define tvisnum(o) (itype(o) < LJ_TISNUM)
+
+#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND)
+#define tvispri(o) (itype(o) >= LJ_TISPRI)
+#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */
+#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV))
+
+/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */
+#define tvisnan(o) ((o)->n != (o)->n)
+#if LJ_64
+#define tviszero(o) (((o)->u64 << 1) == 0)
+#else
+#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0)
+#endif
+#define tvispzero(o) ((o)->u64 == 0)
+#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000))
+#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000))
+#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64)
+
+/* Macros to convert type ids. */
+#if LJ_64
+#define itypemap(o) \
+ (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
+#else
+#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o))
+#endif
+
+/* Macros to get tagged values. */
+#define gcval(o) (gcref((o)->gcr))
+#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - (o)->it))
+#if LJ_64
+#define lightudV(o) \
+ check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff)))
+#else
+#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void))
+#endif
+#define gcV(o) check_exp(tvisgcv(o), gcval(o))
+#define strV(o) check_exp(tvisstr(o), &gcval(o)->str)
+#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn)
+#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th)
+#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt)
+#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd)
+#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab)
+#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud)
+#define numV(o) check_exp(tvisnum(o), (o)->n)
+#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i)
+
+/* Macros to set tagged values. */
+#define setitype(o, i) ((o)->it = (i))
+#define setnilV(o) ((o)->it = LJ_TNIL)
+#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x))
+
+static LJ_AINLINE void setlightudV(TValue *o, void *p)
+{
+#if LJ_64
+ o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
+#else
+ setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
+#endif
+}
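On 64-bit targets setlightudV() above packs the pointer into the low 47 bits and puts the 0xffff tag in the top 16; lightudV() masks the tag back out and checklightudptr() rejects pointers that do not fit. A standalone round-trip check of that packing, using plain stdint arithmetic instead of LuaJIT types:

#include <assert.h>
#include <stdint.h>
int main(void)
{
  uint64_t p = 0x00007f00deadbeefull;              /* Any value that fits in 47 bits. */
  assert((p >> 47) == 0);                          /* What checklightudptr() verifies. */
  uint64_t boxed = p | (((uint64_t)0xffff) << 48); /* setlightudV() packing. */
  uint64_t back  = boxed & 0x00007fffffffffffull;  /* lightudV() unmasking. */
  assert(back == p);
  return 0;
}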
+
+#if LJ_64
+#define checklightudptr(L, p) \
+ (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p))
+#define setcont(o, f) \
+ ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
+#else
+#define checklightudptr(L, p) (p)
+#define setcont(o, f) setlightudV((o), (void *)(f))
+#endif
+
+#define tvchecklive(L, o) \
+ UNUSED(L), lua_assert(!tvisgcv(o) || \
+ ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o))))
+
+static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype)
+{
+ setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o);
+}
+
+#define define_setV(name, type, tag) \
+static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \
+{ \
+ setgcV(L, o, obj2gco(v), tag); \
+}
+define_setV(setstrV, GCstr, LJ_TSTR)
+define_setV(setthreadV, lua_State, LJ_TTHREAD)
+define_setV(setprotoV, GCproto, LJ_TPROTO)
+define_setV(setfuncV, GCfunc, LJ_TFUNC)
+define_setV(setcdataV, GCcdata, LJ_TCDATA)
+define_setV(settabV, GCtab, LJ_TTAB)
+define_setV(setudataV, GCudata, LJ_TUDATA)
+
+#define setnumV(o, x) ((o)->n = (x))
+#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000))
+#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000))
+#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000))
+
+static LJ_AINLINE void setintV(TValue *o, int32_t i)
+{
+#if LJ_DUALNUM
+ o->i = (uint32_t)i; setitype(o, LJ_TISNUM);
+#else
+ o->n = (lua_Number)i;
+#endif
+}
+
+static LJ_AINLINE void setint64V(TValue *o, int64_t i)
+{
+ if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i))
+ setintV(o, (int32_t)i);
+ else
+ setnumV(o, (lua_Number)i);
+}
+
+#if LJ_64
+#define setintptrV(o, i) setint64V((o), (i))
+#else
+#define setintptrV(o, i) setintV((o), (i))
+#endif
+
+/* Copy tagged values. */
+static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
+{
+ *o1 = *o2; tvchecklive(L, o1);
+}
+
+/* -- Number to integer conversion ---------------------------------------- */
+
+#if LJ_SOFTFP
+LJ_ASMF int32_t lj_vm_tobit(double x);
+#endif
+
+static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
+{
+#if LJ_SOFTFP
+ return lj_vm_tobit(n);
+#else
+ TValue o;
+ o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */
+ return (int32_t)o.u32.lo;
+#endif
+}
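The constant 2^52+2^51 added above forces the rounded integer into the low mantissa word of the double, which is then read back directly. A standalone check of that bias trick; memcpy is used so the check is endian-independent, whereas the header reads the same low word via o.u32.lo:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
int main(void)
{
  double d = -1.0 + 6755399441055744.0;  /* 2^52 + 2^51 */
  uint64_t u;
  memcpy(&u, &d, sizeof(u));             /* Bit pattern of the biased double. */
  printf("0x%08x\n", (uint32_t)u);       /* Prints 0xffffffff, i.e. -1 wrapped to 32 bits. */
  return 0;
}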
+
+#if LJ_TARGET_X86 && !defined(__SSE2__)
+#define lj_num2int(n) lj_num2bit((n))
+#else
+#define lj_num2int(n) ((int32_t)(n))
+#endif
+
+static LJ_AINLINE uint64_t lj_num2u64(lua_Number n)
+{
+#ifdef _MSC_VER
+ if (n >= 9223372036854775808.0) /* They think it's a feature. */
+ return (uint64_t)(int64_t)(n - 18446744073709551616.0);
+ else
+#endif
+ return (uint64_t)n;
+}
+
+static LJ_AINLINE int32_t numberVint(cTValue *o)
+{
+ if (LJ_LIKELY(tvisint(o)))
+ return intV(o);
+ else
+ return lj_num2int(numV(o));
+}
+
+static LJ_AINLINE lua_Number numberVnum(cTValue *o)
+{
+ if (LJ_UNLIKELY(tvisint(o)))
+ return (lua_Number)intV(o);
+ else
+ return numV(o);
+}
+
+/* -- Miscellaneous object handling --------------------------------------- */
+
+/* Names and maps for internal and external object tags. */
+LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1];
+LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
+
+#define lj_typename(o) (lj_obj_itypename[itypemap(o)])
+
+/* Compare two objects without calling metamethods. */
+LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2);
+
+#endif
diff --git a/3rdparty/lua/src/lj_opt_dce.c b/3rdparty/lua/src/lj_opt_dce.c
index e31bed7..d64cca7 100644
--- a/3rdparty/lua/src/lj_opt_dce.c
+++ b/3rdparty/lua/src/lj_opt_dce.c
@@ -1,78 +1,77 @@
-/*
-** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_opt_dce_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_iropt.h"
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Scan through all snapshots and mark all referenced instructions. */
-static void dce_marksnap(jit_State *J)
-{
- SnapNo i, nsnap = J->cur.nsnap;
- for (i = 0; i < nsnap; i++) {
- SnapShot *snap = &J->cur.snap[i];
- SnapEntry *map = &J->cur.snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- for (n = 0; n < nent; n++) {
- IRRef ref = snap_ref(map[n]);
- if (ref >= REF_FIRST)
- irt_setmark(IR(ref)->t);
- }
- }
-}
-
-/* Backwards propagate marks. Replace unused instructions with NOPs. */
-static void dce_propagate(jit_State *J)
-{
- IRRef1 *pchain[IR__MAX];
- IRRef ins;
- uint32_t i;
- for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i];
- for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) {
- IRIns *ir = IR(ins);
- if (irt_ismarked(ir->t)) {
- irt_clearmark(ir->t);
- pchain[ir->o] = &ir->prev;
- } else if (!ir_sideeff(ir)) {
- *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */
- ir->t.irt = IRT_NIL;
- ir->o = IR_NOP; /* Replace instruction with NOP. */
- ir->op1 = ir->op2 = 0;
- ir->prev = 0;
- continue;
- }
- if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
- if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
- }
-}
-
-/* Dead Code Elimination.
-**
-** First backpropagate marks for all used instructions. Then replace
-** the unused ones with a NOP. Note that compressing the IR to eliminate
-** the NOPs does not pay off.
-*/
-void lj_opt_dce(jit_State *J)
-{
- if ((J->flags & JIT_F_OPT_DCE)) {
- dce_marksnap(J);
- dce_propagate(J);
- memset(J->bpropcache, 0, sizeof(J->bpropcache)); /* Invalidate cache. */
- }
-}
-
-#undef IR
-
-#endif
+/*
+** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_dce_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Scan through all snapshots and mark all referenced instructions. */
+static void dce_marksnap(jit_State *J)
+{
+ SnapNo i, nsnap = J->cur.nsnap;
+ for (i = 0; i < nsnap; i++) {
+ SnapShot *snap = &J->cur.snap[i];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (ref >= REF_FIRST)
+ irt_setmark(IR(ref)->t);
+ }
+ }
+}
+
+/* Backwards propagate marks. Replace unused instructions with NOPs. */
+static void dce_propagate(jit_State *J)
+{
+ IRRef1 *pchain[IR__MAX];
+ IRRef ins;
+ uint32_t i;
+ for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i];
+ for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) {
+ IRIns *ir = IR(ins);
+ if (irt_ismarked(ir->t)) {
+ irt_clearmark(ir->t);
+ pchain[ir->o] = &ir->prev;
+ } else if (!ir_sideeff(ir)) {
+ *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */
+ ir->t.irt = IRT_NIL;
+ ir->o = IR_NOP; /* Replace instruction with NOP. */
+ ir->op1 = ir->op2 = 0;
+ ir->prev = 0;
+ continue;
+ }
+ if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+ if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+ }
+}
+
+/* Dead Code Elimination.
+**
+** First backpropagate marks for all used instructions. Then replace
+** the unused ones with a NOP. Note that compressing the IR to eliminate
+** the NOPs does not pay off.
+*/
+void lj_opt_dce(jit_State *J)
+{
+ if ((J->flags & JIT_F_OPT_DCE)) {
+ dce_marksnap(J);
+ dce_propagate(J);
+ }
+}
+
+#undef IR
+
+#endif
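For reference, a minimal standalone sketch of the backward pass in dce_propagate() above, on a toy IR: instructions are visited last to first, and unmarked side-effect-free ones become NOPs and stop propagating marks. The snapshot seeding done by dce_marksnap() is omitted, and ToyIns is a hypothetical type, not LuaJIT's IRIns.

#include <assert.h>
typedef struct ToyIns { int op1, op2, marked, sideeff, nop; } ToyIns;

static void toy_dce(ToyIns *ir, int n)
{
  int i;
  for (i = n-1; i >= 0; i--) {
    if (!ir[i].marked && !ir[i].sideeff) {
      ir[i].nop = 1;                          /* Dead: turn into a NOP. */
      continue;                               /* Its operands stay unmarked. */
    }
    if (ir[i].op1 >= 0) ir[ir[i].op1].marked = 1;
    if (ir[i].op2 >= 0) ir[ir[i].op2].marked = 1;
  }
}

int main(void)
{
  /* 0: load, 1: add(0,0) with unused result, 2: store(0) with a side effect. */
  ToyIns ir[3] = {{-1,-1,0,0,0}, {0,0,0,0,0}, {0,-1,0,1,0}};
  toy_dce(ir, 3);
  assert(ir[1].nop && !ir[0].nop && !ir[2].nop);
  return 0;
}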
diff --git a/3rdparty/lua/src/lj_opt_fold.c b/3rdparty/lua/src/lj_opt_fold.c
index 8d47c2a..fe37b98 100644
--- a/3rdparty/lua/src/lj_opt_fold.c
+++ b/3rdparty/lua/src/lj_opt_fold.c
@@ -1,2304 +1,2295 @@
-/*
-** FOLD: Constant Folding, Algebraic Simplifications and Reassociation.
-** ABCelim: Array Bounds Check Elimination.
-** CSE: Common-Subexpression Elimination.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_opt_fold_c
-#define LUA_CORE
-
-#include <math.h>
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_iropt.h"
-#include "lj_trace.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#endif
-#include "lj_carith.h"
-#include "lj_vm.h"
-#include "lj_strscan.h"
-
-/* Here's a short description of how the FOLD engine processes instructions:
-**
-** The FOLD engine receives a single instruction stored in fins (J->fold.ins).
-** The instruction and its operands are used to select matching fold rules.
-** These are applied iteratively until a fixed point is reached.
-**
-** The 8 bit opcode of the instruction itself plus the opcodes of the
-** two instructions referenced by its operands form a 24 bit key
-** 'ins left right' (unused operands -> 0, literals -> lowest 8 bits).
-**
-** This key is used for partial matching against the fold rules. The
-** left/right operand fields of the key are successively masked with
-** the 'any' wildcard, from most specific to least specific:
-**
-** ins left right
-** ins any right
-** ins left any
-** ins any any
-**
-** The masked key is used to lookup a matching fold rule in a semi-perfect
-** hash table. If a matching rule is found, the related fold function is run.
-** Multiple rules can share the same fold function. A fold rule may return
-** one of several special values:
-**
-** - NEXTFOLD means no folding was applied, because an additional test
-** inside the fold function failed. Matching continues against less
-** specific fold rules. Finally the instruction is passed on to CSE.
-**
-** - RETRYFOLD means the instruction was modified in-place. Folding is
-** retried as if this instruction had just been received.
-**
-** All other return values are terminal actions -- no further folding is
-** applied:
-**
-** - INTFOLD(i) returns a reference to the integer constant i.
-**
-** - LEFTFOLD and RIGHTFOLD return the left/right operand reference
-** without emitting an instruction.
-**
-** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit
-** it without passing through any further optimizations.
-**
-** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have
-** no result (e.g. guarded assertions): FAILFOLD means the guard would
-** always fail, i.e. the current trace is pointless. DROPFOLD means
-** the guard is always true and has been eliminated. CONDFOLD is a
-** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail).
-**
-** - Any other return value is interpreted as an IRRef or TRef. This
-** can be a reference to an existing or a newly created instruction.
-** Only the least-significant 16 bits (IRRef1) are used to form a TRef
-** which is finally returned to the caller.
-**
-** The FOLD engine receives instructions both from the trace recorder and
-** substituted instructions from LOOP unrolling. This means all types
-** of instructions may end up here, even though the recorder bypasses
-** FOLD in some cases. Thus all loads, stores and allocations must have
-** an any/any rule to avoid being passed on to CSE.
-**
-** Carefully read the following requirements before adding or modifying
-** any fold rules:
-**
-** Requirement #1: All fold rules must preserve their destination type.
-**
-** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result).
-** Never use lj_ir_knumint() which can have either a KINT or KNUM result.
-**
-** Requirement #2: Fold rules should not create *new* instructions which
-** reference operands *across* PHIs.
-**
-** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the
-** left operand is a PHI. Then fleft->op1 would point across the PHI
-** frontier to an invariant instruction. Adding a PHI for this instruction
-** would be counterproductive. The solution is to add a barrier which
-** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case.
-** The only exception is for recurrences with high latencies like
-** repeated int->num->int conversions.
-**
-** One could relax this condition a bit if the referenced instruction is
-** a PHI, too. But this often leads to worse code due to excessive
-** register shuffling.
-**
-** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though.
-** Even returning fleft->op1 would be ok, because a new PHI will be added,
-** if needed. But again, this leads to excessive register shuffling and
-** should be avoided.
-**
-** Requirement #3: The set of all fold rules must be monotonic to guarantee
-** termination.
-**
-** The goal is optimization, so one primarily wants to add strength-reducing
-** rules. This means eliminating an instruction or replacing an instruction
-** with one or more simpler instructions. Don't add fold rules which point
-** into the other direction.
-**
-** Some rules (like commutativity) do not directly reduce the strength of
-** an instruction, but enable other fold rules (e.g. by moving constants
-** to the right operand). These rules must be made unidirectional to avoid
-** cycles.
-**
-** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it.
-*/
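The lookup order described above (left operand wildcarded first, then the right, then both) can be rendered as a standalone sketch. The opcode values and the FOLD_ANY wildcard below are hypothetical, since the real keys and the semi-perfect hash table are generated by buildvm from the LJFOLD() lines:

#include <stdio.h>
#include <stdint.h>
#define FOLD_ANY 0xffu
int main(void)
{
  uint32_t ins = 0x23, left = 0x11, right = 0x30;  /* Toy opcode bytes. */
  uint32_t keys[4];
  keys[0] = (ins<<16) | (left<<8) | right;         /* ins left right */
  keys[1] = (ins<<16) | (FOLD_ANY<<8) | right;     /* ins any  right */
  keys[2] = (ins<<16) | (left<<8) | FOLD_ANY;      /* ins left any   */
  keys[3] = (ins<<16) | (FOLD_ANY<<8) | FOLD_ANY;  /* ins any  any   */
  for (int i = 0; i < 4; i++)
    printf("candidate key %d: 0x%06x\n", i, keys[i]);
  return 0;
}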
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-#define fins (&J->fold.ins)
-#define fleft (&J->fold.left)
-#define fright (&J->fold.right)
-#define knumleft (ir_knum(fleft)->n)
-#define knumright (ir_knum(fright)->n)
-
-/* Pass IR on to next optimization in chain (FOLD). */
-#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
-
-/* Fold function type. Fastcall on x86 significantly reduces their size. */
-typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J);
-
-/* Macros for the fold specs, so buildvm can recognize them. */
-#define LJFOLD(x)
-#define LJFOLDX(x)
-#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J)
-/* Note: They must be at the start of a line or buildvm ignores them! */
-
-/* Barrier to prevent using operands across PHIs. */
-#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD
-
-/* Barrier to prevent folding across a GC step.
-** GC steps can only happen at the head of a trace and at LOOP.
-** And the GC is only driven forward if there is at least one allocation.
-*/
-#define gcstep_barrier(J, ref) \
- ((ref) < J->chain[IR_LOOP] && \
- (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \
- J->chain[IR_TNEW] || J->chain[IR_TDUP] || \
- J->chain[IR_CNEW] || J->chain[IR_CNEWI] || J->chain[IR_TOSTR]))
-
-/* -- Constant folding for FP numbers ------------------------------------- */
-
-LJFOLD(ADD KNUM KNUM)
-LJFOLD(SUB KNUM KNUM)
-LJFOLD(MUL KNUM KNUM)
-LJFOLD(DIV KNUM KNUM)
-LJFOLD(NEG KNUM KNUM)
-LJFOLD(ABS KNUM KNUM)
-LJFOLD(ATAN2 KNUM KNUM)
-LJFOLD(LDEXP KNUM KNUM)
-LJFOLD(MIN KNUM KNUM)
-LJFOLD(MAX KNUM KNUM)
-LJFOLDF(kfold_numarith)
-{
- lua_Number a = knumleft;
- lua_Number b = knumright;
- lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD);
- return lj_ir_knum(J, y);
-}
-
-LJFOLD(LDEXP KNUM KINT)
-LJFOLDF(kfold_ldexp)
-{
-#if LJ_TARGET_X86ORX64
- UNUSED(J);
- return NEXTFOLD;
-#else
- return lj_ir_knum(J, ldexp(knumleft, fright->i));
-#endif
-}
-
-LJFOLD(FPMATH KNUM any)
-LJFOLDF(kfold_fpmath)
-{
- lua_Number a = knumleft;
- lua_Number y = lj_vm_foldfpm(a, fins->op2);
- return lj_ir_knum(J, y);
-}
-
-LJFOLD(POW KNUM KINT)
-LJFOLDF(kfold_numpow)
-{
- lua_Number a = knumleft;
- lua_Number b = (lua_Number)fright->i;
- lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD);
- return lj_ir_knum(J, y);
-}
-
-/* Must not use kfold_kref for numbers (could be NaN). */
-LJFOLD(EQ KNUM KNUM)
-LJFOLD(NE KNUM KNUM)
-LJFOLD(LT KNUM KNUM)
-LJFOLD(GE KNUM KNUM)
-LJFOLD(LE KNUM KNUM)
-LJFOLD(GT KNUM KNUM)
-LJFOLD(ULT KNUM KNUM)
-LJFOLD(UGE KNUM KNUM)
-LJFOLD(ULE KNUM KNUM)
-LJFOLD(UGT KNUM KNUM)
-LJFOLDF(kfold_numcomp)
-{
- return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o));
-}
-
-/* -- Constant folding for 32 bit integers -------------------------------- */
-
-static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op)
-{
- switch (op) {
- case IR_ADD: k1 += k2; break;
- case IR_SUB: k1 -= k2; break;
- case IR_MUL: k1 *= k2; break;
- case IR_MOD: k1 = lj_vm_modi(k1, k2); break;
- case IR_NEG: k1 = -k1; break;
- case IR_BAND: k1 &= k2; break;
- case IR_BOR: k1 |= k2; break;
- case IR_BXOR: k1 ^= k2; break;
- case IR_BSHL: k1 <<= (k2 & 31); break;
- case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break;
- case IR_BSAR: k1 >>= (k2 & 31); break;
- case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break;
- case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break;
- case IR_MIN: k1 = k1 < k2 ? k1 : k2; break;
- case IR_MAX: k1 = k1 > k2 ? k1 : k2; break;
- default: lua_assert(0); break;
- }
- return k1;
-}
-
-LJFOLD(ADD KINT KINT)
-LJFOLD(SUB KINT KINT)
-LJFOLD(MUL KINT KINT)
-LJFOLD(MOD KINT KINT)
-LJFOLD(NEG KINT KINT)
-LJFOLD(BAND KINT KINT)
-LJFOLD(BOR KINT KINT)
-LJFOLD(BXOR KINT KINT)
-LJFOLD(BSHL KINT KINT)
-LJFOLD(BSHR KINT KINT)
-LJFOLD(BSAR KINT KINT)
-LJFOLD(BROL KINT KINT)
-LJFOLD(BROR KINT KINT)
-LJFOLD(MIN KINT KINT)
-LJFOLD(MAX KINT KINT)
-LJFOLDF(kfold_intarith)
-{
- return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o));
-}
-
-LJFOLD(ADDOV KINT KINT)
-LJFOLD(SUBOV KINT KINT)
-LJFOLD(MULOV KINT KINT)
-LJFOLDF(kfold_intovarith)
-{
- lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i,
- fins->o - IR_ADDOV);
- int32_t k = lj_num2int(n);
- if (n != (lua_Number)k)
- return FAILFOLD;
- return INTFOLD(k);
-}
-
-LJFOLD(BNOT KINT)
-LJFOLDF(kfold_bnot)
-{
- return INTFOLD(~fleft->i);
-}
-
-LJFOLD(BSWAP KINT)
-LJFOLDF(kfold_bswap)
-{
- return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i));
-}
-
-LJFOLD(LT KINT KINT)
-LJFOLD(GE KINT KINT)
-LJFOLD(LE KINT KINT)
-LJFOLD(GT KINT KINT)
-LJFOLD(ULT KINT KINT)
-LJFOLD(UGE KINT KINT)
-LJFOLD(ULE KINT KINT)
-LJFOLD(UGT KINT KINT)
-LJFOLD(ABC KINT KINT)
-LJFOLDF(kfold_intcomp)
-{
- int32_t a = fleft->i, b = fright->i;
- switch ((IROp)fins->o) {
- case IR_LT: return CONDFOLD(a < b);
- case IR_GE: return CONDFOLD(a >= b);
- case IR_LE: return CONDFOLD(a <= b);
- case IR_GT: return CONDFOLD(a > b);
- case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b);
- case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b);
- case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b);
- case IR_ABC:
- case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b);
- default: lua_assert(0); return FAILFOLD;
- }
-}
-
-LJFOLD(UGE any KINT)
-LJFOLDF(kfold_intcomp0)
-{
- if (fright->i == 0)
- return DROPFOLD;
- return NEXTFOLD;
-}
-
-/* -- Constant folding for 64 bit integers -------------------------------- */
-
-static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op)
-{
- switch (op) {
-#if LJ_64 || LJ_HASFFI
- case IR_ADD: k1 += k2; break;
- case IR_SUB: k1 -= k2; break;
-#endif
-#if LJ_HASFFI
- case IR_MUL: k1 *= k2; break;
- case IR_BAND: k1 &= k2; break;
- case IR_BOR: k1 |= k2; break;
- case IR_BXOR: k1 ^= k2; break;
-#endif
- default: UNUSED(k2); lua_assert(0); break;
- }
- return k1;
-}
-
-LJFOLD(ADD KINT64 KINT64)
-LJFOLD(SUB KINT64 KINT64)
-LJFOLD(MUL KINT64 KINT64)
-LJFOLD(BAND KINT64 KINT64)
-LJFOLD(BOR KINT64 KINT64)
-LJFOLD(BXOR KINT64 KINT64)
-LJFOLDF(kfold_int64arith)
-{
- return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64,
- ir_k64(fright)->u64, (IROp)fins->o));
-}
-
-LJFOLD(DIV KINT64 KINT64)
-LJFOLD(MOD KINT64 KINT64)
-LJFOLD(POW KINT64 KINT64)
-LJFOLDF(kfold_int64arith2)
-{
-#if LJ_HASFFI
- uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64;
- if (irt_isi64(fins->t)) {
- k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) :
- fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) :
- lj_carith_powi64((int64_t)k1, (int64_t)k2);
- } else {
- k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) :
- fins->o == IR_MOD ? lj_carith_modu64(k1, k2) :
- lj_carith_powu64(k1, k2);
- }
- return INT64FOLD(k1);
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-LJFOLD(BSHL KINT64 KINT)
-LJFOLD(BSHR KINT64 KINT)
-LJFOLD(BSAR KINT64 KINT)
-LJFOLD(BROL KINT64 KINT)
-LJFOLD(BROR KINT64 KINT)
-LJFOLDF(kfold_int64shift)
-{
-#if LJ_HASFFI || LJ_64
- uint64_t k = ir_k64(fleft)->u64;
- int32_t sh = (fright->i & 63);
- switch ((IROp)fins->o) {
- case IR_BSHL: k <<= sh; break;
-#if LJ_HASFFI
- case IR_BSHR: k >>= sh; break;
- case IR_BSAR: k = (uint64_t)((int64_t)k >> sh); break;
- case IR_BROL: k = lj_rol(k, sh); break;
- case IR_BROR: k = lj_ror(k, sh); break;
-#endif
- default: lua_assert(0); break;
- }
- return INT64FOLD(k);
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-LJFOLD(BNOT KINT64)
-LJFOLDF(kfold_bnot64)
-{
-#if LJ_HASFFI
- return INT64FOLD(~ir_k64(fleft)->u64);
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-LJFOLD(BSWAP KINT64)
-LJFOLDF(kfold_bswap64)
-{
-#if LJ_HASFFI
- return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64));
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-LJFOLD(LT KINT64 KINT64)
-LJFOLD(GE KINT64 KINT64)
-LJFOLD(LE KINT64 KINT64)
-LJFOLD(GT KINT64 KINT64)
-LJFOLD(ULT KINT64 KINT64)
-LJFOLD(UGE KINT64 KINT64)
-LJFOLD(ULE KINT64 KINT64)
-LJFOLD(UGT KINT64 KINT64)
-LJFOLDF(kfold_int64comp)
-{
-#if LJ_HASFFI
- uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64;
- switch ((IROp)fins->o) {
- case IR_LT: return CONDFOLD(a < b);
- case IR_GE: return CONDFOLD(a >= b);
- case IR_LE: return CONDFOLD(a <= b);
- case IR_GT: return CONDFOLD(a > b);
- case IR_ULT: return CONDFOLD((uint64_t)a < (uint64_t)b);
- case IR_UGE: return CONDFOLD((uint64_t)a >= (uint64_t)b);
- case IR_ULE: return CONDFOLD((uint64_t)a <= (uint64_t)b);
- case IR_UGT: return CONDFOLD((uint64_t)a > (uint64_t)b);
- default: lua_assert(0); return FAILFOLD;
- }
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-LJFOLD(UGE any KINT64)
-LJFOLDF(kfold_int64comp0)
-{
-#if LJ_HASFFI
- if (ir_k64(fright)->u64 == 0)
- return DROPFOLD;
- return NEXTFOLD;
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-/* -- Constant folding for strings ---------------------------------------- */
-
-LJFOLD(SNEW KKPTR KINT)
-LJFOLDF(kfold_snew_kptr)
-{
- GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i);
- return lj_ir_kstr(J, s);
-}
-
-LJFOLD(SNEW any KINT)
-LJFOLDF(kfold_snew_empty)
-{
- if (fright->i == 0)
- return lj_ir_kstr(J, &J2G(J)->strempty);
- return NEXTFOLD;
-}
-
-LJFOLD(STRREF KGC KINT)
-LJFOLDF(kfold_strref)
-{
- GCstr *str = ir_kstr(fleft);
- lua_assert((MSize)fright->i <= str->len);
- return lj_ir_kkptr(J, (char *)strdata(str) + fright->i);
-}
-
-LJFOLD(STRREF SNEW any)
-LJFOLDF(kfold_strref_snew)
-{
- PHIBARRIER(fleft);
- if (irref_isk(fins->op2) && fright->i == 0) {
- return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */
- } else {
- /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */
- IRIns *ir = IR(fleft->op1);
- if (ir->o == IR_STRREF) {
- IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */
- PHIBARRIER(ir);
- fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */
- fins->op1 = str;
- fins->ot = IRT(IR_STRREF, IRT_P32);
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-LJFOLD(CALLN CARG IRCALL_lj_str_cmp)
-LJFOLDF(kfold_strcmp)
-{
- if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) {
- GCstr *a = ir_kstr(IR(fleft->op1));
- GCstr *b = ir_kstr(IR(fleft->op2));
- return INTFOLD(lj_str_cmp(a, b));
- }
- return NEXTFOLD;
-}
-
-/* -- Constant folding of pointer arithmetic ------------------------------ */
-
-LJFOLD(ADD KGC KINT)
-LJFOLD(ADD KGC KINT64)
-LJFOLDF(kfold_add_kgc)
-{
- GCobj *o = ir_kgc(fleft);
-#if LJ_64
- ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
-#else
- ptrdiff_t ofs = fright->i;
-#endif
-#if LJ_HASFFI
- if (irt_iscdata(fleft->t)) {
- CType *ct = ctype_raw(ctype_ctsG(J2G(J)), gco2cd(o)->ctypeid);
- if (ctype_isnum(ct->info) || ctype_isenum(ct->info) ||
- ctype_isptr(ct->info) || ctype_isfunc(ct->info) ||
- ctype_iscomplex(ct->info) || ctype_isvector(ct->info))
- return lj_ir_kkptr(J, (char *)o + ofs);
- }
-#endif
- return lj_ir_kptr(J, (char *)o + ofs);
-}
-
-LJFOLD(ADD KPTR KINT)
-LJFOLD(ADD KPTR KINT64)
-LJFOLD(ADD KKPTR KINT)
-LJFOLD(ADD KKPTR KINT64)
-LJFOLDF(kfold_add_kptr)
-{
- void *p = ir_kptr(fleft);
-#if LJ_64
- ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
-#else
- ptrdiff_t ofs = fright->i;
-#endif
- return lj_ir_kptr_(J, fleft->o, (char *)p + ofs);
-}
-
-LJFOLD(ADD any KGC)
-LJFOLD(ADD any KPTR)
-LJFOLD(ADD any KKPTR)
-LJFOLDF(kfold_add_kright)
-{
- if (fleft->o == IR_KINT || fleft->o == IR_KINT64) {
- IRRef1 tmp = fins->op1; fins->op1 = fins->op2; fins->op2 = tmp;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-/* -- Constant folding of conversions ------------------------------------- */
-
-LJFOLD(TOBIT KNUM KNUM)
-LJFOLDF(kfold_tobit)
-{
- return INTFOLD(lj_num2bit(knumleft));
-}
-
-LJFOLD(CONV KINT IRCONV_NUM_INT)
-LJFOLDF(kfold_conv_kint_num)
-{
- return lj_ir_knum(J, (lua_Number)fleft->i);
-}
-
-LJFOLD(CONV KINT IRCONV_NUM_U32)
-LJFOLDF(kfold_conv_kintu32_num)
-{
- return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i);
-}
-
-LJFOLD(CONV KINT IRCONV_INT_I8)
-LJFOLD(CONV KINT IRCONV_INT_U8)
-LJFOLD(CONV KINT IRCONV_INT_I16)
-LJFOLD(CONV KINT IRCONV_INT_U16)
-LJFOLDF(kfold_conv_kint_ext)
-{
- int32_t k = fleft->i;
- if ((fins->op2 & IRCONV_SRCMASK) == IRT_I8) k = (int8_t)k;
- else if ((fins->op2 & IRCONV_SRCMASK) == IRT_U8) k = (uint8_t)k;
- else if ((fins->op2 & IRCONV_SRCMASK) == IRT_I16) k = (int16_t)k;
- else k = (uint16_t)k;
- return INTFOLD(k);
-}
-
-LJFOLD(CONV KINT IRCONV_I64_INT)
-LJFOLD(CONV KINT IRCONV_U64_INT)
-LJFOLD(CONV KINT IRCONV_I64_U32)
-LJFOLD(CONV KINT IRCONV_U64_U32)
-LJFOLDF(kfold_conv_kint_i64)
-{
- if ((fins->op2 & IRCONV_SEXT))
- return INT64FOLD((uint64_t)(int64_t)fleft->i);
- else
- return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i);
-}
-
-LJFOLD(CONV KINT64 IRCONV_NUM_I64)
-LJFOLDF(kfold_conv_kint64_num_i64)
-{
- return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64);
-}
-
-LJFOLD(CONV KINT64 IRCONV_NUM_U64)
-LJFOLDF(kfold_conv_kint64_num_u64)
-{
- return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64);
-}
-
-LJFOLD(CONV KINT64 IRCONV_INT_I64)
-LJFOLD(CONV KINT64 IRCONV_U32_I64)
-LJFOLDF(kfold_conv_kint64_int_i64)
-{
- return INTFOLD((int32_t)ir_kint64(fleft)->u64);
-}
-
-LJFOLD(CONV KNUM IRCONV_INT_NUM)
-LJFOLDF(kfold_conv_knum_int_num)
-{
- lua_Number n = knumleft;
- if (!(fins->op2 & IRCONV_TRUNC)) {
- int32_t k = lj_num2int(n);
- if (irt_isguard(fins->t) && n != (lua_Number)k) {
- /* We're about to create a guard which always fails, like CONV +1.5.
- ** Some pathological loops cause this during LICM, e.g.:
- ** local x,k,t = 0,1.5,{1,[1.5]=2}
- ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
- ** assert(x == 300)
- */
- return FAILFOLD;
- }
- return INTFOLD(k);
- } else {
- return INTFOLD((int32_t)n);
- }
-}
-
-LJFOLD(CONV KNUM IRCONV_U32_NUM)
-LJFOLDF(kfold_conv_knum_u32_num)
-{
- lua_assert((fins->op2 & IRCONV_TRUNC));
-#ifdef _MSC_VER
- { /* Workaround for MSVC bug. */
- volatile uint32_t u = (uint32_t)knumleft;
- return INTFOLD((int32_t)u);
- }
-#else
- return INTFOLD((int32_t)(uint32_t)knumleft);
-#endif
-}
-
-LJFOLD(CONV KNUM IRCONV_I64_NUM)
-LJFOLDF(kfold_conv_knum_i64_num)
-{
- lua_assert((fins->op2 & IRCONV_TRUNC));
- return INT64FOLD((uint64_t)(int64_t)knumleft);
-}
-
-LJFOLD(CONV KNUM IRCONV_U64_NUM)
-LJFOLDF(kfold_conv_knum_u64_num)
-{
- lua_assert((fins->op2 & IRCONV_TRUNC));
- return INT64FOLD(lj_num2u64(knumleft));
-}
-
-LJFOLD(TOSTR KNUM)
-LJFOLDF(kfold_tostr_knum)
-{
- return lj_ir_kstr(J, lj_str_fromnum(J->L, &knumleft));
-}
-
-LJFOLD(TOSTR KINT)
-LJFOLDF(kfold_tostr_kint)
-{
- return lj_ir_kstr(J, lj_str_fromint(J->L, fleft->i));
-}
-
-LJFOLD(STRTO KGC)
-LJFOLDF(kfold_strto)
-{
- TValue n;
- if (lj_strscan_num(ir_kstr(fleft), &n))
- return lj_ir_knum(J, numV(&n));
- return FAILFOLD;
-}
-
-/* -- Constant folding of equality checks --------------------------------- */
-
-/* Don't constant-fold away FLOAD checks against KNULL. */
-LJFOLD(EQ FLOAD KNULL)
-LJFOLD(NE FLOAD KNULL)
-LJFOLDX(lj_opt_cse)
-
-/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */
-LJFOLD(EQ any KNULL)
-LJFOLD(NE any KNULL)
-LJFOLD(EQ KNULL any)
-LJFOLD(NE KNULL any)
-LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */
-LJFOLD(NE KINT KINT)
-LJFOLD(EQ KINT64 KINT64)
-LJFOLD(NE KINT64 KINT64)
-LJFOLD(EQ KGC KGC)
-LJFOLD(NE KGC KGC)
-LJFOLDF(kfold_kref)
-{
- return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE));
-}
-
-/* -- Algebraic shortcuts ------------------------------------------------- */
-
-LJFOLD(FPMATH FPMATH IRFPM_FLOOR)
-LJFOLD(FPMATH FPMATH IRFPM_CEIL)
-LJFOLD(FPMATH FPMATH IRFPM_TRUNC)
-LJFOLDF(shortcut_round)
-{
- IRFPMathOp op = (IRFPMathOp)fleft->op2;
- if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC)
- return LEFTFOLD; /* round(round_left(x)) = round_left(x) */
- return NEXTFOLD;
-}
-
-LJFOLD(ABS ABS KNUM)
-LJFOLDF(shortcut_left)
-{
- return LEFTFOLD; /* f(g(x)) ==> g(x) */
-}
-
-LJFOLD(ABS NEG KNUM)
-LJFOLDF(shortcut_dropleft)
-{
- PHIBARRIER(fleft);
- fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */
- return RETRYFOLD;
-}
-
-/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */
-LJFOLD(NEG NEG any)
-LJFOLD(BNOT BNOT)
-LJFOLD(BSWAP BSWAP)
-LJFOLDF(shortcut_leftleft)
-{
- PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */
- return fleft->op1; /* f(g(x)) ==> x */
-}
-
-/* -- FP algebraic simplifications ---------------------------------------- */
-
-/* FP arithmetic is tricky -- there's not much to simplify.
-** Please note the following common pitfalls before sending "improvements":
-** x+0 ==> x is INVALID for x=-0
-** 0-x ==> -x is INVALID for x=+0
-** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN
-*/
-
-LJFOLD(ADD NEG any)
-LJFOLDF(simplify_numadd_negx)
-{
- PHIBARRIER(fleft);
- fins->o = IR_SUB; /* (-a) + b ==> b - a */
- fins->op1 = fins->op2;
- fins->op2 = fleft->op1;
- return RETRYFOLD;
-}
-
-LJFOLD(ADD any NEG)
-LJFOLDF(simplify_numadd_xneg)
-{
- PHIBARRIER(fright);
- fins->o = IR_SUB; /* a + (-b) ==> a - b */
- fins->op2 = fright->op1;
- return RETRYFOLD;
-}
-
-LJFOLD(SUB any KNUM)
-LJFOLDF(simplify_numsub_k)
-{
- lua_Number n = knumright;
- if (n == 0.0) /* x - (+-0) ==> x */
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(SUB NEG KNUM)
-LJFOLDF(simplify_numsub_negk)
-{
- PHIBARRIER(fleft);
- fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */
- fins->op1 = (IRRef1)lj_ir_knum(J, -knumright);
- return RETRYFOLD;
-}
-
-LJFOLD(SUB any NEG)
-LJFOLDF(simplify_numsub_xneg)
-{
- PHIBARRIER(fright);
- fins->o = IR_ADD; /* a - (-b) ==> a + b */
- fins->op2 = fright->op1;
- return RETRYFOLD;
-}
-
-LJFOLD(MUL any KNUM)
-LJFOLD(DIV any KNUM)
-LJFOLDF(simplify_nummuldiv_k)
-{
- lua_Number n = knumright;
- if (n == 1.0) { /* x o 1 ==> x */
- return LEFTFOLD;
- } else if (n == -1.0) { /* x o -1 ==> -x */
- fins->o = IR_NEG;
- fins->op2 = (IRRef1)lj_ir_knum_neg(J);
- return RETRYFOLD;
- } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */
- fins->o = IR_ADD;
- fins->op2 = fins->op1;
- return RETRYFOLD;
- } else if (fins->o == IR_DIV) { /* x / 2^k ==> x * 2^-k */
- uint64_t u = ir_knum(fright)->u64;
- uint32_t ex = ((uint32_t)(u >> 52) & 0x7ff);
- if ((u & U64x(000fffff,ffffffff)) == 0 && ex - 1 < 0x7fd) {
- u = (u & ((uint64_t)1 << 63)) | ((uint64_t)(0x7fe - ex) << 52);
- fins->o = IR_MUL; /* Multiply by exact reciprocal. */
- fins->op2 = lj_ir_knum_u64(J, u);
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
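The division-by-power-of-two branch above rewrites the exponent field so the multiply uses an exact reciprocal. A standalone numeric check of that bit manipulation on 8.0, using plain memcpy bit-twiddling with no IR involved:

#include <assert.h>
#include <stdint.h>
#include <string.h>
int main(void)
{
  double d = 8.0, r;
  uint64_t u;
  memcpy(&u, &d, sizeof(u));
  uint32_t ex = (uint32_t)(u >> 52) & 0x7ff;
  assert((u & 0x000fffffffffffffull) == 0 && ex - 1 < 0x7fd);  /* Power of two, in range. */
  u = (u & (1ull << 63)) | ((uint64_t)(0x7fe - ex) << 52);     /* Mirror the exponent around 1.0. */
  memcpy(&r, &u, sizeof(r));
  assert(r == 0.125);                                          /* Exact reciprocal of 8.0. */
  return 0;
}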
-
-LJFOLD(MUL NEG KNUM)
-LJFOLD(DIV NEG KNUM)
-LJFOLDF(simplify_nummuldiv_negk)
-{
- PHIBARRIER(fleft);
- fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */
- fins->op2 = (IRRef1)lj_ir_knum(J, -knumright);
- return RETRYFOLD;
-}
-
-LJFOLD(MUL NEG NEG)
-LJFOLD(DIV NEG NEG)
-LJFOLDF(simplify_nummuldiv_negneg)
-{
- PHIBARRIER(fleft);
- PHIBARRIER(fright);
- fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */
- fins->op2 = fright->op1;
- return RETRYFOLD;
-}
-
-LJFOLD(POW any KINT)
-LJFOLDF(simplify_numpow_xk)
-{
- int32_t k = fright->i;
- TRef ref = fins->op1;
- if (k == 0) /* x ^ 0 ==> 1 */
- return lj_ir_knum_one(J); /* Result must be a number, not an int. */
- if (k == 1) /* x ^ 1 ==> x */
- return LEFTFOLD;
- if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */
- return NEXTFOLD;
- if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */
- ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref);
- k = -k;
- }
- /* Unroll x^k for 1 <= k <= 65536. */
- for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */
- ref = emitir(IRTN(IR_MUL), ref, ref);
- if ((k >>= 1) != 0) { /* Handle trailing bits. */
- TRef tmp = emitir(IRTN(IR_MUL), ref, ref);
- for (; k != 1; k >>= 1) {
- if (k & 1)
- ref = emitir(IRTN(IR_MUL), ref, tmp);
- tmp = emitir(IRTN(IR_MUL), tmp, tmp);
- }
- ref = emitir(IRTN(IR_MUL), ref, tmp);
- }
- return ref;
-}
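The unrolling loop above is square-and-multiply over the bits of k. A plain-double rendering of the same multiply sequence, assuming k has already been clamped to 1..65536 as the fold rule does (pow_unrolled is a hypothetical helper, not LuaJIT code):

#include <assert.h>
static double pow_unrolled(double x, unsigned int k)
{
  double r = x, t;
  for (; (k & 1) == 0; k >>= 1) r = r * r;  /* Square away low zero bits of k. */
  if ((k >>= 1) != 0) {
    t = r * r;
    for (; k != 1; k >>= 1) {
      if (k & 1) r = r * t;
      t = t * t;
    }
    r = r * t;
  }
  return r;
}
int main(void)
{
  assert(pow_unrolled(2.0, 13) == 8192.0);  /* 2^13 */
  assert(pow_unrolled(3.0, 4) == 81.0);     /* 3^4 */
  return 0;
}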
-
-LJFOLD(POW KNUM any)
-LJFOLDF(simplify_numpow_kx)
-{
- lua_Number n = knumleft;
- if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */
- fins->o = IR_CONV;
-#if LJ_TARGET_X86ORX64
- fins->op1 = fins->op2;
- fins->op2 = IRCONV_NUM_INT;
- fins->op2 = (IRRef1)lj_opt_fold(J);
-#endif
- fins->op1 = (IRRef1)lj_ir_knum_one(J);
- fins->o = IR_LDEXP;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-/* -- Simplify conversions ------------------------------------------------ */
-
-LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */
-LJFOLDF(shortcut_conv_num_int)
-{
- PHIBARRIER(fleft);
- /* Only safe with a guarded conversion to int. */
- if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t))
- return fleft->op1; /* f(g(x)) ==> x */
- return NEXTFOLD;
-}
-
-LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */
-LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32 */
-LJFOLDF(simplify_conv_int_num)
-{
- /* Fold even across PHI to avoid expensive num->int conversions in loop. */
- if ((fleft->op2 & IRCONV_SRCMASK) ==
- ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH))
- return fleft->op1;
- return NEXTFOLD;
-}
-
-LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32 */
-LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32 */
-LJFOLDF(simplify_conv_i64_num)
-{
- PHIBARRIER(fleft);
- if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
- /* Reduce to a sign-extension. */
- fins->op1 = fleft->op1;
- fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
- return RETRYFOLD;
- } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
-#if LJ_TARGET_X64
- return fleft->op1;
-#else
- /* Reduce to a zero-extension. */
- fins->op1 = fleft->op1;
- fins->op2 = (IRT_I64<<5)|IRT_U32;
- return RETRYFOLD;
-#endif
- }
- return NEXTFOLD;
-}
-
-LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT or _U32 */
-LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT or _U32 */
-LJFOLD(CONV CONV IRCONV_U32_I64) /* _INT or _U32 */
-LJFOLD(CONV CONV IRCONV_U32_U64) /* _INT or _U32 */
-LJFOLDF(simplify_conv_int_i64)
-{
- int src;
- PHIBARRIER(fleft);
- src = (fleft->op2 & IRCONV_SRCMASK);
- if (src == IRT_INT || src == IRT_U32) {
- if (src == ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) {
- return fleft->op1;
- } else {
- fins->op2 = ((fins->op2 & IRCONV_DSTMASK) | src);
- fins->op1 = fleft->op1;
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-LJFOLD(CONV CONV IRCONV_FLOAT_NUM) /* _FLOAT */
-LJFOLDF(simplify_conv_flt_num)
-{
- PHIBARRIER(fleft);
- if ((fleft->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
- return fleft->op1;
- return NEXTFOLD;
-}
-
-/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
-LJFOLD(TOBIT CONV KNUM)
-LJFOLDF(simplify_tobit_conv)
-{
- /* Fold even across PHI to avoid expensive num->int conversions in loop. */
- if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
- lua_assert(irt_isnum(fleft->t));
- return fleft->op1;
- } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
- lua_assert(irt_isnum(fleft->t));
- fins->o = IR_CONV;
- fins->op1 = fleft->op1;
- fins->op2 = (IRT_INT<<5)|IRT_U32;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
-LJFOLD(FPMATH CONV IRFPM_FLOOR)
-LJFOLD(FPMATH CONV IRFPM_CEIL)
-LJFOLD(FPMATH CONV IRFPM_TRUNC)
-LJFOLDF(simplify_floor_conv)
-{
- if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT ||
- (fleft->op2 & IRCONV_SRCMASK) == IRT_U32)
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-/* Strength reduction of widening. */
-LJFOLD(CONV any IRCONV_I64_INT)
-LJFOLD(CONV any IRCONV_U64_INT)
-LJFOLDF(simplify_conv_sext)
-{
- IRRef ref = fins->op1;
- int64_t ofs = 0;
- if (!(fins->op2 & IRCONV_SEXT))
- return NEXTFOLD;
- PHIBARRIER(fleft);
- if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t)))
- goto ok_reduce;
- if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
- ofs = (int64_t)IR(fleft->op2)->i;
- ref = fleft->op1;
- }
- /* Use scalar evolution analysis results to strength-reduce sign-extension. */
- if (ref == J->scev.idx) {
- IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
- lua_assert(irt_isint(J->scev.t));
- if (lo && IR(lo)->i + ofs >= 0) {
- ok_reduce:
-#if LJ_TARGET_X64
- /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
- return LEFTFOLD;
-#else
- /* Reduce to a (cheaper) zero-extension. */
- fins->op2 &= ~IRCONV_SEXT;
- return RETRYFOLD;
-#endif
- }
- }
- return NEXTFOLD;
-}
-
-/* Strength reduction of narrowing. */
-LJFOLD(CONV ADD IRCONV_INT_I64)
-LJFOLD(CONV SUB IRCONV_INT_I64)
-LJFOLD(CONV MUL IRCONV_INT_I64)
-LJFOLD(CONV ADD IRCONV_INT_U64)
-LJFOLD(CONV SUB IRCONV_INT_U64)
-LJFOLD(CONV MUL IRCONV_INT_U64)
-LJFOLD(CONV ADD IRCONV_U32_I64)
-LJFOLD(CONV SUB IRCONV_U32_I64)
-LJFOLD(CONV MUL IRCONV_U32_I64)
-LJFOLD(CONV ADD IRCONV_U32_U64)
-LJFOLD(CONV SUB IRCONV_U32_U64)
-LJFOLD(CONV MUL IRCONV_U32_U64)
-LJFOLDF(simplify_conv_narrow)
-{
- IROp op = (IROp)fleft->o;
- IRType t = irt_type(fins->t);
- IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2;
- PHIBARRIER(fleft);
- op1 = emitir(IRTI(IR_CONV), op1, mode);
- op2 = emitir(IRTI(IR_CONV), op2, mode);
- fins->ot = IRT(op, t);
- fins->op1 = op1;
- fins->op2 = op2;
- return RETRYFOLD;
-}
-
-/* Special CSE rule for CONV. */
-LJFOLD(CONV any any)
-LJFOLDF(cse_conv)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
- IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK);
- uint8_t guard = irt_isguard(fins->t);
- IRRef ref = J->chain[IR_CONV];
- while (ref > op1) {
- IRIns *ir = IR(ref);
- /* Commoning with stronger checks is ok. */
- if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 &&
- irt_isguard(ir->t) >= guard)
- return ref;
- ref = ir->prev;
- }
- }
- return EMITFOLD; /* No fallthrough to regular CSE. */
-}
-
-/* FP conversion narrowing. */
-LJFOLD(TOBIT ADD KNUM)
-LJFOLD(TOBIT SUB KNUM)
-LJFOLD(CONV ADD IRCONV_INT_NUM)
-LJFOLD(CONV SUB IRCONV_INT_NUM)
-LJFOLD(CONV ADD IRCONV_I64_NUM)
-LJFOLD(CONV SUB IRCONV_I64_NUM)
-LJFOLDF(narrow_convert)
-{
- PHIBARRIER(fleft);
- /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */
- if (J->chain[IR_LOOP])
- return NEXTFOLD;
- lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT);
- return lj_opt_narrow_convert(J);
-}
-
-/* -- Integer algebraic simplifications ----------------------------------- */
-
-LJFOLD(ADD any KINT)
-LJFOLD(ADDOV any KINT)
-LJFOLD(SUBOV any KINT)
-LJFOLDF(simplify_intadd_k)
-{
- if (fright->i == 0) /* i o 0 ==> i */
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(MULOV any KINT)
-LJFOLDF(simplify_intmul_k)
-{
- if (fright->i == 0) /* i * 0 ==> 0 */
- return RIGHTFOLD;
- if (fright->i == 1) /* i * 1 ==> i */
- return LEFTFOLD;
- if (fright->i == 2) { /* i * 2 ==> i + i */
- fins->o = IR_ADDOV;
- fins->op2 = fins->op1;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(SUB any KINT)
-LJFOLDF(simplify_intsub_k)
-{
- if (fright->i == 0) /* i - 0 ==> i */
- return LEFTFOLD;
- fins->o = IR_ADD; /* i - k ==> i + (-k) */
- fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */
- return RETRYFOLD;
-}
-
-LJFOLD(SUB KINT any)
-LJFOLD(SUB KINT64 any)
-LJFOLDF(simplify_intsub_kleft)
-{
- if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) {
- fins->o = IR_NEG; /* 0 - i ==> -i */
- fins->op1 = fins->op2;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(ADD any KINT64)
-LJFOLDF(simplify_intadd_k64)
-{
- if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(SUB any KINT64)
-LJFOLDF(simplify_intsub_k64)
-{
- uint64_t k = ir_kint64(fright)->u64;
- if (k == 0) /* i - 0 ==> i */
- return LEFTFOLD;
- fins->o = IR_ADD; /* i - k ==> i + (-k) */
- fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k);
- return RETRYFOLD;
-}
-
-static TRef simplify_intmul_k(jit_State *J, int32_t k)
-{
- /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2.
- ** But this is mainly intended for simple address arithmetic.
- ** Also it's easier for the backend to optimize the original multiplies.
- */
- if (k == 1) { /* i * 1 ==> i */
- return LEFTFOLD;
- } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */
- fins->o = IR_BSHL;
- fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k));
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
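The helper above turns a multiply by 2^k into a left shift, using the (k & (k-1)) == 0 power-of-two test and lj_fls() for the shift count. A standalone check of that equivalence; fls_u32() below is a simple stand-in for lj_fls():

#include <assert.h>
#include <stdint.h>
static uint32_t fls_u32(uint32_t x) { uint32_t n = 0; while (x >>= 1) n++; return n; }
int main(void)
{
  int32_t i = 23, k = 8;
  assert((k & (k-1)) == 0);                      /* k is a power of two. */
  assert(i * k == i << fls_u32((uint32_t)k));    /* 23*8 == 23<<3 == 184. */
  return 0;
}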
-
-LJFOLD(MUL any KINT)
-LJFOLDF(simplify_intmul_k32)
-{
- if (fright->i == 0) /* i * 0 ==> 0 */
- return INTFOLD(0);
- else if (fright->i > 0)
- return simplify_intmul_k(J, fright->i);
- return NEXTFOLD;
-}
-
-LJFOLD(MUL any KINT64)
-LJFOLDF(simplify_intmul_k64)
-{
- if (ir_kint64(fright)->u64 == 0) /* i * 0 ==> 0 */
- return INT64FOLD(0);
-#if LJ_64
- /* NYI: SPLIT for BSHL and 32 bit backend support. */
- else if (ir_kint64(fright)->u64 < 0x80000000u)
- return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64);
-#endif
- return NEXTFOLD;
-}
-
-LJFOLD(MOD any KINT)
-LJFOLDF(simplify_intmod_k)
-{
- int32_t k = fright->i;
- lua_assert(k != 0);
- if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */
- fins->o = IR_BAND;
- fins->op2 = lj_ir_kint(J, k-1);
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
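The rule above relies on floor-style modulo (Lua's %) by a positive power of two being the same as a bitwise AND with 2^k-1, including for negative dividends in two's complement. A standalone check; C's native % truncates toward zero, so floor-mod is reconstructed explicitly here:

#include <assert.h>
#include <stdint.h>
static int32_t floormod(int32_t a, int32_t b)
{
  int32_t r = a % b;
  if (r != 0 && ((r < 0) != (b < 0))) r += b;  /* Adjust truncated result toward floor. */
  return r;
}
int main(void)
{
  assert(floormod(-5, 4) == ((-5) & 3));  /* Both are 3. */
  assert(floormod(13, 8) == (13 & 7));    /* Both are 5. */
  return 0;
}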
-
-LJFOLD(MOD KINT any)
-LJFOLDF(simplify_intmod_kleft)
-{
- if (fleft->i == 0)
- return INTFOLD(0);
- return NEXTFOLD;
-}
-
-LJFOLD(SUB any any)
-LJFOLD(SUBOV any any)
-LJFOLDF(simplify_intsub)
-{
- if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */
- return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
- return NEXTFOLD;
-}
-
-LJFOLD(SUB ADD any)
-LJFOLDF(simplify_intsubadd_leftcancel)
-{
- if (!irt_isnum(fins->t)) {
- PHIBARRIER(fleft);
- if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */
- return fleft->op2;
- if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */
- return fleft->op1;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(SUB SUB any)
-LJFOLDF(simplify_intsubsub_leftcancel)
-{
- if (!irt_isnum(fins->t)) {
- PHIBARRIER(fleft);
- if (fins->op2 == fleft->op1) { /* (i - j) - i ==> 0 - j */
- fins->op1 = (IRRef1)lj_ir_kint(J, 0);
- fins->op2 = fleft->op2;
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-LJFOLD(SUB any SUB)
-LJFOLDF(simplify_intsubsub_rightcancel)
-{
- if (!irt_isnum(fins->t)) {
- PHIBARRIER(fright);
- if (fins->op1 == fright->op1) /* i - (i - j) ==> j */
- return fright->op2;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(SUB any ADD)
-LJFOLDF(simplify_intsubadd_rightcancel)
-{
- if (!irt_isnum(fins->t)) {
- PHIBARRIER(fright);
- if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */
- fins->op2 = fright->op2;
- fins->op1 = (IRRef1)lj_ir_kint(J, 0);
- return RETRYFOLD;
- }
- if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */
- fins->op2 = fright->op1;
- fins->op1 = (IRRef1)lj_ir_kint(J, 0);
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-LJFOLD(SUB ADD ADD)
-LJFOLDF(simplify_intsubaddadd_cancel)
-{
- if (!irt_isnum(fins->t)) {
- PHIBARRIER(fleft);
- PHIBARRIER(fright);
- if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */
- fins->op1 = fleft->op2;
- fins->op2 = fright->op2;
- return RETRYFOLD;
- }
- if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */
- fins->op1 = fleft->op2;
- fins->op2 = fright->op1;
- return RETRYFOLD;
- }
- if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */
- fins->op1 = fleft->op1;
- fins->op2 = fright->op2;
- return RETRYFOLD;
- }
- if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */
- fins->op1 = fleft->op1;
- fins->op2 = fright->op1;
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-LJFOLD(BAND any KINT)
-LJFOLD(BAND any KINT64)
-LJFOLDF(simplify_band_k)
-{
- int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
- (int64_t)ir_k64(fright)->u64;
- if (k == 0) /* i & 0 ==> 0 */
- return RIGHTFOLD;
- if (k == -1) /* i & -1 ==> i */
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(BOR any KINT)
-LJFOLD(BOR any KINT64)
-LJFOLDF(simplify_bor_k)
-{
- int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
- (int64_t)ir_k64(fright)->u64;
- if (k == 0) /* i | 0 ==> i */
- return LEFTFOLD;
- if (k == -1) /* i | -1 ==> -1 */
- return RIGHTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(BXOR any KINT)
-LJFOLD(BXOR any KINT64)
-LJFOLDF(simplify_bxor_k)
-{
- int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
- (int64_t)ir_k64(fright)->u64;
- if (k == 0) /* i xor 0 ==> i */
- return LEFTFOLD;
- if (k == -1) { /* i xor -1 ==> ~i */
- fins->o = IR_BNOT;
- fins->op2 = 0;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(BSHL any KINT)
-LJFOLD(BSHR any KINT)
-LJFOLD(BSAR any KINT)
-LJFOLD(BROL any KINT)
-LJFOLD(BROR any KINT)
-LJFOLDF(simplify_shift_ik)
-{
- int32_t mask = irt_is64(fins->t) ? 63 : 31;
- int32_t k = (fright->i & mask);
- if (k == 0) /* i o 0 ==> i */
- return LEFTFOLD;
- if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */
- fins->o = IR_ADD;
- fins->op2 = fins->op1;
- return RETRYFOLD;
- }
- if (k != fright->i) { /* i o k ==> i o (k & mask) */
- fins->op2 = (IRRef1)lj_ir_kint(J, k);
- return RETRYFOLD;
- }
-#ifndef LJ_TARGET_UNIFYROT
- if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */
- fins->o = IR_BROL;
- fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask);
- return RETRYFOLD;
- }
-#endif
- return NEXTFOLD;
-}
-
-LJFOLD(BSHL any BAND)
-LJFOLD(BSHR any BAND)
-LJFOLD(BSAR any BAND)
-LJFOLD(BROL any BAND)
-LJFOLD(BROR any BAND)
-LJFOLDF(simplify_shift_andk)
-{
- IRIns *irk = IR(fright->op2);
- PHIBARRIER(fright);
- if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
- irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */
- int32_t mask = irt_is64(fins->t) ? 63 : 31;
- int32_t k = irk->i & mask;
- if (k == mask) {
- fins->op2 = fright->op1;
- return RETRYFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-LJFOLD(BSHL KINT any)
-LJFOLD(BSHR KINT any)
-LJFOLD(BSHL KINT64 any)
-LJFOLD(BSHR KINT64 any)
-LJFOLDF(simplify_shift1_ki)
-{
- int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
- (int64_t)ir_k64(fleft)->u64;
- if (k == 0) /* 0 o i ==> 0 */
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(BSAR KINT any)
-LJFOLD(BROL KINT any)
-LJFOLD(BROR KINT any)
-LJFOLD(BSAR KINT64 any)
-LJFOLD(BROL KINT64 any)
-LJFOLD(BROR KINT64 any)
-LJFOLDF(simplify_shift2_ki)
-{
- int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
- (int64_t)ir_k64(fleft)->u64;
- if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */
- return LEFTFOLD;
- return NEXTFOLD;
-}
-
-LJFOLD(BSHL BAND KINT)
-LJFOLD(BSHR BAND KINT)
-LJFOLD(BROL BAND KINT)
-LJFOLD(BROR BAND KINT)
-LJFOLDF(simplify_shiftk_andk)
-{
- IRIns *irk = IR(fleft->op2);
- PHIBARRIER(fleft);
- if (irk->o == IR_KINT) { /* (i & k1) o k2 ==> (i o k2) & (k1 o k2) */
- int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
- fins->op1 = fleft->op1;
- fins->op1 = (IRRef1)lj_opt_fold(J);
- fins->op2 = (IRRef1)lj_ir_kint(J, k);
- fins->ot = IRTI(IR_BAND);
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(BAND BSHL KINT)
-LJFOLD(BAND BSHR KINT)
-LJFOLDF(simplify_andk_shiftk)
-{
- IRIns *irk = IR(fleft->op2);
- if (irk->o == IR_KINT &&
- kfold_intop(-1, irk->i, (IROp)fleft->o) == fright->i)
- return LEFTFOLD; /* (i o k1) & k2 ==> i, if (-1 o k1) == k2 */
- return NEXTFOLD;
-}
-
-/* -- Reassociation ------------------------------------------------------- */
-
-LJFOLD(ADD ADD KINT)
-LJFOLD(MUL MUL KINT)
-LJFOLD(BAND BAND KINT)
-LJFOLD(BOR BOR KINT)
-LJFOLD(BXOR BXOR KINT)
-LJFOLDF(reassoc_intarith_k)
-{
- IRIns *irk = IR(fleft->op2);
- if (irk->o == IR_KINT) {
- int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
- if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. */
- return LEFTFOLD;
- PHIBARRIER(fleft);
- fins->op1 = fleft->op1;
- fins->op2 = (IRRef1)lj_ir_kint(J, k);
- return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
- }
- return NEXTFOLD;
-}
-
-LJFOLD(ADD ADD KINT64)
-LJFOLD(MUL MUL KINT64)
-LJFOLD(BAND BAND KINT64)
-LJFOLD(BOR BOR KINT64)
-LJFOLD(BXOR BXOR KINT64)
-LJFOLDF(reassoc_intarith_k64)
-{
-#if LJ_HASFFI || LJ_64
- IRIns *irk = IR(fleft->op2);
- if (irk->o == IR_KINT64) {
- uint64_t k = kfold_int64arith(ir_k64(irk)->u64,
- ir_k64(fright)->u64, (IROp)fins->o);
- PHIBARRIER(fleft);
- fins->op1 = fleft->op1;
- fins->op2 = (IRRef1)lj_ir_kint64(J, k);
- return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
- }
- return NEXTFOLD;
-#else
- UNUSED(J); lua_assert(0); return FAILFOLD;
-#endif
-}
-
-LJFOLD(MIN MIN any)
-LJFOLD(MAX MAX any)
-LJFOLD(BAND BAND any)
-LJFOLD(BOR BOR any)
-LJFOLDF(reassoc_dup)
-{
- if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
- return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */
- return NEXTFOLD;
-}
-
-LJFOLD(BXOR BXOR any)
-LJFOLDF(reassoc_bxor)
-{
- PHIBARRIER(fleft);
- if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */
- return fleft->op2;
- if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */
- return fleft->op1;
- return NEXTFOLD;
-}
-
-LJFOLD(BSHL BSHL KINT)
-LJFOLD(BSHR BSHR KINT)
-LJFOLD(BSAR BSAR KINT)
-LJFOLD(BROL BROL KINT)
-LJFOLD(BROR BROR KINT)
-LJFOLDF(reassoc_shift)
-{
- IRIns *irk = IR(fleft->op2);
- PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */
- if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */
- int32_t mask = irt_is64(fins->t) ? 63 : 31;
- int32_t k = (irk->i & mask) + (fright->i & mask);
- if (k > mask) { /* Combined shift too wide? */
- if (fins->o == IR_BSHL || fins->o == IR_BSHR)
- return mask == 31 ? INTFOLD(0) : INT64FOLD(0);
- else if (fins->o == IR_BSAR)
- k = mask;
- else
- k &= mask;
- }
- fins->op1 = fleft->op1;
- fins->op2 = (IRRef1)lj_ir_kint(J, k);
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(MIN MIN KNUM)
-LJFOLD(MAX MAX KNUM)
-LJFOLD(MIN MIN KINT)
-LJFOLD(MAX MAX KINT)
-LJFOLDF(reassoc_minmax_k)
-{
- IRIns *irk = IR(fleft->op2);
- if (irk->o == IR_KNUM) {
- lua_Number a = ir_knum(irk)->n;
- lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD);
- if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
- return LEFTFOLD;
- PHIBARRIER(fleft);
- fins->op1 = fleft->op1;
- fins->op2 = (IRRef1)lj_ir_knum(J, y);
- return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
- } else if (irk->o == IR_KINT) {
- int32_t a = irk->i;
- int32_t y = kfold_intop(a, fright->i, fins->o);
- if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
- return LEFTFOLD;
- PHIBARRIER(fleft);
- fins->op1 = fleft->op1;
- fins->op2 = (IRRef1)lj_ir_kint(J, y);
- return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
- }
- return NEXTFOLD;
-}
-
-LJFOLD(MIN MAX any)
-LJFOLD(MAX MIN any)
-LJFOLDF(reassoc_minmax_left)
-{
- if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
- return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */
- return NEXTFOLD;
-}
-
-LJFOLD(MIN any MAX)
-LJFOLD(MAX any MIN)
-LJFOLDF(reassoc_minmax_right)
-{
- if (fins->op1 == fright->op1 || fins->op1 == fright->op2)
- return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */
- return NEXTFOLD;
-}
-
-/* -- Array bounds check elimination -------------------------------------- */
-
-/* Eliminate ABC across PHIs to handle t[i-1] forwarding case.
-** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists.
-** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but needs better disambig.
-*/
-LJFOLD(ABC any ADD)
-LJFOLDF(abc_fwd)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
- if (irref_isk(fright->op2)) {
- IRIns *add2 = IR(fright->op1);
- if (add2->o == IR_ADD && irref_isk(add2->op2) &&
- IR(fright->op2)->i == -IR(add2->op2)->i) {
- IRRef ref = J->chain[IR_ABC];
- IRRef lim = add2->op1;
- if (fins->op1 > lim) lim = fins->op1;
- while (ref > lim) {
- IRIns *ir = IR(ref);
- if (ir->op1 == fins->op1 && ir->op2 == add2->op1)
- return DROPFOLD;
- ref = ir->prev;
- }
- }
- }
- }
- return NEXTFOLD;
-}
-
-/* Eliminate ABC for constants.
-** ABC(asize, k1), ABC(asize, k2) ==> ABC(asize, max(k1, k2))
-** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2.
-*/
-LJFOLD(ABC any KINT)
-LJFOLDF(abc_k)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
- IRRef ref = J->chain[IR_ABC];
- IRRef asize = fins->op1;
- while (ref > asize) {
- IRIns *ir = IR(ref);
- if (ir->op1 == asize && irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (fright->i > k)
- ir->op2 = fins->op2;
- return DROPFOLD;
- }
- ref = ir->prev;
- }
- return EMITFOLD; /* Already performed CSE. */
- }
- return NEXTFOLD;
-}
-
-/* Eliminate invariant ABC inside loop. */
-LJFOLD(ABC any any)
-LJFOLDF(abc_invar)
-{
- /* Invariant ABC marked as PTR. Drop if op1 is invariant, too. */
- if (!irt_isint(fins->t) && fins->op1 < J->chain[IR_LOOP] &&
- !irt_isphi(IR(fins->op1)->t))
- return DROPFOLD;
- return NEXTFOLD;
-}
-
-/* -- Commutativity ------------------------------------------------------- */
-
-/* The refs of commutative ops are canonicalized. Lower refs go to the right.
-** Rationale behind this:
-** - It (also) moves constants to the right.
-** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices).
-** - It helps CSE to find more matches.
-** - The assembler generates better code with constants at the right.
-*/
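-
-/* Constant references are allocated below REF_BIAS and thus always compare
-** lower than instruction references, so after the swap in comm_swap below a
-** constant operand can only end up in op2, i.e. on the right.
-*/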
-
-LJFOLD(ADD any any)
-LJFOLD(MUL any any)
-LJFOLD(ADDOV any any)
-LJFOLD(MULOV any any)
-LJFOLDF(comm_swap)
-{
- if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
- IRRef1 tmp = fins->op1;
- fins->op1 = fins->op2;
- fins->op2 = tmp;
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(EQ any any)
-LJFOLD(NE any any)
-LJFOLDF(comm_equal)
-{
- /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */
- if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
- return CONDFOLD(fins->o == IR_EQ);
- return fold_comm_swap(J);
-}
-
-LJFOLD(LT any any)
-LJFOLD(GE any any)
-LJFOLD(LE any any)
-LJFOLD(GT any any)
-LJFOLD(ULT any any)
-LJFOLD(UGE any any)
-LJFOLD(ULE any any)
-LJFOLD(UGT any any)
-LJFOLDF(comm_comp)
-{
- /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail */
- if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
- return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1);
- if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
- IRRef1 tmp = fins->op1;
- fins->op1 = fins->op2;
- fins->op2 = tmp;
- fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */
- return RETRYFOLD;
- }
- return NEXTFOLD;
-}
-
-LJFOLD(BAND any any)
-LJFOLD(BOR any any)
-LJFOLD(MIN any any)
-LJFOLD(MAX any any)
-LJFOLDF(comm_dup)
-{
- if (fins->op1 == fins->op2) /* x o x ==> x */
- return LEFTFOLD;
- return fold_comm_swap(J);
-}
-
-LJFOLD(BXOR any any)
-LJFOLDF(comm_bxor)
-{
- if (fins->op1 == fins->op2) /* i xor i ==> 0 */
- return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
- return fold_comm_swap(J);
-}
-
-/* -- Simplification of compound expressions ------------------------------ */
-
-static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p)
-{
- int32_t k;
- switch (irt_type(ir->t)) {
- case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p);
- case IRT_I8: k = (int32_t)*(int8_t *)p; break;
- case IRT_U8: k = (int32_t)*(uint8_t *)p; break;
- case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break;
- case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break;
- case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break;
- case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p);
- default: return 0;
- }
- return lj_ir_kint(J, k);
-}
-
-/* Turn: string.sub(str, a, b) == kstr
-** into: string.byte(str, a) == string.byte(kstr, 1) etc.
-** Note: this creates unaligned XLOADs on x86/x64.
-*/
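-/* For example, comparing string.sub(s, i, i+2) against the constant "abc"
-** (length 3) becomes a guard that the substring length equals 3, one
-** unaligned 4 byte XLOAD of the string data, a BAND masking off the byte
-** past the length and an integer compare against the bytes of the constant
-** (a sketch for an unaligned-load target, see FOLD_SNEW_MAX_LEN below).
-*/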
-LJFOLD(EQ SNEW KGC)
-LJFOLD(NE SNEW KGC)
-LJFOLDF(merge_eqne_snew_kgc)
-{
- GCstr *kstr = ir_kstr(fright);
- int32_t len = (int32_t)kstr->len;
- lua_assert(irt_isstr(fins->t));
-
-#if LJ_TARGET_UNALIGNED
-#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */
-#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */
-#else
-#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */
-#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */
-#endif
-
- PHIBARRIER(fleft);
- if (len <= FOLD_SNEW_MAX_LEN) {
- IROp op = (IROp)fins->o;
- IRRef strref = fleft->op1;
- if (IR(strref)->o != IR_STRREF)
- return NEXTFOLD;
- if (op == IR_EQ) {
- emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len));
- /* Caveat: fins/fleft/fright is no longer valid after emitir. */
- } else {
- /* NE is not expanded since this would need an OR of two conds. */
- if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */
- return NEXTFOLD;
- if (IR(fleft->op2)->i != len)
- return DROPFOLD;
- }
- if (len > 0) {
- /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */
- uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) :
- len == 2 ? IRT(IR_XLOAD, IRT_U16) :
- IRTI(IR_XLOAD));
- TRef tmp = emitir(ot, strref,
- IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0));
- TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr));
- if (len == 3)
- tmp = emitir(IRTI(IR_BAND), tmp,
- lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00)));
- fins->op1 = (IRRef1)tmp;
- fins->op2 = (IRRef1)val;
- fins->ot = (IROpT)IRTGI(op);
- return RETRYFOLD;
- } else {
- return DROPFOLD;
- }
- }
- return NEXTFOLD;
-}
-
-/* -- Loads --------------------------------------------------------------- */
-
-/* Loads cannot be folded or passed on to CSE in general.
-** Alias analysis is needed to check for forwarding opportunities.
-**
-** Caveat: *all* loads must be listed here or they end up at CSE!
-*/
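-
-/* (A load that reached CSE could be commoned with an earlier load of the same
-** location even though an intervening store changed the value. The per-opcode
-** rules below route each load to its alias analysis / forwarding function
-** instead.)
-*/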
-
-LJFOLD(ALOAD any)
-LJFOLDX(lj_opt_fwd_aload)
-
-/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */
-LJFOLD(HLOAD KKPTR)
-LJFOLDF(kfold_hload_kkptr)
-{
- UNUSED(J);
- lua_assert(ir_kptr(fleft) == niltvg(J2G(J)));
- return TREF_NIL;
-}
-
-LJFOLD(HLOAD any)
-LJFOLDX(lj_opt_fwd_hload)
-
-LJFOLD(ULOAD any)
-LJFOLDX(lj_opt_fwd_uload)
-
-LJFOLD(CALLL any IRCALL_lj_tab_len)
-LJFOLDX(lj_opt_fwd_tab_len)
-
-/* Upvalue refs are really loads, but there are no corresponding stores.
-** So CSE is ok for them, except for UREFO across a GC step (see below).
-** If the referenced function is const, its upvalue addresses are const, too.
-** This can be used to improve CSE by looking for the same address,
-** even if the upvalues originate from a different function.
-*/
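-/* E.g. two distinct closures that close over the same variable reference the
-** same GCupval even though their KGC function constants differ; the loop
-** below therefore compares the resolved upvalue objects, not the operands.
-*/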
-LJFOLD(UREFO KGC any)
-LJFOLD(UREFC KGC any)
-LJFOLDF(cse_uref)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
- IRRef ref = J->chain[fins->o];
- GCfunc *fn = ir_kfunc(fleft);
- GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)]));
- while (ref > 0) {
- IRIns *ir = IR(ref);
- if (irref_isk(ir->op1)) {
- GCfunc *fn2 = ir_kfunc(IR(ir->op1));
- if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) {
- if (fins->o == IR_UREFO && gcstep_barrier(J, ref))
- break;
- return ref;
- }
- }
- ref = ir->prev;
- }
- }
- return EMITFOLD;
-}
-
-LJFOLD(HREFK any any)
-LJFOLDX(lj_opt_fwd_hrefk)
-
-LJFOLD(HREF TNEW any)
-LJFOLDF(fwd_href_tnew)
-{
- if (lj_opt_fwd_href_nokey(J))
- return lj_ir_kkptr(J, niltvg(J2G(J)));
- return NEXTFOLD;
-}
-
-LJFOLD(HREF TDUP KPRI)
-LJFOLD(HREF TDUP KGC)
-LJFOLD(HREF TDUP KNUM)
-LJFOLDF(fwd_href_tdup)
-{
- TValue keyv;
- lj_ir_kvalue(J->L, &keyv, fright);
- if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) &&
- lj_opt_fwd_href_nokey(J))
- return lj_ir_kkptr(J, niltvg(J2G(J)));
- return NEXTFOLD;
-}
-
-/* We can safely FOLD/CSE array/hash refs and field loads, since there
-** are no corresponding stores. But we need to check for any NEWREF with
-** an aliased table, as it may invalidate all of the pointers and fields.
-** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
-** FLOADs. And NEWREF itself is treated like a store (see below).
-*/
-LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
-LJFOLDF(fload_tab_tnew_asize)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
- return INTFOLD(fleft->op1);
- return NEXTFOLD;
-}
-
-LJFOLD(FLOAD TNEW IRFL_TAB_HMASK)
-LJFOLDF(fload_tab_tnew_hmask)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
- return INTFOLD((1 << fleft->op2)-1);
- return NEXTFOLD;
-}
-
-LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE)
-LJFOLDF(fload_tab_tdup_asize)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
- return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize);
- return NEXTFOLD;
-}
-
-LJFOLD(FLOAD TDUP IRFL_TAB_HMASK)
-LJFOLDF(fload_tab_tdup_hmask)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
- return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask);
- return NEXTFOLD;
-}
-
-LJFOLD(HREF any any)
-LJFOLD(FLOAD any IRFL_TAB_ARRAY)
-LJFOLD(FLOAD any IRFL_TAB_NODE)
-LJFOLD(FLOAD any IRFL_TAB_ASIZE)
-LJFOLD(FLOAD any IRFL_TAB_HMASK)
-LJFOLDF(fload_tab_ah)
-{
- TRef tr = lj_opt_cse(J);
- return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD;
-}
-
-/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */
-LJFOLD(FLOAD KGC IRFL_STR_LEN)
-LJFOLDF(fload_str_len_kgc)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
- return INTFOLD((int32_t)ir_kstr(fleft)->len);
- return NEXTFOLD;
-}
-
-LJFOLD(FLOAD SNEW IRFL_STR_LEN)
-LJFOLDF(fload_str_len_snew)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
- PHIBARRIER(fleft);
- return fleft->op2;
- }
- return NEXTFOLD;
-}
-
-/* The C type ID of cdata objects is immutable. */
-LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID)
-LJFOLDF(fload_cdata_typeid_kgc)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
- return INTFOLD((int32_t)ir_kcdata(fleft)->ctypeid);
- return NEXTFOLD;
-}
-
-/* Get the contents of immutable cdata objects. */
-LJFOLD(FLOAD KGC IRFL_CDATA_PTR)
-LJFOLD(FLOAD KGC IRFL_CDATA_INT)
-LJFOLD(FLOAD KGC IRFL_CDATA_INT64)
-LJFOLDF(fload_cdata_int64_kgc)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
- void *p = cdataptr(ir_kcdata(fleft));
- if (irt_is64(fins->t))
- return INT64FOLD(*(uint64_t *)p);
- else
- return INTFOLD(*(int32_t *)p);
- }
- return NEXTFOLD;
-}
-
-LJFOLD(FLOAD CNEW IRFL_CDATA_CTYPEID)
-LJFOLD(FLOAD CNEWI IRFL_CDATA_CTYPEID)
-LJFOLDF(fload_cdata_typeid_cnew)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
- return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */
- return NEXTFOLD;
-}
-
-/* Pointer, int and int64 cdata objects are immutable. */
-LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR)
-LJFOLD(FLOAD CNEWI IRFL_CDATA_INT)
-LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64)
-LJFOLDF(fload_cdata_ptr_int64_cnew)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
- return fleft->op2; /* Fold even across PHI to avoid allocations. */
- return NEXTFOLD;
-}
-
-LJFOLD(FLOAD any IRFL_STR_LEN)
-LJFOLD(FLOAD any IRFL_CDATA_CTYPEID)
-LJFOLD(FLOAD any IRFL_CDATA_PTR)
-LJFOLD(FLOAD any IRFL_CDATA_INT)
-LJFOLD(FLOAD any IRFL_CDATA_INT64)
-LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */
-LJFOLDX(lj_opt_cse)
-
-/* All other field loads need alias analysis. */
-LJFOLD(FLOAD any any)
-LJFOLDX(lj_opt_fwd_fload)
-
-/* This is for LOOP only. Recording handles SLOADs internally. */
-LJFOLD(SLOAD any any)
-LJFOLDF(fwd_sload)
-{
- if ((fins->op2 & IRSLOAD_FRAME)) {
- TRef tr = lj_opt_cse(J);
- return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr;
- } else {
- lua_assert(J->slot[fins->op1] != 0);
- return J->slot[fins->op1];
- }
-}
-
-/* Only fold for KKPTR. The pointer _and_ the contents must be const. */
-LJFOLD(XLOAD KKPTR any)
-LJFOLDF(xload_kptr)
-{
- TRef tr = kfold_xload(J, fins, ir_kptr(fleft));
- return tr ? tr : NEXTFOLD;
-}
-
-LJFOLD(XLOAD any any)
-LJFOLDX(lj_opt_fwd_xload)
-
-/* -- Write barriers ------------------------------------------------------ */
-
-/* Write barriers are amenable to CSE, but not across any incremental
-** GC steps.
-**
-** The same logic applies to open upvalue references, because a stack
-** may be resized during a GC step (not the current stack, but maybe that
-** of a coroutine).
-*/
-LJFOLD(TBAR any)
-LJFOLD(OBAR any any)
-LJFOLD(UREFO any any)
-LJFOLDF(barrier_tab)
-{
- TRef tr = lj_opt_cse(J);
- if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */
- return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */
- return tr;
-}
-
-LJFOLD(TBAR TNEW)
-LJFOLD(TBAR TDUP)
-LJFOLDF(barrier_tnew_tdup)
-{
- /* New tables are always white and never need a barrier. */
- if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */
- return NEXTFOLD;
- return DROPFOLD;
-}
-
-/* -- Stores and allocations ---------------------------------------------- */
-
-/* Stores and allocations cannot be folded or passed on to CSE in general.
-** But some stores can be eliminated with dead-store elimination (DSE).
-**
-** Caveat: *all* stores and allocs must be listed here or they end up at CSE!
-*/
-
-LJFOLD(ASTORE any any)
-LJFOLD(HSTORE any any)
-LJFOLDX(lj_opt_dse_ahstore)
-
-LJFOLD(USTORE any any)
-LJFOLDX(lj_opt_dse_ustore)
-
-LJFOLD(FSTORE any any)
-LJFOLDX(lj_opt_dse_fstore)
-
-LJFOLD(XSTORE any any)
-LJFOLDX(lj_opt_dse_xstore)
-
-LJFOLD(NEWREF any any) /* Treated like a store. */
-LJFOLD(CALLS any any)
-LJFOLD(CALLL any any) /* Safeguard fallback. */
-LJFOLD(CALLXS any any)
-LJFOLD(XBAR)
-LJFOLD(RETF any any) /* Modifies BASE. */
-LJFOLD(TNEW any any)
-LJFOLD(TDUP any)
-LJFOLD(CNEW any any)
-LJFOLD(XSNEW any any)
-LJFOLDX(lj_ir_emit)
-
-/* ------------------------------------------------------------------------ */
-
-/* Every entry in the generated hash table is a 32 bit pattern:
-**
-** xxxxxxxx iiiiiii lllllll rrrrrrrrrr
-**
-** xxxxxxxx = 8 bit index into fold function table
-** iiiiiii = 7 bit folded instruction opcode
-** lllllll = 7 bit left instruction opcode
-** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field
-*/
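-
-/* The field widths add up to 8+7+7+10 = 32 bits. The lookup in lj_opt_fold()
-** below compares the low 24 bits (fh & 0xffffff) against the constructed key
-** and dispatches through fold_func[fh >> 24] on a match.
-*/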
-
-#include "lj_folddef.h"
-
-/* ------------------------------------------------------------------------ */
-
-/* Fold IR instruction. */
-TRef LJ_FASTCALL lj_opt_fold(jit_State *J)
-{
- uint32_t key, any;
- IRRef ref;
-
- if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) {
- lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) |
- JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT);
- /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */
- if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N)
- return lj_opt_cse(J);
-
- /* No FOLD, forwarding or CSE? Emit raw IR for loads, except for SLOAD. */
- if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE)) !=
- (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE) &&
- irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD)
- return lj_ir_emit(J);
-
- /* No FOLD or DSE? Emit raw IR for stores. */
- if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_DSE)) !=
- (JIT_F_OPT_FOLD|JIT_F_OPT_DSE) &&
- irm_kind(lj_ir_mode[fins->o]) == IRM_S)
- return lj_ir_emit(J);
- }
-
- /* Fold engine start/retry point. */
-retry:
- /* Construct key from opcode and operand opcodes (unless literal/none). */
- key = ((uint32_t)fins->o << 17);
- if (fins->op1 >= J->cur.nk) {
- key += (uint32_t)IR(fins->op1)->o << 10;
- *fleft = *IR(fins->op1);
- }
- if (fins->op2 >= J->cur.nk) {
- key += (uint32_t)IR(fins->op2)->o;
- *fright = *IR(fins->op2);
- } else {
- key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */
- }
-
- /* Check for a match in order from most specific to least specific. */
- any = 0;
- for (;;) {
- uint32_t k = key | (any & 0x1ffff);
- uint32_t h = fold_hashkey(k);
- uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */
- if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) {
- ref = (IRRef)tref_ref(fold_func[fh >> 24](J));
- if (ref != NEXTFOLD)
- break;
- }
- if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */
- return lj_opt_cse(J);
- any = (any | (any >> 10)) ^ 0xffc00;
- }
-
- /* Return value processing, ordered by frequency. */
- if (LJ_LIKELY(ref >= MAX_FOLD))
- return TREF(ref, irt_t(IR(ref)->t));
- if (ref == RETRYFOLD)
- goto retry;
- if (ref == KINTFOLD)
- return lj_ir_kint(J, fins->i);
- if (ref == FAILFOLD)
- lj_trace_err(J, LJ_TRERR_GFAIL);
- lua_assert(ref == DROPFOLD);
- return REF_DROP;
-}
-
-/* -- Common-Subexpression Elimination ------------------------------------ */
-
-/* CSE an IR instruction. This is very fast due to the skip-list chains. */
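-/* Each opcode has its own chain, headed by J->chain[op] and linked through
-** the prev field of each instruction. The search below stops at max(op1, op2)
-** because an instruction always appears after both of its operands, so no
-** earlier entry could match.
-*/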
-TRef LJ_FASTCALL lj_opt_cse(jit_State *J)
-{
- /* Avoid narrow to wide store-to-load forwarding stall */
- IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
- IROp op = fins->o;
- if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
- /* Limited search for same operands in per-opcode chain. */
- IRRef ref = J->chain[op];
- IRRef lim = fins->op1;
- if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. */
- while (ref > lim) {
- if (IR(ref)->op12 == op12)
- return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */
- ref = IR(ref)->prev;
- }
- }
- /* Otherwise emit IR (inlined for speed). */
- {
- IRRef ref = lj_ir_nextins(J);
- IRIns *ir = IR(ref);
- ir->prev = J->chain[op];
- ir->op12 = op12;
- J->chain[op] = (IRRef1)ref;
- ir->o = fins->o;
- J->guardemit.irt |= fins->t.irt;
- return TREF(ref, irt_t((ir->t = fins->t)));
- }
-}
-
-/* CSE with explicit search limit. */
-TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim)
-{
- IRRef ref = J->chain[fins->o];
- IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
- while (ref > lim) {
- if (IR(ref)->op12 == op12)
- return ref;
- ref = IR(ref)->prev;
- }
- return lj_ir_emit(J);
-}
-
-/* ------------------------------------------------------------------------ */
-
-#undef IR
-#undef fins
-#undef fleft
-#undef fright
-#undef knumleft
-#undef knumright
-#undef emitir
-
-#endif
+/*
+** FOLD: Constant Folding, Algebraic Simplifications and Reassociation.
+** ABCelim: Array Bounds Check Elimination.
+** CSE: Common-Subexpression Elimination.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_fold_c
+#define LUA_CORE
+
+#include <math.h>
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_carith.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+
+/* Here's a short description of how the FOLD engine processes instructions:
+**
+** The FOLD engine receives a single instruction stored in fins (J->fold.ins).
+** The instruction and its operands are used to select matching fold rules.
+** These are applied iteratively until a fixed point is reached.
+**
+** The 8 bit opcode of the instruction itself plus the opcodes of the
+** two instructions referenced by its operands form a 24 bit key
+** 'ins left right' (unused operands -> 0, literals -> lowest 10 bits).
+**
+** This key is used for partial matching against the fold rules. The
+** left/right operand fields of the key are successively masked with
+** the 'any' wildcard, from most specific to least specific:
+**
+** ins left right
+** ins any right
+** ins left any
+** ins any any
+**
+** The masked key is used to lookup a matching fold rule in a semi-perfect
+** hash table. If a matching rule is found, the related fold function is run.
+** Multiple rules can share the same fold function. A fold rule may return
+** one of several special values:
+**
+** - NEXTFOLD means no folding was applied, because an additional test
+** inside the fold function failed. Matching continues against less
+** specific fold rules. Finally the instruction is passed on to CSE.
+**
+** - RETRYFOLD means the instruction was modified in-place. Folding is
+** retried as if this instruction had just been received.
+**
+** All other return values are terminal actions -- no further folding is
+** applied:
+**
+** - INTFOLD(i) returns a reference to the integer constant i.
+**
+** - LEFTFOLD and RIGHTFOLD return the left/right operand reference
+** without emitting an instruction.
+**
+** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit
+** it without passing through any further optimizations.
+**
+** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have
+** no result (e.g. guarded assertions): FAILFOLD means the guard would
+** always fail, i.e. the current trace is pointless. DROPFOLD means
+** the guard is always true and has been eliminated. CONDFOLD is a
+** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail).
+**
+** - Any other return value is interpreted as an IRRef or TRef. This
+** can be a reference to an existing or a newly created instruction.
+** Only the least-significant 16 bits (IRRef1) are used to form a TRef
+** which is finally returned to the caller.
+**
+** The FOLD engine receives instructions both from the trace recorder and
+** substituted instructions from LOOP unrolling. This means all types
+** of instructions may end up here, even though the recorder bypasses
+** FOLD in some cases. Thus all loads, stores and allocations must have
+** an any/any rule to avoid being passed on to CSE.
+**
+** Carefully read the following requirements before adding or modifying
+** any fold rules:
+**
+** Requirement #1: All fold rules must preserve their destination type.
+**
+** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result).
+** Never use lj_ir_knumint() which can have either a KINT or KNUM result.
+**
+** Requirement #2: Fold rules should not create *new* instructions which
+** reference operands *across* PHIs.
+**
+** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the
+** left operand is a PHI. Then fleft->op1 would point across the PHI
+** frontier to an invariant instruction. Adding a PHI for this instruction
+** would be counterproductive. The solution is to add a barrier which
+** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case.
+** The only exception is for recurrences with high latencies like
+** repeated int->num->int conversions.
+**
+** One could relax this condition a bit if the referenced instruction is
+** a PHI, too. But this often leads to worse code due to excessive
+** register shuffling.
+**
+** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though.
+** Even returning fleft->op1 would be ok, because a new PHI will be added,
+** if needed. But again, this leads to excessive register shuffling and
+** should be avoided.
+**
+** Requirement #3: The set of all fold rules must be monotonic to guarantee
+** termination.
+**
+** The goal is optimization, so one primarily wants to add strength-reducing
+** rules. This means eliminating an instruction or replacing an instruction
+** with one or more simpler instructions. Don't add fold rules which point
+** into the other direction.
+**
+** Some rules (like commutativity) do not directly reduce the strength of
+** an instruction, but enable other fold rules (e.g. by moving constants
+** to the right operand). These rules must be made unidirectional to avoid
+** cycles.
+**
+** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it.
+*/
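+
+/* Worked example of the lookup order above (illustrative only): for an ADD
+** whose operands are both KINT constants, the initial key encodes
+** (ADD KINT KINT) and hits the kfold_intarith rule below. Had that rule
+** returned NEXTFOLD, the engine would retry with (ADD any KINT), then
+** (ADD KINT any), then (ADD any any) -- the commutativity rule -- before
+** passing the instruction on to CSE.
+*/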
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+#define fleft (&J->fold.left)
+#define fright (&J->fold.right)
+#define knumleft (ir_knum(fleft)->n)
+#define knumright (ir_knum(fright)->n)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
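+
+/* Caveat: emitir() re-enters the fold engine, which overwrites J->fold.ins
+** and thus the fins/fleft/fright copies. Rules that emit and then continue
+** to use them must save what they need first (see the "Clobbers fins!"
+** notes in individual rules below).
+*/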
+
+/* Fold function type. Fastcall on x86 significantly reduces their size. */
+typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J);
+
+/* Macros for the fold specs, so buildvm can recognize them. */
+#define LJFOLD(x)
+#define LJFOLDX(x)
+#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J)
+/* Note: They must be at the start of a line or buildvm ignores them! */
+
+/* Barrier to prevent using operands across PHIs. */
+#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD
+
+/* Barrier to prevent folding across a GC step.
+** GC steps can only happen at the head of a trace and at LOOP.
+** And the GC is only driven forward if there is at least one allocation.
+*/
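+/* (I.e. in the macro below: the referenced instruction predates the LOOP
+** marker, so reusing it from the unrolled part of the trace would cross a
+** potential GC step, and at least one allocating instruction exists to drive
+** the GC forward.)
+*/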
+#define gcstep_barrier(J, ref) \
+ ((ref) < J->chain[IR_LOOP] && \
+ (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \
+ J->chain[IR_TNEW] || J->chain[IR_TDUP] || \
+ J->chain[IR_CNEW] || J->chain[IR_CNEWI] || J->chain[IR_TOSTR]))
+
+/* -- Constant folding for FP numbers ------------------------------------- */
+
+LJFOLD(ADD KNUM KNUM)
+LJFOLD(SUB KNUM KNUM)
+LJFOLD(MUL KNUM KNUM)
+LJFOLD(DIV KNUM KNUM)
+LJFOLD(NEG KNUM KNUM)
+LJFOLD(ABS KNUM KNUM)
+LJFOLD(ATAN2 KNUM KNUM)
+LJFOLD(LDEXP KNUM KNUM)
+LJFOLD(MIN KNUM KNUM)
+LJFOLD(MAX KNUM KNUM)
+LJFOLDF(kfold_numarith)
+{
+ lua_Number a = knumleft;
+ lua_Number b = knumright;
+ lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(LDEXP KNUM KINT)
+LJFOLDF(kfold_ldexp)
+{
+#if LJ_TARGET_X86ORX64
+ UNUSED(J);
+ return NEXTFOLD;
+#else
+ return lj_ir_knum(J, ldexp(knumleft, fright->i));
+#endif
+}
+
+LJFOLD(FPMATH KNUM any)
+LJFOLDF(kfold_fpmath)
+{
+ lua_Number a = knumleft;
+ lua_Number y = lj_vm_foldfpm(a, fins->op2);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(POW KNUM KINT)
+LJFOLDF(kfold_numpow)
+{
+ lua_Number a = knumleft;
+ lua_Number b = (lua_Number)fright->i;
+ lua_Number y = lj_vm_foldarith(a, b, IR_POW - IR_ADD);
+ return lj_ir_knum(J, y);
+}
+
+/* Must not use kfold_kref for numbers (could be NaN). */
+LJFOLD(EQ KNUM KNUM)
+LJFOLD(NE KNUM KNUM)
+LJFOLD(LT KNUM KNUM)
+LJFOLD(GE KNUM KNUM)
+LJFOLD(LE KNUM KNUM)
+LJFOLD(GT KNUM KNUM)
+LJFOLD(ULT KNUM KNUM)
+LJFOLD(UGE KNUM KNUM)
+LJFOLD(ULE KNUM KNUM)
+LJFOLD(UGT KNUM KNUM)
+LJFOLDF(kfold_numcomp)
+{
+ return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o));
+}
+
+/* -- Constant folding for 32 bit integers -------------------------------- */
+
+static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op)
+{
+ switch (op) {
+ case IR_ADD: k1 += k2; break;
+ case IR_SUB: k1 -= k2; break;
+ case IR_MUL: k1 *= k2; break;
+ case IR_MOD: k1 = lj_vm_modi(k1, k2); break;
+ case IR_NEG: k1 = -k1; break;
+ case IR_BAND: k1 &= k2; break;
+ case IR_BOR: k1 |= k2; break;
+ case IR_BXOR: k1 ^= k2; break;
+ case IR_BSHL: k1 <<= (k2 & 31); break;
+ case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break;
+ case IR_BSAR: k1 >>= (k2 & 31); break;
+ case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break;
+ case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break;
+ case IR_MIN: k1 = k1 < k2 ? k1 : k2; break;
+ case IR_MAX: k1 = k1 > k2 ? k1 : k2; break;
+ default: lua_assert(0); break;
+ }
+ return k1;
+}
+
+LJFOLD(ADD KINT KINT)
+LJFOLD(SUB KINT KINT)
+LJFOLD(MUL KINT KINT)
+LJFOLD(MOD KINT KINT)
+LJFOLD(NEG KINT KINT)
+LJFOLD(BAND KINT KINT)
+LJFOLD(BOR KINT KINT)
+LJFOLD(BXOR KINT KINT)
+LJFOLD(BSHL KINT KINT)
+LJFOLD(BSHR KINT KINT)
+LJFOLD(BSAR KINT KINT)
+LJFOLD(BROL KINT KINT)
+LJFOLD(BROR KINT KINT)
+LJFOLD(MIN KINT KINT)
+LJFOLD(MAX KINT KINT)
+LJFOLDF(kfold_intarith)
+{
+ return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o));
+}
+
+LJFOLD(ADDOV KINT KINT)
+LJFOLD(SUBOV KINT KINT)
+LJFOLD(MULOV KINT KINT)
+LJFOLDF(kfold_intovarith)
+{
+ lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i,
+ fins->o - IR_ADDOV);
+ int32_t k = lj_num2int(n);
+ if (n != (lua_Number)k)
+ return FAILFOLD;
+ return INTFOLD(k);
+}
+
+LJFOLD(BNOT KINT)
+LJFOLDF(kfold_bnot)
+{
+ return INTFOLD(~fleft->i);
+}
+
+LJFOLD(BSWAP KINT)
+LJFOLDF(kfold_bswap)
+{
+ return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i));
+}
+
+LJFOLD(LT KINT KINT)
+LJFOLD(GE KINT KINT)
+LJFOLD(LE KINT KINT)
+LJFOLD(GT KINT KINT)
+LJFOLD(ULT KINT KINT)
+LJFOLD(UGE KINT KINT)
+LJFOLD(ULE KINT KINT)
+LJFOLD(UGT KINT KINT)
+LJFOLD(ABC KINT KINT)
+LJFOLDF(kfold_intcomp)
+{
+ int32_t a = fleft->i, b = fright->i;
+ switch ((IROp)fins->o) {
+ case IR_LT: return CONDFOLD(a < b);
+ case IR_GE: return CONDFOLD(a >= b);
+ case IR_LE: return CONDFOLD(a <= b);
+ case IR_GT: return CONDFOLD(a > b);
+ case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b);
+ case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b);
+ case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b);
+ case IR_ABC:
+ case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b);
+ default: lua_assert(0); return FAILFOLD;
+ }
+}
+
+LJFOLD(UGE any KINT)
+LJFOLDF(kfold_intcomp0)
+{
+ if (fright->i == 0)
+ return DROPFOLD;
+ return NEXTFOLD;
+}
+
+/* -- Constant folding for 64 bit integers -------------------------------- */
+
+static uint64_t kfold_int64arith(uint64_t k1, uint64_t k2, IROp op)
+{
+ switch (op) {
+#if LJ_64 || LJ_HASFFI
+ case IR_ADD: k1 += k2; break;
+ case IR_SUB: k1 -= k2; break;
+#endif
+#if LJ_HASFFI
+ case IR_MUL: k1 *= k2; break;
+ case IR_BAND: k1 &= k2; break;
+ case IR_BOR: k1 |= k2; break;
+ case IR_BXOR: k1 ^= k2; break;
+#endif
+ default: UNUSED(k2); lua_assert(0); break;
+ }
+ return k1;
+}
+
+LJFOLD(ADD KINT64 KINT64)
+LJFOLD(SUB KINT64 KINT64)
+LJFOLD(MUL KINT64 KINT64)
+LJFOLD(BAND KINT64 KINT64)
+LJFOLD(BOR KINT64 KINT64)
+LJFOLD(BXOR KINT64 KINT64)
+LJFOLDF(kfold_int64arith)
+{
+ return INT64FOLD(kfold_int64arith(ir_k64(fleft)->u64,
+ ir_k64(fright)->u64, (IROp)fins->o));
+}
+
+LJFOLD(DIV KINT64 KINT64)
+LJFOLD(MOD KINT64 KINT64)
+LJFOLD(POW KINT64 KINT64)
+LJFOLDF(kfold_int64arith2)
+{
+#if LJ_HASFFI
+ uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64;
+ if (irt_isi64(fins->t)) {
+ k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) :
+ fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) :
+ lj_carith_powi64((int64_t)k1, (int64_t)k2);
+ } else {
+ k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) :
+ fins->o == IR_MOD ? lj_carith_modu64(k1, k2) :
+ lj_carith_powu64(k1, k2);
+ }
+ return INT64FOLD(k1);
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BSHL KINT64 KINT)
+LJFOLD(BSHR KINT64 KINT)
+LJFOLD(BSAR KINT64 KINT)
+LJFOLD(BROL KINT64 KINT)
+LJFOLD(BROR KINT64 KINT)
+LJFOLDF(kfold_int64shift)
+{
+#if LJ_HASFFI || LJ_64
+ uint64_t k = ir_k64(fleft)->u64;
+ int32_t sh = (fright->i & 63);
+ switch ((IROp)fins->o) {
+ case IR_BSHL: k <<= sh; break;
+#if LJ_HASFFI
+ case IR_BSHR: k >>= sh; break;
+ case IR_BSAR: k = (uint64_t)((int64_t)k >> sh); break;
+ case IR_BROL: k = lj_rol(k, sh); break;
+ case IR_BROR: k = lj_ror(k, sh); break;
+#endif
+ default: lua_assert(0); break;
+ }
+ return INT64FOLD(k);
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BNOT KINT64)
+LJFOLDF(kfold_bnot64)
+{
+#if LJ_HASFFI
+ return INT64FOLD(~ir_k64(fleft)->u64);
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BSWAP KINT64)
+LJFOLDF(kfold_bswap64)
+{
+#if LJ_HASFFI
+ return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64));
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(LT KINT64 KINT64)
+LJFOLD(GE KINT64 KINT64)
+LJFOLD(LE KINT64 KINT64)
+LJFOLD(GT KINT64 KINT64)
+LJFOLD(ULT KINT64 KINT64)
+LJFOLD(UGE KINT64 KINT64)
+LJFOLD(ULE KINT64 KINT64)
+LJFOLD(UGT KINT64 KINT64)
+LJFOLDF(kfold_int64comp)
+{
+#if LJ_HASFFI
+ uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64;
+ switch ((IROp)fins->o) {
+ case IR_LT: return CONDFOLD(a < b);
+ case IR_GE: return CONDFOLD(a >= b);
+ case IR_LE: return CONDFOLD(a <= b);
+ case IR_GT: return CONDFOLD(a > b);
+ case IR_ULT: return CONDFOLD((uint64_t)a < (uint64_t)b);
+ case IR_UGE: return CONDFOLD((uint64_t)a >= (uint64_t)b);
+ case IR_ULE: return CONDFOLD((uint64_t)a <= (uint64_t)b);
+ case IR_UGT: return CONDFOLD((uint64_t)a > (uint64_t)b);
+ default: lua_assert(0); return FAILFOLD;
+ }
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(UGE any KINT64)
+LJFOLDF(kfold_int64comp0)
+{
+#if LJ_HASFFI
+ if (ir_k64(fright)->u64 == 0)
+ return DROPFOLD;
+ return NEXTFOLD;
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+/* -- Constant folding for strings ---------------------------------------- */
+
+LJFOLD(SNEW KKPTR KINT)
+LJFOLDF(kfold_snew_kptr)
+{
+ GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i);
+ return lj_ir_kstr(J, s);
+}
+
+LJFOLD(SNEW any KINT)
+LJFOLDF(kfold_snew_empty)
+{
+ if (fright->i == 0)
+ return lj_ir_kstr(J, &J2G(J)->strempty);
+ return NEXTFOLD;
+}
+
+LJFOLD(STRREF KGC KINT)
+LJFOLDF(kfold_strref)
+{
+ GCstr *str = ir_kstr(fleft);
+ lua_assert((MSize)fright->i <= str->len);
+ return lj_ir_kkptr(J, (char *)strdata(str) + fright->i);
+}
+
+LJFOLD(STRREF SNEW any)
+LJFOLDF(kfold_strref_snew)
+{
+ PHIBARRIER(fleft);
+ if (irref_isk(fins->op2) && fright->i == 0) {
+ return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */
+ } else {
+ /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */
+ IRIns *ir = IR(fleft->op1);
+ IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */
+ lua_assert(ir->o == IR_STRREF);
+ PHIBARRIER(ir);
+ fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */
+ fins->op1 = str;
+ fins->ot = IRT(IR_STRREF, IRT_P32);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CALLN CARG IRCALL_lj_str_cmp)
+LJFOLDF(kfold_strcmp)
+{
+ if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) {
+ GCstr *a = ir_kstr(IR(fleft->op1));
+ GCstr *b = ir_kstr(IR(fleft->op2));
+ return INTFOLD(lj_str_cmp(a, b));
+ }
+ return NEXTFOLD;
+}
+
+/* -- Constant folding of pointer arithmetic ------------------------------ */
+
+LJFOLD(ADD KGC KINT)
+LJFOLD(ADD KGC KINT64)
+LJFOLDF(kfold_add_kgc)
+{
+ GCobj *o = ir_kgc(fleft);
+#if LJ_64
+ ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
+#else
+ ptrdiff_t ofs = fright->i;
+#endif
+#if LJ_HASFFI
+ if (irt_iscdata(fleft->t)) {
+ CType *ct = ctype_raw(ctype_ctsG(J2G(J)), gco2cd(o)->ctypeid);
+ if (ctype_isnum(ct->info) || ctype_isenum(ct->info) ||
+ ctype_isptr(ct->info) || ctype_isfunc(ct->info) ||
+ ctype_iscomplex(ct->info) || ctype_isvector(ct->info))
+ return lj_ir_kkptr(J, (char *)o + ofs);
+ }
+#endif
+ return lj_ir_kptr(J, (char *)o + ofs);
+}
+
+LJFOLD(ADD KPTR KINT)
+LJFOLD(ADD KPTR KINT64)
+LJFOLD(ADD KKPTR KINT)
+LJFOLD(ADD KKPTR KINT64)
+LJFOLDF(kfold_add_kptr)
+{
+ void *p = ir_kptr(fleft);
+#if LJ_64
+ ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
+#else
+ ptrdiff_t ofs = fright->i;
+#endif
+ return lj_ir_kptr_(J, fleft->o, (char *)p + ofs);
+}
+
+LJFOLD(ADD any KGC)
+LJFOLD(ADD any KPTR)
+LJFOLD(ADD any KKPTR)
+LJFOLDF(kfold_add_kright)
+{
+ if (fleft->o == IR_KINT || fleft->o == IR_KINT64) {
+ IRRef1 tmp = fins->op1; fins->op1 = fins->op2; fins->op2 = tmp;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+/* -- Constant folding of conversions ------------------------------------- */
+
+LJFOLD(TOBIT KNUM KNUM)
+LJFOLDF(kfold_tobit)
+{
+ return INTFOLD(lj_num2bit(knumleft));
+}
+
+LJFOLD(CONV KINT IRCONV_NUM_INT)
+LJFOLDF(kfold_conv_kint_num)
+{
+ return lj_ir_knum(J, (lua_Number)fleft->i);
+}
+
+LJFOLD(CONV KINT IRCONV_NUM_U32)
+LJFOLDF(kfold_conv_kintu32_num)
+{
+ return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i);
+}
+
+LJFOLD(CONV KINT IRCONV_INT_I8)
+LJFOLD(CONV KINT IRCONV_INT_U8)
+LJFOLD(CONV KINT IRCONV_INT_I16)
+LJFOLD(CONV KINT IRCONV_INT_U16)
+LJFOLDF(kfold_conv_kint_ext)
+{
+ int32_t k = fleft->i;
+ if ((fins->op2 & IRCONV_SRCMASK) == IRT_I8) k = (int8_t)k;
+ else if ((fins->op2 & IRCONV_SRCMASK) == IRT_U8) k = (uint8_t)k;
+ else if ((fins->op2 & IRCONV_SRCMASK) == IRT_I16) k = (int16_t)k;
+ else k = (uint16_t)k;
+ return INTFOLD(k);
+}
+
+LJFOLD(CONV KINT IRCONV_I64_INT)
+LJFOLD(CONV KINT IRCONV_U64_INT)
+LJFOLD(CONV KINT IRCONV_I64_U32)
+LJFOLD(CONV KINT IRCONV_U64_U32)
+LJFOLDF(kfold_conv_kint_i64)
+{
+ if ((fins->op2 & IRCONV_SEXT))
+ return INT64FOLD((uint64_t)(int64_t)fleft->i);
+ else
+ return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i);
+}
+
+LJFOLD(CONV KINT64 IRCONV_NUM_I64)
+LJFOLDF(kfold_conv_kint64_num_i64)
+{
+ return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KINT64 IRCONV_NUM_U64)
+LJFOLDF(kfold_conv_kint64_num_u64)
+{
+ return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KINT64 IRCONV_INT_I64)
+LJFOLD(CONV KINT64 IRCONV_U32_I64)
+LJFOLDF(kfold_conv_kint64_int_i64)
+{
+ return INTFOLD((int32_t)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KNUM IRCONV_INT_NUM)
+LJFOLDF(kfold_conv_knum_int_num)
+{
+ lua_Number n = knumleft;
+ if (!(fins->op2 & IRCONV_TRUNC)) {
+ int32_t k = lj_num2int(n);
+ if (irt_isguard(fins->t) && n != (lua_Number)k) {
+ /* We're about to create a guard which always fails, like CONV +1.5.
+ ** Some pathological loops cause this during LICM, e.g.:
+ ** local x,k,t = 0,1.5,{1,[1.5]=2}
+ ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
+ ** assert(x == 300)
+ */
+ return FAILFOLD;
+ }
+ return INTFOLD(k);
+ } else {
+ return INTFOLD((int32_t)n);
+ }
+}
+
+LJFOLD(CONV KNUM IRCONV_U32_NUM)
+LJFOLDF(kfold_conv_knum_u32_num)
+{
+ lua_assert((fins->op2 & IRCONV_TRUNC));
+#ifdef _MSC_VER
+ { /* Workaround for MSVC bug. */
+ volatile uint32_t u = (uint32_t)knumleft;
+ return INTFOLD((int32_t)u);
+ }
+#else
+ return INTFOLD((int32_t)(uint32_t)knumleft);
+#endif
+}
+
+LJFOLD(CONV KNUM IRCONV_I64_NUM)
+LJFOLDF(kfold_conv_knum_i64_num)
+{
+ lua_assert((fins->op2 & IRCONV_TRUNC));
+ return INT64FOLD((uint64_t)(int64_t)knumleft);
+}
+
+LJFOLD(CONV KNUM IRCONV_U64_NUM)
+LJFOLDF(kfold_conv_knum_u64_num)
+{
+ lua_assert((fins->op2 & IRCONV_TRUNC));
+ return INT64FOLD(lj_num2u64(knumleft));
+}
+
+LJFOLD(TOSTR KNUM)
+LJFOLDF(kfold_tostr_knum)
+{
+ return lj_ir_kstr(J, lj_str_fromnum(J->L, &knumleft));
+}
+
+LJFOLD(TOSTR KINT)
+LJFOLDF(kfold_tostr_kint)
+{
+ return lj_ir_kstr(J, lj_str_fromint(J->L, fleft->i));
+}
+
+LJFOLD(STRTO KGC)
+LJFOLDF(kfold_strto)
+{
+ TValue n;
+ if (lj_strscan_num(ir_kstr(fleft), &n))
+ return lj_ir_knum(J, numV(&n));
+ return FAILFOLD;
+}
+
+/* -- Constant folding of equality checks --------------------------------- */
+
+/* Don't constant-fold away FLOAD checks against KNULL. */
+LJFOLD(EQ FLOAD KNULL)
+LJFOLD(NE FLOAD KNULL)
+LJFOLDX(lj_opt_cse)
+
+/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */
+LJFOLD(EQ any KNULL)
+LJFOLD(NE any KNULL)
+LJFOLD(EQ KNULL any)
+LJFOLD(NE KNULL any)
+LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */
+LJFOLD(NE KINT KINT)
+LJFOLD(EQ KINT64 KINT64)
+LJFOLD(NE KINT64 KINT64)
+LJFOLD(EQ KGC KGC)
+LJFOLD(NE KGC KGC)
+LJFOLDF(kfold_kref)
+{
+ return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE));
+}
+
+/* -- Algebraic shortcuts ------------------------------------------------- */
+
+LJFOLD(FPMATH FPMATH IRFPM_FLOOR)
+LJFOLD(FPMATH FPMATH IRFPM_CEIL)
+LJFOLD(FPMATH FPMATH IRFPM_TRUNC)
+LJFOLDF(shortcut_round)
+{
+ IRFPMathOp op = (IRFPMathOp)fleft->op2;
+ if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC)
+ return LEFTFOLD; /* round(round_left(x)) = round_left(x) */
+ return NEXTFOLD;
+}
+
+LJFOLD(ABS ABS KNUM)
+LJFOLDF(shortcut_left)
+{
+ return LEFTFOLD; /* f(g(x)) ==> g(x) */
+}
+
+LJFOLD(ABS NEG KNUM)
+LJFOLDF(shortcut_dropleft)
+{
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */
+ return RETRYFOLD;
+}
+
+/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */
+LJFOLD(NEG NEG any)
+LJFOLD(BNOT BNOT)
+LJFOLD(BSWAP BSWAP)
+LJFOLDF(shortcut_leftleft)
+{
+ PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */
+ return fleft->op1; /* f(g(x)) ==> x */
+}
+
+/* -- FP algebraic simplifications ---------------------------------------- */
+
+/* FP arithmetic is tricky -- there's not much to simplify.
+** Please note the following common pitfalls before sending "improvements":
+** x+0 ==> x is INVALID for x=-0
+** 0-x ==> -x is INVALID for x=+0
+** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN
+*/
+
+LJFOLD(ADD NEG any)
+LJFOLDF(simplify_numadd_negx)
+{
+ PHIBARRIER(fleft);
+ fins->o = IR_SUB; /* (-a) + b ==> b - a */
+ fins->op1 = fins->op2;
+ fins->op2 = fleft->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(ADD any NEG)
+LJFOLDF(simplify_numadd_xneg)
+{
+ PHIBARRIER(fright);
+ fins->o = IR_SUB; /* a + (-b) ==> a - b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB any KNUM)
+LJFOLDF(simplify_numsub_k)
+{
+ lua_Number n = knumright;
+ if (n == 0.0) /* x - (+-0) ==> x */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB NEG KNUM)
+LJFOLDF(simplify_numsub_negk)
+{
+ PHIBARRIER(fleft);
+ fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */
+ fins->op1 = (IRRef1)lj_ir_knum(J, -knumright);
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB any NEG)
+LJFOLDF(simplify_numsub_xneg)
+{
+ PHIBARRIER(fright);
+ fins->o = IR_ADD; /* a - (-b) ==> a + b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(MUL any KNUM)
+LJFOLD(DIV any KNUM)
+LJFOLDF(simplify_nummuldiv_k)
+{
+ lua_Number n = knumright;
+ if (n == 1.0) { /* x o 1 ==> x */
+ return LEFTFOLD;
+ } else if (n == -1.0) { /* x o -1 ==> -x */
+ fins->o = IR_NEG;
+ fins->op2 = (IRRef1)lj_ir_knum_neg(J);
+ return RETRYFOLD;
+ } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */
+ fins->o = IR_ADD;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ } else if (fins->o == IR_DIV) { /* x / 2^k ==> x * 2^-k */
+ uint64_t u = ir_knum(fright)->u64;
+ uint32_t ex = ((uint32_t)(u >> 52) & 0x7ff);
+ if ((u & U64x(000fffff,ffffffff)) == 0 && ex - 1 < 0x7fd) {
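+ /* k is then an exact (possibly negative) power of two: its mantissa bits
+ ** are zero and the biased exponent ex is in 1..0x7fd, so 1/k is exactly
+ ** representable as a normal number. Its biased exponent is 0x7fe - ex
+ ** (e.g. 8.0 has ex 0x402, 0.125 has ex 0x3fc); the sign bit is carried
+ ** over unchanged.
+ */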
+ u = (u & ((uint64_t)1 << 63)) | ((uint64_t)(0x7fe - ex) << 52);
+ fins->o = IR_MUL; /* Multiply by exact reciprocal. */
+ fins->op2 = lj_ir_knum_u64(J, u);
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL NEG KNUM)
+LJFOLD(DIV NEG KNUM)
+LJFOLDF(simplify_nummuldiv_negk)
+{
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */
+ fins->op2 = (IRRef1)lj_ir_knum(J, -knumright);
+ return RETRYFOLD;
+}
+
+LJFOLD(MUL NEG NEG)
+LJFOLD(DIV NEG NEG)
+LJFOLDF(simplify_nummuldiv_negneg)
+{
+ PHIBARRIER(fleft);
+ PHIBARRIER(fright);
+ fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(POW any KINT)
+LJFOLDF(simplify_numpow_xk)
+{
+ int32_t k = fright->i;
+ TRef ref = fins->op1;
+ if (k == 0) /* x ^ 0 ==> 1 */
+ return lj_ir_knum_one(J); /* Result must be a number, not an int. */
+ if (k == 1) /* x ^ 1 ==> x */
+ return LEFTFOLD;
+ if ((uint32_t)(k+65536) > 2*65536u) /* Limit code explosion. */
+ return NEXTFOLD;
+ if (k < 0) { /* x ^ (-k) ==> (1/x) ^ k. */
+ ref = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), ref);
+ k = -k;
+ }
+ /* Unroll x^k for 1 <= k <= 65536. */
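+ /* Square-and-multiply, e.g. k = 5 (binary 101) emits tmp = x*x,
+ ** tmp = tmp*tmp, ref = x*tmp -- three MULs for x^5.
+ */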
+ for (; (k & 1) == 0; k >>= 1) /* Handle leading zeros. */
+ ref = emitir(IRTN(IR_MUL), ref, ref);
+ if ((k >>= 1) != 0) { /* Handle trailing bits. */
+ TRef tmp = emitir(IRTN(IR_MUL), ref, ref);
+ for (; k != 1; k >>= 1) {
+ if (k & 1)
+ ref = emitir(IRTN(IR_MUL), ref, tmp);
+ tmp = emitir(IRTN(IR_MUL), tmp, tmp);
+ }
+ ref = emitir(IRTN(IR_MUL), ref, tmp);
+ }
+ return ref;
+}
+
+LJFOLD(POW KNUM any)
+LJFOLDF(simplify_numpow_kx)
+{
+ lua_Number n = knumleft;
+ if (n == 2.0) { /* 2.0 ^ i ==> ldexp(1.0, tonum(i)) */
+ fins->o = IR_CONV;
+#if LJ_TARGET_X86ORX64
+ fins->op1 = fins->op2;
+ fins->op2 = IRCONV_NUM_INT;
+ fins->op2 = (IRRef1)lj_opt_fold(J);
+#endif
+ fins->op1 = (IRRef1)lj_ir_knum_one(J);
+ fins->o = IR_LDEXP;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+/* -- Simplify conversions ------------------------------------------------ */
+
+LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */
+LJFOLDF(shortcut_conv_num_int)
+{
+ PHIBARRIER(fleft);
+ /* Only safe with a guarded conversion to int. */
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t))
+ return fleft->op1; /* f(g(x)) ==> x */
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */
+LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32*/
+LJFOLDF(simplify_conv_int_num)
+{
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ if ((fleft->op2 & IRCONV_SRCMASK) ==
+ ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH))
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32 */
+LJFOLDF(simplify_conv_i64_num)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
+ /* Reduce to a sign-extension. */
+ fins->op1 = fleft->op1;
+ fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
+ return RETRYFOLD;
+ } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
+#if LJ_TARGET_X64
+ return fleft->op1;
+#else
+ /* Reduce to a zero-extension. */
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRT_I64<<5)|IRT_U32;
+ return RETRYFOLD;
+#endif
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_U32_I64) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_U32_U64) /* _INT or _U32 */
+LJFOLDF(simplify_conv_int_i64)
+{
+ int src;
+ PHIBARRIER(fleft);
+ src = (fleft->op2 & IRCONV_SRCMASK);
+ if (src == IRT_INT || src == IRT_U32) {
+ if (src == ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) {
+ return fleft->op1;
+ } else {
+ fins->op2 = ((fins->op2 & IRCONV_DSTMASK) | src);
+ fins->op1 = fleft->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_FLOAT_NUM) /* _FLOAT */
+LJFOLDF(simplify_conv_flt_num)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
+LJFOLD(TOBIT CONV KNUM)
+LJFOLDF(simplify_tobit_conv)
+{
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT ||
+ (fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ lua_assert(irt_isnum(fleft->t));
+ return fleft->op1;
+ }
+ return NEXTFOLD;
+}
+
+/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
+LJFOLD(FPMATH CONV IRFPM_FLOOR)
+LJFOLD(FPMATH CONV IRFPM_CEIL)
+LJFOLD(FPMATH CONV IRFPM_TRUNC)
+LJFOLDF(simplify_floor_conv)
+{
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT ||
+ (fleft->op2 & IRCONV_SRCMASK) == IRT_U32)
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+/* Strength reduction of widening. */
+LJFOLD(CONV any IRCONV_I64_INT)
+LJFOLD(CONV any IRCONV_U64_INT)
+LJFOLDF(simplify_conv_sext)
+{
+ IRRef ref = fins->op1;
+ int64_t ofs = 0;
+ if (!(fins->op2 & IRCONV_SEXT))
+ return NEXTFOLD;
+ PHIBARRIER(fleft);
+ if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t)))
+ goto ok_reduce;
+ if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
+ ofs = (int64_t)IR(fleft->op2)->i;
+ ref = fleft->op1;
+ }
+ /* Use scalar evolution analysis results to strength-reduce sign-extension. */
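+ /* I.e. if the value is the loop index (plus a constant offset) and its
+ ** lower bound keeps it non-negative, sign- and zero-extension agree, so
+ ** the cheaper form (or no widening at all on x64) suffices.
+ */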
+ if (ref == J->scev.idx) {
+ IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
+ lua_assert(irt_isint(J->scev.t));
+ if (lo && IR(lo)->i + ofs >= 0) {
+ ok_reduce:
+#if LJ_TARGET_X64
+ /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
+ return LEFTFOLD;
+#else
+ /* Reduce to a (cheaper) zero-extension. */
+ fins->op2 &= ~IRCONV_SEXT;
+ return RETRYFOLD;
+#endif
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* Strength reduction of narrowing. */
+LJFOLD(CONV ADD IRCONV_INT_I64)
+LJFOLD(CONV SUB IRCONV_INT_I64)
+LJFOLD(CONV MUL IRCONV_INT_I64)
+LJFOLD(CONV ADD IRCONV_INT_U64)
+LJFOLD(CONV SUB IRCONV_INT_U64)
+LJFOLD(CONV MUL IRCONV_INT_U64)
+LJFOLD(CONV ADD IRCONV_U32_I64)
+LJFOLD(CONV SUB IRCONV_U32_I64)
+LJFOLD(CONV MUL IRCONV_U32_I64)
+LJFOLD(CONV ADD IRCONV_U32_U64)
+LJFOLD(CONV SUB IRCONV_U32_U64)
+LJFOLD(CONV MUL IRCONV_U32_U64)
+LJFOLDF(simplify_conv_narrow)
+{
+ IROp op = (IROp)fleft->o;
+ IRType t = irt_type(fins->t);
+ IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2;
+ PHIBARRIER(fleft);
+ op1 = emitir(IRTI(IR_CONV), op1, mode);
+ op2 = emitir(IRTI(IR_CONV), op2, mode);
+ fins->ot = IRT(op, t);
+ fins->op1 = op1;
+ fins->op2 = op2;
+ return RETRYFOLD;
+}
+
+/* Special CSE rule for CONV. */
+LJFOLD(CONV any any)
+LJFOLDF(cse_conv)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK);
+ uint8_t guard = irt_isguard(fins->t);
+ IRRef ref = J->chain[IR_CONV];
+ while (ref > op1) {
+ IRIns *ir = IR(ref);
+ /* Commoning with stronger checks is ok. */
+ if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 &&
+ irt_isguard(ir->t) >= guard)
+ return ref;
+ ref = ir->prev;
+ }
+ }
+ return EMITFOLD; /* No fallthrough to regular CSE. */
+}
+
+/* FP conversion narrowing. */
+LJFOLD(TOBIT ADD KNUM)
+LJFOLD(TOBIT SUB KNUM)
+LJFOLD(CONV ADD IRCONV_INT_NUM)
+LJFOLD(CONV SUB IRCONV_INT_NUM)
+LJFOLD(CONV ADD IRCONV_I64_NUM)
+LJFOLD(CONV SUB IRCONV_I64_NUM)
+LJFOLDF(narrow_convert)
+{
+ PHIBARRIER(fleft);
+ /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */
+ if (J->chain[IR_LOOP])
+ return NEXTFOLD;
+ lua_assert(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT);
+ return lj_opt_narrow_convert(J);
+}
+
+/* -- Integer algebraic simplifications ----------------------------------- */
+
+LJFOLD(ADD any KINT)
+LJFOLD(ADDOV any KINT)
+LJFOLD(SUBOV any KINT)
+LJFOLDF(simplify_intadd_k)
+{
+ if (fright->i == 0) /* i o 0 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(MULOV any KINT)
+LJFOLDF(simplify_intmul_k)
+{
+ if (fright->i == 0) /* i * 0 ==> 0 */
+ return RIGHTFOLD;
+ if (fright->i == 1) /* i * 1 ==> i */
+ return LEFTFOLD;
+ if (fright->i == 2) { /* i * 2 ==> i + i */
+ fins->o = IR_ADDOV;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any KINT)
+LJFOLDF(simplify_intsub_k)
+{
+ if (fright->i == 0) /* i - 0 ==> i */
+ return LEFTFOLD;
+ fins->o = IR_ADD; /* i - k ==> i + (-k) */
+ fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB KINT any)
+LJFOLD(SUB KINT64 any)
+LJFOLDF(simplify_intsub_kleft)
+{
+ if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) {
+ fins->o = IR_NEG; /* 0 - i ==> -i */
+ fins->op1 = fins->op2;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(ADD any KINT64)
+LJFOLDF(simplify_intadd_k64)
+{
+ if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any KINT64)
+LJFOLDF(simplify_intsub_k64)
+{
+ uint64_t k = ir_kint64(fright)->u64;
+ if (k == 0) /* i - 0 ==> i */
+ return LEFTFOLD;
+ fins->o = IR_ADD; /* i - k ==> i + (-k) */
+ fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k);
+ return RETRYFOLD;
+}
+
+static TRef simplify_intmul_k(jit_State *J, int32_t k)
+{
+ /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2.
+ ** But this is mainly intended for simple address arithmetic.
+ ** Also it's easier for the backend to optimize the original multiplies.
+ */
+ if (k == 1) { /* i * 1 ==> i */
+ return LEFTFOLD;
+ } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */
+ fins->o = IR_BSHL;
+ fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k));
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL any KINT)
+LJFOLDF(simplify_intmul_k32)
+{
+ if (fright->i == 0) /* i * 0 ==> 0 */
+ return INTFOLD(0);
+ else if (fright->i > 0)
+ return simplify_intmul_k(J, fright->i);
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL any KINT64)
+LJFOLDF(simplify_intmul_k64)
+{
+ if (ir_kint64(fright)->u64 == 0) /* i * 0 ==> 0 */
+ return INT64FOLD(0);
+#if LJ_64
+ /* NYI: SPLIT for BSHL and 32 bit backend support. */
+ else if (ir_kint64(fright)->u64 < 0x80000000u)
+ return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64);
+#endif
+ return NEXTFOLD;
+}
+
+LJFOLD(MOD any KINT)
+LJFOLDF(simplify_intmod_k)
+{
+ int32_t k = fright->i;
+ lua_assert(k != 0);
+ if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */
+ fins->o = IR_BAND;
+ fins->op2 = lj_ir_kint(J, k-1);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MOD KINT any)
+LJFOLDF(simplify_intmod_kleft)
+{
+ if (fleft->i == 0)
+ return INTFOLD(0);
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any any)
+LJFOLD(SUBOV any any)
+LJFOLDF(simplify_intsub)
+{
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */
+ return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB ADD any)
+LJFOLDF(simplify_intsubadd_leftcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */
+ return fleft->op2;
+ if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */
+ return fleft->op1;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB SUB any)
+LJFOLDF(simplify_intsubsub_leftcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) { /* (i - j) - i ==> 0 - j */
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ fins->op2 = fleft->op2;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any SUB)
+LJFOLDF(simplify_intsubsub_rightcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fright);
+ if (fins->op1 == fright->op1) /* i - (i - j) ==> j */
+ return fright->op2;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any ADD)
+LJFOLDF(simplify_intsubadd_rightcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fright);
+ if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */
+ fins->op2 = fright->op2;
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ return RETRYFOLD;
+ }
+ if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */
+ fins->op2 = fright->op1;
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB ADD ADD)
+LJFOLDF(simplify_intsubaddadd_cancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ PHIBARRIER(fright);
+ if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */
+ fins->op1 = fleft->op2;
+ fins->op2 = fright->op2;
+ return RETRYFOLD;
+ }
+ if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */
+ fins->op1 = fleft->op2;
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */
+ fins->op1 = fleft->op1;
+ fins->op2 = fright->op2;
+ return RETRYFOLD;
+ }
+ if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */
+ fins->op1 = fleft->op1;
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND any KINT)
+LJFOLD(BAND any KINT64)
+LJFOLDF(simplify_band_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i & 0 ==> 0 */
+ return RIGHTFOLD;
+ if (k == -1) /* i & -1 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BOR any KINT)
+LJFOLD(BOR any KINT64)
+LJFOLDF(simplify_bor_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i | 0 ==> i */
+ return LEFTFOLD;
+ if (k == -1) /* i | -1 ==> -1 */
+ return RIGHTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BXOR any KINT)
+LJFOLD(BXOR any KINT64)
+LJFOLDF(simplify_bxor_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i xor 0 ==> i */
+ return LEFTFOLD;
+ if (k == -1) { /* i xor -1 ==> ~i */
+ fins->o = IR_BNOT;
+ fins->op2 = 0;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL any KINT)
+LJFOLD(BSHR any KINT)
+LJFOLD(BSAR any KINT)
+LJFOLD(BROL any KINT)
+LJFOLD(BROR any KINT)
+LJFOLDF(simplify_shift_ik)
+{
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = (fright->i & mask);
+ if (k == 0) /* i o 0 ==> i */
+ return LEFTFOLD;
+ if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */
+ fins->o = IR_ADD;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ if (k != fright->i) { /* i o k ==> i o (k & mask) */
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD;
+ }
+#ifndef LJ_TARGET_UNIFYROT
+ if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */
+ fins->o = IR_BROL;
+ fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask);
+ return RETRYFOLD;
+ }
+#endif
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL any BAND)
+LJFOLD(BSHR any BAND)
+LJFOLD(BSAR any BAND)
+LJFOLD(BROL any BAND)
+LJFOLD(BROR any BAND)
+LJFOLDF(simplify_shift_andk)
+{
+ IRIns *irk = IR(fright->op2);
+ PHIBARRIER(fright);
+ if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
+ irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = irk->i & mask;
+ if (k == mask) {
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL KINT any)
+LJFOLD(BSHR KINT any)
+LJFOLD(BSHL KINT64 any)
+LJFOLD(BSHR KINT64 any)
+LJFOLDF(simplify_shift1_ki)
+{
+ int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
+ (int64_t)ir_k64(fleft)->u64;
+ if (k == 0) /* 0 o i ==> 0 */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSAR KINT any)
+LJFOLD(BROL KINT any)
+LJFOLD(BROR KINT any)
+LJFOLD(BSAR KINT64 any)
+LJFOLD(BROL KINT64 any)
+LJFOLD(BROR KINT64 any)
+LJFOLDF(simplify_shift2_ki)
+{
+ int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
+ (int64_t)ir_k64(fleft)->u64;
+ if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL BAND KINT)
+LJFOLD(BSHR BAND KINT)
+LJFOLD(BROL BAND KINT)
+LJFOLD(BROR BAND KINT)
+LJFOLDF(simplify_shiftk_andk)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft);
+ if (irk->o == IR_KINT) { /* (i & k1) o k2 ==> (i o k2) & (k1 o k2) */
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ fins->op1 = fleft->op1;
+ fins->op1 = (IRRef1)lj_opt_fold(J);
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ fins->ot = IRTI(IR_BAND);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND BSHL KINT)
+LJFOLD(BAND BSHR KINT)
+LJFOLDF(simplify_andk_shiftk)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT &&
+ kfold_intop(-1, irk->i, (IROp)fleft->o) == fright->i)
+ return LEFTFOLD; /* (i o k1) & k2 ==> i, if (-1 o k1) == k2 */
+ return NEXTFOLD;
+}
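
The condition (-1 o k1) == k2 means the mask k2 keeps every bit the shifted value can possibly have set, so the BAND changes nothing. A quick standalone check of that identity for a left shift (illustrative values only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  int32_t i = 0x12345678;
  int32_t k1 = 4;
  int32_t k2 = (int32_t)0xfffffff0u;           /* The candidate mask. */
  int32_t m1 = (int32_t)(0xffffffffu << k1);   /* (-1 << k1), done unsigned to avoid UB. */
  int32_t shifted = (int32_t)((uint32_t)i << k1);
  if (m1 == k2)                                /* Same condition as the fold rule. */
    printf("(i<<%d) & k2 == i<<%d : %s\n", k1, k1,
           ((shifted & k2) == shifted) ? "yes" : "no");
  return 0;
}
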
+
+/* -- Reassociation ------------------------------------------------------- */
+
+LJFOLD(ADD ADD KINT)
+LJFOLD(MUL MUL KINT)
+LJFOLD(BAND BAND KINT)
+LJFOLD(BOR BOR KINT)
+LJFOLD(BXOR BXOR KINT)
+LJFOLDF(reassoc_intarith_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT) {
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(ADD ADD KINT64)
+LJFOLD(MUL MUL KINT64)
+LJFOLD(BAND BAND KINT64)
+LJFOLD(BOR BOR KINT64)
+LJFOLD(BXOR BXOR KINT64)
+LJFOLDF(reassoc_intarith_k64)
+{
+#if LJ_HASFFI || LJ_64
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT64) {
+ uint64_t k = kfold_int64arith(ir_k64(irk)->u64,
+ ir_k64(fright)->u64, (IROp)fins->o);
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint64(J, k);
+ return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
+ }
+ return NEXTFOLD;
+#else
+ UNUSED(J); lua_assert(0); return FAILFOLD;
+#endif
+}
+
+LJFOLD(MIN MIN any)
+LJFOLD(MAX MAX any)
+LJFOLD(BAND BAND any)
+LJFOLD(BOR BOR any)
+LJFOLDF(reassoc_dup)
+{
+ if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
+ return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */
+ return NEXTFOLD;
+}
+
+LJFOLD(BXOR BXOR any)
+LJFOLDF(reassoc_bxor)
+{
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */
+ return fleft->op2;
+ if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL BSHL KINT)
+LJFOLD(BSHR BSHR KINT)
+LJFOLD(BSAR BSAR KINT)
+LJFOLD(BROL BROL KINT)
+LJFOLD(BROR BROR KINT)
+LJFOLDF(reassoc_shift)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */
+ if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = (irk->i & mask) + (fright->i & mask);
+ if (k > mask) { /* Combined shift too wide? */
+ if (fins->o == IR_BSHL || fins->o == IR_BSHR)
+ return mask == 31 ? INTFOLD(0) : INT64FOLD(0);
+ else if (fins->o == IR_BSAR)
+ k = mask;
+ else
+ k &= mask;
+ }
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
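
The overflow branch above mirrors ordinary shift behaviour once the combined count exceeds the word width: logical shifts drain to zero, an arithmetic right shift saturates at the sign bit, and rotates simply wrap. A small standalone illustration of the first two cases (assumes the usual arithmetic >> for signed values, which C leaves implementation-defined):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  int32_t x = -1000000;                      /* Negative, to show the BSAR case. */
  uint32_t lsr = ((uint32_t)x >> 20) >> 20;  /* Combined logical shift of 40 ==> 0. */
  int32_t  asr = (x >> 20) >> 20;            /* Arithmetic: behaves like x >> 31. */
  printf("BSHR-style: %u\n", lsr);
  printf("BSAR-style: %d (x >> 31 = %d)\n", asr, x >> 31);
  return 0;
}
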
+
+LJFOLD(MIN MIN KNUM)
+LJFOLD(MAX MAX KNUM)
+LJFOLD(MIN MIN KINT)
+LJFOLD(MAX MAX KINT)
+LJFOLDF(reassoc_minmax_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KNUM) {
+ lua_Number a = ir_knum(irk)->n;
+ lua_Number y = lj_vm_foldarith(a, knumright, fins->o - IR_ADD);
+ if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_knum(J, y);
+ return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
+ } else if (irk->o == IR_KINT) {
+ int32_t a = irk->i;
+ int32_t y = kfold_intop(a, fright->i, fins->o);
+ if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, y);
+ return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN MAX any)
+LJFOLD(MAX MIN any)
+LJFOLDF(reassoc_minmax_left)
+{
+ if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
+ return RIGHTFOLD; /* (b o1 a) o2 b ==> b; (a o1 b) o2 b ==> b */
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN any MAX)
+LJFOLD(MAX any MIN)
+LJFOLDF(reassoc_minmax_right)
+{
+ if (fins->op1 == fright->op1 || fins->op1 == fright->op2)
+ return LEFTFOLD; /* a o2 (a o1 b) ==> a; a o2 (b o1 a) ==> a */
+ return NEXTFOLD;
+}
+
+/* -- Array bounds check elimination -------------------------------------- */
+
+/* Eliminate ABC across PHIs to handle t[i-1] forwarding case.
+** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists.
+** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but needs better disambig.
+*/
+LJFOLD(ABC any ADD)
+LJFOLDF(abc_fwd)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
+ if (irref_isk(fright->op2)) {
+ IRIns *add2 = IR(fright->op1);
+ if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+ IR(fright->op2)->i == -IR(add2->op2)->i) {
+ IRRef ref = J->chain[IR_ABC];
+ IRRef lim = add2->op1;
+ if (fins->op1 > lim) lim = fins->op1;
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == fins->op1 && ir->op2 == add2->op1)
+ return DROPFOLD;
+ ref = ir->prev;
+ }
+ }
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* Eliminate ABC for constants.
+** ABC(asize, k1), ABC(asize, k2) ==> ABC(asize, max(k1, k2))
+** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2.
+*/
+LJFOLD(ABC any KINT)
+LJFOLDF(abc_k)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
+ IRRef ref = J->chain[IR_ABC];
+ IRRef asize = fins->op1;
+ while (ref > asize) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == asize && irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (fright->i > k)
+ ir->op2 = fins->op2;
+ return DROPFOLD;
+ }
+ ref = ir->prev;
+ }
+ return EMITFOLD; /* Already performed CSE. */
+ }
+ return NEXTFOLD;
+}
+
+/* Eliminate invariant ABC inside loop. */
+LJFOLD(ABC any any)
+LJFOLDF(abc_invar)
+{
+ if (!irt_isint(fins->t) && J->chain[IR_LOOP]) /* Currently marked as PTR. */
+ return DROPFOLD;
+ return NEXTFOLD;
+}
+
+/* -- Commutativity ------------------------------------------------------- */
+
+/* The refs of commutative ops are canonicalized. Lower refs go to the right.
+** Rationale behind this:
+** - It (also) moves constants to the right.
+** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices).
+** - It helps CSE to find more matches.
+** - The assembler generates better code with constants at the right.
+*/
+
+LJFOLD(ADD any any)
+LJFOLD(MUL any any)
+LJFOLD(ADDOV any any)
+LJFOLD(MULOV any any)
+LJFOLDF(comm_swap)
+{
+ if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
+ IRRef1 tmp = fins->op1;
+ fins->op1 = fins->op2;
+ fins->op2 = tmp;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(EQ any any)
+LJFOLD(NE any any)
+LJFOLDF(comm_equal)
+{
+ /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
+ return CONDFOLD(fins->o == IR_EQ);
+ return fold_comm_swap(J);
+}
+
+LJFOLD(LT any any)
+LJFOLD(GE any any)
+LJFOLD(LE any any)
+LJFOLD(GT any any)
+LJFOLD(ULT any any)
+LJFOLD(UGE any any)
+LJFOLD(ULE any any)
+LJFOLD(UGT any any)
+LJFOLDF(comm_comp)
+{
+ /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail */
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
+ return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1);
+ if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
+ IRRef1 tmp = fins->op1;
+ fins->op1 = fins->op2;
+ fins->op2 = tmp;
+ fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND any any)
+LJFOLD(BOR any any)
+LJFOLD(MIN any any)
+LJFOLD(MAX any any)
+LJFOLDF(comm_dup)
+{
+ if (fins->op1 == fins->op2) /* x o x ==> x */
+ return LEFTFOLD;
+ return fold_comm_swap(J);
+}
+
+LJFOLD(BXOR any any)
+LJFOLDF(comm_bxor)
+{
+ if (fins->op1 == fins->op2) /* i xor i ==> 0 */
+ return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
+ return fold_comm_swap(J);
+}
+
+/* -- Simplification of compound expressions ------------------------------ */
+
+static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p)
+{
+ int32_t k;
+ switch (irt_type(ir->t)) {
+ case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p);
+ case IRT_I8: k = (int32_t)*(int8_t *)p; break;
+ case IRT_U8: k = (int32_t)*(uint8_t *)p; break;
+ case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break;
+ case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break;
+ case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break;
+ case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p);
+ default: return 0;
+ }
+ return lj_ir_kint(J, k);
+}
+
+/* Turn: string.sub(str, a, b) == kstr
+** into: string.byte(str, a) == string.byte(kstr, 1) etc.
+** Note: this creates unaligned XLOADs on x86/x64.
+*/
+LJFOLD(EQ SNEW KGC)
+LJFOLD(NE SNEW KGC)
+LJFOLDF(merge_eqne_snew_kgc)
+{
+ GCstr *kstr = ir_kstr(fright);
+ int32_t len = (int32_t)kstr->len;
+ lua_assert(irt_isstr(fins->t));
+
+#if LJ_TARGET_UNALIGNED
+#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */
+#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */
+#else
+#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */
+#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */
+#endif
+
+ PHIBARRIER(fleft);
+ if (len <= FOLD_SNEW_MAX_LEN) {
+ IROp op = (IROp)fins->o;
+ IRRef strref = fleft->op1;
+ lua_assert(IR(strref)->o == IR_STRREF);
+ if (op == IR_EQ) {
+ emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len));
+ /* Caveat: fins/fleft/fright is no longer valid after emitir. */
+ } else {
+ /* NE is not expanded since this would need an OR of two conds. */
+ if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */
+ return NEXTFOLD;
+ if (IR(fleft->op2)->i != len)
+ return DROPFOLD;
+ }
+ if (len > 0) {
+ /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */
+ uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) :
+ len == 2 ? IRT(IR_XLOAD, IRT_U16) :
+ IRTI(IR_XLOAD));
+ TRef tmp = emitir(ot, strref,
+ IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0));
+ TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr));
+ if (len == 3)
+ tmp = emitir(IRTI(IR_BAND), tmp,
+ lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00)));
+ fins->op1 = (IRRef1)tmp;
+ fins->op2 = (IRRef1)val;
+ fins->ot = (IROpT)IRTGI(op);
+ return RETRYFOLD;
+ } else {
+ return DROPFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
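
For a 3-character constant the rule loads 4 bytes (safe, since string data carries a trailing NUL) and masks away the byte past the end. A standalone little-endian sketch of that comparison (not part of the patch); the real code selects the mask with LJ_ENDIAN_SELECT and tags the load READONLY/UNALIGNED:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
  const char *str  = "abcdef";          /* Non-constant side; starts with "abc". */
  const char *kstr = "abc";             /* Constant side: 3 chars plus NUL. */
  uint32_t a, b;
  memcpy(&a, str, 4);                   /* Unaligned-safe 4-byte loads. */
  memcpy(&b, kstr, 4);                  /* Reads the trailing NUL, still in bounds. */
  a &= 0x00ffffffu;                     /* Little-endian: drop the 4th byte. */
  b &= 0x00ffffffu;
  printf("first 3 bytes equal: %s\n", (a == b) ? "yes" : "no");
  return 0;
}
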
+
+/* -- Loads --------------------------------------------------------------- */
+
+/* Loads cannot be folded or passed on to CSE in general.
+** Alias analysis is needed to check for forwarding opportunities.
+**
+** Caveat: *all* loads must be listed here or they end up at CSE!
+*/
+
+LJFOLD(ALOAD any)
+LJFOLDX(lj_opt_fwd_aload)
+
+/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */
+LJFOLD(HLOAD KKPTR)
+LJFOLDF(kfold_hload_kkptr)
+{
+ UNUSED(J);
+ lua_assert(ir_kptr(fleft) == niltvg(J2G(J)));
+ return TREF_NIL;
+}
+
+LJFOLD(HLOAD any)
+LJFOLDX(lj_opt_fwd_hload)
+
+LJFOLD(ULOAD any)
+LJFOLDX(lj_opt_fwd_uload)
+
+LJFOLD(CALLL any IRCALL_lj_tab_len)
+LJFOLDX(lj_opt_fwd_tab_len)
+
+/* Upvalue refs are really loads, but there are no corresponding stores.
+** So CSE is ok for them, except for UREFO across a GC step (see below).
+** If the referenced function is const, its upvalue addresses are const, too.
+** This can be used to improve CSE by looking for the same address,
+** even if the upvalues originate from a different function.
+*/
+LJFOLD(UREFO KGC any)
+LJFOLD(UREFC KGC any)
+LJFOLDF(cse_uref)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef ref = J->chain[fins->o];
+ GCfunc *fn = ir_kfunc(fleft);
+ GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)]));
+ while (ref > 0) {
+ IRIns *ir = IR(ref);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn2 = ir_kfunc(IR(ir->op1));
+ if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) {
+ if (fins->o == IR_UREFO && gcstep_barrier(J, ref))
+ break;
+ return ref;
+ }
+ }
+ ref = ir->prev;
+ }
+ }
+ return EMITFOLD;
+}
+
+LJFOLD(HREFK any any)
+LJFOLDX(lj_opt_fwd_hrefk)
+
+LJFOLD(HREF TNEW any)
+LJFOLDF(fwd_href_tnew)
+{
+ if (lj_opt_fwd_href_nokey(J))
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ return NEXTFOLD;
+}
+
+LJFOLD(HREF TDUP KPRI)
+LJFOLD(HREF TDUP KGC)
+LJFOLD(HREF TDUP KNUM)
+LJFOLDF(fwd_href_tdup)
+{
+ TValue keyv;
+ lj_ir_kvalue(J->L, &keyv, fright);
+ if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) &&
+ lj_opt_fwd_href_nokey(J))
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ return NEXTFOLD;
+}
+
+/* We can safely FOLD/CSE array/hash refs and field loads, since there
+** are no corresponding stores. But we need to check for any NEWREF with
+** an aliased table, as it may invalidate all of the pointers and fields.
+** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
+** FLOADs. And NEWREF itself is treated like a store (see below).
+*/
+LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
+LJFOLDF(fload_tab_tnew_asize)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD(fleft->op1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TNEW IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_tnew_hmask)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((1 << fleft->op2)-1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE)
+LJFOLDF(fload_tab_tdup_asize)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TDUP IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_tdup_hmask)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask);
+ return NEXTFOLD;
+}
+
+LJFOLD(HREF any any)
+LJFOLD(FLOAD any IRFL_TAB_ARRAY)
+LJFOLD(FLOAD any IRFL_TAB_NODE)
+LJFOLD(FLOAD any IRFL_TAB_ASIZE)
+LJFOLD(FLOAD any IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_ah)
+{
+ TRef tr = lj_opt_cse(J);
+ return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD;
+}
+
+/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */
+LJFOLD(FLOAD KGC IRFL_STR_LEN)
+LJFOLDF(fload_str_len_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kstr(fleft)->len);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD SNEW IRFL_STR_LEN)
+LJFOLDF(fload_str_len_snew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ PHIBARRIER(fleft);
+ return fleft->op2;
+ }
+ return NEXTFOLD;
+}
+
+/* The C type ID of cdata objects is immutable. */
+LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID)
+LJFOLDF(fload_cdata_typeid_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kcdata(fleft)->ctypeid);
+ return NEXTFOLD;
+}
+
+/* Get the contents of immutable cdata objects. */
+LJFOLD(FLOAD KGC IRFL_CDATA_PTR)
+LJFOLD(FLOAD KGC IRFL_CDATA_INT)
+LJFOLD(FLOAD KGC IRFL_CDATA_INT64)
+LJFOLDF(fload_cdata_int64_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ void *p = cdataptr(ir_kcdata(fleft));
+ if (irt_is64(fins->t))
+ return INT64FOLD(*(uint64_t *)p);
+ else
+ return INTFOLD(*(int32_t *)p);
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD CNEW IRFL_CDATA_CTYPEID)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_CTYPEID)
+LJFOLDF(fload_cdata_typeid_cnew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */
+ return NEXTFOLD;
+}
+
+/* Pointer, int and int64 cdata objects are immutable. */
+LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_INT)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64)
+LJFOLDF(fload_cdata_ptr_int64_cnew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return fleft->op2; /* Fold even across PHI to avoid allocations. */
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD any IRFL_STR_LEN)
+LJFOLD(FLOAD any IRFL_CDATA_CTYPEID)
+LJFOLD(FLOAD any IRFL_CDATA_PTR)
+LJFOLD(FLOAD any IRFL_CDATA_INT)
+LJFOLD(FLOAD any IRFL_CDATA_INT64)
+LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */
+LJFOLDX(lj_opt_cse)
+
+/* All other field loads need alias analysis. */
+LJFOLD(FLOAD any any)
+LJFOLDX(lj_opt_fwd_fload)
+
+/* This is for LOOP only. Recording handles SLOADs internally. */
+LJFOLD(SLOAD any any)
+LJFOLDF(fwd_sload)
+{
+ if ((fins->op2 & IRSLOAD_FRAME)) {
+ TRef tr = lj_opt_cse(J);
+ return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr;
+ } else {
+ lua_assert(J->slot[fins->op1] != 0);
+ return J->slot[fins->op1];
+ }
+}
+
+/* Only fold for KKPTR. The pointer _and_ the contents must be const. */
+LJFOLD(XLOAD KKPTR any)
+LJFOLDF(xload_kptr)
+{
+ TRef tr = kfold_xload(J, fins, ir_kptr(fleft));
+ return tr ? tr : NEXTFOLD;
+}
+
+LJFOLD(XLOAD any any)
+LJFOLDX(lj_opt_fwd_xload)
+
+/* -- Write barriers ------------------------------------------------------ */
+
+/* Write barriers are amenable to CSE, but not across any incremental
+** GC steps.
+**
+** The same logic applies to open upvalue references, because a stack
+** may be resized during a GC step (not the current stack, but maybe that
+** of a coroutine).
+*/
+LJFOLD(TBAR any)
+LJFOLD(OBAR any any)
+LJFOLD(UREFO any any)
+LJFOLDF(barrier_tab)
+{
+ TRef tr = lj_opt_cse(J);
+ if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */
+ return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */
+ return tr;
+}
+
+LJFOLD(TBAR TNEW)
+LJFOLD(TBAR TDUP)
+LJFOLDF(barrier_tnew_tdup)
+{
+ /* New tables are always white and never need a barrier. */
+ if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */
+ return NEXTFOLD;
+ return DROPFOLD;
+}
+
+/* -- Stores and allocations ---------------------------------------------- */
+
+/* Stores and allocations cannot be folded or passed on to CSE in general.
+** But some stores can be eliminated with dead-store elimination (DSE).
+**
+** Caveat: *all* stores and allocs must be listed here or they end up at CSE!
+*/
+
+LJFOLD(ASTORE any any)
+LJFOLD(HSTORE any any)
+LJFOLDX(lj_opt_dse_ahstore)
+
+LJFOLD(USTORE any any)
+LJFOLDX(lj_opt_dse_ustore)
+
+LJFOLD(FSTORE any any)
+LJFOLDX(lj_opt_dse_fstore)
+
+LJFOLD(XSTORE any any)
+LJFOLDX(lj_opt_dse_xstore)
+
+LJFOLD(NEWREF any any) /* Treated like a store. */
+LJFOLD(CALLS any any)
+LJFOLD(CALLL any any) /* Safeguard fallback. */
+LJFOLD(CALLXS any any)
+LJFOLD(XBAR)
+LJFOLD(RETF any any) /* Modifies BASE. */
+LJFOLD(TNEW any any)
+LJFOLD(TDUP any)
+LJFOLD(CNEW any any)
+LJFOLD(XSNEW any any)
+LJFOLDX(lj_ir_emit)
+
+/* ------------------------------------------------------------------------ */
+
+/* Every entry in the generated hash table is a 32 bit pattern:
+**
+** xxxxxxxx iiiiiii lllllll rrrrrrrrrr
+**
+** xxxxxxxx = 8 bit index into fold function table
+** iiiiiii = 7 bit folded instruction opcode
+** lllllll = 7 bit left instruction opcode
+** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field
+*/
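
lj_opt_fold() below builds its lookup key with the same layout (opcode << 17, left opcode << 10, right opcode or literal in the low 10 bits) and takes the fold-function index from the top byte of the matching table entry. A tiny standalone sketch of packing and unpacking such an entry, with made-up field values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t funcidx = 42;    /*  8-bit index into the fold function table (made up). */
  uint32_t ins_op  = 0x23;  /*  7-bit folded instruction opcode (made up). */
  uint32_t left_op = 0x15;  /*  7-bit left operand opcode (made up). */
  uint32_t right   = 0x3a;  /* 10-bit right opcode or literal field (made up). */
  uint32_t entry = (funcidx << 24) | (ins_op << 17) | (left_op << 10) | right;
  printf("entry = 0x%08x\n", entry);
  printf("func  = %u\n",     entry >> 24);            /* fh >> 24 in lj_opt_fold(). */
  printf("ins   = 0x%02x\n", (entry >> 17) & 0x7f);
  printf("left  = 0x%02x\n", (entry >> 10) & 0x7f);
  printf("right = 0x%03x\n", entry & 0x3ff);
  return 0;
}
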
+
+#include "lj_folddef.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Fold IR instruction. */
+TRef LJ_FASTCALL lj_opt_fold(jit_State *J)
+{
+ uint32_t key, any;
+ IRRef ref;
+
+ if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) {
+ lua_assert(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) |
+ JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT);
+ /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */
+ if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N)
+ return lj_opt_cse(J);
+
+ /* No FOLD, forwarding or CSE? Emit raw IR for loads, except for SLOAD. */
+ if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE)) !=
+ (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE) &&
+ irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD)
+ return lj_ir_emit(J);
+
+ /* No FOLD or DSE? Emit raw IR for stores. */
+ if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_DSE)) !=
+ (JIT_F_OPT_FOLD|JIT_F_OPT_DSE) &&
+ irm_kind(lj_ir_mode[fins->o]) == IRM_S)
+ return lj_ir_emit(J);
+ }
+
+ /* Fold engine start/retry point. */
+retry:
+ /* Construct key from opcode and operand opcodes (unless literal/none). */
+ key = ((uint32_t)fins->o << 17);
+ if (fins->op1 >= J->cur.nk) {
+ key += (uint32_t)IR(fins->op1)->o << 10;
+ *fleft = *IR(fins->op1);
+ }
+ if (fins->op2 >= J->cur.nk) {
+ key += (uint32_t)IR(fins->op2)->o;
+ *fright = *IR(fins->op2);
+ } else {
+ key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */
+ }
+
+ /* Check for a match in order from most specific to least specific. */
+ any = 0;
+ for (;;) {
+ uint32_t k = key | (any & 0x1ffff);
+ uint32_t h = fold_hashkey(k);
+ uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */
+ if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) {
+ ref = (IRRef)tref_ref(fold_func[fh >> 24](J));
+ if (ref != NEXTFOLD)
+ break;
+ }
+ if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */
+ return lj_opt_cse(J);
+ any = (any | (any >> 10)) ^ 0xffc00;
+ }
+
+ /* Return value processing, ordered by frequency. */
+ if (LJ_LIKELY(ref >= MAX_FOLD))
+ return TREF(ref, irt_t(IR(ref)->t));
+ if (ref == RETRYFOLD)
+ goto retry;
+ if (ref == KINTFOLD)
+ return lj_ir_kint(J, fins->i);
+ if (ref == FAILFOLD)
+ lj_trace_err(J, LJ_TRERR_GFAIL);
+ lua_assert(ref == DROPFOLD);
+ return REF_DROP;
+}
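
The wildcard search above steps the key from most to least specific by OR-ing growing masks into the operand fields. A standalone sketch (illustration only) that replays the 'any' update and prints the mask applied at each lookup, interpreting bits 10-16 as the left field and bits 0-9 as the right field per the table layout above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t any = 0;
  int step;
  for (step = 0; ; step++) {
    uint32_t mask = any & 0x1ffff;      /* Bits OR'd into the lookup key. */
    printf("step %d: mask 0x%05x (left bits 0x%05x, right bits 0x%03x)\n",
           step, mask, mask & 0x1fc00, mask & 0x3ff);
    if (any == 0xfffff) break;          /* Same exit test as lj_opt_fold(). */
    any = (any | (any >> 10)) ^ 0xffc00;
  }
  return 0;
}
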
+
+/* -- Common-Subexpression Elimination ------------------------------------ */
+
+/* CSE an IR instruction. This is very fast due to the skip-list chains. */
+TRef LJ_FASTCALL lj_opt_cse(jit_State *J)
+{
+ /* Avoid narrow to wide store-to-load forwarding stall */
+ IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
+ IROp op = fins->o;
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ /* Limited search for same operands in per-opcode chain. */
+ IRRef ref = J->chain[op];
+ IRRef lim = fins->op1;
+ if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. */
+ while (ref > lim) {
+ if (IR(ref)->op12 == op12)
+ return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */
+ ref = IR(ref)->prev;
+ }
+ }
+ /* Otherwise emit IR (inlined for speed). */
+ {
+ IRRef ref = lj_ir_nextins(J);
+ IRIns *ir = IR(ref);
+ ir->prev = J->chain[op];
+ ir->op12 = op12;
+ J->chain[op] = (IRRef1)ref;
+ ir->o = fins->o;
+ J->guardemit.irt |= fins->t.irt;
+ return TREF(ref, irt_t((ir->t = fins->t)));
+ }
+}
+
+/* CSE with explicit search limit. */
+TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim)
+{
+ IRRef ref = J->chain[fins->o];
+ IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
+ while (ref > lim) {
+ if (IR(ref)->op12 == op12)
+ return ref;
+ ref = IR(ref)->prev;
+ }
+ return lj_ir_emit(J);
+}
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fleft
+#undef fright
+#undef knumleft
+#undef knumright
+#undef emitir
+
+#endif
diff --git a/3rdparty/lua/src/lj_opt_loop.c b/3rdparty/lua/src/lj_opt_loop.c
index b6a73a5..3a119f4 100644
--- a/3rdparty/lua/src/lj_opt_loop.c
+++ b/3rdparty/lua/src/lj_opt_loop.c
@@ -1,436 +1,437 @@
-/*
-** LOOP: Loop Optimizations.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_opt_loop_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_iropt.h"
-#include "lj_trace.h"
-#include "lj_snap.h"
-#include "lj_vm.h"
-
-/* Loop optimization:
-**
-** Traditional Loop-Invariant Code Motion (LICM) splits the instructions
-** of a loop into invariant and variant instructions. The invariant
-** instructions are hoisted out of the loop and only the variant
-** instructions remain inside the loop body.
-**
-** Unfortunately LICM is mostly useless for compiling dynamic languages.
-** The IR has many guards and most of the subsequent instructions are
-** control-dependent on them. The first non-hoistable guard would
-** effectively prevent hoisting of all subsequent instructions.
-**
-** That's why we use a special form of unrolling using copy-substitution,
-** combined with redundancy elimination:
-**
-** The recorded instruction stream is re-emitted to the compiler pipeline
-** with substituted operands. The substitution table is filled with the
-** refs returned by re-emitting each instruction. This can be done
-** on-the-fly, because the IR is in strict SSA form, where every ref is
-** defined before its use.
-**
-** This approach generates two code sections, separated by the LOOP
-** instruction:
-**
-** 1. The recorded instructions form a kind of pre-roll for the loop. It
-** contains a mix of invariant and variant instructions and performs
-** exactly one loop iteration (but not necessarily the 1st iteration).
-**
-** 2. The loop body contains only the variant instructions and performs
-** all remaining loop iterations.
-**
-** On first sight that looks like a waste of space, because the variant
-** instructions are present twice. But the key insight is that the
-** pre-roll honors the control-dependencies for *both* the pre-roll itself
-** *and* the loop body!
-**
-** It also means one doesn't have to explicitly model control-dependencies
-** (which, BTW, wouldn't help LICM much). And it's much easier to
-** integrate sparse snapshotting with this approach.
-**
-** One of the nicest aspects of this approach is that all of the
-** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be
-** reused with only minor restrictions (e.g. one should not fold
-** instructions across loop-carried dependencies).
-**
-** But in general all optimizations can be applied which only need to look
-** backwards into the generated instruction stream. At any point in time
-** during the copy-substitution process this contains both a static loop
-** iteration (the pre-roll) and a dynamic one (from the to-be-copied
-** instruction up to the end of the partial loop body).
-**
-** Since control-dependencies are implicitly kept, CSE also applies to all
-** kinds of guards. The major advantage is that all invariant guards can
-** be hoisted, too.
-**
-** Load/store forwarding works across loop iterations, too. This is
-** important if loop-carried dependencies are kept in upvalues or tables.
-** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may
-** become a forwarded loop-recurrence after inlining.
-**
-** Since the IR is in SSA form, loop-carried dependencies have to be
-** modeled with PHI instructions. The potential candidates for PHIs are
-** collected on-the-fly during copy-substitution. After eliminating the
-** redundant ones, PHI instructions are emitted *below* the loop body.
-**
-** Note that this departure from traditional SSA form doesn't change the
-** semantics of the PHI instructions themselves. But it greatly simplifies
-** on-the-fly generation of the IR and the machine code.
-*/
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Pass IR on to next optimization in chain (FOLD). */
-#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
-
-/* Emit raw IR without passing through optimizations. */
-#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
-
-/* -- PHI elimination ----------------------------------------------------- */
-
-/* Emit or eliminate collected PHIs. */
-static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
- SnapNo onsnap)
-{
- int passx = 0;
- IRRef i, j, nslots;
- IRRef invar = J->chain[IR_LOOP];
- /* Pass #1: mark redundant and potentially redundant PHIs. */
- for (i = 0, j = 0; i < nphi; i++) {
- IRRef lref = phi[i];
- IRRef rref = subst[lref];
- if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */
- irt_clearphi(IR(lref)->t);
- } else {
- phi[j++] = (IRRef1)lref;
- if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
- /* Quick check for simple recurrences failed, need pass2. */
- irt_setmark(IR(lref)->t);
- passx = 1;
- }
- }
- }
- nphi = j;
- /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
- if (passx) {
- SnapNo s;
- for (i = J->cur.nins-1; i > invar; i--) {
- IRIns *ir = IR(i);
- if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
- if (!irref_isk(ir->op1)) {
- irt_clearmark(IR(ir->op1)->t);
- if (ir->op1 < invar &&
- ir->o >= IR_CALLN && ir->o <= IR_CARG) { /* ORDER IR */
- ir = IR(ir->op1);
- while (ir->o == IR_CARG) {
- if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
- if (irref_isk(ir->op1)) break;
- ir = IR(ir->op1);
- irt_clearmark(ir->t);
- }
- }
- }
- }
- for (s = J->cur.nsnap-1; s >= onsnap; s--) {
- SnapShot *snap = &J->cur.snap[s];
- SnapEntry *map = &J->cur.snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- for (n = 0; n < nent; n++) {
- IRRef ref = snap_ref(map[n]);
- if (!irref_isk(ref)) irt_clearmark(IR(ref)->t);
- }
- }
- }
- /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
- nslots = J->baseslot+J->maxslot;
- for (i = 1; i < nslots; i++) {
- IRRef ref = tref_ref(J->slot[i]);
- while (!irref_isk(ref) && ref != subst[ref]) {
- IRIns *ir = IR(ref);
- irt_clearmark(ir->t); /* Unmark potential uses, too. */
- if (irt_isphi(ir->t) || irt_ispri(ir->t))
- break;
- irt_setphi(ir->t);
- if (nphi >= LJ_MAX_PHI)
- lj_trace_err(J, LJ_TRERR_PHIOV);
- phi[nphi++] = (IRRef1)ref;
- ref = subst[ref];
- if (ref > invar)
- break;
- }
- }
- /* Pass #4: propagate non-redundant PHIs. */
- while (passx) {
- passx = 0;
- for (i = 0; i < nphi; i++) {
- IRRef lref = phi[i];
- IRIns *ir = IR(lref);
- if (!irt_ismarked(ir->t)) { /* Propagate only from unmarked PHIs. */
- IRIns *irr = IR(subst[lref]);
- if (irt_ismarked(irr->t)) { /* Right ref points to other PHI? */
- irt_clearmark(irr->t); /* Mark that PHI as non-redundant. */
- passx = 1; /* Retry. */
- }
- }
- }
- }
- /* Pass #5: emit PHI instructions or eliminate PHIs. */
- for (i = 0; i < nphi; i++) {
- IRRef lref = phi[i];
- IRIns *ir = IR(lref);
- if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */
- IRRef rref = subst[lref];
- if (rref > invar)
- irt_setphi(IR(rref)->t);
- emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
- } else { /* Otherwise eliminate PHI. */
- irt_clearmark(ir->t);
- irt_clearphi(ir->t);
- }
- }
-}
-
-/* -- Loop unrolling using copy-substitution ------------------------------ */
-
-/* Copy-substitute snapshot. */
-static void loop_subst_snap(jit_State *J, SnapShot *osnap,
- SnapEntry *loopmap, IRRef1 *subst)
-{
- SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs];
- SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)];
- MSize nmapofs;
- MSize on, ln, nn, onent = osnap->nent;
- BCReg nslots = osnap->nslots;
- SnapShot *snap = &J->cur.snap[J->cur.nsnap];
- if (irt_isguard(J->guardemit)) { /* Guard in between? */
- nmapofs = J->cur.nsnapmap;
- J->cur.nsnap++; /* Add new snapshot. */
- } else { /* Otherwise overwrite previous snapshot. */
- snap--;
- nmapofs = snap->mapofs;
- }
- J->guardemit.irt = 0;
- /* Setup new snapshot. */
- snap->mapofs = (uint16_t)nmapofs;
- snap->ref = (IRRef1)J->cur.nins;
- snap->nslots = nslots;
- snap->topslot = osnap->topslot;
- snap->count = 0;
- nmap = &J->cur.snapmap[nmapofs];
- /* Substitute snapshot slots. */
- on = ln = nn = 0;
- while (on < onent) {
- SnapEntry osn = omap[on], lsn = loopmap[ln];
- if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */
- nmap[nn++] = lsn;
- ln++;
- } else { /* Copy substituted slot from snapshot map. */
- if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */
- if (!irref_isk(snap_ref(osn)))
- osn = snap_setref(osn, subst[snap_ref(osn)]);
- nmap[nn++] = osn;
- on++;
- }
- }
- while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */
- nmap[nn++] = loopmap[ln++];
- snap->nent = (uint8_t)nn;
- omap += onent;
- nmap += nn;
- while (omap < nextmap) /* Copy PC + frame links. */
- *nmap++ = *omap++;
- J->cur.nsnapmap = (uint16_t)(nmap - J->cur.snapmap);
-}
-
-/* Unroll loop. */
-static void loop_unroll(jit_State *J)
-{
- IRRef1 phi[LJ_MAX_PHI];
- uint32_t nphi = 0;
- IRRef1 *subst;
- SnapNo onsnap;
- SnapShot *osnap, *loopsnap;
- SnapEntry *loopmap, *psentinel;
- IRRef ins, invar;
-
- /* Use temp buffer for substitution table.
- ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
- ** Caveat: don't call into the VM or run the GC or the buffer may be gone.
- */
- invar = J->cur.nins;
- subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf,
- (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS;
- subst[REF_BASE] = REF_BASE;
-
- /* LOOP separates the pre-roll from the loop body. */
- emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0);
-
- /* Grow snapshot buffer and map for copy-substituted snapshots.
- ** Need up to twice the number of snapshots minus #0 and loop snapshot.
- ** Need up to twice the number of entries plus fallback substitutions
- ** from the loop snapshot entries for each new snapshot.
- ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap!
- */
- onsnap = J->cur.nsnap;
- lj_snap_grow_buf(J, 2*onsnap-2);
- lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent);
-
- /* The loop snapshot is used for fallback substitutions. */
- loopsnap = &J->cur.snap[onsnap-1];
- loopmap = &J->cur.snapmap[loopsnap->mapofs];
- /* The PC of snapshot #0 and the loop snapshot must match. */
- psentinel = &loopmap[loopsnap->nent];
- lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]);
- *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */
-
- /* Start substitution with snapshot #1 (#0 is empty for root traces). */
- osnap = &J->cur.snap[1];
-
- /* Copy and substitute all recorded instructions and snapshots. */
- for (ins = REF_FIRST; ins < invar; ins++) {
- IRIns *ir;
- IRRef op1, op2;
-
- if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */
- loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */
-
- /* Substitute instruction operands. */
- ir = IR(ins);
- op1 = ir->op1;
- if (!irref_isk(op1)) op1 = subst[op1];
- op2 = ir->op2;
- if (!irref_isk(op2)) op2 = subst[op2];
- if (irm_kind(lj_ir_mode[ir->o]) == IRM_N &&
- op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */
- subst[ins] = (IRRef1)ins; /* Shortcut. */
- } else {
- /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */
- IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */
- IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2));
- subst[ins] = (IRRef1)ref;
- if (ref != ins) {
- IRIns *irr = IR(ref);
- if (ref < invar) { /* Loop-carried dependency? */
- /* Potential PHI? */
- if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) {
- irt_setphi(irr->t);
- if (nphi >= LJ_MAX_PHI)
- lj_trace_err(J, LJ_TRERR_PHIOV);
- phi[nphi++] = (IRRef1)ref;
- }
- /* Check all loop-carried dependencies for type instability. */
- if (!irt_sametype(t, irr->t)) {
- if (irt_isinteger(t) && irt_isinteger(irr->t))
- continue;
- else if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */
- ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT));
- else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */
- ref = tref_ref(emitir(IRTGI(IR_CONV), ref,
- IRCONV_INT_NUM|IRCONV_CHECK));
- else
- lj_trace_err(J, LJ_TRERR_TYPEINS);
- subst[ins] = (IRRef1)ref;
- irr = IR(ref);
- goto phiconv;
- }
- } else if (ref != REF_DROP && irr->o == IR_CONV &&
- ref > invar && irr->op1 < invar) {
- /* May need an extra PHI for a CONV. */
- ref = irr->op1;
- irr = IR(ref);
- phiconv:
- if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
- irt_setphi(irr->t);
- if (nphi >= LJ_MAX_PHI)
- lj_trace_err(J, LJ_TRERR_PHIOV);
- phi[nphi++] = (IRRef1)ref;
- }
- }
- }
- }
- }
- if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */
- J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs;
- lua_assert(J->cur.nsnapmap <= J->sizesnapmap);
- *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */
-
- loop_emit_phi(J, subst, phi, nphi, onsnap);
-}
-
-/* Undo any partial changes made by the loop optimization. */
-static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
-{
- ptrdiff_t i;
- SnapShot *snap = &J->cur.snap[nsnap-1];
- SnapEntry *map = J->cur.snapmap;
- map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */
- J->cur.nsnapmap = (uint16_t)nsnapmap;
- J->cur.nsnap = nsnap;
- J->guardemit.irt = 0;
- lj_ir_rollback(J, ins);
- for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */
- BPropEntry *bp = &J->bpropcache[i];
- if (bp->val >= ins)
- bp->key = 0;
- }
- for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */
- IRIns *ir = IR(ins);
- irt_clearphi(ir->t);
- irt_clearmark(ir->t);
- }
-}
-
-/* Protected callback for loop optimization. */
-static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud)
-{
- UNUSED(L); UNUSED(dummy);
- loop_unroll((jit_State *)ud);
- return NULL;
-}
-
-/* Loop optimization. */
-int lj_opt_loop(jit_State *J)
-{
- IRRef nins = J->cur.nins;
- SnapNo nsnap = J->cur.nsnap;
- MSize nsnapmap = J->cur.nsnapmap;
- int errcode = lj_vm_cpcall(J->L, NULL, J, cploop_opt);
- if (LJ_UNLIKELY(errcode)) {
- lua_State *L = J->L;
- if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */
- int32_t e = numberVint(L->top-1);
- switch ((TraceError)e) {
- case LJ_TRERR_TYPEINS: /* Type instability. */
- case LJ_TRERR_GFAIL: /* Guard would always fail. */
- /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */
- if (--J->instunroll < 0) /* But do not unroll forever. */
- break;
- L->top--; /* Remove error object. */
- loop_undo(J, nins, nsnap, nsnapmap);
- return 1; /* Loop optimization failed, continue recording. */
- default:
- break;
- }
- }
- lj_err_throw(L, errcode); /* Propagate all other errors. */
- }
- return 0; /* Loop optimization is ok. */
-}
-
-#undef IR
-#undef emitir
-#undef emitir_raw
-
-#endif
+/*
+** LOOP: Loop Optimizations.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_loop_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_vm.h"
+
+/* Loop optimization:
+**
+** Traditional Loop-Invariant Code Motion (LICM) splits the instructions
+** of a loop into invariant and variant instructions. The invariant
+** instructions are hoisted out of the loop and only the variant
+** instructions remain inside the loop body.
+**
+** Unfortunately LICM is mostly useless for compiling dynamic languages.
+** The IR has many guards and most of the subsequent instructions are
+** control-dependent on them. The first non-hoistable guard would
+** effectively prevent hoisting of all subsequent instructions.
+**
+** That's why we use a special form of unrolling using copy-substitution,
+** combined with redundancy elimination:
+**
+** The recorded instruction stream is re-emitted to the compiler pipeline
+** with substituted operands. The substitution table is filled with the
+** refs returned by re-emitting each instruction. This can be done
+** on-the-fly, because the IR is in strict SSA form, where every ref is
+** defined before its use.
+**
+** This approach generates two code sections, separated by the LOOP
+** instruction:
+**
+** 1. The recorded instructions form a kind of pre-roll for the loop. It
+** contains a mix of invariant and variant instructions and performs
+** exactly one loop iteration (but not necessarily the 1st iteration).
+**
+** 2. The loop body contains only the variant instructions and performs
+** all remaining loop iterations.
+**
+** On first sight that looks like a waste of space, because the variant
+** instructions are present twice. But the key insight is that the
+** pre-roll honors the control-dependencies for *both* the pre-roll itself
+** *and* the loop body!
+**
+** It also means one doesn't have to explicitly model control-dependencies
+** (which, BTW, wouldn't help LICM much). And it's much easier to
+** integrate sparse snapshotting with this approach.
+**
+** One of the nicest aspects of this approach is that all of the
+** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be
+** reused with only minor restrictions (e.g. one should not fold
+** instructions across loop-carried dependencies).
+**
+** But in general all optimizations can be applied which only need to look
+** backwards into the generated instruction stream. At any point in time
+** during the copy-substitution process this contains both a static loop
+** iteration (the pre-roll) and a dynamic one (from the to-be-copied
+** instruction up to the end of the partial loop body).
+**
+** Since control-dependencies are implicitly kept, CSE also applies to all
+** kinds of guards. The major advantage is that all invariant guards can
+** be hoisted, too.
+**
+** Load/store forwarding works across loop iterations, too. This is
+** important if loop-carried dependencies are kept in upvalues or tables.
+** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may
+** become a forwarded loop-recurrence after inlining.
+**
+** Since the IR is in SSA form, loop-carried dependencies have to be
+** modeled with PHI instructions. The potential candidates for PHIs are
+** collected on-the-fly during copy-substitution. After eliminating the
+** redundant ones, PHI instructions are emitted *below* the loop body.
+**
+** Note that this departure from traditional SSA form doesn't change the
+** semantics of the PHI instructions themselves. But it greatly simplifies
+** on-the-fly generation of the IR and the machine code.
+*/
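
As a rough analogy for the two sections described above, a hand-written C sketch of how a summation loop splits into a pre-roll plus a variant-only body (purely illustrative; it is not generated code and elides guards and snapshots):

#include <stdio.h>

int main(void)
{
  int t[8] = {1, 2, 3, 4, 5, 6, 7, 8}, n = 8;
  int s = 0, i = 0;

  /* Pre-roll: invariant setup (think hoisted guards and base pointers)
  ** plus exactly one full iteration of the recorded instructions.
  */
  int *base = t;                  /* invariant */
  s = s + base[i];                /* one full iteration */
  i = i + 1;

  /* LOOP marker: below it only the variant instructions remain; the
  ** loop-carried values s and i play the role of the PHIs.
  */
  while (i < n) {
    s = s + base[i];
    i = i + 1;
  }
  printf("%d\n", s);              /* 36 */
  return 0;
}
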
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- PHI elimination ----------------------------------------------------- */
+
+/* Emit or eliminate collected PHIs. */
+static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
+ SnapNo onsnap)
+{
+ int passx = 0;
+ IRRef i, nslots;
+ IRRef invar = J->chain[IR_LOOP];
+ /* Pass #1: mark redundant and potentially redundant PHIs. */
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRRef rref = subst[lref];
+ if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */
+ irt_setmark(IR(lref)->t);
+ } else if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
+ /* Quick check for simple recurrences failed, need pass2. */
+ irt_setmark(IR(lref)->t);
+ passx = 1;
+ }
+ }
+ /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
+ if (passx) {
+ SnapNo s;
+ for (i = J->cur.nins-1; i > invar; i--) {
+ IRIns *ir = IR(i);
+ if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
+ if (!irref_isk(ir->op1)) {
+ irt_clearmark(IR(ir->op1)->t);
+ if (ir->op1 < invar &&
+ ir->o >= IR_CALLN && ir->o <= IR_CARG) { /* ORDER IR */
+ ir = IR(ir->op1);
+ while (ir->o == IR_CARG) {
+ if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
+ if (irref_isk(ir->op1)) break;
+ ir = IR(ir->op1);
+ irt_clearmark(ir->t);
+ }
+ }
+ }
+ }
+ for (s = J->cur.nsnap-1; s >= onsnap; s--) {
+ SnapShot *snap = &J->cur.snap[s];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (!irref_isk(ref)) irt_clearmark(IR(ref)->t);
+ }
+ }
+ }
+ /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
+ nslots = J->baseslot+J->maxslot;
+ for (i = 1; i < nslots; i++) {
+ IRRef ref = tref_ref(J->slot[i]);
+ while (!irref_isk(ref) && ref != subst[ref]) {
+ IRIns *ir = IR(ref);
+ irt_clearmark(ir->t); /* Unmark potential uses, too. */
+ if (irt_isphi(ir->t) || irt_ispri(ir->t))
+ break;
+ irt_setphi(ir->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ ref = subst[ref];
+ if (ref > invar)
+ break;
+ }
+ }
+ /* Pass #4: propagate non-redundant PHIs. */
+ while (passx) {
+ passx = 0;
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRIns *ir = IR(lref);
+ if (!irt_ismarked(ir->t)) { /* Propagate only from unmarked PHIs. */
+ IRRef rref = subst[lref];
+ if (lref == rref) { /* Mark redundant PHI. */
+ irt_setmark(ir->t);
+ } else {
+ IRIns *irr = IR(rref);
+ if (irt_ismarked(irr->t)) { /* Right ref points to other PHI? */
+ irt_clearmark(irr->t); /* Mark that PHI as non-redundant. */
+ passx = 1; /* Retry. */
+ }
+ }
+ }
+ }
+ }
+ /* Pass #5: emit PHI instructions or eliminate PHIs. */
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRIns *ir = IR(lref);
+ if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */
+ IRRef rref = subst[lref];
+ if (rref > invar)
+ irt_setphi(IR(rref)->t);
+ emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
+ } else { /* Otherwise eliminate PHI. */
+ irt_clearmark(ir->t);
+ irt_clearphi(ir->t);
+ }
+ }
+}
+
+/* -- Loop unrolling using copy-substitution ------------------------------ */
+
+/* Copy-substitute snapshot. */
+static void loop_subst_snap(jit_State *J, SnapShot *osnap,
+ SnapEntry *loopmap, IRRef1 *subst)
+{
+ SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs];
+ SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)];
+ MSize nmapofs;
+ MSize on, ln, nn, onent = osnap->nent;
+ BCReg nslots = osnap->nslots;
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap];
+ if (irt_isguard(J->guardemit)) { /* Guard in between? */
+ nmapofs = J->cur.nsnapmap;
+ J->cur.nsnap++; /* Add new snapshot. */
+ } else { /* Otherwise overwrite previous snapshot. */
+ snap--;
+ nmapofs = snap->mapofs;
+ }
+ J->guardemit.irt = 0;
+ /* Setup new snapshot. */
+ snap->mapofs = (uint16_t)nmapofs;
+ snap->ref = (IRRef1)J->cur.nins;
+ snap->nslots = nslots;
+ snap->topslot = osnap->topslot;
+ snap->count = 0;
+ nmap = &J->cur.snapmap[nmapofs];
+ /* Substitute snapshot slots. */
+ on = ln = nn = 0;
+ while (on < onent) {
+ SnapEntry osn = omap[on], lsn = loopmap[ln];
+ if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */
+ nmap[nn++] = lsn;
+ ln++;
+ } else { /* Copy substituted slot from snapshot map. */
+ if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */
+ if (!irref_isk(snap_ref(osn)))
+ osn = snap_setref(osn, subst[snap_ref(osn)]);
+ nmap[nn++] = osn;
+ on++;
+ }
+ }
+ while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */
+ nmap[nn++] = loopmap[ln++];
+ snap->nent = (uint8_t)nn;
+ omap += onent;
+ nmap += nn;
+ while (omap < nextmap) /* Copy PC + frame links. */
+ *nmap++ = *omap++;
+ J->cur.nsnapmap = (uint16_t)(nmap - J->cur.snapmap);
+}
+
+/* Unroll loop. */
+static void loop_unroll(jit_State *J)
+{
+ IRRef1 phi[LJ_MAX_PHI];
+ uint32_t nphi = 0;
+ IRRef1 *subst;
+ SnapNo onsnap;
+ SnapShot *osnap, *loopsnap;
+ SnapEntry *loopmap, *psentinel;
+ IRRef ins, invar;
+
+ /* Use temp buffer for substitution table.
+ ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
+ ** Caveat: don't call into the VM or run the GC or the buffer may be gone.
+ */
+ invar = J->cur.nins;
+ subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf,
+ (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS;
+ subst[REF_BASE] = REF_BASE;
+
+ /* LOOP separates the pre-roll from the loop body. */
+ emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0);
+
+ /* Grow snapshot buffer and map for copy-substituted snapshots.
+ ** Need up to twice the number of snapshots minus #0 and loop snapshot.
+ ** Need up to twice the number of entries plus fallback substitutions
+ ** from the loop snapshot entries for each new snapshot.
+ ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap!
+ */
+ onsnap = J->cur.nsnap;
+ lj_snap_grow_buf(J, 2*onsnap-2);
+ lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent);
+
+ /* The loop snapshot is used for fallback substitutions. */
+ loopsnap = &J->cur.snap[onsnap-1];
+ loopmap = &J->cur.snapmap[loopsnap->mapofs];
+ /* The PC of snapshot #0 and the loop snapshot must match. */
+ psentinel = &loopmap[loopsnap->nent];
+ lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]);
+ *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */
+
+ /* Start substitution with snapshot #1 (#0 is empty for root traces). */
+ osnap = &J->cur.snap[1];
+
+ /* Copy and substitute all recorded instructions and snapshots. */
+ for (ins = REF_FIRST; ins < invar; ins++) {
+ IRIns *ir;
+ IRRef op1, op2;
+
+ if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */
+ loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */
+
+ /* Substitute instruction operands. */
+ ir = IR(ins);
+ op1 = ir->op1;
+ if (!irref_isk(op1)) op1 = subst[op1];
+ op2 = ir->op2;
+ if (!irref_isk(op2)) op2 = subst[op2];
+ if (irm_kind(lj_ir_mode[ir->o]) == IRM_N &&
+ op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */
+ subst[ins] = (IRRef1)ins; /* Shortcut. */
+ } else {
+ /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */
+ IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */
+ IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2));
+ subst[ins] = (IRRef1)ref;
+ if (ref != ins) {
+ IRIns *irr = IR(ref);
+ if (ref < invar) { /* Loop-carried dependency? */
+ /* Potential PHI? */
+ if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) {
+ irt_setphi(irr->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ }
+ /* Check all loop-carried dependencies for type instability. */
+ if (!irt_sametype(t, irr->t)) {
+ if (irt_isinteger(t) && irt_isinteger(irr->t))
+ continue;
+ else if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */
+ ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT));
+ else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */
+ ref = tref_ref(emitir(IRTGI(IR_CONV), ref,
+ IRCONV_INT_NUM|IRCONV_CHECK));
+ else
+ lj_trace_err(J, LJ_TRERR_TYPEINS);
+ subst[ins] = (IRRef1)ref;
+ irr = IR(ref);
+ goto phiconv;
+ }
+ } else if (ref != REF_DROP && irr->o == IR_CONV &&
+ ref > invar && irr->op1 < invar) {
+ /* May need an extra PHI for a CONV. */
+ ref = irr->op1;
+ irr = IR(ref);
+ phiconv:
+ if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
+ irt_setphi(irr->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ }
+ }
+ }
+ }
+ }
+ if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */
+ J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs;
+ lua_assert(J->cur.nsnapmap <= J->sizesnapmap);
+ *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */
+
+ loop_emit_phi(J, subst, phi, nphi, onsnap);
+}
+
+/* Undo any partial changes made by the loop optimization. */
+static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
+{
+ ptrdiff_t i;
+ SnapShot *snap = &J->cur.snap[nsnap-1];
+ SnapEntry *map = J->cur.snapmap;
+ map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */
+ J->cur.nsnapmap = (uint16_t)nsnapmap;
+ J->cur.nsnap = nsnap;
+ J->guardemit.irt = 0;
+ lj_ir_rollback(J, ins);
+ for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */
+ BPropEntry *bp = &J->bpropcache[i];
+ if (bp->val >= ins)
+ bp->key = 0;
+ }
+ for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */
+ IRIns *ir = IR(ins);
+ irt_clearphi(ir->t);
+ irt_clearmark(ir->t);
+ }
+}
+
+/* Protected callback for loop optimization. */
+static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ UNUSED(L); UNUSED(dummy);
+ loop_unroll((jit_State *)ud);
+ return NULL;
+}
+
+/* Loop optimization. */
+int lj_opt_loop(jit_State *J)
+{
+ IRRef nins = J->cur.nins;
+ SnapNo nsnap = J->cur.nsnap;
+ MSize nsnapmap = J->cur.nsnapmap;
+ int errcode = lj_vm_cpcall(J->L, NULL, J, cploop_opt);
+ if (LJ_UNLIKELY(errcode)) {
+ lua_State *L = J->L;
+ if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */
+ int32_t e = numberVint(L->top-1);
+ switch ((TraceError)e) {
+ case LJ_TRERR_TYPEINS: /* Type instability. */
+ case LJ_TRERR_GFAIL: /* Guard would always fail. */
+ /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */
+ if (--J->instunroll < 0) /* But do not unroll forever. */
+ break;
+ L->top--; /* Remove error object. */
+ loop_undo(J, nins, nsnap, nsnapmap);
+ return 1; /* Loop optimization failed, continue recording. */
+ default:
+ break;
+ }
+ }
+ lj_err_throw(L, errcode); /* Propagate all other errors. */
+ }
+ return 0; /* Loop optimization is ok. */
+}
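+
+/* Editorial example (illustrative only): a "flipped boolean" like
+**   local p = true
+**   for i = 1, n do p = not p end
+** is type-unstable in the IR, because true and false are distinct primitive
+** IR types. Returning 1 above lets the recorder unroll one more copy of the
+** body, after which the pattern repeats and the trace can be completed.
+*/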
+
+#undef IR
+#undef emitir
+#undef emitir_raw
+
+#endif
diff --git a/3rdparty/lua/src/lj_opt_mem.c b/3rdparty/lua/src/lj_opt_mem.c
index 97c16d8..98974ce 100644
--- a/3rdparty/lua/src/lj_opt_mem.c
+++ b/3rdparty/lua/src/lj_opt_mem.c
@@ -1,916 +1,907 @@
-/*
-** Memory access optimizations.
-** AA: Alias Analysis using high-level semantic disambiguation.
-** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
-** DSE: Dead-Store Elimination.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_opt_mem_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_tab.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_iropt.h"
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-#define fins (&J->fold.ins)
-#define fleft (&J->fold.left)
-#define fright (&J->fold.right)
-
-/*
-** Caveat #1: return value is not always a TRef -- only use with tref_ref().
-** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
-*/
-
-/* Return values from alias analysis. */
-typedef enum {
- ALIAS_NO, /* The two refs CANNOT alias (exact). */
- ALIAS_MAY, /* The two refs MAY alias (inexact). */
- ALIAS_MUST /* The two refs MUST alias (exact). */
-} AliasRet;
-
-/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
-
-/* Simplified escape analysis: check for intervening stores. */
-static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
-{
- IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */
- for (ir++; ir < stop; ir++)
- if (ir->op2 == ref &&
- (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
- ir->o == IR_USTORE || ir->o == IR_FSTORE))
- return ALIAS_MAY; /* Reference was stored and might alias. */
- return ALIAS_NO; /* Reference was not stored. */
-}
-
-/* Alias analysis for two different table references. */
-static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
-{
- IRIns *taba = IR(ta), *tabb = IR(tb);
- int newa, newb;
- lua_assert(ta != tb);
- lua_assert(irt_istab(taba->t) && irt_istab(tabb->t));
- /* Disambiguate new allocations. */
- newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
- newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
- if (newa && newb)
- return ALIAS_NO; /* Two different allocations never alias. */
- if (newb) { /* At least one allocation? */
- IRIns *tmp = taba; taba = tabb; tabb = tmp;
- } else if (!newa) {
- return ALIAS_MAY; /* Anything else: we just don't know. */
- }
- return aa_escape(J, taba, tabb);
-}
-
-/* Alias analysis for array and hash access using key-based disambiguation. */
-static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
-{
- IRRef ka = refa->op2;
- IRRef kb = refb->op2;
- IRIns *keya, *keyb;
- IRRef ta, tb;
- if (refa == refb)
- return ALIAS_MUST; /* Shortcut for same refs. */
- keya = IR(ka);
- if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
- keyb = IR(kb);
- if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
- ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
- tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
- if (ka == kb) {
- /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
- if (ta == tb)
- return ALIAS_MUST; /* Same key, same table. */
- else
- return aa_table(J, ta, tb); /* Same key, possibly different table. */
- }
- if (irref_isk(ka) && irref_isk(kb))
- return ALIAS_NO; /* Different constant keys. */
- if (refa->o == IR_AREF) {
- /* Disambiguate array references based on index arithmetic. */
- int32_t ofsa = 0, ofsb = 0;
- IRRef basea = ka, baseb = kb;
- lua_assert(refb->o == IR_AREF);
- /* Gather base and offset from t[base] or t[base+-ofs]. */
- if (keya->o == IR_ADD && irref_isk(keya->op2)) {
- basea = keya->op1;
- ofsa = IR(keya->op2)->i;
- if (basea == kb && ofsa != 0)
- return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */
- }
- if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
- baseb = keyb->op1;
- ofsb = IR(keyb->op2)->i;
- if (ka == baseb && ofsb != 0)
- return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */
- }
- if (basea == baseb && ofsa != ofsb)
- return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
- } else {
- /* Disambiguate hash references based on the type of their keys. */
- lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
- (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF));
- if (!irt_sametype(keya->t, keyb->t))
- return ALIAS_NO; /* Different key types. */
- }
- if (ta == tb)
- return ALIAS_MAY; /* Same table, cannot disambiguate keys. */
- else
- return aa_table(J, ta, tb); /* Try to disambiguate tables. */
-}
-
-/* Array and hash load forwarding. */
-static TRef fwd_ahload(jit_State *J, IRRef xref)
-{
- IRIns *xr = IR(xref);
- IRRef lim = xref; /* Search limit. */
- IRRef ref;
-
- /* Search for conflicting stores. */
- ref = J->chain[fins->o+IRDELTA_L2S];
- while (ref > xref) {
- IRIns *store = IR(ref);
- switch (aa_ahref(J, xr, IR(store->op1))) {
- case ALIAS_NO: break; /* Continue searching. */
- case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
- case ALIAS_MUST: return store->op2; /* Store forwarding. */
- }
- ref = store->prev;
- }
-
- /* No conflicting store (yet): const-fold loads from allocations. */
- {
- IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
- IRRef tab = ir->op1;
- ir = IR(tab);
- if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
- /* A NEWREF with a number key may end up pointing to the array part.
- ** But it's referenced from HSTORE and not found in the ASTORE chain.
- ** For now simply consider this a conflict without forwarding anything.
- */
- if (xr->o == IR_AREF) {
- IRRef ref2 = J->chain[IR_NEWREF];
- while (ref2 > tab) {
- IRIns *newref = IR(ref2);
- if (irt_isnum(IR(newref->op2)->t))
- goto cselim;
- ref2 = newref->prev;
- }
- }
- /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
- ** But the above search for conflicting stores was limited by xref.
- ** So continue searching, limited by the TNEW/TDUP. Store forwarding
- ** is ok, too. A conflict does NOT limit the search for a matching load.
- */
- while (ref > tab) {
- IRIns *store = IR(ref);
- switch (aa_ahref(J, xr, IR(store->op1))) {
- case ALIAS_NO: break; /* Continue searching. */
- case ALIAS_MAY: goto cselim; /* Conflicting store. */
- case ALIAS_MUST: return store->op2; /* Store forwarding. */
- }
- ref = store->prev;
- }
- lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t));
- if (irt_ispri(fins->t)) {
- return TREF_PRI(irt_type(fins->t));
- } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
- irt_isstr(fins->t)) {
- TValue keyv;
- cTValue *tv;
- IRIns *key = IR(xr->op2);
- if (key->o == IR_KSLOT) key = IR(key->op1);
- lj_ir_kvalue(J->L, &keyv, key);
- tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
- lua_assert(itype2irt(tv) == irt_type(fins->t));
- if (irt_isnum(fins->t))
- return lj_ir_knum_u64(J, tv->u64);
- else if (LJ_DUALNUM && irt_isint(fins->t))
- return lj_ir_kint(J, intV(tv));
- else
- return lj_ir_kstr(J, strV(tv));
- }
- /* Otherwise: don't intern as a constant. */
- }
- }
-
-cselim:
- /* Try to find a matching load. Below the conflicting store, if any. */
- ref = J->chain[fins->o];
- while (ref > lim) {
- IRIns *load = IR(ref);
- if (load->op1 == xref)
- return ref; /* Load forwarding. */
- ref = load->prev;
- }
- return 0; /* Conflict or no match. */
-}
-
-/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
-static TRef fwd_aload_reassoc(jit_State *J)
-{
- IRIns *irx = IR(fins->op1);
- IRIns *key = IR(irx->op2);
- if (key->o == IR_ADD && irref_isk(key->op2)) {
- IRIns *add2 = IR(key->op1);
- if (add2->o == IR_ADD && irref_isk(add2->op2) &&
- IR(key->op2)->i == -IR(add2->op2)->i) {
- IRRef ref = J->chain[IR_AREF];
- IRRef lim = add2->op1;
- if (irx->op1 > lim) lim = irx->op1;
- while (ref > lim) {
- IRIns *ir = IR(ref);
- if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
- return fwd_ahload(J, ref);
- ref = ir->prev;
- }
- }
- }
- return 0;
-}
-
-/* ALOAD forwarding. */
-TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
-{
- IRRef ref;
- if ((ref = fwd_ahload(J, fins->op1)) ||
- (ref = fwd_aload_reassoc(J)))
- return ref;
- return EMITFOLD;
-}
-
-/* HLOAD forwarding. */
-TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
-{
- IRRef ref = fwd_ahload(J, fins->op1);
- if (ref)
- return ref;
- return EMITFOLD;
-}
-
-/* HREFK forwarding. */
-TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
-{
- IRRef tab = fleft->op1;
- IRRef ref = J->chain[IR_NEWREF];
- while (ref > tab) {
- IRIns *newref = IR(ref);
- if (tab == newref->op1) {
- if (fright->op1 == newref->op2)
- return ref; /* Forward from NEWREF. */
- else
- goto docse;
- } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
- goto docse;
- }
- ref = newref->prev;
- }
- /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
- if (IR(tab)->o == IR_TDUP)
- fins->t.irt &= ~IRT_GUARD; /* Drop HREFK guard. */
-docse:
- return CSEFOLD;
-}
-
-/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
-int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
-{
- IRRef lim = fins->op1; /* Search limit. */
- IRRef ref;
-
- /* The key for an ASTORE may end up in the hash part after a NEWREF. */
- if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
- ref = J->chain[IR_ASTORE];
- while (ref > lim) {
- if (ref < J->chain[IR_NEWREF])
- return 0; /* Conflict. */
- ref = IR(ref)->prev;
- }
- }
-
- /* Search for conflicting stores. */
- ref = J->chain[IR_HSTORE];
- while (ref > lim) {
- IRIns *store = IR(ref);
- if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
- return 0; /* Conflict. */
- ref = store->prev;
- }
-
- return 1; /* No conflict. Can fold to niltv. */
-}
-
-/* Check whether there's no aliasing NEWREF for the left operand. */
-int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
-{
- IRRef ta = fins->op1;
- IRRef ref = J->chain[IR_NEWREF];
- while (ref > lim) {
- IRIns *newref = IR(ref);
- if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
- return 0; /* Conflict. */
- ref = newref->prev;
- }
- return 1; /* No conflict. Can safely FOLD/CSE. */
-}
-
-/* ASTORE/HSTORE elimination. */
-TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
-{
- IRRef xref = fins->op1; /* xREF reference. */
- IRRef val = fins->op2; /* Stored value reference. */
- IRIns *xr = IR(xref);
- IRRef1 *refp = &J->chain[fins->o];
- IRRef ref = *refp;
- while (ref > xref) { /* Search for redundant or conflicting stores. */
- IRIns *store = IR(ref);
- switch (aa_ahref(J, xr, IR(store->op1))) {
- case ALIAS_NO:
- break; /* Continue searching. */
- case ALIAS_MAY: /* Store to MAYBE the same location. */
- if (store->op2 != val) /* Conflict if the value is different. */
- goto doemit;
- break; /* Otherwise continue searching. */
- case ALIAS_MUST: /* Store to the same location. */
- if (store->op2 == val) /* Same value: drop the new store. */
- return DROPFOLD;
- /* Different value: try to eliminate the redundant store. */
- if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
- IRIns *ir;
- /* Check for any intervening guards (includes conflicting loads). */
- for (ir = IR(J->cur.nins-1); ir > store; ir--)
- if (irt_isguard(ir->t) || ir->o == IR_CALLL)
- goto doemit; /* No elimination possible. */
- /* Remove redundant store from chain and replace with NOP. */
- *refp = store->prev;
- store->o = IR_NOP;
- store->t.irt = IRT_NIL;
- store->op1 = store->op2 = 0;
- store->prev = 0;
- /* Now emit the new store instead. */
- }
- goto doemit;
- }
- ref = *(refp = &store->prev);
- }
-doemit:
- return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
-}
-
-/* -- ULOAD forwarding ---------------------------------------------------- */
-
-/* The current alias analysis for upvalues is very simplistic. It only
-** disambiguates between the unique upvalues of the same function.
-** This is good enough for now, since most upvalues are read-only.
-**
-** A more precise analysis would be feasible with the help of the parser:
-** generate a unique key for every upvalue, even across all prototypes.
-** Lacking a realistic use-case, it's unclear whether this is beneficial.
-*/
-static AliasRet aa_uref(IRIns *refa, IRIns *refb)
-{
- if (refa->o != refb->o)
- return ALIAS_NO; /* Different UREFx type. */
- if (refa->op1 == refb->op1) { /* Same function. */
- if (refa->op2 == refb->op2)
- return ALIAS_MUST; /* Same function, same upvalue idx. */
- else
- return ALIAS_NO; /* Same function, different upvalue idx. */
- } else { /* Different functions, check disambiguation hash values. */
- if (((refa->op2 ^ refb->op2) & 0xff))
- return ALIAS_NO; /* Upvalues with different hash values cannot alias. */
- else
- return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */
- }
-}
-
-/* ULOAD forwarding. */
-TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
-{
- IRRef uref = fins->op1;
- IRRef lim = REF_BASE; /* Search limit. */
- IRIns *xr = IR(uref);
- IRRef ref;
-
- /* Search for conflicting stores. */
- ref = J->chain[IR_USTORE];
- while (ref > lim) {
- IRIns *store = IR(ref);
- switch (aa_uref(xr, IR(store->op1))) {
- case ALIAS_NO: break; /* Continue searching. */
- case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
- case ALIAS_MUST: return store->op2; /* Store forwarding. */
- }
- ref = store->prev;
- }
-
-cselim:
- /* Try to find a matching load. Below the conflicting store, if any. */
-
- ref = J->chain[IR_ULOAD];
- while (ref > lim) {
- IRIns *ir = IR(ref);
- if (ir->op1 == uref ||
- (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
- return ref; /* Match for identical or equal UREFx (non-CSEable UREFO). */
- ref = ir->prev;
- }
- return lj_ir_emit(J);
-}
-
-/* USTORE elimination. */
-TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
-{
- IRRef xref = fins->op1; /* xREF reference. */
- IRRef val = fins->op2; /* Stored value reference. */
- IRIns *xr = IR(xref);
- IRRef1 *refp = &J->chain[IR_USTORE];
- IRRef ref = *refp;
- while (ref > xref) { /* Search for redundant or conflicting stores. */
- IRIns *store = IR(ref);
- switch (aa_uref(xr, IR(store->op1))) {
- case ALIAS_NO:
- break; /* Continue searching. */
- case ALIAS_MAY: /* Store to MAYBE the same location. */
- if (store->op2 != val) /* Conflict if the value is different. */
- goto doemit;
- break; /* Otherwise continue searching. */
- case ALIAS_MUST: /* Store to the same location. */
- if (store->op2 == val) /* Same value: drop the new store. */
- return DROPFOLD;
- /* Different value: try to eliminate the redundant store. */
- if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
- IRIns *ir;
- /* Check for any intervening guards (includes conflicting loads). */
- for (ir = IR(J->cur.nins-1); ir > store; ir--)
- if (irt_isguard(ir->t))
- goto doemit; /* No elimination possible. */
- /* Remove redundant store from chain and replace with NOP. */
- *refp = store->prev;
- store->o = IR_NOP;
- store->t.irt = IRT_NIL;
- store->op1 = store->op2 = 0;
- store->prev = 0;
- if (ref+1 < J->cur.nins &&
- store[1].o == IR_OBAR && store[1].op1 == xref) {
- IRRef1 *bp = &J->chain[IR_OBAR];
- IRIns *obar;
- for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
- bp = &obar->prev;
- /* Remove OBAR, too. */
- *bp = obar->prev;
- obar->o = IR_NOP;
- obar->t.irt = IRT_NIL;
- obar->op1 = obar->op2 = 0;
- obar->prev = 0;
- }
- /* Now emit the new store instead. */
- }
- goto doemit;
- }
- ref = *(refp = &store->prev);
- }
-doemit:
- return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
-}
-
-/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
-
-/* Alias analysis for field access.
-** Field loads are cheap and field stores are rare.
-** Simple disambiguation based on field types is good enough.
-*/
-static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
-{
- if (refa->op2 != refb->op2)
- return ALIAS_NO; /* Different fields. */
- if (refa->op1 == refb->op1)
- return ALIAS_MUST; /* Same field, same object. */
- else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
- return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */
- else
- return ALIAS_MAY; /* Same field, possibly different object. */
-}
-
-/* Only the loads for mutable fields end up here (see FOLD). */
-TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
-{
- IRRef oref = fins->op1; /* Object reference. */
- IRRef fid = fins->op2; /* Field ID. */
- IRRef lim = oref; /* Search limit. */
- IRRef ref;
-
- /* Search for conflicting stores. */
- ref = J->chain[IR_FSTORE];
- while (ref > oref) {
- IRIns *store = IR(ref);
- switch (aa_fref(J, fins, IR(store->op1))) {
- case ALIAS_NO: break; /* Continue searching. */
- case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
- case ALIAS_MUST: return store->op2; /* Store forwarding. */
- }
- ref = store->prev;
- }
-
- /* No conflicting store: const-fold field loads from allocations. */
- if (fid == IRFL_TAB_META) {
- IRIns *ir = IR(oref);
- if (ir->o == IR_TNEW || ir->o == IR_TDUP)
- return lj_ir_knull(J, IRT_TAB);
- }
-
-cselim:
- /* Try to find a matching load. Below the conflicting store, if any. */
- return lj_opt_cselim(J, lim);
-}
-
-/* FSTORE elimination. */
-TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
-{
- IRRef fref = fins->op1; /* FREF reference. */
- IRRef val = fins->op2; /* Stored value reference. */
- IRIns *xr = IR(fref);
- IRRef1 *refp = &J->chain[IR_FSTORE];
- IRRef ref = *refp;
- while (ref > fref) { /* Search for redundant or conflicting stores. */
- IRIns *store = IR(ref);
- switch (aa_fref(J, xr, IR(store->op1))) {
- case ALIAS_NO:
- break; /* Continue searching. */
- case ALIAS_MAY:
- if (store->op2 != val) /* Conflict if the value is different. */
- goto doemit;
- break; /* Otherwise continue searching. */
- case ALIAS_MUST:
- if (store->op2 == val) /* Same value: drop the new store. */
- return DROPFOLD;
- /* Different value: try to eliminate the redundant store. */
- if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
- IRIns *ir;
- /* Check for any intervening guards or conflicting loads. */
- for (ir = IR(J->cur.nins-1); ir > store; ir--)
- if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
- goto doemit; /* No elimination possible. */
- /* Remove redundant store from chain and replace with NOP. */
- *refp = store->prev;
- store->o = IR_NOP;
- store->t.irt = IRT_NIL;
- store->op1 = store->op2 = 0;
- store->prev = 0;
- /* Now emit the new store instead. */
- }
- goto doemit;
- }
- ref = *(refp = &store->prev);
- }
-doemit:
- return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
-}
-
-/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
-
-/* Find cdata allocation for a reference (if any). */
-static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
-{
- while (ir->o == IR_ADD) {
- if (!irref_isk(ir->op1)) {
- IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */
- if (ir1) return ir1;
- }
- if (irref_isk(ir->op2)) return NULL;
- ir = IR(ir->op2); /* Flatten right-recursion. */
- }
- return ir->o == IR_CNEW ? ir : NULL;
-}
-
-/* Alias analysis for two cdata allocations. */
-static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
-{
- IRIns *cnewa = aa_findcnew(J, refa);
- IRIns *cnewb = aa_findcnew(J, refb);
- if (cnewa == cnewb)
- return ALIAS_MAY; /* Same allocation or neither is an allocation. */
- if (cnewa && cnewb)
- return ALIAS_NO; /* Two different allocations never alias. */
- if (cnewb) { cnewa = cnewb; refb = refa; }
- return aa_escape(J, cnewa, refb);
-}
-
-/* Alias analysis for XLOAD/XSTORE. */
-static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
-{
- ptrdiff_t ofsa = 0, ofsb = 0;
- IRIns *refb = IR(xb->op1);
- IRIns *basea = refa, *baseb = refb;
- if (refa == refb && irt_sametype(xa->t, xb->t))
- return ALIAS_MUST; /* Shortcut for same refs with identical type. */
- /* Offset-based disambiguation. */
- if (refa->o == IR_ADD && irref_isk(refa->op2)) {
- IRIns *irk = IR(refa->op2);
- basea = IR(refa->op1);
- ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
- (ptrdiff_t)irk->i;
- }
- if (refb->o == IR_ADD && irref_isk(refb->op2)) {
- IRIns *irk = IR(refb->op2);
- baseb = IR(refb->op1);
- ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
- (ptrdiff_t)irk->i;
- }
- /* Treat constified pointers like base vs. base+offset. */
- if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
- ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
- baseb = basea;
- }
- /* This implements (very) strict aliasing rules.
- ** Different types do NOT alias, except for differences in signedness.
- ** Type punning through unions is allowed (but forces a reload).
- */
- if (basea == baseb) {
- ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
- if (ofsa == ofsb) {
- if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
- return ALIAS_MUST; /* Same-sized, same-kind. May need to convert. */
- } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
- return ALIAS_NO; /* Non-overlapping base+-o1 vs. base+-o2. */
- }
- /* NYI: extract, extend or reinterpret bits (int <-> fp). */
- return ALIAS_MAY; /* Overlapping or type punning: force reload. */
- }
- if (!irt_sametype(xa->t, xb->t) &&
- !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
- ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
- return ALIAS_NO;
- /* NYI: structural disambiguation. */
- return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. */
-}
-
-/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
-static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
-{
- IRRef ref = J->chain[op];
- IRRef lim = op1;
- if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
- while (ref > lim) {
- IRIns *ir = IR(ref);
- if (ir->op1 == op1 && ir->op2 == op2)
- return ref;
- ref = ir->prev;
- }
- return 0;
-}
-
-/* Reassociate index references. */
-static IRRef reassoc_xref(jit_State *J, IRIns *ir)
-{
- ptrdiff_t ofs = 0;
- if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */
- IRIns *irk = IR(ir->op2);
- ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
- (ptrdiff_t)irk->i;
- ir = IR(ir->op1);
- }
- if (ir->o == IR_ADD) { /* Add of base + index. */
- /* Index ref > base ref for loop-carried dependences. Only check op1. */
- IRIns *ir2, *ir1 = IR(ir->op1);
- int32_t shift = 0;
- IRRef idxref;
- /* Determine index shifts. Don't bother with IR_MUL here. */
- if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
- shift = IR(ir1->op2)->i;
- else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
- shift = 1;
- else
- ir1 = ir;
- ir2 = IR(ir1->op1);
- /* A non-reassociated add. Must be a loop-carried dependence. */
- if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
- ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
- else
- return 0;
- idxref = ir2->op1;
- /* Try to CSE the reassociated chain. Give up if not found. */
- if (ir1 != ir &&
- !(idxref = reassoc_trycse(J, ir1->o, idxref,
- ir1->o == IR_BSHL ? ir1->op2 : idxref)))
- return 0;
- if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
- return 0;
- if (ofs != 0) {
- IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
- if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
- return 0;
- }
- return idxref; /* Success, found a reassociated index reference. Phew. */
- }
- return 0; /* Failure. */
-}
-
-/* XLOAD forwarding. */
-TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
-{
- IRRef xref = fins->op1;
- IRIns *xr = IR(xref);
- IRRef lim = xref; /* Search limit. */
- IRRef ref;
-
- if ((fins->op2 & IRXLOAD_READONLY))
- goto cselim;
- if ((fins->op2 & IRXLOAD_VOLATILE))
- goto doemit;
-
- /* Search for conflicting stores. */
- ref = J->chain[IR_XSTORE];
-retry:
- if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
- if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
- while (ref > lim) {
- IRIns *store = IR(ref);
- switch (aa_xref(J, xr, fins, store)) {
- case ALIAS_NO: break; /* Continue searching. */
- case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
- case ALIAS_MUST:
- /* Emit conversion if the loaded type doesn't match the forwarded type. */
- if (!irt_sametype(fins->t, IR(store->op2)->t)) {
- IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
- if (dt == IRT_I8 || dt == IRT_I16) { /* Trunc + sign-extend. */
- st = dt | IRCONV_SEXT;
- dt = IRT_INT;
- } else if (dt == IRT_U8 || dt == IRT_U16) { /* Trunc + zero-extend. */
- st = dt;
- dt = IRT_INT;
- }
- fins->ot = IRT(IR_CONV, dt);
- fins->op1 = store->op2;
- fins->op2 = (dt<<5)|st;
- return RETRYFOLD;
- }
- return store->op2; /* Store forwarding. */
- }
- ref = store->prev;
- }
-
-cselim:
- /* Try to find a matching load. Below the conflicting store, if any. */
- ref = J->chain[IR_XLOAD];
- while (ref > lim) {
- /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
- if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
- return ref;
- ref = IR(ref)->prev;
- }
-
- /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
- if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
- xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
- ref = J->chain[IR_XSTORE];
- while (ref > lim) /* Skip stores that have already been checked. */
- ref = IR(ref)->prev;
- lim = xref;
- xr = IR(xref);
- goto retry; /* Retry with the reassociated reference. */
- }
-doemit:
- return EMITFOLD;
-}
-
-/* XSTORE elimination. */
-TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
-{
- IRRef xref = fins->op1;
- IRIns *xr = IR(xref);
- IRRef lim = xref; /* Search limit. */
- IRRef val = fins->op2; /* Stored value reference. */
- IRRef1 *refp = &J->chain[IR_XSTORE];
- IRRef ref = *refp;
- if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
- if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
- if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
- while (ref > lim) { /* Search for redundant or conflicting stores. */
- IRIns *store = IR(ref);
- switch (aa_xref(J, xr, fins, store)) {
- case ALIAS_NO:
- break; /* Continue searching. */
- case ALIAS_MAY:
- if (store->op2 != val) /* Conflict if the value is different. */
- goto doemit;
- break; /* Otherwise continue searching. */
- case ALIAS_MUST:
- if (store->op2 == val) /* Same value: drop the new store. */
- return DROPFOLD;
- /* Different value: try to eliminate the redundant store. */
- if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
- IRIns *ir;
- /* Check for any intervening guards or any XLOADs (no AA performed). */
- for (ir = IR(J->cur.nins-1); ir > store; ir--)
- if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
- goto doemit; /* No elimination possible. */
- /* Remove redundant store from chain and replace with NOP. */
- *refp = store->prev;
- store->o = IR_NOP;
- store->t.irt = IRT_NIL;
- store->op1 = store->op2 = 0;
- store->prev = 0;
- /* Now emit the new store instead. */
- }
- goto doemit;
- }
- ref = *(refp = &store->prev);
- }
-doemit:
- return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
-}
-
-/* -- Forwarding of lj_tab_len -------------------------------------------- */
-
-/* This is rather simplistic right now, but better than nothing. */
-TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
-{
- IRRef tab = fins->op1; /* Table reference. */
- IRRef lim = tab; /* Search limit. */
- IRRef ref;
-
- /* Any ASTORE is a conflict and limits the search. */
- if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
-
- /* Search for conflicting HSTORE with numeric key. */
- ref = J->chain[IR_HSTORE];
- while (ref > lim) {
- IRIns *store = IR(ref);
- IRIns *href = IR(store->op1);
- IRIns *key = IR(href->op2);
- if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
- lim = ref; /* Conflicting store found, limits search for TLEN. */
- break;
- }
- ref = store->prev;
- }
-
- /* Try to find a matching load. Below the conflicting store, if any. */
- return lj_opt_cselim(J, lim);
-}
-
-/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
-
-/* Check whether the previous value for a table store is non-nil.
-** This can be derived either from a previous store or from a previous
-** load (because all loads from tables perform a type check).
-**
-** The result of the analysis can be used to avoid the metatable check
-** and the guard against HREF returning niltv. Both of these are cheap,
-** so let's not spend too much effort on the analysis.
-**
-** A result of 1 is exact: previous value CANNOT be nil.
-** A result of 0 is inexact: previous value MAY be nil.
-*/
-int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
-{
- /* First check stores. */
- IRRef ref = J->chain[loadop+IRDELTA_L2S];
- while (ref > xref) {
- IRIns *store = IR(ref);
- if (store->op1 == xref) { /* Same xREF. */
- /* A nil store MAY alias, but a non-nil store MUST alias. */
- return !irt_isnil(store->t);
- } else if (irt_isnil(store->t)) { /* Must check any nil store. */
- IRRef skref = IR(store->op1)->op2;
- IRRef xkref = IR(xref)->op2;
- /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
- if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
- if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
- return 0; /* A nil store with same const key or var key MAY alias. */
- /* Different const keys CANNOT alias. */
- } /* Different key types CANNOT alias. */
- } /* Other non-nil stores MAY alias. */
- ref = store->prev;
- }
-
- /* Check loads since nothing could be derived from stores. */
- ref = J->chain[loadop];
- while (ref > xref) {
- IRIns *load = IR(ref);
- if (load->op1 == xref) { /* Same xREF. */
- /* A nil load MAY alias, but a non-nil load MUST alias. */
- return !irt_isnil(load->t);
- } /* Other non-nil loads MAY alias. */
- ref = load->prev;
- }
- return 0; /* Nothing derived at all, previous value MAY be nil. */
-}
-
-/* ------------------------------------------------------------------------ */
-
-#undef IR
-#undef fins
-#undef fleft
-#undef fright
-
-#endif
+/*
+** Memory access optimizations.
+** AA: Alias Analysis using high-level semantic disambiguation.
+** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
+** DSE: Dead-Store Elimination.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_mem_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+#define fleft (&J->fold.left)
+#define fright (&J->fold.right)
+
+/*
+** Caveat #1: return value is not always a TRef -- only use with tref_ref().
+** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
+*/
+
+/* Return values from alias analysis. */
+typedef enum {
+ ALIAS_NO, /* The two refs CANNOT alias (exact). */
+ ALIAS_MAY, /* The two refs MAY alias (inexact). */
+ ALIAS_MUST /* The two refs MUST alias (exact). */
+} AliasRet;
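+
+/* Editorial note (illustrative only): for two constant keys, e.g. t[1] and
+** t[2], the analyses below can answer ALIAS_NO; for t[i] and t[j] with
+** unrelated variable keys they must answer ALIAS_MAY; and two occurrences of
+** t[i] with the same key ref answer ALIAS_MUST, which enables store-to-load
+** forwarding and dead-store elimination.
+*/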
+
+/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
+
+/* Simplified escape analysis: check for intervening stores. */
+static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
+{
+ IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */
+ for (ir++; ir < stop; ir++)
+ if (ir->op2 == ref &&
+ (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
+ ir->o == IR_USTORE || ir->o == IR_FSTORE))
+ return ALIAS_MAY; /* Reference was stored and might alias. */
+ return ALIAS_NO; /* Reference was not stored. */
+}
+
+/* Alias analysis for two different table references. */
+static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
+{
+ IRIns *taba = IR(ta), *tabb = IR(tb);
+ int newa, newb;
+ lua_assert(ta != tb);
+ lua_assert(irt_istab(taba->t) && irt_istab(tabb->t));
+ /* Disambiguate new allocations. */
+ newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
+ newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
+ if (newa && newb)
+ return ALIAS_NO; /* Two different allocations never alias. */
+ if (newb) { /* At least one allocation? */
+ IRIns *tmp = taba; taba = tabb; tabb = tmp;
+ } else if (!newa) {
+ return ALIAS_MAY; /* Anything else: we just don't know. */
+ }
+ return aa_escape(J, taba, tabb);
+}
+
+/* Alias analysis for array and hash access using key-based disambiguation. */
+static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ IRRef ka = refa->op2;
+ IRRef kb = refb->op2;
+ IRIns *keya, *keyb;
+ IRRef ta, tb;
+ if (refa == refb)
+ return ALIAS_MUST; /* Shortcut for same refs. */
+ keya = IR(ka);
+ if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
+ keyb = IR(kb);
+ if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
+ ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
+ tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
+ if (ka == kb) {
+ /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
+ if (ta == tb)
+ return ALIAS_MUST; /* Same key, same table. */
+ else
+ return aa_table(J, ta, tb); /* Same key, possibly different table. */
+ }
+ if (irref_isk(ka) && irref_isk(kb))
+ return ALIAS_NO; /* Different constant keys. */
+ if (refa->o == IR_AREF) {
+ /* Disambiguate array references based on index arithmetic. */
+ int32_t ofsa = 0, ofsb = 0;
+ IRRef basea = ka, baseb = kb;
+ lua_assert(refb->o == IR_AREF);
+ /* Gather base and offset from t[base] or t[base+-ofs]. */
+ if (keya->o == IR_ADD && irref_isk(keya->op2)) {
+ basea = keya->op1;
+ ofsa = IR(keya->op2)->i;
+ if (basea == kb && ofsa != 0)
+ return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */
+ }
+ if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
+ baseb = keyb->op1;
+ ofsb = IR(keyb->op2)->i;
+ if (ka == baseb && ofsb != 0)
+ return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */
+ }
+ if (basea == baseb && ofsa != ofsb)
+ return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
+ } else {
+ /* Disambiguate hash references based on the type of their keys. */
+ lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
+ (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF));
+ if (!irt_sametype(keya->t, keyb->t))
+ return ALIAS_NO; /* Different key types. */
+ }
+ if (ta == tb)
+ return ALIAS_MAY; /* Same table, cannot disambiguate keys. */
+ else
+ return aa_table(J, ta, tb); /* Try to disambiguate tables. */
+}
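+
+/* Editorial example (illustrative only): the index arithmetic above proves
+** that t[i] and t[i+1] cannot alias, since both AREF keys reduce to the same
+** base 'i' with different constant offsets (0 vs. 1), i.e. the ALIAS_NO case
+** for t[base+-o1] vs. t[base+-o2].
+*/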
+
+/* Array and hash load forwarding. */
+static TRef fwd_ahload(jit_State *J, IRRef xref)
+{
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[fins->o+IRDELTA_L2S];
+ while (ref > xref) {
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+ /* No conflicting store (yet): const-fold loads from allocations. */
+ {
+ IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
+ IRRef tab = ir->op1;
+ ir = IR(tab);
+ if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
+ /* A NEWREF with a number key may end up pointing to the array part.
+ ** But it's referenced from HSTORE and not found in the ASTORE chain.
+ ** For now simply consider this a conflict without forwarding anything.
+ */
+ if (xr->o == IR_AREF) {
+ IRRef ref2 = J->chain[IR_NEWREF];
+ while (ref2 > tab) {
+ IRIns *newref = IR(ref2);
+ if (irt_isnum(IR(newref->op2)->t))
+ goto cselim;
+ ref2 = newref->prev;
+ }
+ }
+ /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
+ ** But the above search for conflicting stores was limited by xref.
+ ** So continue searching, limited by the TNEW/TDUP. Store forwarding
+ ** is ok, too. A conflict does NOT limit the search for a matching load.
+ */
+ while (ref > tab) {
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: goto cselim; /* Conflicting store. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+ lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t));
+ if (irt_ispri(fins->t)) {
+ return TREF_PRI(irt_type(fins->t));
+ } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
+ irt_isstr(fins->t)) {
+ TValue keyv;
+ cTValue *tv;
+ IRIns *key = IR(xr->op2);
+ if (key->o == IR_KSLOT) key = IR(key->op1);
+ lj_ir_kvalue(J->L, &keyv, key);
+ tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
+ lua_assert(itype2irt(tv) == irt_type(fins->t));
+ if (irt_isnum(fins->t))
+ return lj_ir_knum_u64(J, tv->u64);
+ else if (LJ_DUALNUM && irt_isint(fins->t))
+ return lj_ir_kint(J, intV(tv));
+ else
+ return lj_ir_kstr(J, strV(tv));
+ }
+ /* Otherwise: don't intern as a constant. */
+ }
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[fins->o];
+ while (ref > lim) {
+ IRIns *load = IR(ref);
+ if (load->op1 == xref)
+ return ref; /* Load forwarding. */
+ ref = load->prev;
+ }
+ return 0; /* Conflict or no match. */
+}
+
+/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
+static TRef fwd_aload_reassoc(jit_State *J)
+{
+ IRIns *irx = IR(fins->op1);
+ IRIns *key = IR(irx->op2);
+ if (key->o == IR_ADD && irref_isk(key->op2)) {
+ IRIns *add2 = IR(key->op1);
+ if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+ IR(key->op2)->i == -IR(add2->op2)->i) {
+ IRRef ref = J->chain[IR_AREF];
+ IRRef lim = add2->op1;
+ if (irx->op1 > lim) lim = irx->op1;
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
+ return fwd_ahload(J, ref);
+ ref = ir->prev;
+ }
+ }
+ }
+ return 0;
+}
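+
+/* Editorial example (illustrative only): this reassociation covers loops like
+**   for i = 2, n do t[i] = t[i-1] end
+** where, after the index PHI advances i to i+1, the key of t[i-1] folds back
+** to the previous iteration's 'i', so the load can be forwarded from the
+** AREF/ASTORE of the preceding iteration instead of hitting memory.
+*/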
+
+/* ALOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
+{
+ IRRef ref;
+ if ((ref = fwd_ahload(J, fins->op1)) ||
+ (ref = fwd_aload_reassoc(J)))
+ return ref;
+ return EMITFOLD;
+}
+
+/* HLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
+{
+ IRRef ref = fwd_ahload(J, fins->op1);
+ if (ref)
+ return ref;
+ return EMITFOLD;
+}
+
+/* HREFK forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
+{
+ IRRef tab = fleft->op1;
+ IRRef ref = J->chain[IR_NEWREF];
+ while (ref > tab) {
+ IRIns *newref = IR(ref);
+ if (tab == newref->op1) {
+ if (fright->op1 == newref->op2)
+ return ref; /* Forward from NEWREF. */
+ else
+ goto docse;
+ } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
+ goto docse;
+ }
+ ref = newref->prev;
+ }
+ /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
+ if (IR(tab)->o == IR_TDUP)
+ fins->t.irt &= ~IRT_GUARD; /* Drop HREFK guard. */
+docse:
+ return CSEFOLD;
+}
+
+/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
+int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
+{
+ IRRef lim = fins->op1; /* Search limit. */
+ IRRef ref;
+
+ /* The key for an ASTORE may end up in the hash part after a NEWREF. */
+ if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
+ ref = J->chain[IR_ASTORE];
+ while (ref > lim) {
+ if (ref < J->chain[IR_NEWREF])
+ return 0; /* Conflict. */
+ ref = IR(ref)->prev;
+ }
+ }
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_HSTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
+ return 0; /* Conflict. */
+ ref = store->prev;
+ }
+
+ return 1; /* No conflict. Can fold to niltv. */
+}
+
+/* Check whether there's no aliasing NEWREF for the left operand. */
+int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
+{
+ IRRef ta = fins->op1;
+ IRRef ref = J->chain[IR_NEWREF];
+ while (ref > lim) {
+ IRIns *newref = IR(ref);
+ if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
+ return 0; /* Conflict. */
+ ref = newref->prev;
+ }
+ return 1; /* No conflict. Can safely FOLD/CSE. */
+}
+
+/* ASTORE/HSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
+{
+ IRRef xref = fins->op1; /* xREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(xref);
+ IRRef1 *refp = &J->chain[fins->o];
+ IRRef ref = *refp;
+ while (ref > xref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY: /* Store to MAYBE the same location. */
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST: /* Store to the same location. */
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards (includes conflicting loads). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || ir->o == IR_CALLL)
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- ULOAD forwarding ---------------------------------------------------- */
+
+/* The current alias analysis for upvalues is very simplistic. It only
+** disambiguates between the unique upvalues of the same function.
+** This is good enough for now, since most upvalues are read-only.
+**
+** A more precise analysis would be feasible with the help of the parser:
+** generate a unique key for every upvalue, even across all prototypes.
+** Lacking a realistic use-case, it's unclear whether this is beneficial.
+*/
+static AliasRet aa_uref(IRIns *refa, IRIns *refb)
+{
+ if (refa->o != refb->o)
+ return ALIAS_NO; /* Different UREFx type. */
+ if (refa->op1 == refb->op1) { /* Same function. */
+ if (refa->op2 == refb->op2)
+ return ALIAS_MUST; /* Same function, same upvalue idx. */
+ else
+ return ALIAS_NO; /* Same function, different upvalue idx. */
+ } else { /* Different functions, check disambiguation hash values. */
+ if (((refa->op2 ^ refb->op2) & 0xff))
+ return ALIAS_NO; /* Upvalues with different hash values cannot alias. */
+ else
+ return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */
+ }
+}
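+
+/* Editorial example (illustrative only): in
+**   local a, b = 1, 2
+**   local function f() a = b; return b end
+** the upvalue references for 'a' and 'b' share the same function operand but
+** have different upvalue indexes, so aa_uref() returns ALIAS_NO and the store
+** to 'a' does not block forwarding of the load of 'b'.
+*/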
+
+/* ULOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
+{
+ IRRef uref = fins->op1;
+ IRRef lim = uref; /* Search limit. */
+ IRIns *xr = IR(uref);
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_USTORE];
+ while (ref > uref) {
+ IRIns *store = IR(ref);
+ switch (aa_uref(xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* USTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
+{
+ IRRef xref = fins->op1; /* xREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(xref);
+ IRRef1 *refp = &J->chain[IR_USTORE];
+ IRRef ref = *refp;
+ while (ref > xref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_uref(xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY: /* Store to MAYBE the same location. */
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST: /* Store to the same location. */
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards (includes conflicting loads). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ if (ref+1 < J->cur.nins &&
+ store[1].o == IR_OBAR && store[1].op1 == xref) {
+ IRRef1 *bp = &J->chain[IR_OBAR];
+ IRIns *obar;
+ for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
+ bp = &obar->prev;
+ /* Remove OBAR, too. */
+ *bp = obar->prev;
+ obar->o = IR_NOP;
+ obar->t.irt = IRT_NIL;
+ obar->op1 = obar->op2 = 0;
+ obar->prev = 0;
+ }
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
+
+/* Alias analysis for field access.
+** Field loads are cheap and field stores are rare.
+** Simple disambiguation based on field types is good enough.
+*/
+static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ if (refa->op2 != refb->op2)
+ return ALIAS_NO; /* Different fields. */
+ if (refa->op1 == refb->op1)
+ return ALIAS_MUST; /* Same field, same object. */
+ else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
+ return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */
+ else
+ return ALIAS_MAY; /* Same field, possibly different object. */
+}
+
+/* Only the loads for mutable fields end up here (see FOLD). */
+TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
+{
+ IRRef oref = fins->op1; /* Object reference. */
+ IRRef fid = fins->op2; /* Field ID. */
+ IRRef lim = oref; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_FSTORE];
+ while (ref > oref) {
+ IRIns *store = IR(ref);
+ switch (aa_fref(J, fins, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+ /* No conflicting store: const-fold field loads from allocations. */
+ if (fid == IRFL_TAB_META) {
+ IRIns *ir = IR(oref);
+ if (ir->o == IR_TNEW || ir->o == IR_TDUP)
+ return lj_ir_knull(J, IRT_TAB);
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* FSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
+{
+ IRRef fref = fins->op1; /* FREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(fref);
+ IRRef1 *refp = &J->chain[IR_FSTORE];
+ IRRef ref = *refp;
+ while (ref > fref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_fref(J, xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY:
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST:
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards or conflicting loads. */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
+
+/* Find cdata allocation for a reference (if any). */
+static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
+{
+ while (ir->o == IR_ADD) {
+ if (!irref_isk(ir->op1)) {
+ IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */
+ if (ir1) return ir1;
+ }
+ if (irref_isk(ir->op2)) return NULL;
+ ir = IR(ir->op2); /* Flatten right-recursion. */
+ }
+ return ir->o == IR_CNEW ? ir : NULL;
+}
+
+/* Alias analysis for two cdata allocations. */
+static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ IRIns *cnewa = aa_findcnew(J, refa);
+ IRIns *cnewb = aa_findcnew(J, refb);
+ if (cnewa == cnewb)
+ return ALIAS_MAY; /* Same allocation or neither is an allocation. */
+ if (cnewa && cnewb)
+ return ALIAS_NO; /* Two different allocations never alias. */
+ if (cnewb) { cnewa = cnewb; refb = refa; }
+ return aa_escape(J, cnewa, refb);
+}
+
+/* Alias analysis for XLOAD/XSTORE. */
+static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
+{
+ ptrdiff_t ofsa = 0, ofsb = 0;
+ IRIns *refb = IR(xb->op1);
+ IRIns *basea = refa, *baseb = refb;
+ if (refa == refb && irt_sametype(xa->t, xb->t))
+ return ALIAS_MUST; /* Shortcut for same refs with identical type. */
+ /* Offset-based disambiguation. */
+ if (refa->o == IR_ADD && irref_isk(refa->op2)) {
+ IRIns *irk = IR(refa->op2);
+ basea = IR(refa->op1);
+ ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ if (basea == refb && ofsa != 0)
+ return ALIAS_NO; /* base+-ofs vs. base. */
+ }
+ if (refb->o == IR_ADD && irref_isk(refb->op2)) {
+ IRIns *irk = IR(refb->op2);
+ baseb = IR(refb->op1);
+ ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ if (refa == baseb && ofsb != 0)
+ return ALIAS_NO; /* base vs. base+-ofs. */
+ }
+ /* This implements (very) strict aliasing rules.
+ ** Different types do NOT alias, except for differences in signedness.
+ ** Type punning through unions is allowed (but forces a reload).
+ */
+ if (basea == baseb) {
+ ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
+ if (ofsa == ofsb) {
+ if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
+ return ALIAS_MUST; /* Same-sized, same-kind. May need to convert. */
+ } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
+ return ALIAS_NO; /* Non-overlapping base+-o1 vs. base+-o2. */
+ }
+ /* NYI: extract, extend or reinterpret bits (int <-> fp). */
+ return ALIAS_MAY; /* Overlapping or type punning: force reload. */
+ }
+ if (!irt_sametype(xa->t, xb->t) &&
+ !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
+ ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
+ return ALIAS_NO;
+ /* NYI: structural disambiguation. */
+ return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. */
+}
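+
+/* Editorial example (illustrative, FFI-level view): for
+**   local p = ffi.new("int32_t[4]")
+** stores to p[0] and p[1] resolve to the same base with offsets 0 and 4 and a
+** size of 4 bytes each, so the offset-based disambiguation above yields
+** ALIAS_NO; type punning the same bytes through a union only gets ALIAS_MAY
+** and forces a reload, matching the strict-aliasing comment above.
+*/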
+
+/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
+static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
+{
+ IRRef ref = J->chain[op];
+ IRRef lim = op1;
+ if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == op1 && ir->op2 == op2)
+ return ref;
+ ref = ir->prev;
+ }
+ return 0;
+}
+
+/* Reassociate index references. */
+static IRRef reassoc_xref(jit_State *J, IRIns *ir)
+{
+ ptrdiff_t ofs = 0;
+ if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */
+ IRIns *irk = IR(ir->op2);
+ ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ ir = IR(ir->op1);
+ }
+ if (ir->o == IR_ADD) { /* Add of base + index. */
+ /* Index ref > base ref for loop-carried dependences. Only check op1. */
+ IRIns *ir2, *ir1 = IR(ir->op1);
+ int32_t shift = 0;
+ IRRef idxref;
+ /* Determine index shifts. Don't bother with IR_MUL here. */
+ if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
+ shift = IR(ir1->op2)->i;
+ else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
+ shift = 1;
+ else
+ ir1 = ir;
+ ir2 = IR(ir1->op1);
+ /* A non-reassociated add. Must be a loop-carried dependence. */
+ if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
+ ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
+ else
+ return 0;
+ idxref = ir2->op1;
+ /* Try to CSE the reassociated chain. Give up if not found. */
+ if (ir1 != ir &&
+ !(idxref = reassoc_trycse(J, ir1->o, idxref,
+ ir1->o == IR_BSHL ? ir1->op2 : idxref)))
+ return 0;
+ if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
+ return 0;
+ if (ofs != 0) {
+ IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
+ if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
+ return 0;
+ }
+ return idxref; /* Success, found a reassociated index reference. Phew. */
+ }
+ return 0; /* Failure. */
+}
+
+/* XLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
+{
+ IRRef xref = fins->op1;
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef ref;
+
+ if ((fins->op2 & IRXLOAD_READONLY))
+ goto cselim;
+ if ((fins->op2 & IRXLOAD_VOLATILE))
+ goto doemit;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_XSTORE];
+retry:
+ if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+ if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ switch (aa_xref(J, xr, fins, store)) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST:
+ /* Emit conversion if the loaded type doesn't match the forwarded type. */
+ if (!irt_sametype(fins->t, IR(store->op2)->t)) {
+ IRType st = irt_type(fins->t);
+ if (st == IRT_I8 || st == IRT_I16) { /* Trunc + sign-extend. */
+ st |= IRCONV_SEXT;
+ } else if (st == IRT_U8 || st == IRT_U16) { /* Trunc + zero-extend. */
+ } else if (st == IRT_INT) {
+ st = irt_type(IR(store->op2)->t); /* Needs dummy CONV.int.*. */
+ } else { /* I64/U64 are boxed, U32 is hidden behind a CONV.num.u32. */
+ goto store_fwd;
+ }
+ fins->ot = IRTI(IR_CONV);
+ fins->op1 = store->op2;
+ fins->op2 = (IRT_INT<<5)|st;
+ return RETRYFOLD;
+ }
+ store_fwd:
+ return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[IR_XLOAD];
+ while (ref > lim) {
+ /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
+ if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
+ return ref;
+ ref = IR(ref)->prev;
+ }
+
+ /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
+ if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
+ xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
+ ref = J->chain[IR_XSTORE];
+ while (ref > lim) /* Skip stores that have already been checked. */
+ ref = IR(ref)->prev;
+ lim = xref;
+ xr = IR(xref);
+ goto retry; /* Retry with the reassociated reference. */
+ }
+doemit:
+ return EMITFOLD;
+}
+
+/* XSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
+{
+ IRRef xref = fins->op1;
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRRef1 *refp = &J->chain[IR_XSTORE];
+ IRRef ref = *refp;
+ if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+ if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+ while (ref > lim) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_xref(J, xr, fins, store)) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY:
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST:
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards or any XLOADs (no AA performed). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ store->o = IR_NOP;
+ store->t.irt = IRT_NIL;
+ store->op1 = store->op2 = 0;
+ store->prev = 0;
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- Forwarding of lj_tab_len -------------------------------------------- */
+
+/* This is rather simplistic right now, but better than nothing. */
+TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
+{
+ IRRef tab = fins->op1; /* Table reference. */
+ IRRef lim = tab; /* Search limit. */
+ IRRef ref;
+
+ /* Any ASTORE is a conflict and limits the search. */
+ if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
+
+ /* Search for conflicting HSTORE with numeric key. */
+ ref = J->chain[IR_HSTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ IRIns *href = IR(store->op1);
+ IRIns *key = IR(href->op2);
+ if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
+ lim = ref; /* Conflicting store found, limits search for TLEN. */
+ break;
+ }
+ ref = store->prev;
+ }
+
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
+
+/* Check whether the previous value for a table store is non-nil.
+** This can be derived either from a previous store or from a previous
+** load (because all loads from tables perform a type check).
+**
+** The result of the analysis can be used to avoid the metatable check
+** and the guard against HREF returning niltv. Both of these are cheap,
+** so let's not spend too much effort on the analysis.
+**
+** A result of 1 is exact: previous value CANNOT be nil.
+** A result of 0 is inexact: previous value MAY be nil.
+*/
+int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
+{
+ /* First check stores. */
+ IRRef ref = J->chain[loadop+IRDELTA_L2S];
+ while (ref > xref) {
+ IRIns *store = IR(ref);
+ if (store->op1 == xref) { /* Same xREF. */
+ /* A nil store MAY alias, but a non-nil store MUST alias. */
+ return !irt_isnil(store->t);
+ } else if (irt_isnil(store->t)) { /* Must check any nil store. */
+ IRRef skref = IR(store->op1)->op2;
+ IRRef xkref = IR(xref)->op2;
+ /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
+ if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
+ if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
+ return 0; /* A nil store with same const key or var key MAY alias. */
+ /* Different const keys CANNOT alias. */
+ } /* Different key types CANNOT alias. */
+ } /* Other non-nil stores MAY alias. */
+ ref = store->prev;
+ }
+
+ /* Check loads since nothing could be derived from stores. */
+ ref = J->chain[loadop];
+ while (ref > xref) {
+ IRIns *load = IR(ref);
+ if (load->op1 == xref) { /* Same xREF. */
+ /* A nil load MAY alias, but a non-nil load MUST alias. */
+ return !irt_isnil(load->t);
+ } /* Other non-nil loads MAY alias. */
+ ref = load->prev;
+ }
+ return 0; /* Nothing derived at all, previous value MAY be nil. */
+}
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fleft
+#undef fright
+
+#endif
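
The XLOAD forwarding and XSTORE elimination in lj_opt_mem.c above both reduce to the same decision structure: walk the store chain backwards from the access and classify each store against it as no-alias, may-alias or must-alias. Below is a minimal standalone sketch of that control flow; it is not LuaJIT code, and the MemOp type and the trivial known-address alias check are invented purely for illustration.

/* Sketch of backward store-chain scanning with a three-way alias verdict. */
#include <stdio.h>

typedef enum { ALIAS_NO, ALIAS_MAY, ALIAS_MUST } AliasResult;

typedef struct { int addr; int known; int value; } MemOp;  /* a prior store */

/* Trivial alias check: equal known addresses MUST alias, distinct known
** addresses cannot alias, anything with an unknown address MAY alias. */
static AliasResult alias(const MemOp *st, int loadaddr, int loadknown)
{
  if (st->known && loadknown)
    return st->addr == loadaddr ? ALIAS_MUST : ALIAS_NO;
  return ALIAS_MAY;
}

/* Returns 1 and sets *out if the load can be forwarded from a store. */
static int fwd_load(const MemOp *stores, int nstores, int addr, int known,
                    int *out)
{
  int i;
  for (i = nstores - 1; i >= 0; i--) {   /* newest store first */
    switch (alias(&stores[i], addr, known)) {
    case ALIAS_NO:   break;              /* keep searching */
    case ALIAS_MAY:  return 0;           /* unknown overlap: give up */
    case ALIAS_MUST: *out = stores[i].value; return 1;
    }
  }
  return 0;                              /* no matching store: emit the load */
}

int main(void)
{
  MemOp stores[] = { { 0x10, 1, 7 }, { 0x20, 1, 9 } };
  int v;
  if (fwd_load(stores, 2, 0x10, 1, &v))
    printf("forwarded value %d\n", v);   /* prints 7 */
  return 0;
}

A may-aliasing store stops the search because an unknown overlap could have clobbered the value; only a must-aliasing store is safe to forward from.
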
diff --git a/3rdparty/lua/src/lj_opt_narrow.c b/3rdparty/lua/src/lj_opt_narrow.c
index 58b3763..caf2a8d 100644
--- a/3rdparty/lua/src/lj_opt_narrow.c
+++ b/3rdparty/lua/src/lj_opt_narrow.c
@@ -1,7 +1,7 @@
/*
** NARROW: Narrowing of numbers to integers (double to int32_t).
** STRIPOV: Stripping of overflow checks.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_opt_narrow_c
@@ -247,16 +247,10 @@ static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth)
if (bp) {
ref = bp->val;
} else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
- NarrowIns *savesp = nc->sp;
narrow_stripov_backprop(nc, ir->op1, depth);
- if (nc->sp < nc->maxsp) {
- narrow_stripov_backprop(nc, ir->op2, depth);
- if (nc->sp < nc->maxsp) {
- *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
- return;
- }
- }
- nc->sp = savesp; /* Path too deep, need to backtrack. */
+ narrow_stripov_backprop(nc, ir->op2, depth);
+ *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
+ return;
}
}
*nc->sp++ = NARROWINS(NARROW_REF, ref);
@@ -269,8 +263,6 @@ static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
IRIns *ir = IR(ref);
IRRef cref;
- if (nc->sp >= nc->maxsp) return 10; /* Path too deep. */
-
/* Check the easy cases first. */
if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) {
if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY)
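
The lj_opt_narrow.c hunks above drop the buffer-bound checks: the removed lines saved the output pointer (savesp) and rolled the conversion buffer back whenever a recursion path ran out of space, while the lines added here recurse into both operands without re-checking the bound. A minimal sketch of that save-and-backtrack pattern follows; the names and the toy operand encoding are invented and are not taken from LuaJIT.

/* Sketch of bounded recursive expansion with pointer save/restore. */
#include <stdio.h>

#define MAXSP 8

typedef struct { int buf[MAXSP]; int *sp; int *maxsp; } Conv;

/* Expand node 'ref' into the buffer: refs > 1 act as "ops" with two
** operands (ref-1 and ref-2, purely for illustration), smaller refs are
** leaves.  Ops are recorded as positive entries, leaves as negated ones. */
static void backprop(Conv *nc, int ref, int depth)
{
  if (ref > 1 && depth < 4 && nc->sp < nc->maxsp) {
    int *savesp = nc->sp;                /* remember position first */
    backprop(nc, ref - 1, depth + 1);    /* left operand */
    if (nc->sp < nc->maxsp) {
      backprop(nc, ref - 2, depth + 1);  /* right operand */
      if (nc->sp < nc->maxsp) {
        *nc->sp++ = ref;                 /* both operands fit: record op */
        return;
      }
    }
    nc->sp = savesp;                     /* path too deep: backtrack */
  }
  if (nc->sp < nc->maxsp)
    *nc->sp++ = -ref;                    /* fall back to a plain leaf */
}

int main(void)
{
  Conv nc;
  int *p;
  nc.sp = nc.buf;
  nc.maxsp = nc.buf + MAXSP;
  backprop(&nc, 6, 0);                   /* expansion overflows and backs off */
  for (p = nc.buf; p < nc.sp; p++)
    printf("%d ", *p);                   /* prints whatever survived */
  printf("\n");
  return 0;
}

Rolling sp back to savesp discards every entry the failed path produced, so a single leaf entry is emitted instead of a partially expanded chain.
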
diff --git a/3rdparty/lua/src/lj_opt_sink.c b/3rdparty/lua/src/lj_opt_sink.c
index 7f6e724..56e4636 100644
--- a/3rdparty/lua/src/lj_opt_sink.c
+++ b/3rdparty/lua/src/lj_opt_sink.c
@@ -1,245 +1,245 @@
-/*
-** SINK: Allocation Sinking and Store Sinking.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_opt_sink_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_iropt.h"
-#include "lj_target.h"
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Check whether the store ref points to an eligible allocation. */
-static IRIns *sink_checkalloc(jit_State *J, IRIns *irs)
-{
- IRIns *ir = IR(irs->op1);
- if (!irref_isk(ir->op2))
- return NULL; /* Non-constant key. */
- if (ir->o == IR_HREFK || ir->o == IR_AREF)
- ir = IR(ir->op1);
- else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF ||
- ir->o == IR_FREF || ir->o == IR_ADD))
- return NULL; /* Unhandled reference type (for XSTORE). */
- ir = IR(ir->op1);
- if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW))
- return NULL; /* Not an allocation. */
- return ir; /* Return allocation. */
-}
-
-/* Recursively check whether a value depends on a PHI. */
-static int sink_phidep(jit_State *J, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isphi(ir->t)) return 1;
- if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1)) return 1;
- if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2)) return 1;
- return 0;
-}
-
-/* Check whether a value is a sinkable PHI or loop-invariant. */
-static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref)
-{
- if (ref >= REF_FIRST) {
- IRIns *ir = IR(ref);
- if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT &&
- irt_isphi(IR(ir->op1)->t))) {
- ira->prev++;
- return 1; /* Sinkable PHI. */
- }
- /* Otherwise the value must be loop-invariant. */
- return ref < J->loopref && !sink_phidep(J, ref);
- }
- return 1; /* Constant (non-PHI). */
-}
-
-/* Mark non-sinkable allocations using single-pass backward propagation.
-**
-** Roots for the marking process are:
-** - Some PHIs or snapshots (see below).
-** - Non-PHI, non-constant values stored to PHI allocations.
-** - All guards.
-** - Any remaining loads not eliminated by store-to-load forwarding.
-** - Stores with non-constant keys.
-** - All stored values.
-*/
-static void sink_mark_ins(jit_State *J)
-{
- IRIns *ir, *irlast = IR(J->cur.nins-1);
- for (ir = irlast ; ; ir--) {
- switch (ir->o) {
- case IR_BASE:
- return; /* Finished. */
- case IR_CALLL: /* IRCALL_lj_tab_len */
- case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR:
- irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */
- break;
- case IR_FLOAD:
- if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META)
- irt_setmark(IR(ir->op1)->t); /* Mark table for remaining loads. */
- break;
- case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
- IRIns *ira = sink_checkalloc(J, ir);
- if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2)))
- irt_setmark(IR(ir->op1)->t); /* Mark ineligible ref. */
- irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
- break;
- }
-#if LJ_HASFFI
- case IR_CNEWI:
- if (irt_isphi(ir->t) &&
- (!sink_checkphi(J, ir, ir->op2) ||
- (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP &&
- !sink_checkphi(J, ir, (ir+1)->op2))))
- irt_setmark(ir->t); /* Mark ineligible allocation. */
- /* fallthrough */
-#endif
- case IR_USTORE:
- irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
- break;
-#if LJ_HASFFI
- case IR_CALLXS:
-#endif
- case IR_CALLS:
- irt_setmark(IR(ir->op1)->t); /* Mark (potentially) stored values. */
- break;
- case IR_PHI: {
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- irl->prev = irr->prev = 0; /* Clear PHI value counts. */
- if (irl->o == irr->o &&
- (irl->o == IR_TNEW || irl->o == IR_TDUP ||
- (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI))))
- break;
- irt_setmark(irl->t);
- irt_setmark(irr->t);
- break;
- }
- default:
- if (irt_ismarked(ir->t) || irt_isguard(ir->t)) { /* Propagate mark. */
- if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
- if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
- }
- break;
- }
- }
-}
-
-/* Mark all instructions referenced by a snapshot. */
-static void sink_mark_snap(jit_State *J, SnapShot *snap)
-{
- SnapEntry *map = &J->cur.snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- for (n = 0; n < nent; n++) {
- IRRef ref = snap_ref(map[n]);
- if (!irref_isk(ref))
- irt_setmark(IR(ref)->t);
- }
-}
-
-/* Iteratively remark PHI refs with differing marks or PHI value counts. */
-static void sink_remark_phi(jit_State *J)
-{
- IRIns *ir;
- int remark;
- do {
- remark = 0;
- for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) {
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- if (((irl->t.irt ^ irr->t.irt) & IRT_MARK))
- remark = 1;
- else if (irl->prev == irr->prev)
- continue;
- irt_setmark(IR(ir->op1)->t);
- irt_setmark(IR(ir->op2)->t);
- }
- } while (remark);
-}
-
-/* Sweep instructions and tag sunken allocations and stores. */
-static void sink_sweep_ins(jit_State *J)
-{
- IRIns *ir, *irfirst = IR(J->cur.nk);
- for (ir = IR(J->cur.nins-1) ; ir >= irfirst; ir--) {
- switch (ir->o) {
- case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
- IRIns *ira = sink_checkalloc(J, ir);
- if (ira && !irt_ismarked(ira->t)) {
- int delta = (int)(ir - ira);
- ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
- } else {
- ir->prev = REGSP_INIT;
- }
- break;
- }
- case IR_NEWREF:
- if (!irt_ismarked(IR(ir->op1)->t)) {
- ir->prev = REGSP(RID_SINK, 0);
- } else {
- irt_clearmark(ir->t);
- ir->prev = REGSP_INIT;
- }
- break;
-#if LJ_HASFFI
- case IR_CNEW: case IR_CNEWI:
-#endif
- case IR_TNEW: case IR_TDUP:
- if (!irt_ismarked(ir->t)) {
- ir->t.irt &= ~IRT_GUARD;
- ir->prev = REGSP(RID_SINK, 0);
- J->cur.sinktags = 1; /* Signal present SINK tags to assembler. */
- } else {
- irt_clearmark(ir->t);
- ir->prev = REGSP_INIT;
- }
- break;
- case IR_PHI: {
- IRIns *ira = IR(ir->op2);
- if (!irt_ismarked(ira->t) &&
- (ira->o == IR_TNEW || ira->o == IR_TDUP ||
- (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) {
- ir->prev = REGSP(RID_SINK, 0);
- } else {
- ir->prev = REGSP_INIT;
- }
- break;
- }
- default:
- irt_clearmark(ir->t);
- ir->prev = REGSP_INIT;
- break;
- }
- }
-}
-
-/* Allocation sinking and store sinking.
-**
-** 1. Mark all non-sinkable allocations.
-** 2. Then sink all remaining allocations and the related stores.
-*/
-void lj_opt_sink(jit_State *J)
-{
- const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD|
- JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD);
- if ((J->flags & need) == need &&
- (J->chain[IR_TNEW] || J->chain[IR_TDUP] ||
- (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) {
- if (!J->loopref)
- sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]);
- sink_mark_ins(J);
- if (J->loopref)
- sink_remark_phi(J);
- sink_sweep_ins(J);
- }
-}
-
-#undef IR
-
-#endif
+/*
+** SINK: Allocation Sinking and Store Sinking.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_sink_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Check whether the store ref points to an eligible allocation. */
+static IRIns *sink_checkalloc(jit_State *J, IRIns *irs)
+{
+ IRIns *ir = IR(irs->op1);
+ if (!irref_isk(ir->op2))
+ return NULL; /* Non-constant key. */
+ if (ir->o == IR_HREFK || ir->o == IR_AREF)
+ ir = IR(ir->op1);
+ else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF ||
+ ir->o == IR_FREF || ir->o == IR_ADD))
+ return NULL; /* Unhandled reference type (for XSTORE). */
+ ir = IR(ir->op1);
+ if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW))
+ return NULL; /* Not an allocation. */
+ return ir; /* Return allocation. */
+}
+
+/* Recursively check whether a value depends on a PHI. */
+static int sink_phidep(jit_State *J, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (irt_isphi(ir->t)) return 1;
+ if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1)) return 1;
+ if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2)) return 1;
+ return 0;
+}
+
+/* Check whether a value is a sinkable PHI or loop-invariant. */
+static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref)
+{
+ if (ref >= REF_FIRST) {
+ IRIns *ir = IR(ref);
+ if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT &&
+ irt_isphi(IR(ir->op1)->t))) {
+ ira->prev++;
+ return 1; /* Sinkable PHI. */
+ }
+ /* Otherwise the value must be loop-invariant. */
+ return ref < J->loopref && !sink_phidep(J, ref);
+ }
+ return 1; /* Constant (non-PHI). */
+}
+
+/* Mark non-sinkable allocations using single-pass backward propagation.
+**
+** Roots for the marking process are:
+** - Some PHIs or snapshots (see below).
+** - Non-PHI, non-constant values stored to PHI allocations.
+** - All guards.
+** - Any remaining loads not eliminated by store-to-load forwarding.
+** - Stores with non-constant keys.
+** - All stored values.
+*/
+static void sink_mark_ins(jit_State *J)
+{
+ IRIns *ir, *irlast = IR(J->cur.nins-1);
+ for (ir = irlast ; ; ir--) {
+ switch (ir->o) {
+ case IR_BASE:
+ return; /* Finished. */
+ case IR_CALLL: /* IRCALL_lj_tab_len */
+ case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR:
+ irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */
+ break;
+ case IR_FLOAD:
+ if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META)
+ irt_setmark(IR(ir->op1)->t); /* Mark table for remaining loads. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+ IRIns *ira = sink_checkalloc(J, ir);
+ if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2)))
+ irt_setmark(IR(ir->op1)->t); /* Mark ineligible ref. */
+ irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
+ break;
+ }
+#if LJ_HASFFI
+ case IR_CNEWI:
+ if (irt_isphi(ir->t) &&
+ (!sink_checkphi(J, ir, ir->op2) ||
+ (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP &&
+ !sink_checkphi(J, ir, (ir+1)->op2))))
+ irt_setmark(ir->t); /* Mark ineligible allocation. */
+ /* fallthrough */
+#endif
+ case IR_USTORE:
+ irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
+ break;
+#if LJ_HASFFI
+ case IR_CALLXS:
+#endif
+ case IR_CALLS:
+ irt_setmark(IR(ir->op1)->t); /* Mark (potentially) stored values. */
+ break;
+ case IR_PHI: {
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ irl->prev = irr->prev = 0; /* Clear PHI value counts. */
+ if (irl->o == irr->o &&
+ (irl->o == IR_TNEW || irl->o == IR_TDUP ||
+ (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI))))
+ break;
+ irt_setmark(irl->t);
+ irt_setmark(irr->t);
+ break;
+ }
+ default:
+ if (irt_ismarked(ir->t) || irt_isguard(ir->t)) { /* Propagate mark. */
+ if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+ if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+ }
+ break;
+ }
+ }
+}
+
+/* Mark all instructions referenced by a snapshot. */
+static void sink_mark_snap(jit_State *J, SnapShot *snap)
+{
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (!irref_isk(ref))
+ irt_setmark(IR(ref)->t);
+ }
+}
+
+/* Iteratively remark PHI refs with differing marks or PHI value counts. */
+static void sink_remark_phi(jit_State *J)
+{
+ IRIns *ir;
+ int remark;
+ do {
+ remark = 0;
+ for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) {
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ if (((irl->t.irt ^ irr->t.irt) & IRT_MARK))
+ remark = 1;
+ else if (irl->prev == irr->prev)
+ continue;
+ irt_setmark(IR(ir->op1)->t);
+ irt_setmark(IR(ir->op2)->t);
+ }
+ } while (remark);
+}
+
+/* Sweep instructions and tag sunken allocations and stores. */
+static void sink_sweep_ins(jit_State *J)
+{
+ IRIns *ir, *irfirst = IR(J->cur.nk);
+ for (ir = IR(J->cur.nins-1) ; ir >= irfirst; ir--) {
+ switch (ir->o) {
+ case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+ IRIns *ira = sink_checkalloc(J, ir);
+ if (ira && !irt_ismarked(ira->t)) {
+ int delta = (int)(ir - ira);
+ ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
+ } else {
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ }
+ case IR_NEWREF:
+ if (!irt_ismarked(IR(ir->op1)->t)) {
+ ir->prev = REGSP(RID_SINK, 0);
+ } else {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ }
+ break;
+#if LJ_HASFFI
+ case IR_CNEW: case IR_CNEWI:
+#endif
+ case IR_TNEW: case IR_TDUP:
+ if (!irt_ismarked(ir->t)) {
+ ir->t.irt &= ~IRT_GUARD;
+ ir->prev = REGSP(RID_SINK, 0);
+ J->cur.sinktags = 1; /* Signal present SINK tags to assembler. */
+ } else {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ case IR_PHI: {
+ IRIns *ira = IR(ir->op2);
+ if (!irt_ismarked(ira->t) &&
+ (ira->o == IR_TNEW || ira->o == IR_TDUP ||
+ (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) {
+ ir->prev = REGSP(RID_SINK, 0);
+ } else {
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ }
+ default:
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ break;
+ }
+ }
+}
+
+/* Allocation sinking and store sinking.
+**
+** 1. Mark all non-sinkable allocations.
+** 2. Then sink all remaining allocations and the related stores.
+*/
+void lj_opt_sink(jit_State *J)
+{
+ const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD|
+ JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD);
+ if ((J->flags & need) == need &&
+ (J->chain[IR_TNEW] || J->chain[IR_TDUP] ||
+ (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) {
+ if (!J->loopref)
+ sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]);
+ sink_mark_ins(J);
+ if (J->loopref)
+ sink_remark_phi(J);
+ sink_sweep_ins(J);
+ }
+}
+
+#undef IR
+
+#endif
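
The sink pass re-added above follows the two-phase scheme documented in its comments: a single backward pass propagates escape marks from guards, stores and snapshot references down to their operands, then a sweep tags every allocation that stayed unmarked as sinkable. The standalone sketch below mirrors only that mark-then-sweep shape; it is not LuaJIT code, the Ins kinds and fields are invented, and the real pass additionally handles PHIs, key constness and FFI allocations.

/* Sketch of backward escape marking followed by a sink-tagging sweep. */
#include <stdio.h>

typedef enum { INS_ALLOC, INS_STORE, INS_GUARD, INS_OTHER } InsKind;

typedef struct {
  InsKind kind;
  int op1, op2;     /* operand indices into the same array, or -1 */
  int marked;       /* set if the value escapes / must be materialized */
  int sink;         /* result: allocation can be sunk */
} Ins;

static void mark(Ins *ins, int ref) { if (ref >= 0) ins[ref].marked = 1; }

static void sink_pass(Ins *ins, int n)
{
  int i;
  /* Backward order works because operands always precede their users. */
  for (i = n - 1; i >= 0; i--) {
    Ins *ir = &ins[i];
    if (ir->kind == INS_GUARD || ir->marked) {
      mark(ins, ir->op1);               /* roots propagate to operands */
      mark(ins, ir->op2);
    } else if (ir->kind == INS_STORE) {
      mark(ins, ir->op2);               /* stored values always escape */
    }
  }
  for (i = 0; i < n; i++)               /* sweep: tag unmarked allocations */
    if (ins[i].kind == INS_ALLOC && !ins[i].marked)
      ins[i].sink = 1;
}

int main(void)
{
  /* 0: alloc, 1: store value 2 into alloc 0, 2: some value, 3: guard on 2 */
  Ins ins[4] = {
    { INS_ALLOC, -1, -1, 0, 0 },
    { INS_STORE,  0,  2, 0, 0 },
    { INS_OTHER, -1, -1, 0, 0 },
    { INS_GUARD,  2, -1, 0, 0 },
  };
  sink_pass(ins, 4);
  printf("alloc sinkable: %d\n", ins[0].sink);  /* prints 1 */
  return 0;
}

The allocation itself never gets marked here because nothing except the store's address operand refers to it, which is exactly the case the pass is designed to catch.
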
diff --git a/3rdparty/lua/src/lj_opt_split.c b/3rdparty/lua/src/lj_opt_split.c
index a665892..5a8c33b 100644
--- a/3rdparty/lua/src/lj_opt_split.c
+++ b/3rdparty/lua/src/lj_opt_split.c
@@ -1,731 +1,731 @@
-/*
-** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_opt_split_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI))
-
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_ircall.h"
-#include "lj_iropt.h"
-#include "lj_vm.h"
-
-/* SPLIT pass:
-**
-** This pass splits up 64 bit IR instructions into multiple 32 bit IR
-** instructions. It's only active for soft-float targets or for 32 bit CPUs
-** which lack native 64 bit integer operations (the FFI is currently the
-** only emitter for 64 bit integer instructions).
-**
-** Splitting the IR in a separate pass keeps each 32 bit IR assembler
-** backend simple. Only a small amount of extra functionality needs to be
-** implemented. This is much easier than adding support for allocating
-** register pairs to each backend (believe me, I tried). A few simple, but
-** important optimizations can be performed by the SPLIT pass, which would
-** be tedious to do in the backend.
-**
-** The basic idea is to replace each 64 bit IR instruction with its 32 bit
-** equivalent plus an extra HIOP instruction. The splitted IR is not passed
-** through FOLD or any other optimizations, so each HIOP is guaranteed to
-** immediately follow it's counterpart. The actual functionality of HIOP is
-** inferred from the previous instruction.
-**
-** The operands of HIOP hold the hiword input references. The output of HIOP
-** is the hiword output reference, which is also used to hold the hiword
-** register or spill slot information. The register allocator treats this
-** instruction independently of any other instruction, which improves code
-** quality compared to using fixed register pairs.
-**
-** It's easier to split up some instructions into two regular 32 bit
-** instructions. E.g. XLOAD is split up into two XLOADs with two different
-** addresses. Obviously 64 bit constants need to be split up into two 32 bit
-** constants, too. Some hiword instructions can be entirely omitted, e.g.
-** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls
-** are split up into two 32 bit arguments each.
-**
-** On soft-float targets, floating-point instructions are directly converted
-** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX).
-** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump).
-**
-** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with
-** two int64_t fields:
-**
-** 0100 p32 ADD base +8
-** 0101 i64 XLOAD 0100
-** 0102 i64 ADD 0101 +1
-** 0103 p32 ADD base +16
-** 0104 i64 XSTORE 0103 0102
-**
-** mov rax, [esi+0x8]
-** add rax, +0x01
-** mov [esi+0x10], rax
-**
-** Here's the transformed IR and the x86 machine code after the SPLIT pass:
-**
-** 0100 p32 ADD base +8
-** 0101 int XLOAD 0100
-** 0102 p32 ADD base +12
-** 0103 int XLOAD 0102
-** 0104 int ADD 0101 +1
-** 0105 int HIOP 0103 +0
-** 0106 p32 ADD base +16
-** 0107 int XSTORE 0106 0104
-** 0108 int HIOP 0106 0105
-**
-** mov eax, [esi+0x8]
-** mov ecx, [esi+0xc]
-** add eax, +0x01
-** adc ecx, +0x00
-** mov [esi+0x10], eax
-** mov [esi+0x14], ecx
-**
-** You may notice the reassociated hiword address computation, which is
-** later fused into the mov operands by the assembler.
-*/
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Directly emit the transformed IR without updating chains etc. */
-static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2)
-{
- IRRef nref = lj_ir_nextins(J);
- IRIns *ir = IR(nref);
- ir->ot = ot;
- ir->op1 = op1;
- ir->op2 = op2;
- return nref;
-}
-
-#if LJ_SOFTFP
-/* Emit a (checked) number to integer conversion. */
-static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check)
-{
- IRRef tmp, res;
-#if LJ_LE
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi);
-#else
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo);
-#endif
- res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i);
- if (check) {
- tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d);
- split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
- split_emit(J, IRTGI(IR_EQ), tmp, lo);
- split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi);
- }
- return res;
-}
-
-/* Emit a CALLN with one split 64 bit argument. */
-static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir,
- IRIns *ir, IRCallID id)
-{
- IRRef tmp, op1 = ir->op1;
- J->cur.nins--;
-#if LJ_LE
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
-#else
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
-#endif
- ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
- return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
-}
-
-/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */
-static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir,
- IRIns *ir, IRCallID id)
-{
- IRRef tmp, op1 = ir->op1, op2 = ir->op2;
- J->cur.nins--;
-#if LJ_LE
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
-#else
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
-#endif
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
- ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
- return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
-}
-#endif
-
-/* Emit a CALLN with two split 64 bit arguments. */
-static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir,
- IRIns *ir, IRCallID id)
-{
- IRRef tmp, op1 = ir->op1, op2 = ir->op2;
- J->cur.nins--;
-#if LJ_LE
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
-#else
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
-#endif
- ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
- return split_emit(J,
- IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
- tmp, tmp);
-}
-
-/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */
-static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref)
-{
- IRRef nref = oir[ref].prev;
- IRIns *ir = IR(nref);
- int32_t ofs = 4;
- if (ir->o == IR_KPTR)
- return lj_ir_kptr(J, (char *)ir_kptr(ir) + ofs);
- if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) {
- /* Reassociate address. */
- ofs += IR(ir->op2)->i;
- nref = ir->op1;
- if (ofs == 0) return nref;
- }
- return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs));
-}
-
-/* Substitute references of a snapshot. */
-static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir)
-{
- SnapEntry *map = &J->cur.snapmap[snap->mapofs];
- MSize n, nent = snap->nent;
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- IRIns *ir = &oir[snap_ref(sn)];
- if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn))))
- map[n] = ((sn & 0xffff0000) | ir->prev);
- }
-}
-
-/* Transform the old IR to the new IR. */
-static void split_ir(jit_State *J)
-{
- IRRef nins = J->cur.nins, nk = J->cur.nk;
- MSize irlen = nins - nk;
- MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
- IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need);
- IRRef1 *hisubst;
- IRRef ref, snref;
- SnapShot *snap;
-
- /* Copy old IR to buffer. */
- memcpy(oir, IR(nk), irlen*sizeof(IRIns));
- /* Bias hiword substitution table and old IR. Loword kept in field prev. */
- hisubst = (IRRef1 *)&oir[irlen] - nk;
- oir -= nk;
-
- /* Remove all IR instructions, but retain IR constants. */
- J->cur.nins = REF_FIRST;
- J->loopref = 0;
-
- /* Process constants and fixed references. */
- for (ref = nk; ref <= REF_BASE; ref++) {
- IRIns *ir = &oir[ref];
- if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) {
- /* Split up 64 bit constant. */
- TValue tv = *ir_k64(ir);
- ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo);
- hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi);
- } else {
- ir->prev = ref; /* Identity substitution for loword. */
- hisubst[ref] = 0;
- }
- }
-
- /* Process old IR instructions. */
- snap = J->cur.snap;
- snref = snap->ref;
- for (ref = REF_FIRST; ref < nins; ref++) {
- IRIns *ir = &oir[ref];
- IRRef nref = lj_ir_nextins(J);
- IRIns *nir = IR(nref);
- IRRef hi = 0;
-
- if (ref >= snref) {
- snap->ref = nref;
- split_subst_snap(J, snap++, oir);
- snref = snap < &J->cur.snap[J->cur.nsnap] ? snap->ref : ~(IRRef)0;
- }
-
- /* Copy-substitute old instruction to new instruction. */
- nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev;
- nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev;
- ir->prev = nref; /* Loword substitution. */
- nir->o = ir->o;
- nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI);
- hisubst[ref] = 0;
-
- /* Split 64 bit instructions. */
-#if LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
- /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */
- switch (ir->o) {
- case IR_ADD:
- hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add);
- break;
- case IR_SUB:
- hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub);
- break;
- case IR_MUL:
- hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul);
- break;
- case IR_DIV:
- hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div);
- break;
- case IR_POW:
- hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi);
- break;
- case IR_FPMATH:
- /* Try to rejoin pow from EXP2, MUL and LOG2. */
- if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) {
- IRIns *irp = IR(nir->op1);
- if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) {
- IRIns *irm4 = IR(irp->op1);
- IRIns *irm3 = IR(irm4->op1);
- IRIns *irm12 = IR(irm3->op1);
- IRIns *irl1 = IR(irm12->op1);
- if (irm12->op1 > J->loopref && irl1->o == IR_CALLN &&
- irl1->op2 == IRCALL_lj_vm_log2) {
- IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */
- IRRef arg3 = irm3->op2, arg4 = irm4->op2;
- J->cur.nins--;
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3);
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4);
- ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow);
- hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
- break;
- }
- }
- }
- hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
- case IR_ATAN2:
- hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2);
- break;
- case IR_LDEXP:
- hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp);
- break;
- case IR_NEG: case IR_ABS:
- nir->o = IR_CONV; /* Pass through loword. */
- nir->op2 = (IRT_INT << 5) | IRT_INT;
- hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP),
- hisubst[ir->op1], hisubst[ir->op2]);
- break;
- case IR_SLOAD:
- if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. */
- nir->op2 &= ~IRSLOAD_CONVERT;
- ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref,
- IRCALL_softfp_i2d);
- hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
- break;
- }
- /* fallthrough */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- case IR_STRTO:
- hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
- break;
- case IR_XLOAD: {
- IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */
- J->cur.nins--;
- hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */
- nref = lj_ir_nextins(J);
- nir = IR(nref);
- *nir = inslo; /* Re-emit lo XLOAD immediately before hi XLOAD. */
- hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
-#if LJ_LE
- ir->prev = nref;
-#else
- ir->prev = hi; hi = nref;
-#endif
- break;
- }
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_XSTORE:
- split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]);
- break;
- case IR_CONV: { /* Conversion to number. Others handled below. */
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- UNUSED(st);
-#if LJ_32 && LJ_HASFFI
- if (st == IRT_I64 || st == IRT_U64) {
- hi = split_call_l(J, hisubst, oir, ir,
- st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d);
- break;
- }
-#endif
- lua_assert(st == IRT_INT ||
- (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)));
- nir->o = IR_CALLN;
-#if LJ_32 && LJ_HASFFI
- nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d :
- st == IRT_FLOAT ? IRCALL_softfp_f2d :
- IRCALL_softfp_ui2d;
-#else
- nir->op2 = IRCALL_softfp_i2d;
-#endif
- hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
- break;
- }
- case IR_CALLN:
- case IR_CALLL:
- case IR_CALLS:
- case IR_CALLXS:
- goto split_call;
- case IR_PHI:
- if (nir->op1 == nir->op2)
- J->cur.nins--; /* Drop useless PHIs. */
- if (hisubst[ir->op1] != hisubst[ir->op2])
- split_emit(J, IRT(IR_PHI, IRT_SOFTFP),
- hisubst[ir->op1], hisubst[ir->op2]);
- break;
- case IR_HIOP:
- J->cur.nins--; /* Drop joining HIOP. */
- ir->prev = nir->op1;
- hi = nir->op2;
- break;
- default:
- lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX);
- hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP),
- hisubst[ir->op1], hisubst[ir->op2]);
- break;
- }
- } else
-#endif
-#if LJ_32 && LJ_HASFFI
- if (irt_isint64(ir->t)) {
- IRRef hiref = hisubst[ir->op1];
- nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
- switch (ir->o) {
- case IR_ADD:
- case IR_SUB:
- /* Use plain op for hiword if loword cannot produce a carry/borrow. */
- if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) {
- ir->prev = nir->op1; /* Pass through loword. */
- nir->op1 = hiref; nir->op2 = hisubst[ir->op2];
- hi = nref;
- break;
- }
- /* fallthrough */
- case IR_NEG:
- hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]);
- break;
- case IR_MUL:
- hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64);
- break;
- case IR_DIV:
- hi = split_call_ll(J, hisubst, oir, ir,
- irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
- IRCALL_lj_carith_divu64);
- break;
- case IR_MOD:
- hi = split_call_ll(J, hisubst, oir, ir,
- irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
- IRCALL_lj_carith_modu64);
- break;
- case IR_POW:
- hi = split_call_ll(J, hisubst, oir, ir,
- irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
- IRCALL_lj_carith_powu64);
- break;
- case IR_FLOAD:
- lua_assert(ir->op2 == IRFL_CDATA_INT64);
- hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4);
-#if LJ_BE
- ir->prev = hi; hi = nref;
-#endif
- break;
- case IR_XLOAD:
- hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2);
-#if LJ_BE
- ir->prev = hi; hi = nref;
-#endif
- break;
- case IR_XSTORE:
- split_emit(J, IRTI(IR_HIOP), nir->op1, hisubst[ir->op2]);
- break;
- case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
-#if LJ_SOFTFP
- if (st == IRT_NUM) { /* NUM to 64 bit int conv. */
- hi = split_call_l(J, hisubst, oir, ir,
- irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul);
- } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */
- nir->o = IR_CALLN;
- nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul;
- hi = split_emit(J, IRTI(IR_HIOP), nref, nref);
- }
-#else
- if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */
- hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref);
- }
-#endif
- else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */
- /* Drop cast, since assembler doesn't care. */
- goto fwdlo;
- } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */
- IRRef k31 = lj_ir_kint(J, 31);
- nir = IR(nref); /* May have been reallocated. */
- ir->prev = nir->op1; /* Pass through loword. */
- nir->o = IR_BSAR; /* hi = bsar(lo, 31). */
- nir->op2 = k31;
- hi = nref;
- } else { /* Zero-extend to 64 bit. */
- hi = lj_ir_kint(J, 0);
- goto fwdlo;
- }
- break;
- }
- case IR_CALLXS:
- goto split_call;
- case IR_PHI: {
- IRRef hiref2;
- if ((irref_isk(nir->op1) && irref_isk(nir->op2)) ||
- nir->op1 == nir->op2)
- J->cur.nins--; /* Drop useless PHIs. */
- hiref2 = hisubst[ir->op2];
- if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2))
- split_emit(J, IRTI(IR_PHI), hiref, hiref2);
- break;
- }
- case IR_HIOP:
- J->cur.nins--; /* Drop joining HIOP. */
- ir->prev = nir->op1;
- hi = nir->op2;
- break;
- default:
- lua_assert(ir->o <= IR_NE); /* Comparisons. */
- split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
- break;
- }
- } else
-#endif
-#if LJ_SOFTFP
- if (ir->o == IR_SLOAD) {
- if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */
- nir->op2 &= ~IRSLOAD_CONVERT;
- if (!(nir->op2 & IRSLOAD_TYPECHECK))
- nir->t.irt = IRT_INT; /* Drop guard. */
- split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
- ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t));
- }
- } else if (ir->o == IR_TOBIT) {
- IRRef tmp, op1 = ir->op1;
- J->cur.nins--;
-#if LJ_LE
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
-#else
- tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
-#endif
- ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit);
- } else if (ir->o == IR_TOSTR) {
- if (hisubst[ir->op1]) {
- if (irref_isk(ir->op1))
- nir->op1 = ir->op1;
- else
- split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref);
- }
- } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) {
- if (irref_isk(ir->op2) && hisubst[ir->op2])
- nir->op2 = ir->op2;
- } else
-#endif
- if (ir->o == IR_CONV) { /* See above, too. */
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
-#if LJ_32 && LJ_HASFFI
- if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */
-#if LJ_SOFTFP
- if (irt_isfloat(ir->t)) {
- split_call_l(J, hisubst, oir, ir,
- st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f);
- J->cur.nins--; /* Drop unused HIOP. */
- }
-#else
- if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */
- ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)),
- hisubst[ir->op1], nref);
- }
-#endif
- else { /* Truncate to lower 32 bits. */
- fwdlo:
- ir->prev = nir->op1; /* Forward loword. */
- /* Replace with NOP to avoid messing up the snapshot logic. */
- nir->ot = IRT(IR_NOP, IRT_NIL);
- nir->op1 = nir->op2 = 0;
- }
- }
-#endif
-#if LJ_SOFTFP && LJ_32 && LJ_HASFFI
- else if (irt_isfloat(ir->t)) {
- if (st == IRT_NUM) {
- split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f);
- J->cur.nins--; /* Drop unused HIOP. */
- } else {
- nir->o = IR_CALLN;
- nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f;
- }
- } else if (st == IRT_FLOAT) {
- nir->o = IR_CALLN;
- nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui;
- } else
-#endif
-#if LJ_SOFTFP
- if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) {
- if (irt_isguard(ir->t)) {
- lua_assert(st == IRT_NUM && irt_isint(ir->t));
- J->cur.nins--;
- ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1);
- } else {
- split_call_l(J, hisubst, oir, ir,
-#if LJ_32 && LJ_HASFFI
- st == IRT_NUM ?
- (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
- (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui)
-#else
- IRCALL_softfp_d2i
-#endif
- );
- J->cur.nins--; /* Drop unused HIOP. */
- }
- }
-#endif
- } else if (ir->o == IR_CALLXS) {
- IRRef hiref;
- split_call:
- hiref = hisubst[ir->op1];
- if (hiref) {
- IROpT ot = nir->ot;
- IRRef op2 = nir->op2;
- nir->ot = IRT(IR_CARG, IRT_NIL);
-#if LJ_LE
- nir->op2 = hiref;
-#else
- nir->op2 = nir->op1; nir->op1 = hiref;
-#endif
- ir->prev = nref = split_emit(J, ot, nref, op2);
- }
- if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t))
- hi = split_emit(J,
- IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
- nref, nref);
- } else if (ir->o == IR_CARG) {
- IRRef hiref = hisubst[ir->op1];
- if (hiref) {
- IRRef op2 = nir->op2;
-#if LJ_LE
- nir->op2 = hiref;
-#else
- nir->op2 = nir->op1; nir->op1 = hiref;
-#endif
- ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
- nir = IR(nref);
- }
- hiref = hisubst[ir->op2];
- if (hiref) {
-#if !LJ_TARGET_X86
- int carg = 0;
- IRIns *cir;
- for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1))
- carg++;
- if ((carg & 1) == 0) { /* Align 64 bit arguments. */
- IRRef op2 = nir->op2;
- nir->op2 = REF_NIL;
- nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
- nir = IR(nref);
- }
-#endif
-#if LJ_BE
- { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; }
-#endif
- ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref);
- }
- } else if (ir->o == IR_CNEWI) {
- if (hisubst[ir->op2])
- split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]);
- } else if (ir->o == IR_LOOP) {
- J->loopref = nref; /* Needed by assembler. */
- }
- hisubst[ref] = hi; /* Store hiword substitution. */
- }
- if (snref == nins) { /* Substitution for last snapshot. */
- snap->ref = J->cur.nins;
- split_subst_snap(J, snap, oir);
- }
-
- /* Add PHI marks. */
- for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) {
- IRIns *ir = IR(ref);
- if (ir->o != IR_PHI) break;
- if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t);
- if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t);
- }
-}
-
-/* Protected callback for split pass. */
-static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud)
-{
- jit_State *J = (jit_State *)ud;
- split_ir(J);
- UNUSED(L); UNUSED(dummy);
- return NULL;
-}
-
-#if defined(LUA_USE_ASSERT) || LJ_SOFTFP
-/* Slow, but sure way to check whether a SPLIT pass is needed. */
-static int split_needsplit(jit_State *J)
-{
- IRIns *ir, *irend;
- IRRef ref;
- for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++)
- if (LJ_SOFTFP ? irt_is64orfp(ir->t) : irt_isint64(ir->t))
- return 1;
- if (LJ_SOFTFP) {
- for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev)
- if ((IR(ref)->op2 & IRSLOAD_CONVERT))
- return 1;
- if (J->chain[IR_TOBIT])
- return 1;
- }
- for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) {
- IRType st = (IR(ref)->op2 & IRCONV_SRCMASK);
- if ((LJ_SOFTFP && (st == IRT_NUM || st == IRT_FLOAT)) ||
- st == IRT_I64 || st == IRT_U64)
- return 1;
- }
- return 0; /* Nope. */
-}
-#endif
-
-/* SPLIT pass. */
-void lj_opt_split(jit_State *J)
-{
-#if LJ_SOFTFP
- if (!J->needsplit)
- J->needsplit = split_needsplit(J);
-#else
- lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */
-#endif
- if (J->needsplit) {
- int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit);
- if (errcode) {
- /* Completely reset the trace to avoid inconsistent dump on abort. */
- J->cur.nins = J->cur.nk = REF_BASE;
- J->cur.nsnap = 0;
- lj_err_throw(J->L, errcode); /* Propagate errors. */
- }
- }
-}
-
-#undef IR
-
-#endif
+/*
+** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_split_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT && (LJ_SOFTFP || (LJ_32 && LJ_HASFFI))
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_vm.h"
+
+/* SPLIT pass:
+**
+** This pass splits up 64 bit IR instructions into multiple 32 bit IR
+** instructions. It's only active for soft-float targets or for 32 bit CPUs
+** which lack native 64 bit integer operations (the FFI is currently the
+** only emitter for 64 bit integer instructions).
+**
+** Splitting the IR in a separate pass keeps each 32 bit IR assembler
+** backend simple. Only a small amount of extra functionality needs to be
+** implemented. This is much easier than adding support for allocating
+** register pairs to each backend (believe me, I tried). A few simple, but
+** important optimizations can be performed by the SPLIT pass, which would
+** be tedious to do in the backend.
+**
+** The basic idea is to replace each 64 bit IR instruction with its 32 bit
+** equivalent plus an extra HIOP instruction. The split IR is not passed
+** through FOLD or any other optimizations, so each HIOP is guaranteed to
+** immediately follow its counterpart. The actual functionality of HIOP is
+** inferred from the previous instruction.
+**
+** The operands of HIOP hold the hiword input references. The output of HIOP
+** is the hiword output reference, which is also used to hold the hiword
+** register or spill slot information. The register allocator treats this
+** instruction independently of any other instruction, which improves code
+** quality compared to using fixed register pairs.
+**
+** It's easier to split up some instructions into two regular 32 bit
+** instructions. E.g. XLOAD is split up into two XLOADs with two different
+** addresses. Obviously 64 bit constants need to be split up into two 32 bit
+** constants, too. Some hiword instructions can be entirely omitted, e.g.
+** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls
+** are split up into two 32 bit arguments each.
+**
+** On soft-float targets, floating-point instructions are directly converted
+** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX).
+** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump).
+**
+** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with
+** two int64_t fields:
+**
+** 0100 p32 ADD base +8
+** 0101 i64 XLOAD 0100
+** 0102 i64 ADD 0101 +1
+** 0103 p32 ADD base +16
+** 0104 i64 XSTORE 0103 0102
+**
+** mov rax, [esi+0x8]
+** add rax, +0x01
+** mov [esi+0x10], rax
+**
+** Here's the transformed IR and the x86 machine code after the SPLIT pass:
+**
+** 0100 p32 ADD base +8
+** 0101 int XLOAD 0100
+** 0102 p32 ADD base +12
+** 0103 int XLOAD 0102
+** 0104 int ADD 0101 +1
+** 0105 int HIOP 0103 +0
+** 0106 p32 ADD base +16
+** 0107 int XSTORE 0106 0104
+** 0108 int HIOP 0106 0105
+**
+** mov eax, [esi+0x8]
+** mov ecx, [esi+0xc]
+** add eax, +0x01
+** adc ecx, +0x00
+** mov [esi+0x10], eax
+** mov [esi+0x14], ecx
+**
+** You may notice the reassociated hiword address computation, which is
+** later fused into the mov operands by the assembler.
+*/
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Directly emit the transformed IR without updating chains etc. */
+static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2)
+{
+ IRRef nref = lj_ir_nextins(J);
+ IRIns *ir = IR(nref);
+ ir->ot = ot;
+ ir->op1 = op1;
+ ir->op2 = op2;
+ return nref;
+}
+
+#if LJ_SOFTFP
+/* Emit a (checked) number to integer conversion. */
+static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check)
+{
+ IRRef tmp, res;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo);
+#endif
+ res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i);
+ if (check) {
+ tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d);
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+ split_emit(J, IRTGI(IR_EQ), tmp, lo);
+ split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi);
+ }
+ return res;
+}
+
+/* Emit a CALLN with one split 64 bit argument. */
+static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+}
+
+/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */
+static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1, op2 = ir->op2;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+}
+#endif
+
+/* Emit a CALLN with two split 64 bit arguments. */
+static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1, op2 = ir->op2;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+#endif
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J,
+ IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
+ tmp, tmp);
+}
+
+/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */
+static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref)
+{
+ IRRef nref = oir[ref].prev;
+ IRIns *ir = IR(nref);
+ int32_t ofs = 4;
+ if (ir->o == IR_KPTR)
+ return lj_ir_kptr(J, (char *)ir_kptr(ir) + ofs);
+ if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) {
+ /* Reassociate address. */
+ ofs += IR(ir->op2)->i;
+ nref = ir->op1;
+ if (ofs == 0) return nref;
+ }
+ return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs));
+}
+
+/* Substitute references of a snapshot. */
+static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir)
+{
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRIns *ir = &oir[snap_ref(sn)];
+ if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn))))
+ map[n] = ((sn & 0xffff0000) | ir->prev);
+ }
+}
+
+/* Transform the old IR to the new IR. */
+static void split_ir(jit_State *J)
+{
+ IRRef nins = J->cur.nins, nk = J->cur.nk;
+ MSize irlen = nins - nk;
+ MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
+ IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need);
+ IRRef1 *hisubst;
+ IRRef ref, snref;
+ SnapShot *snap;
+
+ /* Copy old IR to buffer. */
+ memcpy(oir, IR(nk), irlen*sizeof(IRIns));
+ /* Bias hiword substitution table and old IR. Loword kept in field prev. */
+ hisubst = (IRRef1 *)&oir[irlen] - nk;
+ oir -= nk;
+
+ /* Remove all IR instructions, but retain IR constants. */
+ J->cur.nins = REF_FIRST;
+ J->loopref = 0;
+
+ /* Process constants and fixed references. */
+ for (ref = nk; ref <= REF_BASE; ref++) {
+ IRIns *ir = &oir[ref];
+ if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) {
+ /* Split up 64 bit constant. */
+ TValue tv = *ir_k64(ir);
+ ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo);
+ hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi);
+ } else {
+ ir->prev = ref; /* Identity substitution for loword. */
+ hisubst[ref] = 0;
+ }
+ }
+
+ /* Process old IR instructions. */
+ snap = J->cur.snap;
+ snref = snap->ref;
+ for (ref = REF_FIRST; ref < nins; ref++) {
+ IRIns *ir = &oir[ref];
+ IRRef nref = lj_ir_nextins(J);
+ IRIns *nir = IR(nref);
+ IRRef hi = 0;
+
+ if (ref >= snref) {
+ snap->ref = nref;
+ split_subst_snap(J, snap++, oir);
+ snref = snap < &J->cur.snap[J->cur.nsnap] ? snap->ref : ~(IRRef)0;
+ }
+
+ /* Copy-substitute old instruction to new instruction. */
+ nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev;
+ nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev;
+ ir->prev = nref; /* Loword substitution. */
+ nir->o = ir->o;
+ nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI);
+ hisubst[ref] = 0;
+
+ /* Split 64 bit instructions. */
+#if LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
+ /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */
+ switch (ir->o) {
+ case IR_ADD:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add);
+ break;
+ case IR_SUB:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub);
+ break;
+ case IR_MUL:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul);
+ break;
+ case IR_DIV:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div);
+ break;
+ case IR_POW:
+ hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi);
+ break;
+ case IR_FPMATH:
+ /* Try to rejoin pow from EXP2, MUL and LOG2. */
+ if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) {
+ IRIns *irp = IR(nir->op1);
+ if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) {
+ IRIns *irm4 = IR(irp->op1);
+ IRIns *irm3 = IR(irm4->op1);
+ IRIns *irm12 = IR(irm3->op1);
+ IRIns *irl1 = IR(irm12->op1);
+ if (irm12->op1 > J->loopref && irl1->o == IR_CALLN &&
+ irl1->op2 == IRCALL_lj_vm_log2) {
+ IRRef tmp = irl1->op1; /* Recycle first two args from LOG2. */
+ IRRef arg3 = irm3->op2, arg4 = irm4->op2;
+ J->cur.nins--;
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4);
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow);
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+ break;
+ }
+ }
+ }
+ hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+ case IR_ATAN2:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2);
+ break;
+ case IR_LDEXP:
+ hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp);
+ break;
+ case IR_NEG: case IR_ABS:
+ nir->o = IR_CONV; /* Pass through loword. */
+ nir->op2 = (IRT_INT << 5) | IRT_INT;
+ hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ case IR_SLOAD:
+ if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. */
+ nir->op2 &= ~IRSLOAD_CONVERT;
+ ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref,
+ IRCALL_softfp_i2d);
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ }
+ /* fallthrough */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ case IR_XLOAD: {
+ IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */
+ J->cur.nins--;
+ hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */
+ nref = lj_ir_nextins(J);
+ nir = IR(nref);
+ *nir = inslo; /* Re-emit lo XLOAD immediately before hi XLOAD. */
+ hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
+#if LJ_LE
+ ir->prev = nref;
+#else
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ }
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_XSTORE:
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]);
+ break;
+ case IR_CONV: { /* Conversion to number. Others handled below. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ UNUSED(st);
+#if LJ_32 && LJ_HASFFI
+ if (st == IRT_I64 || st == IRT_U64) {
+ hi = split_call_l(J, hisubst, oir, ir,
+ st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d);
+ break;
+ }
+#endif
+ lua_assert(st == IRT_INT ||
+ (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)));
+ nir->o = IR_CALLN;
+#if LJ_32 && LJ_HASFFI
+ nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d :
+ st == IRT_FLOAT ? IRCALL_softfp_f2d :
+ IRCALL_softfp_ui2d;
+#else
+ nir->op2 = IRCALL_softfp_i2d;
+#endif
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ }
+ case IR_CALLN:
+ case IR_CALLL:
+ case IR_CALLS:
+ case IR_CALLXS:
+ goto split_call;
+ case IR_PHI:
+ if (nir->op1 == nir->op2)
+ J->cur.nins--; /* Drop useless PHIs. */
+ if (hisubst[ir->op1] != hisubst[ir->op2])
+ split_emit(J, IRT(IR_PHI, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ case IR_HIOP:
+ J->cur.nins--; /* Drop joining HIOP. */
+ ir->prev = nir->op1;
+ hi = nir->op2;
+ break;
+ default:
+ lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX);
+ hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ }
+ } else
+#endif
+#if LJ_32 && LJ_HASFFI
+ if (irt_isint64(ir->t)) {
+ IRRef hiref = hisubst[ir->op1];
+ nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
+ switch (ir->o) {
+ case IR_ADD:
+ case IR_SUB:
+ /* Use plain op for hiword if loword cannot produce a carry/borrow. */
+ if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) {
+ ir->prev = nir->op1; /* Pass through loword. */
+ nir->op1 = hiref; nir->op2 = hisubst[ir->op2];
+ hi = nref;
+ break;
+ }
+ /* fallthrough */
+ case IR_NEG:
+ hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]);
+ break;
+ case IR_MUL:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64);
+ break;
+ case IR_DIV:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
+ IRCALL_lj_carith_divu64);
+ break;
+ case IR_MOD:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
+ IRCALL_lj_carith_modu64);
+ break;
+ case IR_POW:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
+ IRCALL_lj_carith_powu64);
+ break;
+ case IR_FLOAD:
+ lua_assert(ir->op2 == IRFL_CDATA_INT64);
+ hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4);
+#if LJ_BE
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ case IR_XLOAD:
+ hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2);
+#if LJ_BE
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ case IR_XSTORE:
+ split_emit(J, IRTI(IR_HIOP), nir->op1, hisubst[ir->op2]);
+ break;
+ case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if LJ_SOFTFP
+ if (st == IRT_NUM) { /* NUM to 64 bit int conv. */
+ hi = split_call_l(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul);
+ } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */
+ nir->o = IR_CALLN;
+ nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul;
+ hi = split_emit(J, IRTI(IR_HIOP), nref, nref);
+ }
+#else
+ if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */
+ hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref);
+ }
+#endif
+ else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */
+ /* Drop cast, since assembler doesn't care. */
+ goto fwdlo;
+ } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */
+ IRRef k31 = lj_ir_kint(J, 31);
+ nir = IR(nref); /* May have been reallocated. */
+ ir->prev = nir->op1; /* Pass through loword. */
+ nir->o = IR_BSAR; /* hi = bsar(lo, 31). */
+ nir->op2 = k31;
+ hi = nref;
+ } else { /* Zero-extend to 64 bit. */
+ hi = lj_ir_kint(J, 0);
+ goto fwdlo;
+ }
+ break;
+ }
+ case IR_CALLXS:
+ goto split_call;
+ case IR_PHI: {
+ IRRef hiref2;
+ if ((irref_isk(nir->op1) && irref_isk(nir->op2)) ||
+ nir->op1 == nir->op2)
+ J->cur.nins--; /* Drop useless PHIs. */
+ hiref2 = hisubst[ir->op2];
+ if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2))
+ split_emit(J, IRTI(IR_PHI), hiref, hiref2);
+ break;
+ }
+ case IR_HIOP:
+ J->cur.nins--; /* Drop joining HIOP. */
+ ir->prev = nir->op1;
+ hi = nir->op2;
+ break;
+ default:
+ lua_assert(ir->o <= IR_NE); /* Comparisons. */
+ split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
+ break;
+ }
+ } else
+#endif
+#if LJ_SOFTFP
+ if (ir->o == IR_SLOAD) {
+ if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */
+ nir->op2 &= ~IRSLOAD_CONVERT;
+ if (!(nir->op2 & IRSLOAD_TYPECHECK))
+ nir->t.irt = IRT_INT; /* Drop guard. */
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t));
+ }
+ } else if (ir->o == IR_TOBIT) {
+ IRRef tmp, op1 = ir->op1;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit);
+ } else if (ir->o == IR_TOSTR) {
+ if (hisubst[ir->op1]) {
+ if (irref_isk(ir->op1))
+ nir->op1 = ir->op1;
+ else
+ split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref);
+ }
+ } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) {
+ if (irref_isk(ir->op2) && hisubst[ir->op2])
+ nir->op2 = ir->op2;
+ } else
+#endif
+ if (ir->o == IR_CONV) { /* See above, too. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if LJ_32 && LJ_HASFFI
+ if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */
+#if LJ_SOFTFP
+ if (irt_isfloat(ir->t)) {
+ split_call_l(J, hisubst, oir, ir,
+ st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f);
+ J->cur.nins--; /* Drop unused HIOP. */
+ }
+#else
+ if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */
+ ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)),
+ hisubst[ir->op1], nref);
+ }
+#endif
+ else { /* Truncate to lower 32 bits. */
+ fwdlo:
+ ir->prev = nir->op1; /* Forward loword. */
+ /* Replace with NOP to avoid messing up the snapshot logic. */
+ nir->ot = IRT(IR_NOP, IRT_NIL);
+ nir->op1 = nir->op2 = 0;
+ }
+ }
+#endif
+#if LJ_SOFTFP && LJ_32 && LJ_HASFFI
+ else if (irt_isfloat(ir->t)) {
+ if (st == IRT_NUM) {
+ split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f);
+ J->cur.nins--; /* Drop unused HIOP. */
+ } else {
+ nir->o = IR_CALLN;
+ nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f;
+ }
+ } else if (st == IRT_FLOAT) {
+ nir->o = IR_CALLN;
+ nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui;
+ } else
+#endif
+#if LJ_SOFTFP
+ if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) {
+ if (irt_isguard(ir->t)) {
+ lua_assert(st == IRT_NUM && irt_isint(ir->t));
+ J->cur.nins--;
+ ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1);
+ } else {
+ split_call_l(J, hisubst, oir, ir,
+#if LJ_32 && LJ_HASFFI
+ st == IRT_NUM ?
+ (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
+ (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui)
+#else
+ IRCALL_softfp_d2i
+#endif
+ );
+ J->cur.nins--; /* Drop unused HIOP. */
+ }
+ }
+#endif
+ } else if (ir->o == IR_CALLXS) {
+ IRRef hiref;
+ split_call:
+ hiref = hisubst[ir->op1];
+ if (hiref) {
+ IROpT ot = nir->ot;
+ IRRef op2 = nir->op2;
+ nir->ot = IRT(IR_CARG, IRT_NIL);
+#if LJ_LE
+ nir->op2 = hiref;
+#else
+ nir->op2 = nir->op1; nir->op1 = hiref;
+#endif
+ ir->prev = nref = split_emit(J, ot, nref, op2);
+ }
+ if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t))
+ hi = split_emit(J,
+ IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
+ nref, nref);
+ } else if (ir->o == IR_CARG) {
+ IRRef hiref = hisubst[ir->op1];
+ if (hiref) {
+ IRRef op2 = nir->op2;
+#if LJ_LE
+ nir->op2 = hiref;
+#else
+ nir->op2 = nir->op1; nir->op1 = hiref;
+#endif
+ ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
+ nir = IR(nref);
+ }
+ hiref = hisubst[ir->op2];
+ if (hiref) {
+#if !LJ_TARGET_X86
+ int carg = 0;
+ IRIns *cir;
+ for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1))
+ carg++;
+ if ((carg & 1) == 0) { /* Align 64 bit arguments. */
+ IRRef op2 = nir->op2;
+ nir->op2 = REF_NIL;
+ nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
+ nir = IR(nref);
+ }
+#endif
+#if LJ_BE
+ { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; }
+#endif
+ ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref);
+ }
+ } else if (ir->o == IR_CNEWI) {
+ if (hisubst[ir->op2])
+ split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]);
+ } else if (ir->o == IR_LOOP) {
+ J->loopref = nref; /* Needed by assembler. */
+ }
+ hisubst[ref] = hi; /* Store hiword substitution. */
+ }
+ if (snref == nins) { /* Substitution for last snapshot. */
+ snap->ref = J->cur.nins;
+ split_subst_snap(J, snap, oir);
+ }
+
+ /* Add PHI marks. */
+ for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) {
+ IRIns *ir = IR(ref);
+ if (ir->o != IR_PHI) break;
+ if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t);
+ if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t);
+ }
+}
+
+/* Protected callback for split pass. */
+static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ split_ir(J);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+#if defined(LUA_USE_ASSERT) || LJ_SOFTFP
+/* Slow, but sure way to check whether a SPLIT pass is needed. */
+static int split_needsplit(jit_State *J)
+{
+ IRIns *ir, *irend;
+ IRRef ref;
+ for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++)
+ if (LJ_SOFTFP ? irt_is64orfp(ir->t) : irt_isint64(ir->t))
+ return 1;
+ if (LJ_SOFTFP) {
+ for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev)
+ if ((IR(ref)->op2 & IRSLOAD_CONVERT))
+ return 1;
+ if (J->chain[IR_TOBIT])
+ return 1;
+ }
+ for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) {
+ IRType st = (IR(ref)->op2 & IRCONV_SRCMASK);
+ if ((LJ_SOFTFP && (st == IRT_NUM || st == IRT_FLOAT)) ||
+ st == IRT_I64 || st == IRT_U64)
+ return 1;
+ }
+ return 0; /* Nope. */
+}
+#endif
+
+/* SPLIT pass. */
+void lj_opt_split(jit_State *J)
+{
+#if LJ_SOFTFP
+ if (!J->needsplit)
+ J->needsplit = split_needsplit(J);
+#else
+ lua_assert(J->needsplit >= split_needsplit(J)); /* Verify flag. */
+#endif
+ if (J->needsplit) {
+ int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit);
+ if (errcode) {
+ /* Completely reset the trace to avoid inconsistent dump on abort. */
+ J->cur.nins = J->cur.nk = REF_BASE;
+ J->cur.nsnap = 0;
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ }
+ }
+}
+
+#undef IR
+
+#endif
diff --git a/3rdparty/lua/src/lj_parse.c b/3rdparty/lua/src/lj_parse.c
index f2734e1..7ff7d72 100644
--- a/3rdparty/lua/src/lj_parse.c
+++ b/3rdparty/lua/src/lj_parse.c
@@ -1,2754 +1,2750 @@
-/*
-** Lua parser (source code -> bytecode).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_parse_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_debug.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_func.h"
-#include "lj_state.h"
-#include "lj_bc.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#endif
-#include "lj_lex.h"
-#include "lj_parse.h"
-#include "lj_vm.h"
-#include "lj_vmevent.h"
-
-/* -- Parser structures and definitions ----------------------------------- */
-
-/* Expression kinds. */
-typedef enum {
- /* Constant expressions must be first and in this order: */
- VKNIL,
- VKFALSE,
- VKTRUE,
- VKSTR, /* sval = string value */
- VKNUM, /* nval = number value */
- VKLAST = VKNUM,
- VKCDATA, /* nval = cdata value, not treated as a constant expression */
- /* Non-constant expressions follow: */
- VLOCAL, /* info = local register, aux = vstack index */
- VUPVAL, /* info = upvalue index, aux = vstack index */
- VGLOBAL, /* sval = string value */
- VINDEXED, /* info = table register, aux = index reg/byte/string const */
- VJMP, /* info = instruction PC */
- VRELOCABLE, /* info = instruction PC */
- VNONRELOC, /* info = result register */
- VCALL, /* info = instruction PC, aux = base */
- VVOID
-} ExpKind;
-
-/* Expression descriptor. */
-typedef struct ExpDesc {
- union {
- struct {
- uint32_t info; /* Primary info. */
- uint32_t aux; /* Secondary info. */
- } s;
- TValue nval; /* Number value. */
- GCstr *sval; /* String value. */
- } u;
- ExpKind k;
- BCPos t; /* True condition jump list. */
- BCPos f; /* False condition jump list. */
-} ExpDesc;
-
-/* Macros for expressions. */
-#define expr_hasjump(e) ((e)->t != (e)->f)
-
-#define expr_isk(e) ((e)->k <= VKLAST)
-#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e))
-#define expr_isnumk(e) ((e)->k == VKNUM)
-#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e))
-#define expr_isstrk(e) ((e)->k == VKSTR)
-
-#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval)
-#define expr_numberV(e) numberVnum(expr_numtv((e)))
-
-/* Initialize expression. */
-static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info)
-{
- e->k = k;
- e->u.s.info = info;
- e->f = e->t = NO_JMP;
-}
-
-/* Check number constant for +-0. */
-static int expr_numiszero(ExpDesc *e)
-{
- TValue *o = expr_numtv(e);
- return tvisint(o) ? (intV(o) == 0) : tviszero(o);
-}
-
-/* Per-function linked list of scope blocks. */
-typedef struct FuncScope {
- struct FuncScope *prev; /* Link to outer scope. */
- MSize vstart; /* Start of block-local variables. */
- uint8_t nactvar; /* Number of active vars outside the scope. */
- uint8_t flags; /* Scope flags. */
-} FuncScope;
-
-#define FSCOPE_LOOP 0x01 /* Scope is a (breakable) loop. */
-#define FSCOPE_BREAK 0x02 /* Break used in scope. */
-#define FSCOPE_GOLA 0x04 /* Goto or label used in scope. */
-#define FSCOPE_UPVAL 0x08 /* Upvalue in scope. */
-#define FSCOPE_NOCLOSE 0x10 /* Do not close upvalues. */
-
-#define NAME_BREAK ((GCstr *)(uintptr_t)1)
-
-/* Index into variable stack. */
-typedef uint16_t VarIndex;
-#define LJ_MAX_VSTACK (65536 - LJ_MAX_UPVAL)
-
-/* Variable/goto/label info. */
-#define VSTACK_VAR_RW 0x01 /* R/W variable. */
-#define VSTACK_GOTO 0x02 /* Pending goto. */
-#define VSTACK_LABEL 0x04 /* Label. */
-
-/* Per-function state. */
-typedef struct FuncState {
- GCtab *kt; /* Hash table for constants. */
- LexState *ls; /* Lexer state. */
- lua_State *L; /* Lua state. */
- FuncScope *bl; /* Current scope. */
- struct FuncState *prev; /* Enclosing function. */
- BCPos pc; /* Next bytecode position. */
- BCPos lasttarget; /* Bytecode position of last jump target. */
- BCPos jpc; /* Pending jump list to next bytecode. */
- BCReg freereg; /* First free register. */
- BCReg nactvar; /* Number of active local variables. */
- BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants */
- BCLine linedefined; /* First line of the function definition. */
- BCInsLine *bcbase; /* Base of bytecode stack. */
- BCPos bclim; /* Limit of bytecode stack. */
- MSize vbase; /* Base of variable stack for this function. */
- uint8_t flags; /* Prototype flags. */
- uint8_t numparams; /* Number of parameters. */
- uint8_t framesize; /* Fixed frame size. */
- uint8_t nuv; /* Number of upvalues */
- VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */
- VarIndex uvmap[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx. */
- VarIndex uvtmp[LJ_MAX_UPVAL]; /* Temporary upvalue map. */
-} FuncState;
-
-/* Binary and unary operators. ORDER OPR */
-typedef enum BinOpr {
- OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */
- OPR_CONCAT,
- OPR_NE, OPR_EQ,
- OPR_LT, OPR_GE, OPR_LE, OPR_GT,
- OPR_AND, OPR_OR,
- OPR_NOBINOPR
-} BinOpr;
-
-LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT);
-LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT);
-LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT);
-LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD);
-LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD);
-LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD);
-LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD);
-
-/* -- Error handling ------------------------------------------------------ */
-
-LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em)
-{
- lj_lex_error(ls, ls->token, em);
-}
-
-LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken token)
-{
- lj_lex_error(ls, ls->token, LJ_ERR_XTOKEN, lj_lex_token2str(ls, token));
-}
-
-LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what)
-{
- if (fs->linedefined == 0)
- lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what);
- else
- lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what);
-}
-
-#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m)
-#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m)
-#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); }
-
-/* -- Management of constants --------------------------------------------- */
-
-/* Return bytecode encoding for primitive constant. */
-#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k)
-
-#define tvhaskslot(o) ((o)->u32.hi == 0)
-#define tvkslot(o) ((o)->u32.lo)
-
-/* Add a number constant. */
-static BCReg const_num(FuncState *fs, ExpDesc *e)
-{
- lua_State *L = fs->L;
- TValue *o;
- lua_assert(expr_isnumk(e));
- o = lj_tab_set(L, fs->kt, &e->u.nval);
- if (tvhaskslot(o))
- return tvkslot(o);
- o->u64 = fs->nkn;
- return fs->nkn++;
-}
-
-/* Add a GC object constant. */
-static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype)
-{
- lua_State *L = fs->L;
- TValue key, *o;
- setgcV(L, &key, gc, itype);
- /* NOBARRIER: the key is new or kept alive. */
- o = lj_tab_set(L, fs->kt, &key);
- if (tvhaskslot(o))
- return tvkslot(o);
- o->u64 = fs->nkgc;
- return fs->nkgc++;
-}
-
-/* Add a string constant. */
-static BCReg const_str(FuncState *fs, ExpDesc *e)
-{
- lua_assert(expr_isstrk(e) || e->k == VGLOBAL);
- return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR);
-}
-
-/* Anchor string constant to avoid GC. */
-GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len)
-{
- /* NOBARRIER: the key is new or kept alive. */
- lua_State *L = ls->L;
- GCstr *s = lj_str_new(L, str, len);
- TValue *tv = lj_tab_setstr(L, ls->fs->kt, s);
- if (tvisnil(tv)) setboolV(tv, 1);
- lj_gc_check(L);
- return s;
-}
-
-#if LJ_HASFFI
-/* Anchor cdata to avoid GC. */
-void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd)
-{
- /* NOBARRIER: the key is new or kept alive. */
- lua_State *L = ls->L;
- setcdataV(L, tv, cd);
- setboolV(lj_tab_set(L, ls->fs->kt, tv), 1);
-}
-#endif
-
-/* -- Jump list handling -------------------------------------------------- */
-
-/* Get next element in jump list. */
-static BCPos jmp_next(FuncState *fs, BCPos pc)
-{
- ptrdiff_t delta = bc_j(fs->bcbase[pc].ins);
- if ((BCPos)delta == NO_JMP)
- return NO_JMP;
- else
- return (BCPos)(((ptrdiff_t)pc+1)+delta);
-}
-
-/* Check if any of the instructions on the jump list produce no value. */
-static int jmp_novalue(FuncState *fs, BCPos list)
-{
- for (; list != NO_JMP; list = jmp_next(fs, list)) {
- BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins;
- if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG))
- return 1;
- }
- return 0;
-}
-
-/* Patch register of test instructions. */
-static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg)
-{
- BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc];
- BCOp op = bc_op(ilp->ins);
- if (op == BC_ISTC || op == BC_ISFC) {
- if (reg != NO_REG && reg != bc_d(ilp->ins)) {
- setbc_a(&ilp->ins, reg);
- } else { /* Nothing to store or already in the right register. */
- setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC));
- setbc_a(&ilp->ins, 0);
- }
- } else if (bc_a(ilp->ins) == NO_REG) {
- if (reg == NO_REG) {
- ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0);
- } else {
- setbc_a(&ilp->ins, reg);
- if (reg >= bc_a(ilp[1].ins))
- setbc_a(&ilp[1].ins, reg+1);
- }
- } else {
- return 0; /* Cannot patch other instructions. */
- }
- return 1;
-}
-
-/* Drop values for all instructions on jump list. */
-static void jmp_dropval(FuncState *fs, BCPos list)
-{
- for (; list != NO_JMP; list = jmp_next(fs, list))
- jmp_patchtestreg(fs, list, NO_REG);
-}
-
-/* Patch jump instruction to target. */
-static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest)
-{
- BCIns *jmp = &fs->bcbase[pc].ins;
- BCPos offset = dest-(pc+1)+BCBIAS_J;
- lua_assert(dest != NO_JMP);
- if (offset > BCMAX_D)
- err_syntax(fs->ls, LJ_ERR_XJUMP);
- setbc_d(jmp, offset);
-}
-
-/* Append to jump list. */
-static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2)
-{
- if (l2 == NO_JMP) {
- return;
- } else if (*l1 == NO_JMP) {
- *l1 = l2;
- } else {
- BCPos list = *l1;
- BCPos next;
- while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */
- list = next;
- jmp_patchins(fs, list, l2);
- }
-}
-
-/* Patch jump list and preserve produced values. */
-static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget,
- BCReg reg, BCPos dtarget)
-{
- while (list != NO_JMP) {
- BCPos next = jmp_next(fs, list);
- if (jmp_patchtestreg(fs, list, reg))
- jmp_patchins(fs, list, vtarget); /* Jump to target with value. */
- else
- jmp_patchins(fs, list, dtarget); /* Jump to default target. */
- list = next;
- }
-}
-
-/* Jump to following instruction. Append to list of pending jumps. */
-static void jmp_tohere(FuncState *fs, BCPos list)
-{
- fs->lasttarget = fs->pc;
- jmp_append(fs, &fs->jpc, list);
-}
-
-/* Patch jump list to target. */
-static void jmp_patch(FuncState *fs, BCPos list, BCPos target)
-{
- if (target == fs->pc) {
- jmp_tohere(fs, list);
- } else {
- lua_assert(target < fs->pc);
- jmp_patchval(fs, list, target, NO_REG, target);
- }
-}
-
-/* -- Bytecode register allocator ----------------------------------------- */
-
-/* Bump frame size. */
-static void bcreg_bump(FuncState *fs, BCReg n)
-{
- BCReg sz = fs->freereg + n;
- if (sz > fs->framesize) {
- if (sz >= LJ_MAX_SLOTS)
- err_syntax(fs->ls, LJ_ERR_XSLOTS);
- fs->framesize = (uint8_t)sz;
- }
-}
-
-/* Reserve registers. */
-static void bcreg_reserve(FuncState *fs, BCReg n)
-{
- bcreg_bump(fs, n);
- fs->freereg += n;
-}
-
-/* Free register. */
-static void bcreg_free(FuncState *fs, BCReg reg)
-{
- if (reg >= fs->nactvar) {
- fs->freereg--;
- lua_assert(reg == fs->freereg);
- }
-}
-
-/* Free register for expression. */
-static void expr_free(FuncState *fs, ExpDesc *e)
-{
- if (e->k == VNONRELOC)
- bcreg_free(fs, e->u.s.info);
-}
-
-/* -- Bytecode emitter ---------------------------------------------------- */
-
-/* Emit bytecode instruction. */
-static BCPos bcemit_INS(FuncState *fs, BCIns ins)
-{
- BCPos pc = fs->pc;
- LexState *ls = fs->ls;
- jmp_patchval(fs, fs->jpc, pc, NO_REG, pc);
- fs->jpc = NO_JMP;
- if (LJ_UNLIKELY(pc >= fs->bclim)) {
- ptrdiff_t base = fs->bcbase - ls->bcstack;
- checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions");
- lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine);
- fs->bclim = (BCPos)(ls->sizebcstack - base);
- fs->bcbase = ls->bcstack + base;
- }
- fs->bcbase[pc].ins = ins;
- fs->bcbase[pc].line = ls->lastline;
- fs->pc = pc+1;
- return pc;
-}
-
-#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c))
-#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d))
-#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j))
-
-#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins)
-
-/* -- Bytecode emitter for expressions ------------------------------------ */
-
-/* Discharge non-constant expression to any register. */
-static void expr_discharge(FuncState *fs, ExpDesc *e)
-{
- BCIns ins;
- if (e->k == VUPVAL) {
- ins = BCINS_AD(BC_UGET, 0, e->u.s.info);
- } else if (e->k == VGLOBAL) {
- ins = BCINS_AD(BC_GGET, 0, const_str(fs, e));
- } else if (e->k == VINDEXED) {
- BCReg rc = e->u.s.aux;
- if ((int32_t)rc < 0) {
- ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc);
- } else if (rc > BCMAX_C) {
- ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1));
- } else {
- bcreg_free(fs, rc);
- ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc);
- }
- bcreg_free(fs, e->u.s.info);
- } else if (e->k == VCALL) {
- e->u.s.info = e->u.s.aux;
- e->k = VNONRELOC;
- return;
- } else if (e->k == VLOCAL) {
- e->k = VNONRELOC;
- return;
- } else {
- return;
- }
- e->u.s.info = bcemit_INS(fs, ins);
- e->k = VRELOCABLE;
-}
-
-/* Emit bytecode to set a range of registers to nil. */
-static void bcemit_nil(FuncState *fs, BCReg from, BCReg n)
-{
- if (fs->pc > fs->lasttarget) { /* No jumps to current position? */
- BCIns *ip = &fs->bcbase[fs->pc-1].ins;
- BCReg pto, pfrom = bc_a(*ip);
- switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */
- case BC_KPRI:
- if (bc_d(*ip) != ~LJ_TNIL) break;
- if (from == pfrom) {
- if (n == 1) return;
- } else if (from == pfrom+1) {
- from = pfrom;
- n++;
- } else {
- break;
- }
- *ip = BCINS_AD(BC_KNIL, from, from+n-1); /* Replace KPRI. */
- return;
- case BC_KNIL:
- pto = bc_d(*ip);
- if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */
- if (from+n-1 > pto)
- setbc_d(ip, from+n-1); /* Patch previous instruction range. */
- return;
- }
- break;
- default:
- break;
- }
- }
- /* Emit new instruction or replace old instruction. */
- bcemit_INS(fs, n == 1 ? BCINS_AD(BC_KPRI, from, VKNIL) :
- BCINS_AD(BC_KNIL, from, from+n-1));
-}
-
-/* Discharge an expression to a specific register. Ignore branches. */
-static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg)
-{
- BCIns ins;
- expr_discharge(fs, e);
- if (e->k == VKSTR) {
- ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e));
- } else if (e->k == VKNUM) {
-#if LJ_DUALNUM
- cTValue *tv = expr_numtv(e);
- if (tvisint(tv) && checki16(intV(tv)))
- ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv));
- else
-#else
- lua_Number n = expr_numberV(e);
- int32_t k = lj_num2int(n);
- if (checki16(k) && n == (lua_Number)k)
- ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k);
- else
-#endif
- ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e));
-#if LJ_HASFFI
- } else if (e->k == VKCDATA) {
- fs->flags |= PROTO_FFI;
- ins = BCINS_AD(BC_KCDATA, reg,
- const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA));
-#endif
- } else if (e->k == VRELOCABLE) {
- setbc_a(bcptr(fs, e), reg);
- goto noins;
- } else if (e->k == VNONRELOC) {
- if (reg == e->u.s.info)
- goto noins;
- ins = BCINS_AD(BC_MOV, reg, e->u.s.info);
- } else if (e->k == VKNIL) {
- bcemit_nil(fs, reg, 1);
- goto noins;
- } else if (e->k <= VKTRUE) {
- ins = BCINS_AD(BC_KPRI, reg, const_pri(e));
- } else {
- lua_assert(e->k == VVOID || e->k == VJMP);
- return;
- }
- bcemit_INS(fs, ins);
-noins:
- e->u.s.info = reg;
- e->k = VNONRELOC;
-}
-
-/* Forward declaration. */
-static BCPos bcemit_jmp(FuncState *fs);
-
-/* Discharge an expression to a specific register. */
-static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg)
-{
- expr_toreg_nobranch(fs, e, reg);
- if (e->k == VJMP)
- jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */
- if (expr_hasjump(e)) { /* Discharge expression with branches. */
- BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP;
- if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) {
- BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs);
- jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE);
- bcemit_AJ(fs, BC_JMP, fs->freereg, 1);
- jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE);
- jmp_tohere(fs, jval);
- }
- jend = fs->pc;
- fs->lasttarget = jend;
- jmp_patchval(fs, e->f, jend, reg, jfalse);
- jmp_patchval(fs, e->t, jend, reg, jtrue);
- }
- e->f = e->t = NO_JMP;
- e->u.s.info = reg;
- e->k = VNONRELOC;
-}
-
-/* Discharge an expression to the next free register. */
-static void expr_tonextreg(FuncState *fs, ExpDesc *e)
-{
- expr_discharge(fs, e);
- expr_free(fs, e);
- bcreg_reserve(fs, 1);
- expr_toreg(fs, e, fs->freereg - 1);
-}
-
-/* Discharge an expression to any register. */
-static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e)
-{
- expr_discharge(fs, e);
- if (e->k == VNONRELOC) {
- if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */
- if (e->u.s.info >= fs->nactvar) {
- expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */
- return e->u.s.info;
- }
- }
- expr_tonextreg(fs, e); /* Discharge to next register. */
- return e->u.s.info;
-}
-
-/* Partially discharge expression to a value. */
-static void expr_toval(FuncState *fs, ExpDesc *e)
-{
- if (expr_hasjump(e))
- expr_toanyreg(fs, e);
- else
- expr_discharge(fs, e);
-}
-
-/* Emit store for LHS expression. */
-static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e)
-{
- BCIns ins;
- if (var->k == VLOCAL) {
- fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW;
- expr_free(fs, e);
- expr_toreg(fs, e, var->u.s.info);
- return;
- } else if (var->k == VUPVAL) {
- fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW;
- expr_toval(fs, e);
- if (e->k <= VKTRUE)
- ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e));
- else if (e->k == VKSTR)
- ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e));
- else if (e->k == VKNUM)
- ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e));
- else
- ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e));
- } else if (var->k == VGLOBAL) {
- BCReg ra = expr_toanyreg(fs, e);
- ins = BCINS_AD(BC_GSET, ra, const_str(fs, var));
- } else {
- BCReg ra, rc;
- lua_assert(var->k == VINDEXED);
- ra = expr_toanyreg(fs, e);
- rc = var->u.s.aux;
- if ((int32_t)rc < 0) {
- ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc);
- } else if (rc > BCMAX_C) {
- ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1));
- } else {
- /* Free late alloced key reg to avoid assert on free of value reg. */
- /* This can only happen when called from expr_table(). */
- lua_assert(e->k != VNONRELOC || ra < fs->nactvar ||
- rc < ra || (bcreg_free(fs, rc),1));
- ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc);
- }
- }
- bcemit_INS(fs, ins);
- expr_free(fs, e);
-}
-
-/* Emit method lookup expression. */
-static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key)
-{
- BCReg idx, func, obj = expr_toanyreg(fs, e);
- expr_free(fs, e);
- func = fs->freereg;
- bcemit_AD(fs, BC_MOV, func+1, obj); /* Copy object to first argument. */
- lua_assert(expr_isstrk(key));
- idx = const_str(fs, key);
- if (idx <= BCMAX_C) {
- bcreg_reserve(fs, 2);
- bcemit_ABC(fs, BC_TGETS, func, obj, idx);
- } else {
- bcreg_reserve(fs, 3);
- bcemit_AD(fs, BC_KSTR, func+2, idx);
- bcemit_ABC(fs, BC_TGETV, func, obj, func+2);
- fs->freereg--;
- }
- e->u.s.info = func;
- e->k = VNONRELOC;
-}
-
-/* -- Bytecode emitter for branches --------------------------------------- */
-
-/* Emit unconditional branch. */
-static BCPos bcemit_jmp(FuncState *fs)
-{
- BCPos jpc = fs->jpc;
- BCPos j = fs->pc - 1;
- BCIns *ip = &fs->bcbase[j].ins;
- fs->jpc = NO_JMP;
- if ((int32_t)j >= (int32_t)fs->lasttarget && bc_op(*ip) == BC_UCLO) {
- setbc_j(ip, NO_JMP);
- fs->lasttarget = j+1;
- } else {
- j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP);
- }
- jmp_append(fs, &j, jpc);
- return j;
-}
-
-/* Invert branch condition of bytecode instruction. */
-static void invertcond(FuncState *fs, ExpDesc *e)
-{
- BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins;
- setbc_op(ip, bc_op(*ip)^1);
-}
-
-/* Emit conditional branch. */
-static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond)
-{
- BCPos pc;
- if (e->k == VRELOCABLE) {
- BCIns *ip = bcptr(fs, e);
- if (bc_op(*ip) == BC_NOT) {
- *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip));
- return bcemit_jmp(fs);
- }
- }
- if (e->k != VNONRELOC) {
- bcreg_reserve(fs, 1);
- expr_toreg_nobranch(fs, e, fs->freereg-1);
- }
- bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info);
- pc = bcemit_jmp(fs);
- expr_free(fs, e);
- return pc;
-}
-
-/* Emit branch on true condition. */
-static void bcemit_branch_t(FuncState *fs, ExpDesc *e)
-{
- BCPos pc;
- expr_discharge(fs, e);
- if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
- pc = NO_JMP; /* Never jump. */
- else if (e->k == VJMP)
- invertcond(fs, e), pc = e->u.s.info;
- else if (e->k == VKFALSE || e->k == VKNIL)
- expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
- else
- pc = bcemit_branch(fs, e, 0);
- jmp_append(fs, &e->f, pc);
- jmp_tohere(fs, e->t);
- e->t = NO_JMP;
-}
-
-/* Emit branch on false condition. */
-static void bcemit_branch_f(FuncState *fs, ExpDesc *e)
-{
- BCPos pc;
- expr_discharge(fs, e);
- if (e->k == VKNIL || e->k == VKFALSE)
- pc = NO_JMP; /* Never jump. */
- else if (e->k == VJMP)
- pc = e->u.s.info;
- else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
- expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
- else
- pc = bcemit_branch(fs, e, 1);
- jmp_append(fs, &e->t, pc);
- jmp_tohere(fs, e->f);
- e->f = NO_JMP;
-}
-
-/* -- Bytecode emitter for operators -------------------------------------- */
-
-/* Try constant-folding of arithmetic operators. */
-static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2)
-{
- TValue o;
- lua_Number n;
- if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0;
- n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD);
- setnumV(&o, n);
- if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */
- if (LJ_DUALNUM) {
- int32_t k = lj_num2int(n);
- if ((lua_Number)k == n) {
- setintV(&e1->u.nval, k);
- return 1;
- }
- }
- setnumV(&e1->u.nval, n);
- return 1;
-}
-
-/* Emit arithmetic operator. */
-static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
-{
- BCReg rb, rc, t;
- uint32_t op;
- if (foldarith(opr, e1, e2))
- return;
- if (opr == OPR_POW) {
- op = BC_POW;
- rc = expr_toanyreg(fs, e2);
- rb = expr_toanyreg(fs, e1);
- } else {
- op = opr-OPR_ADD+BC_ADDVV;
- /* Must discharge 2nd operand first since VINDEXED might free regs. */
- expr_toval(fs, e2);
- if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C)
- op -= BC_ADDVV-BC_ADDVN;
- else
- rc = expr_toanyreg(fs, e2);
- /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */
- lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC);
- expr_toval(fs, e1);
- /* Avoid two consts to satisfy bytecode constraints. */
- if (expr_isnumk(e1) && !expr_isnumk(e2) &&
- (t = const_num(fs, e1)) <= BCMAX_B) {
- rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV;
- } else {
- rb = expr_toanyreg(fs, e1);
- }
- }
- /* Using expr_free might cause asserts if the order is wrong. */
- if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
- if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
- e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc);
- e1->k = VRELOCABLE;
-}
-
-/* Emit comparison operator. */
-static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
-{
- ExpDesc *eret = e1;
- BCIns ins;
- expr_toval(fs, e1);
- if (opr == OPR_EQ || opr == OPR_NE) {
- BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV;
- BCReg ra;
- if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */
- ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. */
- expr_toval(fs, e2);
- switch (e2->k) {
- case VKNIL: case VKFALSE: case VKTRUE:
- ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2));
- break;
- case VKSTR:
- ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2));
- break;
- case VKNUM:
- ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2));
- break;
- default:
- ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2));
- break;
- }
- } else {
- uint32_t op = opr-OPR_LT+BC_ISLT;
- BCReg ra, rd;
- if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */
- e1 = e2; e2 = eret; /* Swap operands. */
- op = ((op-BC_ISLT)^3)+BC_ISLT;
- expr_toval(fs, e1);
- }
- rd = expr_toanyreg(fs, e2);
- ra = expr_toanyreg(fs, e1);
- ins = BCINS_AD(op, ra, rd);
- }
- /* Using expr_free might cause asserts if the order is wrong. */
- if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
- if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
- bcemit_INS(fs, ins);
- eret->u.s.info = bcemit_jmp(fs);
- eret->k = VJMP;
-}
-
-/* Fixup left side of binary operator. */
-static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e)
-{
- if (op == OPR_AND) {
- bcemit_branch_t(fs, e);
- } else if (op == OPR_OR) {
- bcemit_branch_f(fs, e);
- } else if (op == OPR_CONCAT) {
- expr_tonextreg(fs, e);
- } else if (op == OPR_EQ || op == OPR_NE) {
- if (!expr_isk_nojump(e)) expr_toanyreg(fs, e);
- } else {
- if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e);
- }
-}
-
-/* Emit binary operator. */
-static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2)
-{
- if (op <= OPR_POW) {
- bcemit_arith(fs, op, e1, e2);
- } else if (op == OPR_AND) {
- lua_assert(e1->t == NO_JMP); /* List must be closed. */
- expr_discharge(fs, e2);
- jmp_append(fs, &e2->f, e1->f);
- *e1 = *e2;
- } else if (op == OPR_OR) {
- lua_assert(e1->f == NO_JMP); /* List must be closed. */
- expr_discharge(fs, e2);
- jmp_append(fs, &e2->t, e1->t);
- *e1 = *e2;
- } else if (op == OPR_CONCAT) {
- expr_toval(fs, e2);
- if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) {
- lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1);
- expr_free(fs, e1);
- setbc_b(bcptr(fs, e2), e1->u.s.info);
- e1->u.s.info = e2->u.s.info;
- } else {
- expr_tonextreg(fs, e2);
- expr_free(fs, e2);
- expr_free(fs, e1);
- e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info);
- }
- e1->k = VRELOCABLE;
- } else {
- lua_assert(op == OPR_NE || op == OPR_EQ ||
- op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT);
- bcemit_comp(fs, op, e1, e2);
- }
-}
-
-/* Emit unary operator. */
-static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e)
-{
- if (op == BC_NOT) {
- /* Swap true and false lists. */
- { BCPos temp = e->f; e->f = e->t; e->t = temp; }
- jmp_dropval(fs, e->f);
- jmp_dropval(fs, e->t);
- expr_discharge(fs, e);
- if (e->k == VKNIL || e->k == VKFALSE) {
- e->k = VKTRUE;
- return;
- } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) {
- e->k = VKFALSE;
- return;
- } else if (e->k == VJMP) {
- invertcond(fs, e);
- return;
- } else if (e->k == VRELOCABLE) {
- bcreg_reserve(fs, 1);
- setbc_a(bcptr(fs, e), fs->freereg-1);
- e->u.s.info = fs->freereg-1;
- e->k = VNONRELOC;
- } else {
- lua_assert(e->k == VNONRELOC);
- }
- } else {
- lua_assert(op == BC_UNM || op == BC_LEN);
- if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */
-#if LJ_HASFFI
- if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */
- GCcdata *cd = cdataV(&e->u.nval);
- int64_t *p = (int64_t *)cdataptr(cd);
- if (cd->ctypeid == CTID_COMPLEX_DOUBLE)
- p[1] ^= (int64_t)U64x(80000000,00000000);
- else
- *p = -*p;
- return;
- } else
-#endif
- if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */
- TValue *o = expr_numtv(e);
- if (tvisint(o)) {
- int32_t k = intV(o);
- if (k == -k)
- setnumV(o, -(lua_Number)k);
- else
- setintV(o, -k);
- return;
- } else {
- o->u64 ^= U64x(80000000,00000000);
- return;
- }
- }
- }
- expr_toanyreg(fs, e);
- }
- expr_free(fs, e);
- e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info);
- e->k = VRELOCABLE;
-}
-
-/* -- Lexer support ------------------------------------------------------- */
-
-/* Check and consume optional token. */
-static int lex_opt(LexState *ls, LexToken tok)
-{
- if (ls->token == tok) {
- lj_lex_next(ls);
- return 1;
- }
- return 0;
-}
-
-/* Check and consume token. */
-static void lex_check(LexState *ls, LexToken tok)
-{
- if (ls->token != tok)
- err_token(ls, tok);
- lj_lex_next(ls);
-}
-
-/* Check for matching token. */
-static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line)
-{
- if (!lex_opt(ls, what)) {
- if (line == ls->linenumber) {
- err_token(ls, what);
- } else {
- const char *swhat = lj_lex_token2str(ls, what);
- const char *swho = lj_lex_token2str(ls, who);
- lj_lex_error(ls, ls->token, LJ_ERR_XMATCH, swhat, swho, line);
- }
- }
-}
-
-/* Check for string token. */
-static GCstr *lex_str(LexState *ls)
-{
- GCstr *s;
- if (ls->token != TK_name && (LJ_52 || ls->token != TK_goto))
- err_token(ls, TK_name);
- s = strV(&ls->tokenval);
- lj_lex_next(ls);
- return s;
-}
-
-/* -- Variable handling --------------------------------------------------- */
-
-#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]])
-
-/* Define a new local variable. */
-static void var_new(LexState *ls, BCReg n, GCstr *name)
-{
- FuncState *fs = ls->fs;
- MSize vtop = ls->vtop;
- checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables");
- if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
- if (ls->sizevstack >= LJ_MAX_VSTACK)
- lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
- lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
- }
- lua_assert((uintptr_t)name < VARNAME__MAX ||
- lj_tab_getstr(fs->kt, name) != NULL);
- /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
- setgcref(ls->vstack[vtop].name, obj2gco(name));
- fs->varmap[fs->nactvar+n] = (uint16_t)vtop;
- ls->vtop = vtop+1;
-}
-
-#define var_new_lit(ls, n, v) \
- var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1))
-
-#define var_new_fixed(ls, n, vn) \
- var_new(ls, (n), (GCstr *)(uintptr_t)(vn))
-
-/* Add local variables. */
-static void var_add(LexState *ls, BCReg nvars)
-{
- FuncState *fs = ls->fs;
- BCReg nactvar = fs->nactvar;
- while (nvars--) {
- VarInfo *v = &var_get(ls, fs, nactvar);
- v->startpc = fs->pc;
- v->slot = nactvar++;
- v->info = 0;
- }
- fs->nactvar = nactvar;
-}
-
-/* Remove local variables. */
-static void var_remove(LexState *ls, BCReg tolevel)
-{
- FuncState *fs = ls->fs;
- while (fs->nactvar > tolevel)
- var_get(ls, fs, --fs->nactvar).endpc = fs->pc;
-}
-
-/* Lookup local variable name. */
-static BCReg var_lookup_local(FuncState *fs, GCstr *n)
-{
- int i;
- for (i = fs->nactvar-1; i >= 0; i--) {
- if (n == strref(var_get(fs->ls, fs, i).name))
- return (BCReg)i;
- }
- return (BCReg)-1; /* Not found. */
-}
-
-/* Lookup or add upvalue index. */
-static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e)
-{
- MSize i, n = fs->nuv;
- for (i = 0; i < n; i++)
- if (fs->uvmap[i] == vidx)
- return i; /* Already exists. */
- /* Otherwise create a new one. */
- checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues");
- lua_assert(e->k == VLOCAL || e->k == VUPVAL);
- fs->uvmap[n] = (uint16_t)vidx;
- fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info);
- fs->nuv = n+1;
- return n;
-}
-
-/* Forward declaration. */
-static void fscope_uvmark(FuncState *fs, BCReg level);
-
-/* Recursively lookup variables in enclosing functions. */
-static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first)
-{
- if (fs) {
- BCReg reg = var_lookup_local(fs, name);
- if ((int32_t)reg >= 0) { /* Local in this function? */
- expr_init(e, VLOCAL, reg);
- if (!first)
- fscope_uvmark(fs, reg); /* Scope now has an upvalue. */
- return (MSize)(e->u.s.aux = (uint32_t)fs->varmap[reg]);
- } else {
- MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */
- if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */
- e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e);
- e->k = VUPVAL;
- return vidx;
- }
- }
- } else { /* Not found in any function, must be a global. */
- expr_init(e, VGLOBAL, 0);
- e->u.sval = name;
- }
- return (MSize)-1; /* Global. */
-}
-
-/* Lookup variable name. */
-#define var_lookup(ls, e) \
- var_lookup_((ls)->fs, lex_str(ls), (e), 1)
-
-/* -- Goto and label handling --------------------------------------------- */
-
-/* Add a new goto or label. */
-static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc)
-{
- FuncState *fs = ls->fs;
- MSize vtop = ls->vtop;
- if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
- if (ls->sizevstack >= LJ_MAX_VSTACK)
- lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
- lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
- }
- lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL);
- /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
- setgcref(ls->vstack[vtop].name, obj2gco(name));
- ls->vstack[vtop].startpc = pc;
- ls->vstack[vtop].slot = (uint8_t)fs->nactvar;
- ls->vstack[vtop].info = info;
- ls->vtop = vtop+1;
- return vtop;
-}
-
-#define gola_isgoto(v) ((v)->info & VSTACK_GOTO)
-#define gola_islabel(v) ((v)->info & VSTACK_LABEL)
-#define gola_isgotolabel(v) ((v)->info & (VSTACK_GOTO|VSTACK_LABEL))
-
-/* Patch goto to jump to label. */
-static void gola_patch(LexState *ls, VarInfo *vg, VarInfo *vl)
-{
- FuncState *fs = ls->fs;
- BCPos pc = vg->startpc;
- setgcrefnull(vg->name); /* Invalidate pending goto. */
- setbc_a(&fs->bcbase[pc].ins, vl->slot);
- jmp_patch(fs, pc, vl->startpc);
-}
-
-/* Patch goto to close upvalues. */
-static void gola_close(LexState *ls, VarInfo *vg)
-{
- FuncState *fs = ls->fs;
- BCPos pc = vg->startpc;
- BCIns *ip = &fs->bcbase[pc].ins;
- lua_assert(gola_isgoto(vg));
- lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO);
- setbc_a(ip, vg->slot);
- if (bc_op(*ip) == BC_JMP) {
- BCPos next = jmp_next(fs, pc);
- if (next != NO_JMP) jmp_patch(fs, next, pc); /* Jump to UCLO. */
- setbc_op(ip, BC_UCLO); /* Turn into UCLO. */
- setbc_j(ip, NO_JMP);
- }
-}
-
-/* Resolve pending forward gotos for label. */
-static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx)
-{
- VarInfo *vg = ls->vstack + bl->vstart;
- VarInfo *vl = ls->vstack + idx;
- for (; vg < vl; vg++)
- if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) {
- if (vg->slot < vl->slot) {
- GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name);
- lua_assert((uintptr_t)name >= VARNAME__MAX);
- ls->linenumber = ls->fs->bcbase[vg->startpc].line;
- lua_assert(strref(vg->name) != NAME_BREAK);
- lj_lex_error(ls, 0, LJ_ERR_XGSCOPE,
- strdata(strref(vg->name)), strdata(name));
- }
- gola_patch(ls, vg, vl);
- }
-}
-
-/* Fixup remaining gotos and labels for scope. */
-static void gola_fixup(LexState *ls, FuncScope *bl)
-{
- VarInfo *v = ls->vstack + bl->vstart;
- VarInfo *ve = ls->vstack + ls->vtop;
- for (; v < ve; v++) {
- GCstr *name = strref(v->name);
- if (name != NULL) { /* Only consider remaining valid gotos/labels. */
- if (gola_islabel(v)) {
- VarInfo *vg;
- setgcrefnull(v->name); /* Invalidate label that goes out of scope. */
- for (vg = v+1; vg < ve; vg++) /* Resolve pending backward gotos. */
- if (strref(vg->name) == name && gola_isgoto(vg)) {
- if ((bl->flags&FSCOPE_UPVAL) && vg->slot > v->slot)
- gola_close(ls, vg);
- gola_patch(ls, vg, v);
- }
- } else if (gola_isgoto(v)) {
- if (bl->prev) { /* Propagate goto or break to outer scope. */
- bl->prev->flags |= name == NAME_BREAK ? FSCOPE_BREAK : FSCOPE_GOLA;
- v->slot = bl->nactvar;
- if ((bl->flags & FSCOPE_UPVAL))
- gola_close(ls, v);
- } else { /* No outer scope: undefined goto label or no loop. */
- ls->linenumber = ls->fs->bcbase[v->startpc].line;
- if (name == NAME_BREAK)
- lj_lex_error(ls, 0, LJ_ERR_XBREAK);
- else
- lj_lex_error(ls, 0, LJ_ERR_XLUNDEF, strdata(name));
- }
- }
- }
- }
-}
-
-/* Find existing label. */
-static VarInfo *gola_findlabel(LexState *ls, GCstr *name)
-{
- VarInfo *v = ls->vstack + ls->fs->bl->vstart;
- VarInfo *ve = ls->vstack + ls->vtop;
- for (; v < ve; v++)
- if (strref(v->name) == name && gola_islabel(v))
- return v;
- return NULL;
-}
-
-/* -- Scope handling ------------------------------------------------------ */
-
-/* Begin a scope. */
-static void fscope_begin(FuncState *fs, FuncScope *bl, int flags)
-{
- bl->nactvar = (uint8_t)fs->nactvar;
- bl->flags = flags;
- bl->vstart = fs->ls->vtop;
- bl->prev = fs->bl;
- fs->bl = bl;
- lua_assert(fs->freereg == fs->nactvar);
-}
-
-/* End a scope. */
-static void fscope_end(FuncState *fs)
-{
- FuncScope *bl = fs->bl;
- LexState *ls = fs->ls;
- fs->bl = bl->prev;
- var_remove(ls, bl->nactvar);
- fs->freereg = fs->nactvar;
- lua_assert(bl->nactvar == fs->nactvar);
- if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL)
- bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0);
- if ((bl->flags & FSCOPE_BREAK)) {
- if ((bl->flags & FSCOPE_LOOP)) {
- MSize idx = gola_new(ls, NAME_BREAK, VSTACK_LABEL, fs->pc);
- ls->vtop = idx; /* Drop break label immediately. */
- gola_resolve(ls, bl, idx);
- return;
- } /* else: need the fixup step to propagate the breaks. */
- } else if (!(bl->flags & FSCOPE_GOLA)) {
- return;
- }
- gola_fixup(ls, bl);
-}
-
-/* Mark scope as having an upvalue. */
-static void fscope_uvmark(FuncState *fs, BCReg level)
-{
- FuncScope *bl;
- for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev)
- ;
- if (bl)
- bl->flags |= FSCOPE_UPVAL;
-}
-
-/* -- Function state management ------------------------------------------- */
-
-/* Fixup bytecode for prototype. */
-static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n)
-{
- BCInsLine *base = fs->bcbase;
- MSize i;
- pt->sizebc = n;
- bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
- fs->framesize, 0);
- for (i = 1; i < n; i++)
- bc[i] = base[i].ins;
-}
-
-/* Fixup upvalues for child prototype, step #2. */
-static void fs_fixup_uv2(FuncState *fs, GCproto *pt)
-{
- VarInfo *vstack = fs->ls->vstack;
- uint16_t *uv = proto_uv(pt);
- MSize i, n = pt->sizeuv;
- for (i = 0; i < n; i++) {
- VarIndex vidx = uv[i];
- if (vidx >= LJ_MAX_VSTACK)
- uv[i] = vidx - LJ_MAX_VSTACK;
- else if ((vstack[vidx].info & VSTACK_VAR_RW))
- uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL;
- else
- uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL | PROTO_UV_IMMUTABLE;
- }
-}
-
-/* Fixup constants for prototype. */
-static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr)
-{
- GCtab *kt;
- TValue *array;
- Node *node;
- MSize i, hmask;
- checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants");
- checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants");
- setmref(pt->k, kptr);
- pt->sizekn = fs->nkn;
- pt->sizekgc = fs->nkgc;
- kt = fs->kt;
- array = tvref(kt->array);
- for (i = 0; i < kt->asize; i++)
- if (tvhaskslot(&array[i])) {
- TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])];
- if (LJ_DUALNUM)
- setintV(tv, (int32_t)i);
- else
- setnumV(tv, (lua_Number)i);
- }
- node = noderef(kt->node);
- hmask = kt->hmask;
- for (i = 0; i <= hmask; i++) {
- Node *n = &node[i];
- if (tvhaskslot(&n->val)) {
- ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val);
- lua_assert(!tvisint(&n->key));
- if (tvisnum(&n->key)) {
- TValue *tv = &((TValue *)kptr)[kidx];
- if (LJ_DUALNUM) {
- lua_Number nn = numV(&n->key);
- int32_t k = lj_num2int(nn);
- lua_assert(!tvismzero(&n->key));
- if ((lua_Number)k == nn)
- setintV(tv, k);
- else
- *tv = n->key;
- } else {
- *tv = n->key;
- }
- } else {
- GCobj *o = gcV(&n->key);
- setgcref(((GCRef *)kptr)[~kidx], o);
- lj_gc_objbarrier(fs->L, pt, o);
- if (tvisproto(&n->key))
- fs_fixup_uv2(fs, gco2pt(o));
- }
- }
- }
-}
-
-/* Fixup upvalues for prototype, step #1. */
-static void fs_fixup_uv1(FuncState *fs, GCproto *pt, uint16_t *uv)
-{
- setmref(pt->uv, uv);
- pt->sizeuv = fs->nuv;
- memcpy(uv, fs->uvtmp, fs->nuv*sizeof(VarIndex));
-}
-
-#ifndef LUAJIT_DISABLE_DEBUGINFO
-/* Prepare lineinfo for prototype. */
-static size_t fs_prep_line(FuncState *fs, BCLine numline)
-{
- return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
-}
-
-/* Fixup lineinfo for prototype. */
-static void fs_fixup_line(FuncState *fs, GCproto *pt,
- void *lineinfo, BCLine numline)
-{
- BCInsLine *base = fs->bcbase + 1;
- BCLine first = fs->linedefined;
- MSize i = 0, n = fs->pc-1;
- pt->firstline = fs->linedefined;
- pt->numline = numline;
- setmref(pt->lineinfo, lineinfo);
- if (LJ_LIKELY(numline < 256)) {
- uint8_t *li = (uint8_t *)lineinfo;
- do {
- BCLine delta = base[i].line - first;
- lua_assert(delta >= 0 && delta < 256);
- li[i] = (uint8_t)delta;
- } while (++i < n);
- } else if (LJ_LIKELY(numline < 65536)) {
- uint16_t *li = (uint16_t *)lineinfo;
- do {
- BCLine delta = base[i].line - first;
- lua_assert(delta >= 0 && delta < 65536);
- li[i] = (uint16_t)delta;
- } while (++i < n);
- } else {
- uint32_t *li = (uint32_t *)lineinfo;
- do {
- BCLine delta = base[i].line - first;
- lua_assert(delta >= 0);
- li[i] = (uint32_t)delta;
- } while (++i < n);
- }
-}
-
-/* Resize buffer if needed. */
-static LJ_NOINLINE void fs_buf_resize(LexState *ls, MSize len)
-{
- MSize sz = ls->sb.sz * 2;
- while (ls->sb.n + len > sz) sz = sz * 2;
- lj_str_resizebuf(ls->L, &ls->sb, sz);
-}
-
-static LJ_AINLINE void fs_buf_need(LexState *ls, MSize len)
-{
- if (LJ_UNLIKELY(ls->sb.n + len > ls->sb.sz))
- fs_buf_resize(ls, len);
-}
-
-/* Add string to buffer. */
-static void fs_buf_str(LexState *ls, const char *str, MSize len)
-{
- char *p = ls->sb.buf + ls->sb.n;
- MSize i;
- ls->sb.n += len;
- for (i = 0; i < len; i++) p[i] = str[i];
-}
-
-/* Add ULEB128 value to buffer. */
-static void fs_buf_uleb128(LexState *ls, uint32_t v)
-{
- MSize n = ls->sb.n;
- uint8_t *p = (uint8_t *)ls->sb.buf;
- for (; v >= 0x80; v >>= 7)
- p[n++] = (uint8_t)((v & 0x7f) | 0x80);
- p[n++] = (uint8_t)v;
- ls->sb.n = n;
-}
-
-/* Prepare variable info for prototype. */
-static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar)
-{
- VarInfo *vs = ls->vstack, *ve;
- MSize i, n;
- BCPos lastpc;
- lj_str_resetbuf(&ls->sb); /* Copy to temp. string buffer. */
- /* Store upvalue names. */
- for (i = 0, n = fs->nuv; i < n; i++) {
- GCstr *s = strref(vs[fs->uvmap[i]].name);
- MSize len = s->len+1;
- fs_buf_need(ls, len);
- fs_buf_str(ls, strdata(s), len);
- }
- *ofsvar = ls->sb.n;
- lastpc = 0;
- /* Store local variable names and compressed ranges. */
- for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) {
- if (!gola_isgotolabel(vs)) {
- GCstr *s = strref(vs->name);
- BCPos startpc;
- if ((uintptr_t)s < VARNAME__MAX) {
- fs_buf_need(ls, 1 + 2*5);
- ls->sb.buf[ls->sb.n++] = (uint8_t)(uintptr_t)s;
- } else {
- MSize len = s->len+1;
- fs_buf_need(ls, len + 2*5);
- fs_buf_str(ls, strdata(s), len);
- }
- startpc = vs->startpc;
- fs_buf_uleb128(ls, startpc-lastpc);
- fs_buf_uleb128(ls, vs->endpc-startpc);
- lastpc = startpc;
- }
- }
- fs_buf_need(ls, 1);
- ls->sb.buf[ls->sb.n++] = '\0'; /* Terminator for varinfo. */
- return ls->sb.n;
-}
-
-/* Fixup variable info for prototype. */
-static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar)
-{
- setmref(pt->uvinfo, p);
- setmref(pt->varinfo, (char *)p + ofsvar);
- memcpy(p, ls->sb.buf, ls->sb.n); /* Copy from temp. string buffer. */
-}
-#else
-
-/* Initialize with empty debug info, if disabled. */
-#define fs_prep_line(fs, numline) (UNUSED(numline), 0)
-#define fs_fixup_line(fs, pt, li, numline) \
- pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL)
-#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0)
-#define fs_fixup_var(ls, pt, p, ofsvar) \
- setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL)
-
-#endif
-
-/* Check if bytecode op returns. */
-static int bcopisret(BCOp op)
-{
- switch (op) {
- case BC_CALLMT: case BC_CALLT:
- case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
- return 1;
- default:
- return 0;
- }
-}
-
-/* Fixup return instruction for prototype. */
-static void fs_fixup_ret(FuncState *fs)
-{
- BCPos lastpc = fs->pc;
- if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) {
- if ((fs->bl->flags & FSCOPE_UPVAL))
- bcemit_AJ(fs, BC_UCLO, 0, 0);
- bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */
- }
- fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */
- fscope_end(fs);
- lua_assert(fs->bl == NULL);
- /* May need to fixup returns encoded before first function was created. */
- if (fs->flags & PROTO_FIXUP_RETURN) {
- BCPos pc;
- for (pc = 1; pc < lastpc; pc++) {
- BCIns ins = fs->bcbase[pc].ins;
- BCPos offset;
- switch (bc_op(ins)) {
- case BC_CALLMT: case BC_CALLT:
- case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
- offset = bcemit_INS(fs, ins); /* Copy original instruction. */
- fs->bcbase[offset].line = fs->bcbase[pc].line;
- offset = offset-(pc+1)+BCBIAS_J;
- if (offset > BCMAX_D)
- err_syntax(fs->ls, LJ_ERR_XFIXUP);
- /* Replace with UCLO plus branch. */
- fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset);
- break;
- case BC_UCLO:
- return; /* We're done. */
- default:
- break;
- }
- }
- }
-}
-
-/* Finish a FuncState and return the new prototype. */
-static GCproto *fs_finish(LexState *ls, BCLine line)
-{
- lua_State *L = ls->L;
- FuncState *fs = ls->fs;
- BCLine numline = line - fs->linedefined;
- size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar;
- GCproto *pt;
-
- /* Apply final fixups. */
- fs_fixup_ret(fs);
-
- /* Calculate total size of prototype including all colocated arrays. */
- sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef);
- sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1);
- ofsk = sizept; sizept += fs->nkn*sizeof(TValue);
- ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2;
- ofsli = sizept; sizept += fs_prep_line(fs, numline);
- ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar);
-
- /* Allocate prototype and initialize its fields. */
- pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept);
- pt->gct = ~LJ_TPROTO;
- pt->sizept = (MSize)sizept;
- pt->trace = 0;
- pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN));
- pt->numparams = fs->numparams;
- pt->framesize = fs->framesize;
- setgcref(pt->chunkname, obj2gco(ls->chunkname));
-
- /* Close potentially uninitialized gap between bc and kgc. */
- *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0;
- fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc);
- fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk));
- fs_fixup_uv1(fs, pt, (uint16_t *)((char *)pt + ofsuv));
- fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline);
- fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar);
-
- lj_vmevent_send(L, BC,
- setprotoV(L, L->top++, pt);
- );
-
- L->top--; /* Pop table of constants. */
- ls->vtop = fs->vbase; /* Reset variable stack. */
- ls->fs = fs->prev;
- lua_assert(ls->fs != NULL || ls->token == TK_eof);
- return pt;
-}
-
-/* Initialize a new FuncState. */
-static void fs_init(LexState *ls, FuncState *fs)
-{
- lua_State *L = ls->L;
- fs->prev = ls->fs; ls->fs = fs; /* Append to list. */
- fs->ls = ls;
- fs->vbase = ls->vtop;
- fs->L = L;
- fs->pc = 0;
- fs->lasttarget = 0;
- fs->jpc = NO_JMP;
- fs->freereg = 0;
- fs->nkgc = 0;
- fs->nkn = 0;
- fs->nactvar = 0;
- fs->nuv = 0;
- fs->bl = NULL;
- fs->flags = 0;
- fs->framesize = 1; /* Minimum frame size. */
- fs->kt = lj_tab_new(L, 0, 0);
- /* Anchor table of constants in stack to avoid being collected. */
- settabV(L, L->top, fs->kt);
- incr_top(L);
-}
-
-/* -- Expressions --------------------------------------------------------- */
-
-/* Forward declaration. */
-static void expr(LexState *ls, ExpDesc *v);
-
-/* Return string expression. */
-static void expr_str(LexState *ls, ExpDesc *e)
-{
- expr_init(e, VKSTR, 0);
- e->u.sval = lex_str(ls);
-}
-
-/* Return index expression. */
-static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e)
-{
- /* Already called: expr_toval(fs, e). */
- t->k = VINDEXED;
- if (expr_isnumk(e)) {
-#if LJ_DUALNUM
- if (tvisint(expr_numtv(e))) {
- int32_t k = intV(expr_numtv(e));
- if (checku8(k)) {
- t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
- return;
- }
- }
-#else
- lua_Number n = expr_numberV(e);
- int32_t k = lj_num2int(n);
- if (checku8(k) && n == (lua_Number)k) {
- t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
- return;
- }
-#endif
- } else if (expr_isstrk(e)) {
- BCReg idx = const_str(fs, e);
- if (idx <= BCMAX_C) {
- t->u.s.aux = ~idx; /* -256..-1: const string key */
- return;
- }
- }
- t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */
-}
-
-/* Parse index expression with named field. */
-static void expr_field(LexState *ls, ExpDesc *v)
-{
- FuncState *fs = ls->fs;
- ExpDesc key;
- expr_toanyreg(fs, v);
- lj_lex_next(ls); /* Skip dot or colon. */
- expr_str(ls, &key);
- expr_index(fs, v, &key);
-}
-
-/* Parse index expression with brackets. */
-static void expr_bracket(LexState *ls, ExpDesc *v)
-{
- lj_lex_next(ls); /* Skip '['. */
- expr(ls, v);
- expr_toval(ls->fs, v);
- lex_check(ls, ']');
-}
-
-/* Get value of constant expression. */
-static void expr_kvalue(TValue *v, ExpDesc *e)
-{
- if (e->k <= VKTRUE) {
- setitype(v, ~(uint32_t)e->k);
- } else if (e->k == VKSTR) {
- setgcref(v->gcr, obj2gco(e->u.sval));
- setitype(v, LJ_TSTR);
- } else {
- lua_assert(tvisnumber(expr_numtv(e)));
- *v = *expr_numtv(e);
- }
-}
-
-/* Parse table constructor expression. */
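-/* Entries with constant keys and constant values are pre-filled into a
-** template table that replaces the initial BC_TNEW with a BC_TDUP; the
-** remaining entries are stored individually into the table at runtime.
-*/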
-static void expr_table(LexState *ls, ExpDesc *e)
-{
- FuncState *fs = ls->fs;
- BCLine line = ls->linenumber;
- GCtab *t = NULL;
- int vcall = 0, needarr = 0, fixt = 0;
- uint32_t narr = 1; /* First array index. */
- uint32_t nhash = 0; /* Number of hash entries. */
- BCReg freg = fs->freereg;
- BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0);
- expr_init(e, VNONRELOC, freg);
- bcreg_reserve(fs, 1);
- freg++;
- lex_check(ls, '{');
- while (ls->token != '}') {
- ExpDesc key, val;
- vcall = 0;
- if (ls->token == '[') {
- expr_bracket(ls, &key); /* Already calls expr_toval. */
- if (!expr_isk(&key)) expr_index(fs, e, &key);
- if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++;
- lex_check(ls, '=');
- } else if ((ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) &&
- lj_lex_lookahead(ls) == '=') {
- expr_str(ls, &key);
- lex_check(ls, '=');
- nhash++;
- } else {
- expr_init(&key, VKNUM, 0);
- setintV(&key.u.nval, (int)narr);
- narr++;
- needarr = vcall = 1;
- }
- expr(ls, &val);
- if (expr_isk(&key) && key.k != VKNIL &&
- (key.k == VKSTR || expr_isk_nojump(&val))) {
- TValue k, *v;
- if (!t) { /* Create template table on demand. */
- BCReg kidx;
- t = lj_tab_new(fs->L, needarr ? narr : 0, hsize2hbits(nhash));
- kidx = const_gc(fs, obj2gco(t), LJ_TTAB);
- fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx);
- }
- vcall = 0;
- expr_kvalue(&k, &key);
- v = lj_tab_set(fs->L, t, &k);
- lj_gc_anybarriert(fs->L, t);
- if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */
- expr_kvalue(v, &val);
- } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */
- settabV(fs->L, v, t); /* Preserve key with table itself as value. */
- fixt = 1; /* Fix this later, after all resizes. */
- goto nonconst;
- }
- } else {
- nonconst:
- if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; }
- if (expr_isk(&key)) expr_index(fs, e, &key);
- bcemit_store(fs, e, &val);
- }
- fs->freereg = freg;
- if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break;
- }
- lex_match(ls, '}', '{', line);
- if (vcall) {
- BCInsLine *ilp = &fs->bcbase[fs->pc-1];
- ExpDesc en;
- lua_assert(bc_a(ilp->ins) == freg &&
- bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB));
- expr_init(&en, VKNUM, 0);
- en.u.nval.u32.lo = narr-1;
- en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */
- if (narr > 256) { fs->pc--; ilp--; }
- ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en));
- setbc_b(&ilp[-1].ins, 0);
- }
- if (pc == fs->pc-1) { /* Make expr relocable if possible. */
- e->u.s.info = pc;
- fs->freereg--;
- e->k = VRELOCABLE;
- } else {
- e->k = VNONRELOC; /* May have been changed by expr_index. */
- }
- if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. */
- BCIns *ip = &fs->bcbase[pc].ins;
- if (!needarr) narr = 0;
- else if (narr < 3) narr = 3;
- else if (narr > 0x7ff) narr = 0x7ff;
- setbc_d(ip, narr|(hsize2hbits(nhash)<<11));
- } else {
- if (needarr && t->asize < narr)
- lj_tab_reasize(fs->L, t, narr-1);
- if (fixt) { /* Fix value for dummy keys in template table. */
- Node *node = noderef(t->node);
- uint32_t i, hmask = t->hmask;
- for (i = 0; i <= hmask; i++) {
- Node *n = &node[i];
- if (tvistab(&n->val)) {
- lua_assert(tabV(&n->val) == t);
- setnilV(&n->val); /* Turn value into nil. */
- }
- }
- }
- lj_gc_check(fs->L);
- }
-}
-
-/* Parse function parameters. */
-static BCReg parse_params(LexState *ls, int needself)
-{
- FuncState *fs = ls->fs;
- BCReg nparams = 0;
- lex_check(ls, '(');
- if (needself)
- var_new_lit(ls, nparams++, "self");
- if (ls->token != ')') {
- do {
- if (ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) {
- var_new(ls, nparams++, lex_str(ls));
- } else if (ls->token == TK_dots) {
- lj_lex_next(ls);
- fs->flags |= PROTO_VARARG;
- break;
- } else {
- err_syntax(ls, LJ_ERR_XPARAM);
- }
- } while (lex_opt(ls, ','));
- }
- var_add(ls, nparams);
- lua_assert(fs->nactvar == nparams);
- bcreg_reserve(fs, nparams);
- lex_check(ls, ')');
- return nparams;
-}
-
-/* Forward declaration. */
-static void parse_chunk(LexState *ls);
-
-/* Parse body of a function. */
-static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line)
-{
- FuncState fs, *pfs = ls->fs;
- FuncScope bl;
- GCproto *pt;
- ptrdiff_t oldbase = pfs->bcbase - ls->bcstack;
- fs_init(ls, &fs);
- fscope_begin(&fs, &bl, 0);
- fs.linedefined = line;
- fs.numparams = (uint8_t)parse_params(ls, needself);
- fs.bcbase = pfs->bcbase + pfs->pc;
- fs.bclim = pfs->bclim - pfs->pc;
- bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */
- parse_chunk(ls);
- if (ls->token != TK_end) lex_match(ls, TK_end, TK_function, line);
- pt = fs_finish(ls, (ls->lastline = ls->linenumber));
- pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */
- pfs->bclim = (BCPos)(ls->sizebcstack - oldbase);
- /* Store new prototype in the constant array of the parent. */
- expr_init(e, VRELOCABLE,
- bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO)));
-#if LJ_HASFFI
- pfs->flags |= (fs.flags & PROTO_FFI);
-#endif
- if (!(pfs->flags & PROTO_CHILD)) {
- if (pfs->flags & PROTO_HAS_RETURN)
- pfs->flags |= PROTO_FIXUP_RETURN;
- pfs->flags |= PROTO_CHILD;
- }
- lj_lex_next(ls);
-}
-
-/* Parse expression list. Last expression is left open. */
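-/* The last expression stays undischarged so that callers can expand
-** multiple results from calls and '...' in place.
-*/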
-static BCReg expr_list(LexState *ls, ExpDesc *v)
-{
- BCReg n = 1;
- expr(ls, v);
- while (lex_opt(ls, ',')) {
- expr_tonextreg(ls->fs, v);
- expr(ls, v);
- n++;
- }
- return n;
-}
-
-/* Parse function argument list. */
-static void parse_args(LexState *ls, ExpDesc *e)
-{
- FuncState *fs = ls->fs;
- ExpDesc args;
- BCIns ins;
- BCReg base;
- BCLine line = ls->linenumber;
- if (ls->token == '(') {
-#if !LJ_52
- if (line != ls->lastline)
- err_syntax(ls, LJ_ERR_XAMBIG);
-#endif
- lj_lex_next(ls);
- if (ls->token == ')') { /* f(). */
- args.k = VVOID;
- } else {
- expr_list(ls, &args);
- if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */
- setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */
- }
- lex_match(ls, ')', '(', line);
- } else if (ls->token == '{') {
- expr_table(ls, &args);
- } else if (ls->token == TK_string) {
- expr_init(&args, VKSTR, 0);
- args.u.sval = strV(&ls->tokenval);
- lj_lex_next(ls);
- } else {
- err_syntax(ls, LJ_ERR_XFUNARG);
- return; /* Silence compiler. */
- }
- lua_assert(e->k == VNONRELOC);
- base = e->u.s.info; /* Base register for call. */
- if (args.k == VCALL) {
- ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1);
- } else {
- if (args.k != VVOID)
- expr_tonextreg(fs, &args);
- ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base);
- }
- expr_init(e, VCALL, bcemit_INS(fs, ins));
- e->u.s.aux = base;
- fs->bcbase[fs->pc - 1].line = line;
- fs->freereg = base+1; /* Leave one result by default. */
-}
-
-/* Parse primary expression. */
-static void expr_primary(LexState *ls, ExpDesc *v)
-{
- FuncState *fs = ls->fs;
- /* Parse prefix expression. */
- if (ls->token == '(') {
- BCLine line = ls->linenumber;
- lj_lex_next(ls);
- expr(ls, v);
- lex_match(ls, ')', '(', line);
- expr_discharge(ls->fs, v);
- } else if (ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) {
- var_lookup(ls, v);
- } else {
- err_syntax(ls, LJ_ERR_XSYMBOL);
- }
- for (;;) { /* Parse multiple expression suffixes. */
- if (ls->token == '.') {
- expr_field(ls, v);
- } else if (ls->token == '[') {
- ExpDesc key;
- expr_toanyreg(fs, v);
- expr_bracket(ls, &key);
- expr_index(fs, v, &key);
- } else if (ls->token == ':') {
- ExpDesc key;
- lj_lex_next(ls);
- expr_str(ls, &key);
- bcemit_method(fs, v, &key);
- parse_args(ls, v);
- } else if (ls->token == '(' || ls->token == TK_string || ls->token == '{') {
- expr_tonextreg(fs, v);
- parse_args(ls, v);
- } else {
- break;
- }
- }
-}
-
-/* Parse simple expression. */
-static void expr_simple(LexState *ls, ExpDesc *v)
-{
- switch (ls->token) {
- case TK_number:
- expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokenval)) ? VKCDATA : VKNUM, 0);
- copyTV(ls->L, &v->u.nval, &ls->tokenval);
- break;
- case TK_string:
- expr_init(v, VKSTR, 0);
- v->u.sval = strV(&ls->tokenval);
- break;
- case TK_nil:
- expr_init(v, VKNIL, 0);
- break;
- case TK_true:
- expr_init(v, VKTRUE, 0);
- break;
- case TK_false:
- expr_init(v, VKFALSE, 0);
- break;
- case TK_dots: { /* Vararg. */
- FuncState *fs = ls->fs;
- BCReg base;
- checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS);
- bcreg_reserve(fs, 1);
- base = fs->freereg-1;
- expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams));
- v->u.s.aux = base;
- break;
- }
- case '{': /* Table constructor. */
- expr_table(ls, v);
- return;
- case TK_function:
- lj_lex_next(ls);
- parse_body(ls, v, 0, ls->linenumber);
- return;
- default:
- expr_primary(ls, v);
- return;
- }
- lj_lex_next(ls);
-}
-
-/* Manage syntactic levels to avoid blowing up the stack. */
-static void synlevel_begin(LexState *ls)
-{
- if (++ls->level >= LJ_MAX_XLEVEL)
- lj_lex_error(ls, 0, LJ_ERR_XLEVELS);
-}
-
-#define synlevel_end(ls) ((ls)->level--)
-
-/* Convert token to binary operator. */
-static BinOpr token2binop(LexToken tok)
-{
- switch (tok) {
- case '+': return OPR_ADD;
- case '-': return OPR_SUB;
- case '*': return OPR_MUL;
- case '/': return OPR_DIV;
- case '%': return OPR_MOD;
- case '^': return OPR_POW;
- case TK_concat: return OPR_CONCAT;
- case TK_ne: return OPR_NE;
- case TK_eq: return OPR_EQ;
- case '<': return OPR_LT;
- case TK_le: return OPR_LE;
- case '>': return OPR_GT;
- case TK_ge: return OPR_GE;
- case TK_and: return OPR_AND;
- case TK_or: return OPR_OR;
- default: return OPR_NOBINOPR;
- }
-}
-
-/* Priorities for each binary operator. ORDER OPR. */
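-/* Right-associative operators have a lower right priority, so e.g.
-** 'a ^ b ^ c' parses as 'a ^ (b ^ c)' and 'a .. b .. c' as 'a .. (b .. c)',
-** while 'a + b * c' binds as 'a + (b * c)'.
-*/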
-static const struct {
- uint8_t left; /* Left priority. */
- uint8_t right; /* Right priority. */
-} priority[] = {
- {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */
- {10,9}, {5,4}, /* POW CONCAT (right associative) */
- {3,3}, {3,3}, /* NE EQ */
- {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE LE GT */
- {2,2}, {1,1} /* AND OR */
-};
-
-#define UNARY_PRIORITY 8 /* Priority for unary operators. */
-
-/* Forward declaration. */
-static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit);
-
-/* Parse unary expression. */
-static void expr_unop(LexState *ls, ExpDesc *v)
-{
- BCOp op;
- if (ls->token == TK_not) {
- op = BC_NOT;
- } else if (ls->token == '-') {
- op = BC_UNM;
- } else if (ls->token == '#') {
- op = BC_LEN;
- } else {
- expr_simple(ls, v);
- return;
- }
- lj_lex_next(ls);
- expr_binop(ls, v, UNARY_PRIORITY);
- bcemit_unop(ls->fs, op, v);
-}
-
-/* Parse binary expressions with priority higher than the limit. */
-static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit)
-{
- BinOpr op;
- synlevel_begin(ls);
- expr_unop(ls, v);
- op = token2binop(ls->token);
- while (op != OPR_NOBINOPR && priority[op].left > limit) {
- ExpDesc v2;
- BinOpr nextop;
- lj_lex_next(ls);
- bcemit_binop_left(ls->fs, op, v);
- /* Parse binary expression with higher priority. */
- nextop = expr_binop(ls, &v2, priority[op].right);
- bcemit_binop(ls->fs, op, v, &v2);
- op = nextop;
- }
- synlevel_end(ls);
- return op; /* Return unconsumed binary operator (if any). */
-}
-
-/* Parse expression. */
-static void expr(LexState *ls, ExpDesc *v)
-{
- expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */
-}
-
-/* Assign expression to the next register. */
-static void expr_next(LexState *ls)
-{
- ExpDesc e;
- expr(ls, &e);
- expr_tonextreg(ls->fs, &e);
-}
-
-/* Parse conditional expression. */
-static BCPos expr_cond(LexState *ls)
-{
- ExpDesc v;
- expr(ls, &v);
- if (v.k == VKNIL) v.k = VKFALSE;
- bcemit_branch_t(ls->fs, &v);
- return v.f;
-}
-
-/* -- Assignments --------------------------------------------------------- */
-
-/* List of LHS variables. */
-typedef struct LHSVarList {
- ExpDesc v; /* LHS variable. */
- struct LHSVarList *prev; /* Link to previous LHS variable. */
-} LHSVarList;
-
-/* Eliminate write-after-read hazards for local variable assignment. */
-static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v)
-{
- FuncState *fs = ls->fs;
- BCReg reg = v->u.s.info; /* Check against this variable. */
- BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */
- int hazard = 0;
- for (; lh; lh = lh->prev) {
- if (lh->v.k == VINDEXED) {
- if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */
- hazard = 1;
- lh->v.u.s.info = tmp;
- }
- if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */
- hazard = 1;
- lh->v.u.s.aux = tmp;
- }
- }
- }
- if (hazard) {
- bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */
- bcreg_reserve(fs, 1);
- }
-}
-
-/* Adjust LHS/RHS of an assignment. */
-static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e)
-{
- FuncState *fs = ls->fs;
- int32_t extra = (int32_t)nvars - (int32_t)nexps;
- if (e->k == VCALL) {
- extra++; /* Compensate for the VCALL itself. */
- if (extra < 0) extra = 0;
- setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */
- if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1);
- } else {
- if (e->k != VVOID)
- expr_tonextreg(fs, e); /* Close last expression. */
- if (extra > 0) { /* Leftover LHS are set to nil. */
- BCReg reg = fs->freereg;
- bcreg_reserve(fs, (BCReg)extra);
- bcemit_nil(fs, reg, (BCReg)extra);
- }
- }
-}
-
-/* Recursively parse assignment statement. */
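-/* The LHS expressions are collected into a stack-allocated linked list
-** while recursing; after the RHS list has been parsed, the stores are
-** emitted right to left as the recursion unwinds.
-*/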
-static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars)
-{
- ExpDesc e;
- checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX);
- if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */
- LHSVarList vl;
- vl.prev = lh;
- expr_primary(ls, &vl.v);
- if (vl.v.k == VLOCAL)
- assign_hazard(ls, lh, &vl.v);
- checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names");
- parse_assignment(ls, &vl, nvars+1);
- } else { /* Parse RHS. */
- BCReg nexps;
- lex_check(ls, '=');
- nexps = expr_list(ls, &e);
- if (nexps == nvars) {
- if (e.k == VCALL) {
- if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */
- ls->fs->freereg--;
- e.k = VRELOCABLE;
- } else { /* Multiple call results. */
- e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */
- e.k = VNONRELOC;
- }
- }
- bcemit_store(ls->fs, &lh->v, &e);
- return;
- }
- assign_adjust(ls, nvars, nexps, &e);
- if (nexps > nvars)
- ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */
- }
- /* Assign RHS to LHS and recurse downwards. */
- expr_init(&e, VNONRELOC, ls->fs->freereg-1);
- bcemit_store(ls->fs, &lh->v, &e);
-}
-
-/* Parse call statement or assignment. */
-static void parse_call_assign(LexState *ls)
-{
- FuncState *fs = ls->fs;
- LHSVarList vl;
- expr_primary(ls, &vl.v);
- if (vl.v.k == VCALL) { /* Function call statement. */
- setbc_b(bcptr(fs, &vl.v), 1); /* No results. */
- } else { /* Start of an assignment. */
- vl.prev = NULL;
- parse_assignment(ls, &vl, 1);
- }
-}
-
-/* Parse 'local' statement. */
-static void parse_local(LexState *ls)
-{
- if (lex_opt(ls, TK_function)) { /* Local function declaration. */
- ExpDesc v, b;
- FuncState *fs = ls->fs;
- var_new(ls, 0, lex_str(ls));
- expr_init(&v, VLOCAL, fs->freereg);
- v.u.s.aux = fs->varmap[fs->freereg];
- bcreg_reserve(fs, 1);
- var_add(ls, 1);
- parse_body(ls, &b, 0, ls->linenumber);
- /* bcemit_store(fs, &v, &b) without setting VSTACK_VAR_RW. */
- expr_free(fs, &b);
- expr_toreg(fs, &b, v.u.s.info);
- /* The upvalue is in scope, but the local is only valid after the store. */
- var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc;
- } else { /* Local variable declaration. */
- ExpDesc e;
- BCReg nexps, nvars = 0;
- do { /* Collect LHS. */
- var_new(ls, nvars++, lex_str(ls));
- } while (lex_opt(ls, ','));
- if (lex_opt(ls, '=')) { /* Optional RHS. */
- nexps = expr_list(ls, &e);
- } else { /* Or implicitly set to nil. */
- e.k = VVOID;
- nexps = 0;
- }
- assign_adjust(ls, nvars, nexps, &e);
- var_add(ls, nvars);
- }
-}
-
-/* Parse 'function' statement. */
-static void parse_func(LexState *ls, BCLine line)
-{
- FuncState *fs;
- ExpDesc v, b;
- int needself = 0;
- lj_lex_next(ls); /* Skip 'function'. */
- /* Parse function name. */
- var_lookup(ls, &v);
- while (ls->token == '.') /* Multiple dot-separated fields. */
- expr_field(ls, &v);
- if (ls->token == ':') { /* Optional colon to signify method call. */
- needself = 1;
- expr_field(ls, &v);
- }
- parse_body(ls, &b, needself, line);
- fs = ls->fs;
- bcemit_store(fs, &v, &b);
- fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */
-}
-
-/* -- Control transfer statements ----------------------------------------- */
-
-/* Check for end of block. */
-static int endofblock(LexToken token)
-{
- switch (token) {
- case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof:
- return 1;
- default:
- return 0;
- }
-}
-
-/* Parse 'return' statement. */
-static void parse_return(LexState *ls)
-{
- BCIns ins;
- FuncState *fs = ls->fs;
- lj_lex_next(ls); /* Skip 'return'. */
- fs->flags |= PROTO_HAS_RETURN;
- if (endofblock(ls->token) || ls->token == ';') { /* Bare return. */
- ins = BCINS_AD(BC_RET0, 0, 1);
- } else { /* Return with one or more values. */
- ExpDesc e; /* Receives the _last_ expression in the list. */
- BCReg nret = expr_list(ls, &e);
- if (nret == 1) { /* Return one result. */
- if (e.k == VCALL) { /* Check for tail call. */
- BCIns *ip = bcptr(fs, &e);
- /* It doesn't pay off to add BC_VARGT just for 'return ...'. */
- if (bc_op(*ip) == BC_VARG) goto notailcall;
- fs->pc--;
- ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip));
- } else { /* Can return the result from any register. */
- ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2);
- }
- } else {
- if (e.k == VCALL) { /* Append all results from a call. */
- notailcall:
- setbc_b(bcptr(fs, &e), 0);
- ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar);
- } else {
- expr_tonextreg(fs, &e); /* Force contiguous registers. */
- ins = BCINS_AD(BC_RET, fs->nactvar, nret+1);
- }
- }
- }
- if (fs->flags & PROTO_CHILD)
- bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */
- bcemit_INS(fs, ins);
-}
-
-/* Parse 'break' statement. */
-static void parse_break(LexState *ls)
-{
- ls->fs->bl->flags |= FSCOPE_BREAK;
- gola_new(ls, NAME_BREAK, VSTACK_GOTO, bcemit_jmp(ls->fs));
-}
-
-/* Parse 'goto' statement. */
-static void parse_goto(LexState *ls)
-{
- FuncState *fs = ls->fs;
- GCstr *name = lex_str(ls);
- VarInfo *vl = gola_findlabel(ls, name);
- if (vl) /* Treat backwards goto within same scope like a loop. */
- bcemit_AJ(fs, BC_LOOP, vl->slot, -1); /* No BC range check. */
- fs->bl->flags |= FSCOPE_GOLA;
- gola_new(ls, name, VSTACK_GOTO, bcemit_jmp(fs));
-}
-
-/* Parse label. */
-static void parse_label(LexState *ls)
-{
- FuncState *fs = ls->fs;
- GCstr *name;
- MSize idx;
- fs->lasttarget = fs->pc;
- fs->bl->flags |= FSCOPE_GOLA;
- lj_lex_next(ls); /* Skip '::'. */
- name = lex_str(ls);
- if (gola_findlabel(ls, name))
- lj_lex_error(ls, 0, LJ_ERR_XLDUP, strdata(name));
- idx = gola_new(ls, name, VSTACK_LABEL, fs->pc);
- lex_check(ls, TK_label);
- /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */
- for (;;) {
- if (ls->token == TK_label) {
- synlevel_begin(ls);
- parse_label(ls);
- synlevel_end(ls);
- } else if (LJ_52 && ls->token == ';') {
- lj_lex_next(ls);
- } else {
- break;
- }
- }
- /* Trailing label is considered to be outside of scope. */
- if (endofblock(ls->token) && ls->token != TK_until)
- ls->vstack[idx].slot = fs->bl->nactvar;
- gola_resolve(ls, fs->bl, idx);
-}
-
-/* -- Blocks, loops and conditional statements ---------------------------- */
-
-/* Parse a block. */
-static void parse_block(LexState *ls)
-{
- FuncState *fs = ls->fs;
- FuncScope bl;
- fscope_begin(fs, &bl, 0);
- parse_chunk(ls);
- fscope_end(fs);
-}
-
-/* Parse 'while' statement. */
-static void parse_while(LexState *ls, BCLine line)
-{
- FuncState *fs = ls->fs;
- BCPos start, loop, condexit;
- FuncScope bl;
- lj_lex_next(ls); /* Skip 'while'. */
- start = fs->lasttarget = fs->pc;
- condexit = expr_cond(ls);
- fscope_begin(fs, &bl, FSCOPE_LOOP);
- lex_check(ls, TK_do);
- loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
- parse_block(ls);
- jmp_patch(fs, bcemit_jmp(fs), start);
- lex_match(ls, TK_end, TK_while, line);
- fscope_end(fs);
- jmp_tohere(fs, condexit);
- jmp_patchins(fs, loop, fs->pc);
-}
-
-/* Parse 'repeat' statement. */
-static void parse_repeat(LexState *ls, BCLine line)
-{
- FuncState *fs = ls->fs;
- BCPos loop = fs->lasttarget = fs->pc;
- BCPos condexit;
- FuncScope bl1, bl2;
- fscope_begin(fs, &bl1, FSCOPE_LOOP); /* Breakable loop scope. */
- fscope_begin(fs, &bl2, 0); /* Inner scope. */
- lj_lex_next(ls); /* Skip 'repeat'. */
- bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
- parse_chunk(ls);
- lex_match(ls, TK_until, TK_repeat, line);
- condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */
- if (!(bl2.flags & FSCOPE_UPVAL)) { /* No upvalues? Just end inner scope. */
- fscope_end(fs);
- } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */
- parse_break(ls); /* Break from loop and close upvalues. */
- jmp_tohere(fs, condexit);
- fscope_end(fs); /* End inner scope and close upvalues. */
- condexit = bcemit_jmp(fs);
- }
- jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */
- jmp_patchins(fs, loop, fs->pc);
- fscope_end(fs); /* End loop scope. */
-}
-
-/* Parse numeric 'for'. */
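-/* The emitted loop is inverted: BC_FORI at the top sets up the loop and
-** branches to the exit if the body is never entered, while BC_FORL at the
-** end re-checks the condition and branches back to the first body
-** instruction.
-*/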
-static void parse_for_num(LexState *ls, GCstr *varname, BCLine line)
-{
- FuncState *fs = ls->fs;
- BCReg base = fs->freereg;
- FuncScope bl;
- BCPos loop, loopend;
- /* Hidden control variables. */
- var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX);
- var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP);
- var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP);
- /* Visible copy of index variable. */
- var_new(ls, FORL_EXT, varname);
- lex_check(ls, '=');
- expr_next(ls);
- lex_check(ls, ',');
- expr_next(ls);
- if (lex_opt(ls, ',')) {
- expr_next(ls);
- } else {
- bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */
- bcreg_reserve(fs, 1);
- }
- var_add(ls, 3); /* Hidden control variables. */
- lex_check(ls, TK_do);
- loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP);
- fscope_begin(fs, &bl, 0); /* Scope for visible variables. */
- var_add(ls, 1);
- bcreg_reserve(fs, 1);
- parse_block(ls);
- fscope_end(fs);
- /* Perform loop inversion. Loop control instructions are at the end. */
- loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP);
- fs->bcbase[loopend].line = line; /* Fix line for control ins. */
- jmp_patchins(fs, loopend, loop+1);
- jmp_patchins(fs, loop, fs->pc);
-}
-
-/* Try to predict whether the iterator is next() and specialize the bytecode.
-** Detecting next() and pairs() by name is simplistic, but quite effective.
-** The interpreter backs off if the check for the closure fails at runtime.
-*/
-static int predict_next(LexState *ls, FuncState *fs, BCPos pc)
-{
- BCIns ins = fs->bcbase[pc].ins;
- GCstr *name;
- cTValue *o;
- switch (bc_op(ins)) {
- case BC_MOV:
- name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name));
- break;
- case BC_UGET:
- name = gco2str(gcref(ls->vstack[fs->uvmap[bc_d(ins)]].name));
- break;
- case BC_GGET:
- /* There's no inverse index (yet), so look up the strings. */
- o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs"));
- if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
- return 1;
- o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next"));
- if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
- return 1;
- return 0;
- default:
- return 0;
- }
- return (name->len == 5 && !strcmp(strdata(name), "pairs")) ||
- (name->len == 4 && !strcmp(strdata(name), "next"));
-}
-
-/* Parse 'for' iterator. */
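-/* 'for vars in explist do' evaluates explist into three hidden slots
-** (generator, state, control); each iteration calls the generator with
-** (state, control) and assigns its results to the visible variables.
-*/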
-static void parse_for_iter(LexState *ls, GCstr *indexname)
-{
- FuncState *fs = ls->fs;
- ExpDesc e;
- BCReg nvars = 0;
- BCLine line;
- BCReg base = fs->freereg + 3;
- BCPos loop, loopend, exprpc = fs->pc;
- FuncScope bl;
- int isnext;
- /* Hidden control variables. */
- var_new_fixed(ls, nvars++, VARNAME_FOR_GEN);
- var_new_fixed(ls, nvars++, VARNAME_FOR_STATE);
- var_new_fixed(ls, nvars++, VARNAME_FOR_CTL);
- /* Visible variables returned from iterator. */
- var_new(ls, nvars++, indexname);
- while (lex_opt(ls, ','))
- var_new(ls, nvars++, lex_str(ls));
- lex_check(ls, TK_in);
- line = ls->linenumber;
- assign_adjust(ls, 3, expr_list(ls, &e), &e);
- bcreg_bump(fs, 3); /* The iterator needs another 3 slots (func + 2 args). */
- isnext = (nvars <= 5 && predict_next(ls, fs, exprpc));
- var_add(ls, 3); /* Hidden control variables. */
- lex_check(ls, TK_do);
- loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP);
- fscope_begin(fs, &bl, 0); /* Scope for visible variables. */
- var_add(ls, nvars-3);
- bcreg_reserve(fs, nvars-3);
- parse_block(ls);
- fscope_end(fs);
- /* Perform loop inversion. Loop control instructions are at the end. */
- jmp_patchins(fs, loop, fs->pc);
- bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1);
- loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP);
- fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */
- fs->bcbase[loopend].line = line;
- jmp_patchins(fs, loopend, loop+1);
-}
-
-/* Parse 'for' statement. */
-static void parse_for(LexState *ls, BCLine line)
-{
- FuncState *fs = ls->fs;
- GCstr *varname;
- FuncScope bl;
- fscope_begin(fs, &bl, FSCOPE_LOOP);
- lj_lex_next(ls); /* Skip 'for'. */
- varname = lex_str(ls); /* Get first variable name. */
- if (ls->token == '=')
- parse_for_num(ls, varname, line);
- else if (ls->token == ',' || ls->token == TK_in)
- parse_for_iter(ls, varname);
- else
- err_syntax(ls, LJ_ERR_XFOR);
- lex_match(ls, TK_end, TK_for, line);
- fscope_end(fs); /* Resolve break list. */
-}
-
-/* Parse condition and 'then' block. */
-static BCPos parse_then(LexState *ls)
-{
- BCPos condexit;
- lj_lex_next(ls); /* Skip 'if' or 'elseif'. */
- condexit = expr_cond(ls);
- lex_check(ls, TK_then);
- parse_block(ls);
- return condexit;
-}
-
-/* Parse 'if' statement. */
-static void parse_if(LexState *ls, BCLine line)
-{
- FuncState *fs = ls->fs;
- BCPos flist;
- BCPos escapelist = NO_JMP;
- flist = parse_then(ls);
- while (ls->token == TK_elseif) { /* Parse multiple 'elseif' blocks. */
- jmp_append(fs, &escapelist, bcemit_jmp(fs));
- jmp_tohere(fs, flist);
- flist = parse_then(ls);
- }
- if (ls->token == TK_else) { /* Parse optional 'else' block. */
- jmp_append(fs, &escapelist, bcemit_jmp(fs));
- jmp_tohere(fs, flist);
- lj_lex_next(ls); /* Skip 'else'. */
- parse_block(ls);
- } else {
- jmp_append(fs, &escapelist, flist);
- }
- jmp_tohere(fs, escapelist);
- lex_match(ls, TK_end, TK_if, line);
-}
-
-/* -- Parse statements ---------------------------------------------------- */
-
-/* Parse a statement. Returns 1 if it must be the last one in a chunk. */
-static int parse_stmt(LexState *ls)
-{
- BCLine line = ls->linenumber;
- switch (ls->token) {
- case TK_if:
- parse_if(ls, line);
- break;
- case TK_while:
- parse_while(ls, line);
- break;
- case TK_do:
- lj_lex_next(ls);
- parse_block(ls);
- lex_match(ls, TK_end, TK_do, line);
- break;
- case TK_for:
- parse_for(ls, line);
- break;
- case TK_repeat:
- parse_repeat(ls, line);
- break;
- case TK_function:
- parse_func(ls, line);
- break;
- case TK_local:
- lj_lex_next(ls);
- parse_local(ls);
- break;
- case TK_return:
- parse_return(ls);
- return 1; /* Must be last. */
- case TK_break:
- lj_lex_next(ls);
- parse_break(ls);
- return !LJ_52; /* Must be last in Lua 5.1. */
-#if LJ_52
- case ';':
- lj_lex_next(ls);
- break;
-#endif
- case TK_label:
- parse_label(ls);
- break;
- case TK_goto:
- if (LJ_52 || lj_lex_lookahead(ls) == TK_name) {
- lj_lex_next(ls);
- parse_goto(ls);
- break;
- } /* else: fallthrough */
- default:
- parse_call_assign(ls);
- break;
- }
- return 0;
-}
-
-/* A chunk is a list of statements optionally separated by semicolons. */
-static void parse_chunk(LexState *ls)
-{
- int islast = 0;
- synlevel_begin(ls);
- while (!islast && !endofblock(ls->token)) {
- islast = parse_stmt(ls);
- lex_opt(ls, ';');
- lua_assert(ls->fs->framesize >= ls->fs->freereg &&
- ls->fs->freereg >= ls->fs->nactvar);
- ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */
- }
- synlevel_end(ls);
-}
-
-/* Entry point of bytecode parser. */
-GCproto *lj_parse(LexState *ls)
-{
- FuncState fs;
- FuncScope bl;
- GCproto *pt;
- lua_State *L = ls->L;
-#ifdef LUAJIT_DISABLE_DEBUGINFO
- ls->chunkname = lj_str_newlit(L, "=");
-#else
- ls->chunkname = lj_str_newz(L, ls->chunkarg);
-#endif
- setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */
- incr_top(L);
- ls->level = 0;
- fs_init(ls, &fs);
- fs.linedefined = 0;
- fs.numparams = 0;
- fs.bcbase = NULL;
- fs.bclim = 0;
- fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */
- fscope_begin(&fs, &bl, 0);
- bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */
- lj_lex_next(ls); /* Read-ahead first token. */
- parse_chunk(ls);
- if (ls->token != TK_eof)
- err_token(ls, TK_eof);
- pt = fs_finish(ls, ls->linenumber);
- L->top--; /* Drop chunkname. */
- lua_assert(fs.prev == NULL);
- lua_assert(ls->fs == NULL);
- lua_assert(pt->sizeuv == 0);
- return pt;
-}
-
+/*
+** Lua parser (source code -> bytecode).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_parse_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+/* -- Parser structures and definitions ----------------------------------- */
+
+/* Expression kinds. */
+typedef enum {
+ /* Constant expressions must be first and in this order: */
+ VKNIL,
+ VKFALSE,
+ VKTRUE,
+ VKSTR, /* sval = string value */
+ VKNUM, /* nval = number value */
+ VKLAST = VKNUM,
+ VKCDATA, /* nval = cdata value, not treated as a constant expression */
+ /* Non-constant expressions follow: */
+ VLOCAL, /* info = local register, aux = vstack index */
+ VUPVAL, /* info = upvalue index, aux = vstack index */
+ VGLOBAL, /* sval = string value */
+ VINDEXED, /* info = table register, aux = index reg/byte/string const */
+ VJMP, /* info = instruction PC */
+ VRELOCABLE, /* info = instruction PC */
+ VNONRELOC, /* info = result register */
+ VCALL, /* info = instruction PC, aux = base */
+ VVOID
+} ExpKind;
+
+/* Expression descriptor. */
+typedef struct ExpDesc {
+ union {
+ struct {
+ uint32_t info; /* Primary info. */
+ uint32_t aux; /* Secondary info. */
+ } s;
+ TValue nval; /* Number value. */
+ GCstr *sval; /* String value. */
+ } u;
+ ExpKind k;
+ BCPos t; /* True condition jump list. */
+ BCPos f; /* False condition jump list. */
+} ExpDesc;
+
+/* Macros for expressions. */
+#define expr_hasjump(e) ((e)->t != (e)->f)
+
+#define expr_isk(e) ((e)->k <= VKLAST)
+#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e))
+#define expr_isnumk(e) ((e)->k == VKNUM)
+#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e))
+#define expr_isstrk(e) ((e)->k == VKSTR)
+
+#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval)
+#define expr_numberV(e) numberVnum(expr_numtv((e)))
+
+/* Initialize expression. */
+static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info)
+{
+ e->k = k;
+ e->u.s.info = info;
+ e->f = e->t = NO_JMP;
+}
+
+/* Check number constant for +-0. */
+static int expr_numiszero(ExpDesc *e)
+{
+ TValue *o = expr_numtv(e);
+ return tvisint(o) ? (intV(o) == 0) : tviszero(o);
+}
+
+/* Per-function linked list of scope blocks. */
+typedef struct FuncScope {
+ struct FuncScope *prev; /* Link to outer scope. */
+ MSize vstart; /* Start of block-local variables. */
+ uint8_t nactvar; /* Number of active vars outside the scope. */
+ uint8_t flags; /* Scope flags. */
+} FuncScope;
+
+#define FSCOPE_LOOP 0x01 /* Scope is a (breakable) loop. */
+#define FSCOPE_BREAK 0x02 /* Break used in scope. */
+#define FSCOPE_GOLA 0x04 /* Goto or label used in scope. */
+#define FSCOPE_UPVAL 0x08 /* Upvalue in scope. */
+#define FSCOPE_NOCLOSE 0x10 /* Do not close upvalues. */
+
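+/* Sentinel variable name used to route 'break' through the goto/label machinery. */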
+#define NAME_BREAK ((GCstr *)(uintptr_t)1)
+
+/* Index into variable stack. */
+typedef uint16_t VarIndex;
+#define LJ_MAX_VSTACK (65536 - LJ_MAX_UPVAL)
+
+/* Variable/goto/label info. */
+#define VSTACK_VAR_RW 0x01 /* R/W variable. */
+#define VSTACK_GOTO 0x02 /* Pending goto. */
+#define VSTACK_LABEL 0x04 /* Label. */
+
+/* Per-function state. */
+typedef struct FuncState {
+ GCtab *kt; /* Hash table for constants. */
+ LexState *ls; /* Lexer state. */
+ lua_State *L; /* Lua state. */
+ FuncScope *bl; /* Current scope. */
+ struct FuncState *prev; /* Enclosing function. */
+ BCPos pc; /* Next bytecode position. */
+ BCPos lasttarget; /* Bytecode position of last jump target. */
+ BCPos jpc; /* Pending jump list to next bytecode. */
+ BCReg freereg; /* First free register. */
+ BCReg nactvar; /* Number of active local variables. */
+ BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants. */
+ BCLine linedefined; /* First line of the function definition. */
+ BCInsLine *bcbase; /* Base of bytecode stack. */
+ BCPos bclim; /* Limit of bytecode stack. */
+ MSize vbase; /* Base of variable stack for this function. */
+ uint8_t flags; /* Prototype flags. */
+ uint8_t numparams; /* Number of parameters. */
+ uint8_t framesize; /* Fixed frame size. */
+ uint8_t nuv; /* Number of upvalues. */
+ VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */
+ VarIndex uvmap[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx. */
+ VarIndex uvtmp[LJ_MAX_UPVAL]; /* Temporary upvalue map. */
+} FuncState;
+
+/* Binary and unary operators. ORDER OPR */
+typedef enum BinOpr {
+ OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */
+ OPR_CONCAT,
+ OPR_NE, OPR_EQ,
+ OPR_LT, OPR_GE, OPR_LE, OPR_GT,
+ OPR_AND, OPR_OR,
+ OPR_NOBINOPR
+} BinOpr;
+
+LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD);
+
+/* -- Error handling ------------------------------------------------------ */
+
+LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em)
+{
+ lj_lex_error(ls, ls->token, em);
+}
+
+LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken token)
+{
+ lj_lex_error(ls, ls->token, LJ_ERR_XTOKEN, lj_lex_token2str(ls, token));
+}
+
+LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what)
+{
+ if (fs->linedefined == 0)
+ lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what);
+ else
+ lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what);
+}
+
+#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m)
+#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m)
+#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); }
+
+/* -- Management of constants --------------------------------------------- */
+
+/* Return bytecode encoding for primitive constant. */
+#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k)
+
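+/* Constants are interned via fs->kt: the value stored for a constant key
+** keeps the assigned slot index in its low 32 bits, and a zero high word
+** marks an already-assigned slot (see const_num/const_gc below).
+*/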
+#define tvhaskslot(o) ((o)->u32.hi == 0)
+#define tvkslot(o) ((o)->u32.lo)
+
+/* Add a number constant. */
+static BCReg const_num(FuncState *fs, ExpDesc *e)
+{
+ lua_State *L = fs->L;
+ TValue *o;
+ lua_assert(expr_isnumk(e));
+ o = lj_tab_set(L, fs->kt, &e->u.nval);
+ if (tvhaskslot(o))
+ return tvkslot(o);
+ o->u64 = fs->nkn;
+ return fs->nkn++;
+}
+
+/* Add a GC object constant. */
+static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype)
+{
+ lua_State *L = fs->L;
+ TValue key, *o;
+ setgcV(L, &key, gc, itype);
+ /* NOBARRIER: the key is new or kept alive. */
+ o = lj_tab_set(L, fs->kt, &key);
+ if (tvhaskslot(o))
+ return tvkslot(o);
+ o->u64 = fs->nkgc;
+ return fs->nkgc++;
+}
+
+/* Add a string constant. */
+static BCReg const_str(FuncState *fs, ExpDesc *e)
+{
+ lua_assert(expr_isstrk(e) || e->k == VGLOBAL);
+ return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR);
+}
+
+/* Anchor string constant to avoid GC. */
+GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len)
+{
+ /* NOBARRIER: the key is new or kept alive. */
+ lua_State *L = ls->L;
+ GCstr *s = lj_str_new(L, str, len);
+ TValue *tv = lj_tab_setstr(L, ls->fs->kt, s);
+ if (tvisnil(tv)) setboolV(tv, 1);
+ lj_gc_check(L);
+ return s;
+}
+
+#if LJ_HASFFI
+/* Anchor cdata to avoid GC. */
+void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd)
+{
+ /* NOBARRIER: the key is new or kept alive. */
+ lua_State *L = ls->L;
+ setcdataV(L, tv, cd);
+ setboolV(lj_tab_set(L, ls->fs->kt, tv), 1);
+}
+#endif
+
+/* -- Jump list handling -------------------------------------------------- */
+
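+/* Pending jumps form linked lists threaded through the jump offsets
+** themselves: NO_JMP terminates a list, any other offset encodes the
+** position of the next pending jump.
+*/
+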
+/* Get next element in jump list. */
+static BCPos jmp_next(FuncState *fs, BCPos pc)
+{
+ ptrdiff_t delta = bc_j(fs->bcbase[pc].ins);
+ if ((BCPos)delta == NO_JMP)
+ return NO_JMP;
+ else
+ return (BCPos)(((ptrdiff_t)pc+1)+delta);
+}
+
+/* Check if any of the instructions on the jump list produce no value. */
+static int jmp_novalue(FuncState *fs, BCPos list)
+{
+ for (; list != NO_JMP; list = jmp_next(fs, list)) {
+ BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins;
+ if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG))
+ return 1;
+ }
+ return 0;
+}
+
+/* Patch register of test instructions. */
+static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg)
+{
+ BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc];
+ BCOp op = bc_op(ilp->ins);
+ if (op == BC_ISTC || op == BC_ISFC) {
+ if (reg != NO_REG && reg != bc_d(ilp->ins)) {
+ setbc_a(&ilp->ins, reg);
+ } else { /* Nothing to store or already in the right register. */
+ setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC));
+ setbc_a(&ilp->ins, 0);
+ }
+ } else if (bc_a(ilp->ins) == NO_REG) {
+ if (reg == NO_REG) {
+ ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0);
+ } else {
+ setbc_a(&ilp->ins, reg);
+ if (reg >= bc_a(ilp[1].ins))
+ setbc_a(&ilp[1].ins, reg+1);
+ }
+ } else {
+ return 0; /* Cannot patch other instructions. */
+ }
+ return 1;
+}
+
+/* Drop values for all instructions on jump list. */
+static void jmp_dropval(FuncState *fs, BCPos list)
+{
+ for (; list != NO_JMP; list = jmp_next(fs, list))
+ jmp_patchtestreg(fs, list, NO_REG);
+}
+
+/* Patch jump instruction to target. */
+static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest)
+{
+ BCIns *jmp = &fs->bcbase[pc].ins;
+ BCPos offset = dest-(pc+1)+BCBIAS_J;
+ lua_assert(dest != NO_JMP);
+ if (offset > BCMAX_D)
+ err_syntax(fs->ls, LJ_ERR_XJUMP);
+ setbc_d(jmp, offset);
+}
+
+/* Append to jump list. */
+static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2)
+{
+ if (l2 == NO_JMP) {
+ return;
+ } else if (*l1 == NO_JMP) {
+ *l1 = l2;
+ } else {
+ BCPos list = *l1;
+ BCPos next;
+ while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */
+ list = next;
+ jmp_patchins(fs, list, l2);
+ }
+}
+
+/* Patch jump list and preserve produced values. */
+static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget,
+ BCReg reg, BCPos dtarget)
+{
+ while (list != NO_JMP) {
+ BCPos next = jmp_next(fs, list);
+ if (jmp_patchtestreg(fs, list, reg))
+ jmp_patchins(fs, list, vtarget); /* Jump to target with value. */
+ else
+ jmp_patchins(fs, list, dtarget); /* Jump to default target. */
+ list = next;
+ }
+}
+
+/* Jump to following instruction. Append to list of pending jumps. */
+static void jmp_tohere(FuncState *fs, BCPos list)
+{
+ fs->lasttarget = fs->pc;
+ jmp_append(fs, &fs->jpc, list);
+}
+
+/* Patch jump list to target. */
+static void jmp_patch(FuncState *fs, BCPos list, BCPos target)
+{
+ if (target == fs->pc) {
+ jmp_tohere(fs, list);
+ } else {
+ lua_assert(target < fs->pc);
+ jmp_patchval(fs, list, target, NO_REG, target);
+ }
+}
+
+/* -- Bytecode register allocator ----------------------------------------- */
+
+/* Bump frame size. */
+static void bcreg_bump(FuncState *fs, BCReg n)
+{
+ BCReg sz = fs->freereg + n;
+ if (sz > fs->framesize) {
+ if (sz >= LJ_MAX_SLOTS)
+ err_syntax(fs->ls, LJ_ERR_XSLOTS);
+ fs->framesize = (uint8_t)sz;
+ }
+}
+
+/* Reserve registers. */
+static void bcreg_reserve(FuncState *fs, BCReg n)
+{
+ bcreg_bump(fs, n);
+ fs->freereg += n;
+}
+
+/* Free register. */
+static void bcreg_free(FuncState *fs, BCReg reg)
+{
+ if (reg >= fs->nactvar) {
+ fs->freereg--;
+ lua_assert(reg == fs->freereg);
+ }
+}
+
+/* Free register for expression. */
+static void expr_free(FuncState *fs, ExpDesc *e)
+{
+ if (e->k == VNONRELOC)
+ bcreg_free(fs, e->u.s.info);
+}
+
+/* -- Bytecode emitter ---------------------------------------------------- */
+
+/* Emit bytecode instruction. */
+static BCPos bcemit_INS(FuncState *fs, BCIns ins)
+{
+ BCPos pc = fs->pc;
+ LexState *ls = fs->ls;
+ jmp_patchval(fs, fs->jpc, pc, NO_REG, pc);
+ fs->jpc = NO_JMP;
+ if (LJ_UNLIKELY(pc >= fs->bclim)) {
+ ptrdiff_t base = fs->bcbase - ls->bcstack;
+ checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions");
+ lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine);
+ fs->bclim = (BCPos)(ls->sizebcstack - base);
+ fs->bcbase = ls->bcstack + base;
+ }
+ fs->bcbase[pc].ins = ins;
+ fs->bcbase[pc].line = ls->lastline;
+ fs->pc = pc+1;
+ return pc;
+}
+
+#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c))
+#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d))
+#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j))
+
+#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins)
+
+/* -- Bytecode emitter for expressions ------------------------------------ */
+
+/* Discharge non-constant expression to any register. */
+static void expr_discharge(FuncState *fs, ExpDesc *e)
+{
+ BCIns ins;
+ if (e->k == VUPVAL) {
+ ins = BCINS_AD(BC_UGET, 0, e->u.s.info);
+ } else if (e->k == VGLOBAL) {
+ ins = BCINS_AD(BC_GGET, 0, const_str(fs, e));
+ } else if (e->k == VINDEXED) {
+ BCReg rc = e->u.s.aux;
+ if ((int32_t)rc < 0) {
+ ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc);
+ } else if (rc > BCMAX_C) {
+ ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1));
+ } else {
+ bcreg_free(fs, rc);
+ ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc);
+ }
+ bcreg_free(fs, e->u.s.info);
+ } else if (e->k == VCALL) {
+ e->u.s.info = e->u.s.aux;
+ e->k = VNONRELOC;
+ return;
+ } else if (e->k == VLOCAL) {
+ e->k = VNONRELOC;
+ return;
+ } else {
+ return;
+ }
+ e->u.s.info = bcemit_INS(fs, ins);
+ e->k = VRELOCABLE;
+}
+
+/* Emit bytecode to set a range of registers to nil. */
+static void bcemit_nil(FuncState *fs, BCReg from, BCReg n)
+{
+ if (fs->pc > fs->lasttarget) { /* No jumps to current position? */
+ BCIns *ip = &fs->bcbase[fs->pc-1].ins;
+ BCReg pto, pfrom = bc_a(*ip);
+ switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */
+ case BC_KPRI:
+ if (bc_d(*ip) != ~LJ_TNIL) break;
+ if (from == pfrom) {
+ if (n == 1) return;
+ } else if (from == pfrom+1) {
+ from = pfrom;
+ n++;
+ } else {
+ break;
+ }
+ *ip = BCINS_AD(BC_KNIL, from, from+n-1); /* Replace KPRI. */
+ return;
+ case BC_KNIL:
+ pto = bc_d(*ip);
+ if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */
+ if (from+n-1 > pto)
+ setbc_d(ip, from+n-1); /* Patch previous instruction range. */
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ /* Emit new instruction or replace old instruction. */
+ bcemit_INS(fs, n == 1 ? BCINS_AD(BC_KPRI, from, VKNIL) :
+ BCINS_AD(BC_KNIL, from, from+n-1));
+}
+
+/* Discharge an expression to a specific register. Ignore branches. */
+static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg)
+{
+ BCIns ins;
+ expr_discharge(fs, e);
+ if (e->k == VKSTR) {
+ ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e));
+ } else if (e->k == VKNUM) {
+#if LJ_DUALNUM
+ cTValue *tv = expr_numtv(e);
+ if (tvisint(tv) && checki16(intV(tv)))
+ ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv));
+ else
+#else
+ lua_Number n = expr_numberV(e);
+ int32_t k = lj_num2int(n);
+ if (checki16(k) && n == (lua_Number)k)
+ ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k);
+ else
+#endif
+ ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e));
+#if LJ_HASFFI
+ } else if (e->k == VKCDATA) {
+ fs->flags |= PROTO_FFI;
+ ins = BCINS_AD(BC_KCDATA, reg,
+ const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA));
+#endif
+ } else if (e->k == VRELOCABLE) {
+ setbc_a(bcptr(fs, e), reg);
+ goto noins;
+ } else if (e->k == VNONRELOC) {
+ if (reg == e->u.s.info)
+ goto noins;
+ ins = BCINS_AD(BC_MOV, reg, e->u.s.info);
+ } else if (e->k == VKNIL) {
+ bcemit_nil(fs, reg, 1);
+ goto noins;
+ } else if (e->k <= VKTRUE) {
+ ins = BCINS_AD(BC_KPRI, reg, const_pri(e));
+ } else {
+ lua_assert(e->k == VVOID || e->k == VJMP);
+ return;
+ }
+ bcemit_INS(fs, ins);
+noins:
+ e->u.s.info = reg;
+ e->k = VNONRELOC;
+}
+
+/* Forward declaration. */
+static BCPos bcemit_jmp(FuncState *fs);
+
+/* Discharge an expression to a specific register. */
+static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg)
+{
+ expr_toreg_nobranch(fs, e, reg);
+ if (e->k == VJMP)
+ jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */
+ if (expr_hasjump(e)) { /* Discharge expression with branches. */
+ BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP;
+ if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) {
+ BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs);
+ jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE);
+ bcemit_AJ(fs, BC_JMP, fs->freereg, 1);
+ jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE);
+ jmp_tohere(fs, jval);
+ }
+ jend = fs->pc;
+ fs->lasttarget = jend;
+ jmp_patchval(fs, e->f, jend, reg, jfalse);
+ jmp_patchval(fs, e->t, jend, reg, jtrue);
+ }
+ e->f = e->t = NO_JMP;
+ e->u.s.info = reg;
+ e->k = VNONRELOC;
+}
+
+/* Discharge an expression to the next free register. */
+static void expr_tonextreg(FuncState *fs, ExpDesc *e)
+{
+ expr_discharge(fs, e);
+ expr_free(fs, e);
+ bcreg_reserve(fs, 1);
+ expr_toreg(fs, e, fs->freereg - 1);
+}
+
+/* Discharge an expression to any register. */
+static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e)
+{
+ expr_discharge(fs, e);
+ if (e->k == VNONRELOC) {
+ if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */
+ if (e->u.s.info >= fs->nactvar) {
+ expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */
+ return e->u.s.info;
+ }
+ }
+ expr_tonextreg(fs, e); /* Discharge to next register. */
+ return e->u.s.info;
+}
+
+/* Partially discharge expression to a value. */
+static void expr_toval(FuncState *fs, ExpDesc *e)
+{
+ if (expr_hasjump(e))
+ expr_toanyreg(fs, e);
+ else
+ expr_discharge(fs, e);
+}
+
+/* Emit store for LHS expression. */
+static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e)
+{
+ BCIns ins;
+ if (var->k == VLOCAL) {
+ fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW;
+ expr_free(fs, e);
+ expr_toreg(fs, e, var->u.s.info);
+ return;
+ } else if (var->k == VUPVAL) {
+ fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW;
+ expr_toval(fs, e);
+ if (e->k <= VKTRUE)
+ ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e));
+ else if (e->k == VKSTR)
+ ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e));
+ else if (e->k == VKNUM)
+ ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e));
+ else
+ ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e));
+ } else if (var->k == VGLOBAL) {
+ BCReg ra = expr_toanyreg(fs, e);
+ ins = BCINS_AD(BC_GSET, ra, const_str(fs, var));
+ } else {
+ BCReg ra, rc;
+ lua_assert(var->k == VINDEXED);
+ ra = expr_toanyreg(fs, e);
+ rc = var->u.s.aux;
+ if ((int32_t)rc < 0) {
+ ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc);
+ } else if (rc > BCMAX_C) {
+ ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1));
+ } else {
+ /* Free late-allocated key reg to avoid assert on free of value reg. */
+ /* This can only happen when called from expr_table(). */
+ lua_assert(e->k != VNONRELOC || ra < fs->nactvar ||
+ rc < ra || (bcreg_free(fs, rc),1));
+ ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc);
+ }
+ }
+ bcemit_INS(fs, ins);
+ expr_free(fs, e);
+}
+
+/* Emit method lookup expression. */
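+/* For 'obj:m(args)': the method is fetched into a fresh register and the
+** object is copied into the following slot as the implicit first argument.
+*/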
+static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key)
+{
+ BCReg idx, func, obj = expr_toanyreg(fs, e);
+ expr_free(fs, e);
+ func = fs->freereg;
+ bcemit_AD(fs, BC_MOV, func+1, obj); /* Copy object to first argument. */
+ lua_assert(expr_isstrk(key));
+ idx = const_str(fs, key);
+ if (idx <= BCMAX_C) {
+ bcreg_reserve(fs, 2);
+ bcemit_ABC(fs, BC_TGETS, func, obj, idx);
+ } else {
+ bcreg_reserve(fs, 3);
+ bcemit_AD(fs, BC_KSTR, func+2, idx);
+ bcemit_ABC(fs, BC_TGETV, func, obj, func+2);
+ fs->freereg--;
+ }
+ e->u.s.info = func;
+ e->k = VNONRELOC;
+}
+
+/* -- Bytecode emitter for branches --------------------------------------- */
+
+/* Emit unconditional branch. */
+static BCPos bcemit_jmp(FuncState *fs)
+{
+ BCPos jpc = fs->jpc;
+ BCPos j = fs->pc - 1;
+ BCIns *ip = &fs->bcbase[j].ins;
+ fs->jpc = NO_JMP;
+ if ((int32_t)j >= (int32_t)fs->lasttarget && bc_op(*ip) == BC_UCLO)
+ setbc_j(ip, NO_JMP);
+ else
+ j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP);
+ jmp_append(fs, &j, jpc);
+ return j;
+}
+
+/* Invert branch condition of bytecode instruction. */
+static void invertcond(FuncState *fs, ExpDesc *e)
+{
+ BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins;
+ setbc_op(ip, bc_op(*ip)^1);
+}
+
+/* Emit conditional branch. */
+static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond)
+{
+ BCPos pc;
+ if (e->k == VRELOCABLE) {
+ BCIns *ip = bcptr(fs, e);
+ if (bc_op(*ip) == BC_NOT) {
+ *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip));
+ return bcemit_jmp(fs);
+ }
+ }
+ if (e->k != VNONRELOC) {
+ bcreg_reserve(fs, 1);
+ expr_toreg_nobranch(fs, e, fs->freereg-1);
+ }
+ bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info);
+ pc = bcemit_jmp(fs);
+ expr_free(fs, e);
+ return pc;
+}
+
+/* Emit branch on true condition. */
+static void bcemit_branch_t(FuncState *fs, ExpDesc *e)
+{
+ BCPos pc;
+ expr_discharge(fs, e);
+ if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
+ pc = NO_JMP; /* Never jump. */
+ else if (e->k == VJMP)
+ invertcond(fs, e), pc = e->u.s.info;
+ else if (e->k == VKFALSE || e->k == VKNIL)
+ expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
+ else
+ pc = bcemit_branch(fs, e, 0);
+ jmp_append(fs, &e->f, pc);
+ jmp_tohere(fs, e->t);
+ e->t = NO_JMP;
+}
+
+/* Emit branch on false condition. */
+static void bcemit_branch_f(FuncState *fs, ExpDesc *e)
+{
+ BCPos pc;
+ expr_discharge(fs, e);
+ if (e->k == VKNIL || e->k == VKFALSE)
+ pc = NO_JMP; /* Never jump. */
+ else if (e->k == VJMP)
+ pc = e->u.s.info;
+ else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
+ expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
+ else
+ pc = bcemit_branch(fs, e, 1);
+ jmp_append(fs, &e->t, pc);
+ jmp_tohere(fs, e->f);
+ e->f = NO_JMP;
+}
+
+/* -- Bytecode emitter for operators -------------------------------------- */
+
+/* Try constant-folding of arithmetic operators. */
+static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ TValue o;
+ lua_Number n;
+ if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0;
+ n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD);
+ setnumV(&o, n);
+ if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */
+ if (LJ_DUALNUM) {
+ int32_t k = lj_num2int(n);
+ if ((lua_Number)k == n) {
+ setintV(&e1->u.nval, k);
+ return 1;
+ }
+ }
+ setnumV(&e1->u.nval, n);
+ return 1;
+}
+
+/* Emit arithmetic operator. */
+static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ BCReg rb, rc, t;
+ uint32_t op;
+ if (foldarith(opr, e1, e2))
+ return;
+ if (opr == OPR_POW) {
+ op = BC_POW;
+ rc = expr_toanyreg(fs, e2);
+ rb = expr_toanyreg(fs, e1);
+ } else {
+ op = opr-OPR_ADD+BC_ADDVV;
+ /* Must discharge 2nd operand first since VINDEXED might free regs. */
+ expr_toval(fs, e2);
+ if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C)
+ op -= BC_ADDVV-BC_ADDVN;
+ else
+ rc = expr_toanyreg(fs, e2);
+ /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */
+ lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC);
+ expr_toval(fs, e1);
+ /* Avoid two consts to satisfy bytecode constraints. */
+ if (expr_isnumk(e1) && !expr_isnumk(e2) &&
+ (t = const_num(fs, e1)) <= BCMAX_B) {
+ rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV;
+ } else {
+ rb = expr_toanyreg(fs, e1);
+ }
+ }
+ /* Using expr_free might cause asserts if the order is wrong. */
+ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
+ if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
+ e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc);
+ e1->k = VRELOCABLE;
+}
+
+/* Emit comparison operator. */
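+/* 'a > b' and 'a >= b' are compiled as 'b < a' and 'b <= a': the operands
+** are swapped so that only the LT/LE variants need to be emitted here.
+*/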
+static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ ExpDesc *eret = e1;
+ BCIns ins;
+ expr_toval(fs, e1);
+ if (opr == OPR_EQ || opr == OPR_NE) {
+ BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV;
+ BCReg ra;
+ if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */
+ ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. */
+ expr_toval(fs, e2);
+ switch (e2->k) {
+ case VKNIL: case VKFALSE: case VKTRUE:
+ ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2));
+ break;
+ case VKSTR:
+ ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2));
+ break;
+ case VKNUM:
+ ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2));
+ break;
+ default:
+ ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2));
+ break;
+ }
+ } else {
+ uint32_t op = opr-OPR_LT+BC_ISLT;
+ BCReg ra, rd;
+ if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */
+ e1 = e2; e2 = eret; /* Swap operands. */
+ op = ((op-BC_ISLT)^3)+BC_ISLT;
+ expr_toval(fs, e1);
+ }
+ rd = expr_toanyreg(fs, e2);
+ ra = expr_toanyreg(fs, e1);
+ ins = BCINS_AD(op, ra, rd);
+ }
+ /* Using expr_free might cause asserts if the order is wrong. */
+ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
+ if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
+ bcemit_INS(fs, ins);
+ eret->u.s.info = bcemit_jmp(fs);
+ eret->k = VJMP;
+}
+
+/* Fixup left side of binary operator. */
+static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e)
+{
+ if (op == OPR_AND) {
+ bcemit_branch_t(fs, e);
+ } else if (op == OPR_OR) {
+ bcemit_branch_f(fs, e);
+ } else if (op == OPR_CONCAT) {
+ expr_tonextreg(fs, e);
+ } else if (op == OPR_EQ || op == OPR_NE) {
+ if (!expr_isk_nojump(e)) expr_toanyreg(fs, e);
+ } else {
+ if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e);
+ }
+}
+
+/* Emit binary operator. */
+static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2)
+{
+ if (op <= OPR_POW) {
+ bcemit_arith(fs, op, e1, e2);
+ } else if (op == OPR_AND) {
+ lua_assert(e1->t == NO_JMP); /* List must be closed. */
+ expr_discharge(fs, e2);
+ jmp_append(fs, &e2->f, e1->f);
+ *e1 = *e2;
+ } else if (op == OPR_OR) {
+ lua_assert(e1->f == NO_JMP); /* List must be closed. */
+ expr_discharge(fs, e2);
+ jmp_append(fs, &e2->t, e1->t);
+ *e1 = *e2;
+ } else if (op == OPR_CONCAT) {
+ expr_toval(fs, e2);
+ if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) {
+ lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1);
+ expr_free(fs, e1);
+ setbc_b(bcptr(fs, e2), e1->u.s.info);
+ e1->u.s.info = e2->u.s.info;
+ } else {
+ expr_tonextreg(fs, e2);
+ expr_free(fs, e2);
+ expr_free(fs, e1);
+ e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info);
+ }
+ e1->k = VRELOCABLE;
+ } else {
+ lua_assert(op == OPR_NE || op == OPR_EQ ||
+ op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT);
+ bcemit_comp(fs, op, e1, e2);
+ }
+}
+
+/* Emit unary operator. */
+static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e)
+{
+ if (op == BC_NOT) {
+ /* Swap true and false lists. */
+ { BCPos temp = e->f; e->f = e->t; e->t = temp; }
+ jmp_dropval(fs, e->f);
+ jmp_dropval(fs, e->t);
+ expr_discharge(fs, e);
+ if (e->k == VKNIL || e->k == VKFALSE) {
+ e->k = VKTRUE;
+ return;
+ } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) {
+ e->k = VKFALSE;
+ return;
+ } else if (e->k == VJMP) {
+ invertcond(fs, e);
+ return;
+ } else if (e->k == VRELOCABLE) {
+ bcreg_reserve(fs, 1);
+ setbc_a(bcptr(fs, e), fs->freereg-1);
+ e->u.s.info = fs->freereg-1;
+ e->k = VNONRELOC;
+ } else {
+ lua_assert(e->k == VNONRELOC);
+ }
+ } else {
+ lua_assert(op == BC_UNM || op == BC_LEN);
+ if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */
+#if LJ_HASFFI
+ if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */
+ GCcdata *cd = cdataV(&e->u.nval);
+ int64_t *p = (int64_t *)cdataptr(cd);
+ if (cd->ctypeid == CTID_COMPLEX_DOUBLE)
+ p[1] ^= (int64_t)U64x(80000000,00000000);
+ else
+ *p = -*p;
+ return;
+ } else
+#endif
+ if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */
+ TValue *o = expr_numtv(e);
+ if (tvisint(o)) {
+ int32_t k = intV(o);
+ if (k == -k)
+ setnumV(o, -(lua_Number)k);
+ else
+ setintV(o, -k);
+ return;
+ } else {
+ o->u64 ^= U64x(80000000,00000000);
+ return;
+ }
+ }
+ }
+ expr_toanyreg(fs, e);
+ }
+ expr_free(fs, e);
+ e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info);
+ e->k = VRELOCABLE;
+}
+
+/* -- Lexer support ------------------------------------------------------- */
+
+/* Check and consume optional token. */
+static int lex_opt(LexState *ls, LexToken tok)
+{
+ if (ls->token == tok) {
+ lj_lex_next(ls);
+ return 1;
+ }
+ return 0;
+}
+
+/* Check and consume token. */
+static void lex_check(LexState *ls, LexToken tok)
+{
+ if (ls->token != tok)
+ err_token(ls, tok);
+ lj_lex_next(ls);
+}
+
+/* Check for matching token. */
+static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line)
+{
+ if (!lex_opt(ls, what)) {
+ if (line == ls->linenumber) {
+ err_token(ls, what);
+ } else {
+ const char *swhat = lj_lex_token2str(ls, what);
+ const char *swho = lj_lex_token2str(ls, who);
+ lj_lex_error(ls, ls->token, LJ_ERR_XMATCH, swhat, swho, line);
+ }
+ }
+}
+
+/* Check for string token. */
+static GCstr *lex_str(LexState *ls)
+{
+ GCstr *s;
+ if (ls->token != TK_name && (LJ_52 || ls->token != TK_goto))
+ err_token(ls, TK_name);
+ s = strV(&ls->tokenval);
+ lj_lex_next(ls);
+ return s;
+}
+
+/* -- Variable handling --------------------------------------------------- */
+
+#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]])
+
+/* Define a new local variable. */
+static void var_new(LexState *ls, BCReg n, GCstr *name)
+{
+ FuncState *fs = ls->fs;
+ MSize vtop = ls->vtop;
+ checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables");
+ if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
+ if (ls->sizevstack >= LJ_MAX_VSTACK)
+ lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
+ lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
+ }
+ lua_assert((uintptr_t)name < VARNAME__MAX ||
+ lj_tab_getstr(fs->kt, name) != NULL);
+ /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
+ setgcref(ls->vstack[vtop].name, obj2gco(name));
+ fs->varmap[fs->nactvar+n] = (uint16_t)vtop;
+ ls->vtop = vtop+1;
+}
+
+#define var_new_lit(ls, n, v) \
+ var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1))
+
+#define var_new_fixed(ls, n, vn) \
+ var_new(ls, (n), (GCstr *)(uintptr_t)(vn))
+
+/* Add local variables. */
+static void var_add(LexState *ls, BCReg nvars)
+{
+ FuncState *fs = ls->fs;
+ BCReg nactvar = fs->nactvar;
+ while (nvars--) {
+ VarInfo *v = &var_get(ls, fs, nactvar);
+ v->startpc = fs->pc;
+ v->slot = nactvar++;
+ v->info = 0;
+ }
+ fs->nactvar = nactvar;
+}
+
+/* Remove local variables. */
+static void var_remove(LexState *ls, BCReg tolevel)
+{
+ FuncState *fs = ls->fs;
+ while (fs->nactvar > tolevel)
+ var_get(ls, fs, --fs->nactvar).endpc = fs->pc;
+}
+
+/* Lookup local variable name. */
+static BCReg var_lookup_local(FuncState *fs, GCstr *n)
+{
+ int i;
+ for (i = fs->nactvar-1; i >= 0; i--) {
+ if (n == strref(var_get(fs->ls, fs, i).name))
+ return (BCReg)i;
+ }
+ return (BCReg)-1; /* Not found. */
+}
+
+/* Lookup or add upvalue index. */
+static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e)
+{
+ MSize i, n = fs->nuv;
+ for (i = 0; i < n; i++)
+ if (fs->uvmap[i] == vidx)
+ return i; /* Already exists. */
+ /* Otherwise create a new one. */
+ checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues");
+ lua_assert(e->k == VLOCAL || e->k == VUPVAL);
+ fs->uvmap[n] = (uint16_t)vidx;
+ fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info);
+ fs->nuv = n+1;
+ return n;
+}
+
+/* Forward declaration. */
+static void fscope_uvmark(FuncState *fs, BCReg level);
+
+/* Recursively lookup variables in enclosing functions. */
+static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first)
+{
+ if (fs) {
+ BCReg reg = var_lookup_local(fs, name);
+ if ((int32_t)reg >= 0) { /* Local in this function? */
+ expr_init(e, VLOCAL, reg);
+ if (!first)
+ fscope_uvmark(fs, reg); /* Scope now has an upvalue. */
+ return (MSize)(e->u.s.aux = (uint32_t)fs->varmap[reg]);
+ } else {
+ MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */
+ if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */
+ e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e);
+ e->k = VUPVAL;
+ return vidx;
+ }
+ }
+ } else { /* Not found in any function, must be a global. */
+ expr_init(e, VGLOBAL, 0);
+ e->u.sval = name;
+ }
+ return (MSize)-1; /* Global. */
+}
+
+/* Lookup variable name. */
+#define var_lookup(ls, e) \
+ var_lookup_((ls)->fs, lex_str(ls), (e), 1)
+
+/* -- Goto and label handling ---------------------------------------------- */
+
+/* Add a new goto or label. */
+static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc)
+{
+ FuncState *fs = ls->fs;
+ MSize vtop = ls->vtop;
+ if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
+ if (ls->sizevstack >= LJ_MAX_VSTACK)
+ lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
+ lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
+ }
+ lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL);
+ /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
+ setgcref(ls->vstack[vtop].name, obj2gco(name));
+ ls->vstack[vtop].startpc = pc;
+ ls->vstack[vtop].slot = (uint8_t)fs->nactvar;
+ ls->vstack[vtop].info = info;
+ ls->vtop = vtop+1;
+ return vtop;
+}
+
+#define gola_isgoto(v) ((v)->info & VSTACK_GOTO)
+#define gola_islabel(v) ((v)->info & VSTACK_LABEL)
+#define gola_isgotolabel(v) ((v)->info & (VSTACK_GOTO|VSTACK_LABEL))
+
+/* Patch goto to jump to label. */
+static void gola_patch(LexState *ls, VarInfo *vg, VarInfo *vl)
+{
+ FuncState *fs = ls->fs;
+ BCPos pc = vg->startpc;
+ setgcrefnull(vg->name); /* Invalidate pending goto. */
+ setbc_a(&fs->bcbase[pc].ins, vl->slot);
+ jmp_patch(fs, pc, vl->startpc);
+}
+
+/* Patch goto to close upvalues. */
+static void gola_close(LexState *ls, VarInfo *vg)
+{
+ FuncState *fs = ls->fs;
+ BCPos pc = vg->startpc;
+ BCIns *ip = &fs->bcbase[pc].ins;
+ lua_assert(gola_isgoto(vg));
+ lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO);
+ setbc_a(ip, vg->slot);
+ if (bc_op(*ip) == BC_JMP) {
+ BCPos next = jmp_next(fs, pc);
+ if (next != NO_JMP) jmp_patch(fs, next, pc); /* Jump to UCLO. */
+ setbc_op(ip, BC_UCLO); /* Turn into UCLO. */
+ setbc_j(ip, NO_JMP);
+ }
+}
+
+/* Resolve pending forward gotos for label. */
+static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx)
+{
+ VarInfo *vg = ls->vstack + bl->vstart;
+ VarInfo *vl = ls->vstack + idx;
+ for (; vg < vl; vg++)
+ if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) {
+ if (vg->slot < vl->slot) {
+ GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name);
+ lua_assert((uintptr_t)name >= VARNAME__MAX);
+ ls->linenumber = ls->fs->bcbase[vg->startpc].line;
+ lua_assert(strref(vg->name) != NAME_BREAK);
+ lj_lex_error(ls, 0, LJ_ERR_XGSCOPE,
+ strdata(strref(vg->name)), strdata(name));
+ }
+ gola_patch(ls, vg, vl);
+ }
+}
+
+/* Fixup remaining gotos and labels for scope. */
+static void gola_fixup(LexState *ls, FuncScope *bl)
+{
+ VarInfo *v = ls->vstack + bl->vstart;
+ VarInfo *ve = ls->vstack + ls->vtop;
+ for (; v < ve; v++) {
+ GCstr *name = strref(v->name);
+ if (name != NULL) { /* Only consider remaining valid gotos/labels. */
+ if (gola_islabel(v)) {
+ VarInfo *vg;
+ setgcrefnull(v->name); /* Invalidate label that goes out of scope. */
+ for (vg = v+1; vg < ve; vg++) /* Resolve pending backward gotos. */
+ if (strref(vg->name) == name && gola_isgoto(vg)) {
+ if ((bl->flags&FSCOPE_UPVAL) && vg->slot > v->slot)
+ gola_close(ls, vg);
+ gola_patch(ls, vg, v);
+ }
+ } else if (gola_isgoto(v)) {
+ if (bl->prev) { /* Propagate goto or break to outer scope. */
+ bl->prev->flags |= name == NAME_BREAK ? FSCOPE_BREAK : FSCOPE_GOLA;
+ v->slot = bl->nactvar;
+ if ((bl->flags & FSCOPE_UPVAL))
+ gola_close(ls, v);
+ } else { /* No outer scope: undefined goto label or no loop. */
+ ls->linenumber = ls->fs->bcbase[v->startpc].line;
+ if (name == NAME_BREAK)
+ lj_lex_error(ls, 0, LJ_ERR_XBREAK);
+ else
+ lj_lex_error(ls, 0, LJ_ERR_XLUNDEF, strdata(name));
+ }
+ }
+ }
+ }
+}
+
+/* Find existing label. */
+static VarInfo *gola_findlabel(LexState *ls, GCstr *name)
+{
+ VarInfo *v = ls->vstack + ls->fs->bl->vstart;
+ VarInfo *ve = ls->vstack + ls->vtop;
+ for (; v < ve; v++)
+ if (strref(v->name) == name && gola_islabel(v))
+ return v;
+ return NULL;
+}
+
+/* -- Scope handling ------------------------------------------------------ */
+
+/* Begin a scope. */
+static void fscope_begin(FuncState *fs, FuncScope *bl, int flags)
+{
+ bl->nactvar = (uint8_t)fs->nactvar;
+ bl->flags = flags;
+ bl->vstart = fs->ls->vtop;
+ bl->prev = fs->bl;
+ fs->bl = bl;
+ lua_assert(fs->freereg == fs->nactvar);
+}
+
+/* End a scope. */
+static void fscope_end(FuncState *fs)
+{
+ FuncScope *bl = fs->bl;
+ LexState *ls = fs->ls;
+ fs->bl = bl->prev;
+ var_remove(ls, bl->nactvar);
+ fs->freereg = fs->nactvar;
+ lua_assert(bl->nactvar == fs->nactvar);
+ if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL)
+ bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0);
+ if ((bl->flags & FSCOPE_BREAK)) {
+ if ((bl->flags & FSCOPE_LOOP)) {
+ MSize idx = gola_new(ls, NAME_BREAK, VSTACK_LABEL, fs->pc);
+ ls->vtop = idx; /* Drop break label immediately. */
+ gola_resolve(ls, bl, idx);
+ return;
+ } /* else: need the fixup step to propagate the breaks. */
+ } else if (!(bl->flags & FSCOPE_GOLA)) {
+ return;
+ }
+ gola_fixup(ls, bl);
+}
+
+/* Mark scope as having an upvalue. */
+static void fscope_uvmark(FuncState *fs, BCReg level)
+{
+ FuncScope *bl;
+ for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev)
+ ;
+ if (bl)
+ bl->flags |= FSCOPE_UPVAL;
+}
+
+/* -- Function state management ------------------------------------------- */
+
+/* Fixup bytecode for prototype. */
+static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n)
+{
+ BCInsLine *base = fs->bcbase;
+ MSize i;
+ pt->sizebc = n;
+ bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
+ fs->framesize, 0);
+ for (i = 1; i < n; i++)
+ bc[i] = base[i].ins;
+}
+
+/* Fixup upvalues for child prototype, step #2. */
+static void fs_fixup_uv2(FuncState *fs, GCproto *pt)
+{
+ VarInfo *vstack = fs->ls->vstack;
+ uint16_t *uv = proto_uv(pt);
+ MSize i, n = pt->sizeuv;
+ for (i = 0; i < n; i++) {
+ VarIndex vidx = uv[i];
+ if (vidx >= LJ_MAX_VSTACK)
+ uv[i] = vidx - LJ_MAX_VSTACK;
+ else if ((vstack[vidx].info & VSTACK_VAR_RW))
+ uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL;
+ else
+ uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL | PROTO_UV_IMMUTABLE;
+ }
+}
+
+/* Fixup constants for prototype. */
+static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr)
+{
+ GCtab *kt;
+ TValue *array;
+ Node *node;
+ MSize i, hmask;
+ checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants");
+ checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants");
+ setmref(pt->k, kptr);
+ pt->sizekn = fs->nkn;
+ pt->sizekgc = fs->nkgc;
+ kt = fs->kt;
+ array = tvref(kt->array);
+ for (i = 0; i < kt->asize; i++)
+ if (tvhaskslot(&array[i])) {
+ TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])];
+ if (LJ_DUALNUM)
+ setintV(tv, (int32_t)i);
+ else
+ setnumV(tv, (lua_Number)i);
+ }
+ node = noderef(kt->node);
+ hmask = kt->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (tvhaskslot(&n->val)) {
+ ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val);
+ lua_assert(!tvisint(&n->key));
+ if (tvisnum(&n->key)) {
+ TValue *tv = &((TValue *)kptr)[kidx];
+ if (LJ_DUALNUM) {
+ lua_Number nn = numV(&n->key);
+ int32_t k = lj_num2int(nn);
+ lua_assert(!tvismzero(&n->key));
+ if ((lua_Number)k == nn)
+ setintV(tv, k);
+ else
+ *tv = n->key;
+ } else {
+ *tv = n->key;
+ }
+ } else {
+ GCobj *o = gcV(&n->key);
+ setgcref(((GCRef *)kptr)[~kidx], o);
+ lj_gc_objbarrier(fs->L, pt, o);
+ if (tvisproto(&n->key))
+ fs_fixup_uv2(fs, gco2pt(o));
+ }
+ }
+ }
+}
+
+/* Fixup upvalues for prototype, step #1. */
+static void fs_fixup_uv1(FuncState *fs, GCproto *pt, uint16_t *uv)
+{
+ setmref(pt->uv, uv);
+ pt->sizeuv = fs->nuv;
+ memcpy(uv, fs->uvtmp, fs->nuv*sizeof(VarIndex));
+}
+
+#ifndef LUAJIT_DISABLE_DEBUGINFO
+/* Prepare lineinfo for prototype. */
+static size_t fs_prep_line(FuncState *fs, BCLine numline)
+{
+ return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
+}
+
+/* Fixup lineinfo for prototype. */
+static void fs_fixup_line(FuncState *fs, GCproto *pt,
+ void *lineinfo, BCLine numline)
+{
+ BCInsLine *base = fs->bcbase + 1;
+ BCLine first = fs->linedefined;
+ MSize i = 0, n = fs->pc-1;
+ pt->firstline = fs->linedefined;
+ pt->numline = numline;
+ setmref(pt->lineinfo, lineinfo);
+ if (LJ_LIKELY(numline < 256)) {
+ uint8_t *li = (uint8_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lua_assert(delta >= 0 && delta < 256);
+ li[i] = (uint8_t)delta;
+ } while (++i < n);
+ } else if (LJ_LIKELY(numline < 65536)) {
+ uint16_t *li = (uint16_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lua_assert(delta >= 0 && delta < 65536);
+ li[i] = (uint16_t)delta;
+ } while (++i < n);
+ } else {
+ uint32_t *li = (uint32_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lua_assert(delta >= 0);
+ li[i] = (uint32_t)delta;
+ } while (++i < n);
+ }
+}
+
+/* Resize buffer if needed. */
+static LJ_NOINLINE void fs_buf_resize(LexState *ls, MSize len)
+{
+ MSize sz = ls->sb.sz * 2;
+ while (ls->sb.n + len > sz) sz = sz * 2;
+ lj_str_resizebuf(ls->L, &ls->sb, sz);
+}
+
+static LJ_AINLINE void fs_buf_need(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY(ls->sb.n + len > ls->sb.sz))
+ fs_buf_resize(ls, len);
+}
+
+/* Add string to buffer. */
+static void fs_buf_str(LexState *ls, const char *str, MSize len)
+{
+ char *p = ls->sb.buf + ls->sb.n;
+ MSize i;
+ ls->sb.n += len;
+ for (i = 0; i < len; i++) p[i] = str[i];
+}
+
+/* Add ULEB128 value to buffer. */
+static void fs_buf_uleb128(LexState *ls, uint32_t v)
+{
+ MSize n = ls->sb.n;
+ uint8_t *p = (uint8_t *)ls->sb.buf;
+ for (; v >= 0x80; v >>= 7)
+ p[n++] = (uint8_t)((v & 0x7f) | 0x80);
+ p[n++] = (uint8_t)v;
+ ls->sb.n = n;
+}
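+
+/* Worked example (illustrative sketch only): the value 300 (0x12C) is
+** emitted by the loop above as the two bytes 0xAC 0x02. */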
+
+/* Prepare variable info for prototype. */
+static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar)
+{
+  VarInfo *vs = ls->vstack, *ve;
+ MSize i, n;
+ BCPos lastpc;
+ lj_str_resetbuf(&ls->sb); /* Copy to temp. string buffer. */
+ /* Store upvalue names. */
+ for (i = 0, n = fs->nuv; i < n; i++) {
+ GCstr *s = strref(vs[fs->uvmap[i]].name);
+ MSize len = s->len+1;
+ fs_buf_need(ls, len);
+ fs_buf_str(ls, strdata(s), len);
+ }
+ *ofsvar = ls->sb.n;
+ lastpc = 0;
+ /* Store local variable names and compressed ranges. */
+ for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) {
+ if (!gola_isgotolabel(vs)) {
+ GCstr *s = strref(vs->name);
+ BCPos startpc;
+ if ((uintptr_t)s < VARNAME__MAX) {
+ fs_buf_need(ls, 1 + 2*5);
+ ls->sb.buf[ls->sb.n++] = (uint8_t)(uintptr_t)s;
+ } else {
+ MSize len = s->len+1;
+ fs_buf_need(ls, len + 2*5);
+ fs_buf_str(ls, strdata(s), len);
+ }
+ startpc = vs->startpc;
+ fs_buf_uleb128(ls, startpc-lastpc);
+ fs_buf_uleb128(ls, vs->endpc-startpc);
+ lastpc = startpc;
+ }
+ }
+ fs_buf_need(ls, 1);
+ ls->sb.buf[ls->sb.n++] = '\0'; /* Terminator for varinfo. */
+ return ls->sb.n;
+}
+
+/* Fixup variable info for prototype. */
+static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar)
+{
+ setmref(pt->uvinfo, p);
+ setmref(pt->varinfo, (char *)p + ofsvar);
+ memcpy(p, ls->sb.buf, ls->sb.n); /* Copy from temp. string buffer. */
+}
+#else
+
+/* Initialize with empty debug info, if disabled. */
+#define fs_prep_line(fs, numline) (UNUSED(numline), 0)
+#define fs_fixup_line(fs, pt, li, numline) \
+ pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL)
+#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0)
+#define fs_fixup_var(ls, pt, p, ofsvar) \
+ setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL)
+
+#endif
+
+/* Check if bytecode op returns. */
+static int bcopisret(BCOp op)
+{
+ switch (op) {
+ case BC_CALLMT: case BC_CALLT:
+ case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Fixup return instruction for prototype. */
+static void fs_fixup_ret(FuncState *fs)
+{
+ BCPos lastpc = fs->pc;
+ if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) {
+ if ((fs->bl->flags & FSCOPE_UPVAL))
+ bcemit_AJ(fs, BC_UCLO, 0, 0);
+ bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */
+ }
+ fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */
+ fscope_end(fs);
+ lua_assert(fs->bl == NULL);
+ /* May need to fixup returns encoded before first function was created. */
+ if (fs->flags & PROTO_FIXUP_RETURN) {
+ BCPos pc;
+ for (pc = 1; pc < lastpc; pc++) {
+ BCIns ins = fs->bcbase[pc].ins;
+ BCPos offset;
+ switch (bc_op(ins)) {
+ case BC_CALLMT: case BC_CALLT:
+ case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
+ offset = bcemit_INS(fs, ins)-(pc+1)+BCBIAS_J; /* Copy return ins. */
+ if (offset > BCMAX_D)
+ err_syntax(fs->ls, LJ_ERR_XFIXUP);
+ /* Replace with UCLO plus branch. */
+ fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset);
+ break;
+ case BC_UCLO:
+ return; /* We're done. */
+ default:
+ break;
+ }
+ }
+ }
+}
+
+/* Finish a FuncState and return the new prototype. */
+static GCproto *fs_finish(LexState *ls, BCLine line)
+{
+ lua_State *L = ls->L;
+ FuncState *fs = ls->fs;
+ BCLine numline = line - fs->linedefined;
+ size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar;
+ GCproto *pt;
+
+ /* Apply final fixups. */
+ fs_fixup_ret(fs);
+
+ /* Calculate total size of prototype including all colocated arrays. */
+ sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef);
+ sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1);
+ ofsk = sizept; sizept += fs->nkn*sizeof(TValue);
+ ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2;
+ ofsli = sizept; sizept += fs_prep_line(fs, numline);
+ ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar);
+
+ /* Allocate prototype and initialize its fields. */
+ pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept);
+ pt->gct = ~LJ_TPROTO;
+ pt->sizept = (MSize)sizept;
+ pt->trace = 0;
+ pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN));
+ pt->numparams = fs->numparams;
+ pt->framesize = fs->framesize;
+ setgcref(pt->chunkname, obj2gco(ls->chunkname));
+
+ /* Close potentially uninitialized gap between bc and kgc. */
+ *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0;
+ fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc);
+ fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk));
+ fs_fixup_uv1(fs, pt, (uint16_t *)((char *)pt + ofsuv));
+ fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline);
+ fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar);
+
+ lj_vmevent_send(L, BC,
+ setprotoV(L, L->top++, pt);
+ );
+
+ L->top--; /* Pop table of constants. */
+ ls->vtop = fs->vbase; /* Reset variable stack. */
+ ls->fs = fs->prev;
+ lua_assert(ls->fs != NULL || ls->token == TK_eof);
+ return pt;
+}
+
+/* Initialize a new FuncState. */
+static void fs_init(LexState *ls, FuncState *fs)
+{
+ lua_State *L = ls->L;
+ fs->prev = ls->fs; ls->fs = fs; /* Append to list. */
+ fs->ls = ls;
+ fs->vbase = ls->vtop;
+ fs->L = L;
+ fs->pc = 0;
+ fs->lasttarget = 0;
+ fs->jpc = NO_JMP;
+ fs->freereg = 0;
+ fs->nkgc = 0;
+ fs->nkn = 0;
+ fs->nactvar = 0;
+ fs->nuv = 0;
+ fs->bl = NULL;
+ fs->flags = 0;
+ fs->framesize = 1; /* Minimum frame size. */
+ fs->kt = lj_tab_new(L, 0, 0);
+ /* Anchor table of constants in stack to avoid being collected. */
+ settabV(L, L->top, fs->kt);
+ incr_top(L);
+}
+
+/* -- Expressions --------------------------------------------------------- */
+
+/* Forward declaration. */
+static void expr(LexState *ls, ExpDesc *v);
+
+/* Return string expression. */
+static void expr_str(LexState *ls, ExpDesc *e)
+{
+ expr_init(e, VKSTR, 0);
+ e->u.sval = lex_str(ls);
+}
+
+/* Return index expression. */
+static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e)
+{
+ /* Already called: expr_toval(fs, e). */
+ t->k = VINDEXED;
+ if (expr_isnumk(e)) {
+#if LJ_DUALNUM
+ if (tvisint(expr_numtv(e))) {
+ int32_t k = intV(expr_numtv(e));
+ if (checku8(k)) {
+ t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
+ return;
+ }
+ }
+#else
+ lua_Number n = expr_numberV(e);
+ int32_t k = lj_num2int(n);
+ if (checku8(k) && n == (lua_Number)k) {
+ t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
+ return;
+ }
+#endif
+ } else if (expr_isstrk(e)) {
+ BCReg idx = const_str(fs, e);
+ if (idx <= BCMAX_C) {
+ t->u.s.aux = ~idx; /* -256..-1: const string key */
+ return;
+ }
+ }
+ t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */
+}
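+
+/* Illustrative key encodings produced above (sketch, assuming the constants
+** fit the byte/const ranges): `t[5]` takes the 256..511 byte-constant form,
+** `t.name` the negated string-constant form, and `t[x]` falls back to a
+** plain register operand. */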
+
+/* Parse index expression with named field. */
+static void expr_field(LexState *ls, ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc key;
+ expr_toanyreg(fs, v);
+ lj_lex_next(ls); /* Skip dot or colon. */
+ expr_str(ls, &key);
+ expr_index(fs, v, &key);
+}
+
+/* Parse index expression with brackets. */
+static void expr_bracket(LexState *ls, ExpDesc *v)
+{
+ lj_lex_next(ls); /* Skip '['. */
+ expr(ls, v);
+ expr_toval(ls->fs, v);
+ lex_check(ls, ']');
+}
+
+/* Get value of constant expression. */
+static void expr_kvalue(TValue *v, ExpDesc *e)
+{
+ if (e->k <= VKTRUE) {
+ setitype(v, ~(uint32_t)e->k);
+ } else if (e->k == VKSTR) {
+ setgcref(v->gcr, obj2gco(e->u.sval));
+ setitype(v, LJ_TSTR);
+ } else {
+ lua_assert(tvisnumber(expr_numtv(e)));
+ *v = *expr_numtv(e);
+ }
+}
+
+/* Parse table constructor expression. */
+static void expr_table(LexState *ls, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ BCLine line = ls->linenumber;
+ GCtab *t = NULL;
+ int vcall = 0, needarr = 0, fixt = 0;
+ uint32_t narr = 1; /* First array index. */
+ uint32_t nhash = 0; /* Number of hash entries. */
+ BCReg freg = fs->freereg;
+ BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0);
+ expr_init(e, VNONRELOC, freg);
+ bcreg_reserve(fs, 1);
+ freg++;
+ lex_check(ls, '{');
+ while (ls->token != '}') {
+ ExpDesc key, val;
+ vcall = 0;
+ if (ls->token == '[') {
+ expr_bracket(ls, &key); /* Already calls expr_toval. */
+ if (!expr_isk(&key)) expr_index(fs, e, &key);
+ if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++;
+ lex_check(ls, '=');
+ } else if ((ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) &&
+ lj_lex_lookahead(ls) == '=') {
+ expr_str(ls, &key);
+ lex_check(ls, '=');
+ nhash++;
+ } else {
+ expr_init(&key, VKNUM, 0);
+ setintV(&key.u.nval, (int)narr);
+ narr++;
+ needarr = vcall = 1;
+ }
+ expr(ls, &val);
+ if (expr_isk(&key) && key.k != VKNIL &&
+ (key.k == VKSTR || expr_isk_nojump(&val))) {
+ TValue k, *v;
+ if (!t) { /* Create template table on demand. */
+ BCReg kidx;
+ t = lj_tab_new(fs->L, needarr ? narr : 0, hsize2hbits(nhash));
+ kidx = const_gc(fs, obj2gco(t), LJ_TTAB);
+ fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx);
+ }
+ vcall = 0;
+ expr_kvalue(&k, &key);
+ v = lj_tab_set(fs->L, t, &k);
+ lj_gc_anybarriert(fs->L, t);
+ if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */
+ expr_kvalue(v, &val);
+ } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */
+ settabV(fs->L, v, t); /* Preserve key with table itself as value. */
+ fixt = 1; /* Fix this later, after all resizes. */
+ goto nonconst;
+ }
+ } else {
+ nonconst:
+ if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; }
+ if (expr_isk(&key)) expr_index(fs, e, &key);
+ bcemit_store(fs, e, &val);
+ }
+ fs->freereg = freg;
+ if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break;
+ }
+ lex_match(ls, '}', '{', line);
+ if (vcall) {
+ BCInsLine *ilp = &fs->bcbase[fs->pc-1];
+ ExpDesc en;
+ lua_assert(bc_a(ilp->ins) == freg &&
+ bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB));
+ expr_init(&en, VKNUM, 0);
+ en.u.nval.u32.lo = narr-1;
+ en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */
+ if (narr > 256) { fs->pc--; ilp--; }
+ ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en));
+ setbc_b(&ilp[-1].ins, 0);
+ }
+ if (pc == fs->pc-1) { /* Make expr relocable if possible. */
+ e->u.s.info = pc;
+ fs->freereg--;
+ e->k = VRELOCABLE;
+ } else {
+ e->k = VNONRELOC; /* May have been changed by expr_index. */
+ }
+ if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. */
+ BCIns *ip = &fs->bcbase[pc].ins;
+ if (!needarr) narr = 0;
+ else if (narr < 3) narr = 3;
+ else if (narr > 0x7ff) narr = 0x7ff;
+ setbc_d(ip, narr|(hsize2hbits(nhash)<<11));
+ } else {
+ if (needarr && t->asize < narr)
+ lj_tab_reasize(fs->L, t, narr-1);
+ if (fixt) { /* Fix value for dummy keys in template table. */
+ Node *node = noderef(t->node);
+ uint32_t i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (tvistab(&n->val)) {
+ lua_assert(tabV(&n->val) == t);
+ setnilV(&n->val); /* Turn value into nil. */
+ }
+ }
+ }
+ lj_gc_check(fs->L);
+ }
+}
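+
+/* Illustration (sketch): a constructor with only constant keys and values,
+** e.g. `{ 1, 2, x = "a" }`, is turned into BC_TDUP of a prebuilt template
+** table, while `{ f(), [k] = v }` keeps BC_TNEW plus explicit store
+** instructions. */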
+
+/* Parse function parameters. */
+static BCReg parse_params(LexState *ls, int needself)
+{
+ FuncState *fs = ls->fs;
+ BCReg nparams = 0;
+ lex_check(ls, '(');
+ if (needself)
+ var_new_lit(ls, nparams++, "self");
+ if (ls->token != ')') {
+ do {
+ if (ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) {
+ var_new(ls, nparams++, lex_str(ls));
+ } else if (ls->token == TK_dots) {
+ lj_lex_next(ls);
+ fs->flags |= PROTO_VARARG;
+ break;
+ } else {
+ err_syntax(ls, LJ_ERR_XPARAM);
+ }
+ } while (lex_opt(ls, ','));
+ }
+ var_add(ls, nparams);
+ lua_assert(fs->nactvar == nparams);
+ bcreg_reserve(fs, nparams);
+ lex_check(ls, ')');
+ return nparams;
+}
+
+/* Forward declaration. */
+static void parse_chunk(LexState *ls);
+
+/* Parse body of a function. */
+static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line)
+{
+ FuncState fs, *pfs = ls->fs;
+ FuncScope bl;
+ GCproto *pt;
+ ptrdiff_t oldbase = pfs->bcbase - ls->bcstack;
+ fs_init(ls, &fs);
+ fscope_begin(&fs, &bl, 0);
+ fs.linedefined = line;
+ fs.numparams = (uint8_t)parse_params(ls, needself);
+ fs.bcbase = pfs->bcbase + pfs->pc;
+ fs.bclim = pfs->bclim - pfs->pc;
+ bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */
+ parse_chunk(ls);
+ if (ls->token != TK_end) lex_match(ls, TK_end, TK_function, line);
+ pt = fs_finish(ls, (ls->lastline = ls->linenumber));
+ pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */
+ pfs->bclim = (BCPos)(ls->sizebcstack - oldbase);
+ /* Store new prototype in the constant array of the parent. */
+ expr_init(e, VRELOCABLE,
+ bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO)));
+#if LJ_HASFFI
+ pfs->flags |= (fs.flags & PROTO_FFI);
+#endif
+ if (!(pfs->flags & PROTO_CHILD)) {
+ if (pfs->flags & PROTO_HAS_RETURN)
+ pfs->flags |= PROTO_FIXUP_RETURN;
+ pfs->flags |= PROTO_CHILD;
+ }
+ lj_lex_next(ls);
+}
+
+/* Parse expression list. Last expression is left open. */
+static BCReg expr_list(LexState *ls, ExpDesc *v)
+{
+ BCReg n = 1;
+ expr(ls, v);
+ while (lex_opt(ls, ',')) {
+ expr_tonextreg(ls->fs, v);
+ expr(ls, v);
+ n++;
+ }
+ return n;
+}
+
+/* Parse function argument list. */
+static void parse_args(LexState *ls, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc args;
+ BCIns ins;
+ BCReg base;
+ BCLine line = ls->linenumber;
+ if (ls->token == '(') {
+#if !LJ_52
+ if (line != ls->lastline)
+ err_syntax(ls, LJ_ERR_XAMBIG);
+#endif
+ lj_lex_next(ls);
+ if (ls->token == ')') { /* f(). */
+ args.k = VVOID;
+ } else {
+ expr_list(ls, &args);
+ if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */
+ setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */
+ }
+ lex_match(ls, ')', '(', line);
+ } else if (ls->token == '{') {
+ expr_table(ls, &args);
+ } else if (ls->token == TK_string) {
+ expr_init(&args, VKSTR, 0);
+ args.u.sval = strV(&ls->tokenval);
+ lj_lex_next(ls);
+ } else {
+ err_syntax(ls, LJ_ERR_XFUNARG);
+ return; /* Silence compiler. */
+ }
+ lua_assert(e->k == VNONRELOC);
+ base = e->u.s.info; /* Base register for call. */
+ if (args.k == VCALL) {
+ ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1);
+ } else {
+ if (args.k != VVOID)
+ expr_tonextreg(fs, &args);
+ ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base);
+ }
+ expr_init(e, VCALL, bcemit_INS(fs, ins));
+ e->u.s.aux = base;
+ fs->bcbase[fs->pc - 1].line = line;
+ fs->freereg = base+1; /* Leave one result by default. */
+}
+
+/* Parse primary expression. */
+static void expr_primary(LexState *ls, ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ /* Parse prefix expression. */
+ if (ls->token == '(') {
+ BCLine line = ls->linenumber;
+ lj_lex_next(ls);
+ expr(ls, v);
+ lex_match(ls, ')', '(', line);
+ expr_discharge(ls->fs, v);
+ } else if (ls->token == TK_name || (!LJ_52 && ls->token == TK_goto)) {
+ var_lookup(ls, v);
+ } else {
+ err_syntax(ls, LJ_ERR_XSYMBOL);
+ }
+ for (;;) { /* Parse multiple expression suffixes. */
+ if (ls->token == '.') {
+ expr_field(ls, v);
+ } else if (ls->token == '[') {
+ ExpDesc key;
+ expr_toanyreg(fs, v);
+ expr_bracket(ls, &key);
+ expr_index(fs, v, &key);
+ } else if (ls->token == ':') {
+ ExpDesc key;
+ lj_lex_next(ls);
+ expr_str(ls, &key);
+ bcemit_method(fs, v, &key);
+ parse_args(ls, v);
+ } else if (ls->token == '(' || ls->token == TK_string || ls->token == '{') {
+ expr_tonextreg(fs, v);
+ parse_args(ls, v);
+ } else {
+ break;
+ }
+ }
+}
+
+/* Parse simple expression. */
+static void expr_simple(LexState *ls, ExpDesc *v)
+{
+ switch (ls->token) {
+ case TK_number:
+ expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokenval)) ? VKCDATA : VKNUM, 0);
+ copyTV(ls->L, &v->u.nval, &ls->tokenval);
+ break;
+ case TK_string:
+ expr_init(v, VKSTR, 0);
+ v->u.sval = strV(&ls->tokenval);
+ break;
+ case TK_nil:
+ expr_init(v, VKNIL, 0);
+ break;
+ case TK_true:
+ expr_init(v, VKTRUE, 0);
+ break;
+ case TK_false:
+ expr_init(v, VKFALSE, 0);
+ break;
+ case TK_dots: { /* Vararg. */
+ FuncState *fs = ls->fs;
+ BCReg base;
+ checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS);
+ bcreg_reserve(fs, 1);
+ base = fs->freereg-1;
+ expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams));
+ v->u.s.aux = base;
+ break;
+ }
+ case '{': /* Table constructor. */
+ expr_table(ls, v);
+ return;
+ case TK_function:
+ lj_lex_next(ls);
+ parse_body(ls, v, 0, ls->linenumber);
+ return;
+ default:
+ expr_primary(ls, v);
+ return;
+ }
+ lj_lex_next(ls);
+}
+
+/* Manage syntactic levels to avoid blowing up the stack. */
+static void synlevel_begin(LexState *ls)
+{
+ if (++ls->level >= LJ_MAX_XLEVEL)
+ lj_lex_error(ls, 0, LJ_ERR_XLEVELS);
+}
+
+#define synlevel_end(ls) ((ls)->level--)
+
+/* Convert token to binary operator. */
+static BinOpr token2binop(LexToken tok)
+{
+ switch (tok) {
+ case '+': return OPR_ADD;
+ case '-': return OPR_SUB;
+ case '*': return OPR_MUL;
+ case '/': return OPR_DIV;
+ case '%': return OPR_MOD;
+ case '^': return OPR_POW;
+ case TK_concat: return OPR_CONCAT;
+ case TK_ne: return OPR_NE;
+ case TK_eq: return OPR_EQ;
+ case '<': return OPR_LT;
+ case TK_le: return OPR_LE;
+ case '>': return OPR_GT;
+ case TK_ge: return OPR_GE;
+ case TK_and: return OPR_AND;
+ case TK_or: return OPR_OR;
+ default: return OPR_NOBINOPR;
+ }
+}
+
+/* Priorities for each binary operator. ORDER OPR. */
+static const struct {
+ uint8_t left; /* Left priority. */
+ uint8_t right; /* Right priority. */
+} priority[] = {
+ {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */
+ {10,9}, {5,4}, /* POW CONCAT (right associative) */
+ {3,3}, {3,3}, /* EQ NE */
+ {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE GT LE */
+ {2,2}, {1,1} /* AND OR */
+};
+
+#define UNARY_PRIORITY 8 /* Priority for unary operators. */
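+
+/* Illustrative parses implied by the priorities above (sketch):
+**   not a == b  parses as  (not a) == b  -- unary (8) binds tighter than '==' (3)
+**   -x^2        parses as  -(x^2)        -- '^' left prio 10 binds tighter than unary 8
+**   a..b..c     parses as  a..(b..c)     -- '..' (5,4) is right-associative
+*/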
+
+/* Forward declaration. */
+static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit);
+
+/* Parse unary expression. */
+static void expr_unop(LexState *ls, ExpDesc *v)
+{
+ BCOp op;
+ if (ls->token == TK_not) {
+ op = BC_NOT;
+ } else if (ls->token == '-') {
+ op = BC_UNM;
+ } else if (ls->token == '#') {
+ op = BC_LEN;
+ } else {
+ expr_simple(ls, v);
+ return;
+ }
+ lj_lex_next(ls);
+ expr_binop(ls, v, UNARY_PRIORITY);
+ bcemit_unop(ls->fs, op, v);
+}
+
+/* Parse binary expressions with priority higher than the limit. */
+static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit)
+{
+ BinOpr op;
+ synlevel_begin(ls);
+ expr_unop(ls, v);
+ op = token2binop(ls->token);
+ while (op != OPR_NOBINOPR && priority[op].left > limit) {
+ ExpDesc v2;
+ BinOpr nextop;
+ lj_lex_next(ls);
+ bcemit_binop_left(ls->fs, op, v);
+ /* Parse binary expression with higher priority. */
+ nextop = expr_binop(ls, &v2, priority[op].right);
+ bcemit_binop(ls->fs, op, v, &v2);
+ op = nextop;
+ }
+ synlevel_end(ls);
+ return op; /* Return unconsumed binary operator (if any). */
+}
+
+/* Parse expression. */
+static void expr(LexState *ls, ExpDesc *v)
+{
+ expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */
+}
+
+/* Assign expression to the next register. */
+static void expr_next(LexState *ls)
+{
+ ExpDesc e;
+ expr(ls, &e);
+ expr_tonextreg(ls->fs, &e);
+}
+
+/* Parse conditional expression. */
+static BCPos expr_cond(LexState *ls)
+{
+ ExpDesc v;
+ expr(ls, &v);
+ if (v.k == VKNIL) v.k = VKFALSE;
+ bcemit_branch_t(ls->fs, &v);
+ return v.f;
+}
+
+/* -- Assignments --------------------------------------------------------- */
+
+/* List of LHS variables. */
+typedef struct LHSVarList {
+ ExpDesc v; /* LHS variable. */
+ struct LHSVarList *prev; /* Link to previous LHS variable. */
+} LHSVarList;
+
+/* Eliminate write-after-read hazards for local variable assignment. */
+static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ BCReg reg = v->u.s.info; /* Check against this variable. */
+ BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */
+ int hazard = 0;
+ for (; lh; lh = lh->prev) {
+ if (lh->v.k == VINDEXED) {
+ if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */
+ hazard = 1;
+ lh->v.u.s.info = tmp;
+ }
+ if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */
+ hazard = 1;
+ lh->v.u.s.aux = tmp;
+ }
+ }
+ }
+ if (hazard) {
+ bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */
+ bcreg_reserve(fs, 1);
+ }
+}
+
+/* Adjust LHS/RHS of an assignment. */
+static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ int32_t extra = (int32_t)nvars - (int32_t)nexps;
+ if (e->k == VCALL) {
+ extra++; /* Compensate for the VCALL itself. */
+ if (extra < 0) extra = 0;
+ setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */
+ if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1);
+ } else {
+ if (e->k != VVOID)
+ expr_tonextreg(fs, e); /* Close last expression. */
+ if (extra > 0) { /* Leftover LHS are set to nil. */
+ BCReg reg = fs->freereg;
+ bcreg_reserve(fs, (BCReg)extra);
+ bcemit_nil(fs, reg, (BCReg)extra);
+ }
+ }
+}
+
+/* Recursively parse assignment statement. */
+static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars)
+{
+ ExpDesc e;
+ checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX);
+ if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */
+ LHSVarList vl;
+ vl.prev = lh;
+ expr_primary(ls, &vl.v);
+ if (vl.v.k == VLOCAL)
+ assign_hazard(ls, lh, &vl.v);
+ checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names");
+ parse_assignment(ls, &vl, nvars+1);
+ } else { /* Parse RHS. */
+ BCReg nexps;
+ lex_check(ls, '=');
+ nexps = expr_list(ls, &e);
+ if (nexps == nvars) {
+ if (e.k == VCALL) {
+ if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */
+ ls->fs->freereg--;
+ e.k = VRELOCABLE;
+ } else { /* Multiple call results. */
+ e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */
+ e.k = VNONRELOC;
+ }
+ }
+ bcemit_store(ls->fs, &lh->v, &e);
+ return;
+ }
+ assign_adjust(ls, nvars, nexps, &e);
+ if (nexps > nvars)
+ ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */
+ }
+ /* Assign RHS to LHS and recurse downwards. */
+ expr_init(&e, VNONRELOC, ls->fs->freereg-1);
+ bcemit_store(ls->fs, &lh->v, &e);
+}
+
+/* Parse call statement or assignment. */
+static void parse_call_assign(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ LHSVarList vl;
+ expr_primary(ls, &vl.v);
+ if (vl.v.k == VCALL) { /* Function call statement. */
+ setbc_b(bcptr(fs, &vl.v), 1); /* No results. */
+ } else { /* Start of an assignment. */
+ vl.prev = NULL;
+ parse_assignment(ls, &vl, 1);
+ }
+}
+
+/* Parse 'local' statement. */
+static void parse_local(LexState *ls)
+{
+ if (lex_opt(ls, TK_function)) { /* Local function declaration. */
+ ExpDesc v, b;
+ FuncState *fs = ls->fs;
+ var_new(ls, 0, lex_str(ls));
+ expr_init(&v, VLOCAL, fs->freereg);
+ v.u.s.aux = fs->varmap[fs->freereg];
+ bcreg_reserve(fs, 1);
+ var_add(ls, 1);
+ parse_body(ls, &b, 0, ls->linenumber);
+ /* bcemit_store(fs, &v, &b) without setting VSTACK_VAR_RW. */
+ expr_free(fs, &b);
+ expr_toreg(fs, &b, v.u.s.info);
+ /* The upvalue is in scope, but the local is only valid after the store. */
+ var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc;
+ } else { /* Local variable declaration. */
+ ExpDesc e;
+ BCReg nexps, nvars = 0;
+ do { /* Collect LHS. */
+ var_new(ls, nvars++, lex_str(ls));
+ } while (lex_opt(ls, ','));
+ if (lex_opt(ls, '=')) { /* Optional RHS. */
+ nexps = expr_list(ls, &e);
+ } else { /* Or implicitly set to nil. */
+ e.k = VVOID;
+ nexps = 0;
+ }
+ assign_adjust(ls, nvars, nexps, &e);
+ var_add(ls, nvars);
+ }
+}
+
+/* Parse 'function' statement. */
+static void parse_func(LexState *ls, BCLine line)
+{
+ FuncState *fs;
+ ExpDesc v, b;
+ int needself = 0;
+ lj_lex_next(ls); /* Skip 'function'. */
+ /* Parse function name. */
+ var_lookup(ls, &v);
+ while (ls->token == '.') /* Multiple dot-separated fields. */
+ expr_field(ls, &v);
+ if (ls->token == ':') { /* Optional colon to signify method call. */
+ needself = 1;
+ expr_field(ls, &v);
+ }
+ parse_body(ls, &b, needself, line);
+ fs = ls->fs;
+ bcemit_store(fs, &v, &b);
+ fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */
+}
+
+/* -- Control transfer statements ----------------------------------------- */
+
+/* Check for end of block. */
+static int endofblock(LexToken token)
+{
+ switch (token) {
+ case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Parse 'return' statement. */
+static void parse_return(LexState *ls)
+{
+ BCIns ins;
+ FuncState *fs = ls->fs;
+ lj_lex_next(ls); /* Skip 'return'. */
+ fs->flags |= PROTO_HAS_RETURN;
+ if (endofblock(ls->token) || ls->token == ';') { /* Bare return. */
+ ins = BCINS_AD(BC_RET0, 0, 1);
+ } else { /* Return with one or more values. */
+ ExpDesc e; /* Receives the _last_ expression in the list. */
+ BCReg nret = expr_list(ls, &e);
+ if (nret == 1) { /* Return one result. */
+ if (e.k == VCALL) { /* Check for tail call. */
+ BCIns *ip = bcptr(fs, &e);
+ /* It doesn't pay off to add BC_VARGT just for 'return ...'. */
+ if (bc_op(*ip) == BC_VARG) goto notailcall;
+ fs->pc--;
+ ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip));
+ } else { /* Can return the result from any register. */
+ ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2);
+ }
+ } else {
+ if (e.k == VCALL) { /* Append all results from a call. */
+ notailcall:
+ setbc_b(bcptr(fs, &e), 0);
+ ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar);
+ } else {
+ expr_tonextreg(fs, &e); /* Force contiguous registers. */
+ ins = BCINS_AD(BC_RET, fs->nactvar, nret+1);
+ }
+ }
+ }
+ if (fs->flags & PROTO_CHILD)
+ bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */
+ bcemit_INS(fs, ins);
+}
+
+/* Parse 'break' statement. */
+static void parse_break(LexState *ls)
+{
+ ls->fs->bl->flags |= FSCOPE_BREAK;
+ gola_new(ls, NAME_BREAK, VSTACK_GOTO, bcemit_jmp(ls->fs));
+}
+
+/* Parse 'goto' statement. */
+static void parse_goto(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ GCstr *name = lex_str(ls);
+ VarInfo *vl = gola_findlabel(ls, name);
+ if (vl) /* Treat backwards goto within same scope like a loop. */
+ bcemit_AJ(fs, BC_LOOP, vl->slot, -1); /* No BC range check. */
+ fs->bl->flags |= FSCOPE_GOLA;
+ gola_new(ls, name, VSTACK_GOTO, bcemit_jmp(fs));
+}
+
+/* Parse label. */
+static void parse_label(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ GCstr *name;
+ MSize idx;
+ fs->lasttarget = fs->pc;
+ fs->bl->flags |= FSCOPE_GOLA;
+ lj_lex_next(ls); /* Skip '::'. */
+ name = lex_str(ls);
+ if (gola_findlabel(ls, name))
+ lj_lex_error(ls, 0, LJ_ERR_XLDUP, strdata(name));
+ idx = gola_new(ls, name, VSTACK_LABEL, fs->pc);
+ lex_check(ls, TK_label);
+ /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */
+ for (;;) {
+ if (ls->token == TK_label) {
+ synlevel_begin(ls);
+ parse_label(ls);
+ synlevel_end(ls);
+ } else if (LJ_52 && ls->token == ';') {
+ lj_lex_next(ls);
+ } else {
+ break;
+ }
+ }
+ /* Trailing label is considered to be outside of scope. */
+ if (endofblock(ls->token) && ls->token != TK_until)
+ ls->vstack[idx].slot = fs->bl->nactvar;
+ gola_resolve(ls, fs->bl, idx);
+}
+
+/* -- Blocks, loops and conditional statements ---------------------------- */
+
+/* Parse a block. */
+static void parse_block(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ FuncScope bl;
+ fscope_begin(fs, &bl, 0);
+ parse_chunk(ls);
+ fscope_end(fs);
+}
+
+/* Parse 'while' statement. */
+static void parse_while(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos start, loop, condexit;
+ FuncScope bl;
+ lj_lex_next(ls); /* Skip 'while'. */
+ start = fs->lasttarget = fs->pc;
+ condexit = expr_cond(ls);
+ fscope_begin(fs, &bl, FSCOPE_LOOP);
+ lex_check(ls, TK_do);
+ loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
+ parse_block(ls);
+ jmp_patch(fs, bcemit_jmp(fs), start);
+ lex_match(ls, TK_end, TK_while, line);
+ fscope_end(fs);
+ jmp_tohere(fs, condexit);
+ jmp_patchins(fs, loop, fs->pc);
+}
+
+/* Parse 'repeat' statement. */
+static void parse_repeat(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos loop = fs->lasttarget = fs->pc;
+ BCPos condexit;
+ FuncScope bl1, bl2;
+ fscope_begin(fs, &bl1, FSCOPE_LOOP); /* Breakable loop scope. */
+ fscope_begin(fs, &bl2, 0); /* Inner scope. */
+ lj_lex_next(ls); /* Skip 'repeat'. */
+ bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
+ parse_chunk(ls);
+ lex_match(ls, TK_until, TK_repeat, line);
+ condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */
+ if (!(bl2.flags & FSCOPE_UPVAL)) { /* No upvalues? Just end inner scope. */
+ fscope_end(fs);
+ } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */
+ parse_break(ls); /* Break from loop and close upvalues. */
+ jmp_tohere(fs, condexit);
+ fscope_end(fs); /* End inner scope and close upvalues. */
+ condexit = bcemit_jmp(fs);
+ }
+ jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */
+ jmp_patchins(fs, loop, fs->pc);
+ fscope_end(fs); /* End loop scope. */
+}
+
+/* Parse numeric 'for'. */
+static void parse_for_num(LexState *ls, GCstr *varname, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCReg base = fs->freereg;
+ FuncScope bl;
+ BCPos loop, loopend;
+ /* Hidden control variables. */
+ var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX);
+ var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP);
+ var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP);
+ /* Visible copy of index variable. */
+ var_new(ls, FORL_EXT, varname);
+ lex_check(ls, '=');
+ expr_next(ls);
+ lex_check(ls, ',');
+ expr_next(ls);
+ if (lex_opt(ls, ',')) {
+ expr_next(ls);
+ } else {
+ bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */
+ bcreg_reserve(fs, 1);
+ }
+ var_add(ls, 3); /* Hidden control variables. */
+ lex_check(ls, TK_do);
+ loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP);
+ fscope_begin(fs, &bl, 0); /* Scope for visible variables. */
+ var_add(ls, 1);
+ bcreg_reserve(fs, 1);
+ parse_block(ls);
+ fscope_end(fs);
+ /* Perform loop inversion. Loop control instructions are at the end. */
+ loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP);
+ fs->bcbase[loopend].line = line; /* Fix line for control ins. */
+ jmp_patchins(fs, loopend, loop+1);
+ jmp_patchins(fs, loop, fs->pc);
+}
+
+/* Try to predict whether the iterator is next() and specialize the bytecode.
+** Detecting next() and pairs() by name is simplistic, but quite effective.
+** The interpreter backs off if the check for the closure fails at runtime.
+*/
+static int predict_next(LexState *ls, FuncState *fs, BCPos pc)
+{
+ BCIns ins = fs->bcbase[pc].ins;
+ GCstr *name;
+ cTValue *o;
+ switch (bc_op(ins)) {
+ case BC_MOV:
+ name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name));
+ break;
+ case BC_UGET:
+ name = gco2str(gcref(ls->vstack[fs->uvmap[bc_d(ins)]].name));
+ break;
+ case BC_GGET:
+    /* There's no inverse index (yet), so look up the strings. */
+ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs"));
+ if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
+ return 1;
+ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next"));
+ if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
+ return 1;
+ return 0;
+ default:
+ return 0;
+ }
+ return (name->len == 5 && !strcmp(strdata(name), "pairs")) ||
+ (name->len == 4 && !strcmp(strdata(name), "next"));
+}
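+
+/* Illustrative cases (sketch): `for k, v in pairs(t) do end` and
+** `for k, v in next, t do end` both satisfy this predictor, so the iterator
+** loop below is emitted with BC_ISNEXT/BC_ITERN instead of the generic
+** BC_JMP/BC_ITERC pair. */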
+
+/* Parse 'for' iterator. */
+static void parse_for_iter(LexState *ls, GCstr *indexname)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc e;
+ BCReg nvars = 0;
+ BCLine line;
+ BCReg base = fs->freereg + 3;
+ BCPos loop, loopend, exprpc = fs->pc;
+ FuncScope bl;
+ int isnext;
+ /* Hidden control variables. */
+ var_new_fixed(ls, nvars++, VARNAME_FOR_GEN);
+ var_new_fixed(ls, nvars++, VARNAME_FOR_STATE);
+ var_new_fixed(ls, nvars++, VARNAME_FOR_CTL);
+ /* Visible variables returned from iterator. */
+ var_new(ls, nvars++, indexname);
+ while (lex_opt(ls, ','))
+ var_new(ls, nvars++, lex_str(ls));
+ lex_check(ls, TK_in);
+ line = ls->linenumber;
+ assign_adjust(ls, 3, expr_list(ls, &e), &e);
+ bcreg_bump(fs, 3); /* The iterator needs another 3 slots (func + 2 args). */
+ isnext = (nvars <= 5 && predict_next(ls, fs, exprpc));
+ var_add(ls, 3); /* Hidden control variables. */
+ lex_check(ls, TK_do);
+ loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP);
+ fscope_begin(fs, &bl, 0); /* Scope for visible variables. */
+ var_add(ls, nvars-3);
+ bcreg_reserve(fs, nvars-3);
+ parse_block(ls);
+ fscope_end(fs);
+ /* Perform loop inversion. Loop control instructions are at the end. */
+ jmp_patchins(fs, loop, fs->pc);
+ bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1);
+ loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP);
+ fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */
+ fs->bcbase[loopend].line = line;
+ jmp_patchins(fs, loopend, loop+1);
+}
+
+/* Parse 'for' statement. */
+static void parse_for(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ GCstr *varname;
+ FuncScope bl;
+ fscope_begin(fs, &bl, FSCOPE_LOOP);
+ lj_lex_next(ls); /* Skip 'for'. */
+ varname = lex_str(ls); /* Get first variable name. */
+ if (ls->token == '=')
+ parse_for_num(ls, varname, line);
+ else if (ls->token == ',' || ls->token == TK_in)
+ parse_for_iter(ls, varname);
+ else
+ err_syntax(ls, LJ_ERR_XFOR);
+ lex_match(ls, TK_end, TK_for, line);
+ fscope_end(fs); /* Resolve break list. */
+}
+
+/* Parse condition and 'then' block. */
+static BCPos parse_then(LexState *ls)
+{
+ BCPos condexit;
+ lj_lex_next(ls); /* Skip 'if' or 'elseif'. */
+ condexit = expr_cond(ls);
+ lex_check(ls, TK_then);
+ parse_block(ls);
+ return condexit;
+}
+
+/* Parse 'if' statement. */
+static void parse_if(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos flist;
+ BCPos escapelist = NO_JMP;
+ flist = parse_then(ls);
+ while (ls->token == TK_elseif) { /* Parse multiple 'elseif' blocks. */
+ jmp_append(fs, &escapelist, bcemit_jmp(fs));
+ jmp_tohere(fs, flist);
+ flist = parse_then(ls);
+ }
+ if (ls->token == TK_else) { /* Parse optional 'else' block. */
+ jmp_append(fs, &escapelist, bcemit_jmp(fs));
+ jmp_tohere(fs, flist);
+ lj_lex_next(ls); /* Skip 'else'. */
+ parse_block(ls);
+ } else {
+ jmp_append(fs, &escapelist, flist);
+ }
+ jmp_tohere(fs, escapelist);
+ lex_match(ls, TK_end, TK_if, line);
+}
+
+/* -- Parse statements ---------------------------------------------------- */
+
+/* Parse a statement. Returns 1 if it must be the last one in a chunk. */
+static int parse_stmt(LexState *ls)
+{
+ BCLine line = ls->linenumber;
+ switch (ls->token) {
+ case TK_if:
+ parse_if(ls, line);
+ break;
+ case TK_while:
+ parse_while(ls, line);
+ break;
+ case TK_do:
+ lj_lex_next(ls);
+ parse_block(ls);
+ lex_match(ls, TK_end, TK_do, line);
+ break;
+ case TK_for:
+ parse_for(ls, line);
+ break;
+ case TK_repeat:
+ parse_repeat(ls, line);
+ break;
+ case TK_function:
+ parse_func(ls, line);
+ break;
+ case TK_local:
+ lj_lex_next(ls);
+ parse_local(ls);
+ break;
+ case TK_return:
+ parse_return(ls);
+ return 1; /* Must be last. */
+ case TK_break:
+ lj_lex_next(ls);
+ parse_break(ls);
+ return !LJ_52; /* Must be last in Lua 5.1. */
+#if LJ_52
+ case ';':
+ lj_lex_next(ls);
+ break;
+#endif
+ case TK_label:
+ parse_label(ls);
+ break;
+ case TK_goto:
+ if (LJ_52 || lj_lex_lookahead(ls) == TK_name) {
+ lj_lex_next(ls);
+ parse_goto(ls);
+ break;
+ } /* else: fallthrough */
+ default:
+ parse_call_assign(ls);
+ break;
+ }
+ return 0;
+}
+
+/* A chunk is a list of statements optionally separated by semicolons. */
+static void parse_chunk(LexState *ls)
+{
+ int islast = 0;
+ synlevel_begin(ls);
+ while (!islast && !endofblock(ls->token)) {
+ islast = parse_stmt(ls);
+ lex_opt(ls, ';');
+ lua_assert(ls->fs->framesize >= ls->fs->freereg &&
+ ls->fs->freereg >= ls->fs->nactvar);
+ ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */
+ }
+ synlevel_end(ls);
+}
+
+/* Entry point of bytecode parser. */
+GCproto *lj_parse(LexState *ls)
+{
+ FuncState fs;
+ FuncScope bl;
+ GCproto *pt;
+ lua_State *L = ls->L;
+#ifdef LUAJIT_DISABLE_DEBUGINFO
+ ls->chunkname = lj_str_newlit(L, "=");
+#else
+ ls->chunkname = lj_str_newz(L, ls->chunkarg);
+#endif
+ setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */
+ incr_top(L);
+ ls->level = 0;
+ fs_init(ls, &fs);
+ fs.linedefined = 0;
+ fs.numparams = 0;
+ fs.bcbase = NULL;
+ fs.bclim = 0;
+ fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */
+ fscope_begin(&fs, &bl, 0);
+ bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */
+ lj_lex_next(ls); /* Read-ahead first token. */
+ parse_chunk(ls);
+ if (ls->token != TK_eof)
+ err_token(ls, TK_eof);
+ pt = fs_finish(ls, ls->linenumber);
+ L->top--; /* Drop chunkname. */
+ lua_assert(fs.prev == NULL);
+ lua_assert(ls->fs == NULL);
+ lua_assert(pt->sizeuv == 0);
+ return pt;
+}
+
diff --git a/3rdparty/lua/src/lj_parse.h b/3rdparty/lua/src/lj_parse.h
index 6b5df54..558b4e2 100644
--- a/3rdparty/lua/src/lj_parse.h
+++ b/3rdparty/lua/src/lj_parse.h
@@ -1,18 +1,18 @@
-/*
-** Lua parser (source code -> bytecode).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_PARSE_H
-#define _LJ_PARSE_H
-
-#include "lj_obj.h"
-#include "lj_lex.h"
-
-LJ_FUNC GCproto *lj_parse(LexState *ls);
-LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l);
-#if LJ_HASFFI
-LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd);
-#endif
-
-#endif
+/*
+** Lua parser (source code -> bytecode).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_PARSE_H
+#define _LJ_PARSE_H
+
+#include "lj_obj.h"
+#include "lj_lex.h"
+
+LJ_FUNC GCproto *lj_parse(LexState *ls);
+LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l);
+#if LJ_HASFFI
+LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd);
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_record.c b/3rdparty/lua/src/lj_record.c
index 993b40e..7336e0a 100644
--- a/3rdparty/lua/src/lj_record.c
+++ b/3rdparty/lua/src/lj_record.c
@@ -1,2252 +1,2247 @@
-/*
-** Trace recorder (bytecode -> SSA IR).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_record_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_meta.h"
-#include "lj_frame.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#endif
-#include "lj_bc.h"
-#include "lj_ff.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-#include "lj_ircall.h"
-#include "lj_iropt.h"
-#include "lj_trace.h"
-#include "lj_record.h"
-#include "lj_ffrecord.h"
-#include "lj_snap.h"
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-
-/* Some local macros to save typing. Undef'd at the end. */
-#define IR(ref) (&J->cur.ir[(ref)])
-
-/* Pass IR on to next optimization in chain (FOLD). */
-#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
-
-/* Emit raw IR without passing through optimizations. */
-#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
-
-/* -- Sanity checks ------------------------------------------------------- */
-
-#ifdef LUA_USE_ASSERT
-/* Sanity check the whole IR -- sloooow. */
-static void rec_check_ir(jit_State *J)
-{
- IRRef i, nins = J->cur.nins, nk = J->cur.nk;
- lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536);
- for (i = nins-1; i >= nk; i--) {
- IRIns *ir = IR(i);
- uint32_t mode = lj_ir_mode[ir->o];
- IRRef op1 = ir->op1;
- IRRef op2 = ir->op2;
- switch (irm_op1(mode)) {
- case IRMnone: lua_assert(op1 == 0); break;
- case IRMref: lua_assert(op1 >= nk);
- lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break;
- case IRMlit: break;
- case IRMcst: lua_assert(i < REF_BIAS); continue;
- }
- switch (irm_op2(mode)) {
- case IRMnone: lua_assert(op2 == 0); break;
- case IRMref: lua_assert(op2 >= nk);
- lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break;
- case IRMlit: break;
- case IRMcst: lua_assert(0); break;
- }
- if (ir->prev) {
- lua_assert(ir->prev >= nk);
- lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i);
- lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o);
- }
- }
-}
-
-/* Compare stack slots and frames of the recorder and the VM. */
-static void rec_check_slots(jit_State *J)
-{
- BCReg s, nslots = J->baseslot + J->maxslot;
- int32_t depth = 0;
- cTValue *base = J->L->base - J->baseslot;
- lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS);
- lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME));
- lua_assert(nslots < LJ_MAX_JSLOTS);
- for (s = 0; s < nslots; s++) {
- TRef tr = J->slot[s];
- if (tr) {
- cTValue *tv = &base[s];
- IRRef ref = tref_ref(tr);
- IRIns *ir;
- lua_assert(ref >= J->cur.nk && ref < J->cur.nins);
- ir = IR(ref);
- lua_assert(irt_t(ir->t) == tref_t(tr));
- if (s == 0) {
- lua_assert(tref_isfunc(tr));
- } else if ((tr & TREF_FRAME)) {
- GCfunc *fn = gco2func(frame_gc(tv));
- BCReg delta = (BCReg)(tv - frame_prev(tv));
- lua_assert(tref_isfunc(tr));
- if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir));
- lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta));
- depth++;
- } else if ((tr & TREF_CONT)) {
- lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void));
- lua_assert((J->slot[s+1] & TREF_FRAME));
- depth++;
- } else {
- if (tvisnumber(tv))
- lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */
- else
- lua_assert(itype2irt(tv) == tref_type(tr));
- if (tref_isk(tr)) { /* Compare constants. */
- TValue tvk;
- lj_ir_kvalue(J->L, &tvk, ir);
- if (!(tvisnum(&tvk) && tvisnan(&tvk)))
- lua_assert(lj_obj_equal(tv, &tvk));
- else
- lua_assert(tvisnum(tv) && tvisnan(tv));
- }
- }
- }
- }
- lua_assert(J->framedepth == depth);
-}
-#endif
-
-/* -- Type handling and specialization ------------------------------------ */
-
-/* Note: these functions return tagged references (TRef). */
-
-/* Specialize a slot to a specific type. Note: slot can be negative! */
-static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
-{
- /* Caller may set IRT_GUARD in t. */
- TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);
- J->base[slot] = ref;
- return ref;
-}
-
-/* Specialize a slot to the runtime type. Note: slot can be negative! */
-static TRef sload(jit_State *J, int32_t slot)
-{
- IRType t = itype2irt(&J->L->base[slot]);
- TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
- IRSLOAD_TYPECHECK);
- if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */
- J->base[slot] = ref;
- return ref;
-}
-
-/* Get TRef from slot. Load slot and specialize if not done already. */
-#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))
-
-/* Get TRef for current function. */
-static TRef getcurrf(jit_State *J)
-{
- if (J->base[-1])
- return J->base[-1];
- lua_assert(J->baseslot == 1);
- return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY);
-}
-
-/* Compare for raw object equality.
-** Returns 0 if the objects are the same.
-** Returns 1 if they are different, but the same type.
-** Returns 2 for two different types.
-** Comparisons between primitives always return 1 -- no caller cares about it.
-*/
-int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
-{
- int diff = !lj_obj_equal(av, bv);
- if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */
- IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
- IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
- if (ta != tb) {
- /* Widen mixed number/int comparisons to number/number comparison. */
- if (ta == IRT_INT && tb == IRT_NUM) {
- a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
- ta = IRT_NUM;
- } else if (ta == IRT_NUM && tb == IRT_INT) {
- b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
- } else {
- return 2; /* Two different types are never equal. */
- }
- }
- emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);
- }
- return diff;
-}
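
/* A standalone sketch (not part of lj_record.c): the widening of a mixed
** int/num comparison above to a num/num comparison is lossless, because every
** int32_t value is exactly representable as an IEEE-754 double. The function
** below mimics that widening outside the IR. */
#include <assert.h>
#include <stdint.h>

static int mixed_num_eq(int32_t a, double b)
{
  return (double)a == b;  /* int32 -> double conversion is exact. */
}

int main(void)
{
  assert(mixed_num_eq(2147483647, 2147483647.0));   /* INT32_MAX round-trips. */
  assert(!mixed_num_eq(2147483647, 2147483646.0));  /* Distinct values stay distinct. */
  return 0;
}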
-
-/* Constify a value. Returns 0 for non-representable object types. */
-TRef lj_record_constify(jit_State *J, cTValue *o)
-{
- if (tvisgcv(o))
- return lj_ir_kgc(J, gcV(o), itype2irt(o));
- else if (tvisint(o))
- return lj_ir_kint(J, intV(o));
- else if (tvisnum(o))
- return lj_ir_knumint(J, numV(o));
- else if (tvisbool(o))
- return TREF_PRI(itype2irt(o));
- else
- return 0; /* Can't represent lightuserdata (pointless). */
-}
-
-/* -- Record loop ops ----------------------------------------------------- */
-
-/* Loop event. */
-typedef enum {
- LOOPEV_LEAVE, /* Loop is left or not entered. */
- LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */
- LOOPEV_ENTER /* Loop is entered. */
-} LoopEvent;
-
-/* Canonicalize slots: convert integers to numbers. */
-static void canonicalize_slots(jit_State *J)
-{
- BCReg s;
- if (LJ_DUALNUM) return;
- for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
- TRef tr = J->slot[s];
- if (tref_isinteger(tr)) {
- IRIns *ir = IR(tref_ref(tr));
- if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY)))
- J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
- }
- }
-}
-
-/* Stop recording. */
-static void rec_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
-{
- lj_trace_end(J);
- J->cur.linktype = (uint8_t)linktype;
- J->cur.link = (uint16_t)lnk;
- /* Looping back at the same stack level? */
- if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
- if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */
- goto nocanon; /* Do not canonicalize or we lose the narrowing. */
- if (J->cur.root) /* Otherwise ensure we always link to the root trace. */
- J->cur.link = J->cur.root;
- }
- canonicalize_slots(J);
-nocanon:
- /* Note: all loop ops must set J->pc to the following instruction! */
- lj_snap_add(J); /* Add loop snapshot. */
- J->needsnap = 0;
- J->mergesnap = 1; /* In case recording continues. */
-}
-
-/* Search bytecode backwards for an int/num constant slot initializer. */
-static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
-{
- /* This algorithm is rather simplistic and assumes quite a bit about
- ** how the bytecode is generated. It works fine for FORI initializers,
- ** but it won't necessarily work in other cases (e.g. iterator arguments).
- ** It doesn't do anything fancy, either (like backpropagating MOVs).
- */
- const BCIns *pc, *startpc = proto_bc(J->pt);
- for (pc = endpc-1; pc > startpc; pc--) {
- BCIns ins = *pc;
- BCOp op = bc_op(ins);
- /* First try to find the last instruction that stores to this slot. */
- if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
- return 0; /* Multiple results, e.g. from a CALL or KNIL. */
- } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
- if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */
- /* Now try to verify there's no forward jump across it. */
- const BCIns *kpc = pc;
- for (; pc > startpc; pc--)
- if (bc_op(*pc) == BC_JMP) {
- const BCIns *target = pc+bc_j(*pc)+1;
- if (target > kpc && target <= endpc)
- return 0; /* Conditional assignment. */
- }
- if (op == BC_KSHORT) {
- int32_t k = (int32_t)(int16_t)bc_d(ins);
- return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
- } else {
- cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
- if (t == IRT_INT) {
- int32_t k = numberVint(tv);
- if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */
- return lj_ir_kint(J, k);
- return 0; /* Type mismatch. */
- } else {
- return lj_ir_knum(J, numberVnum(tv));
- }
- }
- }
- return 0; /* Non-constant initializer. */
- }
- }
- return 0; /* No assignment to this slot found? */
-}
-
-/* Load and optionally convert a FORI argument from a slot. */
-static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
-{
- int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
- return sloadt(J, (int32_t)slot,
- t + (((mode & IRSLOAD_TYPECHECK) ||
- (conv && t == IRT_INT && !(mode >> 16))) ?
- IRT_GUARD : 0),
- mode + conv);
-}
-
-/* Peek before FORI to find a const initializer. Otherwise load from slot. */
-static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
- IRType t, int mode)
-{
- TRef tr = J->base[slot];
- if (!tr) {
- tr = find_kinit(J, fori, slot, t);
- if (!tr)
- tr = fori_load(J, slot, t, mode);
- }
- return tr;
-}
-
-/* Return the direction of the FOR loop iterator.
-** It's important to exactly reproduce the semantics of the interpreter.
-*/
-static int rec_for_direction(cTValue *o)
-{
- return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
-}
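
/* A standalone sketch (not part of lj_record.c): for a non-integer step,
** rec_for_direction() above tests the sign bit of the double, i.e. bit 31 of
** its high word, so -0.0 (sign bit set) is treated as a negative step. The
** helper below reproduces that test on a plain double. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int for_direction(double step)
{
  uint64_t bits;
  memcpy(&bits, &step, sizeof(bits));  /* Reinterpret the double's bits. */
  return (bits >> 63) == 0;            /* Sign bit clear: counting up. */
}

int main(void)
{
  printf("%d %d %d\n", for_direction(1.5), for_direction(-1.5),
         for_direction(-0.0));         /* Prints: 1 0 0 */
  return 0;
}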
-
-/* Simulate the runtime behavior of the FOR loop iterator. */
-static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
-{
- lua_Number stopv = numberVnum(&o[FORL_STOP]);
- lua_Number idxv = numberVnum(&o[FORL_IDX]);
- lua_Number stepv = numberVnum(&o[FORL_STEP]);
- if (isforl)
- idxv += stepv;
- if (rec_for_direction(&o[FORL_STEP])) {
- if (idxv <= stopv) {
- *op = IR_LE;
- return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
- }
- *op = IR_GT; return LOOPEV_LEAVE;
- } else {
- if (stopv <= idxv) {
- *op = IR_GE;
- return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
- }
- *op = IR_LT; return LOOPEV_LEAVE;
- }
-}
-
-/* Record checks for FOR loop overflow and step direction. */
-static void rec_for_check(jit_State *J, IRType t, int dir,
- TRef stop, TRef step, int init)
-{
- if (!tref_isk(step)) {
- /* Non-constant step: need a guard for the direction. */
- TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
- emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
- /* Add hoistable overflow checks for a narrowed FORL index. */
- if (init && t == IRT_INT) {
- if (tref_isk(stop)) {
- /* Constant stop: optimize check away or to a range check for step. */
- int32_t k = IR(tref_ref(stop))->i;
- if (dir) {
- if (k > 0)
- emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
- } else {
- if (k < 0)
- emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
- }
- } else {
- /* Stop+step variable: need full overflow check. */
- TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
- emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */
- }
- }
- } else if (init && t == IRT_INT && !tref_isk(stop)) {
- /* Constant step: optimize overflow check to a range check for stop. */
- int32_t k = IR(tref_ref(step))->i;
- k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
- emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));
- }
-}
-
-/* Record a FORL instruction. */
-static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
- int init)
-{
- BCReg ra = bc_a(*fori);
- cTValue *tv = &J->L->base[ra];
- TRef idx = J->base[ra+FORL_IDX];
- IRType t = idx ? tref_type(idx) :
- (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
- int mode = IRSLOAD_INHERIT +
- ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
- TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
- TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
- int tc, dir = rec_for_direction(&tv[FORL_STEP]);
- lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI);
- scev->t.irt = t;
- scev->dir = dir;
- scev->stop = tref_ref(stop);
- scev->step = tref_ref(step);
- rec_for_check(J, t, dir, stop, step, init);
- scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
- tc = (LJ_DUALNUM &&
- !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
- tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
- IRSLOAD_TYPECHECK : 0;
- if (tc) {
- J->base[ra+FORL_STOP] = stop;
- J->base[ra+FORL_STEP] = step;
- }
- if (!idx)
- idx = fori_load(J, ra+FORL_IDX, t,
- IRSLOAD_INHERIT + tc + (J->scev.start << 16));
- if (!init)
- J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
- J->base[ra+FORL_EXT] = idx;
- scev->idx = tref_ref(idx);
- setmref(scev->pc, fori);
- J->maxslot = ra+FORL_EXT+1;
-}
-
-/* Record FORL/JFORL or FORI/JFORI. */
-static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
-{
- BCReg ra = bc_a(*fori);
- TValue *tv = &J->L->base[ra];
- TRef *tr = &J->base[ra];
- IROp op;
- LoopEvent ev;
- TRef stop;
- IRType t;
- if (isforl) { /* Handle FORL/JFORL opcodes. */
- TRef idx = tr[FORL_IDX];
- if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) {
- t = J->scev.t.irt;
- stop = J->scev.stop;
- idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
- tr[FORL_EXT] = tr[FORL_IDX] = idx;
- } else {
- ScEvEntry scev;
- rec_for_loop(J, fori, &scev, 0);
- t = scev.t.irt;
- stop = scev.stop;
- }
- } else { /* Handle FORI/JFORI opcodes. */
- BCReg i;
- lj_meta_for(J->L, tv);
- t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
- IRT_NUM;
- for (i = FORL_IDX; i <= FORL_STEP; i++) {
- if (!tr[i]) sload(J, ra+i);
- lua_assert(tref_isnumber_str(tr[i]));
- if (tref_isstr(tr[i]))
- tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
- if (t == IRT_INT) {
- if (!tref_isinteger(tr[i]))
- tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
- } else {
- if (!tref_isnum(tr[i]))
- tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
- }
- }
- tr[FORL_EXT] = tr[FORL_IDX];
- stop = tr[FORL_STOP];
- rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
- stop, tr[FORL_STEP], 1);
- }
-
- ev = rec_for_iter(&op, tv, isforl);
- if (ev == LOOPEV_LEAVE) {
- J->maxslot = ra+FORL_EXT+1;
- J->pc = fori+1;
- } else {
- J->maxslot = ra;
- J->pc = fori+bc_j(*fori)+1;
- }
- lj_snap_add(J);
-
- emitir(IRTG(op, t), tr[FORL_IDX], stop);
-
- if (ev == LOOPEV_LEAVE) {
- J->maxslot = ra;
- J->pc = fori+bc_j(*fori)+1;
- } else {
- J->maxslot = ra+FORL_EXT+1;
- J->pc = fori+1;
- }
- J->needsnap = 1;
- return ev;
-}
-
-/* Record ITERL/JITERL. */
-static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
-{
- BCReg ra = bc_a(iterins);
- lua_assert(J->base[ra] != 0);
- if (!tref_isnil(J->base[ra])) { /* Looping back? */
- J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */
- J->maxslot = ra-1+bc_b(J->pc[-1]);
- J->pc += bc_j(iterins)+1;
- return LOOPEV_ENTER;
- } else {
- J->maxslot = ra-3;
- J->pc++;
- return LOOPEV_LEAVE;
- }
-}
-
-/* Record LOOP/JLOOP. Now, that was easy. */
-static LoopEvent rec_loop(jit_State *J, BCReg ra)
-{
- if (ra < J->maxslot) J->maxslot = ra;
- J->pc++;
- return LOOPEV_ENTER;
-}
-
-/* Check if a loop repeatedly failed to trace because it didn't loop back. */
-static int innerloopleft(jit_State *J, const BCIns *pc)
-{
- ptrdiff_t i;
- for (i = 0; i < PENALTY_SLOTS; i++)
- if (mref(J->penalty[i].pc, const BCIns) == pc) {
- if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
- J->penalty[i].reason == LJ_TRERR_LINNER) &&
- J->penalty[i].val >= 2*PENALTY_MIN)
- return 1;
- break;
- }
- return 0;
-}
-
-/* Handle the case when an interpreted loop op is hit. */
-static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
-{
- if (J->parent == 0) {
- if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
- /* Same loop? */
- if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */
- lj_trace_err(J, LJ_TRERR_LLEAVE);
- rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping root trace. */
- } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */
- /* It's usually better to abort here and wait until the inner loop
- ** is traced. But if the inner loop repeatedly didn't loop back,
- ** this indicates a low trip count. In this case try unrolling
- ** an inner loop even in a root trace. But it's better to be a bit
- ** more conservative here and only do it for very short loops.
- */
- if (bc_j(*pc) != -1 && !innerloopleft(J, pc))
- lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */
- if ((ev != LOOPEV_ENTERLO &&
- J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
- lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
- J->loopref = J->cur.nins;
- }
- } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */
- J->loopref = J->cur.nins;
- if (--J->loopunroll < 0)
- lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
- } /* Side trace continues across a loop that's left or not entered. */
-}
-
-/* Handle the case when an already compiled loop op is hit. */
-static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
-{
- if (J->parent == 0) { /* Root trace hit an inner loop. */
- /* Better let the inner loop spawn a side trace back here. */
- lj_trace_err(J, LJ_TRERR_LINNER);
- } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */
- J->instunroll = 0; /* Cannot continue across a compiled loop op. */
- if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
- rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form an extra loop. */
- else
- rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */
- } /* Side trace continues across a loop that's left or not entered. */
-}
-
-/* -- Record calls and returns -------------------------------------------- */
-
-/* Specialize to the runtime value of the called function or its prototype. */
-static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
-{
- TRef kfunc;
- if (isluafunc(fn)) {
- GCproto *pt = funcproto(fn);
- /* Too many closures created? Probably not a monomorphic function. */
- if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */
- TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC);
- emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt)));
- (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. */
- return tr;
- }
- }
- /* Otherwise specialize to the function (closure) value itself. */
- kfunc = lj_ir_kfunc(J, fn);
- emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);
- return kfunc;
-}
-
-/* Record call setup. */
-static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
-{
- RecordIndex ix;
- TValue *functv = &J->L->base[func];
- TRef *fbase = &J->base[func];
- ptrdiff_t i;
- for (i = 0; i <= nargs; i++)
- (void)getslot(J, func+i); /* Ensure func and all args have a reference. */
- if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */
- ix.tab = fbase[0];
- copyTV(J->L, &ix.tabv, functv);
- if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
- lj_trace_err(J, LJ_TRERR_NOMM);
- for (i = ++nargs; i > 0; i--) /* Shift arguments up. */
- fbase[i] = fbase[i-1];
- fbase[0] = ix.mobj; /* Replace function. */
- functv = &ix.mobjv;
- }
- fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]);
- J->maxslot = (BCReg)nargs;
-}
-
-/* Record call. */
-void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
-{
- rec_call_setup(J, func, nargs);
- /* Bump frame. */
- J->framedepth++;
- J->base += func+1;
- J->baseslot += func+1;
-}
-
-/* Record tail call. */
-void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
-{
- rec_call_setup(J, func, nargs);
- if (frame_isvarg(J->L->base - 1)) {
- BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
- if (--J->framedepth < 0)
- lj_trace_err(J, LJ_TRERR_NYIRETL);
- J->baseslot -= (BCReg)cbase;
- J->base -= cbase;
- func += cbase;
- }
- /* Move func + args down. */
- memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1));
- /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
- /* Tailcalls can form a loop, so count towards the loop unroll limit. */
- if (++J->tailcalled > J->loopunroll)
- lj_trace_err(J, LJ_TRERR_LUNROLL);
-}
-
-/* Check unroll limits for down-recursion. */
-static int check_downrec_unroll(jit_State *J, GCproto *pt)
-{
- IRRef ptref;
- for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
- if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
- int count = 0;
- IRRef ref;
- for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
- if (IR(ref)->op1 == ptref)
- count++;
- if (count) {
- if (J->pc == J->startpc) {
- if (count + J->tailcalled > J->param[JIT_P_recunroll])
- return 1;
- } else {
- lj_trace_err(J, LJ_TRERR_DOWNREC);
- }
- }
- }
- return 0;
-}
-
-/* Record return. */
-void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
-{
- TValue *frame = J->L->base - 1;
- ptrdiff_t i;
- for (i = 0; i < gotresults; i++)
- (void)getslot(J, rbase+i); /* Ensure all results have a reference. */
- while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. */
- BCReg cbase = (BCReg)frame_delta(frame);
- if (--J->framedepth < 0)
- lj_trace_err(J, LJ_TRERR_NYIRETL);
- lua_assert(J->baseslot > 1);
- gotresults++;
- rbase += cbase;
- J->baseslot -= (BCReg)cbase;
- J->base -= cbase;
- J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */
- frame = frame_prevd(frame);
- }
- /* Return to lower frame via interpreter for unhandled cases. */
- if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
- (!frame_islua(frame) ||
- (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))))) {
- /* NYI: specialize to frame type and return directly, not via RET*. */
- for (i = 0; i < (ptrdiff_t)rbase; i++)
- J->base[i] = 0; /* Purge dead slots. */
- J->maxslot = rbase + (BCReg)gotresults;
- rec_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */
- return;
- }
- if (frame_isvarg(frame)) {
- BCReg cbase = (BCReg)frame_delta(frame);
- if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */
- lj_trace_err(J, LJ_TRERR_NYIRETL);
- lua_assert(J->baseslot > 1);
- rbase += cbase;
- J->baseslot -= (BCReg)cbase;
- J->base -= cbase;
- frame = frame_prevd(frame);
- }
- if (frame_islua(frame)) { /* Return to Lua frame. */
- BCIns callins = *(frame_pc(frame)-1);
- ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults;
- BCReg cbase = bc_a(callins);
- GCproto *pt = funcproto(frame_func(frame - (cbase+1)));
- if ((pt->flags & PROTO_NOJIT))
- lj_trace_err(J, LJ_TRERR_CJITOFF);
- if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
- if (check_downrec_unroll(J, pt)) {
- J->maxslot = (BCReg)(rbase + gotresults);
- lj_snap_purge(J);
- rec_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-recursion. */
- return;
- }
- lj_snap_add(J);
- }
- for (i = 0; i < nresults; i++) /* Adjust results. */
- J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
- J->maxslot = cbase+(BCReg)nresults;
- if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */
- J->framedepth--;
- lua_assert(J->baseslot > cbase+1);
- J->baseslot -= cbase+1;
- J->base -= cbase+1;
- } else if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
- /* Return to lower frame would leave the loop in a root trace. */
- lj_trace_err(J, LJ_TRERR_LLEAVE);
- } else if (J->needsnap) { /* Tailcalled to ff with side-effects. */
- lj_trace_err(J, LJ_TRERR_NYIRETL); /* No way to insert snapshot here. */
- } else { /* Return to lower frame. Guard for the target we return to. */
- TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
- TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
- emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc);
- J->retdepth++;
- J->needsnap = 1;
- lua_assert(J->baseslot == 1);
- /* Shift result slots up and clear the slots of the new frame below. */
- memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults);
- memset(J->base-1, 0, sizeof(TRef)*(cbase+1));
- }
- } else if (frame_iscont(frame)) { /* Return to continuation frame. */
- ASMFunction cont = frame_contf(frame);
- BCReg cbase = (BCReg)frame_delta(frame);
- if ((J->framedepth -= 2) < 0)
- lj_trace_err(J, LJ_TRERR_NYIRETL);
- J->baseslot -= (BCReg)cbase;
- J->base -= cbase;
- J->maxslot = cbase-2;
- if (cont == lj_cont_ra) {
- /* Copy result to destination slot. */
- BCReg dst = bc_a(*(frame_contpc(frame)-1));
- J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
- if (dst >= J->maxslot) J->maxslot = dst+1;
- } else if (cont == lj_cont_nop) {
- /* Nothing to do here. */
- } else if (cont == lj_cont_cat) {
- lua_assert(0);
- } else {
- /* Result type already specialized. */
- lua_assert(cont == lj_cont_condf || cont == lj_cont_condt);
- }
- } else {
- lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */
- }
- lua_assert(J->baseslot >= 1);
-}
-
-/* -- Metamethod handling ------------------------------------------------- */
-
-/* Prepare to record call to metamethod. */
-static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
-{
- BCReg s, top = curr_proto(J->L)->framesize;
- TRef trcont;
- setcont(&J->L->base[top], cont);
-#if LJ_64
- trcont = lj_ir_kptr(J, (void *)((int64_t)cont - (int64_t)lj_vm_asm_begin));
-#else
- trcont = lj_ir_kptr(J, (void *)cont);
-#endif
- J->base[top] = trcont | TREF_CONT;
- J->framedepth++;
- for (s = J->maxslot; s < top; s++)
- J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */
- return top+1;
-}
-
-/* Record metamethod lookup. */
-int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
-{
- RecordIndex mix;
- GCtab *mt;
- if (tref_istab(ix->tab)) {
- mt = tabref(tabV(&ix->tabv)->metatable);
- mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
- } else if (tref_isudata(ix->tab)) {
- int udtype = udataV(&ix->tabv)->udtype;
- mt = tabref(udataV(&ix->tabv)->metatable);
- /* The metatables of special userdata objects are treated as immutable. */
- if (udtype != UDTYPE_USERDATA) {
- cTValue *mo;
- if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
- /* Specialize to the C library namespace object. */
- emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
- } else {
- /* Specialize to the type of userdata. */
- TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
- emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
- }
- immutable_mt:
- mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
- if (!mo || tvisnil(mo))
- return 0; /* No metamethod. */
- /* Treat metamethod or index table as immutable, too. */
- if (!(tvisfunc(mo) || tvistab(mo)))
- lj_trace_err(J, LJ_TRERR_BADTYPE);
- copyTV(J->L, &ix->mobjv, mo);
- ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
- ix->mtv = mt;
- ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */
- return 1; /* Got metamethod or index table. */
- }
- mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
- } else {
- /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
- mt = tabref(basemt_obj(J2G(J), &ix->tabv));
- if (mt == NULL) {
- ix->mt = TREF_NIL;
- return 0; /* No metamethod. */
- }
- /* The cdata metatable is treated as immutable. */
- if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
- ix->mt = mix.tab = lj_ir_ktab(J, mt);
- goto nocheck;
- }
- ix->mt = mt ? mix.tab : TREF_NIL;
- emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
-nocheck:
- if (mt) {
- GCstr *mmstr = mmname_str(J2G(J), mm);
- cTValue *mo = lj_tab_getstr(mt, mmstr);
- if (mo && !tvisnil(mo))
- copyTV(J->L, &ix->mobjv, mo);
- ix->mtv = mt;
- settabV(J->L, &mix.tabv, mt);
- setstrV(J->L, &mix.keyv, mmstr);
- mix.key = lj_ir_kstr(J, mmstr);
- mix.val = 0;
- mix.idxchain = 0;
- ix->mobj = lj_record_idx(J, &mix);
- return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */
- }
- return 0; /* No metamethod. */
-}
-
-/* Record call to arithmetic metamethod. */
-static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
-{
- /* Set up metamethod call first to save ix->tab and ix->tabv. */
- BCReg func = rec_mm_prep(J, lj_cont_ra);
- TRef *base = J->base + func;
- TValue *basev = J->L->base + func;
- base[1] = ix->tab; base[2] = ix->key;
- copyTV(J->L, basev+1, &ix->tabv);
- copyTV(J->L, basev+2, &ix->keyv);
- if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
- if (mm != MM_unm) {
- ix->tab = ix->key;
- copyTV(J->L, &ix->tabv, &ix->keyv);
- if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
- goto ok;
- }
- lj_trace_err(J, LJ_TRERR_NOMM);
- }
-ok:
- base[0] = ix->mobj;
- copyTV(J->L, basev+0, &ix->mobjv);
- lj_record_call(J, func, 2);
- return 0; /* No result yet. */
-}
-
-/* Record call to __len metamethod. */
-static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
-{
- RecordIndex ix;
- ix.tab = tr;
- copyTV(J->L, &ix.tabv, tv);
- if (lj_record_mm_lookup(J, &ix, MM_len)) {
- BCReg func = rec_mm_prep(J, lj_cont_ra);
- TRef *base = J->base + func;
- TValue *basev = J->L->base + func;
- base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
- base[1] = tr; copyTV(J->L, basev+1, tv);
-#if LJ_52
- base[2] = tr; copyTV(J->L, basev+2, tv);
-#else
- base[2] = TREF_NIL; setnilV(basev+2);
-#endif
- lj_record_call(J, func, 2);
- } else {
- if (LJ_52 && tref_istab(tr))
- return lj_ir_call(J, IRCALL_lj_tab_len, tr);
- lj_trace_err(J, LJ_TRERR_NOMM);
- }
- return 0; /* No result yet. */
-}
-
-/* Call a comparison metamethod. */
-static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
-{
- BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
- TRef *base = J->base + func;
- TValue *tv = J->L->base + func;
- base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
- copyTV(J->L, tv+0, &ix->mobjv);
- copyTV(J->L, tv+1, &ix->valv);
- copyTV(J->L, tv+2, &ix->keyv);
- lj_record_call(J, func, 2);
-}
-
-/* Record call to equality comparison metamethod (for tab and udata only). */
-static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
-{
- ix->tab = ix->val;
- copyTV(J->L, &ix->tabv, &ix->valv);
- if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */
- cTValue *bv;
- TRef mo1 = ix->mobj;
- TValue mo1v;
- copyTV(J->L, &mo1v, &ix->mobjv);
- /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
- bv = &ix->keyv;
- if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
- TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
- emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
- } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
- TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
- emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
- } else { /* Lookup metamethod on 2nd operand and compare both. */
- ix->tab = ix->key;
- copyTV(J->L, &ix->tabv, bv);
- if (!lj_record_mm_lookup(J, ix, MM_eq) ||
- lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
- return;
- }
- rec_mm_callcomp(J, ix, op);
- }
-}
-
-/* Record call to ordered comparison metamethods (for arbitrary objects). */
-static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
-{
- ix->tab = ix->val;
- copyTV(J->L, &ix->tabv, &ix->valv);
- while (1) {
- MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */
-#if LJ_52
- if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
- ix->tab = ix->key;
- copyTV(J->L, &ix->tabv, &ix->keyv);
- if (!lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
- goto nomatch;
- }
- rec_mm_callcomp(J, ix, op);
- return;
-#else
- if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
- cTValue *bv;
- TRef mo1 = ix->mobj;
- TValue mo1v;
- copyTV(J->L, &mo1v, &ix->mobjv);
- /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
- bv = &ix->keyv;
- if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
- TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
- emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
- } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
- TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
- emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
- } else { /* Lookup metamethod on 2nd operand and compare both. */
- ix->tab = ix->key;
- copyTV(J->L, &ix->tabv, bv);
- if (!lj_record_mm_lookup(J, ix, mm) ||
- lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
- goto nomatch;
- }
- rec_mm_callcomp(J, ix, op);
- return;
- }
-#endif
- nomatch:
- /* Lookup failed. Retry with __lt and swapped operands. */
- if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */
- ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
- copyTV(J->L, &ix->tabv, &ix->keyv);
- copyTV(J->L, &ix->keyv, &ix->valv);
- copyTV(J->L, &ix->valv, &ix->tabv);
- op ^= 3;
- }
-}
-
-#if LJ_HASFFI
-/* Setup call to cdata comparison metamethod. */
-static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
-{
- lj_snap_add(J);
- if (tref_iscdata(ix->val)) {
- ix->tab = ix->val;
- copyTV(J->L, &ix->tabv, &ix->valv);
- } else {
- lua_assert(tref_iscdata(ix->key));
- ix->tab = ix->key;
- copyTV(J->L, &ix->tabv, &ix->keyv);
- }
- lj_record_mm_lookup(J, ix, mm);
- rec_mm_callcomp(J, ix, op);
-}
-#endif
-
-/* -- Indexed access ------------------------------------------------------ */
-
-/* Record bounds-check. */
-static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
-{
- /* Try to emit invariant bounds checks. */
- if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
- (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
- IRRef ref = tref_ref(ikey);
- IRIns *ir = IR(ref);
- int32_t ofs = 0;
- IRRef ofsref = 0;
- /* Handle constant offsets. */
- if (ir->o == IR_ADD && irref_isk(ir->op2)) {
- ofsref = ir->op2;
- ofs = IR(ofsref)->i;
- ref = ir->op1;
- ir = IR(ref);
- }
- /* Got scalar evolution analysis results for this reference? */
- if (ref == J->scev.idx) {
- int32_t stop;
- lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD);
- stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
- /* Runtime value for stop of loop is within bounds? */
- if ((uint64_t)stop + ofs < (uint64_t)asize) {
- /* Emit invariant bounds check for stop. */
- emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
- emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
- /* Emit invariant bounds check for start, if not const or negative. */
- if (!(J->scev.dir && J->scev.start &&
- (int64_t)IR(J->scev.start)->i + ofs >= 0))
- emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
- return;
- }
- }
- }
- emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */
-}
-
-/* Record indexed key lookup. */
-static TRef rec_idx_key(jit_State *J, RecordIndex *ix)
-{
- TRef key;
- GCtab *t = tabV(&ix->tabv);
- ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */
-
- /* Integer keys are looked up in the array part first. */
- key = ix->key;
- if (tref_isnumber(key)) {
- int32_t k = numberVint(&ix->keyv);
- if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
- k = LJ_MAX_ASIZE;
- if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */
- TRef ikey = lj_opt_narrow_index(J, key);
- TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
- if ((MSize)k < t->asize) { /* Currently an array key? */
- TRef arrayref;
- rec_idx_abc(J, asizeref, ikey, t->asize);
- arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY);
- return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey);
- } else { /* Currently not in array (may be an array extension)? */
- emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */
- if (k == 0 && tref_isk(key))
- key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */
- /* And continue with the hash lookup. */
- }
- } else if (!tref_isk(key)) {
- /* We can rule out const numbers which failed the integerness test
- ** above. But all other numbers are potential array keys.
- */
- if (t->asize == 0) { /* True sparse tables have an empty array part. */
- /* Guard that the array part stays empty. */
- TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
- emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
- } else {
- lj_trace_err(J, LJ_TRERR_NYITMIX);
- }
- }
- }
-
- /* Otherwise the key is located in the hash part. */
- if (t->hmask == 0) { /* Shortcut for empty hash part. */
- /* Guard that the hash part stays empty. */
- TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
- emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
- return lj_ir_kkptr(J, niltvg(J2G(J)));
- }
- if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */
- key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
- if (tref_isk(key)) {
- /* Optimize lookup of constant hash keys. */
- MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
- if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
- hslot <= 65535*(MSize)sizeof(Node)) {
- TRef node, kslot;
- TRef hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
- emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
- node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE);
- kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
- return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot);
- }
- }
- /* Fall back to a regular hash lookup. */
- return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key);
-}
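
/* A standalone sketch with a hypothetical stand-in Node layout (not the real
** LuaJIT Node): it shows how the constant-key HREFK optimization above
** recovers a hash slot index from the pointer to the previously looked-up
** value, by taking the byte offset from the first node's value field and
** dividing by the node size. */
#include <assert.h>
#include <stddef.h>

typedef struct Node {
  double val;          /* Stand-in for the node's value slot. */
  double key;          /* Stand-in for the node's key slot. */
  struct Node *next;   /* Stand-in for the hash chain link. */
} Node;

static size_t slot_from_valptr(const Node *node0, const double *valptr)
{
  size_t byteofs = (size_t)((const char *)valptr - (const char *)&node0->val);
  return byteofs / sizeof(Node);  /* Byte offset of the value -> slot index. */
}

int main(void)
{
  Node hash[8];
  assert(slot_from_valptr(hash, &hash[0].val) == 0);
  assert(slot_from_valptr(hash, &hash[5].val) == 5);
  return 0;
}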
-
-/* Determine whether a key is NOT one of the fast metamethod names. */
-static int nommstr(jit_State *J, TRef key)
-{
- if (tref_isstr(key)) {
- if (tref_isk(key)) {
- GCstr *str = ir_kstr(IR(tref_ref(key)));
- uint32_t mm;
- for (mm = 0; mm <= MM_FAST; mm++)
- if (mmname_str(J2G(J), mm) == str)
- return 0; /* MUST be one of the fast metamethod names. */
- } else {
- return 0; /* Variable string key MAY be a metamethod name. */
- }
- }
- return 1; /* CANNOT be a metamethod name. */
-}
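
/* A standalone sketch (not part of lj_record.c): the conservative shape of the
** nommstr() test above. Only a constant string key that matches none of the
** fast metamethod names lets a store skip invalidating the negative
** metamethod cache; a variable string key might turn out to be "__index" etc.
** at runtime. The name list below is illustrative, not LuaJIT's actual fast
** metamethod set. */
#include <string.h>

static int key_cannot_be_fast_mm(const char *key, int is_const_string)
{
  static const char *const fastmm[] = { "__index", "__newindex", "__eq", "__len" };
  size_t i;
  if (!is_const_string)
    return 0;  /* A variable string key MAY be a metamethod name. */
  for (i = 0; i < sizeof(fastmm)/sizeof(fastmm[0]); i++)
    if (strcmp(key, fastmm[i]) == 0)
      return 0;  /* Matches a fast metamethod name. */
  return 1;  /* Cannot be one of the listed names. */
}

int main(void)
{
  return !(key_cannot_be_fast_mm("color", 1) &&
           !key_cannot_be_fast_mm("__index", 1) &&
           !key_cannot_be_fast_mm("color", 0));
}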
-
-/* Record indexed load/store. */
-TRef lj_record_idx(jit_State *J, RecordIndex *ix)
-{
- TRef xref;
- IROp xrefop, loadop;
- cTValue *oldv;
-
- while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
- /* Never call raw lj_record_idx() on non-table. */
- lua_assert(ix->idxchain != 0);
- if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
- lj_trace_err(J, LJ_TRERR_NOMM);
- handlemm:
- if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */
- BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
- TRef *base = J->base + func;
- TValue *tv = J->L->base + func;
- base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
- setfuncV(J->L, tv+0, funcV(&ix->mobjv));
- copyTV(J->L, tv+1, &ix->tabv);
- copyTV(J->L, tv+2, &ix->keyv);
- if (ix->val) {
- base[3] = ix->val;
- copyTV(J->L, tv+3, &ix->valv);
- lj_record_call(J, func, 3); /* mobj(tab, key, val) */
- return 0;
- } else {
- lj_record_call(J, func, 2); /* res = mobj(tab, key) */
- return 0; /* No result yet. */
- }
- }
- /* Otherwise retry lookup with metaobject. */
- ix->tab = ix->mobj;
- copyTV(J->L, &ix->tabv, &ix->mobjv);
- if (--ix->idxchain == 0)
- lj_trace_err(J, LJ_TRERR_IDXLOOP);
- }
-
- /* First catch nil and NaN keys for tables. */
- if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
- if (ix->val) /* Better fail early. */
- lj_trace_err(J, LJ_TRERR_STORENN);
- if (tref_isk(ix->key)) {
- if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
- goto handlemm;
- return TREF_NIL;
- }
- }
-
- /* Record the key lookup. */
- xref = rec_idx_key(J, ix);
- xrefop = IR(tref_ref(xref))->o;
- loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
- /* The lj_meta_tset() inconsistency is gone, but better play safe. */
- oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;
-
- if (ix->val == 0) { /* Indexed load */
- IRType t = itype2irt(oldv);
- TRef res;
- if (oldv == niltvg(J2G(J))) {
- emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
- res = TREF_NIL;
- } else {
- res = emitir(IRTG(loadop, t), xref, 0);
- }
- if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
- goto handlemm;
- if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */
- return res;
- } else { /* Indexed store. */
- GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
- int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
- if (tvisnil(oldv)) { /* Previous value was nil? */
- /* Need to duplicate the hasmm check for the early guards. */
- int hasmm = 0;
- if (ix->idxchain && mt) {
- cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
- hasmm = mo && !tvisnil(mo);
- }
- if (hasmm)
- emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */
- else if (xrefop == IR_HREF)
- emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32),
- xref, lj_ir_kkptr(J, niltvg(J2G(J))));
- if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
- lua_assert(hasmm);
- goto handlemm;
- }
- lua_assert(!hasmm);
- if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */
- TRef key = ix->key;
- if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */
- key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
- xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key);
- keybarrier = 0; /* NEWREF already takes care of the key barrier. */
- }
- } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
- /* Cannot derive that the previous value was non-nil, must do checks. */
- if (xrefop == IR_HREF) /* Guard against store to niltv. */
- emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
- if (ix->idxchain) { /* Metamethod lookup required? */
- /* A check for NULL metatable is cheaper (hoistable) than a load. */
- if (!mt) {
- TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
- emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
- } else {
- IRType t = itype2irt(oldv);
- emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */
- }
- }
- } else {
- keybarrier = 0; /* Previous non-nil value kept the key alive. */
- }
- /* Convert int to number before storing. */
- if (!LJ_DUALNUM && tref_isinteger(ix->val))
- ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
- emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
- if (keybarrier || tref_isgcv(ix->val))
- emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
- /* Invalidate neg. metamethod cache for stores with certain string keys. */
- if (!nommstr(J, ix->key)) {
- TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM);
- emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
- }
- J->needsnap = 1;
- return 0;
- }
-}
-
-/* -- Upvalue access ------------------------------------------------------ */
-
-/* Check whether upvalue is immutable and ok to constify. */
-static int rec_upvalue_constify(jit_State *J, GCupval *uvp)
-{
- if (uvp->immutable) {
- cTValue *o = uvval(uvp);
- /* Don't constify objects that may retain large amounts of memory. */
-#if LJ_HASFFI
- if (tviscdata(o)) {
- GCcdata *cd = cdataV(o);
- if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) {
- CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid);
- if (!ctype_hassize(ct->info) || ct->size <= 16)
- return 1;
- }
- return 0;
- }
-#else
- UNUSED(J);
-#endif
- if (!(tvistab(o) || tvisudata(o) || tvisthread(o)))
- return 1;
- }
- return 0;
-}
-
-/* Record upvalue load/store. */
-static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
-{
- GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
- TRef fn = getcurrf(J);
- IRRef uref;
- int needbarrier = 0;
- if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */
- TRef tr, kfunc;
- lua_assert(val == 0);
- if (!tref_isk(fn)) { /* Late specialization of current function. */
- if (J->pt->flags >= PROTO_CLC_POLY)
- goto noconstify;
- kfunc = lj_ir_kfunc(J, J->fn);
- emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
- J->base[-1] = TREF_FRAME | kfunc;
- fn = kfunc;
- }
- tr = lj_record_constify(J, uvval(uvp));
- if (tr)
- return tr;
- }
-noconstify:
- /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
- uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
- if (!uvp->closed) {
- /* In current stack? */
- if (uvval(uvp) >= tvref(J->L->stack) &&
- uvval(uvp) < tvref(J->L->maxstack)) {
- int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
- if (slot >= 0) { /* Aliases an SSA slot? */
- slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */
- /* NYI: add IR to guard that it's still aliasing the same slot. */
- if (val == 0) {
- return getslot(J, slot);
- } else {
- J->base[slot] = val;
- if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
- return 0;
- }
- }
- }
- uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv));
- } else {
- needbarrier = 1;
- uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv));
- }
- if (val == 0) { /* Upvalue load */
- IRType t = itype2irt(uvval(uvp));
- TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
- if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */
- return res;
- } else { /* Upvalue store. */
- /* Convert int to number before storing. */
- if (!LJ_DUALNUM && tref_isinteger(val))
- val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
- emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
- if (needbarrier && tref_isgcv(val))
- emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
- J->needsnap = 1;
- return 0;
- }
-}
-
-/* -- Record calls to Lua functions --------------------------------------- */
-
-/* Check unroll limits for calls. */
-static void check_call_unroll(jit_State *J, TraceNo lnk)
-{
- cTValue *frame = J->L->base - 1;
- void *pc = mref(frame_func(frame)->l.pc, void);
- int32_t depth = J->framedepth;
- int32_t count = 0;
- if ((J->pt->flags & PROTO_VARARG)) depth--; /* Vararg frame still missing. */
- for (; depth > 0; depth--) { /* Count frames with same prototype. */
- if (frame_iscont(frame)) depth--;
- frame = frame_prev(frame);
- if (mref(frame_func(frame)->l.pc, void) == pc)
- count++;
- }
- if (J->pc == J->startpc) {
- if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
- J->pc++;
- if (J->framedepth + J->retdepth == 0)
- rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-recursion. */
- else
- rec_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */
- }
- } else {
- if (count > J->param[JIT_P_callunroll]) {
- if (lnk) { /* Possible tail- or up-recursion. */
- lj_trace_flush(J, lnk); /* Flush trace that only returns. */
- /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
- hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4));
- }
- lj_trace_err(J, LJ_TRERR_CUNROLL);
- }
- }
-}
-
-/* Record Lua function setup. */
-static void rec_func_setup(jit_State *J)
-{
- GCproto *pt = J->pt;
- BCReg s, numparams = pt->numparams;
- if ((pt->flags & PROTO_NOJIT))
- lj_trace_err(J, LJ_TRERR_CJITOFF);
- if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
- lj_trace_err(J, LJ_TRERR_STACKOV);
- /* Fill up missing parameters with nil. */
- for (s = J->maxslot; s < numparams; s++)
- J->base[s] = TREF_NIL;
- /* The remaining slots should never be read before they are written. */
- J->maxslot = numparams;
-}
-
-/* Record Lua vararg function setup. */
-static void rec_func_vararg(jit_State *J)
-{
- GCproto *pt = J->pt;
- BCReg s, fixargs, vframe = J->maxslot+1;
- lua_assert((pt->flags & PROTO_VARARG));
- if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
- lj_trace_err(J, LJ_TRERR_STACKOV);
- J->base[vframe-1] = J->base[-1]; /* Copy function up. */
- /* Copy fixarg slots up and set their original slots to nil. */
- fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
- for (s = 0; s < fixargs; s++) {
- J->base[vframe+s] = J->base[s];
- J->base[s] = TREF_NIL;
- }
- J->maxslot = fixargs;
- J->framedepth++;
- J->base += vframe;
- J->baseslot += vframe;
-}
-
-/* Record entry to a Lua function. */
-static void rec_func_lua(jit_State *J)
-{
- rec_func_setup(J);
- check_call_unroll(J, 0);
-}
-
-/* Record entry to an already compiled function. */
-static void rec_func_jit(jit_State *J, TraceNo lnk)
-{
- GCtrace *T;
- rec_func_setup(J);
- T = traceref(J, lnk);
- if (T->linktype == LJ_TRLINK_RETURN) { /* Trace returns to interpreter? */
- check_call_unroll(J, lnk);
- /* Temporarily unpatch JFUNC* to continue recording across function. */
- J->patchins = *J->pc;
- J->patchpc = (BCIns *)J->pc;
- *J->patchpc = T->startins;
- return;
- }
- J->instunroll = 0; /* Cannot continue across a compiled function. */
- if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
- rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-recursion. */
- else
- rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */
-}
-
-/* -- Vararg handling ----------------------------------------------------- */
-
-/* Detect y = select(x, ...) idiom. */
-static int select_detect(jit_State *J)
-{
- BCIns ins = J->pc[1];
- if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
- cTValue *func = &J->L->base[bc_a(ins)];
- if (tvisfunc(func) && funcV(func)->c.ffid == FF_select)
- return 1;
- }
- return 0;
-}
-
-/* Record vararg instruction. */
-static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
-{
- int32_t numparams = J->pt->numparams;
- ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1;
- lua_assert(frame_isvarg(J->L->base-1));
- if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */
- ptrdiff_t i;
- if (nvararg < 0) nvararg = 0;
- if (nresults == -1) {
- nresults = nvararg;
- J->maxslot = dst + (BCReg)nvararg;
- } else if (dst + nresults > J->maxslot) {
- J->maxslot = dst + (BCReg)nresults;
- }
- for (i = 0; i < nresults; i++)
- J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1) : TREF_NIL;
- } else { /* Unknown number of varargs passed to trace. */
- TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME);
- int32_t frofs = 8*(1+numparams)+FRAME_VARG;
- if (nresults >= 0) { /* Known fixed number of results. */
- ptrdiff_t i;
- if (nvararg > 0) {
- ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
- TRef vbase;
- if (nvararg >= nresults)
- emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
- else
- emitir(IRTGI(IR_EQ), fr, lj_ir_kint(J, frame_ftsz(J->L->base-1)));
- vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
- vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
- for (i = 0; i < nload; i++) {
- IRType t = itype2irt(&J->L->base[i-1-nvararg]);
- TRef aref = emitir(IRT(IR_AREF, IRT_P32),
- vbase, lj_ir_kint(J, (int32_t)i));
- TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
- if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
- J->base[dst+i] = tr;
- }
- } else {
- emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
- nvararg = 0;
- }
- for (i = nvararg; i < nresults; i++)
- J->base[dst+i] = TREF_NIL;
- if (dst + (BCReg)nresults > J->maxslot)
- J->maxslot = dst + (BCReg)nresults;
- } else if (select_detect(J)) { /* y = select(x, ...) */
- TRef tridx = J->base[dst-1];
- TRef tr = TREF_NIL;
- ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
- if (idx < 0) goto nyivarg;
- if (idx != 0 && !tref_isinteger(tridx))
- tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
- if (idx != 0 && tref_isk(tridx)) {
- emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
- fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
- frofs -= 8; /* Bias for 1-based index. */
- } else if (idx <= nvararg) { /* Compute size. */
- TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
- if (numparams)
- emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
- tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
- if (idx != 0) {
- tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
- rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
- }
- } else {
- TRef tmp = lj_ir_kint(J, frofs);
- if (idx != 0) {
- TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
- tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
- } else {
- tr = lj_ir_kint(J, 0);
- }
- emitir(IRTGI(IR_LT), fr, tmp);
- }
- if (idx != 0 && idx <= nvararg) {
- IRType t;
- TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
- vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
- t = itype2irt(&J->L->base[idx-2-nvararg]);
- aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx);
- tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
- if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
- }
- J->base[dst-2] = tr;
- J->maxslot = dst-1;
- J->bcskip = 2; /* Skip CALLM + select. */
- } else {
- nyivarg:
- setintV(&J->errinfo, BC_VARG);
- lj_trace_err_info(J, LJ_TRERR_NYIBC);
- }
- }
-}
-
-/* -- Record allocations -------------------------------------------------- */
-
-static TRef rec_tnew(jit_State *J, uint32_t ah)
-{
- uint32_t asize = ah & 0x7ff;
- uint32_t hbits = ah >> 11;
- if (asize == 0x7ff) asize = 0x801;
- return emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
-}
-
-/* -- Record bytecode ops ------------------------------------------------- */
-
-/* Prepare for comparison. */
-static void rec_comp_prep(jit_State *J)
-{
- /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */
- if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
- emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
- lj_snap_add(J);
-}
-
-/* Fixup comparison. */
-static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
-{
- BCIns jmpins = pc[1];
- const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
- SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
- /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
- J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
- J->needsnap = 1;
- if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
- lj_snap_shrink(J); /* Shrink last snapshot if possible. */
-}
-
-/* Record the next bytecode instruction (_before_ it's executed). */
-void lj_record_ins(jit_State *J)
-{
- cTValue *lbase;
- RecordIndex ix;
- const BCIns *pc;
- BCIns ins;
- BCOp op;
- TRef ra, rb, rc;
-
- /* Perform post-processing action before recording the next instruction. */
- if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
- switch (J->postproc) {
- case LJ_POST_FIXCOMP: /* Fixup comparison. */
- pc = frame_pc(&J2G(J)->tmptv);
- rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
- /* fallthrough */
- case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */
- case LJ_POST_FIXGUARDSNAP: /* Fixup and emit pending guard and snapshot. */
- if (!tvistruecond(&J2G(J)->tmptv2)) {
- J->fold.ins.o ^= 1; /* Flip guard to opposite. */
- if (J->postproc == LJ_POST_FIXGUARDSNAP) {
- SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
- J->cur.snapmap[snap->mapofs+snap->nent-1]--; /* False -> true. */
- }
- }
- lj_opt_fold(J); /* Emit pending guard. */
- /* fallthrough */
- case LJ_POST_FIXBOOL:
- if (!tvistruecond(&J2G(J)->tmptv2)) {
- BCReg s;
- TValue *tv = J->L->base;
- for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */
- if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) {
- J->base[s] = TREF_FALSE;
- break;
- }
- }
- break;
- case LJ_POST_FIXCONST:
- {
- BCReg s;
- TValue *tv = J->L->base;
- for (s = 0; s < J->maxslot; s++) /* Constify stack slots (if any). */
- if (J->base[s] == TREF_NIL && !tvisnil(&tv[s]))
- J->base[s] = lj_record_constify(J, &tv[s]);
- }
- break;
- case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. */
- if (bc_op(*J->pc) >= BC__MAX)
- return;
- break;
- default: lua_assert(0); break;
- }
- J->postproc = LJ_POST_NONE;
- }
-
- /* Need snapshot before recording next bytecode (e.g. after a store). */
- if (J->needsnap) {
- J->needsnap = 0;
- lj_snap_purge(J);
- lj_snap_add(J);
- J->mergesnap = 1;
- }
-
- /* Skip some bytecodes. */
- if (LJ_UNLIKELY(J->bcskip > 0)) {
- J->bcskip--;
- return;
- }
-
- /* Record only closed loops for root traces. */
- pc = J->pc;
- if (J->framedepth == 0 &&
- (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
- lj_trace_err(J, LJ_TRERR_LLEAVE);
-
-#ifdef LUA_USE_ASSERT
- rec_check_slots(J);
- rec_check_ir(J);
-#endif
-
- /* Keep a copy of the runtime values of var/num/str operands. */
-#define rav (&ix.valv)
-#define rbv (&ix.tabv)
-#define rcv (&ix.keyv)
-
- lbase = J->L->base;
- ins = *pc;
- op = bc_op(ins);
- ra = bc_a(ins);
- ix.val = 0;
- switch (bcmode_a(op)) {
- case BCMvar:
- copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
- default: break; /* Handled later. */
- }
- rb = bc_b(ins);
- rc = bc_c(ins);
- switch (bcmode_b(op)) {
- case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */
- case BCMvar:
- copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
- default: break; /* Handled later. */
- }
- switch (bcmode_c(op)) {
- case BCMvar:
- copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
- case BCMpri: setitype(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
- case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
- copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
- lj_ir_knumint(J, numV(tv)); } break;
- case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
- setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
- default: break; /* Handled later. */
- }
-
- switch (op) {
-
- /* -- Comparison ops ---------------------------------------------------- */
-
- case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
-#if LJ_HASFFI
- if (tref_iscdata(ra) || tref_iscdata(rc)) {
- rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
- break;
- }
-#endif
- /* Emit nothing for two numeric or string consts. */
- if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
- IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
- IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
- int irop;
- if (ta != tc) {
- /* Widen mixed number/int comparisons to number/number comparison. */
- if (ta == IRT_INT && tc == IRT_NUM) {
- ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
- ta = IRT_NUM;
- } else if (ta == IRT_NUM && tc == IRT_INT) {
- rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
- } else if (LJ_52) {
- ta = IRT_NIL; /* Force metamethod for different types. */
- } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
- (tc == IRT_FALSE || tc == IRT_TRUE))) {
- break; /* Interpreter will throw for two different types. */
- }
- }
- rec_comp_prep(J);
- irop = (int)op - (int)BC_ISLT + (int)IR_LT;
- if (ta == IRT_NUM) {
- if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. */
- if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
- irop ^= 5;
- } else if (ta == IRT_INT) {
- if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
- irop ^= 1;
- } else if (ta == IRT_STR) {
- if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
- ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
- rc = lj_ir_kint(J, 0);
- ta = IRT_INT;
- } else {
- rec_mm_comp(J, &ix, (int)op);
- break;
- }
- emitir(IRTG(irop, ta), ra, rc);
- rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
- }
- break;
-
- case BC_ISEQV: case BC_ISNEV:
- case BC_ISEQS: case BC_ISNES:
- case BC_ISEQN: case BC_ISNEN:
- case BC_ISEQP: case BC_ISNEP:
-#if LJ_HASFFI
- if (tref_iscdata(ra) || tref_iscdata(rc)) {
- rec_mm_comp_cdata(J, &ix, op, MM_eq);
- break;
- }
-#endif
- /* Emit nothing for two non-table, non-udata consts. */
- if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
- int diff;
- rec_comp_prep(J);
- diff = lj_record_objcmp(J, ra, rc, rav, rcv);
- if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra)))
- rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
- else if (diff == 1) /* Only check __eq if different, but same type. */
- rec_mm_equal(J, &ix, (int)op);
- }
- break;
-
- /* -- Unary test and copy ops ------------------------------------------- */
-
- case BC_ISTC: case BC_ISFC:
- if ((op & 1) == tref_istruecond(rc))
- rc = 0; /* Don't store if condition is not true. */
- /* fallthrough */
- case BC_IST: case BC_ISF: /* Type specialization suffices. */
- if (bc_a(pc[1]) < J->maxslot)
- J->maxslot = bc_a(pc[1]); /* Shrink used slots. */
- break;
-
- /* -- Unary ops --------------------------------------------------------- */
-
- case BC_NOT:
- /* Type specialization already forces const result. */
- rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
- break;
-
- case BC_LEN:
- if (tref_isstr(rc))
- rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
- else if (!LJ_52 && tref_istab(rc))
- rc = lj_ir_call(J, IRCALL_lj_tab_len, rc);
- else
- rc = rec_mm_len(J, rc, rcv);
- break;
-
- /* -- Arithmetic ops ---------------------------------------------------- */
-
- case BC_UNM:
- if (tref_isnumber_str(rc)) {
- rc = lj_opt_narrow_unm(J, rc, rcv);
- } else {
- ix.tab = rc;
- copyTV(J->L, &ix.tabv, rcv);
- rc = rec_mm_arith(J, &ix, MM_unm);
- }
- break;
-
- case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
- /* Swap rb/rc and rbv/rcv. rav is temp. */
- ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
- copyTV(J->L, rav, rbv);
- copyTV(J->L, rbv, rcv);
- copyTV(J->L, rcv, rav);
- if (op == BC_MODNV)
- goto recmod;
- /* fallthrough */
- case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
- case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
- MMS mm = bcmode_mm(op);
- if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
- rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
- (int)mm - (int)MM_add + (int)IR_ADD);
- else
- rc = rec_mm_arith(J, &ix, mm);
- break;
- }
-
- case BC_MODVN: case BC_MODVV:
- recmod:
- if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
- rc = lj_opt_narrow_mod(J, rb, rc, rcv);
- else
- rc = rec_mm_arith(J, &ix, MM_mod);
- break;
-
- case BC_POW:
- if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
- rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv);
- else
- rc = rec_mm_arith(J, &ix, MM_pow);
- break;
-
- /* -- Constant and move ops --------------------------------------------- */
-
- case BC_MOV:
- /* Clear gap of method call to avoid resurrecting previous refs. */
- if (ra > J->maxslot) J->base[ra-1] = 0;
- break;
- case BC_KSTR: case BC_KNUM: case BC_KPRI:
- break;
- case BC_KSHORT:
- rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
- break;
- case BC_KNIL:
- while (ra <= rc)
- J->base[ra++] = TREF_NIL;
- if (rc >= J->maxslot) J->maxslot = rc+1;
- break;
-#if LJ_HASFFI
- case BC_KCDATA:
- rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
- break;
-#endif
-
- /* -- Upvalue and function ops ------------------------------------------ */
-
- case BC_UGET:
- rc = rec_upvalue(J, rc, 0);
- break;
- case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
- rec_upvalue(J, ra, rc);
- break;
-
- /* -- Table ops --------------------------------------------------------- */
-
- case BC_GGET: case BC_GSET:
- settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
- ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
- ix.idxchain = LJ_MAX_IDXCHAIN;
- rc = lj_record_idx(J, &ix);
- break;
-
- case BC_TGETB: case BC_TSETB:
- setintV(&ix.keyv, (int32_t)rc);
- ix.key = lj_ir_kint(J, (int32_t)rc);
- /* fallthrough */
- case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
- ix.idxchain = LJ_MAX_IDXCHAIN;
- rc = lj_record_idx(J, &ix);
- break;
-
- case BC_TNEW:
- rc = rec_tnew(J, rc);
- break;
- case BC_TDUP:
- rc = emitir(IRTG(IR_TDUP, IRT_TAB),
- lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
- break;
-
- /* -- Calls and vararg handling ----------------------------------------- */
-
- case BC_ITERC:
- J->base[ra] = getslot(J, ra-3);
- J->base[ra+1] = getslot(J, ra-2);
- J->base[ra+2] = getslot(J, ra-1);
- { /* Do the actual copy now because lj_record_call needs the values. */
- TValue *b = &J->L->base[ra];
- copyTV(J->L, b, b-3);
- copyTV(J->L, b+1, b-2);
- copyTV(J->L, b+2, b-1);
- }
- lj_record_call(J, ra, (ptrdiff_t)rc-1);
- break;
-
- /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
- case BC_CALLM:
- rc = (BCReg)(J->L->top - J->L->base) - ra;
- /* fallthrough */
- case BC_CALL:
- lj_record_call(J, ra, (ptrdiff_t)rc-1);
- break;
-
- case BC_CALLMT:
- rc = (BCReg)(J->L->top - J->L->base) - ra;
- /* fallthrough */
- case BC_CALLT:
- lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
- break;
-
- case BC_VARG:
- rec_varg(J, ra, (ptrdiff_t)rb-1);
- break;
-
- /* -- Returns ----------------------------------------------------------- */
-
- case BC_RETM:
- /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
- rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
- /* fallthrough */
- case BC_RET: case BC_RET0: case BC_RET1:
- lj_record_ret(J, ra, (ptrdiff_t)rc-1);
- break;
-
- /* -- Loops and branches ------------------------------------------------ */
-
- case BC_FORI:
- if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
- J->loopref = J->cur.nins;
- break;
- case BC_JFORI:
- lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL);
- if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */
- rec_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
- /* Continue tracing if the loop is not entered. */
- break;
-
- case BC_FORL:
- rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
- break;
- case BC_ITERL:
- rec_loop_interp(J, pc, rec_iterl(J, *pc));
- break;
- case BC_LOOP:
- rec_loop_interp(J, pc, rec_loop(J, ra));
- break;
-
- case BC_JFORL:
- rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
- break;
- case BC_JITERL:
- rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
- break;
- case BC_JLOOP:
- rec_loop_jit(J, rc, rec_loop(J, ra));
- break;
-
- case BC_IFORL:
- case BC_IITERL:
- case BC_ILOOP:
- case BC_IFUNCF:
- case BC_IFUNCV:
- lj_trace_err(J, LJ_TRERR_BLACKL);
- break;
-
- case BC_JMP:
- if (ra < J->maxslot)
- J->maxslot = ra; /* Shrink used slots. */
- break;
-
- /* -- Function headers -------------------------------------------------- */
-
- case BC_FUNCF:
- rec_func_lua(J);
- break;
- case BC_JFUNCF:
- rec_func_jit(J, rc);
- break;
-
- case BC_FUNCV:
- rec_func_vararg(J);
- rec_func_lua(J);
- break;
- case BC_JFUNCV:
- lua_assert(0); /* Cannot happen. No hotcall counting for vararg funcs. */
- break;
-
- case BC_FUNCC:
- case BC_FUNCCW:
- lj_ffrecord_func(J);
- break;
-
- default:
- if (op >= BC__MAX) {
- lj_ffrecord_func(J);
- break;
- }
- /* fallthrough */
- case BC_ITERN:
- case BC_ISNEXT:
- case BC_CAT:
- case BC_UCLO:
- case BC_FNEW:
- case BC_TSETM:
- setintV(&J->errinfo, (int32_t)op);
- lj_trace_err_info(J, LJ_TRERR_NYIBC);
- break;
- }
-
- /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
- if (bcmode_a(op) == BCMdst && rc) {
- J->base[ra] = rc;
- if (ra >= J->maxslot) J->maxslot = ra+1;
- }
-
-#undef rav
-#undef rbv
-#undef rcv
-
- /* Limit the number of recorded IR instructions. */
- if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord])
- lj_trace_err(J, LJ_TRERR_TRACEOV);
-}
-
-/* -- Recording setup ----------------------------------------------------- */
-
-/* Setup recording for a root trace started by a hot loop. */
-static const BCIns *rec_setup_root(jit_State *J)
-{
- /* Determine the next PC and the bytecode range for the loop. */
- const BCIns *pcj, *pc = J->pc;
- BCIns ins = *pc;
- BCReg ra = bc_a(ins);
- switch (bc_op(ins)) {
- case BC_FORL:
- J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
- pc += 1+bc_j(ins);
- J->bc_min = pc;
- break;
- case BC_ITERL:
- lua_assert(bc_op(pc[-1]) == BC_ITERC);
- J->maxslot = ra + bc_b(pc[-1]) - 1;
- J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
- pc += 1+bc_j(ins);
- lua_assert(bc_op(pc[-1]) == BC_JMP);
- J->bc_min = pc;
- break;
- case BC_LOOP:
- /* Only check BC range for real loops, but not for "repeat until true". */
- pcj = pc + bc_j(ins);
- ins = *pcj;
- if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
- J->bc_min = pcj+1 + bc_j(ins);
- J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
- }
- J->maxslot = ra;
- pc++;
- break;
- case BC_RET:
- case BC_RET0:
- case BC_RET1:
- /* No bytecode range check for down-recursive root traces. */
- J->maxslot = ra + bc_d(ins) - 1;
- break;
- case BC_FUNCF:
- /* No bytecode range check for root traces started by a hot call. */
- J->maxslot = J->pt->numparams;
- pc++;
- break;
- default:
- lua_assert(0);
- break;
- }
- return pc;
-}
-
-/* Setup for recording a new trace. */
-void lj_record_setup(jit_State *J)
-{
- uint32_t i;
-
- /* Initialize state related to current trace. */
- memset(J->slot, 0, sizeof(J->slot));
- memset(J->chain, 0, sizeof(J->chain));
- memset(J->bpropcache, 0, sizeof(J->bpropcache));
- J->scev.idx = REF_NIL;
- setmref(J->scev.pc, NULL);
-
- J->baseslot = 1; /* Invoking function is at base[-1]. */
- J->base = J->slot + J->baseslot;
- J->maxslot = 0;
- J->framedepth = 0;
- J->retdepth = 0;
-
- J->instunroll = J->param[JIT_P_instunroll];
- J->loopunroll = J->param[JIT_P_loopunroll];
- J->tailcalled = 0;
- J->loopref = 0;
-
- J->bc_min = NULL; /* Means no limit. */
- J->bc_extent = ~(MSize)0;
-
- /* Emit instructions for fixed references. Also triggers initial IR alloc. */
- emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno);
- for (i = 0; i <= 2; i++) {
- IRIns *ir = IR(REF_NIL-i);
- ir->i = 0;
- ir->t.irt = (uint8_t)(IRT_NIL+i);
- ir->o = IR_KPRI;
- ir->prev = 0;
- }
- J->cur.nk = REF_TRUE;
-
- J->startpc = J->pc;
- setmref(J->cur.startpc, J->pc);
- if (J->parent) { /* Side trace. */
- GCtrace *T = traceref(J, J->parent);
- TraceNo root = T->root ? T->root : J->parent;
- J->cur.root = (uint16_t)root;
- J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
- /* Check whether we could at least potentially form an extra loop. */
- if (J->exitno == 0 && T->snap[0].nent == 0) {
- /* We can narrow a FORL for some side traces, too. */
- if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
- bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
- lj_snap_add(J);
- rec_for_loop(J, J->pc-1, &J->scev, 1);
- goto sidecheck;
- }
- } else {
- J->startpc = NULL; /* Prevent forming an extra loop. */
- }
- lj_snap_replay(J, T);
- sidecheck:
- if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
- T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
- J->param[JIT_P_tryside]) {
- rec_stop(J, LJ_TRLINK_INTERP, 0);
- }
- } else { /* Root trace. */
- J->cur.root = 0;
- J->cur.startins = *J->pc;
- J->pc = rec_setup_root(J);
- /* Note: the loop instruction itself is recorded at the end and not
- ** at the start! So snapshot #0 needs to point to the *next* instruction.
- */
- lj_snap_add(J);
- if (bc_op(J->cur.startins) == BC_FORL)
- rec_for_loop(J, J->pc-1, &J->scev, 1);
- if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
- lj_trace_err(J, LJ_TRERR_STACKOV);
- }
-#ifdef LUAJIT_ENABLE_CHECKHOOK
- /* Regularly check for instruction/line hooks from compiled code and
- ** exit to the interpreter if the hooks are set.
- **
- ** This is a compile-time option and disabled by default, since the
- ** hook checks may be quite expensive in tight loops.
- **
- ** Note this is only useful if hooks are *not* set most of the time.
- ** Use this only if you want to *asynchronously* interrupt the execution.
- **
- ** You can set the instruction hook via lua_sethook() with a count of 1
- ** from a signal handler or another native thread. Please have a look
- ** at the first few functions in luajit.c for an example (Ctrl-C handler).
- */
- {
- TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
- lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
- tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
- emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
- }
-#endif
-}
-
-#undef IR
-#undef emitir_raw
-#undef emitir
-
-#endif
+/*
+** Trace recorder (bytecode -> SSA IR).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_record_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_snap.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Sanity checks ------------------------------------------------------- */
+
+#ifdef LUA_USE_ASSERT
+/* Sanity check the whole IR -- sloooow. */
+static void rec_check_ir(jit_State *J)
+{
+ IRRef i, nins = J->cur.nins, nk = J->cur.nk;
+ lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536);
+ for (i = nins-1; i >= nk; i--) {
+ IRIns *ir = IR(i);
+ uint32_t mode = lj_ir_mode[ir->o];
+ IRRef op1 = ir->op1;
+ IRRef op2 = ir->op2;
+ switch (irm_op1(mode)) {
+ case IRMnone: lua_assert(op1 == 0); break;
+ case IRMref: lua_assert(op1 >= nk);
+ lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break;
+ case IRMlit: break;
+ case IRMcst: lua_assert(i < REF_BIAS); continue;
+ }
+ switch (irm_op2(mode)) {
+ case IRMnone: lua_assert(op2 == 0); break;
+ case IRMref: lua_assert(op2 >= nk);
+ lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break;
+ case IRMlit: break;
+ case IRMcst: lua_assert(0); break;
+ }
+ if (ir->prev) {
+ lua_assert(ir->prev >= nk);
+ lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i);
+ lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o);
+ }
+ }
+}
+
+/* Compare stack slots and frames of the recorder and the VM. */
+static void rec_check_slots(jit_State *J)
+{
+ BCReg s, nslots = J->baseslot + J->maxslot;
+ int32_t depth = 0;
+ cTValue *base = J->L->base - J->baseslot;
+ lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS);
+ lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME));
+ lua_assert(nslots < LJ_MAX_JSLOTS);
+ for (s = 0; s < nslots; s++) {
+ TRef tr = J->slot[s];
+ if (tr) {
+ cTValue *tv = &base[s];
+ IRRef ref = tref_ref(tr);
+ IRIns *ir;
+ lua_assert(ref >= J->cur.nk && ref < J->cur.nins);
+ ir = IR(ref);
+ lua_assert(irt_t(ir->t) == tref_t(tr));
+ if (s == 0) {
+ lua_assert(tref_isfunc(tr));
+ } else if ((tr & TREF_FRAME)) {
+ GCfunc *fn = gco2func(frame_gc(tv));
+ BCReg delta = (BCReg)(tv - frame_prev(tv));
+ lua_assert(tref_isfunc(tr));
+ if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir));
+ lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta));
+ depth++;
+ } else if ((tr & TREF_CONT)) {
+ lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void));
+ lua_assert((J->slot[s+1] & TREF_FRAME));
+ depth++;
+ } else {
+ if (tvisnumber(tv))
+ lua_assert(tref_isnumber(tr)); /* Could be IRT_INT etc., too. */
+ else
+ lua_assert(itype2irt(tv) == tref_type(tr));
+ if (tref_isk(tr)) { /* Compare constants. */
+ TValue tvk;
+ lj_ir_kvalue(J->L, &tvk, ir);
+ if (!(tvisnum(&tvk) && tvisnan(&tvk)))
+ lua_assert(lj_obj_equal(tv, &tvk));
+ else
+ lua_assert(tvisnum(tv) && tvisnan(tv));
+ }
+ }
+ }
+ }
+ lua_assert(J->framedepth == depth);
+}
+#endif
+
+/* -- Type handling and specialization ------------------------------------ */
+
+/* Note: these functions return tagged references (TRef). */
+
+/* Specialize a slot to a specific type. Note: slot can be negative! */
+static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
+{
+ /* Caller may set IRT_GUARD in t. */
+ TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);
+ J->base[slot] = ref;
+ return ref;
+}
+
+/* Specialize a slot to the runtime type. Note: slot can be negative! */
+static TRef sload(jit_State *J, int32_t slot)
+{
+ IRType t = itype2irt(&J->L->base[slot]);
+ TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
+ IRSLOAD_TYPECHECK);
+ if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */
+ J->base[slot] = ref;
+ return ref;
+}
+
+/* Get TRef from slot. Load slot and specialize if not done already. */
+#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))
+
+/* Get TRef for current function. */
+static TRef getcurrf(jit_State *J)
+{
+ if (J->base[-1])
+ return J->base[-1];
+ lua_assert(J->baseslot == 1);
+ return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY);
+}
+
+/* Compare for raw object equality.
+** Returns 0 if the objects are the same.
+** Returns 1 if they are different, but the same type.
+** Returns 2 for two different types.
+** Comparisons between primitives always return 1 -- no caller cares about it.
+*/
+int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
+{
+ int diff = !lj_obj_equal(av, bv);
+ if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */
+ IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
+ IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
+ if (ta != tb) {
+ /* Widen mixed number/int comparisons to number/number comparison. */
+ if (ta == IRT_INT && tb == IRT_NUM) {
+ a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
+ ta = IRT_NUM;
+ } else if (ta == IRT_NUM && tb == IRT_INT) {
+ b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
+ } else {
+ return 2; /* Two different types are never equal. */
+ }
+ }
+ emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);
+ }
+ return diff;
+}
+
+/* Constify a value. Returns 0 for non-representable object types. */
+TRef lj_record_constify(jit_State *J, cTValue *o)
+{
+ if (tvisgcv(o))
+ return lj_ir_kgc(J, gcV(o), itype2irt(o));
+ else if (tvisint(o))
+ return lj_ir_kint(J, intV(o));
+ else if (tvisnum(o))
+ return lj_ir_knumint(J, numV(o));
+ else if (tvisbool(o))
+ return TREF_PRI(itype2irt(o));
+ else
+ return 0; /* Can't represent lightuserdata (pointless). */
+}
+
+/* -- Record loop ops ----------------------------------------------------- */
+
+/* Loop event. */
+typedef enum {
+ LOOPEV_LEAVE, /* Loop is left or not entered. */
+ LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */
+ LOOPEV_ENTER /* Loop is entered. */
+} LoopEvent;
+
+/* Canonicalize slots: convert integers to numbers. */
+static void canonicalize_slots(jit_State *J)
+{
+ BCReg s;
+ if (LJ_DUALNUM) return;
+ for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
+ TRef tr = J->slot[s];
+ if (tref_isinteger(tr)) {
+ IRIns *ir = IR(tref_ref(tr));
+ if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY)))
+ J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ }
+ }
+}
+
+/* Stop recording. */
+static void rec_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
+{
+ lj_trace_end(J);
+ J->cur.linktype = (uint8_t)linktype;
+ J->cur.link = (uint16_t)lnk;
+ /* Looping back at the same stack level? */
+ if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
+ if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */
+ goto nocanon; /* Do not canonicalize or we lose the narrowing. */
+ if (J->cur.root) /* Otherwise ensure we always link to the root trace. */
+ J->cur.link = J->cur.root;
+ }
+ canonicalize_slots(J);
+nocanon:
+ /* Note: all loop ops must set J->pc to the following instruction! */
+ lj_snap_add(J); /* Add loop snapshot. */
+ J->needsnap = 0;
+ J->mergesnap = 1; /* In case recording continues. */
+}
+
+/* Search bytecode backwards for an int/num constant slot initializer. */
+static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
+{
+ /* This algorithm is rather simplistic and assumes quite a bit about
+ ** how the bytecode is generated. It works fine for FORI initializers,
+ ** but it won't necessarily work in other cases (e.g. iterator arguments).
+ ** It doesn't do anything fancy, either (like backpropagating MOVs).
+ */
+ const BCIns *pc, *startpc = proto_bc(J->pt);
+ for (pc = endpc-1; pc > startpc; pc--) {
+ BCIns ins = *pc;
+ BCOp op = bc_op(ins);
+ /* First try to find the last instruction that stores to this slot. */
+ if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
+ return 0; /* Multiple results, e.g. from a CALL or KNIL. */
+ } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
+ if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */
+ /* Now try to verify there's no forward jump across it. */
+ const BCIns *kpc = pc;
+ for (; pc > startpc; pc--)
+ if (bc_op(*pc) == BC_JMP) {
+ const BCIns *target = pc+bc_j(*pc)+1;
+ if (target > kpc && target <= endpc)
+ return 0; /* Conditional assignment. */
+ }
+ if (op == BC_KSHORT) {
+ int32_t k = (int32_t)(int16_t)bc_d(ins);
+ return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
+ } else {
+ cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
+ if (t == IRT_INT) {
+ int32_t k = numberVint(tv);
+ if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */
+ return lj_ir_kint(J, k);
+ return 0; /* Type mismatch. */
+ } else {
+ return lj_ir_knum(J, numberVnum(tv));
+ }
+ }
+ }
+ return 0; /* Non-constant initializer. */
+ }
+ }
+ return 0; /* No assignment to this slot found? */
+}
+
+/* Load and optionally convert a FORI argument from a slot. */
+static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
+{
+ int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
+ return sloadt(J, (int32_t)slot,
+ t + (((mode & IRSLOAD_TYPECHECK) ||
+ (conv && t == IRT_INT && !(mode >> 16))) ?
+ IRT_GUARD : 0),
+ mode + conv);
+}
+
+/* Peek before FORI to find a const initializer. Otherwise load from slot. */
+static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
+ IRType t, int mode)
+{
+ TRef tr = J->base[slot];
+ if (!tr) {
+ tr = find_kinit(J, fori, slot, t);
+ if (!tr)
+ tr = fori_load(J, slot, t, mode);
+ }
+ return tr;
+}
+
+/* Return the direction of the FOR loop iterator.
+** It's important to exactly reproduce the semantics of the interpreter.
+*/
+static int rec_for_direction(cTValue *o)
+{
+ return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
+}
+
+/* Simulate the runtime behavior of the FOR loop iterator. */
+static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
+{
+ lua_Number stopv = numberVnum(&o[FORL_STOP]);
+ lua_Number idxv = numberVnum(&o[FORL_IDX]);
+ lua_Number stepv = numberVnum(&o[FORL_STEP]);
+ if (isforl)
+ idxv += stepv;
+ if (rec_for_direction(&o[FORL_STEP])) {
+ if (idxv <= stopv) {
+ *op = IR_LE;
+ return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
+ }
+ *op = IR_GT; return LOOPEV_LEAVE;
+ } else {
+ if (stopv <= idxv) {
+ *op = IR_GE;
+ return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
+ }
+ *op = IR_LT; return LOOPEV_LEAVE;
+ }
+}
+
+/* Record checks for FOR loop overflow and step direction. */
+static void rec_for_check(jit_State *J, IRType t, int dir,
+ TRef stop, TRef step, int init)
+{
+ if (!tref_isk(step)) {
+ /* Non-constant step: need a guard for the direction. */
+ TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
+ emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
+ /* Add hoistable overflow checks for a narrowed FORL index. */
+ if (init && t == IRT_INT) {
+ if (tref_isk(stop)) {
+ /* Constant stop: optimize check away or to a range check for step. */
+ int32_t k = IR(tref_ref(stop))->i;
+ if (dir) {
+ if (k > 0)
+ emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
+ } else {
+ if (k < 0)
+ emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
+ }
+ } else {
+ /* Stop+step variable: need full overflow check. */
+ TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
+ emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */
+ }
+ }
+ } else if (init && t == IRT_INT && !tref_isk(stop)) {
+ /* Constant step: optimize overflow check to a range check for stop. */
+ int32_t k = IR(tref_ref(step))->i;
+ k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
+ emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));
+ }
+}
+
+/* Record a FORL instruction. */
+static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
+ int init)
+{
+ BCReg ra = bc_a(*fori);
+ cTValue *tv = &J->L->base[ra];
+ TRef idx = J->base[ra+FORL_IDX];
+ IRType t = idx ? tref_type(idx) :
+ (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
+ int mode = IRSLOAD_INHERIT +
+ ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
+ TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
+ TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
+ int tc, dir = rec_for_direction(&tv[FORL_STEP]);
+ lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI);
+ scev->t.irt = t;
+ scev->dir = dir;
+ scev->stop = tref_ref(stop);
+ scev->step = tref_ref(step);
+ rec_for_check(J, t, dir, stop, step, init);
+ scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
+ tc = (LJ_DUALNUM &&
+ !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
+ tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
+ IRSLOAD_TYPECHECK : 0;
+ if (tc) {
+ J->base[ra+FORL_STOP] = stop;
+ J->base[ra+FORL_STEP] = step;
+ }
+ if (!idx)
+ idx = fori_load(J, ra+FORL_IDX, t,
+ IRSLOAD_INHERIT + tc + (J->scev.start << 16));
+ if (!init)
+ J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
+ J->base[ra+FORL_EXT] = idx;
+ scev->idx = tref_ref(idx);
+ J->maxslot = ra+FORL_EXT+1;
+}
+
+/* Record FORL/JFORL or FORI/JFORI. */
+static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
+{
+ BCReg ra = bc_a(*fori);
+ TValue *tv = &J->L->base[ra];
+ TRef *tr = &J->base[ra];
+ IROp op;
+ LoopEvent ev;
+ TRef stop;
+ IRType t;
+ if (isforl) { /* Handle FORL/JFORL opcodes. */
+ TRef idx = tr[FORL_IDX];
+ if (tref_ref(idx) == J->scev.idx) {
+ t = J->scev.t.irt;
+ stop = J->scev.stop;
+ idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
+ tr[FORL_EXT] = tr[FORL_IDX] = idx;
+ } else {
+ ScEvEntry scev;
+ rec_for_loop(J, fori, &scev, 0);
+ t = scev.t.irt;
+ stop = scev.stop;
+ }
+ } else { /* Handle FORI/JFORI opcodes. */
+ BCReg i;
+ lj_meta_for(J->L, tv);
+ t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
+ IRT_NUM;
+ for (i = FORL_IDX; i <= FORL_STEP; i++) {
+ if (!tr[i]) sload(J, ra+i);
+ lua_assert(tref_isnumber_str(tr[i]));
+ if (tref_isstr(tr[i]))
+ tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
+ if (t == IRT_INT) {
+ if (!tref_isinteger(tr[i]))
+ tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
+ } else {
+ if (!tref_isnum(tr[i]))
+ tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
+ }
+ }
+ tr[FORL_EXT] = tr[FORL_IDX];
+ stop = tr[FORL_STOP];
+ rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
+ stop, tr[FORL_STEP], 1);
+ }
+
+ ev = rec_for_iter(&op, tv, isforl);
+ if (ev == LOOPEV_LEAVE) {
+ J->maxslot = ra+FORL_EXT+1;
+ J->pc = fori+1;
+ } else {
+ J->maxslot = ra;
+ J->pc = fori+bc_j(*fori)+1;
+ }
+ lj_snap_add(J);
+
+ emitir(IRTG(op, t), tr[FORL_IDX], stop);
+
+ if (ev == LOOPEV_LEAVE) {
+ J->maxslot = ra;
+ J->pc = fori+bc_j(*fori)+1;
+ } else {
+ J->maxslot = ra+FORL_EXT+1;
+ J->pc = fori+1;
+ }
+ J->needsnap = 1;
+ return ev;
+}
+
+/* Record ITERL/JITERL. */
+static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
+{
+ BCReg ra = bc_a(iterins);
+ lua_assert(J->base[ra] != 0);
+ if (!tref_isnil(J->base[ra])) { /* Looping back? */
+ J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */
+ J->maxslot = ra-1+bc_b(J->pc[-1]);
+ J->pc += bc_j(iterins)+1;
+ return LOOPEV_ENTER;
+ } else {
+ J->maxslot = ra-3;
+ J->pc++;
+ return LOOPEV_LEAVE;
+ }
+}
+
+/* Record LOOP/JLOOP. Now, that was easy. */
+static LoopEvent rec_loop(jit_State *J, BCReg ra)
+{
+ if (ra < J->maxslot) J->maxslot = ra;
+ J->pc++;
+ return LOOPEV_ENTER;
+}
+
+/* Check if a loop repeatedly failed to trace because it didn't loop back. */
+static int innerloopleft(jit_State *J, const BCIns *pc)
+{
+ ptrdiff_t i;
+ for (i = 0; i < PENALTY_SLOTS; i++)
+ if (mref(J->penalty[i].pc, const BCIns) == pc) {
+ if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
+ J->penalty[i].reason == LJ_TRERR_LINNER) &&
+ J->penalty[i].val >= 2*PENALTY_MIN)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/* Handle the case when an interpreted loop op is hit. */
+static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
+{
+ if (J->parent == 0) {
+ if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
+ /* Same loop? */
+ if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+ rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping root trace. */
+ } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */
+ /* It's usually better to abort here and wait until the inner loop
+ ** is traced. But if the inner loop repeatedly didn't loop back,
+ ** this indicates a low trip count. In this case try unrolling
+ ** an inner loop even in a root trace. But it's better to be a bit
+ ** more conservative here and only do it for very short loops.
+ */
+ if (bc_j(*pc) != -1 && !innerloopleft(J, pc))
+ lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */
+ if ((ev != LOOPEV_ENTERLO &&
+ J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
+ lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
+ J->loopref = J->cur.nins;
+ }
+ } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */
+ J->loopref = J->cur.nins;
+ if (--J->loopunroll < 0)
+ lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
+ } /* Side trace continues across a loop that's left or not entered. */
+}
+
+/* Handle the case when an already compiled loop op is hit. */
+static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
+{
+ if (J->parent == 0) { /* Root trace hit an inner loop. */
+ /* Better let the inner loop spawn a side trace back here. */
+ lj_trace_err(J, LJ_TRERR_LINNER);
+ } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */
+ J->instunroll = 0; /* Cannot continue across a compiled loop op. */
+ if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
+ rec_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form an extra loop. */
+ else
+ rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */
+ } /* Side trace continues across a loop that's left or not entered. */
+}
+
+/* -- Record calls and returns -------------------------------------------- */
+
+/* Specialize to the runtime value of the called function or its prototype. */
+static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
+{
+ TRef kfunc;
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ /* Too many closures created? Probably not a monomorphic function. */
+ if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */
+ TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC);
+ emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt)));
+ (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. */
+ return tr;
+ }
+ }
+ /* Otherwise specialize to the function (closure) value itself. */
+ kfunc = lj_ir_kfunc(J, fn);
+ emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);
+ return kfunc;
+}
+
+/* Record call setup. */
+static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ RecordIndex ix;
+ TValue *functv = &J->L->base[func];
+ TRef *fbase = &J->base[func];
+ ptrdiff_t i;
+ for (i = 0; i <= nargs; i++)
+ (void)getslot(J, func+i); /* Ensure func and all args have a reference. */
+ if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */
+ ix.tab = fbase[0];
+ copyTV(J->L, &ix.tabv, functv);
+ if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ for (i = ++nargs; i > 0; i--) /* Shift arguments up. */
+ fbase[i] = fbase[i-1];
+ fbase[0] = ix.mobj; /* Replace function. */
+ functv = &ix.mobjv;
+ }
+ fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]);
+ J->maxslot = (BCReg)nargs;
+}
+
+/* Record call. */
+void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ rec_call_setup(J, func, nargs);
+ /* Bump frame. */
+ J->framedepth++;
+ J->base += func+1;
+ J->baseslot += func+1;
+}
+
+/* Record tail call. */
+void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ rec_call_setup(J, func, nargs);
+ if (frame_isvarg(J->L->base - 1)) {
+ BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
+ if (--J->framedepth < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ func += cbase;
+ }
+ /* Move func + args down. */
+ memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1));
+ /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
+ /* Tailcalls can form a loop, so count towards the loop unroll limit. */
+ if (++J->tailcalled > J->loopunroll)
+ lj_trace_err(J, LJ_TRERR_LUNROLL);
+}
+
+/* Check unroll limits for down-recursion. */
+static int check_downrec_unroll(jit_State *J, GCproto *pt)
+{
+ IRRef ptref;
+ for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
+ if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
+ int count = 0;
+ IRRef ref;
+ for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
+ if (IR(ref)->op1 == ptref)
+ count++;
+ if (count) {
+ if (J->pc == J->startpc) {
+ if (count + J->tailcalled > J->param[JIT_P_recunroll])
+ return 1;
+ } else {
+ lj_trace_err(J, LJ_TRERR_DOWNREC);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Record return. */
+void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
+{
+ TValue *frame = J->L->base - 1;
+ ptrdiff_t i;
+ for (i = 0; i < gotresults; i++)
+ (void)getslot(J, rbase+i); /* Ensure all results have a reference. */
+ while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. */
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if (--J->framedepth < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ lua_assert(J->baseslot > 1);
+ gotresults++;
+ rbase += cbase;
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */
+ frame = frame_prevd(frame);
+ }
+ /* Return to lower frame via interpreter for unhandled cases. */
+ if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
+ (!frame_islua(frame) ||
+ (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))))) {
+ /* NYI: specialize to frame type and return directly, not via RET*. */
+ for (i = -1; i < (ptrdiff_t)rbase; i++)
+ J->base[i] = 0; /* Purge dead slots. */
+ J->maxslot = rbase + (BCReg)gotresults;
+ rec_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */
+ return;
+ }
+ if (frame_isvarg(frame)) {
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ lua_assert(J->baseslot > 1);
+ rbase += cbase;
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ frame = frame_prevd(frame);
+ }
+ if (frame_islua(frame)) { /* Return to Lua frame. */
+ BCIns callins = *(frame_pc(frame)-1);
+ ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults;
+ BCReg cbase = bc_a(callins);
+ GCproto *pt = funcproto(frame_func(frame - (cbase+1)));
+ if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
+ if (check_downrec_unroll(J, pt)) {
+ J->maxslot = (BCReg)(rbase + gotresults);
+ lj_snap_purge(J);
+ rec_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-recursion. */
+ return;
+ }
+ lj_snap_add(J);
+ }
+ for (i = 0; i < nresults; i++) /* Adjust results. */
+ J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
+ J->maxslot = cbase+(BCReg)nresults;
+ if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */
+ J->framedepth--;
+ lua_assert(J->baseslot > cbase+1);
+ J->baseslot -= cbase+1;
+ J->base -= cbase+1;
+ } else if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
+ /* Return to lower frame would leave the loop in a root trace. */
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+ } else { /* Return to lower frame. Guard for the target we return to. */
+ TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
+ TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
+ emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc);
+ J->retdepth++;
+ J->needsnap = 1;
+ lua_assert(J->baseslot == 1);
+ /* Shift result slots up and clear the slots of the new frame below. */
+ memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults);
+ memset(J->base-1, 0, sizeof(TRef)*(cbase+1));
+ }
+ } else if (frame_iscont(frame)) { /* Return to continuation frame. */
+ ASMFunction cont = frame_contf(frame);
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if ((J->framedepth -= 2) < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ J->maxslot = cbase-2;
+ if (cont == lj_cont_ra) {
+ /* Copy result to destination slot. */
+ BCReg dst = bc_a(*(frame_contpc(frame)-1));
+ J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
+ if (dst >= J->maxslot) J->maxslot = dst+1;
+ } else if (cont == lj_cont_nop) {
+ /* Nothing to do here. */
+ } else if (cont == lj_cont_cat) {
+ lua_assert(0);
+ } else {
+ /* Result type already specialized. */
+ lua_assert(cont == lj_cont_condf || cont == lj_cont_condt);
+ }
+ } else {
+ lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */
+ }
+ lua_assert(J->baseslot >= 1);
+}
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* Prepare to record call to metamethod. */
+static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
+{
+ BCReg s, top = curr_proto(J->L)->framesize;
+ TRef trcont;
+ setcont(&J->L->base[top], cont);
+#if LJ_64
+ trcont = lj_ir_kptr(J, (void *)((int64_t)cont - (int64_t)lj_vm_asm_begin));
+#else
+ trcont = lj_ir_kptr(J, (void *)cont);
+#endif
+ J->base[top] = trcont | TREF_CONT;
+ J->framedepth++;
+ for (s = J->maxslot; s < top; s++)
+ J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */
+ return top+1;
+}
+
+/* Record metamethod lookup. */
+int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
+{
+ RecordIndex mix;
+ GCtab *mt;
+ if (tref_istab(ix->tab)) {
+ mt = tabref(tabV(&ix->tabv)->metatable);
+ mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
+ } else if (tref_isudata(ix->tab)) {
+ int udtype = udataV(&ix->tabv)->udtype;
+ mt = tabref(udataV(&ix->tabv)->metatable);
+ /* The metatables of special userdata objects are treated as immutable. */
+ if (udtype != UDTYPE_USERDATA) {
+ cTValue *mo;
+ if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
+ /* Specialize to the C library namespace object. */
+ emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
+ } else {
+ /* Specialize to the type of userdata. */
+ TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
+ }
+ immutable_mt:
+ mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
+ if (!mo || tvisnil(mo))
+ return 0; /* No metamethod. */
+ /* Treat metamethod or index table as immutable, too. */
+ if (!(tvisfunc(mo) || tvistab(mo)))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ copyTV(J->L, &ix->mobjv, mo);
+ ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
+ ix->mtv = mt;
+ ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */
+ return 1; /* Got metamethod or index table. */
+ }
+ mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
+ } else {
+ /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
+ mt = tabref(basemt_obj(J2G(J), &ix->tabv));
+ if (mt == NULL) {
+ ix->mt = TREF_NIL;
+ return 0; /* No metamethod. */
+ }
+ /* The cdata metatable is treated as immutable. */
+ if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
+ ix->mt = mix.tab = lj_ir_ktab(J, mt);
+ goto nocheck;
+ }
+ ix->mt = mt ? mix.tab : TREF_NIL;
+ emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
+nocheck:
+ if (mt) {
+ GCstr *mmstr = mmname_str(J2G(J), mm);
+ cTValue *mo = lj_tab_getstr(mt, mmstr);
+ if (mo && !tvisnil(mo))
+ copyTV(J->L, &ix->mobjv, mo);
+ ix->mtv = mt;
+ settabV(J->L, &mix.tabv, mt);
+ setstrV(J->L, &mix.keyv, mmstr);
+ mix.key = lj_ir_kstr(J, mmstr);
+ mix.val = 0;
+ mix.idxchain = 0;
+ ix->mobj = lj_record_idx(J, &mix);
+ return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */
+ }
+ return 0; /* No metamethod. */
+}
+
+/* Record call to arithmetic metamethod. */
+static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
+{
+ /* Set up metamethod call first to save ix->tab and ix->tabv. */
+ BCReg func = rec_mm_prep(J, lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *basev = J->L->base + func;
+ base[1] = ix->tab; base[2] = ix->key;
+ copyTV(J->L, basev+1, &ix->tabv);
+ copyTV(J->L, basev+2, &ix->keyv);
+ if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ if (mm != MM_unm) {
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
+ goto ok;
+ }
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ }
+ok:
+ base[0] = ix->mobj;
+ copyTV(J->L, basev+0, &ix->mobjv);
+ lj_record_call(J, func, 2);
+ return 0; /* No result yet. */
+}
+
+/* Record call to __len metamethod. */
+static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
+{
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, tv);
+ if (lj_record_mm_lookup(J, &ix, MM_len)) {
+ BCReg func = rec_mm_prep(J, lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *basev = J->L->base + func;
+ base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
+ base[1] = tr; copyTV(J->L, basev+1, tv);
+#if LJ_52
+ base[2] = tr; copyTV(J->L, basev+2, tv);
+#else
+ base[2] = TREF_NIL; setnilV(basev+2);
+#endif
+ lj_record_call(J, func, 2);
+ } else {
+ if (LJ_52 && tref_istab(tr))
+ return lj_ir_call(J, IRCALL_lj_tab_len, tr);
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ }
+ return 0; /* No result yet. */
+}
+
+/* Call a comparison metamethod. */
+static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
+{
+ BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
+ TRef *base = J->base + func;
+ TValue *tv = J->L->base + func;
+ base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
+ copyTV(J->L, tv+0, &ix->mobjv);
+ copyTV(J->L, tv+1, &ix->valv);
+ copyTV(J->L, tv+2, &ix->keyv);
+ lj_record_call(J, func, 2);
+}
+
+/* Record call to equality comparison metamethod (for tab and udata only). */
+static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
+{
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */
+ cTValue *bv;
+ TRef mo1 = ix->mobj;
+ TValue mo1v;
+ copyTV(J->L, &mo1v, &ix->mobjv);
+ /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
+ bv = &ix->keyv;
+ if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else { /* Lookup metamethod on 2nd operand and compare both. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, bv);
+ if (!lj_record_mm_lookup(J, ix, MM_eq) ||
+ lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
+ return;
+ }
+ rec_mm_callcomp(J, ix, op);
+ }
+}
+
+/* Record call to ordered comparison metamethods (for arbitrary objects). */
+static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
+{
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ while (1) {
+ MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */
+#if LJ_52
+ if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ if (!lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
+ goto nomatch;
+ }
+ rec_mm_callcomp(J, ix, op);
+ return;
+#else
+ if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ cTValue *bv;
+ TRef mo1 = ix->mobj;
+ TValue mo1v;
+ copyTV(J->L, &mo1v, &ix->mobjv);
+ /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
+ bv = &ix->keyv;
+ if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else { /* Lookup metamethod on 2nd operand and compare both. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, bv);
+ if (!lj_record_mm_lookup(J, ix, mm) ||
+ lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
+ goto nomatch;
+ }
+ rec_mm_callcomp(J, ix, op);
+ return;
+ }
+#endif
+ nomatch:
+ /* Lookup failed. Retry with __lt and swapped operands. */
+ if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */
+ ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ copyTV(J->L, &ix->keyv, &ix->valv);
+ copyTV(J->L, &ix->valv, &ix->tabv);
+ op ^= 3;
+ }
+}
+
+#if LJ_HASFFI
+/* Setup call to cdata comparison metamethod. */
+static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
+{
+ lj_snap_add(J);
+ if (tref_iscdata(ix->val)) {
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ } else {
+ lua_assert(tref_iscdata(ix->key));
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ }
+ lj_record_mm_lookup(J, ix, mm);
+ rec_mm_callcomp(J, ix, op);
+}
+#endif
+
+/* -- Indexed access ------------------------------------------------------ */
+
+/* Record bounds-check. */
+static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
+{
+ /* Try to emit invariant bounds checks. */
+ if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
+ (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
+ IRRef ref = tref_ref(ikey);
+ IRIns *ir = IR(ref);
+ int32_t ofs = 0;
+ IRRef ofsref = 0;
+ /* Handle constant offsets. */
+ if (ir->o == IR_ADD && irref_isk(ir->op2)) {
+ ofsref = ir->op2;
+ ofs = IR(ofsref)->i;
+ ref = ir->op1;
+ ir = IR(ref);
+ }
+ /* Got scalar evolution analysis results for this reference? */
+ if (ref == J->scev.idx) {
+ int32_t stop;
+ lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD);
+ stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
+ /* Runtime value for stop of loop is within bounds? */
+ if ((int64_t)stop + ofs < (int64_t)asize) {
+ /* Emit invariant bounds check for stop. */
+ emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
+ emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
+ /* Emit invariant bounds check for start, if not const or negative. */
+ if (!(J->scev.dir && J->scev.start &&
+ (int64_t)IR(J->scev.start)->i + ofs >= 0))
+ emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
+ return;
+ }
+ }
+ }
+ emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */
+}
+
+/* Record indexed key lookup. */
+static TRef rec_idx_key(jit_State *J, RecordIndex *ix)
+{
+ TRef key;
+ GCtab *t = tabV(&ix->tabv);
+ ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */
+
+ /* Integer keys are looked up in the array part first. */
+ key = ix->key;
+ if (tref_isnumber(key)) {
+ int32_t k = numberVint(&ix->keyv);
+ if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
+ k = LJ_MAX_ASIZE;
+ if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */
+ TRef ikey = lj_opt_narrow_index(J, key);
+ TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
+ if ((MSize)k < t->asize) { /* Currently an array key? */
+ TRef arrayref;
+ rec_idx_abc(J, asizeref, ikey, t->asize);
+ arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY);
+ return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey);
+ } else { /* Currently not in array (may be an array extension)? */
+ emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */
+ if (k == 0 && tref_isk(key))
+ key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */
+ /* And continue with the hash lookup. */
+ }
+ } else if (!tref_isk(key)) {
+ /* We can rule out const numbers which failed the integerness test
+ ** above. But all other numbers are potential array keys.
+ */
+ if (t->asize == 0) { /* True sparse tables have an empty array part. */
+ /* Guard that the array part stays empty. */
+ TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
+ emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
+ } else {
+ lj_trace_err(J, LJ_TRERR_NYITMIX);
+ }
+ }
+ }
+
+ /* Otherwise the key is located in the hash part. */
+ if (t->hmask == 0) { /* Shortcut for empty hash part. */
+ /* Guard that the hash part stays empty. */
+ TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
+ emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ }
+ if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */
+ key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
+ if (tref_isk(key)) {
+ /* Optimize lookup of constant hash keys. */
+ MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
+ if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
+ hslot <= 65535*(MSize)sizeof(Node)) {
+ TRef node, kslot;
+ TRef hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
+ emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
+ node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE);
+ kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
+ return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot);
+ }
+ }
+ /* Fall back to a regular hash lookup. */
+ return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key);
+}
+
+/* Determine whether a key is NOT one of the fast metamethod names. */
+static int nommstr(jit_State *J, TRef key)
+{
+ if (tref_isstr(key)) {
+ if (tref_isk(key)) {
+ GCstr *str = ir_kstr(IR(tref_ref(key)));
+ uint32_t mm;
+ for (mm = 0; mm <= MM_FAST; mm++)
+ if (mmname_str(J2G(J), mm) == str)
+ return 0; /* MUST be one of the fast metamethod names. */
+ } else {
+ return 0; /* Variable string key MAY be a metamethod name. */
+ }
+ }
+ return 1; /* CANNOT be a metamethod name. */
+}
+
+/* Record indexed load/store. */
+TRef lj_record_idx(jit_State *J, RecordIndex *ix)
+{
+ TRef xref;
+ IROp xrefop, loadop;
+ cTValue *oldv;
+
+ while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
+ /* Never call raw lj_record_idx() on non-table. */
+ lua_assert(ix->idxchain != 0);
+ if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ handlemm:
+ if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */
+ BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *tv = J->L->base + func;
+ base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
+ setfuncV(J->L, tv+0, funcV(&ix->mobjv));
+ copyTV(J->L, tv+1, &ix->tabv);
+ copyTV(J->L, tv+2, &ix->keyv);
+ if (ix->val) {
+ base[3] = ix->val;
+ copyTV(J->L, tv+3, &ix->valv);
+ lj_record_call(J, func, 3); /* mobj(tab, key, val) */
+ return 0;
+ } else {
+ lj_record_call(J, func, 2); /* res = mobj(tab, key) */
+ return 0; /* No result yet. */
+ }
+ }
+ /* Otherwise retry lookup with metaobject. */
+ ix->tab = ix->mobj;
+ copyTV(J->L, &ix->tabv, &ix->mobjv);
+ if (--ix->idxchain == 0)
+ lj_trace_err(J, LJ_TRERR_IDXLOOP);
+ }
+
+ /* First catch nil and NaN keys for tables. */
+ if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
+ if (ix->val) /* Better fail early. */
+ lj_trace_err(J, LJ_TRERR_STORENN);
+ if (tref_isk(ix->key)) {
+ if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
+ goto handlemm;
+ return TREF_NIL;
+ }
+ }
+
+ /* Record the key lookup. */
+ xref = rec_idx_key(J, ix);
+ xrefop = IR(tref_ref(xref))->o;
+ loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
+ /* The lj_meta_tset() inconsistency is gone, but better play safe. */
+ oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;
+
+ if (ix->val == 0) { /* Indexed load */
+ IRType t = itype2irt(oldv);
+ TRef res;
+ if (oldv == niltvg(J2G(J))) {
+ emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ res = TREF_NIL;
+ } else {
+ res = emitir(IRTG(loadop, t), xref, 0);
+ }
+ if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
+ goto handlemm;
+ if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */
+ return res;
+ } else { /* Indexed store. */
+ GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
+ int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
+ if (tvisnil(oldv)) { /* Previous value was nil? */
+ /* Need to duplicate the hasmm check for the early guards. */
+ int hasmm = 0;
+ if (ix->idxchain && mt) {
+ cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
+ hasmm = mo && !tvisnil(mo);
+ }
+ if (hasmm)
+ emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */
+ else if (xrefop == IR_HREF)
+ emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32),
+ xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
+ lua_assert(hasmm);
+ goto handlemm;
+ }
+ lua_assert(!hasmm);
+ if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */
+ TRef key = ix->key;
+ if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */
+ key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
+ xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key);
+ keybarrier = 0; /* NEWREF already takes care of the key barrier. */
+ }
+ } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
+ /* Cannot derive that the previous value was non-nil, must do checks. */
+ if (xrefop == IR_HREF) /* Guard against store to niltv. */
+ emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ if (ix->idxchain) { /* Metamethod lookup required? */
+ /* A check for NULL metatable is cheaper (hoistable) than a load. */
+ if (!mt) {
+ TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
+ } else {
+ IRType t = itype2irt(oldv);
+ emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */
+ }
+ }
+ } else {
+ keybarrier = 0; /* Previous non-nil value kept the key alive. */
+ }
+ /* Convert int to number before storing. */
+ if (!LJ_DUALNUM && tref_isinteger(ix->val))
+ ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
+ emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
+ if (keybarrier || tref_isgcv(ix->val))
+ emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
+ /* Invalidate neg. metamethod cache for stores with certain string keys. */
+ if (!nommstr(J, ix->key)) {
+ TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM);
+ emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
+ }
+ J->needsnap = 1;
+ return 0;
+ }
+}
+
+/* -- Upvalue access ------------------------------------------------------ */
+
+/* Check whether upvalue is immutable and ok to constify. */
+static int rec_upvalue_constify(jit_State *J, GCupval *uvp)
+{
+ if (uvp->immutable) {
+ cTValue *o = uvval(uvp);
+ /* Don't constify objects that may retain large amounts of memory. */
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) {
+ CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid);
+ if (!ctype_hassize(ct->info) || ct->size <= 16)
+ return 1;
+ }
+ return 0;
+ }
+#else
+ UNUSED(J);
+#endif
+ if (!(tvistab(o) || tvisudata(o) || tvisthread(o)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Record upvalue load/store. */
+static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
+{
+ GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
+ TRef fn = getcurrf(J);
+ IRRef uref;
+ int needbarrier = 0;
+ if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */
+ TRef tr, kfunc;
+ lua_assert(val == 0);
+ if (!tref_isk(fn)) { /* Late specialization of current function. */
+ if (J->pt->flags >= PROTO_CLC_POLY)
+ goto noconstify;
+ kfunc = lj_ir_kfunc(J, J->fn);
+ emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
+ J->base[-1] = TREF_FRAME | kfunc;
+ fn = kfunc;
+ }
+ tr = lj_record_constify(J, uvval(uvp));
+ if (tr)
+ return tr;
+ }
+noconstify:
+ /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
+ uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
+ if (!uvp->closed) {
+ /* In current stack? */
+ if (uvval(uvp) >= tvref(J->L->stack) &&
+ uvval(uvp) < tvref(J->L->maxstack)) {
+ int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
+ if (slot >= 0) { /* Aliases an SSA slot? */
+ slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */
+ /* NYI: add IR to guard that it's still aliasing the same slot. */
+ if (val == 0) {
+ return getslot(J, slot);
+ } else {
+ J->base[slot] = val;
+ if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
+ return 0;
+ }
+ }
+ }
+ uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv));
+ } else {
+ needbarrier = 1;
+ uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv));
+ }
+ if (val == 0) { /* Upvalue load */
+ IRType t = itype2irt(uvval(uvp));
+ TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
+ if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */
+ return res;
+ } else { /* Upvalue store. */
+ /* Convert int to number before storing. */
+ if (!LJ_DUALNUM && tref_isinteger(val))
+ val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
+ emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
+ if (needbarrier && tref_isgcv(val))
+ emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
+ J->needsnap = 1;
+ return 0;
+ }
+}
+
+/* -- Record calls to Lua functions --------------------------------------- */
+
+/* Check unroll limits for calls. */
+static void check_call_unroll(jit_State *J, TraceNo lnk)
+{
+ cTValue *frame = J->L->base - 1;
+ void *pc = mref(frame_func(frame)->l.pc, void);
+ int32_t depth = J->framedepth;
+ int32_t count = 0;
+ if ((J->pt->flags & PROTO_VARARG)) depth--; /* Vararg frame still missing. */
+ for (; depth > 0; depth--) { /* Count frames with same prototype. */
+ frame = frame_prev(frame);
+ if (mref(frame_func(frame)->l.pc, void) == pc)
+ count++;
+ }
+ if (J->pc == J->startpc) {
+ if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
+ J->pc++;
+ if (J->framedepth + J->retdepth == 0)
+ rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-recursion. */
+ else
+ rec_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */
+ }
+ } else {
+ if (count > J->param[JIT_P_callunroll]) {
+ if (lnk) { /* Possible tail- or up-recursion. */
+ lj_trace_flush(J, lnk); /* Flush trace that only returns. */
+ /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
+ hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4));
+ }
+ lj_trace_err(J, LJ_TRERR_CUNROLL);
+ }
+ }
+}
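/*
** Illustrative sketch, not part of the diff above: the frame-counting idea
** behind check_call_unroll(), over a plain singly linked frame list. The
** struct and helper names are made up; the real recorder walks LuaJIT's
** stack frames and additionally turns tail-/up-recursion into trace links.
*/
struct toy_frame { const void *proto; struct toy_frame *prev; };

/* Count how many outer frames run the same prototype as the innermost one. */
static int same_proto_depth(const struct toy_frame *top)
{
  const void *proto = top->proto;
  int count = 0;
  for (top = top->prev; top != NULL; top = top->prev)
    if (top->proto == proto) count++;
  return count;
}

/* Recording should abort once this returns 0 (cf. LJ_TRERR_CUNROLL). */
static int within_call_unroll(const struct toy_frame *top, int callunroll)
{
  return same_proto_depth(top) <= callunroll;
}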
+
+/* Record Lua function setup. */
+static void rec_func_setup(jit_State *J)
+{
+ GCproto *pt = J->pt;
+ BCReg s, numparams = pt->numparams;
+ if ((pt->flags & PROTO_NOJIT))
+ lj_trace_err(J, LJ_TRERR_CJITOFF);
+ if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ /* Fill up missing parameters with nil. */
+ for (s = J->maxslot; s < numparams; s++)
+ J->base[s] = TREF_NIL;
+ /* The remaining slots should never be read before they are written. */
+ J->maxslot = numparams;
+}
+
+/* Record Lua vararg function setup. */
+static void rec_func_vararg(jit_State *J)
+{
+ GCproto *pt = J->pt;
+ BCReg s, fixargs, vframe = J->maxslot+1;
+ lua_assert((pt->flags & PROTO_VARARG));
+ if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ J->base[vframe-1] = J->base[-1]; /* Copy function up. */
+ /* Copy fixarg slots up and set their original slots to nil. */
+ fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
+ for (s = 0; s < fixargs; s++) {
+ J->base[vframe+s] = J->base[s];
+ J->base[s] = TREF_NIL;
+ }
+ J->maxslot = fixargs;
+ J->framedepth++;
+ J->base += vframe;
+ J->baseslot += vframe;
+}
+
+/* Record entry to a Lua function. */
+static void rec_func_lua(jit_State *J)
+{
+ rec_func_setup(J);
+ check_call_unroll(J, 0);
+}
+
+/* Record entry to an already compiled function. */
+static void rec_func_jit(jit_State *J, TraceNo lnk)
+{
+ GCtrace *T;
+ rec_func_setup(J);
+ T = traceref(J, lnk);
+ if (T->linktype == LJ_TRLINK_RETURN) { /* Trace returns to interpreter? */
+ check_call_unroll(J, lnk);
+ /* Temporarily unpatch JFUNC* to continue recording across function. */
+ J->patchins = *J->pc;
+ J->patchpc = (BCIns *)J->pc;
+ *J->patchpc = T->startins;
+ return;
+ }
+ J->instunroll = 0; /* Cannot continue across a compiled function. */
+ if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
+ rec_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-recursion. */
+ else
+ rec_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */
+}
+
+/* -- Vararg handling ----------------------------------------------------- */
+
+/* Detect y = select(x, ...) idiom. */
+static int select_detect(jit_State *J)
+{
+ BCIns ins = J->pc[1];
+ if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
+ cTValue *func = &J->L->base[bc_a(ins)];
+ if (tvisfunc(func) && funcV(func)->c.ffid == FF_select)
+ return 1;
+ }
+ return 0;
+}
+
+/* Record vararg instruction. */
+static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
+{
+ int32_t numparams = J->pt->numparams;
+ ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1;
+ lua_assert(frame_isvarg(J->L->base-1));
+ if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */
+ ptrdiff_t i;
+ if (nvararg < 0) nvararg = 0;
+ if (nresults == -1) {
+ nresults = nvararg;
+ J->maxslot = dst + (BCReg)nvararg;
+ } else if (dst + nresults > J->maxslot) {
+ J->maxslot = dst + (BCReg)nresults;
+ }
+ for (i = 0; i < nresults; i++) {
+ J->base[dst+i] = i < nvararg ? J->base[i - nvararg - 1] : TREF_NIL;
+ lua_assert(J->base[dst+i] != 0);
+ }
+ } else { /* Unknown number of varargs passed to trace. */
+ TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME);
+ int32_t frofs = 8*(1+numparams)+FRAME_VARG;
+ if (nresults >= 0) { /* Known fixed number of results. */
+ ptrdiff_t i;
+ if (nvararg > 0) {
+ ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
+ TRef vbase;
+ if (nvararg >= nresults)
+ emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
+ else
+ emitir(IRTGI(IR_EQ), fr, lj_ir_kint(J, frame_ftsz(J->L->base-1)));
+ vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
+ vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
+ for (i = 0; i < nload; i++) {
+ IRType t = itype2irt(&J->L->base[i-1-nvararg]);
+ TRef aref = emitir(IRT(IR_AREF, IRT_P32),
+ vbase, lj_ir_kint(J, (int32_t)i));
+ TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
+ if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
+ J->base[dst+i] = tr;
+ }
+ } else {
+ emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
+ nvararg = 0;
+ }
+ for (i = nvararg; i < nresults; i++)
+ J->base[dst+i] = TREF_NIL;
+ if (dst + (BCReg)nresults > J->maxslot)
+ J->maxslot = dst + (BCReg)nresults;
+ } else if (select_detect(J)) { /* y = select(x, ...) */
+ TRef tridx = J->base[dst-1];
+ TRef tr = TREF_NIL;
+ ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
+ if (idx < 0) goto nyivarg;
+ if (idx != 0 && !tref_isinteger(tridx))
+ tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
+ if (idx != 0 && tref_isk(tridx)) {
+ emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
+ fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
+ frofs -= 8; /* Bias for 1-based index. */
+ } else if (idx <= nvararg) { /* Compute size. */
+ TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
+ if (numparams)
+ emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
+ tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
+ if (idx != 0) {
+ tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
+ rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
+ }
+ } else {
+ TRef tmp = lj_ir_kint(J, frofs);
+ if (idx != 0) {
+ TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
+ tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
+ } else {
+ tr = lj_ir_kint(J, 0);
+ }
+ emitir(IRTGI(IR_LT), fr, tmp);
+ }
+ if (idx != 0 && idx <= nvararg) {
+ IRType t;
+ TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
+ vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
+ t = itype2irt(&J->L->base[idx-2-nvararg]);
+ aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx);
+ tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
+ if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
+ }
+ J->base[dst-2] = tr;
+ J->maxslot = dst-1;
+ J->bcskip = 2; /* Skip CALLM + select. */
+ } else {
+ nyivarg:
+ setintV(&J->errinfo, BC_VARG);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+ }
+ }
+}
+
+/* -- Record allocations -------------------------------------------------- */
+
+static TRef rec_tnew(jit_State *J, uint32_t ah)
+{
+ uint32_t asize = ah & 0x7ff;
+ uint32_t hbits = ah >> 11;
+ if (asize == 0x7ff) asize = 0x801;
+ return emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
+}
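/*
** Illustrative sketch, not part of the diff above: decoding the packed TNEW
** operand consumed by rec_tnew(). The low 11 bits carry the array-size hint,
** the upper bits the hash-part hint (a log2 size), and the all-ones array
** field (0x7ff) is a sentinel widened to 0x801. Names here are made up.
*/
#include <stdint.h>
#include <stdio.h>

static void decode_tnew_operand(uint32_t ah)
{
  uint32_t asize = ah & 0x7ff;
  uint32_t hbits = ah >> 11;
  if (asize == 0x7ff) asize = 0x801;     /* Sentinel: "large" array part. */
  printf("asize=%u hbits=%u\n", (unsigned)asize, (unsigned)hbits);
}

int main(void)
{
  decode_tnew_operand((3u << 11) | 16u); /* 16 array slots, 2^3 hash slots. */
  decode_tnew_operand(0x7ff);            /* Sentinel array size. */
  return 0;
}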
+
+/* -- Record bytecode ops ------------------------------------------------- */
+
+/* Prepare for comparison. */
+static void rec_comp_prep(jit_State *J)
+{
+ /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */
+ if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
+ emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
+ lj_snap_add(J);
+}
+
+/* Fixup comparison. */
+static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
+{
+ BCIns jmpins = pc[1];
+ const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
+ J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
+ J->needsnap = 1;
+ if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
+ lj_snap_shrink(J); /* Shrink last snapshot if possible. */
+}
+
+/* Record the next bytecode instruction (_before_ it's executed). */
+void lj_record_ins(jit_State *J)
+{
+ cTValue *lbase;
+ RecordIndex ix;
+ const BCIns *pc;
+ BCIns ins;
+ BCOp op;
+ TRef ra, rb, rc;
+
+ /* Perform post-processing action before recording the next instruction. */
+ if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
+ switch (J->postproc) {
+ case LJ_POST_FIXCOMP: /* Fixup comparison. */
+ pc = frame_pc(&J2G(J)->tmptv);
+ rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
+ /* fallthrough */
+ case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */
+ case LJ_POST_FIXGUARDSNAP: /* Fixup and emit pending guard and snapshot. */
+ if (!tvistruecond(&J2G(J)->tmptv2)) {
+ J->fold.ins.o ^= 1; /* Flip guard to opposite. */
+ if (J->postproc == LJ_POST_FIXGUARDSNAP) {
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ J->cur.snapmap[snap->mapofs+snap->nent-1]--; /* False -> true. */
+ }
+ }
+ lj_opt_fold(J); /* Emit pending guard. */
+ /* fallthrough */
+ case LJ_POST_FIXBOOL:
+ if (!tvistruecond(&J2G(J)->tmptv2)) {
+ BCReg s;
+ TValue *tv = J->L->base;
+ for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */
+ if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) {
+ J->base[s] = TREF_FALSE;
+ break;
+ }
+ }
+ break;
+ case LJ_POST_FIXCONST:
+ {
+ BCReg s;
+ TValue *tv = J->L->base;
+ for (s = 0; s < J->maxslot; s++) /* Constify stack slots (if any). */
+ if (J->base[s] == TREF_NIL && !tvisnil(&tv[s]))
+ J->base[s] = lj_record_constify(J, &tv[s]);
+ }
+ break;
+ case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. */
+ if (bc_op(*J->pc) >= BC__MAX)
+ return;
+ break;
+ default: lua_assert(0); break;
+ }
+ J->postproc = LJ_POST_NONE;
+ }
+
+ /* Need snapshot before recording next bytecode (e.g. after a store). */
+ if (J->needsnap) {
+ J->needsnap = 0;
+ lj_snap_purge(J);
+ lj_snap_add(J);
+ J->mergesnap = 1;
+ }
+
+ /* Skip some bytecodes. */
+ if (LJ_UNLIKELY(J->bcskip > 0)) {
+ J->bcskip--;
+ return;
+ }
+
+ /* Record only closed loops for root traces. */
+ pc = J->pc;
+ if (J->framedepth == 0 &&
+ (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+
+#ifdef LUA_USE_ASSERT
+ rec_check_slots(J);
+ rec_check_ir(J);
+#endif
+
+ /* Keep a copy of the runtime values of var/num/str operands. */
+#define rav (&ix.valv)
+#define rbv (&ix.tabv)
+#define rcv (&ix.keyv)
+
+ lbase = J->L->base;
+ ins = *pc;
+ op = bc_op(ins);
+ ra = bc_a(ins);
+ ix.val = 0;
+ switch (bcmode_a(op)) {
+ case BCMvar:
+ copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
+ default: break; /* Handled later. */
+ }
+ rb = bc_b(ins);
+ rc = bc_c(ins);
+ switch (bcmode_b(op)) {
+ case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */
+ case BCMvar:
+ copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
+ default: break; /* Handled later. */
+ }
+ switch (bcmode_c(op)) {
+ case BCMvar:
+ copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
+ case BCMpri: setitype(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
+ case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
+ copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
+ lj_ir_knumint(J, numV(tv)); } break;
+ case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
+ setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
+ default: break; /* Handled later. */
+ }
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+#if LJ_HASFFI
+ if (tref_iscdata(ra) || tref_iscdata(rc)) {
+ rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
+ break;
+ }
+#endif
+ /* Emit nothing for two numeric or string consts. */
+ if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
+ IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
+ IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
+ int irop;
+ if (ta != tc) {
+ /* Widen mixed number/int comparisons to number/number comparison. */
+ if (ta == IRT_INT && tc == IRT_NUM) {
+ ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
+ ta = IRT_NUM;
+ } else if (ta == IRT_NUM && tc == IRT_INT) {
+ rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ } else if (LJ_52) {
+ ta = IRT_NIL; /* Force metamethod for different types. */
+ } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
+ (tc == IRT_FALSE || tc == IRT_TRUE))) {
+ break; /* Interpreter will throw for two different types. */
+ }
+ }
+ rec_comp_prep(J);
+ irop = (int)op - (int)BC_ISLT + (int)IR_LT;
+ if (ta == IRT_NUM) {
+ if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. */
+ if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
+ irop ^= 5;
+ } else if (ta == IRT_INT) {
+ if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
+ irop ^= 1;
+ } else if (ta == IRT_STR) {
+ if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
+ ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
+ rc = lj_ir_kint(J, 0);
+ ta = IRT_INT;
+ } else {
+ rec_mm_comp(J, &ix, (int)op);
+ break;
+ }
+ emitir(IRTG(irop, ta), ra, rc);
+ rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ case BC_ISEQS: case BC_ISNES:
+ case BC_ISEQN: case BC_ISNEN:
+ case BC_ISEQP: case BC_ISNEP:
+#if LJ_HASFFI
+ if (tref_iscdata(ra) || tref_iscdata(rc)) {
+ rec_mm_comp_cdata(J, &ix, op, MM_eq);
+ break;
+ }
+#endif
+ /* Emit nothing for two non-table, non-udata consts. */
+ if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
+ int diff;
+ rec_comp_prep(J);
+ diff = lj_record_objcmp(J, ra, rc, rav, rcv);
+ if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra)))
+ rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
+ else if (diff == 1) /* Check __eq only if the values differ but share a type. */
+ rec_mm_equal(J, &ix, (int)op);
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC:
+ if ((op & 1) == tref_istruecond(rc))
+ rc = 0; /* Don't store if condition is not true. */
+ /* fallthrough */
+ case BC_IST: case BC_ISF: /* Type specialization suffices. */
+ if (bc_a(pc[1]) < J->maxslot)
+ J->maxslot = bc_a(pc[1]); /* Shrink used slots. */
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_NOT:
+ /* Type specialization already forces const result. */
+ rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
+ break;
+
+ case BC_LEN:
+ if (tref_isstr(rc))
+ rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
+ else if (!LJ_52 && tref_istab(rc))
+ rc = lj_ir_call(J, IRCALL_lj_tab_len, rc);
+ else
+ rc = rec_mm_len(J, rc, rcv);
+ break;
+
+ /* -- Arithmetic ops ---------------------------------------------------- */
+
+ case BC_UNM:
+ if (tref_isnumber_str(rc)) {
+ rc = lj_opt_narrow_unm(J, rc, rcv);
+ } else {
+ ix.tab = rc;
+ copyTV(J->L, &ix.tabv, rcv);
+ rc = rec_mm_arith(J, &ix, MM_unm);
+ }
+ break;
+
+ case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
+ /* Swap rb/rc and rbv/rcv. rav is temp. */
+ ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
+ copyTV(J->L, rav, rbv);
+ copyTV(J->L, rbv, rcv);
+ copyTV(J->L, rcv, rav);
+ if (op == BC_MODNV)
+ goto recmod;
+ /* fallthrough */
+ case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
+ case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
+ MMS mm = bcmode_mm(op);
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
+ (int)mm - (int)MM_add + (int)IR_ADD);
+ else
+ rc = rec_mm_arith(J, &ix, mm);
+ break;
+ }
+
+ case BC_MODVN: case BC_MODVV:
+ recmod:
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_mod(J, rb, rc, rcv);
+ else
+ rc = rec_mm_arith(J, &ix, MM_mod);
+ break;
+
+ case BC_POW:
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv);
+ else
+ rc = rec_mm_arith(J, &ix, MM_pow);
+ break;
+
+ /* -- Constant and move ops --------------------------------------------- */
+
+ case BC_MOV:
+ /* Clear gap of method call to avoid resurrecting previous refs. */
+ if (ra > J->maxslot) J->base[ra-1] = 0;
+ break;
+ case BC_KSTR: case BC_KNUM: case BC_KPRI:
+ break;
+ case BC_KSHORT:
+ rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
+ break;
+ case BC_KNIL:
+ while (ra <= rc)
+ J->base[ra++] = TREF_NIL;
+ if (rc >= J->maxslot) J->maxslot = rc+1;
+ break;
+#if LJ_HASFFI
+ case BC_KCDATA:
+ rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
+ break;
+#endif
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ rc = rec_upvalue(J, rc, 0);
+ break;
+ case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
+ rec_upvalue(J, ra, rc);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_GGET: case BC_GSET:
+ settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
+ ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
+ ix.idxchain = LJ_MAX_IDXCHAIN;
+ rc = lj_record_idx(J, &ix);
+ break;
+
+ case BC_TGETB: case BC_TSETB:
+ setintV(&ix.keyv, (int32_t)rc);
+ ix.key = lj_ir_kint(J, (int32_t)rc);
+ /* fallthrough */
+ case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
+ ix.idxchain = LJ_MAX_IDXCHAIN;
+ rc = lj_record_idx(J, &ix);
+ break;
+
+ case BC_TNEW:
+ rc = rec_tnew(J, rc);
+ break;
+ case BC_TDUP:
+ rc = emitir(IRTG(IR_TDUP, IRT_TAB),
+ lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_ITERC:
+ J->base[ra] = getslot(J, ra-3);
+ J->base[ra+1] = getslot(J, ra-2);
+ J->base[ra+2] = getslot(J, ra-1);
+ { /* Do the actual copy now because lj_record_call needs the values. */
+ TValue *b = &J->L->base[ra];
+ copyTV(J->L, b, b-3);
+ copyTV(J->L, b+1, b-2);
+ copyTV(J->L, b+2, b-1);
+ }
+ lj_record_call(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
+ case BC_CALLM:
+ rc = (BCReg)(J->L->top - J->L->base) - ra;
+ /* fallthrough */
+ case BC_CALL:
+ lj_record_call(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ case BC_CALLMT:
+ rc = (BCReg)(J->L->top - J->L->base) - ra;
+ /* fallthrough */
+ case BC_CALLT:
+ lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ case BC_VARG:
+ rec_varg(J, ra, (ptrdiff_t)rb-1);
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
+ rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
+ /* fallthrough */
+ case BC_RET: case BC_RET0: case BC_RET1:
+ lj_record_ret(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORI:
+ if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
+ J->loopref = J->cur.nins;
+ break;
+ case BC_JFORI:
+ lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL);
+ if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */
+ rec_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
+ /* Continue tracing if the loop is not entered. */
+ break;
+
+ case BC_FORL:
+ rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
+ break;
+ case BC_ITERL:
+ rec_loop_interp(J, pc, rec_iterl(J, *pc));
+ break;
+ case BC_LOOP:
+ rec_loop_interp(J, pc, rec_loop(J, ra));
+ break;
+
+ case BC_JFORL:
+ rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
+ break;
+ case BC_JITERL:
+ rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
+ break;
+ case BC_JLOOP:
+ rec_loop_jit(J, rc, rec_loop(J, ra));
+ break;
+
+ case BC_IFORL:
+ case BC_IITERL:
+ case BC_ILOOP:
+ case BC_IFUNCF:
+ case BC_IFUNCV:
+ lj_trace_err(J, LJ_TRERR_BLACKL);
+ break;
+
+ case BC_JMP:
+ if (ra < J->maxslot)
+ J->maxslot = ra; /* Shrink used slots. */
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ rec_func_lua(J);
+ break;
+ case BC_JFUNCF:
+ rec_func_jit(J, rc);
+ break;
+
+ case BC_FUNCV:
+ rec_func_vararg(J);
+ rec_func_lua(J);
+ break;
+ case BC_JFUNCV:
+ lua_assert(0); /* Cannot happen. No hotcall counting for vararg funcs. */
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ lj_ffrecord_func(J);
+ break;
+
+ default:
+ if (op >= BC__MAX) {
+ lj_ffrecord_func(J);
+ break;
+ }
+ /* fallthrough */
+ case BC_ITERN:
+ case BC_ISNEXT:
+ case BC_CAT:
+ case BC_UCLO:
+ case BC_FNEW:
+ case BC_TSETM:
+ setintV(&J->errinfo, (int32_t)op);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+ break;
+ }
+
+ /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
+ if (bcmode_a(op) == BCMdst && rc) {
+ J->base[ra] = rc;
+ if (ra >= J->maxslot) J->maxslot = ra+1;
+ }
+
+#undef rav
+#undef rbv
+#undef rcv
+
+ /* Limit the number of recorded IR instructions. */
+ if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord])
+ lj_trace_err(J, LJ_TRERR_TRACEOV);
+}
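/*
** Illustrative sketch, not part of the diff above: how the bc_op/bc_a/bc_b/
** bc_c/bc_d accessors used throughout lj_record_ins() pick their fields out
** of a 32-bit instruction. This follows the LuaJIT 2.x ABC/AD layout as
** described in lj_bc.h; the MY_* reimplementations exist only for this
** example and are not the project's macros.
*/
#include <stdint.h>

#define MY_BC_OP(i)  ((i) & 0xff)          /* Opcode: lowest byte. */
#define MY_BC_A(i)   (((i) >> 8) & 0xff)   /* Operand A: second byte. */
#define MY_BC_C(i)   (((i) >> 16) & 0xff)  /* Operand C: third byte (ABC format). */
#define MY_BC_B(i)   ((i) >> 24)           /* Operand B: top byte (ABC format). */
#define MY_BC_D(i)   ((i) >> 16)           /* Operand D: top 16 bits (AD format). */
#define MY_BCBIAS_J  0x8000                /* Jump offsets are stored biased... */
#define MY_BC_J(i)   ((int32_t)MY_BC_D(i) - MY_BCBIAS_J)  /* ...so this is signed. */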
+
+/* -- Recording setup ----------------------------------------------------- */
+
+/* Setup recording for a root trace started by a hot loop. */
+static const BCIns *rec_setup_root(jit_State *J)
+{
+ /* Determine the next PC and the bytecode range for the loop. */
+ const BCIns *pcj, *pc = J->pc;
+ BCIns ins = *pc;
+ BCReg ra = bc_a(ins);
+ switch (bc_op(ins)) {
+ case BC_FORL:
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ pc += 1+bc_j(ins);
+ J->bc_min = pc;
+ break;
+ case BC_ITERL:
+ lua_assert(bc_op(pc[-1]) == BC_ITERC);
+ J->maxslot = ra + bc_b(pc[-1]) - 1;
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ pc += 1+bc_j(ins);
+ lua_assert(bc_op(pc[-1]) == BC_JMP);
+ J->bc_min = pc;
+ break;
+ case BC_LOOP:
+ /* Check the BC range only for real loops, not for "repeat until true". */
+ pcj = pc + bc_j(ins);
+ ins = *pcj;
+ if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
+ J->bc_min = pcj+1 + bc_j(ins);
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ }
+ J->maxslot = ra;
+ pc++;
+ break;
+ case BC_RET:
+ case BC_RET0:
+ case BC_RET1:
+ /* No bytecode range check for down-recursive root traces. */
+ J->maxslot = ra + bc_d(ins);
+ break;
+ case BC_FUNCF:
+ /* No bytecode range check for root traces started by a hot call. */
+ J->maxslot = J->pt->numparams;
+ pc++;
+ break;
+ default:
+ lua_assert(0);
+ break;
+ }
+ return pc;
+}
+
+/* Setup for recording a new trace. */
+void lj_record_setup(jit_State *J)
+{
+ uint32_t i;
+
+ /* Initialize state related to current trace. */
+ memset(J->slot, 0, sizeof(J->slot));
+ memset(J->chain, 0, sizeof(J->chain));
+ memset(J->bpropcache, 0, sizeof(J->bpropcache));
+ J->scev.idx = REF_NIL;
+
+ J->baseslot = 1; /* Invoking function is at base[-1]. */
+ J->base = J->slot + J->baseslot;
+ J->maxslot = 0;
+ J->framedepth = 0;
+ J->retdepth = 0;
+
+ J->instunroll = J->param[JIT_P_instunroll];
+ J->loopunroll = J->param[JIT_P_loopunroll];
+ J->tailcalled = 0;
+ J->loopref = 0;
+
+ J->bc_min = NULL; /* Means no limit. */
+ J->bc_extent = ~(MSize)0;
+
+ /* Emit instructions for fixed references. Also triggers initial IR alloc. */
+ emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno);
+ for (i = 0; i <= 2; i++) {
+ IRIns *ir = IR(REF_NIL-i);
+ ir->i = 0;
+ ir->t.irt = (uint8_t)(IRT_NIL+i);
+ ir->o = IR_KPRI;
+ ir->prev = 0;
+ }
+ J->cur.nk = REF_TRUE;
+
+ J->startpc = J->pc;
+ setmref(J->cur.startpc, J->pc);
+ if (J->parent) { /* Side trace. */
+ GCtrace *T = traceref(J, J->parent);
+ TraceNo root = T->root ? T->root : J->parent;
+ J->cur.root = (uint16_t)root;
+ J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
+ /* Check whether we could at least potentially form an extra loop. */
+ if (J->exitno == 0 && T->snap[0].nent == 0) {
+ /* We can narrow a FORL for some side traces, too. */
+ if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
+ bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
+ lj_snap_add(J);
+ rec_for_loop(J, J->pc-1, &J->scev, 1);
+ goto sidecheck;
+ }
+ } else {
+ J->startpc = NULL; /* Prevent forming an extra loop. */
+ }
+ lj_snap_replay(J, T);
+ sidecheck:
+ if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
+ T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
+ J->param[JIT_P_tryside]) {
+ rec_stop(J, LJ_TRLINK_INTERP, 0);
+ }
+ } else { /* Root trace. */
+ J->cur.root = 0;
+ J->cur.startins = *J->pc;
+ J->pc = rec_setup_root(J);
+ /* Note: the loop instruction itself is recorded at the end and not
+ ** at the start! So snapshot #0 needs to point to the *next* instruction.
+ */
+ lj_snap_add(J);
+ if (bc_op(J->cur.startins) == BC_FORL)
+ rec_for_loop(J, J->pc-1, &J->scev, 1);
+ if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ }
+#ifdef LUAJIT_ENABLE_CHECKHOOK
+ /* Regularly check for instruction/line hooks from compiled code and
+ ** exit to the interpreter if the hooks are set.
+ **
+ ** This is a compile-time option and disabled by default, since the
+ ** hook checks may be quite expensive in tight loops.
+ **
+ ** Note this is only useful if hooks are *not* set most of the time.
+ ** Use this only if you want to *asynchronously* interrupt the execution.
+ **
+ ** You can set the instruction hook via lua_sethook() with a count of 1
+ ** from a signal handler or another native thread. Please have a look
+ ** at the first few functions in luajit.c for an example (Ctrl-C handler).
+ */
+ {
+ TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
+ lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
+ tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
+ }
+#endif
+}
+
+#undef IR
+#undef emitir_raw
+#undef emitir
+
+#endif
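The LUAJIT_ENABLE_CHECKHOOK note above points at the standard way to interrupt a running VM asynchronously. Below is a minimal sketch of that pattern, using only the public Lua C API; the handler names and the global state pointer are made up for this example (the real Ctrl-C handling lives in luajit.c).

#include <signal.h>
#include <lua.h>
#include <lauxlib.h>

static lua_State *globalL;  /* Set by the host before installing the handler. */

/* Count hook: runs at the next safe point after the signal was delivered. */
static void stop_hook(lua_State *L, lua_Debug *ar)
{
  (void)ar;
  lua_sethook(L, NULL, 0, 0);      /* One-shot: remove the hook again. */
  luaL_error(L, "interrupted!");   /* Unwind out of the running Lua code. */
}

/* Signal handler: only arms the hook; all real work happens in stop_hook(). */
static void on_sigint(int sig)
{
  (void)sig;
  lua_sethook(globalL, stop_hook, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
}

/* Install with: globalL = L; signal(SIGINT, on_sigint); */

Without LUAJIT_ENABLE_CHECKHOOK, compiled traces do not poll the hook mask; the guarded load of hookmask in the block above is exactly what closes that gap.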
diff --git a/3rdparty/lua/src/lj_record.h b/3rdparty/lua/src/lj_record.h
index 995702e..287b260 100644
--- a/3rdparty/lua/src/lj_record.h
+++ b/3rdparty/lua/src/lj_record.h
@@ -1,44 +1,44 @@
-/*
-** Trace recorder (bytecode -> SSA IR).
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_RECORD_H
-#define _LJ_RECORD_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-/* Context for recording an indexed load/store. */
-typedef struct RecordIndex {
- TValue tabv; /* Runtime value of table (or indexed object). */
- TValue keyv; /* Runtime value of key. */
- TValue valv; /* Runtime value of stored value. */
- TValue mobjv; /* Runtime value of metamethod object. */
- GCtab *mtv; /* Runtime value of metatable object. */
- cTValue *oldv; /* Runtime value of previously stored value. */
- TRef tab; /* Table (or indexed object) reference. */
- TRef key; /* Key reference. */
- TRef val; /* Value reference for a store or 0 for a load. */
- TRef mt; /* Metatable reference. */
- TRef mobj; /* Metamethod object reference. */
- int idxchain; /* Index indirections left or 0 for raw lookup. */
-} RecordIndex;
-
-LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b,
- cTValue *av, cTValue *bv);
-LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o);
-
-LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs);
-LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs);
-LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults);
-
-LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm);
-LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix);
-
-LJ_FUNC void lj_record_ins(jit_State *J);
-LJ_FUNC void lj_record_setup(jit_State *J);
-#endif
-
-#endif
+/*
+** Trace recorder (bytecode -> SSA IR).
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_RECORD_H
+#define _LJ_RECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* Context for recording an indexed load/store. */
+typedef struct RecordIndex {
+ TValue tabv; /* Runtime value of table (or indexed object). */
+ TValue keyv; /* Runtime value of key. */
+ TValue valv; /* Runtime value of stored value. */
+ TValue mobjv; /* Runtime value of metamethod object. */
+ GCtab *mtv; /* Runtime value of metatable object. */
+ cTValue *oldv; /* Runtime value of previously stored value. */
+ TRef tab; /* Table (or indexed object) reference. */
+ TRef key; /* Key reference. */
+ TRef val; /* Value reference for a store or 0 for a load. */
+ TRef mt; /* Metatable reference. */
+ TRef mobj; /* Metamethod object reference. */
+ int idxchain; /* Index indirections left or 0 for raw lookup. */
+} RecordIndex;
+
+LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b,
+ cTValue *av, cTValue *bv);
+LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o);
+
+LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs);
+LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs);
+LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults);
+
+LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm);
+LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix);
+
+LJ_FUNC void lj_record_ins(jit_State *J);
+LJ_FUNC void lj_record_setup(jit_State *J);
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_snap.c b/3rdparty/lua/src/lj_snap.c
index 5c870ba..30ff915 100644
--- a/3rdparty/lua/src/lj_snap.c
+++ b/3rdparty/lua/src/lj_snap.c
@@ -1,6 +1,6 @@
/*
** Snapshot handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_snap_c
@@ -104,6 +104,8 @@ static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map)
if (frame_islua(frame)) {
map[f++] = SNAP_MKPC(frame_pc(frame));
frame = frame_prevl(frame);
+ if (frame + funcproto(frame_func(frame))->framesize > ftop)
+ ftop = frame + funcproto(frame_func(frame))->framesize;
} else if (frame_iscont(frame)) {
map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
map[f++] = SNAP_MKPC(frame_contpc(frame));
@@ -112,10 +114,7 @@ static BCReg snapshot_framelinks(jit_State *J, SnapEntry *map)
lua_assert(!frame_isc(frame));
map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
frame = frame_prevd(frame);
- continue;
}
- if (frame + funcproto(frame_func(frame))->framesize > ftop)
- ftop = frame + funcproto(frame_func(frame))->framesize;
}
lua_assert(f == (MSize)(1 + J->framedepth));
return (BCReg)(ftop - lim);
@@ -709,7 +708,7 @@ static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
ir->o == IR_CNEW || ir->o == IR_CNEWI);
#if LJ_HASFFI
if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
- CTState *cts = ctype_cts(J->L);
+ CTState *cts = ctype_ctsG(J2G(J));
CTypeID id = (CTypeID)T->ir[ir->op1].i;
CTSize sz = lj_ctype_size(cts, id);
GCcdata *cd = lj_cdata_new(cts, id, sz);
@@ -846,15 +845,12 @@ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
/* Compute current stack top. */
switch (bc_op(*pc)) {
- default:
- if (bc_op(*pc) < BC_FUNCF) {
- L->top = curr_topL(L);
- break;
- }
- /* fallthrough */
case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
L->top = frame + snap->nslots;
break;
+ default:
+ L->top = curr_topL(L);
+ break;
}
return pc;
}
diff --git a/3rdparty/lua/src/lj_snap.h b/3rdparty/lua/src/lj_snap.h
index fac8942..aff97e5 100644
--- a/3rdparty/lua/src/lj_snap.h
+++ b/3rdparty/lua/src/lj_snap.h
@@ -1,34 +1,34 @@
-/*
-** Snapshot handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_SNAP_H
-#define _LJ_SNAP_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-LJ_FUNC void lj_snap_add(jit_State *J);
-LJ_FUNC void lj_snap_purge(jit_State *J);
-LJ_FUNC void lj_snap_shrink(jit_State *J);
-LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir);
-LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
-LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
-LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
-LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
-
-static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
-{
- if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
-}
-
-static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
-{
- if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
-}
-
-#endif
-
-#endif
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_SNAP_H
+#define _LJ_SNAP_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_snap_add(jit_State *J);
+LJ_FUNC void lj_snap_purge(jit_State *J);
+LJ_FUNC void lj_snap_shrink(jit_State *J);
+LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir);
+LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
+LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
+LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
+LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
+
+static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
+{
+ if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
+}
+
+static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
+{
+ if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
+}
+
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_state.c b/3rdparty/lua/src/lj_state.c
index fced5d6..8c53d37 100644
--- a/3rdparty/lua/src/lj_state.c
+++ b/3rdparty/lua/src/lj_state.c
@@ -1,287 +1,287 @@
-/*
-** State and stack handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_state_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_func.h"
-#include "lj_meta.h"
-#include "lj_state.h"
-#include "lj_frame.h"
-#if LJ_HASFFI
-#include "lj_ctype.h"
-#endif
-#include "lj_trace.h"
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-#include "lj_lex.h"
-#include "lj_alloc.h"
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Stack sizes. */
-#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */
-#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */
-#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */
-#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA)
-
-/* Explanation of LJ_STACK_EXTRA:
-**
-** Calls to metamethods store their arguments beyond the current top
-** without checking for the stack limit. This avoids stack resizes which
-** would invalidate passed TValue pointers. The stack check is performed
-** later by the function header. This can safely resize the stack or raise
-** an error. Thus we need some extra slots beyond the current stack limit.
-**
-** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus
-** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
-** slots above top, but then mobj is always a function. So we can get by
-** with 5 extra slots.
-*/
-
-/* Resize stack slots and adjust pointers in state. */
-static void resizestack(lua_State *L, MSize n)
-{
- TValue *st, *oldst = tvref(L->stack);
- ptrdiff_t delta;
- MSize oldsize = L->stacksize;
- MSize realsize = n + 1 + LJ_STACK_EXTRA;
- GCobj *up;
- lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1);
- st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
- (MSize)(L->stacksize*sizeof(TValue)),
- (MSize)(realsize*sizeof(TValue)));
- setmref(L->stack, st);
- delta = (char *)st - (char *)oldst;
- setmref(L->maxstack, st + n);
- while (oldsize < realsize) /* Clear new slots. */
- setnilV(st + oldsize++);
- L->stacksize = realsize;
- L->base = (TValue *)((char *)L->base + delta);
- L->top = (TValue *)((char *)L->top + delta);
- for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
- setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
- if (obj2gco(L) == gcref(G(L)->jit_L))
- setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
-}
-
-/* Relimit stack after error, in case the limit was overdrawn. */
-void lj_state_relimitstack(lua_State *L)
-{
- if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1)
- resizestack(L, LJ_STACK_MAX);
-}
-
-/* Try to shrink the stack (called from GC). */
-void lj_state_shrinkstack(lua_State *L, MSize used)
-{
- if (L->stacksize > LJ_STACK_MAXEX)
- return; /* Avoid stack shrinking while handling stack overflow. */
- if (4*used < L->stacksize &&
- 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
- obj2gco(L) != gcref(G(L)->jit_L)) /* Don't shrink stack of live trace. */
- resizestack(L, L->stacksize >> 1);
-}
-
-/* Try to grow stack. */
-void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need)
-{
- MSize n;
- if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */
- lj_err_throw(L, LUA_ERRERR);
- n = L->stacksize + need;
- if (n > LJ_STACK_MAX) {
- n += 2*LUA_MINSTACK;
- } else if (n < 2*L->stacksize) {
- n = 2*L->stacksize;
- if (n >= LJ_STACK_MAX)
- n = LJ_STACK_MAX;
- }
- resizestack(L, n);
- if (L->stacksize > LJ_STACK_MAXEX)
- lj_err_msg(L, LJ_ERR_STKOV);
-}
-
-void LJ_FASTCALL lj_state_growstack1(lua_State *L)
-{
- lj_state_growstack(L, 1);
-}
-
-/* Allocate basic stack for new state. */
-static void stack_init(lua_State *L1, lua_State *L)
-{
- TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue);
- setmref(L1->stack, st);
- L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
- stend = st + L1->stacksize;
- setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);
- L1->base = L1->top = st+1;
- setthreadV(L1, st, L1); /* Needed for curr_funcisL() on empty stack. */
- while (st < stend) /* Clear new slots. */
- setnilV(st++);
-}
-
-/* -- State handling ------------------------------------------------------ */
-
-/* Open parts that may cause memory-allocation errors. */
-static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
-{
- global_State *g = G(L);
- UNUSED(dummy);
- UNUSED(ud);
- stack_init(L, L);
- /* NOBARRIER: State initialization, all objects are white. */
- setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
- settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
- lj_str_resize(L, LJ_MIN_STRTAB-1);
- lj_meta_init(L);
- lj_lex_init(L);
- fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. */
- g->gc.threshold = 4*g->gc.total;
- lj_trace_initstate(g);
- return NULL;
-}
-
-static void close_state(lua_State *L)
-{
- global_State *g = G(L);
- lj_func_closeuv(L, tvref(L->stack));
- lj_gc_freeall(g);
- lua_assert(gcref(g->gc.root) == obj2gco(L));
- lua_assert(g->strnum == 0);
- lj_trace_freestate(g);
-#if LJ_HASFFI
- lj_ctype_freestate(g);
-#endif
- lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
- lj_str_freebuf(g, &g->tmpbuf);
- lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
- lua_assert(g->gc.total == sizeof(GG_State));
-#ifndef LUAJIT_USE_SYSMALLOC
- if (g->allocf == lj_alloc_f)
- lj_alloc_destroy(g->allocd);
- else
-#endif
- g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);
-}
-
-#if LJ_64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
-lua_State *lj_state_newstate(lua_Alloc f, void *ud)
-#else
-LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
-#endif
-{
- GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State));
- lua_State *L = &GG->L;
- global_State *g = &GG->g;
- if (GG == NULL || !checkptr32(GG)) return NULL;
- memset(GG, 0, sizeof(GG_State));
- L->gct = ~LJ_TTHREAD;
- L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */
- L->dummy_ffid = FF_C;
- setmref(L->glref, g);
- g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
- g->strempty.marked = LJ_GC_WHITE0;
- g->strempty.gct = ~LJ_TSTR;
- g->allocf = f;
- g->allocd = ud;
- setgcref(g->mainthref, obj2gco(L));
- setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
- setgcref(g->uvhead.next, obj2gco(&g->uvhead));
- g->strmask = ~(MSize)0;
- setnilV(registry(L));
- setnilV(&g->nilnode.val);
- setnilV(&g->nilnode.key);
- setmref(g->nilnode.freetop, &g->nilnode);
- lj_str_initbuf(&g->tmpbuf);
- g->gc.state = GCSpause;
- setgcref(g->gc.root, obj2gco(L));
- setmref(g->gc.sweep, &g->gc.root);
- g->gc.total = sizeof(GG_State);
- g->gc.pause = LUAI_GCPAUSE;
- g->gc.stepmul = LUAI_GCMUL;
- lj_dispatch_init((GG_State *)L);
- L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */
- if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) {
- /* Memory allocation error: free partial state. */
- close_state(L);
- return NULL;
- }
- L->status = 0;
- return L;
-}
-
-static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud)
-{
- UNUSED(dummy);
- UNUSED(ud);
- lj_gc_finalize_cdata(L);
- lj_gc_finalize_udata(L);
- /* Frame pop omitted. */
- return NULL;
-}
-
-LUA_API void lua_close(lua_State *L)
-{
- global_State *g = G(L);
- int i;
- L = mainthread(g); /* Only the main thread can be closed. */
- lj_func_closeuv(L, tvref(L->stack));
- lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */
-#if LJ_HASJIT
- G2J(g)->flags &= ~JIT_F_ON;
- G2J(g)->state = LJ_TRACE_IDLE;
- lj_dispatch_update(g);
-#endif
- for (i = 0;;) {
- hook_enter(g);
- L->status = 0;
- L->cframe = NULL;
- L->base = L->top = tvref(L->stack) + 1;
- if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) {
- if (++i >= 10) break;
- lj_gc_separateudata(g, 1); /* Separate udata again. */
- if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */
- break;
- }
- }
- close_state(L);
-}
-
-lua_State *lj_state_new(lua_State *L)
-{
- lua_State *L1 = lj_mem_newobj(L, lua_State);
- L1->gct = ~LJ_TTHREAD;
- L1->dummy_ffid = FF_C;
- L1->status = 0;
- L1->stacksize = 0;
- setmref(L1->stack, NULL);
- L1->cframe = NULL;
- /* NOBARRIER: The lua_State is new (marked white). */
- setgcrefnull(L1->openupval);
- setmrefr(L1->glref, L->glref);
- setgcrefr(L1->env, L->env);
- stack_init(L1, L); /* init stack */
- lua_assert(iswhite(obj2gco(L1)));
- return L1;
-}
-
-void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
-{
- lua_assert(L != mainthread(g));
- lj_func_closeuv(L, tvref(L->stack));
- lua_assert(gcref(L->openupval) == NULL);
- lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
- lj_mem_freet(g, L);
-}
-
+/*
+** State and stack handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_state_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_alloc.h"
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Stack sizes. */
+#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */
+#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */
+#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */
+#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA)
+
+/* Explanation of LJ_STACK_EXTRA:
+**
+** Calls to metamethods store their arguments beyond the current top
+** without checking for the stack limit. This avoids stack resizes which
+** would invalidate passed TValue pointers. The stack check is performed
+** later by the function header. This can safely resize the stack or raise
+** an error. Thus we need some extra slots beyond the current stack limit.
+**
+** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus
+** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
+** slots above top, but then mobj is always a function. So we can get by
+** with 5 extra slots.
+*/
+
+/* Resize stack slots and adjust pointers in state. */
+static void resizestack(lua_State *L, MSize n)
+{
+ TValue *st, *oldst = tvref(L->stack);
+ ptrdiff_t delta;
+ MSize oldsize = L->stacksize;
+ MSize realsize = n + 1 + LJ_STACK_EXTRA;
+ GCobj *up;
+ lua_assert((MSize)(tvref(L->maxstack)-oldst)==L->stacksize-LJ_STACK_EXTRA-1);
+ st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
+ (MSize)(L->stacksize*sizeof(TValue)),
+ (MSize)(realsize*sizeof(TValue)));
+ setmref(L->stack, st);
+ delta = (char *)st - (char *)oldst;
+ setmref(L->maxstack, st + n);
+ while (oldsize < realsize) /* Clear new slots. */
+ setnilV(st + oldsize++);
+ L->stacksize = realsize;
+ L->base = (TValue *)((char *)L->base + delta);
+ L->top = (TValue *)((char *)L->top + delta);
+ for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
+ setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
+ if (obj2gco(L) == gcref(G(L)->jit_L))
+ setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
+}
+
+/* Relimit stack after error, in case the limit was overdrawn. */
+void lj_state_relimitstack(lua_State *L)
+{
+ if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1)
+ resizestack(L, LJ_STACK_MAX);
+}
+
+/* Try to shrink the stack (called from GC). */
+void lj_state_shrinkstack(lua_State *L, MSize used)
+{
+ if (L->stacksize > LJ_STACK_MAXEX)
+ return; /* Avoid stack shrinking while handling stack overflow. */
+ if (4*used < L->stacksize &&
+ 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
+ obj2gco(L) != gcref(G(L)->jit_L)) /* Don't shrink stack of live trace. */
+ resizestack(L, L->stacksize >> 1);
+}
+
+/* Try to grow stack. */
+void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need)
+{
+ MSize n;
+ if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */
+ lj_err_throw(L, LUA_ERRERR);
+ n = L->stacksize + need;
+ if (n > LJ_STACK_MAX) {
+ n += 2*LUA_MINSTACK;
+ } else if (n < 2*L->stacksize) {
+ n = 2*L->stacksize;
+ if (n >= LJ_STACK_MAX)
+ n = LJ_STACK_MAX;
+ }
+ resizestack(L, n);
+ if (L->stacksize > LJ_STACK_MAXEX)
+ lj_err_msg(L, LJ_ERR_STKOV);
+}
+
+void LJ_FASTCALL lj_state_growstack1(lua_State *L)
+{
+ lj_state_growstack(L, 1);
+}
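/*
** Illustrative sketch, not part of the diff above: the sizing policy of
** lj_state_growstack() as a standalone function. SK_MAX and SK_MINSTACK are
** stand-ins for LJ_STACK_MAX and LUA_MINSTACK; the real code additionally
** raises LJ_ERR_STKOV if the result still exceeds the extended maximum.
*/
#include <stddef.h>

#define SK_MAX      65500u   /* Stand-in for LJ_STACK_MAX. */
#define SK_MINSTACK 20u      /* Stand-in for LUA_MINSTACK. */

/* New slot count: at least size+need, at least double, capped at the max. */
static size_t grow_stack_size(size_t size, size_t need)
{
  size_t n = size + need;
  if (n > SK_MAX) {
    n += 2*SK_MINSTACK;      /* Overshoot so error handling itself has room. */
  } else if (n < 2*size) {
    n = 2*size;              /* Grow geometrically to amortize reallocations. */
    if (n >= SK_MAX) n = SK_MAX;
  }
  return n;
}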
+
+/* Allocate basic stack for new state. */
+static void stack_init(lua_State *L1, lua_State *L)
+{
+ TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue);
+ setmref(L1->stack, st);
+ L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
+ stend = st + L1->stacksize;
+ setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);
+ L1->base = L1->top = st+1;
+ setthreadV(L1, st, L1); /* Needed for curr_funcisL() on empty stack. */
+ while (st < stend) /* Clear new slots. */
+ setnilV(st++);
+}
+
+/* -- State handling ------------------------------------------------------ */
+
+/* Open parts that may cause memory-allocation errors. */
+static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ global_State *g = G(L);
+ UNUSED(dummy);
+ UNUSED(ud);
+ stack_init(L, L);
+ /* NOBARRIER: State initialization, all objects are white. */
+ setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
+ settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
+ lj_str_resize(L, LJ_MIN_STRTAB-1);
+ lj_meta_init(L);
+ lj_lex_init(L);
+ fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. */
+ g->gc.threshold = 4*g->gc.total;
+ lj_trace_initstate(g);
+ return NULL;
+}
+
+static void close_state(lua_State *L)
+{
+ global_State *g = G(L);
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_gc_freeall(g);
+ lua_assert(gcref(g->gc.root) == obj2gco(L));
+ lua_assert(g->strnum == 0);
+ lj_trace_freestate(g);
+#if LJ_HASFFI
+ lj_ctype_freestate(g);
+#endif
+ lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
+ lj_str_freebuf(g, &g->tmpbuf);
+ lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+ lua_assert(g->gc.total == sizeof(GG_State));
+#ifndef LUAJIT_USE_SYSMALLOC
+ if (g->allocf == lj_alloc_f)
+ lj_alloc_destroy(g->allocd);
+ else
+#endif
+ g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);
+}
+
+#if LJ_64
+lua_State *lj_state_newstate(lua_Alloc f, void *ud)
+#else
+LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
+#endif
+{
+ GG_State *GG = (GG_State *)f(ud, NULL, 0, sizeof(GG_State));
+ lua_State *L = &GG->L;
+ global_State *g = &GG->g;
+ if (GG == NULL || !checkptr32(GG)) return NULL;
+ memset(GG, 0, sizeof(GG_State));
+ L->gct = ~LJ_TTHREAD;
+ L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */
+ L->dummy_ffid = FF_C;
+ setmref(L->glref, g);
+ g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
+ g->strempty.marked = LJ_GC_WHITE0;
+ g->strempty.gct = ~LJ_TSTR;
+ g->allocf = f;
+ g->allocd = ud;
+ setgcref(g->mainthref, obj2gco(L));
+ setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
+ setgcref(g->uvhead.next, obj2gco(&g->uvhead));
+ g->strmask = ~(MSize)0;
+ setnilV(registry(L));
+ setnilV(&g->nilnode.val);
+ setnilV(&g->nilnode.key);
+ setmref(g->nilnode.freetop, &g->nilnode);
+ lj_str_initbuf(&g->tmpbuf);
+ g->gc.state = GCSpause;
+ setgcref(g->gc.root, obj2gco(L));
+ setmref(g->gc.sweep, &g->gc.root);
+ g->gc.total = sizeof(GG_State);
+ g->gc.pause = LUAI_GCPAUSE;
+ g->gc.stepmul = LUAI_GCMUL;
+ lj_dispatch_init((GG_State *)L);
+ L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */
+ if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) {
+ /* Memory allocation error: free partial state. */
+ close_state(L);
+ return NULL;
+ }
+ L->status = 0;
+ return L;
+}
+
+static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ UNUSED(dummy);
+ UNUSED(ud);
+ lj_gc_finalize_cdata(L);
+ lj_gc_finalize_udata(L);
+ /* Frame pop omitted. */
+ return NULL;
+}
+
+LUA_API void lua_close(lua_State *L)
+{
+ global_State *g = G(L);
+ int i;
+ L = mainthread(g); /* Only the main thread can be closed. */
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */
+#if LJ_HASJIT
+ G2J(g)->flags &= ~JIT_F_ON;
+ G2J(g)->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(g);
+#endif
+ for (i = 0;;) {
+ hook_enter(g);
+ L->status = 0;
+ L->cframe = NULL;
+ L->base = L->top = tvref(L->stack) + 1;
+ if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == 0) {
+ if (++i >= 10) break;
+ lj_gc_separateudata(g, 1); /* Separate udata again. */
+ if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */
+ break;
+ }
+ }
+ close_state(L);
+}
+
+lua_State *lj_state_new(lua_State *L)
+{
+ lua_State *L1 = lj_mem_newobj(L, lua_State);
+ L1->gct = ~LJ_TTHREAD;
+ L1->dummy_ffid = FF_C;
+ L1->status = 0;
+ L1->stacksize = 0;
+ setmref(L1->stack, NULL);
+ L1->cframe = NULL;
+ /* NOBARRIER: The lua_State is new (marked white). */
+ setgcrefnull(L1->openupval);
+ setmrefr(L1->glref, L->glref);
+ setgcrefr(L1->env, L->env);
+ stack_init(L1, L); /* init stack */
+ lua_assert(iswhite(obj2gco(L1)));
+ return L1;
+}
+
+void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
+{
+ lua_assert(L != mainthread(g));
+ lj_func_closeuv(L, tvref(L->stack));
+ lua_assert(gcref(L->openupval) == NULL);
+ lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+ lj_mem_freet(g, L);
+}
+
diff --git a/3rdparty/lua/src/lj_state.h b/3rdparty/lua/src/lj_state.h
index 683aec6..527f054 100644
--- a/3rdparty/lua/src/lj_state.h
+++ b/3rdparty/lua/src/lj_state.h
@@ -1,35 +1,35 @@
-/*
-** State and stack handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_STATE_H
-#define _LJ_STATE_H
-
-#include "lj_obj.h"
-
-#define incr_top(L) \
- (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0))
-
-#define savestack(L, p) ((char *)(p) - mref(L->stack, char))
-#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n)))
-
-LJ_FUNC void lj_state_relimitstack(lua_State *L);
-LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used);
-LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need);
-LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L);
-
-static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
-{
- if ((mref(L->maxstack, char) - (char *)L->top) <=
- (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue))
- lj_state_growstack(L, need);
-}
-
-LJ_FUNC lua_State *lj_state_new(lua_State *L);
-LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
-#if LJ_64
-LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
-#endif
-
-#endif
+/*
+** State and stack handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STATE_H
+#define _LJ_STATE_H
+
+#include "lj_obj.h"
+
+#define incr_top(L) \
+ (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0))
+
+#define savestack(L, p) ((char *)(p) - mref(L->stack, char))
+#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n)))
+
+LJ_FUNC void lj_state_relimitstack(lua_State *L);
+LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used);
+LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need);
+LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L);
+
+static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
+{
+ if ((mref(L->maxstack, char) - (char *)L->top) <=
+ (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue))
+ lj_state_growstack(L, need);
+}
+
+LJ_FUNC lua_State *lj_state_new(lua_State *L);
+LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
+#if LJ_64
+LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
+#endif
+
+#endif
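The savestack()/restorestack() macros above exist because any call that may grow the stack can realloc and move it, invalidating raw TValue pointers (see resizestack() in lj_state.c). Below is a standalone analogue of that offset-instead-of-pointer pattern; all names are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct toy_stack { double *base; size_t top, size; };

/* Ensure room for 'need' more slots; may move the whole array. */
static void toy_grow(struct toy_stack *s, size_t need)
{
  if (s->top + need > s->size) {
    s->size = 2*s->size + need;
    s->base = realloc(s->base, s->size * sizeof(double));
    if (!s->base) abort();
  }
}

static void toy_use(struct toy_stack *s)
{
  /* Save the interesting slot as an offset; a raw pointer could dangle
  ** after toy_grow(), just like a TValue * across a stack resize. */
  size_t saved = s->top - 1;
  toy_grow(s, 64);
  s->base[saved] += 1.0;         /* Re-derive the slot after a possible move. */
}

int main(void)
{
  struct toy_stack s = { malloc(4 * sizeof(double)), 1, 4 };
  if (!s.base) return 1;
  s.base[0] = 41.0;
  toy_use(&s);
  printf("%g\n", s.base[0]);     /* Prints 42. */
  free(s.base);
  return 0;
}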
diff --git a/3rdparty/lua/src/lj_str.c b/3rdparty/lua/src/lj_str.c
index 795bd93..6548ee4 100644
--- a/3rdparty/lua/src/lj_str.c
+++ b/3rdparty/lua/src/lj_str.c
@@ -1,339 +1,339 @@
-/*
-** String handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#include <stdio.h>
-
-#define lj_str_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_str.h"
-#include "lj_state.h"
-#include "lj_char.h"
-
-/* -- String interning ---------------------------------------------------- */
-
-/* Ordered compare of strings. Assumes string data is 4-byte aligned. */
-int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
-{
- MSize i, n = a->len > b->len ? b->len : a->len;
- for (i = 0; i < n; i += 4) {
- /* Note: innocuous access up to end of string + 3. */
- uint32_t va = *(const uint32_t *)(strdata(a)+i);
- uint32_t vb = *(const uint32_t *)(strdata(b)+i);
- if (va != vb) {
-#if LJ_LE
- va = lj_bswap(va); vb = lj_bswap(vb);
-#endif
- i -= n;
- if ((int32_t)i >= -3) {
- va >>= 32+(i<<3); vb >>= 32+(i<<3);
- if (va == vb) break;
- }
- return va < vb ? -1 : 1;
- }
- }
- return (int32_t)(a->len - b->len);
-}
-
-/* Fast string data comparison. Caveat: unaligned access to 1st string! */
-static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len)
-{
- MSize i = 0;
- lua_assert(len > 0);
- lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4);
- do { /* Note: innocuous access up to end of string + 3. */
- uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i);
- if (v) {
- i -= len;
-#if LJ_LE
- return (int32_t)i >= -3 ? (v << (32+(i<<3))) : 1;
-#else
- return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1;
-#endif
- }
- i += 4;
- } while (i < len);
- return 0;
-}
-
-/* Resize the string hash table (grow and shrink). */
-void lj_str_resize(lua_State *L, MSize newmask)
-{
- global_State *g = G(L);
- GCRef *newhash;
- MSize i;
- if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1)
- return; /* No resizing during GC traversal or if already too big. */
- newhash = lj_mem_newvec(L, newmask+1, GCRef);
- memset(newhash, 0, (newmask+1)*sizeof(GCRef));
- for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */
- GCobj *p = gcref(g->strhash[i]);
- while (p) { /* Follow each hash chain and reinsert all strings. */
- MSize h = gco2str(p)->hash & newmask;
- GCobj *next = gcnext(p);
- /* NOBARRIER: The string table is a GC root. */
- setgcrefr(p->gch.nextgc, newhash[h]);
- setgcref(newhash[h], p);
- p = next;
- }
- }
- lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
- g->strmask = newmask;
- g->strhash = newhash;
-}
-
-/* Intern a string and return string object. */
-GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx)
-{
- global_State *g;
- GCstr *s;
- GCobj *o;
- MSize len = (MSize)lenx;
- MSize a, b, h = len;
- if (lenx >= LJ_MAX_STR)
- lj_err_msg(L, LJ_ERR_STROV);
- g = G(L);
- /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */
- if (len >= 4) { /* Caveat: unaligned access! */
- a = lj_getu32(str);
- h ^= lj_getu32(str+len-4);
- b = lj_getu32(str+(len>>1)-2);
- h ^= b; h -= lj_rol(b, 14);
- b += lj_getu32(str+(len>>2)-1);
- } else if (len > 0) {
- a = *(const uint8_t *)str;
- h ^= *(const uint8_t *)(str+len-1);
- b = *(const uint8_t *)(str+(len>>1));
- h ^= b; h -= lj_rol(b, 14);
- } else {
- return &g->strempty;
- }
- a ^= h; a -= lj_rol(h, 11);
- b ^= a; b -= lj_rol(a, 25);
- h ^= b; h -= lj_rol(b, 16);
- /* Check if the string has already been interned. */
- o = gcref(g->strhash[h & g->strmask]);
- if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) {
- while (o != NULL) {
- GCstr *sx = gco2str(o);
- if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) {
- /* Resurrect if dead. Can only happen with fixstring() (keywords). */
- if (isdead(g, o)) flipwhite(o);
- return sx; /* Return existing string. */
- }
- o = gcnext(o);
- }
- } else { /* Slow path: end of string is too close to a page boundary. */
- while (o != NULL) {
- GCstr *sx = gco2str(o);
- if (sx->len == len && memcmp(str, strdata(sx), len) == 0) {
- /* Resurrect if dead. Can only happen with fixstring() (keywords). */
- if (isdead(g, o)) flipwhite(o);
- return sx; /* Return existing string. */
- }
- o = gcnext(o);
- }
- }
- /* Nope, create a new string. */
- s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr);
- newwhite(g, s);
- s->gct = ~LJ_TSTR;
- s->len = len;
- s->hash = h;
- s->reserved = 0;
- memcpy(strdatawr(s), str, len);
- strdatawr(s)[len] = '\0'; /* Zero-terminate string. */
- /* Add it to string hash table. */
- h &= g->strmask;
- s->nextgc = g->strhash[h];
- /* NOBARRIER: The string table is a GC root. */
- setgcref(g->strhash[h], obj2gco(s));
- if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */
- lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */
- return s; /* Return newly interned string. */
-}
-
-void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s)
-{
- g->strnum--;
- lj_mem_free(g, s, sizestring(s));
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-/* Print number to buffer. Canonicalizes non-finite values. */
-size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o)
-{
- if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */
- lua_Number n = o->n;
-#if __BIONIC__
- if (tvismzero(o)) { s[0] = '-'; s[1] = '0'; return 2; }
-#endif
- return (size_t)lua_number2str(s, n);
- } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) {
- s[0] = 'n'; s[1] = 'a'; s[2] = 'n'; return 3;
- } else if ((o->u32.hi & 0x80000000) == 0) {
- s[0] = 'i'; s[1] = 'n'; s[2] = 'f'; return 3;
- } else {
- s[0] = '-'; s[1] = 'i'; s[2] = 'n'; s[3] = 'f'; return 4;
- }
-}
-
-/* Print integer to buffer. Returns pointer to start. */
-char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k)
-{
- uint32_t u = (uint32_t)(k < 0 ? -k : k);
- p += 1+10;
- do { *--p = (char)('0' + u % 10); } while (u /= 10);
- if (k < 0) *--p = '-';
- return p;
-}
-
-/* Convert number to string. */
-GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np)
-{
- char buf[LJ_STR_NUMBUF];
- size_t len = lj_str_bufnum(buf, (TValue *)np);
- return lj_str_new(L, buf, len);
-}
-
-/* Convert integer to string. */
-GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k)
-{
- char s[1+10];
- char *p = lj_str_bufint(s, k);
- return lj_str_new(L, p, (size_t)(s+sizeof(s)-p));
-}
-
-GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o)
-{
- return tvisint(o) ? lj_str_fromint(L, intV(o)) : lj_str_fromnum(L, &o->n);
-}
-
-/* -- String formatting --------------------------------------------------- */
-
-static void addstr(lua_State *L, SBuf *sb, const char *str, MSize len)
-{
- char *p;
- MSize i;
- if (sb->n + len > sb->sz) {
- MSize sz = sb->sz * 2;
- while (sb->n + len > sz) sz = sz * 2;
- lj_str_resizebuf(L, sb, sz);
- }
- p = sb->buf + sb->n;
- sb->n += len;
- for (i = 0; i < len; i++) p[i] = str[i];
-}
-
-static void addchar(lua_State *L, SBuf *sb, int c)
-{
- if (sb->n + 1 > sb->sz) {
- MSize sz = sb->sz * 2;
- lj_str_resizebuf(L, sb, sz);
- }
- sb->buf[sb->n++] = (char)c;
-}
-
-/* Push formatted message as a string object to Lua stack. va_list variant. */
-const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp)
-{
- SBuf *sb = &G(L)->tmpbuf;
- lj_str_needbuf(L, sb, (MSize)strlen(fmt));
- lj_str_resetbuf(sb);
- for (;;) {
- const char *e = strchr(fmt, '%');
- if (e == NULL) break;
- addstr(L, sb, fmt, (MSize)(e-fmt));
- /* This function only handles %s, %c, %d, %f and %p formats. */
- switch (e[1]) {
- case 's': {
- const char *s = va_arg(argp, char *);
- if (s == NULL) s = "(null)";
- addstr(L, sb, s, (MSize)strlen(s));
- break;
- }
- case 'c':
- addchar(L, sb, va_arg(argp, int));
- break;
- case 'd': {
- char buf[LJ_STR_INTBUF];
- char *p = lj_str_bufint(buf, va_arg(argp, int32_t));
- addstr(L, sb, p, (MSize)(buf+LJ_STR_INTBUF-p));
- break;
- }
- case 'f': {
- char buf[LJ_STR_NUMBUF];
- TValue tv;
- MSize len;
- tv.n = (lua_Number)(va_arg(argp, LUAI_UACNUMBER));
- len = (MSize)lj_str_bufnum(buf, &tv);
- addstr(L, sb, buf, len);
- break;
- }
- case 'p': {
-#define FMTP_CHARS (2*sizeof(ptrdiff_t))
- char buf[2+FMTP_CHARS];
- ptrdiff_t p = (ptrdiff_t)(va_arg(argp, void *));
- ptrdiff_t i, lasti = 2+FMTP_CHARS;
- if (p == 0) {
- addstr(L, sb, "NULL", 4);
- break;
- }
-#if LJ_64
- /* Shorten output for 64 bit pointers. */
- lasti = 2+2*4+((p >> 32) ? 2+2*(lj_fls((uint32_t)(p >> 32))>>3) : 0);
-#endif
- buf[0] = '0';
- buf[1] = 'x';
- for (i = lasti-1; i >= 2; i--, p >>= 4)
- buf[i] = "0123456789abcdef"[(p & 15)];
- addstr(L, sb, buf, (MSize)lasti);
- break;
- }
- case '%':
- addchar(L, sb, '%');
- break;
- default:
- addchar(L, sb, '%');
- addchar(L, sb, e[1]);
- break;
- }
- fmt = e+2;
- }
- addstr(L, sb, fmt, (MSize)strlen(fmt));
- setstrV(L, L->top, lj_str_new(L, sb->buf, sb->n));
- incr_top(L);
- return strVdata(L->top - 1);
-}
-
-/* Push formatted message as a string object to Lua stack. Vararg variant. */
-const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
-{
- const char *msg;
- va_list argp;
- va_start(argp, fmt);
- msg = lj_str_pushvf(L, fmt, argp);
- va_end(argp);
- return msg;
-}
-
-/* -- Buffer handling ----------------------------------------------------- */
-
-char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz)
-{
- if (sz > sb->sz) {
- if (sz < LJ_MIN_SBUF) sz = LJ_MIN_SBUF;
- lj_str_resizebuf(L, sb, sz);
- }
- return sb->buf;
-}
-
+/*
+** String handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lj_str_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_state.h"
+#include "lj_char.h"
+
+/* -- String interning ---------------------------------------------------- */
+
+/* Ordered compare of strings. Assumes string data is 4-byte aligned. */
+int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
+{
+ MSize i, n = a->len > b->len ? b->len : a->len;
+ for (i = 0; i < n; i += 4) {
+ /* Note: innocuous access up to end of string + 3. */
+ uint32_t va = *(const uint32_t *)(strdata(a)+i);
+ uint32_t vb = *(const uint32_t *)(strdata(b)+i);
+ if (va != vb) {
+#if LJ_LE
+ va = lj_bswap(va); vb = lj_bswap(vb);
+#endif
+ i -= n;
+ if ((int32_t)i >= -3) {
+ va >>= 32+(i<<3); vb >>= 32+(i<<3);
+ if (va == vb) break;
+ }
+ return va < vb ? -1 : 1;
+ }
+ }
+ return (int32_t)(a->len - b->len);
+}
+
+/* Fast string data comparison. Caveat: unaligned access to 1st string! */
+static LJ_AINLINE int str_fastcmp(const char *a, const char *b, MSize len)
+{
+ MSize i = 0;
+ lua_assert(len > 0);
+ lua_assert((((uintptr_t)a+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4);
+ do { /* Note: innocuous access up to end of string + 3. */
+ uint32_t v = lj_getu32(a+i) ^ *(const uint32_t *)(b+i);
+ if (v) {
+ i -= len;
+#if LJ_LE
+ return (int32_t)i >= -3 ? (v << (32+(i<<3))) : 1;
+#else
+ return (int32_t)i >= -3 ? (v >> (32+(i<<3))) : 1;
+#endif
+ }
+ i += 4;
+ } while (i < len);
+ return 0;
+}
+
+/* Resize the string hash table (grow and shrink). */
+void lj_str_resize(lua_State *L, MSize newmask)
+{
+ global_State *g = G(L);
+ GCRef *newhash;
+ MSize i;
+ if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1)
+ return; /* No resizing during GC traversal or if already too big. */
+ newhash = lj_mem_newvec(L, newmask+1, GCRef);
+ memset(newhash, 0, (newmask+1)*sizeof(GCRef));
+ for (i = g->strmask; i != ~(MSize)0; i--) { /* Rehash old table. */
+ GCobj *p = gcref(g->strhash[i]);
+ while (p) { /* Follow each hash chain and reinsert all strings. */
+ MSize h = gco2str(p)->hash & newmask;
+ GCobj *next = gcnext(p);
+ /* NOBARRIER: The string table is a GC root. */
+ setgcrefr(p->gch.nextgc, newhash[h]);
+ setgcref(newhash[h], p);
+ p = next;
+ }
+ }
+ lj_mem_freevec(g, g->strhash, g->strmask+1, GCRef);
+ g->strmask = newmask;
+ g->strhash = newhash;
+}
+
+/* Intern a string and return string object. */
+GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx)
+{
+ global_State *g;
+ GCstr *s;
+ GCobj *o;
+ MSize len = (MSize)lenx;
+ MSize a, b, h = len;
+ if (lenx >= LJ_MAX_STR)
+ lj_err_msg(L, LJ_ERR_STROV);
+ g = G(L);
+ /* Compute string hash. Constants taken from lookup3 hash by Bob Jenkins. */
+ if (len >= 4) { /* Caveat: unaligned access! */
+ a = lj_getu32(str);
+ h ^= lj_getu32(str+len-4);
+ b = lj_getu32(str+(len>>1)-2);
+ h ^= b; h -= lj_rol(b, 14);
+ b += lj_getu32(str+(len>>2)-1);
+ } else if (len > 0) {
+ a = *(const uint8_t *)str;
+ h ^= *(const uint8_t *)(str+len-1);
+ b = *(const uint8_t *)(str+(len>>1));
+ h ^= b; h -= lj_rol(b, 14);
+ } else {
+ return &g->strempty;
+ }
+ a ^= h; a -= lj_rol(h, 11);
+ b ^= a; b -= lj_rol(a, 25);
+ h ^= b; h -= lj_rol(b, 16);
+ /* Check if the string has already been interned. */
+ o = gcref(g->strhash[h & g->strmask]);
+ if (LJ_LIKELY((((uintptr_t)str+len-1) & (LJ_PAGESIZE-1)) <= LJ_PAGESIZE-4)) {
+ while (o != NULL) {
+ GCstr *sx = gco2str(o);
+ if (sx->len == len && str_fastcmp(str, strdata(sx), len) == 0) {
+ /* Resurrect if dead. Can only happen with fixstring() (keywords). */
+ if (isdead(g, o)) flipwhite(o);
+ return sx; /* Return existing string. */
+ }
+ o = gcnext(o);
+ }
+ } else { /* Slow path: end of string is too close to a page boundary. */
+ while (o != NULL) {
+ GCstr *sx = gco2str(o);
+ if (sx->len == len && memcmp(str, strdata(sx), len) == 0) {
+ /* Resurrect if dead. Can only happen with fixstring() (keywords). */
+ if (isdead(g, o)) flipwhite(o);
+ return sx; /* Return existing string. */
+ }
+ o = gcnext(o);
+ }
+ }
+ /* Nope, create a new string. */
+ s = lj_mem_newt(L, sizeof(GCstr)+len+1, GCstr);
+ newwhite(g, s);
+ s->gct = ~LJ_TSTR;
+ s->len = len;
+ s->hash = h;
+ s->reserved = 0;
+ memcpy(strdatawr(s), str, len);
+ strdatawr(s)[len] = '\0'; /* Zero-terminate string. */
+ /* Add it to string hash table. */
+ h &= g->strmask;
+ s->nextgc = g->strhash[h];
+ /* NOBARRIER: The string table is a GC root. */
+ setgcref(g->strhash[h], obj2gco(s));
+ if (g->strnum++ > g->strmask) /* Allow a 100% load factor. */
+ lj_str_resize(L, (g->strmask<<1)+1); /* Grow string table. */
+ return s; /* Return newly interned string. */
+}
+
+void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s)
+{
+ g->strnum--;
+ lj_mem_free(g, s, sizestring(s));
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+/* Print number to buffer. Canonicalizes non-finite values. */
+size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o)
+{
+ if (LJ_LIKELY((o->u32.hi << 1) < 0xffe00000)) { /* Finite? */
+ lua_Number n = o->n;
+#if __BIONIC__
+ if (tvismzero(o)) { s[0] = '-'; s[1] = '0'; return 2; }
+#endif
+ return (size_t)lua_number2str(s, n);
+ } else if (((o->u32.hi & 0x000fffff) | o->u32.lo) != 0) {
+ s[0] = 'n'; s[1] = 'a'; s[2] = 'n'; return 3;
+ } else if ((o->u32.hi & 0x80000000) == 0) {
+ s[0] = 'i'; s[1] = 'n'; s[2] = 'f'; return 3;
+ } else {
+ s[0] = '-'; s[1] = 'i'; s[2] = 'n'; s[3] = 'f'; return 4;
+ }
+}
+
+/* Print integer to buffer. Returns pointer to start. */
+char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k)
+{
+ uint32_t u = (uint32_t)(k < 0 ? -k : k);
+ p += 1+10;
+ do { *--p = (char)('0' + u % 10); } while (u /= 10);
+ if (k < 0) *--p = '-';
+ return p;
+}
+
+/* Convert number to string. */
+GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np)
+{
+ char buf[LJ_STR_NUMBUF];
+ size_t len = lj_str_bufnum(buf, (TValue *)np);
+ return lj_str_new(L, buf, len);
+}
+
+/* Convert integer to string. */
+GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k)
+{
+ char s[1+10];
+ char *p = lj_str_bufint(s, k);
+ return lj_str_new(L, p, (size_t)(s+sizeof(s)-p));
+}
+
+GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o)
+{
+ return tvisint(o) ? lj_str_fromint(L, intV(o)) : lj_str_fromnum(L, &o->n);
+}
+
+/* -- String formatting --------------------------------------------------- */
+
+static void addstr(lua_State *L, SBuf *sb, const char *str, MSize len)
+{
+ char *p;
+ MSize i;
+ if (sb->n + len > sb->sz) {
+ MSize sz = sb->sz * 2;
+ while (sb->n + len > sz) sz = sz * 2;
+ lj_str_resizebuf(L, sb, sz);
+ }
+ p = sb->buf + sb->n;
+ sb->n += len;
+ for (i = 0; i < len; i++) p[i] = str[i];
+}
+
+static void addchar(lua_State *L, SBuf *sb, int c)
+{
+ if (sb->n + 1 > sb->sz) {
+ MSize sz = sb->sz * 2;
+ lj_str_resizebuf(L, sb, sz);
+ }
+ sb->buf[sb->n++] = (char)c;
+}
+
+/* Push formatted message as a string object to Lua stack. va_list variant. */
+const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp)
+{
+ SBuf *sb = &G(L)->tmpbuf;
+ lj_str_needbuf(L, sb, (MSize)strlen(fmt));
+ lj_str_resetbuf(sb);
+ for (;;) {
+ const char *e = strchr(fmt, '%');
+ if (e == NULL) break;
+ addstr(L, sb, fmt, (MSize)(e-fmt));
+ /* This function only handles %s, %c, %d, %f and %p formats. */
+ switch (e[1]) {
+ case 's': {
+ const char *s = va_arg(argp, char *);
+ if (s == NULL) s = "(null)";
+ addstr(L, sb, s, (MSize)strlen(s));
+ break;
+ }
+ case 'c':
+ addchar(L, sb, va_arg(argp, int));
+ break;
+ case 'd': {
+ char buf[LJ_STR_INTBUF];
+ char *p = lj_str_bufint(buf, va_arg(argp, int32_t));
+ addstr(L, sb, p, (MSize)(buf+LJ_STR_INTBUF-p));
+ break;
+ }
+ case 'f': {
+ char buf[LJ_STR_NUMBUF];
+ TValue tv;
+ MSize len;
+ tv.n = (lua_Number)(va_arg(argp, LUAI_UACNUMBER));
+ len = (MSize)lj_str_bufnum(buf, &tv);
+ addstr(L, sb, buf, len);
+ break;
+ }
+ case 'p': {
+#define FMTP_CHARS (2*sizeof(ptrdiff_t))
+ char buf[2+FMTP_CHARS];
+ ptrdiff_t p = (ptrdiff_t)(va_arg(argp, void *));
+ ptrdiff_t i, lasti = 2+FMTP_CHARS;
+ if (p == 0) {
+ addstr(L, sb, "NULL", 4);
+ break;
+ }
+#if LJ_64
+ /* Shorten output for 64 bit pointers. */
+ lasti = 2+2*4+((p >> 32) ? 2+2*(lj_fls((uint32_t)(p >> 32))>>3) : 0);
+#endif
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = lasti-1; i >= 2; i--, p >>= 4)
+ buf[i] = "0123456789abcdef"[(p & 15)];
+ addstr(L, sb, buf, (MSize)lasti);
+ break;
+ }
+ case '%':
+ addchar(L, sb, '%');
+ break;
+ default:
+ addchar(L, sb, '%');
+ addchar(L, sb, e[1]);
+ break;
+ }
+ fmt = e+2;
+ }
+ addstr(L, sb, fmt, (MSize)strlen(fmt));
+ setstrV(L, L->top, lj_str_new(L, sb->buf, sb->n));
+ incr_top(L);
+ return strVdata(L->top - 1);
+}
+
+/* Push formatted message as a string object to Lua stack. Vararg variant. */
+const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, fmt);
+ msg = lj_str_pushvf(L, fmt, argp);
+ va_end(argp);
+ return msg;
+}
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz)
+{
+ if (sz > sb->sz) {
+ if (sz < LJ_MIN_SBUF) sz = LJ_MIN_SBUF;
+ lj_str_resizebuf(L, sb, sz);
+ }
+ return sb->buf;
+}
+
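
As a reading aid for the interning code above: the hash lj_str_new() computes for strings of four or more bytes can be reproduced outside the VM. The standalone sketch below substitutes portable stand-ins for lj_getu32() (taken here to be an unaligned native-order 32-bit load) and lj_rol() (a 32-bit rotate-left); those readings are assumptions, and demo_strhash() is illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for lj_rol(): 32-bit rotate-left. */
static uint32_t rol32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

/* Stand-in for lj_getu32(): unaligned 32-bit load in native byte order. */
static uint32_t getu32(const char *p) { uint32_t v; memcpy(&v, p, 4); return v; }

/* Same mixing steps as lj_str_new() for len >= 4 (lookup3-derived constants). */
static uint32_t demo_strhash(const char *str, uint32_t len)
{
  uint32_t a, b, h = len;
  a  = getu32(str);
  h ^= getu32(str + len - 4);
  b  = getu32(str + (len >> 1) - 2);
  h ^= b; h -= rol32(b, 14);
  b += getu32(str + (len >> 2) - 1);
  a ^= h; a -= rol32(h, 11);
  b ^= a; b -= rol32(a, 25);
  h ^= b; h -= rol32(b, 16);
  return h;
}

int main(void)
{
  printf("0x%08x\n", demo_strhash("interned", 8));
  return 0;
}

lj_str_new() masks this result with g->strmask to pick a hash chain, which is why the mask always has the form 2^n-1: lj_str_resize() doubles the table by passing newmask = (strmask<<1)+1.
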
diff --git a/3rdparty/lua/src/lj_str.h b/3rdparty/lua/src/lj_str.h
index 81153dd..3aa0366 100644
--- a/3rdparty/lua/src/lj_str.h
+++ b/3rdparty/lua/src/lj_str.h
@@ -1,50 +1,50 @@
-/*
-** String handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_STR_H
-#define _LJ_STR_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-
-/* String interning. */
-LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
-LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
-LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
-LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
-
-#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s)))
-#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1))
-
-/* Type conversions. */
-LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o);
-LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k);
-LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np);
-LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k);
-LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o);
-
-#define LJ_STR_INTBUF (1+10)
-#define LJ_STR_NUMBUF LUAI_MAXNUMBER2STR
-
-/* String formatting. */
-LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp);
-LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
-#if defined(__GNUC__)
- __attribute__ ((format (printf, 2, 3)))
-#endif
- ;
-
-/* Resizable string buffers. Struct definition in lj_obj.h. */
-LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz);
-
-#define lj_str_initbuf(sb) ((sb)->buf = NULL, (sb)->sz = 0)
-#define lj_str_resetbuf(sb) ((sb)->n = 0)
-#define lj_str_resizebuf(L, sb, size) \
- ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \
- (sb)->sz = (size))
-#define lj_str_freebuf(g, sb) lj_mem_free(g, (void *)(sb)->buf, (sb)->sz)
-
-#endif
+/*
+** String handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STR_H
+#define _LJ_STR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+/* String interning. */
+LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
+LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
+LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
+LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
+
+#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s)))
+#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1))
+
+/* Type conversions. */
+LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o);
+LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np);
+LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o);
+
+#define LJ_STR_INTBUF (1+10)
+#define LJ_STR_NUMBUF LUAI_MAXNUMBER2STR
+
+/* String formatting. */
+LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp);
+LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
+#if defined(__GNUC__)
+ __attribute__ ((format (printf, 2, 3)))
+#endif
+ ;
+
+/* Resizable string buffers. Struct definition in lj_obj.h. */
+LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz);
+
+#define lj_str_initbuf(sb) ((sb)->buf = NULL, (sb)->sz = 0)
+#define lj_str_resetbuf(sb) ((sb)->n = 0)
+#define lj_str_resizebuf(L, sb, size) \
+ ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \
+ (sb)->sz = (size))
+#define lj_str_freebuf(g, sb) lj_mem_free(g, (void *)(sb)->buf, (sb)->sz)
+
+#endif
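
The SBuf macros declared above (initbuf/resetbuf/resizebuf/freebuf) together with lj_str_needbuf() form the small buffer API that lj_str_pushvf() is written against. The sketch below shows the usual lifecycle on the global scratch buffer; demo_keyval() is a made-up helper, not an existing LuaJIT function, and the code assumes it is built as part of the LuaJIT core so these internal headers and symbols are available.

#include <string.h>
#include "lj_obj.h"
#include "lj_str.h"

/* Hypothetical helper: intern "<name>=<value>" using the shared temporary
** buffer, following the same needbuf/resetbuf pattern as lj_str_pushvf(). */
static GCstr *demo_keyval(lua_State *L, const char *name, int32_t value)
{
  SBuf *sb = &G(L)->tmpbuf;
  char ibuf[LJ_STR_INTBUF];
  char *ip = lj_str_bufint(ibuf, value);      /* Digits end at ibuf+LJ_STR_INTBUF. */
  MSize nlen = (MSize)strlen(name);
  MSize ilen = (MSize)(ibuf + LJ_STR_INTBUF - ip);
  lj_str_needbuf(L, sb, nlen + 1 + ilen);     /* Grow sb->buf up front if needed. */
  lj_str_resetbuf(sb);                        /* sb->n = 0. */
  memcpy(sb->buf, name, nlen);
  sb->buf[nlen] = '=';
  memcpy(sb->buf + nlen + 1, ip, ilen);
  sb->n = nlen + 1 + ilen;
  return lj_str_new(L, sb->buf, sb->n);       /* Intern the assembled bytes. */
}

As in lj_str_pushvf(), the scratch buffer is only borrowed: the bytes are interned with lj_str_new() before anything else can reuse tmpbuf.
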
diff --git a/3rdparty/lua/src/lj_strscan.c b/3rdparty/lua/src/lj_strscan.c
index 6525a42..a71b86a 100644
--- a/3rdparty/lua/src/lj_strscan.c
+++ b/3rdparty/lua/src/lj_strscan.c
@@ -1,498 +1,497 @@
-/*
-** String scanning.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include <math.h>
-
-#define lj_strscan_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_char.h"
-#include "lj_strscan.h"
-
-/* -- Scanning numbers ---------------------------------------------------- */
-
-/*
-** Rationale for the builtin string to number conversion library:
-**
-** It removes a dependency on libc's strtod(), which is a true portability
-** nightmare. Mainly due to the plethora of supported OS and toolchain
-** combinations. Sadly, the various implementations
-** a) are often buggy, incomplete (no hex floats) and/or imprecise,
-** b) sometimes crash or hang on certain inputs,
-** c) return non-standard NaNs that need to be filtered out, and
-** d) fail if the locale-specific decimal separator is not a dot,
-** which can only be fixed with atrocious workarounds.
-**
-** Also, most of the strtod() implementations are hopelessly bloated,
-** which is not just an I-cache hog, but a problem for static linkage
-** on embedded systems, too.
-**
-** OTOH the builtin conversion function is very compact. Even though it
-** does a lot more, like parsing long longs, octal or imaginary numbers
-** and returning the result in different formats:
-** a) It needs less than 3 KB (!) of machine code (on x64 with -Os),
-** b) it doesn't perform any dynamic allocation and,
-** c) it needs only around 600 bytes of stack space.
-**
-** The builtin function is faster than strtod() for typical inputs, e.g.
-** "123", "1.5" or "1e6". Arguably, it's slower for very large exponents,
-** which are not very common (this could be fixed, if needed).
-**
-** And most importantly, the builtin function is equally precise on all
-** platforms. It correctly converts and rounds any input to a double.
-** If this is not the case, please send a bug report -- but PLEASE verify
-** that the implementation you're comparing to is not the culprit!
-**
-** The implementation quickly pre-scans the entire string first and
-** handles simple integers on-the-fly. Otherwise, it dispatches to the
-** base-specific parser. Hex and octal is straightforward.
-**
-** Decimal to binary conversion uses a fixed-length circular buffer in
-** base 100. Some simple cases are handled directly. For other cases, the
-** number in the buffer is up-scaled or down-scaled until the integer part
-** is in the proper range. Then the integer part is rounded and converted
-** to a double which is finally rescaled to the result. Denormals need
-** special treatment to prevent incorrect 'double rounding'.
-*/
-
-/* Definitions for circular decimal digit buffer (base 100 = 2 digits/byte). */
-#define STRSCAN_DIG 1024
-#define STRSCAN_MAXDIG 800 /* 772 + extra are sufficient. */
-#define STRSCAN_DDIG (STRSCAN_DIG/2)
-#define STRSCAN_DMASK (STRSCAN_DDIG-1)
-
-/* Helpers for circular buffer. */
-#define DNEXT(a) (((a)+1) & STRSCAN_DMASK)
-#define DPREV(a) (((a)-1) & STRSCAN_DMASK)
-#define DLEN(lo, hi) ((int32_t)(((lo)-(hi)) & STRSCAN_DMASK))
-
-#define casecmp(c, k) (((c) | 0x20) == k)
-
-/* Final conversion to double. */
-static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg)
-{
- double n;
-
- /* Avoid double rounding for denormals. */
- if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) {
- /* NYI: all of this generates way too much code on 32 bit CPUs. */
-#if defined(__GNUC__) && LJ_64
- int32_t b = (int32_t)(__builtin_clzll(x)^63);
-#else
- int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) :
- (int32_t)lj_fls((uint32_t)x);
-#endif
- if ((int32_t)b + ex2 <= -1023 && (int32_t)b + ex2 >= -1075) {
- uint64_t rb = (uint64_t)1 << (-1075-ex2);
- if ((x & rb) && ((x & (rb+rb+rb-1)))) x += rb+rb;
- x = (x & ~(rb+rb-1));
- }
- }
-
- /* Convert to double using a signed int64_t conversion, then rescale. */
- lua_assert((int64_t)x >= 0);
- n = (double)(int64_t)x;
- if (neg) n = -n;
- if (ex2) n = ldexp(n, ex2);
- o->n = n;
-}
-
-/* Parse hexadecimal number. */
-static StrScanFmt strscan_hex(const uint8_t *p, TValue *o,
- StrScanFmt fmt, uint32_t opt,
- int32_t ex2, int32_t neg, uint32_t dig)
-{
- uint64_t x = 0;
- uint32_t i;
-
- /* Scan hex digits. */
- for (i = dig > 16 ? 16 : dig ; i; i--, p++) {
- uint32_t d = (*p != '.' ? *p : *++p); if (d > '9') d += 9;
- x = (x << 4) + (d & 15);
- }
-
- /* Summarize rounding-effect of excess digits. */
- for (i = 16; i < dig; i++, p++)
- x |= ((*p != '.' ? *p : *++p) != '0'), ex2 += 4;
-
- /* Format-specific handling. */
- switch (fmt) {
- case STRSCAN_INT:
- if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
- o->i = neg ? -(int32_t)x : (int32_t)x;
- return STRSCAN_INT; /* Fast path for 32 bit integers. */
- }
- if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; }
- /* fallthrough */
- case STRSCAN_U32:
- if (dig > 8) return STRSCAN_ERROR;
- o->i = neg ? -(int32_t)x : (int32_t)x;
- return STRSCAN_U32;
- case STRSCAN_I64:
- case STRSCAN_U64:
- if (dig > 16) return STRSCAN_ERROR;
- o->u64 = neg ? (uint64_t)-(int64_t)x : x;
- return fmt;
- default:
- break;
- }
-
- /* Reduce range then convert to double. */
- if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
- strscan_double(x, o, ex2, neg);
- return fmt;
-}
-
-/* Parse octal number. */
-static StrScanFmt strscan_oct(const uint8_t *p, TValue *o,
- StrScanFmt fmt, int32_t neg, uint32_t dig)
-{
- uint64_t x = 0;
-
- /* Scan octal digits. */
- if (dig > 22 || (dig == 22 && *p > '1')) return STRSCAN_ERROR;
- while (dig-- > 0) {
- if (!(*p >= '0' && *p <= '7')) return STRSCAN_ERROR;
- x = (x << 3) + (*p++ & 7);
- }
-
- /* Format-specific handling. */
- switch (fmt) {
- case STRSCAN_INT:
- if (x >= 0x80000000u+neg) fmt = STRSCAN_U32;
- /* fallthrough */
- case STRSCAN_U32:
- if ((x >> 32)) return STRSCAN_ERROR;
- o->i = neg ? -(int32_t)x : (int32_t)x;
- break;
- default:
- case STRSCAN_I64:
- case STRSCAN_U64:
- o->u64 = neg ? (uint64_t)-(int64_t)x : x;
- break;
- }
- return fmt;
-}
-
-/* Parse decimal number. */
-static StrScanFmt strscan_dec(const uint8_t *p, TValue *o,
- StrScanFmt fmt, uint32_t opt,
- int32_t ex10, int32_t neg, uint32_t dig)
-{
- uint8_t xi[STRSCAN_DDIG], *xip = xi;
-
- if (dig) {
- uint32_t i = dig;
- if (i > STRSCAN_MAXDIG) {
- ex10 += (int32_t)(i - STRSCAN_MAXDIG);
- i = STRSCAN_MAXDIG;
- }
- /* Scan unaligned leading digit. */
- if (((ex10^i) & 1))
- *xip++ = ((*p != '.' ? *p : *++p) & 15), i--, p++;
- /* Scan aligned double-digits. */
- for ( ; i > 1; i -= 2) {
- uint32_t d = 10 * ((*p != '.' ? *p : *++p) & 15); p++;
- *xip++ = d + ((*p != '.' ? *p : *++p) & 15); p++;
- }
- /* Scan and realign trailing digit. */
- if (i) *xip++ = 10 * ((*p != '.' ? *p : *++p) & 15), ex10--, dig++, p++;
-
- /* Summarize rounding-effect of excess digits. */
- if (dig > STRSCAN_MAXDIG) {
- do {
- if ((*p != '.' ? *p : *++p) != '0') { xip[-1] |= 1; break; }
- p++;
- } while (--dig > STRSCAN_MAXDIG);
- dig = STRSCAN_MAXDIG;
- } else { /* Simplify exponent. */
- while (ex10 > 0 && dig <= 18) *xip++ = 0, ex10 -= 2, dig += 2;
- }
- } else { /* Only got zeros. */
- ex10 = 0;
- xi[0] = 0;
- }
-
- /* Fast path for numbers in integer format (but handles e.g. 1e6, too). */
- if (dig <= 20 && ex10 == 0) {
- uint8_t *xis;
- uint64_t x = xi[0];
- double n;
- for (xis = xi+1; xis < xip; xis++) x = x * 100 + *xis;
- if (!(dig == 20 && (xi[0] > 18 || (int64_t)x >= 0))) { /* No overflow? */
- /* Format-specific handling. */
- switch (fmt) {
- case STRSCAN_INT:
- if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
- o->i = neg ? -(int32_t)x : (int32_t)x;
- return STRSCAN_INT; /* Fast path for 32 bit integers. */
- }
- if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; goto plainnumber; }
- /* fallthrough */
- case STRSCAN_U32:
- if ((x >> 32) != 0) return STRSCAN_ERROR;
- o->i = neg ? -(int32_t)x : (int32_t)x;
- return STRSCAN_U32;
- case STRSCAN_I64:
- case STRSCAN_U64:
- o->u64 = neg ? (uint64_t)-(int64_t)x : x;
- return fmt;
- default:
- plainnumber: /* Fast path for plain numbers < 2^63. */
- if ((int64_t)x < 0) break;
- n = (double)(int64_t)x;
- if (neg) n = -n;
- o->n = n;
- return fmt;
- }
- }
- }
-
- /* Slow non-integer path. */
- if (fmt == STRSCAN_INT) {
- if ((opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
- fmt = STRSCAN_NUM;
- } else if (fmt > STRSCAN_INT) {
- return STRSCAN_ERROR;
- }
- {
- uint32_t hi = 0, lo = (uint32_t)(xip-xi);
- int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1);
-
- lua_assert(lo > 0 && (ex10 & 1) == 0);
-
- /* Handle simple overflow/underflow. */
- if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; }
- else if (idig < -326/2) { o->n = neg ? -0.0 : 0.0; return fmt; }
-
- /* Scale up until we have at least 17 or 18 integer part digits. */
- while (idig < 9 && idig < DLEN(lo, hi)) {
- uint32_t i, cy = 0;
- ex2 -= 6;
- for (i = DPREV(lo); ; i = DPREV(i)) {
- uint32_t d = (xi[i] << 6) + cy;
- cy = (((d >> 2) * 5243) >> 17); d = d - cy * 100; /* Div/mod 100. */
- xi[i] = (uint8_t)d;
- if (i == hi) break;
- if (d == 0 && i == DPREV(lo)) lo = i;
- }
- if (cy) {
- hi = DPREV(hi);
- if (xi[DPREV(lo)] == 0) lo = DPREV(lo);
- else if (hi == lo) { lo = DPREV(lo); xi[DPREV(lo)] |= xi[lo]; }
- xi[hi] = (uint8_t)cy; idig++;
- }
- }
-
- /* Scale down until no more than 17 or 18 integer part digits remain. */
- while (idig > 9) {
- uint32_t i = hi, cy = 0;
- ex2 += 6;
- do {
- cy += xi[i];
- xi[i] = (cy >> 6);
- cy = 100 * (cy & 0x3f);
- if (xi[i] == 0 && i == hi) hi = DNEXT(hi), idig--;
- i = DNEXT(i);
- } while (i != lo);
- while (cy) {
- if (hi == lo) { xi[DPREV(lo)] |= 1; break; }
- xi[lo] = (cy >> 6); lo = DNEXT(lo);
- cy = 100 * (cy & 0x3f);
- }
- }
-
- /* Collect integer part digits and convert to rescaled double. */
- {
- uint64_t x = xi[hi];
- uint32_t i;
- for (i = DNEXT(hi); --idig > 0 && i != lo; i = DNEXT(i))
- x = x * 100 + xi[i];
- if (i == lo) {
- while (--idig >= 0) x = x * 100;
- } else { /* Gather round bit from remaining digits. */
- x <<= 1; ex2--;
- do {
- if (xi[i]) { x |= 1; break; }
- i = DNEXT(i);
- } while (i != lo);
- }
- strscan_double(x, o, ex2, neg);
- }
- }
- return fmt;
-}
-
-/* Scan string containing a number. Returns format. Returns value in o. */
-StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
-{
- int32_t neg = 0;
-
- /* Remove leading space, parse sign and non-numbers. */
- if (LJ_UNLIKELY(!lj_char_isdigit(*p))) {
- while (lj_char_isspace(*p)) p++;
- if (*p == '+' || *p == '-') neg = (*p++ == '-');
- if (LJ_UNLIKELY(*p >= 'A')) { /* Parse "inf", "infinity" or "nan". */
- TValue tmp;
- setnanV(&tmp);
- if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'f')) {
- if (neg) setminfV(&tmp); else setpinfV(&tmp);
- p += 3;
- if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'i') &&
- casecmp(p[3],'t') && casecmp(p[4],'y')) p += 5;
- } else if (casecmp(p[0],'n') && casecmp(p[1],'a') && casecmp(p[2],'n')) {
- p += 3;
- }
- while (lj_char_isspace(*p)) p++;
- if (*p) return STRSCAN_ERROR;
- o->u64 = tmp.u64;
- return STRSCAN_NUM;
- }
- }
-
- /* Parse regular number. */
- {
- StrScanFmt fmt = STRSCAN_INT;
- int cmask = LJ_CHAR_DIGIT;
- int base = (opt & STRSCAN_OPT_C) && *p == '0' ? 0 : 10;
- const uint8_t *sp, *dp = NULL;
- uint32_t dig = 0, hasdig = 0, x = 0;
- int32_t ex = 0;
-
- /* Determine base and skip leading zeros. */
- if (LJ_UNLIKELY(*p <= '0')) {
- if (*p == '0' && casecmp(p[1], 'x'))
- base = 16, cmask = LJ_CHAR_XDIGIT, p += 2;
- for ( ; ; p++) {
- if (*p == '0') {
- hasdig = 1;
- } else if (*p == '.') {
- if (dp) return STRSCAN_ERROR;
- dp = p;
- } else {
- break;
- }
- }
- }
-
- /* Preliminary digit and decimal point scan. */
- for (sp = p; ; p++) {
- if (LJ_LIKELY(lj_char_isa(*p, cmask))) {
- x = x * 10 + (*p & 15); /* For fast path below. */
- dig++;
- } else if (*p == '.') {
- if (dp) return STRSCAN_ERROR;
- dp = p;
- } else {
- break;
- }
- }
- if (!(hasdig | dig)) return STRSCAN_ERROR;
-
- /* Handle decimal point. */
- if (dp) {
- fmt = STRSCAN_NUM;
- if (dig) {
- ex = (int32_t)(dp-(p-1)); dp = p-1;
- while (ex < 0 && *dp-- == '0') ex++, dig--; /* Skip trailing zeros. */
- if (base == 16) ex *= 4;
- }
- }
-
- /* Parse exponent. */
- if (casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) {
- uint32_t xx;
- int negx = 0;
- fmt = STRSCAN_NUM; p++;
- if (*p == '+' || *p == '-') negx = (*p++ == '-');
- if (!lj_char_isdigit(*p)) return STRSCAN_ERROR;
- xx = (*p++ & 15);
- while (lj_char_isdigit(*p)) {
- if (xx < 65536) xx = xx * 10 + (*p & 15);
- p++;
- }
- ex += negx ? -(int32_t)xx : (int32_t)xx;
- }
-
- /* Parse suffix. */
- if (*p) {
- /* I (IMAG), U (U32), LL (I64), ULL/LLU (U64), L (long), UL/LU (ulong). */
- /* NYI: f (float). Not needed until cp_number() handles non-integers. */
- if (casecmp(*p, 'i')) {
- if (!(opt & STRSCAN_OPT_IMAG)) return STRSCAN_ERROR;
- p++; fmt = STRSCAN_IMAG;
- } else if (fmt == STRSCAN_INT) {
- if (casecmp(*p, 'u')) p++, fmt = STRSCAN_U32;
- if (casecmp(*p, 'l')) {
- p++;
- if (casecmp(*p, 'l')) p++, fmt += STRSCAN_I64 - STRSCAN_INT;
- else if (!(opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
- else if (sizeof(long) == 8) fmt += STRSCAN_I64 - STRSCAN_INT;
- }
- if (casecmp(*p, 'u') && (fmt == STRSCAN_INT || fmt == STRSCAN_I64))
- p++, fmt += STRSCAN_U32 - STRSCAN_INT;
- if ((fmt == STRSCAN_U32 && !(opt & STRSCAN_OPT_C)) ||
- (fmt >= STRSCAN_I64 && !(opt & STRSCAN_OPT_LL)))
- return STRSCAN_ERROR;
- }
- while (lj_char_isspace(*p)) p++;
- if (*p) return STRSCAN_ERROR;
- }
-
- /* Fast path for decimal 32 bit integers. */
- if (fmt == STRSCAN_INT && base == 10 &&
- (dig < 10 || (dig == 10 && *sp <= '2' && x < 0x80000000u+neg))) {
- int32_t y = neg ? -(int32_t)x : (int32_t)x;
- if ((opt & STRSCAN_OPT_TONUM)) {
- o->n = (double)y;
- return STRSCAN_NUM;
- } else {
- o->i = y;
- return STRSCAN_INT;
- }
- }
-
- /* Dispatch to base-specific parser. */
- if (base == 0 && !(fmt == STRSCAN_NUM || fmt == STRSCAN_IMAG))
- return strscan_oct(sp, o, fmt, neg, dig);
- if (base == 16)
- fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig);
- else
- fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig);
-
- /* Try to convert number to integer, if requested. */
- if (fmt == STRSCAN_NUM && (opt & STRSCAN_OPT_TOINT)) {
- double n = o->n;
- int32_t i = lj_num2int(n);
- if (n == (lua_Number)i) { o->i = i; return STRSCAN_INT; }
- }
- return fmt;
- }
-}
-
-int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o)
-{
- StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o,
- STRSCAN_OPT_TONUM);
- lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM);
- return (fmt != STRSCAN_ERROR);
-}
-
-#if LJ_DUALNUM
-int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o)
-{
- StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o,
- STRSCAN_OPT_TOINT);
- lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT);
- if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM);
- return (fmt != STRSCAN_ERROR);
-}
-#endif
-
-#undef DNEXT
-#undef DPREV
-#undef DLEN
-
+/*
+** String scanning.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <math.h>
+
+#define lj_strscan_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+
+/* -- Scanning numbers ---------------------------------------------------- */
+
+/*
+** Rationale for the builtin string to number conversion library:
+**
+** It removes a dependency on libc's strtod(), which is a true portability
+** nightmare. Mainly due to the plethora of supported OS and toolchain
+** combinations. Sadly, the various implementations
+** a) are often buggy, incomplete (no hex floats) and/or imprecise,
+** b) sometimes crash or hang on certain inputs,
+** c) return non-standard NaNs that need to be filtered out, and
+** d) fail if the locale-specific decimal separator is not a dot,
+** which can only be fixed with atrocious workarounds.
+**
+** Also, most of the strtod() implementations are hopelessly bloated,
+** which is not just an I-cache hog, but a problem for static linkage
+** on embedded systems, too.
+**
+** OTOH the builtin conversion function is very compact. Even though it
+** does a lot more, like parsing long longs, octal or imaginary numbers
+** and returning the result in different formats:
+** a) It needs less than 3 KB (!) of machine code (on x64 with -Os),
+** b) it doesn't perform any dynamic allocation and,
+** c) it needs only around 600 bytes of stack space.
+**
+** The builtin function is faster than strtod() for typical inputs, e.g.
+** "123", "1.5" or "1e6". Arguably, it's slower for very large exponents,
+** which are not very common (this could be fixed, if needed).
+**
+** And most importantly, the builtin function is equally precise on all
+** platforms. It correctly converts and rounds any input to a double.
+** If this is not the case, please send a bug report -- but PLEASE verify
+** that the implementation you're comparing to is not the culprit!
+**
+** The implementation quickly pre-scans the entire string first and
+** handles simple integers on-the-fly. Otherwise, it dispatches to the
+** base-specific parser. Hex and octal is straightforward.
+**
+** Decimal to binary conversion uses a fixed-length circular buffer in
+** base 100. Some simple cases are handled directly. For other cases, the
+** number in the buffer is up-scaled or down-scaled until the integer part
+** is in the proper range. Then the integer part is rounded and converted
+** to a double which is finally rescaled to the result. Denormals need
+** special treatment to prevent incorrect 'double rounding'.
+*/
+
+/* Definitions for circular decimal digit buffer (base 100 = 2 digits/byte). */
+#define STRSCAN_DIG 1024
+#define STRSCAN_MAXDIG 800 /* 772 + extra are sufficient. */
+#define STRSCAN_DDIG (STRSCAN_DIG/2)
+#define STRSCAN_DMASK (STRSCAN_DDIG-1)
+
+/* Helpers for circular buffer. */
+#define DNEXT(a) (((a)+1) & STRSCAN_DMASK)
+#define DPREV(a) (((a)-1) & STRSCAN_DMASK)
+#define DLEN(lo, hi) ((int32_t)(((lo)-(hi)) & STRSCAN_DMASK))
+
+#define casecmp(c, k) (((c) | 0x20) == k)
+
+/* Final conversion to double. */
+static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg)
+{
+ double n;
+
+ /* Avoid double rounding for denormals. */
+ if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) {
+ /* NYI: all of this generates way too much code on 32 bit CPUs. */
+#if defined(__GNUC__) && LJ_64
+ int32_t b = (int32_t)(__builtin_clzll(x)^63);
+#else
+ int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) :
+ (int32_t)lj_fls((uint32_t)x);
+#endif
+ if ((int32_t)b + ex2 <= -1023 && (int32_t)b + ex2 >= -1075) {
+ uint64_t rb = (uint64_t)1 << (-1075-ex2);
+ if ((x & rb) && ((x & (rb+rb+rb-1)))) x += rb+rb;
+ x = (x & ~(rb+rb-1));
+ }
+ }
+
+ /* Convert to double using a signed int64_t conversion, then rescale. */
+ lua_assert((int64_t)x >= 0);
+ n = (double)(int64_t)x;
+ if (neg) n = -n;
+ if (ex2) n = ldexp(n, ex2);
+ o->n = n;
+}
+
+/* Parse hexadecimal number. */
+static StrScanFmt strscan_hex(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, uint32_t opt,
+ int32_t ex2, int32_t neg, uint32_t dig)
+{
+ uint64_t x = 0;
+ uint32_t i;
+
+ /* Scan hex digits. */
+ for (i = dig > 16 ? 16 : dig ; i; i--, p++) {
+ uint32_t d = (*p != '.' ? *p : *++p); if (d > '9') d += 9;
+ x = (x << 4) + (d & 15);
+ }
+
+ /* Summarize rounding-effect of excess digits. */
+ for (i = 16; i < dig; i++, p++)
+ x |= ((*p != '.' ? *p : *++p) != '0'), ex2 += 4;
+
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_INT; /* Fast path for 32 bit integers. */
+ }
+ if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; }
+ /* fallthrough */
+ case STRSCAN_U32:
+ if (dig > 8) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_U32;
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ if (dig > 16) return STRSCAN_ERROR;
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ return fmt;
+ default:
+ break;
+ }
+
+ /* Reduce range then convert to double. */
+ if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
+ strscan_double(x, o, ex2, neg);
+ return fmt;
+}
+
+/* Parse octal number. */
+static StrScanFmt strscan_oct(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, int32_t neg, uint32_t dig)
+{
+ uint64_t x = 0;
+
+ /* Scan octal digits. */
+ if (dig > 22 || (dig == 22 && *p > '1')) return STRSCAN_ERROR;
+ while (dig-- > 0) {
+ if (!(*p >= '0' && *p <= '7')) return STRSCAN_ERROR;
+ x = (x << 3) + (*p++ & 7);
+ }
+
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (x >= 0x80000000u+neg) fmt = STRSCAN_U32;
+ /* fallthrough */
+ case STRSCAN_U32:
+ if ((x >> 32)) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ break;
+ default:
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ break;
+ }
+ return fmt;
+}
+
+/* Parse decimal number. */
+static StrScanFmt strscan_dec(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, uint32_t opt,
+ int32_t ex10, int32_t neg, uint32_t dig)
+{
+ uint8_t xi[STRSCAN_DDIG], *xip = xi;
+
+ if (dig) {
+ uint32_t i = dig;
+ if (i > STRSCAN_MAXDIG) {
+ ex10 += (int32_t)(i - STRSCAN_MAXDIG);
+ i = STRSCAN_MAXDIG;
+ }
+ /* Scan unaligned leading digit. */
+ if (((ex10^i) & 1))
+ *xip++ = ((*p != '.' ? *p : *++p) & 15), i--, p++;
+ /* Scan aligned double-digits. */
+ for ( ; i > 1; i -= 2) {
+ uint32_t d = 10 * ((*p != '.' ? *p : *++p) & 15); p++;
+ *xip++ = d + ((*p != '.' ? *p : *++p) & 15); p++;
+ }
+ /* Scan and realign trailing digit. */
+ if (i) *xip++ = 10 * ((*p != '.' ? *p : *++p) & 15), ex10--, p++;
+
+ /* Summarize rounding-effect of excess digits. */
+ if (dig > STRSCAN_MAXDIG) {
+ do {
+ if ((*p != '.' ? *p : *++p) != '0') { xip[-1] |= 1; break; }
+ p++;
+ } while (--dig > STRSCAN_MAXDIG);
+ dig = STRSCAN_MAXDIG;
+ } else { /* Simplify exponent. */
+ while (ex10 > 0 && dig <= 18) *xip++ = 0, ex10 -= 2, dig += 2;
+ }
+ } else { /* Only got zeros. */
+ ex10 = 0;
+ xi[0] = 0;
+ }
+
+ /* Fast path for numbers in integer format (but handles e.g. 1e6, too). */
+ if (dig <= 20 && ex10 == 0) {
+ uint8_t *xis;
+ uint64_t x = xi[0];
+ double n;
+ for (xis = xi+1; xis < xip; xis++) x = x * 100 + *xis;
+ if (!(dig == 20 && (xi[0] > 18 || (int64_t)x >= 0))) { /* No overflow? */
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_INT; /* Fast path for 32 bit integers. */
+ }
+ if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; goto plainnumber; }
+ /* fallthrough */
+ case STRSCAN_U32:
+ if ((x >> 32) != 0) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_U32;
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ return fmt;
+ default:
+ plainnumber: /* Fast path for plain numbers < 2^63. */
+ if ((int64_t)x < 0) break;
+ n = (double)(int64_t)x;
+ if (neg) n = -n;
+ o->n = n;
+ return fmt;
+ }
+ }
+ }
+
+ /* Slow non-integer path. */
+ if (fmt == STRSCAN_INT) {
+ if ((opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
+ fmt = STRSCAN_NUM;
+ } else if (fmt > STRSCAN_INT) {
+ return STRSCAN_ERROR;
+ }
+ {
+ uint32_t hi = 0, lo = (uint32_t)(xip-xi);
+ int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1);
+
+ lua_assert(lo > 0 && (ex10 & 1) == 0);
+
+ /* Handle simple overflow/underflow. */
+ if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; }
+ else if (idig < -326/2) { o->n = neg ? -0.0 : 0.0; return fmt; }
+
+ /* Scale up until we have at least 17 or 18 integer part digits. */
+ while (idig < 9 && idig < DLEN(lo, hi)) {
+ uint32_t i, cy = 0;
+ ex2 -= 6;
+ for (i = DPREV(lo); ; i = DPREV(i)) {
+ uint32_t d = (xi[i] << 6) + cy;
+ cy = (((d >> 2) * 5243) >> 17); d = d - cy * 100; /* Div/mod 100. */
+ xi[i] = (uint8_t)d;
+ if (i == hi) break;
+ if (d == 0 && i == DPREV(lo)) lo = i;
+ }
+ if (cy) {
+ hi = DPREV(hi);
+ if (xi[DPREV(lo)] == 0) lo = DPREV(lo);
+ else if (hi == lo) { lo = DPREV(lo); xi[DPREV(lo)] |= xi[lo]; }
+ xi[hi] = (uint8_t)cy; idig++;
+ }
+ }
+
+ /* Scale down until no more than 17 or 18 integer part digits remain. */
+ while (idig > 9) {
+ uint32_t i, cy = 0;
+ ex2 += 6;
+ for (i = hi; i != lo; i = DNEXT(i)) {
+ cy += xi[i];
+ xi[i] = (cy >> 6);
+ cy = 100 * (cy & 0x3f);
+ if (xi[i] == 0 && i == hi) hi = DNEXT(hi), idig--;
+ }
+ while (cy) {
+ if (hi == lo) { xi[DPREV(lo)] |= 1; break; }
+ xi[lo] = (cy >> 6); lo = DNEXT(lo);
+ cy = 100 * (cy & 0x3f);
+ }
+ }
+
+ /* Collect integer part digits and convert to rescaled double. */
+ {
+ uint64_t x = xi[hi];
+ uint32_t i;
+ for (i = DNEXT(hi); --idig > 0 && i != lo; i = DNEXT(i))
+ x = x * 100 + xi[i];
+ if (i == lo) {
+ while (--idig >= 0) x = x * 100;
+ } else { /* Gather round bit from remaining digits. */
+ x <<= 1; ex2--;
+ do {
+ if (xi[i]) { x |= 1; break; }
+ i = DNEXT(i);
+ } while (i != lo);
+ }
+ strscan_double(x, o, ex2, neg);
+ }
+ }
+ return fmt;
+}
+
+/* Scan string containing a number. Returns format. Returns value in o. */
+StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt)
+{
+ int32_t neg = 0;
+
+ /* Remove leading space, parse sign and non-numbers. */
+ if (LJ_UNLIKELY(!lj_char_isdigit(*p))) {
+ while (lj_char_isspace(*p)) p++;
+ if (*p == '+' || *p == '-') neg = (*p++ == '-');
+ if (LJ_UNLIKELY(*p >= 'A')) { /* Parse "inf", "infinity" or "nan". */
+ TValue tmp;
+ setnanV(&tmp);
+ if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'f')) {
+ if (neg) setminfV(&tmp); else setpinfV(&tmp);
+ p += 3;
+ if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'i') &&
+ casecmp(p[3],'t') && casecmp(p[4],'y')) p += 5;
+ } else if (casecmp(p[0],'n') && casecmp(p[1],'a') && casecmp(p[2],'n')) {
+ p += 3;
+ }
+ while (lj_char_isspace(*p)) p++;
+ if (*p) return STRSCAN_ERROR;
+ o->u64 = tmp.u64;
+ return STRSCAN_NUM;
+ }
+ }
+
+ /* Parse regular number. */
+ {
+ StrScanFmt fmt = STRSCAN_INT;
+ int cmask = LJ_CHAR_DIGIT;
+ int base = (opt & STRSCAN_OPT_C) && *p == '0' ? 0 : 10;
+ const uint8_t *sp, *dp = NULL;
+ uint32_t dig = 0, hasdig = 0, x = 0;
+ int32_t ex = 0;
+
+ /* Determine base and skip leading zeros. */
+ if (LJ_UNLIKELY(*p <= '0')) {
+ if (*p == '0' && casecmp(p[1], 'x'))
+ base = 16, cmask = LJ_CHAR_XDIGIT, p += 2;
+ for ( ; ; p++) {
+ if (*p == '0') {
+ hasdig = 1;
+ } else if (*p == '.') {
+ if (dp) return STRSCAN_ERROR;
+ dp = p;
+ } else {
+ break;
+ }
+ }
+ }
+
+ /* Preliminary digit and decimal point scan. */
+ for (sp = p; ; p++) {
+ if (LJ_LIKELY(lj_char_isa(*p, cmask))) {
+ x = x * 10 + (*p & 15); /* For fast path below. */
+ dig++;
+ } else if (*p == '.') {
+ if (dp) return STRSCAN_ERROR;
+ dp = p;
+ } else {
+ break;
+ }
+ }
+ if (!(hasdig | dig)) return STRSCAN_ERROR;
+
+ /* Handle decimal point. */
+ if (dp) {
+ fmt = STRSCAN_NUM;
+ if (dig) {
+ ex = (int32_t)(dp-(p-1)); dp = p-1;
+ while (ex < 0 && *dp-- == '0') ex++, dig--; /* Skip trailing zeros. */
+ if (base == 16) ex *= 4;
+ }
+ }
+
+ /* Parse exponent. */
+ if (casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) {
+ uint32_t xx;
+ int negx = 0;
+ fmt = STRSCAN_NUM; p++;
+ if (*p == '+' || *p == '-') negx = (*p++ == '-');
+ if (!lj_char_isdigit(*p)) return STRSCAN_ERROR;
+ xx = (*p++ & 15);
+ while (lj_char_isdigit(*p)) {
+ if (xx < 65536) xx = xx * 10 + (*p & 15);
+ p++;
+ }
+ ex += negx ? -(int32_t)xx : (int32_t)xx;
+ }
+
+ /* Parse suffix. */
+ if (*p) {
+ /* I (IMAG), U (U32), LL (I64), ULL/LLU (U64), L (long), UL/LU (ulong). */
+ /* NYI: f (float). Not needed until cp_number() handles non-integers. */
+ if (casecmp(*p, 'i')) {
+ if (!(opt & STRSCAN_OPT_IMAG)) return STRSCAN_ERROR;
+ p++; fmt = STRSCAN_IMAG;
+ } else if (fmt == STRSCAN_INT) {
+ if (casecmp(*p, 'u')) p++, fmt = STRSCAN_U32;
+ if (casecmp(*p, 'l')) {
+ p++;
+ if (casecmp(*p, 'l')) p++, fmt += STRSCAN_I64 - STRSCAN_INT;
+ else if (!(opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
+ else if (sizeof(long) == 8) fmt += STRSCAN_I64 - STRSCAN_INT;
+ }
+ if (casecmp(*p, 'u') && (fmt == STRSCAN_INT || fmt == STRSCAN_I64))
+ p++, fmt += STRSCAN_U32 - STRSCAN_INT;
+ if ((fmt == STRSCAN_U32 && !(opt & STRSCAN_OPT_C)) ||
+ (fmt >= STRSCAN_I64 && !(opt & STRSCAN_OPT_LL)))
+ return STRSCAN_ERROR;
+ }
+ while (lj_char_isspace(*p)) p++;
+ if (*p) return STRSCAN_ERROR;
+ }
+
+ /* Fast path for decimal 32 bit integers. */
+ if (fmt == STRSCAN_INT && base == 10 &&
+ (dig < 10 || (dig == 10 && *sp <= '2' && x < 0x80000000u+neg))) {
+ int32_t y = neg ? -(int32_t)x : (int32_t)x;
+ if ((opt & STRSCAN_OPT_TONUM)) {
+ o->n = (double)y;
+ return STRSCAN_NUM;
+ } else {
+ o->i = y;
+ return STRSCAN_INT;
+ }
+ }
+
+ /* Dispatch to base-specific parser. */
+ if (base == 0 && !(fmt == STRSCAN_NUM || fmt == STRSCAN_IMAG))
+ return strscan_oct(sp, o, fmt, neg, dig);
+ if (base == 16)
+ fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig);
+ else
+ fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig);
+
+ /* Try to convert number to integer, if requested. */
+ if (fmt == STRSCAN_NUM && (opt & STRSCAN_OPT_TOINT)) {
+ double n = o->n;
+ int32_t i = lj_num2int(n);
+ if (n == (lua_Number)i) { o->i = i; return STRSCAN_INT; }
+ }
+ return fmt;
+ }
+}
+
+int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o)
+{
+ StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o,
+ STRSCAN_OPT_TONUM);
+ lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM);
+ return (fmt != STRSCAN_ERROR);
+}
+
+#if LJ_DUALNUM
+int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o)
+{
+ StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), o,
+ STRSCAN_OPT_TOINT);
+ lua_assert(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT);
+ if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM);
+ return (fmt != STRSCAN_ERROR);
+}
+#endif
+
+#undef DNEXT
+#undef DPREV
+#undef DLEN
+
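
The scanner's contract is easiest to see from a few concrete inputs. The checks below are an illustrative sketch, not a test that ships with the patch; they assume the file is compiled and linked inside the LuaJIT tree, and each expectation follows the code paths of lj_strscan_scan() above.

#include <assert.h>
#include "lj_strscan.h"

static void strscan_demo(void)
{
  TValue o;
  /* Small decimal integer: handled by the 32 bit fast path. */
  assert(lj_strscan_scan((const uint8_t *)"123", &o, 0) == STRSCAN_INT);
  assert(o.i == 123);
  /* STRSCAN_OPT_TONUM forces the same input to a double. */
  assert(lj_strscan_scan((const uint8_t *)"123", &o, STRSCAN_OPT_TONUM) == STRSCAN_NUM);
  assert(o.n == 123.0);
  /* An exponent always yields a number. */
  assert(lj_strscan_scan((const uint8_t *)"1e6", &o, 0) == STRSCAN_NUM);
  assert(o.n == 1000000.0);
  /* Hex literals are recognized even without STRSCAN_OPT_C. */
  assert(lj_strscan_scan((const uint8_t *)"0x10", &o, 0) == STRSCAN_INT);
  assert(o.i == 16);
  /* Trailing garbage is rejected. */
  assert(lj_strscan_scan((const uint8_t *)"12ab", &o, 0) == STRSCAN_ERROR);
}

int main(void) { strscan_demo(); return 0; }

STRSCAN_OPT_TOINT (used by lj_strscan_number()) works the other way around: a STRSCAN_NUM result whose value happens to be an exact 32 bit integer is converted back to STRSCAN_INT.
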
diff --git a/3rdparty/lua/src/lj_strscan.h b/3rdparty/lua/src/lj_strscan.h
index e187196..9557d67 100644
--- a/3rdparty/lua/src/lj_strscan.h
+++ b/3rdparty/lua/src/lj_strscan.h
@@ -1,39 +1,39 @@
-/*
-** String scanning.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_STRSCAN_H
-#define _LJ_STRSCAN_H
-
-#include "lj_obj.h"
-
-/* Options for accepted/returned formats. */
-#define STRSCAN_OPT_TOINT 0x01 /* Convert to int32_t, if possible. */
-#define STRSCAN_OPT_TONUM 0x02 /* Always convert to double. */
-#define STRSCAN_OPT_IMAG 0x04
-#define STRSCAN_OPT_LL 0x08
-#define STRSCAN_OPT_C 0x10
-
-/* Returned format. */
-typedef enum {
- STRSCAN_ERROR,
- STRSCAN_NUM, STRSCAN_IMAG,
- STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64,
-} StrScanFmt;
-
-LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt);
-LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o);
-#if LJ_DUALNUM
-LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o);
-#else
-#define lj_strscan_number(s, o) lj_strscan_num((s), (o))
-#endif
-
-/* Check for number or convert string to number/int in-place (!). */
-static LJ_AINLINE int lj_strscan_numberobj(TValue *o)
-{
- return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o));
-}
-
-#endif
+/*
+** String scanning.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STRSCAN_H
+#define _LJ_STRSCAN_H
+
+#include "lj_obj.h"
+
+/* Options for accepted/returned formats. */
+#define STRSCAN_OPT_TOINT 0x01 /* Convert to int32_t, if possible. */
+#define STRSCAN_OPT_TONUM 0x02 /* Always convert to double. */
+#define STRSCAN_OPT_IMAG 0x04
+#define STRSCAN_OPT_LL 0x08
+#define STRSCAN_OPT_C 0x10
+
+/* Returned format. */
+typedef enum {
+ STRSCAN_ERROR,
+ STRSCAN_NUM, STRSCAN_IMAG,
+ STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64,
+} StrScanFmt;
+
+LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt);
+LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o);
+#if LJ_DUALNUM
+LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o);
+#else
+#define lj_strscan_number(s, o) lj_strscan_num((s), (o))
+#endif
+
+/* Check for number or convert string to number/int in-place (!). */
+static LJ_AINLINE int lj_strscan_numberobj(TValue *o)
+{
+ return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o));
+}
+
+#endif
diff --git a/3rdparty/lua/src/lj_tab.c b/3rdparty/lua/src/lj_tab.c
index 2646abe..ccad1f6 100644
--- a/3rdparty/lua/src/lj_tab.c
+++ b/3rdparty/lua/src/lj_tab.c
@@ -1,631 +1,624 @@
-/*
-** Table handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#define lj_tab_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_err.h"
-#include "lj_tab.h"
-
-/* -- Object hashing ------------------------------------------------------ */
-
-/* Hash values are masked with the table hash mask and used as an index. */
-static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash)
-{
- Node *n = noderef(t->node);
- return &n[hash & t->hmask];
-}
-
-/* String hashes are precomputed when they are interned. */
-#define hashstr(t, s) hashmask(t, (s)->hash)
-
-#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi)))
-#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1))
-#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS)
-#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS)
-
-/* Hash an arbitrary key and return its anchor position in the hash table. */
-static Node *hashkey(const GCtab *t, cTValue *key)
-{
- lua_assert(!tvisint(key));
- if (tvisstr(key))
- return hashstr(t, strV(key));
- else if (tvisnum(key))
- return hashnum(t, key);
- else if (tvisbool(key))
- return hashmask(t, boolV(key));
- else
- return hashgcref(t, key->gcr);
- /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */
-}
-
-/* -- Table creation and destruction -------------------------------------- */
-
-/* Create new hash part for table. */
-static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
-{
- uint32_t hsize;
- Node *node;
- lua_assert(hbits != 0);
- if (hbits > LJ_MAX_HBITS)
- lj_err_msg(L, LJ_ERR_TABOV);
- hsize = 1u << hbits;
- node = lj_mem_newvec(L, hsize, Node);
- setmref(node->freetop, &node[hsize]);
- setmref(t->node, node);
- t->hmask = hsize-1;
-}
-
-/*
-** Q: Why all of these copies of t->hmask, t->node etc. to local variables?
-** A: Because alias analysis for C is _really_ tough.
-** Even state-of-the-art C compilers won't produce good code without this.
-*/
-
-/* Clear hash part of table. */
-static LJ_AINLINE void clearhpart(GCtab *t)
-{
- uint32_t i, hmask = t->hmask;
- Node *node = noderef(t->node);
- lua_assert(t->hmask != 0);
- for (i = 0; i <= hmask; i++) {
- Node *n = &node[i];
- setmref(n->next, NULL);
- setnilV(&n->key);
- setnilV(&n->val);
- }
-}
-
-/* Clear array part of table. */
-static LJ_AINLINE void clearapart(GCtab *t)
-{
- uint32_t i, asize = t->asize;
- TValue *array = tvref(t->array);
- for (i = 0; i < asize; i++)
- setnilV(&array[i]);
-}
-
-/* Create a new table. Note: the slots are not initialized (yet). */
-static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
-{
- GCtab *t;
- /* First try to colocate the array part. */
- if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
- lua_assert((sizeof(GCtab) & 7) == 0);
- t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
- t->gct = ~LJ_TTAB;
- t->nomm = (uint8_t)~0;
- t->colo = (int8_t)asize;
- setmref(t->array, (TValue *)((char *)t + sizeof(GCtab)));
- setgcrefnull(t->metatable);
- t->asize = asize;
- t->hmask = 0;
- setmref(t->node, &G(L)->nilnode);
- } else { /* Otherwise separately allocate the array part. */
- t = lj_mem_newobj(L, GCtab);
- t->gct = ~LJ_TTAB;
- t->nomm = (uint8_t)~0;
- t->colo = 0;
- setmref(t->array, NULL);
- setgcrefnull(t->metatable);
- t->asize = 0; /* In case the array allocation fails. */
- t->hmask = 0;
- setmref(t->node, &G(L)->nilnode);
- if (asize > 0) {
- if (asize > LJ_MAX_ASIZE)
- lj_err_msg(L, LJ_ERR_TABOV);
- setmref(t->array, lj_mem_newvec(L, asize, TValue));
- t->asize = asize;
- }
- }
- if (hbits)
- newhpart(L, t, hbits);
- return t;
-}
-
-/* Create a new table.
-**
-** IMPORTANT NOTE: The API differs from lua_createtable()!
-**
-** The array size is non-inclusive. E.g. asize=128 creates array slots
-** for 0..127, but not for 128. If you need slots 1..128, pass asize=129
-** (slot 0 is wasted in this case).
-**
-** The hash size is given in hash bits. hbits=0 means no hash part.
-** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on.
-*/
-GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
-{
- GCtab *t = newtab(L, asize, hbits);
- clearapart(t);
- if (t->hmask > 0) clearhpart(t);
- return t;
-}
-
-#if LJ_HASJIT
-GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
-{
- GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24);
- clearapart(t);
- if (t->hmask > 0) clearhpart(t);
- return t;
-}
-#endif
-
-/* Duplicate a table. */
-GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
-{
- GCtab *t;
- uint32_t asize, hmask;
- t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
- lua_assert(kt->asize == t->asize && kt->hmask == t->hmask);
- t->nomm = 0; /* Keys with metamethod names may be present. */
- asize = kt->asize;
- if (asize > 0) {
- TValue *array = tvref(t->array);
- TValue *karray = tvref(kt->array);
- if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. */
- uint32_t i;
- for (i = 0; i < asize; i++)
- copyTV(L, &array[i], &karray[i]);
- } else {
- memcpy(array, karray, asize*sizeof(TValue));
- }
- }
- hmask = kt->hmask;
- if (hmask > 0) {
- uint32_t i;
- Node *node = noderef(t->node);
- Node *knode = noderef(kt->node);
- ptrdiff_t d = (char *)node - (char *)knode;
- setmref(node->freetop, (Node *)((char *)noderef(knode->freetop) + d));
- for (i = 0; i <= hmask; i++) {
- Node *kn = &knode[i];
- Node *n = &node[i];
- Node *next = nextnode(kn);
- /* Don't use copyTV here, since it asserts on a copy of a dead key. */
- n->val = kn->val; n->key = kn->key;
- setmref(n->next, next == NULL? next : (Node *)((char *)next + d));
- }
- }
- return t;
-}
-
-/* Free a table. */
-void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
-{
- if (t->hmask > 0)
- lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node);
- if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
- lj_mem_freevec(g, tvref(t->array), t->asize, TValue);
- if (LJ_MAX_COLOSIZE != 0 && t->colo)
- lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f));
- else
- lj_mem_freet(g, t);
-}
-
-/* -- Table resizing ------------------------------------------------------ */
-
-/* Resize a table to fit the new array/hash part sizes. */
-static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
-{
- Node *oldnode = noderef(t->node);
- uint32_t oldasize = t->asize;
- uint32_t oldhmask = t->hmask;
- if (asize > oldasize) { /* Array part grows? */
- TValue *array;
- uint32_t i;
- if (asize > LJ_MAX_ASIZE)
- lj_err_msg(L, LJ_ERR_TABOV);
- if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) {
- /* A colocated array must be separated and copied. */
- TValue *oarray = tvref(t->array);
- array = lj_mem_newvec(L, asize, TValue);
- t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). */
- for (i = 0; i < oldasize; i++)
- copyTV(L, &array[i], &oarray[i]);
- } else {
- array = (TValue *)lj_mem_realloc(L, tvref(t->array),
- oldasize*sizeof(TValue), asize*sizeof(TValue));
- }
- setmref(t->array, array);
- t->asize = asize;
- for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */
- setnilV(&array[i]);
- }
- /* Create new (empty) hash part. */
- if (hbits) {
- newhpart(L, t, hbits);
- clearhpart(t);
- } else {
- global_State *g = G(L);
- setmref(t->node, &g->nilnode);
- t->hmask = 0;
- }
- if (asize < oldasize) { /* Array part shrinks? */
- TValue *array = tvref(t->array);
- uint32_t i;
- t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */
- for (i = asize; i < oldasize; i++) /* Reinsert old array values. */
- if (!tvisnil(&array[i]))
- copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]);
- /* Physically shrink only separated arrays. */
- if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
- setmref(t->array, lj_mem_realloc(L, array,
- oldasize*sizeof(TValue), asize*sizeof(TValue)));
- }
- if (oldhmask > 0) { /* Reinsert pairs from old hash part. */
- global_State *g;
- uint32_t i;
- for (i = 0; i <= oldhmask; i++) {
- Node *n = &oldnode[i];
- if (!tvisnil(&n->val))
- copyTV(L, lj_tab_set(L, t, &n->key), &n->val);
- }
- g = G(L);
- lj_mem_freevec(g, oldnode, oldhmask+1, Node);
- }
-}
-
-static uint32_t countint(cTValue *key, uint32_t *bins)
-{
- lua_assert(!tvisint(key));
- if (tvisnum(key)) {
- lua_Number nk = numV(key);
- int32_t k = lj_num2int(nk);
- if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) {
- bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++;
- return 1;
- }
- }
- return 0;
-}
-
-static uint32_t countarray(const GCtab *t, uint32_t *bins)
-{
- uint32_t na, b, i;
- if (t->asize == 0) return 0;
- for (na = i = b = 0; b < LJ_MAX_ABITS; b++) {
- uint32_t n, top = 2u << b;
- TValue *array;
- if (top >= t->asize) {
- top = t->asize-1;
- if (i > top)
- break;
- }
- array = tvref(t->array);
- for (n = 0; i <= top; i++)
- if (!tvisnil(&array[i]))
- n++;
- bins[b] += n;
- na += n;
- }
- return na;
-}
-
-static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray)
-{
- uint32_t total, na, i, hmask = t->hmask;
- Node *node = noderef(t->node);
- for (total = na = 0, i = 0; i <= hmask; i++) {
- Node *n = &node[i];
- if (!tvisnil(&n->val)) {
- na += countint(&n->key, bins);
- total++;
- }
- }
- *narray += na;
- return total;
-}
-
-static uint32_t bestasize(uint32_t bins[], uint32_t *narray)
-{
- uint32_t b, sum, na = 0, sz = 0, nn = *narray;
- for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++)
- if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) {
- sz = (2u<<b)+1;
- na = sum;
- }
- *narray = sz;
- return na;
-}
-
-static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
-{
- uint32_t bins[LJ_MAX_ABITS];
- uint32_t total, asize, na, i;
- for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0;
- asize = countarray(t, bins);
- total = 1 + asize;
- total += counthash(t, bins, &asize);
- asize += countint(ek, bins);
- na = bestasize(bins, &asize);
- total -= na;
- resizetab(L, t, asize, hsize2hbits(total));
-}
-
-#if LJ_HASFFI
-void lj_tab_rehash(lua_State *L, GCtab *t)
-{
- rehashtab(L, t, niltv(L));
-}
-#endif
-
-void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
-{
- resizetab(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
-}
-
-/* -- Table getters ------------------------------------------------------- */
-
-cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key)
-{
- TValue k;
- Node *n;
- k.n = (lua_Number)key;
- n = hashnum(t, &k);
- do {
- if (tvisnum(&n->key) && n->key.n == k.n)
- return &n->val;
- } while ((n = nextnode(n)));
- return NULL;
-}
-
-cTValue *lj_tab_getstr(GCtab *t, GCstr *key)
-{
- Node *n = hashstr(t, key);
- do {
- if (tvisstr(&n->key) && strV(&n->key) == key)
- return &n->val;
- } while ((n = nextnode(n)));
- return NULL;
-}
-
-cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key)
-{
- if (tvisstr(key)) {
- cTValue *tv = lj_tab_getstr(t, strV(key));
- if (tv)
- return tv;
- } else if (tvisint(key)) {
- cTValue *tv = lj_tab_getint(t, intV(key));
- if (tv)
- return tv;
- } else if (tvisnum(key)) {
- lua_Number nk = numV(key);
- int32_t k = lj_num2int(nk);
- if (nk == (lua_Number)k) {
- cTValue *tv = lj_tab_getint(t, k);
- if (tv)
- return tv;
- } else {
- goto genlookup; /* Else use the generic lookup. */
- }
- } else if (!tvisnil(key)) {
- Node *n;
- genlookup:
- n = hashkey(t, key);
- do {
- if (lj_obj_equal(&n->key, key))
- return &n->val;
- } while ((n = nextnode(n)));
- }
- return niltv(L);
-}
-
-/* -- Table setters ------------------------------------------------------- */
-
-/* Insert new key. Use Brent's variation to optimize the chain length. */
-TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
-{
- Node *n = hashkey(t, key);
- if (!tvisnil(&n->val) || t->hmask == 0) {
- Node *nodebase = noderef(t->node);
- Node *collide, *freenode = noderef(nodebase->freetop);
- lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1);
- do {
- if (freenode == nodebase) { /* No free node found? */
- rehashtab(L, t, key); /* Rehash table. */
- return lj_tab_set(L, t, key); /* Retry key insertion. */
- }
- } while (!tvisnil(&(--freenode)->key));
- setmref(nodebase->freetop, freenode);
- lua_assert(freenode != &G(L)->nilnode);
- collide = hashkey(t, &n->key);
- if (collide != n) { /* Colliding node not the main node? */
- while (noderef(collide->next) != n) /* Find predecessor. */
- collide = nextnode(collide);
- setmref(collide->next, freenode); /* Relink chain. */
- /* Copy colliding node into free node and free main node. */
- freenode->val = n->val;
- freenode->key = n->key;
- freenode->next = n->next;
- setmref(n->next, NULL);
- setnilV(&n->val);
- /* Rechain pseudo-resurrected string keys with colliding hashes. */
- while (nextnode(freenode)) {
- Node *nn = nextnode(freenode);
- if (tvisstr(&nn->key) && !tvisnil(&nn->val) &&
- hashstr(t, strV(&nn->key)) == n) {
- freenode->next = nn->next;
- nn->next = n->next;
- setmref(n->next, nn);
- } else {
- freenode = nn;
- }
- }
- } else { /* Otherwise use free node. */
- setmrefr(freenode->next, n->next); /* Insert into chain. */
- setmref(n->next, freenode);
- n = freenode;
- }
- }
- n->key.u64 = key->u64;
- if (LJ_UNLIKELY(tvismzero(&n->key)))
- n->key.u64 = 0;
- lj_gc_anybarriert(L, t);
- lua_assert(tvisnil(&n->val));
- return &n->val;
-}
-
-TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key)
-{
- TValue k;
- Node *n;
- k.n = (lua_Number)key;
- n = hashnum(t, &k);
- do {
- if (tvisnum(&n->key) && n->key.n == k.n)
- return &n->val;
- } while ((n = nextnode(n)));
- return lj_tab_newkey(L, t, &k);
-}
-
-TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key)
-{
- TValue k;
- Node *n = hashstr(t, key);
- do {
- if (tvisstr(&n->key) && strV(&n->key) == key)
- return &n->val;
- } while ((n = nextnode(n)));
- setstrV(L, &k, key);
- return lj_tab_newkey(L, t, &k);
-}
-
-TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key)
-{
- Node *n;
- t->nomm = 0; /* Invalidate negative metamethod cache. */
- if (tvisstr(key)) {
- return lj_tab_setstr(L, t, strV(key));
- } else if (tvisint(key)) {
- return lj_tab_setint(L, t, intV(key));
- } else if (tvisnum(key)) {
- lua_Number nk = numV(key);
- int32_t k = lj_num2int(nk);
- if (nk == (lua_Number)k)
- return lj_tab_setint(L, t, k);
- if (tvisnan(key))
- lj_err_msg(L, LJ_ERR_NANIDX);
- /* Else use the generic lookup. */
- } else if (tvisnil(key)) {
- lj_err_msg(L, LJ_ERR_NILIDX);
- }
- n = hashkey(t, key);
- do {
- if (lj_obj_equal(&n->key, key))
- return &n->val;
- } while ((n = nextnode(n)));
- return lj_tab_newkey(L, t, key);
-}
-
-/* -- Table traversal ----------------------------------------------------- */
-
-/* Get the traversal index of a key. */
-static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key)
-{
- TValue tmp;
- if (tvisint(key)) {
- int32_t k = intV(key);
- if ((uint32_t)k < t->asize)
- return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */
- setnumV(&tmp, (lua_Number)k);
- key = &tmp;
- } else if (tvisnum(key)) {
- lua_Number nk = numV(key);
- int32_t k = lj_num2int(nk);
- if ((uint32_t)k < t->asize && nk == (lua_Number)k)
- return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */
- }
- if (!tvisnil(key)) {
- Node *n = hashkey(t, key);
- do {
- if (lj_obj_equal(&n->key, key))
- return t->asize + (uint32_t)(n - noderef(t->node));
- /* Hash key indexes: [t->asize..t->asize+t->hmask] */
- } while ((n = nextnode(n)));
- if (key->u32.hi == 0xfffe7fff) /* ITERN was despecialized while running. */
- return key->u32.lo - 1;
- lj_err_msg(L, LJ_ERR_NEXTIDX);
- return 0; /* unreachable */
- }
- return ~0u; /* A nil key starts the traversal. */
-}
-
-/* Advance to the next step in a table traversal. */
-int lj_tab_next(lua_State *L, GCtab *t, TValue *key)
-{
- uint32_t i = keyindex(L, t, key); /* Find predecessor key index. */
- for (i++; i < t->asize; i++) /* First traverse the array keys. */
- if (!tvisnil(arrayslot(t, i))) {
- setintV(key, i);
- copyTV(L, key+1, arrayslot(t, i));
- return 1;
- }
- for (i -= t->asize; i <= t->hmask; i++) { /* Then traverse the hash keys. */
- Node *n = &noderef(t->node)[i];
- if (!tvisnil(&n->val)) {
- copyTV(L, key, &n->key);
- copyTV(L, key+1, &n->val);
- return 1;
- }
- }
- return 0; /* End of traversal. */
-}
-
-/* -- Table length calculation -------------------------------------------- */
-
-static MSize unbound_search(GCtab *t, MSize j)
-{
- cTValue *tv;
- MSize i = j; /* i is zero or a present index */
- j++;
- /* find `i' and `j' such that i is present and j is not */
- while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) {
- i = j;
- j *= 2;
- if (j > (MSize)(INT_MAX-2)) { /* overflow? */
- /* table was built with bad purposes: resort to linear search */
- i = 1;
- while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++;
- return i - 1;
- }
- }
- /* now do a binary search between them */
- while (j - i > 1) {
- MSize m = (i+j)/2;
- cTValue *tvb = lj_tab_getint(t, (int32_t)m);
- if (tvb && !tvisnil(tvb)) i = m; else j = m;
- }
- return i;
-}
-
-/*
-** Try to find a boundary in table `t'. A `boundary' is an integer index
-** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
-*/
-MSize LJ_FASTCALL lj_tab_len(GCtab *t)
-{
- MSize j = (MSize)t->asize;
- if (j > 1 && tvisnil(arrayslot(t, j-1))) {
- MSize i = 1;
- while (j - i > 1) {
- MSize m = (i+j)/2;
- if (tvisnil(arrayslot(t, m-1))) j = m; else i = m;
- }
- return i-1;
- }
- if (j) j--;
- if (t->hmask <= 0)
- return j;
- return unbound_search(t, j);
-}
-
+/*
+** Table handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_tab_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+
+/* -- Object hashing ------------------------------------------------------ */
+
+/* Hash values are masked with the table hash mask and used as an index. */
+static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash)
+{
+ Node *n = noderef(t->node);
+ return &n[hash & t->hmask];
+}
+
+/* String hashes are precomputed when they are interned. */
+#define hashstr(t, s) hashmask(t, (s)->hash)
+
+#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi)))
+#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1))
+#define hashptr(t, p) hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS)
+#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS)
+
+/* Hash an arbitrary key and return its anchor position in the hash table. */
+static Node *hashkey(const GCtab *t, cTValue *key)
+{
+ lua_assert(!tvisint(key));
+ if (tvisstr(key))
+ return hashstr(t, strV(key));
+ else if (tvisnum(key))
+ return hashnum(t, key);
+ else if (tvisbool(key))
+ return hashmask(t, boolV(key));
+ else
+ return hashgcref(t, key->gcr);
+ /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */
+}
+
+/* -- Table creation and destruction -------------------------------------- */
+
+/* Create new hash part for table. */
+static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
+{
+ uint32_t hsize;
+ Node *node;
+ lua_assert(hbits != 0);
+ if (hbits > LJ_MAX_HBITS)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ hsize = 1u << hbits;
+ node = lj_mem_newvec(L, hsize, Node);
+ setmref(node->freetop, &node[hsize]);
+ setmref(t->node, node);
+ t->hmask = hsize-1;
+}
+
+/*
+** Q: Why all of these copies of t->hmask, t->node etc. to local variables?
+** A: Because alias analysis for C is _really_ tough.
+** Even state-of-the-art C compilers won't produce good code without this.
+*/
+
+/* Clear hash part of table. */
+static LJ_AINLINE void clearhpart(GCtab *t)
+{
+ uint32_t i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ lua_assert(t->hmask != 0);
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ setmref(n->next, NULL);
+ setnilV(&n->key);
+ setnilV(&n->val);
+ }
+}
+
+/* Clear array part of table. */
+static LJ_AINLINE void clearapart(GCtab *t)
+{
+ uint32_t i, asize = t->asize;
+ TValue *array = tvref(t->array);
+ for (i = 0; i < asize; i++)
+ setnilV(&array[i]);
+}
+
+/* Create a new table. Note: the slots are not initialized (yet). */
+static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
+{
+ GCtab *t;
+ /* First try to colocate the array part. */
+ if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
+ lua_assert((sizeof(GCtab) & 7) == 0);
+ t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
+ t->gct = ~LJ_TTAB;
+ t->nomm = (uint8_t)~0;
+ t->colo = (int8_t)asize;
+ setmref(t->array, (TValue *)((char *)t + sizeof(GCtab)));
+ setgcrefnull(t->metatable);
+ t->asize = asize;
+ t->hmask = 0;
+ setmref(t->node, &G(L)->nilnode);
+ } else { /* Otherwise separately allocate the array part. */
+ t = lj_mem_newobj(L, GCtab);
+ t->gct = ~LJ_TTAB;
+ t->nomm = (uint8_t)~0;
+ t->colo = 0;
+ setmref(t->array, NULL);
+ setgcrefnull(t->metatable);
+ t->asize = 0; /* In case the array allocation fails. */
+ t->hmask = 0;
+ setmref(t->node, &G(L)->nilnode);
+ if (asize > 0) {
+ if (asize > LJ_MAX_ASIZE)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ setmref(t->array, lj_mem_newvec(L, asize, TValue));
+ t->asize = asize;
+ }
+ }
+ if (hbits)
+ newhpart(L, t, hbits);
+ return t;
+}
+
+/* Create a new table.
+**
+** IMPORTANT NOTE: The API differs from lua_createtable()!
+**
+** The array size is non-inclusive. E.g. asize=128 creates array slots
+** for 0..127, but not for 128. If you need slots 1..128, pass asize=129
+** (slot 0 is wasted in this case).
+**
+** The hash size is given in hash bits. hbits=0 means no hash part.
+** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on.
+*/
+GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
+{
+ GCtab *t = newtab(L, asize, hbits);
+ clearapart(t);
+ if (t->hmask > 0) clearhpart(t);
+ return t;
+}
+
+#if LJ_HASJIT
+GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
+{
+ GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24);
+ clearapart(t);
+ if (t->hmask > 0) clearhpart(t);
+ return t;
+}
+#endif
+
+/* Duplicate a table. */
+GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
+{
+ GCtab *t;
+ uint32_t asize, hmask;
+ t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
+ lua_assert(kt->asize == t->asize && kt->hmask == t->hmask);
+ t->nomm = 0; /* Keys with metamethod names may be present. */
+ asize = kt->asize;
+ if (asize > 0) {
+ TValue *array = tvref(t->array);
+ TValue *karray = tvref(kt->array);
+ if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. */
+ uint32_t i;
+ for (i = 0; i < asize; i++)
+ copyTV(L, &array[i], &karray[i]);
+ } else {
+ memcpy(array, karray, asize*sizeof(TValue));
+ }
+ }
+ hmask = kt->hmask;
+ if (hmask > 0) {
+ uint32_t i;
+ Node *node = noderef(t->node);
+ Node *knode = noderef(kt->node);
+ ptrdiff_t d = (char *)node - (char *)knode;
+ setmref(node->freetop, (Node *)((char *)noderef(knode->freetop) + d));
+ for (i = 0; i <= hmask; i++) {
+ Node *kn = &knode[i];
+ Node *n = &node[i];
+ Node *next = nextnode(kn);
+ /* Don't use copyTV here, since it asserts on a copy of a dead key. */
+ n->val = kn->val; n->key = kn->key;
+ setmref(n->next, next == NULL? next : (Node *)((char *)next + d));
+ }
+ }
+ return t;
+}
+
+/* Free a table. */
+void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
+{
+ if (t->hmask > 0)
+ lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node);
+ if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
+ lj_mem_freevec(g, tvref(t->array), t->asize, TValue);
+ if (LJ_MAX_COLOSIZE != 0 && t->colo)
+ lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f));
+ else
+ lj_mem_freet(g, t);
+}
+
+/* -- Table resizing ------------------------------------------------------ */
+
+/* Resize a table to fit the new array/hash part sizes. */
+static void resizetab(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
+{
+ Node *oldnode = noderef(t->node);
+ uint32_t oldasize = t->asize;
+ uint32_t oldhmask = t->hmask;
+ if (asize > oldasize) { /* Array part grows? */
+ TValue *array;
+ uint32_t i;
+ if (asize > LJ_MAX_ASIZE)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) {
+ /* A colocated array must be separated and copied. */
+ TValue *oarray = tvref(t->array);
+ array = lj_mem_newvec(L, asize, TValue);
+ t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). */
+ for (i = 0; i < oldasize; i++)
+ copyTV(L, &array[i], &oarray[i]);
+ } else {
+ array = (TValue *)lj_mem_realloc(L, tvref(t->array),
+ oldasize*sizeof(TValue), asize*sizeof(TValue));
+ }
+ setmref(t->array, array);
+ t->asize = asize;
+ for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */
+ setnilV(&array[i]);
+ }
+ /* Create new (empty) hash part. */
+ if (hbits) {
+ newhpart(L, t, hbits);
+ clearhpart(t);
+ } else {
+ global_State *g = G(L);
+ setmref(t->node, &g->nilnode);
+ t->hmask = 0;
+ }
+ if (asize < oldasize) { /* Array part shrinks? */
+ TValue *array = tvref(t->array);
+ uint32_t i;
+ t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */
+ for (i = asize; i < oldasize; i++) /* Reinsert old array values. */
+ if (!tvisnil(&array[i]))
+ copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]);
+ /* Physically shrink only separated arrays. */
+ if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
+ setmref(t->array, lj_mem_realloc(L, array,
+ oldasize*sizeof(TValue), asize*sizeof(TValue)));
+ }
+ if (oldhmask > 0) { /* Reinsert pairs from old hash part. */
+ global_State *g;
+ uint32_t i;
+ for (i = 0; i <= oldhmask; i++) {
+ Node *n = &oldnode[i];
+ if (!tvisnil(&n->val))
+ copyTV(L, lj_tab_set(L, t, &n->key), &n->val);
+ }
+ g = G(L);
+ lj_mem_freevec(g, oldnode, oldhmask+1, Node);
+ }
+}
+
+static uint32_t countint(cTValue *key, uint32_t *bins)
+{
+ lua_assert(!tvisint(key));
+ if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) {
+ bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static uint32_t countarray(const GCtab *t, uint32_t *bins)
+{
+ uint32_t na, b, i;
+ if (t->asize == 0) return 0;
+ for (na = i = b = 0; b < LJ_MAX_ABITS; b++) {
+ uint32_t n, top = 2u << b;
+ TValue *array;
+ if (top >= t->asize) {
+ top = t->asize-1;
+ if (i > top)
+ break;
+ }
+ array = tvref(t->array);
+ for (n = 0; i <= top; i++)
+ if (!tvisnil(&array[i]))
+ n++;
+ bins[b] += n;
+ na += n;
+ }
+ return na;
+}
+
+static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray)
+{
+ uint32_t total, na, i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (total = na = 0, i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (!tvisnil(&n->val)) {
+ na += countint(&n->key, bins);
+ total++;
+ }
+ }
+ *narray += na;
+ return total;
+}
+
+static uint32_t bestasize(uint32_t bins[], uint32_t *narray)
+{
+ uint32_t b, sum, na = 0, sz = 0, nn = *narray;
+ for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++)
+ if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) {
+ sz = (2u<<b)+1;
+ na = sum;
+ }
+ *narray = sz;
+ return na;
+}
+
+static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
+{
+ uint32_t bins[LJ_MAX_ABITS];
+ uint32_t total, asize, na, i;
+ for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0;
+ asize = countarray(t, bins);
+ total = 1 + asize;
+ total += counthash(t, bins, &asize);
+ asize += countint(ek, bins);
+ na = bestasize(bins, &asize);
+ total -= na;
+ resizetab(L, t, asize, hsize2hbits(total));
+}
+
+void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
+{
+ resizetab(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
+}
+
+/* -- Table getters ------------------------------------------------------- */
+
+cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key)
+{
+ TValue k;
+ Node *n;
+ k.n = (lua_Number)key;
+ n = hashnum(t, &k);
+ do {
+ if (tvisnum(&n->key) && n->key.n == k.n)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return NULL;
+}
+
+cTValue *lj_tab_getstr(GCtab *t, GCstr *key)
+{
+ Node *n = hashstr(t, key);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == key)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return NULL;
+}
+
+cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key)
+{
+ if (tvisstr(key)) {
+ cTValue *tv = lj_tab_getstr(t, strV(key));
+ if (tv)
+ return tv;
+ } else if (tvisint(key)) {
+ cTValue *tv = lj_tab_getint(t, intV(key));
+ if (tv)
+ return tv;
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if (nk == (lua_Number)k) {
+ cTValue *tv = lj_tab_getint(t, k);
+ if (tv)
+ return tv;
+ } else {
+ goto genlookup; /* Else use the generic lookup. */
+ }
+ } else if (!tvisnil(key)) {
+ Node *n;
+ genlookup:
+ n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return &n->val;
+ } while ((n = nextnode(n)));
+ }
+ return niltv(L);
+}
+
+/* -- Table setters ------------------------------------------------------- */
+
+/* Insert new key. Use Brent's variation to optimize the chain length. */
+TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
+{
+ Node *n = hashkey(t, key);
+ if (!tvisnil(&n->val) || t->hmask == 0) {
+ Node *nodebase = noderef(t->node);
+ Node *collide, *freenode = noderef(nodebase->freetop);
+ lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1);
+ do {
+ if (freenode == nodebase) { /* No free node found? */
+ rehashtab(L, t, key); /* Rehash table. */
+ return lj_tab_set(L, t, key); /* Retry key insertion. */
+ }
+ } while (!tvisnil(&(--freenode)->key));
+ setmref(nodebase->freetop, freenode);
+ lua_assert(freenode != &G(L)->nilnode);
+ collide = hashkey(t, &n->key);
+ if (collide != n) { /* Colliding node not the main node? */
+ while (noderef(collide->next) != n) /* Find predecessor. */
+ collide = nextnode(collide);
+ setmref(collide->next, freenode); /* Relink chain. */
+ /* Copy colliding node into free node and free main node. */
+ freenode->val = n->val;
+ freenode->key = n->key;
+ freenode->next = n->next;
+ setmref(n->next, NULL);
+ setnilV(&n->val);
+ /* Rechain pseudo-resurrected string keys with colliding hashes. */
+ while (nextnode(freenode)) {
+ Node *nn = nextnode(freenode);
+ if (tvisstr(&nn->key) && !tvisnil(&nn->val) &&
+ hashstr(t, strV(&nn->key)) == n) {
+ freenode->next = nn->next;
+ nn->next = n->next;
+ setmref(n->next, nn);
+ } else {
+ freenode = nn;
+ }
+ }
+ } else { /* Otherwise use free node. */
+ setmrefr(freenode->next, n->next); /* Insert into chain. */
+ setmref(n->next, freenode);
+ n = freenode;
+ }
+ }
+ n->key.u64 = key->u64;
+ if (LJ_UNLIKELY(tvismzero(&n->key)))
+ n->key.u64 = 0;
+ lj_gc_anybarriert(L, t);
+ lua_assert(tvisnil(&n->val));
+ return &n->val;
+}
+
+TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key)
+{
+ TValue k;
+ Node *n;
+ k.n = (lua_Number)key;
+ n = hashnum(t, &k);
+ do {
+ if (tvisnum(&n->key) && n->key.n == k.n)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return lj_tab_newkey(L, t, &k);
+}
+
+TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key)
+{
+ TValue k;
+ Node *n = hashstr(t, key);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == key)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ setstrV(L, &k, key);
+ return lj_tab_newkey(L, t, &k);
+}
+
+TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key)
+{
+ Node *n;
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ if (tvisstr(key)) {
+ return lj_tab_setstr(L, t, strV(key));
+ } else if (tvisint(key)) {
+ return lj_tab_setint(L, t, intV(key));
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if (nk == (lua_Number)k)
+ return lj_tab_setint(L, t, k);
+ if (tvisnan(key))
+ lj_err_msg(L, LJ_ERR_NANIDX);
+ /* Else use the generic lookup. */
+ } else if (tvisnil(key)) {
+ lj_err_msg(L, LJ_ERR_NILIDX);
+ }
+ n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return lj_tab_newkey(L, t, key);
+}
+
+/* -- Table traversal ----------------------------------------------------- */
+
+/* Get the traversal index of a key. */
+static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key)
+{
+ TValue tmp;
+ if (tvisint(key)) {
+ int32_t k = intV(key);
+ if ((uint32_t)k < t->asize)
+ return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */
+ setnumV(&tmp, (lua_Number)k);
+ key = &tmp;
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if ((uint32_t)k < t->asize && nk == (lua_Number)k)
+ return (uint32_t)k; /* Array key indexes: [0..t->asize-1] */
+ }
+ if (!tvisnil(key)) {
+ Node *n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return t->asize + (uint32_t)(n - noderef(t->node));
+ /* Hash key indexes: [t->asize..t->asize+t->hmask] */
+ } while ((n = nextnode(n)));
+ if (key->u32.hi == 0xfffe7fff) /* ITERN was despecialized while running. */
+ return key->u32.lo - 1;
+ lj_err_msg(L, LJ_ERR_NEXTIDX);
+ return 0; /* unreachable */
+ }
+ return ~0u; /* A nil key starts the traversal. */
+}
+
+/* Advance to the next step in a table traversal. */
+int lj_tab_next(lua_State *L, GCtab *t, TValue *key)
+{
+ uint32_t i = keyindex(L, t, key); /* Find predecessor key index. */
+ for (i++; i < t->asize; i++) /* First traverse the array keys. */
+ if (!tvisnil(arrayslot(t, i))) {
+ setintV(key, i);
+ copyTV(L, key+1, arrayslot(t, i));
+ return 1;
+ }
+ for (i -= t->asize; i <= t->hmask; i++) { /* Then traverse the hash keys. */
+ Node *n = &noderef(t->node)[i];
+ if (!tvisnil(&n->val)) {
+ copyTV(L, key, &n->key);
+ copyTV(L, key+1, &n->val);
+ return 1;
+ }
+ }
+ return 0; /* End of traversal. */
+}
+
+/* -- Table length calculation -------------------------------------------- */
+
+static MSize unbound_search(GCtab *t, MSize j)
+{
+ cTValue *tv;
+ MSize i = j; /* i is zero or a present index */
+ j++;
+ /* find `i' and `j' such that i is present and j is not */
+ while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) {
+ i = j;
+ j *= 2;
+ if (j > (MSize)(INT_MAX-2)) { /* overflow? */
+ /* table was built with bad purposes: resort to linear search */
+ i = 1;
+ while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++;
+ return i - 1;
+ }
+ }
+ /* now do a binary search between them */
+ while (j - i > 1) {
+ MSize m = (i+j)/2;
+ cTValue *tvb = lj_tab_getint(t, (int32_t)m);
+ if (tvb && !tvisnil(tvb)) i = m; else j = m;
+ }
+ return i;
+}
+
+/*
+** Try to find a boundary in table `t'. A `boundary' is an integer index
+** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
+*/
+MSize LJ_FASTCALL lj_tab_len(GCtab *t)
+{
+ MSize j = (MSize)t->asize;
+ if (j > 1 && tvisnil(arrayslot(t, j-1))) {
+ MSize i = 1;
+ while (j - i > 1) {
+ MSize m = (i+j)/2;
+ if (tvisnil(arrayslot(t, m-1))) j = m; else i = m;
+ }
+ return i-1;
+ }
+ if (j) j--;
+ if (t->hmask <= 0)
+ return j;
+ return unbound_search(t, j);
+}
+
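
The lj_tab_new() comment above pins down the sizing convention: asize counts slots 0..asize-1, so covering integer keys 1..n takes asize = n+1, and hbits is the base-2 logarithm of the hash-slot count, with hsize2hbits() (from lj_tab.h) rounding a desired slot count up to the next power of two. A minimal standalone sketch of that arithmetic, assuming only that lj_fls() returns the index of the highest set bit; a portable loop stands in for it here:

#include <stdint.h>
#include <stdio.h>

static uint32_t my_fls(uint32_t x)      /* index of the highest set bit */
{
  uint32_t r = 0;
  while (x >>= 1) r++;
  return r;
}

static uint32_t hsize2hbits(uint32_t s) /* same formula as in lj_tab.h */
{
  return s ? (s == 1 ? 1 : 1 + my_fls(s - 1)) : 0;
}

int main(void)
{
  uint32_t n = 100, m = 10;       /* want integer keys 1..100 plus ~10 other keys */
  uint32_t asize = n + 1;         /* asize is non-inclusive: slots 0..n need n+1 */
  uint32_t hbits = hsize2hbits(m);
  printf("lj_tab_new(L, %u, %u) -> array slots 0..%u, %u hash slots\n",
         (unsigned)asize, (unsigned)hbits, (unsigned)n,
         (unsigned)(hbits ? 1u << hbits : 0));
  return 0;
}

Under these assumptions the call would come out as lj_tab_new(L, 101, 4): array slots 0..100 (slot 0 wasted) and 16 hash slots, the smallest power of two that fits 10 extra keys.
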
diff --git a/3rdparty/lua/src/lj_tab.h b/3rdparty/lua/src/lj_tab.h
index 8b1ae70..2787caa 100644
--- a/3rdparty/lua/src/lj_tab.h
+++ b/3rdparty/lua/src/lj_tab.h
@@ -1,70 +1,67 @@
-/*
-** Table handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TAB_H
-#define _LJ_TAB_H
-
-#include "lj_obj.h"
-
-/* Hash constants. Tuned using a brute force search. */
-#define HASH_BIAS (-0x04c11db7)
-#define HASH_ROT1 14
-#define HASH_ROT2 5
-#define HASH_ROT3 13
-
-/* Scramble the bits of numbers and pointers. */
-static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi)
-{
-#if LJ_TARGET_X86ORX64
- /* Prefer variant that compiles well for a 2-operand CPU. */
- lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
- lo -= hi; hi = lj_rol(hi, HASH_ROT2);
- hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
-#else
- lo ^= hi;
- lo = lo - lj_rol(hi, HASH_ROT1);
- hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2);
- hi = hi - lj_rol(lo, HASH_ROT3);
-#endif
- return hi;
-}
-
-#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0)
-
-LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits);
-#if LJ_HASJIT
-LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize);
-#endif
-LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt);
-LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t);
-#if LJ_HASFFI
-LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t);
-#endif
-LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize);
-
-/* Caveat: all getters except lj_tab_get() can return NULL! */
-
-LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key);
-LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key);
-LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key);
-
-/* Caveat: all setters require a write barrier for the stored value. */
-
-LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key);
-LJ_FUNC TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key);
-LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key);
-LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
-
-#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize)
-#define arrayslot(t, i) (&tvref((t)->array)[(i)])
-#define lj_tab_getint(t, key) \
- (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_getinth((t), (key)))
-#define lj_tab_setint(L, t, key) \
- (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key)))
-
-LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key);
-LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t);
-
-#endif
+/*
+** Table handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TAB_H
+#define _LJ_TAB_H
+
+#include "lj_obj.h"
+
+/* Hash constants. Tuned using a brute force search. */
+#define HASH_BIAS (-0x04c11db7)
+#define HASH_ROT1 14
+#define HASH_ROT2 5
+#define HASH_ROT3 13
+
+/* Scramble the bits of numbers and pointers. */
+static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi)
+{
+#if LJ_TARGET_X86ORX64
+ /* Prefer variant that compiles well for a 2-operand CPU. */
+ lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
+ lo -= hi; hi = lj_rol(hi, HASH_ROT2);
+ hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
+#else
+ lo ^= hi;
+ lo = lo - lj_rol(hi, HASH_ROT1);
+ hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2);
+ hi = hi - lj_rol(lo, HASH_ROT3);
+#endif
+ return hi;
+}
+
+#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0)
+
+LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits);
+#if LJ_HASJIT
+LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize);
+#endif
+LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt);
+LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t);
+LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize);
+
+/* Caveat: all getters except lj_tab_get() can return NULL! */
+
+LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key);
+LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key);
+LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key);
+
+/* Caveat: all setters require a write barrier for the stored value. */
+
+LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key);
+LJ_FUNC TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key);
+LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key);
+LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
+
+#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize)
+#define arrayslot(t, i) (&tvref((t)->array)[(i)])
+#define lj_tab_getint(t, key) \
+ (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_getinth((t), (key)))
+#define lj_tab_setint(L, t, key) \
+ (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key)))
+
+LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key);
+LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t);
+
+#endif
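
hashrot() above is the common scrambling step behind hashnum(), hashptr() and hashgcref() in lj_tab.c: two 32-bit halves go in, a mixed 32-bit value comes out, and hashmask() then ANDs it with t->hmask to pick a chain anchor. A small sketch of the LJ_TARGET_X86ORX64 variant with the HASH_ROT1/2/3 constants filled in, assuming a little-endian host for the double-to-u32 split (the real code reads the halves through the TValue layout instead):

#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

/* Same operation sequence as the x86/x64 branch of hashrot() above. */
static uint32_t hashrot(uint32_t lo, uint32_t hi)
{
  lo ^= hi; hi = rol32(hi, 14);   /* HASH_ROT1 */
  lo -= hi; hi = rol32(hi, 5);    /* HASH_ROT2 */
  hi ^= lo; hi -= rol32(lo, 13);  /* HASH_ROT3 */
  return hi;
}

int main(void)
{
  union { double d; uint32_t u32[2]; } num;
  uint32_t hmask = 15;            /* a table with 16 hash slots */
  num.d = 42.5;
  /* hashnum() feeds the low word and the high word shifted left by one;
  ** hashmask() then reduces the result with the table's hash mask.
  ** u32[0]/u32[1] assume little-endian word order. */
  printf("key 42.5 -> hash slot %u of %u\n",
         (unsigned)(hashrot(num.u32[0], num.u32[1] << 1) & hmask),
         (unsigned)(hmask + 1));
  return 0;
}
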
diff --git a/3rdparty/lua/src/lj_target.h b/3rdparty/lua/src/lj_target.h
index 7d09ba7..eed69d1 100644
--- a/3rdparty/lua/src/lj_target.h
+++ b/3rdparty/lua/src/lj_target.h
@@ -1,162 +1,162 @@
-/*
-** Definitions for target CPU.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_H
-#define _LJ_TARGET_H
-
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* -- Registers and spill slots ------------------------------------------- */
-
-/* Register type (uint8_t in ir->r). */
-typedef uint32_t Reg;
-
-/* The hi-bit is NOT set for an allocated register. This means the value
-** can be directly used without masking. The hi-bit is set for a register
-** allocation hint or for RID_INIT, RID_SINK or RID_SUNK.
-*/
-#define RID_NONE 0x80
-#define RID_MASK 0x7f
-#define RID_INIT (RID_NONE|RID_MASK)
-#define RID_SINK (RID_INIT-1)
-#define RID_SUNK (RID_INIT-2)
-
-#define ra_noreg(r) ((r) & RID_NONE)
-#define ra_hasreg(r) (!((r) & RID_NONE))
-
-/* The ra_hashint() macro assumes a previous test for ra_noreg(). */
-#define ra_hashint(r) ((r) < RID_SUNK)
-#define ra_gethint(r) ((Reg)((r) & RID_MASK))
-#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE)
-#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0)
-
-/* Spill slot 0 means no spill slot has been allocated. */
-#define SPS_NONE 0
-
-#define ra_hasspill(s) ((s) != SPS_NONE)
-
-/* Combined register and spill slot (uint16_t in ir->prev). */
-typedef uint32_t RegSP;
-
-#define REGSP(r, s) ((r) + ((s) << 8))
-#define REGSP_HINT(r) ((r)|RID_NONE)
-#define REGSP_INIT REGSP(RID_INIT, 0)
-
-#define regsp_reg(rs) ((rs) & 255)
-#define regsp_spill(rs) ((rs) >> 8)
-#define regsp_used(rs) \
- (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0))
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Bitset for registers. 32 registers suffice for most architectures.
-** Note that one set holds bits for both GPRs and FPRs.
-*/
-#if LJ_TARGET_PPC || LJ_TARGET_MIPS
-typedef uint64_t RegSet;
-#else
-typedef uint32_t RegSet;
-#endif
-
-#define RID2RSET(r) (((RegSet)1) << (r))
-#define RSET_EMPTY ((RegSet)0)
-#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo))
-
-#define rset_test(rs, r) ((int)((rs) >> (r)) & 1)
-#define rset_set(rs, r) (rs |= RID2RSET(r))
-#define rset_clear(rs, r) (rs &= ~RID2RSET(r))
-#define rset_exclude(rs, r) (rs & ~RID2RSET(r))
-#if LJ_TARGET_PPC || LJ_TARGET_MIPS
-#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63))
-#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs))
-#else
-#define rset_picktop(rs) ((Reg)lj_fls(rs))
-#define rset_pickbot(rs) ((Reg)lj_ffs(rs))
-#endif
-
-/* -- Register allocation cost -------------------------------------------- */
-
-/* The register allocation heuristic keeps track of the cost for allocating
-** a specific register:
-**
-** A free register (obviously) has a cost of 0 and a 1-bit in the free mask.
-**
-** An already allocated register has the (non-zero) IR reference in the lowest
-** bits and the result of a blended cost-model in the higher bits.
-**
-** The allocator first checks the free mask for a hit. Otherwise an (unrolled)
-** linear search for the minimum cost is used. The search doesn't need to
-** keep track of the position of the minimum, which makes it very fast.
-** The lowest bits of the minimum cost show the desired IR reference whose
-** register is the one to evict.
-**
-** Without the cost-model this degenerates to the standard heuristics for
-** (reverse) linear-scan register allocation. Since code generation is done
-** in reverse, a live interval extends from the last use to the first def.
-** For an SSA IR the IR reference is the first (and only) def and thus
-** trivially marks the end of the interval. The LSRA heuristics says to pick
-** the register whose live interval has the furthest extent, i.e. the lowest
-** IR reference in our case.
-**
-** A cost-model should take into account other factors, like spill-cost and
-** restore- or rematerialization-cost, which depend on the kind of instruction.
-** E.g. constants have zero spill costs, variant instructions have higher
-** costs than invariants and PHIs should preferably never be spilled.
-**
-** Here's a first cut at simple, but effective blended cost-model for R-LSRA:
-** - Due to careful design of the IR, constants already have lower IR
-** references than invariants and invariants have lower IR references
-** than variants.
-** - The cost in the upper 16 bits is the sum of the IR reference and a
-** weighted score. The score currently only takes into account whether
-** the IRT_ISPHI bit is set in the instruction type.
-** - The PHI weight is the minimum distance (in IR instructions) a PHI
-** reference has to be further apart from a non-PHI reference to be spilled.
-** - It should be a power of two (for speed) and must be between 2 and 32768.
-** Good values for the PHI weight seem to be between 40 and 150.
-** - Further study is required.
-*/
-#define REGCOST_PHI_WEIGHT 64
-
-/* Cost for allocating a specific register. */
-typedef uint32_t RegCost;
-
-/* Note: assumes 16 bit IRRef1. */
-#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16))
-#define regcost_ref(rc) ((IRRef1)(rc))
-
-#define REGCOST_T(t) \
- ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI))
-#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t)))
-
-/* -- Target-specific definitions ----------------------------------------- */
-
-#if LJ_TARGET_X86ORX64
-#include "lj_target_x86.h"
-#elif LJ_TARGET_ARM
-#include "lj_target_arm.h"
-#elif LJ_TARGET_PPC
-#include "lj_target_ppc.h"
-#elif LJ_TARGET_MIPS
-#include "lj_target_mips.h"
-#else
-#error "Missing include for target CPU"
-#endif
-
-#ifdef EXITSTUBS_PER_GROUP
-/* Return the address of an exit stub. */
-static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno)
-{
- lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL);
- return (char *)group[exitno / EXITSTUBS_PER_GROUP] +
- EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
-}
-/* Avoid dependence on lj_jit.h if only including lj_target.h. */
-#define exitstub_addr(J, exitno) \
- ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno)))
-#endif
-
-#endif
+/*
+** Definitions for target CPU.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_H
+#define _LJ_TARGET_H
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Registers and spill slots ------------------------------------------- */
+
+/* Register type (uint8_t in ir->r). */
+typedef uint32_t Reg;
+
+/* The hi-bit is NOT set for an allocated register. This means the value
+** can be directly used without masking. The hi-bit is set for a register
+** allocation hint or for RID_INIT, RID_SINK or RID_SUNK.
+*/
+#define RID_NONE 0x80
+#define RID_MASK 0x7f
+#define RID_INIT (RID_NONE|RID_MASK)
+#define RID_SINK (RID_INIT-1)
+#define RID_SUNK (RID_INIT-2)
+
+#define ra_noreg(r) ((r) & RID_NONE)
+#define ra_hasreg(r) (!((r) & RID_NONE))
+
+/* The ra_hashint() macro assumes a previous test for ra_noreg(). */
+#define ra_hashint(r) ((r) < RID_SUNK)
+#define ra_gethint(r) ((Reg)((r) & RID_MASK))
+#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE)
+#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0)
+
+/* Spill slot 0 means no spill slot has been allocated. */
+#define SPS_NONE 0
+
+#define ra_hasspill(s) ((s) != SPS_NONE)
+
+/* Combined register and spill slot (uint16_t in ir->prev). */
+typedef uint32_t RegSP;
+
+#define REGSP(r, s) ((r) + ((s) << 8))
+#define REGSP_HINT(r) ((r)|RID_NONE)
+#define REGSP_INIT REGSP(RID_INIT, 0)
+
+#define regsp_reg(rs) ((rs) & 255)
+#define regsp_spill(rs) ((rs) >> 8)
+#define regsp_used(rs) \
+ (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0))
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Bitset for registers. 32 registers suffice for most architectures.
+** Note that one set holds bits for both GPRs and FPRs.
+*/
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS
+typedef uint64_t RegSet;
+#else
+typedef uint32_t RegSet;
+#endif
+
+#define RID2RSET(r) (((RegSet)1) << (r))
+#define RSET_EMPTY ((RegSet)0)
+#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo))
+
+#define rset_test(rs, r) ((int)((rs) >> (r)) & 1)
+#define rset_set(rs, r) (rs |= RID2RSET(r))
+#define rset_clear(rs, r) (rs &= ~RID2RSET(r))
+#define rset_exclude(rs, r) (rs & ~RID2RSET(r))
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS
+#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63))
+#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs))
+#else
+#define rset_picktop(rs) ((Reg)lj_fls(rs))
+#define rset_pickbot(rs) ((Reg)lj_ffs(rs))
+#endif
+
+/* -- Register allocation cost -------------------------------------------- */
+
+/* The register allocation heuristic keeps track of the cost for allocating
+** a specific register:
+**
+** A free register (obviously) has a cost of 0 and a 1-bit in the free mask.
+**
+** An already allocated register has the (non-zero) IR reference in the lowest
+** bits and the result of a blended cost-model in the higher bits.
+**
+** The allocator first checks the free mask for a hit. Otherwise an (unrolled)
+** linear search for the minimum cost is used. The search doesn't need to
+** keep track of the position of the minimum, which makes it very fast.
+** The lowest bits of the minimum cost show the desired IR reference whose
+** register is the one to evict.
+**
+** Without the cost-model this degenerates to the standard heuristics for
+** (reverse) linear-scan register allocation. Since code generation is done
+** in reverse, a live interval extends from the last use to the first def.
+** For an SSA IR the IR reference is the first (and only) def and thus
+** trivially marks the end of the interval. The LSRA heuristics says to pick
+** the register whose live interval has the furthest extent, i.e. the lowest
+** IR reference in our case.
+**
+** A cost-model should take into account other factors, like spill-cost and
+** restore- or rematerialization-cost, which depend on the kind of instruction.
+** E.g. constants have zero spill costs, variant instructions have higher
+** costs than invariants and PHIs should preferably never be spilled.
+**
+** Here's a first cut at simple, but effective blended cost-model for R-LSRA:
+** - Due to careful design of the IR, constants already have lower IR
+** references than invariants and invariants have lower IR references
+** than variants.
+** - The cost in the upper 16 bits is the sum of the IR reference and a
+** weighted score. The score currently only takes into account whether
+** the IRT_ISPHI bit is set in the instruction type.
+** - The PHI weight is the minimum distance (in IR instructions) a PHI
+** reference has to be further apart from a non-PHI reference to be spilled.
+** - It should be a power of two (for speed) and must be between 2 and 32768.
+** Good values for the PHI weight seem to be between 40 and 150.
+** - Further study is required.
+*/
+#define REGCOST_PHI_WEIGHT 64
+
+/* Cost for allocating a specific register. */
+typedef uint32_t RegCost;
+
+/* Note: assumes 16 bit IRRef1. */
+#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16))
+#define regcost_ref(rc) ((IRRef1)(rc))
+
+#define REGCOST_T(t) \
+ ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI))
+#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t)))
+
+/* -- Target-specific definitions ----------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_target_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_target_arm.h"
+#elif LJ_TARGET_PPC
+#include "lj_target_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_target_mips.h"
+#else
+#error "Missing include for target CPU"
+#endif
+
+#ifdef EXITSTUBS_PER_GROUP
+/* Return the address of an exit stub. */
+static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno)
+{
+ lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL);
+ return (char *)group[exitno / EXITSTUBS_PER_GROUP] +
+ EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_addr(J, exitno) \
+ ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno)))
+#endif
+
+#endif
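
The cost-model comment above boils down to a simple packing: REGCOST() keeps the 16-bit IR reference in the low half and the blended cost in the high half, so the allocator's minimum search over costs also yields the eviction candidate's IR reference for free via regcost_ref(). A compact sketch with hypothetical IR references, assuming the PHI weight is simply added to the PHI's cost, as REGCOST_REF_T() does for IRT_ISPHI instructions:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RegCost;
typedef uint16_t IRRef1;

#define REGCOST(cost, ref)   ((RegCost)(ref) + ((RegCost)(cost) << 16))
#define regcost_ref(rc)      ((IRRef1)(rc))
#define REGCOST_PHI_WEIGHT   64

int main(void)
{
  /* Hypothetical state: a non-PHI result at IR ref 0x0123 and a PHI at the
  ** lower IR ref 0x0100. The PHI's cost is its ref plus the PHI weight,
  ** which is what keeps PHIs off the spill list. */
  RegCost nonphi = REGCOST(0x0123, 0x0123);
  RegCost phi    = REGCOST(0x0100 + REGCOST_PHI_WEIGHT, 0x0100);
  RegCost min    = nonphi < phi ? nonphi : phi;
  /* Without the weight the PHI (lowest ref = furthest live interval) would
  ** be evicted; with it, the non-PHI at 0x0123 is evicted instead. */
  printf("evict the register holding IR reference 0x%04x\n",
         (unsigned)regcost_ref(min));
  return 0;
}
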
diff --git a/3rdparty/lua/src/lj_target_arm.h b/3rdparty/lua/src/lj_target_arm.h
index 2a92063..bec5577 100644
--- a/3rdparty/lua/src/lj_target_arm.h
+++ b/3rdparty/lua/src/lj_target_arm.h
@@ -1,274 +1,274 @@
-/*
-** Definitions for ARM CPUs.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_ARM_H
-#define _LJ_TARGET_ARM_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#define GPRDEF(_) \
- _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
- _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC)
-#if LJ_SOFTFP
-#define FPRDEF(_)
-#else
-#define FPRDEF(_) \
- _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \
- _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15)
-#endif
-#define VRIDDEF(_)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_TMP = RID_LR,
-
- /* Calling conventions. */
- RID_RET = RID_R0,
- RID_RETLO = RID_R0,
- RID_RETHI = RID_R1,
-#if LJ_SOFTFP
- RID_FPRET = RID_R0,
-#else
- RID_FPRET = RID_D0,
-#endif
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_R9, /* Interpreter BASE. */
- RID_LPC = RID_R6, /* Interpreter PC. */
- RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */
- RID_LREG = RID_R8, /* Interpreter L. */
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_R0,
- RID_MAX_GPR = RID_PC+1,
- RID_MIN_FPR = RID_MAX_GPR,
-#if LJ_SOFTFP
- RID_MAX_FPR = RID_MIN_FPR,
-#else
- RID_MAX_FPR = RID_D15+1,
-#endif
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
-};
-
-#define RID_NUM_KREF RID_NUM_GPR
-#define RID_MIN_KREF RID_R0
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except sp, lr and pc. */
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1))
-#define RSET_GPREVEN \
- (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \
- RID2RSET(RID_R8)|RID2RSET(RID_R10))
-#define RSET_GPRODD \
- (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \
- RID2RSET(RID_R9)|RID2RSET(RID_R11))
-#if LJ_SOFTFP
-#define RSET_FPR 0
-#else
-#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
-#endif
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-/* ABI-specific register sets. lr is an implicit scratch register. */
-#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12))
-#ifdef __APPLE__
-#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9))
-#else
-#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_
-#endif
-#if LJ_SOFTFP
-#define RSET_SCRATCH_FPR 0
-#else
-#define RSET_SCRATCH_FPR (RSET_RANGE(RID_D0, RID_D7+1))
-#endif
-#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
-#define REGARG_FIRSTGPR RID_R0
-#define REGARG_LASTGPR RID_R3
-#define REGARG_NUMGPR 4
-#if LJ_ABI_SOFTFP
-#define REGARG_FIRSTFPR 0
-#define REGARG_LASTFPR 0
-#define REGARG_NUMFPR 0
-#else
-#define REGARG_FIRSTFPR RID_D0
-#define REGARG_LASTFPR RID_D7
-#define REGARG_NUMFPR 8
-#endif
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
-*/
-#define SPS_FIXED 2
-#define SPS_FIRST 2
-
-#define SPOFS_TMP 0
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
-#if !LJ_SOFTFP
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
-#endif
- int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* PC after instruction that caused an exit. Used to find the trace number. */
-#define EXITSTATE_PCREG RID_PC
-/* Highest exit + 1 indicates stack check. */
-#define EXITSTATE_CHECKEXIT 1
-
-#define EXITSTUB_SPACING 4
-#define EXITSTUBS_PER_GROUP 32
-
-/* -- Instructions -------------------------------------------------------- */
-
-/* Instruction fields. */
-#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28))
-#define ARMF_N(r) ((r) << 16)
-#define ARMF_D(r) ((r) << 12)
-#define ARMF_S(r) ((r) << 8)
-#define ARMF_M(r) (r)
-#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7))
-#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r))
-
-typedef enum ARMIns {
- ARMI_CCAL = 0xe0000000,
- ARMI_S = 0x000100000,
- ARMI_K12 = 0x02000000,
- ARMI_KNEG = 0x00200000,
- ARMI_LS_W = 0x00200000,
- ARMI_LS_U = 0x00800000,
- ARMI_LS_P = 0x01000000,
- ARMI_LS_R = 0x02000000,
- ARMI_LSX_I = 0x00400000,
-
- ARMI_AND = 0xe0000000,
- ARMI_EOR = 0xe0200000,
- ARMI_SUB = 0xe0400000,
- ARMI_RSB = 0xe0600000,
- ARMI_ADD = 0xe0800000,
- ARMI_ADC = 0xe0a00000,
- ARMI_SBC = 0xe0c00000,
- ARMI_RSC = 0xe0e00000,
- ARMI_TST = 0xe1100000,
- ARMI_TEQ = 0xe1300000,
- ARMI_CMP = 0xe1500000,
- ARMI_CMN = 0xe1700000,
- ARMI_ORR = 0xe1800000,
- ARMI_MOV = 0xe1a00000,
- ARMI_BIC = 0xe1c00000,
- ARMI_MVN = 0xe1e00000,
-
- ARMI_NOP = 0xe1a00000,
-
- ARMI_MUL = 0xe0000090,
- ARMI_SMULL = 0xe0c00090,
-
- ARMI_LDR = 0xe4100000,
- ARMI_LDRB = 0xe4500000,
- ARMI_LDRH = 0xe01000b0,
- ARMI_LDRSB = 0xe01000d0,
- ARMI_LDRSH = 0xe01000f0,
- ARMI_LDRD = 0xe00000d0,
- ARMI_STR = 0xe4000000,
- ARMI_STRB = 0xe4400000,
- ARMI_STRH = 0xe00000b0,
- ARMI_STRD = 0xe00000f0,
- ARMI_PUSH = 0xe92d0000,
-
- ARMI_B = 0xea000000,
- ARMI_BL = 0xeb000000,
- ARMI_BLX = 0xfa000000,
- ARMI_BLXr = 0xe12fff30,
-
- /* ARMv6 */
- ARMI_REV = 0xe6bf0f30,
- ARMI_SXTB = 0xe6af0070,
- ARMI_SXTH = 0xe6bf0070,
- ARMI_UXTB = 0xe6ef0070,
- ARMI_UXTH = 0xe6ff0070,
-
- /* ARMv6T2 */
- ARMI_MOVW = 0xe3000000,
- ARMI_MOVT = 0xe3400000,
-
- /* VFP */
- ARMI_VMOV_D = 0xeeb00b40,
- ARMI_VMOV_S = 0xeeb00a40,
- ARMI_VMOVI_D = 0xeeb00b00,
-
- ARMI_VMOV_R_S = 0xee100a10,
- ARMI_VMOV_S_R = 0xee000a10,
- ARMI_VMOV_RR_D = 0xec500b10,
- ARMI_VMOV_D_RR = 0xec400b10,
-
- ARMI_VADD_D = 0xee300b00,
- ARMI_VSUB_D = 0xee300b40,
- ARMI_VMUL_D = 0xee200b00,
- ARMI_VMLA_D = 0xee000b00,
- ARMI_VMLS_D = 0xee000b40,
- ARMI_VNMLS_D = 0xee100b00,
- ARMI_VDIV_D = 0xee800b00,
-
- ARMI_VABS_D = 0xeeb00bc0,
- ARMI_VNEG_D = 0xeeb10b40,
- ARMI_VSQRT_D = 0xeeb10bc0,
-
- ARMI_VCMP_D = 0xeeb40b40,
- ARMI_VCMPZ_D = 0xeeb50b40,
-
- ARMI_VMRS = 0xeef1fa10,
-
- ARMI_VCVT_S32_F32 = 0xeebd0ac0,
- ARMI_VCVT_S32_F64 = 0xeebd0bc0,
- ARMI_VCVT_U32_F32 = 0xeebc0ac0,
- ARMI_VCVT_U32_F64 = 0xeebc0bc0,
- ARMI_VCVTR_S32_F32 = 0xeebd0a40,
- ARMI_VCVTR_S32_F64 = 0xeebd0b40,
- ARMI_VCVTR_U32_F32 = 0xeebc0a40,
- ARMI_VCVTR_U32_F64 = 0xeebc0b40,
- ARMI_VCVT_F32_S32 = 0xeeb80ac0,
- ARMI_VCVT_F64_S32 = 0xeeb80bc0,
- ARMI_VCVT_F32_U32 = 0xeeb80a40,
- ARMI_VCVT_F64_U32 = 0xeeb80b40,
- ARMI_VCVT_F32_F64 = 0xeeb70bc0,
- ARMI_VCVT_F64_F32 = 0xeeb70ac0,
-
- ARMI_VLDR_S = 0xed100a00,
- ARMI_VLDR_D = 0xed100b00,
- ARMI_VSTR_S = 0xed000a00,
- ARMI_VSTR_D = 0xed000b00,
-} ARMIns;
-
-typedef enum ARMShift {
- ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR
-} ARMShift;
-
-/* ARM condition codes. */
-typedef enum ARMCC {
- CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
- CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
- CC_HS = CC_CS, CC_LO = CC_CC
-} ARMCC;
-
-#endif
+/*
+** Definitions for ARM CPUs.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_ARM_H
+#define _LJ_TARGET_ARM_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC)
+#if LJ_SOFTFP
+#define FPRDEF(_)
+#else
+#define FPRDEF(_) \
+ _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \
+ _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15)
+#endif
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_LR,
+
+ /* Calling conventions. */
+ RID_RET = RID_R0,
+ RID_RETLO = RID_R0,
+ RID_RETHI = RID_R1,
+#if LJ_SOFTFP
+ RID_FPRET = RID_R0,
+#else
+ RID_FPRET = RID_D0,
+#endif
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R9, /* Interpreter BASE. */
+ RID_LPC = RID_R6, /* Interpreter PC. */
+ RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R8, /* Interpreter L. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_PC+1,
+ RID_MIN_FPR = RID_MAX_GPR,
+#if LJ_SOFTFP
+ RID_MAX_FPR = RID_MIN_FPR,
+#else
+ RID_MAX_FPR = RID_D15+1,
+#endif
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except sp, lr and pc. */
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1))
+#define RSET_GPREVEN \
+ (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \
+ RID2RSET(RID_R8)|RID2RSET(RID_R10))
+#define RSET_GPRODD \
+ (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \
+ RID2RSET(RID_R9)|RID2RSET(RID_R11))
+#if LJ_SOFTFP
+#define RSET_FPR 0
+#else
+#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
+#endif
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+/* ABI-specific register sets. lr is an implicit scratch register. */
+#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12))
+#ifdef __APPLE__
+#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9))
+#else
+#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_
+#endif
+#if LJ_SOFTFP
+#define RSET_SCRATCH_FPR 0
+#else
+#define RSET_SCRATCH_FPR (RSET_RANGE(RID_D0, RID_D7+1))
+#endif
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R0
+#define REGARG_LASTGPR RID_R3
+#define REGARG_NUMGPR 4
+#if LJ_ABI_SOFTFP
+#define REGARG_FIRSTFPR 0
+#define REGARG_LASTFPR 0
+#define REGARG_NUMFPR 0
+#else
+#define REGARG_FIRSTFPR RID_D0
+#define REGARG_LASTFPR RID_D7
+#define REGARG_NUMFPR 8
+#endif
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#define SPS_FIXED 2
+#define SPS_FIRST 2
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+#if !LJ_SOFTFP
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+#endif
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* PC after instruction that caused an exit. Used to find the trace number. */
+#define EXITSTATE_PCREG RID_PC
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+#define EXITSTUB_SPACING 4
+#define EXITSTUBS_PER_GROUP 32
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28))
+#define ARMF_N(r) ((r) << 16)
+#define ARMF_D(r) ((r) << 12)
+#define ARMF_S(r) ((r) << 8)
+#define ARMF_M(r) (r)
+#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7))
+#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r))
+
+typedef enum ARMIns {
+ ARMI_CCAL = 0xe0000000,
+ ARMI_S = 0x000100000,
+ ARMI_K12 = 0x02000000,
+ ARMI_KNEG = 0x00200000,
+ ARMI_LS_W = 0x00200000,
+ ARMI_LS_U = 0x00800000,
+ ARMI_LS_P = 0x01000000,
+ ARMI_LS_R = 0x02000000,
+ ARMI_LSX_I = 0x00400000,
+
+ ARMI_AND = 0xe0000000,
+ ARMI_EOR = 0xe0200000,
+ ARMI_SUB = 0xe0400000,
+ ARMI_RSB = 0xe0600000,
+ ARMI_ADD = 0xe0800000,
+ ARMI_ADC = 0xe0a00000,
+ ARMI_SBC = 0xe0c00000,
+ ARMI_RSC = 0xe0e00000,
+ ARMI_TST = 0xe1100000,
+ ARMI_TEQ = 0xe1300000,
+ ARMI_CMP = 0xe1500000,
+ ARMI_CMN = 0xe1700000,
+ ARMI_ORR = 0xe1800000,
+ ARMI_MOV = 0xe1a00000,
+ ARMI_BIC = 0xe1c00000,
+ ARMI_MVN = 0xe1e00000,
+
+ ARMI_NOP = 0xe1a00000,
+
+ ARMI_MUL = 0xe0000090,
+ ARMI_SMULL = 0xe0c00090,
+
+ ARMI_LDR = 0xe4100000,
+ ARMI_LDRB = 0xe4500000,
+ ARMI_LDRH = 0xe01000b0,
+ ARMI_LDRSB = 0xe01000d0,
+ ARMI_LDRSH = 0xe01000f0,
+ ARMI_LDRD = 0xe00000d0,
+ ARMI_STR = 0xe4000000,
+ ARMI_STRB = 0xe4400000,
+ ARMI_STRH = 0xe00000b0,
+ ARMI_STRD = 0xe00000f0,
+ ARMI_PUSH = 0xe92d0000,
+
+ ARMI_B = 0xea000000,
+ ARMI_BL = 0xeb000000,
+ ARMI_BLX = 0xfa000000,
+ ARMI_BLXr = 0xe12fff30,
+
+ /* ARMv6 */
+ ARMI_REV = 0xe6bf0f30,
+ ARMI_SXTB = 0xe6af0070,
+ ARMI_SXTH = 0xe6bf0070,
+ ARMI_UXTB = 0xe6ef0070,
+ ARMI_UXTH = 0xe6ff0070,
+
+ /* ARMv6T2 */
+ ARMI_MOVW = 0xe3000000,
+ ARMI_MOVT = 0xe3400000,
+
+ /* VFP */
+ ARMI_VMOV_D = 0xeeb00b40,
+ ARMI_VMOV_S = 0xeeb00a40,
+ ARMI_VMOVI_D = 0xeeb00b00,
+
+ ARMI_VMOV_R_S = 0xee100a10,
+ ARMI_VMOV_S_R = 0xee000a10,
+ ARMI_VMOV_RR_D = 0xec500b10,
+ ARMI_VMOV_D_RR = 0xec400b10,
+
+ ARMI_VADD_D = 0xee300b00,
+ ARMI_VSUB_D = 0xee300b40,
+ ARMI_VMUL_D = 0xee200b00,
+ ARMI_VMLA_D = 0xee000b00,
+ ARMI_VMLS_D = 0xee000b40,
+ ARMI_VNMLS_D = 0xee100b00,
+ ARMI_VDIV_D = 0xee800b00,
+
+ ARMI_VABS_D = 0xeeb00bc0,
+ ARMI_VNEG_D = 0xeeb10b40,
+ ARMI_VSQRT_D = 0xeeb10bc0,
+
+ ARMI_VCMP_D = 0xeeb40b40,
+ ARMI_VCMPZ_D = 0xeeb50b40,
+
+ ARMI_VMRS = 0xeef1fa10,
+
+ ARMI_VCVT_S32_F32 = 0xeebd0ac0,
+ ARMI_VCVT_S32_F64 = 0xeebd0bc0,
+ ARMI_VCVT_U32_F32 = 0xeebc0ac0,
+ ARMI_VCVT_U32_F64 = 0xeebc0bc0,
+ ARMI_VCVTR_S32_F32 = 0xeebd0a40,
+ ARMI_VCVTR_S32_F64 = 0xeebd0b40,
+ ARMI_VCVTR_U32_F32 = 0xeebc0a40,
+ ARMI_VCVTR_U32_F64 = 0xeebc0b40,
+ ARMI_VCVT_F32_S32 = 0xeeb80ac0,
+ ARMI_VCVT_F64_S32 = 0xeeb80bc0,
+ ARMI_VCVT_F32_U32 = 0xeeb80a40,
+ ARMI_VCVT_F64_U32 = 0xeeb80b40,
+ ARMI_VCVT_F32_F64 = 0xeeb70bc0,
+ ARMI_VCVT_F64_F32 = 0xeeb70ac0,
+
+ ARMI_VLDR_S = 0xed100a00,
+ ARMI_VLDR_D = 0xed100b00,
+ ARMI_VSTR_S = 0xed000a00,
+ ARMI_VSTR_D = 0xed000b00,
+} ARMIns;
+
+typedef enum ARMShift {
+ ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR
+} ARMShift;
+
+/* ARM condition codes. */
+typedef enum ARMCC {
+ CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
+ CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
+ CC_HS = CC_CS, CC_LO = CC_CC
+} ARMCC;
+
+#endif
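Aside for readers of the ARM encodings above: the ARMF_* field macros OR register numbers into the fixed bit positions of a 32-bit ARM instruction word, and ARMF_CC replaces the default AL condition (baked into ARMI_CCAL and the ARMI_* base opcodes) with another condition in bits 28-31. The sketch below is illustrative only; it is not part of the diff or of LuaJIT's own assembler, and the few constants it needs are copied locally from the header above so it compiles on its own.

/* Illustrative sketch: constants mirror lj_target_arm.h above. */
#include <stdint.h>
#include <stdio.h>

#define ARMI_CCAL 0xe0000000u
#define ARMI_ADD  0xe0800000u
#define ARMF_N(r) ((r) << 16)
#define ARMF_D(r) ((r) << 12)
#define ARMF_M(r) (r)
#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((uint32_t)(cc) << 28))

enum { CC_EQ = 0 };  /* first ARM condition code, as enumerated above */

int main(void)
{
  /* ADD r0, r1, r2 -> 0xe0810002 (condition AL is part of ARMI_ADD). */
  uint32_t add = ARMI_ADD | ARMF_D(0) | ARMF_N(1) | ARMF_M(2);
  /* ADDEQ r0, r1, r2 -> 0x00810002 (AL swapped for EQ in bits 28-31). */
  uint32_t addeq = ARMF_CC(add, CC_EQ);
  printf("%08x %08x\n", add, addeq);  /* prints: e0810002 00810002 */
  return 0;
}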
diff --git a/3rdparty/lua/src/lj_target_mips.h b/3rdparty/lua/src/lj_target_mips.h
index d6e7a16..36f46c8 100644
--- a/3rdparty/lua/src/lj_target_mips.h
+++ b/3rdparty/lua/src/lj_target_mips.h
@@ -1,257 +1,257 @@
-/*
-** Definitions for MIPS CPUs.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_MIPS_H
-#define _LJ_TARGET_MIPS_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#define GPRDEF(_) \
- _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
- _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \
- _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
- _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA)
-#define FPRDEF(_) \
- _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
- _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
- _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
- _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
-#define VRIDDEF(_)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_ZERO = RID_R0,
- RID_TMP = RID_RA,
-
- /* Calling conventions. */
- RID_RET = RID_R2,
-#if LJ_LE
- RID_RETHI = RID_R3,
- RID_RETLO = RID_R2,
-#else
- RID_RETHI = RID_R2,
- RID_RETLO = RID_R3,
-#endif
- RID_FPRET = RID_F0,
- RID_CFUNCADDR = RID_R25,
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_R16, /* Interpreter BASE. */
- RID_LPC = RID_R18, /* Interpreter PC. */
- RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */
- RID_LREG = RID_R20, /* Interpreter L. */
- RID_JGL = RID_R30, /* On-trace: global_State + 32768. */
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_R0,
- RID_MAX_GPR = RID_RA+1,
- RID_MIN_FPR = RID_F0,
- RID_MAX_FPR = RID_F31+1,
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */
-};
-
-#define RID_NUM_KREF RID_NUM_GPR
-#define RID_MIN_KREF RID_R0
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2 and JGL. */
-#define RSET_FIXED \
- (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\
- RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
-#define RSET_FPR \
- (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
- RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
- RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\
- RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30))
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-#define RSET_SCRATCH_GPR \
- (RSET_RANGE(RID_R1, RID_R15+1)|\
- RID2RSET(RID_R24)|RID2RSET(RID_R25)|RID2RSET(RID_R28))
-#define RSET_SCRATCH_FPR \
- (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
- RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
- RID2RSET(RID_F16)|RID2RSET(RID_F18))
-#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
-#define REGARG_FIRSTGPR RID_R4
-#define REGARG_LASTGPR RID_R7
-#define REGARG_NUMGPR 4
-#define REGARG_FIRSTFPR RID_F12
-#define REGARG_LASTFPR RID_F14
-#define REGARG_NUMFPR 2
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use.
-*/
-#define SPS_FIXED 5
-#define SPS_FIRST 4
-
-#define SPOFS_TMP 0
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
- int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* Highest exit + 1 indicates stack check. */
-#define EXITSTATE_CHECKEXIT 1
-
-/* Return the address of a per-trace exit stub. */
-static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p)
-{
- while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */
- return p;
-}
-/* Avoid dependence on lj_jit.h if only including lj_target.h. */
-#define exitstub_trace_addr(T, exitno) \
- exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode))
-
-/* -- Instructions -------------------------------------------------------- */
-
-/* Instruction fields. */
-#define MIPSF_S(r) ((r) << 21)
-#define MIPSF_T(r) ((r) << 16)
-#define MIPSF_D(r) ((r) << 11)
-#define MIPSF_R(r) ((r) << 21)
-#define MIPSF_H(r) ((r) << 16)
-#define MIPSF_G(r) ((r) << 11)
-#define MIPSF_F(r) ((r) << 6)
-#define MIPSF_A(n) ((n) << 6)
-#define MIPSF_M(n) ((n) << 11)
-
-typedef enum MIPSIns {
- /* Integer instructions. */
- MIPSI_MOVE = 0x00000021,
- MIPSI_NOP = 0x00000000,
-
- MIPSI_LI = 0x24000000,
- MIPSI_LU = 0x34000000,
- MIPSI_LUI = 0x3c000000,
-
- MIPSI_ADDIU = 0x24000000,
- MIPSI_ANDI = 0x30000000,
- MIPSI_ORI = 0x34000000,
- MIPSI_XORI = 0x38000000,
- MIPSI_SLTI = 0x28000000,
- MIPSI_SLTIU = 0x2c000000,
-
- MIPSI_ADDU = 0x00000021,
- MIPSI_SUBU = 0x00000023,
- MIPSI_MUL = 0x70000002,
- MIPSI_AND = 0x00000024,
- MIPSI_OR = 0x00000025,
- MIPSI_XOR = 0x00000026,
- MIPSI_NOR = 0x00000027,
- MIPSI_SLT = 0x0000002a,
- MIPSI_SLTU = 0x0000002b,
- MIPSI_MOVZ = 0x0000000a,
- MIPSI_MOVN = 0x0000000b,
-
- MIPSI_SLL = 0x00000000,
- MIPSI_SRL = 0x00000002,
- MIPSI_SRA = 0x00000003,
- MIPSI_ROTR = 0x00200002, /* MIPS32R2 */
- MIPSI_SLLV = 0x00000004,
- MIPSI_SRLV = 0x00000006,
- MIPSI_SRAV = 0x00000007,
- MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */
-
- MIPSI_SEB = 0x7c000420, /* MIPS32R2 */
- MIPSI_SEH = 0x7c000620, /* MIPS32R2 */
- MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */
-
- MIPSI_B = 0x10000000,
- MIPSI_J = 0x08000000,
- MIPSI_JAL = 0x0c000000,
- MIPSI_JR = 0x00000008,
- MIPSI_JALR = 0x0000f809,
-
- MIPSI_BEQ = 0x10000000,
- MIPSI_BNE = 0x14000000,
- MIPSI_BLEZ = 0x18000000,
- MIPSI_BGTZ = 0x1c000000,
- MIPSI_BLTZ = 0x04000000,
- MIPSI_BGEZ = 0x04010000,
-
- /* Load/store instructions. */
- MIPSI_LW = 0x8c000000,
- MIPSI_SW = 0xac000000,
- MIPSI_LB = 0x80000000,
- MIPSI_SB = 0xa0000000,
- MIPSI_LH = 0x84000000,
- MIPSI_SH = 0xa4000000,
- MIPSI_LBU = 0x90000000,
- MIPSI_LHU = 0x94000000,
- MIPSI_LWC1 = 0xc4000000,
- MIPSI_SWC1 = 0xe4000000,
- MIPSI_LDC1 = 0xd4000000,
- MIPSI_SDC1 = 0xf4000000,
-
- /* FP instructions. */
- MIPSI_MOV_S = 0x46000006,
- MIPSI_MOV_D = 0x46200006,
- MIPSI_MOVT_D = 0x46210011,
- MIPSI_MOVF_D = 0x46200011,
-
- MIPSI_ABS_D = 0x46200005,
- MIPSI_NEG_D = 0x46200007,
-
- MIPSI_ADD_D = 0x46200000,
- MIPSI_SUB_D = 0x46200001,
- MIPSI_MUL_D = 0x46200002,
- MIPSI_DIV_D = 0x46200003,
- MIPSI_SQRT_D = 0x46200004,
-
- MIPSI_ADD_S = 0x46000000,
- MIPSI_SUB_S = 0x46000001,
-
- MIPSI_CVT_D_S = 0x46000021,
- MIPSI_CVT_W_S = 0x46000024,
- MIPSI_CVT_S_D = 0x46200020,
- MIPSI_CVT_W_D = 0x46200024,
- MIPSI_CVT_S_W = 0x46800020,
- MIPSI_CVT_D_W = 0x46800021,
-
- MIPSI_TRUNC_W_S = 0x4600000d,
- MIPSI_TRUNC_W_D = 0x4620000d,
- MIPSI_FLOOR_W_S = 0x4600000f,
- MIPSI_FLOOR_W_D = 0x4620000f,
-
- MIPSI_MFC1 = 0x44000000,
- MIPSI_MTC1 = 0x44800000,
-
- MIPSI_BC1F = 0x45000000,
- MIPSI_BC1T = 0x45010000,
-
- MIPSI_C_EQ_D = 0x46200032,
- MIPSI_C_OLT_D = 0x46200034,
- MIPSI_C_ULT_D = 0x46200035,
- MIPSI_C_OLE_D = 0x46200036,
- MIPSI_C_ULE_D = 0x46200037,
-
-} MIPSIns;
-
-#endif
+/*
+** Definitions for MIPS CPUs.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_MIPS_H
+#define _LJ_TARGET_MIPS_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \
+ _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
+ _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA)
+#define FPRDEF(_) \
+ _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
+ _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
+ _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
+ _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_ZERO = RID_R0,
+ RID_TMP = RID_RA,
+
+ /* Calling conventions. */
+ RID_RET = RID_R2,
+#if LJ_LE
+ RID_RETHI = RID_R3,
+ RID_RETLO = RID_R2,
+#else
+ RID_RETHI = RID_R2,
+ RID_RETLO = RID_R3,
+#endif
+ RID_FPRET = RID_F0,
+ RID_CFUNCADDR = RID_R25,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R16, /* Interpreter BASE. */
+ RID_LPC = RID_R18, /* Interpreter PC. */
+ RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R20, /* Interpreter L. */
+ RID_JGL = RID_R30, /* On-trace: global_State + 32768. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_RA+1,
+ RID_MIN_FPR = RID_F0,
+ RID_MAX_FPR = RID_F31+1,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2 and JGL. */
+#define RSET_FIXED \
+ (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\
+ RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#define RSET_FPR \
+ (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
+ RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
+ RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\
+ RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30))
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#define RSET_SCRATCH_GPR \
+ (RSET_RANGE(RID_R1, RID_R15+1)|\
+ RID2RSET(RID_R24)|RID2RSET(RID_R25)|RID2RSET(RID_R28))
+#define RSET_SCRATCH_FPR \
+ (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
+ RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
+ RID2RSET(RID_F16)|RID2RSET(RID_F18))
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R4
+#define REGARG_LASTGPR RID_R7
+#define REGARG_NUMGPR 4
+#define REGARG_FIRSTFPR RID_F12
+#define REGARG_LASTFPR RID_F14
+#define REGARG_NUMFPR 2
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use.
+*/
+#define SPS_FIXED 5
+#define SPS_FIRST 4
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p)
+{
+ while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */
+ return p;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define MIPSF_S(r) ((r) << 21)
+#define MIPSF_T(r) ((r) << 16)
+#define MIPSF_D(r) ((r) << 11)
+#define MIPSF_R(r) ((r) << 21)
+#define MIPSF_H(r) ((r) << 16)
+#define MIPSF_G(r) ((r) << 11)
+#define MIPSF_F(r) ((r) << 6)
+#define MIPSF_A(n) ((n) << 6)
+#define MIPSF_M(n) ((n) << 11)
+
+typedef enum MIPSIns {
+ /* Integer instructions. */
+ MIPSI_MOVE = 0x00000021,
+ MIPSI_NOP = 0x00000000,
+
+ MIPSI_LI = 0x24000000,
+ MIPSI_LU = 0x34000000,
+ MIPSI_LUI = 0x3c000000,
+
+ MIPSI_ADDIU = 0x24000000,
+ MIPSI_ANDI = 0x30000000,
+ MIPSI_ORI = 0x34000000,
+ MIPSI_XORI = 0x38000000,
+ MIPSI_SLTI = 0x28000000,
+ MIPSI_SLTIU = 0x2c000000,
+
+ MIPSI_ADDU = 0x00000021,
+ MIPSI_SUBU = 0x00000023,
+ MIPSI_MUL = 0x70000002,
+ MIPSI_AND = 0x00000024,
+ MIPSI_OR = 0x00000025,
+ MIPSI_XOR = 0x00000026,
+ MIPSI_NOR = 0x00000027,
+ MIPSI_SLT = 0x0000002a,
+ MIPSI_SLTU = 0x0000002b,
+ MIPSI_MOVZ = 0x0000000a,
+ MIPSI_MOVN = 0x0000000b,
+
+ MIPSI_SLL = 0x00000000,
+ MIPSI_SRL = 0x00000002,
+ MIPSI_SRA = 0x00000003,
+ MIPSI_ROTR = 0x00200002, /* MIPS32R2 */
+ MIPSI_SLLV = 0x00000004,
+ MIPSI_SRLV = 0x00000006,
+ MIPSI_SRAV = 0x00000007,
+ MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */
+
+ MIPSI_SEB = 0x7c000420, /* MIPS32R2 */
+ MIPSI_SEH = 0x7c000620, /* MIPS32R2 */
+ MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */
+
+ MIPSI_B = 0x10000000,
+ MIPSI_J = 0x08000000,
+ MIPSI_JAL = 0x0c000000,
+ MIPSI_JR = 0x00000008,
+ MIPSI_JALR = 0x0000f809,
+
+ MIPSI_BEQ = 0x10000000,
+ MIPSI_BNE = 0x14000000,
+ MIPSI_BLEZ = 0x18000000,
+ MIPSI_BGTZ = 0x1c000000,
+ MIPSI_BLTZ = 0x04000000,
+ MIPSI_BGEZ = 0x04010000,
+
+ /* Load/store instructions. */
+ MIPSI_LW = 0x8c000000,
+ MIPSI_SW = 0xac000000,
+ MIPSI_LB = 0x80000000,
+ MIPSI_SB = 0xa0000000,
+ MIPSI_LH = 0x84000000,
+ MIPSI_SH = 0xa4000000,
+ MIPSI_LBU = 0x90000000,
+ MIPSI_LHU = 0x94000000,
+ MIPSI_LWC1 = 0xc4000000,
+ MIPSI_SWC1 = 0xe4000000,
+ MIPSI_LDC1 = 0xd4000000,
+ MIPSI_SDC1 = 0xf4000000,
+
+ /* FP instructions. */
+ MIPSI_MOV_S = 0x46000006,
+ MIPSI_MOV_D = 0x46200006,
+ MIPSI_MOVT_D = 0x46210011,
+ MIPSI_MOVF_D = 0x46200011,
+
+ MIPSI_ABS_D = 0x46200005,
+ MIPSI_NEG_D = 0x46200007,
+
+ MIPSI_ADD_D = 0x46200000,
+ MIPSI_SUB_D = 0x46200001,
+ MIPSI_MUL_D = 0x46200002,
+ MIPSI_DIV_D = 0x46200003,
+ MIPSI_SQRT_D = 0x46200004,
+
+ MIPSI_ADD_S = 0x46000000,
+ MIPSI_SUB_S = 0x46000001,
+
+ MIPSI_CVT_D_S = 0x46000021,
+ MIPSI_CVT_W_S = 0x46000024,
+ MIPSI_CVT_S_D = 0x46200020,
+ MIPSI_CVT_W_D = 0x46200024,
+ MIPSI_CVT_S_W = 0x46800020,
+ MIPSI_CVT_D_W = 0x46800021,
+
+ MIPSI_TRUNC_W_S = 0x4600000d,
+ MIPSI_TRUNC_W_D = 0x4620000d,
+ MIPSI_FLOOR_W_S = 0x4600000f,
+ MIPSI_FLOOR_W_D = 0x4620000f,
+
+ MIPSI_MFC1 = 0x44000000,
+ MIPSI_MTC1 = 0x44800000,
+
+ MIPSI_BC1F = 0x45000000,
+ MIPSI_BC1T = 0x45010000,
+
+ MIPSI_C_EQ_D = 0x46200032,
+ MIPSI_C_OLT_D = 0x46200034,
+ MIPSI_C_ULT_D = 0x46200035,
+ MIPSI_C_OLE_D = 0x46200036,
+ MIPSI_C_ULE_D = 0x46200037,
+
+} MIPSIns;
+
+#endif
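As in the ARM header, the MIPSF_* macros above just place 5-bit register fields at the standard MIPS bit positions (rs at bit 21, rt at bit 16, rd at bit 11) on top of a base opcode. A self-contained illustrative sketch follows; the example instruction is an arbitrary choice for demonstration, not something taken from LuaJIT's code generator.

/* Illustrative sketch: constants mirror lj_target_mips.h above. */
#include <stdint.h>
#include <stdio.h>

#define MIPSI_ADDU 0x00000021u
#define MIPSF_S(r) ((r) << 21)  /* rs field */
#define MIPSF_T(r) ((r) << 16)  /* rt field */
#define MIPSF_D(r) ((r) << 11)  /* rd field */

int main(void)
{
  /* addu $v0, $a0, $a1 (i.e. $2 = $4 + $5) -> 0x00851021 */
  uint32_t ins = MIPSI_ADDU | MIPSF_S(4) | MIPSF_T(5) | MIPSF_D(2);
  printf("%08x\n", ins);  /* prints: 00851021 */
  return 0;
}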
diff --git a/3rdparty/lua/src/lj_target_ppc.h b/3rdparty/lua/src/lj_target_ppc.h
index c2189ad..4e95c3a 100644
--- a/3rdparty/lua/src/lj_target_ppc.h
+++ b/3rdparty/lua/src/lj_target_ppc.h
@@ -1,280 +1,280 @@
-/*
-** Definitions for PPC CPUs.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_PPC_H
-#define _LJ_TARGET_PPC_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#define GPRDEF(_) \
- _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \
- _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \
- _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
- _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31)
-#define FPRDEF(_) \
- _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
- _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
- _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
- _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
-#define VRIDDEF(_)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_TMP = RID_R0,
-
- /* Calling conventions. */
- RID_RET = RID_R3,
- RID_RETHI = RID_R3,
- RID_RETLO = RID_R4,
- RID_FPRET = RID_F1,
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_R14, /* Interpreter BASE. */
- RID_LPC = RID_R16, /* Interpreter PC. */
- RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */
- RID_LREG = RID_R18, /* Interpreter L. */
- RID_JGL = RID_R31, /* On-trace: global_State + 32768. */
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_R0,
- RID_MAX_GPR = RID_R31+1,
- RID_MIN_FPR = RID_F0,
- RID_MAX_FPR = RID_F31+1,
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
-};
-
-#define RID_NUM_KREF RID_NUM_GPR
-#define RID_MIN_KREF RID_R0
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */
-#define RSET_FIXED \
- (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\
- RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
-#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1))
-#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1))
-#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
-#define REGARG_FIRSTGPR RID_R3
-#define REGARG_LASTGPR RID_R10
-#define REGARG_NUMGPR 8
-#define REGARG_FIRSTFPR RID_F1
-#define REGARG_LASTFPR RID_F8
-#define REGARG_NUMFPR 8
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use.
-** [sp+12] tmplo word \
-** [sp+ 8] tmphi word / tmp dword, parameter area for callee
-** [sp+ 4] tmpw, LR of callee
-** [sp+ 0] stack chain
-*/
-#define SPS_FIXED 7
-#define SPS_FIRST 4
-
-/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */
-#define SPOFS_TMPW 4
-#define SPOFS_TMP 8
-#define SPOFS_TMPHI 8
-#define SPOFS_TMPLO 12
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
- int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* Highest exit + 1 indicates stack check. */
-#define EXITSTATE_CHECKEXIT 1
-
-/* Return the address of a per-trace exit stub. */
-static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
-{
- while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */
- return p + 3 + exitno;
-}
-/* Avoid dependence on lj_jit.h if only including lj_target.h. */
-#define exitstub_trace_addr(T, exitno) \
- exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
-
-/* -- Instructions -------------------------------------------------------- */
-
-/* Instruction fields. */
-#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22))
-#define PPCF_T(r) ((r) << 21)
-#define PPCF_A(r) ((r) << 16)
-#define PPCF_B(r) ((r) << 11)
-#define PPCF_C(r) ((r) << 6)
-#define PPCF_MB(n) ((n) << 6)
-#define PPCF_ME(n) ((n) << 1)
-#define PPCF_Y 0x00200000
-#define PPCF_DOT 0x00000001
-
-typedef enum PPCIns {
- /* Integer instructions. */
- PPCI_MR = 0x7c000378,
- PPCI_NOP = 0x60000000,
-
- PPCI_LI = 0x38000000,
- PPCI_LIS = 0x3c000000,
-
- PPCI_ADD = 0x7c000214,
- PPCI_ADDC = 0x7c000014,
- PPCI_ADDO = 0x7c000614,
- PPCI_ADDE = 0x7c000114,
- PPCI_ADDZE = 0x7c000194,
- PPCI_ADDME = 0x7c0001d4,
- PPCI_ADDI = 0x38000000,
- PPCI_ADDIS = 0x3c000000,
- PPCI_ADDIC = 0x30000000,
- PPCI_ADDICDOT = 0x34000000,
-
- PPCI_SUBF = 0x7c000050,
- PPCI_SUBFC = 0x7c000010,
- PPCI_SUBFO = 0x7c000450,
- PPCI_SUBFE = 0x7c000110,
- PPCI_SUBFZE = 0x7c000190,
- PPCI_SUBFME = 0x7c0001d0,
- PPCI_SUBFIC = 0x20000000,
-
- PPCI_NEG = 0x7c0000d0,
-
- PPCI_AND = 0x7c000038,
- PPCI_ANDC = 0x7c000078,
- PPCI_NAND = 0x7c0003b8,
- PPCI_ANDIDOT = 0x70000000,
- PPCI_ANDISDOT = 0x74000000,
-
- PPCI_OR = 0x7c000378,
- PPCI_NOR = 0x7c0000f8,
- PPCI_ORI = 0x60000000,
- PPCI_ORIS = 0x64000000,
-
- PPCI_XOR = 0x7c000278,
- PPCI_EQV = 0x7c000238,
- PPCI_XORI = 0x68000000,
- PPCI_XORIS = 0x6c000000,
-
- PPCI_CMPW = 0x7c000000,
- PPCI_CMPLW = 0x7c000040,
- PPCI_CMPWI = 0x2c000000,
- PPCI_CMPLWI = 0x28000000,
-
- PPCI_MULLW = 0x7c0001d6,
- PPCI_MULLI = 0x1c000000,
- PPCI_MULLWO = 0x7c0005d6,
-
- PPCI_EXTSB = 0x7c000774,
- PPCI_EXTSH = 0x7c000734,
-
- PPCI_SLW = 0x7c000030,
- PPCI_SRW = 0x7c000430,
- PPCI_SRAW = 0x7c000630,
- PPCI_SRAWI = 0x7c000670,
-
- PPCI_RLWNM = 0x5c000000,
- PPCI_RLWINM = 0x54000000,
- PPCI_RLWIMI = 0x50000000,
-
- PPCI_B = 0x48000000,
- PPCI_BL = 0x48000001,
- PPCI_BC = 0x40800000,
- PPCI_BCL = 0x40800001,
- PPCI_BCTR = 0x4e800420,
- PPCI_BCTRL = 0x4e800421,
-
- PPCI_CRANDC = 0x4c000102,
- PPCI_CRXOR = 0x4c000182,
- PPCI_CRAND = 0x4c000202,
- PPCI_CREQV = 0x4c000242,
- PPCI_CRORC = 0x4c000342,
- PPCI_CROR = 0x4c000382,
-
- PPCI_MFLR = 0x7c0802a6,
- PPCI_MTCTR = 0x7c0903a6,
-
- PPCI_MCRXR = 0x7c000400,
-
- /* Load/store instructions. */
- PPCI_LWZ = 0x80000000,
- PPCI_LBZ = 0x88000000,
- PPCI_STW = 0x90000000,
- PPCI_STB = 0x98000000,
- PPCI_LHZ = 0xa0000000,
- PPCI_LHA = 0xa8000000,
- PPCI_STH = 0xb0000000,
-
- PPCI_STWU = 0x94000000,
-
- PPCI_LFS = 0xc0000000,
- PPCI_LFD = 0xc8000000,
- PPCI_STFS = 0xd0000000,
- PPCI_STFD = 0xd8000000,
-
- PPCI_LWZX = 0x7c00002e,
- PPCI_LBZX = 0x7c0000ae,
- PPCI_STWX = 0x7c00012e,
- PPCI_STBX = 0x7c0001ae,
- PPCI_LHZX = 0x7c00022e,
- PPCI_LHAX = 0x7c0002ae,
- PPCI_STHX = 0x7c00032e,
-
- PPCI_LWBRX = 0x7c00042c,
- PPCI_STWBRX = 0x7c00052c,
-
- PPCI_LFSX = 0x7c00042e,
- PPCI_LFDX = 0x7c0004ae,
- PPCI_STFSX = 0x7c00052e,
- PPCI_STFDX = 0x7c0005ae,
-
- /* FP instructions. */
- PPCI_FMR = 0xfc000090,
- PPCI_FNEG = 0xfc000050,
- PPCI_FABS = 0xfc000210,
-
- PPCI_FRSP = 0xfc000018,
- PPCI_FCTIWZ = 0xfc00001e,
-
- PPCI_FADD = 0xfc00002a,
- PPCI_FSUB = 0xfc000028,
- PPCI_FMUL = 0xfc000032,
- PPCI_FDIV = 0xfc000024,
- PPCI_FSQRT = 0xfc00002c,
-
- PPCI_FMADD = 0xfc00003a,
- PPCI_FMSUB = 0xfc000038,
- PPCI_FNMSUB = 0xfc00003c,
-
- PPCI_FCMPU = 0xfc000000,
- PPCI_FSEL = 0xfc00002e,
-} PPCIns;
-
-typedef enum PPCCC {
- CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO
-} PPCCC;
-
-#endif
+/*
+** Definitions for PPC CPUs.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_PPC_H
+#define _LJ_TARGET_PPC_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \
+ _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
+ _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31)
+#define FPRDEF(_) \
+ _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
+ _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
+ _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
+ _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_R0,
+
+ /* Calling conventions. */
+ RID_RET = RID_R3,
+ RID_RETHI = RID_R3,
+ RID_RETLO = RID_R4,
+ RID_FPRET = RID_F1,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R14, /* Interpreter BASE. */
+ RID_LPC = RID_R16, /* Interpreter PC. */
+ RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R18, /* Interpreter L. */
+ RID_JGL = RID_R31, /* On-trace: global_State + 32768. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_R31+1,
+ RID_MIN_FPR = RID_F0,
+ RID_MAX_FPR = RID_F31+1,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */
+#define RSET_FIXED \
+ (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\
+ RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1))
+#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1))
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R3
+#define REGARG_LASTGPR RID_R10
+#define REGARG_NUMGPR 8
+#define REGARG_FIRSTFPR RID_F1
+#define REGARG_LASTFPR RID_F8
+#define REGARG_NUMFPR 8
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use.
+** [sp+12] tmplo word \
+** [sp+ 8] tmphi word / tmp dword, parameter area for callee
+** [sp+ 4] tmpw, LR of callee
+** [sp+ 0] stack chain
+*/
+#define SPS_FIXED 7
+#define SPS_FIRST 4
+
+/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */
+#define SPOFS_TMPW 4
+#define SPOFS_TMP 8
+#define SPOFS_TMPHI 8
+#define SPOFS_TMPLO 12
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
+{
+ while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */
+ return p + 3 + exitno;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22))
+#define PPCF_T(r) ((r) << 21)
+#define PPCF_A(r) ((r) << 16)
+#define PPCF_B(r) ((r) << 11)
+#define PPCF_C(r) ((r) << 6)
+#define PPCF_MB(n) ((n) << 6)
+#define PPCF_ME(n) ((n) << 1)
+#define PPCF_Y 0x00200000
+#define PPCF_DOT 0x00000001
+
+typedef enum PPCIns {
+ /* Integer instructions. */
+ PPCI_MR = 0x7c000378,
+ PPCI_NOP = 0x60000000,
+
+ PPCI_LI = 0x38000000,
+ PPCI_LIS = 0x3c000000,
+
+ PPCI_ADD = 0x7c000214,
+ PPCI_ADDC = 0x7c000014,
+ PPCI_ADDO = 0x7c000614,
+ PPCI_ADDE = 0x7c000114,
+ PPCI_ADDZE = 0x7c000194,
+ PPCI_ADDME = 0x7c0001d4,
+ PPCI_ADDI = 0x38000000,
+ PPCI_ADDIS = 0x3c000000,
+ PPCI_ADDIC = 0x30000000,
+ PPCI_ADDICDOT = 0x34000000,
+
+ PPCI_SUBF = 0x7c000050,
+ PPCI_SUBFC = 0x7c000010,
+ PPCI_SUBFO = 0x7c000450,
+ PPCI_SUBFE = 0x7c000110,
+ PPCI_SUBFZE = 0x7c000190,
+ PPCI_SUBFME = 0x7c0001d0,
+ PPCI_SUBFIC = 0x20000000,
+
+ PPCI_NEG = 0x7c0000d0,
+
+ PPCI_AND = 0x7c000038,
+ PPCI_ANDC = 0x7c000078,
+ PPCI_NAND = 0x7c0003b8,
+ PPCI_ANDIDOT = 0x70000000,
+ PPCI_ANDISDOT = 0x74000000,
+
+ PPCI_OR = 0x7c000378,
+ PPCI_NOR = 0x7c0000f8,
+ PPCI_ORI = 0x60000000,
+ PPCI_ORIS = 0x64000000,
+
+ PPCI_XOR = 0x7c000278,
+ PPCI_EQV = 0x7c000238,
+ PPCI_XORI = 0x68000000,
+ PPCI_XORIS = 0x6c000000,
+
+ PPCI_CMPW = 0x7c000000,
+ PPCI_CMPLW = 0x7c000040,
+ PPCI_CMPWI = 0x2c000000,
+ PPCI_CMPLWI = 0x28000000,
+
+ PPCI_MULLW = 0x7c0001d6,
+ PPCI_MULLI = 0x1c000000,
+ PPCI_MULLWO = 0x7c0005d6,
+
+ PPCI_EXTSB = 0x7c000774,
+ PPCI_EXTSH = 0x7c000734,
+
+ PPCI_SLW = 0x7c000030,
+ PPCI_SRW = 0x7c000430,
+ PPCI_SRAW = 0x7c000630,
+ PPCI_SRAWI = 0x7c000670,
+
+ PPCI_RLWNM = 0x5c000000,
+ PPCI_RLWINM = 0x54000000,
+ PPCI_RLWIMI = 0x50000000,
+
+ PPCI_B = 0x48000000,
+ PPCI_BL = 0x48000001,
+ PPCI_BC = 0x40800000,
+ PPCI_BCL = 0x40800001,
+ PPCI_BCTR = 0x4e800420,
+ PPCI_BCTRL = 0x4e800421,
+
+ PPCI_CRANDC = 0x4c000102,
+ PPCI_CRXOR = 0x4c000182,
+ PPCI_CRAND = 0x4c000202,
+ PPCI_CREQV = 0x4c000242,
+ PPCI_CRORC = 0x4c000342,
+ PPCI_CROR = 0x4c000382,
+
+ PPCI_MFLR = 0x7c0802a6,
+ PPCI_MTCTR = 0x7c0903a6,
+
+ PPCI_MCRXR = 0x7c000400,
+
+ /* Load/store instructions. */
+ PPCI_LWZ = 0x80000000,
+ PPCI_LBZ = 0x88000000,
+ PPCI_STW = 0x90000000,
+ PPCI_STB = 0x98000000,
+ PPCI_LHZ = 0xa0000000,
+ PPCI_LHA = 0xa8000000,
+ PPCI_STH = 0xb0000000,
+
+ PPCI_STWU = 0x94000000,
+
+ PPCI_LFS = 0xc0000000,
+ PPCI_LFD = 0xc8000000,
+ PPCI_STFS = 0xd0000000,
+ PPCI_STFD = 0xd8000000,
+
+ PPCI_LWZX = 0x7c00002e,
+ PPCI_LBZX = 0x7c0000ae,
+ PPCI_STWX = 0x7c00012e,
+ PPCI_STBX = 0x7c0001ae,
+ PPCI_LHZX = 0x7c00022e,
+ PPCI_LHAX = 0x7c0002ae,
+ PPCI_STHX = 0x7c00032e,
+
+ PPCI_LWBRX = 0x7c00042c,
+ PPCI_STWBRX = 0x7c00052c,
+
+ PPCI_LFSX = 0x7c00042e,
+ PPCI_LFDX = 0x7c0004ae,
+ PPCI_STFSX = 0x7c00052e,
+ PPCI_STFDX = 0x7c0005ae,
+
+ /* FP instructions. */
+ PPCI_FMR = 0xfc000090,
+ PPCI_FNEG = 0xfc000050,
+ PPCI_FABS = 0xfc000210,
+
+ PPCI_FRSP = 0xfc000018,
+ PPCI_FCTIWZ = 0xfc00001e,
+
+ PPCI_FADD = 0xfc00002a,
+ PPCI_FSUB = 0xfc000028,
+ PPCI_FMUL = 0xfc000032,
+ PPCI_FDIV = 0xfc000024,
+ PPCI_FSQRT = 0xfc00002c,
+
+ PPCI_FMADD = 0xfc00003a,
+ PPCI_FMSUB = 0xfc000038,
+ PPCI_FNMSUB = 0xfc00003c,
+
+ PPCI_FCMPU = 0xfc000000,
+ PPCI_FSEL = 0xfc00002e,
+} PPCIns;
+
+typedef enum PPCCC {
+ CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO
+} PPCCC;
+
+#endif
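The PPC header follows the same pattern: PPCF_T, PPCF_A and PPCF_B drop the rD, rA and rB fields into bits 21, 16 and 11 of an X-form base opcode such as PPCI_ADD. Again a purely illustrative, self-contained sketch, with constants copied from the header above and the example instruction chosen only for demonstration:

/* Illustrative sketch: constants mirror lj_target_ppc.h above. */
#include <stdint.h>
#include <stdio.h>

#define PPCI_ADD  0x7c000214u
#define PPCF_T(r) ((r) << 21)  /* rD field */
#define PPCF_A(r) ((r) << 16)  /* rA field */
#define PPCF_B(r) ((r) << 11)  /* rB field */

int main(void)
{
  /* add r3, r4, r5 -> 0x7c642a14 */
  uint32_t ins = PPCI_ADD | PPCF_T(3) | PPCF_A(4) | PPCF_B(5);
  printf("%08x\n", ins);  /* prints: 7c642a14 */
  return 0;
}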
diff --git a/3rdparty/lua/src/lj_target_x86.h b/3rdparty/lua/src/lj_target_x86.h
index f129186..84b0871 100644
--- a/3rdparty/lua/src/lj_target_x86.h
+++ b/3rdparty/lua/src/lj_target_x86.h
@@ -1,342 +1,342 @@
-/*
-** Definitions for x86 and x64 CPUs.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_X86_H
-#define _LJ_TARGET_X86_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#if LJ_64
-#define GPRDEF(_) \
- _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \
- _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D)
-#define FPRDEF(_) \
- _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \
- _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15)
-#else
-#define GPRDEF(_) \
- _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI)
-#define FPRDEF(_) \
- _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7)
-#endif
-#define VRIDDEF(_) \
- _(MRM)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */
-
- /* Calling conventions. */
- RID_RET = RID_EAX,
-#if LJ_64
- RID_FPRET = RID_XMM0,
-#else
- RID_RETLO = RID_EAX,
- RID_RETHI = RID_EDX,
-#endif
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_EDX, /* Interpreter BASE. */
-#if LJ_64 && !LJ_ABI_WIN
- RID_LPC = RID_EBX, /* Interpreter PC. */
- RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */
-#else
- RID_LPC = RID_ESI, /* Interpreter PC. */
- RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */
-#endif
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_EAX,
- RID_MIN_FPR = RID_XMM0,
- RID_MAX_GPR = RID_MIN_FPR,
- RID_MAX_FPR = RID_MAX,
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR,
-};
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except the stack pointer. */
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP))
-#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-#if LJ_64
-/* Note: this requires the use of FORCE_REX! */
-#define RSET_GPR8 RSET_GPR
-#else
-#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1))
-#endif
-
-/* ABI-specific register sets. */
-#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX))
-#if LJ_64
-#if LJ_ABI_WIN
-/* Windows x64 ABI. */
-#define RSET_SCRATCH \
- (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1))
-#define REGARG_GPRS \
- (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5))
-#define REGARG_NUMGPR 4
-#define REGARG_NUMFPR 4
-#define REGARG_FIRSTFPR RID_XMM0
-#define REGARG_LASTFPR RID_XMM3
-#define STACKARG_OFS (4*8)
-#else
-/* The rest of the civilized x64 world has a common ABI. */
-#define RSET_SCRATCH \
- (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR)
-#define REGARG_GPRS \
- (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \
- <<5))<<5))<<5))<<5))<<5))
-#define REGARG_NUMGPR 6
-#define REGARG_NUMFPR 8
-#define REGARG_FIRSTFPR RID_XMM0
-#define REGARG_LASTFPR RID_XMM7
-#define STACKARG_OFS 0
-#endif
-#else
-/* Common x86 ABI. */
-#define RSET_SCRATCH (RSET_ACD|RSET_FPR)
-#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */
-#define REGARG_NUMGPR 2 /* Fastcall only. */
-#define REGARG_NUMFPR 0
-#define STACKARG_OFS 0
-#endif
-
-#if LJ_64
-/* Prefer the low 8 regs of each type to reduce REX prefixes. */
-#undef rset_picktop
-#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18)
-#endif
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
-*/
-#if LJ_64
-#if LJ_ABI_WIN
-#define SPS_FIXED (4*2)
-#define SPS_FIRST (4*2) /* Don't use callee register save area. */
-#else
-#define SPS_FIXED 4
-#define SPS_FIRST 2
-#endif
-#else
-#define SPS_FIXED 6
-#define SPS_FIRST 2
-#endif
-
-#define SPOFS_TMP 0
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
- intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
-#define EXITSTUB_SPACING (2+2)
-#define EXITSTUBS_PER_GROUP 32
-
-/* -- x86 ModRM operand encoding ------------------------------------------ */
-
-typedef enum {
- XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0,
- XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0,
- XM_MASK = 0xc0
-} x86Mode;
-
-/* Structure to hold variable ModRM operand. */
-typedef struct {
- int32_t ofs; /* Offset. */
- uint8_t base; /* Base register or RID_NONE. */
- uint8_t idx; /* Index register or RID_NONE. */
- uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */
-} x86ModRM;
-
-/* -- Opcodes ------------------------------------------------------------- */
-
-/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. */
-#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24)))
-#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24)))
-#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24)))
-#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24)))
-#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24)))
-#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24)))
-#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24)))
-
-/* This list of x86 opcodes is not intended to be complete. Opcodes are only
-** included when needed. Take a look at DynASM or jit.dis_x86 to see the
-** whole mess.
-*/
-typedef enum {
- /* Fixed length opcodes. XI_* prefix. */
- XI_NOP = 0x90,
- XI_XCHGa = 0x90,
- XI_CALL = 0xe8,
- XI_JMP = 0xe9,
- XI_JMPs = 0xeb,
- XI_PUSH = 0x50, /* Really 50+r. */
- XI_JCCs = 0x70, /* Really 7x. */
- XI_JCCn = 0x80, /* Really 0f8x. */
- XI_LEA = 0x8d,
- XI_MOVrib = 0xb0, /* Really b0+r. */
- XI_MOVri = 0xb8, /* Really b8+r. */
- XI_ARITHib = 0x80,
- XI_ARITHi = 0x81,
- XI_ARITHi8 = 0x83,
- XI_PUSHi8 = 0x6a,
- XI_TESTb = 0x84,
- XI_TEST = 0x85,
- XI_MOVmi = 0xc7,
- XI_GROUP5 = 0xff,
-
- /* Note: little-endian byte-order! */
- XI_FLDZ = 0xeed9,
- XI_FLD1 = 0xe8d9,
- XI_FLDLG2 = 0xecd9,
- XI_FLDLN2 = 0xedd9,
- XI_FDUP = 0xc0d9, /* Really fld st0. */
- XI_FPOP = 0xd8dd, /* Really fstp st0. */
- XI_FPOP1 = 0xd9dd, /* Really fstp st1. */
- XI_FRNDINT = 0xfcd9,
- XI_FSIN = 0xfed9,
- XI_FCOS = 0xffd9,
- XI_FPTAN = 0xf2d9,
- XI_FPATAN = 0xf3d9,
- XI_FSCALE = 0xfdd9,
- XI_FYL2X = 0xf1d9,
-
- /* Variable-length opcodes. XO_* prefix. */
- XO_MOV = XO_(8b),
- XO_MOVto = XO_(89),
- XO_MOVtow = XO_66(89),
- XO_MOVtob = XO_(88),
- XO_MOVmi = XO_(c7),
- XO_MOVmib = XO_(c6),
- XO_LEA = XO_(8d),
- XO_ARITHib = XO_(80),
- XO_ARITHi = XO_(81),
- XO_ARITHi8 = XO_(83),
- XO_ARITHiw8 = XO_66(83),
- XO_SHIFTi = XO_(c1),
- XO_SHIFT1 = XO_(d1),
- XO_SHIFTcl = XO_(d3),
- XO_IMUL = XO_0f(af),
- XO_IMULi = XO_(69),
- XO_IMULi8 = XO_(6b),
- XO_CMP = XO_(3b),
- XO_TESTb = XO_(84),
- XO_TEST = XO_(85),
- XO_GROUP3b = XO_(f6),
- XO_GROUP3 = XO_(f7),
- XO_GROUP5b = XO_(fe),
- XO_GROUP5 = XO_(ff),
- XO_MOVZXb = XO_0f(b6),
- XO_MOVZXw = XO_0f(b7),
- XO_MOVSXb = XO_0f(be),
- XO_MOVSXw = XO_0f(bf),
- XO_MOVSXd = XO_(63),
- XO_BSWAP = XO_0f(c8),
- XO_CMOV = XO_0f(40),
-
- XO_MOVSD = XO_f20f(10),
- XO_MOVSDto = XO_f20f(11),
- XO_MOVSS = XO_f30f(10),
- XO_MOVSSto = XO_f30f(11),
- XO_MOVLPD = XO_660f(12),
- XO_MOVAPS = XO_0f(28),
- XO_XORPS = XO_0f(57),
- XO_ANDPS = XO_0f(54),
- XO_ADDSD = XO_f20f(58),
- XO_SUBSD = XO_f20f(5c),
- XO_MULSD = XO_f20f(59),
- XO_DIVSD = XO_f20f(5e),
- XO_SQRTSD = XO_f20f(51),
- XO_MINSD = XO_f20f(5d),
- XO_MAXSD = XO_f20f(5f),
- XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */
- XO_UCOMISD = XO_660f(2e),
- XO_CVTSI2SD = XO_f20f(2a),
- XO_CVTSD2SI = XO_f20f(2d),
- XO_CVTTSD2SI= XO_f20f(2c),
- XO_CVTSI2SS = XO_f30f(2a),
- XO_CVTSS2SI = XO_f30f(2d),
- XO_CVTTSS2SI= XO_f30f(2c),
- XO_CVTSS2SD = XO_f30f(5a),
- XO_CVTSD2SS = XO_f20f(5a),
- XO_ADDSS = XO_f30f(58),
- XO_MOVD = XO_660f(6e),
- XO_MOVDto = XO_660f(7e),
-
- XO_FLDd = XO_(d9), XOg_FLDd = 0,
- XO_FLDq = XO_(dd), XOg_FLDq = 0,
- XO_FILDd = XO_(db), XOg_FILDd = 0,
- XO_FILDq = XO_(df), XOg_FILDq = 5,
- XO_FSTPd = XO_(d9), XOg_FSTPd = 3,
- XO_FSTPq = XO_(dd), XOg_FSTPq = 3,
- XO_FISTPq = XO_(df), XOg_FISTPq = 7,
- XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1,
- XO_FADDq = XO_(dc), XOg_FADDq = 0,
- XO_FLDCW = XO_(d9), XOg_FLDCW = 5,
- XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7
-} x86Op;
-
-/* x86 opcode groups. */
-typedef uint32_t x86Group;
-
-#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g)))
-#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g)
-#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000)))
-#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000)))
-
-#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27)))
-#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27)))
-
-typedef enum {
- XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP,
- XOg_X_IMUL
-} x86Arith;
-
-typedef enum {
- XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR
-} x86Shift;
-
-typedef enum {
- XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV
-} x86Group3;
-
-typedef enum {
- XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH
-} x86Group5;
-
-/* x86 condition codes. */
-typedef enum {
- CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE,
- CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE,
- CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB,
- CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE,
- CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL,
- CC_NG = CC_LE, CC_G = CC_NLE
-} x86CC;
-
-#endif
+/*
+** Definitions for x86 and x64 CPUs.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_X86_H
+#define _LJ_TARGET_X86_H
+
+/* -- Registers IDs ------------------------------------------------------- */
+
+#if LJ_64
+#define GPRDEF(_) \
+ _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \
+ _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D)
+#define FPRDEF(_) \
+ _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \
+ _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15)
+#else
+#define GPRDEF(_) \
+ _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI)
+#define FPRDEF(_) \
+ _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7)
+#endif
+#define VRIDDEF(_) \
+ _(MRM)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */
+
+ /* Calling conventions. */
+ RID_RET = RID_EAX,
+#if LJ_64
+ RID_FPRET = RID_XMM0,
+#else
+ RID_RETLO = RID_EAX,
+ RID_RETHI = RID_EDX,
+#endif
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_EDX, /* Interpreter BASE. */
+#if LJ_64 && !LJ_ABI_WIN
+ RID_LPC = RID_EBX, /* Interpreter PC. */
+ RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */
+#else
+ RID_LPC = RID_ESI, /* Interpreter PC. */
+ RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */
+#endif
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_EAX,
+ RID_MIN_FPR = RID_XMM0,
+ RID_MAX_GPR = RID_MIN_FPR,
+ RID_MAX_FPR = RID_MAX,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR,
+};
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except the stack pointer. */
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP))
+#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#if LJ_64
+/* Note: this requires the use of FORCE_REX! */
+#define RSET_GPR8 RSET_GPR
+#else
+#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1))
+#endif
+
+/* ABI-specific register sets. */
+#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX))
+#if LJ_64
+#if LJ_ABI_WIN
+/* Windows x64 ABI. */
+#define RSET_SCRATCH \
+ (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1))
+#define REGARG_GPRS \
+ (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5))
+#define REGARG_NUMGPR 4
+#define REGARG_NUMFPR 4
+#define REGARG_FIRSTFPR RID_XMM0
+#define REGARG_LASTFPR RID_XMM3
+#define STACKARG_OFS (4*8)
+#else
+/* The rest of the civilized x64 world has a common ABI. */
+#define RSET_SCRATCH \
+ (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR)
+#define REGARG_GPRS \
+ (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \
+ <<5))<<5))<<5))<<5))<<5))
+#define REGARG_NUMGPR 6
+#define REGARG_NUMFPR 8
+#define REGARG_FIRSTFPR RID_XMM0
+#define REGARG_LASTFPR RID_XMM7
+#define STACKARG_OFS 0
+#endif
+#else
+/* Common x86 ABI. */
+#define RSET_SCRATCH (RSET_ACD|RSET_FPR)
+#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */
+#define REGARG_NUMGPR 2 /* Fastcall only. */
+#define REGARG_NUMFPR 0
+#define STACKARG_OFS 0
+#endif
+
+#if LJ_64
+/* Prefer the low 8 regs of each type to reduce REX prefixes. */
+#undef rset_picktop
+#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18)
+#endif
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#if LJ_64
+#if LJ_ABI_WIN
+#define SPS_FIXED (4*2)
+#define SPS_FIRST (4*2) /* Don't use callee register save area. */
+#else
+#define SPS_FIXED 4
+#define SPS_FIRST 2
+#endif
+#else
+#define SPS_FIXED 6
+#define SPS_FIRST 2
+#endif
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
+#define EXITSTUB_SPACING (2+2)
+#define EXITSTUBS_PER_GROUP 32
+
+/* -- x86 ModRM operand encoding ------------------------------------------ */
+
+typedef enum {
+ XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0,
+ XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0,
+ XM_MASK = 0xc0
+} x86Mode;
+
+/* Structure to hold variable ModRM operand. */
+typedef struct {
+ int32_t ofs; /* Offset. */
+ uint8_t base; /* Base register or RID_NONE. */
+ uint8_t idx; /* Index register or RID_NONE. */
+ uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */
+} x86ModRM;
+
+/* -- Opcodes ------------------------------------------------------------- */
+
+/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. */
+#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24)))
+#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24)))
+#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24)))
+#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24)))
+#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24)))
+#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24)))
+#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24)))
+
+/* This list of x86 opcodes is not intended to be complete. Opcodes are only
+** included when needed. Take a look at DynASM or jit.dis_x86 to see the
+** whole mess.
+*/
+typedef enum {
+ /* Fixed length opcodes. XI_* prefix. */
+ XI_NOP = 0x90,
+ XI_XCHGa = 0x90,
+ XI_CALL = 0xe8,
+ XI_JMP = 0xe9,
+ XI_JMPs = 0xeb,
+ XI_PUSH = 0x50, /* Really 50+r. */
+ XI_JCCs = 0x70, /* Really 7x. */
+ XI_JCCn = 0x80, /* Really 0f8x. */
+ XI_LEA = 0x8d,
+ XI_MOVrib = 0xb0, /* Really b0+r. */
+ XI_MOVri = 0xb8, /* Really b8+r. */
+ XI_ARITHib = 0x80,
+ XI_ARITHi = 0x81,
+ XI_ARITHi8 = 0x83,
+ XI_PUSHi8 = 0x6a,
+ XI_TESTb = 0x84,
+ XI_TEST = 0x85,
+ XI_MOVmi = 0xc7,
+ XI_GROUP5 = 0xff,
+
+ /* Note: little-endian byte-order! */
+ XI_FLDZ = 0xeed9,
+ XI_FLD1 = 0xe8d9,
+ XI_FLDLG2 = 0xecd9,
+ XI_FLDLN2 = 0xedd9,
+ XI_FDUP = 0xc0d9, /* Really fld st0. */
+ XI_FPOP = 0xd8dd, /* Really fstp st0. */
+ XI_FPOP1 = 0xd9dd, /* Really fstp st1. */
+ XI_FRNDINT = 0xfcd9,
+ XI_FSIN = 0xfed9,
+ XI_FCOS = 0xffd9,
+ XI_FPTAN = 0xf2d9,
+ XI_FPATAN = 0xf3d9,
+ XI_FSCALE = 0xfdd9,
+ XI_FYL2X = 0xf1d9,
+
+ /* Variable-length opcodes. XO_* prefix. */
+ XO_MOV = XO_(8b),
+ XO_MOVto = XO_(89),
+ XO_MOVtow = XO_66(89),
+ XO_MOVtob = XO_(88),
+ XO_MOVmi = XO_(c7),
+ XO_MOVmib = XO_(c6),
+ XO_LEA = XO_(8d),
+ XO_ARITHib = XO_(80),
+ XO_ARITHi = XO_(81),
+ XO_ARITHi8 = XO_(83),
+ XO_ARITHiw8 = XO_66(83),
+ XO_SHIFTi = XO_(c1),
+ XO_SHIFT1 = XO_(d1),
+ XO_SHIFTcl = XO_(d3),
+ XO_IMUL = XO_0f(af),
+ XO_IMULi = XO_(69),
+ XO_IMULi8 = XO_(6b),
+ XO_CMP = XO_(3b),
+ XO_TESTb = XO_(84),
+ XO_TEST = XO_(85),
+ XO_GROUP3b = XO_(f6),
+ XO_GROUP3 = XO_(f7),
+ XO_GROUP5b = XO_(fe),
+ XO_GROUP5 = XO_(ff),
+ XO_MOVZXb = XO_0f(b6),
+ XO_MOVZXw = XO_0f(b7),
+ XO_MOVSXb = XO_0f(be),
+ XO_MOVSXw = XO_0f(bf),
+ XO_MOVSXd = XO_(63),
+ XO_BSWAP = XO_0f(c8),
+ XO_CMOV = XO_0f(40),
+
+ XO_MOVSD = XO_f20f(10),
+ XO_MOVSDto = XO_f20f(11),
+ XO_MOVSS = XO_f30f(10),
+ XO_MOVSSto = XO_f30f(11),
+ XO_MOVLPD = XO_660f(12),
+ XO_MOVAPS = XO_0f(28),
+ XO_XORPS = XO_0f(57),
+ XO_ANDPS = XO_0f(54),
+ XO_ADDSD = XO_f20f(58),
+ XO_SUBSD = XO_f20f(5c),
+ XO_MULSD = XO_f20f(59),
+ XO_DIVSD = XO_f20f(5e),
+ XO_SQRTSD = XO_f20f(51),
+ XO_MINSD = XO_f20f(5d),
+ XO_MAXSD = XO_f20f(5f),
+ XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */
+ XO_UCOMISD = XO_660f(2e),
+ XO_CVTSI2SD = XO_f20f(2a),
+ XO_CVTSD2SI = XO_f20f(2d),
+ XO_CVTTSD2SI= XO_f20f(2c),
+ XO_CVTSI2SS = XO_f30f(2a),
+ XO_CVTSS2SI = XO_f30f(2d),
+ XO_CVTTSS2SI= XO_f30f(2c),
+ XO_CVTSS2SD = XO_f30f(5a),
+ XO_CVTSD2SS = XO_f20f(5a),
+ XO_ADDSS = XO_f30f(58),
+ XO_MOVD = XO_660f(6e),
+ XO_MOVDto = XO_660f(7e),
+
+ XO_FLDd = XO_(d9), XOg_FLDd = 0,
+ XO_FLDq = XO_(dd), XOg_FLDq = 0,
+ XO_FILDd = XO_(db), XOg_FILDd = 0,
+ XO_FILDq = XO_(df), XOg_FILDq = 5,
+ XO_FSTPd = XO_(d9), XOg_FSTPd = 3,
+ XO_FSTPq = XO_(dd), XOg_FSTPq = 3,
+ XO_FISTPq = XO_(df), XOg_FISTPq = 7,
+ XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1,
+ XO_FADDq = XO_(dc), XOg_FADDq = 0,
+ XO_FLDCW = XO_(d9), XOg_FLDCW = 5,
+ XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7
+} x86Op;
+
+/* x86 opcode groups. */
+typedef uint32_t x86Group;
+
+#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g)))
+#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g)
+#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000)))
+#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000)))
+
+#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27)))
+#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27)))
+
+typedef enum {
+ XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP,
+ XOg_X_IMUL
+} x86Arith;
+
+typedef enum {
+ XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR
+} x86Shift;
+
+typedef enum {
+ XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV
+} x86Group3;
+
+typedef enum {
+ XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH
+} x86Group5;
+
+/* x86 condition codes. */
+typedef enum {
+ CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE,
+ CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE,
+ CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB,
+ CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE,
+ CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL,
+ CC_NG = CC_LE, CC_G = CC_NLE
+} x86CC;
+
+#endif
diff --git a/3rdparty/lua/src/lj_trace.c b/3rdparty/lua/src/lj_trace.c
index e51ec54..c70fc24 100644
--- a/3rdparty/lua/src/lj_trace.c
+++ b/3rdparty/lua/src/lj_trace.c
@@ -1,6 +1,6 @@
/*
** Trace management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_trace_c
@@ -607,7 +607,6 @@ static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
}
lj_opt_split(J);
lj_opt_sink(J);
- if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
J->state = LJ_TRACE_ASM;
break;
diff --git a/3rdparty/lua/src/lj_trace.h b/3rdparty/lua/src/lj_trace.h
index 824611f..e30d3d5 100644
--- a/3rdparty/lua/src/lj_trace.h
+++ b/3rdparty/lua/src/lj_trace.h
@@ -1,53 +1,53 @@
-/*
-** Trace management.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TRACE_H
-#define _LJ_TRACE_H
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-#include "lj_jit.h"
-#include "lj_dispatch.h"
-
-/* Trace errors. */
-typedef enum {
-#define TREDEF(name, msg) LJ_TRERR_##name,
-#include "lj_traceerr.h"
- LJ_TRERR__MAX
-} TraceError;
-
-LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e);
-LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e);
-
-/* Trace management. */
-LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T);
-LJ_FUNC void lj_trace_reenableproto(GCproto *pt);
-LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt);
-LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno);
-LJ_FUNC int lj_trace_flushall(lua_State *L);
-LJ_FUNC void lj_trace_initstate(global_State *g);
-LJ_FUNC void lj_trace_freestate(global_State *g);
-
-/* Event handling. */
-LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc);
-LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc);
-LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr);
-
-/* Signal asynchronous abort of trace or end of trace. */
-#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE)
-#define lj_trace_end(J) (J->state = LJ_TRACE_END)
-
-#else
-
-#define lj_trace_flushall(L) (UNUSED(L), 0)
-#define lj_trace_initstate(g) UNUSED(g)
-#define lj_trace_freestate(g) UNUSED(g)
-#define lj_trace_abort(g) UNUSED(g)
-#define lj_trace_end(J) UNUSED(J)
-
-#endif
-
-#endif
+/*
+** Trace management.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TRACE_H
+#define _LJ_TRACE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+#include "lj_jit.h"
+#include "lj_dispatch.h"
+
+/* Trace errors. */
+typedef enum {
+#define TREDEF(name, msg) LJ_TRERR_##name,
+#include "lj_traceerr.h"
+ LJ_TRERR__MAX
+} TraceError;
+
+LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e);
+LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e);
+
+/* Trace management. */
+LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T);
+LJ_FUNC void lj_trace_reenableproto(GCproto *pt);
+LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt);
+LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno);
+LJ_FUNC int lj_trace_flushall(lua_State *L);
+LJ_FUNC void lj_trace_initstate(global_State *g);
+LJ_FUNC void lj_trace_freestate(global_State *g);
+
+/* Event handling. */
+LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc);
+LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc);
+LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr);
+
+/* Signal asynchronous abort of trace or end of trace. */
+#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE)
+#define lj_trace_end(J) (J->state = LJ_TRACE_END)
+
+#else
+
+#define lj_trace_flushall(L) (UNUSED(L), 0)
+#define lj_trace_initstate(g) UNUSED(g)
+#define lj_trace_freestate(g) UNUSED(g)
+#define lj_trace_abort(g) UNUSED(g)
+#define lj_trace_end(J) UNUSED(J)
+
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_traceerr.h b/3rdparty/lua/src/lj_traceerr.h
index 9bef117..2ef4ad6 100644
--- a/3rdparty/lua/src/lj_traceerr.h
+++ b/3rdparty/lua/src/lj_traceerr.h
@@ -1,6 +1,6 @@
/*
** Trace compiler error messages.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/
/* This file may be included multiple times with different TREDEF macros. */
@@ -20,7 +20,7 @@ TREDEF(LUNROLL, "loop unroll limit reached")
/* Recording calls/returns. */
TREDEF(BADTYPE, "bad argument type")
-TREDEF(CJITOFF, "JIT compilation disabled for function")
+TREDEF(CJITOFF, "call to JIT-disabled function")
TREDEF(CUNROLL, "call unroll limit reached")
TREDEF(DOWNREC, "down-recursion, restarting")
TREDEF(NYICF, "NYI: C function %p")
diff --git a/3rdparty/lua/src/lj_udata.c b/3rdparty/lua/src/lj_udata.c
index 1352848..df5e7f3 100644
--- a/3rdparty/lua/src/lj_udata.c
+++ b/3rdparty/lua/src/lj_udata.c
@@ -1,34 +1,34 @@
-/*
-** Userdata handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_udata_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_udata.h"
-
-GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env)
-{
- GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata);
- global_State *g = G(L);
- newwhite(g, ud); /* Not finalized. */
- ud->gct = ~LJ_TUDATA;
- ud->udtype = UDTYPE_USERDATA;
- ud->len = sz;
- /* NOBARRIER: The GCudata is new (marked white). */
- setgcrefnull(ud->metatable);
- setgcref(ud->env, obj2gco(env));
- /* Chain to userdata list (after main thread). */
- setgcrefr(ud->nextgc, mainthread(g)->nextgc);
- setgcref(mainthread(g)->nextgc, obj2gco(ud));
- return ud;
-}
-
-void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud)
-{
- lj_mem_free(g, ud, sizeudata(ud));
-}
-
+/*
+** Userdata handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_udata_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_udata.h"
+
+GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env)
+{
+ GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata);
+ global_State *g = G(L);
+ newwhite(g, ud); /* Not finalized. */
+ ud->gct = ~LJ_TUDATA;
+ ud->udtype = UDTYPE_USERDATA;
+ ud->len = sz;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcrefnull(ud->metatable);
+ setgcref(ud->env, obj2gco(env));
+ /* Chain to userdata list (after main thread). */
+ setgcrefr(ud->nextgc, mainthread(g)->nextgc);
+ setgcref(mainthread(g)->nextgc, obj2gco(ud));
+ return ud;
+}
+
+void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud)
+{
+ lj_mem_free(g, ud, sizeudata(ud));
+}
+
diff --git a/3rdparty/lua/src/lj_udata.h b/3rdparty/lua/src/lj_udata.h
index 01cc94b..f62c02b 100644
--- a/3rdparty/lua/src/lj_udata.h
+++ b/3rdparty/lua/src/lj_udata.h
@@ -1,14 +1,14 @@
-/*
-** Userdata handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_UDATA_H
-#define _LJ_UDATA_H
-
-#include "lj_obj.h"
-
-LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env);
-LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud);
-
-#endif
+/*
+** Userdata handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_UDATA_H
+#define _LJ_UDATA_H
+
+#include "lj_obj.h"
+
+LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env);
+LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud);
+
+#endif
diff --git a/3rdparty/lua/src/lj_vm.h b/3rdparty/lua/src/lj_vm.h
index 1523445..c5d05de 100644
--- a/3rdparty/lua/src/lj_vm.h
+++ b/3rdparty/lua/src/lj_vm.h
@@ -1,116 +1,116 @@
-/*
-** Assembler VM interface definitions.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_VM_H
-#define _LJ_VM_H
-
-#include "lj_obj.h"
-
-/* Entry points for ASM parts of VM. */
-LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1);
-LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
-typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud);
-LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud,
- lua_CPFunction cp);
-LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
-LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode);
-LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe);
-LJ_ASMF void lj_vm_unwind_c_eh(void);
-LJ_ASMF void lj_vm_unwind_ff_eh(void);
-#if LJ_TARGET_X86ORX64
-LJ_ASMF void lj_vm_unwind_rethrow(void);
-#endif
-
-/* Miscellaneous functions. */
-#if LJ_TARGET_X86ORX64
-LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]);
-#endif
-#if LJ_TARGET_PPC
-void lj_vm_cachesync(void *start, void *end);
-#endif
-LJ_ASMF double lj_vm_foldarith(double x, double y, int op);
-#if LJ_HASJIT
-LJ_ASMF double lj_vm_foldfpm(double x, int op);
-#endif
-#if !LJ_ARCH_HASFPU
-/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */
-#endif
-
-/* Dispatch targets for recording and hooks. */
-LJ_ASMF void lj_vm_record(void);
-LJ_ASMF void lj_vm_inshook(void);
-LJ_ASMF void lj_vm_rethook(void);
-LJ_ASMF void lj_vm_callhook(void);
-
-/* Trace exit handling. */
-LJ_ASMF void lj_vm_exit_handler(void);
-LJ_ASMF void lj_vm_exit_interp(void);
-
-/* Internal math helper functions. */
-#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC
-#define lj_vm_floor floor
-#define lj_vm_ceil ceil
-#else
-LJ_ASMF double lj_vm_floor(double);
-LJ_ASMF double lj_vm_ceil(double);
-#if LJ_TARGET_ARM
-LJ_ASMF double lj_vm_floor_sf(double);
-LJ_ASMF double lj_vm_ceil_sf(double);
-#endif
-#endif
-#if defined(LUAJIT_NO_LOG2) || LJ_TARGET_X86ORX64
-LJ_ASMF double lj_vm_log2(double);
-#else
-#define lj_vm_log2 log2
-#endif
-
-#if LJ_HASJIT
-#if LJ_TARGET_X86ORX64
-LJ_ASMF void lj_vm_floor_sse(void);
-LJ_ASMF void lj_vm_ceil_sse(void);
-LJ_ASMF void lj_vm_trunc_sse(void);
-LJ_ASMF void lj_vm_exp_x87(void);
-LJ_ASMF void lj_vm_exp2_x87(void);
-LJ_ASMF void lj_vm_pow_sse(void);
-LJ_ASMF void lj_vm_powi_sse(void);
-#else
-#if LJ_TARGET_PPC
-#define lj_vm_trunc trunc
-#else
-LJ_ASMF double lj_vm_trunc(double);
-#if LJ_TARGET_ARM
-LJ_ASMF double lj_vm_trunc_sf(double);
-#endif
-#endif
-LJ_ASMF double lj_vm_powi(double, int32_t);
-#ifdef LUAJIT_NO_EXP2
-LJ_ASMF double lj_vm_exp2(double);
-#else
-#define lj_vm_exp2 exp2
-#endif
-#endif
-LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
-#if LJ_HASFFI
-LJ_ASMF int lj_vm_errno(void);
-#endif
-#endif
-
-/* Continuations for metamethods. */
-LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */
-LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */
-LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */
-LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */
-LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */
-LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */
-
-enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
-
-/* Start of the ASM code. */
-LJ_ASMF char lj_vm_asm_begin[];
-
-/* Bytecode offsets are relative to lj_vm_asm_begin. */
-#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs)))
-
-#endif
+/*
+** Assembler VM interface definitions.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_VM_H
+#define _LJ_VM_H
+
+#include "lj_obj.h"
+
+/* Entry points for ASM parts of VM. */
+LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1);
+LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
+typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud);
+LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud,
+ lua_CPFunction cp);
+LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode);
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe);
+LJ_ASMF void lj_vm_unwind_c_eh(void);
+LJ_ASMF void lj_vm_unwind_ff_eh(void);
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_unwind_rethrow(void);
+#endif
+
+/* Miscellaneous functions. */
+#if LJ_TARGET_X86ORX64
+LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]);
+#endif
+#if LJ_TARGET_PPC
+void lj_vm_cachesync(void *start, void *end);
+#endif
+LJ_ASMF double lj_vm_foldarith(double x, double y, int op);
+#if LJ_HASJIT
+LJ_ASMF double lj_vm_foldfpm(double x, int op);
+#endif
+#if !LJ_ARCH_HASFPU
+/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */
+#endif
+
+/* Dispatch targets for recording and hooks. */
+LJ_ASMF void lj_vm_record(void);
+LJ_ASMF void lj_vm_inshook(void);
+LJ_ASMF void lj_vm_rethook(void);
+LJ_ASMF void lj_vm_callhook(void);
+
+/* Trace exit handling. */
+LJ_ASMF void lj_vm_exit_handler(void);
+LJ_ASMF void lj_vm_exit_interp(void);
+
+/* Internal math helper functions. */
+#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC
+#define lj_vm_floor floor
+#define lj_vm_ceil ceil
+#else
+LJ_ASMF double lj_vm_floor(double);
+LJ_ASMF double lj_vm_ceil(double);
+#if LJ_TARGET_ARM
+LJ_ASMF double lj_vm_floor_sf(double);
+LJ_ASMF double lj_vm_ceil_sf(double);
+#endif
+#endif
+#if defined(LUAJIT_NO_LOG2) || LJ_TARGET_X86ORX64
+LJ_ASMF double lj_vm_log2(double);
+#else
+#define lj_vm_log2 log2
+#endif
+
+#if LJ_HASJIT
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_floor_sse(void);
+LJ_ASMF void lj_vm_ceil_sse(void);
+LJ_ASMF void lj_vm_trunc_sse(void);
+LJ_ASMF void lj_vm_exp_x87(void);
+LJ_ASMF void lj_vm_exp2_x87(void);
+LJ_ASMF void lj_vm_pow_sse(void);
+LJ_ASMF void lj_vm_powi_sse(void);
+#else
+#if LJ_TARGET_PPC
+#define lj_vm_trunc trunc
+#else
+LJ_ASMF double lj_vm_trunc(double);
+#if LJ_TARGET_ARM
+LJ_ASMF double lj_vm_trunc_sf(double);
+#endif
+#endif
+LJ_ASMF double lj_vm_powi(double, int32_t);
+#ifdef LUAJIT_NO_EXP2
+LJ_ASMF double lj_vm_exp2(double);
+#else
+#define lj_vm_exp2 exp2
+#endif
+#endif
+LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
+#if LJ_HASFFI
+LJ_ASMF int lj_vm_errno(void);
+#endif
+#endif
+
+/* Continuations for metamethods. */
+LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */
+LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */
+LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */
+LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */
+LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */
+LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */
+
+enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
+
+/* Start of the ASM code. */
+LJ_ASMF char lj_vm_asm_begin[];
+
+/* Bytecode offsets are relative to lj_vm_asm_begin. */
+#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs)))
+
+#endif
diff --git a/3rdparty/lua/src/lj_vmevent.c b/3rdparty/lua/src/lj_vmevent.c
index 5decde0..21ad08f 100644
--- a/3rdparty/lua/src/lj_vmevent.c
+++ b/3rdparty/lua/src/lj_vmevent.c
@@ -1,57 +1,57 @@
-/*
-** VM event handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#include <stdio.h>
-
-#define lj_vmevent_c
-#define LUA_CORE
-
-#include "lj_obj.h"
-#include "lj_str.h"
-#include "lj_tab.h"
-#include "lj_state.h"
-#include "lj_dispatch.h"
-#include "lj_vm.h"
-#include "lj_vmevent.h"
-
-ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev)
-{
- global_State *g = G(L);
- GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY);
- cTValue *tv = lj_tab_getstr(tabV(registry(L)), s);
- if (tvistab(tv)) {
- int hash = VMEVENT_HASH(ev);
- tv = lj_tab_getint(tabV(tv), hash);
- if (tv && tvisfunc(tv)) {
- lj_state_checkstack(L, LUA_MINSTACK);
- setfuncV(L, L->top++, funcV(tv));
- return savestack(L, L->top);
- }
- }
- g->vmevmask &= ~VMEVENT_MASK(ev); /* No handler: cache this fact. */
- return 0;
-}
-
-void lj_vmevent_call(lua_State *L, ptrdiff_t argbase)
-{
- global_State *g = G(L);
- uint8_t oldmask = g->vmevmask;
- uint8_t oldh = hook_save(g);
- int status;
- g->vmevmask = 0; /* Disable all events. */
- hook_vmevent(g);
- status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0);
- if (LJ_UNLIKELY(status)) {
- /* Really shouldn't use stderr here, but where else to complain? */
- L->top--;
- fputs("VM handler failed: ", stderr);
- fputs(tvisstr(L->top) ? strVdata(L->top) : "?", stderr);
- fputc('\n', stderr);
- }
- hook_restore(g, oldh);
- if (g->vmevmask != VMEVENT_NOCACHE)
- g->vmevmask = oldmask; /* Restore event mask, but not if not modified. */
-}
-
+/*
+** VM event handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <stdio.h>
+
+#define lj_vmevent_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev)
+{
+ global_State *g = G(L);
+ GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY);
+ cTValue *tv = lj_tab_getstr(tabV(registry(L)), s);
+ if (tvistab(tv)) {
+ int hash = VMEVENT_HASH(ev);
+ tv = lj_tab_getint(tabV(tv), hash);
+ if (tv && tvisfunc(tv)) {
+ lj_state_checkstack(L, LUA_MINSTACK);
+ setfuncV(L, L->top++, funcV(tv));
+ return savestack(L, L->top);
+ }
+ }
+ g->vmevmask &= ~VMEVENT_MASK(ev); /* No handler: cache this fact. */
+ return 0;
+}
+
+void lj_vmevent_call(lua_State *L, ptrdiff_t argbase)
+{
+ global_State *g = G(L);
+ uint8_t oldmask = g->vmevmask;
+ uint8_t oldh = hook_save(g);
+ int status;
+ g->vmevmask = 0; /* Disable all events. */
+ hook_vmevent(g);
+ status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0);
+ if (LJ_UNLIKELY(status)) {
+ /* Really shouldn't use stderr here, but where else to complain? */
+ L->top--;
+ fputs("VM handler failed: ", stderr);
+ fputs(tvisstr(L->top) ? strVdata(L->top) : "?", stderr);
+ fputc('\n', stderr);
+ }
+ hook_restore(g, oldh);
+ if (g->vmevmask != VMEVENT_NOCACHE)
+ g->vmevmask = oldmask; /* Restore event mask, but not if not modified. */
+}
+
diff --git a/3rdparty/lua/src/lj_vmevent.h b/3rdparty/lua/src/lj_vmevent.h
index 9cd0639..11dedb4 100644
--- a/3rdparty/lua/src/lj_vmevent.h
+++ b/3rdparty/lua/src/lj_vmevent.h
@@ -1,59 +1,59 @@
-/*
-** VM event handling.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_VMEVENT_H
-#define _LJ_VMEVENT_H
-
-#include "lj_obj.h"
-
-/* Registry key for VM event handler table. */
-#define LJ_VMEVENTS_REGKEY "_VMEVENTS"
-#define LJ_VMEVENTS_HSIZE 4
-
-#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7))
-#define VMEVENT_HASH(ev) ((int)(ev) & ~7)
-#define VMEVENT_HASHIDX(h) ((int)(h) << 3)
-#define VMEVENT_NOCACHE 255
-
-#define VMEVENT_DEF(name, hash) \
- LJ_VMEVENT_##name##_, \
- LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3)
-
-/* VM event IDs. */
-typedef enum {
- VMEVENT_DEF(BC, 0x00003883),
- VMEVENT_DEF(TRACE, 0xb2d91467),
- VMEVENT_DEF(RECORD, 0x9284bf4f),
- VMEVENT_DEF(TEXIT, 0xb29df2b0),
- LJ_VMEVENT__MAX
-} VMEvent;
-
-#ifdef LUAJIT_DISABLE_VMEVENT
-#define lj_vmevent_send(L, ev, args) UNUSED(L)
-#define lj_vmevent_send_(L, ev, args, post) UNUSED(L)
-#else
-#define lj_vmevent_send(L, ev, args) \
- if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
- ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
- if (argbase) { \
- args \
- lj_vmevent_call(L, argbase); \
- } \
- }
-#define lj_vmevent_send_(L, ev, args, post) \
- if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
- ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
- if (argbase) { \
- args \
- lj_vmevent_call(L, argbase); \
- post \
- } \
- }
-
-LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev);
-LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase);
-#endif
-
-#endif
+/*
+** VM event handling.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_VMEVENT_H
+#define _LJ_VMEVENT_H
+
+#include "lj_obj.h"
+
+/* Registry key for VM event handler table. */
+#define LJ_VMEVENTS_REGKEY "_VMEVENTS"
+#define LJ_VMEVENTS_HSIZE 4
+
+#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7))
+#define VMEVENT_HASH(ev) ((int)(ev) & ~7)
+#define VMEVENT_HASHIDX(h) ((int)(h) << 3)
+#define VMEVENT_NOCACHE 255
+
+#define VMEVENT_DEF(name, hash) \
+ LJ_VMEVENT_##name##_, \
+ LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3)
+
+/* VM event IDs. */
+typedef enum {
+ VMEVENT_DEF(BC, 0x00003883),
+ VMEVENT_DEF(TRACE, 0xb2d91467),
+ VMEVENT_DEF(RECORD, 0x9284bf4f),
+ VMEVENT_DEF(TEXIT, 0xb29df2b0),
+ LJ_VMEVENT__MAX
+} VMEvent;
+
+#ifdef LUAJIT_DISABLE_VMEVENT
+#define lj_vmevent_send(L, ev, args) UNUSED(L)
+#define lj_vmevent_send_(L, ev, args, post) UNUSED(L)
+#else
+#define lj_vmevent_send(L, ev, args) \
+ if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
+ ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
+ if (argbase) { \
+ args \
+ lj_vmevent_call(L, argbase); \
+ } \
+ }
+#define lj_vmevent_send_(L, ev, args, post) \
+ if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
+ ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
+ if (argbase) { \
+ args \
+ lj_vmevent_call(L, argbase); \
+ post \
+ } \
+ }
+
+LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev);
+LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase);
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/lj_vmmath.c b/3rdparty/lua/src/lj_vmmath.c
index 1f14b53..aa69030 100644
--- a/3rdparty/lua/src/lj_vmmath.c
+++ b/3rdparty/lua/src/lj_vmmath.c
@@ -1,140 +1,140 @@
-/*
-** Math helper functions for assembler VM.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#define lj_vmmath_c
-#define LUA_CORE
-
-#include <errno.h>
-#include <math.h>
-
-#include "lj_obj.h"
-#include "lj_ir.h"
-#include "lj_vm.h"
-
-/* -- Helper functions for generated machine code ------------------------- */
-
-#if LJ_TARGET_X86ORX64
-/* Wrapper functions to avoid linker issues on OSX. */
-LJ_FUNCA double lj_vm_sinh(double x) { return sinh(x); }
-LJ_FUNCA double lj_vm_cosh(double x) { return cosh(x); }
-LJ_FUNCA double lj_vm_tanh(double x) { return tanh(x); }
-#endif
-
-#if !LJ_TARGET_X86ORX64
-double lj_vm_foldarith(double x, double y, int op)
-{
- switch (op) {
- case IR_ADD - IR_ADD: return x+y; break;
- case IR_SUB - IR_ADD: return x-y; break;
- case IR_MUL - IR_ADD: return x*y; break;
- case IR_DIV - IR_ADD: return x/y; break;
- case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y; break;
- case IR_POW - IR_ADD: return pow(x, y); break;
- case IR_NEG - IR_ADD: return -x; break;
- case IR_ABS - IR_ADD: return fabs(x); break;
-#if LJ_HASJIT
- case IR_ATAN2 - IR_ADD: return atan2(x, y); break;
- case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break;
- case IR_MIN - IR_ADD: return x > y ? y : x; break;
- case IR_MAX - IR_ADD: return x < y ? y : x; break;
-#endif
- default: return x;
- }
-}
-#endif
-
-#if LJ_HASJIT
-
-#ifdef LUAJIT_NO_LOG2
-double lj_vm_log2(double a)
-{
- return log(a) * 1.4426950408889634074;
-}
-#endif
-
-#ifdef LUAJIT_NO_EXP2
-double lj_vm_exp2(double a)
-{
- return exp(a * 0.6931471805599453);
-}
-#endif
-
-#if !(LJ_TARGET_ARM || LJ_TARGET_PPC)
-int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b)
-{
- uint32_t y, ua, ub;
- lua_assert(b != 0); /* This must be checked before using this function. */
- ua = a < 0 ? (uint32_t)-a : (uint32_t)a;
- ub = b < 0 ? (uint32_t)-b : (uint32_t)b;
- y = ua % ub;
- if (y != 0 && (a^b) < 0) y = y - ub;
- if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y;
- return (int32_t)y;
-}
-#endif
-
-#if !LJ_TARGET_X86ORX64
-/* Unsigned x^k. */
-static double lj_vm_powui(double x, uint32_t k)
-{
- double y;
- lua_assert(k != 0);
- for (; (k & 1) == 0; k >>= 1) x *= x;
- y = x;
- if ((k >>= 1) != 0) {
- for (;;) {
- x *= x;
- if (k == 1) break;
- if (k & 1) y *= x;
- k >>= 1;
- }
- y *= x;
- }
- return y;
-}
-
-/* Signed x^k. */
-double lj_vm_powi(double x, int32_t k)
-{
- if (k > 1)
- return lj_vm_powui(x, (uint32_t)k);
- else if (k == 1)
- return x;
- else if (k == 0)
- return 1.0;
- else
- return 1.0 / lj_vm_powui(x, (uint32_t)-k);
-}
-
-/* Computes fpm(x) for extended math functions. */
-double lj_vm_foldfpm(double x, int fpm)
-{
- switch (fpm) {
- case IRFPM_FLOOR: return lj_vm_floor(x);
- case IRFPM_CEIL: return lj_vm_ceil(x);
- case IRFPM_TRUNC: return lj_vm_trunc(x);
- case IRFPM_SQRT: return sqrt(x);
- case IRFPM_EXP: return exp(x);
- case IRFPM_EXP2: return lj_vm_exp2(x);
- case IRFPM_LOG: return log(x);
- case IRFPM_LOG2: return lj_vm_log2(x);
- case IRFPM_LOG10: return log10(x);
- case IRFPM_SIN: return sin(x);
- case IRFPM_COS: return cos(x);
- case IRFPM_TAN: return tan(x);
- default: lua_assert(0);
- }
- return 0;
-}
-#endif
-
-#if LJ_HASFFI
-int lj_vm_errno(void)
-{
- return errno;
-}
-#endif
-
-#endif
+/*
+** Math helper functions for assembler VM.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_vmmath_c
+#define LUA_CORE
+
+#include <errno.h>
+#include <math.h>
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+#include "lj_vm.h"
+
+/* -- Helper functions for generated machine code ------------------------- */
+
+#if LJ_TARGET_X86ORX64
+/* Wrapper functions to avoid linker issues on OSX. */
+LJ_FUNCA double lj_vm_sinh(double x) { return sinh(x); }
+LJ_FUNCA double lj_vm_cosh(double x) { return cosh(x); }
+LJ_FUNCA double lj_vm_tanh(double x) { return tanh(x); }
+#endif
+
+#if !LJ_TARGET_X86ORX64
+double lj_vm_foldarith(double x, double y, int op)
+{
+ switch (op) {
+ case IR_ADD - IR_ADD: return x+y; break;
+ case IR_SUB - IR_ADD: return x-y; break;
+ case IR_MUL - IR_ADD: return x*y; break;
+ case IR_DIV - IR_ADD: return x/y; break;
+ case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y; break;
+ case IR_POW - IR_ADD: return pow(x, y); break;
+ case IR_NEG - IR_ADD: return -x; break;
+ case IR_ABS - IR_ADD: return fabs(x); break;
+#if LJ_HASJIT
+ case IR_ATAN2 - IR_ADD: return atan2(x, y); break;
+ case IR_LDEXP - IR_ADD: return ldexp(x, (int)y); break;
+ case IR_MIN - IR_ADD: return x > y ? y : x; break;
+ case IR_MAX - IR_ADD: return x < y ? y : x; break;
+#endif
+ default: return x;
+ }
+}
+#endif
+
+#if LJ_HASJIT
+
+#ifdef LUAJIT_NO_LOG2
+double lj_vm_log2(double a)
+{
+ return log(a) * 1.4426950408889634074;
+}
+#endif
+
+#ifdef LUAJIT_NO_EXP2
+double lj_vm_exp2(double a)
+{
+ return exp(a * 0.6931471805599453);
+}
+#endif
+
+#if !(LJ_TARGET_ARM || LJ_TARGET_PPC)
+int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b)
+{
+ uint32_t y, ua, ub;
+ lua_assert(b != 0); /* This must be checked before using this function. */
+ ua = a < 0 ? (uint32_t)-a : (uint32_t)a;
+ ub = b < 0 ? (uint32_t)-b : (uint32_t)b;
+ y = ua % ub;
+ if (y != 0 && (a^b) < 0) y = y - ub;
+ if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y;
+ return (int32_t)y;
+}
+#endif
+
+#if !LJ_TARGET_X86ORX64
+/* Unsigned x^k. */
+static double lj_vm_powui(double x, uint32_t k)
+{
+ double y;
+ lua_assert(k != 0);
+ for (; (k & 1) == 0; k >>= 1) x *= x;
+ y = x;
+ if ((k >>= 1) != 0) {
+ for (;;) {
+ x *= x;
+ if (k == 1) break;
+ if (k & 1) y *= x;
+ k >>= 1;
+ }
+ y *= x;
+ }
+ return y;
+}
+
+/* Signed x^k. */
+double lj_vm_powi(double x, int32_t k)
+{
+ if (k > 1)
+ return lj_vm_powui(x, (uint32_t)k);
+ else if (k == 1)
+ return x;
+ else if (k == 0)
+ return 1.0;
+ else
+ return 1.0 / lj_vm_powui(x, (uint32_t)-k);
+}
+
+/* Computes fpm(x) for extended math functions. */
+double lj_vm_foldfpm(double x, int fpm)
+{
+ switch (fpm) {
+ case IRFPM_FLOOR: return lj_vm_floor(x);
+ case IRFPM_CEIL: return lj_vm_ceil(x);
+ case IRFPM_TRUNC: return lj_vm_trunc(x);
+ case IRFPM_SQRT: return sqrt(x);
+ case IRFPM_EXP: return exp(x);
+ case IRFPM_EXP2: return lj_vm_exp2(x);
+ case IRFPM_LOG: return log(x);
+ case IRFPM_LOG2: return lj_vm_log2(x);
+ case IRFPM_LOG10: return log10(x);
+ case IRFPM_SIN: return sin(x);
+ case IRFPM_COS: return cos(x);
+ case IRFPM_TAN: return tan(x);
+ default: lua_assert(0);
+ }
+ return 0;
+}
+#endif
+
+#if LJ_HASFFI
+int lj_vm_errno(void)
+{
+ return errno;
+}
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/ljamalg.c b/3rdparty/lua/src/ljamalg.c
index 5d4cbb5..962b313 100644
--- a/3rdparty/lua/src/ljamalg.c
+++ b/3rdparty/lua/src/ljamalg.c
@@ -1,93 +1,93 @@
-/*
-** LuaJIT core and libraries amalgamation.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/*
-+--------------------------------------------------------------------------+
-| WARNING: Compiling the amalgamation needs a lot of virtual memory |
-| (around 300 MB with GCC 4.x)! If you don't have enough physical memory |
-| your machine will start swapping to disk and the compile will not finish |
-| within a reasonable amount of time. |
-| So either compile on a bigger machine or use the non-amalgamated build. |
-+--------------------------------------------------------------------------+
-*/
-
-#define ljamalg_c
-#define LUA_CORE
-
-/* To get the mremap prototype. Must be defined before any system includes. */
-#if defined(__linux__) && !defined(_GNU_SOURCE)
-#define _GNU_SOURCE
-#endif
-
-#ifndef WINVER
-#define WINVER 0x0501
-#endif
-
-#include "lua.h"
-#include "lauxlib.h"
-
-#include "lj_gc.c"
-#include "lj_err.c"
-#include "lj_char.c"
-#include "lj_bc.c"
-#include "lj_obj.c"
-#include "lj_str.c"
-#include "lj_tab.c"
-#include "lj_func.c"
-#include "lj_udata.c"
-#include "lj_meta.c"
-#include "lj_debug.c"
-#include "lj_state.c"
-#include "lj_dispatch.c"
-#include "lj_vmevent.c"
-#include "lj_vmmath.c"
-#include "lj_strscan.c"
-#include "lj_api.c"
-#include "lj_lex.c"
-#include "lj_parse.c"
-#include "lj_bcread.c"
-#include "lj_bcwrite.c"
-#include "lj_load.c"
-#include "lj_ctype.c"
-#include "lj_cdata.c"
-#include "lj_cconv.c"
-#include "lj_ccall.c"
-#include "lj_ccallback.c"
-#include "lj_carith.c"
-#include "lj_clib.c"
-#include "lj_cparse.c"
-#include "lj_lib.c"
-#include "lj_ir.c"
-#include "lj_opt_mem.c"
-#include "lj_opt_fold.c"
-#include "lj_opt_narrow.c"
-#include "lj_opt_dce.c"
-#include "lj_opt_loop.c"
-#include "lj_opt_split.c"
-#include "lj_opt_sink.c"
-#include "lj_mcode.c"
-#include "lj_snap.c"
-#include "lj_record.c"
-#include "lj_crecord.c"
-#include "lj_ffrecord.c"
-#include "lj_asm.c"
-#include "lj_trace.c"
-#include "lj_gdbjit.c"
-#include "lj_alloc.c"
-
-#include "lib_aux.c"
-#include "lib_base.c"
-#include "lib_math.c"
-#include "lib_string.c"
-#include "lib_table.c"
-#include "lib_io.c"
-#include "lib_os.c"
-#include "lib_package.c"
-#include "lib_debug.c"
-#include "lib_bit.c"
-#include "lib_jit.c"
-#include "lib_ffi.c"
-#include "lib_init.c"
-
+/*
+** LuaJIT core and libraries amalgamation.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/*
++--------------------------------------------------------------------------+
+| WARNING: Compiling the amalgamation needs a lot of virtual memory |
+| (around 200 MB with GCC 4.x)! If you don't have enough physical memory |
+| your machine will start swapping to disk and the compile will not finish |
+| within a reasonable amount of time. |
+| So either compile on a bigger machine or use the non-amalgamated build. |
++--------------------------------------------------------------------------+
+*/
+
+#define ljamalg_c
+#define LUA_CORE
+
+/* To get the mremap prototype. Must be defined before any system includes. */
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_gc.c"
+#include "lj_err.c"
+#include "lj_char.c"
+#include "lj_bc.c"
+#include "lj_obj.c"
+#include "lj_str.c"
+#include "lj_tab.c"
+#include "lj_func.c"
+#include "lj_udata.c"
+#include "lj_meta.c"
+#include "lj_debug.c"
+#include "lj_state.c"
+#include "lj_dispatch.c"
+#include "lj_vmevent.c"
+#include "lj_vmmath.c"
+#include "lj_strscan.c"
+#include "lj_api.c"
+#include "lj_lex.c"
+#include "lj_parse.c"
+#include "lj_bcread.c"
+#include "lj_bcwrite.c"
+#include "lj_load.c"
+#include "lj_ctype.c"
+#include "lj_cdata.c"
+#include "lj_cconv.c"
+#include "lj_ccall.c"
+#include "lj_ccallback.c"
+#include "lj_carith.c"
+#include "lj_clib.c"
+#include "lj_cparse.c"
+#include "lj_lib.c"
+#include "lj_ir.c"
+#include "lj_opt_mem.c"
+#include "lj_opt_fold.c"
+#include "lj_opt_narrow.c"
+#include "lj_opt_dce.c"
+#include "lj_opt_loop.c"
+#include "lj_opt_split.c"
+#include "lj_opt_sink.c"
+#include "lj_mcode.c"
+#include "lj_snap.c"
+#include "lj_record.c"
+#include "lj_crecord.c"
+#include "lj_ffrecord.c"
+#include "lj_asm.c"
+#include "lj_trace.c"
+#include "lj_gdbjit.c"
+#include "lj_alloc.c"
+
+#include "lib_aux.c"
+#include "lib_base.c"
+#include "lib_math.c"
+#include "lib_string.c"
+#include "lib_table.c"
+#include "lib_io.c"
+#include "lib_os.c"
+#include "lib_package.c"
+#include "lib_debug.c"
+#include "lib_bit.c"
+#include "lib_jit.c"
+#include "lib_ffi.c"
+#include "lib_init.c"
+
diff --git a/3rdparty/lua/src/lua.h b/3rdparty/lua/src/lua.h
index 4909d29..b07e134 100644
--- a/3rdparty/lua/src/lua.h
+++ b/3rdparty/lua/src/lua.h
@@ -1,393 +1,393 @@
-/*
-** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $
-** Lua - An Extensible Extension Language
-** Lua.org, PUC-Rio, Brazil (http://www.lua.org)
-** See Copyright Notice at the end of this file
-*/
-
-
-#ifndef lua_h
-#define lua_h
-
-#include <stdarg.h>
-#include <stddef.h>
-
-
-#include "luaconf.h"
-
-
-#define LUA_VERSION "Lua 5.1"
-#define LUA_RELEASE "Lua 5.1.4"
-#define LUA_VERSION_NUM 501
-#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio"
-#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
-
-
-/* mark for precompiled code (`<esc>Lua') */
-#define LUA_SIGNATURE "\033Lua"
-
-/* option for multiple returns in `lua_pcall' and `lua_call' */
-#define LUA_MULTRET (-1)
-
-
-/*
-** pseudo-indices
-*/
-#define LUA_REGISTRYINDEX (-10000)
-#define LUA_ENVIRONINDEX (-10001)
-#define LUA_GLOBALSINDEX (-10002)
-#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
-
-
-/* thread status; 0 is OK */
-#define LUA_YIELD 1
-#define LUA_ERRRUN 2
-#define LUA_ERRSYNTAX 3
-#define LUA_ERRMEM 4
-#define LUA_ERRERR 5
-
-
-typedef struct lua_State lua_State;
-
-typedef int (*lua_CFunction) (lua_State *L);
-
-
-/*
-** functions that read/write blocks when loading/dumping Lua chunks
-*/
-typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz);
-
-typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud);
-
-
-/*
-** prototype for memory-allocation functions
-*/
-typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize);
-
-
-/*
-** basic types
-*/
-#define LUA_TNONE (-1)
-
-#define LUA_TNIL 0
-#define LUA_TBOOLEAN 1
-#define LUA_TLIGHTUSERDATA 2
-#define LUA_TNUMBER 3
-#define LUA_TSTRING 4
-#define LUA_TTABLE 5
-#define LUA_TFUNCTION 6
-#define LUA_TUSERDATA 7
-#define LUA_TTHREAD 8
-
-
-
-/* minimum Lua stack available to a C function */
-#define LUA_MINSTACK 20
-
-
-/*
-** generic extra include file
-*/
-#if defined(LUA_USER_H)
-#include LUA_USER_H
-#endif
-
-
-/* type of numbers in Lua */
-typedef LUA_NUMBER lua_Number;
-
-
-/* type for integer functions */
-typedef LUA_INTEGER lua_Integer;
-
-
-
-/*
-** state manipulation
-*/
-LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud);
-LUA_API void (lua_close) (lua_State *L);
-LUA_API lua_State *(lua_newthread) (lua_State *L);
-
-LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf);
-
-
-/*
-** basic stack manipulation
-*/
-LUA_API int (lua_gettop) (lua_State *L);
-LUA_API void (lua_settop) (lua_State *L, int idx);
-LUA_API void (lua_pushvalue) (lua_State *L, int idx);
-LUA_API void (lua_remove) (lua_State *L, int idx);
-LUA_API void (lua_insert) (lua_State *L, int idx);
-LUA_API void (lua_replace) (lua_State *L, int idx);
-LUA_API int (lua_checkstack) (lua_State *L, int sz);
-
-LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n);
-
-
-/*
-** access functions (stack -> C)
-*/
-
-LUA_API int (lua_isnumber) (lua_State *L, int idx);
-LUA_API int (lua_isstring) (lua_State *L, int idx);
-LUA_API int (lua_iscfunction) (lua_State *L, int idx);
-LUA_API int (lua_isuserdata) (lua_State *L, int idx);
-LUA_API int (lua_type) (lua_State *L, int idx);
-LUA_API const char *(lua_typename) (lua_State *L, int tp);
-
-LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2);
-LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2);
-LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2);
-
-LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx);
-LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx);
-LUA_API int (lua_toboolean) (lua_State *L, int idx);
-LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len);
-LUA_API size_t (lua_objlen) (lua_State *L, int idx);
-LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx);
-LUA_API void *(lua_touserdata) (lua_State *L, int idx);
-LUA_API lua_State *(lua_tothread) (lua_State *L, int idx);
-LUA_API const void *(lua_topointer) (lua_State *L, int idx);
-
-
-/*
-** push functions (C -> stack)
-*/
-LUA_API void (lua_pushnil) (lua_State *L);
-LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n);
-LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n);
-LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l);
-LUA_API void (lua_pushstring) (lua_State *L, const char *s);
-LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt,
- va_list argp);
-LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...);
-LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n);
-LUA_API void (lua_pushboolean) (lua_State *L, int b);
-LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p);
-LUA_API int (lua_pushthread) (lua_State *L);
-
-
-/*
-** get functions (Lua -> stack)
-*/
-LUA_API void (lua_gettable) (lua_State *L, int idx);
-LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k);
-LUA_API void (lua_rawget) (lua_State *L, int idx);
-LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n);
-LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec);
-LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz);
-LUA_API int (lua_getmetatable) (lua_State *L, int objindex);
-LUA_API void (lua_getfenv) (lua_State *L, int idx);
-
-
-/*
-** set functions (stack -> Lua)
-*/
-LUA_API void (lua_settable) (lua_State *L, int idx);
-LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k);
-LUA_API void (lua_rawset) (lua_State *L, int idx);
-LUA_API void (lua_rawseti) (lua_State *L, int idx, int n);
-LUA_API int (lua_setmetatable) (lua_State *L, int objindex);
-LUA_API int (lua_setfenv) (lua_State *L, int idx);
-
-
-/*
-** `load' and `call' functions (load and run Lua code)
-*/
-LUA_API void (lua_call) (lua_State *L, int nargs, int nresults);
-LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc);
-LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud);
-LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt,
- const char *chunkname);
-
-LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data);
-
-
-/*
-** coroutine functions
-*/
-LUA_API int (lua_yield) (lua_State *L, int nresults);
-LUA_API int (lua_resume) (lua_State *L, int narg);
-LUA_API int (lua_status) (lua_State *L);
-
-/*
-** garbage-collection function and options
-*/
-
-#define LUA_GCSTOP 0
-#define LUA_GCRESTART 1
-#define LUA_GCCOLLECT 2
-#define LUA_GCCOUNT 3
-#define LUA_GCCOUNTB 4
-#define LUA_GCSTEP 5
-#define LUA_GCSETPAUSE 6
-#define LUA_GCSETSTEPMUL 7
-
-LUA_API int (lua_gc) (lua_State *L, int what, int data);
-
-
-/*
-** miscellaneous functions
-*/
-
-LUA_API int (lua_error) (lua_State *L);
-
-LUA_API int (lua_next) (lua_State *L, int idx);
-
-LUA_API void (lua_concat) (lua_State *L, int n);
-
-LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud);
-LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
-
-
-
-/*
-** ===============================================================
-** some useful macros
-** ===============================================================
-*/
-
-#define lua_pop(L,n) lua_settop(L, -(n)-1)
-
-#define lua_newtable(L) lua_createtable(L, 0, 0)
-
-#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n)))
-
-#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0)
-
-#define lua_strlen(L,i) lua_objlen(L, (i))
-
-#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION)
-#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE)
-#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
-#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL)
-#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN)
-#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD)
-#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE)
-#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0)
-
-#define lua_pushliteral(L, s) \
- lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1)
-
-#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
-#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
-
-#define lua_tostring(L,i) lua_tolstring(L, (i), NULL)
-
-
-
-/*
-** compatibility macros and functions
-*/
-
-#define lua_open() luaL_newstate()
-
-#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX)
-
-#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0)
-
-#define lua_Chunkreader lua_Reader
-#define lua_Chunkwriter lua_Writer
-
-
-/* hack */
-LUA_API void lua_setlevel (lua_State *from, lua_State *to);
-
-
-/*
-** {======================================================================
-** Debug API
-** =======================================================================
-*/
-
-
-/*
-** Event codes
-*/
-#define LUA_HOOKCALL 0
-#define LUA_HOOKRET 1
-#define LUA_HOOKLINE 2
-#define LUA_HOOKCOUNT 3
-#define LUA_HOOKTAILRET 4
-
-
-/*
-** Event masks
-*/
-#define LUA_MASKCALL (1 << LUA_HOOKCALL)
-#define LUA_MASKRET (1 << LUA_HOOKRET)
-#define LUA_MASKLINE (1 << LUA_HOOKLINE)
-#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT)
-
-typedef struct lua_Debug lua_Debug; /* activation record */
-
-
-/* Functions to be called by the debuger in specific events */
-typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
-
-
-LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar);
-LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
-LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
-LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
-LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n);
-LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n);
-LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
-LUA_API lua_Hook lua_gethook (lua_State *L);
-LUA_API int lua_gethookmask (lua_State *L);
-LUA_API int lua_gethookcount (lua_State *L);
-
-/* From Lua 5.2. */
-LUA_API void *lua_upvalueid (lua_State *L, int idx, int n);
-LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2);
-LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt,
- const char *chunkname, const char *mode);
-
-
-struct lua_Debug {
- int event;
- const char *name; /* (n) */
- const char *namewhat; /* (n) `global', `local', `field', `method' */
- const char *what; /* (S) `Lua', `C', `main', `tail' */
- const char *source; /* (S) */
- int currentline; /* (l) */
- int nups; /* (u) number of upvalues */
- int linedefined; /* (S) */
- int lastlinedefined; /* (S) */
- char short_src[LUA_IDSIZE]; /* (S) */
- /* private part */
- int i_ci; /* active function */
-};
-
-/* }====================================================================== */
-
-
-/******************************************************************************
-* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved.
-*
-* Permission is hereby granted, free of charge, to any person obtaining
-* a copy of this software and associated documentation files (the
-* "Software"), to deal in the Software without restriction, including
-* without limitation the rights to use, copy, modify, merge, publish,
-* distribute, sublicense, and/or sell copies of the Software, and to
-* permit persons to whom the Software is furnished to do so, subject to
-* the following conditions:
-*
-* The above copyright notice and this permission notice shall be
-* included in all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-******************************************************************************/
-
-
-#endif
+/*
+** $Id: lua.h,v ec74646b41df 2013/11/19 00:23:10 oliver $
+** Lua - An Extensible Extension Language
+** Lua.org, PUC-Rio, Brazil (http://www.lua.org)
+** See Copyright Notice at the end of this file
+*/
+
+
+#ifndef lua_h
+#define lua_h
+
+#include <stdarg.h>
+#include <stddef.h>
+
+
+#include "luaconf.h"
+
+
+#define LUA_VERSION "Lua 5.1"
+#define LUA_RELEASE "Lua 5.1.4"
+#define LUA_VERSION_NUM 501
+#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio"
+#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
+
+
+/* mark for precompiled code (`<esc>Lua') */
+#define LUA_SIGNATURE "\033Lua"
+
+/* option for multiple returns in `lua_pcall' and `lua_call' */
+#define LUA_MULTRET (-1)
+
+
+/*
+** pseudo-indices
+*/
+#define LUA_REGISTRYINDEX (-10000)
+#define LUA_ENVIRONINDEX (-10001)
+#define LUA_GLOBALSINDEX (-10002)
+#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
+
+
+/* thread status; 0 is OK */
+#define LUA_YIELD 1
+#define LUA_ERRRUN 2
+#define LUA_ERRSYNTAX 3
+#define LUA_ERRMEM 4
+#define LUA_ERRERR 5
+
+
+typedef struct lua_State lua_State;
+
+typedef int (*lua_CFunction) (lua_State *L);
+
+
+/*
+** functions that read/write blocks when loading/dumping Lua chunks
+*/
+typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz);
+
+typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud);
+
+
+/*
+** prototype for memory-allocation functions
+*/
+typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize);
+
+
+/*
+** basic types
+*/
+#define LUA_TNONE (-1)
+
+#define LUA_TNIL 0
+#define LUA_TBOOLEAN 1
+#define LUA_TLIGHTUSERDATA 2
+#define LUA_TNUMBER 3
+#define LUA_TSTRING 4
+#define LUA_TTABLE 5
+#define LUA_TFUNCTION 6
+#define LUA_TUSERDATA 7
+#define LUA_TTHREAD 8
+
+
+
+/* minimum Lua stack available to a C function */
+#define LUA_MINSTACK 20
+
+
+/*
+** generic extra include file
+*/
+#if defined(LUA_USER_H)
+#include LUA_USER_H
+#endif
+
+
+/* type of numbers in Lua */
+typedef LUA_NUMBER lua_Number;
+
+
+/* type for integer functions */
+typedef LUA_INTEGER lua_Integer;
+
+
+
+/*
+** state manipulation
+*/
+LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud);
+LUA_API void (lua_close) (lua_State *L);
+LUA_API lua_State *(lua_newthread) (lua_State *L);
+
+LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf);
+
+
+/*
+** basic stack manipulation
+*/
+LUA_API int (lua_gettop) (lua_State *L);
+LUA_API void (lua_settop) (lua_State *L, int idx);
+LUA_API void (lua_pushvalue) (lua_State *L, int idx);
+LUA_API void (lua_remove) (lua_State *L, int idx);
+LUA_API void (lua_insert) (lua_State *L, int idx);
+LUA_API void (lua_replace) (lua_State *L, int idx);
+LUA_API int (lua_checkstack) (lua_State *L, int sz);
+
+LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n);
+
+
+/*
+** access functions (stack -> C)
+*/
+
+LUA_API int (lua_isnumber) (lua_State *L, int idx);
+LUA_API int (lua_isstring) (lua_State *L, int idx);
+LUA_API int (lua_iscfunction) (lua_State *L, int idx);
+LUA_API int (lua_isuserdata) (lua_State *L, int idx);
+LUA_API int (lua_type) (lua_State *L, int idx);
+LUA_API const char *(lua_typename) (lua_State *L, int tp);
+
+LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2);
+
+LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx);
+LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx);
+LUA_API int (lua_toboolean) (lua_State *L, int idx);
+LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len);
+LUA_API size_t (lua_objlen) (lua_State *L, int idx);
+LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx);
+LUA_API void *(lua_touserdata) (lua_State *L, int idx);
+LUA_API lua_State *(lua_tothread) (lua_State *L, int idx);
+LUA_API const void *(lua_topointer) (lua_State *L, int idx);
+
+
+/*
+** push functions (C -> stack)
+*/
+LUA_API void (lua_pushnil) (lua_State *L);
+LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n);
+LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n);
+LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l);
+LUA_API void (lua_pushstring) (lua_State *L, const char *s);
+LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt,
+ va_list argp);
+LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...);
+LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n);
+LUA_API void (lua_pushboolean) (lua_State *L, int b);
+LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p);
+LUA_API int (lua_pushthread) (lua_State *L);
+
+
+/*
+** get functions (Lua -> stack)
+*/
+LUA_API void (lua_gettable) (lua_State *L, int idx);
+LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawget) (lua_State *L, int idx);
+LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n);
+LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec);
+LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz);
+LUA_API int (lua_getmetatable) (lua_State *L, int objindex);
+LUA_API void (lua_getfenv) (lua_State *L, int idx);
+
+
+/*
+** set functions (stack -> Lua)
+*/
+LUA_API void (lua_settable) (lua_State *L, int idx);
+LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawset) (lua_State *L, int idx);
+LUA_API void (lua_rawseti) (lua_State *L, int idx, int n);
+LUA_API int (lua_setmetatable) (lua_State *L, int objindex);
+LUA_API int (lua_setfenv) (lua_State *L, int idx);
+
+
+/*
+** `load' and `call' functions (load and run Lua code)
+*/
+LUA_API void (lua_call) (lua_State *L, int nargs, int nresults);
+LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc);
+LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud);
+LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt,
+ const char *chunkname);
+
+LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data);
+
+
+/*
+** coroutine functions
+*/
+LUA_API int (lua_yield) (lua_State *L, int nresults);
+LUA_API int (lua_resume) (lua_State *L, int narg);
+LUA_API int (lua_status) (lua_State *L);
+
+/*
+** garbage-collection function and options
+*/
+
+#define LUA_GCSTOP 0
+#define LUA_GCRESTART 1
+#define LUA_GCCOLLECT 2
+#define LUA_GCCOUNT 3
+#define LUA_GCCOUNTB 4
+#define LUA_GCSTEP 5
+#define LUA_GCSETPAUSE 6
+#define LUA_GCSETSTEPMUL 7
+
+LUA_API int (lua_gc) (lua_State *L, int what, int data);
+
+
+/*
+** miscellaneous functions
+*/
+
+LUA_API int (lua_error) (lua_State *L);
+
+LUA_API int (lua_next) (lua_State *L, int idx);
+
+LUA_API void (lua_concat) (lua_State *L, int n);
+
+LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud);
+LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
+
+
+
+/*
+** ===============================================================
+** some useful macros
+** ===============================================================
+*/
+
+#define lua_pop(L,n) lua_settop(L, -(n)-1)
+
+#define lua_newtable(L) lua_createtable(L, 0, 0)
+
+#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n)))
+
+#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0)
+
+#define lua_strlen(L,i) lua_objlen(L, (i))
+
+#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION)
+#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE)
+#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
+#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL)
+#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN)
+#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD)
+#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE)
+#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0)
+
+#define lua_pushliteral(L, s) \
+ lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1)
+
+#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
+#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
+
+#define lua_tostring(L,i) lua_tolstring(L, (i), NULL)
+
+
+
+/*
+** compatibility macros and functions
+*/
+
+#define lua_open() luaL_newstate()
+
+#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX)
+
+#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0)
+
+#define lua_Chunkreader lua_Reader
+#define lua_Chunkwriter lua_Writer
+
+
+/* hack */
+LUA_API void lua_setlevel (lua_State *from, lua_State *to);
+
+
+/*
+** {======================================================================
+** Debug API
+** =======================================================================
+*/
+
+
+/*
+** Event codes
+*/
+#define LUA_HOOKCALL 0
+#define LUA_HOOKRET 1
+#define LUA_HOOKLINE 2
+#define LUA_HOOKCOUNT 3
+#define LUA_HOOKTAILRET 4
+
+
+/*
+** Event masks
+*/
+#define LUA_MASKCALL (1 << LUA_HOOKCALL)
+#define LUA_MASKRET (1 << LUA_HOOKRET)
+#define LUA_MASKLINE (1 << LUA_HOOKLINE)
+#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT)
+
+typedef struct lua_Debug lua_Debug; /* activation record */
+
+
+/* Functions to be called by the debuger in specific events */
+typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
+
+
+LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar);
+LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
+LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n);
+LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n);
+LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
+LUA_API lua_Hook lua_gethook (lua_State *L);
+LUA_API int lua_gethookmask (lua_State *L);
+LUA_API int lua_gethookcount (lua_State *L);
+
+/* From Lua 5.2. */
+LUA_API void *lua_upvalueid (lua_State *L, int idx, int n);
+LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2);
+LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt,
+ const char *chunkname, const char *mode);
+
+
+struct lua_Debug {
+ int event;
+ const char *name; /* (n) */
+ const char *namewhat; /* (n) `global', `local', `field', `method' */
+ const char *what; /* (S) `Lua', `C', `main', `tail' */
+ const char *source; /* (S) */
+ int currentline; /* (l) */
+ int nups; /* (u) number of upvalues */
+ int linedefined; /* (S) */
+ int lastlinedefined; /* (S) */
+ char short_src[LUA_IDSIZE]; /* (S) */
+ /* private part */
+ int i_ci; /* active function */
+};
+
+/* }====================================================================== */
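The event codes, event masks and lua_sethook declared in this section are everything a hook needs. A minimal sketch (hypothetical host code, assuming a state L that is currently executing Lua) of a per-line trace hook:

    #include <stdio.h>
    #include "lua.h"

    /* Called by the VM once per executed source line while the hook is set. */
    static void line_hook(lua_State *L, lua_Debug *ar)
    {
      if (ar->event == LUA_HOOKLINE) {
        lua_getinfo(L, "S", ar);                    /* fill in short_src etc. */
        printf("%s:%d\n", ar->short_src, ar->currentline);
      }
    }

    static void enable_tracing(lua_State *L)
    {
      /* The count argument is only consulted for LUA_MASKCOUNT. */
      lua_sethook(L, line_hook, LUA_MASKLINE, 0);
    }

    static void disable_tracing(lua_State *L)
    {
      lua_sethook(L, NULL, 0, 0);
    }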
+
+
+/******************************************************************************
+* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files (the
+* "Software"), to deal in the Software without restriction, including
+* without limitation the rights to use, copy, modify, merge, publish,
+* distribute, sublicense, and/or sell copies of the Software, and to
+* permit persons to whom the Software is furnished to do so, subject to
+* the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+
+#endif
diff --git a/3rdparty/lua/src/lua.hpp b/3rdparty/lua/src/lua.hpp
index 8842ae9..07e9002 100644
--- a/3rdparty/lua/src/lua.hpp
+++ b/3rdparty/lua/src/lua.hpp
@@ -1,9 +1,9 @@
-// C++ wrapper for LuaJIT header files.
-
-extern "C" {
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-#include "luajit.h"
-}
-
+// C++ wrapper for LuaJIT header files.
+
+extern "C" {
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "luajit.h"
+}
+
diff --git a/3rdparty/lua/src/luaconf.h b/3rdparty/lua/src/luaconf.h
index f4d140c..d55caab 100644
--- a/3rdparty/lua/src/luaconf.h
+++ b/3rdparty/lua/src/luaconf.h
@@ -1,156 +1,139 @@
-/*
-** Configuration header.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef luaconf_h
-#define luaconf_h
-
-#ifndef WINVER
-#define WINVER 0x0501
-#endif
-#include <limits.h>
-#include <stddef.h>
-
-/* Default path for loading Lua and C modules with require(). */
-#if defined(_WIN32)
-/*
-** In Windows, any exclamation mark ('!') in the path is replaced by the
-** path of the directory of the executable file of the current process.
-*/
-#define LUA_LDIR "!\\lua\\"
-#define LUA_CDIR "!\\"
-#define LUA_PATH_DEFAULT \
- ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;"
-#define LUA_CPATH_DEFAULT \
- ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
-#else
-/*
-** Note to distribution maintainers: do NOT patch the following lines!
-** Please read ../doc/install.html#distro and pass PREFIX=/usr instead.
-*/
-#ifndef LUA_MULTILIB
-#define LUA_MULTILIB "lib"
-#endif
-#ifndef LUA_LMULTILIB
-#define LUA_LMULTILIB "lib"
-#endif
-#define LUA_LROOT "/usr/local"
-#define LUA_LUADIR "/lua/5.1/"
-#define LUA_LJDIR "/luajit-2.0.4/"
-
-#ifdef LUA_ROOT
-#define LUA_JROOT LUA_ROOT
-#define LUA_RLDIR LUA_ROOT "/share" LUA_LUADIR
-#define LUA_RCDIR LUA_ROOT "/" LUA_MULTILIB LUA_LUADIR
-#define LUA_RLPATH ";" LUA_RLDIR "?.lua;" LUA_RLDIR "?/init.lua"
-#define LUA_RCPATH ";" LUA_RCDIR "?.so"
-#else
-#define LUA_JROOT LUA_LROOT
-#define LUA_RLPATH
-#define LUA_RCPATH
-#endif
-
-#define LUA_JPATH ";" LUA_JROOT "/share" LUA_LJDIR "?.lua"
-#define LUA_LLDIR LUA_LROOT "/share" LUA_LUADIR
-#define LUA_LCDIR LUA_LROOT "/" LUA_LMULTILIB LUA_LUADIR
-#define LUA_LLPATH ";" LUA_LLDIR "?.lua;" LUA_LLDIR "?/init.lua"
-#define LUA_LCPATH1 ";" LUA_LCDIR "?.so"
-#define LUA_LCPATH2 ";" LUA_LCDIR "loadall.so"
-
-#define LUA_PATH_DEFAULT "./?.lua" LUA_JPATH LUA_LLPATH LUA_RLPATH
-#define LUA_CPATH_DEFAULT "./?.so" LUA_LCPATH1 LUA_RCPATH LUA_LCPATH2
-#endif
-
-/* Environment variable names for path overrides and initialization code. */
-#define LUA_PATH "LUA_PATH"
-#define LUA_CPATH "LUA_CPATH"
-#define LUA_INIT "LUA_INIT"
-
-/* Special file system characters. */
-#if defined(_WIN32)
-#define LUA_DIRSEP "\\"
-#else
-#define LUA_DIRSEP "/"
-#endif
-#define LUA_PATHSEP ";"
-#define LUA_PATH_MARK "?"
-#define LUA_EXECDIR "!"
-#define LUA_IGMARK "-"
-#define LUA_PATH_CONFIG \
- LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \
- LUA_EXECDIR "\n" LUA_IGMARK
-
-/* Quoting in error messages. */
-#define LUA_QL(x) "'" x "'"
-#define LUA_QS LUA_QL("%s")
-
-/* Various tunables. */
-#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */
-#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */
-#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */
-#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */
-#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */
-
-/* Compatibility with older library function names. */
-#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */
-#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */
-
-/* Configuration for the frontend (the luajit executable). */
-#if defined(luajit_c)
-#define LUA_PROGNAME "luajit" /* Fallback frontend name. */
-#define LUA_PROMPT "> " /* Interactive prompt. */
-#define LUA_PROMPT2 ">> " /* Continuation prompt. */
-#define LUA_MAXINPUT 512 /* Max. input line length. */
-#endif
-
-/* Note: changing the following defines breaks the Lua 5.1 ABI. */
-#define LUA_INTEGER ptrdiff_t
-#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */
-/*
-** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using
-** unreasonable amounts of stack space, but still retain ABI compatibility.
-** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it.
-*/
-#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ)
-
-/* The following defines are here only for compatibility with luaconf.h
-** from the standard Lua distribution. They must not be changed for LuaJIT.
-*/
-#define LUA_NUMBER_DOUBLE
-#define LUA_NUMBER double
-#define LUAI_UACNUMBER double
-#define LUA_NUMBER_SCAN "%lf"
-#define LUA_NUMBER_FMT "%.14g"
-#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n))
-#define LUAI_MAXNUMBER2STR 32
-#define LUA_INTFRMLEN "l"
-#define LUA_INTFRM_T long
-
-/* Linkage of public API functions. */
-#if defined(LUA_BUILD_AS_DLL)
-#if defined(LUA_CORE) || defined(LUA_LIB)
-#define LUA_API __declspec(dllexport)
-#else
-#define LUA_API __declspec(dllimport)
-#endif
-#else
-#define LUA_API extern
-#endif
-
-#define LUALIB_API LUA_API
-
-/* Support for internal assertions. */
-#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
-#include <assert.h>
-#endif
-#ifdef LUA_USE_ASSERT
-#define lua_assert(x) assert(x)
-#endif
-#ifdef LUA_USE_APICHECK
-#define luai_apicheck(L, o) { (void)L; assert(o); }
-#else
-#define luai_apicheck(L, o) { (void)L; }
-#endif
-
-#endif
+/*
+** Configuration header.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef luaconf_h
+#define luaconf_h
+
+#include <limits.h>
+#include <stddef.h>
+
+/* Default path for loading Lua and C modules with require(). */
+#if defined(_WIN32)
+/*
+** In Windows, any exclamation mark ('!') in the path is replaced by the
+** path of the directory of the executable file of the current process.
+*/
+#define LUA_LDIR "!\\lua\\"
+#define LUA_CDIR "!\\"
+#define LUA_PATH_DEFAULT \
+ ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;"
+#define LUA_CPATH_DEFAULT \
+ ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
+#else
+/*
+** Note to distribution maintainers: do NOT patch the following line!
+** Please read ../doc/install.html#distro and pass PREFIX=/usr instead.
+*/
+#define LUA_ROOT "/usr/local/"
+#define LUA_LDIR LUA_ROOT "share/lua/5.1/"
+#define LUA_CDIR LUA_ROOT "lib/lua/5.1/"
+#ifdef LUA_XROOT
+#define LUA_JDIR LUA_XROOT "share/luajit-2.0.2/"
+#define LUA_XPATH \
+ ";" LUA_XROOT "share/lua/5.1/?.lua;" LUA_XROOT "share/lua/5.1/?/init.lua"
+#define LUA_XCPATH LUA_XROOT "lib/lua/5.1/?.so;"
+#else
+#define LUA_JDIR LUA_ROOT "share/luajit-2.0.2/"
+#define LUA_XPATH
+#define LUA_XCPATH
+#endif
+#define LUA_PATH_DEFAULT \
+ "./?.lua;" LUA_JDIR"?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua" LUA_XPATH
+#define LUA_CPATH_DEFAULT \
+ "./?.so;" LUA_CDIR"?.so;" LUA_XCPATH LUA_CDIR"loadall.so"
+#endif
+
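These compile-time defaults can be overridden per process with the LUA_PATH and LUA_CPATH environment variables defined just below, or adjusted by the host at run time. A sketch, assuming a hypothetical "scripts" directory and a state on which luaL_openlibs() has already opened the package library:

    #include "lua.h"

    /* Prepend "./scripts/?.lua" to package.path so require() finds host scripts. */
    static void add_script_dir(lua_State *L)
    {
      lua_getglobal(L, "package");
      lua_pushliteral(L, "./scripts/?.lua;");
      lua_getfield(L, -2, "path");        /* current package.path */
      lua_concat(L, 2);                   /* new prefix .. old value */
      lua_setfield(L, -2, "path");
      lua_pop(L, 1);                      /* drop the package table */
    }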
+/* Environment variable names for path overrides and initialization code. */
+#define LUA_PATH "LUA_PATH"
+#define LUA_CPATH "LUA_CPATH"
+#define LUA_INIT "LUA_INIT"
+
+/* Special file system characters. */
+#if defined(_WIN32)
+#define LUA_DIRSEP "\\"
+#else
+#define LUA_DIRSEP "/"
+#endif
+#define LUA_PATHSEP ";"
+#define LUA_PATH_MARK "?"
+#define LUA_EXECDIR "!"
+#define LUA_IGMARK "-"
+#define LUA_PATH_CONFIG \
+ LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \
+ LUA_EXECDIR "\n" LUA_IGMARK
+
+/* Quoting in error messages. */
+#define LUA_QL(x) "'" x "'"
+#define LUA_QS LUA_QL("%s")
+
+/* Various tunables. */
+#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */
+#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */
+#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */
+#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */
+#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */
+
+/* Compatibility with older library function names. */
+#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */
+#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */
+
+/* Configuration for the frontend (the luajit executable). */
+#if defined(luajit_c)
+#define LUA_PROGNAME "luajit" /* Fallback frontend name. */
+#define LUA_PROMPT "> " /* Interactive prompt. */
+#define LUA_PROMPT2 ">> " /* Continuation prompt. */
+#define LUA_MAXINPUT 512 /* Max. input line length. */
+#endif
+
+/* Note: changing the following defines breaks the Lua 5.1 ABI. */
+#define LUA_INTEGER ptrdiff_t
+#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */
+/*
+** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using
+** unreasonable amounts of stack space, but still retain ABI compatibility.
+** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it.
+*/
+#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ)
+
+/* The following defines are here only for compatibility with luaconf.h
+** from the standard Lua distribution. They must not be changed for LuaJIT.
+*/
+#define LUA_NUMBER_DOUBLE
+#define LUA_NUMBER double
+#define LUAI_UACNUMBER double
+#define LUA_NUMBER_SCAN "%lf"
+#define LUA_NUMBER_FMT "%.14g"
+#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n))
+#define LUAI_MAXNUMBER2STR 32
+#define LUA_INTFRMLEN "l"
+#define LUA_INTFRM_T long
+
+/* Linkage of public API functions. */
+#if defined(LUA_BUILD_AS_DLL)
+#if defined(LUA_CORE) || defined(LUA_LIB)
+#define LUA_API __declspec(dllexport)
+#else
+#define LUA_API __declspec(dllimport)
+#endif
+#else
+#define LUA_API extern
+#endif
+
+#define LUALIB_API LUA_API
+
+/* Support for internal assertions. */
+#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
+#include <assert.h>
+#endif
+#ifdef LUA_USE_ASSERT
+#define lua_assert(x) assert(x)
+#endif
+#ifdef LUA_USE_APICHECK
+#define luai_apicheck(L, o) { (void)L; assert(o); }
+#else
+#define luai_apicheck(L, o) { (void)L; }
+#endif
+
+#endif
diff --git a/3rdparty/lua/src/luajit.c b/3rdparty/lua/src/luajit.c
index a6924a5..e0eacc4 100644
--- a/3rdparty/lua/src/luajit.c
+++ b/3rdparty/lua/src/luajit.c
@@ -1,571 +1,571 @@
-/*
-** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-**
-** Major portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define luajit_c
-
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-#include "luajit.h"
-
-#include "lj_arch.h"
-
-#if LJ_TARGET_POSIX
-#include <unistd.h>
-#define lua_stdin_is_tty() isatty(0)
-#elif LJ_TARGET_WINDOWS
-#include <io.h>
-#ifdef __BORLANDC__
-#define lua_stdin_is_tty() isatty(_fileno(stdin))
-#else
-#define lua_stdin_is_tty() _isatty(_fileno(stdin))
-#endif
-#else
-#define lua_stdin_is_tty() 1
-#endif
-
-#if !LJ_TARGET_CONSOLE
-#include <signal.h>
-#endif
-
-static lua_State *globalL = NULL;
-static const char *progname = LUA_PROGNAME;
-
-#if !LJ_TARGET_CONSOLE
-static void lstop(lua_State *L, lua_Debug *ar)
-{
- (void)ar; /* unused arg. */
- lua_sethook(L, NULL, 0, 0);
- /* Avoid luaL_error -- a C hook doesn't add an extra frame. */
- luaL_where(L, 0);
- lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1));
- lua_error(L);
-}
-
-static void laction(int i)
-{
- signal(i, SIG_DFL); /* if another SIGINT happens before lstop,
- terminate process (default action) */
- lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
-}
-#endif
-
-static void print_usage(void)
-{
- fprintf(stderr,
- "usage: %s [options]... [script [args]...].\n"
- "Available options are:\n"
- " -e chunk Execute string " LUA_QL("chunk") ".\n"
- " -l name Require library " LUA_QL("name") ".\n"
- " -b ... Save or list bytecode.\n"
- " -j cmd Perform LuaJIT control command.\n"
- " -O[opt] Control LuaJIT optimizations.\n"
- " -i Enter interactive mode after executing " LUA_QL("script") ".\n"
- " -v Show version information.\n"
- " -E Ignore environment variables.\n"
- " -- Stop handling options.\n"
- " - Execute stdin and stop handling options.\n"
- ,
- progname);
- fflush(stderr);
-}
-
-static void l_message(const char *pname, const char *msg)
-{
- if (pname) fprintf(stderr, "%s: ", pname);
- fprintf(stderr, "%s\n", msg);
- fflush(stderr);
-}
-
-static int report(lua_State *L, int status)
-{
- if (status && !lua_isnil(L, -1)) {
- const char *msg = lua_tostring(L, -1);
- if (msg == NULL) msg = "(error object is not a string)";
- l_message(progname, msg);
- lua_pop(L, 1);
- }
- return status;
-}
-
-static int traceback(lua_State *L)
-{
- if (!lua_isstring(L, 1)) { /* Non-string error object? Try metamethod. */
- if (lua_isnoneornil(L, 1) ||
- !luaL_callmeta(L, 1, "__tostring") ||
- !lua_isstring(L, -1))
- return 1; /* Return non-string error object. */
- lua_remove(L, 1); /* Replace object by result of __tostring metamethod. */
- }
- luaL_traceback(L, L, lua_tostring(L, 1), 1);
- return 1;
-}
-
-static int docall(lua_State *L, int narg, int clear)
-{
- int status;
- int base = lua_gettop(L) - narg; /* function index */
- lua_pushcfunction(L, traceback); /* push traceback function */
- lua_insert(L, base); /* put it under chunk and args */
-#if !LJ_TARGET_CONSOLE
- signal(SIGINT, laction);
-#endif
- status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base);
-#if !LJ_TARGET_CONSOLE
- signal(SIGINT, SIG_DFL);
-#endif
- lua_remove(L, base); /* remove traceback function */
- /* force a complete garbage collection in case of errors */
- if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0);
- return status;
-}
-
-static void print_version(void)
-{
- fputs(LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n", stdout);
-}
-
-static void print_jit_status(lua_State *L)
-{
- int n;
- const char *s;
- lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
- lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
- lua_remove(L, -2);
- lua_getfield(L, -1, "status");
- lua_remove(L, -2);
- n = lua_gettop(L);
- lua_call(L, 0, LUA_MULTRET);
- fputs(lua_toboolean(L, n) ? "JIT: ON" : "JIT: OFF", stdout);
- for (n++; (s = lua_tostring(L, n)); n++) {
- putc(' ', stdout);
- fputs(s, stdout);
- }
- putc('\n', stdout);
-}
-
-static int getargs(lua_State *L, char **argv, int n)
-{
- int narg;
- int i;
- int argc = 0;
- while (argv[argc]) argc++; /* count total number of arguments */
- narg = argc - (n + 1); /* number of arguments to the script */
- luaL_checkstack(L, narg + 3, "too many arguments to script");
- for (i = n+1; i < argc; i++)
- lua_pushstring(L, argv[i]);
- lua_createtable(L, narg, n + 1);
- for (i = 0; i < argc; i++) {
- lua_pushstring(L, argv[i]);
- lua_rawseti(L, -2, i - n);
- }
- return narg;
-}
-
-static int dofile(lua_State *L, const char *name)
-{
- int status = luaL_loadfile(L, name) || docall(L, 0, 1);
- return report(L, status);
-}
-
-static int dostring(lua_State *L, const char *s, const char *name)
-{
- int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1);
- return report(L, status);
-}
-
-static int dolibrary(lua_State *L, const char *name)
-{
- lua_getglobal(L, "require");
- lua_pushstring(L, name);
- return report(L, docall(L, 1, 1));
-}
-
-static void write_prompt(lua_State *L, int firstline)
-{
- const char *p;
- lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2");
- p = lua_tostring(L, -1);
- if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2;
- fputs(p, stdout);
- fflush(stdout);
- lua_pop(L, 1); /* remove global */
-}
-
-static int incomplete(lua_State *L, int status)
-{
- if (status == LUA_ERRSYNTAX) {
- size_t lmsg;
- const char *msg = lua_tolstring(L, -1, &lmsg);
- const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1);
- if (strstr(msg, LUA_QL("<eof>")) == tp) {
- lua_pop(L, 1);
- return 1;
- }
- }
- return 0; /* else... */
-}
-
-static int pushline(lua_State *L, int firstline)
-{
- char buf[LUA_MAXINPUT];
- write_prompt(L, firstline);
- if (fgets(buf, LUA_MAXINPUT, stdin)) {
- size_t len = strlen(buf);
- if (len > 0 && buf[len-1] == '\n')
- buf[len-1] = '\0';
- if (firstline && buf[0] == '=')
- lua_pushfstring(L, "return %s", buf+1);
- else
- lua_pushstring(L, buf);
- return 1;
- }
- return 0;
-}
-
-static int loadline(lua_State *L)
-{
- int status;
- lua_settop(L, 0);
- if (!pushline(L, 1))
- return -1; /* no input */
- for (;;) { /* repeat until gets a complete line */
- status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin");
- if (!incomplete(L, status)) break; /* cannot try to add lines? */
- if (!pushline(L, 0)) /* no more input? */
- return -1;
- lua_pushliteral(L, "\n"); /* add a new line... */
- lua_insert(L, -2); /* ...between the two lines */
- lua_concat(L, 3); /* join them */
- }
- lua_remove(L, 1); /* remove line */
- return status;
-}
-
-static void dotty(lua_State *L)
-{
- int status;
- const char *oldprogname = progname;
- progname = NULL;
- while ((status = loadline(L)) != -1) {
- if (status == 0) status = docall(L, 0, 0);
- report(L, status);
- if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */
- lua_getglobal(L, "print");
- lua_insert(L, 1);
- if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
- l_message(progname,
- lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)",
- lua_tostring(L, -1)));
- }
- }
- lua_settop(L, 0); /* clear stack */
- fputs("\n", stdout);
- fflush(stdout);
- progname = oldprogname;
-}
-
-static int handle_script(lua_State *L, char **argv, int n)
-{
- int status;
- const char *fname;
- int narg = getargs(L, argv, n); /* collect arguments */
- lua_setglobal(L, "arg");
- fname = argv[n];
- if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0)
- fname = NULL; /* stdin */
- status = luaL_loadfile(L, fname);
- lua_insert(L, -(narg+1));
- if (status == 0)
- status = docall(L, narg, 0);
- else
- lua_pop(L, narg);
- return report(L, status);
-}
-
-/* Load add-on module. */
-static int loadjitmodule(lua_State *L)
-{
- lua_getglobal(L, "require");
- lua_pushliteral(L, "jit.");
- lua_pushvalue(L, -3);
- lua_concat(L, 2);
- if (lua_pcall(L, 1, 1, 0)) {
- const char *msg = lua_tostring(L, -1);
- if (msg && !strncmp(msg, "module ", 7))
- goto nomodule;
- return report(L, 1);
- }
- lua_getfield(L, -1, "start");
- if (lua_isnil(L, -1)) {
- nomodule:
- l_message(progname,
- "unknown luaJIT command or jit.* modules not installed");
- return 1;
- }
- lua_remove(L, -2); /* Drop module table. */
- return 0;
-}
-
-/* Run command with options. */
-static int runcmdopt(lua_State *L, const char *opt)
-{
- int narg = 0;
- if (opt && *opt) {
- for (;;) { /* Split arguments. */
- const char *p = strchr(opt, ',');
- narg++;
- if (!p) break;
- if (p == opt)
- lua_pushnil(L);
- else
- lua_pushlstring(L, opt, (size_t)(p - opt));
- opt = p + 1;
- }
- if (*opt)
- lua_pushstring(L, opt);
- else
- lua_pushnil(L);
- }
- return report(L, lua_pcall(L, narg, 0, 0));
-}
-
-/* JIT engine control command: try jit library first or load add-on module. */
-static int dojitcmd(lua_State *L, const char *cmd)
-{
- const char *opt = strchr(cmd, '=');
- lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd));
- lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
- lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
- lua_remove(L, -2);
- lua_pushvalue(L, -2);
- lua_gettable(L, -2); /* Lookup library function. */
- if (!lua_isfunction(L, -1)) {
- lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */
- if (loadjitmodule(L))
- return 1;
- } else {
- lua_remove(L, -2); /* Drop jit.* table. */
- }
- lua_remove(L, -2); /* Drop module name. */
- return runcmdopt(L, opt ? opt+1 : opt);
-}
-
-/* Optimization flags. */
-static int dojitopt(lua_State *L, const char *opt)
-{
- lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
- lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */
- lua_remove(L, -2);
- lua_getfield(L, -1, "start");
- lua_remove(L, -2);
- return runcmdopt(L, opt);
-}
-
-/* Save or list bytecode. */
-static int dobytecode(lua_State *L, char **argv)
-{
- int narg = 0;
- lua_pushliteral(L, "bcsave");
- if (loadjitmodule(L))
- return 1;
- if (argv[0][2]) {
- narg++;
- argv[0][1] = '-';
- lua_pushstring(L, argv[0]+1);
- }
- for (argv++; *argv != NULL; narg++, argv++)
- lua_pushstring(L, *argv);
- return report(L, lua_pcall(L, narg, 0, 0));
-}
-
-/* check that argument has no extra characters at the end */
-#define notail(x) {if ((x)[2] != '\0') return -1;}
-
-#define FLAGS_INTERACTIVE 1
-#define FLAGS_VERSION 2
-#define FLAGS_EXEC 4
-#define FLAGS_OPTION 8
-#define FLAGS_NOENV 16
-
-static int collectargs(char **argv, int *flags)
-{
- int i;
- for (i = 1; argv[i] != NULL; i++) {
- if (argv[i][0] != '-') /* Not an option? */
- return i;
- switch (argv[i][1]) { /* Check option. */
- case '-':
- notail(argv[i]);
- return (argv[i+1] != NULL ? i+1 : 0);
- case '\0':
- return i;
- case 'i':
- notail(argv[i]);
- *flags |= FLAGS_INTERACTIVE;
- /* fallthrough */
- case 'v':
- notail(argv[i]);
- *flags |= FLAGS_VERSION;
- break;
- case 'e':
- *flags |= FLAGS_EXEC;
- case 'j': /* LuaJIT extension */
- case 'l':
- *flags |= FLAGS_OPTION;
- if (argv[i][2] == '\0') {
- i++;
- if (argv[i] == NULL) return -1;
- }
- break;
- case 'O': break; /* LuaJIT extension */
- case 'b': /* LuaJIT extension */
- if (*flags) return -1;
- *flags |= FLAGS_EXEC;
- return 0;
- case 'E':
- *flags |= FLAGS_NOENV;
- break;
- default: return -1; /* invalid option */
- }
- }
- return 0;
-}
-
-static int runargs(lua_State *L, char **argv, int n)
-{
- int i;
- for (i = 1; i < n; i++) {
- if (argv[i] == NULL) continue;
- lua_assert(argv[i][0] == '-');
- switch (argv[i][1]) { /* option */
- case 'e': {
- const char *chunk = argv[i] + 2;
- if (*chunk == '\0') chunk = argv[++i];
- lua_assert(chunk != NULL);
- if (dostring(L, chunk, "=(command line)") != 0)
- return 1;
- break;
- }
- case 'l': {
- const char *filename = argv[i] + 2;
- if (*filename == '\0') filename = argv[++i];
- lua_assert(filename != NULL);
- if (dolibrary(L, filename))
- return 1; /* stop if file fails */
- break;
- }
- case 'j': { /* LuaJIT extension */
- const char *cmd = argv[i] + 2;
- if (*cmd == '\0') cmd = argv[++i];
- lua_assert(cmd != NULL);
- if (dojitcmd(L, cmd))
- return 1;
- break;
- }
- case 'O': /* LuaJIT extension */
- if (dojitopt(L, argv[i] + 2))
- return 1;
- break;
- case 'b': /* LuaJIT extension */
- return dobytecode(L, argv+i);
- default: break;
- }
- }
- return 0;
-}
-
-static int handle_luainit(lua_State *L)
-{
-#if LJ_TARGET_CONSOLE
- const char *init = NULL;
-#else
- const char *init = getenv(LUA_INIT);
-#endif
- if (init == NULL)
- return 0; /* status OK */
- else if (init[0] == '@')
- return dofile(L, init+1);
- else
- return dostring(L, init, "=" LUA_INIT);
-}
-
-static struct Smain {
- char **argv;
- int argc;
- int status;
-} smain;
-
-static int pmain(lua_State *L)
-{
- struct Smain *s = &smain;
- char **argv = s->argv;
- int script;
- int flags = 0;
- globalL = L;
- if (argv[0] && argv[0][0]) progname = argv[0];
- LUAJIT_VERSION_SYM(); /* linker-enforced version check */
- script = collectargs(argv, &flags);
- if (script < 0) { /* invalid args? */
- print_usage();
- s->status = 1;
- return 0;
- }
- if ((flags & FLAGS_NOENV)) {
- lua_pushboolean(L, 1);
- lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
- }
- lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */
- luaL_openlibs(L); /* open libraries */
- lua_gc(L, LUA_GCRESTART, -1);
- if (!(flags & FLAGS_NOENV)) {
- s->status = handle_luainit(L);
- if (s->status != 0) return 0;
- }
- if ((flags & FLAGS_VERSION)) print_version();
- s->status = runargs(L, argv, (script > 0) ? script : s->argc);
- if (s->status != 0) return 0;
- if (script) {
- s->status = handle_script(L, argv, script);
- if (s->status != 0) return 0;
- }
- if ((flags & FLAGS_INTERACTIVE)) {
- print_jit_status(L);
- dotty(L);
- } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) {
- if (lua_stdin_is_tty()) {
- print_version();
- print_jit_status(L);
- dotty(L);
- } else {
- dofile(L, NULL); /* executes stdin as a file */
- }
- }
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- int status;
- lua_State *L = lua_open(); /* create state */
- if (L == NULL) {
- l_message(argv[0], "cannot create state: not enough memory");
- return EXIT_FAILURE;
- }
- smain.argc = argc;
- smain.argv = argv;
- status = lua_cpcall(L, pmain, NULL);
- report(L, status);
- lua_close(L);
- return (status || smain.status) ? EXIT_FAILURE : EXIT_SUCCESS;
-}
-
+/*
+** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define luajit_c
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "luajit.h"
+
+#include "lj_arch.h"
+
+#if LJ_TARGET_POSIX
+#include <unistd.h>
+#define lua_stdin_is_tty() isatty(0)
+#elif LJ_TARGET_WINDOWS
+#include <io.h>
+#ifdef __BORLANDC__
+#define lua_stdin_is_tty() isatty(_fileno(stdin))
+#else
+#define lua_stdin_is_tty() _isatty(_fileno(stdin))
+#endif
+#else
+#define lua_stdin_is_tty() 1
+#endif
+
+#if !LJ_TARGET_CONSOLE
+#include <signal.h>
+#endif
+
+static lua_State *globalL = NULL;
+static const char *progname = LUA_PROGNAME;
+
+#if !LJ_TARGET_CONSOLE
+static void lstop(lua_State *L, lua_Debug *ar)
+{
+ (void)ar; /* unused arg. */
+ lua_sethook(L, NULL, 0, 0);
+ /* Avoid luaL_error -- a C hook doesn't add an extra frame. */
+ luaL_where(L, 0);
+ lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1));
+ lua_error(L);
+}
+
+static void laction(int i)
+{
+ signal(i, SIG_DFL); /* if another SIGINT happens before lstop,
+ terminate process (default action) */
+ lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
+}
+#endif
+
+static void print_usage(void)
+{
+ fprintf(stderr,
+ "usage: %s [options]... [script [args]...].\n"
+ "Available options are:\n"
+ " -e chunk Execute string " LUA_QL("chunk") ".\n"
+ " -l name Require library " LUA_QL("name") ".\n"
+ " -b ... Save or list bytecode.\n"
+ " -j cmd Perform LuaJIT control command.\n"
+ " -O[opt] Control LuaJIT optimizations.\n"
+ " -i Enter interactive mode after executing " LUA_QL("script") ".\n"
+ " -v Show version information.\n"
+ " -E Ignore environment variables.\n"
+ " -- Stop handling options.\n"
+ " - Execute stdin and stop handling options.\n"
+ ,
+ progname);
+ fflush(stderr);
+}
+
+static void l_message(const char *pname, const char *msg)
+{
+ if (pname) fprintf(stderr, "%s: ", pname);
+ fprintf(stderr, "%s\n", msg);
+ fflush(stderr);
+}
+
+static int report(lua_State *L, int status)
+{
+ if (status && !lua_isnil(L, -1)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg == NULL) msg = "(error object is not a string)";
+ l_message(progname, msg);
+ lua_pop(L, 1);
+ }
+ return status;
+}
+
+static int traceback(lua_State *L)
+{
+ if (!lua_isstring(L, 1)) { /* Non-string error object? Try metamethod. */
+ if (lua_isnoneornil(L, 1) ||
+ !luaL_callmeta(L, 1, "__tostring") ||
+ !lua_isstring(L, -1))
+ return 1; /* Return non-string error object. */
+ lua_remove(L, 1); /* Replace object by result of __tostring metamethod. */
+ }
+ luaL_traceback(L, L, lua_tostring(L, 1), 1);
+ return 1;
+}
+
+static int docall(lua_State *L, int narg, int clear)
+{
+ int status;
+ int base = lua_gettop(L) - narg; /* function index */
+ lua_pushcfunction(L, traceback); /* push traceback function */
+ lua_insert(L, base); /* put it under chunk and args */
+#if !LJ_TARGET_CONSOLE
+ signal(SIGINT, laction);
+#endif
+ status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base);
+#if !LJ_TARGET_CONSOLE
+ signal(SIGINT, SIG_DFL);
+#endif
+ lua_remove(L, base); /* remove traceback function */
+ /* force a complete garbage collection in case of errors */
+ if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0);
+ return status;
+}
+
+static void print_version(void)
+{
+ fputs(LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n", stdout);
+}
+
+static void print_jit_status(lua_State *L)
+{
+ int n;
+ const char *s;
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
+ lua_remove(L, -2);
+ lua_getfield(L, -1, "status");
+ lua_remove(L, -2);
+ n = lua_gettop(L);
+ lua_call(L, 0, LUA_MULTRET);
+ fputs(lua_toboolean(L, n) ? "JIT: ON" : "JIT: OFF", stdout);
+ for (n++; (s = lua_tostring(L, n)); n++) {
+ putc(' ', stdout);
+ fputs(s, stdout);
+ }
+ putc('\n', stdout);
+}
+
+static int getargs(lua_State *L, char **argv, int n)
+{
+ int narg;
+ int i;
+ int argc = 0;
+ while (argv[argc]) argc++; /* count total number of arguments */
+ narg = argc - (n + 1); /* number of arguments to the script */
+ luaL_checkstack(L, narg + 3, "too many arguments to script");
+ for (i = n+1; i < argc; i++)
+ lua_pushstring(L, argv[i]);
+ lua_createtable(L, narg, n + 1);
+ for (i = 0; i < argc; i++) {
+ lua_pushstring(L, argv[i]);
+ lua_rawseti(L, -2, i - n);
+ }
+ return narg;
+}
+
+static int dofile(lua_State *L, const char *name)
+{
+ int status = luaL_loadfile(L, name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+static int dostring(lua_State *L, const char *s, const char *name)
+{
+ int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+static int dolibrary(lua_State *L, const char *name)
+{
+ lua_getglobal(L, "require");
+ lua_pushstring(L, name);
+ return report(L, docall(L, 1, 1));
+}
+
+static void write_prompt(lua_State *L, int firstline)
+{
+ const char *p;
+ lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2");
+ p = lua_tostring(L, -1);
+ if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2;
+ fputs(p, stdout);
+ fflush(stdout);
+ lua_pop(L, 1); /* remove global */
+}
+
+static int incomplete(lua_State *L, int status)
+{
+ if (status == LUA_ERRSYNTAX) {
+ size_t lmsg;
+ const char *msg = lua_tolstring(L, -1, &lmsg);
+ const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1);
+ if (strstr(msg, LUA_QL("<eof>")) == tp) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ }
+ return 0; /* else... */
+}
+
+static int pushline(lua_State *L, int firstline)
+{
+ char buf[LUA_MAXINPUT];
+ write_prompt(L, firstline);
+ if (fgets(buf, LUA_MAXINPUT, stdin)) {
+ size_t len = strlen(buf);
+ if (len > 0 && buf[len-1] == '\n')
+ buf[len-1] = '\0';
+ if (firstline && buf[0] == '=')
+ lua_pushfstring(L, "return %s", buf+1);
+ else
+ lua_pushstring(L, buf);
+ return 1;
+ }
+ return 0;
+}
+
+static int loadline(lua_State *L)
+{
+ int status;
+ lua_settop(L, 0);
+ if (!pushline(L, 1))
+ return -1; /* no input */
+ for (;;) { /* repeat until it gets a complete line */
+ status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin");
+ if (!incomplete(L, status)) break; /* cannot try to add lines? */
+ if (!pushline(L, 0)) /* no more input? */
+ return -1;
+ lua_pushliteral(L, "\n"); /* add a new line... */
+ lua_insert(L, -2); /* ...between the two lines */
+ lua_concat(L, 3); /* join them */
+ }
+ lua_remove(L, 1); /* remove line */
+ return status;
+}
+
+static void dotty(lua_State *L)
+{
+ int status;
+ const char *oldprogname = progname;
+ progname = NULL;
+ while ((status = loadline(L)) != -1) {
+ if (status == 0) status = docall(L, 0, 0);
+ report(L, status);
+ if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */
+ lua_getglobal(L, "print");
+ lua_insert(L, 1);
+ if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
+ l_message(progname,
+ lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)",
+ lua_tostring(L, -1)));
+ }
+ }
+ lua_settop(L, 0); /* clear stack */
+ fputs("\n", stdout);
+ fflush(stdout);
+ progname = oldprogname;
+}
+
+static int handle_script(lua_State *L, char **argv, int n)
+{
+ int status;
+ const char *fname;
+ int narg = getargs(L, argv, n); /* collect arguments */
+ lua_setglobal(L, "arg");
+ fname = argv[n];
+ if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0)
+ fname = NULL; /* stdin */
+ status = luaL_loadfile(L, fname);
+ lua_insert(L, -(narg+1));
+ if (status == 0)
+ status = docall(L, narg, 0);
+ else
+ lua_pop(L, narg);
+ return report(L, status);
+}
+
+/* Load add-on module. */
+static int loadjitmodule(lua_State *L)
+{
+ lua_getglobal(L, "require");
+ lua_pushliteral(L, "jit.");
+ lua_pushvalue(L, -3);
+ lua_concat(L, 2);
+ if (lua_pcall(L, 1, 1, 0)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg && !strncmp(msg, "module ", 7)) {
+ err:
+ l_message(progname,
+ "unknown luaJIT command or jit.* modules not installed");
+ return 1;
+ } else {
+ return report(L, 1);
+ }
+ }
+ lua_getfield(L, -1, "start");
+ if (lua_isnil(L, -1)) goto err;
+ lua_remove(L, -2); /* Drop module table. */
+ return 0;
+}
+
+/* Run command with options. */
+static int runcmdopt(lua_State *L, const char *opt)
+{
+ int narg = 0;
+ if (opt && *opt) {
+ for (;;) { /* Split arguments. */
+ const char *p = strchr(opt, ',');
+ narg++;
+ if (!p) break;
+ if (p == opt)
+ lua_pushnil(L);
+ else
+ lua_pushlstring(L, opt, (size_t)(p - opt));
+ opt = p + 1;
+ }
+ if (*opt)
+ lua_pushstring(L, opt);
+ else
+ lua_pushnil(L);
+ }
+ return report(L, lua_pcall(L, narg, 0, 0));
+}
+
+/* JIT engine control command: try jit library first or load add-on module. */
+static int dojitcmd(lua_State *L, const char *cmd)
+{
+ const char *opt = strchr(cmd, '=');
+ lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd));
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
+ lua_remove(L, -2);
+ lua_pushvalue(L, -2);
+ lua_gettable(L, -2); /* Lookup library function. */
+ if (!lua_isfunction(L, -1)) {
+ lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */
+ if (loadjitmodule(L))
+ return 1;
+ } else {
+ lua_remove(L, -2); /* Drop jit.* table. */
+ }
+ lua_remove(L, -2); /* Drop module name. */
+ return runcmdopt(L, opt ? opt+1 : opt);
+}
+
+/* Optimization flags. */
+static int dojitopt(lua_State *L, const char *opt)
+{
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */
+ lua_remove(L, -2);
+ lua_getfield(L, -1, "start");
+ lua_remove(L, -2);
+ return runcmdopt(L, opt);
+}
+
+/* Save or list bytecode. */
+static int dobytecode(lua_State *L, char **argv)
+{
+ int narg = 0;
+ lua_pushliteral(L, "bcsave");
+ if (loadjitmodule(L))
+ return 1;
+ if (argv[0][2]) {
+ narg++;
+ argv[0][1] = '-';
+ lua_pushstring(L, argv[0]+1);
+ }
+ for (argv++; *argv != NULL; narg++, argv++)
+ lua_pushstring(L, *argv);
+ return report(L, lua_pcall(L, narg, 0, 0));
+}
+
+/* check that argument has no extra characters at the end */
+#define notail(x) {if ((x)[2] != '\0') return -1;}
+
+#define FLAGS_INTERACTIVE 1
+#define FLAGS_VERSION 2
+#define FLAGS_EXEC 4
+#define FLAGS_OPTION 8
+#define FLAGS_NOENV 16
+
+static int collectargs(char **argv, int *flags)
+{
+ int i;
+ for (i = 1; argv[i] != NULL; i++) {
+ if (argv[i][0] != '-') /* Not an option? */
+ return i;
+ switch (argv[i][1]) { /* Check option. */
+ case '-':
+ notail(argv[i]);
+ return (argv[i+1] != NULL ? i+1 : 0);
+ case '\0':
+ return i;
+ case 'i':
+ notail(argv[i]);
+ *flags |= FLAGS_INTERACTIVE;
+ /* fallthrough */
+ case 'v':
+ notail(argv[i]);
+ *flags |= FLAGS_VERSION;
+ break;
+ case 'e':
+ *flags |= FLAGS_EXEC;
+ case 'j': /* LuaJIT extension */
+ case 'l':
+ *flags |= FLAGS_OPTION;
+ if (argv[i][2] == '\0') {
+ i++;
+ if (argv[i] == NULL) return -1;
+ }
+ break;
+ case 'O': break; /* LuaJIT extension */
+ case 'b': /* LuaJIT extension */
+ if (*flags) return -1;
+ *flags |= FLAGS_EXEC;
+ return 0;
+ case 'E':
+ *flags |= FLAGS_NOENV;
+ break;
+ default: return -1; /* invalid option */
+ }
+ }
+ return 0;
+}
+
+static int runargs(lua_State *L, char **argv, int n)
+{
+ int i;
+ for (i = 1; i < n; i++) {
+ if (argv[i] == NULL) continue;
+ lua_assert(argv[i][0] == '-');
+ switch (argv[i][1]) { /* option */
+ case 'e': {
+ const char *chunk = argv[i] + 2;
+ if (*chunk == '\0') chunk = argv[++i];
+ lua_assert(chunk != NULL);
+ if (dostring(L, chunk, "=(command line)") != 0)
+ return 1;
+ break;
+ }
+ case 'l': {
+ const char *filename = argv[i] + 2;
+ if (*filename == '\0') filename = argv[++i];
+ lua_assert(filename != NULL);
+ if (dolibrary(L, filename))
+ return 1; /* stop if file fails */
+ break;
+ }
+ case 'j': { /* LuaJIT extension */
+ const char *cmd = argv[i] + 2;
+ if (*cmd == '\0') cmd = argv[++i];
+ lua_assert(cmd != NULL);
+ if (dojitcmd(L, cmd))
+ return 1;
+ break;
+ }
+ case 'O': /* LuaJIT extension */
+ if (dojitopt(L, argv[i] + 2))
+ return 1;
+ break;
+ case 'b': /* LuaJIT extension */
+ return dobytecode(L, argv+i);
+ default: break;
+ }
+ }
+ return 0;
+}
+
+static int handle_luainit(lua_State *L)
+{
+#if LJ_TARGET_CONSOLE
+ const char *init = NULL;
+#else
+ const char *init = getenv(LUA_INIT);
+#endif
+ if (init == NULL)
+ return 0; /* status OK */
+ else if (init[0] == '@')
+ return dofile(L, init+1);
+ else
+ return dostring(L, init, "=" LUA_INIT);
+}
+
+static struct Smain {
+ char **argv;
+ int argc;
+ int status;
+} smain;
+
+static int pmain(lua_State *L)
+{
+ struct Smain *s = &smain;
+ char **argv = s->argv;
+ int script;
+ int flags = 0;
+ globalL = L;
+ if (argv[0] && argv[0][0]) progname = argv[0];
+ LUAJIT_VERSION_SYM(); /* linker-enforced version check */
+ script = collectargs(argv, &flags);
+ if (script < 0) { /* invalid args? */
+ print_usage();
+ s->status = 1;
+ return 0;
+ }
+ if ((flags & FLAGS_NOENV)) {
+ lua_pushboolean(L, 1);
+ lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
+ }
+ lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */
+ luaL_openlibs(L); /* open libraries */
+ lua_gc(L, LUA_GCRESTART, -1);
+ if (!(flags & FLAGS_NOENV)) {
+ s->status = handle_luainit(L);
+ if (s->status != 0) return 0;
+ }
+ if ((flags & FLAGS_VERSION)) print_version();
+ s->status = runargs(L, argv, (script > 0) ? script : s->argc);
+ if (s->status != 0) return 0;
+ if (script) {
+ s->status = handle_script(L, argv, script);
+ if (s->status != 0) return 0;
+ }
+ if ((flags & FLAGS_INTERACTIVE)) {
+ print_jit_status(L);
+ dotty(L);
+ } else if (script == 0 && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) {
+ if (lua_stdin_is_tty()) {
+ print_version();
+ print_jit_status(L);
+ dotty(L);
+ } else {
+ dofile(L, NULL); /* executes stdin as a file */
+ }
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int status;
+ lua_State *L = lua_open(); /* create state */
+ if (L == NULL) {
+ l_message(argv[0], "cannot create state: not enough memory");
+ return EXIT_FAILURE;
+ }
+ smain.argc = argc;
+ smain.argv = argv;
+ status = lua_cpcall(L, pmain, NULL);
+ report(L, status);
+ lua_close(L);
+ return (status || smain.status) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/3rdparty/lua/src/luajit.h b/3rdparty/lua/src/luajit.h
index 215fbfb..f33b64c 100644
--- a/3rdparty/lua/src/luajit.h
+++ b/3rdparty/lua/src/luajit.h
@@ -1,70 +1,70 @@
-/*
-** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
-**
-** Copyright (C) 2005-2015 Mike Pall. All rights reserved.
-**
-** Permission is hereby granted, free of charge, to any person obtaining
-** a copy of this software and associated documentation files (the
-** "Software"), to deal in the Software without restriction, including
-** without limitation the rights to use, copy, modify, merge, publish,
-** distribute, sublicense, and/or sell copies of the Software, and to
-** permit persons to whom the Software is furnished to do so, subject to
-** the following conditions:
-**
-** The above copyright notice and this permission notice shall be
-** included in all copies or substantial portions of the Software.
-**
-** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-**
-** [ MIT license: http://www.opensource.org/licenses/mit-license.php ]
-*/
-
-#ifndef _LUAJIT_H
-#define _LUAJIT_H
-
-#include "lua.h"
-
-#define LUAJIT_VERSION "LuaJIT 2.0.4"
-#define LUAJIT_VERSION_NUM 20004 /* Version 2.0.4 = 02.00.04. */
-#define LUAJIT_VERSION_SYM luaJIT_version_2_0_4
-#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2015 Mike Pall"
-#define LUAJIT_URL "http://luajit.org/"
-
-/* Modes for luaJIT_setmode. */
-#define LUAJIT_MODE_MASK 0x00ff
-
-enum {
- LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */
- LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */
-
- LUAJIT_MODE_FUNC, /* Change mode for a function. */
- LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */
- LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */
-
- LUAJIT_MODE_TRACE, /* Flush a compiled trace. */
-
- LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */
-
- LUAJIT_MODE_MAX
-};
-
-/* Flags or'ed in to the mode. */
-#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */
-#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */
-#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */
-
-/* LuaJIT public C API. */
-
-/* Control the JIT engine. */
-LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
-
-/* Enforce (dynamic) linker error for version mismatches. Call from main. */
-LUA_API void LUAJIT_VERSION_SYM(void);
-
-#endif
+/*
+** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
+**
+** Copyright (C) 2005-2013 Mike Pall. All rights reserved.
+**
+** Permission is hereby granted, free of charge, to any person obtaining
+** a copy of this software and associated documentation files (the
+** "Software"), to deal in the Software without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Software, and to
+** permit persons to whom the Software is furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be
+** included in all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**
+** [ MIT license: http://www.opensource.org/licenses/mit-license.php ]
+*/
+
+#ifndef _LUAJIT_H
+#define _LUAJIT_H
+
+#include "lua.h"
+
+#define LUAJIT_VERSION "LuaJIT 2.0.2"
+#define LUAJIT_VERSION_NUM 20002 /* Version 2.0.2 = 02.00.02. */
+#define LUAJIT_VERSION_SYM luaJIT_version_2_0_2
+#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2013 Mike Pall"
+#define LUAJIT_URL "http://luajit.org/"
+
+/* Modes for luaJIT_setmode. */
+#define LUAJIT_MODE_MASK 0x00ff
+
+enum {
+ LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */
+ LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */
+
+ LUAJIT_MODE_FUNC, /* Change mode for a function. */
+ LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */
+ LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */
+
+ LUAJIT_MODE_TRACE, /* Flush a compiled trace. */
+
+ LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */
+
+ LUAJIT_MODE_MAX
+};
+
+/* Flags or'ed in to the mode. */
+#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */
+#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */
+#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */
+
+/* LuaJIT public C API. */
+
+/* Control the JIT engine. */
+LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
+
+/* Enforce (dynamic) linker error for version mismatches. Call from main. */
+LUA_API void LUAJIT_VERSION_SYM(void);
+
+#endif
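luaJIT_setmode() is the C-level counterpart of the jit.* Lua module. A small host-side sketch (passing idx 0 for an engine-wide mode change) of toggling the compiler for the whole VM, equivalent to jit.on()/jit.off():

    #include "lua.h"
    #include "luajit.h"

    /* Returns 1 on success, 0 if the mode change failed. */
    static int set_jit_enabled(lua_State *L, int enable)
    {
      return luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE |
                            (enable ? LUAJIT_MODE_ON : LUAJIT_MODE_OFF));
    }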
diff --git a/3rdparty/lua/src/lualib.h b/3rdparty/lua/src/lualib.h
index e380911..18c6234 100644
--- a/3rdparty/lua/src/lualib.h
+++ b/3rdparty/lua/src/lualib.h
@@ -1,43 +1,43 @@
-/*
-** Standard library header.
-** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LUALIB_H
-#define _LUALIB_H
-
-#include "lua.h"
-
-#define LUA_FILEHANDLE "FILE*"
-
-#define LUA_COLIBNAME "coroutine"
-#define LUA_MATHLIBNAME "math"
-#define LUA_STRLIBNAME "string"
-#define LUA_TABLIBNAME "table"
-#define LUA_IOLIBNAME "io"
-#define LUA_OSLIBNAME "os"
-#define LUA_LOADLIBNAME "package"
-#define LUA_DBLIBNAME "debug"
-#define LUA_BITLIBNAME "bit"
-#define LUA_JITLIBNAME "jit"
-#define LUA_FFILIBNAME "ffi"
-
-LUALIB_API int luaopen_base(lua_State *L);
-LUALIB_API int luaopen_math(lua_State *L);
-LUALIB_API int luaopen_string(lua_State *L);
-LUALIB_API int luaopen_table(lua_State *L);
-LUALIB_API int luaopen_io(lua_State *L);
-LUALIB_API int luaopen_os(lua_State *L);
-LUALIB_API int luaopen_package(lua_State *L);
-LUALIB_API int luaopen_debug(lua_State *L);
-LUALIB_API int luaopen_bit(lua_State *L);
-LUALIB_API int luaopen_jit(lua_State *L);
-LUALIB_API int luaopen_ffi(lua_State *L);
-
-LUALIB_API void luaL_openlibs(lua_State *L);
-
-#ifndef lua_assert
-#define lua_assert(x) ((void)0)
-#endif
-
-#endif
+/*
+** Standard library header.
+** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LUALIB_H
+#define _LUALIB_H
+
+#include "lua.h"
+
+#define LUA_FILEHANDLE "FILE*"
+
+#define LUA_COLIBNAME "coroutine"
+#define LUA_MATHLIBNAME "math"
+#define LUA_STRLIBNAME "string"
+#define LUA_TABLIBNAME "table"
+#define LUA_IOLIBNAME "io"
+#define LUA_OSLIBNAME "os"
+#define LUA_LOADLIBNAME "package"
+#define LUA_DBLIBNAME "debug"
+#define LUA_BITLIBNAME "bit"
+#define LUA_JITLIBNAME "jit"
+#define LUA_FFILIBNAME "ffi"
+
+LUALIB_API int luaopen_base(lua_State *L);
+LUALIB_API int luaopen_math(lua_State *L);
+LUALIB_API int luaopen_string(lua_State *L);
+LUALIB_API int luaopen_table(lua_State *L);
+LUALIB_API int luaopen_io(lua_State *L);
+LUALIB_API int luaopen_os(lua_State *L);
+LUALIB_API int luaopen_package(lua_State *L);
+LUALIB_API int luaopen_debug(lua_State *L);
+LUALIB_API int luaopen_bit(lua_State *L);
+LUALIB_API int luaopen_jit(lua_State *L);
+LUALIB_API int luaopen_ffi(lua_State *L);
+
+LUALIB_API void luaL_openlibs(lua_State *L);
+
+#ifndef lua_assert
+#define lua_assert(x) ((void)0)
+#endif
+
+#endif
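The individual luaopen_* functions are normally pulled in all at once with luaL_openlibs(). A minimal embedding sketch (the chunk string is only an illustration):

    #include <stdio.h>
    #include "lua.h"
    #include "lauxlib.h"
    #include "lualib.h"

    int run_chunk(void)
    {
      lua_State *L = luaL_newstate();        /* what the lua_open() macro maps to */
      if (L == NULL) return -1;
      luaL_openlibs(L);                      /* base, string, table, io, os, jit, ffi, ... */
      if (luaL_dostring(L, "print(('hello'):upper())") != 0) {
        fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
        lua_pop(L, 1);                       /* remove the error message */
      }
      lua_close(L);
      return 0;
    }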
diff --git a/3rdparty/lua/src/msvcbuild.bat b/3rdparty/lua/src/msvcbuild.bat
index 4b50185..9160e0f 100644
--- a/3rdparty/lua/src/msvcbuild.bat
+++ b/3rdparty/lua/src/msvcbuild.bat
@@ -1,5 +1,5 @@
@rem Script to build LuaJIT with MSVC.
-@rem Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+@rem Copyright (C) 2005-2014 Mike Pall. See Copyright Notice in luajit.h
@rem
@rem Either open a "Visual Studio .NET Command Prompt"
@rem (Note that the Express Edition does not contain an x64 compiler)
diff --git a/3rdparty/lua/src/vm_arm.dasc b/3rdparty/lua/src/vm_arm.dasc
index a9120c5..114416a 100644
--- a/3rdparty/lua/src/vm_arm.dasc
+++ b/3rdparty/lua/src/vm_arm.dasc
@@ -1,4486 +1,4487 @@
-|// Low-level VM code for ARM CPUs.
-|// Bytecode interpreter, fast functions and helper functions.
-|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-|
-|.arch arm
-|.section code_op, code_sub
-|
-|.actionlist build_actionlist
-|.globals GLOB_
-|.globalnames globnames
-|.externnames extnames
-|
-|// Note: The ragged indentation of the instructions is intentional.
-|// The starting columns indicate data dependencies.
-|
-|//-----------------------------------------------------------------------
-|
-|// Fixed register assignments for the interpreter.
-|
-|// The following must be C callee-save.
-|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding.
-|.define KBASE, r5 // Constants of current Lua function.
-|.define PC, r6 // Next PC.
-|.define DISPATCH, r7 // Opcode dispatch table.
-|.define LREG, r8 // Register holding lua_State (also in SAVE_L).
-|
-|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+.
-|.define BASE, r9 // Base of current Lua stack frame.
-|
-|// The following temporaries are not saved across C calls, except for RA/RC.
-|.define RA, r10 // Callee-save.
-|.define RC, r11 // Callee-save.
-|.define RB, r12
-|.define OP, r12 // Overlaps RB, must not be lr.
-|.define INS, lr
-|
-|// Calling conventions. Also used as temporaries.
-|.define CARG1, r0
-|.define CARG2, r1
-|.define CARG3, r2
-|.define CARG4, r3
-|.define CARG12, r0 // For 1st soft-fp double.
-|.define CARG34, r2 // For 2nd soft-fp double.
-|
-|.define CRET1, r0
-|.define CRET2, r1
-|
-|// Stack layout while in interpreter. Must match with lj_frame.h.
-|.define SAVE_R4, [sp, #28]
-|.define CFRAME_SPACE, #28
-|.define SAVE_ERRF, [sp, #24]
-|.define SAVE_NRES, [sp, #20]
-|.define SAVE_CFRAME, [sp, #16]
-|.define SAVE_L, [sp, #12]
-|.define SAVE_PC, [sp, #8]
-|.define SAVE_MULTRES, [sp, #4]
-|.define ARG5, [sp]
-|
-|.define TMPDhi, [sp, #4]
-|.define TMPDlo, [sp]
-|.define TMPD, [sp]
-|.define TMPDp, sp
-|
-|.if FPU
-|.macro saveregs
-| push {r5, r6, r7, r8, r9, r10, r11, lr}
-| vpush {d8-d15}
-| sub sp, sp, CFRAME_SPACE+4
-| str r4, SAVE_R4
-|.endmacro
-|.macro restoreregs_ret
-| ldr r4, SAVE_R4
-| add sp, sp, CFRAME_SPACE+4
-| vpop {d8-d15}
-| pop {r5, r6, r7, r8, r9, r10, r11, pc}
-|.endmacro
-|.else
-|.macro saveregs
-| push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-| sub sp, sp, CFRAME_SPACE
-|.endmacro
-|.macro restoreregs_ret
-| add sp, sp, CFRAME_SPACE
-| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-|.endmacro
-|.endif
-|
-|// Type definitions. Some of these are only used for documentation.
-|.type L, lua_State, LREG
-|.type GL, global_State
-|.type TVALUE, TValue
-|.type GCOBJ, GCobj
-|.type STR, GCstr
-|.type TAB, GCtab
-|.type LFUNC, GCfuncL
-|.type CFUNC, GCfuncC
-|.type PROTO, GCproto
-|.type UPVAL, GCupval
-|.type NODE, Node
-|.type NARGS8, int
-|.type TRACE, GCtrace
-|
-|//-----------------------------------------------------------------------
-|
-|// Trap for not-yet-implemented parts.
-|.macro NYI; ud; .endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Access to frame relative to BASE.
-|.define FRAME_FUNC, #-8
-|.define FRAME_PC, #-4
-|
-|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro
-|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro
-|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro
-|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro
-|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro
-|
-|// Instruction fetch.
-|.macro ins_NEXT1
-| ldrb OP, [PC]
-|.endmacro
-|.macro ins_NEXT2
-| ldr INS, [PC], #4
-|.endmacro
-|// Instruction decode+dispatch.
-|.macro ins_NEXT3
-| ldr OP, [DISPATCH, OP, lsl #2]
-| decode_RA8 RA, INS
-| decode_RD RC, INS
-| bx OP
-|.endmacro
-|.macro ins_NEXT
-| ins_NEXT1
-| ins_NEXT2
-| ins_NEXT3
-|.endmacro
-|
-|// Instruction footer.
-|.if 1
-| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
-| .define ins_next, ins_NEXT
-| .define ins_next_, ins_NEXT
-| .define ins_next1, ins_NEXT1
-| .define ins_next2, ins_NEXT2
-| .define ins_next3, ins_NEXT3
-|.else
-| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
-| // Affects only certain kinds of benchmarks (and only with -j off).
-| .macro ins_next
-| b ->ins_next
-| .endmacro
-| .macro ins_next1
-| .endmacro
-| .macro ins_next2
-| .endmacro
-| .macro ins_next3
-| b ->ins_next
-| .endmacro
-| .macro ins_next_
-| ->ins_next:
-| ins_NEXT
-| .endmacro
-|.endif
-|
-|// Avoid register name substitution for field name.
-#define field_pc pc
-|
-|// Call decode and dispatch.
-|.macro ins_callt
-| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
-| ldr PC, LFUNC:CARG3->field_pc
-| ldrb OP, [PC] // STALL: load PC. early PC.
-| ldr INS, [PC], #4
-| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP.
-| decode_RA8 RA, INS
-| add RA, RA, BASE
-| bx OP
-|.endmacro
-|
-|.macro ins_call
-| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
-| str PC, [BASE, FRAME_PC]
-| ins_callt // STALL: locked PC.
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Macros to test operand types.
-|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro
-|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro
-|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro
-|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro
-|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro
-|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro
-|
-|// Assumes DISPATCH is relative to GL.
-#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
-#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
-|
-#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
-|
-|.macro hotcheck, delta
-| lsr CARG1, PC, #1
-| and CARG1, CARG1, #126
-| sub CARG1, CARG1, #-GG_DISP2HOT
-| ldrh CARG2, [DISPATCH, CARG1]
-| subs CARG2, CARG2, #delta
-| strh CARG2, [DISPATCH, CARG1]
-|.endmacro
-|
-|.macro hotloop
-| hotcheck HOTCOUNT_LOOP
-| blo ->vm_hotloop
-|.endmacro
-|
-|.macro hotcall
-| hotcheck HOTCOUNT_CALL
-| blo ->vm_hotcall
-|.endmacro
-|
-|// Set current VM state.
-|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro
-|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro
-|
-|// Move table write barrier back. Overwrites mark and tmp.
-|.macro barrierback, tab, mark, tmp
-| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
-| bic mark, mark, #LJ_GC_BLACK // black2gray(tab)
-| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
-| strb mark, tab->marked
-| str tmp, tab->gclist
-|.endmacro
-|
-|.macro .IOS, a, b
-|.if IOS
-| a, b
-|.endif
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-
-#if !LJ_DUALNUM
-#error "Only dual-number mode supported for ARM target"
-#endif
-
-/* Generate subroutines used by opcodes and other parts of the VM. */
-/* The .code_sub section should be last to help static branch prediction. */
-static void build_subroutines(BuildCtx *ctx)
-{
- |.code_sub
- |
- |//-----------------------------------------------------------------------
- |//-- Return handling ----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_returnp:
- | // See vm_return. Also: RB = previous base.
- | tst PC, #FRAME_P
- | beq ->cont_dispatch
- |
- | // Return from pcall or xpcall fast func.
- | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
- | mvn CARG2, #~LJ_TTRUE
- | mov BASE, RB
- | // Prepending may overwrite the pcall frame, so do it at the end.
- | str CARG2, [RA, FRAME_PC] // Prepend true to results.
- | sub RA, RA, #8
- |
- |->vm_returnc:
- | adds RC, RC, #8 // RC = (nresults+1)*8.
- | mov CRET1, #LUA_YIELD
- | beq ->vm_unwind_c_eh
- | str RC, SAVE_MULTRES
- | ands CARG1, PC, #FRAME_TYPE
- | beq ->BC_RET_Z // Handle regular return to Lua.
- |
- |->vm_return:
- | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
- | // CARG1 = PC & FRAME_TYPE
- | bic RB, PC, #FRAME_TYPEP
- | cmp CARG1, #FRAME_C
- | sub RB, BASE, RB // RB = previous base.
- | bne ->vm_returnp
- |
- | str RB, L->base
- | ldr KBASE, SAVE_NRES
- | mv_vmstate CARG4, C
- | sub BASE, BASE, #8
- | subs CARG3, RC, #8
- | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8
- | st_vmstate CARG4
- | beq >2
- |1:
- | subs CARG3, CARG3, #8
- | ldrd CARG12, [RA], #8
- | strd CARG12, [BASE], #8
- | bne <1
- |2:
- | cmp KBASE, RC // More/less results wanted?
- | bne >6
- |3:
- | str BASE, L->top // Store new top.
- |
- |->vm_leave_cp:
- | ldr RC, SAVE_CFRAME // Restore previous C frame.
- | mov CRET1, #0 // Ok return status for vm_pcall.
- | str RC, L->cframe
- |
- |->vm_leave_unw:
- | restoreregs_ret
- |
- |6:
- | blt >7 // Less results wanted?
- | // More results wanted. Check stack size and fill up results with nil.
- | ldr CARG3, L->maxstack
- | mvn CARG2, #~LJ_TNIL
- | cmp BASE, CARG3
- | bhs >8
- | str CARG2, [BASE, #4]
- | add RC, RC, #8
- | add BASE, BASE, #8
- | b <2
- |
- |7: // Less results wanted.
- | sub CARG1, RC, KBASE
- | cmp KBASE, #0 // LUA_MULTRET+1 case?
- | subne BASE, BASE, CARG1 // Either keep top or shrink it.
- | b <3
- |
- |8: // Corner case: need to grow stack for filling up results.
- | // This can happen if:
- | // - A C function grows the stack (a lot).
- | // - The GC shrinks the stack in between.
- | // - A return back from a lua_call() with (high) nresults adjustment.
- | str BASE, L->top // Save current top held in BASE (yes).
- | lsr CARG2, KBASE, #3
- | mov CARG1, L
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | ldr BASE, L->top // Need the (realloced) L->top in BASE.
- | b <2
- |
- |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
- | // (void *cframe, int errcode)
- | mov sp, CARG1
- | mov CRET1, CARG2
- |->vm_unwind_c_eh: // Landing pad for external unwinder.
- | ldr L, SAVE_L
- | mv_vmstate CARG4, C
- | ldr GL:CARG3, L->glref
- | str CARG4, GL:CARG3->vmstate
- | b ->vm_leave_unw
- |
- |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
- | // (void *cframe)
- | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
- | mov sp, CARG1
- |->vm_unwind_ff_eh: // Landing pad for external unwinder.
- | ldr L, SAVE_L
- | mov MASKR8, #255
- | mov RC, #16 // 2 results: false + error message.
- | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
- | ldr BASE, L->base
- | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
- | mvn CARG1, #~LJ_TFALSE
- | sub RA, BASE, #8 // Results start at BASE-8.
- | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
- | add DISPATCH, DISPATCH, #GG_G2DISP
- | mv_vmstate CARG2, INTERP
- | str CARG1, [BASE, #-4] // Prepend false to error message.
- | st_vmstate CARG2
- | b ->vm_returnc
- |
- |//-----------------------------------------------------------------------
- |//-- Grow stack for calls -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_growstack_c: // Grow stack for C function.
- | // CARG1 = L
- | mov CARG2, #LUA_MINSTACK
- | b >2
- |
- |->vm_growstack_l: // Grow stack for Lua function.
- | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
- | add RC, BASE, RC
- | sub RA, RA, BASE
- | mov CARG1, L
- | str BASE, L->base
- | add PC, PC, #4 // Must point after first instruction.
- | str RC, L->top
- | lsr CARG2, RA, #3
- |2:
- | // L->base = new base, L->top = top
- | str PC, SAVE_PC
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | ldr BASE, L->base
- | ldr RC, L->top
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
- | sub NARGS8:RC, RC, BASE
- | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
- | ins_callt // Just retry the call.
- |
- |//-----------------------------------------------------------------------
- |//-- Entry points into the assembler VM ---------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_resume: // Setup C frame and resume thread.
- | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
- | saveregs
- | mov L, CARG1
- | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table.
- | mov BASE, CARG2
- | add DISPATCH, DISPATCH, #GG_G2DISP
- | str L, SAVE_L
- | mov PC, #FRAME_CP
- | str CARG3, SAVE_NRES
- | add CARG2, sp, #CFRAME_RESUME
- | ldrb CARG1, L->status
- | str CARG3, SAVE_ERRF
- | str CARG2, L->cframe
- | str CARG3, SAVE_CFRAME
- | cmp CARG1, #0
- | str L, SAVE_PC // Any value outside of bytecode is ok.
- | beq >3
- |
- | // Resume after yield (like a return).
- | mov RA, BASE
- | ldr BASE, L->base
- | ldr CARG1, L->top
- | mov MASKR8, #255
- | strb CARG3, L->status
- | sub RC, CARG1, BASE
- | ldr PC, [BASE, FRAME_PC]
- | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
- | mv_vmstate CARG2, INTERP
- | add RC, RC, #8
- | ands CARG1, PC, #FRAME_TYPE
- | st_vmstate CARG2
- | str RC, SAVE_MULTRES
- | beq ->BC_RET_Z
- | b ->vm_return
- |
- |->vm_pcall: // Setup protected C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
- | saveregs
- | mov PC, #FRAME_CP
- | str CARG4, SAVE_ERRF
- | b >1
- |
- |->vm_call: // Setup C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1)
- | saveregs
- | mov PC, #FRAME_C
- |
- |1: // Entry point for vm_pcall above (PC = ftype).
- | ldr RC, L:CARG1->cframe
- | str CARG3, SAVE_NRES
- | mov L, CARG1
- | str CARG1, SAVE_L
- | mov BASE, CARG2
- | str sp, L->cframe // Add our C frame to cframe chain.
- | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
- | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | str RC, SAVE_CFRAME
- | add DISPATCH, DISPATCH, #GG_G2DISP
- |
- |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
- | ldr RB, L->base // RB = old base (for vmeta_call).
- | ldr CARG1, L->top
- | mov MASKR8, #255
- | add PC, PC, BASE
- | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
- | sub PC, PC, RB // PC = frame delta + frame type
- | mv_vmstate CARG2, INTERP
- | sub NARGS8:RC, CARG1, BASE
- | st_vmstate CARG2
- |
- |->vm_call_dispatch:
- | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
- | ldrd CARG34, [BASE, FRAME_FUNC]
- | checkfunc CARG4, ->vmeta_call
- |
- |->vm_call_dispatch_f:
- | ins_call
- | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
- |
- |->vm_cpcall: // Setup protected C frame, call C.
- | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
- | saveregs
- | mov L, CARG1
- | ldr RA, L:CARG1->stack
- | str CARG1, SAVE_L
- | ldr RB, L->top
- | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | ldr RC, L->cframe
- | sub RA, RA, RB // Compute -savestack(L, L->top).
- | str sp, L->cframe // Add our C frame to cframe chain.
- | mov RB, #0
- | str RA, SAVE_NRES // Neg. delta means cframe w/o frame.
- | str RB, SAVE_ERRF // No error function.
- | str RC, SAVE_CFRAME
- | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud)
- | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
- | movs BASE, CRET1
- | mov PC, #FRAME_CP
- | add DISPATCH, DISPATCH, #GG_G2DISP
- | bne <3 // Else continue with the call.
- | b ->vm_leave_cp // No base? Just remove C frame.
- |
- |//-----------------------------------------------------------------------
- |//-- Metamethod handling ------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |//-- Continuation dispatch ----------------------------------------------
- |
- |->cont_dispatch:
- | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
- | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
- | ldr CARG1, [BASE, #-16] // Get continuation.
- | mov CARG4, BASE
- | mov BASE, RB // Restore caller BASE.
- |.if FFI
- | cmp CARG1, #1
- |.endif
- | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC].
- | ldr CARG3, LFUNC:CARG3->field_pc
- | mvn INS, #~LJ_TNIL
- | add CARG2, RA, RC
- | str INS, [CARG2, #-4] // Ensure one valid arg.
- |.if FFI
- | bls >1
- |.endif
- | ldr KBASE, [CARG3, #PC2PROTO(k)]
- | // BASE = base, RA = resultptr, CARG4 = meta base
- | bx CARG1
- |
- |.if FFI
- |1:
- | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
- | // cont = 0: tailcall from C function.
- | sub CARG4, CARG4, #16
- | sub RC, CARG4, BASE
- | b ->vm_call_tail
- |.endif
- |
- |->cont_cat: // RA = resultptr, CARG4 = meta base
- | ldr INS, [PC, #-4]
- | sub CARG2, CARG4, #16
- | ldrd CARG34, [RA]
- | str BASE, L->base
- | decode_RB8 RC, INS
- | decode_RA8 RA, INS
- | add CARG1, BASE, RC
- | subs CARG1, CARG2, CARG1
- | strdne CARG34, [CARG2]
- | movne CARG3, CARG1
- | bne ->BC_CAT_Z
- | strd CARG34, [BASE, RA]
- | b ->cont_nop
- |
- |//-- Table indexing metamethods -----------------------------------------
- |
- |->vmeta_tgets1:
- | add CARG2, BASE, RB
- | b >2
- |
- |->vmeta_tgets:
- | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
- | mvn CARG4, #~LJ_TTAB
- | str TAB:RB, [CARG2]
- | str CARG4, [CARG2, #4]
- |2:
- | mvn CARG4, #~LJ_TSTR
- | str STR:RC, TMPDlo
- | str CARG4, TMPDhi
- | mov CARG3, TMPDp
- | b >1
- |
- |->vmeta_tgetb: // RC = index
- | decode_RB8 RB, INS
- | str RC, TMPDlo
- | mvn CARG4, #~LJ_TISNUM
- | add CARG2, BASE, RB
- | str CARG4, TMPDhi
- | mov CARG3, TMPDp
- | b >1
- |
- |->vmeta_tgetv:
- | add CARG2, BASE, RB
- | add CARG3, BASE, RC
- |1:
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
- | // Returns TValue * (finished) or NULL (metamethod).
- | .IOS ldr BASE, L->base
- | cmp CRET1, #0
- | beq >3
- | ldrd CARG34, [CRET1]
- | ins_next1
- | ins_next2
- | strd CARG34, [BASE, RA]
- | ins_next3
- |
- |3: // Call __index metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k
- | rsb CARG1, BASE, #FRAME_CONT
- | ldr BASE, L->top
- | mov NARGS8:RC, #16 // 2 args for func(t, k).
- | str PC, [BASE, #-12] // [cont|PC]
- | add PC, CARG1, BASE
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
- | b ->vm_call_dispatch_f
- |
- |//-----------------------------------------------------------------------
- |
- |->vmeta_tsets1:
- | add CARG2, BASE, RB
- | b >2
- |
- |->vmeta_tsets:
- | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
- | mvn CARG4, #~LJ_TTAB
- | str TAB:RB, [CARG2]
- | str CARG4, [CARG2, #4]
- |2:
- | mvn CARG4, #~LJ_TSTR
- | str STR:RC, TMPDlo
- | str CARG4, TMPDhi
- | mov CARG3, TMPDp
- | b >1
- |
- |->vmeta_tsetb: // RC = index
- | decode_RB8 RB, INS
- | str RC, TMPDlo
- | mvn CARG4, #~LJ_TISNUM
- | add CARG2, BASE, RB
- | str CARG4, TMPDhi
- | mov CARG3, TMPDp
- | b >1
- |
- |->vmeta_tsetv:
- | add CARG2, BASE, RB
- | add CARG3, BASE, RC
- |1:
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
- | // Returns TValue * (finished) or NULL (metamethod).
- | .IOS ldr BASE, L->base
- | cmp CRET1, #0
- | ldrd CARG34, [BASE, RA]
- | beq >3
- | ins_next1
- | // NOBARRIER: lj_meta_tset ensures the table is not black.
- | strd CARG34, [CRET1]
- | ins_next2
- | ins_next3
- |
- |3: // Call __newindex metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
- | rsb CARG1, BASE, #FRAME_CONT
- | ldr BASE, L->top
- | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
- | strd CARG34, [BASE, #16] // Copy value to third argument.
- | str PC, [BASE, #-12] // [cont|PC]
- | add PC, CARG1, BASE
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
- | b ->vm_call_dispatch_f
- |
- |//-- Comparison metamethods ---------------------------------------------
- |
- |->vmeta_comp:
- | mov CARG1, L
- | sub PC, PC, #4
- | mov CARG2, RA
- | str BASE, L->base
- | mov CARG3, RC
- | str PC, SAVE_PC
- | decode_OP CARG4, INS
- | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
- | // Returns 0/1 or TValue * (metamethod).
- |3:
- | .IOS ldr BASE, L->base
- | cmp CRET1, #1
- | bhi ->vmeta_binop
- |4:
- | ldrh RB, [PC, #2]
- | add PC, PC, #4
- | add RB, PC, RB, lsl #2
- | subhs PC, RB, #0x20000
- |->cont_nop:
- | ins_next
- |
- |->cont_ra: // RA = resultptr
- | ldr INS, [PC, #-4]
- | ldrd CARG12, [RA]
- | decode_RA8 CARG3, INS
- | strd CARG12, [BASE, CARG3]
- | b ->cont_nop
- |
- |->cont_condt: // RA = resultptr
- | ldr CARG2, [RA, #4]
- | mvn CARG1, #~LJ_TTRUE
- | cmp CARG1, CARG2 // Branch if result is true.
- | b <4
- |
- |->cont_condf: // RA = resultptr
- | ldr CARG2, [RA, #4]
- | checktp CARG2, LJ_TFALSE // Branch if result is false.
- | b <4
- |
- |->vmeta_equal:
- | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
- | sub PC, PC, #4
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |
- |->vmeta_equal_cd:
- |.if FFI
- | sub PC, PC, #4
- | str BASE, L->base
- | mov CARG1, L
- | mov CARG2, INS
- | str PC, SAVE_PC
- | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |.endif
- |
- |//-- Arithmetic metamethods ---------------------------------------------
- |
- |->vmeta_arith_vn:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG3, BASE, RB
- | add CARG4, KBASE, RC
- | b >1
- |
- |->vmeta_arith_nv:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG4, BASE, RB
- | add CARG3, KBASE, RC
- | b >1
- |
- |->vmeta_unm:
- | ldr INS, [PC, #-8]
- | sub PC, PC, #4
- | add CARG3, BASE, RC
- | add CARG4, BASE, RC
- | b >1
- |
- |->vmeta_arith_vv:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG3, BASE, RB
- | add CARG4, BASE, RC
- |1:
- | decode_OP OP, INS
- | add CARG2, BASE, RA
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | str OP, ARG5
- | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
- | // Returns NULL (finished) or TValue * (metamethod).
- | .IOS ldr BASE, L->base
- | cmp CRET1, #0
- | beq ->cont_nop
- |
- | // Call metamethod for binary op.
- |->vmeta_binop:
- | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
- | sub CARG2, CRET1, BASE
- | str PC, [CRET1, #-12] // [cont|PC]
- | add PC, CARG2, #FRAME_CONT
- | mov BASE, CRET1
- | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
- | b ->vm_call_dispatch
- |
- |->vmeta_len:
- | add CARG2, BASE, RC
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | bl extern lj_meta_len // (lua_State *L, TValue *o)
- | // Returns NULL (retry) or TValue * (metamethod base).
- | .IOS ldr BASE, L->base
-#if LJ_52
- | cmp CRET1, #0
- | bne ->vmeta_binop // Binop call for compatibility.
- | ldr TAB:CARG1, [BASE, RC]
- | b ->BC_LEN_Z
-#else
- | b ->vmeta_binop // Binop call for compatibility.
-#endif
- |
- |//-- Call metamethod ----------------------------------------------------
- |
- |->vmeta_call: // Resolve and call __call metamethod.
- | // RB = old base, BASE = new base, RC = nargs*8
- | mov CARG1, L
- | str RB, L->base // This is the callers base!
- | sub CARG2, BASE, #8
- | str PC, SAVE_PC
- | add CARG3, BASE, NARGS8:RC
- | .IOS mov RA, BASE
- | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- | .IOS mov BASE, RA
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
- | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
- | ins_call
- |
- |->vmeta_callt: // Resolve __call for BC_CALLT.
- | // BASE = old base, RA = new base, RC = nargs*8
- | mov CARG1, L
- | str BASE, L->base
- | sub CARG2, RA, #8
- | str PC, SAVE_PC
- | add CARG3, RA, NARGS8:RC
- | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- | .IOS ldr BASE, L->base
- | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here.
- | ldr PC, [BASE, FRAME_PC]
- | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
- | b ->BC_CALLT2_Z
- |
- |//-- Argument coercion for 'for' statement ------------------------------
- |
- |->vmeta_for:
- | mov CARG1, L
- | str BASE, L->base
- | mov CARG2, RA
- | str PC, SAVE_PC
- | bl extern lj_meta_for // (lua_State *L, TValue *base)
- | .IOS ldr BASE, L->base
- |.if JIT
- | ldrb OP, [PC, #-4]
- |.endif
- | ldr INS, [PC, #-4]
- |.if JIT
- | cmp OP, #BC_JFORI
- |.endif
- | decode_RA8 RA, INS
- | decode_RD RC, INS
- |.if JIT
- | beq =>BC_JFORI
- |.endif
- | b =>BC_FORI
- |
- |//-----------------------------------------------------------------------
- |//-- Fast functions -----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |.macro .ffunc, name
- |->ff_ .. name:
- |.endmacro
- |
- |.macro .ffunc_1, name
- |->ff_ .. name:
- | ldrd CARG12, [BASE]
- | cmp NARGS8:RC, #8
- | blo ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_2, name
- |->ff_ .. name:
- | ldrd CARG12, [BASE]
- | ldrd CARG34, [BASE, #8]
- | cmp NARGS8:RC, #16
- | blo ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_n, name
- | .ffunc_1 name
- | checktp CARG2, LJ_TISNUM
- | bhs ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_nn, name
- | .ffunc_2 name
- | checktp CARG2, LJ_TISNUM
- | cmnlo CARG4, #-LJ_TISNUM
- | bhs ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_d, name
- | .ffunc name
- | ldr CARG2, [BASE, #4]
- | cmp NARGS8:RC, #8
- | vldr d0, [BASE]
- | blo ->fff_fallback
- | checktp CARG2, LJ_TISNUM
- | bhs ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_dd, name
- | .ffunc name
- | ldr CARG2, [BASE, #4]
- | ldr CARG4, [BASE, #12]
- | cmp NARGS8:RC, #16
- | vldr d0, [BASE]
- | vldr d1, [BASE, #8]
- | blo ->fff_fallback
- | checktp CARG2, LJ_TISNUM
- | cmnlo CARG4, #-LJ_TISNUM
- | bhs ->fff_fallback
- |.endmacro
- |
- |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
- |.macro ffgccheck
- | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)]
- | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)]
- | cmp CARG1, CARG2
- | blge ->fff_gcstep
- |.endmacro
- |
- |//-- Base library: checks -----------------------------------------------
- |
- |.ffunc_1 assert
- | checktp CARG2, LJ_TTRUE
- | bhi ->fff_fallback
- | ldr PC, [BASE, FRAME_PC]
- | strd CARG12, [BASE, #-8]
- | mov RB, BASE
- | subs RA, NARGS8:RC, #8
- | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
- | beq ->fff_res // Done if exactly 1 argument.
- |1:
- | ldrd CARG12, [RB, #8]
- | subs RA, RA, #8
- | strd CARG12, [RB], #8
- | bne <1
- | b ->fff_res
- |
- |.ffunc type
- | ldr CARG2, [BASE, #4]
- | cmp NARGS8:RC, #8
- | blo ->fff_fallback
- | checktp CARG2, LJ_TISNUM
- | mvnlo CARG2, #~LJ_TISNUM
- | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1
- | lsl CARG4, CARG4, #3
- | ldrd CARG12, [CFUNC:CARG3, CARG4]
- | b ->fff_restv
- |
- |//-- Base library: getters and setters ---------------------------------
- |
- |.ffunc_1 getmetatable
- | checktp CARG2, LJ_TTAB
- | cmnne CARG2, #-LJ_TUDATA
- | bne >6
- |1: // Field metatable must be at same offset for GCtab and GCudata!
- | ldr TAB:RB, TAB:CARG1->metatable
- |2:
- | mvn CARG2, #~LJ_TNIL
- | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])]
- | cmp TAB:RB, #0
- | beq ->fff_restv
- | ldr CARG3, TAB:RB->hmask
- | ldr CARG4, STR:RC->hash
- | ldr NODE:INS, TAB:RB->node
- | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
- | add CARG3, CARG3, CARG3, lsl #1
- | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
- |3: // Rearranged logic, because we expect _not_ to find the key.
- | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS.
- | ldrd CARG12, NODE:INS->val
- | ldr NODE:INS, NODE:INS->next
- | checktp CARG4, LJ_TSTR
- | cmpeq CARG3, STR:RC
- | beq >5
- | cmp NODE:INS, #0
- | bne <3
- |4:
- | mov CARG1, RB // Use metatable as default result.
- | mvn CARG2, #~LJ_TTAB
- | b ->fff_restv
- |5:
- | checktp CARG2, LJ_TNIL
- | bne ->fff_restv
- | b <4
- |
- |6:
- | checktp CARG2, LJ_TISNUM
- | mvnhs CARG2, CARG2
- | movlo CARG2, #~LJ_TISNUM
- | add CARG4, DISPATCH, CARG2, lsl #2
- | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])]
- | b <2
- |
- |.ffunc_2 setmetatable
- | // Fast path: no mt for table yet and not clearing the mt.
- | checktp CARG2, LJ_TTAB
- | ldreq TAB:RB, TAB:CARG1->metatable
- | checktpeq CARG4, LJ_TTAB
- | ldrbeq CARG4, TAB:CARG1->marked
- | cmpeq TAB:RB, #0
- | bne ->fff_fallback
- | tst CARG4, #LJ_GC_BLACK // isblack(table)
- | str TAB:CARG3, TAB:CARG1->metatable
- | beq ->fff_restv
- | barrierback TAB:CARG1, CARG4, CARG3
- | b ->fff_restv
- |
- |.ffunc rawget
- | ldrd CARG34, [BASE]
- | cmp NARGS8:RC, #16
- | blo ->fff_fallback
- | mov CARG2, CARG3
- | checktab CARG4, ->fff_fallback
- | mov CARG1, L
- | add CARG3, BASE, #8
- | .IOS mov RA, BASE
- | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
- | // Returns cTValue *.
- | .IOS mov BASE, RA
- | ldrd CARG12, [CRET1]
- | b ->fff_restv
- |
- |//-- Base library: conversions ------------------------------------------
- |
- |.ffunc tonumber
- | // Only handles the number case inline (without a base argument).
- | ldrd CARG12, [BASE]
- | cmp NARGS8:RC, #8
- | bne ->fff_fallback
- | checktp CARG2, LJ_TISNUM
- | bls ->fff_restv
- | b ->fff_fallback
- |
- |.ffunc_1 tostring
- | // Only handles the string or number case inline.
- | checktp CARG2, LJ_TSTR
- | // A __tostring method in the string base metatable is ignored.
- | beq ->fff_restv
- | // Handle numbers inline, unless a number base metatable is present.
- | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])]
- | str BASE, L->base
- | checktp CARG2, LJ_TISNUM
- | cmpls CARG4, #0
- | str PC, SAVE_PC // Redundant (but a defined value).
- | bhi ->fff_fallback
- | ffgccheck
- | mov CARG1, L
- | mov CARG2, BASE
- | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o)
- | // Returns GCstr *.
- | ldr BASE, L->base
- | mvn CARG2, #~LJ_TSTR
- | b ->fff_restv
- |
- |//-- Base library: iterators -------------------------------------------
- |
- |.ffunc_1 next
- | mvn CARG4, #~LJ_TNIL
- | checktab CARG2, ->fff_fallback
- | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
- | ldr PC, [BASE, FRAME_PC]
- | mov CARG2, CARG1
- | str BASE, L->base // Add frame since C call can throw.
- | mov CARG1, L
- | str BASE, L->top // Dummy frame length is ok.
- | add CARG3, BASE, #8
- | str PC, SAVE_PC
- | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
- | // Returns 0 at end of traversal.
- | .IOS ldr BASE, L->base
- | cmp CRET1, #0
- | mvneq CRET2, #~LJ_TNIL
- | beq ->fff_restv // End of traversal: return nil.
- | ldrd CARG12, [BASE, #8] // Copy key and value to results.
- | ldrd CARG34, [BASE, #16]
- | mov RC, #(2+1)*8
- | strd CARG12, [BASE, #-8]
- | strd CARG34, [BASE]
- | b ->fff_res
- |
- |.ffunc_1 pairs
- | checktab CARG2, ->fff_fallback
-#if LJ_52
- | ldr TAB:RB, TAB:CARG1->metatable
-#endif
- | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
- | ldr PC, [BASE, FRAME_PC]
-#if LJ_52
- | cmp TAB:RB, #0
- | bne ->fff_fallback
-#endif
- | mvn CARG2, #~LJ_TNIL
- | mov RC, #(3+1)*8
- | strd CFUNC:CARG34, [BASE, #-8]
- | str CARG2, [BASE, #12]
- | b ->fff_res
- |
- |.ffunc_2 ipairs_aux
- | checktp CARG2, LJ_TTAB
- | checktpeq CARG4, LJ_TISNUM
- | bne ->fff_fallback
- | ldr RB, TAB:CARG1->asize
- | ldr RC, TAB:CARG1->array
- | add CARG3, CARG3, #1
- | ldr PC, [BASE, FRAME_PC]
- | cmp CARG3, RB
- | add RC, RC, CARG3, lsl #3
- | strd CARG34, [BASE, #-8]
- | ldrdlo CARG12, [RC]
- | mov RC, #(0+1)*8
- | bhs >2 // Not in array part?
- |1:
- | checktp CARG2, LJ_TNIL
- | movne RC, #(2+1)*8
- | strdne CARG12, [BASE]
- | b ->fff_res
- |2: // Check for empty hash part first. Otherwise call C function.
- | ldr RB, TAB:CARG1->hmask
- | mov CARG2, CARG3
- | cmp RB, #0
- | beq ->fff_res
- | .IOS mov RA, BASE
- | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
- | // Returns cTValue * or NULL.
- | .IOS mov BASE, RA
- | cmp CRET1, #0
- | beq ->fff_res
- | ldrd CARG12, [CRET1]
- | b <1
- |
- |.ffunc_1 ipairs
- | checktab CARG2, ->fff_fallback
-#if LJ_52
- | ldr TAB:RB, TAB:CARG1->metatable
-#endif
- | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
- | ldr PC, [BASE, FRAME_PC]
-#if LJ_52
- | cmp TAB:RB, #0
- | bne ->fff_fallback
-#endif
- | mov CARG1, #0
- | mvn CARG2, #~LJ_TISNUM
- | mov RC, #(3+1)*8
- | strd CFUNC:CARG34, [BASE, #-8]
- | strd CARG12, [BASE, #8]
- | b ->fff_res
- |
- |//-- Base library: catch errors ----------------------------------------
- |
- |.ffunc pcall
- | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
- | cmp NARGS8:RC, #8
- | blo ->fff_fallback
- | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
- | mov RB, BASE
- | add BASE, BASE, #8
- | moveq PC, #8+FRAME_PCALL
- | movne PC, #8+FRAME_PCALLH
- | sub NARGS8:RC, NARGS8:RC, #8
- | b ->vm_call_dispatch
- |
- |.ffunc_2 xpcall
- | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
- | checkfunc CARG4, ->fff_fallback // Traceback must be a function.
- | mov RB, BASE
- | strd CARG12, [BASE, #8] // Swap function and traceback.
- | strd CARG34, [BASE]
- | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
- | add BASE, BASE, #16
- | moveq PC, #16+FRAME_PCALL
- | movne PC, #16+FRAME_PCALLH
- | sub NARGS8:RC, NARGS8:RC, #16
- | b ->vm_call_dispatch
- |
- |//-- Coroutine library --------------------------------------------------
- |
- |.macro coroutine_resume_wrap, resume
- |.if resume
- |.ffunc_1 coroutine_resume
- | checktp CARG2, LJ_TTHREAD
- | bne ->fff_fallback
- |.else
- |.ffunc coroutine_wrap_aux
- | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr
- |.endif
- | ldr PC, [BASE, FRAME_PC]
- | str BASE, L->base
- | ldr CARG2, L:CARG1->top
- | ldrb RA, L:CARG1->status
- | ldr RB, L:CARG1->base
- | add CARG3, CARG2, NARGS8:RC
- | add CARG4, CARG2, RA
- | str PC, SAVE_PC
- | cmp CARG4, RB
- | beq ->fff_fallback
- | ldr CARG4, L:CARG1->maxstack
- | ldr RB, L:CARG1->cframe
- | cmp RA, #LUA_YIELD
- | cmpls CARG3, CARG4
- | cmpls RB, #0
- | bhi ->fff_fallback
- |1:
- |.if resume
- | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
- | add BASE, BASE, #8
- | sub NARGS8:RC, NARGS8:RC, #8
- |.endif
- | str CARG3, L:CARG1->top
- | str BASE, L->top
- |2: // Move args to coroutine.
- | ldrd CARG34, [BASE, RB]
- | cmp RB, NARGS8:RC
- | strdne CARG34, [CARG2, RB]
- | add RB, RB, #8
- | bne <2
- |
- | mov CARG3, #0
- | mov L:RA, L:CARG1
- | mov CARG4, #0
- | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
- | // Returns thread status.
- |4:
- | ldr CARG3, L:RA->base
- | mv_vmstate CARG2, INTERP
- | ldr CARG4, L:RA->top
- | st_vmstate CARG2
- | cmp CRET1, #LUA_YIELD
- | ldr BASE, L->base
- | bhi >8
- | subs RC, CARG4, CARG3
- | ldr CARG1, L->maxstack
- | add CARG2, BASE, RC
- | beq >6 // No results?
- | cmp CARG2, CARG1
- | mov RB, #0
- | bhi >9 // Need to grow stack?
- |
- | sub CARG4, RC, #8
- | str CARG3, L:RA->top // Clear coroutine stack.
- |5: // Move results from coroutine.
- | ldrd CARG12, [CARG3, RB]
- | cmp RB, CARG4
- | strd CARG12, [BASE, RB]
- | add RB, RB, #8
- | bne <5
- |6:
- |.if resume
- | mvn CARG3, #~LJ_TTRUE
- | add RC, RC, #16
- |7:
- | str CARG3, [BASE, #-4] // Prepend true/false to results.
- | sub RA, BASE, #8
- |.else
- | mov RA, BASE
- | add RC, RC, #8
- |.endif
- | ands CARG1, PC, #FRAME_TYPE
- | str PC, SAVE_PC
- | str RC, SAVE_MULTRES
- | beq ->BC_RET_Z
- | b ->vm_return
- |
- |8: // Coroutine returned with error (at co->top-1).
- |.if resume
- | ldrd CARG12, [CARG4, #-8]!
- | mvn CARG3, #~LJ_TFALSE
- | mov RC, #(2+1)*8
- | str CARG4, L:RA->top // Remove error from coroutine stack.
- | strd CARG12, [BASE] // Copy error message.
- | b <7
- |.else
- | mov CARG1, L
- | mov CARG2, L:RA
- | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
- | // Never returns.
- |.endif
- |
- |9: // Handle stack expansion on return from yield.
- | mov CARG1, L
- | lsr CARG2, RC, #3
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | mov CRET1, #0
- | b <4
- |.endmacro
- |
- | coroutine_resume_wrap 1 // coroutine.resume
- | coroutine_resume_wrap 0 // coroutine.wrap
- |
- |.ffunc coroutine_yield
- | ldr CARG1, L->cframe
- | add CARG2, BASE, NARGS8:RC
- | str BASE, L->base
- | tst CARG1, #CFRAME_RESUME
- | str CARG2, L->top
- | mov CRET1, #LUA_YIELD
- | mov CARG3, #0
- | beq ->fff_fallback
- | str CARG3, L->cframe
- | strb CRET1, L->status
- | b ->vm_leave_unw
- |
- |//-- Math library -------------------------------------------------------
- |
- |.macro math_round, func
- | .ffunc_1 math_ .. func
- | checktp CARG2, LJ_TISNUM
- | beq ->fff_restv
- | bhi ->fff_fallback
- | // Round FP value and normalize result.
- | lsl CARG3, CARG2, #1
- | adds RB, CARG3, #0x00200000
- | bpl >2 // |x| < 1?
- | mvn CARG4, #0x3e0
- | subs RB, CARG4, RB, asr #21
- | lsl CARG4, CARG2, #11
- | lsl CARG3, CARG1, #11
- | orr CARG4, CARG4, #0x80000000
- | rsb INS, RB, #32
- | orr CARG4, CARG4, CARG1, lsr #21
- | bls >3 // |x| >= 2^31?
- | orr CARG3, CARG3, CARG4, lsl INS
- | lsr CARG1, CARG4, RB
- |.if "func" == "floor"
- | tst CARG3, CARG2, asr #31
- | addne CARG1, CARG1, #1
- |.else
- | bics CARG3, CARG3, CARG2, asr #31
- | addsne CARG1, CARG1, #1
- | ldrdvs CARG12, >9
- | bvs ->fff_restv
- |.endif
- | cmp CARG2, #0
- | rsblt CARG1, CARG1, #0
- |1:
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |
- |2: // |x| < 1
- | bcs ->fff_restv // |x| is not finite.
- | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo
- |.if "func" == "floor"
- | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1
- | moveq CARG1, #0
- | mvnne CARG1, #0
- |.else
- | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 0 : 1
- | moveq CARG1, #0
- | movne CARG1, #1
- |.endif
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |
- |3: // |x| >= 2^31. Check for x == -(2^31).
- | cmpeq CARG4, #0x80000000
- |.if "func" == "floor"
- | cmpeq CARG3, #0
- |.endif
- | bne >4
- | cmp CARG2, #0
- | movmi CARG1, #0x80000000
- | bmi <1
- |4:
- | bl ->vm_..func.._sf
- | b ->fff_restv
- |.endmacro
- |
- | math_round floor
- | math_round ceil
- |
- |.align 8
- |9:
- | .long 0x00000000, 0x41e00000 // 2^31.
- |
- |.ffunc_1 math_abs
- | checktp CARG2, LJ_TISNUM
- | bhi ->fff_fallback
- | bicne CARG2, CARG2, #0x80000000
- | bne ->fff_restv
- | cmp CARG1, #0
- | rsbslt CARG1, CARG1, #0
- | ldrdvs CARG12, <9
- | // Fallthrough.
- |
- |->fff_restv:
- | // CARG12 = TValue result.
- | ldr PC, [BASE, FRAME_PC]
- | strd CARG12, [BASE, #-8]
- |->fff_res1:
- | // PC = return.
- | mov RC, #(1+1)*8
- |->fff_res:
- | // RC = (nresults+1)*8, PC = return.
- | ands CARG1, PC, #FRAME_TYPE
- | ldreq INS, [PC, #-4]
- | str RC, SAVE_MULTRES
- | sub RA, BASE, #8
- | bne ->vm_return
- | decode_RB8 RB, INS
- |5:
- | cmp RB, RC // More results expected?
- | bhi >6
- | decode_RA8 CARG1, INS
- | ins_next1
- | ins_next2
- | // Adjust BASE. KBASE is assumed to be set for the calling frame.
- | sub BASE, RA, CARG1
- | ins_next3
- |
- |6: // Fill up results with nil.
- | add CARG2, RA, RC
- | mvn CARG1, #~LJ_TNIL
- | add RC, RC, #8
- | str CARG1, [CARG2, #-4]
- | b <5
- |
- |.macro math_extern, func
- |.if HFABI
- | .ffunc_d math_ .. func
- |.else
- | .ffunc_n math_ .. func
- |.endif
- | .IOS mov RA, BASE
- | bl extern func
- | .IOS mov BASE, RA
- |.if HFABI
- | b ->fff_resd
- |.else
- | b ->fff_restv
- |.endif
- |.endmacro
- |
- |.macro math_extern2, func
- |.if HFABI
- | .ffunc_dd math_ .. func
- |.else
- | .ffunc_nn math_ .. func
- |.endif
- | .IOS mov RA, BASE
- | bl extern func
- | .IOS mov BASE, RA
- |.if HFABI
- | b ->fff_resd
- |.else
- | b ->fff_restv
- |.endif
- |.endmacro
- |
- |.if FPU
- | .ffunc_d math_sqrt
- | vsqrt.f64 d0, d0
- |->fff_resd:
- | ldr PC, [BASE, FRAME_PC]
- | vstr d0, [BASE, #-8]
- | b ->fff_res1
- |.else
- | math_extern sqrt
- |.endif
- |
- |.ffunc math_log
- |.if HFABI
- | ldr CARG2, [BASE, #4]
- | cmp NARGS8:RC, #8 // Need exactly 1 argument.
- | vldr d0, [BASE]
- | bne ->fff_fallback
- |.else
- | ldrd CARG12, [BASE]
- | cmp NARGS8:RC, #8 // Need exactly 1 argument.
- | bne ->fff_fallback
- |.endif
- | checktp CARG2, LJ_TISNUM
- | bhs ->fff_fallback
- | .IOS mov RA, BASE
- | bl extern log
- | .IOS mov BASE, RA
- |.if HFABI
- | b ->fff_resd
- |.else
- | b ->fff_restv
- |.endif
- |
- | math_extern log10
- | math_extern exp
- | math_extern sin
- | math_extern cos
- | math_extern tan
- | math_extern asin
- | math_extern acos
- | math_extern atan
- | math_extern sinh
- | math_extern cosh
- | math_extern tanh
- | math_extern2 pow
- | math_extern2 atan2
- | math_extern2 fmod
- |
- |->ff_math_deg:
- |.if FPU
- | .ffunc_d math_rad
- | vldr d1, CFUNC:CARG3->upvalue[0]
- | vmul.f64 d0, d0, d1
- | b ->fff_resd
- |.else
- | .ffunc_n math_rad
- | ldrd CARG34, CFUNC:CARG3->upvalue[0]
- | bl extern __aeabi_dmul
- | b ->fff_restv
- |.endif
- |
- |.if HFABI
- | .ffunc math_ldexp
- | ldr CARG4, [BASE, #4]
- | ldrd CARG12, [BASE, #8]
- | cmp NARGS8:RC, #16
- | blo ->fff_fallback
- | vldr d0, [BASE]
- | checktp CARG4, LJ_TISNUM
- | bhs ->fff_fallback
- | checktp CARG2, LJ_TISNUM
- | bne ->fff_fallback
- | .IOS mov RA, BASE
- | bl extern ldexp // (double x, int exp)
- | .IOS mov BASE, RA
- | b ->fff_resd
- |.else
- |.ffunc_2 math_ldexp
- | checktp CARG2, LJ_TISNUM
- | bhs ->fff_fallback
- | checktp CARG4, LJ_TISNUM
- | bne ->fff_fallback
- | .IOS mov RA, BASE
- | bl extern ldexp // (double x, int exp)
- | .IOS mov BASE, RA
- | b ->fff_restv
- |.endif
- |
- |.if HFABI
- |.ffunc_d math_frexp
- | mov CARG1, sp
- | .IOS mov RA, BASE
- | bl extern frexp
- | .IOS mov BASE, RA
- | ldr CARG3, [sp]
- | mvn CARG4, #~LJ_TISNUM
- | ldr PC, [BASE, FRAME_PC]
- | vstr d0, [BASE, #-8]
- | mov RC, #(2+1)*8
- | strd CARG34, [BASE]
- | b ->fff_res
- |.else
- |.ffunc_n math_frexp
- | mov CARG3, sp
- | .IOS mov RA, BASE
- | bl extern frexp
- | .IOS mov BASE, RA
- | ldr CARG3, [sp]
- | mvn CARG4, #~LJ_TISNUM
- | ldr PC, [BASE, FRAME_PC]
- | strd CARG12, [BASE, #-8]
- | mov RC, #(2+1)*8
- | strd CARG34, [BASE]
- | b ->fff_res
- |.endif
- |
- |.if HFABI
- |.ffunc_d math_modf
- | sub CARG1, BASE, #8
- | ldr PC, [BASE, FRAME_PC]
- | .IOS mov RA, BASE
- | bl extern modf
- | .IOS mov BASE, RA
- | mov RC, #(2+1)*8
- | vstr d0, [BASE]
- | b ->fff_res
- |.else
- |.ffunc_n math_modf
- | sub CARG3, BASE, #8
- | ldr PC, [BASE, FRAME_PC]
- | .IOS mov RA, BASE
- | bl extern modf
- | .IOS mov BASE, RA
- | mov RC, #(2+1)*8
- | strd CARG12, [BASE]
- | b ->fff_res
- |.endif
- |
- |.macro math_minmax, name, cond, fcond
- |.if FPU
- | .ffunc_1 name
- | add RB, BASE, RC
- | checktp CARG2, LJ_TISNUM
- | add RA, BASE, #8
- | bne >4
- |1: // Handle integers.
- | ldrd CARG34, [RA]
- | cmp RA, RB
- | bhs ->fff_restv
- | checktp CARG4, LJ_TISNUM
- | bne >3
- | cmp CARG1, CARG3
- | add RA, RA, #8
- | mov..cond CARG1, CARG3
- | b <1
- |3: // Convert intermediate result to number and continue below.
- | vmov s4, CARG1
- | bhi ->fff_fallback
- | vldr d1, [RA]
- | vcvt.f64.s32 d0, s4
- | b >6
- |
- |4:
- | vldr d0, [BASE]
- | bhi ->fff_fallback
- |5: // Handle numbers.
- | ldrd CARG34, [RA]
- | vldr d1, [RA]
- | cmp RA, RB
- | bhs ->fff_resd
- | checktp CARG4, LJ_TISNUM
- | bhs >7
- |6:
- | vcmp.f64 d0, d1
- | vmrs
- | add RA, RA, #8
- | vmov..fcond.f64 d0, d1
- | b <5
- |7: // Convert integer to number and continue above.
- | vmov s4, CARG3
- | bhi ->fff_fallback
- | vcvt.f64.s32 d1, s4
- | b <6
- |
- |.else
- |
- | .ffunc_1 name
- | checktp CARG2, LJ_TISNUM
- | mov RA, #8
- | bne >4
- |1: // Handle integers.
- | ldrd CARG34, [BASE, RA]
- | cmp RA, RC
- | bhs ->fff_restv
- | checktp CARG4, LJ_TISNUM
- | bne >3
- | cmp CARG1, CARG3
- | add RA, RA, #8
- | mov..cond CARG1, CARG3
- | b <1
- |3: // Convert intermediate result to number and continue below.
- | bhi ->fff_fallback
- | bl extern __aeabi_i2d
- | ldrd CARG34, [BASE, RA]
- | b >6
- |
- |4:
- | bhi ->fff_fallback
- |5: // Handle numbers.
- | ldrd CARG34, [BASE, RA]
- | cmp RA, RC
- | bhs ->fff_restv
- | checktp CARG4, LJ_TISNUM
- | bhs >7
- |6:
- | bl extern __aeabi_cdcmple
- | add RA, RA, #8
- | mov..fcond CARG1, CARG3
- | mov..fcond CARG2, CARG4
- | b <5
- |7: // Convert integer to number and continue above.
- | bhi ->fff_fallback
- | strd CARG12, TMPD
- | mov CARG1, CARG3
- | bl extern __aeabi_i2d
- | ldrd CARG34, TMPD
- | b <6
- |.endif
- |.endmacro
- |
- | math_minmax math_min, gt, hi
- | math_minmax math_max, lt, lo
- |
- |//-- String library -----------------------------------------------------
- |
- |.ffunc_1 string_len
- | checkstr CARG2, ->fff_fallback
- | ldr CARG1, STR:CARG1->len
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |
- |.ffunc string_byte // Only handle the 1-arg case here.
- | ldrd CARG12, [BASE]
- | ldr PC, [BASE, FRAME_PC]
- | cmp NARGS8:RC, #8
- | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument.
- | bne ->fff_fallback
- | ldr CARG3, STR:CARG1->len
- | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
- | mvn CARG2, #~LJ_TISNUM
- | cmp CARG3, #0
- | moveq RC, #(0+1)*8
- | movne RC, #(1+1)*8
- | strd CARG12, [BASE, #-8]
- | b ->fff_res
- |
- |.ffunc string_char // Only handle the 1-arg case here.
- | ffgccheck
- | ldrd CARG12, [BASE]
- | ldr PC, [BASE, FRAME_PC]
- | cmp NARGS8:RC, #8 // Need exactly 1 argument.
- | checktpeq CARG2, LJ_TISNUM
- | bicseq CARG4, CARG1, #255
- | mov CARG3, #1
- | bne ->fff_fallback
- | str CARG1, TMPD
- | mov CARG2, TMPDp // Points to stack. Little-endian.
- |->fff_newstr:
- | // CARG2 = str, CARG3 = len.
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
- | // Returns GCstr *.
- | ldr BASE, L->base
- | mvn CARG2, #~LJ_TSTR
- | b ->fff_restv
- |
- |.ffunc string_sub
- | ffgccheck
- | ldrd CARG12, [BASE]
- | ldrd CARG34, [BASE, #16]
- | cmp NARGS8:RC, #16
- | mvn RB, #0
- | beq >1
- | blo ->fff_fallback
- | checktp CARG4, LJ_TISNUM
- | mov RB, CARG3
- | bne ->fff_fallback
- |1:
- | ldrd CARG34, [BASE, #8]
- | checktp CARG2, LJ_TSTR
- | ldreq CARG2, STR:CARG1->len
- | checktpeq CARG4, LJ_TISNUM
- | bne ->fff_fallback
- | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end
- | add CARG4, CARG2, #1
- | cmp CARG3, #0 // if (start < 0) start += len+1
- | addlt CARG3, CARG3, CARG4
- | cmp CARG3, #1 // if (start < 1) start = 1
- | movlt CARG3, #1
- | cmp RB, #0 // if (end < 0) end += len+1
- | addlt RB, RB, CARG4
- | bic RB, RB, RB, asr #31 // if (end < 0) end = 0
- | cmp RB, CARG2 // if (end > len) end = len
- | add CARG1, STR:CARG1, #sizeof(GCstr)-1
- | movgt RB, CARG2
- | add CARG2, CARG1, CARG3
- | subs CARG3, RB, CARG3 // len = end - start
- | add CARG3, CARG3, #1 // len += 1
- | bge ->fff_newstr
- |->fff_emptystr:
- | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty)
- | mvn CARG2, #~LJ_TSTR
- | b ->fff_restv
- |
- |.ffunc string_rep // Only handle the 1-char case inline.
- | ffgccheck
- | ldrd CARG12, [BASE]
- | ldrd CARG34, [BASE, #8]
- | cmp NARGS8:RC, #16
- | bne ->fff_fallback // Exactly 2 arguments
- | checktp CARG2, LJ_TSTR
- | checktpeq CARG4, LJ_TISNUM
- | bne ->fff_fallback
- | subs CARG4, CARG3, #1
- | ldr CARG2, STR:CARG1->len
- | blt ->fff_emptystr // Count <= 0?
- | cmp CARG2, #1
- | blo ->fff_emptystr // Zero-length string?
- | bne ->fff_fallback // Fallback for > 1-char strings.
- | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
- | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
- | ldr CARG1, STR:CARG1[1]
- | cmp RB, CARG3
- | blo ->fff_fallback
- |1: // Fill buffer with char.
- | strb CARG1, [CARG2, CARG4]
- | subs CARG4, CARG4, #1
- | bge <1
- | b ->fff_newstr
- |
- |.ffunc string_reverse
- | ffgccheck
- | ldrd CARG12, [BASE]
- | cmp NARGS8:RC, #8
- | blo ->fff_fallback
- | checkstr CARG2, ->fff_fallback
- | ldr CARG3, STR:CARG1->len
- | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
- | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
- | mov CARG4, CARG3
- | add CARG1, STR:CARG1, #sizeof(GCstr)
- | cmp RB, CARG3
- | blo ->fff_fallback
- |1: // Reverse string copy.
- | ldrb RB, [CARG1], #1
- | subs CARG4, CARG4, #1
- | blt ->fff_newstr
- | strb RB, [CARG2, CARG4]
- | b <1
- |
- |.macro ffstring_case, name, lo
- | .ffunc name
- | ffgccheck
- | ldrd CARG12, [BASE]
- | cmp NARGS8:RC, #8
- | blo ->fff_fallback
- | checkstr CARG2, ->fff_fallback
- | ldr CARG3, STR:CARG1->len
- | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
- | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
- | mov CARG4, #0
- | add CARG1, STR:CARG1, #sizeof(GCstr)
- | cmp RB, CARG3
- | blo ->fff_fallback
- |1: // ASCII case conversion.
- | ldrb RB, [CARG1, CARG4]
- | cmp CARG4, CARG3
- | bhs ->fff_newstr
- | sub RC, RB, #lo
- | cmp RC, #26
- | eorlo RB, RB, #0x20
- | strb RB, [CARG2, CARG4]
- | add CARG4, CARG4, #1
- | b <1
- |.endmacro
- |
- |ffstring_case string_lower, 65
- |ffstring_case string_upper, 97
- |
- |//-- Table library ------------------------------------------------------
- |
- |.ffunc_1 table_getn
- | checktab CARG2, ->fff_fallback
- | .IOS mov RA, BASE
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | .IOS mov BASE, RA
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |
- |//-- Bit library --------------------------------------------------------
- |
- |// FP number to bit conversion for soft-float. Clobbers r0-r3.
- |->vm_tobit_fb:
- | bhi ->fff_fallback
- |->vm_tobit:
- | lsl RB, CARG2, #1
- | adds RB, RB, #0x00200000
- | movpl CARG1, #0 // |x| < 1?
- | bxpl lr
- | mvn CARG4, #0x3e0
- | subs RB, CARG4, RB, asr #21
- | bmi >1 // |x| >= 2^32?
- | lsl CARG4, CARG2, #11
- | orr CARG4, CARG4, #0x80000000
- | orr CARG4, CARG4, CARG1, lsr #21
- | cmp CARG2, #0
- | lsr CARG1, CARG4, RB
- | rsblt CARG1, CARG1, #0
- | bx lr
- |1:
- | add RB, RB, #21
- | lsr CARG4, CARG1, RB
- | rsb RB, RB, #20
- | lsl CARG1, CARG2, #12
- | cmp CARG2, #0
- | orr CARG1, CARG4, CARG1, lsl RB
- | rsblt CARG1, CARG1, #0
- | bx lr
- |
- |.macro .ffunc_bit, name
- | .ffunc_1 bit_..name
- | checktp CARG2, LJ_TISNUM
- | blne ->vm_tobit_fb
- |.endmacro
- |
- |.ffunc_bit tobit
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |
- |.macro .ffunc_bit_op, name, ins
- | .ffunc_bit name
- | mov CARG3, CARG1
- | mov RA, #8
- |1:
- | ldrd CARG12, [BASE, RA]
- | cmp RA, NARGS8:RC
- | add RA, RA, #8
- | bge >2
- | checktp CARG2, LJ_TISNUM
- | blne ->vm_tobit_fb
- | ins CARG3, CARG3, CARG1
- | b <1
- |.endmacro
- |
- |.ffunc_bit_op band, and
- |.ffunc_bit_op bor, orr
- |.ffunc_bit_op bxor, eor
- |
- |2:
- | mvn CARG4, #~LJ_TISNUM
- | ldr PC, [BASE, FRAME_PC]
- | strd CARG34, [BASE, #-8]
- | b ->fff_res1
- |
- |.ffunc_bit bswap
- | eor CARG3, CARG1, CARG1, ror #16
- | bic CARG3, CARG3, #0x00ff0000
- | ror CARG1, CARG1, #8
- | mvn CARG2, #~LJ_TISNUM
- | eor CARG1, CARG1, CARG3, lsr #8
- | b ->fff_restv
- |
- |.ffunc_bit bnot
- | mvn CARG1, CARG1
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |
- |.macro .ffunc_bit_sh, name, ins, shmod
- | .ffunc bit_..name
- | ldrd CARG12, [BASE, #8]
- | cmp NARGS8:RC, #16
- | blo ->fff_fallback
- | checktp CARG2, LJ_TISNUM
- | blne ->vm_tobit_fb
- |.if shmod == 0
- | and RA, CARG1, #31
- |.else
- | rsb RA, CARG1, #0
- |.endif
- | ldrd CARG12, [BASE]
- | checktp CARG2, LJ_TISNUM
- | blne ->vm_tobit_fb
- | ins CARG1, CARG1, RA
- | mvn CARG2, #~LJ_TISNUM
- | b ->fff_restv
- |.endmacro
- |
- |.ffunc_bit_sh lshift, lsl, 0
- |.ffunc_bit_sh rshift, lsr, 0
- |.ffunc_bit_sh arshift, asr, 0
- |.ffunc_bit_sh rol, ror, 1
- |.ffunc_bit_sh ror, ror, 0
- |
- |//-----------------------------------------------------------------------
- |
- |->fff_fallback: // Call fast function fallback handler.
- | // BASE = new base, RC = nargs*8
- | ldr CARG3, [BASE, FRAME_FUNC]
- | ldr CARG2, L->maxstack
- | add CARG1, BASE, NARGS8:RC
- | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC.
- | str CARG1, L->top
- | ldr CARG3, CFUNC:CARG3->f
- | str BASE, L->base
- | add CARG1, CARG1, #8*LUA_MINSTACK
- | str PC, SAVE_PC // Redundant (but a defined value).
- | cmp CARG1, CARG2
- | mov CARG1, L
- | bhi >5 // Need to grow stack.
- | blx CARG3 // (lua_State *L)
- | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
- | ldr BASE, L->base
- | cmp CRET1, #0
- | lsl RC, CRET1, #3
- | sub RA, BASE, #8
- | bgt ->fff_res // Returned nresults+1?
- |1: // Returned 0 or -1: retry fast path.
- | ldr CARG1, L->top
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
- | sub NARGS8:RC, CARG1, BASE
- | bne ->vm_call_tail // Returned -1?
- | ins_callt // Returned 0: retry fast path.
- |
- |// Reconstruct previous base for vmeta_call during tailcall.
- |->vm_call_tail:
- | ands CARG1, PC, #FRAME_TYPE
- | bic CARG2, PC, #FRAME_TYPEP
- | ldreq INS, [PC, #-4]
- | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8.
- | addeq CARG2, CARG2, #8
- | sub RB, BASE, CARG2
- | b ->vm_call_dispatch // Resolve again for tailcall.
- |
- |5: // Grow stack for fallback handler.
- | mov CARG2, #LUA_MINSTACK
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | ldr BASE, L->base
- | cmp CARG1, CARG1 // Set zero-flag to force retry.
- | b <1
- |
- |->fff_gcstep: // Call GC step function.
- | // BASE = new base, RC = nargs*8
- | mov RA, lr
- | str BASE, L->base
- | add CARG2, BASE, NARGS8:RC
- | str PC, SAVE_PC // Redundant (but a defined value).
- | str CARG2, L->top
- | mov CARG1, L
- | bl extern lj_gc_step // (lua_State *L)
- | ldr BASE, L->base
- | mov lr, RA // Help return address predictor.
- | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
- | bx lr
- |
- |//-----------------------------------------------------------------------
- |//-- Special dispatch targets -------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_record: // Dispatch target for recording phase.
- |.if JIT
- | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
- | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
- | bne >5
- | // Decrement the hookcount for consistency, but always do the call.
- | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
- | tst CARG1, #HOOK_ACTIVE
- | bne >1
- | sub CARG2, CARG2, #1
- | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
- | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
- | b >1
- |.endif
- |
- |->vm_rethook: // Dispatch target for return hooks.
- | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
- | tst CARG1, #HOOK_ACTIVE // Hook already active?
- | beq >1
- |5: // Re-dispatch to static ins.
- | decode_OP OP, INS
- | add OP, DISPATCH, OP, lsl #2
- | ldr pc, [OP, #GG_DISP2STATIC]
- |
- |->vm_inshook: // Dispatch target for instr/line hooks.
- | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
- | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
- | tst CARG1, #HOOK_ACTIVE // Hook already active?
- | bne <5
- | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
- | beq <5
- | subs CARG2, CARG2, #1
- | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
- | beq >1
- | tst CARG1, #LUA_MASKLINE
- | beq <5
- |1:
- | mov CARG1, L
- | str BASE, L->base
- | mov CARG2, PC
- | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
- | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
- |3:
- | ldr BASE, L->base
- |4: // Re-dispatch to static ins.
- | ldrb OP, [PC, #-4]
- | ldr INS, [PC, #-4]
- | add OP, DISPATCH, OP, lsl #2
- | ldr OP, [OP, #GG_DISP2STATIC]
- | decode_RA8 RA, INS
- | decode_RD RC, INS
- | bx OP
- |
- |->cont_hook: // Continue from hook yield.
- | ldr CARG1, [CARG4, #-24]
- | add PC, PC, #4
- | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins.
- | b <4
- |
- |->vm_hotloop: // Hot loop counter underflow.
- |.if JIT
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
- | sub CARG1, DISPATCH, #-GG_DISP2J
- | str PC, SAVE_PC
- | ldr CARG3, LFUNC:CARG3->field_pc
- | mov CARG2, PC
- | str L, [DISPATCH, #DISPATCH_J(L)]
- | ldrb CARG3, [CARG3, #PC2PROTO(framesize)]
- | str BASE, L->base
- | add CARG3, BASE, CARG3, lsl #3
- | str CARG3, L->top
- | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
- | b <3
- |.endif
- |
- |->vm_callhook: // Dispatch target for call hooks.
- | mov CARG2, PC
- |.if JIT
- | b >1
- |.endif
- |
- |->vm_hotcall: // Hot call counter underflow.
- |.if JIT
- | orr CARG2, PC, #1
- |1:
- |.endif
- | add CARG4, BASE, RC
- | str PC, SAVE_PC
- | mov CARG1, L
- | str BASE, L->base
- | sub RA, RA, BASE
- | str CARG4, L->top
- | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
- | // Returns ASMFunction.
- | ldr BASE, L->base
- | ldr CARG4, L->top
- | mov CARG2, #0
- | add RA, BASE, RA
- | sub NARGS8:RC, CARG4, BASE
- | str CARG2, SAVE_PC // Invalidate for subsequent line hook.
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
- | ldr INS, [PC, #-4]
- | bx CRET1
- |
- |//-----------------------------------------------------------------------
- |//-- Trace exit handler -------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_exit_handler:
- |.if JIT
- | sub sp, sp, #12
- | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12}
- | ldr CARG1, [sp, #64] // Load original value of lr.
- | ldr DISPATCH, [lr] // Load DISPATCH.
- | add CARG3, sp, #64 // Recompute original value of sp.
- | mv_vmstate CARG4, EXIT
- | str CARG3, [sp, #52] // Store sp in RID_SP
- | st_vmstate CARG4
- | ldr CARG2, [CARG1, #-4]! // Get exit instruction.
- | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC.
- | str CARG1, [sp, #60]
- |.if FPU
- | vpush {d0-d15}
- |.endif
- | lsl CARG2, CARG2, #8
- | add CARG1, CARG1, CARG2, asr #6
- | ldr CARG2, [lr, #4] // Load exit stub group offset.
- | sub CARG1, CARG1, lr
- | ldr L, [DISPATCH, #DISPATCH_GL(jit_L)]
- | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number.
- | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
- | str CARG1, [DISPATCH, #DISPATCH_J(exitno)]
- | mov CARG4, #0
- | str L, [DISPATCH, #DISPATCH_J(L)]
- | str BASE, L->base
- | str CARG4, [DISPATCH, #DISPATCH_GL(jit_L)]
- | sub CARG1, DISPATCH, #-GG_DISP2J
- | mov CARG2, sp
- | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
- | // Returns MULTRES (unscaled) or negated error code.
- | ldr CARG2, L->cframe
- | ldr BASE, L->base
- | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
- | mov sp, CARG2
- | ldr PC, SAVE_PC // Get SAVE_PC.
- | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
- | b >1
- |.endif
- |->vm_exit_interp:
- | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set.
- |.if JIT
- | ldr L, SAVE_L
- |1:
- | cmp CARG1, #0
- | blt >3 // Check for error from exit.
- | lsl RC, CARG1, #3
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | str RC, SAVE_MULTRES
- | mov CARG3, #0
- | ldr CARG2, LFUNC:CARG2->field_pc
- | str CARG3, [DISPATCH, #DISPATCH_GL(jit_L)]
- | mv_vmstate CARG4, INTERP
- | ldr KBASE, [CARG2, #PC2PROTO(k)]
- | // Modified copy of ins_next which handles function header dispatch, too.
- | ldrb OP, [PC]
- | mov MASKR8, #255
- | ldr INS, [PC], #4
- | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
- | st_vmstate CARG4
- | cmp OP, #BC_FUNCF // Function header?
- | ldr OP, [DISPATCH, OP, lsl #2]
- | decode_RA8 RA, INS
- | lsrlo RC, INS, #16 // No: Decode operands A*8 and D.
- | subhs RC, RC, #8
- | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8
- | bx OP
- |
- |3: // Rethrow error from the right C frame.
- | rsb CARG2, CARG1, #0
- | mov CARG1, L
- | bl extern lj_err_throw // (lua_State *L, int errcode)
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Math helper functions ----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// FP value rounding. Called from JIT code.
- |//
- |// double lj_vm_floor/ceil/trunc(double x);
- |.macro vm_round, func, hf
- |.if hf == 1
- | vmov CARG1, CARG2, d0
- |.endif
- | lsl CARG3, CARG2, #1
- | adds RB, CARG3, #0x00200000
- | bpl >2 // |x| < 1?
- | mvn CARG4, #0x3cc
- | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
- | bxlo lr // |x| >= 2^52: done.
- | mvn CARG4, #1
- | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask
- | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
- | subs RB, RB, #32
- | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask
- | orrpl CARG3, CARG3, CARG4
- | mvnpl CARG4, #1
- | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
- |.if "func" == "floor"
- | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
- |.else
- | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
- |.endif
- |.if hf == 1
- | vmoveq d0, CARG1, CARG2
- |.endif
- | bxeq lr // iszero: done.
- | mvn CARG4, #1
- | cmp RB, #0
- | lslpl CARG3, CARG4, RB
- | mvnmi CARG3, #0
- | add RB, RB, #32
- | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask
- | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry
- |.if hf == 1
- | vmov d0, CARG1, CARG2
- |.endif
- | bx lr
- |
- |2: // |x| < 1:
- | bxcs lr // |x| is not finite.
- | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo
- |.if "func" == "floor"
- | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
- |.else
- | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
- |.endif
- | mov CARG1, #0 // lo = 0
- | and CARG2, CARG2, #0x80000000
- | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 0.0 : 1.0)
- | orrne CARG2, CARG2, CARG4
- |.if hf == 1
- | vmov d0, CARG1, CARG2
- |.endif
- | bx lr
- |.endmacro
- |
- |9:
- | .long 0x3ff00000 // hiword(+1.0)
- |
- |->vm_floor:
- |.if HFABI
- | vm_round floor, 1
- |.endif
- |->vm_floor_sf:
- | vm_round floor, 0
- |
- |->vm_ceil:
- |.if HFABI
- | vm_round ceil, 1
- |.endif
- |->vm_ceil_sf:
- | vm_round ceil, 0
- |
- |.macro vm_trunc, hf
- |.if JIT
- |.if hf == 1
- | vmov CARG1, CARG2, d0
- |.endif
- | lsl CARG3, CARG2, #1
- | adds RB, CARG3, #0x00200000
- | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0.
- | movpl CARG1, #0
- |.if hf == 1
- | vmovpl d0, CARG1, CARG2
- |.endif
- | bxpl lr
- | mvn CARG4, #0x3cc
- | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
- | bxlo lr // |x| >= 2^52: already done.
- | mvn CARG4, #1
- | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
- | subs RB, RB, #32
- | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
- |.if hf == 1
- | vmov d0, CARG1, CARG2
- |.endif
- | bx lr
- |.endif
- |.endmacro
- |
- |->vm_trunc:
- |.if HFABI
- | vm_trunc 1
- |.endif
- |->vm_trunc_sf:
- | vm_trunc 0
- |
- | // double lj_vm_mod(double dividend, double divisor);
- |->vm_mod:
- |.if FPU
- | // Special calling convention. Also, RC (r11) is not preserved.
- | vdiv.f64 d0, d6, d7
- | mov RC, lr
- | vmov CARG1, CARG2, d0
- | bl ->vm_floor_sf
- | vmov d0, CARG1, CARG2
- | vmul.f64 d0, d0, d7
- | mov lr, RC
- | vsub.f64 d6, d6, d0
- | bx lr
- |.else
- | push {r0, r1, r2, r3, r4, lr}
- | bl extern __aeabi_ddiv
- | bl ->vm_floor_sf
- | ldrd CARG34, [sp, #8]
- | bl extern __aeabi_dmul
- | ldrd CARG34, [sp]
- | eor CARG2, CARG2, #0x80000000
- | bl extern __aeabi_dadd
- | add sp, sp, #20
- | pop {pc}
- |.endif
- |
- | // int lj_vm_modi(int dividend, int divisor);
- |->vm_modi:
- | ands RB, CARG1, #0x80000000
- | rsbmi CARG1, CARG1, #0 // a = |dividend|
- | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor).
- | cmp CARG2, #0
- | rsbmi CARG2, CARG2, #0 // b = |divisor|
- | subs CARG4, CARG2, #1
- | cmpne CARG1, CARG2
- | moveq CARG1, #0 // if (b == 1 || a == b) a = 0
- | tsthi CARG2, CARG4
- | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1
- | bls >1
- | // Use repeated subtraction to get the remainder.
- | clz CARG3, CARG1
- | clz CARG4, CARG2
- | sub CARG4, CARG4, CARG3
- | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8
- | addne pc, pc, CARG3, lsl #3 // Duff's device.
- | nop
- {
- int i;
- for (i = 31; i >= 0; i--) {
- | cmp CARG1, CARG2, lsl #i
- | subhs CARG1, CARG1, CARG2, lsl #i
- }
- }
- |1:
- | cmp CARG1, #0
- | cmpne RB, #0
- | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b
- | eors CARG2, CARG1, RB, lsl #1
- | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y
- | bx lr
- |
- |//-----------------------------------------------------------------------
- |//-- Miscellaneous functions --------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |//-----------------------------------------------------------------------
- |//-- FFI helper functions -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// Handler for callback functions.
- |// Saveregs already performed. Callback slot number in [sp], g in r12.
- |->vm_ffi_callback:
- |.if FFI
- |.type CTSTATE, CTState, PC
- | ldr CTSTATE, GL:r12->ctype_state
- | add DISPATCH, r12, #GG_G2DISP
- |.if FPU
- | str r4, SAVE_R4
- | add r4, sp, CFRAME_SPACE+4+8*8
- | vstmdb r4!, {d8-d15}
- |.endif
- |.if HFABI
- | add r12, CTSTATE, #offsetof(CTState, cb.fpr[8])
- |.endif
- | strd CARG34, CTSTATE->cb.gpr[2]
- | strd CARG12, CTSTATE->cb.gpr[0]
- |.if HFABI
- | vstmdb r12!, {d0-d7}
- |.endif
- | ldr CARG4, [sp]
- | add CARG3, sp, #CFRAME_SIZE
- | mov CARG1, CTSTATE
- | lsr CARG4, CARG4, #3
- | str CARG3, CTSTATE->cb.stack
- | mov CARG2, sp
- | str CARG4, CTSTATE->cb.slot
- | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
- | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
- | // Returns lua_State *.
- | ldr BASE, L:CRET1->base
- | mv_vmstate CARG2, INTERP
- | ldr RC, L:CRET1->top
- | mov MASKR8, #255
- | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
- | mov L, CRET1
- | sub RC, RC, BASE
- | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
- | st_vmstate CARG2
- | ins_callt
- |.endif
- |
- |->cont_ffi_callback: // Return from FFI callback.
- |.if FFI
- | ldr CTSTATE, [DISPATCH, #DISPATCH_GL(ctype_state)]
- | str BASE, L->base
- | str CARG4, L->top
- | str L, CTSTATE->L
- | mov CARG1, CTSTATE
- | mov CARG2, RA
- | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
- | ldrd CARG12, CTSTATE->cb.gpr[0]
- |.if HFABI
- | vldr d0, CTSTATE->cb.fpr[0]
- |.endif
- | b ->vm_leave_unw
- |.endif
- |
- |->vm_ffi_call: // Call C function via FFI.
- | // Caveat: needs special frame unwinding, see below.
- |.if FFI
- | .type CCSTATE, CCallState, r4
- | push {CCSTATE, r5, r11, lr}
- | mov CCSTATE, CARG1
- | ldr CARG1, CCSTATE:CARG1->spadj
- | ldrb CARG2, CCSTATE->nsp
- | add CARG3, CCSTATE, #offsetof(CCallState, stack)
- |.if HFABI
- | add RB, CCSTATE, #offsetof(CCallState, fpr[0])
- |.endif
- | mov r11, sp
- | sub sp, sp, CARG1 // Readjust stack.
- | subs CARG2, CARG2, #1
- |.if HFABI
- | vldm RB, {d0-d7}
- |.endif
- | ldr RB, CCSTATE->func
- | bmi >2
- |1: // Copy stack slots.
- | ldr CARG4, [CARG3, CARG2, lsl #2]
- | str CARG4, [sp, CARG2, lsl #2]
- | subs CARG2, CARG2, #1
- | bpl <1
- |2:
- | ldrd CARG12, CCSTATE->gpr[0]
- | ldrd CARG34, CCSTATE->gpr[2]
- | blx RB
- | mov sp, r11
- |.if HFABI
- | add r12, CCSTATE, #offsetof(CCallState, fpr[4])
- |.endif
- | strd CRET1, CCSTATE->gpr[0]
- |.if HFABI
- | vstmdb r12!, {d0-d3}
- |.endif
- | pop {CCSTATE, r5, r11, pc}
- |.endif
- |// Note: vm_ffi_call must be the last function in this object file!
- |
- |//-----------------------------------------------------------------------
-}
-
-/* Generate the code for a single instruction. */
-static void build_ins(BuildCtx *ctx, BCOp op, int defop)
-{
- int vk = 0;
- |=>defop:
-
- switch (op) {
-
- /* -- Comparison ops ---------------------------------------------------- */
-
- /* Remember: all ops branch for a true comparison, fall through otherwise. */
-
- case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
- | // RA = src1*8, RC = src2, JMP with RC = target
- | lsl RC, RC, #3
- | ldrd CARG12, [RA, BASE]!
- | ldrh RB, [PC, #2]
- | ldrd CARG34, [RC, BASE]!
- | add PC, PC, #4
- | add RB, PC, RB, lsl #2
- | checktp CARG2, LJ_TISNUM
- | bne >3
- | checktp CARG4, LJ_TISNUM
- | bne >4
- | cmp CARG1, CARG3
- if (op == BC_ISLT) {
- | sublt PC, RB, #0x20000
- } else if (op == BC_ISGE) {
- | subge PC, RB, #0x20000
- } else if (op == BC_ISLE) {
- | suble PC, RB, #0x20000
- } else {
- | subgt PC, RB, #0x20000
- }
- |1:
- | ins_next
- |
- |3: // CARG12 is not an integer.
- |.if FPU
- | vldr d0, [RA]
- | bhi ->vmeta_comp
- | // d0 is a number.
- | checktp CARG4, LJ_TISNUM
- | vldr d1, [RC]
- | blo >5
- | bhi ->vmeta_comp
- | // d0 is a number, CARG3 is an integer.
- | vmov s4, CARG3
- | vcvt.f64.s32 d1, s4
- | b >5
- |4: // CARG1 is an integer, CARG34 is not an integer.
- | vldr d1, [RC]
- | bhi ->vmeta_comp
- | // CARG1 is an integer, d1 is a number.
- | vmov s4, CARG1
- | vcvt.f64.s32 d0, s4
- |5: // d0 and d1 are numbers.
- | vcmp.f64 d0, d1
- | vmrs
- | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
- if (op == BC_ISLT) {
- | sublo PC, RB, #0x20000
- } else if (op == BC_ISGE) {
- | subhs PC, RB, #0x20000
- } else if (op == BC_ISLE) {
- | subls PC, RB, #0x20000
- } else {
- | subhi PC, RB, #0x20000
- }
- | b <1
- |.else
- | bhi ->vmeta_comp
- | // CARG12 is a number.
- | checktp CARG4, LJ_TISNUM
- | movlo RA, RB // Save RB.
- | blo >5
- | bhi ->vmeta_comp
- | // CARG12 is a number, CARG3 is an integer.
- | mov CARG1, CARG3
- | mov RC, RA
- | mov RA, RB // Save RB.
- | bl extern __aeabi_i2d
- | mov CARG3, CARG1
- | mov CARG4, CARG2
- | ldrd CARG12, [RC] // Restore first operand.
- | b >5
- |4: // CARG1 is an integer, CARG34 is not an integer.
- | bhi ->vmeta_comp
- | // CARG1 is an integer, CARG34 is a number.
- | mov RA, RB // Save RB.
- | bl extern __aeabi_i2d
- | ldrd CARG34, [RC] // Restore second operand.
- |5: // CARG12 and CARG34 are numbers.
- | bl extern __aeabi_cdcmple
- | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
- if (op == BC_ISLT) {
- | sublo PC, RA, #0x20000
- } else if (op == BC_ISGE) {
- | subhs PC, RA, #0x20000
- } else if (op == BC_ISLE) {
- | subls PC, RA, #0x20000
- } else {
- | subhi PC, RA, #0x20000
- }
- | b <1
- |.endif
- break;
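The condition codes used above (lo/hs/ls/hi) encode the NaN rule from the comment: the branch is taken exactly when the op's comparison is true, and with a NaN operand the plain comparisons are false while the negated forms are true. A hedged C restatement of the four branch conditions (illustrative helpers, not LuaJIT code):

    static int islt(double x, double y) { return x < y; }      /* lo: false if NaN */
    static int isge(double x, double y) { return !(x < y); }   /* hs: true if NaN  */
    static int isle(double x, double y) { return x <= y; }     /* ls: false if NaN */
    static int isgt(double x, double y) { return !(x <= y); }  /* hi: true if NaN  */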
-
- case BC_ISEQV: case BC_ISNEV:
- vk = op == BC_ISEQV;
- | // RA = src1*8, RC = src2, JMP with RC = target
- | lsl RC, RC, #3
- | ldrd CARG12, [RA, BASE]!
- | ldrh RB, [PC, #2]
- | ldrd CARG34, [RC, BASE]!
- | add PC, PC, #4
- | add RB, PC, RB, lsl #2
- | checktp CARG2, LJ_TISNUM
- | cmnls CARG4, #-LJ_TISNUM
- if (vk) {
- | bls ->BC_ISEQN_Z
- } else {
- | bls ->BC_ISNEN_Z
- }
- | // Either or both types are not numbers.
- |.if FFI
- | checktp CARG2, LJ_TCDATA
- | checktpne CARG4, LJ_TCDATA
- | beq ->vmeta_equal_cd
- |.endif
- | cmp CARG2, CARG4 // Compare types.
- | bne >2 // Not the same type?
- | checktp CARG2, LJ_TISPRI
- | bhs >1 // Same type and primitive type?
- |
- | // Same types and not a primitive type. Compare GCobj or pvalue.
- | cmp CARG1, CARG3
- if (vk) {
- | bne >3 // Different GCobjs or pvalues?
- |1: // Branch if same.
- | sub PC, RB, #0x20000
- |2: // Different.
- | ins_next
- |3:
- | checktp CARG2, LJ_TISTABUD
- | bhi <2 // Different objects and not table/ud?
- } else {
- | beq >1 // Same GCobjs or pvalues?
- | checktp CARG2, LJ_TISTABUD
- | bhi >2 // Different objects and not table/ud?
- }
- | // Different tables or userdatas. Need to check __eq metamethod.
- | // Field metatable must be at same offset for GCtab and GCudata!
- | ldr TAB:RA, TAB:CARG1->metatable
- | cmp TAB:RA, #0
- if (vk) {
- | beq <2 // No metatable?
- } else {
- | beq >2 // No metatable?
- }
- | ldrb RA, TAB:RA->nomm
- | mov CARG4, #1-vk // ne = 0 or 1.
- | mov CARG2, CARG1
- | tst RA, #1<<MM_eq
- | beq ->vmeta_equal // 'no __eq' flag not set?
- if (vk) {
- | b <2
- } else {
- |2: // Branch if different.
- | sub PC, RB, #0x20000
- |1: // Same.
- | ins_next
- }
- break;
-
- case BC_ISEQS: case BC_ISNES:
- vk = op == BC_ISEQS;
- | // RA = src*8, RC = str_const (~), JMP with RC = target
- | mvn RC, RC
- | ldrd CARG12, [BASE, RA]
- | ldrh RB, [PC, #2]
- | ldr STR:CARG3, [KBASE, RC, lsl #2]
- | add PC, PC, #4
- | add RB, PC, RB, lsl #2
- | checktp CARG2, LJ_TSTR
- |.if FFI
- | bne >7
- | cmp CARG1, CARG3
- |.else
- | cmpeq CARG1, CARG3
- |.endif
- if (vk) {
- | subeq PC, RB, #0x20000
- |1:
- } else {
- |1:
- | subne PC, RB, #0x20000
- }
- | ins_next
- |
- |.if FFI
- |7:
- | checktp CARG2, LJ_TCDATA
- | bne <1
- | b ->vmeta_equal_cd
- |.endif
- break;
-
- case BC_ISEQN: case BC_ISNEN:
- vk = op == BC_ISEQN;
- | // RA = src*8, RC = num_const (~), JMP with RC = target
- | lsl RC, RC, #3
- | ldrd CARG12, [RA, BASE]!
- | ldrh RB, [PC, #2]
- | ldrd CARG34, [RC, KBASE]!
- | add PC, PC, #4
- | add RB, PC, RB, lsl #2
- if (vk) {
- |->BC_ISEQN_Z:
- } else {
- |->BC_ISNEN_Z:
- }
- | checktp CARG2, LJ_TISNUM
- | bne >3
- | checktp CARG4, LJ_TISNUM
- | bne >4
- | cmp CARG1, CARG3
- if (vk) {
- | subeq PC, RB, #0x20000
- |1:
- } else {
- |1:
- | subne PC, RB, #0x20000
- }
- |2:
- | ins_next
- |
- |3: // CARG12 is not an integer.
- |.if FFI
- | bhi >7
- |.else
- if (!vk) {
- | subhi PC, RB, #0x20000
- }
- | bhi <2
- |.endif
- |.if FPU
- | checktp CARG4, LJ_TISNUM
- | vmov s4, CARG3
- | vldr d0, [RA]
- | vldrlo d1, [RC]
- | vcvths.f64.s32 d1, s4
- | b >5
- |4: // CARG1 is an integer, d1 is a number.
- | vmov s4, CARG1
- | vldr d1, [RC]
- | vcvt.f64.s32 d0, s4
- |5: // d0 and d1 are numbers.
- | vcmp.f64 d0, d1
- | vmrs
- if (vk) {
- | subeq PC, RB, #0x20000
- } else {
- | subne PC, RB, #0x20000
- }
- | b <2
- |.else
- | // CARG12 is a number.
- | checktp CARG4, LJ_TISNUM
- | movlo RA, RB // Save RB.
- | blo >5
- | // CARG12 is a number, CARG3 is an integer.
- | mov CARG1, CARG3
- | mov RC, RA
- |4: // CARG1 is an integer, CARG34 is a number.
- | mov RA, RB // Save RB.
- | bl extern __aeabi_i2d
- | ldrd CARG34, [RC] // Restore other operand.
- |5: // CARG12 and CARG34 are numbers.
- | bl extern __aeabi_cdcmpeq
- if (vk) {
- | subeq PC, RA, #0x20000
- } else {
- | subne PC, RA, #0x20000
- }
- | b <2
- |.endif
- |
- |.if FFI
- |7:
- | checktp CARG2, LJ_TCDATA
- | bne <1
- | b ->vmeta_equal_cd
- |.endif
- break;
-
- case BC_ISEQP: case BC_ISNEP:
- vk = op == BC_ISEQP;
- | // RA = src*8, RC = primitive_type (~), JMP with RC = target
- | ldrd CARG12, [BASE, RA]
- | ldrh RB, [PC, #2]
- | add PC, PC, #4
- | mvn RC, RC
- | add RB, PC, RB, lsl #2
- |.if FFI
- | checktp CARG2, LJ_TCDATA
- | beq ->vmeta_equal_cd
- |.endif
- | cmp CARG2, RC
- if (vk) {
- | subeq PC, RB, #0x20000
- } else {
- | subne PC, RB, #0x20000
- }
- | ins_next
- break;
-
- /* -- Unary test and copy ops ------------------------------------------- */
-
- case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
- | // RA = dst*8 or unused, RC = src, JMP with RC = target
- | add RC, BASE, RC, lsl #3
- | ldrh RB, [PC, #2]
- | ldrd CARG12, [RC]
- | add PC, PC, #4
- | add RB, PC, RB, lsl #2
- | checktp CARG2, LJ_TTRUE
- if (op == BC_ISTC || op == BC_IST) {
- | subls PC, RB, #0x20000
- if (op == BC_ISTC) {
- | strdls CARG12, [BASE, RA]
- }
- } else {
- | subhi PC, RB, #0x20000
- if (op == BC_ISFC) {
- | strdhi CARG12, [BASE, RA]
- }
- }
- | ins_next
- break;
-
- /* -- Unary ops --------------------------------------------------------- */
-
- case BC_MOV:
- | // RA = dst*8, RC = src
- | lsl RC, RC, #3
- | ins_next1
- | ldrd CARG12, [BASE, RC]
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- break;
- case BC_NOT:
- | // RA = dst*8, RC = src
- | add RC, BASE, RC, lsl #3
- | ins_next1
- | ldr CARG1, [RC, #4]
- | add RA, BASE, RA
- | ins_next2
- | checktp CARG1, LJ_TTRUE
- | mvnls CARG2, #~LJ_TFALSE
- | mvnhi CARG2, #~LJ_TTRUE
- | str CARG2, [RA, #4]
- | ins_next3
- break;
- case BC_UNM:
- | // RA = dst*8, RC = src
- | lsl RC, RC, #3
- | ldrd CARG12, [BASE, RC]
- | ins_next1
- | ins_next2
- | checktp CARG2, LJ_TISNUM
- | bhi ->vmeta_unm
- | eorne CARG2, CARG2, #0x80000000
- | bne >5
- | rsbseq CARG1, CARG1, #0
- | ldrdvs CARG12, >9
- |5:
- | strd CARG12, [BASE, RA]
- | ins_next3
- |
- |.align 8
- |9:
- | .long 0x00000000, 0x41e00000 // 2^31.
- break;
- case BC_LEN:
- | // RA = dst*8, RC = src
- | lsl RC, RC, #3
- | ldrd CARG12, [BASE, RC]
- | checkstr CARG2, >2
- | ldr CARG1, STR:CARG1->len
- |1:
- | mvn CARG2, #~LJ_TISNUM
- | ins_next1
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- |2:
- | checktab CARG2, ->vmeta_len
-#if LJ_52
- | ldr TAB:CARG3, TAB:CARG1->metatable
- | cmp TAB:CARG3, #0
- | bne >9
- |3:
-#endif
- |->BC_LEN_Z:
- | .IOS mov RC, BASE
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | .IOS mov BASE, RC
- | b <1
-#if LJ_52
- |9:
- | ldrb CARG4, TAB:CARG3->nomm
- | tst CARG4, #1<<MM_len
- | bne <3 // 'no __len' flag set: done.
- | b ->vmeta_len
-#endif
- break;
-
- /* -- Binary ops -------------------------------------------------------- */
-
- |.macro ins_arithcheck, cond, ncond, target
- ||if (vk == 1) {
- | cmn CARG4, #-LJ_TISNUM
- | cmn..cond CARG2, #-LJ_TISNUM
- ||} else {
- | cmn CARG2, #-LJ_TISNUM
- | cmn..cond CARG4, #-LJ_TISNUM
- ||}
- | b..ncond target
- |.endmacro
- |.macro ins_arithcheck_int, target
- | ins_arithcheck eq, ne, target
- |.endmacro
- |.macro ins_arithcheck_num, target
- | ins_arithcheck lo, hs, target
- |.endmacro
- |
- |.macro ins_arithpre
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- ||switch (vk) {
- ||case 0:
- | .if FPU
- | ldrd CARG12, [RB, BASE]!
- | ldrd CARG34, [RC, KBASE]!
- | .else
- | ldrd CARG12, [BASE, RB]
- | ldrd CARG34, [KBASE, RC]
- | .endif
- || break;
- ||case 1:
- | .if FPU
- | ldrd CARG34, [RB, BASE]!
- | ldrd CARG12, [RC, KBASE]!
- | .else
- | ldrd CARG34, [BASE, RB]
- | ldrd CARG12, [KBASE, RC]
- | .endif
- || break;
- ||default:
- | .if FPU
- | ldrd CARG12, [RB, BASE]!
- | ldrd CARG34, [RC, BASE]!
- | .else
- | ldrd CARG12, [BASE, RB]
- | ldrd CARG34, [BASE, RC]
- | .endif
- || break;
- ||}
- |.endmacro
- |
- |.macro ins_arithpre_fpu, reg1, reg2
- |.if FPU
- ||if (vk == 1) {
- | vldr reg2, [RB]
- | vldr reg1, [RC]
- ||} else {
- | vldr reg1, [RB]
- | vldr reg2, [RC]
- ||}
- |.endif
- |.endmacro
- |
- |.macro ins_arithpost_fpu, reg
- | ins_next1
- | add RA, BASE, RA
- | ins_next2
- | vstr reg, [RA]
- | ins_next3
- |.endmacro
- |
- |.macro ins_arithfallback, ins
- ||switch (vk) {
- ||case 0:
- | ins ->vmeta_arith_vn
- || break;
- ||case 1:
- | ins ->vmeta_arith_nv
- || break;
- ||default:
- | ins ->vmeta_arith_vv
- || break;
- ||}
- |.endmacro
- |
- |.macro ins_arithdn, intins, fpins, fpcall
- | ins_arithpre
- |.if "intins" ~= "vm_modi" and not FPU
- | ins_next1
- |.endif
- | ins_arithcheck_int >5
- |.if "intins" == "smull"
- | smull CARG1, RC, CARG3, CARG1
- | cmp RC, CARG1, asr #31
- | ins_arithfallback bne
- |.elif "intins" == "vm_modi"
- | movs CARG2, CARG3
- | ins_arithfallback beq
- | bl ->vm_modi
- | mvn CARG2, #~LJ_TISNUM
- |.else
- | intins CARG1, CARG1, CARG3
- | ins_arithfallback bvs
- |.endif
- |4:
- |.if "intins" == "vm_modi" or FPU
- | ins_next1
- |.endif
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- |5: // FP variant.
- | ins_arithpre_fpu d6, d7
- | ins_arithfallback ins_arithcheck_num
- |.if FPU
- |.if "intins" == "vm_modi"
- | bl fpcall
- |.else
- | fpins d6, d6, d7
- |.endif
- | ins_arithpost_fpu d6
- |.else
- | bl fpcall
- |.if "intins" ~= "vm_modi"
- | ins_next1
- |.endif
- | b <4
- |.endif
- |.endmacro
- |
- |.macro ins_arithfp, fpins, fpcall
- | ins_arithpre
- |.if "fpins" ~= "extern" or HFABI
- | ins_arithpre_fpu d0, d1
- |.endif
- | ins_arithfallback ins_arithcheck_num
- |.if "fpins" == "extern"
- | .IOS mov RC, BASE
- | bl fpcall
- | .IOS mov BASE, RC
- |.elif FPU
- | fpins d0, d0, d1
- |.else
- | bl fpcall
- |.endif
- |.if ("fpins" ~= "extern" or HFABI) and FPU
- | ins_arithpost_fpu d0
- |.else
- | ins_next1
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- |.endif
- |.endmacro
-
- case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
- | ins_arithdn adds, vadd.f64, extern __aeabi_dadd
- break;
- case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
- | ins_arithdn subs, vsub.f64, extern __aeabi_dsub
- break;
- case BC_MULVN: case BC_MULNV: case BC_MULVV:
- | ins_arithdn smull, vmul.f64, extern __aeabi_dmul
- break;
- case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
- | ins_arithfp vdiv.f64, extern __aeabi_ddiv
- break;
- case BC_MODVN: case BC_MODNV: case BC_MODVV:
- | ins_arithdn vm_modi, vm_mod, ->vm_mod
- break;
- case BC_POW:
- | // NYI: (partial) integer arithmetic.
- | ins_arithfp extern, extern pow
- break;
-
- case BC_CAT:
- | decode_RB8 RC, INS
- | decode_RC8 RB, INS
- | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!)
- | sub CARG3, RB, RC
- | str BASE, L->base
- | add CARG2, BASE, RB
- |->BC_CAT_Z:
- | // RA = dst*8, RC = src_start*8, CARG2 = top-1
- | mov CARG1, L
- | str PC, SAVE_PC
- | lsr CARG3, CARG3, #3
- | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
- | // Returns NULL (finished) or TValue * (metamethod).
- | ldr BASE, L->base
- | cmp CRET1, #0
- | bne ->vmeta_binop
- | ldrd CARG34, [BASE, RC]
- | ins_next1
- | ins_next2
- | strd CARG34, [BASE, RA] // Copy result to RA.
- | ins_next3
- break;
-
- /* -- Constant ops ------------------------------------------------------ */
-
- case BC_KSTR:
- | // RA = dst*8, RC = str_const (~)
- | mvn RC, RC
- | ins_next1
- | ldr CARG1, [KBASE, RC, lsl #2]
- | mvn CARG2, #~LJ_TSTR
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- break;
- case BC_KCDATA:
- |.if FFI
- | // RA = dst*8, RC = cdata_const (~)
- | mvn RC, RC
- | ins_next1
- | ldr CARG1, [KBASE, RC, lsl #2]
- | mvn CARG2, #~LJ_TCDATA
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- |.endif
- break;
- case BC_KSHORT:
- | // RA = dst*8, (RC = int16_literal)
- | mov CARG1, INS, asr #16 // Refetch sign-extended reg.
- | mvn CARG2, #~LJ_TISNUM
- | ins_next1
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- break;
- case BC_KNUM:
- | // RA = dst*8, RC = num_const
- | lsl RC, RC, #3
- | ins_next1
- | ldrd CARG12, [KBASE, RC]
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- break;
- case BC_KPRI:
- | // RA = dst*8, RC = primitive_type (~)
- | add RA, BASE, RA
- | mvn RC, RC
- | ins_next1
- | ins_next2
- | str RC, [RA, #4]
- | ins_next3
- break;
- case BC_KNIL:
- | // RA = base*8, RC = end
- | add RA, BASE, RA
- | add RC, BASE, RC, lsl #3
- | mvn CARG1, #~LJ_TNIL
- | str CARG1, [RA, #4]
- | add RA, RA, #8
- |1:
- | str CARG1, [RA, #4]
- | cmp RA, RC
- | add RA, RA, #8
- | blt <1
- | ins_next_
- break;
-
- /* -- Upvalue and function ops ------------------------------------------ */
-
- case BC_UGET:
- | // RA = dst*8, RC = uvnum
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | lsl RC, RC, #2
- | add RC, RC, #offsetof(GCfuncL, uvptr)
- | ldr UPVAL:CARG2, [LFUNC:CARG2, RC]
- | ldr CARG2, UPVAL:CARG2->v
- | ldrd CARG34, [CARG2]
- | ins_next1
- | ins_next2
- | strd CARG34, [BASE, RA]
- | ins_next3
- break;
- case BC_USETV:
- | // RA = uvnum*8, RC = src
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | lsr RA, RA, #1
- | add RA, RA, #offsetof(GCfuncL, uvptr)
- | lsl RC, RC, #3
- | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
- | ldrd CARG34, [BASE, RC]
- | ldrb RB, UPVAL:CARG2->marked
- | ldrb RC, UPVAL:CARG2->closed
- | ldr CARG2, UPVAL:CARG2->v
- | tst RB, #LJ_GC_BLACK // isblack(uv)
- | add RB, CARG4, #-LJ_TISGCV
- | cmpne RC, #0
- | strd CARG34, [CARG2]
- | bne >2 // Upvalue is closed and black?
- |1:
- | ins_next
- |
- |2: // Check if new value is collectable.
- | cmn RB, #-(LJ_TNUMX - LJ_TISGCV)
- | ldrbhi RC, GCOBJ:CARG3->gch.marked
- | bls <1 // tvisgcv(v)
- | sub CARG1, DISPATCH, #-GG_DISP2G
- | tst RC, #LJ_GC_WHITES
- | // Crossed a write barrier. Move the barrier forward.
- |.if IOS
- | beq <1
- | mov RC, BASE
- | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- | mov BASE, RC
- |.else
- | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- |.endif
- | b <1
- break;
- case BC_USETS:
- | // RA = uvnum*8, RC = str_const (~)
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | lsr RA, RA, #1
- | add RA, RA, #offsetof(GCfuncL, uvptr)
- | mvn RC, RC
- | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
- | ldr STR:CARG3, [KBASE, RC, lsl #2]
- | ldrb RB, UPVAL:CARG2->marked
- | ldrb RC, UPVAL:CARG2->closed
- | ldr CARG2, UPVAL:CARG2->v
- | mvn CARG4, #~LJ_TSTR
- | tst RB, #LJ_GC_BLACK // isblack(uv)
- | ldrb RB, STR:CARG3->marked
- | strd CARG34, [CARG2]
- | bne >2
- |1:
- | ins_next
- |
- |2: // Check if string is white and ensure upvalue is closed.
- | tst RB, #LJ_GC_WHITES // iswhite(str)
- | cmpne RC, #0
- | sub CARG1, DISPATCH, #-GG_DISP2G
- | // Crossed a write barrier. Move the barrier forward.
- |.if IOS
- | beq <1
- | mov RC, BASE
- | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- | mov BASE, RC
- |.else
- | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- |.endif
- | b <1
- break;
- case BC_USETN:
- | // RA = uvnum*8, RC = num_const
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | lsr RA, RA, #1
- | add RA, RA, #offsetof(GCfuncL, uvptr)
- | lsl RC, RC, #3
- | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
- | ldrd CARG34, [KBASE, RC]
- | ldr CARG2, UPVAL:CARG2->v
- | ins_next1
- | ins_next2
- | strd CARG34, [CARG2]
- | ins_next3
- break;
- case BC_USETP:
- | // RA = uvnum*8, RC = primitive_type (~)
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | lsr RA, RA, #1
- | add RA, RA, #offsetof(GCfuncL, uvptr)
- | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
- | mvn RC, RC
- | ldr CARG2, UPVAL:CARG2->v
- | ins_next1
- | ins_next2
- | str RC, [CARG2, #4]
- | ins_next3
- break;
-
- case BC_UCLO:
- | // RA = level*8, RC = target
- | ldr CARG3, L->openupval
- | add RC, PC, RC, lsl #2
- | str BASE, L->base
- | cmp CARG3, #0
- | sub PC, RC, #0x20000
- | beq >1
- | mov CARG1, L
- | add CARG2, BASE, RA
- | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
- | ldr BASE, L->base
- |1:
- | ins_next
- break;
-
- case BC_FNEW:
- | // RA = dst*8, RC = proto_const (~) (holding function prototype)
- | mvn RC, RC
- | str BASE, L->base
- | ldr CARG2, [KBASE, RC, lsl #2]
- | str PC, SAVE_PC
- | ldr CARG3, [BASE, FRAME_FUNC]
- | mov CARG1, L
- | // (lua_State *L, GCproto *pt, GCfuncL *parent)
- | bl extern lj_func_newL_gc
- | // Returns GCfuncL *.
- | ldr BASE, L->base
- | mvn CARG2, #~LJ_TFUNC
- | ins_next1
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- break;
-
- /* -- Table ops --------------------------------------------------------- */
-
- case BC_TNEW:
- case BC_TDUP:
- | // RA = dst*8, RC = (hbits|asize) | tab_const (~)
- if (op == BC_TDUP) {
- | mvn RC, RC
- }
- | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)]
- | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)]
- | str BASE, L->base
- | str PC, SAVE_PC
- | cmp CARG3, CARG4
- | mov CARG1, L
- | bhs >5
- |1:
- if (op == BC_TNEW) {
- | lsl CARG2, RC, #21
- | lsr CARG3, RC, #11
- | asr RC, CARG2, #21
- | lsr CARG2, CARG2, #21
- | cmn RC, #1
- | addeq CARG2, CARG2, #2
- | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
- | // Returns GCtab *.
- } else {
- | ldr CARG2, [KBASE, RC, lsl #2]
- | bl extern lj_tab_dup // (lua_State *L, Table *kt)
- | // Returns GCtab *.
- }
- | ldr BASE, L->base
- | mvn CARG2, #~LJ_TTAB
- | ins_next1
- | ins_next2
- | strd CARG12, [BASE, RA]
- | ins_next3
- |5:
- | bl extern lj_gc_step_fixtop // (lua_State *L)
- | mov CARG1, L
- | b <1
- break;
-
- case BC_GGET:
- | // RA = dst*8, RC = str_const (~)
- case BC_GSET:
- | // RA = dst*8, RC = str_const (~)
- | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
- | mvn RC, RC
- | ldr TAB:CARG1, LFUNC:CARG2->env
- | ldr STR:RC, [KBASE, RC, lsl #2]
- if (op == BC_GGET) {
- | b ->BC_TGETS_Z
- } else {
- | b ->BC_TSETS_Z
- }
- break;
-
- case BC_TGETV:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | // RA = dst*8, RB = table*8, RC = key*8
- | ldrd TAB:CARG12, [BASE, RB]
- | ldrd CARG34, [BASE, RC]
- | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12.
- | checktp CARG4, LJ_TISNUM // Integer key?
- | ldreq CARG4, TAB:CARG1->array
- | ldreq CARG2, TAB:CARG1->asize
- | bne >9
- |
- | add CARG4, CARG4, CARG3, lsl #3
- | cmp CARG3, CARG2 // In array part?
- | ldrdlo CARG34, [CARG4]
- | bhs ->vmeta_tgetv
- | ins_next1 // Overwrites RB!
- | checktp CARG4, LJ_TNIL
- | beq >5
- |1:
- | ins_next2
- | strd CARG34, [BASE, RA]
- | ins_next3
- |
- |5: // Check for __index if table value is nil.
- | ldr TAB:CARG2, TAB:CARG1->metatable
- | cmp TAB:CARG2, #0
- | beq <1 // No metatable: done.
- | ldrb CARG2, TAB:CARG2->nomm
- | tst CARG2, #1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | decode_RB8 RB, INS // Restore RB.
- | b ->vmeta_tgetv
- |
- |9:
- | checktp CARG4, LJ_TSTR // String key?
- | moveq STR:RC, CARG3
- | beq ->BC_TGETS_Z
- | b ->vmeta_tgetv
- break;
- case BC_TGETS:
- | decode_RB8 RB, INS
- | and RC, RC, #255
- | // RA = dst*8, RB = table*8, RC = str_const (~)
- | ldrd CARG12, [BASE, RB]
- | mvn RC, RC
- | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
- | checktab CARG2, ->vmeta_tgets1
- |->BC_TGETS_Z:
- | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
- | ldr CARG3, TAB:CARG1->hmask
- | ldr CARG4, STR:RC->hash
- | ldr NODE:INS, TAB:CARG1->node
- | mov TAB:RB, TAB:CARG1
- | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
- | add CARG3, CARG3, CARG3, lsl #1
- | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
- |1:
- | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS.
- | ldrd CARG34, NODE:INS->val
- | ldr NODE:INS, NODE:INS->next
- | checktp CARG2, LJ_TSTR
- | cmpeq CARG1, STR:RC
- | bne >4
- | checktp CARG4, LJ_TNIL
- | beq >5
- |3:
- | ins_next1
- | ins_next2
- | strd CARG34, [BASE, RA]
- | ins_next3
- |
- |4: // Follow hash chain.
- | cmp NODE:INS, #0
- | bne <1
- | // End of hash chain: key not found, nil result.
- |
- |5: // Check for __index if table value is nil.
- | ldr TAB:CARG1, TAB:RB->metatable
- | mov CARG3, #0 // Optional clear of undef. value (during load stall).
- | mvn CARG4, #~LJ_TNIL
- | cmp TAB:CARG1, #0
- | beq <3 // No metatable: done.
- | ldrb CARG2, TAB:CARG1->nomm
- | tst CARG2, #1<<MM_index
- | bne <3 // 'no __index' flag set: done.
- | b ->vmeta_tgets
- break;
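The hash-part lookup in BC_TGETS/BC_TGETS_Z above can be read as the following C sketch (simplified, hypothetical types; interned strings compare by pointer, which is what the cmpeq against STR:RC relies on):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct SkStr { uint32_t hash; } SkStr;   /* Interned string. */
    typedef struct SkNode {
      const SkStr *key;        /* Simplified: string keys only. */
      int val;                 /* Simplified value slot. */
      struct SkNode *next;     /* Hash chain. */
    } SkNode;
    typedef struct SkTab {
      SkNode *node;            /* Hash part: hmask+1 nodes. */
      uint32_t hmask;          /* Power-of-two size minus one. */
    } SkTab;

    static SkNode *tab_getstr(SkTab *t, const SkStr *key)
    {
      SkNode *n = &t->node[key->hash & t->hmask];  /* idx = hash & hmask */
      do {
        if (n->key == key) return n;   /* Pointer compare suffices. */
        n = n->next;
      } while (n != NULL);
      return NULL;                     /* End of chain: key not found. */
    }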
- case BC_TGETB:
- | decode_RB8 RB, INS
- | and RC, RC, #255
- | // RA = dst*8, RB = table*8, RC = index
- | ldrd CARG12, [BASE, RB]
- | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12.
- | ldr CARG3, TAB:CARG1->asize
- | ldr CARG4, TAB:CARG1->array
- | lsl CARG2, RC, #3
- | cmp RC, CARG3
- | ldrdlo CARG34, [CARG4, CARG2]
- | bhs ->vmeta_tgetb
- | ins_next1 // Overwrites RB!
- | checktp CARG4, LJ_TNIL
- | beq >5
- |1:
- | ins_next2
- | strd CARG34, [BASE, RA]
- | ins_next3
- |
- |5: // Check for __index if table value is nil.
- | ldr TAB:CARG2, TAB:CARG1->metatable
- | cmp TAB:CARG2, #0
- | beq <1 // No metatable: done.
- | ldrb CARG2, TAB:CARG2->nomm
- | tst CARG2, #1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetb
- break;
-
- case BC_TSETV:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | // RA = src*8, RB = table*8, RC = key*8
- | ldrd TAB:CARG12, [BASE, RB]
- | ldrd CARG34, [BASE, RC]
- | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12.
- | checktp CARG4, LJ_TISNUM // Integer key?
- | ldreq CARG2, TAB:CARG1->array
- | ldreq CARG4, TAB:CARG1->asize
- | bne >9
- |
- | add CARG2, CARG2, CARG3, lsl #3
- | cmp CARG3, CARG4 // In array part?
- | ldrlo INS, [CARG2, #4]
- | bhs ->vmeta_tsetv
- | ins_next1 // Overwrites RB!
- | checktp INS, LJ_TNIL
- | ldrb INS, TAB:CARG1->marked
- | ldrd CARG34, [BASE, RA]
- | beq >5
- |1:
- | tst INS, #LJ_GC_BLACK // isblack(table)
- | strd CARG34, [CARG2]
- | bne >7
- |2:
- | ins_next2
- | ins_next3
- |
- |5: // Check for __newindex if previous value is nil.
- | ldr TAB:RA, TAB:CARG1->metatable
- | cmp TAB:RA, #0
- | beq <1 // No metatable: done.
- | ldrb RA, TAB:RA->nomm
- | tst RA, #1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | ldr INS, [PC, #-4] // Restore RA and RB.
- | decode_RB8 RB, INS
- | decode_RA8 RA, INS
- | b ->vmeta_tsetv
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:CARG1, INS, CARG3
- | b <2
- |
- |9:
- | checktp CARG4, LJ_TSTR // String key?
- | moveq STR:RC, CARG3
- | beq ->BC_TSETS_Z
- | b ->vmeta_tsetv
- break;
- case BC_TSETS:
- | decode_RB8 RB, INS
- | and RC, RC, #255
- | // RA = src*8, RB = table*8, RC = str_const (~)
- | ldrd CARG12, [BASE, RB]
- | mvn RC, RC
- | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
- | checktab CARG2, ->vmeta_tsets1
- |->BC_TSETS_Z:
- | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
- | ldr CARG3, TAB:CARG1->hmask
- | ldr CARG4, STR:RC->hash
- | ldr NODE:INS, TAB:CARG1->node
- | mov TAB:RB, TAB:CARG1
- | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
- | add CARG3, CARG3, CARG3, lsl #1
- | mov CARG4, #0
- | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
- | strb CARG4, TAB:RB->nomm // Clear metamethod cache.
- |1:
- | ldrd CARG12, NODE:INS->key
- | ldr CARG4, NODE:INS->val.it
- | ldr NODE:CARG3, NODE:INS->next
- | checktp CARG2, LJ_TSTR
- | cmpeq CARG1, STR:RC
- | bne >5
- | ldrb CARG2, TAB:RB->marked
- | checktp CARG4, LJ_TNIL // Key found, but nil value?
- | ldrd CARG34, [BASE, RA]
- | beq >4
- |2:
- | tst CARG2, #LJ_GC_BLACK // isblack(table)
- | strd CARG34, NODE:INS->val
- | bne >7
- |3:
- | ins_next
- |
- |4: // Check for __newindex if previous value is nil.
- | ldr TAB:CARG1, TAB:RB->metatable
- | cmp TAB:CARG1, #0
- | beq <2 // No metatable: done.
- | ldrb CARG1, TAB:CARG1->nomm
- | tst CARG1, #1<<MM_newindex
- | bne <2 // 'no __newindex' flag set: done.
- | b ->vmeta_tsets
- |
- |5: // Follow hash chain.
- | movs NODE:INS, NODE:CARG3
- | bne <1
- | // End of hash chain: key not found, add a new one.
- |
- | // But check for __newindex first.
- | ldr TAB:CARG1, TAB:RB->metatable
- | mov CARG3, TMPDp
- | str PC, SAVE_PC
- | cmp TAB:CARG1, #0 // No metatable: continue.
- | str BASE, L->base
- | ldrbne CARG2, TAB:CARG1->nomm
- | mov CARG1, L
- | beq >6
- | tst CARG2, #1<<MM_newindex
- | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
- |6:
- | mvn CARG4, #~LJ_TSTR
- | str STR:RC, TMPDlo
- | mov CARG2, TAB:RB
- | str CARG4, TMPDhi
- | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
- | // Returns TValue *.
- | ldr BASE, L->base
- | ldrd CARG34, [BASE, RA]
- | strd CARG34, [CRET1]
- | b <3 // No 2nd write barrier needed.
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, CARG2, CARG3
- | b <3
- break;
- case BC_TSETB:
- | decode_RB8 RB, INS
- | and RC, RC, #255
- | // RA = src*8, RB = table*8, RC = index
- | ldrd CARG12, [BASE, RB]
- | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12.
- | ldr CARG3, TAB:CARG1->asize
- | ldr RB, TAB:CARG1->array
- | lsl CARG2, RC, #3
- | cmp RC, CARG3
- | ldrdlo CARG34, [CARG2, RB]!
- | bhs ->vmeta_tsetb
- | ins_next1 // Overwrites RB!
- | checktp CARG4, LJ_TNIL
- | ldrb INS, TAB:CARG1->marked
- | ldrd CARG34, [BASE, RA]
- | beq >5
- |1:
- | tst INS, #LJ_GC_BLACK // isblack(table)
- | strd CARG34, [CARG2]
- | bne >7
- |2:
- | ins_next2
- | ins_next3
- |
- |5: // Check for __newindex if previous value is nil.
- | ldr TAB:RA, TAB:CARG1->metatable
- | cmp TAB:RA, #0
- | beq <1 // No metatable: done.
- | ldrb RA, TAB:RA->nomm
- | tst RA, #1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | ldr INS, [PC, #-4] // Restore INS.
- | decode_RA8 RA, INS
- | b ->vmeta_tsetb
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:CARG1, INS, CARG3
- | b <2
- break;
-
- case BC_TSETM:
- | // RA = base*8 (table at base-1), RC = num_const (start index)
- | add RA, BASE, RA
- |1:
- | ldr RB, SAVE_MULTRES
- | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
- | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
- | subs RB, RB, #8
- | ldr CARG4, TAB:CARG2->asize
- | beq >4 // Nothing to copy?
- | add CARG3, CARG1, RB, lsr #3
- | cmp CARG3, CARG4
- | ldr CARG4, TAB:CARG2->array
- | add RB, RA, RB
- | bhi >5
- | add INS, CARG4, CARG1, lsl #3
- | ldrb CARG1, TAB:CARG2->marked
- |3: // Copy result slots to table.
- | ldrd CARG34, [RA], #8
- | strd CARG34, [INS], #8
- | cmp RA, RB
- | blo <3
- | tst CARG1, #LJ_GC_BLACK // isblack(table)
- | bne >7
- |4:
- | ins_next
- |
- |5: // Need to resize array part.
- | str BASE, L->base
- | mov CARG1, L
- | str PC, SAVE_PC
- | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
- | // Must not reallocate the stack.
- | .IOS ldr BASE, L->base
- | b <1
- |
- |7: // Possible table write barrier for any value. Skip valiswhite check.
- | barrierback TAB:CARG2, CARG1, CARG3
- | b <4
- break;
-
- /* -- Calls and vararg handling ----------------------------------------- */
-
- case BC_CALLM:
- | // RA = base*8, (RB = nresults+1,) RC = extra_nargs
- | ldr CARG1, SAVE_MULTRES
- | decode_RC8 NARGS8:RC, INS
- | add NARGS8:RC, NARGS8:RC, CARG1
- | b ->BC_CALL_Z
- break;
- case BC_CALL:
- | decode_RC8 NARGS8:RC, INS
- | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8
- |->BC_CALL_Z:
- | mov RB, BASE // Save old BASE for vmeta_call.
- | ldrd CARG34, [BASE, RA]!
- | sub NARGS8:RC, NARGS8:RC, #8
- | add BASE, BASE, #8
- | checkfunc CARG4, ->vmeta_call
- | ins_call
- break;
-
- case BC_CALLMT:
- | // RA = base*8, (RB = 0,) RC = extra_nargs
- | ldr CARG1, SAVE_MULTRES
- | add NARGS8:RC, CARG1, RC, lsl #3
- | b ->BC_CALLT1_Z
- break;
- case BC_CALLT:
- | lsl NARGS8:RC, RC, #3
- | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
- |->BC_CALLT1_Z:
- | ldrd LFUNC:CARG34, [RA, BASE]!
- | sub NARGS8:RC, NARGS8:RC, #8
- | add RA, RA, #8
- | checkfunc CARG4, ->vmeta_callt
- | ldr PC, [BASE, FRAME_PC]
- |->BC_CALLT2_Z:
- | mov RB, #0
- | ldrb CARG4, LFUNC:CARG3->ffid
- | tst PC, #FRAME_TYPE
- | bne >7
- |1:
- | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
- | cmp NARGS8:RC, #0
- | beq >3
- |2:
- | ldrd CARG12, [RA, RB]
- | add INS, RB, #8
- | cmp INS, NARGS8:RC
- | strd CARG12, [BASE, RB]
- | mov RB, INS
- | bne <2
- |3:
- | cmp CARG4, #1 // (> FF_C) Calling a fast function?
- | bhi >5
- |4:
- | ins_callt
- |
- |5: // Tailcall to a fast function with a Lua frame below.
- | ldr INS, [PC, #-4]
- | decode_RA8 RA, INS
- | sub CARG1, BASE, RA
- | ldr LFUNC:CARG1, [CARG1, #-16]
- | ldr CARG1, LFUNC:CARG1->field_pc
- | ldr KBASE, [CARG1, #PC2PROTO(k)]
- | b <4
- |
- |7: // Tailcall from a vararg function.
- | eor PC, PC, #FRAME_VARG
- | tst PC, #FRAME_TYPEP // Vararg frame below?
- | movne CARG4, #0 // Clear ffid if no Lua function below.
- | bne <1
- | sub BASE, BASE, PC
- | ldr PC, [BASE, FRAME_PC]
- | tst PC, #FRAME_TYPE
- | movne CARG4, #0 // Clear ffid if no Lua function below.
- | b <1
- break;
-
- case BC_ITERC:
- | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
- | add RA, BASE, RA
- | mov RB, BASE // Save old BASE for vmeta_call.
- | ldrd CARG34, [RA, #-16]
- | ldrd CARG12, [RA, #-8]
- | add BASE, RA, #8
- | strd CARG34, [RA, #8] // Copy state.
- | strd CARG12, [RA, #16] // Copy control var.
- | // STALL: locked CARG34.
- | ldrd LFUNC:CARG34, [RA, #-24]
- | mov NARGS8:RC, #16 // Iterators get 2 arguments.
- | // STALL: load CARG34.
- | strd LFUNC:CARG34, [RA] // Copy callable.
- | checkfunc CARG4, ->vmeta_call
- | ins_call
- break;
-
- case BC_ITERN:
- | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
- |.if JIT
- | // NYI: add hotloop, record BC_ITERN.
- |.endif
- | add RA, BASE, RA
- | ldr TAB:RB, [RA, #-16]
- | ldr CARG1, [RA, #-8] // Get index from control var.
- | ldr INS, TAB:RB->asize
- | ldr CARG2, TAB:RB->array
- | add PC, PC, #4
- |1: // Traverse array part.
- | subs RC, CARG1, INS
- | add CARG3, CARG2, CARG1, lsl #3
- | bhs >5 // Index points after array part?
- | ldrd CARG34, [CARG3]
- | checktp CARG4, LJ_TNIL
- | addeq CARG1, CARG1, #1 // Skip holes in array part.
- | beq <1
- | ldrh RC, [PC, #-2]
- | mvn CARG2, #~LJ_TISNUM
- | strd CARG34, [RA, #8]
- | add RC, PC, RC, lsl #2
- | add RB, CARG1, #1
- | strd CARG12, [RA]
- | sub PC, RC, #0x20000
- | str RB, [RA, #-8] // Update control var.
- |3:
- | ins_next
- |
- |5: // Traverse hash part.
- | ldr CARG4, TAB:RB->hmask
- | ldr NODE:RB, TAB:RB->node
- |6:
- | add CARG1, RC, RC, lsl #1
- | cmp RC, CARG4 // End of iteration? Branch to ITERL+1.
- | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
- | bhi <3
- | ldrd CARG12, NODE:CARG3->val
- | checktp CARG2, LJ_TNIL
- | add RC, RC, #1
- | beq <6 // Skip holes in hash part.
- | ldrh RB, [PC, #-2]
- | add RC, RC, INS
- | ldrd CARG34, NODE:CARG3->key
- | str RC, [RA, #-8] // Update control var.
- | strd CARG12, [RA, #8]
- | add RC, PC, RB, lsl #2
- | sub PC, RC, #0x20000
- | strd CARG34, [RA]
- | b <3
- break;
-
- case BC_ISNEXT:
- | // RA = base*8, RC = target (points to ITERN)
- | add RA, BASE, RA
- | add RC, PC, RC, lsl #2
- | ldrd CFUNC:CARG12, [RA, #-24]
- | ldr CARG3, [RA, #-12]
- | ldr CARG4, [RA, #-4]
- | checktp CARG2, LJ_TFUNC
- | ldrbeq CARG1, CFUNC:CARG1->ffid
- | checktpeq CARG3, LJ_TTAB
- | checktpeq CARG4, LJ_TNIL
- | cmpeq CARG1, #FF_next_N
- | subeq PC, RC, #0x20000
- | bne >5
- | ins_next1
- | ins_next2
- | mov CARG1, #0
- | mvn CARG2, #0x00018000
- | strd CARG1, [RA, #-8] // Initialize control var.
- |1:
- | ins_next3
- |5: // Despecialize bytecode if any of the checks fail.
- | mov CARG1, #BC_JMP
- | mov OP, #BC_ITERC
- | strb CARG1, [PC, #-4]
- | sub PC, RC, #0x20000
- | strb OP, [PC] // Subsumes ins_next1.
- | ins_next2
- | b <1
- break;
-
- case BC_VARG:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
- | ldr CARG1, [BASE, FRAME_PC]
- | add RC, BASE, RC
- | add RA, BASE, RA
- | add RC, RC, #FRAME_VARG
- | add CARG4, RA, RB
- | sub CARG3, BASE, #8 // CARG3 = vtop
- | sub RC, RC, CARG1 // RC = vbase
- | // Note: RC may now be even _above_ BASE if nargs was < numparams.
- | cmp RB, #0
- | sub CARG1, CARG3, RC
- | beq >5 // Copy all varargs?
- | sub CARG4, CARG4, #16
- |1: // Copy vararg slots to destination slots.
- | cmp RC, CARG3
- | ldrdlo CARG12, [RC], #8
- | mvnhs CARG2, #~LJ_TNIL
- | cmp RA, CARG4
- | strd CARG12, [RA], #8
- | blo <1
- |2:
- | ins_next
- |
- |5: // Copy all varargs.
- | ldr CARG4, L->maxstack
- | cmp CARG1, #0
- | movle RB, #8 // MULTRES = (0+1)*8
- | addgt RB, CARG1, #8
- | add CARG2, RA, CARG1
- | str RB, SAVE_MULTRES
- | ble <2
- | cmp CARG2, CARG4
- | bhi >7
- |6:
- | ldrd CARG12, [RC], #8
- | strd CARG12, [RA], #8
- | cmp RC, CARG3
- | blo <6
- | b <2
- |
- |7: // Grow stack for varargs.
- | lsr CARG2, CARG1, #3
- | str RA, L->top
- | mov CARG1, L
- | str BASE, L->base
- | sub RC, RC, BASE // Need delta, because BASE may change.
- | str PC, SAVE_PC
- | sub RA, RA, BASE
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | ldr BASE, L->base
- | add RA, BASE, RA
- | add RC, BASE, RC
- | sub CARG3, BASE, #8
- | b <6
- break;
-
- /* -- Returns ----------------------------------------------------------- */
-
- case BC_RETM:
- | // RA = results*8, RC = extra results
- | ldr CARG1, SAVE_MULTRES
- | ldr PC, [BASE, FRAME_PC]
- | add RA, BASE, RA
- | add RC, CARG1, RC, lsl #3
- | b ->BC_RETM_Z
- break;
-
- case BC_RET:
- | // RA = results*8, RC = nresults+1
- | ldr PC, [BASE, FRAME_PC]
- | lsl RC, RC, #3
- | add RA, BASE, RA
- |->BC_RETM_Z:
- | str RC, SAVE_MULTRES
- |1:
- | ands CARG1, PC, #FRAME_TYPE
- | eor CARG2, PC, #FRAME_VARG
- | bne ->BC_RETV2_Z
- |
- |->BC_RET_Z:
- | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
- | ldr INS, [PC, #-4]
- | subs CARG4, RC, #8
- | sub CARG3, BASE, #8
- | beq >3
- |2:
- | ldrd CARG12, [RA], #8
- | add BASE, BASE, #8
- | subs CARG4, CARG4, #8
- | strd CARG12, [BASE, #-16]
- | bne <2
- |3:
- | decode_RA8 RA, INS
- | sub CARG4, CARG3, RA
- | decode_RB8 RB, INS
- | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
- |5:
- | cmp RB, RC // More results expected?
- | bhi >6
- | mov BASE, CARG4
- | ldr CARG2, LFUNC:CARG1->field_pc
- | ins_next1
- | ins_next2
- | ldr KBASE, [CARG2, #PC2PROTO(k)]
- | ins_next3
- |
- |6: // Fill up results with nil.
- | mvn CARG2, #~LJ_TNIL
- | add BASE, BASE, #8
- | add RC, RC, #8
- | str CARG2, [BASE, #-12]
- | b <5
- |
- |->BC_RETV1_Z: // Non-standard return case.
- | add RA, BASE, RA
- |->BC_RETV2_Z:
- | tst CARG2, #FRAME_TYPEP
- | bne ->vm_return
- | // Return from vararg function: relocate BASE down.
- | sub BASE, BASE, CARG2
- | ldr PC, [BASE, FRAME_PC]
- | b <1
- break;
-
- case BC_RET0: case BC_RET1:
- | // RA = results*8, RC = nresults+1
- | ldr PC, [BASE, FRAME_PC]
- | lsl RC, RC, #3
- | str RC, SAVE_MULTRES
- | ands CARG1, PC, #FRAME_TYPE
- | eor CARG2, PC, #FRAME_VARG
- | ldreq INS, [PC, #-4]
- | bne ->BC_RETV1_Z
- if (op == BC_RET1) {
- | ldrd CARG12, [BASE, RA]
- }
- | sub CARG4, BASE, #8
- | decode_RA8 RA, INS
- if (op == BC_RET1) {
- | strd CARG12, [CARG4]
- }
- | sub BASE, CARG4, RA
- | decode_RB8 RB, INS
- | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
- |5:
- | cmp RB, RC
- | bhi >6
- | ldr CARG2, LFUNC:CARG1->field_pc
- | ins_next1
- | ins_next2
- | ldr KBASE, [CARG2, #PC2PROTO(k)]
- | ins_next3
- |
- |6: // Fill up results with nil.
- | sub CARG2, CARG4, #4
- | mvn CARG3, #~LJ_TNIL
- | str CARG3, [CARG2, RC]
- | add RC, RC, #8
- | b <5
- break;
-
- /* -- Loops and branches ------------------------------------------------ */
-
- |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
- |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
- |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
- |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
-
- case BC_FORL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IFORL follows.
- break;
-
- case BC_JFORI:
- case BC_JFORL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_FORI:
- case BC_IFORL:
- | // RA = base*8, RC = target (after end of loop or start of loop)
- vk = (op == BC_IFORL || op == BC_JFORL);
- | ldrd CARG12, [RA, BASE]!
- if (op != BC_JFORL) {
- | add RC, PC, RC, lsl #2
- }
- if (!vk) {
- | ldrd CARG34, FOR_STOP
- | checktp CARG2, LJ_TISNUM
- | ldr RB, FOR_TSTEP
- | bne >5
- | checktp CARG4, LJ_TISNUM
- | ldr CARG4, FOR_STEP
- | checktpeq RB, LJ_TISNUM
- | bne ->vmeta_for
- | cmp CARG4, #0
- | blt >4
- | cmp CARG1, CARG3
- } else {
- | ldrd CARG34, FOR_STEP
- | checktp CARG2, LJ_TISNUM
- | bne >5
- | adds CARG1, CARG1, CARG3
- | ldr CARG4, FOR_STOP
- if (op == BC_IFORL) {
- | addvs RC, PC, #0x20000 // Overflow: prevent branch.
- } else {
- | bvs >2 // Overflow: do not enter mcode.
- }
- | cmp CARG3, #0
- | blt >4
- | cmp CARG1, CARG4
- }
- |1:
- if (op == BC_FORI) {
- | subgt PC, RC, #0x20000
- } else if (op == BC_JFORI) {
- | sub PC, RC, #0x20000
- | ldrhle RC, [PC, #-2]
- } else if (op == BC_IFORL) {
- | suble PC, RC, #0x20000
- }
- if (vk) {
- | strd CARG12, FOR_IDX
- }
- |2:
- | ins_next1
- | ins_next2
- | strd CARG12, FOR_EXT
- if (op == BC_JFORI || op == BC_JFORL) {
- | ble =>BC_JLOOP
- }
- |3:
- | ins_next3
- |
- |4: // Invert check for negative step.
- if (!vk) {
- | cmp CARG3, CARG1
- } else {
- | cmp CARG4, CARG1
- }
- | b <1
- |
- |5: // FP loop.
- if (!vk) {
- | cmnlo CARG4, #-LJ_TISNUM
- | cmnlo RB, #-LJ_TISNUM
- | bhs ->vmeta_for
- |.if FPU
- | vldr d0, FOR_IDX
- | vldr d1, FOR_STOP
- | cmp RB, #0
- | vstr d0, FOR_EXT
- |.else
- | cmp RB, #0
- | strd CARG12, FOR_EXT
- | blt >8
- |.endif
- } else {
- |.if FPU
- | vldr d0, FOR_IDX
- | vldr d2, FOR_STEP
- | vldr d1, FOR_STOP
- | cmp CARG4, #0
- | vadd.f64 d0, d0, d2
- |.else
- | cmp CARG4, #0
- | blt >8
- | bl extern __aeabi_dadd
- | strd CARG12, FOR_IDX
- | ldrd CARG34, FOR_STOP
- | strd CARG12, FOR_EXT
- |.endif
- }
- |6:
- |.if FPU
- | vcmpge.f64 d0, d1
- | vcmplt.f64 d1, d0
- | vmrs
- |.else
- | bl extern __aeabi_cdcmple
- |.endif
- if (vk) {
- |.if FPU
- | vstr d0, FOR_IDX
- | vstr d0, FOR_EXT
- |.endif
- }
- if (op == BC_FORI) {
- | subhi PC, RC, #0x20000
- } else if (op == BC_JFORI) {
- | sub PC, RC, #0x20000
- | ldrhls RC, [PC, #-2]
- | bls =>BC_JLOOP
- } else if (op == BC_IFORL) {
- | subls PC, RC, #0x20000
- } else {
- | bls =>BC_JLOOP
- }
- | ins_next1
- | ins_next2
- | b <3
- |
- |.if not FPU
- |8: // Invert check for negative step.
- if (vk) {
- | bl extern __aeabi_dadd
- | strd CARG12, FOR_IDX
- | strd CARG12, FOR_EXT
- }
- | mov CARG3, CARG1
- | mov CARG4, CARG2
- | ldrd CARG12, FOR_STOP
- | b <6
- |.endif
- break;
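The termination test in the numeric for-loop above inverts its comparison for a negative step ("Invert check for negative step"). In plain C terms the continue condition is roughly (hypothetical helper, illustrative only):

    /* Loop continues while the index has not passed the stop value in the
     * direction of the step. */
    static int for_continues(double idx, double stop, double step)
    {
      return step >= 0 ? idx <= stop : stop <= idx;
    }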
-
- case BC_ITERL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IITERL follows.
- break;
-
- case BC_JITERL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IITERL:
- | // RA = base*8, RC = target
- | ldrd CARG12, [RA, BASE]!
- if (op == BC_JITERL) {
- | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
- | strdne CARG12, [RA, #-8]
- | bne =>BC_JLOOP
- } else {
- | add RC, PC, RC, lsl #2
- | // STALL: load CARG12.
- | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
- | subne PC, RC, #0x20000 // Otherwise save control var + branch.
- | strdne CARG12, [RA, #-8]
- }
- | ins_next
- break;
-
- case BC_LOOP:
- | // RA = base*8, RC = target (loop extent)
- | // Note: RA/RC are only used by the trace recorder to determine scope/extent
- | // This opcode does NOT jump; its only purpose is to detect a hot loop.
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_ILOOP follows.
- break;
-
- case BC_ILOOP:
- | // RA = base*8, RC = target (loop extent)
- | ins_next
- break;
-
- case BC_JLOOP:
- |.if JIT
- | // RA = base (ignored), RC = traceno
- | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)]
- | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0.
- | ldr TRACE:RC, [CARG1, RC, lsl #2]
- | st_vmstate CARG2
- | ldr RA, TRACE:RC->mcode
- | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
- | str L, [DISPATCH, #DISPATCH_GL(jit_L)]
- | bx RA
- |.endif
- break;
-
- case BC_JMP:
- | // RA = base*8 (only used by trace recorder), RC = target
- | add RC, PC, RC, lsl #2
- | sub PC, RC, #0x20000
- | ins_next
- break;
-
- /* -- Function headers -------------------------------------------------- */
-
- case BC_FUNCF:
- |.if JIT
- | hotcall
- |.endif
- case BC_FUNCV: /* NYI: compiled vararg functions. */
- | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
- break;
-
- case BC_JFUNCF:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IFUNCF:
- | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
- | ldr CARG1, L->maxstack
- | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)]
- | ldr KBASE, [PC, #-4+PC2PROTO(k)]
- | cmp RA, CARG1
- | bhi ->vm_growstack_l
- if (op != BC_JFUNCF) {
- | ins_next1
- | ins_next2
- }
- |2:
- | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters.
- | mvn CARG4, #~LJ_TNIL
- | blo >3
- if (op == BC_JFUNCF) {
- | decode_RD RC, INS
- | b =>BC_JLOOP
- } else {
- | ins_next3
- }
- |
- |3: // Clear missing parameters.
- | strd CARG34, [BASE, NARGS8:RC]
- | add NARGS8:RC, NARGS8:RC, #8
- | b <2
- break;
-
- case BC_JFUNCV:
-#if !LJ_HASJIT
- break;
-#endif
- | NYI // NYI: compiled vararg functions
- break; /* NYI: compiled vararg functions. */
-
- case BC_IFUNCV:
- | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
- | ldr CARG1, L->maxstack
- | add CARG4, BASE, RC
- | add RA, RA, RC
- | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC.
- | add CARG2, RC, #8+FRAME_VARG
- | ldr KBASE, [PC, #-4+PC2PROTO(k)]
- | cmp RA, CARG1
- | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG.
- | bhs ->vm_growstack_l
- | ldrb RB, [PC, #-4+PC2PROTO(numparams)]
- | mov RA, BASE
- | mov RC, CARG4
- | cmp RB, #0
- | add BASE, CARG4, #8
- | beq >3
- | mvn CARG3, #~LJ_TNIL
- |1:
- | cmp RA, RC // Less args than parameters?
- | ldrdlo CARG12, [RA], #8
- | movhs CARG2, CARG3
- | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC).
- |2:
- | subs RB, RB, #1
- | strd CARG12, [CARG4, #8]!
- | bne <1
- |3:
- | ins_next
- break;
-
- case BC_FUNCC:
- case BC_FUNCCW:
- | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
- if (op == BC_FUNCC) {
- | ldr CARG4, CFUNC:CARG3->f
- } else {
- | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)]
- }
- | add CARG2, RA, NARGS8:RC
- | ldr CARG1, L->maxstack
- | add RC, BASE, NARGS8:RC
- | str BASE, L->base
- | cmp CARG2, CARG1
- | str RC, L->top
- if (op == BC_FUNCCW) {
- | ldr CARG2, CFUNC:CARG3->f
- }
- | mv_vmstate CARG3, C
- | mov CARG1, L
- | bhi ->vm_growstack_c // Need to grow stack.
- | st_vmstate CARG3
- | blx CARG4 // (lua_State *L [, lua_CFunction f])
- | // Returns nresults.
- | ldr BASE, L->base
- | mv_vmstate CARG3, INTERP
- | ldr CRET2, L->top
- | lsl RC, CRET1, #3
- | st_vmstate CARG3
- | ldr PC, [BASE, FRAME_PC]
- | sub RA, CRET2, RC // RA = L->top - nresults*8
- | b ->vm_returnc
- break;
-
- /* ---------------------------------------------------------------------- */
-
- default:
- fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
- exit(2);
- break;
- }
-}
-
-static int build_backend(BuildCtx *ctx)
-{
- int op;
-
- dasm_growpc(Dst, BC__MAX);
-
- build_subroutines(ctx);
-
- |.code_op
- for (op = 0; op < BC__MAX; op++)
- build_ins(ctx, (BCOp)op, op);
-
- return BC__MAX;
-}
-
-/* Emit pseudo frame-info for all assembler functions. */
-static void emit_asm_debug(BuildCtx *ctx)
-{
- int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
- int i;
- switch (ctx->mode) {
- case BUILD_elfasm:
- fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
- fprintf(ctx->fp,
- ".Lframe0:\n"
- "\t.long .LECIE0-.LSCIE0\n"
- ".LSCIE0:\n"
- "\t.long 0xffffffff\n"
- "\t.byte 0x1\n"
- "\t.string \"\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 0xe\n" /* Return address is in lr. */
- "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */
- "\t.align 2\n"
- ".LECIE0:\n\n");
- fprintf(ctx->fp,
- ".LSFDE0:\n"
- "\t.long .LEFDE0-.LASFDE0\n"
- ".LASFDE0:\n"
- "\t.long .Lframe0\n"
- "\t.long .Lbegin\n"
- "\t.long %d\n"
- "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
- "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */
- fcofs, CFRAME_SIZE);
- for (i = 11; i >= (LJ_ARCH_HASFPU ? 5 : 4); i--) /* offset r4-r11 */
- fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i));
-#if LJ_ARCH_HASFPU
- for (i = 15; i >= 8; i--) /* offset d8-d15 */
- fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 %d, %d\n",
- 64+2*i, 10+2*(15-i));
- fprintf(ctx->fp, "\t.byte 0x84\n\t.uleb128 %d\n", 25); /* offset r4 */
-#endif
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE0:\n\n");
-#if LJ_HASFFI
- fprintf(ctx->fp,
- ".LSFDE1:\n"
- "\t.long .LEFDE1-.LASFDE1\n"
- ".LASFDE1:\n"
- "\t.long .Lframe0\n"
- "\t.long lj_vm_ffi_call\n"
- "\t.long %d\n"
- "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
- "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */
- "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */
- "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */
- "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */
- "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */
- "\t.align 2\n"
- ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
-#endif
- break;
- default:
- break;
- }
-}
-
+|// Low-level VM code for ARM CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch arm
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|
+|// The following must be C callee-save.
+|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding.
+|.define KBASE, r5 // Constants of current Lua function.
+|.define PC, r6 // Next PC.
+|.define DISPATCH, r7 // Opcode dispatch table.
+|.define LREG, r8 // Register holding lua_State (also in SAVE_L).
+|
+|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+.
+|.define BASE, r9 // Base of current Lua stack frame.
+|
+|// The following temporaries are not saved across C calls, except for RA/RC.
+|.define RA, r10 // Callee-save.
+|.define RC, r11 // Callee-save.
+|.define RB, r12
+|.define OP, r12 // Overlaps RB, must not be lr.
+|.define INS, lr
+|
+|// Calling conventions. Also used as temporaries.
+|.define CARG1, r0
+|.define CARG2, r1
+|.define CARG3, r2
+|.define CARG4, r3
+|.define CARG12, r0 // For 1st soft-fp double.
+|.define CARG34, r2 // For 2nd soft-fp double.
+|
+|.define CRET1, r0
+|.define CRET2, r1
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define SAVE_R4, [sp, #28]
+|.define CFRAME_SPACE, #28
+|.define SAVE_ERRF, [sp, #24]
+|.define SAVE_NRES, [sp, #20]
+|.define SAVE_CFRAME, [sp, #16]
+|.define SAVE_L, [sp, #12]
+|.define SAVE_PC, [sp, #8]
+|.define SAVE_MULTRES, [sp, #4]
+|.define ARG5, [sp]
+|
+|.define TMPDhi, [sp, #4]
+|.define TMPDlo, [sp]
+|.define TMPD, [sp]
+|.define TMPDp, sp
+|
+|.if FPU
+|.macro saveregs
+| push {r5, r6, r7, r8, r9, r10, r11, lr}
+| vpush {d8-d15}
+| sub sp, sp, CFRAME_SPACE+4
+| str r4, SAVE_R4
+|.endmacro
+|.macro restoreregs_ret
+| ldr r4, SAVE_R4
+| add sp, sp, CFRAME_SPACE+4
+| vpop {d8-d15}
+| pop {r5, r6, r7, r8, r9, r10, r11, pc}
+|.endmacro
+|.else
+|.macro saveregs
+| push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+| sub sp, sp, CFRAME_SPACE
+|.endmacro
+|.macro restoreregs_ret
+| add sp, sp, CFRAME_SPACE
+| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+|.endmacro
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; ud; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_FUNC, #-8
+|.define FRAME_PC, #-4
+|
+|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro
+|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro
+|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro
+|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro
+|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| ldrb OP, [PC]
+|.endmacro
+|.macro ins_NEXT2
+| ldr INS, [PC], #4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT3
+| ldr OP, [DISPATCH, OP, lsl #2]
+| decode_RA8 RA, INS
+| decode_RD RC, INS
+| bx OP
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+| ins_NEXT3
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Fewer unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+| .define ins_next3, ins_NEXT3
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| .endmacro
+| .macro ins_next3
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
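As a rough C analogy for the ins_NEXT fetch/decode/dispatch sequence defined above (the real code tail-jumps through the DISPATCH table with bx OP; the function-pointer table and names here are illustrative only, and operand decode is omitted):

    #include <stdint.h>

    typedef void (*OpHandler)(void);   /* One handler per bytecode op. */
    extern OpHandler dispatch[256];    /* Analogue of the DISPATCH table,
                                          assumed filled in at VM setup. */

    static void ins_next_sketch(const uint32_t **pc)
    {
      uint32_t ins = *(*pc)++;              /* ins_NEXT2: fetch INS, advance PC. */
      uint8_t  op  = (uint8_t)(ins & 0xff); /* decode_OP. */
      dispatch[op]();                       /* ins_NEXT3: indirect jump. */
    }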
+|
+|// Avoid register name substitution for field name.
+#define field_pc pc
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ldr PC, LFUNC:CARG3->field_pc
+| ldrb OP, [PC] // STALL: load PC. early PC.
+| ldr INS, [PC], #4
+| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP.
+| decode_RA8 RA, INS
+| add RA, RA, BASE
+| bx OP
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| str PC, [BASE, FRAME_PC]
+| ins_callt // STALL: locked PC.
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro
+|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro
+|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro hotcheck, delta
+| lsr CARG1, PC, #1
+| and CARG1, CARG1, #126
+| sub CARG1, CARG1, #-GG_DISP2HOT
+| ldrh CARG2, [DISPATCH, CARG1]
+| subs CARG2, CARG2, #delta
+| strh CARG2, [DISPATCH, CARG1]
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP
+| blo ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL
+| blo ->vm_hotcall
+|.endmacro
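The hotcheck/hotloop/hotcall macros above implement hot-spot detection with a table of 16-bit countdown counters indexed by a hash of the PC; the masking (PC >> 1) & 126 implies 64 slots. A C sketch under those assumptions (hypothetical names, not the LuaJIT sources):

    #include <stdint.h>

    #define HOTSLOTS 64                  /* From (PC >> 1) & 126: 64 entries. */
    static uint16_t hotcount[HOTSLOTS];  /* Kept near the dispatch table. */

    /* Returns nonzero when the counter underflows, i.e. the loop/call became
     * hot enough to hand over to the compiler (vm_hotloop/vm_hotcall). */
    static int hot_tick(uintptr_t pc, uint16_t delta)
    {
      uint16_t *c = &hotcount[(pc >> 2) & (HOTSLOTS - 1)];
      uint16_t old = *c;
      *c = (uint16_t)(old - delta);
      return old < delta;                /* Borrow => the blo branch is taken. */
    }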
+|
+|// Set current VM state.
+|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro
+|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
+| bic mark, mark, #LJ_GC_BLACK // black2gray(tab)
+| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
+| strb mark, tab->marked
+| str tmp, tab->gclist
+|.endmacro
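+|
+|// barrierback is the usual backward write barrier of the incremental GC;
+|// loosely, in C (ignoring the GCRef wrappers):
+|//   tab->marked &= (uint8_t)~LJ_GC_BLACK;   // black2gray(tab)
+|//   tab->gclist = g->gc.grayagain;          // chain onto grayagain list
+|//   g->gc.grayagain = tab;                  // GC will revisit the table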
+|
+|.macro .IOS, a, b
+|.if IOS
+| a, b
+|.endif
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+#if !LJ_DUALNUM
+#error "Only dual-number mode supported for ARM target"
+#endif
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: RB = previous base.
+ | tst PC, #FRAME_P
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
+ | mvn CARG2, #~LJ_TTRUE
+ | mov BASE, RB
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | str CARG2, [RA, FRAME_PC] // Prepend true to results.
+ | sub RA, RA, #8
+ |
+ |->vm_returnc:
+ | adds RC, RC, #8 // RC = (nresults+1)*8.
+ | mov CRET1, #LUA_YIELD
+ | beq ->vm_unwind_c_eh
+ | str RC, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
+ | // CARG1 = PC & FRAME_TYPE
+ | bic RB, PC, #FRAME_TYPEP
+ | cmp CARG1, #FRAME_C
+ | sub RB, BASE, RB // RB = previous base.
+ | bne ->vm_returnp
+ |
+ | str RB, L->base
+ | ldr KBASE, SAVE_NRES
+ | mv_vmstate CARG4, C
+ | sub BASE, BASE, #8
+ | subs CARG3, RC, #8
+ | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8
+ | st_vmstate CARG4
+ | beq >2
+ |1:
+ | subs CARG3, CARG3, #8
+ | ldrd CARG12, [RA], #8
+ | strd CARG12, [BASE], #8
+ | bne <1
+ |2:
+ | cmp KBASE, RC // More/less results wanted?
+ | bne >6
+ |3:
+ | str BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ldr RC, SAVE_CFRAME // Restore previous C frame.
+ | mov CRET1, #0 // Ok return status for vm_pcall.
+ | str RC, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | blt >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | ldr CARG3, L->maxstack
+ | mvn CARG2, #~LJ_TNIL
+ | cmp BASE, CARG3
+ | bhs >8
+ | str CARG2, [BASE, #4]
+ | add RC, RC, #8
+ | add BASE, BASE, #8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | sub CARG1, RC, KBASE
+ | cmp KBASE, #0 // LUA_MULTRET+1 case?
+ | subne BASE, BASE, CARG1 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | str BASE, L->top // Save current top held in BASE (yes).
+ | mov CARG2, KBASE
+ | mov CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mov sp, CARG1
+ | mov CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mv_vmstate CARG4, C
+ | ldr GL:CARG3, L->glref
+ | str CARG4, GL:CARG3->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
+ | mov sp, CARG1
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mov MASKR8, #255
+ | mov RC, #16 // 2 results: false + error message.
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | ldr BASE, L->base
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mvn CARG1, #~LJ_TFALSE
+ | sub RA, BASE, #8 // Results start at BASE-8.
+ | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | mv_vmstate CARG2, INTERP
+ | str CARG1, [BASE, #-4] // Prepend false to error message.
+ | st_vmstate CARG2
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | // CARG1 = L
+ | mov CARG2, #LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | mov CARG1, L
+ | str BASE, L->base
+ | add PC, PC, #4 // Must point after first instruction.
+ | str RC, L->top
+ | lsr CARG3, RA, #3
+ |2:
+ | // L->base = new base, L->top = top
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | ldr RC, L->top
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L, CARG1
+ | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table.
+ | mov BASE, CARG2
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | str L, SAVE_L
+ | mov PC, #FRAME_CP
+ | str CARG3, SAVE_NRES
+ | add CARG2, sp, #CFRAME_RESUME
+ | ldrb CARG1, L->status
+ | str CARG3, SAVE_ERRF
+ | str CARG2, L->cframe
+ | str CARG3, SAVE_CFRAME
+ | cmp CARG1, #0
+ | str L, SAVE_PC // Any value outside of bytecode is ok.
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | mov RA, BASE
+ | ldr BASE, L->base
+ | ldr CARG1, L->top
+ | mov MASKR8, #255
+ | strb CARG3, L->status
+ | sub RC, CARG1, BASE
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | mv_vmstate CARG2, INTERP
+ | add RC, RC, #8
+ | ands CARG1, PC, #FRAME_TYPE
+ | st_vmstate CARG2
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, #FRAME_CP
+ | str CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, #FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ldr RC, L:CARG1->cframe
+ | str CARG3, SAVE_NRES
+ | mov L, CARG1
+ | str CARG1, SAVE_L
+ | mov BASE, CARG2
+ | str sp, L->cframe // Add our C frame to cframe chain.
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | str RC, SAVE_CFRAME
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | ldr RB, L->base // RB = old base (for vmeta_call).
+ | ldr CARG1, L->top
+ | mov MASKR8, #255
+ | add PC, PC, BASE
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | sub PC, PC, RB // PC = frame delta + frame type
+ | mv_vmstate CARG2, INTERP
+ | sub NARGS8:RC, CARG1, BASE
+ | st_vmstate CARG2
+ |
+ |->vm_call_dispatch:
+ | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ldrd CARG34, [BASE, FRAME_FUNC]
+ | checkfunc CARG4, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L, CARG1
+ | ldr RA, L:CARG1->stack
+ | str CARG1, SAVE_L
+ | ldr RB, L->top
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | ldr RC, L->cframe
+ | sub RA, RA, RB // Compute -savestack(L, L->top).
+ | str sp, L->cframe // Add our C frame to cframe chain.
+ | mov RB, #0
+ | str RA, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | str RB, SAVE_ERRF // No error function.
+ | str RC, SAVE_CFRAME
+ | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | movs BASE, CRET1
+ | mov PC, #FRAME_CP
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
+ | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
+ | ldr CARG1, [BASE, #-16] // Get continuation.
+ | mov CARG4, BASE
+ | mov BASE, RB // Restore caller BASE.
+ |.if FFI
+ | cmp CARG1, #1
+ |.endif
+ | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC].
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | mvn INS, #~LJ_TNIL
+ | add CARG2, RA, RC
+ | str INS, [CARG2, #-4] // Ensure one valid arg.
+ |.if FFI
+ | bls >1
+ |.endif
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | // BASE = base, RA = resultptr, CARG4 = meta base
+ | bx CARG1
+ |
+ |.if FFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | sub CARG4, CARG4, #16
+ | sub RC, CARG4, BASE
+ | b ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, CARG4 = meta base
+ | ldr INS, [PC, #-4]
+ | sub CARG2, CARG4, #16
+ | ldrd CARG34, [RA]
+ | str BASE, L->base
+ | decode_RB8 RC, INS
+ | decode_RA8 RA, INS
+ | add CARG1, BASE, RC
+ | subs CARG1, CARG2, CARG1
+ | strdne CARG34, [CARG2]
+ | movne CARG3, CARG1
+ | bne ->BC_CAT_Z
+ | strd CARG34, [BASE, RA]
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | add CARG2, BASE, RB
+ | b >2
+ |
+ |->vmeta_tgets:
+ | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
+ | mvn CARG4, #~LJ_TTAB
+ | str TAB:RB, [CARG2]
+ | str CARG4, [CARG2, #4]
+ |2:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tgetb: // RC = index
+ | decode_RB8 RB, INS
+ | str RC, TMPDlo
+ | mvn CARG4, #~LJ_TISNUM
+ | add CARG2, BASE, RB
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | beq >3
+ | ldrd CARG34, [CRET1]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | rsb CARG1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #16 // 2 args for func(t, k).
+ | str PC, [BASE, #-12] // [cont|PC]
+ | add PC, CARG1, BASE
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | add CARG2, BASE, RB
+ | b >2
+ |
+ |->vmeta_tsets:
+ | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
+ | mvn CARG4, #~LJ_TTAB
+ | str TAB:RB, [CARG2]
+ | str CARG4, [CARG2, #4]
+ |2:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tsetb: // RC = index
+ | decode_RB8 RB, INS
+ | str RC, TMPDlo
+ | mvn CARG4, #~LJ_TISNUM
+ | add CARG2, BASE, RB
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | ldrd CARG34, [BASE, RA]
+ | beq >3
+ | ins_next1
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | strd CARG34, [CRET1]
+ | ins_next2
+ | ins_next3
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | rsb CARG1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
+ | strd CARG34, [BASE, #16] // Copy value to third argument.
+ | str PC, [BASE, #-12] // [cont|PC]
+ | add PC, CARG1, BASE
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mov CARG1, L
+ | sub PC, PC, #4
+ | mov CARG2, RA
+ | str BASE, L->base
+ | mov CARG3, RC
+ | str PC, SAVE_PC
+ | decode_OP CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #1
+ | bhi ->vmeta_binop
+ |4:
+ | ldrh RB, [PC, #2]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | subhs PC, RB, #0x20000
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | ldr INS, [PC, #-4]
+ | ldrd CARG12, [RA]
+ | decode_RA8 CARG3, INS
+ | strd CARG12, [BASE, CARG3]
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | ldr CARG2, [RA, #4]
+ | mvn CARG1, #~LJ_TTRUE
+ | cmp CARG1, CARG2 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | ldr CARG2, [RA, #4]
+ | checktp CARG2, LJ_TFALSE // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, INS
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |.endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG4, BASE, RB
+ | add CARG3, KBASE, RC
+ | b >1
+ |
+ |->vmeta_unm:
+ | ldr INS, [PC, #-8]
+ | sub PC, PC, #4
+ | add CARG3, BASE, RC
+ | add CARG4, BASE, RC
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |1:
+ | decode_OP OP, INS
+ | add CARG2, BASE, RA
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | str OP, ARG5
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub CARG2, CRET1, BASE
+ | str PC, [CRET1, #-12] // [cont|PC]
+ | add PC, CARG2, #FRAME_CONT
+ | mov BASE, CRET1
+ | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | add CARG2, BASE, RC
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+ | .IOS ldr BASE, L->base
+#if LJ_52
+ | cmp CRET1, #0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | ldr TAB:CARG1, [BASE, RC]
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // RB = old base, BASE = new base, RC = nargs*8
+ | mov CARG1, L
+ | str RB, L->base // This is the caller's base!
+ | sub CARG2, BASE, #8
+ | str PC, SAVE_PC
+ | add CARG3, BASE, NARGS8:RC
+ | .IOS mov RA, BASE
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | .IOS mov BASE, RA
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub CARG2, RA, #8
+ | str PC, SAVE_PC
+ | add CARG3, RA, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | .IOS ldr BASE, L->base
+ | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here.
+ | ldr PC, [BASE, FRAME_PC]
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | b ->BC_CALLT2_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, RA
+ | str PC, SAVE_PC
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ | .IOS ldr BASE, L->base
+ |.if JIT
+ | ldrb OP, [PC, #-4]
+ |.endif
+ | ldr INS, [PC, #-4]
+ |.if JIT
+ | cmp OP, #BC_JFORI
+ |.endif
+ | decode_RA8 RA, INS
+ | decode_RD RC, INS
+ |.if JIT
+ | beq =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_1 name
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | checktp CARG2, LJ_TISNUM
+ | cmnlo CARG4, #-LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_d, name
+ | .ffunc name
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8
+ | vldr d0, [BASE]
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_dd, name
+ | .ffunc name
+ | ldr CARG2, [BASE, #4]
+ | ldr CARG4, [BASE, #12]
+ | cmp NARGS8:RC, #16
+ | vldr d0, [BASE]
+ | vldr d1, [BASE, #8]
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | cmnlo CARG4, #-LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
+ |.macro ffgccheck
+ | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)]
+ | cmp CARG1, CARG2
+ | blge ->fff_gcstep
+ |.endmacro
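+ |
+ |// In effect (a sketch of the check above):
+ |//   if ((int32_t)g->gc.total >= (int32_t)g->gc.threshold)
+ |//     fff_gcstep();                  // saves BASE/top, calls lj_gc_step()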
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | checktp CARG2, LJ_TTRUE
+ | bhi ->fff_fallback
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ | mov RB, BASE
+ | subs RA, NARGS8:RC, #8
+ | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
+ | beq ->fff_res // Done if exactly 1 argument.
+ |1:
+ | ldrd CARG12, [RB, #8]
+ | subs RA, RA, #8
+ | strd CARG12, [RB], #8
+ | bne <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | mvnlo CARG2, #~LJ_TISNUM
+ | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1
+ | lsl CARG4, CARG4, #3
+ | ldrd CARG12, [CFUNC:CARG3, CARG4]
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktp CARG2, LJ_TTAB
+ | cmnne CARG2, #-LJ_TUDATA
+ | bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RB, TAB:CARG1->metatable
+ |2:
+ | mvn CARG2, #~LJ_TNIL
+ | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])]
+ | cmp TAB:RB, #0
+ | beq ->fff_restv
+ | ldr CARG3, TAB:RB->hmask
+ | ldr CARG4, STR:RC->hash
+ | ldr NODE:INS, TAB:RB->node
+ | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS.
+ | ldrd CARG12, NODE:INS->val
+ | ldr NODE:INS, NODE:INS->next
+ | checktp CARG4, LJ_TSTR
+ | cmpeq CARG3, STR:RC
+ | beq >5
+ | cmp NODE:INS, #0
+ | bne <3
+ |4:
+ | mov CARG1, RB // Use metatable as default result.
+ | mvn CARG2, #~LJ_TTAB
+ | b ->fff_restv
+ |5:
+ | checktp CARG2, LJ_TNIL
+ | bne ->fff_restv
+ | b <4
+ |
+ |6:
+ | checktp CARG2, LJ_TISNUM
+ | mvnhs CARG2, CARG2
+ | movlo CARG2, #~LJ_TISNUM
+ | add CARG4, DISPATCH, CARG2, lsl #2
+ | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp CARG2, LJ_TTAB
+ | ldreq TAB:RB, TAB:CARG1->metatable
+ | checktpeq CARG4, LJ_TTAB
+ | ldrbeq CARG4, TAB:CARG1->marked
+ | cmpeq TAB:RB, #0
+ | bne ->fff_fallback
+ | tst CARG4, #LJ_GC_BLACK // isblack(table)
+ | str TAB:CARG3, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, CARG4, CARG3
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | ldrd CARG34, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | mov CARG2, CARG3
+ | checktab CARG4, ->fff_fallback
+ | mov CARG1, L
+ | add CARG3, BASE, #8
+ | .IOS mov RA, BASE
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | .IOS mov BASE, RA
+ | ldrd CARG12, [CRET1]
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | bne ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | bls ->fff_restv
+ | b ->fff_fallback
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checktp CARG2, LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv
+ | // Handle numbers inline, unless a number base metatable is present.
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])]
+ | str BASE, L->base
+ | checktp CARG2, LJ_TISNUM
+ | cmpls CARG4, #0
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | bhi ->fff_fallback
+ | ffgccheck
+ | mov CARG1, L
+ | mov CARG2, BASE
+ | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o)
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | mvn CARG4, #~LJ_TNIL
+ | checktab CARG2, ->fff_fallback
+ | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
+ | ldr PC, [BASE, FRAME_PC]
+ | mov CARG2, CARG1
+ | str BASE, L->base // Add frame since C call can throw.
+ | mov CARG1, L
+ | str BASE, L->top // Dummy frame length is ok.
+ | add CARG3, BASE, #8
+ | str PC, SAVE_PC
+ | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Returns 0 at end of traversal.
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | mvneq CRET2, #~LJ_TNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | ldrd CARG12, [BASE, #8] // Copy key and value to results.
+ | ldrd CARG34, [BASE, #16]
+ | mov RC, #(2+1)*8
+ | strd CARG12, [BASE, #-8]
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |
+ |.ffunc_1 pairs
+ | checktab CARG2, ->fff_fallback
+#if LJ_52
+ | ldr TAB:RB, TAB:CARG1->metatable
+#endif
+ | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cmp TAB:RB, #0
+ | bne ->fff_fallback
+#endif
+ | mvn CARG2, #~LJ_TNIL
+ | mov RC, #(3+1)*8
+ | strd CFUNC:CARG34, [BASE, #-8]
+ | str CARG2, [BASE, #12]
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktp CARG2, LJ_TTAB
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | ldr RB, TAB:CARG1->asize
+ | ldr RC, TAB:CARG1->array
+ | add CARG3, CARG3, #1
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp CARG3, RB
+ | add RC, RC, CARG3, lsl #3
+ | strd CARG34, [BASE, #-8]
+ | ldrdlo CARG12, [RC]
+ | mov RC, #(0+1)*8
+ | bhs >2 // Not in array part?
+ |1:
+ | checktp CARG2, LJ_TNIL
+ | movne RC, #(2+1)*8
+ | strdne CARG12, [BASE]
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | ldr RB, TAB:CARG1->hmask
+ | mov CARG2, CARG3
+ | cmp RB, #0
+ | beq ->fff_res
+ | .IOS mov RA, BASE
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | .IOS mov BASE, RA
+ | cmp CRET1, #0
+ | beq ->fff_res
+ | ldrd CARG12, [CRET1]
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab CARG2, ->fff_fallback
+#if LJ_52
+ | ldr TAB:RB, TAB:CARG1->metatable
+#endif
+ | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cmp TAB:RB, #0
+ | bne ->fff_fallback
+#endif
+ | mov CARG1, #0
+ | mvn CARG2, #~LJ_TISNUM
+ | mov RC, #(3+1)*8
+ | strd CFUNC:CARG34, [BASE, #-8]
+ | strd CARG12, [BASE, #8]
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
+ | mov RB, BASE
+ | add BASE, BASE, #8
+ | moveq PC, #8+FRAME_PCALL
+ | movne PC, #8+FRAME_PCALLH
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | b ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | checkfunc CARG4, ->fff_fallback // Traceback must be a function.
+ | mov RB, BASE
+ | strd CARG12, [BASE, #8] // Swap function and traceback.
+ | strd CARG34, [BASE]
+ | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
+ | add BASE, BASE, #16
+ | moveq PC, #16+FRAME_PCALL
+ | movne PC, #16+FRAME_PCALLH
+ | sub NARGS8:RC, NARGS8:RC, #16
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG2, LJ_TTHREAD
+ | bne ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr
+ |.endif
+ | ldr PC, [BASE, FRAME_PC]
+ | str BASE, L->base
+ | ldr CARG2, L:CARG1->top
+ | ldrb RA, L:CARG1->status
+ | ldr RB, L:CARG1->base
+ | add CARG3, CARG2, NARGS8:RC
+ | add CARG4, CARG2, RA
+ | str PC, SAVE_PC
+ | cmp CARG4, RB
+ | beq ->fff_fallback
+ | ldr CARG4, L:CARG1->maxstack
+ | ldr RB, L:CARG1->cframe
+ | cmp RA, #LUA_YIELD
+ | cmpls CARG3, CARG4
+ | cmpls RB, #0
+ | bhi ->fff_fallback
+ |1:
+ |.if resume
+ | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
+ | add BASE, BASE, #8
+ | sub NARGS8:RC, NARGS8:RC, #8
+ |.endif
+ | str CARG3, L:CARG1->top
+ | str BASE, L->top
+ |2: // Move args to coroutine.
+ | ldrd CARG34, [BASE, RB]
+ | cmp RB, NARGS8:RC
+ | strdne CARG34, [CARG2, RB]
+ | add RB, RB, #8
+ | bne <2
+ |
+ | mov CARG3, #0
+ | mov L:RA, L:CARG1
+ | mov CARG4, #0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | ldr CARG3, L:RA->base
+ | mv_vmstate CARG2, INTERP
+ | ldr CARG4, L:RA->top
+ | st_vmstate CARG2
+ | cmp CRET1, #LUA_YIELD
+ | ldr BASE, L->base
+ | bhi >8
+ | subs RC, CARG4, CARG3
+ | ldr CARG1, L->maxstack
+ | add CARG2, BASE, RC
+ | beq >6 // No results?
+ | cmp CARG2, CARG1
+ | mov RB, #0
+ | bhi >9 // Need to grow stack?
+ |
+ | sub CARG4, RC, #8
+ | str CARG3, L:RA->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | ldrd CARG12, [CARG3, RB]
+ | cmp RB, CARG4
+ | strd CARG12, [BASE, RB]
+ | add RB, RB, #8
+ | bne <5
+ |6:
+ |.if resume
+ | mvn CARG3, #~LJ_TTRUE
+ | add RC, RC, #16
+ |7:
+ | str CARG3, [BASE, #-4] // Prepend true/false to results.
+ | sub RA, BASE, #8
+ |.else
+ | mov RA, BASE
+ | add RC, RC, #8
+ |.endif
+ | ands CARG1, PC, #FRAME_TYPE
+ | str PC, SAVE_PC
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | ldrd CARG12, [CARG4, #-8]!
+ | mvn CARG3, #~LJ_TFALSE
+ | mov RC, #(2+1)*8
+ | str CARG4, L:RA->top // Remove error from coroutine stack.
+ | strd CARG12, [BASE] // Copy error message.
+ | b <7
+ |.else
+ | mov CARG1, L
+ | mov CARG2, L:RA
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Never returns.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov CARG1, L
+ | lsr CARG2, RC, #3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | mov CRET1, #0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | ldr CARG1, L->cframe
+ | add CARG2, BASE, NARGS8:RC
+ | str BASE, L->base
+ | tst CARG1, #CFRAME_RESUME
+ | str CARG2, L->top
+ | mov CRET1, #LUA_YIELD
+ | mov CARG3, #0
+ | beq ->fff_fallback
+ | str CARG3, L->cframe
+ | strb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.macro math_round, func
+ | .ffunc_1 math_ .. func
+ | checktp CARG2, LJ_TISNUM
+ | beq ->fff_restv
+ | bhi ->fff_fallback
+ | // Round FP value and normalize result.
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | bpl >2 // |x| < 1?
+ | mvn CARG4, #0x3e0
+ | subs RB, CARG4, RB, asr #21
+ | lsl CARG4, CARG2, #11
+ | lsl CARG3, CARG1, #11
+ | orr CARG4, CARG4, #0x80000000
+ | rsb INS, RB, #32
+ | orr CARG4, CARG4, CARG1, lsr #21
+ | bls >3 // |x| >= 2^31?
+ | orr CARG3, CARG3, CARG4, lsl INS
+ | lsr CARG1, CARG4, RB
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31
+ | addne CARG1, CARG1, #1
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31
+ | addsne CARG1, CARG1, #1
+ | ldrdvs CARG12, >9
+ | bvs ->fff_restv
+ |.endif
+ | cmp CARG2, #0
+ | rsblt CARG1, CARG1, #0
+ |1:
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |2: // |x| < 1
+ | bcs ->fff_restv // |x| is not finite.
+ | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1
+ | moveq CARG1, #0
+ | mvnne CARG1, #0
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 0 : 1
+ | moveq CARG1, #0
+ | movne CARG1, #1
+ |.endif
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |3: // |x| >= 2^31. Check for x == -(2^31).
+ | cmpeq CARG4, #0x80000000
+ |.if "func" == "floor"
+ | cmpeq CARG3, #0
+ |.endif
+ | bne >4
+ | cmp CARG2, #0
+ | movmi CARG1, #0x80000000
+ | bmi <1
+ |4:
+ | bl ->vm_..func.._sf
+ | b ->fff_restv
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.align 8
+ |9:
+ | .long 0x00000000, 0x41e00000 // 2^31.
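+ |
+ |// Semantically the fast path amounts to the following dual-number contract
+ |// (a rough sketch, not of the bit manipulation itself):
+ |//   double r = floor(x);             /* or ceil(x) */
+ |//   if (r >= -2147483648.0 && r <= 2147483647.0)
+ |//     return (int32_t)r;             // integer result, tag LJ_TISNUM
+ |//   return r;                        // else a number (e.g. 2^31 above)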
+ |
+ |.ffunc_1 math_abs
+ | checktp CARG2, LJ_TISNUM
+ | bhi ->fff_fallback
+ | bicne CARG2, CARG2, #0x80000000
+ | bne ->fff_restv
+ | cmp CARG1, #0
+ | rsbslt CARG1, CARG1, #0
+ | ldrdvs CARG12, <9
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG12 = TValue result.
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ |->fff_res1:
+ | // PC = return.
+ | mov RC, #(1+1)*8
+ |->fff_res:
+ | // RC = (nresults+1)*8, PC = return.
+ | ands CARG1, PC, #FRAME_TYPE
+ | ldreq INS, [PC, #-4]
+ | str RC, SAVE_MULTRES
+ | sub RA, BASE, #8
+ | bne ->vm_return
+ | decode_RB8 RB, INS
+ |5:
+ | cmp RB, RC // More results expected?
+ | bhi >6
+ | decode_RA8 CARG1, INS
+ | ins_next1
+ | ins_next2
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, CARG1
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | add CARG2, RA, RC
+ | mvn CARG1, #~LJ_TNIL
+ | add RC, RC, #8
+ | str CARG1, [CARG2, #-4]
+ | b <5
+ |
+ |.macro math_extern, func
+ |.if HFABI
+ | .ffunc_d math_ .. func
+ |.else
+ | .ffunc_n math_ .. func
+ |.endif
+ | .IOS mov RA, BASE
+ | bl extern func
+ | .IOS mov BASE, RA
+ |.if HFABI
+ | b ->fff_resd
+ |.else
+ | b ->fff_restv
+ |.endif
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ |.if HFABI
+ | .ffunc_dd math_ .. func
+ |.else
+ | .ffunc_nn math_ .. func
+ |.endif
+ | .IOS mov RA, BASE
+ | bl extern func
+ | .IOS mov BASE, RA
+ |.if HFABI
+ | b ->fff_resd
+ |.else
+ | b ->fff_restv
+ |.endif
+ |.endmacro
+ |
+ |.if FPU
+ | .ffunc_d math_sqrt
+ | vsqrt.f64 d0, d0
+ |->fff_resd:
+ | ldr PC, [BASE, FRAME_PC]
+ | vstr d0, [BASE, #-8]
+ | b ->fff_res1
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |.ffunc math_log
+ |.if HFABI
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | vldr d0, [BASE]
+ | bne ->fff_fallback
+ |.else
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | bne ->fff_fallback
+ |.endif
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern log
+ | .IOS mov BASE, RA
+ |.if HFABI
+ | b ->fff_resd
+ |.else
+ | b ->fff_restv
+ |.endif
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |->ff_math_deg:
+ |.if FPU
+ | .ffunc_d math_rad
+ | vldr d1, CFUNC:CARG3->upvalue[0]
+ | vmul.f64 d0, d0, d1
+ | b ->fff_resd
+ |.else
+ | .ffunc_n math_rad
+ | ldrd CARG34, CFUNC:CARG3->upvalue[0]
+ | bl extern __aeabi_dmul
+ | b ->fff_restv
+ |.endif
+ |
+ |.if HFABI
+ | .ffunc math_ldexp
+ | ldr CARG4, [BASE, #4]
+ | ldrd CARG12, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | vldr d0, [BASE]
+ | checktp CARG4, LJ_TISNUM
+ | bhs ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | bne ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern ldexp // (double x, int exp)
+ | .IOS mov BASE, RA
+ | b ->fff_resd
+ |.else
+ |.ffunc_2 math_ldexp
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ | checktp CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern ldexp // (double x, int exp)
+ | .IOS mov BASE, RA
+ | b ->fff_restv
+ |.endif
+ |
+ |.if HFABI
+ |.ffunc_d math_frexp
+ | mov CARG1, sp
+ | .IOS mov RA, BASE
+ | bl extern frexp
+ | .IOS mov BASE, RA
+ | ldr CARG3, [sp]
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | vstr d0, [BASE, #-8]
+ | mov RC, #(2+1)*8
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |.else
+ |.ffunc_n math_frexp
+ | mov CARG3, sp
+ | .IOS mov RA, BASE
+ | bl extern frexp
+ | .IOS mov BASE, RA
+ | ldr CARG3, [sp]
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ | mov RC, #(2+1)*8
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |.endif
+ |
+ |.if HFABI
+ |.ffunc_d math_modf
+ | sub CARG1, BASE, #8
+ | ldr PC, [BASE, FRAME_PC]
+ | .IOS mov RA, BASE
+ | bl extern modf
+ | .IOS mov BASE, RA
+ | mov RC, #(2+1)*8
+ | vstr d0, [BASE]
+ | b ->fff_res
+ |.else
+ |.ffunc_n math_modf
+ | sub CARG3, BASE, #8
+ | ldr PC, [BASE, FRAME_PC]
+ | .IOS mov RA, BASE
+ | bl extern modf
+ | .IOS mov BASE, RA
+ | mov RC, #(2+1)*8
+ | strd CARG12, [BASE]
+ | b ->fff_res
+ |.endif
+ |
+ |.macro math_minmax, name, cond, fcond
+ |.if FPU
+ | .ffunc_1 name
+ | add RB, BASE, RC
+ | checktp CARG2, LJ_TISNUM
+ | add RA, BASE, #8
+ | bne >4
+ |1: // Handle integers.
+ | ldrd CARG34, [RA]
+ | cmp RA, RB
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bne >3
+ | cmp CARG1, CARG3
+ | add RA, RA, #8
+ | mov..cond CARG1, CARG3
+ | b <1
+ |3: // Convert intermediate result to number and continue below.
+ | vmov s4, CARG1
+ | bhi ->fff_fallback
+ | vldr d1, [RA]
+ | vcvt.f64.s32 d0, s4
+ | b >6
+ |
+ |4:
+ | vldr d0, [BASE]
+ | bhi ->fff_fallback
+ |5: // Handle numbers.
+ | ldrd CARG34, [RA]
+ | vldr d1, [RA]
+ | cmp RA, RB
+ | bhs ->fff_resd
+ | checktp CARG4, LJ_TISNUM
+ | bhs >7
+ |6:
+ | vcmp.f64 d0, d1
+ | vmrs
+ | add RA, RA, #8
+ | vmov..fcond.f64 d0, d1
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | vmov s4, CARG3
+ | bhi ->fff_fallback
+ | vcvt.f64.s32 d1, s4
+ | b <6
+ |
+ |.else
+ |
+ | .ffunc_1 name
+ | checktp CARG2, LJ_TISNUM
+ | mov RA, #8
+ | bne >4
+ |1: // Handle integers.
+ | ldrd CARG34, [BASE, RA]
+ | cmp RA, RC
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bne >3
+ | cmp CARG1, CARG3
+ | add RA, RA, #8
+ | mov..cond CARG1, CARG3
+ | b <1
+ |3: // Convert intermediate result to number and continue below.
+ | bhi ->fff_fallback
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [BASE, RA]
+ | b >6
+ |
+ |4:
+ | bhi ->fff_fallback
+ |5: // Handle numbers.
+ | ldrd CARG34, [BASE, RA]
+ | cmp RA, RC
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bhs >7
+ |6:
+ | bl extern __aeabi_cdcmple
+ | add RA, RA, #8
+ | mov..fcond CARG1, CARG3
+ | mov..fcond CARG2, CARG4
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | bhi ->fff_fallback
+ | strd CARG12, TMPD
+ | mov CARG1, CARG3
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, TMPD
+ | b <6
+ |.endif
+ |.endmacro
+ |
+ | math_minmax math_min, gt, hi
+ | math_minmax math_max, lt, lo
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | checkstr CARG2, ->fff_fallback
+ | ldr CARG1, STR:CARG1->len
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ldrd CARG12, [BASE]
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8
+ | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument.
+ | bne ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | mvn CARG2, #~LJ_TISNUM
+ | cmp CARG3, #0
+ | moveq RC, #(0+1)*8
+ | movne RC, #(1+1)*8
+ | strd CARG12, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | checktpeq CARG2, LJ_TISNUM
+ | bicseq CARG4, CARG1, #255
+ | mov CARG3, #1
+ | bne ->fff_fallback
+ | str CARG1, TMPD
+ | mov CARG2, TMPDp // Points to stack. Little-endian.
+ |->fff_newstr:
+ | // CARG2 = str, CARG3 = len.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #16]
+ | cmp NARGS8:RC, #16
+ | mvn RB, #0
+ | beq >1
+ | blo ->fff_fallback
+ | checktp CARG4, LJ_TISNUM
+ | mov RB, CARG3
+ | bne ->fff_fallback
+ |1:
+ | ldrd CARG34, [BASE, #8]
+ | checktp CARG2, LJ_TSTR
+ | ldreq CARG2, STR:CARG1->len
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end
+ | add CARG4, CARG2, #1
+ | cmp CARG3, #0 // if (start < 0) start += len+1
+ | addlt CARG3, CARG3, CARG4
+ | cmp CARG3, #1 // if (start < 1) start = 1
+ | movlt CARG3, #1
+ | cmp RB, #0 // if (end < 0) end += len+1
+ | addlt RB, RB, CARG4
+ | bic RB, RB, RB, asr #31 // if (end < 0) end = 0
+ | cmp RB, CARG2 // if (end > len) end = len
+ | add CARG1, STR:CARG1, #sizeof(GCstr)-1
+ | movgt RB, CARG2
+ | add CARG2, CARG1, CARG3
+ | subs CARG3, RB, CARG3 // len = end - start
+ | add CARG3, CARG3, #1 // len += 1
+ | bge ->fff_newstr
+ |->fff_emptystr:
+ | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty)
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
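+ |
+ |// The clamping above follows the usual string.sub index rules; roughly:
+ |//   if (start < 0) start += len + 1;  if (start < 1) start = 1;
+ |//   if (end   < 0) end   += len + 1;  if (end   < 0) end   = 0;
+ |//   if (end > len) end = len;
+ |//   n = end - start + 1;             // n < 1 falls through to fff_emptystr
+ |//   // otherwise fff_newstr interns the slice at s + start - 1, length n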
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | bne ->fff_fallback // Exactly 2 arguments
+ | checktp CARG2, LJ_TSTR
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | subs CARG4, CARG3, #1
+ | ldr CARG2, STR:CARG1->len
+ | blt ->fff_emptystr // Count <= 0?
+ | cmp CARG2, #1
+ | blo ->fff_emptystr // Zero-length string?
+ | bne ->fff_fallback // Fallback for > 1-char strings.
+ | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
+ | ldr CARG1, STR:CARG1[1]
+ | cmp RB, CARG3
+ | blo ->fff_fallback
+ |1: // Fill buffer with char.
+ | strb CARG1, [CARG2, CARG4]
+ | subs CARG4, CARG4, #1
+ | bge <1
+ | b ->fff_newstr
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checkstr CARG2, ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
+ | mov CARG4, CARG3
+ | add CARG1, STR:CARG1, #sizeof(GCstr)
+ | cmp RB, CARG3
+ | blo ->fff_fallback
+ |1: // Reverse string copy.
+ | ldrb RB, [CARG1], #1
+ | subs CARG4, CARG4, #1
+ | blt ->fff_newstr
+ | strb RB, [CARG2, CARG4]
+ | b <1
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checkstr CARG2, ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldr RB, [DISPATCH, #DISPATCH_GL(tmpbuf.sz)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(tmpbuf.buf)]
+ | mov CARG4, #0
+ | add CARG1, STR:CARG1, #sizeof(GCstr)
+ | cmp RB, CARG3
+ | blo ->fff_fallback
+ |1: // ASCII case conversion.
+ | ldrb RB, [CARG1, CARG4]
+ | cmp CARG4, CARG3
+ | bhs ->fff_newstr
+ | sub RC, RB, #lo
+ | cmp RC, #26
+ | eorlo RB, RB, #0x20
+ | strb RB, [CARG2, CARG4]
+ | add CARG4, CARG4, #1
+ | b <1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | checktab CARG2, ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | .IOS mov BASE, RA
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |// FP number to bit conversion for soft-float. Clobbers r0-r3.
+ |->vm_tobit_fb:
+ | bhi ->fff_fallback
+ |->vm_tobit:
+ | lsl RB, CARG2, #1
+ | adds RB, RB, #0x00200000
+ | movpl CARG1, #0 // |x| < 1?
+ | bxpl lr
+ | mvn CARG4, #0x3e0
+ | subs RB, CARG4, RB, asr #21
+ | bmi >1 // |x| >= 2^32?
+ | lsl CARG4, CARG2, #11
+ | orr CARG4, CARG4, #0x80000000
+ | orr CARG4, CARG4, CARG1, lsr #21
+ | cmp CARG2, #0
+ | lsr CARG1, CARG4, RB
+ | rsblt CARG1, CARG1, #0
+ | bx lr
+ |1:
+ | add RB, RB, #21
+ | lsr CARG4, CARG1, RB
+ | rsb RB, RB, #20
+ | lsl CARG1, CARG2, #12
+ | cmp CARG2, #0
+ | orr CARG1, CARG4, CARG1, lsl RB
+ | rsblt CARG1, CARG1, #0
+ | bx lr
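+ |
+ |// For the inputs the bit library defines this is a wrap to 32 bits; a
+ |// hedged sketch for finite, integral x with |x| < 2^63 (non-integral
+ |// inputs are simply truncated here):
+ |//   int32_t tobit(double x) {
+ |//     if (x > -1.0 && x < 1.0) return 0;
+ |//     return (int32_t)(uint32_t)(int64_t)x;   // modulo-2^32 wrap
+ |//   }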
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ |.endmacro
+ |
+ |.ffunc_bit tobit
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | mov CARG3, CARG1
+ | mov RA, #8
+ |1:
+ | ldrd CARG12, [BASE, RA]
+ | cmp RA, NARGS8:RC
+ | add RA, RA, #8
+ | bge >2
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ | ins CARG3, CARG3, CARG1
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, orr
+ |.ffunc_bit_op bxor, eor
+ |
+ |2:
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG34, [BASE, #-8]
+ | b ->fff_res1
+ |
+ |.ffunc_bit bswap
+ | eor CARG3, CARG1, CARG1, ror #16
+ | bic CARG3, CARG3, #0x00ff0000
+ | ror CARG1, CARG1, #8
+ | mvn CARG2, #~LJ_TISNUM
+ | eor CARG1, CARG1, CARG3, lsr #8
+ | b ->fff_restv
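+ |
+ |// The eor/ror sequence above is the classic pre-ARMv6 byte swap; in C:
+ |//   uint32_t t = x ^ ((x << 16) | (x >> 16));  // x ^ ror(x, 16)
+ |//   t &= ~0x00ff0000u;
+ |//   x = (x >> 8) | (x << 24);                  // ror(x, 8)
+ |//   return x ^ (t >> 8);                       // == bswap32(x)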
+ |
+ |.ffunc_bit bnot
+ | mvn CARG1, CARG1
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc bit_..name
+ | ldrd CARG12, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ |.if shmod == 0
+ | and RA, CARG1, #31
+ |.else
+ | rsb RA, CARG1, #0
+ |.endif
+ | ldrd CARG12, [BASE]
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ | ins CARG1, CARG1, RA
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, lsl, 0
+ |.ffunc_bit_sh rshift, lsr, 0
+ |.ffunc_bit_sh arshift, asr, 0
+ |.ffunc_bit_sh rol, ror, 1
+ |.ffunc_bit_sh ror, ror, 0
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RC = nargs*8
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | ldr CARG2, L->maxstack
+ | add CARG1, BASE, NARGS8:RC
+ | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC.
+ | str CARG1, L->top
+ | ldr CARG3, CFUNC:CARG3->f
+ | str BASE, L->base
+ | add CARG1, CARG1, #8*LUA_MINSTACK
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | cmp CARG1, CARG2
+ | mov CARG1, L
+ | bhi >5 // Need to grow stack.
+ | blx CARG3 // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ldr BASE, L->base
+ | cmp CRET1, #0
+ | lsl RC, CRET1, #3
+ | sub RA, BASE, #8
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | ldr CARG1, L->top
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, CARG1, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | ands CARG1, PC, #FRAME_TYPE
+ | bic CARG2, PC, #FRAME_TYPEP
+ | ldreq INS, [PC, #-4]
+ | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8.
+ | addeq CARG2, CARG2, #8
+ | sub RB, BASE, CARG2
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2, #LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | cmp CARG1, CARG1 // Set zero-flag to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mov RA, lr
+ | str BASE, L->base
+ | add CARG2, BASE, NARGS8:RC
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | str CARG2, L->top
+ | mov CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | ldr BASE, L->base
+ | mov lr, RA // Help return address predictor.
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | bx lr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | tst CARG1, #HOOK_ACTIVE
+ | bne >1
+ | sub CARG2, CARG2, #1
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | b >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | tst CARG1, #HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | decode_OP OP, INS
+ | add OP, DISPATCH, OP, lsl #2
+ | ldr pc, [OP, #GG_DISP2STATIC]
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | tst CARG1, #HOOK_ACTIVE // Hook already active?
+ | bne <5
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq <5
+ | subs CARG2, CARG2, #1
+ | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | beq >1
+ | tst CARG1, #LUA_MASKLINE
+ | beq <5
+ |1:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | ldr BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | ldrb OP, [PC, #-4]
+ | ldr INS, [PC, #-4]
+ | add OP, DISPATCH, OP, lsl #2
+ | ldr OP, [OP, #GG_DISP2STATIC]
+ | decode_RA8 RA, INS
+ | decode_RD RC, INS
+ | bx OP
+ |
+ |->cont_hook: // Continue from hook yield.
+ | ldr CARG1, [CARG4, #-24]
+ | add PC, PC, #4
+ | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | str PC, SAVE_PC
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | mov CARG2, PC
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | ldrb CARG3, [CARG3, #PC2PROTO(framesize)]
+ | str BASE, L->base
+ | add CARG3, BASE, CARG3, lsl #3
+ | str CARG3, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | orr CARG2, PC, #1
+ |1:
+ |.endif
+ | add CARG4, BASE, RC
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub RA, RA, BASE
+ | str CARG4, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | ldr BASE, L->base
+ | ldr CARG4, L->top
+ | mov CARG2, #0
+ | add RA, BASE, RA
+ | sub NARGS8:RC, CARG4, BASE
+ | str CARG2, SAVE_PC // Invalidate for subsequent line hook.
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | ldr INS, [PC, #-4]
+ | bx CRET1
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_exit_handler:
+ |.if JIT
+ | sub sp, sp, #12
+ | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12}
+ | ldr CARG1, [sp, #64] // Load original value of lr.
+ | ldr DISPATCH, [lr] // Load DISPATCH.
+ | add CARG3, sp, #64 // Recompute original value of sp.
+ | mv_vmstate CARG4, EXIT
+ | str CARG3, [sp, #52] // Store sp in RID_SP
+ | st_vmstate CARG4
+ | ldr CARG2, [CARG1, #-4]! // Get exit instruction.
+ | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC.
+ | str CARG1, [sp, #60]
+ |.if FPU
+ | vpush {d0-d15}
+ |.endif
+ | lsl CARG2, CARG2, #8
+ | add CARG1, CARG1, CARG2, asr #6
+ | ldr CARG2, [lr, #4] // Load exit stub group offset.
+ | sub CARG1, CARG1, lr
+ | ldr L, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number.
+ | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | str CARG1, [DISPATCH, #DISPATCH_J(exitno)]
+ | mov CARG4, #0
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | str BASE, L->base
+ | str CARG4, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | mov CARG2, sp
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ldr CARG2, L->cframe
+ | ldr BASE, L->base
+ | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
+ | mov sp, CARG2
+ | ldr PC, SAVE_PC // Get SAVE_PC.
+ | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+ |.endif
+ |->vm_exit_interp:
+ | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set.
+ |.if JIT
+ | ldr L, SAVE_L
+ |1:
+ | cmp CARG1, #0
+ | blt >3 // Check for error from exit.
+ | lsl RC, CARG1, #3
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | str RC, SAVE_MULTRES
+ | mov CARG3, #0
+ | ldr CARG2, LFUNC:CARG2->field_pc
+ | str CARG3, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | mv_vmstate CARG4, INTERP
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | ldrb OP, [PC]
+ | mov MASKR8, #255
+ | ldr INS, [PC], #4
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | st_vmstate CARG4
+ | cmp OP, #BC_FUNCF // Function header?
+ | ldr OP, [DISPATCH, OP, lsl #2]
+ | decode_RA8 RA, INS
+ | lsrlo RC, INS, #16 // No: Decode operands A*8 and D.
+ | subhs RC, RC, #8
+ | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8
+ | bx OP
+ |
+ |3: // Rethrow error from the right C frame.
+ | rsb CARG2, CARG1, #0
+ | mov CARG1, L
+ | bl extern lj_err_throw // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called from JIT code.
+ |//
+ |// double lj_vm_floor/ceil/trunc(double x);
+ |.macro vm_round, func, hf
+ |.if hf == 1
+ | vmov CARG1, CARG2, d0
+ |.endif
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | bpl >2 // |x| < 1?
+ | mvn CARG4, #0x3cc
+ | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
+ | bxlo lr // |x| >= 2^52: done.
+ | mvn CARG4, #1
+ | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask
+ | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
+ | subs RB, RB, #32
+ | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask
+ | orrpl CARG3, CARG3, CARG4
+ | mvnpl CARG4, #1
+ | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ |.if hf == 1
+ | vmoveq d0, CARG1, CARG2
+ |.endif
+ | bxeq lr // iszero: done.
+ | mvn CARG4, #1
+ | cmp RB, #0
+ | lslpl CARG3, CARG4, RB
+ | mvnmi CARG3, #0
+ | add RB, RB, #32
+ | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask
+ | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry
+ |.if hf == 1
+ | vmov d0, CARG1, CARG2
+ |.endif
+ | bx lr
+ |
+ |2: // |x| < 1:
+ | bxcs lr // |x| is not finite.
+ | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | mov CARG1, #0 // lo = 0
+ | and CARG2, CARG2, #0x80000000
+ | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 0.0 : 1.0)
+ | orrne CARG2, CARG2, CARG4
+ |.if hf == 1
+ | vmov d0, CARG1, CARG2
+ |.endif
+ | bx lr
+ |.endmacro
+ |
+ |9:
+ | .long 0x3ff00000 // hiword(+1.0)
+ |
+ |->vm_floor:
+ |.if HFABI
+ | vm_round floor, 1
+ |.endif
+ |->vm_floor_sf:
+ | vm_round floor, 0
+ |
+ |->vm_ceil:
+ |.if HFABI
+ | vm_round ceil, 1
+ |.endif
+ |->vm_ceil_sf:
+ | vm_round ceil, 0
+ |
+ |.macro vm_trunc, hf
+ |.if JIT
+ |.if hf == 1
+ | vmov CARG1, CARG2, d0
+ |.endif
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0.
+ | movpl CARG1, #0
+ |.if hf == 1
+ | vmovpl d0, CARG1, CARG2
+ |.endif
+ | bxpl lr
+ | mvn CARG4, #0x3cc
+ | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
+ | bxlo lr // |x| >= 2^52: already done.
+ | mvn CARG4, #1
+ | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
+ | subs RB, RB, #32
+ | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
+ |.if hf == 1
+ | vmov d0, CARG1, CARG2
+ |.endif
+ | bx lr
+ |.endif
+ |.endmacro
+ |
+ |->vm_trunc:
+ |.if HFABI
+ | vm_trunc 1
+ |.endif
+ |->vm_trunc_sf:
+ | vm_trunc 0
+ |
+ | // double lj_vm_mod(double dividend, double divisor);
+ |->vm_mod:
+ |.if FPU
+ | // Special calling convention. Also, RC (r11) is not preserved.
+ | vdiv.f64 d0, d6, d7
+ | mov RC, lr
+ | vmov CARG1, CARG2, d0
+ | bl ->vm_floor_sf
+ | vmov d0, CARG1, CARG2
+ | vmul.f64 d0, d0, d7
+ | mov lr, RC
+ | vsub.f64 d6, d6, d0
+ | bx lr
+ |.else
+ | push {r0, r1, r2, r3, r4, lr}
+ | bl extern __aeabi_ddiv
+ | bl ->vm_floor_sf
+ | ldrd CARG34, [sp, #8]
+ | bl extern __aeabi_dmul
+ | ldrd CARG34, [sp]
+ | eor CARG2, CARG2, #0x80000000
+ | bl extern __aeabi_dadd
+ | add sp, sp, #20
+ | pop {pc}
+ |.endif
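+ |
+ |// Both paths compute the Lua floored modulo, i.e. (a sketch):
+ |//   double lj_vm_mod(double a, double b) { return a - floor(a / b) * b; }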
+ |
+ | // int lj_vm_modi(int dividend, int divisor);
+ |->vm_modi:
+ | ands RB, CARG1, #0x80000000
+ | rsbmi CARG1, CARG1, #0 // a = |dividend|
+ | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor).
+ | cmp CARG2, #0
+ | rsbmi CARG2, CARG2, #0 // b = |divisor|
+ | subs CARG4, CARG2, #1
+ | cmpne CARG1, CARG2
+ | moveq CARG1, #0 // if (b == 1 || a == b) a = 0
+ | tsthi CARG2, CARG4
+ | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1
+ | bls >1
+ | // Use repeated subtraction to get the remainder.
+ | clz CARG3, CARG1
+ | clz CARG4, CARG2
+ | sub CARG4, CARG4, CARG3
+ | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8
+ | addne pc, pc, CARG3, lsl #3 // Duff's device.
+ | nop
+ {
+ int i;
+ for (i = 31; i >= 0; i--) {
+ | cmp CARG1, CARG2, lsl #i
+ | subhs CARG1, CARG1, CARG2, lsl #i
+ }
+ }
+ |1:
+ | cmp CARG1, #0
+ | cmpne RB, #0
+ | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b
+ | eors CARG2, CARG1, RB, lsl #1
+ | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y
+ | bx lr
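+ |
+ |// The routine above yields Lua's floored modulo for integers. A minimal C
+ |// sketch of the same semantics (illustrative only; assumes divisor != 0 and
+ |// no INT_MIN/-1 overflow):
+ |//   int32_t y = a % b;                  // truncated remainder
+ |//   if (y != 0 && (y ^ b) < 0) y += b;  // adjust sign to match the divisor
+ |//   return y;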
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions.
+ |// Saveregs already performed. Callback slot number in [sp], g in r12.
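+ |// Flow: stash the incoming GPR (and, for HFABI, FPR) arguments in cts->cb,
+ |// call lj_ccallback_enter to set up a Lua frame, then enter the interpreter
+ |// via ins_callt.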
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | ldr CTSTATE, GL:r12->ctype_state
+ | add DISPATCH, r12, #GG_G2DISP
+ |.if FPU
+ | str r4, SAVE_R4
+ | add r4, sp, CFRAME_SPACE+4+8*8
+ | vstmdb r4!, {d8-d15}
+ |.endif
+ |.if HFABI
+ | add r12, CTSTATE, #offsetof(CTState, cb.fpr[8])
+ |.endif
+ | strd CARG34, CTSTATE->cb.gpr[2]
+ | strd CARG12, CTSTATE->cb.gpr[0]
+ |.if HFABI
+ | vstmdb r12!, {d0-d7}
+ |.endif
+ | ldr CARG4, [sp]
+ | add CARG3, sp, #CFRAME_SIZE
+ | mov CARG1, CTSTATE
+ | lsr CARG4, CARG4, #3
+ | str CARG3, CTSTATE->cb.stack
+ | mov CARG2, sp
+ | str CARG4, CTSTATE->cb.slot
+ | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | ldr BASE, L:CRET1->base
+ | mv_vmstate CARG2, INTERP
+ | ldr RC, L:CRET1->top
+ | mov MASKR8, #255
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | mov L, CRET1
+ | sub RC, RC, BASE
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | st_vmstate CARG2
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | ldr CTSTATE, [DISPATCH, #DISPATCH_GL(ctype_state)]
+ | str BASE, L->base
+ | str CARG4, L->top
+ | str L, CTSTATE->L
+ | mov CARG1, CTSTATE
+ | mov CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | ldrd CARG12, CTSTATE->cb.gpr[0]
+ |.if HFABI
+ | vldr d0, CTSTATE->cb.fpr[0]
+ |.endif
+ | b ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
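+ | // Copies any stack-passed arguments from CCallState, loads the GPR (and,
+ | // for HFABI, FPR) argument registers, calls the target via blx, and stores
+ | // the return registers back into CCallState.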
+ |.if FFI
+ | .type CCSTATE, CCallState, r4
+ | push {CCSTATE, r5, r11, lr}
+ | mov CCSTATE, CARG1
+ | ldr CARG1, CCSTATE:CARG1->spadj
+ | ldrb CARG2, CCSTATE->nsp
+ | add CARG3, CCSTATE, #offsetof(CCallState, stack)
+ |.if HFABI
+ | add RB, CCSTATE, #offsetof(CCallState, fpr[0])
+ |.endif
+ | mov r11, sp
+ | sub sp, sp, CARG1 // Readjust stack.
+ | subs CARG2, CARG2, #1
+ |.if HFABI
+ | vldm RB, {d0-d7}
+ |.endif
+ | ldr RB, CCSTATE->func
+ | bmi >2
+ |1: // Copy stack slots.
+ | ldr CARG4, [CARG3, CARG2, lsl #2]
+ | str CARG4, [sp, CARG2, lsl #2]
+ | subs CARG2, CARG2, #1
+ | bpl <1
+ |2:
+ | ldrd CARG12, CCSTATE->gpr[0]
+ | ldrd CARG34, CCSTATE->gpr[2]
+ | blx RB
+ | mov sp, r11
+ |.if HFABI
+ | add r12, CCSTATE, #offsetof(CCallState, fpr[4])
+ |.endif
+ | strd CRET1, CCSTATE->gpr[0]
+ |.if HFABI
+ | vstmdb r12!, {d0-d3}
+ |.endif
+ | pop {CCSTATE, r5, r11, pc}
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
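+ /* The 'sub PC, RB, #0x20000' pattern below implements the biased jump
+    encoding: with RB = PC + rd*4 it yields PC + (rd - BCBIAS_J)*4, where
+    BCBIAS_J = 0x8000. */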
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RC = src2, JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, BASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TISNUM
+ | bne >3
+ | checktp CARG4, LJ_TISNUM
+ | bne >4
+ | cmp CARG1, CARG3
+ if (op == BC_ISLT) {
+ | sublt PC, RB, #0x20000
+ } else if (op == BC_ISGE) {
+ | subge PC, RB, #0x20000
+ } else if (op == BC_ISLE) {
+ | suble PC, RB, #0x20000
+ } else {
+ | subgt PC, RB, #0x20000
+ }
+ |1:
+ | ins_next
+ |
+ |3: // CARG12 is not an integer.
+ |.if FPU
+ | vldr d0, [RA]
+ | bhi ->vmeta_comp
+ | // d0 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | vldr d1, [RC]
+ | blo >5
+ | bhi ->vmeta_comp
+ | // d0 is a number, CARG3 is an integer.
+ | vmov s4, CARG3
+ | vcvt.f64.s32 d1, s4
+ | b >5
+ |4: // CARG1 is an integer, CARG34 is not an integer.
+ | vldr d1, [RC]
+ | bhi ->vmeta_comp
+ | // CARG1 is an integer, d1 is a number.
+ | vmov s4, CARG1
+ | vcvt.f64.s32 d0, s4
+ |5: // d0 and d1 are numbers.
+ | vcmp.f64 d0, d1
+ | vmrs
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | sublo PC, RB, #0x20000
+ } else if (op == BC_ISGE) {
+ | subhs PC, RB, #0x20000
+ } else if (op == BC_ISLE) {
+ | subls PC, RB, #0x20000
+ } else {
+ | subhi PC, RB, #0x20000
+ }
+ | b <1
+ |.else
+ | bhi ->vmeta_comp
+ | // CARG12 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | movlo RA, RB // Save RB.
+ | blo >5
+ | bhi ->vmeta_comp
+ | // CARG12 is a number, CARG3 is an integer.
+ | mov CARG1, CARG3
+ | mov RC, RA
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | mov CARG3, CARG1
+ | mov CARG4, CARG2
+ | ldrd CARG12, [RC] // Restore first operand.
+ | b >5
+ |4: // CARG1 is an integer, CARG34 is not an integer.
+ | bhi ->vmeta_comp
+ | // CARG1 is an integer, CARG34 is a number.
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [RC] // Restore second operand.
+ |5: // CARG12 and CARG34 are numbers.
+ | bl extern __aeabi_cdcmple
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | sublo PC, RA, #0x20000
+ } else if (op == BC_ISGE) {
+ | subhs PC, RA, #0x20000
+ } else if (op == BC_ISLE) {
+ | subls PC, RA, #0x20000
+ } else {
+ | subhi PC, RA, #0x20000
+ }
+ | b <1
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RC = src2, JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, BASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TISNUM
+ | cmnls CARG4, #-LJ_TISNUM
+ if (vk) {
+ | bls ->BC_ISEQN_Z
+ } else {
+ | bls ->BC_ISNEN_Z
+ }
+ | // Either or both types are not numbers.
+ |.if FFI
+ | checktp CARG2, LJ_TCDATA
+ | checktpne CARG4, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG2, CARG4 // Compare types.
+ | bne >2 // Not the same type?
+ | checktp CARG2, LJ_TISPRI
+ | bhs >1 // Same type and primitive type?
+ |
+ | // Same types and not a primitive type. Compare GCobj or pvalue.
+ | cmp CARG1, CARG3
+ if (vk) {
+ | bne >3 // Different GCobjs or pvalues?
+ |1: // Branch if same.
+ | sub PC, RB, #0x20000
+ |2: // Different.
+ | ins_next
+ |3:
+ | checktp CARG2, LJ_TISTABUD
+ | bhi <2 // Different objects and not table/ud?
+ } else {
+ | beq >1 // Same GCobjs or pvalues?
+ | checktp CARG2, LJ_TISTABUD
+ | bhi >2 // Different objects and not table/ud?
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ if (vk) {
+ | beq <2 // No metatable?
+ } else {
+ | beq >2 // No metatable?
+ }
+ | ldrb RA, TAB:RA->nomm
+ | mov CARG4, #1-vk // ne = 0 or 1.
+ | mov CARG2, CARG1
+ | tst RA, #1<<MM_eq
+ | beq ->vmeta_equal // 'no __eq' flag not set?
+ if (vk) {
+ | b <2
+ } else {
+ |2: // Branch if different.
+ | sub PC, RB, #0x20000
+ |1: // Same.
+ | ins_next
+ }
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RC = str_const (~), JMP with RC = target
+ | mvn RC, RC
+ | ldrd CARG12, [BASE, RA]
+ | ldrh RB, [PC, #2]
+ | ldr STR:CARG3, [KBASE, RC, lsl #2]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TSTR
+ |.if FFI
+ | bne >7
+ | cmp CARG1, CARG3
+ |.else
+ | cmpeq CARG1, CARG3
+ |.endif
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ |1:
+ } else {
+ |1:
+ | subne PC, RB, #0x20000
+ }
+ | ins_next
+ |
+ |.if FFI
+ |7:
+ | checktp CARG2, LJ_TCDATA
+ | bne <1
+ | b ->vmeta_equal_cd
+ |.endif
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RC = num_const (~), JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, KBASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | checktp CARG2, LJ_TISNUM
+ | bne >3
+ | checktp CARG4, LJ_TISNUM
+ | bne >4
+ | cmp CARG1, CARG3
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ |1:
+ } else {
+ |1:
+ | subne PC, RB, #0x20000
+ }
+ |2:
+ | ins_next
+ |
+ |3: // CARG12 is not an integer.
+ |.if FFI
+ | bhi >7
+ |.else
+ if (!vk) {
+ | subhi PC, RB, #0x20000
+ }
+ | bhi <2
+ |.endif
+ |.if FPU
+ | checktp CARG4, LJ_TISNUM
+ | vmov s4, CARG3
+ | vldr d0, [RA]
+ | vldrlo d1, [RC]
+ | vcvths.f64.s32 d1, s4
+ | b >5
+ |4: // CARG1 is an integer, d1 is a number.
+ | vmov s4, CARG1
+ | vldr d1, [RC]
+ | vcvt.f64.s32 d0, s4
+ |5: // d0 and d1 are numbers.
+ | vcmp.f64 d0, d1
+ | vmrs
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ } else {
+ | subne PC, RB, #0x20000
+ }
+ | b <2
+ |.else
+ | // CARG12 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | movlo RA, RB // Save RB.
+ | blo >5
+ | // CARG12 is a number, CARG3 is an integer.
+ | mov CARG1, CARG3
+ | mov RC, RA
+ |4: // CARG1 is an integer, CARG34 is a number.
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [RC] // Restore other operand.
+ |5: // CARG12 and CARG34 are numbers.
+ | bl extern __aeabi_cdcmpeq
+ if (vk) {
+ | subeq PC, RA, #0x20000
+ } else {
+ | subne PC, RA, #0x20000
+ }
+ | b <2
+ |.endif
+ |
+ |.if FFI
+ |7:
+ | checktp CARG2, LJ_TCDATA
+ | bne <1
+ | b ->vmeta_equal_cd
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RC = primitive_type (~), JMP with RC = target
+ | ldrd CARG12, [BASE, RA]
+ | ldrh RB, [PC, #2]
+ | add PC, PC, #4
+ | mvn RC, RC
+ | add RB, PC, RB, lsl #2
+ |.if FFI
+ | checktp CARG2, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG2, RC
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ } else {
+ | subne PC, RB, #0x20000
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RC = src, JMP with RC = target
+ | add RC, BASE, RC, lsl #3
+ | ldrh RB, [PC, #2]
+ | ldrd CARG12, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TTRUE
+ if (op == BC_ISTC || op == BC_IST) {
+ | subls PC, RB, #0x20000
+ if (op == BC_ISTC) {
+ | strdls CARG12, [BASE, RA]
+ }
+ } else {
+ | subhi PC, RB, #0x20000
+ if (op == BC_ISFC) {
+ | strdhi CARG12, [BASE, RA]
+ }
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ins_next1
+ | ldrd CARG12, [BASE, RC]
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RC = src
+ | add RC, BASE, RC, lsl #3
+ | ins_next1
+ | ldr CARG1, [RC, #4]
+ | add RA, BASE, RA
+ | ins_next2
+ | checktp CARG1, LJ_TTRUE
+ | mvnls CARG2, #~LJ_TFALSE
+ | mvnhi CARG2, #~LJ_TTRUE
+ | str CARG2, [RA, #4]
+ | ins_next3
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ldrd CARG12, [BASE, RC]
+ | ins_next1
+ | ins_next2
+ | checktp CARG2, LJ_TISNUM
+ | bhi ->vmeta_unm
+ | eorne CARG2, CARG2, #0x80000000
+ | bne >5
+ | rsbseq CARG1, CARG1, #0
+ | ldrdvs CARG12, >9
+ |5:
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |
+ |.align 8
+ |9:
+ | .long 0x00000000, 0x41e00000 // 2^31.
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ldrd CARG12, [BASE, RC]
+ | checkstr CARG2, >2
+ | ldr CARG1, STR:CARG1->len
+ |1:
+ | mvn CARG2, #~LJ_TISNUM
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |2:
+ | checktab CARG2, ->vmeta_len
+#if LJ_52
+ | ldr TAB:CARG3, TAB:CARG1->metatable
+ | cmp TAB:CARG3, #0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | .IOS mov RC, BASE
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | .IOS mov BASE, RC
+ | b <1
+#if LJ_52
+ |9:
+ | ldrb CARG4, TAB:CARG3->nomm
+ | tst CARG4, #1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithcheck, cond, ncond, target
+ ||if (vk == 1) {
+ | cmn CARG4, #-LJ_TISNUM
+ | cmn..cond CARG2, #-LJ_TISNUM
+ ||} else {
+ | cmn CARG2, #-LJ_TISNUM
+ | cmn..cond CARG4, #-LJ_TISNUM
+ ||}
+ | b..ncond target
+ |.endmacro
+ |.macro ins_arithcheck_int, target
+ | ins_arithcheck eq, ne, target
+ |.endmacro
+ |.macro ins_arithcheck_num, target
+ | ins_arithcheck lo, hs, target
+ |.endmacro
+ |
+ |.macro ins_arithpre
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | .if FPU
+ | ldrd CARG12, [RB, BASE]!
+ | ldrd CARG34, [RC, KBASE]!
+ | .else
+ | ldrd CARG12, [BASE, RB]
+ | ldrd CARG34, [KBASE, RC]
+ | .endif
+ || break;
+ ||case 1:
+ | .if FPU
+ | ldrd CARG34, [RB, BASE]!
+ | ldrd CARG12, [RC, KBASE]!
+ | .else
+ | ldrd CARG34, [BASE, RB]
+ | ldrd CARG12, [KBASE, RC]
+ | .endif
+ || break;
+ ||default:
+ | .if FPU
+ | ldrd CARG12, [RB, BASE]!
+ | ldrd CARG34, [RC, BASE]!
+ | .else
+ | ldrd CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | .endif
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithpre_fpu, reg1, reg2
+ |.if FPU
+ ||if (vk == 1) {
+ | vldr reg2, [RB]
+ | vldr reg1, [RC]
+ ||} else {
+ | vldr reg1, [RB]
+ | vldr reg2, [RC]
+ ||}
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arithpost_fpu, reg
+ | ins_next1
+ | add RA, BASE, RA
+ | ins_next2
+ | vstr reg, [RA]
+ | ins_next3
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpins, fpcall
+ | ins_arithpre
+ |.if "intins" ~= "vm_modi" and not FPU
+ | ins_next1
+ |.endif
+ | ins_arithcheck_int >5
+ |.if "intins" == "smull"
+ | smull CARG1, RC, CARG3, CARG1
+ | cmp RC, CARG1, asr #31
+ | ins_arithfallback bne
+ |.elif "intins" == "vm_modi"
+ | movs CARG2, CARG3
+ | ins_arithfallback beq
+ | bl ->vm_modi
+ | mvn CARG2, #~LJ_TISNUM
+ |.else
+ | intins CARG1, CARG1, CARG3
+ | ins_arithfallback bvs
+ |.endif
+ |4:
+ |.if "intins" == "vm_modi" or FPU
+ | ins_next1
+ |.endif
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |5: // FP variant.
+ | ins_arithpre_fpu d6, d7
+ | ins_arithfallback ins_arithcheck_num
+ |.if FPU
+ |.if "intins" == "vm_modi"
+ | bl fpcall
+ |.else
+ | fpins d6, d6, d7
+ |.endif
+ | ins_arithpost_fpu d6
+ |.else
+ | bl fpcall
+ |.if "intins" ~= "vm_modi"
+ | ins_next1
+ |.endif
+ | b <4
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arithfp, fpins, fpcall
+ | ins_arithpre
+ |.if "fpins" ~= "extern" or HFABI
+ | ins_arithpre_fpu d0, d1
+ |.endif
+ | ins_arithfallback ins_arithcheck_num
+ |.if "fpins" == "extern"
+ | .IOS mov RC, BASE
+ | bl fpcall
+ | .IOS mov BASE, RC
+ |.elif FPU
+ | fpins d0, d0, d1
+ |.else
+ | bl fpcall
+ |.endif
+ |.if ("fpins" ~= "extern" or HFABI) and FPU
+ | ins_arithpost_fpu d0
+ |.else
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |.endif
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arithdn adds, vadd.f64, extern __aeabi_dadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arithdn subs, vsub.f64, extern __aeabi_dsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arithdn smull, vmul.f64, extern __aeabi_dmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp vdiv.f64, extern __aeabi_ddiv
+ break;
+ case BC_MODVN: case BC_MODNV: case BC_MODVV:
+ | ins_arithdn vm_modi, vm_mod, ->vm_mod
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | ins_arithfp extern, extern pow
+ break;
+
+ case BC_CAT:
+ | decode_RB8 RC, INS
+ | decode_RC8 RB, INS
+ | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!)
+ | sub CARG3, RB, RC
+ | str BASE, L->base
+ | add CARG2, BASE, RB
+ |->BC_CAT_Z:
+ | // RA = dst*8, RC = src_start*8, CARG2 = top-1
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | lsr CARG3, CARG3, #3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | ldr BASE, L->base
+ | cmp CRET1, #0
+ | bne ->vmeta_binop
+ | ldrd CARG34, [BASE, RC]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA] // Copy result to RA.
+ | ins_next3
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RC = str_const (~)
+ | mvn RC, RC
+ | ins_next1
+ | ldr CARG1, [KBASE, RC, lsl #2]
+ | mvn CARG2, #~LJ_TSTR
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RC = cdata_const (~)
+ | mvn RC, RC
+ | ins_next1
+ | ldr CARG1, [KBASE, RC, lsl #2]
+ | mvn CARG2, #~LJ_TCDATA
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, (RC = int16_literal)
+ | mov CARG1, INS, asr #16 // Refetch sign-extended reg.
+ | mvn CARG2, #~LJ_TISNUM
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RC = num_const
+ | lsl RC, RC, #3
+ | ins_next1
+ | ldrd CARG12, [KBASE, RC]
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RC = primitive_type (~)
+ | add RA, BASE, RA
+ | mvn RC, RC
+ | ins_next1
+ | ins_next2
+ | str RC, [RA, #4]
+ | ins_next3
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RC = end
+ | add RA, BASE, RA
+ | add RC, BASE, RC, lsl #3
+ | mvn CARG1, #~LJ_TNIL
+ | str CARG1, [RA, #4]
+ | add RA, RA, #8
+ |1:
+ | str CARG1, [RA, #4]
+ | cmp RA, RC
+ | add RA, RA, #8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RC = uvnum
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsl RC, RC, #2
+ | add RC, RC, #offsetof(GCfuncL, uvptr)
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RC]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldrd CARG34, [CARG2]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RC = src
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | lsl RC, RC, #3
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldrd CARG34, [BASE, RC]
+ | ldrb RB, UPVAL:CARG2->marked
+ | ldrb RC, UPVAL:CARG2->closed
+ | ldr CARG2, UPVAL:CARG2->v
+ | tst RB, #LJ_GC_BLACK // isblack(uv)
+ | add RB, CARG4, #-LJ_TISGCV
+ | cmpne RC, #0
+ | strd CARG34, [CARG2]
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmn RB, #-(LJ_TISNUM - LJ_TISGCV)
+ | ldrbhi RC, GCOBJ:CARG3->gch.marked
+ | bls <1 // tvisgcv(v)
+ | sub CARG1, DISPATCH, #-GG_DISP2G
+ | tst RC, #LJ_GC_WHITES
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if IOS
+ | beq <1
+ | mov RC, BASE
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RC
+ |.else
+ | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |.endif
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | mvn RC, RC
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldr STR:CARG3, [KBASE, RC, lsl #2]
+ | mvn CARG4, #~LJ_TSTR
+ | ldrb RB, UPVAL:CARG2->marked
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldrb RC, UPVAL:CARG2->closed
+ | tst RB, #LJ_GC_BLACK // isblack(uv)
+ | ldrb RB, STR:CARG3->marked
+ | strd CARG34, [CARG2]
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | tst RB, #LJ_GC_WHITES // iswhite(str)
+ | cmpne RC, #0
+ | sub CARG1, DISPATCH, #-GG_DISP2G
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if IOS
+ | beq <1
+ | mov RC, BASE
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RC
+ |.else
+ | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |.endif
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RC = num_const
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | lsl RC, RC, #3
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldrd CARG34, [KBASE, RC]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [CARG2]
+ | ins_next3
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RC = primitive_type (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | mvn RC, RC
+ | ldr CARG2, UPVAL:CARG2->v
+ | ins_next1
+ | ins_next2
+ | str RC, [CARG2, #4]
+ | ins_next3
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RC = target
+ | ldr CARG3, L->openupval
+ | add RC, PC, RC, lsl #2
+ | str BASE, L->base
+ | cmp CARG3, #0
+ | sub PC, RC, #0x20000
+ | beq >1
+ | mov CARG1, L
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | ldr BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RC = proto_const (~) (holding function prototype)
+ | mvn RC, RC
+ | str BASE, L->base
+ | ldr CARG2, [KBASE, RC, lsl #2]
+ | str PC, SAVE_PC
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | mov CARG1, L
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TFUNC
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RC = (hbits|asize) | tab_const (~)
+ if (op == BC_TDUP) {
+ | mvn RC, RC
+ }
+ | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)]
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)]
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | cmp CARG3, CARG4
+ | mov CARG1, L
+ | bhs >5
+ |1:
+ if (op == BC_TNEW) {
+ | lsl CARG2, RC, #21
+ | lsr CARG3, RC, #11
+ | asr RC, CARG2, #21
+ | lsr CARG2, CARG2, #21
+ | cmn RC, #1
+ | addeq CARG2, CARG2, #2
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns GCtab *.
+ } else {
+ | ldr CARG2, [KBASE, RC, lsl #2]
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns GCtab *.
+ }
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TTAB
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |5:
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mov CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RC = str_const (~)
+ case BC_GSET:
+ | // RA = dst*8, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | mvn RC, RC
+ | ldr TAB:CARG1, LFUNC:CARG2->env
+ | ldr STR:RC, [KBASE, RC, lsl #2]
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | ldrd TAB:CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12.
+ | checktp CARG4, LJ_TISNUM // Integer key?
+ | ldreq CARG4, TAB:CARG1->array
+ | ldreq CARG2, TAB:CARG1->asize
+ | bne >9
+ |
+ | add CARG4, CARG4, CARG3, lsl #3
+ | cmp CARG3, CARG2 // In array part?
+ | ldrdlo CARG34, [CARG4]
+ | bhs ->vmeta_tgetv
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |1:
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cmp TAB:CARG2, #0
+ | beq <1 // No metatable: done.
+ | ldrb CARG2, TAB:CARG2->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | decode_RB8 RB, INS // Restore RB.
+ | b ->vmeta_tgetv
+ |
+ |9:
+ | checktp CARG4, LJ_TSTR // String key?
+ | moveq STR:RC, CARG3
+ | beq ->BC_TGETS_Z
+ | b ->vmeta_tgetv
+ break;
+ case BC_TGETS:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = dst*8, RB = table*8, RC = str_const (~)
+ | ldrd CARG12, [BASE, RB]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
+ | checktab CARG2, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | ldr CARG3, TAB:CARG1->hmask
+ | ldr CARG4, STR:RC->hash
+ | ldr NODE:INS, TAB:CARG1->node
+ | mov TAB:RB, TAB:CARG1
+ | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ |1:
+ | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS.
+ | ldrd CARG34, NODE:INS->val
+ | ldr NODE:INS, NODE:INS->next
+ | checktp CARG2, LJ_TSTR
+ | cmpeq CARG1, STR:RC
+ | bne >4
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |3:
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |4: // Follow hash chain.
+ | cmp NODE:INS, #0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | mov CARG3, #0 // Optional clear of undef. value (during load stall).
+ | mvn CARG4, #~LJ_TNIL
+ | cmp TAB:CARG1, #0
+ | beq <3 // No metatable: done.
+ | ldrb CARG2, TAB:CARG1->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = dst*8, RB = table*8, RC = index
+ | ldrd CARG12, [BASE, RB]
+ | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12.
+ | ldr CARG3, TAB:CARG1->asize
+ | ldr CARG4, TAB:CARG1->array
+ | lsl CARG2, RC, #3
+ | cmp RC, CARG3
+ | ldrdlo CARG34, [CARG4, CARG2]
+ | bhs ->vmeta_tgetb
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |1:
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cmp TAB:CARG2, #0
+ | beq <1 // No metatable: done.
+ | ldrb CARG2, TAB:CARG2->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb
+ break;
+
+ case BC_TSETV:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = src*8, RB = table*8, RC = key*8
+ | ldrd TAB:CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12.
+ | checktp CARG4, LJ_TISNUM // Integer key?
+ | ldreq CARG2, TAB:CARG1->array
+ | ldreq CARG4, TAB:CARG1->asize
+ | bne >9
+ |
+ | add CARG2, CARG2, CARG3, lsl #3
+ | cmp CARG3, CARG4 // In array part?
+ | ldrlo INS, [CARG2, #4]
+ | bhs ->vmeta_tsetv
+ | ins_next1 // Overwrites RB!
+ | checktp INS, LJ_TNIL
+ | ldrb INS, TAB:CARG1->marked
+ | ldrd CARG34, [BASE, RA]
+ | beq >5
+ |1:
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, [CARG2]
+ | bne >7
+ |2:
+ | ins_next2
+ | ins_next3
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ | beq <1 // No metatable: done.
+ | ldrb RA, TAB:RA->nomm
+ | tst RA, #1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | ldr INS, [PC, #-4] // Restore RA and RB.
+ | decode_RB8 RB, INS
+ | decode_RA8 RA, INS
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG1, INS, CARG3
+ | b <2
+ |
+ |9:
+ | checktp CARG4, LJ_TSTR // String key?
+ | moveq STR:RC, CARG3
+ | beq ->BC_TSETS_Z
+ | b ->vmeta_tsetv
+ break;
+ case BC_TSETS:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = src*8, RB = table*8, RC = str_const (~)
+ | ldrd CARG12, [BASE, RB]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
+ | checktab CARG2, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | ldr CARG3, TAB:CARG1->hmask
+ | ldr CARG4, STR:RC->hash
+ | ldr NODE:INS, TAB:CARG1->node
+ | mov TAB:RB, TAB:CARG1
+ | and CARG3, CARG3, CARG4 // idx = str->hash & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | mov CARG4, #0
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ | strb CARG4, TAB:RB->nomm // Clear metamethod cache.
+ |1:
+ | ldrd CARG12, NODE:INS->key
+ | ldr CARG4, NODE:INS->val.it
+ | ldr NODE:CARG3, NODE:INS->next
+ | checktp CARG2, LJ_TSTR
+ | cmpeq CARG1, STR:RC
+ | bne >5
+ | ldrb CARG2, TAB:RB->marked
+ | checktp CARG4, LJ_TNIL // Key found, but nil value?
+ | ldrd CARG34, [BASE, RA]
+ | beq >4
+ |2:
+ | tst CARG2, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, NODE:INS->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | cmp TAB:CARG1, #0
+ | beq <2 // No metatable: done.
+ | ldrb CARG1, TAB:CARG1->nomm
+ | tst CARG1, #1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | movs NODE:INS, NODE:CARG3
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | mov CARG3, TMPDp
+ | str PC, SAVE_PC
+ | cmp TAB:CARG1, #0 // No metatable: continue.
+ | str BASE, L->base
+ | ldrbne CARG2, TAB:CARG1->nomm
+ | mov CARG1, L
+ | beq >6
+ | tst CARG2, #1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | mov CARG2, TAB:RB
+ | str CARG4, TMPDhi
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | ldr BASE, L->base
+ | ldrd CARG34, [BASE, RA]
+ | strd CARG34, [CRET1]
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, CARG2, CARG3
+ | b <3
+ break;
+ case BC_TSETB:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = src*8, RB = table*8, RC = index
+ | ldrd CARG12, [BASE, RB]
+ | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12.
+ | ldr CARG3, TAB:CARG1->asize
+ | ldr RB, TAB:CARG1->array
+ | lsl CARG2, RC, #3
+ | cmp RC, CARG3
+ | ldrdlo CARG34, [CARG2, RB]!
+ | bhs ->vmeta_tsetb
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | ldrb INS, TAB:CARG1->marked
+ | ldrd CARG34, [BASE, RA]
+ | beq >5
+ |1:
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, [CARG2]
+ | bne >7
+ |2:
+ | ins_next2
+ | ins_next3
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ | beq <1 // No metatable: done.
+ | ldrb RA, TAB:RA->nomm
+ | tst RA, #1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | ldr INS, [PC, #-4] // Restore INS.
+ | decode_RA8 RA, INS
+ | b ->vmeta_tsetb
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG1, INS, CARG3
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RC = num_const (start index)
+ | add RA, BASE, RA
+ |1:
+ | ldr RB, SAVE_MULTRES
+ | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
+ | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
+ | subs RB, RB, #8
+ | ldr CARG4, TAB:CARG2->asize
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG1, RB, lsr #3
+ | cmp CARG3, CARG4
+ | ldr CARG4, TAB:CARG2->array
+ | add RB, RA, RB
+ | bhi >5
+ | add INS, CARG4, CARG1, lsl #3
+ | ldrb CARG1, TAB:CARG2->marked
+ |3: // Copy result slots to table.
+ | ldrd CARG34, [RA], #8
+ | strd CARG34, [INS], #8
+ | cmp RA, RB
+ | blo <3
+ | tst CARG1, #LJ_GC_BLACK // isblack(table)
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | .IOS ldr BASE, L->base
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, CARG1, CARG3
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = nresults+1,) RC = extra_nargs
+ | ldr CARG1, SAVE_MULTRES
+ | decode_RC8 NARGS8:RC, INS
+ | add NARGS8:RC, NARGS8:RC, CARG1
+ | b ->BC_CALL_Z
+ break;
+ case BC_CALL:
+ | decode_RC8 NARGS8:RC, INS
+ | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8
+ |->BC_CALL_Z:
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldrd CARG34, [BASE, RA]!
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add BASE, BASE, #8
+ | checkfunc CARG4, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs
+ | ldr CARG1, SAVE_MULTRES
+ | add NARGS8:RC, CARG1, RC, lsl #3
+ | b ->BC_CALLT1_Z
+ break;
+ case BC_CALLT:
+ | lsl NARGS8:RC, RC, #3
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ |->BC_CALLT1_Z:
+ | ldrd LFUNC:CARG34, [RA, BASE]!
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add RA, RA, #8
+ | checkfunc CARG4, ->vmeta_callt
+ | ldr PC, [BASE, FRAME_PC]
+ |->BC_CALLT2_Z:
+ | mov RB, #0
+ | ldrb CARG4, LFUNC:CARG3->ffid
+ | tst PC, #FRAME_TYPE
+ | bne >7
+ |1:
+ | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
+ | cmp NARGS8:RC, #0
+ | beq >3
+ |2:
+ | ldrd CARG12, [RA, RB]
+ | add INS, RB, #8
+ | cmp INS, NARGS8:RC
+ | strd CARG12, [BASE, RB]
+ | mov RB, INS
+ | bne <2
+ |3:
+ | cmp CARG4, #1 // (> FF_C) Calling a fast function?
+ | bhi >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | ldr INS, [PC, #-4]
+ | decode_RA8 RA, INS
+ | sub CARG1, BASE, RA
+ | ldr LFUNC:CARG1, [CARG1, #-16]
+ | ldr CARG1, LFUNC:CARG1->field_pc
+ | ldr KBASE, [CARG1, #PC2PROTO(k)]
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | eor PC, PC, #FRAME_VARG
+ | tst PC, #FRAME_TYPEP // Vararg frame below?
+ | movne CARG4, #0 // Clear ffid if no Lua function below.
+ | bne <1
+ | sub BASE, BASE, PC
+ | ldr PC, [BASE, FRAME_PC]
+ | tst PC, #FRAME_TYPE
+ | movne CARG4, #0 // Clear ffid if no Lua function below.
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldrd CARG34, [RA, #-16]
+ | ldrd CARG12, [RA, #-8]
+ | add BASE, RA, #8
+ | strd CARG34, [RA, #8] // Copy state.
+ | strd CARG12, [RA, #16] // Copy control var.
+ | // STALL: locked CARG34.
+ | ldrd LFUNC:CARG34, [RA, #-24]
+ | mov NARGS8:RC, #16 // Iterators get 2 arguments.
+ | // STALL: load CARG34.
+ | strd LFUNC:CARG34, [RA] // Copy callable.
+ | checkfunc CARG4, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
+ |.if JIT
+ | // NYI: add hotloop, record BC_ITERN.
+ |.endif
+ | add RA, BASE, RA
+ | ldr TAB:RB, [RA, #-16]
+ | ldr CARG1, [RA, #-8] // Get index from control var.
+ | ldr INS, TAB:RB->asize
+ | ldr CARG2, TAB:RB->array
+ | add PC, PC, #4
+ |1: // Traverse array part.
+ | subs RC, CARG1, INS
+ | add CARG3, CARG2, CARG1, lsl #3
+ | bhs >5 // Index points after array part?
+ | ldrd CARG34, [CARG3]
+ | checktp CARG4, LJ_TNIL
+ | addeq CARG1, CARG1, #1 // Skip holes in array part.
+ | beq <1
+ | ldrh RC, [PC, #-2]
+ | mvn CARG2, #~LJ_TISNUM
+ | strd CARG34, [RA, #8]
+ | add RC, PC, RC, lsl #2
+ | add RB, CARG1, #1
+ | strd CARG12, [RA]
+ | sub PC, RC, #0x20000
+ | str RB, [RA, #-8] // Update control var.
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | ldr CARG4, TAB:RB->hmask
+ | ldr NODE:RB, TAB:RB->node
+ |6:
+ | add CARG1, RC, RC, lsl #1
+ | cmp RC, CARG4 // End of iteration? Branch to ITERL+1.
+ | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
+ | bhi <3
+ | ldrd CARG12, NODE:CARG3->val
+ | checktp CARG2, LJ_TNIL
+ | add RC, RC, #1
+ | beq <6 // Skip holes in hash part.
+ | ldrh RB, [PC, #-2]
+ | add RC, RC, INS
+ | ldrd CARG34, NODE:CARG3->key
+ | str RC, [RA, #-8] // Update control var.
+ | strd CARG12, [RA, #8]
+ | add RC, PC, RB, lsl #2
+ | sub PC, RC, #0x20000
+ | strd CARG34, [RA]
+ | b <3
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RC = target (points to ITERN)
+ | add RA, BASE, RA
+ | add RC, PC, RC, lsl #2
+ | ldrd CFUNC:CARG12, [RA, #-24]
+ | ldr CARG3, [RA, #-12]
+ | ldr CARG4, [RA, #-4]
+ | checktp CARG2, LJ_TFUNC
+ | ldrbeq CARG1, CFUNC:CARG1->ffid
+ | checktpeq CARG3, LJ_TTAB
+ | checktpeq CARG4, LJ_TNIL
+ | cmpeq CARG1, #FF_next_N
+ | subeq PC, RC, #0x20000
+ | bne >5
+ | ins_next1
+ | ins_next2
+ | mov CARG1, #0
+ | mvn CARG2, #0x00018000
+ | strd CARG1, [RA, #-8] // Initialize control var.
+ |1:
+ | ins_next3
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov CARG1, #BC_JMP
+ | mov OP, #BC_ITERC
+ | strb CARG1, [PC, #-4]
+ | sub PC, RC, #0x20000
+ | strb OP, [PC] // Subsumes ins_next1.
+ | ins_next2
+ | b <1
+ break;
+
+ case BC_VARG:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | ldr CARG1, [BASE, FRAME_PC]
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | add RC, RC, #FRAME_VARG
+ | add CARG4, RA, RB
+ | sub CARG3, BASE, #8 // CARG3 = vtop
+ | sub RC, RC, CARG1 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | cmp RB, #0
+ | sub CARG1, CARG3, RC
+ | beq >5 // Copy all varargs?
+ | sub CARG4, CARG4, #16
+ |1: // Copy vararg slots to destination slots.
+ | cmp RC, CARG3
+ | ldrdlo CARG12, [RC], #8
+ | mvnhs CARG2, #~LJ_TNIL
+ | cmp RA, CARG4
+ | strd CARG12, [RA], #8
+ | blo <1
+ |2:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ldr CARG4, L->maxstack
+ | cmp CARG1, #0
+ | movle RB, #8 // MULTRES = (0+1)*8
+ | addgt RB, CARG1, #8
+ | add CARG2, RA, CARG1
+ | str RB, SAVE_MULTRES
+ | ble <2
+ | cmp CARG2, CARG4
+ | bhi >7
+ |6:
+ | ldrd CARG12, [RC], #8
+ | strd CARG12, [RA], #8
+ | cmp RC, CARG3
+ | blo <6
+ | b <2
+ |
+ |7: // Grow stack for varargs.
+ | lsr CARG2, CARG1, #3
+ | str RA, L->top
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub RC, RC, BASE // Need delta, because BASE may change.
+ | str PC, SAVE_PC
+ | sub RA, RA, BASE
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, RC
+ | sub CARG3, BASE, #8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RC = extra results
+ | ldr CARG1, SAVE_MULTRES
+ | ldr PC, [BASE, FRAME_PC]
+ | add RA, BASE, RA
+ | add RC, CARG1, RC, lsl #3
+ | b ->BC_RETM_Z
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | add RA, BASE, RA
+ |->BC_RETM_Z:
+ | str RC, SAVE_MULTRES
+ |1:
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV2_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
+ | ldr INS, [PC, #-4]
+ | subs CARG4, RC, #8
+ | sub CARG3, BASE, #8
+ | beq >3
+ |2:
+ | ldrd CARG12, [RA], #8
+ | add BASE, BASE, #8
+ | subs CARG4, CARG4, #8
+ | strd CARG12, [BASE, #-16]
+ | bne <2
+ |3:
+ | decode_RA8 RA, INS
+ | sub CARG4, CARG3, RA
+ | decode_RB8 RB, INS
+ | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
+ |5:
+ | cmp RB, RC // More results expected?
+ | bhi >6
+ | mov BASE, CARG4
+ | ldr CARG2, LFUNC:CARG1->field_pc
+ | ins_next1
+ | ins_next2
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | mvn CARG2, #~LJ_TNIL
+ | add BASE, BASE, #8
+ | add RC, RC, #8
+ | str CARG2, [BASE, #-12]
+ | b <5
+ |
+ |->BC_RETV1_Z: // Non-standard return case.
+ | add RA, BASE, RA
+ |->BC_RETV2_Z:
+ | tst CARG2, #FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, CARG2
+ | ldr PC, [BASE, FRAME_PC]
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | str RC, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | ldreq INS, [PC, #-4]
+ | bne ->BC_RETV1_Z
+ if (op == BC_RET1) {
+ | ldrd CARG12, [BASE, RA]
+ }
+ | sub CARG4, BASE, #8
+ | decode_RA8 RA, INS
+ if (op == BC_RET1) {
+ | strd CARG12, [CARG4]
+ }
+ | sub BASE, CARG4, RA
+ | decode_RB8 RB, INS
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ |5:
+ | cmp RB, RC
+ | bhi >6
+ | ldr CARG2, LFUNC:CARG1->field_pc
+ | ins_next1
+ | ins_next2
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | sub CARG2, CARG4, #4
+ | mvn CARG3, #~LJ_TNIL
+ | str CARG3, [CARG2, RC]
+ | add RC, RC, #8
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
+ |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
+ |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
+ |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
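+ | // The defines above map a numeric for-loop's state relative to RA: the
+ | // internal index/stop/step slots at +0/+8/+16 and the externally visible
+ | // loop variable (a copy of the index) at +24.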
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RC = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | ldrd CARG12, [RA, BASE]!
+ if (op != BC_JFORL) {
+ | add RC, PC, RC, lsl #2
+ }
+ if (!vk) {
+ | ldrd CARG34, FOR_STOP
+ | checktp CARG2, LJ_TISNUM
+ | ldr RB, FOR_TSTEP
+ | bne >5
+ | checktp CARG4, LJ_TISNUM
+ | ldr CARG4, FOR_STEP
+ | checktpeq RB, LJ_TISNUM
+ | bne ->vmeta_for
+ | cmp CARG4, #0
+ | blt >4
+ | cmp CARG1, CARG3
+ } else {
+ | ldrd CARG34, FOR_STEP
+ | checktp CARG2, LJ_TISNUM
+ | bne >5
+ | adds CARG1, CARG1, CARG3
+ | ldr CARG4, FOR_STOP
+ if (op == BC_IFORL) {
+ | addvs RC, PC, #0x20000 // Overflow: prevent branch.
+ } else {
+ | bvs >2 // Overflow: do not enter mcode.
+ }
+ | cmp CARG3, #0
+ | blt >4
+ | cmp CARG1, CARG4
+ }
+ |1:
+ if (op == BC_FORI) {
+ | subgt PC, RC, #0x20000
+ } else if (op == BC_JFORI) {
+ | sub PC, RC, #0x20000
+ | ldrhle RC, [PC, #-2]
+ } else if (op == BC_IFORL) {
+ | suble PC, RC, #0x20000
+ }
+ if (vk) {
+ | strd CARG12, FOR_IDX
+ }
+ |2:
+ | ins_next1
+ | ins_next2
+ | strd CARG12, FOR_EXT
+ if (op == BC_JFORI || op == BC_JFORL) {
+ | ble =>BC_JLOOP
+ }
+ |3:
+ | ins_next3
+ |
+ |4: // Invert check for negative step.
+ if (!vk) {
+ | cmp CARG3, CARG1
+ } else {
+ | cmp CARG4, CARG1
+ }
+ | b <1
+ |
+ |5: // FP loop.
+ if (!vk) {
+ | cmnlo CARG4, #-LJ_TISNUM
+ | cmnlo RB, #-LJ_TISNUM
+ | bhs ->vmeta_for
+ |.if FPU
+ | vldr d0, FOR_IDX
+ | vldr d1, FOR_STOP
+ | cmp RB, #0
+ | vstr d0, FOR_EXT
+ |.else
+ | cmp RB, #0
+ | strd CARG12, FOR_EXT
+ | blt >8
+ |.endif
+ } else {
+ |.if FPU
+ | vldr d0, FOR_IDX
+ | vldr d2, FOR_STEP
+ | vldr d1, FOR_STOP
+ | cmp CARG4, #0
+ | vadd.f64 d0, d0, d2
+ |.else
+ | cmp CARG4, #0
+ | blt >8
+ | bl extern __aeabi_dadd
+ | strd CARG12, FOR_IDX
+ | ldrd CARG34, FOR_STOP
+ | strd CARG12, FOR_EXT
+ |.endif
+ }
+ |6:
+ |.if FPU
+ | vcmpge.f64 d0, d1
+ | vcmplt.f64 d1, d0
+ | vmrs
+ |.else
+ | bl extern __aeabi_cdcmple
+ |.endif
+ if (vk) {
+ |.if FPU
+ | vstr d0, FOR_IDX
+ | vstr d0, FOR_EXT
+ |.endif
+ }
+ if (op == BC_FORI) {
+ | subhi PC, RC, #0x20000
+ } else if (op == BC_JFORI) {
+ | sub PC, RC, #0x20000
+ | ldrhls RC, [PC, #-2]
+ | bls =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | subls PC, RC, #0x20000
+ } else {
+ | bls =>BC_JLOOP
+ }
+ | ins_next1
+ | ins_next2
+ | b <3
+ |
+ |.if not FPU
+ |8: // Invert check for negative step.
+ if (vk) {
+ | bl extern __aeabi_dadd
+ | strd CARG12, FOR_IDX
+ | strd CARG12, FOR_EXT
+ }
+ | mov CARG3, CARG1
+ | mov CARG4, CARG2
+ | ldrd CARG12, FOR_STOP
+ | b <6
+ |.endif
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RC = target
+ | ldrd CARG12, [RA, BASE]!
+ if (op == BC_JITERL) {
+ | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
+ | strdne CARG12, [RA, #-8]
+ | bne =>BC_JLOOP
+ } else {
+ | add RC, PC, RC, lsl #2
+ | // STALL: load CARG12.
+ | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
+ | subne PC, RC, #0x20000 // Otherwise save control var + branch.
+ | strdne CARG12, [RA, #-8]
+ }
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RC = target (loop extent)
+ | // Note: RA/RC are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RC = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base (ignored), RC = traceno
+ | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)]
+ | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0.
+ | ldr TRACE:RC, [CARG1, RC, lsl #2]
+ | st_vmstate CARG2
+ | ldr RA, TRACE:RC->mcode
+ | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | str L, [DISPATCH, #DISPATCH_GL(jit_L)]
+ | bx RA
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RC = target
+ | add RC, PC, RC, lsl #2
+ | sub PC, RC, #0x20000
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)]
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | bhi ->vm_growstack_l
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ | ins_next2
+ }
+ |2:
+ | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters.
+ | mvn CARG4, #~LJ_TNIL
+ | blo >3
+ if (op == BC_JFUNCF) {
+ | decode_RD RC, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next3
+ }
+ |
+ |3: // Clear missing parameters.
+ | strd CARG34, [BASE, NARGS8:RC]
+ | add NARGS8:RC, NARGS8:RC, #8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | add CARG4, BASE, RC
+ | add RA, RA, RC
+ | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC.
+ | add CARG2, RC, #8+FRAME_VARG
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG.
+ | bhs ->vm_growstack_l
+ | ldrb RB, [PC, #-4+PC2PROTO(numparams)]
+ | mov RA, BASE
+ | mov RC, CARG4
+ | cmp RB, #0
+ | add BASE, CARG4, #8
+ | beq >3
+ | mvn CARG3, #~LJ_TNIL
+ |1:
+ | cmp RA, RC // Less args than parameters?
+ | ldrdlo CARG12, [RA], #8
+ | movhs CARG2, CARG3
+ | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC).
+ |2:
+ | subs RB, RB, #1
+ | strd CARG12, [CARG4, #8]!
+ | bne <1
+ |3:
+ | ins_next
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ldr CARG4, CFUNC:CARG3->f
+ } else {
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)]
+ }
+ | add CARG2, RA, NARGS8:RC
+ | ldr CARG1, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | str BASE, L->base
+ | cmp CARG2, CARG1
+ | str RC, L->top
+ if (op == BC_FUNCCW) {
+ | ldr CARG2, CFUNC:CARG3->f
+ }
+ | mv_vmstate CARG3, C
+ | mov CARG1, L
+ | bhi ->vm_growstack_c // Need to grow stack.
+ | st_vmstate CARG3
+ | blx CARG4 // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | ldr BASE, L->base
+ | mv_vmstate CARG3, INTERP
+ | ldr CRET2, L->top
+ | lsl RC, CRET1, #3
+ | st_vmstate CARG3
+ | ldr PC, [BASE, FRAME_PC]
+ | sub RA, CRET2, RC // RA = L->top - nresults*8
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 0xe\n" /* Return address is in lr. */
+ "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */
+ fcofs, CFRAME_SIZE);
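+  /* Each .byte/.uleb128 pair below is DW_CFA_offset: one byte 0x80+regno
+     followed by a slot index scaled by the CIE data alignment factor of -4. */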
+ for (i = 11; i >= (LJ_ARCH_HASFPU ? 5 : 4); i--) /* offset r4-r11 */
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i));
+#if LJ_ARCH_HASFPU
+ for (i = 15; i >= 8; i--) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 %d, %d\n",
+ 64+2*i, 10+2*(15-i));
+ fprintf(ctx->fp, "\t.byte 0x84\n\t.uleb128 %d\n", 25); /* offset r4 */
+#endif
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */
+ "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */
+ "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */
+ "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/3rdparty/lua/src/vm_mips.dasc b/3rdparty/lua/src/vm_mips.dasc
index a70799d..a81dbee 100644
--- a/3rdparty/lua/src/vm_mips.dasc
+++ b/3rdparty/lua/src/vm_mips.dasc
@@ -1,4241 +1,4241 @@
-|// Low-level VM code for MIPS CPUs.
-|// Bytecode interpreter, fast functions and helper functions.
-|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-|
-|.arch mips
-|.section code_op, code_sub
-|
-|.actionlist build_actionlist
-|.globals GLOB_
-|.globalnames globnames
-|.externnames extnames
-|
-|// Note: The ragged indentation of the instructions is intentional.
-|// The starting columns indicate data dependencies.
-|
-|//-----------------------------------------------------------------------
-|
-|// Fixed register assignments for the interpreter.
-|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
-|
-|// The following must be C callee-save (but BASE is often refetched).
-|.define BASE, r16 // Base of current Lua stack frame.
-|.define KBASE, r17 // Constants of current Lua function.
-|.define PC, r18 // Next PC.
-|.define DISPATCH, r19 // Opcode dispatch table.
-|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
-|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
-|// NYI: r22 currently unused.
-|
-|.define JGL, r30 // On-trace: global_State + 32768.
-|
-|// Constants for type-comparisons, stores and conversions. C callee-save.
-|.define TISNIL, r30
-|.define TOBIT, f30 // 2^52 + 2^51.
-|
-|// The following temporaries are not saved across C calls, except for RA.
-|.define RA, r23 // Callee-save.
-|.define RB, r8
-|.define RC, r9
-|.define RD, r10
-|.define INS, r11
-|
-|.define AT, r1 // Assembler temporary.
-|.define TMP0, r12
-|.define TMP1, r13
-|.define TMP2, r14
-|.define TMP3, r15
-|
-|// Calling conventions.
-|.define CFUNCADDR, r25
-|.define CARG1, r4
-|.define CARG2, r5
-|.define CARG3, r6
-|.define CARG4, r7
-|
-|.define CRET1, r2
-|.define CRET2, r3
-|
-|.define FARG1, f12
-|.define FARG2, f14
-|
-|.define FRET1, f0
-|.define FRET2, f2
-|
-|// Stack layout while in interpreter. Must match with lj_frame.h.
-|.define CFRAME_SPACE, 112 // Delta for sp.
-|
-|.define SAVE_ERRF, 124(sp) // 32 bit C frame info.
-|.define SAVE_NRES, 120(sp)
-|.define SAVE_CFRAME, 116(sp)
-|.define SAVE_L, 112(sp)
-|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
-|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves.
-|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves.
-|.define SAVE_PC, 20(sp)
-|.define ARG5, 16(sp)
-|.define CSAVE_4, 12(sp)
-|.define CSAVE_3, 8(sp)
-|.define CSAVE_2, 4(sp)
-|.define CSAVE_1, 0(sp)
-|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by callee.
-|
-|.define ARG5_OFS, 16
-|.define SAVE_MULTRES, ARG5
-|
-|.macro saveregs
-| addiu sp, sp, -CFRAME_SPACE
-| sw ra, SAVE_GPR_+9*4(sp)
-| sw r30, SAVE_GPR_+8*4(sp)
-| sdc1 f30, SAVE_FPR_+5*8(sp)
-| sw r23, SAVE_GPR_+7*4(sp)
-| sw r22, SAVE_GPR_+6*4(sp)
-| sdc1 f28, SAVE_FPR_+4*8(sp)
-| sw r21, SAVE_GPR_+5*4(sp)
-| sw r20, SAVE_GPR_+4*4(sp)
-| sdc1 f26, SAVE_FPR_+3*8(sp)
-| sw r19, SAVE_GPR_+3*4(sp)
-| sw r18, SAVE_GPR_+2*4(sp)
-| sdc1 f24, SAVE_FPR_+2*8(sp)
-| sw r17, SAVE_GPR_+1*4(sp)
-| sw r16, SAVE_GPR_+0*4(sp)
-| sdc1 f22, SAVE_FPR_+1*8(sp)
-| sdc1 f20, SAVE_FPR_+0*8(sp)
-|.endmacro
-|
-|.macro restoreregs_ret
-| lw ra, SAVE_GPR_+9*4(sp)
-| lw r30, SAVE_GPR_+8*4(sp)
-| ldc1 f30, SAVE_FPR_+5*8(sp)
-| lw r23, SAVE_GPR_+7*4(sp)
-| lw r22, SAVE_GPR_+6*4(sp)
-| ldc1 f28, SAVE_FPR_+4*8(sp)
-| lw r21, SAVE_GPR_+5*4(sp)
-| lw r20, SAVE_GPR_+4*4(sp)
-| ldc1 f26, SAVE_FPR_+3*8(sp)
-| lw r19, SAVE_GPR_+3*4(sp)
-| lw r18, SAVE_GPR_+2*4(sp)
-| ldc1 f24, SAVE_FPR_+2*8(sp)
-| lw r17, SAVE_GPR_+1*4(sp)
-| lw r16, SAVE_GPR_+0*4(sp)
-| ldc1 f22, SAVE_FPR_+1*8(sp)
-| ldc1 f20, SAVE_FPR_+0*8(sp)
-| jr ra
-| addiu sp, sp, CFRAME_SPACE
-|.endmacro
-|
-|// Type definitions. Some of these are only used for documentation.
-|.type L, lua_State, LREG
-|.type GL, global_State
-|.type TVALUE, TValue
-|.type GCOBJ, GCobj
-|.type STR, GCstr
-|.type TAB, GCtab
-|.type LFUNC, GCfuncL
-|.type CFUNC, GCfuncC
-|.type PROTO, GCproto
-|.type UPVAL, GCupval
-|.type NODE, Node
-|.type NARGS8, int
-|.type TRACE, GCtrace
-|
-|//-----------------------------------------------------------------------
-|
-|// Trap for not-yet-implemented parts.
-|.macro NYI; .long 0xf0f0f0f0; .endmacro
-|
-|// Macros to mark delay slots.
-|.macro ., a; a; .endmacro
-|.macro ., a,b; a,b; .endmacro
-|.macro ., a,b,c; a,b,c; .endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Endian-specific defines.
-|.define FRAME_PC, LJ_ENDIAN_SELECT(-4,-8)
-|.define FRAME_FUNC, LJ_ENDIAN_SELECT(-8,-4)
-|.define HI, LJ_ENDIAN_SELECT(4,0)
-|.define LO, LJ_ENDIAN_SELECT(0,4)
-|.define OFS_RD, LJ_ENDIAN_SELECT(2,0)
-|.define OFS_RA, LJ_ENDIAN_SELECT(1,2)
-|.define OFS_OP, LJ_ENDIAN_SELECT(0,3)
-|
-|// Instruction decode.
-|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
-|.macro decode_OP4a, dst, ins; andi dst, ins, 0xff; .endmacro
-|.macro decode_OP4b, dst; sll dst, dst, 2; .endmacro
-|.macro decode_RC4a, dst, ins; srl dst, ins, 14; .endmacro
-|.macro decode_RC4b, dst; andi dst, dst, 0x3fc; .endmacro
-|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
-|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
-|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
-|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
-|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
-|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
-|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
-|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
-|
-|// Instruction fetch.
-|.macro ins_NEXT1
-| lw INS, 0(PC)
-| addiu PC, PC, 4
-|.endmacro
-|// Instruction decode+dispatch.
-|.macro ins_NEXT2
-| decode_OP4a TMP1, INS
-| decode_OP4b TMP1
-| addu TMP0, DISPATCH, TMP1
-| decode_RD8a RD, INS
-| lw AT, 0(TMP0)
-| decode_RA8a RA, INS
-| decode_RD8b RD
-| jr AT
-| decode_RA8b RA
-|.endmacro
-|.macro ins_NEXT
-| ins_NEXT1
-| ins_NEXT2
-|.endmacro
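-|// Standard interpreter dispatch: fetch the next 32 bit instruction, advance
-|// PC, index the dispatch table at DISPATCH + op*4 and jump to the handler,
-|// with the RA/RD decode folded into the load latency and the delay slot.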
-|
-|// Instruction footer.
-|.if 1
-| // Replicated dispatch. Fewer unpredictable branches, but higher I-Cache use.
-| .define ins_next, ins_NEXT
-| .define ins_next_, ins_NEXT
-| .define ins_next1, ins_NEXT1
-| .define ins_next2, ins_NEXT2
-|.else
-| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
-| // Affects only certain kinds of benchmarks (and only with -j off).
-| .macro ins_next
-| b ->ins_next
-| .endmacro
-| .macro ins_next1
-| .endmacro
-| .macro ins_next2
-| b ->ins_next
-| .endmacro
-| .macro ins_next_
-| ->ins_next:
-| ins_NEXT
-| .endmacro
-|.endif
-|
-|// Call decode and dispatch.
-|.macro ins_callt
-| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
-| lw PC, LFUNC:RB->pc
-| lw INS, 0(PC)
-| addiu PC, PC, 4
-| decode_OP4a TMP1, INS
-| decode_RA8a RA, INS
-| decode_OP4b TMP1
-| decode_RA8b RA
-| addu TMP0, DISPATCH, TMP1
-| lw TMP0, 0(TMP0)
-| jr TMP0
-| addu RA, RA, BASE
-|.endmacro
-|
-|.macro ins_call
-| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
-| sw PC, FRAME_PC(BASE)
-| ins_callt
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|.macro branch_RD
-| srl TMP0, RD, 1
-| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
-| addu TMP0, TMP0, AT
-| addu PC, PC, TMP0
-|.endmacro
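-|// RD arrives pre-scaled by 8 with the biased jump target, so this computes
-|// PC += (target - BCBIAS_J)*4. The single lui works because BCBIAS_J*4
-|// (0x20000 for the usual BCBIAS_J of 0x8000) has no low 16 bits set.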
-|
-|// Assumes DISPATCH is relative to GL.
-#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
-#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
-#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
-#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name)
-|
-#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
-|
-|.macro load_got, func
-| lw CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
-|.endmacro
-|// Much faster. Sadly, there's no easy way to force the required code layout.
-|// .macro call_intern, func; bal extern func; .endmacro
-|.macro call_intern, func; jalr CFUNCADDR; .endmacro
-|.macro call_extern; jalr CFUNCADDR; .endmacro
-|.macro jmp_extern; jr CFUNCADDR; .endmacro
-|
-|.macro hotcheck, delta, target
-| srl TMP1, PC, 1
-| andi TMP1, TMP1, 126
-| addu TMP1, TMP1, DISPATCH
-| lhu TMP2, GG_DISP2HOT(TMP1)
-| addiu TMP2, TMP2, -delta
-| bltz TMP2, target
-|. sh TMP2, GG_DISP2HOT(TMP1)
-|.endmacro
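-|// Hot counters are halfwords at GG_DISP2HOT(DISPATCH); (PC>>1) & 126 hashes
-|// the bytecode address into one of 64 even slot offsets. The counter is
-|// decremented by delta and control branches to target on underflow; the
-|// store in the delay slot writes the decremented count back either way.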
-|
-|.macro hotloop
-| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
-|.endmacro
-|
-|.macro hotcall
-| hotcheck HOTCOUNT_CALL, ->vm_hotcall
-|.endmacro
-|
-|// Set current VM state. Uses TMP0.
-|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
-|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
-|
-|// Move table write barrier back. Overwrites mark and tmp.
-|.macro barrierback, tab, mark, tmp, target
-| lw tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
-| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
-| sw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
-| sb mark, tab->marked
-| b target
-|. sw tmp, tab->gclist
-|.endmacro
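-|// I.e. flip the table from black to gray (clear LJ_GC_BLACK in tab->marked)
-|// and prepend it to the gc.grayagain list so the incremental GC revisits
-|// it, with the old list head stored into tab->gclist in the delay slot.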
-|
-|//-----------------------------------------------------------------------
-
-/* Generate subroutines used by opcodes and other parts of the VM. */
-/* The .code_sub section should be last to help static branch prediction. */
-static void build_subroutines(BuildCtx *ctx)
-{
- |.code_sub
- |
- |//-----------------------------------------------------------------------
- |//-- Return handling ----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_returnp:
- | // See vm_return. Also: TMP2 = previous base.
- | andi AT, PC, FRAME_P
- | beqz AT, ->cont_dispatch
- |. li TMP1, LJ_TTRUE
- |
- | // Return from pcall or xpcall fast func.
- | lw PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
- | move BASE, TMP2 // Restore caller base.
- | // Prepending may overwrite the pcall frame, so do it at the end.
- | sw TMP1, FRAME_PC(RA) // Prepend true to results.
- | addiu RA, RA, -8
- |
- |->vm_returnc:
- | addiu RD, RD, 8 // RD = (nresults+1)*8.
- | andi TMP0, PC, FRAME_TYPE
- | beqz RD, ->vm_unwind_c_eh
- |. li CRET1, LUA_YIELD
- | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
- |. move MULTRES, RD
- |
- |->vm_return:
- | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
- | // TMP0 = PC & FRAME_TYPE
- | li TMP2, -8
- | xori AT, TMP0, FRAME_C
- | and TMP2, PC, TMP2
- | bnez AT, ->vm_returnp
- | subu TMP2, BASE, TMP2 // TMP2 = previous base.
- |
- | addiu TMP1, RD, -8
- | sw TMP2, L->base
- | li_vmstate C
- | lw TMP2, SAVE_NRES
- | addiu BASE, BASE, -8
- | st_vmstate
- | beqz TMP1, >2
- |. sll TMP2, TMP2, 3
- |1:
- | addiu TMP1, TMP1, -8
- | ldc1 f0, 0(RA)
- | addiu RA, RA, 8
- | sdc1 f0, 0(BASE)
- | bnez TMP1, <1
- |. addiu BASE, BASE, 8
- |
- |2:
- | bne TMP2, RD, >6
- |3:
- |. sw BASE, L->top // Store new top.
- |
- |->vm_leave_cp:
- | lw TMP0, SAVE_CFRAME // Restore previous C frame.
- | move CRET1, r0 // Ok return status for vm_pcall.
- | sw TMP0, L->cframe
- |
- |->vm_leave_unw:
- | restoreregs_ret
- |
- |6:
- | lw TMP1, L->maxstack
- | slt AT, TMP2, RD
- | bnez AT, >7 // Fewer results wanted?
- | // More results wanted. Check stack size and fill up results with nil.
- |. slt AT, BASE, TMP1
- | beqz AT, >8
- |. nop
- | sw TISNIL, HI(BASE)
- | addiu RD, RD, 8
- | b <2
- |. addiu BASE, BASE, 8
- |
- |7: // Fewer results wanted.
- | subu TMP0, RD, TMP2
- | subu TMP0, BASE, TMP0 // Either keep top or shrink it.
- | b <3
- |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
- |
- |8: // Corner case: need to grow stack for filling up results.
- | // This can happen if:
- | // - A C function grows the stack (a lot).
- | // - The GC shrinks the stack in between.
- | // - A return from a lua_call() with a (high) nresults adjustment.
- | load_got lj_state_growstack
- | move MULTRES, RD
- | srl CARG2, TMP2, 3
- | call_intern lj_state_growstack // (lua_State *L, int n)
- |. move CARG1, L
- | lw TMP2, SAVE_NRES
- | lw BASE, L->top // Need the (realloced) L->top in BASE.
- | move RD, MULTRES
- | b <2
- |. sll TMP2, TMP2, 3
- |
- |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
- | // (void *cframe, int errcode)
- | move sp, CARG1
- | move CRET1, CARG2
- |->vm_unwind_c_eh: // Landing pad for external unwinder.
- | lw L, SAVE_L
- | li TMP0, ~LJ_VMST_C
- | lw GL:TMP1, L->glref
- | b ->vm_leave_unw
- |. sw TMP0, GL:TMP1->vmstate
- |
- |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
- | // (void *cframe)
- | li AT, -4
- | and sp, CARG1, AT
- |->vm_unwind_ff_eh: // Landing pad for external unwinder.
- | lw L, SAVE_L
- | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | li TISNIL, LJ_TNIL
- | lw BASE, L->base
- | lw DISPATCH, L->glref // Setup pointer to dispatch table.
- | mtc1 TMP3, TOBIT
- | li TMP1, LJ_TFALSE
- | li_vmstate INTERP
- | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame.
- | cvt.d.s TOBIT, TOBIT
- | addiu RA, BASE, -8 // Results start at BASE-8.
- | addiu DISPATCH, DISPATCH, GG_G2DISP
- | sw TMP1, HI(RA) // Prepend false to error message.
- | st_vmstate
- | b ->vm_returnc
- |. li RD, 16 // 2 results: false + error message.
- |
- |//-----------------------------------------------------------------------
- |//-- Grow stack for calls -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_growstack_c: // Grow stack for C function.
- | b >2
- |. li CARG2, LUA_MINSTACK
- |
- |->vm_growstack_l: // Grow stack for Lua function.
- | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
- | addu RC, BASE, RC
- | subu RA, RA, BASE
- | sw BASE, L->base
- | addiu PC, PC, 4 // Must point after first instruction.
- | sw RC, L->top
- | srl CARG2, RA, 3
- |2:
- | // L->base = new base, L->top = top
- | load_got lj_state_growstack
- | sw PC, SAVE_PC
- | call_intern lj_state_growstack // (lua_State *L, int n)
- |. move CARG1, L
- | lw BASE, L->base
- | lw RC, L->top
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | subu RC, RC, BASE
- | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
- | ins_callt // Just retry the call.
- |
- |//-----------------------------------------------------------------------
- |//-- Entry points into the assembler VM ---------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_resume: // Setup C frame and resume thread.
- | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
- | saveregs
- | move L, CARG1
- | lw DISPATCH, L->glref // Setup pointer to dispatch table.
- | move BASE, CARG2
- | lbu TMP1, L->status
- | sw L, SAVE_L
- | li PC, FRAME_CP
- | addiu TMP0, sp, CFRAME_RESUME
- | addiu DISPATCH, DISPATCH, GG_G2DISP
- | sw r0, SAVE_NRES
- | sw r0, SAVE_ERRF
- | sw TMP0, L->cframe
- | sw r0, SAVE_CFRAME
- | beqz TMP1, >3
- |. sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- |
- | // Resume after yield (like a return).
- | move RA, BASE
- | lw BASE, L->base
- | lw TMP1, L->top
- | lw PC, FRAME_PC(BASE)
- | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | subu RD, TMP1, BASE
- | mtc1 TMP3, TOBIT
- | sb r0, L->status
- | cvt.d.s TOBIT, TOBIT
- | li_vmstate INTERP
- | addiu RD, RD, 8
- | st_vmstate
- | move MULTRES, RD
- | andi TMP0, PC, FRAME_TYPE
- | beqz TMP0, ->BC_RET_Z
- |. li TISNIL, LJ_TNIL
- | b ->vm_return
- |. nop
- |
- |->vm_pcall: // Setup protected C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
- | saveregs
- | sw CARG4, SAVE_ERRF
- | b >1
- |. li PC, FRAME_CP
- |
- |->vm_call: // Setup C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1)
- | saveregs
- | li PC, FRAME_C
- |
- |1: // Entry point for vm_pcall above (PC = ftype).
- | lw TMP1, L:CARG1->cframe
- | sw CARG3, SAVE_NRES
- | move L, CARG1
- | sw CARG1, SAVE_L
- | move BASE, CARG2
- | sw sp, L->cframe // Add our C frame to cframe chain.
- | lw DISPATCH, L->glref // Setup pointer to dispatch table.
- | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | sw TMP1, SAVE_CFRAME
- | addiu DISPATCH, DISPATCH, GG_G2DISP
- |
- |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
- | lw TMP2, L->base // TMP2 = old base (used in vmeta_call).
- | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | lw TMP1, L->top
- | mtc1 TMP3, TOBIT
- | addu PC, PC, BASE
- | subu NARGS8:RC, TMP1, BASE
- | subu PC, PC, TMP2 // PC = frame delta + frame type
- | cvt.d.s TOBIT, TOBIT
- | li_vmstate INTERP
- | li TISNIL, LJ_TNIL
- | st_vmstate
- |
- |->vm_call_dispatch:
- | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
- | lw TMP0, FRAME_PC(BASE)
- | li AT, LJ_TFUNC
- | bne TMP0, AT, ->vmeta_call
- |. lw LFUNC:RB, FRAME_FUNC(BASE)
- |
- |->vm_call_dispatch_f:
- | ins_call
- | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
- |
- |->vm_cpcall: // Setup protected C frame, call C.
- | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
- | saveregs
- | move L, CARG1
- | lw TMP0, L:CARG1->stack
- | sw CARG1, SAVE_L
- | lw TMP1, L->top
- | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
- | lw TMP1, L->cframe
- | sw sp, L->cframe // Add our C frame to cframe chain.
- | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
- | sw r0, SAVE_ERRF // No error function.
- | move CFUNCADDR, CARG4
- | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
- |. sw TMP1, SAVE_CFRAME
- | move BASE, CRET1
- | lw DISPATCH, L->glref // Setup pointer to dispatch table.
- | li PC, FRAME_CP
- | bnez CRET1, <3 // Else continue with the call.
- |. addiu DISPATCH, DISPATCH, GG_G2DISP
- | b ->vm_leave_cp // No base? Just remove C frame.
- |. nop
- |
- |//-----------------------------------------------------------------------
- |//-- Metamethod handling ------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
- |// stack, so BASE doesn't need to be reloaded across these calls.
- |
- |//-- Continuation dispatch ----------------------------------------------
- |
- |->cont_dispatch:
- | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
- | lw TMP0, -16+LO(BASE) // Continuation.
- | move RB, BASE
- | move BASE, TMP2 // Restore caller BASE.
- | lw LFUNC:TMP1, FRAME_FUNC(TMP2)
- |.if FFI
- | sltiu AT, TMP0, 2
- |.endif
- | lw PC, -16+HI(RB) // Restore PC from [cont|PC].
- | addu TMP2, RA, RD
- | lw TMP1, LFUNC:TMP1->pc
- |.if FFI
- | bnez AT, >1
- |.endif
- |. sw TISNIL, -8+HI(TMP2) // Ensure one valid arg.
- | // BASE = base, RA = resultptr, RB = meta base
- | jr TMP0 // Jump to continuation.
- |. lw KBASE, PC2PROTO(k)(TMP1)
- |
- |.if FFI
- |1:
- | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
- | // cont = 0: tailcall from C function.
- |. addiu TMP1, RB, -16
- | b ->vm_call_tail
- |. subu RC, TMP1, BASE
- |.endif
- |
- |->cont_cat: // RA = resultptr, RB = meta base
- | lw INS, -4(PC)
- | addiu CARG2, RB, -16
- | ldc1 f0, 0(RA)
- | decode_RB8a MULTRES, INS
- | decode_RA8a RA, INS
- | decode_RB8b MULTRES
- | decode_RA8b RA
- | addu TMP1, BASE, MULTRES
- | sw BASE, L->base
- | subu CARG3, CARG2, TMP1
- | bne TMP1, CARG2, ->BC_CAT_Z
- |. sdc1 f0, 0(CARG2)
- | addu RA, BASE, RA
- | b ->cont_nop
- |. sdc1 f0, 0(RA)
- |
- |//-- Table indexing metamethods -----------------------------------------
- |
- |->vmeta_tgets1:
- | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
- | li TMP0, LJ_TSTR
- | sw STR:RC, LO(CARG3)
- | b >1
- |. sw TMP0, HI(CARG3)
- |
- |->vmeta_tgets:
- | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
- | li TMP0, LJ_TTAB
- | sw TAB:RB, LO(CARG2)
- | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
- | sw TMP0, HI(CARG2)
- | li TMP1, LJ_TSTR
- | sw STR:RC, LO(CARG3)
- | b >1
- |. sw TMP1, HI(CARG3)
- |
- |->vmeta_tgetb: // TMP0 = index
- | mtc1 TMP0, f0
- | cvt.d.w f0, f0
- | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
- | sdc1 f0, 0(CARG3)
- |
- |->vmeta_tgetv:
- |1:
- | load_got lj_meta_tget
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
- |. move CARG1, L
- | // Returns TValue * (finished) or NULL (metamethod).
- | beqz CRET1, >3
- |. addiu TMP1, BASE, -FRAME_CONT
- | ldc1 f0, 0(CRET1)
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- |
- |3: // Call __index metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k
- | lw BASE, L->top
- | sw PC, -16+HI(BASE) // [cont|PC]
- | subu PC, BASE, TMP1
- | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | b ->vm_call_dispatch_f
- |. li NARGS8:RC, 16 // 2 args for func(t, k).
- |
- |//-----------------------------------------------------------------------
- |
- |->vmeta_tsets1:
- | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
- | li TMP0, LJ_TSTR
- | sw STR:RC, LO(CARG3)
- | b >1
- |. sw TMP0, HI(CARG3)
- |
- |->vmeta_tsets:
- | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
- | li TMP0, LJ_TTAB
- | sw TAB:RB, LO(CARG2)
- | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
- | sw TMP0, HI(CARG2)
- | li TMP1, LJ_TSTR
- | sw STR:RC, LO(CARG3)
- | b >1
- |. sw TMP1, HI(CARG3)
- |
- |->vmeta_tsetb: // TMP0 = index
- | mtc1 TMP0, f0
- | cvt.d.w f0, f0
- | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
- | sdc1 f0, 0(CARG3)
- |
- |->vmeta_tsetv:
- |1:
- | load_got lj_meta_tset
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
- |. move CARG1, L
- | // Returns TValue * (finished) or NULL (metamethod).
- | beqz CRET1, >3
- |. ldc1 f0, 0(RA)
- | // NOBARRIER: lj_meta_tset ensures the table is not black.
- | ins_next1
- | sdc1 f0, 0(CRET1)
- | ins_next2
- |
- |3: // Call __newindex metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
- | addiu TMP1, BASE, -FRAME_CONT
- | lw BASE, L->top
- | sw PC, -16+HI(BASE) // [cont|PC]
- | subu PC, BASE, TMP1
- | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | sdc1 f0, 16(BASE) // Copy value to third argument.
- | b ->vm_call_dispatch_f
- |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
- |
- |//-- Comparison metamethods ---------------------------------------------
- |
- |->vmeta_comp:
- | // CARG2, CARG3 are already set by BC_ISLT/BC_ISGE/BC_ISLE/BC_ISGT.
- | load_got lj_meta_comp
- | addiu PC, PC, -4
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | decode_OP1 CARG4, INS
- | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
- |. move CARG1, L
- | // Returns 0/1 or TValue * (metamethod).
- |3:
- | sltiu AT, CRET1, 2
- | beqz AT, ->vmeta_binop
- | negu TMP2, CRET1
- |4:
- | lhu RD, OFS_RD(PC)
- | addiu PC, PC, 4
- | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
- | sll RD, RD, 2
- | addu RD, RD, TMP1
- | and RD, RD, TMP2
- | addu PC, PC, RD
- |->cont_nop:
- | ins_next
- |
- |->cont_ra: // RA = resultptr
- | lbu TMP1, -4+OFS_RA(PC)
- | ldc1 f0, 0(RA)
- | sll TMP1, TMP1, 3
- | addu TMP1, BASE, TMP1
- | b ->cont_nop
- |. sdc1 f0, 0(TMP1)
- |
- |->cont_condt: // RA = resultptr
- | lw TMP0, HI(RA)
- | sltiu AT, TMP0, LJ_TISTRUECOND
- | b <4
- |. negu TMP2, AT // Branch if result is true.
- |
- |->cont_condf: // RA = resultptr
- | lw TMP0, HI(RA)
- | sltiu AT, TMP0, LJ_TISTRUECOND
- | b <4
- |. addiu TMP2, AT, -1 // Branch if result is false.
- |
- |->vmeta_equal:
- | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
- | load_got lj_meta_equal
- | addiu PC, PC, -4
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
- |. move CARG1, L
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |. nop
- |
- |->vmeta_equal_cd:
- |.if FFI
- | load_got lj_meta_equal_cd
- | move CARG2, INS
- | addiu PC, PC, -4
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
- |. move CARG1, L
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |. nop
- |.endif
- |
- |//-- Arithmetic metamethods ---------------------------------------------
- |
- |->vmeta_unm:
- | move CARG4, CARG3
- |
- |->vmeta_arith:
- | load_got lj_meta_arith
- | decode_OP1 TMP0, INS
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | move CARG2, RA
- | sw TMP0, ARG5
- | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
- |. move CARG1, L
- | // Returns NULL (finished) or TValue * (metamethod).
- | beqz CRET1, ->cont_nop
- |. nop
- |
- | // Call metamethod for binary op.
- |->vmeta_binop:
- | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
- | subu TMP1, CRET1, BASE
- | sw PC, -16+HI(CRET1) // [cont|PC]
- | move TMP2, BASE
- | addiu PC, TMP1, FRAME_CONT
- | move BASE, CRET1
- | b ->vm_call_dispatch
- |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
- |
- |->vmeta_len:
- | // CARG2 already set by BC_LEN.
-#if LJ_52
- | move MULTRES, CARG1
-#endif
- | load_got lj_meta_len
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | call_intern lj_meta_len // (lua_State *L, TValue *o)
- |. move CARG1, L
- | // Returns NULL (retry) or TValue * (metamethod base).
-#if LJ_52
- | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
- |. nop
- | b ->BC_LEN_Z
- |. move CARG1, MULTRES
-#else
- | b ->vmeta_binop // Binop call for compatibility.
- |. nop
-#endif
- |
- |//-- Call metamethod ----------------------------------------------------
- |
- |->vmeta_call: // Resolve and call __call metamethod.
- | // TMP2 = old base, BASE = new base, RC = nargs*8
- | load_got lj_meta_call
- | sw TMP2, L->base // This is the caller's base!
- | addiu CARG2, BASE, -8
- | sw PC, SAVE_PC
- | addu CARG3, BASE, RC
- | move MULTRES, NARGS8:RC
- | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- |. move CARG1, L
- | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
- | ins_call
- |
- |->vmeta_callt: // Resolve __call for BC_CALLT.
- | // BASE = old base, RA = new base, RC = nargs*8
- | load_got lj_meta_call
- | sw BASE, L->base
- | addiu CARG2, RA, -8
- | sw PC, SAVE_PC
- | addu CARG3, RA, RC
- | move MULTRES, NARGS8:RC
- | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- |. move CARG1, L
- | lw TMP1, FRAME_PC(BASE)
- | lw LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
- | b ->BC_CALLT_Z
- |. addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
- |
- |//-- Argument coercion for 'for' statement ------------------------------
- |
- |->vmeta_for:
- | load_got lj_meta_for
- | sw BASE, L->base
- | move CARG2, RA
- | sw PC, SAVE_PC
- | move MULTRES, INS
- | call_intern lj_meta_for // (lua_State *L, TValue *base)
- |. move CARG1, L
- |.if JIT
- | decode_OP1 TMP0, MULTRES
- | li AT, BC_JFORI
- |.endif
- | decode_RA8a RA, MULTRES
- | decode_RD8a RD, MULTRES
- | decode_RA8b RA
- |.if JIT
- | beq TMP0, AT, =>BC_JFORI
- |. decode_RD8b RD
- | b =>BC_FORI
- |. nop
- |.else
- | b =>BC_FORI
- |. decode_RD8b RD
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Fast functions -----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |.macro .ffunc, name
- |->ff_ .. name:
- |.endmacro
- |
- |.macro .ffunc_1, name
- |->ff_ .. name:
- | beqz NARGS8:RC, ->fff_fallback
- |. lw CARG3, HI(BASE)
- | lw CARG1, LO(BASE)
- |.endmacro
- |
- |.macro .ffunc_2, name
- |->ff_ .. name:
- | sltiu AT, NARGS8:RC, 16
- | lw CARG3, HI(BASE)
- | bnez AT, ->fff_fallback
- |. lw CARG4, 8+HI(BASE)
- | lw CARG1, LO(BASE)
- | lw CARG2, 8+LO(BASE)
- |.endmacro
- |
- |.macro .ffunc_n, name // Caveat: has delay slot!
- |->ff_ .. name:
- | lw CARG3, HI(BASE)
- | beqz NARGS8:RC, ->fff_fallback
- |. ldc1 FARG1, 0(BASE)
- | sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_nn, name // Caveat: has delay slot!
- |->ff_ .. name:
- | sltiu AT, NARGS8:RC, 16
- | lw CARG3, HI(BASE)
- | bnez AT, ->fff_fallback
- |. lw CARG4, 8+HI(BASE)
- | ldc1 FARG1, 0(BASE)
- | ldc1 FARG2, 8(BASE)
- | sltiu TMP0, CARG3, LJ_TISNUM
- | sltiu TMP1, CARG4, LJ_TISNUM
- | and TMP0, TMP0, TMP1
- | beqz TMP0, ->fff_fallback
- |.endmacro
- |
- |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
- |.macro ffgccheck
- | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
- | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
- | subu AT, TMP0, TMP1
- | bgezal AT, ->fff_gcstep
- |.endmacro
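- |// I.e. only call ->fff_gcstep once gc.total has reached gc.threshold;
- |// bgezal links ra, so fff_gcstep can return straight back into the fast
- |// function after running lj_gc_step.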
- |
- |//-- Base library: checks -----------------------------------------------
- |
- |.ffunc_1 assert
- | sltiu AT, CARG3, LJ_TISTRUECOND
- | beqz AT, ->fff_fallback
- |. addiu RA, BASE, -8
- | lw PC, FRAME_PC(BASE)
- | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
- | addu TMP2, RA, NARGS8:RC
- | sw CARG3, HI(RA)
- | addiu TMP1, BASE, 8
- | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
- |. sw CARG1, LO(RA)
- |1:
- | ldc1 f0, 0(TMP1)
- | sdc1 f0, -8(TMP1)
- | bne TMP1, TMP2, <1
- |. addiu TMP1, TMP1, 8
- | b ->fff_res
- |. nop
- |
- |.ffunc type
- | lw CARG3, HI(BASE)
- | li TMP1, LJ_TISNUM
- | beqz NARGS8:RC, ->fff_fallback
- |. sltiu TMP0, CARG3, LJ_TISNUM
- | movz TMP1, CARG3, TMP0
- | not TMP1, TMP1
- | sll TMP1, TMP1, 3
- | addu TMP1, CFUNC:RB, TMP1
- | b ->fff_resn
- |. ldc1 FRET1, CFUNC:TMP1->upvalue
- |
- |//-- Base library: getters and setters ---------------------------------
- |
- |.ffunc_1 getmetatable
- | li AT, LJ_TTAB
- | bne CARG3, AT, >6
- |. li AT, LJ_TUDATA
- |1: // Field metatable must be at the same offset for GCtab and GCudata!
- | lw TAB:CARG1, TAB:CARG1->metatable
- |2:
- | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
- | beqz TAB:CARG1, ->fff_restv
- |. li CARG3, LJ_TNIL
- | lw TMP0, TAB:CARG1->hmask
- | li CARG3, LJ_TTAB // Use metatable as default result.
- | lw TMP1, STR:RC->hash
- | lw NODE:TMP2, TAB:CARG1->node
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | sll TMP0, TMP1, 5
- | sll TMP1, TMP1, 3
- | subu TMP1, TMP0, TMP1
- | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- | li AT, LJ_TSTR
- |3: // Rearranged logic, because we expect _not_ to find the key.
- | lw CARG4, offsetof(Node, key)+HI(NODE:TMP2)
- | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
- | lw NODE:TMP3, NODE:TMP2->next
- | bne CARG4, AT, >4
- |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
- | beq TMP0, STR:RC, >5
- |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2)
- |4:
- | beqz NODE:TMP3, ->fff_restv // Not found, keep default result.
- |. move NODE:TMP2, NODE:TMP3
- | b <3
- |. nop
- |5:
- | beq CARG2, TISNIL, ->fff_restv // Ditto for nil value.
- |. nop
- | move CARG3, CARG2 // Return value of mt.__metatable.
- | b ->fff_restv
- |. move CARG1, TMP1
- |
- |6:
- | beq CARG3, AT, <1
- |. sltiu TMP0, CARG3, LJ_TISNUM
- | li TMP1, LJ_TISNUM
- | movz TMP1, CARG3, TMP0
- | not TMP1, TMP1
- | sll TMP1, TMP1, 2
- | addu TMP1, DISPATCH, TMP1
- | b <2
- |. lw TAB:CARG1, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1)
- |
- |.ffunc_2 setmetatable
- | // Fast path: no mt for table yet and not clearing the mt.
- | li AT, LJ_TTAB
- | bne CARG3, AT, ->fff_fallback
- |. addiu CARG4, CARG4, -LJ_TTAB
- | lw TAB:TMP1, TAB:CARG1->metatable
- | lbu TMP3, TAB:CARG1->marked
- | or AT, CARG4, TAB:TMP1
- | bnez AT, ->fff_fallback
- |. andi AT, TMP3, LJ_GC_BLACK // isblack(table)
- | beqz AT, ->fff_restv
- |. sw TAB:CARG2, TAB:CARG1->metatable
- | barrierback TAB:CARG1, TMP3, TMP0, ->fff_restv
- |
- |.ffunc rawget
- | lw CARG4, HI(BASE)
- | sltiu AT, NARGS8:RC, 16
- | lw TAB:CARG2, LO(BASE)
- | load_got lj_tab_get
- | addiu CARG4, CARG4, -LJ_TTAB
- | or AT, AT, CARG4
- | bnez AT, ->fff_fallback
- | addiu CARG3, BASE, 8
- | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
- |. move CARG1, L
- | // Returns cTValue *.
- | b ->fff_resn
- |. ldc1 FRET1, 0(CRET1)
- |
- |//-- Base library: conversions ------------------------------------------
- |
- |.ffunc tonumber
- | // Only handles the number case inline (without a base argument).
- | lw CARG1, HI(BASE)
- | xori AT, NARGS8:RC, 8
- | sltiu CARG1, CARG1, LJ_TISNUM
- | movn CARG1, r0, AT
- | beqz CARG1, ->fff_fallback // Exactly one number argument.
- |. ldc1 FRET1, 0(BASE)
- | b ->fff_resn
- |. nop
- |
- |.ffunc_1 tostring
- | // Only handles the string or number case inline.
- | li AT, LJ_TSTR
- | // A __tostring method in the string base metatable is ignored.
- | beq CARG3, AT, ->fff_restv // String key?
- | // Handle numbers inline, unless a number base metatable is present.
- |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
- | sltiu TMP0, CARG3, LJ_TISNUM
- | sltiu TMP1, TMP1, 1
- | and TMP0, TMP0, TMP1
- | beqz TMP0, ->fff_fallback
- |. sw BASE, L->base // Add frame since C call can throw.
- | ffgccheck
- |. sw PC, SAVE_PC // Redundant (but a defined value).
- | load_got lj_str_fromnum
- | move CARG1, L
- | call_intern lj_str_fromnum // (lua_State *L, lua_Number *np)
- |. move CARG2, BASE
- | // Returns GCstr *.
- | li CARG3, LJ_TSTR
- | b ->fff_restv
- |. move CARG1, CRET1
- |
- |//-- Base library: iterators -------------------------------------------
- |
- |.ffunc next
- | lw CARG1, HI(BASE)
- | lw TAB:CARG2, LO(BASE)
- | beqz NARGS8:RC, ->fff_fallback
- |. addu TMP2, BASE, NARGS8:RC
- | li AT, LJ_TTAB
- | sw TISNIL, HI(TMP2) // Set missing 2nd arg to nil.
- | bne CARG1, AT, ->fff_fallback
- |. lw PC, FRAME_PC(BASE)
- | load_got lj_tab_next
- | sw BASE, L->base // Add frame since C call can throw.
- | sw BASE, L->top // Dummy frame length is ok.
- | addiu CARG3, BASE, 8
- | sw PC, SAVE_PC
- | call_intern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
- |. move CARG1, L
- | // Returns 0 at end of traversal.
- | beqz CRET1, ->fff_restv // End of traversal: return nil.
- |. li CARG3, LJ_TNIL
- | ldc1 f0, 8(BASE) // Copy key and value to results.
- | addiu RA, BASE, -8
- | ldc1 f2, 16(BASE)
- | li RD, (2+1)*8
- | sdc1 f0, 0(RA)
- | b ->fff_res
- |. sdc1 f2, 8(RA)
- |
- |.ffunc_1 pairs
- | li AT, LJ_TTAB
- | bne CARG3, AT, ->fff_fallback
- |. lw PC, FRAME_PC(BASE)
-#if LJ_52
- | lw TAB:TMP2, TAB:CARG1->metatable
- | ldc1 f0, CFUNC:RB->upvalue[0]
- | bnez TAB:TMP2, ->fff_fallback
-#else
- | ldc1 f0, CFUNC:RB->upvalue[0]
-#endif
- |. addiu RA, BASE, -8
- | sw TISNIL, 8+HI(BASE)
- | li RD, (3+1)*8
- | b ->fff_res
- |. sdc1 f0, 0(RA)
- |
- |.ffunc ipairs_aux
- | sltiu AT, NARGS8:RC, 16
- | lw CARG3, HI(BASE)
- | lw TAB:CARG1, LO(BASE)
- | lw CARG4, 8+HI(BASE)
- | bnez AT, ->fff_fallback
- |. ldc1 FARG2, 8(BASE)
- | addiu CARG3, CARG3, -LJ_TTAB
- | sltiu AT, CARG4, LJ_TISNUM
- | li TMP0, 1
- | movn AT, r0, CARG3
- | mtc1 TMP0, FARG1
- | beqz AT, ->fff_fallback
- |. lw PC, FRAME_PC(BASE)
- | cvt.w.d FRET1, FARG2
- | cvt.d.w FARG1, FARG1
- | lw TMP0, TAB:CARG1->asize
- | lw TMP1, TAB:CARG1->array
- | mfc1 TMP2, FRET1
- | addiu RA, BASE, -8
- | add.d FARG2, FARG2, FARG1
- | addiu TMP2, TMP2, 1
- | sltu AT, TMP2, TMP0
- | sll TMP3, TMP2, 3
- | addu TMP3, TMP1, TMP3
- | beqz AT, >2 // Not in array part?
- |. sdc1 FARG2, 0(RA)
- | lw TMP2, HI(TMP3)
- | ldc1 f0, 0(TMP3)
- |1:
- | beq TMP2, TISNIL, ->fff_res // End of iteration, return 0 results.
- |. li RD, (0+1)*8
- | li RD, (2+1)*8
- | b ->fff_res
- |. sdc1 f0, 8(RA)
- |2: // Check for empty hash part first. Otherwise call C function.
- | lw TMP0, TAB:CARG1->hmask
- | load_got lj_tab_getinth
- | beqz TMP0, ->fff_res
- |. li RD, (0+1)*8
- | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
- |. move CARG2, TMP2
- | // Returns cTValue * or NULL.
- | beqz CRET1, ->fff_res
- |. li RD, (0+1)*8
- | lw TMP2, HI(CRET1)
- | b <1
- |. ldc1 f0, 0(CRET1)
- |
- |.ffunc_1 ipairs
- | li AT, LJ_TTAB
- | bne CARG3, AT, ->fff_fallback
- |. lw PC, FRAME_PC(BASE)
-#if LJ_52
- | lw TAB:TMP2, TAB:CARG1->metatable
- | ldc1 f0, CFUNC:RB->upvalue[0]
- | bnez TAB:TMP2, ->fff_fallback
-#else
- | ldc1 f0, CFUNC:RB->upvalue[0]
-#endif
- |. addiu RA, BASE, -8
- | sw r0, 8+HI(BASE)
- | sw r0, 8+LO(BASE)
- | li RD, (3+1)*8
- | b ->fff_res
- |. sdc1 f0, 0(RA)
- |
- |//-- Base library: catch errors ----------------------------------------
- |
- |.ffunc pcall
- | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | beqz NARGS8:RC, ->fff_fallback
- | move TMP2, BASE
- | addiu BASE, BASE, 8
- | // Remember active hook before pcall.
- | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
- | andi TMP3, TMP3, 1
- | addiu PC, TMP3, 8+FRAME_PCALL
- | b ->vm_call_dispatch
- |. addiu NARGS8:RC, NARGS8:RC, -8
- |
- |.ffunc xpcall
- | sltiu AT, NARGS8:RC, 16
- | lw CARG4, 8+HI(BASE)
- | bnez AT, ->fff_fallback
- |. ldc1 FARG2, 8(BASE)
- | ldc1 FARG1, 0(BASE)
- | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
- | li AT, LJ_TFUNC
- | move TMP2, BASE
- | bne CARG4, AT, ->fff_fallback // Traceback must be a function.
- | addiu BASE, BASE, 16
- | // Remember active hook before pcall.
- | srl TMP3, TMP1, HOOK_ACTIVE_SHIFT
- | sdc1 FARG2, 0(TMP2) // Swap function and traceback.
- | andi TMP3, TMP3, 1
- | sdc1 FARG1, 8(TMP2)
- | addiu PC, TMP3, 16+FRAME_PCALL
- | b ->vm_call_dispatch
- |. addiu NARGS8:RC, NARGS8:RC, -16
- |
- |//-- Coroutine library --------------------------------------------------
- |
- |.macro coroutine_resume_wrap, resume
- |.if resume
- |.ffunc_1 coroutine_resume
- | li AT, LJ_TTHREAD
- | bne CARG3, AT, ->fff_fallback
- |.else
- |.ffunc coroutine_wrap_aux
- | lw L:CARG1, CFUNC:RB->upvalue[0].gcr
- |.endif
- | lbu TMP0, L:CARG1->status
- | lw TMP1, L:CARG1->cframe
- | lw CARG2, L:CARG1->top
- | lw TMP2, L:CARG1->base
- | addiu TMP3, TMP0, -LUA_YIELD
- | bgtz TMP3, ->fff_fallback // st > LUA_YIELD?
- |. xor TMP2, TMP2, CARG2
- | bnez TMP1, ->fff_fallback // cframe != 0?
- |. or AT, TMP2, TMP0
- | lw TMP0, L:CARG1->maxstack
- | beqz AT, ->fff_fallback // base == top && st == 0?
- |. lw PC, FRAME_PC(BASE)
- | addu TMP2, CARG2, NARGS8:RC
- | sltu AT, TMP0, TMP2
- | bnez AT, ->fff_fallback // Stack overflow?
- |. sw PC, SAVE_PC
- | sw BASE, L->base
- |1:
- |.if resume
- | addiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
- | addiu NARGS8:RC, NARGS8:RC, -8
- | addiu TMP2, TMP2, -8
- |.endif
- | sw TMP2, L:CARG1->top
- | addu TMP1, BASE, NARGS8:RC
- | move CARG3, CARG2
- | sw BASE, L->top
- |2: // Move args to coroutine.
- | ldc1 f0, 0(BASE)
- | sltu AT, BASE, TMP1
- | beqz AT, >3
- |. addiu BASE, BASE, 8
- | sdc1 f0, 0(CARG3)
- | b <2
- |. addiu CARG3, CARG3, 8
- |3:
- | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
- |. move L:RA, L:CARG1
- | // Returns thread status.
- |4:
- | lw TMP2, L:RA->base
- | sltiu AT, CRET1, LUA_YIELD+1
- | lw TMP3, L:RA->top
- | li_vmstate INTERP
- | lw BASE, L->base
- | st_vmstate
- | beqz AT, >8
- |. subu RD, TMP3, TMP2
- | lw TMP0, L->maxstack
- | beqz RD, >6 // No results?
- |. addu TMP1, BASE, RD
- | sltu AT, TMP0, TMP1
- | bnez AT, >9 // Need to grow stack?
- |. addu TMP3, TMP2, RD
- | sw TMP2, L:RA->top // Clear coroutine stack.
- | move TMP1, BASE
- |5: // Move results from coroutine.
- | ldc1 f0, 0(TMP2)
- | addiu TMP2, TMP2, 8
- | sltu AT, TMP2, TMP3
- | sdc1 f0, 0(TMP1)
- | bnez AT, <5
- |. addiu TMP1, TMP1, 8
- |6:
- | andi TMP0, PC, FRAME_TYPE
- |.if resume
- | li TMP1, LJ_TTRUE
- | addiu RA, BASE, -8
- | sw TMP1, -8+HI(BASE) // Prepend true to results.
- | addiu RD, RD, 16
- |.else
- | move RA, BASE
- | addiu RD, RD, 8
- |.endif
- |7:
- | sw PC, SAVE_PC
- | beqz TMP0, ->BC_RET_Z
- |. move MULTRES, RD
- | b ->vm_return
- |. nop
- |
- |8: // Coroutine returned with error (at co->top-1).
- |.if resume
- | addiu TMP3, TMP3, -8
- | li TMP1, LJ_TFALSE
- | ldc1 f0, 0(TMP3)
- | sw TMP3, L:RA->top // Remove error from coroutine stack.
- | li RD, (2+1)*8
- | sw TMP1, -8+HI(BASE) // Prepend false to results.
- | addiu RA, BASE, -8
- | sdc1 f0, 0(BASE) // Copy error message.
- | b <7
- |. andi TMP0, PC, FRAME_TYPE
- |.else
- | load_got lj_ffh_coroutine_wrap_err
- | move CARG2, L:RA
- | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
- |. move CARG1, L
- |.endif
- |
- |9: // Handle stack expansion on return from yield.
- | load_got lj_state_growstack
- | srl CARG2, RD, 3
- | call_intern lj_state_growstack // (lua_State *L, int n)
- |. move CARG1, L
- | b <4
- |. li CRET1, 0
- |.endmacro
- |
- | coroutine_resume_wrap 1 // coroutine.resume
- | coroutine_resume_wrap 0 // coroutine.wrap
- |
- |.ffunc coroutine_yield
- | lw TMP0, L->cframe
- | addu TMP1, BASE, NARGS8:RC
- | sw BASE, L->base
- | andi TMP0, TMP0, CFRAME_RESUME
- | sw TMP1, L->top
- | beqz TMP0, ->fff_fallback
- |. li CRET1, LUA_YIELD
- | sw r0, L->cframe
- | b ->vm_leave_unw
- |. sb CRET1, L->status
- |
- |//-- Math library -------------------------------------------------------
- |
- |.ffunc_n math_abs
- |. abs.d FRET1, FARG1
- |->fff_resn:
- | lw PC, FRAME_PC(BASE)
- | addiu RA, BASE, -8
- | b ->fff_res1
- |. sdc1 FRET1, -8(BASE)
- |
- |->fff_restv:
- | // CARG3/CARG1 = TValue result.
- | lw PC, FRAME_PC(BASE)
- | sw CARG3, -8+HI(BASE)
- | addiu RA, BASE, -8
- | sw CARG1, -8+LO(BASE)
- |->fff_res1:
- | // RA = results, PC = return.
- | li RD, (1+1)*8
- |->fff_res:
- | // RA = results, RD = (nresults+1)*8, PC = return.
- | andi TMP0, PC, FRAME_TYPE
- | bnez TMP0, ->vm_return
- |. move MULTRES, RD
- | lw INS, -4(PC)
- | decode_RB8a RB, INS
- | decode_RB8b RB
- |5:
- | sltu AT, RD, RB
- | bnez AT, >6 // More results expected?
- |. decode_RA8a TMP0, INS
- | decode_RA8b TMP0
- | ins_next1
- | // Adjust BASE. KBASE is assumed to be set for the calling frame.
- | subu BASE, RA, TMP0
- | ins_next2
- |
- |6: // Fill up results with nil.
- | addu TMP1, RA, RD
- | addiu RD, RD, 8
- | b <5
- |. sw TISNIL, -8+HI(TMP1)
- |
- |.macro math_extern, func
- |->ff_math_ .. func:
- | lw CARG3, HI(BASE)
- | beqz NARGS8:RC, ->fff_fallback
- |. load_got func
- | sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |. nop
- | call_extern
- |. ldc1 FARG1, 0(BASE)
- | b ->fff_resn
- |. nop
- |.endmacro
- |
- |.macro math_extern2, func
- | .ffunc_nn math_ .. func
- |. load_got func
- | call_extern
- |. nop
- | b ->fff_resn
- |. nop
- |.endmacro
- |
- |.macro math_round, func
- | .ffunc_n math_ .. func
- |. nop
- | bal ->vm_ .. func
- |. nop
- | b ->fff_resn
- |. nop
- |.endmacro
- |
- | math_round floor
- | math_round ceil
- |
- |.ffunc math_log
- | lw CARG3, HI(BASE)
- | li AT, 8
- | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
- |. load_got log
- | sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |. nop
- | call_extern
- |. ldc1 FARG1, 0(BASE)
- | b ->fff_resn
- |. nop
- |
- | math_extern log10
- | math_extern exp
- | math_extern sin
- | math_extern cos
- | math_extern tan
- | math_extern asin
- | math_extern acos
- | math_extern atan
- | math_extern sinh
- | math_extern cosh
- | math_extern tanh
- | math_extern2 pow
- | math_extern2 atan2
- | math_extern2 fmod
- |
- |.ffunc_n math_sqrt
- |. sqrt.d FRET1, FARG1
- | b ->fff_resn
- |. nop
- |
- |->ff_math_deg:
- |.ffunc_n math_rad
- |. ldc1 FARG2, CFUNC:RB->upvalue[0]
- | b ->fff_resn
- |. mul.d FRET1, FARG1, FARG2
- |
- |.ffunc_nn math_ldexp
- | cvt.w.d FARG2, FARG2
- | load_got ldexp
- | mfc1 CARG3, FARG2
- | call_extern
- |. nop
- | b ->fff_resn
- |. nop
- |
- |.ffunc_n math_frexp
- | load_got frexp
- | lw PC, FRAME_PC(BASE)
- | call_extern
- |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
- | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
- | addiu RA, BASE, -8
- | mtc1 TMP1, FARG2
- | sdc1 FRET1, 0(RA)
- | cvt.d.w FARG2, FARG2
- | sdc1 FARG2, 8(RA)
- | b ->fff_res
- |. li RD, (2+1)*8
- |
- |.ffunc_n math_modf
- | load_got modf
- | lw PC, FRAME_PC(BASE)
- | call_extern
- |. addiu CARG3, BASE, -8
- | addiu RA, BASE, -8
- | sdc1 FRET1, 0(BASE)
- | b ->fff_res
- |. li RD, (2+1)*8
- |
- |.macro math_minmax, name, ismax
- |->ff_ .. name:
- | lw CARG3, HI(BASE)
- | beqz NARGS8:RC, ->fff_fallback
- |. ldc1 FRET1, 0(BASE)
- | sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |. addu TMP2, BASE, NARGS8:RC
- | addiu TMP1, BASE, 8
- | beq TMP1, TMP2, ->fff_resn
- |1:
- |. lw CARG3, HI(TMP1)
- | ldc1 FARG1, 0(TMP1)
- | addiu TMP1, TMP1, 8
- | sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |.if ismax
- |. c.olt.d FARG1, FRET1
- |.else
- |. c.olt.d FRET1, FARG1
- |.endif
- | bne TMP1, TMP2, <1
- |. movf.d FRET1, FARG1
- | b ->fff_resn
- |. nop
- |.endmacro
- |
- | math_minmax math_min, 0
- | math_minmax math_max, 1
- |
- |//-- String library -----------------------------------------------------
- |
- |.ffunc_1 string_len
- | li AT, LJ_TSTR
- | bne CARG3, AT, ->fff_fallback
- |. nop
- | b ->fff_resi
- |. lw CRET1, STR:CARG1->len
- |
- |.ffunc string_byte // Only handle the 1-arg case here.
- | lw CARG3, HI(BASE)
- | lw STR:CARG1, LO(BASE)
- | xori AT, NARGS8:RC, 8
- | addiu CARG3, CARG3, -LJ_TSTR
- | or AT, AT, CARG3
- | bnez AT, ->fff_fallback // Need exactly 1 string argument.
- |. nop
- | lw TMP0, STR:CARG1->len
- | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
- | addiu RA, BASE, -8
- | sltu RD, r0, TMP0
- | mtc1 TMP1, f0
- | addiu RD, RD, 1
- | cvt.d.w f0, f0
- | lw PC, FRAME_PC(BASE)
- | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
- | b ->fff_res
- |. sdc1 f0, 0(RA)
- |
- |.ffunc string_char // Only handle the 1-arg case here.
- | ffgccheck
- | lw CARG3, HI(BASE)
- | ldc1 FARG1, 0(BASE)
- | li AT, 8
- | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
- |. sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |. li CARG3, 1
- | cvt.w.d FARG1, FARG1
- | addiu CARG2, sp, ARG5_OFS
- | sltiu AT, TMP0, 256
- | mfc1 TMP0, FARG1
- | beqz AT, ->fff_fallback
- |. sw TMP0, ARG5
- |->fff_newstr:
- | load_got lj_str_new
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
- |. move CARG1, L
- | // Returns GCstr *.
- | lw BASE, L->base
- | move CARG1, CRET1
- | b ->fff_restv
- |. li CARG3, LJ_TSTR
- |
- |.ffunc string_sub
- | ffgccheck
- | addiu AT, NARGS8:RC, -16
- | lw CARG3, 16+HI(BASE)
- | ldc1 f0, 16(BASE)
- | lw TMP0, HI(BASE)
- | lw STR:CARG1, LO(BASE)
- | bltz AT, ->fff_fallback
- | lw CARG2, 8+HI(BASE)
- | ldc1 f2, 8(BASE)
- | beqz AT, >1
- |. li CARG4, -1
- | cvt.w.d f0, f0
- | sltiu AT, CARG3, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |. mfc1 CARG4, f0
- |1:
- | sltiu AT, CARG2, LJ_TISNUM
- | beqz AT, ->fff_fallback
- |. li AT, LJ_TSTR
- | cvt.w.d f2, f2
- | bne TMP0, AT, ->fff_fallback
- |. lw CARG2, STR:CARG1->len
- | mfc1 CARG3, f2
- | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
- | slt AT, CARG4, r0
- | addiu TMP0, CARG2, 1
- | addu TMP1, CARG4, TMP0
- | slt TMP3, CARG3, r0
- | movn CARG4, TMP1, AT // if (end < 0) end += len+1
- | addu TMP1, CARG3, TMP0
- | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
- | li TMP2, 1
- | slt AT, CARG4, r0
- | slt TMP3, r0, CARG3
- | movn CARG4, r0, AT // if (end < 0) end = 0
- | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
- | slt AT, CARG2, CARG4
- | movn CARG4, CARG2, AT // if (end > len) end = len
- | addu CARG2, STR:CARG1, CARG3
- | subu CARG3, CARG4, CARG3 // len = end - start
- | addiu CARG2, CARG2, sizeof(GCstr)-1
- | bgez CARG3, ->fff_newstr
- |. addiu CARG3, CARG3, 1 // len++
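- | // Index normalization above: negative start/end count from the string end
- | // (index += len+1), start is clamped to >= 1 and end to [0, len]; the
- | // length end-start+1 is computed in the delay slot and a negative result
- | // falls through to the empty-string return below.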
- |->fff_emptystr: // Return empty string.
- | addiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty)
- | b ->fff_restv
- |. li CARG3, LJ_TSTR
- |
- |.ffunc string_rep // Only handle the 1-char case inline.
- | ffgccheck
- | lw TMP0, HI(BASE)
- | addiu AT, NARGS8:RC, -16 // Exactly 2 arguments.
- | lw CARG4, 8+HI(BASE)
- | lw STR:CARG1, LO(BASE)
- | addiu TMP0, TMP0, -LJ_TSTR
- | ldc1 f0, 8(BASE)
- | or AT, AT, TMP0
- | bnez AT, ->fff_fallback
- |. sltiu AT, CARG4, LJ_TISNUM
- | cvt.w.d f0, f0
- | beqz AT, ->fff_fallback
- |. lw TMP0, STR:CARG1->len
- | mfc1 CARG3, f0
- | lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | li AT, 1
- | blez CARG3, ->fff_emptystr // Count <= 0?
- |. sltu AT, AT, TMP0
- | beqz TMP0, ->fff_emptystr // Zero length string?
- |. sltu TMP0, TMP1, CARG3
- | or AT, AT, TMP0
- | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | bnez AT, ->fff_fallback // Fallback for > 1-char strings.
- |. lbu TMP0, STR:CARG1[1]
- | addu TMP2, CARG2, CARG3
- |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
- | addiu TMP2, TMP2, -1
- | sltu AT, CARG2, TMP2
- | bnez AT, <1
- |. sb TMP0, 0(TMP2)
- | b ->fff_newstr
- |. nop
- |
- |.ffunc string_reverse
- | ffgccheck
- | lw CARG3, HI(BASE)
- | lw STR:CARG1, LO(BASE)
- | beqz NARGS8:RC, ->fff_fallback
- |. li AT, LJ_TSTR
- | bne CARG3, AT, ->fff_fallback
- |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | lw CARG3, STR:CARG1->len
- | addiu CARG1, STR:CARG1, #STR
- | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | sltu AT, TMP1, CARG3
- | bnez AT, ->fff_fallback
- |. addu TMP3, CARG1, CARG3
- | addu CARG4, CARG2, CARG3
- |1: // Reverse string copy.
- | lbu TMP1, 0(CARG1)
- | sltu AT, CARG1, TMP3
- | beqz AT, ->fff_newstr
- |. addiu CARG1, CARG1, 1
- | addiu CARG4, CARG4, -1
- | b <1
- | sb TMP1, 0(CARG4)
- |
- |.macro ffstring_case, name, lo
- | .ffunc name
- | ffgccheck
- | lw CARG3, HI(BASE)
- | lw STR:CARG1, LO(BASE)
- | beqz NARGS8:RC, ->fff_fallback
- |. li AT, LJ_TSTR
- | bne CARG3, AT, ->fff_fallback
- |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | lw CARG3, STR:CARG1->len
- | addiu CARG1, STR:CARG1, #STR
- | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | sltu AT, TMP1, CARG3
- | bnez AT, ->fff_fallback
- |. addu TMP3, CARG1, CARG3
- | move CARG4, CARG2
- |1: // ASCII case conversion.
- | lbu TMP1, 0(CARG1)
- | sltu AT, CARG1, TMP3
- | beqz AT, ->fff_newstr
- |. addiu TMP0, TMP1, -lo
- | xori TMP2, TMP1, 0x20
- | sltiu AT, TMP0, 26
- | movn TMP1, TMP2, AT
- | addiu CARG1, CARG1, 1
- | sb TMP1, 0(CARG4)
- | b <1
- |. addiu CARG4, CARG4, 1
- |.endmacro
- |
- |ffstring_case string_lower, 65
- |ffstring_case string_upper, 97
- |
- |//-- Table library ------------------------------------------------------
- |
- |.ffunc_1 table_getn
- | li AT, LJ_TTAB
- | bne CARG3, AT, ->fff_fallback
- |. load_got lj_tab_len
- | call_intern lj_tab_len // (GCtab *t)
- |. nop
- | // Returns uint32_t (but less than 2^31).
- | b ->fff_resi
- |. nop
- |
- |//-- Bit library --------------------------------------------------------
- |
- |.macro .ffunc_bit, name
- | .ffunc_n bit_..name
- |. add.d FARG1, FARG1, TOBIT
- | mfc1 CRET1, FARG1
- |.endmacro
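- |// Adding TOBIT (2^52 + 2^51) to a double in int32 range forces the integer
- |// value into the low mantissa word, so the mfc1 of the low half is a fast
- |// double -> int32 conversion (the usual LuaJIT number-to-bit pattern).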
- |
- |.macro .ffunc_bit_op, name, ins
- | .ffunc_bit name
- | addiu TMP1, BASE, 8
- | addu TMP2, BASE, NARGS8:RC
- |1:
- | lw CARG4, HI(TMP1)
- | beq TMP1, TMP2, ->fff_resi
- |. ldc1 FARG1, 0(TMP1)
- | sltiu AT, CARG4, LJ_TISNUM
- | beqz AT, ->fff_fallback
- | add.d FARG1, FARG1, TOBIT
- | mfc1 CARG2, FARG1
- | ins CRET1, CRET1, CARG2
- | b <1
- |. addiu TMP1, TMP1, 8
- |.endmacro
- |
- |.ffunc_bit_op band, and
- |.ffunc_bit_op bor, or
- |.ffunc_bit_op bxor, xor
- |
- |.ffunc_bit bswap
- | srl TMP0, CRET1, 24
- | srl TMP2, CRET1, 8
- | sll TMP1, CRET1, 24
- | andi TMP2, TMP2, 0xff00
- | or TMP0, TMP0, TMP1
- | andi CRET1, CRET1, 0xff00
- | or TMP0, TMP0, TMP2
- | sll CRET1, CRET1, 8
- | b ->fff_resi
- |. or CRET1, TMP0, CRET1
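- | // The sequence above is a plain 32 bit byte swap without MIPS32R2 rotates:
- | // x = (x>>24) | ((x>>8) & 0xff00) | ((x & 0xff00) << 8) | (x<<24).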
- |
- |.ffunc_bit bnot
- | b ->fff_resi
- |. not CRET1, CRET1
- |
- |.macro .ffunc_bit_sh, name, ins, shmod
- | .ffunc_nn bit_..name
- |. add.d FARG1, FARG1, TOBIT
- | add.d FARG2, FARG2, TOBIT
- | mfc1 CARG1, FARG1
- | mfc1 CARG2, FARG2
- |.if shmod == 1
- | li AT, 32
- | subu TMP0, AT, CARG2
- | sllv CARG2, CARG1, CARG2
- | srlv CARG1, CARG1, TMP0
- |.elif shmod == 2
- | li AT, 32
- | subu TMP0, AT, CARG2
- | srlv CARG2, CARG1, CARG2
- | sllv CARG1, CARG1, TMP0
- |.endif
- | b ->fff_resi
- |. ins CRET1, CARG1, CARG2
- |.endmacro
- |
- |.ffunc_bit_sh lshift, sllv, 0
- |.ffunc_bit_sh rshift, srlv, 0
- |.ffunc_bit_sh arshift, srav, 0
- |// Can't use rotrv, since it's only in MIPS32R2.
- |.ffunc_bit_sh rol, or, 1
- |.ffunc_bit_sh ror, or, 2
- |
- |.ffunc_bit tobit
- |->fff_resi:
- | mtc1 CRET1, FRET1
- | b ->fff_resn
- |. cvt.d.w FRET1, FRET1
- |
- |//-----------------------------------------------------------------------
- |
- |->fff_fallback: // Call fast function fallback handler.
- | // BASE = new base, RB = CFUNC, RC = nargs*8
- | lw TMP3, CFUNC:RB->f
- | addu TMP1, BASE, NARGS8:RC
- | lw PC, FRAME_PC(BASE) // Fallback may overwrite PC.
- | addiu TMP0, TMP1, 8*LUA_MINSTACK
- | lw TMP2, L->maxstack
- | sw PC, SAVE_PC // Redundant (but a defined value).
- | sltu AT, TMP2, TMP0
- | sw BASE, L->base
- | sw TMP1, L->top
- | bnez AT, >5 // Need to grow stack.
- |. move CFUNCADDR, TMP3
- | jalr TMP3 // (lua_State *L)
- |. move CARG1, L
- | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
- | lw BASE, L->base
- | sll RD, CRET1, 3
- | bgtz CRET1, ->fff_res // Returned nresults+1?
- |. addiu RA, BASE, -8
- |1: // Returned 0 or -1: retry fast path.
- | lw TMP0, L->top
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | bnez CRET1, ->vm_call_tail // Returned -1?
- |. subu NARGS8:RC, TMP0, BASE
- | ins_callt // Returned 0: retry fast path.
- |
- |// Reconstruct previous base for vmeta_call during tailcall.
- |->vm_call_tail:
- | andi TMP0, PC, FRAME_TYPE
- | li AT, -4
- | bnez TMP0, >3
- |. and TMP1, PC, AT
- | lbu TMP1, OFS_RA(PC)
- | sll TMP1, TMP1, 3
- | addiu TMP1, TMP1, 8
- |3:
- | b ->vm_call_dispatch // Resolve again for tailcall.
- |. subu TMP2, BASE, TMP1
- |
- |5: // Grow stack for fallback handler.
- | load_got lj_state_growstack
- | li CARG2, LUA_MINSTACK
- | call_intern lj_state_growstack // (lua_State *L, int n)
- |. move CARG1, L
- | lw BASE, L->base
- | b <1
- |. li CRET1, 0 // Force retry.
- |
- |->fff_gcstep: // Call GC step function.
- | // BASE = new base, RC = nargs*8
- | move MULTRES, ra
- | load_got lj_gc_step
- | sw BASE, L->base
- | addu TMP0, BASE, NARGS8:RC
- | sw PC, SAVE_PC // Redundant (but a defined value).
- | sw TMP0, L->top
- | call_intern lj_gc_step // (lua_State *L)
- |. move CARG1, L
- | lw BASE, L->base
- | move ra, MULTRES
- | lw TMP0, L->top
- | lw CFUNC:RB, FRAME_FUNC(BASE)
- | jr ra
- |. subu NARGS8:RC, TMP0, BASE
- |
- |//-----------------------------------------------------------------------
- |//-- Special dispatch targets -------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_record: // Dispatch target for recording phase.
- |.if JIT
- | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
- | bnez AT, >5
- | // Decrement the hookcount for consistency, but always do the call.
- |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | andi AT, TMP3, HOOK_ACTIVE
- | bnez AT, >1
- |. addiu TMP2, TMP2, -1
- | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
- | beqz AT, >1
- |. nop
- | b >1
- |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- |.endif
- |
- |->vm_rethook: // Dispatch target for return hooks.
- | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
- | beqz AT, >1
- |5: // Re-dispatch to static ins.
- |. lw AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4.
- | jr AT
- |. nop
- |
- |->vm_inshook: // Dispatch target for instr/line hooks.
- | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
- | bnez AT, <5
- |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
- | beqz AT, <5
- |. addiu TMP2, TMP2, -1
- | beqz TMP2, >1
- |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | andi AT, TMP3, LUA_MASKLINE
- | beqz AT, <5
- |1:
- |. load_got lj_dispatch_ins
- | sw MULTRES, SAVE_MULTRES
- | move CARG2, PC
- | sw BASE, L->base
- | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
- | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
- |. move CARG1, L
- |3:
- | lw BASE, L->base
- |4: // Re-dispatch to static ins.
- | lw INS, -4(PC)
- | decode_OP4a TMP1, INS
- | decode_OP4b TMP1
- | addu TMP0, DISPATCH, TMP1
- | decode_RD8a RD, INS
- | lw AT, GG_DISP2STATIC(TMP0)
- | decode_RA8a RA, INS
- | decode_RD8b RD
- | jr AT
- | decode_RA8b RA
- |
- |->cont_hook: // Continue from hook yield.
- | addiu PC, PC, 4
- | b <4
- |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
- |
- |->vm_hotloop: // Hot loop counter underflow.
- |.if JIT
- | lw LFUNC:TMP1, FRAME_FUNC(BASE)
- | addiu CARG1, DISPATCH, GG_DISP2J
- | sw PC, SAVE_PC
- | lw TMP1, LFUNC:TMP1->pc
- | move CARG2, PC
- | sw L, DISPATCH_J(L)(DISPATCH)
- | lbu TMP1, PC2PROTO(framesize)(TMP1)
- | load_got lj_trace_hot
- | sw BASE, L->base
- | sll TMP1, TMP1, 3
- | addu TMP1, BASE, TMP1
- | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
- |. sw TMP1, L->top
- | b <3
- |. nop
- |.endif
- |
- |->vm_callhook: // Dispatch target for call hooks.
- |.if JIT
- | b >1
- |.endif
- |. move CARG2, PC
- |
- |->vm_hotcall: // Hot call counter underflow.
- |.if JIT
- | ori CARG2, PC, 1
- |1:
- |.endif
- | load_got lj_dispatch_call
- | addu TMP0, BASE, RC
- | sw PC, SAVE_PC
- | sw BASE, L->base
- | subu RA, RA, BASE
- | sw TMP0, L->top
- | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
- |. move CARG1, L
- | // Returns ASMFunction.
- | lw BASE, L->base
- | lw TMP0, L->top
- | sw r0, SAVE_PC // Invalidate for subsequent line hook.
- | subu NARGS8:RC, TMP0, BASE
- | addu RA, BASE, RA
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | jr CRET1
- |. lw INS, -4(PC)
- |
- |//-----------------------------------------------------------------------
- |//-- Trace exit handler -------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |.macro savex_, a, b
- | sdc1 f..a, 16+a*8(sp)
- | sw r..a, 16+32*8+a*4(sp)
- | sw r..b, 16+32*8+b*4(sp)
- |.endmacro
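- |// Each savex_ pair stores one even-numbered FPR (as a 64 bit double) and
- |// two GPRs into the exit frame: 16 bytes of outgoing-argument space, then
- |// 32*8 bytes of FPR slots and 32*4 bytes of GPR slots, i.e. the register
- |// snapshot passed to lj_trace_exit as the ExitState.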
- |
- |->vm_exit_handler:
- |.if JIT
- | addiu sp, sp, -(16+32*8+32*4)
- | savex_ 0, 1
- | savex_ 2, 3
- | savex_ 4, 5
- | savex_ 6, 7
- | savex_ 8, 9
- | savex_ 10, 11
- | savex_ 12, 13
- | savex_ 14, 15
- | savex_ 16, 17
- | savex_ 18, 19
- | savex_ 20, 21
- | savex_ 22, 23
- | savex_ 24, 25
- | savex_ 26, 27
- | sdc1 f28, 16+28*8(sp)
- | sw r28, 16+32*8+28*4(sp)
- | sdc1 f30, 16+30*8(sp)
- | sw r30, 16+32*8+30*4(sp)
- | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP.
- | li_vmstate EXIT
- | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp.
- | addiu DISPATCH, JGL, -GG_DISP2G-32768
- | lw TMP1, 0(TMP2) // Load exit number.
- | st_vmstate
- | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP.
- | lw L, DISPATCH_GL(jit_L)(DISPATCH)
- | lw BASE, DISPATCH_GL(jit_base)(DISPATCH)
- | load_got lj_trace_exit
- | sw L, DISPATCH_J(L)(DISPATCH)
- | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
- | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
- | addiu CARG1, DISPATCH, GG_DISP2J
- | sw BASE, L->base
- | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
- |. addiu CARG2, sp, 16
- | // Returns MULTRES (unscaled) or negated error code.
- | lw TMP1, L->cframe
- | li AT, -4
- | lw BASE, L->base
- | and sp, TMP1, AT
- | lw PC, SAVE_PC // Get SAVE_PC.
- | b >1
- |. sw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
- |.endif
- |->vm_exit_interp:
- |.if JIT
- | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
- | lw L, SAVE_L
- | addiu DISPATCH, JGL, -GG_DISP2G-32768
- |1:
- | bltz CRET1, >3 // Check for error from exit.
- |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
- | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | sll MULTRES, CRET1, 3
- | li TISNIL, LJ_TNIL
- | sw MULTRES, SAVE_MULTRES
- | mtc1 TMP3, TOBIT
- | lw TMP1, LFUNC:TMP1->pc
- | sw r0, DISPATCH_GL(jit_L)(DISPATCH)
- | lw KBASE, PC2PROTO(k)(TMP1)
- | cvt.d.s TOBIT, TOBIT
- | // Modified copy of ins_next which handles function header dispatch, too.
- | lw INS, 0(PC)
- | addiu PC, PC, 4
- | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
- | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
- | decode_OP4a TMP1, INS
- | decode_OP4b TMP1
- | sltiu TMP2, TMP1, BC_FUNCF*4 // Function header?
- | addu TMP0, DISPATCH, TMP1
- | decode_RD8a RD, INS
- | lw AT, 0(TMP0)
- | decode_RA8a RA, INS
- | beqz TMP2, >2
- |. decode_RA8b RA
- | jr AT
- |. decode_RD8b RD
- |2:
- | addiu RC, MULTRES, -8
- | jr AT
- |. addu RA, RA, BASE
- |
- |3: // Rethrow error from the right C frame.
- | load_got lj_err_throw
- | negu CARG2, CRET1
- | call_intern lj_err_throw // (lua_State *L, int errcode)
- |. move CARG1, L
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Math helper functions ----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
- |.macro vm_round, func
- | lui TMP0, 0x4330 // Hiword of 2^52 (double).
- | mtc1 r0, f4
- | mtc1 TMP0, f5
- | abs.d FRET2, FARG1 // |x|
- | mfc1 AT, f13
- | c.olt.d 0, FRET2, f4
- | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
- | bc1f 0, >1 // Truncate only if |x| < 2^52.
- |. sub.d FRET1, FRET1, f4
- | slt AT, AT, r0
- |.if "func" == "ceil"
- | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
- |.else
- | lui TMP0, 0x3ff0 // Hiword of +1 (double).
- |.endif
- |.if "func" == "trunc"
- | mtc1 TMP0, f5
- | c.olt.d 0, FRET2, FRET1 // |x| < result?
- | sub.d FRET2, FRET1, f4
- | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
- | neg.d FRET2, FRET1
- | jr ra
- |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
- |.else
- | neg.d FRET2, FRET1
- | mtc1 TMP0, f5
- | movn.d FRET1, FRET2, AT // Merge sign bit back in.
- |.if "func" == "ceil"
- | c.olt.d 0, FRET1, FARG1 // x > result?
- |.else
- | c.olt.d 0, FARG1, FRET1 // x < result?
- |.endif
- | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
- | jr ra
- |. movt.d FRET1, FRET2, 0
- |.endif
- |1:
- | jr ra
- |. mov.d FRET1, FARG1
- |.endmacro
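- |// The 2^52 trick: for |x| < 2^52, (|x| + 2^52) - 2^52 drops the fraction
- |// bits off the mantissa, and the compare/subtract of +-1 afterwards fixes
- |// the rounding direction for floor/ceil/trunc. The sign taken from the
- |// high half of the argument pair (mfc1 AT, f13) is merged back at the end
- |// so negative inputs and -0 are handled.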
- |
- |->vm_floor:
- | vm_round floor
- |->vm_ceil:
- | vm_round ceil
- |->vm_trunc:
- |.if JIT
- | vm_round trunc
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Miscellaneous functions --------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |//-----------------------------------------------------------------------
- |//-- FFI helper functions -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// Handler for callback functions. Callback slot number in r1, g in r2.
- |->vm_ffi_callback:
- |.if FFI
- |.type CTSTATE, CTState, PC
- | saveregs
- | lw CTSTATE, GL:r2->ctype_state
- | addiu DISPATCH, r2, GG_G2DISP
- | load_got lj_ccallback_enter
- | sw r1, CTSTATE->cb.slot
- | sw CARG1, CTSTATE->cb.gpr[0]
- | sw CARG2, CTSTATE->cb.gpr[1]
- | sdc1 FARG1, CTSTATE->cb.fpr[0]
- | sw CARG3, CTSTATE->cb.gpr[2]
- | sw CARG4, CTSTATE->cb.gpr[3]
- | sdc1 FARG2, CTSTATE->cb.fpr[1]
- | addiu TMP0, sp, CFRAME_SPACE+16
- | sw TMP0, CTSTATE->cb.stack
- | sw r0, SAVE_PC // Any value outside of bytecode is ok.
- | move CARG2, sp
- | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
- |. move CARG1, CTSTATE
- | // Returns lua_State *.
- | lw BASE, L:CRET1->base
- | lw RC, L:CRET1->top
- | move L, CRET1
- | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | mtc1 TMP3, TOBIT
- | li_vmstate INTERP
- | li TISNIL, LJ_TNIL
- | subu RC, RC, BASE
- | st_vmstate
- | cvt.d.s TOBIT, TOBIT
- | ins_callt
- |.endif
- |
- |->cont_ffi_callback: // Return from FFI callback.
- |.if FFI
- | load_got lj_ccallback_leave
- | lw CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
- | sw BASE, L->base
- | sw RB, L->top
- | sw L, CTSTATE->L
- | move CARG2, RA
- | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
- |. move CARG1, CTSTATE
- | lw CRET1, CTSTATE->cb.gpr[0]
- | ldc1 FRET1, CTSTATE->cb.fpr[0]
- | lw CRET2, CTSTATE->cb.gpr[1]
- | b ->vm_leave_unw
- |. ldc1 FRET2, CTSTATE->cb.fpr[1]
- |.endif
- |
- |->vm_ffi_call: // Call C function via FFI.
- | // Caveat: needs special frame unwinding, see below.
- |.if FFI
- | .type CCSTATE, CCallState, CARG1
- | lw TMP1, CCSTATE->spadj
- | lbu CARG2, CCSTATE->nsp
- | move TMP2, sp
- | subu sp, sp, TMP1
- | sw ra, -4(TMP2)
- | sll CARG2, CARG2, 2
- | sw r16, -8(TMP2)
- | sw CCSTATE, -12(TMP2)
- | move r16, TMP2
- | addiu TMP1, CCSTATE, offsetof(CCallState, stack)
- | addiu TMP2, sp, 16
- | beqz CARG2, >2
- |. addu TMP3, TMP1, CARG2
- |1:
- | lw TMP0, 0(TMP1)
- | addiu TMP1, TMP1, 4
- | sltu AT, TMP1, TMP3
- | sw TMP0, 0(TMP2)
- | bnez AT, <1
- |. addiu TMP2, TMP2, 4
- |2:
- | lw CFUNCADDR, CCSTATE->func
- | lw CARG2, CCSTATE->gpr[1]
- | lw CARG3, CCSTATE->gpr[2]
- | lw CARG4, CCSTATE->gpr[3]
- | ldc1 FARG1, CCSTATE->fpr[0]
- | ldc1 FARG2, CCSTATE->fpr[1]
- | jalr CFUNCADDR
- |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
- | lw CCSTATE:TMP1, -12(r16)
- | lw TMP2, -8(r16)
- | lw ra, -4(r16)
- | sw CRET1, CCSTATE:TMP1->gpr[0]
- | sw CRET2, CCSTATE:TMP1->gpr[1]
- | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
- | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
- | move sp, r16
- | jr ra
- |. move r16, TMP2
- |.endif
- |// Note: vm_ffi_call must be the last function in this object file!
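For orientation, the loop at label 1 above copies the outgoing stack arguments from the CCallState into the freshly reserved stack area, one 32-bit slot at a time, skipping the 16-byte o32 register home area. A hedged C sketch (cc and out_sp are hypothetical names; the 4-byte slot size is what the sll CARG2, CARG2, 2 implies for this 32-bit target):

  uint32_t *src = (uint32_t *)cc->stack;          /* CCSTATE + offsetof(CCallState, stack) */
  uint32_t *end = src + cc->nsp;                  /* nsp = number of stack slots */
  uint32_t *dst = (uint32_t *)(out_sp + 16);      /* out_sp: adjusted sp, past the a0-a3 home area */
  while (src < end)
    *dst++ = *src++;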
- |
- |//-----------------------------------------------------------------------
-}
-
-/* Generate the code for a single instruction. */
-static void build_ins(BuildCtx *ctx, BCOp op, int defop)
-{
- int vk = 0;
- |=>defop:
-
- switch (op) {
-
- /* -- Comparison ops ---------------------------------------------------- */
-
- /* Remember: all ops branch for a true comparison, fall through otherwise. */
-
- case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
- | // RA = src1*8, RD = src2*8, JMP with RD = target
- | addu CARG2, BASE, RA
- | addu CARG3, BASE, RD
- | lw TMP0, HI(CARG2)
- | lw TMP1, HI(CARG3)
- | ldc1 f0, 0(CARG2)
- | ldc1 f2, 0(CARG3)
- | sltiu TMP0, TMP0, LJ_TISNUM
- | sltiu TMP1, TMP1, LJ_TISNUM
- | lhu TMP2, OFS_RD(PC)
- | and TMP0, TMP0, TMP1
- | addiu PC, PC, 4
- | beqz TMP0, ->vmeta_comp
- |. lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
- | decode_RD4b TMP2
- | addu TMP2, TMP2, TMP1
- if (op == BC_ISLT || op == BC_ISGE) {
- | c.olt.d f0, f2
- } else {
- | c.ole.d f0, f2
- }
- if (op == BC_ISLT || op == BC_ISLE) {
- | movf TMP2, r0
- } else {
- | movt TMP2, r0
- }
- | addu PC, PC, TMP2
- |1:
- | ins_next
- break;
-
- case BC_ISEQV: case BC_ISNEV:
- vk = op == BC_ISEQV;
- | // RA = src1*8, RD = src2*8, JMP with RD = target
- | addu RA, BASE, RA
- | addiu PC, PC, 4
- | lw TMP0, HI(RA)
- | ldc1 f0, 0(RA)
- | addu RD, BASE, RD
- | lhu TMP2, -4+OFS_RD(PC)
- | lw TMP1, HI(RD)
- | ldc1 f2, 0(RD)
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | sltiu AT, TMP0, LJ_TISNUM
- | sltiu CARG1, TMP1, LJ_TISNUM
- | decode_RD4b TMP2
- | and AT, AT, CARG1
- | beqz AT, >5
- |. addu TMP2, TMP2, TMP3
- | c.eq.d f0, f2
- if (vk) {
- | movf TMP2, r0
- } else {
- | movt TMP2, r0
- }
- |1:
- | addu PC, PC, TMP2
- | ins_next
- |5: // Either or both types are not numbers.
- | lw CARG2, LO(RA)
- | lw CARG3, LO(RD)
- |.if FFI
- | li TMP3, LJ_TCDATA
- | beq TMP0, TMP3, ->vmeta_equal_cd
- |.endif
- |. sltiu AT, TMP0, LJ_TISPRI // Not a primitive?
- |.if FFI
- | beq TMP1, TMP3, ->vmeta_equal_cd
- |.endif
- |. xor TMP3, CARG2, CARG3 // Same tv?
- | xor TMP1, TMP1, TMP0 // Same type?
- | sltiu CARG1, TMP0, LJ_TISTABUD+1 // Table or userdata?
- | movz TMP3, r0, AT // Ignore tv if primitive.
- | movn CARG1, r0, TMP1 // Tab/ud and same type?
- | or AT, TMP1, TMP3 // Same type && (pri||same tv).
- | movz CARG1, r0, AT
- | beqz CARG1, <1 // Done if not tab/ud or not same type or same tv.
- if (vk) {
- |. movn TMP2, r0, AT
- } else {
- |. movz TMP2, r0, AT
- }
- | // Different tables or userdatas. Need to check __eq metamethod.
- | // Field metatable must be at same offset for GCtab and GCudata!
- | lw TAB:TMP1, TAB:CARG2->metatable
- | beqz TAB:TMP1, <1 // No metatable?
- |. nop
- | lbu TMP1, TAB:TMP1->nomm
- | andi TMP1, TMP1, 1<<MM_eq
- | bnez TMP1, <1 // Or 'no __eq' flag set?
- |. nop
- | b ->vmeta_equal // Handle __eq metamethod.
- |. li CARG4, 1-vk // ne = 0 or 1.
- break;
-
- case BC_ISEQS: case BC_ISNES:
- vk = op == BC_ISEQS;
- | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
- | addu RA, BASE, RA
- | addiu PC, PC, 4
- | lw TMP0, HI(RA)
- | srl RD, RD, 1
- | lw STR:TMP3, LO(RA)
- | subu RD, KBASE, RD
- | lhu TMP2, -4+OFS_RD(PC)
- |.if FFI
- | li AT, LJ_TCDATA
- | beq TMP0, AT, ->vmeta_equal_cd
- |.endif
- |. lw STR:TMP1, -4(RD) // KBASE-4-str_const*4
- | addiu TMP0, TMP0, -LJ_TSTR
- | decode_RD4b TMP2
- | xor TMP1, STR:TMP1, STR:TMP3
- | or TMP0, TMP0, TMP1
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | addu TMP2, TMP2, TMP3
- if (vk) {
- | movn TMP2, r0, TMP0
- } else {
- | movz TMP2, r0, TMP0
- }
- | addu PC, PC, TMP2
- | ins_next
- break;
-
- case BC_ISEQN: case BC_ISNEN:
- vk = op == BC_ISEQN;
- | // RA = src*8, RD = num_const*8, JMP with RD = target
- | addu RA, BASE, RA
- | addiu PC, PC, 4
- | lw TMP0, HI(RA)
- | ldc1 f0, 0(RA)
- | addu RD, KBASE, RD
- | lhu TMP2, -4+OFS_RD(PC)
- | ldc1 f2, 0(RD)
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | sltiu AT, TMP0, LJ_TISNUM
- | decode_RD4b TMP2
- |.if FFI
- | beqz AT, >5
- |.else
- | beqz AT, >1
- |.endif
- |. addu TMP2, TMP2, TMP3
- | c.eq.d f0, f2
- if (vk) {
- | movf TMP2, r0
- | addu PC, PC, TMP2
- |1:
- } else {
- | movt TMP2, r0
- |1:
- | addu PC, PC, TMP2
- }
- | ins_next
- |.if FFI
- |5:
- | li AT, LJ_TCDATA
- | beq TMP0, AT, ->vmeta_equal_cd
- |. nop
- | b <1
- |. nop
- |.endif
- break;
-
- case BC_ISEQP: case BC_ISNEP:
- vk = op == BC_ISEQP;
- | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
- | addu RA, BASE, RA
- | srl TMP1, RD, 3
- | lw TMP0, HI(RA)
- | lhu TMP2, OFS_RD(PC)
- | not TMP1, TMP1
- | addiu PC, PC, 4
- |.if FFI
- | li AT, LJ_TCDATA
- | beq TMP0, AT, ->vmeta_equal_cd
- |.endif
- |. xor TMP0, TMP0, TMP1
- | decode_RD4b TMP2
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | addu TMP2, TMP2, TMP3
- if (vk) {
- | movn TMP2, r0, TMP0
- } else {
- | movz TMP2, r0, TMP0
- }
- | addu PC, PC, TMP2
- | ins_next
- break;
-
- /* -- Unary test and copy ops ------------------------------------------- */
-
- case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
- | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
- | addu RD, BASE, RD
- | lhu TMP2, OFS_RD(PC)
- | lw TMP0, HI(RD)
- | addiu PC, PC, 4
- if (op == BC_IST || op == BC_ISF) {
- | sltiu TMP0, TMP0, LJ_TISTRUECOND
- | decode_RD4b TMP2
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | addu TMP2, TMP2, TMP3
- if (op == BC_IST) {
- | movz TMP2, r0, TMP0
- } else {
- | movn TMP2, r0, TMP0
- }
- | addu PC, PC, TMP2
- } else {
- | sltiu TMP0, TMP0, LJ_TISTRUECOND
- | ldc1 f0, 0(RD)
- if (op == BC_ISTC) {
- | beqz TMP0, >1
- } else {
- | bnez TMP0, >1
- }
- |. addu RA, BASE, RA
- | decode_RD4b TMP2
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | addu TMP2, TMP2, TMP3
- | sdc1 f0, 0(RA)
- | addu PC, PC, TMP2
- |1:
- }
- | ins_next
- break;
-
- /* -- Unary ops --------------------------------------------------------- */
-
- case BC_MOV:
- | // RA = dst*8, RD = src*8
- | addu RD, BASE, RD
- | addu RA, BASE, RA
- | ldc1 f0, 0(RD)
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- break;
- case BC_NOT:
- | // RA = dst*8, RD = src*8
- | addu RD, BASE, RD
- | addu RA, BASE, RA
- | lw TMP0, HI(RD)
- | li TMP1, LJ_TFALSE
- | sltiu TMP0, TMP0, LJ_TISTRUECOND
- | addiu TMP1, TMP0, LJ_TTRUE
- | ins_next1
- | sw TMP1, HI(RA)
- | ins_next2
- break;
- case BC_UNM:
- | // RA = dst*8, RD = src*8
- | addu CARG3, BASE, RD
- | addu RA, BASE, RA
- | lw TMP0, HI(CARG3)
- | ldc1 f0, 0(CARG3)
- | sltiu AT, TMP0, LJ_TISNUM
- | beqz AT, ->vmeta_unm
- |. neg.d f0, f0
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- break;
- case BC_LEN:
- | // RA = dst*8, RD = src*8
- | addu CARG2, BASE, RD
- | addu RA, BASE, RA
- | lw TMP0, HI(CARG2)
- | lw CARG1, LO(CARG2)
- | li AT, LJ_TSTR
- | bne TMP0, AT, >2
- |. li AT, LJ_TTAB
- | lw CRET1, STR:CARG1->len
- |1:
- | mtc1 CRET1, f0
- | cvt.d.w f0, f0
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- |2:
- | bne TMP0, AT, ->vmeta_len
- |. nop
-#if LJ_52
- | lw TAB:TMP2, TAB:CARG1->metatable
- | bnez TAB:TMP2, >9
- |. nop
- |3:
-#endif
- |->BC_LEN_Z:
- | load_got lj_tab_len
- | call_intern lj_tab_len // (GCtab *t)
- |. nop
- | // Returns uint32_t (but less than 2^31).
- | b <1
- |. nop
-#if LJ_52
- |9:
- | lbu TMP0, TAB:TMP2->nomm
- | andi TMP0, TMP0, 1<<MM_len
- | bnez TMP0, <3 // 'no __len' flag set: done.
- |. nop
- | b ->vmeta_len
- |. nop
-#endif
- break;
-
- /* -- Binary ops -------------------------------------------------------- */
-
- |.macro ins_arithpre
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | decode_RDtoRC8 RC, RD
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||switch (vk) {
- ||case 0:
- | addu CARG3, BASE, RB
- | addu CARG4, KBASE, RC
- | lw TMP1, HI(CARG3)
- | ldc1 f20, 0(CARG3)
- | ldc1 f22, 0(CARG4)
- | sltiu AT, TMP1, LJ_TISNUM
- || break;
- ||case 1:
- | addu CARG4, BASE, RB
- | addu CARG3, KBASE, RC
- | lw TMP1, HI(CARG4)
- | ldc1 f22, 0(CARG4)
- | ldc1 f20, 0(CARG3)
- | sltiu AT, TMP1, LJ_TISNUM
- || break;
- ||default:
- | addu CARG3, BASE, RB
- | addu CARG4, BASE, RC
- | lw TMP1, HI(CARG3)
- | lw TMP2, HI(CARG4)
- | ldc1 f20, 0(CARG3)
- | ldc1 f22, 0(CARG4)
- | sltiu AT, TMP1, LJ_TISNUM
- | sltiu TMP0, TMP2, LJ_TISNUM
- | and AT, AT, TMP0
- || break;
- ||}
- | beqz AT, ->vmeta_arith
- |. addu RA, BASE, RA
- |.endmacro
- |
- |.macro fpmod, a, b, c
- |->BC_MODVN_Z:
- | bal ->vm_floor // floor(b/c)
- |. div.d FARG1, b, c
- | mul.d a, FRET1, c
- | sub.d a, b, a // b - floor(b/c)*c
- |.endmacro
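The fpmod macro is a direct transcription of Lua's modulo definition; as a plain-C reference (same formula, not the VM's register-level code):

  #include <math.h>

  /* Lua-style floating-point modulo: b - floor(b/c)*c, result signed like c. */
  static double num_mod_sketch(double b, double c)
  {
    return b - floor(b / c) * c;
  }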
- |
- |.macro ins_arith, ins
- | ins_arithpre
- |.if "ins" == "fpmod_"
- | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
- |. nop
- |.else
- | ins f0, f20, f22
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- |.endif
- |.endmacro
-
- case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
- | ins_arith add.d
- break;
- case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
- | ins_arith sub.d
- break;
- case BC_MULVN: case BC_MULNV: case BC_MULVV:
- | ins_arith mul.d
- break;
- case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
- | ins_arith div.d
- break;
- case BC_MODVN:
- | ins_arith fpmod
- break;
- case BC_MODNV: case BC_MODVV:
- | ins_arith fpmod_
- break;
- case BC_POW:
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | decode_RDtoRC8 RC, RD
- | addu CARG3, BASE, RB
- | addu CARG4, BASE, RC
- | lw TMP1, HI(CARG3)
- | lw TMP2, HI(CARG4)
- | ldc1 FARG1, 0(CARG3)
- | ldc1 FARG2, 0(CARG4)
- | sltiu AT, TMP1, LJ_TISNUM
- | sltiu TMP0, TMP2, LJ_TISNUM
- | and AT, AT, TMP0
- | load_got pow
- | beqz AT, ->vmeta_arith
- |. addu RA, BASE, RA
- | call_extern
- |. nop
- | ins_next1
- | sdc1 FRET1, 0(RA)
- | ins_next2
- break;
-
- case BC_CAT:
- | // RA = dst*8, RB = src_start*8, RC = src_end*8
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | decode_RDtoRC8 RC, RD
- | subu CARG3, RC, RB
- | sw BASE, L->base
- | addu CARG2, BASE, RC
- | move MULTRES, RB
- |->BC_CAT_Z:
- | load_got lj_meta_cat
- | srl CARG3, CARG3, 3
- | sw PC, SAVE_PC
- | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
- |. move CARG1, L
- | // Returns NULL (finished) or TValue * (metamethod).
- | bnez CRET1, ->vmeta_binop
- |. lw BASE, L->base
- | addu RB, BASE, MULTRES
- | ldc1 f0, 0(RB)
- | addu RA, BASE, RA
- | ins_next1
- | sdc1 f0, 0(RA) // Copy result from RB to RA.
- | ins_next2
- break;
-
- /* -- Constant ops ------------------------------------------------------ */
-
- case BC_KSTR:
- | // RA = dst*8, RD = str_const*8 (~)
- | srl TMP1, RD, 1
- | subu TMP1, KBASE, TMP1
- | ins_next1
- | lw TMP0, -4(TMP1) // KBASE-4-str_const*4
- | addu RA, BASE, RA
- | li TMP2, LJ_TSTR
- | sw TMP0, LO(RA)
- | sw TMP2, HI(RA)
- | ins_next2
- break;
- case BC_KCDATA:
- |.if FFI
- | // RA = dst*8, RD = cdata_const*8 (~)
- | srl TMP1, RD, 1
- | subu TMP1, KBASE, TMP1
- | ins_next1
- | lw TMP0, -4(TMP1) // KBASE-4-cdata_const*4
- | addu RA, BASE, RA
- | li TMP2, LJ_TCDATA
- | sw TMP0, LO(RA)
- | sw TMP2, HI(RA)
- | ins_next2
- |.endif
- break;
- case BC_KSHORT:
- | // RA = dst*8, RD = int16_literal*8
- | sra RD, INS, 16
- | mtc1 RD, f0
- | addu RA, BASE, RA
- | cvt.d.w f0, f0
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- break;
- case BC_KNUM:
- | // RA = dst*8, RD = num_const*8
- | addu RD, KBASE, RD
- | addu RA, BASE, RA
- | ldc1 f0, 0(RD)
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- break;
- case BC_KPRI:
- | // RA = dst*8, RD = primitive_type*8 (~)
- | srl TMP1, RD, 3
- | addu RA, BASE, RA
- | not TMP0, TMP1
- | ins_next1
- | sw TMP0, HI(RA)
- | ins_next2
- break;
- case BC_KNIL:
- | // RA = base*8, RD = end*8
- | addu RA, BASE, RA
- | sw TISNIL, HI(RA)
- | addiu RA, RA, 8
- | addu RD, BASE, RD
- |1:
- | sw TISNIL, HI(RA)
- | slt AT, RA, RD
- | bnez AT, <1
- |. addiu RA, RA, 8
- | ins_next_
- break;
-
- /* -- Upvalue and function ops ------------------------------------------ */
-
- case BC_UGET:
- | // RA = dst*8, RD = uvnum*8
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | srl RD, RD, 1
- | addu RD, RD, LFUNC:RB
- | lw UPVAL:RB, LFUNC:RD->uvptr
- | ins_next1
- | lw TMP1, UPVAL:RB->v
- | ldc1 f0, 0(TMP1)
- | addu RA, BASE, RA
- | sdc1 f0, 0(RA)
- | ins_next2
- break;
- case BC_USETV:
- | // RA = uvnum*8, RD = src*8
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | srl RA, RA, 1
- | addu RD, BASE, RD
- | addu RA, RA, LFUNC:RB
- | ldc1 f0, 0(RD)
- | lw UPVAL:RB, LFUNC:RA->uvptr
- | lbu TMP3, UPVAL:RB->marked
- | lw CARG2, UPVAL:RB->v
- | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
- | lbu TMP0, UPVAL:RB->closed
- | lw TMP2, HI(RD)
- | sdc1 f0, 0(CARG2)
- | li AT, LJ_GC_BLACK|1
- | or TMP3, TMP3, TMP0
- | beq TMP3, AT, >2 // Upvalue is closed and black?
- |. addiu TMP2, TMP2, -(LJ_TNUMX+1)
- |1:
- | ins_next
- |
- |2: // Check if new value is collectable.
- | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1)
- | beqz AT, <1 // tvisgcv(v)
- |. lw TMP1, LO(RD)
- | lbu TMP3, GCOBJ:TMP1->gch.marked
- | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
- | beqz TMP3, <1
- |. load_got lj_gc_barrieruv
- | // Crossed a write barrier. Move the barrier forward.
- | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
- |. addiu CARG1, DISPATCH, GG_DISP2G
- | b <1
- |. nop
- break;
- case BC_USETS:
- | // RA = uvnum*8, RD = str_const*8 (~)
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | srl RA, RA, 1
- | srl TMP1, RD, 1
- | addu RA, RA, LFUNC:RB
- | subu TMP1, KBASE, TMP1
- | lw UPVAL:RB, LFUNC:RA->uvptr
- | lw STR:TMP1, -4(TMP1) // KBASE-4-str_const*4
- | lbu TMP2, UPVAL:RB->marked
- | lw CARG2, UPVAL:RB->v
- | lbu TMP3, STR:TMP1->marked
- | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
- | lbu TMP2, UPVAL:RB->closed
- | li TMP0, LJ_TSTR
- | sw STR:TMP1, LO(CARG2)
- | bnez AT, >2
- |. sw TMP0, HI(CARG2)
- |1:
- | ins_next
- |
- |2: // Check if string is white and ensure upvalue is closed.
- | beqz TMP2, <1
- |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
- | beqz AT, <1
- |. load_got lj_gc_barrieruv
- | // Crossed a write barrier. Move the barrier forward.
- | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
- |. addiu CARG1, DISPATCH, GG_DISP2G
- | b <1
- |. nop
- break;
- case BC_USETN:
- | // RA = uvnum*8, RD = num_const*8
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | srl RA, RA, 1
- | addu RD, KBASE, RD
- | addu RA, RA, LFUNC:RB
- | ldc1 f0, 0(RD)
- | lw UPVAL:RB, LFUNC:RA->uvptr
- | ins_next1
- | lw TMP1, UPVAL:RB->v
- | sdc1 f0, 0(TMP1)
- | ins_next2
- break;
- case BC_USETP:
- | // RA = uvnum*8, RD = primitive_type*8 (~)
- | lw LFUNC:RB, FRAME_FUNC(BASE)
- | srl RA, RA, 1
- | srl TMP0, RD, 3
- | addu RA, RA, LFUNC:RB
- | not TMP0, TMP0
- | lw UPVAL:RB, LFUNC:RA->uvptr
- | ins_next1
- | lw TMP1, UPVAL:RB->v
- | sw TMP0, HI(TMP1)
- | ins_next2
- break;
-
- case BC_UCLO:
- | // RA = level*8, RD = target
- | lw TMP2, L->openupval
- | branch_RD // Do this first since RD is not saved.
- | load_got lj_func_closeuv
- | sw BASE, L->base
- | beqz TMP2, >1
- |. move CARG1, L
- | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
- |. addu CARG2, BASE, RA
- | lw BASE, L->base
- |1:
- | ins_next
- break;
-
- case BC_FNEW:
- | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
- | srl TMP1, RD, 1
- | load_got lj_func_newL_gc
- | subu TMP1, KBASE, TMP1
- | lw CARG3, FRAME_FUNC(BASE)
- | lw CARG2, -4(TMP1) // KBASE-4-tab_const*4
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | // (lua_State *L, GCproto *pt, GCfuncL *parent)
- | call_intern lj_func_newL_gc
- |. move CARG1, L
- | // Returns GCfuncL *.
- | lw BASE, L->base
- | li TMP0, LJ_TFUNC
- | ins_next1
- | addu RA, BASE, RA
- | sw TMP0, HI(RA)
- | sw LFUNC:CRET1, LO(RA)
- | ins_next2
- break;
-
- /* -- Table ops --------------------------------------------------------- */
-
- case BC_TNEW:
- case BC_TDUP:
- | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
- | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
- | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | sltu AT, TMP0, TMP1
- | beqz AT, >5
- |1:
- if (op == BC_TNEW) {
- | load_got lj_tab_new
- | srl CARG2, RD, 3
- | andi CARG2, CARG2, 0x7ff
- | li TMP0, 0x801
- | addiu AT, CARG2, -0x7ff
- | srl CARG3, RD, 14
- | movz CARG2, TMP0, AT
- | // (lua_State *L, int32_t asize, uint32_t hbits)
- | call_intern lj_tab_new
- |. move CARG1, L
- | // Returns Table *.
- } else {
- | load_got lj_tab_dup
- | srl TMP1, RD, 1
- | subu TMP1, KBASE, TMP1
- | move CARG1, L
- | call_intern lj_tab_dup // (lua_State *L, Table *kt)
- |. lw CARG2, -4(TMP1) // KBASE-4-str_const*4
- | // Returns Table *.
- }
- | lw BASE, L->base
- | ins_next1
- | addu RA, BASE, RA
- | li TMP0, LJ_TTAB
- | sw TAB:CRET1, LO(RA)
- | sw TMP0, HI(RA)
- | ins_next2
- |5:
- | load_got lj_gc_step_fixtop
- | move MULTRES, RD
- | call_intern lj_gc_step_fixtop // (lua_State *L)
- |. move CARG1, L
- | b <1
- |. move RD, MULTRES
- break;
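In the BC_TNEW branch, the D operand packs the presized array and hash sizes; decoded in C terms this is roughly the following sketch (the 0x7ff-to-0x801 substitution is the same escape-value convention used by the interpreters for the other ports):

  uint32_t d = rd >> 3;                   /* RD arrives pre-scaled by 8 */
  uint32_t asize = d & 0x7ff;             /* low 11 bits: array part size */
  uint32_t hbits = d >> 11;               /* upper bits: log2 of hash part size */
  if (asize == 0x7ff) asize = 0x801;      /* escape value for large presets */
  GCtab *t = lj_tab_new(L, (int32_t)asize, hbits);  /* signature per the comment above */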
-
- case BC_GGET:
- | // RA = dst*8, RD = str_const*8 (~)
- case BC_GSET:
- | // RA = src*8, RD = str_const*8 (~)
- | lw LFUNC:TMP2, FRAME_FUNC(BASE)
- | srl TMP1, RD, 1
- | subu TMP1, KBASE, TMP1
- | lw TAB:RB, LFUNC:TMP2->env
- | lw STR:RC, -4(TMP1) // KBASE-4-str_const*4
- if (op == BC_GGET) {
- | b ->BC_TGETS_Z
- } else {
- | b ->BC_TSETS_Z
- }
- |. addu RA, BASE, RA
- break;
-
- case BC_TGETV:
- | // RA = dst*8, RB = table*8, RC = key*8
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | decode_RDtoRC8 RC, RD
- | addu CARG2, BASE, RB
- | addu CARG3, BASE, RC
- | lw TMP1, HI(CARG2)
- | lw TMP2, HI(CARG3)
- | lw TAB:RB, LO(CARG2)
- | li AT, LJ_TTAB
- | ldc1 f0, 0(CARG3)
- | bne TMP1, AT, ->vmeta_tgetv
- |. addu RA, BASE, RA
- | sltiu AT, TMP2, LJ_TISNUM
- | beqz AT, >5
- |. li AT, LJ_TSTR
- |
- | // Convert number key to integer, check for integerness and range.
- | cvt.w.d f2, f0
- | lw TMP0, TAB:RB->asize
- | mfc1 TMP2, f2
- | cvt.d.w f4, f2
- | lw TMP1, TAB:RB->array
- | c.eq.d f0, f4
- | sltu AT, TMP2, TMP0
- | movf AT, r0
- | sll TMP2, TMP2, 3
- | beqz AT, ->vmeta_tgetv // Integer key and in array part?
- |. addu TMP2, TMP1, TMP2
- | lw TMP0, HI(TMP2)
- | beq TMP0, TISNIL, >2
- |. ldc1 f0, 0(TMP2)
- |1:
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- |
- |2: // Check for __index if table value is nil.
- | lw TAB:TMP2, TAB:RB->metatable
- | beqz TAB:TMP2, <1 // No metatable: done.
- |. nop
- | lbu TMP0, TAB:TMP2->nomm
- | andi TMP0, TMP0, 1<<MM_index
- | bnez TMP0, <1 // 'no __index' flag set: done.
- |. nop
- | b ->vmeta_tgetv
- |. nop
- |
- |5:
- | bne TMP2, AT, ->vmeta_tgetv
- |. lw STR:RC, LO(CARG3)
- | b ->BC_TGETS_Z // String key?
- |. nop
- break;
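The "convert number key to integer" block implements the usual exact-integer test before indexing the array part; a compact C sketch of the same check (illustrative helper, not the VM's code):

  #include <stdint.h>

  /* Does the numeric key hit the array part? (integer conversion + exactness + range) */
  static int array_hit_sketch(double n, uint32_t asize, uint32_t *idx)
  {
    int32_t i = (int32_t)n;               /* integer conversion, as cvt.w.d */
    if ((double)i == n && (uint32_t)i < asize) {
      *idx = (uint32_t)i;                 /* use t->array[i]; the slot may still be nil */
      return 1;
    }
    return 0;                             /* otherwise: ->vmeta_tgetv */
  }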
- case BC_TGETS:
- | // RA = dst*8, RB = table*8, RC = str_const*4 (~)
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | addu CARG2, BASE, RB
- | decode_RC4a RC, INS
- | lw TMP0, HI(CARG2)
- | decode_RC4b RC
- | li AT, LJ_TTAB
- | lw TAB:RB, LO(CARG2)
- | subu CARG3, KBASE, RC
- | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
- | bne TMP0, AT, ->vmeta_tgets1
- |. addu RA, BASE, RA
- |->BC_TGETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
- | lw TMP0, TAB:RB->hmask
- | lw TMP1, STR:RC->hash
- | lw NODE:TMP2, TAB:RB->node
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | sll TMP0, TMP1, 5
- | sll TMP1, TMP1, 3
- | subu TMP1, TMP0, TMP1
- | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
- | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
- | lw NODE:TMP1, NODE:TMP2->next
- | lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
- | addiu CARG1, CARG1, -LJ_TSTR
- | xor TMP0, TMP0, STR:RC
- | or AT, CARG1, TMP0
- | bnez AT, >4
- |. lw TAB:TMP3, TAB:RB->metatable
- | beq CARG2, TISNIL, >5 // Key found, but nil value?
- |. lw CARG1, offsetof(Node, val)+LO(NODE:TMP2)
- |3:
- | ins_next1
- | sw CARG2, HI(RA)
- | sw CARG1, LO(RA)
- | ins_next2
- |
- |4: // Follow hash chain.
- | bnez NODE:TMP1, <1
- |. move NODE:TMP2, NODE:TMP1
- | // End of hash chain: key not found, nil result.
- |
- |5: // Check for __index if table value is nil.
- | beqz TAB:TMP3, <3 // No metatable: done.
- |. li CARG2, LJ_TNIL
- | lbu TMP0, TAB:TMP3->nomm
- | andi TMP0, TMP0, 1<<MM_index
- | bnez TMP0, <3 // 'no __index' flag set: done.
- |. nop
- | b ->vmeta_tgets
- |. nop
- break;
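The idx*32 - idx*8 arithmetic above is simply idx * sizeof(Node) for a 24-byte Node, and the loop at label 1 walks the per-slot hash chain. In simplified C (field and helper names are illustrative, not the exact lj_obj.h definitions):

  Node *n = &t->node[str->hash & t->hmask];   /* hmask is 2^k - 1 */
  do {
    if (node_key_is_string(n) && node_key_string(n) == str)
      return &n->val;                         /* hit; the value itself may be nil */
    n = n->next;                              /* follow the hash chain */
  } while (n != NULL);
  return NULL;                                /* miss: nil result or __index path */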
- case BC_TGETB:
- | // RA = dst*8, RB = table*8, RC = index*8
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | addu CARG2, BASE, RB
- | decode_RDtoRC8 RC, RD
- | lw CARG1, HI(CARG2)
- | li AT, LJ_TTAB
- | lw TAB:RB, LO(CARG2)
- | addu RA, BASE, RA
- | bne CARG1, AT, ->vmeta_tgetb
- |. srl TMP0, RC, 3
- | lw TMP1, TAB:RB->asize
- | lw TMP2, TAB:RB->array
- | sltu AT, TMP0, TMP1
- | beqz AT, ->vmeta_tgetb
- |. addu RC, TMP2, RC
- | lw TMP1, HI(RC)
- | beq TMP1, TISNIL, >5
- |. ldc1 f0, 0(RC)
- |1:
- | ins_next1
- | sdc1 f0, 0(RA)
- | ins_next2
- |
- |5: // Check for __index if table value is nil.
- | lw TAB:TMP2, TAB:RB->metatable
- | beqz TAB:TMP2, <1 // No metatable: done.
- |. nop
- | lbu TMP1, TAB:TMP2->nomm
- | andi TMP1, TMP1, 1<<MM_index
- | bnez TMP1, <1 // 'no __index' flag set: done.
- |. nop
- | b ->vmeta_tgetb // Caveat: preserve TMP0!
- |. nop
- break;
-
- case BC_TSETV:
- | // RA = src*8, RB = table*8, RC = key*8
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | decode_RDtoRC8 RC, RD
- | addu CARG2, BASE, RB
- | addu CARG3, BASE, RC
- | lw TMP1, HI(CARG2)
- | lw TMP2, HI(CARG3)
- | lw TAB:RB, LO(CARG2)
- | li AT, LJ_TTAB
- | ldc1 f0, 0(CARG3)
- | bne TMP1, AT, ->vmeta_tsetv
- |. addu RA, BASE, RA
- | sltiu AT, TMP2, LJ_TISNUM
- | beqz AT, >5
- |. li AT, LJ_TSTR
- |
- | // Convert number key to integer, check for integerness and range.
- | cvt.w.d f2, f0
- | lw TMP0, TAB:RB->asize
- | mfc1 TMP2, f2
- | cvt.d.w f4, f2
- | lw TMP1, TAB:RB->array
- | c.eq.d f0, f4
- | sltu AT, TMP2, TMP0
- | movf AT, r0
- | sll TMP2, TMP2, 3
- | beqz AT, ->vmeta_tsetv // Integer key and in array part?
- |. addu TMP1, TMP1, TMP2
- | lbu TMP3, TAB:RB->marked
- | lw TMP0, HI(TMP1)
- | beq TMP0, TISNIL, >3
- |. ldc1 f0, 0(RA)
- |1:
- | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
- | bnez AT, >7
- |. sdc1 f0, 0(TMP1)
- |2:
- | ins_next
- |
- |3: // Check for __newindex if previous value is nil.
- | lw TAB:TMP2, TAB:RB->metatable
- | beqz TAB:TMP2, <1 // No metatable: done.
- |. nop
- | lbu TMP2, TAB:TMP2->nomm
- | andi TMP2, TMP2, 1<<MM_newindex
- | bnez TMP2, <1 // 'no __newindex' flag set: done.
- |. nop
- | b ->vmeta_tsetv
- |. nop
- |
- |5:
- | bne TMP2, AT, ->vmeta_tsetv
- |. lw STR:RC, LO(CARG3)
- | b ->BC_TSETS_Z // String key?
- |. nop
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0, <2
- break;
- case BC_TSETS:
- | // RA = src*8, RB = table*8, RC = str_const*8 (~)
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | addu CARG2, BASE, RB
- | decode_RC4a RC, INS
- | lw TMP0, HI(CARG2)
- | decode_RC4b RC
- | li AT, LJ_TTAB
- | subu CARG3, KBASE, RC
- | lw TAB:RB, LO(CARG2)
- | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
- | bne TMP0, AT, ->vmeta_tsets1
- |. addu RA, BASE, RA
- |->BC_TSETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
- | lw TMP0, TAB:RB->hmask
- | lw TMP1, STR:RC->hash
- | lw NODE:TMP2, TAB:RB->node
- | sb r0, TAB:RB->nomm // Clear metamethod cache.
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | sll TMP0, TMP1, 5
- | sll TMP1, TMP1, 3
- | subu TMP1, TMP0, TMP1
- | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- | ldc1 f20, 0(RA)
- |1:
- | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
- | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
- | li AT, LJ_TSTR
- | lw NODE:TMP1, NODE:TMP2->next
- | bne CARG1, AT, >5
- |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
- | bne TMP0, STR:RC, >5
- |. lbu TMP3, TAB:RB->marked
- | beq CARG2, TISNIL, >4 // Key found, but nil value?
- |. lw TAB:TMP0, TAB:RB->metatable
- |2:
- | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
- | bnez AT, >7
- |. sdc1 f20, NODE:TMP2->val
- |3:
- | ins_next
- |
- |4: // Check for __newindex if previous value is nil.
- | beqz TAB:TMP0, <2 // No metatable: done.
- |. nop
- | lbu TMP0, TAB:TMP0->nomm
- | andi TMP0, TMP0, 1<<MM_newindex
- | bnez TMP0, <2 // 'no __newindex' flag set: done.
- |. nop
- | b ->vmeta_tsets
- |. nop
- |
- |5: // Follow hash chain.
- | bnez NODE:TMP1, <1
- |. move NODE:TMP2, NODE:TMP1
- | // End of hash chain: key not found, add a new one
- |
- | // But check for __newindex first.
- | lw TAB:TMP2, TAB:RB->metatable
- | beqz TAB:TMP2, >6 // No metatable: continue.
- |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
- | lbu TMP0, TAB:TMP2->nomm
- | andi TMP0, TMP0, 1<<MM_newindex
- | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
- |. li AT, LJ_TSTR
- |6:
- | load_got lj_tab_newkey
- | sw STR:RC, LO(CARG3)
- | sw AT, HI(CARG3)
- | sw BASE, L->base
- | move CARG2, TAB:RB
- | sw PC, SAVE_PC
- | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
- |. move CARG1, L
- | // Returns TValue *.
- | lw BASE, L->base
- | b <3 // No 2nd write barrier needed.
- |. sdc1 f20, 0(CRET1)
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0, <3
- break;
- case BC_TSETB:
- | // RA = src*8, RB = table*8, RC = index*8
- | decode_RB8a RB, INS
- | decode_RB8b RB
- | addu CARG2, BASE, RB
- | decode_RDtoRC8 RC, RD
- | lw CARG1, HI(CARG2)
- | li AT, LJ_TTAB
- | lw TAB:RB, LO(CARG2)
- | addu RA, BASE, RA
- | bne CARG1, AT, ->vmeta_tsetb
- |. srl TMP0, RC, 3
- | lw TMP1, TAB:RB->asize
- | lw TMP2, TAB:RB->array
- | sltu AT, TMP0, TMP1
- | beqz AT, ->vmeta_tsetb
- |. addu RC, TMP2, RC
- | lw TMP1, HI(RC)
- | lbu TMP3, TAB:RB->marked
- | beq TMP1, TISNIL, >5
- |. ldc1 f0, 0(RA)
- |1:
- | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
- | bnez AT, >7
- |. sdc1 f0, 0(RC)
- |2:
- | ins_next
- |
- |5: // Check for __newindex if previous value is nil.
- | lw TAB:TMP2, TAB:RB->metatable
- | beqz TAB:TMP2, <1 // No metatable: done.
- |. nop
- | lbu TMP1, TAB:TMP2->nomm
- | andi TMP1, TMP1, 1<<MM_newindex
- | bnez TMP1, <1 // 'no __newindex' flag set: done.
- |. nop
- | b ->vmeta_tsetb // Caveat: preserve TMP0!
- |. nop
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0, <2
- break;
-
- case BC_TSETM:
- | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
- | addu RA, BASE, RA
- |1:
- | addu TMP3, KBASE, RD
- | lw TAB:CARG2, -8+LO(RA) // Guaranteed to be a table.
- | addiu TMP0, MULTRES, -8
- | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
- | beqz TMP0, >4 // Nothing to copy?
- |. srl CARG3, TMP0, 3
- | addu CARG3, CARG3, TMP3
- | lw TMP2, TAB:CARG2->asize
- | sll TMP1, TMP3, 3
- | lbu TMP3, TAB:CARG2->marked
- | lw CARG1, TAB:CARG2->array
- | sltu AT, TMP2, CARG3
- | bnez AT, >5
- |. addu TMP2, RA, TMP0
- | addu TMP1, TMP1, CARG1
- | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- |3: // Copy result slots to table.
- | ldc1 f0, 0(RA)
- | addiu RA, RA, 8
- | sltu AT, RA, TMP2
- | sdc1 f0, 0(TMP1)
- | bnez AT, <3
- |. addiu TMP1, TMP1, 8
- | bnez TMP0, >7
- |. nop
- |4:
- | ins_next
- |
- |5: // Need to resize array part.
- | load_got lj_tab_reasize
- | sw BASE, L->base
- | sw PC, SAVE_PC
- | move BASE, RD
- | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
- |. move CARG1, L
- | // Must not reallocate the stack.
- | move RD, BASE
- | b <1
- |. lw BASE, L->base // Reload BASE for lack of a saved register.
- |
- |7: // Possible table write barrier for any value. Skip valiswhite check.
- | barrierback TAB:CARG2, TMP3, TMP0, <4
- break;
-
- /* -- Calls and vararg handling ----------------------------------------- */
-
- case BC_CALLM:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
- | decode_RDtoRC8 NARGS8:RC, RD
- | b ->BC_CALL_Z
- |. addu NARGS8:RC, NARGS8:RC, MULTRES
- break;
- case BC_CALL:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
- | decode_RDtoRC8 NARGS8:RC, RD
- |->BC_CALL_Z:
- | move TMP2, BASE
- | addu BASE, BASE, RA
- | li AT, LJ_TFUNC
- | lw TMP0, HI(BASE)
- | lw LFUNC:RB, LO(BASE)
- | addiu BASE, BASE, 8
- | bne TMP0, AT, ->vmeta_call
- |. addiu NARGS8:RC, NARGS8:RC, -8
- | ins_call
- break;
-
- case BC_CALLMT:
- | // RA = base*8, (RB = 0,) RC = extra_nargs*8
- | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
- | // Fall through. Assumes BC_CALLT follows.
- break;
- case BC_CALLT:
- | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
- | addu RA, BASE, RA
- | li AT, LJ_TFUNC
- | lw TMP0, HI(RA)
- | lw LFUNC:RB, LO(RA)
- | move NARGS8:RC, RD
- | lw TMP1, FRAME_PC(BASE)
- | addiu RA, RA, 8
- | bne TMP0, AT, ->vmeta_callt
- |. addiu NARGS8:RC, NARGS8:RC, -8
- |->BC_CALLT_Z:
- | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
- | lbu TMP3, LFUNC:RB->ffid
- | bnez TMP0, >7
- |. xori TMP2, TMP1, FRAME_VARG
- |1:
- | sw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
- | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function?
- | move TMP2, BASE
- | beqz NARGS8:RC, >3
- |. move TMP3, NARGS8:RC
- |2:
- | ldc1 f0, 0(RA)
- | addiu RA, RA, 8
- | addiu TMP3, TMP3, -8
- | sdc1 f0, 0(TMP2)
- | bnez TMP3, <2
- |. addiu TMP2, TMP2, 8
- |3:
- | or TMP0, TMP0, AT
- | beqz TMP0, >5
- |. nop
- |4:
- | ins_callt
- |
- |5: // Tailcall to a fast function with a Lua frame below.
- | lw INS, -4(TMP1)
- | decode_RA8a RA, INS
- | decode_RA8b RA
- | subu TMP1, BASE, RA
- | lw LFUNC:TMP1, -8+FRAME_FUNC(TMP1)
- | lw TMP1, LFUNC:TMP1->pc
- | b <4
- |. lw KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
- |
- |7: // Tailcall from a vararg function.
- | andi AT, TMP2, FRAME_TYPEP
- | bnez AT, <1 // Vararg frame below?
- |. subu TMP2, BASE, TMP2 // Relocate BASE down.
- | move BASE, TMP2
- | lw TMP1, FRAME_PC(TMP2)
- | b <1
- |. andi TMP0, TMP1, FRAME_TYPE
- break;
-
- case BC_ITERC:
- | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
- | move TMP2, BASE
- | addu BASE, BASE, RA
- | li AT, LJ_TFUNC
- | lw TMP1, -24+HI(BASE)
- | lw LFUNC:RB, -24+LO(BASE)
- | ldc1 f2, -8(BASE)
- | ldc1 f0, -16(BASE)
- | sw TMP1, HI(BASE) // Copy callable.
- | sw LFUNC:RB, LO(BASE)
- | sdc1 f2, 16(BASE) // Copy control var.
- | sdc1 f0, 8(BASE) // Copy state.
- | addiu BASE, BASE, 8
- | bne TMP1, AT, ->vmeta_call
- |. li NARGS8:RC, 16 // Iterators get 2 arguments.
- | ins_call
- break;
-
- case BC_ITERN:
- | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
- |.if JIT
- | // NYI: add hotloop, record BC_ITERN.
- |.endif
- | addu RA, BASE, RA
- | lw TAB:RB, -16+LO(RA)
- | lw RC, -8+LO(RA) // Get index from control var.
- | lw TMP0, TAB:RB->asize
- | lw TMP1, TAB:RB->array
- | addiu PC, PC, 4
- |1: // Traverse array part.
- | sltu AT, RC, TMP0
- | beqz AT, >5 // Index points after array part?
- |. sll TMP3, RC, 3
- | addu TMP3, TMP1, TMP3
- | lw TMP2, HI(TMP3)
- | ldc1 f0, 0(TMP3)
- | mtc1 RC, f2
- | lhu RD, -4+OFS_RD(PC)
- | beq TMP2, TISNIL, <1 // Skip holes in array part.
- |. addiu RC, RC, 1
- | cvt.d.w f2, f2
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | sdc1 f0, 8(RA)
- | decode_RD4b RD
- | addu RD, RD, TMP3
- | sw RC, -8+LO(RA) // Update control var.
- | addu PC, PC, RD
- | sdc1 f2, 0(RA)
- |3:
- | ins_next
- |
- |5: // Traverse hash part.
- | lw TMP1, TAB:RB->hmask
- | subu RC, RC, TMP0
- | lw TMP2, TAB:RB->node
- |6:
- | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
- | bnez AT, <3
- |. sll TMP3, RC, 5
- | sll RB, RC, 3
- | subu TMP3, TMP3, RB
- | addu NODE:TMP3, TMP3, TMP2
- | lw RB, HI(NODE:TMP3)
- | ldc1 f0, 0(NODE:TMP3)
- | lhu RD, -4+OFS_RD(PC)
- | beq RB, TISNIL, <6 // Skip holes in hash part.
- |. addiu RC, RC, 1
- | ldc1 f2, NODE:TMP3->key
- | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
- | sdc1 f0, 8(RA)
- | addu RC, RC, TMP0
- | decode_RD4b RD
- | addu RD, RD, TMP3
- | sdc1 f2, 0(RA)
- | addu PC, PC, RD
- | b <3
- |. sw RC, -8+LO(RA) // Update control var.
- break;
-
- case BC_ISNEXT:
- | // RA = base*8, RD = target (points to ITERN)
- | addu RA, BASE, RA
- | lw TMP0, -24+HI(RA)
- | lw CFUNC:TMP1, -24+LO(RA)
- | lw TMP2, -16+HI(RA)
- | lw TMP3, -8+HI(RA)
- | li AT, LJ_TFUNC
- | bne TMP0, AT, >5
- |. addiu TMP2, TMP2, -LJ_TTAB
- | lbu TMP1, CFUNC:TMP1->ffid
- | addiu TMP3, TMP3, -LJ_TNIL
- | srl TMP0, RD, 1
- | or TMP2, TMP2, TMP3
- | addiu TMP1, TMP1, -FF_next_N
- | addu TMP0, PC, TMP0
- | or TMP1, TMP1, TMP2
- | bnez TMP1, >5
- |. lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
- | addu PC, TMP0, TMP2
- | lui TMP1, 0xfffe
- | ori TMP1, TMP1, 0x7fff
- | sw r0, -8+LO(RA) // Initialize control var.
- | sw TMP1, -8+HI(RA)
- |1:
- | ins_next
- |5: // Despecialize bytecode if any of the checks fail.
- | li TMP3, BC_JMP
- | li TMP1, BC_ITERC
- | sb TMP3, -4+OFS_OP(PC)
- | addu PC, TMP0, TMP2
- | b <1
- |. sb TMP1, OFS_OP(PC)
- break;
-
- case BC_VARG:
- | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
- | lw TMP0, FRAME_PC(BASE)
- | decode_RDtoRC8 RC, RD
- | decode_RB8a RB, INS
- | addu RC, BASE, RC
- | decode_RB8b RB
- | addu RA, BASE, RA
- | addiu RC, RC, FRAME_VARG
- | addu TMP2, RA, RB
- | addiu TMP3, BASE, -8 // TMP3 = vtop
- | subu RC, RC, TMP0 // RC = vbase
- | // Note: RC may now be even _above_ BASE if nargs was < numparams.
- | beqz RB, >5 // Copy all varargs?
- |. subu TMP1, TMP3, RC
- | addiu TMP2, TMP2, -16
- |1: // Copy vararg slots to destination slots.
- | lw CARG1, HI(RC)
- | sltu AT, RC, TMP3
- | lw CARG2, LO(RC)
- | addiu RC, RC, 8
- | movz CARG1, TISNIL, AT
- | sw CARG1, HI(RA)
- | sw CARG2, LO(RA)
- | sltu AT, RA, TMP2
- | bnez AT, <1
- |. addiu RA, RA, 8
- |3:
- | ins_next
- |
- |5: // Copy all varargs.
- | lw TMP0, L->maxstack
- | blez TMP1, <3 // No vararg slots?
- |. li MULTRES, 8 // MULTRES = (0+1)*8
- | addu TMP2, RA, TMP1
- | sltu AT, TMP0, TMP2
- | bnez AT, >7
- |. addiu MULTRES, TMP1, 8
- |6:
- | ldc1 f0, 0(RC)
- | addiu RC, RC, 8
- | sdc1 f0, 0(RA)
- | sltu AT, RC, TMP3
- | bnez AT, <6 // More vararg slots?
- |. addiu RA, RA, 8
- | b <3
- |. nop
- |
- |7: // Grow stack for varargs.
- | load_got lj_state_growstack
- | sw RA, L->top
- | subu RA, RA, BASE
- | sw BASE, L->base
- | subu BASE, RC, BASE // Need delta, because BASE may change.
- | sw PC, SAVE_PC
- | srl CARG2, TMP1, 3
- | call_intern lj_state_growstack // (lua_State *L, int n)
- |. move CARG1, L
- | move RC, BASE
- | lw BASE, L->base
- | addu RA, BASE, RA
- | addu RC, BASE, RC
- | b <6
- |. addiu TMP3, BASE, -8
- break;
-
- /* -- Returns ----------------------------------------------------------- */
-
- case BC_RETM:
- | // RA = results*8, RD = extra_nresults*8
- | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
- | // Fall through. Assumes BC_RET follows.
- break;
-
- case BC_RET:
- | // RA = results*8, RD = (nresults+1)*8
- | lw PC, FRAME_PC(BASE)
- | addu RA, BASE, RA
- | move MULTRES, RD
- |1:
- | andi TMP0, PC, FRAME_TYPE
- | bnez TMP0, ->BC_RETV_Z
- |. xori TMP1, PC, FRAME_VARG
- |
- |->BC_RET_Z:
- | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
- | lw INS, -4(PC)
- | addiu TMP2, BASE, -8
- | addiu RC, RD, -8
- | decode_RA8a TMP0, INS
- | decode_RB8a RB, INS
- | decode_RA8b TMP0
- | decode_RB8b RB
- | addu TMP3, TMP2, RB
- | beqz RC, >3
- |. subu BASE, TMP2, TMP0
- |2:
- | ldc1 f0, 0(RA)
- | addiu RA, RA, 8
- | addiu RC, RC, -8
- | sdc1 f0, 0(TMP2)
- | bnez RC, <2
- |. addiu TMP2, TMP2, 8
- |3:
- | addiu TMP3, TMP3, -8
- |5:
- | sltu AT, TMP2, TMP3
- | bnez AT, >6
- |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
- | ins_next1
- | lw TMP1, LFUNC:TMP1->pc
- | lw KBASE, PC2PROTO(k)(TMP1)
- | ins_next2
- |
- |6: // Fill up results with nil.
- | sw TISNIL, HI(TMP2)
- | b <5
- |. addiu TMP2, TMP2, 8
- |
- |->BC_RETV_Z: // Non-standard return case.
- | andi TMP2, TMP1, FRAME_TYPEP
- | bnez TMP2, ->vm_return
- |. nop
- | // Return from vararg function: relocate BASE down.
- | subu BASE, BASE, TMP1
- | b <1
- |. lw PC, FRAME_PC(BASE)
- break;
-
- case BC_RET0: case BC_RET1:
- | // RA = results*8, RD = (nresults+1)*8
- | lw PC, FRAME_PC(BASE)
- | addu RA, BASE, RA
- | move MULTRES, RD
- | andi TMP0, PC, FRAME_TYPE
- | bnez TMP0, ->BC_RETV_Z
- |. xori TMP1, PC, FRAME_VARG
- |
- | lw INS, -4(PC)
- | addiu TMP2, BASE, -8
- if (op == BC_RET1) {
- | ldc1 f0, 0(RA)
- }
- | decode_RB8a RB, INS
- | decode_RA8a RA, INS
- | decode_RB8b RB
- | decode_RA8b RA
- if (op == BC_RET1) {
- | sdc1 f0, 0(TMP2)
- }
- | subu BASE, TMP2, RA
- |5:
- | sltu AT, RD, RB
- | bnez AT, >6
- |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
- | ins_next1
- | lw TMP1, LFUNC:TMP1->pc
- | lw KBASE, PC2PROTO(k)(TMP1)
- | ins_next2
- |
- |6: // Fill up results with nil.
- | addiu TMP2, TMP2, 8
- | addiu RD, RD, 8
- | b <5
- if (op == BC_RET1) {
- |. sw TISNIL, HI(TMP2)
- } else {
- |. sw TISNIL, -8+HI(TMP2)
- }
- break;
-
- /* -- Loops and branches ------------------------------------------------ */
-
- case BC_FORL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IFORL follows.
- break;
-
- case BC_JFORI:
- case BC_JFORL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_FORI:
- case BC_IFORL:
- | // RA = base*8, RD = target (after end of loop or start of loop)
- vk = (op == BC_IFORL || op == BC_JFORL);
- | addu RA, BASE, RA
- if (vk) {
- | ldc1 f0, FORL_IDX*8(RA)
- | ldc1 f4, FORL_STEP*8(RA)
- | ldc1 f2, FORL_STOP*8(RA)
- | lw TMP3, FORL_STEP*8+HI(RA)
- | add.d f0, f0, f4
- | sdc1 f0, FORL_IDX*8(RA)
- } else {
- | lw TMP1, FORL_IDX*8+HI(RA)
- | lw TMP3, FORL_STEP*8+HI(RA)
- | lw TMP2, FORL_STOP*8+HI(RA)
- | sltiu TMP1, TMP1, LJ_TISNUM
- | sltiu TMP0, TMP3, LJ_TISNUM
- | sltiu TMP2, TMP2, LJ_TISNUM
- | and TMP1, TMP1, TMP0
- | and TMP1, TMP1, TMP2
- | ldc1 f0, FORL_IDX*8(RA)
- | beqz TMP1, ->vmeta_for
- |. ldc1 f2, FORL_STOP*8(RA)
- }
- if (op != BC_JFORL) {
- | srl RD, RD, 1
- | lui TMP0, (-(BCBIAS_J*4 >> 16) & 65535)
- }
- | c.le.d 0, f0, f2
- | c.le.d 1, f2, f0
- | sdc1 f0, FORL_EXT*8(RA)
- if (op == BC_JFORI) {
- | li TMP1, 1
- | li TMP2, 1
- | addu TMP0, RD, TMP0
- | slt TMP3, TMP3, r0
- | movf TMP1, r0, 0
- | addu PC, PC, TMP0
- | movf TMP2, r0, 1
- | lhu RD, -4+OFS_RD(PC)
- | movn TMP1, TMP2, TMP3
- | bnez TMP1, =>BC_JLOOP
- |. decode_RD8b RD
- } else if (op == BC_JFORL) {
- | li TMP1, 1
- | li TMP2, 1
- | slt TMP3, TMP3, r0
- | movf TMP1, r0, 0
- | movf TMP2, r0, 1
- | movn TMP1, TMP2, TMP3
- | bnez TMP1, =>BC_JLOOP
- |. nop
- } else {
- | addu TMP1, RD, TMP0
- | slt TMP3, TMP3, r0
- | move TMP2, TMP1
- if (op == BC_FORI) {
- | movt TMP1, r0, 0
- | movt TMP2, r0, 1
- } else {
- | movf TMP1, r0, 0
- | movf TMP2, r0, 1
- }
- | movn TMP1, TMP2, TMP3
- | addu PC, PC, TMP1
- }
- | ins_next
- break;
-
- case BC_ITERL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IITERL follows.
- break;
-
- case BC_JITERL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IITERL:
- | // RA = base*8, RD = target
- | addu RA, BASE, RA
- | lw TMP1, HI(RA)
- | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
- |. lw TMP2, LO(RA)
- if (op == BC_JITERL) {
- | sw TMP1, -8+HI(RA)
- | b =>BC_JLOOP
- |. sw TMP2, -8+LO(RA)
- } else {
- | branch_RD // Otherwise save control var + branch.
- | sw TMP1, -8+HI(RA)
- | sw TMP2, -8+LO(RA)
- }
- |1:
- | ins_next
- break;
-
- case BC_LOOP:
- | // RA = base*8, RD = target (loop extent)
- | // Note: RA/RD is only used by trace recorder to determine scope/extent
- | // This opcode does NOT jump, its only purpose is to detect a hot loop.
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_ILOOP follows.
- break;
-
- case BC_ILOOP:
- | // RA = base*8, RD = target (loop extent)
- | ins_next
- break;
-
- case BC_JLOOP:
- |.if JIT
- | // RA = base*8 (ignored), RD = traceno*8
- | lw TMP1, DISPATCH_J(trace)(DISPATCH)
- | srl RD, RD, 1
- | li AT, 0
- | addu TMP1, TMP1, RD
- | // Traces on MIPS don't store the trace number, so use 0.
- | sw AT, DISPATCH_GL(vmstate)(DISPATCH)
- | lw TRACE:TMP2, 0(TMP1)
- | sw BASE, DISPATCH_GL(jit_base)(DISPATCH)
- | sw L, DISPATCH_GL(jit_L)(DISPATCH)
- | lw TMP2, TRACE:TMP2->mcode
- | jr TMP2
- |. addiu JGL, DISPATCH, GG_DISP2G+32768
- |.endif
- break;
-
- case BC_JMP:
- | // RA = base*8 (only used by trace recorder), RD = target
- | branch_RD
- | ins_next
- break;
-
- /* -- Function headers -------------------------------------------------- */
-
- case BC_FUNCF:
- |.if JIT
- | hotcall
- |.endif
- case BC_FUNCV: /* NYI: compiled vararg functions. */
- | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
- break;
-
- case BC_JFUNCF:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IFUNCF:
- | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
- | lw TMP2, L->maxstack
- | lbu TMP1, -4+PC2PROTO(numparams)(PC)
- | lw KBASE, -4+PC2PROTO(k)(PC)
- | sltu AT, TMP2, RA
- | bnez AT, ->vm_growstack_l
- |. sll TMP1, TMP1, 3
- if (op != BC_JFUNCF) {
- | ins_next1
- }
- |2:
- | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
- | bnez AT, >3
- |. addu AT, BASE, NARGS8:RC
- if (op == BC_JFUNCF) {
- | decode_RD8a RD, INS
- | b =>BC_JLOOP
- |. decode_RD8b RD
- } else {
- | ins_next2
- }
- |
- |3: // Clear missing parameters.
- | sw TISNIL, HI(AT)
- | b <2
- |. addiu NARGS8:RC, NARGS8:RC, 8
- break;
-
- case BC_JFUNCV:
-#if !LJ_HASJIT
- break;
-#endif
- | NYI // NYI: compiled vararg functions
- break; /* NYI: compiled vararg functions. */
-
- case BC_IFUNCV:
- | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
- | addu TMP1, BASE, RC
- | lw TMP2, L->maxstack
- | addu TMP0, RA, RC
- | sw LFUNC:RB, LO(TMP1) // Store copy of LFUNC.
- | addiu TMP3, RC, 8+FRAME_VARG
- | sltu AT, TMP0, TMP2
- | lw KBASE, -4+PC2PROTO(k)(PC)
- | beqz AT, ->vm_growstack_l
- |. sw TMP3, HI(TMP1) // Store delta + FRAME_VARG.
- | lbu TMP2, -4+PC2PROTO(numparams)(PC)
- | move RA, BASE
- | move RC, TMP1
- | ins_next1
- | beqz TMP2, >3
- |. addiu BASE, TMP1, 8
- |1:
- | lw TMP0, HI(RA)
- | lw TMP3, LO(RA)
- | sltu AT, RA, RC // Less args than parameters?
- | move CARG1, TMP0
- | movz TMP0, TISNIL, AT // Clear missing parameters.
- | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
- | sw TMP3, 8+LO(TMP1)
- | addiu TMP2, TMP2, -1
- | sw TMP0, 8+HI(TMP1)
- | addiu TMP1, TMP1, 8
- | sw CARG1, HI(RA)
- | bnez TMP2, <1
- |. addiu RA, RA, 8
- |3:
- | ins_next2
- break;
-
- case BC_FUNCC:
- case BC_FUNCCW:
- | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
- if (op == BC_FUNCC) {
- | lw CFUNCADDR, CFUNC:RB->f
- } else {
- | lw CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
- }
- | addu TMP1, RA, NARGS8:RC
- | lw TMP2, L->maxstack
- | addu RC, BASE, NARGS8:RC
- | sw BASE, L->base
- | sltu AT, TMP2, TMP1
- | sw RC, L->top
- | li_vmstate C
- if (op == BC_FUNCCW) {
- | lw CARG2, CFUNC:RB->f
- }
- | bnez AT, ->vm_growstack_c // Need to grow stack.
- |. move CARG1, L
- | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
- |. st_vmstate
- | // Returns nresults.
- | lw BASE, L->base
- | sll RD, CRET1, 3
- | lw TMP1, L->top
- | li_vmstate INTERP
- | lw PC, FRAME_PC(BASE) // Fetch PC of caller.
- | subu RA, TMP1, RD // RA = L->top - nresults*8
- | b ->vm_returnc
- |. st_vmstate
- break;
-
- /* ---------------------------------------------------------------------- */
-
- default:
- fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
- exit(2);
- break;
- }
-}
-
-static int build_backend(BuildCtx *ctx)
-{
- int op;
-
- dasm_growpc(Dst, BC__MAX);
-
- build_subroutines(ctx);
-
- |.code_op
- for (op = 0; op < BC__MAX; op++)
- build_ins(ctx, (BCOp)op, op);
-
- return BC__MAX;
-}
-
-/* Emit pseudo frame-info for all assembler functions. */
-static void emit_asm_debug(BuildCtx *ctx)
-{
- int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
- int i;
- switch (ctx->mode) {
- case BUILD_elfasm:
- fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
- fprintf(ctx->fp,
- ".Lframe0:\n"
- "\t.4byte .LECIE0-.LSCIE0\n"
- ".LSCIE0:\n"
- "\t.4byte 0xffffffff\n"
- "\t.byte 0x1\n"
- "\t.string \"\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 31\n"
- "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE0:\n\n");
- fprintf(ctx->fp,
- ".LSFDE0:\n"
- "\t.4byte .LEFDE0-.LASFDE0\n"
- ".LASFDE0:\n"
- "\t.4byte .Lframe0\n"
- "\t.4byte .Lbegin\n"
- "\t.4byte %d\n"
- "\t.byte 0xe\n\t.uleb128 %d\n"
- "\t.byte 0x9f\n\t.sleb128 1\n"
- "\t.byte 0x9e\n\t.sleb128 2\n",
- fcofs, CFRAME_SIZE);
- for (i = 23; i >= 16; i--)
- fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
- for (i = 30; i >= 20; i -= 2)
- fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE0:\n\n");
-#if LJ_HASFFI
- fprintf(ctx->fp,
- ".LSFDE1:\n"
- "\t.4byte .LEFDE1-.LASFDE1\n"
- ".LASFDE1:\n"
- "\t.4byte .Lframe0\n"
- "\t.4byte lj_vm_ffi_call\n"
- "\t.4byte %d\n"
- "\t.byte 0x9f\n\t.uleb128 1\n"
- "\t.byte 0x90\n\t.uleb128 2\n"
- "\t.byte 0xd\n\t.uleb128 0x10\n"
- "\t.align 2\n"
- ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
-#endif
- fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
- fprintf(ctx->fp,
- "\t.globl lj_err_unwind_dwarf\n"
- ".Lframe1:\n"
- "\t.4byte .LECIE1-.LSCIE1\n"
- ".LSCIE1:\n"
- "\t.4byte 0\n"
- "\t.byte 0x1\n"
- "\t.string \"zPR\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 31\n"
- "\t.uleb128 6\n" /* augmentation length */
- "\t.byte 0\n"
- "\t.4byte lj_err_unwind_dwarf\n"
- "\t.byte 0\n"
- "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE1:\n\n");
- fprintf(ctx->fp,
- ".LSFDE2:\n"
- "\t.4byte .LEFDE2-.LASFDE2\n"
- ".LASFDE2:\n"
- "\t.4byte .LASFDE2-.Lframe1\n"
- "\t.4byte .Lbegin\n"
- "\t.4byte %d\n"
- "\t.uleb128 0\n" /* augmentation length */
- "\t.byte 0xe\n\t.uleb128 %d\n"
- "\t.byte 0x9f\n\t.sleb128 1\n"
- "\t.byte 0x9e\n\t.sleb128 2\n",
- fcofs, CFRAME_SIZE);
- for (i = 23; i >= 16; i--)
- fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
- for (i = 30; i >= 20; i -= 2)
- fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE2:\n\n");
-#if LJ_HASFFI
- fprintf(ctx->fp,
- ".Lframe2:\n"
- "\t.4byte .LECIE2-.LSCIE2\n"
- ".LSCIE2:\n"
- "\t.4byte 0\n"
- "\t.byte 0x1\n"
- "\t.string \"zR\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 31\n"
- "\t.uleb128 1\n" /* augmentation length */
- "\t.byte 0\n"
- "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE2:\n\n");
- fprintf(ctx->fp,
- ".LSFDE3:\n"
- "\t.4byte .LEFDE3-.LASFDE3\n"
- ".LASFDE3:\n"
- "\t.4byte .LASFDE3-.Lframe2\n"
- "\t.4byte lj_vm_ffi_call\n"
- "\t.4byte %d\n"
- "\t.uleb128 0\n" /* augmentation length */
- "\t.byte 0x9f\n\t.uleb128 1\n"
- "\t.byte 0x90\n\t.uleb128 2\n"
- "\t.byte 0xd\n\t.uleb128 0x10\n"
- "\t.align 2\n"
- ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
-#endif
- break;
- default:
- break;
- }
-}
-
+|// Low-level VM code for MIPS CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch mips
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r16 // Base of current Lua stack frame.
+|.define KBASE, r17 // Constants of current Lua function.
+|.define PC, r18 // Next PC.
+|.define DISPATCH, r19 // Opcode dispatch table.
+|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
+|// NYI: r22 currently unused.
+|
+|.define JGL, r30 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNIL, r30
+|.define TOBIT, f30 // 2^52 + 2^51.
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r23 // Callee-save.
+|.define RB, r8
+|.define RC, r9
+|.define RD, r10
+|.define INS, r11
+|
+|.define AT, r1 // Assembler temporary.
+|.define TMP0, r12
+|.define TMP1, r13
+|.define TMP2, r14
+|.define TMP3, r15
+|
+|// Calling conventions.
+|.define CFUNCADDR, r25
+|.define CARG1, r4
+|.define CARG2, r5
+|.define CARG3, r6
+|.define CARG4, r7
+|
+|.define CRET1, r2
+|.define CRET2, r3
+|
+|.define FARG1, f12
+|.define FARG2, f14
+|
+|.define FRET1, f0
+|.define FRET2, f2
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define CFRAME_SPACE, 112 // Delta for sp.
+|
+|.define SAVE_ERRF, 124(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 120(sp)
+|.define SAVE_CFRAME, 116(sp)
+|.define SAVE_L, 112(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
+|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves.
+|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves.
+|.define SAVE_PC, 20(sp)
+|.define ARG5, 16(sp)
+|.define CSAVE_4, 12(sp)
+|.define CSAVE_3, 8(sp)
+|.define CSAVE_2, 4(sp)
+|.define CSAVE_1, 0(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by callee.
+|
+|.define ARG5_OFS, 16
+|.define SAVE_MULTRES, ARG5
+|
+|.macro saveregs
+| addiu sp, sp, -CFRAME_SPACE
+| sw ra, SAVE_GPR_+9*4(sp)
+| sw r30, SAVE_GPR_+8*4(sp)
+| sdc1 f30, SAVE_FPR_+5*8(sp)
+| sw r23, SAVE_GPR_+7*4(sp)
+| sw r22, SAVE_GPR_+6*4(sp)
+| sdc1 f28, SAVE_FPR_+4*8(sp)
+| sw r21, SAVE_GPR_+5*4(sp)
+| sw r20, SAVE_GPR_+4*4(sp)
+| sdc1 f26, SAVE_FPR_+3*8(sp)
+| sw r19, SAVE_GPR_+3*4(sp)
+| sw r18, SAVE_GPR_+2*4(sp)
+| sdc1 f24, SAVE_FPR_+2*8(sp)
+| sw r17, SAVE_GPR_+1*4(sp)
+| sw r16, SAVE_GPR_+0*4(sp)
+| sdc1 f22, SAVE_FPR_+1*8(sp)
+| sdc1 f20, SAVE_FPR_+0*8(sp)
+|.endmacro
+|
+|.macro restoreregs_ret
+| lw ra, SAVE_GPR_+9*4(sp)
+| lw r30, SAVE_GPR_+8*4(sp)
+| ldc1 f30, SAVE_FPR_+5*8(sp)
+| lw r23, SAVE_GPR_+7*4(sp)
+| lw r22, SAVE_GPR_+6*4(sp)
+| ldc1 f28, SAVE_FPR_+4*8(sp)
+| lw r21, SAVE_GPR_+5*4(sp)
+| lw r20, SAVE_GPR_+4*4(sp)
+| ldc1 f26, SAVE_FPR_+3*8(sp)
+| lw r19, SAVE_GPR_+3*4(sp)
+| lw r18, SAVE_GPR_+2*4(sp)
+| ldc1 f24, SAVE_FPR_+2*8(sp)
+| lw r17, SAVE_GPR_+1*4(sp)
+| lw r16, SAVE_GPR_+0*4(sp)
+| ldc1 f22, SAVE_FPR_+1*8(sp)
+| ldc1 f20, SAVE_FPR_+0*8(sp)
+| jr ra
+| addiu sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; .long 0xf0f0f0f0; .endmacro
+|
+|// Macros to mark delay slots.
+|.macro ., a; a; .endmacro
+|.macro ., a,b; a,b; .endmacro
+|.macro ., a,b,c; a,b,c; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Endian-specific defines.
+|.define FRAME_PC, LJ_ENDIAN_SELECT(-4,-8)
+|.define FRAME_FUNC, LJ_ENDIAN_SELECT(-8,-4)
+|.define HI, LJ_ENDIAN_SELECT(4,0)
+|.define LO, LJ_ENDIAN_SELECT(0,4)
+|.define OFS_RD, LJ_ENDIAN_SELECT(2,0)
+|.define OFS_RA, LJ_ENDIAN_SELECT(1,2)
+|.define OFS_OP, LJ_ENDIAN_SELECT(0,3)
+|
+|// Instruction decode.
+|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP4a, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RC4a, dst, ins; srl dst, ins, 14; .endmacro
+|.macro decode_RC4b, dst; andi dst, dst, 0x3fc; .endmacro
+|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
+|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
+|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
+|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
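These decoders pick apart LuaJIT's 32-bit instruction word (opcode in bits 0-7, A in bits 8-15, and either a 16-bit D operand or the C/B byte pair in the upper half) and pre-scale the result: *4 for dispatch-table and constant offsets, *8 for 8-byte stack slots. Equivalent C, as a sketch:

  uint32_t op4 = (ins & 0xff) << 2;        /* opcode * 4: dispatch table offset */
  uint32_t ra8 = (ins >> 5)  & 0x7f8;      /* A * 8: stack slot offset */
  uint32_t rb8 = (ins >> 21) & 0x7f8;      /* B * 8 */
  uint32_t rc4 = (ins >> 14) & 0x3fc;      /* C * 4: constant offset */
  uint32_t rd8 = (ins >> 16) << 3;         /* D * 8 */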
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lw INS, 0(PC)
+| addiu PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP4a TMP1, INS
+| decode_OP4b TMP1
+| addu TMP0, DISPATCH, TMP1
+| decode_RD8a RD, INS
+| lw AT, 0(TMP0)
+| decode_RA8a RA, INS
+| decode_RD8b RD
+| jr AT
+| decode_RA8b RA
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lw PC, LFUNC:RB->pc
+| lw INS, 0(PC)
+| addiu PC, PC, 4
+| decode_OP4a TMP1, INS
+| decode_RA8a RA, INS
+| decode_OP4b TMP1
+| decode_RA8b RA
+| addu TMP0, DISPATCH, TMP1
+| lw TMP0, 0(TMP0)
+| jr TMP0
+| addu RA, RA, BASE
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| sw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|.macro branch_RD
+| srl TMP0, RD, 1
+| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
+| addu TMP0, TMP0, AT
+| addu PC, PC, TMP0
+|.endmacro
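branch_RD converts the biased jump operand into a PC-relative displacement: RD arrives pre-multiplied by 8, the shift halves it to RD*4, and the lui constant subtracts BCBIAS_J*4 (BCBIAS_J being the 0x8000 jump bias). In C terms, a one-line sketch:

  pc += ((int32_t)rd - BCBIAS_J) * 4;      /* signed displacement, in instructions */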
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
+#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name)
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro load_got, func
+| lw CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
+|.endmacro
+|// Much faster. Sadly, there's no easy way to force the required code layout.
+|// .macro call_intern, func; bal extern func; .endmacro
+|.macro call_intern, func; jalr CFUNCADDR; .endmacro
+|.macro call_extern; jalr CFUNCADDR; .endmacro
+|.macro jmp_extern; jr CFUNCADDR; .endmacro
+|
+|.macro hotcheck, delta, target
+| srl TMP1, PC, 1
+| andi TMP1, TMP1, 126
+| addu TMP1, TMP1, DISPATCH
+| lhu TMP2, GG_DISP2HOT(TMP1)
+| addiu TMP2, TMP2, -delta
+| bltz TMP2, target
+|. sh TMP2, GG_DISP2HOT(TMP1)
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp, target
+| lw tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
+| sw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| sb mark, tab->marked
+| b target
+|. sw tmp, tab->gclist
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi AT, PC, FRAME_P
+ | beqz AT, ->cont_dispatch
+ |. li TMP1, LJ_TTRUE
+ |
+ | // Return from pcall or xpcall fast func.
+ | lw PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | move BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | sw TMP1, FRAME_PC(RA) // Prepend true to results.
+ | addiu RA, RA, -8
+ |
+ |->vm_returnc:
+ | addiu RD, RD, 8 // RD = (nresults+1)*8.
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz RD, ->vm_unwind_c_eh
+ |. li CRET1, LUA_YIELD
+ | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
+ |. move MULTRES, RD
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | li TMP2, -8
+ | xori AT, TMP0, FRAME_C
+ | and TMP2, PC, TMP2
+ | bnez AT, ->vm_returnp
+ |. subu TMP2, BASE, TMP2 // TMP2 = previous base.
+ |
+ | addiu TMP1, RD, -8
+ | sw TMP2, L->base
+ | li_vmstate C
+ | lw TMP2, SAVE_NRES
+ | addiu BASE, BASE, -8
+ | st_vmstate
+ | beqz TMP1, >2
+ |. sll TMP2, TMP2, 3
+ |1:
+ | addiu TMP1, TMP1, -8
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | sdc1 f0, 0(BASE)
+ | bnez TMP1, <1
+ |. addiu BASE, BASE, 8
+ |
+ |2:
+ | bne TMP2, RD, >6
+ |3:
+ |. sw BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lw TMP0, SAVE_CFRAME // Restore previous C frame.
+ | move CRET1, r0 // Ok return status for vm_pcall.
+ | sw TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | lw TMP1, L->maxstack
+ | slt AT, TMP2, RD
+ | bnez AT, >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ |. slt AT, BASE, TMP1
+ | beqz AT, >8
+ |. nop
+ | sw TISNIL, HI(BASE)
+ | addiu RD, RD, 8
+ | b <2
+ |. addiu BASE, BASE, 8
+ |
+ |7: // Less results wanted.
+ | subu TMP0, RD, TMP2
+ | subu TMP0, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | load_got lj_state_growstack
+ | move MULTRES, RD
+ | move CARG2, TMP2
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw TMP2, SAVE_NRES
+ | lw BASE, L->top // Need the (realloced) L->top in BASE.
+ | move RD, MULTRES
+ | b <2
+ |. sll TMP2, TMP2, 3
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | move sp, CARG1
+ | move CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lw L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | lw GL:TMP1, L->glref
+ | b ->vm_leave_unw
+ |. sw TMP0, GL:TMP1->vmstate
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | li AT, -4
+ | and sp, CARG1, AT
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lw L, SAVE_L
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li TISNIL, LJ_TNIL
+ | lw BASE, L->base
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mtc1 TMP3, TOBIT
+ | li TMP1, LJ_TFALSE
+ | li_vmstate INTERP
+ | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | cvt.d.s TOBIT, TOBIT
+ | addiu RA, BASE, -8 // Results start at BASE-8.
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw TMP1, HI(RA) // Prepend false to error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |. li RD, 16 // 2 results: false + error message.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | b >2
+ |. li CARG2, LUA_MINSTACK
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | addu RC, BASE, RC
+ | subu RA, RA, BASE
+ | sw BASE, L->base
+ | addiu PC, PC, 4 // Must point after first instruction.
+ | sw RC, L->top
+ | srl CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | load_got lj_state_growstack
+ | sw PC, SAVE_PC
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | lw RC, L->top
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | subu RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | move L, CARG1
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | move BASE, CARG2
+ | lbu TMP1, L->status
+ | sw L, SAVE_L
+ | li PC, FRAME_CP
+ | addiu TMP0, sp, CFRAME_RESUME
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw r0, SAVE_NRES
+ | sw r0, SAVE_ERRF
+ | sw TMP0, L->cframe
+ | sw r0, SAVE_CFRAME
+ | beqz TMP1, >3
+ |. sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ |
+ | // Resume after yield (like a return).
+ | move RA, BASE
+ | lw BASE, L->base
+ | lw TMP1, L->top
+ | lw PC, FRAME_PC(BASE)
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | subu RD, TMP1, BASE
+ | mtc1 TMP3, TOBIT
+ | sb r0, L->status
+ | cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | addiu RD, RD, 8
+ | st_vmstate
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz TMP0, ->BC_RET_Z
+ |. li TISNIL, LJ_TNIL
+ | b ->vm_return
+ |. nop
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | sw CARG4, SAVE_ERRF
+ | b >1
+ |. li PC, FRAME_CP
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lw TMP1, L:CARG1->cframe
+ | sw CARG3, SAVE_NRES
+ | move L, CARG1
+ | sw CARG1, SAVE_L
+ | move BASE, CARG2
+ | sw sp, L->cframe // Add our C frame to cframe chain.
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sw TMP1, SAVE_CFRAME
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | lw TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lw TMP1, L->top
+ | mtc1 TMP3, TOBIT
+ | addu PC, PC, BASE
+ | subu NARGS8:RC, TMP1, BASE
+ | subu PC, PC, TMP2 // PC = frame delta + frame type
+ | cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | lw TMP0, FRAME_PC(BASE)
+ | li AT, LJ_TFUNC
+ | bne TMP0, AT, ->vmeta_call
+ |. lw LFUNC:RB, FRAME_FUNC(BASE)
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | move L, CARG1
+ | lw TMP0, L:CARG1->stack
+ | sw CARG1, SAVE_L
+ | lw TMP1, L->top
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lw TMP1, L->cframe
+ | sw sp, L->cframe // Add our C frame to cframe chain.
+ | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | sw r0, SAVE_ERRF // No error function.
+ | move CFUNCADDR, CARG4
+ | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |. sw TMP1, SAVE_CFRAME
+ | move BASE, CRET1
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li PC, FRAME_CP
+ | bnez CRET1, <3 // Else continue with the call.
+ |. addiu DISPATCH, DISPATCH, GG_G2DISP
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |. nop
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lw TMP0, -16+LO(BASE) // Continuation.
+ | move RB, BASE
+ | move BASE, TMP2 // Restore caller BASE.
+ | lw LFUNC:TMP1, FRAME_FUNC(TMP2)
+ |.if FFI
+ | sltiu AT, TMP0, 2
+ |.endif
+ | lw PC, -16+HI(RB) // Restore PC from [cont|PC].
+ | addu TMP2, RA, RD
+ | lw TMP1, LFUNC:TMP1->pc
+ |.if FFI
+ | bnez AT, >1
+ |.endif
+ |. sw TISNIL, -8+HI(TMP2) // Ensure one valid arg.
+ | // BASE = base, RA = resultptr, RB = meta base
+ | jr TMP0 // Jump to continuation.
+ |. lw KBASE, PC2PROTO(k)(TMP1)
+ |
+ |.if FFI
+ |1:
+ | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ |. addiu TMP1, RB, -16
+ | b ->vm_call_tail
+ |. subu RC, TMP1, BASE
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | addiu CARG2, RB, -16
+ | ldc1 f0, 0(RA)
+ | decode_RB8a MULTRES, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b MULTRES
+ | decode_RA8b RA
+ | addu TMP1, BASE, MULTRES
+ | sw BASE, L->base
+ | subu CARG3, CARG2, TMP1
+ | bne TMP1, CARG2, ->BC_CAT_Z
+ |. sdc1 f0, 0(CARG2)
+ | addu RA, BASE, RA
+ | b ->cont_nop
+ |. sdc1 f0, 0(RA)
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP0, HI(CARG3)
+ |
+ |->vmeta_tgets:
+ | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | sw TAB:RB, LO(CARG2)
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sw TMP0, HI(CARG2)
+ | li TMP1, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP1, HI(CARG3)
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | mtc1 TMP0, f0
+ | cvt.d.w f0, f0
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | sdc1 f0, 0(CARG3)
+ |
+ |->vmeta_tgetv:
+ |1:
+ | load_got lj_meta_tget
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. addiu TMP1, BASE, -FRAME_CONT
+ | ldc1 f0, 0(CRET1)
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | lw BASE, L->top
+ | sw PC, -16+HI(BASE) // [cont|PC]
+ | subu PC, BASE, TMP1
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 16 // 2 args for func(t, k).
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP0, HI(CARG3)
+ |
+ |->vmeta_tsets:
+ | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | sw TAB:RB, LO(CARG2)
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sw TMP0, HI(CARG2)
+ | li TMP1, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP1, HI(CARG3)
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | mtc1 TMP0, f0
+ | cvt.d.w f0, f0
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | sdc1 f0, 0(CARG3)
+ |
+ |->vmeta_tsetv:
+ |1:
+ | load_got lj_meta_tset
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. ldc1 f0, 0(RA)
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | sdc1 f0, 0(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | addiu TMP1, BASE, -FRAME_CONT
+ | lw BASE, L->top
+ | sw PC, -16+HI(BASE) // [cont|PC]
+ | subu PC, BASE, TMP1
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | sdc1 f0, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | // CARG2, CARG3 are already set by BC_ISLT/BC_ISGE/BC_ISLE/BC_ISGT.
+ | load_got lj_meta_comp
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | decode_OP1 CARG4, INS
+ | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | sltiu AT, CRET1, 2
+ | beqz AT, ->vmeta_binop
+ |. negu TMP2, CRET1
+ |4:
+ | lhu RD, OFS_RD(PC)
+ | addiu PC, PC, 4
+ | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sll RD, RD, 2
+ | addu RD, RD, TMP1
+ | and RD, RD, TMP2
+ | addu PC, PC, RD
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lbu TMP1, -4+OFS_RA(PC)
+ | ldc1 f0, 0(RA)
+ | sll TMP1, TMP1, 3
+ | addu TMP1, BASE, TMP1
+ | b ->cont_nop
+ |. sdc1 f0, 0(TMP1)
+ |
+ |->cont_condt: // RA = resultptr
+ | lw TMP0, HI(RA)
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. negu TMP2, AT // Branch if result is true.
+ |
+ |->cont_condf: // RA = resultptr
+ | lw TMP0, HI(RA)
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. addiu TMP2, AT, -1 // Branch if result is false.
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | load_got lj_meta_equal
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | load_got lj_meta_equal_cd
+ | move CARG2, INS
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |.endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_unm:
+ | move CARG4, CARG3
+ |
+ |->vmeta_arith:
+ | load_got lj_meta_arith
+ | decode_OP1 TMP0, INS
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | move CARG2, RA
+ | sw TMP0, ARG5
+ | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | beqz CRET1, ->cont_nop
+ |. nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | subu TMP1, CRET1, BASE
+ | sw PC, -16+HI(CRET1) // [cont|PC]
+ | move TMP2, BASE
+ | addiu PC, TMP1, FRAME_CONT
+ | move BASE, CRET1
+ | b ->vm_call_dispatch
+ |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ |
+ |->vmeta_len:
+ | // CARG2 already set by BC_LEN.
+#if LJ_52
+ | move MULTRES, CARG1
+#endif
+ | load_got lj_meta_len
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_len // (lua_State *L, TValue *o)
+ |. move CARG1, L
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
+ |. nop
+ | b ->BC_LEN_Z
+ |. move CARG1, MULTRES
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+ |. nop
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sw TMP2, L->base // This is the callers base!
+ | addiu CARG2, BASE, -8
+ | sw PC, SAVE_PC
+ | addu CARG3, BASE, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sw BASE, L->base
+ | addiu CARG2, RA, -8
+ | sw PC, SAVE_PC
+ | addu CARG3, RA, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | lw TMP1, FRAME_PC(BASE)
+ | lw LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |. addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | load_got lj_meta_for
+ | sw BASE, L->base
+ | move CARG2, RA
+ | sw PC, SAVE_PC
+ | move MULTRES, INS
+ | call_intern lj_meta_for // (lua_State *L, TValue *base)
+ |. move CARG1, L
+ |.if JIT
+ | decode_OP1 TMP0, MULTRES
+ | li AT, BC_JFORI
+ |.endif
+ | decode_RA8a RA, MULTRES
+ | decode_RD8a RD, MULTRES
+ | decode_RA8b RA
+ |.if JIT
+ | beq TMP0, AT, =>BC_JFORI
+ |. decode_RD8b RD
+ | b =>BC_FORI
+ |. nop
+ |.else
+ | b =>BC_FORI
+ |. decode_RD8b RD
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | beqz NARGS8:RC, ->fff_fallback
+ |. lw CARG3, HI(BASE)
+ | lw CARG1, LO(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw CARG4, 8+HI(BASE)
+ | lw CARG1, LO(BASE)
+ | lw CARG2, 8+LO(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_n, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ldc1 FARG1, 0(BASE)
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw CARG4, 8+HI(BASE)
+ | ldc1 FARG1, 0(BASE)
+ | ldc1 FARG2, 8(BASE)
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, CARG4, LJ_TISNUM
+ | and TMP0, TMP0, TMP1
+ | beqz TMP0, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
+ |.macro ffgccheck
+ | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | subu AT, TMP0, TMP1
+ | bgezal AT, ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | sltiu AT, CARG3, LJ_TISTRUECOND
+ | beqz AT, ->fff_fallback
+ |. addiu RA, BASE, -8
+ | lw PC, FRAME_PC(BASE)
+ | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | addu TMP2, RA, NARGS8:RC
+ | sw CARG3, HI(RA)
+ | addiu TMP1, BASE, 8
+ | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
+ |. sw CARG1, LO(RA)
+ |1:
+ | ldc1 f0, 0(TMP1)
+ | sdc1 f0, -8(TMP1)
+ | bne TMP1, TMP2, <1
+ |. addiu TMP1, TMP1, 8
+ | b ->fff_res
+ |. nop
+ |
+ |.ffunc type
+ | lw CARG3, HI(BASE)
+ | li TMP1, LJ_TISNUM
+ | beqz NARGS8:RC, ->fff_fallback
+ |. sltiu TMP0, CARG3, LJ_TISNUM
+ | movz TMP1, CARG3, TMP0
+ | not TMP1, TMP1
+ | sll TMP1, TMP1, 3
+ | addu TMP1, CFUNC:RB, TMP1
+ | b ->fff_resn
+ |. ldc1 FRET1, CFUNC:TMP1->upvalue
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, >6
+ |. li AT, LJ_TUDATA
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lw TAB:CARG1, TAB:CARG1->metatable
+ |2:
+ | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beqz TAB:CARG1, ->fff_restv
+ |. li CARG3, LJ_TNIL
+ | lw TMP0, TAB:CARG1->hmask
+ | li CARG3, LJ_TTAB // Use metatable as default result.
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:CARG1->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | li AT, LJ_TSTR
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | lw CARG4, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP3, NODE:TMP2->next
+ | bne CARG4, AT, >4
+ |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | beq TMP0, STR:RC, >5
+ |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2)
+ |4:
+ | beqz NODE:TMP3, ->fff_restv // Not found, keep default result.
+ |. move NODE:TMP2, NODE:TMP3
+ | b <3
+ |. nop
+ |5:
+ | beq CARG2, TISNIL, ->fff_restv // Ditto for nil value.
+ |. nop
+ | move CARG3, CARG2 // Return value of mt.__metatable.
+ | b ->fff_restv
+ |. move CARG1, TMP1
+ |
+ |6:
+ | beq CARG3, AT, <1
+ |. sltiu TMP0, CARG3, LJ_TISNUM
+ | li TMP1, LJ_TISNUM
+ | movz TMP1, CARG3, TMP0
+ | not TMP1, TMP1
+ | sll TMP1, TMP1, 2
+ | addu TMP1, DISPATCH, TMP1
+ | b <2
+ |. lw TAB:CARG1, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1)
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. addiu CARG4, CARG4, -LJ_TTAB
+ | lw TAB:TMP1, TAB:CARG1->metatable
+ | lbu TMP3, TAB:CARG1->marked
+ | or AT, CARG4, TAB:TMP1
+ | bnez AT, ->fff_fallback
+ |. andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | beqz AT, ->fff_restv
+ |. sw TAB:CARG2, TAB:CARG1->metatable
+ | barrierback TAB:CARG1, TMP3, TMP0, ->fff_restv
+ |
+ |.ffunc rawget
+ | lw CARG4, HI(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | lw TAB:CARG2, LO(BASE)
+ | load_got lj_tab_get
+ | addiu CARG4, CARG4, -LJ_TTAB
+ | or AT, AT, CARG4
+ | bnez AT, ->fff_fallback
+ |. addiu CARG3, BASE, 8
+ | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ |. move CARG1, L
+ | // Returns cTValue *.
+ | b ->fff_resn
+ |. ldc1 FRET1, 0(CRET1)
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | lw CARG1, HI(BASE)
+ | xori AT, NARGS8:RC, 8
+ | sltiu CARG1, CARG1, LJ_TISNUM
+ | movn CARG1, r0, AT
+ | beqz CARG1, ->fff_fallback // Exactly one number argument.
+ |. ldc1 FRET1, 0(BASE)
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | li AT, LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq CARG3, AT, ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, TMP1, 1
+ | and TMP0, TMP0, TMP1
+ | beqz TMP0, ->fff_fallback
+ |. sw BASE, L->base // Add frame since C call can throw.
+ | ffgccheck
+ |. sw PC, SAVE_PC // Redundant (but a defined value).
+ | load_got lj_str_fromnum
+ | move CARG1, L
+ | call_intern lj_str_fromnum // (lua_State *L, lua_Number *np)
+ |. move CARG2, BASE
+ | // Returns GCstr *.
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |. move CARG1, CRET1
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | lw CARG1, HI(BASE)
+ | lw TAB:CARG2, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. addu TMP2, BASE, NARGS8:RC
+ | li AT, LJ_TTAB
+ | sw TISNIL, HI(TMP2) // Set missing 2nd arg to nil.
+ | bne CARG1, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+ | load_got lj_tab_next
+ | sw BASE, L->base // Add frame since C call can throw.
+ | sw BASE, L->top // Dummy frame length is ok.
+ | addiu CARG3, BASE, 8
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ |. move CARG1, L
+ | // Returns 0 at end of traversal.
+ | beqz CRET1, ->fff_restv // End of traversal: return nil.
+ |. li CARG3, LJ_TNIL
+ | ldc1 f0, 8(BASE) // Copy key and value to results.
+ | addiu RA, BASE, -8
+ | ldc1 f2, 16(BASE)
+ | li RD, (2+1)*8
+ | sdc1 f0, 0(RA)
+ | b ->fff_res
+ |. sdc1 f2, 8(RA)
+ |
+ |.ffunc_1 pairs
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+#if LJ_52
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+#endif
+ |. addiu RA, BASE, -8
+ | sw TISNIL, 8+HI(BASE)
+ | li RD, (3+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 0(RA)
+ |
+ |.ffunc ipairs_aux
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | lw TAB:CARG1, LO(BASE)
+ | lw CARG4, 8+HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. ldc1 FARG2, 8(BASE)
+ | addiu CARG3, CARG3, -LJ_TTAB
+ | sltiu AT, CARG4, LJ_TISNUM
+ | li TMP0, 1
+ | movn AT, r0, CARG3
+ | mtc1 TMP0, FARG1
+ | beqz AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+ | cvt.w.d FRET1, FARG2
+ | cvt.d.w FARG1, FARG1
+ | lw TMP0, TAB:CARG1->asize
+ | lw TMP1, TAB:CARG1->array
+ | mfc1 TMP2, FRET1
+ | addiu RA, BASE, -8
+ | add.d FARG2, FARG2, FARG1
+ | addiu TMP2, TMP2, 1
+ | sltu AT, TMP2, TMP0
+ | sll TMP3, TMP2, 3
+ | addu TMP3, TMP1, TMP3
+ | beqz AT, >2 // Not in array part?
+ |. sdc1 FARG2, 0(RA)
+ | lw TMP2, HI(TMP3)
+ | ldc1 f0, 0(TMP3)
+ |1:
+ | beq TMP2, TISNIL, ->fff_res // End of iteration, return 0 results.
+ |. li RD, (0+1)*8
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 8(RA)
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lw TMP0, TAB:CARG1->hmask
+ | load_got lj_tab_getinth
+ | beqz TMP0, ->fff_res
+ |. li RD, (0+1)*8
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. move CARG2, TMP2
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->fff_res
+ |. li RD, (0+1)*8
+ | lw TMP2, HI(CRET1)
+ | b <1
+ |. ldc1 f0, 0(CRET1)
+ |
+ |.ffunc_1 ipairs
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+#if LJ_52
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ldc1 f0, CFUNC:RB->upvalue[0]
+#endif
+ |. addiu RA, BASE, -8
+ | sw r0, 8+HI(BASE)
+ | sw r0, 8+LO(BASE)
+ | li RD, (3+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 0(RA)
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. move TMP2, BASE
+ | addiu BASE, BASE, 8
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
+ | andi TMP3, TMP3, 1
+ | addiu PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ |
+ |.ffunc xpcall
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG4, 8+HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. ldc1 FARG2, 8(BASE)
+ | ldc1 FARG1, 0(BASE)
+ | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | li AT, LJ_TFUNC
+ | move TMP2, BASE
+ | bne CARG4, AT, ->fff_fallback // Traceback must be a function.
+ |. addiu BASE, BASE, 16
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP1, HOOK_ACTIVE_SHIFT
+ | sdc1 FARG2, 0(TMP2) // Swap function and traceback.
+ | andi TMP3, TMP3, 1
+ | sdc1 FARG1, 8(TMP2)
+ | addiu PC, TMP3, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |. addiu NARGS8:RC, NARGS8:RC, -16
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | li AT, LJ_TTHREAD
+ | bne CARG3, AT, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lw L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | lbu TMP0, L:CARG1->status
+ | lw TMP1, L:CARG1->cframe
+ | lw CARG2, L:CARG1->top
+ | lw TMP2, L:CARG1->base
+ | addiu TMP3, TMP0, -LUA_YIELD
+ | bgtz TMP3, ->fff_fallback // st > LUA_YIELD?
+ |. xor TMP2, TMP2, CARG2
+ | bnez TMP1, ->fff_fallback // cframe != 0?
+ |. or AT, TMP2, TMP0
+ | lw TMP0, L:CARG1->maxstack
+ | beqz AT, ->fff_fallback // base == top && st == 0?
+ |. lw PC, FRAME_PC(BASE)
+ | addu TMP2, CARG2, NARGS8:RC
+ | sltu AT, TMP0, TMP2
+ | bnez AT, ->fff_fallback // Stack overflow?
+ |. sw PC, SAVE_PC
+ | sw BASE, L->base
+ |1:
+ |.if resume
+ | addiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | addiu TMP2, TMP2, -8
+ |.endif
+ | sw TMP2, L:CARG1->top
+ | addu TMP1, BASE, NARGS8:RC
+ | move CARG3, CARG2
+ | sw BASE, L->top
+ |2: // Move args to coroutine.
+ | ldc1 f0, 0(BASE)
+ | sltu AT, BASE, TMP1
+ | beqz AT, >3
+ |. addiu BASE, BASE, 8
+ | sdc1 f0, 0(CARG3)
+ | b <2
+ |. addiu CARG3, CARG3, 8
+ |3:
+ | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |. move L:RA, L:CARG1
+ | // Returns thread status.
+ |4:
+ | lw TMP2, L:RA->base
+ | sltiu AT, CRET1, LUA_YIELD+1
+ | lw TMP3, L:RA->top
+ | li_vmstate INTERP
+ | lw BASE, L->base
+ | st_vmstate
+ | beqz AT, >8
+ |. subu RD, TMP3, TMP2
+ | lw TMP0, L->maxstack
+ | beqz RD, >6 // No results?
+ |. addu TMP1, BASE, RD
+ | sltu AT, TMP0, TMP1
+ | bnez AT, >9 // Need to grow stack?
+ |. addu TMP3, TMP2, RD
+ | sw TMP2, L:RA->top // Clear coroutine stack.
+ | move TMP1, BASE
+ |5: // Move results from coroutine.
+ | ldc1 f0, 0(TMP2)
+ | addiu TMP2, TMP2, 8
+ | sltu AT, TMP2, TMP3
+ | sdc1 f0, 0(TMP1)
+ | bnez AT, <5
+ |. addiu TMP1, TMP1, 8
+ |6:
+ | andi TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | addiu RA, BASE, -8
+ | sw TMP1, -8+HI(BASE) // Prepend true to results.
+ | addiu RD, RD, 16
+ |.else
+ | move RA, BASE
+ | addiu RD, RD, 8
+ |.endif
+ |7:
+ | sw PC, SAVE_PC
+ | beqz TMP0, ->BC_RET_Z
+ |. move MULTRES, RD
+ | b ->vm_return
+ |. nop
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | addiu TMP3, TMP3, -8
+ | li TMP1, LJ_TFALSE
+ | ldc1 f0, 0(TMP3)
+ | sw TMP3, L:RA->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | sw TMP1, -8+HI(BASE) // Prepend false to results.
+ | addiu RA, BASE, -8
+ | sdc1 f0, 0(BASE) // Copy error message.
+ | b <7
+ |. andi TMP0, PC, FRAME_TYPE
+ |.else
+ | load_got lj_ffh_coroutine_wrap_err
+ | move CARG2, L:RA
+ | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |. move CARG1, L
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | load_got lj_state_growstack
+ | srl CARG2, RD, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | b <4
+ |. li CRET1, 0
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lw TMP0, L->cframe
+ | addu TMP1, BASE, NARGS8:RC
+ | sw BASE, L->base
+ | andi TMP0, TMP0, CFRAME_RESUME
+ | sw TMP1, L->top
+ | beqz TMP0, ->fff_fallback
+ |. li CRET1, LUA_YIELD
+ | sw r0, L->cframe
+ | b ->vm_leave_unw
+ |. sb CRET1, L->status
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_n math_abs
+ |. abs.d FRET1, FARG1
+ |->fff_resn:
+ | lw PC, FRAME_PC(BASE)
+ | addiu RA, BASE, -8
+ | b ->fff_res1
+ |. sdc1 FRET1, -8(BASE)
+ |
+ |->fff_restv:
+ | // CARG3/CARG1 = TValue result.
+ | lw PC, FRAME_PC(BASE)
+ | sw CARG3, -8+HI(BASE)
+ | addiu RA, BASE, -8
+ | sw CARG1, -8+LO(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->vm_return
+ |. move MULTRES, RD
+ | lw INS, -4(PC)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6 // More results expected?
+ |. decode_RA8a TMP0, INS
+ | decode_RA8b TMP0
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | subu BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | addu TMP1, RA, RD
+ | addiu RD, RD, 8
+ | b <5
+ |. sw TISNIL, -8+HI(TMP1)
+ |
+ |.macro math_extern, func
+ |->ff_math_ .. func:
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. load_got func
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. nop
+ | call_extern
+ |. ldc1 FARG1, 0(BASE)
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ |. load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc_n math_ .. func
+ |. nop
+ | bal ->vm_ .. func
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc math_log
+ | lw CARG3, HI(BASE)
+ | li AT, 8
+ | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
+ |. load_got log
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. nop
+ | call_extern
+ |. ldc1 FARG1, 0(BASE)
+ | b ->fff_resn
+ |. nop
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_n math_sqrt
+ |. sqrt.d FRET1, FARG1
+ | b ->fff_resn
+ |. nop
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ |. ldc1 FARG2, CFUNC:RB->upvalue[0]
+ | b ->fff_resn
+ |. mul.d FRET1, FARG1, FARG2
+ |
+ |.ffunc_nn math_ldexp
+ | cvt.w.d FARG2, FARG2
+ | load_got ldexp
+ | mfc1 CARG3, FARG2
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_n math_frexp
+ | load_got frexp
+ | lw PC, FRAME_PC(BASE)
+ | call_extern
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | addiu RA, BASE, -8
+ | mtc1 TMP1, FARG2
+ | sdc1 FRET1, 0(RA)
+ | cvt.d.w FARG2, FARG2
+ | sdc1 FARG2, 8(RA)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.ffunc_n math_modf
+ | load_got modf
+ | lw PC, FRAME_PC(BASE)
+ | call_extern
+ |. addiu CARG3, BASE, -8
+ | addiu RA, BASE, -8
+ | sdc1 FRET1, 0(BASE)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.macro math_minmax, name, ismax
+ |->ff_ .. name:
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ldc1 FRET1, 0(BASE)
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. addu TMP2, BASE, NARGS8:RC
+ | addiu TMP1, BASE, 8
+ | beq TMP1, TMP2, ->fff_resn
+ |1:
+ |. lw CARG3, HI(TMP1)
+ | ldc1 FARG1, 0(TMP1)
+ | addiu TMP1, TMP1, 8
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.if ismax
+ |. c.olt.d FARG1, FRET1
+ |.else
+ |. c.olt.d FRET1, FARG1
+ |.endif
+ | bne TMP1, TMP2, <1
+ |. movf.d FRET1, FARG1
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_minmax math_min, 0
+ | math_minmax math_max, 1
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. nop
+ | b ->fff_resi
+ |. lw CRET1, STR:CARG1->len
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | xori AT, NARGS8:RC, 8
+ | addiu CARG3, CARG3, -LJ_TSTR
+ | or AT, AT, CARG3
+ | bnez AT, ->fff_fallback // Need exactly 1 string argument.
+ |. nop
+ | lw TMP0, STR:CARG1->len
+ | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addiu RA, BASE, -8
+ | sltu RD, r0, TMP0
+ | mtc1 TMP1, f0
+ | addiu RD, RD, 1
+ | cvt.d.w f0, f0
+ | lw PC, FRAME_PC(BASE)
+ | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
+ | b ->fff_res
+ |. sdc1 f0, 0(RA)
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | lw CARG3, HI(BASE)
+ | ldc1 FARG1, 0(BASE)
+ | li AT, 8
+ | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
+ |. sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. li CARG3, 1
+ | cvt.w.d FARG1, FARG1
+ | addiu CARG2, sp, ARG5_OFS
+ | mfc1 TMP0, FARG1
+ | sltiu AT, TMP0, 256
+ | beqz AT, ->fff_fallback
+ |. sw TMP0, ARG5
+ |->fff_newstr:
+ | load_got lj_str_new
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
+ |. move CARG1, L
+ | // Returns GCstr *.
+ | lw BASE, L->base
+ | move CARG1, CRET1
+ | b ->fff_restv
+ |. li CARG3, LJ_TSTR
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | addiu AT, NARGS8:RC, -16
+ | lw CARG3, 16+HI(BASE)
+ | ldc1 f0, 16(BASE)
+ | lw TMP0, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | bltz AT, ->fff_fallback
+ |. lw CARG2, 8+HI(BASE)
+ | ldc1 f2, 8(BASE)
+ | beqz AT, >1
+ |. li CARG4, -1
+ | cvt.w.d f0, f0
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. mfc1 CARG4, f0
+ |1:
+ | sltiu AT, CARG2, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | cvt.w.d f2, f2
+ | bne TMP0, AT, ->fff_fallback
+ |. lw CARG2, STR:CARG1->len
+ | mfc1 CARG3, f2
+ | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
+ | slt AT, CARG4, r0
+ | addiu TMP0, CARG2, 1
+ | addu TMP1, CARG4, TMP0
+ | slt TMP3, CARG3, r0
+ | movn CARG4, TMP1, AT // if (end < 0) end += len+1
+ | addu TMP1, CARG3, TMP0
+ | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | movn CARG4, r0, AT // if (end < 0) end = 0
+ | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | movn CARG4, CARG2, AT // if (end > len) end = len
+ | addu CARG2, STR:CARG1, CARG3
+ | subu CARG3, CARG4, CARG3 // len = end - start
+ | addiu CARG2, CARG2, sizeof(GCstr)-1
+ | bgez CARG3, ->fff_newstr
+ |. addiu CARG3, CARG3, 1 // len++
+ |->fff_emptystr: // Return empty string.
+ | addiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty)
+ | b ->fff_restv
+ |. li CARG3, LJ_TSTR
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | lw TMP0, HI(BASE)
+ | addiu AT, NARGS8:RC, -16 // Exactly 2 arguments.
+ | lw CARG4, 8+HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | addiu TMP0, TMP0, -LJ_TSTR
+ | ldc1 f0, 8(BASE)
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. sltiu AT, CARG4, LJ_TISNUM
+ | cvt.w.d f0, f0
+ | beqz AT, ->fff_fallback
+ |. lw TMP0, STR:CARG1->len
+ | mfc1 CARG3, f0
+ | lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | li AT, 1
+ | blez CARG3, ->fff_emptystr // Count <= 0?
+ |. sltu AT, AT, TMP0
+ | beqz TMP0, ->fff_emptystr // Zero length string?
+ |. sltu TMP0, TMP1, CARG3
+ | or AT, AT, TMP0
+ | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | bnez AT, ->fff_fallback // Fallback for > 1-char strings.
+ |. lbu TMP0, STR:CARG1[1]
+ | addu TMP2, CARG2, CARG3
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | addiu TMP2, TMP2, -1
+ | sltu AT, CARG2, TMP2
+ | bnez AT, <1
+ |. sb TMP0, 0(TMP2)
+ | b ->fff_newstr
+ |. nop
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | lw CARG3, STR:CARG1->len
+ | addiu CARG1, STR:CARG1, #STR
+ | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | sltu AT, TMP1, CARG3
+ | bnez AT, ->fff_fallback
+ |. addu TMP3, CARG1, CARG3
+ | addu CARG4, CARG2, CARG3
+ |1: // Reverse string copy.
+ | lbu TMP1, 0(CARG1)
+ | sltu AT, CARG1, TMP3
+ | beqz AT, ->fff_newstr
+ |. addiu CARG1, CARG1, 1
+ | addiu CARG4, CARG4, -1
+ | b <1
+ |. sb TMP1, 0(CARG4)
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. lw TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | lw CARG3, STR:CARG1->len
+ | addiu CARG1, STR:CARG1, #STR
+ | lw CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | sltu AT, TMP1, CARG3
+ | bnez AT, ->fff_fallback
+ |. addu TMP3, CARG1, CARG3
+ | move CARG4, CARG2
+ |1: // ASCII case conversion.
+ | lbu TMP1, 0(CARG1)
+ | sltu AT, CARG1, TMP3
+ | beqz AT, ->fff_newstr
+ |. addiu TMP0, TMP1, -lo
+ | xori TMP2, TMP1, 0x20
+ | sltiu AT, TMP0, 26
+ | movn TMP1, TMP2, AT
+ | addiu CARG1, CARG1, 1
+ | sb TMP1, 0(CARG4)
+ | b <1
+ |. addiu CARG4, CARG4, 1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | li AT, LJ_TTAB
+ | bne CARG3, AT, ->fff_fallback
+ |. load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b ->fff_resi
+ |. nop
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_n bit_..name
+ |. add.d FARG1, FARG1, TOBIT
+ | mfc1 CRET1, FARG1
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | addiu TMP1, BASE, 8
+ | addu TMP2, BASE, NARGS8:RC
+ |1:
+ | lw CARG4, HI(TMP1)
+ | beq TMP1, TMP2, ->fff_resi
+ |. ldc1 FARG1, 0(TMP1)
+ | sltiu AT, CARG4, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. add.d FARG1, FARG1, TOBIT
+ | mfc1 CARG2, FARG1
+ | ins CRET1, CRET1, CARG2
+ | b <1
+ |. addiu TMP1, TMP1, 8
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | srl TMP0, CRET1, 24
+ | srl TMP2, CRET1, 8
+ | sll TMP1, CRET1, 24
+ | andi TMP2, TMP2, 0xff00
+ | or TMP0, TMP0, TMP1
+ | andi CRET1, CRET1, 0xff00
+ | or TMP0, TMP0, TMP2
+ | sll CRET1, CRET1, 8
+ | b ->fff_resi
+ |. or CRET1, TMP0, CRET1
+ |
+ |.ffunc_bit bnot
+ | b ->fff_resi
+ |. not CRET1, CRET1
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc_nn bit_..name
+ |. add.d FARG1, FARG1, TOBIT
+ | add.d FARG2, FARG2, TOBIT
+ | mfc1 CARG1, FARG1
+ | mfc1 CARG2, FARG2
+ |.if shmod == 1
+ | li AT, 32
+ | subu TMP0, AT, CARG2
+ | sllv CARG2, CARG1, CARG2
+ | srlv CARG1, CARG1, TMP0
+ |.elif shmod == 2
+ | li AT, 32
+ | subu TMP0, AT, CARG2
+ | srlv CARG2, CARG1, CARG2
+ | sllv CARG1, CARG1, TMP0
+ |.endif
+ | b ->fff_resi
+ |. ins CRET1, CARG1, CARG2
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, sllv, 0
+ |.ffunc_bit_sh rshift, srlv, 0
+ |.ffunc_bit_sh arshift, srav, 0
+ |// Can't use rotrv, since it's only in MIPS32R2.
+ |.ffunc_bit_sh rol, or, 1
+ |.ffunc_bit_sh ror, or, 2
+ |
+ |.ffunc_bit tobit
+ |->fff_resi:
+ | mtc1 CRET1, FRET1
+ | b ->fff_resn
+ |. cvt.d.w FRET1, FRET1
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lw TMP3, CFUNC:RB->f
+ | addu TMP1, BASE, NARGS8:RC
+ | lw PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addiu TMP0, TMP1, 8*LUA_MINSTACK
+ | lw TMP2, L->maxstack
+ | sw PC, SAVE_PC // Redundant (but a defined value).
+ | sltu AT, TMP2, TMP0
+ | sw BASE, L->base
+ | sw TMP1, L->top
+ | bnez AT, >5 // Need to grow stack.
+ |. move CFUNCADDR, TMP3
+ | jalr TMP3 // (lua_State *L)
+ |. move CARG1, L
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lw BASE, L->base
+ | sll RD, CRET1, 3
+ | bgtz CRET1, ->fff_res // Returned nresults+1?
+ |. addiu RA, BASE, -8
+ |1: // Returned 0 or -1: retry fast path.
+ | lw TMP0, L->top
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | bnez CRET1, ->vm_call_tail // Returned -1?
+ |. subu NARGS8:RC, TMP0, BASE
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi TMP0, PC, FRAME_TYPE
+ | li AT, -4
+ | bnez TMP0, >3
+ |. and TMP1, PC, AT
+ | lbu TMP1, OFS_RA(PC)
+ | sll TMP1, TMP1, 3
+ | addiu TMP1, TMP1, 8
+ |3:
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |. subu TMP2, BASE, TMP1
+ |
+ |5: // Grow stack for fallback handler.
+ | load_got lj_state_growstack
+ | li CARG2, LUA_MINSTACK
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | b <1
+ |. li CRET1, 0 // Force retry.
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | move MULTRES, ra
+ | load_got lj_gc_step
+ | sw BASE, L->base
+ | addu TMP0, BASE, NARGS8:RC
+ | sw PC, SAVE_PC // Redundant (but a defined value).
+ | sw TMP0, L->top
+ | call_intern lj_gc_step // (lua_State *L)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | move ra, MULTRES
+ | lw TMP0, L->top
+ | lw CFUNC:RB, FRAME_FUNC(BASE)
+ | jr ra
+ |. subu NARGS8:RC, TMP0, BASE
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bnez AT, >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE
+ | bnez AT, >1
+ |. addiu TMP2, TMP2, -1
+ | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, >1
+ |. nop
+ | b >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | beqz AT, >1
+ |5: // Re-dispatch to static ins.
+ |. lw AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4.
+ | jr AT
+ |. nop
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | bnez AT, <5
+ |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, <5
+ |. addiu TMP2, TMP2, -1
+ | beqz TMP2, >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, LUA_MASKLINE
+ | beqz AT, <5
+ |1:
+ |. load_got lj_dispatch_ins
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sw BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ |3:
+ | lw BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lw INS, -4(PC)
+ | decode_OP4a TMP1, INS
+ | decode_OP4b TMP1
+ | addu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | lw AT, GG_DISP2STATIC(TMP0)
+ | decode_RA8a RA, INS
+ | decode_RD8b RD
+ | jr AT
+ | decode_RA8b RA
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addiu PC, PC, 4
+ | b <4
+ |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | sw PC, SAVE_PC
+ | lw TMP1, LFUNC:TMP1->pc
+ | move CARG2, PC
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | lbu TMP1, PC2PROTO(framesize)(TMP1)
+ | load_got lj_trace_hot
+ | sw BASE, L->base
+ | sll TMP1, TMP1, 3
+ | addu TMP1, BASE, TMP1
+ | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ |. sw TMP1, L->top
+ | b <3
+ |. nop
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ |.if JIT
+ | b >1
+ |.endif
+ |. move CARG2, PC
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | load_got lj_dispatch_call
+ | addu TMP0, BASE, RC
+ | sw PC, SAVE_PC
+ | sw BASE, L->base
+ | subu RA, RA, BASE
+ | sw TMP0, L->top
+ | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // Returns ASMFunction.
+ | lw BASE, L->base
+ | lw TMP0, L->top
+ | sw r0, SAVE_PC // Invalidate for subsequent line hook.
+ | subu NARGS8:RC, TMP0, BASE
+ | addu RA, BASE, RA
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | jr CRET1
+ |. lw INS, -4(PC)
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b
+ | sdc1 f..a, 16+a*8(sp)
+ | sw r..a, 16+32*8+a*4(sp)
+ | sw r..b, 16+32*8+b*4(sp)
+ |.endmacro
+ |
+ |->vm_exit_handler:
+ |.if JIT
+ | addiu sp, sp, -(16+32*8+32*4)
+ | savex_ 0, 1
+ | savex_ 2, 3
+ | savex_ 4, 5
+ | savex_ 6, 7
+ | savex_ 8, 9
+ | savex_ 10, 11
+ | savex_ 12, 13
+ | savex_ 14, 15
+ | savex_ 16, 17
+ | savex_ 18, 19
+ | savex_ 20, 21
+ | savex_ 22, 23
+ | savex_ 24, 25
+ | savex_ 26, 27
+ | sdc1 f28, 16+28*8(sp)
+ | sw r28, 16+32*8+28*4(sp)
+ | sdc1 f30, 16+30*8(sp)
+ | sw r30, 16+32*8+30*4(sp)
+ | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP.
+ | li_vmstate EXIT
+ | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp.
+ | addiu DISPATCH, JGL, -GG_DISP2G-32768
+ | lw TMP1, 0(TMP2) // Load exit number.
+ | st_vmstate
+ | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP.
+ | lw L, DISPATCH_GL(jit_L)(DISPATCH)
+ | lw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | load_got lj_trace_exit
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
+ | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | sw BASE, L->base
+ | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
+ |. addiu CARG2, sp, 16
+ | // Returns MULTRES (unscaled) or negated error code.
+ | lw TMP1, L->cframe
+ | li AT, -4
+ | lw BASE, L->base
+ | and sp, TMP1, AT
+ | lw PC, SAVE_PC // Get SAVE_PC.
+ | b >1
+ |. sw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | lw L, SAVE_L
+ | addiu DISPATCH, JGL, -GG_DISP2G-32768
+ |1:
+ | bltz CRET1, >3 // Check for error from exit.
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | sll MULTRES, CRET1, 3
+ | li TISNIL, LJ_TNIL
+ | sw MULTRES, SAVE_MULTRES
+ | mtc1 TMP3, TOBIT
+ | lw TMP1, LFUNC:TMP1->pc
+ | sw r0, DISPATCH_GL(jit_L)(DISPATCH)
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | cvt.d.s TOBIT, TOBIT
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lw INS, 0(PC)
+ | addiu PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
+ | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OP4a TMP1, INS
+ | decode_OP4b TMP1
+ | sltiu TMP2, TMP1, BC_FUNCF*4 // Function header?
+ | addu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | lw AT, 0(TMP0)
+ | decode_RA8a RA, INS
+ | beqz TMP2, >2
+ |. decode_RA8b RA
+ | jr AT
+ |. decode_RD8b RD
+ |2:
+ | addiu RC, MULTRES, -8
+ | jr AT
+ |. addu RA, RA, BASE
+ |
+ |3: // Rethrow error from the right C frame.
+ | load_got lj_err_throw
+ | negu CARG2, CRET1
+ | call_intern lj_err_throw // (lua_State *L, int errcode)
+ |. move CARG1, L
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
+ |.macro vm_round, func
+ | lui TMP0, 0x4330 // Hiword of 2^52 (double).
+ | mtc1 r0, f4
+ | mtc1 TMP0, f5
+ | abs.d FRET2, FARG1 // |x|
+ | mfc1 AT, f13
+ | c.olt.d 0, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1f 0, >1 // Truncate only if |x| < 2^52.
+ |. sub.d FRET1, FRET1, f4
+ | slt AT, AT, r0
+ |.if "func" == "ceil"
+ | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
+ |.else
+ | lui TMP0, 0x3ff0 // Hiword of +1 (double).
+ |.endif
+ |.if "func" == "trunc"
+ | mtc1 TMP0, f5
+ | c.olt.d 0, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
+ | neg.d FRET2, FRET1
+ | jr ra
+ |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.else
+ | neg.d FRET2, FRET1
+ | mtc1 TMP0, f5
+ | movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.if "func" == "ceil"
+ | c.olt.d 0, FRET1, FARG1 // x > result?
+ |.else
+ | c.olt.d 0, FARG1, FRET1 // x < result?
+ |.endif
+ | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. movt.d FRET1, FRET2, 0
+ |.endif
+ |1:
+ | jr ra
+ |. mov.d FRET1, FARG1
+ |.endmacro
+ |
+ |->vm_floor:
+ | vm_round floor
+ |->vm_ceil:
+ | vm_round ceil
+ |->vm_trunc:
+ |.if JIT
+ | vm_round trunc
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r1, g in r2.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | lw CTSTATE, GL:r2->ctype_state
+ | addiu DISPATCH, r2, GG_G2DISP
+ | load_got lj_ccallback_enter
+ | sw r1, CTSTATE->cb.slot
+ | sw CARG1, CTSTATE->cb.gpr[0]
+ | sw CARG2, CTSTATE->cb.gpr[1]
+ | sdc1 FARG1, CTSTATE->cb.fpr[0]
+ | sw CARG3, CTSTATE->cb.gpr[2]
+ | sw CARG4, CTSTATE->cb.gpr[3]
+ | sdc1 FARG2, CTSTATE->cb.fpr[1]
+ | addiu TMP0, sp, CFRAME_SPACE+16
+ | sw TMP0, CTSTATE->cb.stack
+ | sw r0, SAVE_PC // Any value outside of bytecode is ok.
+ | move CARG2, sp
+ | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
+ |. move CARG1, CTSTATE
+ | // Returns lua_State *.
+ | lw BASE, L:CRET1->base
+ | lw RC, L:CRET1->top
+ | move L, CRET1
+ | lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | mtc1 TMP3, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | subu RC, RC, BASE
+ | st_vmstate
+ | cvt.d.s TOBIT, TOBIT
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | load_got lj_ccallback_leave
+ | lw CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | sw BASE, L->base
+ | sw RB, L->top
+ | sw L, CTSTATE->L
+ | move CARG2, RA
+ | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
+ |. move CARG1, CTSTATE
+ | lw CRET1, CTSTATE->cb.gpr[0]
+ | ldc1 FRET1, CTSTATE->cb.fpr[0]
+ | lw CRET2, CTSTATE->cb.gpr[1]
+ | b ->vm_leave_unw
+ |. ldc1 FRET2, CTSTATE->cb.fpr[1]
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ | .type CCSTATE, CCallState, CARG1
+ | lw TMP1, CCSTATE->spadj
+ | lbu CARG2, CCSTATE->nsp
+ | move TMP2, sp
+ | subu sp, sp, TMP1
+ | sw ra, -4(TMP2)
+ | sll CARG2, CARG2, 2
+ | sw r16, -8(TMP2)
+ | sw CCSTATE, -12(TMP2)
+ | move r16, TMP2
+ | addiu TMP1, CCSTATE, offsetof(CCallState, stack)
+ | addiu TMP2, sp, 16
+ | beqz CARG2, >2
+ |. addu TMP3, TMP1, CARG2
+ |1:
+ | lw TMP0, 0(TMP1)
+ | addiu TMP1, TMP1, 4
+ | sltu AT, TMP1, TMP3
+ | sw TMP0, 0(TMP2)
+ | bnez AT, <1
+ |. addiu TMP2, TMP2, 4
+ |2:
+ | lw CFUNCADDR, CCSTATE->func
+ | lw CARG2, CCSTATE->gpr[1]
+ | lw CARG3, CCSTATE->gpr[2]
+ | lw CARG4, CCSTATE->gpr[3]
+ | ldc1 FARG1, CCSTATE->fpr[0]
+ | ldc1 FARG2, CCSTATE->fpr[1]
+ | jalr CFUNCADDR
+ |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | lw CCSTATE:TMP1, -12(r16)
+ | lw TMP2, -8(r16)
+ | lw ra, -4(r16)
+ | sw CRET1, CCSTATE:TMP1->gpr[0]
+ | sw CRET2, CCSTATE:TMP1->gpr[1]
+ | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
+ | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
+ | move sp, r16
+ | jr ra
+ |. move r16, TMP2
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | addu CARG2, BASE, RA
+ | addu CARG3, BASE, RD
+ | lw TMP0, HI(CARG2)
+ | lw TMP1, HI(CARG3)
+ | ldc1 f0, 0(CARG2)
+ | ldc1 f2, 0(CARG3)
+ | sltiu TMP0, TMP0, LJ_TISNUM
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | lhu TMP2, OFS_RD(PC)
+ | and TMP0, TMP0, TMP1
+ | addiu PC, PC, 4
+ | beqz TMP0, ->vmeta_comp
+ |. lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | decode_RD4b TMP2
+ | addu TMP2, TMP2, TMP1
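+ | // decode_RD4b leaves target*4 in TMP2; adding -(BCBIAS_J*4) (built by the
+ | // lui above) removes the 0x8000 branch bias, giving the signed PC
+ | // displacement in bytes.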
+ if (op == BC_ISLT || op == BC_ISGE) {
+ | c.olt.d f0, f2
+ } else {
+ | c.ole.d f0, f2
+ }
+ if (op == BC_ISLT || op == BC_ISLE) {
+ | movf TMP2, r0
+ } else {
+ | movt TMP2, r0
+ }
+ | addu PC, PC, TMP2
+ |1:
+ | ins_next
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | ldc1 f0, 0(RA)
+ | addu RD, BASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ | lw TMP1, HI(RD)
+ | ldc1 f2, 0(RD)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | sltiu CARG1, TMP1, LJ_TISNUM
+ | decode_RD4b TMP2
+ | and AT, AT, CARG1
+ | beqz AT, >5
+ |. addu TMP2, TMP2, TMP3
+ | c.eq.d f0, f2
+ if (vk) {
+ | movf TMP2, r0
+ } else {
+ | movt TMP2, r0
+ }
+ |1:
+ | addu PC, PC, TMP2
+ | ins_next
+ |5: // Either or both types are not numbers.
+ | lw CARG2, LO(RA)
+ | lw CARG3, LO(RD)
+ |.if FFI
+ | li TMP3, LJ_TCDATA
+ | beq TMP0, TMP3, ->vmeta_equal_cd
+ |.endif
+ |. sltiu AT, TMP0, LJ_TISPRI // Not a primitive?
+ |.if FFI
+ | beq TMP1, TMP3, ->vmeta_equal_cd
+ |.endif
+ |. xor TMP3, CARG2, CARG3 // Same tv?
+ | xor TMP1, TMP1, TMP0 // Same type?
+ | sltiu CARG1, TMP0, LJ_TISTABUD+1 // Table or userdata?
+ | movz TMP3, r0, AT // Ignore tv if primitive.
+ | movn CARG1, r0, TMP1 // Tab/ud and same type?
+ | or AT, TMP1, TMP3 // Same type && (pri||same tv).
+ | movz CARG1, r0, AT
+ | beqz CARG1, <1 // Done if not tab/ud or not same type or same tv.
+ if (vk) {
+ |. movn TMP2, r0, AT
+ } else {
+ |. movz TMP2, r0, AT
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lw TAB:TMP1, TAB:CARG2->metatable
+ | beqz TAB:TMP1, <1 // No metatable?
+ |. nop
+ | lbu TMP1, TAB:TMP1->nomm
+ | andi TMP1, TMP1, 1<<MM_eq
+ | bnez TMP1, <1 // Or 'no __eq' flag set?
+ |. nop
+ | b ->vmeta_equal // Handle __eq metamethod.
+ |. li CARG4, 1-vk // ne = 0 or 1.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | srl RD, RD, 1
+ | lw STR:TMP3, LO(RA)
+ | subu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ |.if FFI
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. lw STR:TMP1, -4(RD) // KBASE-4-str_const*4
+ | addiu TMP0, TMP0, -LJ_TSTR
+ | decode_RD4b TMP2
+ | xor TMP1, STR:TMP1, STR:TMP3
+ | or TMP0, TMP0, TMP1
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | ldc1 f0, 0(RA)
+ | addu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ | ldc1 f2, 0(RD)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | decode_RD4b TMP2
+ |.if FFI
+ | beqz AT, >5
+ |.else
+ | beqz AT, >1
+ |.endif
+ |. addu TMP2, TMP2, TMP3
+ | c.eq.d f0, f2
+ if (vk) {
+ | movf TMP2, r0
+ | addu PC, PC, TMP2
+ |1:
+ } else {
+ | movt TMP2, r0
+ |1:
+ | addu PC, PC, TMP2
+ }
+ | ins_next
+ |.if FFI
+ |5:
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |. nop
+ | b <1
+ |. nop
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | addu RA, BASE, RA
+ | srl TMP1, RD, 3
+ | lw TMP0, HI(RA)
+ | lhu TMP2, OFS_RD(PC)
+ | not TMP1, TMP1
+ | addiu PC, PC, 4
+ |.if FFI
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. xor TMP0, TMP0, TMP1
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | addu RD, BASE, RD
+ | lhu TMP2, OFS_RD(PC)
+ | lw TMP0, HI(RD)
+ | addiu PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (op == BC_IST) {
+ | movz TMP2, r0, TMP0
+ } else {
+ | movn TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ } else {
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | ldc1 f0, 0(RD)
+ if (op == BC_ISTC) {
+ | beqz TMP0, >1
+ } else {
+ | bnez TMP0, >1
+ }
+ |. addu RA, BASE, RA
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ | sdc1 f0, 0(RA)
+ | addu PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | addu RD, BASE, RD
+ | addu RA, BASE, RA
+ | ldc1 f0, 0(RD)
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | addu RD, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(RD)
+ | li TMP1, LJ_TFALSE
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | addiu TMP1, TMP0, LJ_TTRUE
+ | ins_next1
+ | sw TMP1, HI(RA)
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | addu CARG3, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(CARG3)
+ | ldc1 f0, 0(CARG3)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | beqz AT, ->vmeta_unm
+ |. neg.d f0, f0
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | addu CARG2, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(CARG2)
+ | lw CARG1, LO(CARG2)
+ | li AT, LJ_TSTR
+ | bne TMP0, AT, >2
+ |. li AT, LJ_TTAB
+ | lw CRET1, STR:CARG1->len
+ |1:
+ | mtc1 CRET1, f0
+ | cvt.d.w f0, f0
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |2:
+ | bne TMP0, AT, ->vmeta_len
+ |. nop
+#if LJ_52
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | bnez TAB:TMP2, >9
+ |. nop
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |. nop
+#if LJ_52
+ |9:
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_len
+ | bnez TMP0, <3 // 'no __len' flag set: done.
+ |. nop
+ | b ->vmeta_len
+ |. nop
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||switch (vk) {
+ ||case 0:
+ | addu CARG3, BASE, RB
+ | addu CARG4, KBASE, RC
+ | lw TMP1, HI(CARG3)
+ | ldc1 f20, 0(CARG3)
+ | ldc1 f22, 0(CARG4)
+ | sltiu AT, TMP1, LJ_TISNUM
+ || break;
+ ||case 1:
+ | addu CARG4, BASE, RB
+ | addu CARG3, KBASE, RC
+ | lw TMP1, HI(CARG4)
+ | ldc1 f22, 0(CARG4)
+ | ldc1 f20, 0(CARG3)
+ | sltiu AT, TMP1, LJ_TISNUM
+ || break;
+ ||default:
+ | addu CARG3, BASE, RB
+ | addu CARG4, BASE, RC
+ | lw TMP1, HI(CARG3)
+ | lw TMP2, HI(CARG4)
+ | ldc1 f20, 0(CARG3)
+ | ldc1 f22, 0(CARG4)
+ | sltiu AT, TMP1, LJ_TISNUM
+ | sltiu TMP0, TMP2, LJ_TISNUM
+ | and AT, AT, TMP0
+ || break;
+ ||}
+ | beqz AT, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.endmacro
+ |
+ |.macro fpmod, a, b, c
+ |->BC_MODVN_Z:
+ | bal ->vm_floor // floor(b/c)
+ |. div.d FARG1, b, c
+ | mul.d a, FRET1, c
+ | sub.d a, b, a // b - floor(b/c)*c
+ |.endmacro
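+ | // fpmod computes a = b - floor(b/c)*c. Example: b = -5, c = 3 gives
+ | // floor(-5/3) = -2 and a = -5 - (-2)*3 = 1, i.e. the result takes the
+ | // sign of the divisor, as required by Lua's modulo semantics.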
+ |
+ |.macro ins_arith, ins
+ | ins_arithpre
+ |.if "ins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |. nop
+ |.else
+ | ins f0, f20, f22
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |.endif
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith add.d
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith sub.d
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mul.d
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith div.d
+ break;
+ case BC_MODVN:
+ | ins_arith fpmod
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arith fpmod_
+ break;
+ case BC_POW:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG3, BASE, RB
+ | addu CARG4, BASE, RC
+ | lw TMP1, HI(CARG3)
+ | lw TMP2, HI(CARG4)
+ | ldc1 FARG1, 0(CARG3)
+ | ldc1 FARG2, 0(CARG4)
+ | sltiu AT, TMP1, LJ_TISNUM
+ | sltiu TMP0, TMP2, LJ_TISNUM
+ | and AT, AT, TMP0
+ | load_got pow
+ | beqz AT, ->vmeta_arith
+ |. addu RA, BASE, RA
+ | call_extern
+ |. nop
+ | ins_next1
+ | sdc1 FRET1, 0(RA)
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | subu CARG3, RC, RB
+ | sw BASE, L->base
+ | addu CARG2, BASE, RC
+ | move MULTRES, RB
+ |->BC_CAT_Z:
+ | load_got lj_meta_cat
+ | srl CARG3, CARG3, 3
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | bnez CRET1, ->vmeta_binop
+ |. lw BASE, L->base
+ | addu RB, BASE, MULTRES
+ | ldc1 f0, 0(RB)
+ | addu RA, BASE, RA
+ | ins_next1
+ | sdc1 f0, 0(RA) // Copy result from RB to RA.
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | ins_next1
+ | lw TMP0, -4(TMP1) // KBASE-4-str_const*4
+ | addu RA, BASE, RA
+ | li TMP2, LJ_TSTR
+ | sw TMP0, LO(RA)
+ | sw TMP2, HI(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | ins_next1
+ | lw TMP0, -4(TMP1) // KBASE-4-cdata_const*4
+ | addu RA, BASE, RA
+ | li TMP2, LJ_TCDATA
+ | sw TMP0, LO(RA)
+ | sw TMP2, HI(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | sra RD, INS, 16
+ | mtc1 RD, f0
+ | addu RA, BASE, RA
+ | cvt.d.w f0, f0
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | addu RD, KBASE, RD
+ | addu RA, BASE, RA
+ | ldc1 f0, 0(RD)
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srl TMP1, RD, 3
+ | addu RA, BASE, RA
+ | not TMP0, TMP1
+ | ins_next1
+ | sw TMP0, HI(RA)
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | addu RA, BASE, RA
+ | sw TISNIL, HI(RA)
+ | addiu RA, RA, 8
+ | addu RD, BASE, RD
+ |1:
+ | sw TISNIL, HI(RA)
+ | slt AT, RA, RD
+ | bnez AT, <1
+ |. addiu RA, RA, 8
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RD, RD, 1
+ | addu RD, RD, LFUNC:RB
+ | lw UPVAL:RB, LFUNC:RD->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | ldc1 f0, 0(TMP1)
+ | addu RA, BASE, RA
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | addu RD, BASE, RD
+ | addu RA, RA, LFUNC:RB
+ | ldc1 f0, 0(RD)
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lbu TMP3, UPVAL:RB->marked
+ | lw CARG2, UPVAL:RB->v
+ | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP0, UPVAL:RB->closed
+ | lw TMP2, HI(RD)
+ | sdc1 f0, 0(CARG2)
+ | li AT, LJ_GC_BLACK|1
+ | or TMP3, TMP3, TMP0
+ | beq TMP3, AT, >2 // Upvalue is closed and black?
+ |. addiu TMP2, TMP2, -(LJ_TISNUM+1)
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | sltiu AT, TMP2, LJ_TISGCV - (LJ_TISNUM+1)
+ | beqz AT, <1 // tvisgcv(v)
+ |. lw TMP1, LO(RD)
+ | lbu TMP3, GCOBJ:TMP1->gch.marked
+ | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | beqz TMP3, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. addiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | srl TMP1, RD, 1
+ | addu RA, RA, LFUNC:RB
+ | subu TMP1, KBASE, TMP1
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lw STR:TMP1, -4(TMP1) // KBASE-4-str_const*4
+ | lbu TMP2, UPVAL:RB->marked
+ | lw CARG2, UPVAL:RB->v
+ | lbu TMP3, STR:TMP1->marked
+ | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | sw STR:TMP1, LO(CARG2)
+ | bnez AT, >2
+ |. sw TMP0, HI(CARG2)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | beqz TMP2, <1
+ |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
+ | beqz AT, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. addiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | addu RD, KBASE, RD
+ | addu RA, RA, LFUNC:RB
+ | ldc1 f0, 0(RD)
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | sdc1 f0, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | srl TMP0, RD, 3
+ | addu RA, RA, LFUNC:RB
+ | not TMP0, TMP0
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | sw TMP0, HI(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lw TMP2, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | load_got lj_func_closeuv
+ | sw BASE, L->base
+ | beqz TMP2, >1
+ |. move CARG1, L
+ | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
+ |. addu CARG2, BASE, RA
+ | lw BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srl TMP1, RD, 1
+ | load_got lj_func_newL_gc
+ | subu TMP1, KBASE, TMP1
+ | lw CARG3, FRAME_FUNC(BASE)
+ | lw CARG2, -4(TMP1) // KBASE-4-tab_const*4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call_intern lj_func_newL_gc
+ |. move CARG1, L
+ | // Returns GCfuncL *.
+ | lw BASE, L->base
+ | li TMP0, LJ_TFUNC
+ | ins_next1
+ | addu RA, BASE, RA
+ | sw TMP0, HI(RA)
+ | sw LFUNC:CRET1, LO(RA)
+ | ins_next2
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | sltu AT, TMP0, TMP1
+ | beqz AT, >5
+ |1:
+ if (op == BC_TNEW) {
+ | load_got lj_tab_new
+ | srl CARG2, RD, 3
+ | andi CARG2, CARG2, 0x7ff
+ | li TMP0, 0x801
+ | addiu AT, CARG2, -0x7ff
+ | srl CARG3, RD, 14
+ | movz CARG2, TMP0, AT
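+ | // CARG2 = asize (D & 0x7ff), CARG3 = hbits (D >> 11); the special asize
+ | // value 0x7ff is replaced by 0x801 before calling lj_tab_new.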
+ | // (lua_State *L, int32_t asize, uint32_t hbits)
+ | call_intern lj_tab_new
+ |. move CARG1, L
+ | // Returns Table *.
+ } else {
+ | load_got lj_tab_dup
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | move CARG1, L
+ | call_intern lj_tab_dup // (lua_State *L, Table *kt)
+ |. lw CARG2, -4(TMP1) // KBASE-4-str_const*4
+ | // Returns Table *.
+ }
+ | lw BASE, L->base
+ | ins_next1
+ | addu RA, BASE, RA
+ | li TMP0, LJ_TTAB
+ | sw TAB:CRET1, LO(RA)
+ | sw TMP0, HI(RA)
+ | ins_next2
+ |5:
+ | load_got lj_gc_step_fixtop
+ | move MULTRES, RD
+ | call_intern lj_gc_step_fixtop // (lua_State *L)
+ |. move CARG1, L
+ | b <1
+ |. move RD, MULTRES
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lw LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | lw TAB:RB, LFUNC:TMP2->env
+ | lw STR:RC, -4(TMP1) // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ |. addu RA, BASE, RA
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | ldc1 f0, 0(CARG3)
+ | bne TMP1, AT, ->vmeta_tgetv
+ |. addu RA, BASE, RA
+ | sltiu AT, TMP2, LJ_TISNUM
+ | beqz AT, >5
+ |. li AT, LJ_TSTR
+ |
+ | // Convert number key to integer, check for integerness and range.
+ | cvt.w.d f2, f0
+ | lw TMP0, TAB:RB->asize
+ | mfc1 TMP2, f2
+ | cvt.d.w f4, f2
+ | lw TMP1, TAB:RB->array
+ | c.eq.d f0, f4
+ | sltu AT, TMP2, TMP0
+ | movf AT, r0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tgetv // Integer key and in array part?
+ |. addu TMP2, TMP1, TMP2
+ | lw TMP0, HI(TMP2)
+ | beq TMP0, TISNIL, >2
+ |. ldc1 f0, 0(TMP2)
+ |1:
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetv
+ |. nop
+ |
+ |5:
+ | bne TMP2, AT, ->vmeta_tgetv
+ |. lw STR:RC, LO(CARG3)
+ | b ->BC_TGETS_Z // String key?
+ |. nop
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*4 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | subu CARG3, KBASE, RC
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tgets1
+ |. addu RA, BASE, RA
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
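+ | // idx*32 - idx*8 = idx*24 = idx * sizeof(Node) (24 bytes on this 32-bit
+ | // layout).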
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP1, NODE:TMP2->next
+ | lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | addiu CARG1, CARG1, -LJ_TSTR
+ | xor TMP0, TMP0, STR:RC
+ | or AT, CARG1, TMP0
+ | bnez AT, >4
+ |. lw TAB:TMP3, TAB:RB->metatable
+ | beq CARG2, TISNIL, >5 // Key found, but nil value?
+ |. lw CARG1, offsetof(Node, val)+LO(NODE:TMP2)
+ |3:
+ | ins_next1
+ | sw CARG2, HI(RA)
+ | sw CARG1, LO(RA)
+ | ins_next2
+ |
+ |4: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | beqz TAB:TMP3, <3 // No metatable: done.
+ |. li CARG2, LJ_TNIL
+ | lbu TMP0, TAB:TMP3->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <3 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgets
+ |. nop
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tgetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tgetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | beq TMP1, TISNIL, >5
+ |. ldc1 f0, 0(RC)
+ |1:
+ | ins_next1
+ | sdc1 f0, 0(RA)
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_index
+ | bnez TMP1, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ |. nop
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | ldc1 f0, 0(CARG3)
+ | bne TMP1, AT, ->vmeta_tsetv
+ |. addu RA, BASE, RA
+ | sltiu AT, TMP2, LJ_TISNUM
+ | beqz AT, >5
+ |. li AT, LJ_TSTR
+ |
+ | // Convert number key to integer, check for integerness and range.
+ | cvt.w.d f2, f0
+ | lw TMP0, TAB:RB->asize
+ | mfc1 TMP2, f2
+ | cvt.d.w f4, f2
+ | lw TMP1, TAB:RB->array
+ | c.eq.d f0, f4
+ | sltu AT, TMP2, TMP0
+ | movf AT, r0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tsetv // Integer key and in array part?
+ |. addu TMP1, TMP1, TMP2
+ | lbu TMP3, TAB:RB->marked
+ | lw TMP0, HI(TMP1)
+ | beq TMP0, TISNIL, >3
+ |. ldc1 f0, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f0, 0(TMP1)
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP2, TAB:TMP2->nomm
+ | andi TMP2, TMP2, 1<<MM_newindex
+ | bnez TMP2, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetv
+ |. nop
+ |
+ |5:
+ | bne TMP2, AT, ->vmeta_tsetv
+ |. lw STR:RC, LO(CARG3)
+ | b ->BC_TSETS_Z // String key?
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | subu CARG3, KBASE, RC
+ | lw TAB:RB, LO(CARG2)
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tsets1
+ |. addu RA, BASE, RA
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->hash
+ | lw NODE:TMP2, TAB:RB->node
+ | sb r0, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | ldc1 f20, 0(RA)
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | li AT, LJ_TSTR
+ | lw NODE:TMP1, NODE:TMP2->next
+ | bne CARG1, AT, >5
+ |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | bne TMP0, STR:RC, >5
+ |. lbu TMP3, TAB:RB->marked
+ | beq CARG2, TISNIL, >4 // Key found, but nil value?
+ |. lw TAB:TMP0, TAB:RB->metatable
+ |2:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f20, NODE:TMP2->val
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | beqz TAB:TMP0, <2 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP0->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | bnez TMP0, <2 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsets
+ |. nop
+ |
+ |5: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, add a new one
+ |
+ | // But check for __newindex first.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, >6 // No metatable: continue.
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |. li AT, LJ_TSTR
+ |6:
+ | load_got lj_tab_newkey
+ | sw STR:RC, LO(CARG3)
+ | sw AT, HI(CARG3)
+ | sw BASE, L->base
+ | move CARG2, TAB:RB
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | lw BASE, L->base
+ | b <3 // No 2nd write barrier needed.
+ |. sdc1 f20, 0(CRET1)
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tsetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tsetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP1, TISNIL, >5
+ |. ldc1 f0, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sdc1 f0, 0(RC)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_newindex
+ | bnez TMP1, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | addu RA, BASE, RA
+ |1:
+ | addu TMP3, KBASE, RD
+ | lw TAB:CARG2, -8+LO(RA) // Guaranteed to be a table.
+ | addiu TMP0, MULTRES, -8
+ | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
+ | beqz TMP0, >4 // Nothing to copy?
+ |. srl CARG3, TMP0, 3
+ | addu CARG3, CARG3, TMP3
+ | lw TMP2, TAB:CARG2->asize
+ | sll TMP1, TMP3, 3
+ | lbu TMP3, TAB:CARG2->marked
+ | lw CARG1, TAB:CARG2->array
+ | sltu AT, TMP2, CARG3
+ | bnez AT, >5
+ |. addu TMP2, RA, TMP0
+ | addu TMP1, TMP1, CARG1
+ | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | sltu AT, RA, TMP2
+ | sdc1 f0, 0(TMP1)
+ | bnez AT, <3
+ |. addiu TMP1, TMP1, 8
+ | bnez TMP0, >7
+ |. nop
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | load_got lj_tab_reasize
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | move BASE, RD
+ | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ |. move CARG1, L
+ | // Must not reallocate the stack.
+ | move RD, BASE
+ | b <1
+ |. lw BASE, L->base // Reload BASE for lack of a saved register.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0, <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ | b ->BC_CALL_Z
+ |. addu NARGS8:RC, NARGS8:RC, MULTRES
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ |->BC_CALL_Z:
+ | move TMP2, BASE
+ | addu BASE, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP0, HI(BASE)
+ | lw LFUNC:RB, LO(BASE)
+ | addiu BASE, BASE, 8
+ | bne TMP0, AT, ->vmeta_call
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | addu RA, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP0, HI(RA)
+ | lw LFUNC:RB, LO(RA)
+ | move NARGS8:RC, RD
+ | lw TMP1, FRAME_PC(BASE)
+ | addiu RA, RA, 8
+ | bne TMP0, AT, ->vmeta_callt
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ |->BC_CALLT_Z:
+ | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
+ | lbu TMP3, LFUNC:RB->ffid
+ | bnez TMP0, >7
+ |. xori TMP2, TMP1, FRAME_VARG
+ |1:
+ | sw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function?
+ | move TMP2, BASE
+ | beqz NARGS8:RC, >3
+ |. move TMP3, NARGS8:RC
+ |2:
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | addiu TMP3, TMP3, -8
+ | sdc1 f0, 0(TMP2)
+ | bnez TMP3, <2
+ |. addiu TMP2, TMP2, 8
+ |3:
+ | or TMP0, TMP0, AT
+ | beqz TMP0, >5
+ |. nop
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lw INS, -4(TMP1)
+ | decode_RA8a RA, INS
+ | decode_RA8b RA
+ | subu TMP1, BASE, RA
+ | lw LFUNC:TMP1, -8+FRAME_FUNC(TMP1)
+ | lw TMP1, LFUNC:TMP1->pc
+ | b <4
+ |. lw KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ |
+ |7: // Tailcall from a vararg function.
+ | andi AT, TMP2, FRAME_TYPEP
+ | bnez AT, <1 // Vararg frame below?
+ |. subu TMP2, BASE, TMP2 // Relocate BASE down.
+ | move BASE, TMP2
+ | lw TMP1, FRAME_PC(TMP2)
+ | b <1
+ |. andi TMP0, TMP1, FRAME_TYPE
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | move TMP2, BASE
+ | addu BASE, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP1, -24+HI(BASE)
+ | lw LFUNC:RB, -24+LO(BASE)
+ | ldc1 f2, -8(BASE)
+ | ldc1 f0, -16(BASE)
+ | sw TMP1, HI(BASE) // Copy callable.
+ | sw LFUNC:RB, LO(BASE)
+ | sdc1 f2, 16(BASE) // Copy control var.
+ | sdc1 f0, 8(BASE) // Copy state.
+ | addiu BASE, BASE, 8
+ | bne TMP1, AT, ->vmeta_call
+ |. li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+ |.if JIT
+ | // NYI: add hotloop, record BC_ITERN.
+ |.endif
+ | addu RA, BASE, RA
+ | lw TAB:RB, -16+LO(RA)
+ | lw RC, -8+LO(RA) // Get index from control var.
+ | lw TMP0, TAB:RB->asize
+ | lw TMP1, TAB:RB->array
+ | addiu PC, PC, 4
+ |1: // Traverse array part.
+ | sltu AT, RC, TMP0
+ | beqz AT, >5 // Index points after array part?
+ |. sll TMP3, RC, 3
+ | addu TMP3, TMP1, TMP3
+ | lw TMP2, HI(TMP3)
+ | ldc1 f0, 0(TMP3)
+ | mtc1 RC, f2
+ | lhu RD, -4+OFS_RD(PC)
+ | beq TMP2, TISNIL, <1 // Skip holes in array part.
+ |. addiu RC, RC, 1
+ | cvt.d.w f2, f2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sdc1 f0, 8(RA)
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sw RC, -8+LO(RA) // Update control var.
+ | addu PC, PC, RD
+ | sdc1 f2, 0(RA)
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | lw TMP1, TAB:RB->hmask
+ | subu RC, RC, TMP0
+ | lw TMP2, TAB:RB->node
+ |6:
+ | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
+ | bnez AT, <3
+ |. sll TMP3, RC, 5
+ | sll RB, RC, 3
+ | subu TMP3, TMP3, RB
+ | addu NODE:TMP3, TMP3, TMP2
+ | lw RB, HI(NODE:TMP3)
+ | ldc1 f0, 0(NODE:TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | beq RB, TISNIL, <6 // Skip holes in hash part.
+ |. addiu RC, RC, 1
+ | ldc1 f2, NODE:TMP3->key
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sdc1 f0, 8(RA)
+ | addu RC, RC, TMP0
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sdc1 f2, 0(RA)
+ | addu PC, PC, RD
+ | b <3
+ |. sw RC, -8+LO(RA) // Update control var.
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | addu RA, BASE, RA
+ | lw TMP0, -24+HI(RA)
+ | lw CFUNC:TMP1, -24+LO(RA)
+ | lw TMP2, -16+HI(RA)
+ | lw TMP3, -8+HI(RA)
+ | li AT, LJ_TFUNC
+ | bne TMP0, AT, >5
+ |. addiu TMP2, TMP2, -LJ_TTAB
+ | lbu TMP1, CFUNC:TMP1->ffid
+ | addiu TMP3, TMP3, -LJ_TNIL
+ | srl TMP0, RD, 1
+ | or TMP2, TMP2, TMP3
+ | addiu TMP1, TMP1, -FF_next_N
+ | addu TMP0, PC, TMP0
+ | or TMP1, TMP1, TMP2
+ | bnez TMP1, >5
+ |. lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu PC, TMP0, TMP2
+ | lui TMP1, 0xfffe
+ | ori TMP1, TMP1, 0x7fff
+ | sw r0, -8+LO(RA) // Initialize control var.
+ | sw TMP1, -8+HI(RA)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP3, BC_JMP
+ | li TMP1, BC_ITERC
+ | sb TMP3, -4+OFS_OP(PC)
+ | addu PC, TMP0, TMP2
+ | b <1
+ |. sb TMP1, OFS_OP(PC)
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lw TMP0, FRAME_PC(BASE)
+ | decode_RDtoRC8 RC, RD
+ | decode_RB8a RB, INS
+ | addu RC, BASE, RC
+ | decode_RB8b RB
+ | addu RA, BASE, RA
+ | addiu RC, RC, FRAME_VARG
+ | addu TMP2, RA, RB
+ | addiu TMP3, BASE, -8 // TMP3 = vtop
+ | subu RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | beqz RB, >5 // Copy all varargs?
+ |. subu TMP1, TMP3, RC
+ | addiu TMP2, TMP2, -16
+ |1: // Copy vararg slots to destination slots.
+ | lw CARG1, HI(RC)
+ | sltu AT, RC, TMP3
+ | lw CARG2, LO(RC)
+ | addiu RC, RC, 8
+ | movz CARG1, TISNIL, AT
+ | sw CARG1, HI(RA)
+ | sw CARG2, LO(RA)
+ | sltu AT, RA, TMP2
+ | bnez AT, <1
+ |. addiu RA, RA, 8
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lw TMP0, L->maxstack
+ | blez TMP1, <3 // No vararg slots?
+ |. li MULTRES, 8 // MULTRES = (0+1)*8
+ | addu TMP2, RA, TMP1
+ | sltu AT, TMP0, TMP2
+ | bnez AT, >7
+ |. addiu MULTRES, TMP1, 8
+ |6:
+ | ldc1 f0, 0(RC)
+ | addiu RC, RC, 8
+ | sdc1 f0, 0(RA)
+ | sltu AT, RC, TMP3
+ | bnez AT, <6 // More vararg slots?
+ |. addiu RA, RA, 8
+ | b <3
+ |. nop
+ |
+ |7: // Grow stack for varargs.
+ | load_got lj_state_growstack
+ | sw RA, L->top
+ | subu RA, RA, BASE
+ | sw BASE, L->base
+ | subu BASE, RC, BASE // Need delta, because BASE may change.
+ | sw PC, SAVE_PC
+ | srl CARG2, TMP1, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | move RC, BASE
+ | lw BASE, L->base
+ | addu RA, BASE, RA
+ | addu RC, BASE, RC
+ | b <6
+ |. addiu TMP3, BASE, -8
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lw PC, FRAME_PC(BASE)
+ | addu RA, BASE, RA
+ | move MULTRES, RD
+ |1:
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lw INS, -4(PC)
+ | addiu TMP2, BASE, -8
+ | addiu RC, RD, -8
+ | decode_RA8a TMP0, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b TMP0
+ | decode_RB8b RB
+ | addu TMP3, TMP2, RB
+ | beqz RC, >3
+ |. subu BASE, TMP2, TMP0
+ |2:
+ | ldc1 f0, 0(RA)
+ | addiu RA, RA, 8
+ | addiu RC, RC, -8
+ | sdc1 f0, 0(TMP2)
+ | bnez RC, <2
+ |. addiu TMP2, TMP2, 8
+ |3:
+ | addiu TMP3, TMP3, -8
+ |5:
+ | sltu AT, TMP2, TMP3
+ | bnez AT, >6
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lw TMP1, LFUNC:TMP1->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | sw TISNIL, HI(TMP2)
+ | b <5
+ |. addiu TMP2, TMP2, 8
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi TMP2, TMP1, FRAME_TYPEP
+ | bnez TMP2, ->vm_return
+ |. nop
+ | // Return from vararg function: relocate BASE down.
+ | subu BASE, BASE, TMP1
+ | b <1
+ |. lw PC, FRAME_PC(BASE)
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lw PC, FRAME_PC(BASE)
+ | addu RA, BASE, RA
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ | lw INS, -4(PC)
+ | addiu TMP2, BASE, -8
+ if (op == BC_RET1) {
+ | ldc1 f0, 0(RA)
+ }
+ | decode_RB8a RB, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b RB
+ | decode_RA8b RA
+ if (op == BC_RET1) {
+ | sdc1 f0, 0(TMP2)
+ }
+ | subu BASE, TMP2, RA
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lw TMP1, LFUNC:TMP1->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | addiu TMP2, TMP2, 8
+ | addiu RD, RD, 8
+ | b <5
+ if (op == BC_RET1) {
+ |. sw TISNIL, HI(TMP2)
+ } else {
+ |. sw TISNIL, -8+HI(TMP2)
+ }
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | addu RA, BASE, RA
+ if (vk) {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f4, FORL_STEP*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | lw TMP3, FORL_STEP*8+HI(RA)
+ | add.d f0, f0, f4
+ | sdc1 f0, FORL_IDX*8(RA)
+ } else {
+ | lw TMP1, FORL_IDX*8+HI(RA)
+ | lw TMP3, FORL_STEP*8+HI(RA)
+ | lw TMP2, FORL_STOP*8+HI(RA)
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | sltiu TMP0, TMP3, LJ_TISNUM
+ | sltiu TMP2, TMP2, LJ_TISNUM
+ | and TMP1, TMP1, TMP0
+ | and TMP1, TMP1, TMP2
+ | ldc1 f0, FORL_IDX*8(RA)
+ | beqz TMP1, ->vmeta_for
+ |. ldc1 f2, FORL_STOP*8(RA)
+ }
+ if (op != BC_JFORL) {
+ | srl RD, RD, 1
+ | lui TMP0, (-(BCBIAS_J*4 >> 16) & 65535)
+ }
+ | c.le.d 0, f0, f2
+ | c.le.d 1, f2, f0
+ | sdc1 f0, FORL_EXT*8(RA)
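+ | // FP condition 0 = (idx <= stop), condition 1 = (stop <= idx); the sign of
+ | // step (hi word in TMP3) selects which condition decides whether the loop
+ | // is (re)entered.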
+ if (op == BC_JFORI) {
+ | li TMP1, 1
+ | li TMP2, 1
+ | addu TMP0, RD, TMP0
+ | slt TMP3, TMP3, r0
+ | movf TMP1, r0, 0
+ | addu PC, PC, TMP0
+ | movf TMP2, r0, 1
+ | lhu RD, -4+OFS_RD(PC)
+ | movn TMP1, TMP2, TMP3
+ | bnez TMP1, =>BC_JLOOP
+ |. decode_RD8b RD
+ } else if (op == BC_JFORL) {
+ | li TMP1, 1
+ | li TMP2, 1
+ | slt TMP3, TMP3, r0
+ | movf TMP1, r0, 0
+ | movf TMP2, r0, 1
+ | movn TMP1, TMP2, TMP3
+ | bnez TMP1, =>BC_JLOOP
+ |. nop
+ } else {
+ | addu TMP1, RD, TMP0
+ | slt TMP3, TMP3, r0
+ | move TMP2, TMP1
+ if (op == BC_FORI) {
+ | movt TMP1, r0, 0
+ | movt TMP2, r0, 1
+ } else {
+ | movf TMP1, r0, 0
+ | movf TMP2, r0, 1
+ }
+ | movn TMP1, TMP2, TMP3
+ | addu PC, PC, TMP1
+ }
+ | ins_next
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | addu RA, BASE, RA
+ | lw TMP1, HI(RA)
+ | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
+ |. lw TMP2, LO(RA)
+ if (op == BC_JITERL) {
+ | sw TMP1, -8+HI(RA)
+ | b =>BC_JLOOP
+ |. sw TMP2, -8+LO(RA)
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | sw TMP1, -8+HI(RA)
+ | sw TMP2, -8+LO(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | lw TMP1, DISPATCH_J(trace)(DISPATCH)
+ | srl RD, RD, 1
+ | li AT, 0
+ | addu TMP1, TMP1, RD
+ | // Traces on MIPS don't store the trace number, so use 0.
+ | sw AT, DISPATCH_GL(vmstate)(DISPATCH)
+ | lw TRACE:TMP2, 0(TMP1)
+ | sw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | sw L, DISPATCH_GL(jit_L)(DISPATCH)
+ | lw TMP2, TRACE:TMP2->mcode
+ | jr TMP2
+ |. addiu JGL, DISPATCH, GG_DISP2G+32768
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lw TMP2, L->maxstack
+ | lbu TMP1, -4+PC2PROTO(numparams)(PC)
+ | lw KBASE, -4+PC2PROTO(k)(PC)
+ | sltu AT, TMP2, RA
+ | bnez AT, ->vm_growstack_l
+ |. sll TMP1, TMP1, 3
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
+ | bnez AT, >3
+ |. addu AT, BASE, NARGS8:RC
+ if (op == BC_JFUNCF) {
+ | decode_RD8a RD, INS
+ | b =>BC_JLOOP
+ |. decode_RD8b RD
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | sw TISNIL, HI(AT)
+ | b <2
+ |. addiu NARGS8:RC, NARGS8:RC, 8
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | addu TMP1, BASE, RC
+ | lw TMP2, L->maxstack
+ | addu TMP0, RA, RC
+ | sw LFUNC:RB, LO(TMP1) // Store copy of LFUNC.
+ | addiu TMP3, RC, 8+FRAME_VARG
+ | sltu AT, TMP0, TMP2
+ | lw KBASE, -4+PC2PROTO(k)(PC)
+ | beqz AT, ->vm_growstack_l
+ |. sw TMP3, HI(TMP1) // Store delta + FRAME_VARG.
+ | lbu TMP2, -4+PC2PROTO(numparams)(PC)
+ | move RA, BASE
+ | move RC, TMP1
+ | ins_next1
+ | beqz TMP2, >3
+ |. addiu BASE, TMP1, 8
+ |1:
+ | lw TMP0, HI(RA)
+ | lw TMP3, LO(RA)
+ | sltu AT, RA, RC // Less args than parameters?
+ | move CARG1, TMP0
+ | movz TMP0, TISNIL, AT // Clear missing parameters.
+ | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
+ | sw TMP3, 8+LO(TMP1)
+ | addiu TMP2, TMP2, -1
+ | sw TMP0, 8+HI(TMP1)
+ | addiu TMP1, TMP1, 8
+ | sw CARG1, HI(RA)
+ | bnez TMP2, <1
+ |. addiu RA, RA, 8
+ |3:
+ | ins_next2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lw CFUNCADDR, CFUNC:RB->f
+ } else {
+ | lw CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | addu TMP1, RA, NARGS8:RC
+ | lw TMP2, L->maxstack
+ | addu RC, BASE, NARGS8:RC
+ | sw BASE, L->base
+ | sltu AT, TMP2, TMP1
+ | sw RC, L->top
+ | li_vmstate C
+ if (op == BC_FUNCCW) {
+ | lw CARG2, CFUNC:RB->f
+ }
+ | bnez AT, ->vm_growstack_c // Need to grow stack.
+ |. move CARG1, L
+ | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
+ |. st_vmstate
+ | // Returns nresults.
+ | lw BASE, L->base
+ | sll RD, CRET1, 3
+ | lw TMP1, L->top
+ | li_vmstate INTERP
+ | lw PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | subu RA, TMP1, RD // RA = L->top - nresults*8
+ | b ->vm_returnc
+ |. st_vmstate
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.4byte .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.4byte 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.4byte .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
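+ /* The two loops above emit DW_CFA_offset (0x80+regno) entries for the
+ callee-saved GPRs r16-r23 and the even-numbered FPRs f20-f30 (DWARF regs
+ 52-62), with ULEB128 offsets factored by the CIE's data alignment of -4. */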
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.4byte .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+ fprintf(ctx->fp,
+ "\t.globl lj_err_unwind_dwarf\n"
+ ".Lframe1:\n"
+ "\t.4byte .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.4byte lj_err_unwind_dwarf\n"
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.4byte .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.4byte .LASFDE2-.Lframe1\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.4byte .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.4byte .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.4byte .LASFDE3-.Lframe2\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/3rdparty/lua/src/vm_ppc.dasc b/3rdparty/lua/src/vm_ppc.dasc
index 25567dd..f253081 100644
--- a/3rdparty/lua/src/vm_ppc.dasc
+++ b/3rdparty/lua/src/vm_ppc.dasc
@@ -1,5160 +1,5137 @@
-|// Low-level VM code for PowerPC CPUs.
-|// Bytecode interpreter, fast functions and helper functions.
-|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-|
-|.arch ppc
-|.section code_op, code_sub
-|
-|.actionlist build_actionlist
-|.globals GLOB_
-|.globalnames globnames
-|.externnames extnames
-|
-|// Note: The ragged indentation of the instructions is intentional.
-|// The starting columns indicate data dependencies.
-|
-|//-----------------------------------------------------------------------
-|
-|// DynASM defines used by the PPC port:
-|//
-|// P64 64 bit pointers (only for GPR64 testing).
-|// Note: a full PPC64 _LP64 port is not planned.
-|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3).
-|// Affects reg saves, stack layout, carry/overflow/dot flags etc.
-|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360).
-|// TOC Need table of contents (64 bit or 32 bit variant, e.g. PS3).
-|// Function pointers are really a struct: code, TOC, env (optional).
-|// TOCENV Function pointers have an environment pointer, too (not on PS3).
-|// PPE Power Processor Element of Cell (PS3) or Xenon (Xbox 360).
-|// Must avoid (slow) micro-coded instructions.
-|
-|.if P64
-|.define TOC, 1
-|.define TOCENV, 1
-|.macro lpx, a, b, c; ldx a, b, c; .endmacro
-|.macro lp, a, b; ld a, b; .endmacro
-|.macro stp, a, b; std a, b; .endmacro
-|.define decode_OPP, decode_OP8
-|.if FFI
-|// Missing: Calling conventions, 64 bit regs, TOC.
-|.error lib_ffi not yet implemented for PPC64
-|.endif
-|.else
-|.macro lpx, a, b, c; lwzx a, b, c; .endmacro
-|.macro lp, a, b; lwz a, b; .endmacro
-|.macro stp, a, b; stw a, b; .endmacro
-|.define decode_OPP, decode_OP4
-|.endif
-|
-|// Convenience macros for TOC handling.
-|.if TOC
-|// Linker needs a TOC patch area for every external call relocation.
-|.macro blex, target; bl extern target@plt; nop; .endmacro
-|.macro .toc, a, b; a, b; .endmacro
-|.if P64
-|.define TOC_OFS, 8
-|.define ENV_OFS, 16
-|.else
-|.define TOC_OFS, 4
-|.define ENV_OFS, 8
-|.endif
-|.else // No TOC.
-|.macro blex, target; bl extern target@plt; .endmacro
-|.macro .toc, a, b; .endmacro
-|.endif
-|.macro .tocenv, a, b; .if TOCENV; a, b; .endif; .endmacro
-|
-|.macro .gpr64, a, b; .if GPR64; a, b; .endif; .endmacro
-|
-|.macro andix., y, a, i
-|.if PPE
-| rlwinm y, a, 0, 31-lj_fls(i), 31-lj_ffs(i)
-| cmpwi y, 0
-|.else
-| andi. y, a, i
-|.endif
-|.endmacro
-|
-|.macro clrso, reg
-|.if PPE
-| li reg, 0
-| mtxer reg
-|.else
-| mcrxr cr0
-|.endif
-|.endmacro
-|
-|.macro checkov, reg, noov
-|.if PPE
-| mfxer reg
-| add reg, reg, reg
-| cmpwi reg, 0
-| li reg, 0
-| mtxer reg
-| bgey noov
-|.else
-| mcrxr cr0
-| bley noov
-|.endif
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Fixed register assignments for the interpreter.
-|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA)
-|
-|// The following must be C callee-save (but BASE is often refetched).
-|.define BASE, r14 // Base of current Lua stack frame.
-|.define KBASE, r15 // Constants of current Lua function.
-|.define PC, r16 // Next PC.
-|.define DISPATCH, r17 // Opcode dispatch table.
-|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
-|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
-|.define JGL, r31 // On-trace: global_State + 32768.
-|
-|// Constants for type-comparisons, stores and conversions. C callee-save.
-|.define TISNUM, r22
-|.define TISNIL, r23
-|.define ZERO, r24
-|.define TOBIT, f30 // 2^52 + 2^51.
-|.define TONUM, f31 // 2^52 + 2^51 + 2^31.
-|
-|// The following temporaries are not saved across C calls, except for RA.
-|.define RA, r20 // Callee-save.
-|.define RB, r10
-|.define RC, r11
-|.define RD, r12
-|.define INS, r7 // Overlaps CARG5.
-|
-|.define TMP0, r0
-|.define TMP1, r8
-|.define TMP2, r9
-|.define TMP3, r6 // Overlaps CARG4.
-|
-|// Saved temporaries.
-|.define SAVE0, r21
-|
-|// Calling conventions.
-|.define CARG1, r3
-|.define CARG2, r4
-|.define CARG3, r5
-|.define CARG4, r6 // Overlaps TMP3.
-|.define CARG5, r7 // Overlaps INS.
-|
-|.define FARG1, f1
-|.define FARG2, f2
-|
-|.define CRET1, r3
-|.define CRET2, r4
-|
-|.define TOCREG, r2 // TOC register (only used by C code).
-|.define ENVREG, r11 // Environment pointer (nested C functions).
-|
-|// Stack layout while in interpreter. Must match with lj_frame.h.
-|.if GPR64
-|.if FRAME32
-|
-|// 456(sp) // \ 32/64 bit C frame info
-|.define TONUM_LO, 452(sp) // |
-|.define TONUM_HI, 448(sp) // |
-|.define TMPD_LO, 444(sp) // |
-|.define TMPD_HI, 440(sp) // |
-|.define SAVE_CR, 432(sp) // | 64 bit CR save.
-|.define SAVE_ERRF, 424(sp) // > Parameter save area.
-|.define SAVE_NRES, 420(sp) // |
-|.define SAVE_L, 416(sp) // |
-|.define SAVE_PC, 412(sp) // |
-|.define SAVE_MULTRES, 408(sp) // |
-|.define SAVE_CFRAME, 400(sp) // / 64 bit C frame chain.
-|// 392(sp) // Reserved.
-|.define CFRAME_SPACE, 384 // Delta for sp.
-|// Back chain for sp: 384(sp) <-- sp entering interpreter
-|.define SAVE_LR, 376(sp) // 32 bit LR stored in hi-part.
-|.define SAVE_GPR_, 232 // .. 232+18*8: 64 bit GPR saves.
-|.define SAVE_FPR_, 88 // .. 88+18*8: 64 bit FPR saves.
-|// 80(sp) // Needed for 16 byte stack frame alignment.
-|// 16(sp) // Callee parameter save area (ABI mandated).
-|// 8(sp) // Reserved
-|// Back chain for sp: 0(sp) <-- sp while in interpreter
-|// 32 bit sp stored in hi-part of 0(sp).
-|
-|.define TMPD_BLO, 447(sp)
-|.define TMPD, TMPD_HI
-|.define TONUM_D, TONUM_HI
-|
-|.else
-|
-|// 508(sp) // \ 32 bit C frame info.
-|.define SAVE_ERRF, 472(sp) // |
-|.define SAVE_NRES, 468(sp) // |
-|.define SAVE_L, 464(sp) // > Parameter save area.
-|.define SAVE_PC, 460(sp) // |
-|.define SAVE_MULTRES, 456(sp) // |
-|.define SAVE_CFRAME, 448(sp) // / 64 bit C frame chain.
-|.define SAVE_LR, 416(sp)
-|.define CFRAME_SPACE, 400 // Delta for sp.
-|// Back chain for sp: 400(sp) <-- sp entering interpreter
-|.define SAVE_FPR_, 256 // .. 256+18*8: 64 bit FPR saves.
-|.define SAVE_GPR_, 112 // .. 112+18*8: 64 bit GPR saves.
-|// 48(sp) // Callee parameter save area (ABI mandated).
-|.define SAVE_TOC, 40(sp) // TOC save area.
-|.define TMPD_LO, 36(sp) // \ Link editor temp (ABI mandated).
-|.define TMPD_HI, 32(sp) // /
-|.define TONUM_LO, 28(sp) // \ Compiler temp (ABI mandated).
-|.define TONUM_HI, 24(sp) // /
-|// Next frame lr: 16(sp)
-|.define SAVE_CR, 8(sp) // 64 bit CR save.
-|// Back chain for sp: 0(sp) <-- sp while in interpreter
-|
-|.define TMPD_BLO, 39(sp)
-|.define TMPD, TMPD_HI
-|.define TONUM_D, TONUM_HI
-|
-|.endif
-|.else
-|
-|.define SAVE_LR, 276(sp)
-|.define CFRAME_SPACE, 272 // Delta for sp.
-|// Back chain for sp: 272(sp) <-- sp entering interpreter
-|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves.
-|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves.
-|.define SAVE_CR, 52(sp) // 32 bit CR save.
-|.define SAVE_ERRF, 48(sp) // 32 bit C frame info.
-|.define SAVE_NRES, 44(sp)
-|.define SAVE_CFRAME, 40(sp)
-|.define SAVE_L, 36(sp)
-|.define SAVE_PC, 32(sp)
-|.define SAVE_MULTRES, 28(sp)
-|.define UNUSED1, 24(sp)
-|.define TMPD_LO, 20(sp)
-|.define TMPD_HI, 16(sp)
-|.define TONUM_LO, 12(sp)
-|.define TONUM_HI, 8(sp)
-|// Next frame lr: 4(sp)
-|// Back chain for sp: 0(sp) <-- sp while in interpreter
-|
-|.define TMPD_BLO, 23(sp)
-|.define TMPD, TMPD_HI
-|.define TONUM_D, TONUM_HI
-|
-|.endif
-|
-|.macro save_, reg
-|.if GPR64
-| std r..reg, SAVE_GPR_+(reg-14)*8(sp)
-|.else
-| stw r..reg, SAVE_GPR_+(reg-14)*4(sp)
-|.endif
-| stfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
-|.endmacro
-|.macro rest_, reg
-|.if GPR64
-| ld r..reg, SAVE_GPR_+(reg-14)*8(sp)
-|.else
-| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp)
-|.endif
-| lfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
-|.endmacro
-|
-|.macro saveregs
-|.if GPR64 and not FRAME32
-| stdu sp, -CFRAME_SPACE(sp)
-|.else
-| stwu sp, -CFRAME_SPACE(sp)
-|.endif
-| save_ 14; save_ 15; save_ 16
-| mflr r0
-| save_ 17; save_ 18; save_ 19; save_ 20; save_ 21; save_ 22
-|.if GPR64 and not FRAME32
-| std r0, SAVE_LR
-|.else
-| stw r0, SAVE_LR
-|.endif
-| save_ 23; save_ 24; save_ 25
-| mfcr r0
-| save_ 26; save_ 27; save_ 28; save_ 29; save_ 30; save_ 31
-|.if GPR64
-| std r0, SAVE_CR
-|.else
-| stw r0, SAVE_CR
-|.endif
-| .toc std TOCREG, SAVE_TOC
-|.endmacro
-|
-|.macro restoreregs
-|.if GPR64 and not FRAME32
-| ld r0, SAVE_LR
-|.else
-| lwz r0, SAVE_LR
-|.endif
-|.if GPR64
-| ld r12, SAVE_CR
-|.else
-| lwz r12, SAVE_CR
-|.endif
-| rest_ 14; rest_ 15; rest_ 16; rest_ 17; rest_ 18; rest_ 19
-| mtlr r0;
-|.if PPE; mtocrf 0x20, r12; .else; mtcrf 0x38, r12; .endif
-| rest_ 20; rest_ 21; rest_ 22; rest_ 23; rest_ 24; rest_ 25
-|.if PPE; mtocrf 0x10, r12; .endif
-| rest_ 26; rest_ 27; rest_ 28; rest_ 29; rest_ 30; rest_ 31
-|.if PPE; mtocrf 0x08, r12; .endif
-| addi sp, sp, CFRAME_SPACE
-|.endmacro
-|
-|// Type definitions. Some of these are only used for documentation.
-|.type L, lua_State, LREG
-|.type GL, global_State
-|.type TVALUE, TValue
-|.type GCOBJ, GCobj
-|.type STR, GCstr
-|.type TAB, GCtab
-|.type LFUNC, GCfuncL
-|.type CFUNC, GCfuncC
-|.type PROTO, GCproto
-|.type UPVAL, GCupval
-|.type NODE, Node
-|.type NARGS8, int
-|.type TRACE, GCtrace
-|
-|//-----------------------------------------------------------------------
-|
-|// These basic macros should really be part of DynASM.
-|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
-|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
-|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
-|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
-|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
-|
-|// Trap for not-yet-implemented parts.
-|.macro NYI; tw 4, sp, sp; .endmacro
-|
-|// int/FP conversions.
-|.macro tonum_i, freg, reg
-| xoris reg, reg, 0x8000
-| stw reg, TONUM_LO
-| lfd freg, TONUM_D
-| fsub freg, freg, TONUM
-|.endmacro
-|
-|.macro tonum_u, freg, reg
-| stw reg, TONUM_LO
-| lfd freg, TONUM_D
-| fsub freg, freg, TOBIT
-|.endmacro
-|
-|.macro toint, reg, freg, tmpfreg
-| fctiwz tmpfreg, freg
-| stfd tmpfreg, TMPD
-| lwz reg, TMPD_LO
-|.endmacro
-|
-|.macro toint, reg, freg
-| toint reg, freg, freg
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Access to frame relative to BASE.
-|.define FRAME_PC, -8
-|.define FRAME_FUNC, -4
-|
-|// Instruction decode.
-|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
-|.macro decode_OP8, dst, ins; rlwinm dst, ins, 3, 21, 28; .endmacro
-|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
-|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
-|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
-|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
-|
-|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
-|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
-|
-|// Instruction fetch.
-|.macro ins_NEXT1
-| lwz INS, 0(PC)
-| addi PC, PC, 4
-|.endmacro
-|// Instruction decode+dispatch. Note: optimized for e300!
-|.macro ins_NEXT2
-| decode_OPP TMP1, INS
-| lpx TMP0, DISPATCH, TMP1
-| mtctr TMP0
-| decode_RB8 RB, INS
-| decode_RD8 RD, INS
-| decode_RA8 RA, INS
-| decode_RC8 RC, INS
-| bctr
-|.endmacro
-|.macro ins_NEXT
-| ins_NEXT1
-| ins_NEXT2
-|.endmacro
-|
-|// Instruction footer.
-|.if 1
-| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
-| .define ins_next, ins_NEXT
-| .define ins_next_, ins_NEXT
-| .define ins_next1, ins_NEXT1
-| .define ins_next2, ins_NEXT2
-|.else
-| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
-| // Affects only certain kinds of benchmarks (and only with -j off).
-| .macro ins_next
-| b ->ins_next
-| .endmacro
-| .macro ins_next1
-| .endmacro
-| .macro ins_next2
-| b ->ins_next
-| .endmacro
-| .macro ins_next_
-| ->ins_next:
-| ins_NEXT
-| .endmacro
-|.endif
-|
-|// Call decode and dispatch.
-|.macro ins_callt
-| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
-| lwz PC, LFUNC:RB->pc
-| lwz INS, 0(PC)
-| addi PC, PC, 4
-| decode_OPP TMP1, INS
-| decode_RA8 RA, INS
-| lpx TMP0, DISPATCH, TMP1
-| add RA, RA, BASE
-| mtctr TMP0
-| bctr
-|.endmacro
-|
-|.macro ins_call
-| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
-| stw PC, FRAME_PC(BASE)
-| ins_callt
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Macros to test operand types.
-|.macro checknum, reg; cmplw reg, TISNUM; .endmacro
-|.macro checknum, cr, reg; cmplw cr, reg, TISNUM; .endmacro
-|.macro checkstr, reg; cmpwi reg, LJ_TSTR; .endmacro
-|.macro checktab, reg; cmpwi reg, LJ_TTAB; .endmacro
-|.macro checkfunc, reg; cmpwi reg, LJ_TFUNC; .endmacro
-|.macro checknil, reg; cmpwi reg, LJ_TNIL; .endmacro
-|
-|.macro branch_RD
-| srwi TMP0, RD, 1
-| addis PC, PC, -(BCBIAS_J*4 >> 16)
-| add PC, PC, TMP0
-|.endmacro
-|
-|// Assumes DISPATCH is relative to GL.
-#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
-#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
-|
-#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
-|
-|.macro hotcheck, delta, target
-| rlwinm TMP1, PC, 31, 25, 30
-| addi TMP1, TMP1, GG_DISP2HOT
-| lhzx TMP2, DISPATCH, TMP1
-| addic. TMP2, TMP2, -delta
-| sthx TMP2, DISPATCH, TMP1
-| blt target
-|.endmacro
-|
-|.macro hotloop
-| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
-|.endmacro
-|
-|.macro hotcall
-| hotcheck HOTCOUNT_CALL, ->vm_hotcall
-|.endmacro
-|
-|// Set current VM state. Uses TMP0.
-|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
-|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
-|
-|// Move table write barrier back. Overwrites mark and tmp.
-|.macro barrierback, tab, mark, tmp
-| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
-| // Assumes LJ_GC_BLACK is 0x04.
-| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
-| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
-| stb mark, tab->marked
-| stw tmp, tab->gclist
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-
-/* Generate subroutines used by opcodes and other parts of the VM. */
-/* The .code_sub section should be last to help static branch prediction. */
-static void build_subroutines(BuildCtx *ctx)
-{
- |.code_sub
- |
- |//-----------------------------------------------------------------------
- |//-- Return handling ----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_returnp:
- | // See vm_return. Also: TMP2 = previous base.
- | andix. TMP0, PC, FRAME_P
- | li TMP1, LJ_TTRUE
- | beq ->cont_dispatch
- |
- | // Return from pcall or xpcall fast func.
- | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
- | mr BASE, TMP2 // Restore caller base.
- | // Prepending may overwrite the pcall frame, so do it at the end.
- | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
- |
- |->vm_returnc:
- | addi RD, RD, 8 // RD = (nresults+1)*8.
- | andix. TMP0, PC, FRAME_TYPE
- | cmpwi cr1, RD, 0
- | li CRET1, LUA_YIELD
- | beq cr1, ->vm_unwind_c_eh
- | mr MULTRES, RD
- | beq ->BC_RET_Z // Handle regular return to Lua.
- |
- |->vm_return:
- | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
- | // TMP0 = PC & FRAME_TYPE
- | cmpwi TMP0, FRAME_C
- | rlwinm TMP2, PC, 0, 0, 28
- | li_vmstate C
- | sub TMP2, BASE, TMP2 // TMP2 = previous base.
- | bney ->vm_returnp
- |
- | addic. TMP1, RD, -8
- | stp TMP2, L->base
- | lwz TMP2, SAVE_NRES
- | subi BASE, BASE, 8
- | st_vmstate
- | slwi TMP2, TMP2, 3
- | beq >2
- |1:
- | addic. TMP1, TMP1, -8
- | lfd f0, 0(RA)
- | addi RA, RA, 8
- | stfd f0, 0(BASE)
- | addi BASE, BASE, 8
- | bney <1
- |
- |2:
- | cmpw TMP2, RD // More/less results wanted?
- | bne >6
- |3:
- | stp BASE, L->top // Store new top.
- |
- |->vm_leave_cp:
- | lp TMP0, SAVE_CFRAME // Restore previous C frame.
- | li CRET1, 0 // Ok return status for vm_pcall.
- | stp TMP0, L->cframe
- |
- |->vm_leave_unw:
- | restoreregs
- | blr
- |
- |6:
- | ble >7 // Less results wanted?
- | // More results wanted. Check stack size and fill up results with nil.
- | lwz TMP1, L->maxstack
- | cmplw BASE, TMP1
- | bge >8
- | stw TISNIL, 0(BASE)
- | addi RD, RD, 8
- | addi BASE, BASE, 8
- | b <2
- |
- |7: // Less results wanted.
- | subfic TMP3, TMP2, 0 // LUA_MULTRET+1 case?
- | sub TMP0, RD, TMP2
- | subfe TMP1, TMP1, TMP1 // TMP1 = TMP2 == 0 ? 0 : -1
- | and TMP0, TMP0, TMP1
- | sub BASE, BASE, TMP0 // Either keep top or shrink it.
- | b <3
- |
- |8: // Corner case: need to grow stack for filling up results.
- | // This can happen if:
- | // - A C function grows the stack (a lot).
- | // - The GC shrinks the stack in between.
- | // - A return back from a lua_call() with (high) nresults adjustment.
- | stp BASE, L->top // Save current top held in BASE (yes).
- | mr SAVE0, RD
- | srwi CARG2, TMP2, 3
- | mr CARG1, L
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lwz TMP2, SAVE_NRES
- | mr RD, SAVE0
- | slwi TMP2, TMP2, 3
- | lp BASE, L->top // Need the (realloced) L->top in BASE.
- | b <2
- |
- |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
- | // (void *cframe, int errcode)
- | mr sp, CARG1
- | mr CRET1, CARG2
- |->vm_unwind_c_eh: // Landing pad for external unwinder.
- | lwz L, SAVE_L
- | .toc ld TOCREG, SAVE_TOC
- | li TMP0, ~LJ_VMST_C
- | lwz GL:TMP1, L->glref
- | stw TMP0, GL:TMP1->vmstate
- | b ->vm_leave_unw
- |
- |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
- | // (void *cframe)
- |.if GPR64
- | rldicr sp, CARG1, 0, 61
- |.else
- | rlwinm sp, CARG1, 0, 0, 29
- |.endif
- |->vm_unwind_ff_eh: // Landing pad for external unwinder.
- | lwz L, SAVE_L
- | .toc ld TOCREG, SAVE_TOC
- | li TISNUM, LJ_TISNUM // Setup type comparison constants.
- | lp BASE, L->base
- | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | li ZERO, 0
- | stw TMP3, TMPD
- | li TMP1, LJ_TFALSE
- | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
- | li TISNIL, LJ_TNIL
- | li_vmstate INTERP
- | lfs TOBIT, TMPD
- | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
- | la RA, -8(BASE) // Results start at BASE-8.
- | stw TMP3, TMPD
- | addi DISPATCH, DISPATCH, GG_G2DISP
- | stw TMP1, 0(RA) // Prepend false to error message.
- | li RD, 16 // 2 results: false + error message.
- | st_vmstate
- | lfs TONUM, TMPD
- | b ->vm_returnc
- |
- |//-----------------------------------------------------------------------
- |//-- Grow stack for calls -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_growstack_c: // Grow stack for C function.
- | li CARG2, LUA_MINSTACK
- | b >2
- |
- |->vm_growstack_l: // Grow stack for Lua function.
- | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
- | add RC, BASE, RC
- | sub RA, RA, BASE
- | stp BASE, L->base
- | addi PC, PC, 4 // Must point after first instruction.
- | stp RC, L->top
- | srwi CARG2, RA, 3
- |2:
- | // L->base = new base, L->top = top
- | stw PC, SAVE_PC
- | mr CARG1, L
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lp BASE, L->base
- | lp RC, L->top
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | sub RC, RC, BASE
- | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
- | ins_callt // Just retry the call.
- |
- |//-----------------------------------------------------------------------
- |//-- Entry points into the assembler VM ---------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_resume: // Setup C frame and resume thread.
- | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
- | saveregs
- | mr L, CARG1
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | mr BASE, CARG2
- | lbz TMP1, L->status
- | stw L, SAVE_L
- | li PC, FRAME_CP
- | addi TMP0, sp, CFRAME_RESUME
- | addi DISPATCH, DISPATCH, GG_G2DISP
- | stw CARG3, SAVE_NRES
- | cmplwi TMP1, 0
- | stw CARG3, SAVE_ERRF
- | stp TMP0, L->cframe
- | stp CARG3, SAVE_CFRAME
- | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | beq >3
- |
- | // Resume after yield (like a return).
- | mr RA, BASE
- | lp BASE, L->base
- | li TISNUM, LJ_TISNUM // Setup type comparison constants.
- | lp TMP1, L->top
- | lwz PC, FRAME_PC(BASE)
- | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | stb CARG3, L->status
- | stw TMP3, TMPD
- | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
- | lfs TOBIT, TMPD
- | sub RD, TMP1, BASE
- | stw TMP3, TMPD
- | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
- | addi RD, RD, 8
- | stw TMP0, TONUM_HI
- | li_vmstate INTERP
- | li ZERO, 0
- | st_vmstate
- | andix. TMP0, PC, FRAME_TYPE
- | mr MULTRES, RD
- | lfs TONUM, TMPD
- | li TISNIL, LJ_TNIL
- | beq ->BC_RET_Z
- | b ->vm_return
- |
- |->vm_pcall: // Setup protected C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
- | saveregs
- | li PC, FRAME_CP
- | stw CARG4, SAVE_ERRF
- | b >1
- |
- |->vm_call: // Setup C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1)
- | saveregs
- | li PC, FRAME_C
- |
- |1: // Entry point for vm_pcall above (PC = ftype).
- | lp TMP1, L:CARG1->cframe
- | stw CARG3, SAVE_NRES
- | mr L, CARG1
- | stw CARG1, SAVE_L
- | mr BASE, CARG2
- | stp sp, L->cframe // Add our C frame to cframe chain.
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | stp TMP1, SAVE_CFRAME
- | addi DISPATCH, DISPATCH, GG_G2DISP
- |
- |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
- | lp TMP2, L->base // TMP2 = old base (used in vmeta_call).
- | li TISNUM, LJ_TISNUM // Setup type comparison constants.
- | lp TMP1, L->top
- | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | add PC, PC, BASE
- | stw TMP3, TMPD
- | li ZERO, 0
- | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
- | lfs TOBIT, TMPD
- | sub PC, PC, TMP2 // PC = frame delta + frame type
- | stw TMP3, TMPD
- | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
- | sub NARGS8:RC, TMP1, BASE
- | stw TMP0, TONUM_HI
- | li_vmstate INTERP
- | lfs TONUM, TMPD
- | li TISNIL, LJ_TNIL
- | st_vmstate
- |
- |->vm_call_dispatch:
- | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
- | lwz TMP0, FRAME_PC(BASE)
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | checkfunc TMP0; bne ->vmeta_call
- |
- |->vm_call_dispatch_f:
- | ins_call
- | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
- |
- |->vm_cpcall: // Setup protected C frame, call C.
- | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
- | saveregs
- | mr L, CARG1
- | lwz TMP0, L:CARG1->stack
- | stw CARG1, SAVE_L
- | lp TMP1, L->top
- | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
- | lp TMP1, L->cframe
- | stp sp, L->cframe // Add our C frame to cframe chain.
- | .toc lp CARG4, 0(CARG4)
- | li TMP2, 0
- | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
- | stw TMP2, SAVE_ERRF // No error function.
- | stp TMP1, SAVE_CFRAME
- | mtctr CARG4
- | bctrl // (lua_State *L, lua_CFunction func, void *ud)
- |.if PPE
- | mr BASE, CRET1
- | cmpwi CRET1, 0
- |.else
- | mr. BASE, CRET1
- |.endif
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | li PC, FRAME_CP
- | addi DISPATCH, DISPATCH, GG_G2DISP
- | bne <3 // Else continue with the call.
- | b ->vm_leave_cp // No base? Just remove C frame.
- |
- |//-----------------------------------------------------------------------
- |//-- Metamethod handling ------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
- |// stack, so BASE doesn't need to be reloaded across these calls.
- |
- |//-- Continuation dispatch ----------------------------------------------
- |
- |->cont_dispatch:
- | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
- | lwz TMP0, -12(BASE) // Continuation.
- | mr RB, BASE
- | mr BASE, TMP2 // Restore caller BASE.
- | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
- |.if FFI
- | cmplwi TMP0, 1
- |.endif
- | lwz PC, -16(RB) // Restore PC from [cont|PC].
- | subi TMP2, RD, 8
- | lwz TMP1, LFUNC:TMP1->pc
- | stwx TISNIL, RA, TMP2 // Ensure one valid arg.
- |.if FFI
- | ble >1
- |.endif
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | // BASE = base, RA = resultptr, RB = meta base
- | mtctr TMP0
- | bctr // Jump to continuation.
- |
- |.if FFI
- |1:
- | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
- | // cont = 0: tailcall from C function.
- | subi TMP1, RB, 16
- | sub RC, TMP1, BASE
- | b ->vm_call_tail
- |.endif
- |
- |->cont_cat: // RA = resultptr, RB = meta base
- | lwz INS, -4(PC)
- | subi CARG2, RB, 16
- | decode_RB8 SAVE0, INS
- | lfd f0, 0(RA)
- | add TMP1, BASE, SAVE0
- | stp BASE, L->base
- | cmplw TMP1, CARG2
- | sub CARG3, CARG2, TMP1
- | decode_RA8 RA, INS
- | stfd f0, 0(CARG2)
- | bney ->BC_CAT_Z
- | stfdx f0, BASE, RA
- | b ->cont_nop
- |
- |//-- Table indexing metamethods -----------------------------------------
- |
- |->vmeta_tgets1:
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | li TMP0, LJ_TSTR
- | decode_RB8 RB, INS
- | stw STR:RC, 4(CARG3)
- | add CARG2, BASE, RB
- | stw TMP0, 0(CARG3)
- | b >1
- |
- |->vmeta_tgets:
- | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
- | li TMP0, LJ_TTAB
- | stw TAB:RB, 4(CARG2)
- | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
- | stw TMP0, 0(CARG2)
- | li TMP1, LJ_TSTR
- | stw STR:RC, 4(CARG3)
- | stw TMP1, 0(CARG3)
- | b >1
- |
- |->vmeta_tgetb: // TMP0 = index
- |.if not DUALNUM
- | tonum_u f0, TMP0
- |.endif
- | decode_RB8 RB, INS
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | add CARG2, BASE, RB
- |.if DUALNUM
- | stw TISNUM, 0(CARG3)
- | stw TMP0, 4(CARG3)
- |.else
- | stfd f0, 0(CARG3)
- |.endif
- | b >1
- |
- |->vmeta_tgetv:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG2, BASE, RB
- | add CARG3, BASE, RC
- |1:
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
- | // Returns TValue * (finished) or NULL (metamethod).
- | cmplwi CRET1, 0
- | beq >3
- | lfd f0, 0(CRET1)
- | ins_next1
- | stfdx f0, BASE, RA
- | ins_next2
- |
- |3: // Call __index metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k
- | subfic TMP1, BASE, FRAME_CONT
- | lp BASE, L->top
- | stw PC, -16(BASE) // [cont|PC]
- | add PC, TMP1, BASE
- | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | li NARGS8:RC, 16 // 2 args for func(t, k).
- | b ->vm_call_dispatch_f
- |
- |//-----------------------------------------------------------------------
- |
- |->vmeta_tsets1:
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | li TMP0, LJ_TSTR
- | decode_RB8 RB, INS
- | stw STR:RC, 4(CARG3)
- | add CARG2, BASE, RB
- | stw TMP0, 0(CARG3)
- | b >1
- |
- |->vmeta_tsets:
- | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
- | li TMP0, LJ_TTAB
- | stw TAB:RB, 4(CARG2)
- | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
- | stw TMP0, 0(CARG2)
- | li TMP1, LJ_TSTR
- | stw STR:RC, 4(CARG3)
- | stw TMP1, 0(CARG3)
- | b >1
- |
- |->vmeta_tsetb: // TMP0 = index
- |.if not DUALNUM
- | tonum_u f0, TMP0
- |.endif
- | decode_RB8 RB, INS
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | add CARG2, BASE, RB
- |.if DUALNUM
- | stw TISNUM, 0(CARG3)
- | stw TMP0, 4(CARG3)
- |.else
- | stfd f0, 0(CARG3)
- |.endif
- | b >1
- |
- |->vmeta_tsetv:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG2, BASE, RB
- | add CARG3, BASE, RC
- |1:
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
- | // Returns TValue * (finished) or NULL (metamethod).
- | cmplwi CRET1, 0
- | lfdx f0, BASE, RA
- | beq >3
- | // NOBARRIER: lj_meta_tset ensures the table is not black.
- | ins_next1
- | stfd f0, 0(CRET1)
- | ins_next2
- |
- |3: // Call __newindex metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
- | subfic TMP1, BASE, FRAME_CONT
- | lp BASE, L->top
- | stw PC, -16(BASE) // [cont|PC]
- | add PC, TMP1, BASE
- | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | li NARGS8:RC, 24 // 3 args for func(t, k, v)
- | stfd f0, 16(BASE) // Copy value to third argument.
- | b ->vm_call_dispatch_f
- |
- |//-- Comparison metamethods ---------------------------------------------
- |
- |->vmeta_comp:
- | mr CARG1, L
- | subi PC, PC, 4
- |.if DUALNUM
- | mr CARG2, RA
- |.else
- | add CARG2, BASE, RA
- |.endif
- | stw PC, SAVE_PC
- |.if DUALNUM
- | mr CARG3, RD
- |.else
- | add CARG3, BASE, RD
- |.endif
- | stp BASE, L->base
- | decode_OP1 CARG4, INS
- | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
- | // Returns 0/1 or TValue * (metamethod).
- |3:
- | cmplwi CRET1, 1
- | bgt ->vmeta_binop
- | subfic CRET1, CRET1, 0
- |4:
- | lwz INS, 0(PC)
- | addi PC, PC, 4
- | decode_RD4 TMP2, INS
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | and TMP2, TMP2, CRET1
- | add PC, PC, TMP2
- |->cont_nop:
- | ins_next
- |
- |->cont_ra: // RA = resultptr
- | lwz INS, -4(PC)
- | lfd f0, 0(RA)
- | decode_RA8 TMP1, INS
- | stfdx f0, BASE, TMP1
- | b ->cont_nop
- |
- |->cont_condt: // RA = resultptr
- | lwz TMP0, 0(RA)
- | .gpr64 extsw TMP0, TMP0
- | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is true.
- | subfe CRET1, CRET1, CRET1
- | not CRET1, CRET1
- | b <4
- |
- |->cont_condf: // RA = resultptr
- | lwz TMP0, 0(RA)
- | .gpr64 extsw TMP0, TMP0
- | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is false.
- | subfe CRET1, CRET1, CRET1
- | b <4
- |
- |->vmeta_equal:
- | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
- | subi PC, PC, 4
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |
- |->vmeta_equal_cd:
- |.if FFI
- | mr CARG2, INS
- | subi PC, PC, 4
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |.endif
- |
- |//-- Arithmetic metamethods ---------------------------------------------
- |
- |->vmeta_arith_nv:
- | add CARG3, KBASE, RC
- | add CARG4, BASE, RB
- | b >1
- |->vmeta_arith_nv2:
- |.if DUALNUM
- | mr CARG3, RC
- | mr CARG4, RB
- | b >1
- |.endif
- |
- |->vmeta_unm:
- | mr CARG3, RD
- | mr CARG4, RD
- | b >1
- |
- |->vmeta_arith_vn:
- | add CARG3, BASE, RB
- | add CARG4, KBASE, RC
- | b >1
- |
- |->vmeta_arith_vv:
- | add CARG3, BASE, RB
- | add CARG4, BASE, RC
- |.if DUALNUM
- | b >1
- |.endif
- |->vmeta_arith_vn2:
- |->vmeta_arith_vv2:
- |.if DUALNUM
- | mr CARG3, RB
- | mr CARG4, RC
- |.endif
- |1:
- | add CARG2, BASE, RA
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
- | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
- | // Returns NULL (finished) or TValue * (metamethod).
- | cmplwi CRET1, 0
- | beq ->cont_nop
- |
- | // Call metamethod for binary op.
- |->vmeta_binop:
- | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
- | sub TMP1, CRET1, BASE
- | stw PC, -16(CRET1) // [cont|PC]
- | mr TMP2, BASE
- | addi PC, TMP1, FRAME_CONT
- | mr BASE, CRET1
- | li NARGS8:RC, 16 // 2 args for func(o1, o2).
- | b ->vm_call_dispatch
- |
- |->vmeta_len:
-#if LJ_52
- | mr SAVE0, CARG1
-#endif
- | mr CARG2, RD
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_len // (lua_State *L, TValue *o)
- | // Returns NULL (retry) or TValue * (metamethod base).
-#if LJ_52
- | cmplwi CRET1, 0
- | bne ->vmeta_binop // Binop call for compatibility.
- | mr CARG1, SAVE0
- | b ->BC_LEN_Z
-#else
- | b ->vmeta_binop // Binop call for compatibility.
-#endif
- |
- |//-- Call metamethod ----------------------------------------------------
- |
- |->vmeta_call: // Resolve and call __call metamethod.
- | // TMP2 = old base, BASE = new base, RC = nargs*8
- | mr CARG1, L
- | stp TMP2, L->base // This is the callers base!
- | subi CARG2, BASE, 8
- | stw PC, SAVE_PC
- | add CARG3, BASE, RC
- | mr SAVE0, NARGS8:RC
- | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
- | ins_call
- |
- |->vmeta_callt: // Resolve __call for BC_CALLT.
- | // BASE = old base, RA = new base, RC = nargs*8
- | mr CARG1, L
- | stp BASE, L->base
- | subi CARG2, RA, 8
- | stw PC, SAVE_PC
- | add CARG3, RA, RC
- | mr SAVE0, NARGS8:RC
- | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- | lwz TMP1, FRAME_PC(BASE)
- | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
- | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
- | b ->BC_CALLT_Z
- |
- |//-- Argument coercion for 'for' statement ------------------------------
- |
- |->vmeta_for:
- | mr CARG1, L
- | stp BASE, L->base
- | mr CARG2, RA
- | stw PC, SAVE_PC
- | mr SAVE0, INS
- | bl extern lj_meta_for // (lua_State *L, TValue *base)
- |.if JIT
- | decode_OP1 TMP0, SAVE0
- |.endif
- | decode_RA8 RA, SAVE0
- |.if JIT
- | cmpwi TMP0, BC_JFORI
- |.endif
- | decode_RD8 RD, SAVE0
- |.if JIT
- | beqy =>BC_JFORI
- |.endif
- | b =>BC_FORI
- |
- |//-----------------------------------------------------------------------
- |//-- Fast functions -----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |.macro .ffunc, name
- |->ff_ .. name:
- |.endmacro
- |
- |.macro .ffunc_1, name
- |->ff_ .. name:
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- | lwz CARG1, 4(BASE)
- | blt ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_2, name
- |->ff_ .. name:
- | cmplwi NARGS8:RC, 16
- | lwz CARG3, 0(BASE)
- | lwz CARG4, 8(BASE)
- | lwz CARG1, 4(BASE)
- | lwz CARG2, 12(BASE)
- | blt ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_n, name
- |->ff_ .. name:
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- | lfd FARG1, 0(BASE)
- | blt ->fff_fallback
- | checknum CARG3; bge ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_nn, name
- |->ff_ .. name:
- | cmplwi NARGS8:RC, 16
- | lwz CARG3, 0(BASE)
- | lfd FARG1, 0(BASE)
- | lwz CARG4, 8(BASE)
- | lfd FARG2, 8(BASE)
- | blt ->fff_fallback
- | checknum CARG3; bge ->fff_fallback
- | checknum CARG4; bge ->fff_fallback
- |.endmacro
- |
- |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
- |.macro ffgccheck
- | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
- | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
- | cmplw TMP0, TMP1
- | bgel ->fff_gcstep
- |.endmacro
- |
- |//-- Base library: checks -----------------------------------------------
- |
- |.ffunc_1 assert
- | li TMP1, LJ_TFALSE
- | la RA, -8(BASE)
- | cmplw cr1, CARG3, TMP1
- | lwz PC, FRAME_PC(BASE)
- | bge cr1, ->fff_fallback
- | stw CARG3, 0(RA)
- | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
- | stw CARG1, 4(RA)
- | beq ->fff_res // Done if exactly 1 argument.
- | li TMP1, 8
- | subi RC, RC, 8
- |1:
- | cmplw TMP1, RC
- | lfdx f0, BASE, TMP1
- | stfdx f0, RA, TMP1
- | addi TMP1, TMP1, 8
- | bney <1
- | b ->fff_res
- |
- |.ffunc type
- | cmplwi NARGS8:RC, 8
- | lwz CARG1, 0(BASE)
- | blt ->fff_fallback
- | .gpr64 extsw CARG1, CARG1
- | subfc TMP0, TISNUM, CARG1
- | subfe TMP2, CARG1, CARG1
- | orc TMP1, TMP2, TMP0
- | addi TMP1, TMP1, ~LJ_TISNUM+1
- | slwi TMP1, TMP1, 3
- | la TMP2, CFUNC:RB->upvalue
- | lfdx FARG1, TMP2, TMP1
- | b ->fff_resn
- |
- |//-- Base library: getters and setters ---------------------------------
- |
- |.ffunc_1 getmetatable
- | checktab CARG3; bne >6
- |1: // Field metatable must be at same offset for GCtab and GCudata!
- | lwz TAB:CARG1, TAB:CARG1->metatable
- |2:
- | li CARG3, LJ_TNIL
- | cmplwi TAB:CARG1, 0
- | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
- | beq ->fff_restv
- | lwz TMP0, TAB:CARG1->hmask
- | li CARG3, LJ_TTAB // Use metatable as default result.
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:CARG1->node
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |3: // Rearranged logic, because we expect _not_ to find the key.
- | lwz CARG4, NODE:TMP2->key
- | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
- | lwz CARG2, NODE:TMP2->val
- | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
- | checkstr CARG4; bne >4
- | cmpw TMP0, STR:RC; beq >5
- |4:
- | lwz NODE:TMP2, NODE:TMP2->next
- | cmplwi NODE:TMP2, 0
- | beq ->fff_restv // Not found, keep default result.
- | b <3
- |5:
- | checknil CARG2
- | beq ->fff_restv // Ditto for nil value.
- | mr CARG3, CARG2 // Return value of mt.__metatable.
- | mr CARG1, TMP1
- | b ->fff_restv
- |
- |6:
- | cmpwi CARG3, LJ_TUDATA; beq <1
- | .gpr64 extsw CARG3, CARG3
- | subfc TMP0, TISNUM, CARG3
- | subfe TMP2, CARG3, CARG3
- | orc TMP1, TMP2, TMP0
- | addi TMP1, TMP1, ~LJ_TISNUM+1
- | slwi TMP1, TMP1, 2
- | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
- | lwzx TAB:CARG1, TMP2, TMP1
- | b <2
- |
- |.ffunc_2 setmetatable
- | // Fast path: no mt for table yet and not clearing the mt.
- | checktab CARG3; bne ->fff_fallback
- | lwz TAB:TMP1, TAB:CARG1->metatable
- | checktab CARG4; bne ->fff_fallback
- | cmplwi TAB:TMP1, 0
- | lbz TMP3, TAB:CARG1->marked
- | bne ->fff_fallback
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | stw TAB:CARG2, TAB:CARG1->metatable
- | beq ->fff_restv
- | barrierback TAB:CARG1, TMP3, TMP0
- | b ->fff_restv
- |
- |.ffunc rawget
- | cmplwi NARGS8:RC, 16
- | lwz CARG4, 0(BASE)
- | lwz TAB:CARG2, 4(BASE)
- | blt ->fff_fallback
- | checktab CARG4; bne ->fff_fallback
- | la CARG3, 8(BASE)
- | mr CARG1, L
- | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
- | // Returns cTValue *.
- | lfd FARG1, 0(CRET1)
- | b ->fff_resn
- |
- |//-- Base library: conversions ------------------------------------------
- |
- |.ffunc tonumber
- | // Only handles the number case inline (without a base argument).
- | cmplwi NARGS8:RC, 8
- | lwz CARG1, 0(BASE)
- | lfd FARG1, 0(BASE)
- | bne ->fff_fallback // Exactly one argument.
- | checknum CARG1; bgt ->fff_fallback
- | b ->fff_resn
- |
- |.ffunc_1 tostring
- | // Only handles the string or number case inline.
- | checkstr CARG3
- | // A __tostring method in the string base metatable is ignored.
- | beq ->fff_restv // String key?
- | // Handle numbers inline, unless a number base metatable is present.
- | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
- | checknum CARG3
- | cmplwi cr1, TMP0, 0
- | stp BASE, L->base // Add frame since C call can throw.
- | crorc 4*cr0+eq, 4*cr0+gt, 4*cr1+eq
- | stw PC, SAVE_PC // Redundant (but a defined value).
- | beq ->fff_fallback
- | ffgccheck
- | mr CARG1, L
- | mr CARG2, BASE
- |.if DUALNUM
- | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o)
- |.else
- | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
- |.endif
- | // Returns GCstr *.
- | li CARG3, LJ_TSTR
- | b ->fff_restv
- |
- |//-- Base library: iterators -------------------------------------------
- |
- |.ffunc next
- | cmplwi NARGS8:RC, 8
- | lwz CARG1, 0(BASE)
- | lwz TAB:CARG2, 4(BASE)
- | blt ->fff_fallback
- | stwx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
- | checktab CARG1
- | lwz PC, FRAME_PC(BASE)
- | bne ->fff_fallback
- | stp BASE, L->base // Add frame since C call can throw.
- | mr CARG1, L
- | stp BASE, L->top // Dummy frame length is ok.
- | la CARG3, 8(BASE)
- | stw PC, SAVE_PC
- | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
- | // Returns 0 at end of traversal.
- | cmplwi CRET1, 0
- | li CARG3, LJ_TNIL
- | beq ->fff_restv // End of traversal: return nil.
- | lfd f0, 8(BASE) // Copy key and value to results.
- | la RA, -8(BASE)
- | lfd f1, 16(BASE)
- | stfd f0, 0(RA)
- | li RD, (2+1)*8
- | stfd f1, 8(RA)
- | b ->fff_res
- |
- |.ffunc_1 pairs
- | checktab CARG3
- | lwz PC, FRAME_PC(BASE)
- | bne ->fff_fallback
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | lfd f0, CFUNC:RB->upvalue[0]
- | cmplwi TAB:TMP2, 0
- | la RA, -8(BASE)
- | bne ->fff_fallback
-#else
- | lfd f0, CFUNC:RB->upvalue[0]
- | la RA, -8(BASE)
-#endif
- | stw TISNIL, 8(BASE)
- | li RD, (3+1)*8
- | stfd f0, 0(RA)
- | b ->fff_res
- |
- |.ffunc ipairs_aux
- | cmplwi NARGS8:RC, 16
- | lwz CARG3, 0(BASE)
- | lwz TAB:CARG1, 4(BASE)
- | lwz CARG4, 8(BASE)
- |.if DUALNUM
- | lwz TMP2, 12(BASE)
- |.else
- | lfd FARG2, 8(BASE)
- |.endif
- | blt ->fff_fallback
- | checktab CARG3
- | checknum cr1, CARG4
- | lwz PC, FRAME_PC(BASE)
- |.if DUALNUM
- | bne ->fff_fallback
- | bne cr1, ->fff_fallback
- |.else
- | lus TMP0, 0x3ff0
- | stw ZERO, TMPD_LO
- | bne ->fff_fallback
- | stw TMP0, TMPD_HI
- | bge cr1, ->fff_fallback
- | lfd FARG1, TMPD
- | toint TMP2, FARG2, f0
- |.endif
- | lwz TMP0, TAB:CARG1->asize
- | lwz TMP1, TAB:CARG1->array
- |.if not DUALNUM
- | fadd FARG2, FARG2, FARG1
- |.endif
- | addi TMP2, TMP2, 1
- | la RA, -8(BASE)
- | cmplw TMP0, TMP2
- |.if DUALNUM
- | stw TISNUM, 0(RA)
- | slwi TMP3, TMP2, 3
- | stw TMP2, 4(RA)
- |.else
- | slwi TMP3, TMP2, 3
- | stfd FARG2, 0(RA)
- |.endif
- | ble >2 // Not in array part?
- | lwzx TMP2, TMP1, TMP3
- | lfdx f0, TMP1, TMP3
- |1:
- | checknil TMP2
- | li RD, (0+1)*8
- | beq ->fff_res // End of iteration, return 0 results.
- | li RD, (2+1)*8
- | stfd f0, 8(RA)
- | b ->fff_res
- |2: // Check for empty hash part first. Otherwise call C function.
- | lwz TMP0, TAB:CARG1->hmask
- | cmplwi TMP0, 0
- | li RD, (0+1)*8
- | beq ->fff_res
- | mr CARG2, TMP2
- | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
- | // Returns cTValue * or NULL.
- | cmplwi CRET1, 0
- | li RD, (0+1)*8
- | beq ->fff_res
- | lwz TMP2, 0(CRET1)
- | lfd f0, 0(CRET1)
- | b <1
- |
- |.ffunc_1 ipairs
- | checktab CARG3
- | lwz PC, FRAME_PC(BASE)
- | bne ->fff_fallback
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | lfd f0, CFUNC:RB->upvalue[0]
- | cmplwi TAB:TMP2, 0
- | la RA, -8(BASE)
- | bne ->fff_fallback
-#else
- | lfd f0, CFUNC:RB->upvalue[0]
- | la RA, -8(BASE)
-#endif
- |.if DUALNUM
- | stw TISNUM, 8(BASE)
- |.else
- | stw ZERO, 8(BASE)
- |.endif
- | stw ZERO, 12(BASE)
- | li RD, (3+1)*8
- | stfd f0, 0(RA)
- | b ->fff_res
- |
- |//-- Base library: catch errors ----------------------------------------
- |
- |.ffunc pcall
- | cmplwi NARGS8:RC, 8
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | blt ->fff_fallback
- | mr TMP2, BASE
- | la BASE, 8(BASE)
- | // Remember active hook before pcall.
- | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
- | subi NARGS8:RC, NARGS8:RC, 8
- | addi PC, TMP3, 8+FRAME_PCALL
- | b ->vm_call_dispatch
- |
- |.ffunc xpcall
- | cmplwi NARGS8:RC, 16
- | lwz CARG4, 8(BASE)
- | lfd FARG2, 8(BASE)
- | lfd FARG1, 0(BASE)
- | blt ->fff_fallback
- | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH)
- | mr TMP2, BASE
- | checkfunc CARG4; bne ->fff_fallback // Traceback must be a function.
- | la BASE, 16(BASE)
- | // Remember active hook before pcall.
- | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31
- | stfd FARG2, 0(TMP2) // Swap function and traceback.
- | subi NARGS8:RC, NARGS8:RC, 16
- | stfd FARG1, 8(TMP2)
- | addi PC, TMP1, 16+FRAME_PCALL
- | b ->vm_call_dispatch
- |
- |//-- Coroutine library --------------------------------------------------
- |
- |.macro coroutine_resume_wrap, resume
- |.if resume
- |.ffunc_1 coroutine_resume
- | cmpwi CARG3, LJ_TTHREAD; bne ->fff_fallback
- |.else
- |.ffunc coroutine_wrap_aux
- | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
- |.endif
- | lbz TMP0, L:CARG1->status
- | lp TMP1, L:CARG1->cframe
- | lp CARG2, L:CARG1->top
- | cmplwi cr0, TMP0, LUA_YIELD
- | lp TMP2, L:CARG1->base
- | cmplwi cr1, TMP1, 0
- | lwz TMP0, L:CARG1->maxstack
- | cmplw cr7, CARG2, TMP2
- | lwz PC, FRAME_PC(BASE)
- | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
- | add TMP2, CARG2, NARGS8:RC
- | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
- | cmplw cr1, TMP2, TMP0
- | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
- | stw PC, SAVE_PC
- | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
- | stp BASE, L->base
- | blt cr6, ->fff_fallback
- |1:
- |.if resume
- | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
- | subi NARGS8:RC, NARGS8:RC, 8
- | subi TMP2, TMP2, 8
- |.endif
- | stp TMP2, L:CARG1->top
- | li TMP1, 0
- | stp BASE, L->top
- |2: // Move args to coroutine.
- | cmpw TMP1, NARGS8:RC
- | lfdx f0, BASE, TMP1
- | beq >3
- | stfdx f0, CARG2, TMP1
- | addi TMP1, TMP1, 8
- | b <2
- |3:
- | li CARG3, 0
- | mr L:SAVE0, L:CARG1
- | li CARG4, 0
- | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
- | // Returns thread status.
- |4:
- | lp TMP2, L:SAVE0->base
- | cmplwi CRET1, LUA_YIELD
- | lp TMP3, L:SAVE0->top
- | li_vmstate INTERP
- | lp BASE, L->base
- | st_vmstate
- | bgt >8
- | sub RD, TMP3, TMP2
- | lwz TMP0, L->maxstack
- | cmplwi RD, 0
- | add TMP1, BASE, RD
- | beq >6 // No results?
- | cmplw TMP1, TMP0
- | li TMP1, 0
- | bgt >9 // Need to grow stack?
- |
- | subi TMP3, RD, 8
- | stp TMP2, L:SAVE0->top // Clear coroutine stack.
- |5: // Move results from coroutine.
- | cmplw TMP1, TMP3
- | lfdx f0, TMP2, TMP1
- | stfdx f0, BASE, TMP1
- | addi TMP1, TMP1, 8
- | bne <5
- |6:
- | andix. TMP0, PC, FRAME_TYPE
- |.if resume
- | li TMP1, LJ_TTRUE
- | la RA, -8(BASE)
- | stw TMP1, -8(BASE) // Prepend true to results.
- | addi RD, RD, 16
- |.else
- | mr RA, BASE
- | addi RD, RD, 8
- |.endif
- |7:
- | stw PC, SAVE_PC
- | mr MULTRES, RD
- | beq ->BC_RET_Z
- | b ->vm_return
- |
- |8: // Coroutine returned with error (at co->top-1).
- |.if resume
- | andix. TMP0, PC, FRAME_TYPE
- | la TMP3, -8(TMP3)
- | li TMP1, LJ_TFALSE
- | lfd f0, 0(TMP3)
- | stp TMP3, L:SAVE0->top // Remove error from coroutine stack.
- | li RD, (2+1)*8
- | stw TMP1, -8(BASE) // Prepend false to results.
- | la RA, -8(BASE)
- | stfd f0, 0(BASE) // Copy error message.
- | b <7
- |.else
- | mr CARG1, L
- | mr CARG2, L:SAVE0
- | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
- |.endif
- |
- |9: // Handle stack expansion on return from yield.
- | mr CARG1, L
- | srwi CARG2, RD, 3
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | li CRET1, 0
- | b <4
- |.endmacro
- |
- | coroutine_resume_wrap 1 // coroutine.resume
- | coroutine_resume_wrap 0 // coroutine.wrap
- |
- |.ffunc coroutine_yield
- | lp TMP0, L->cframe
- | add TMP1, BASE, NARGS8:RC
- | stp BASE, L->base
- | andix. TMP0, TMP0, CFRAME_RESUME
- | stp TMP1, L->top
- | li CRET1, LUA_YIELD
- | beq ->fff_fallback
- | stp ZERO, L->cframe
- | stb CRET1, L->status
- | b ->vm_leave_unw
- |
- |//-- Math library -------------------------------------------------------
- |
- |.ffunc_1 math_abs
- | checknum CARG3
- |.if DUALNUM
- | bne >2
- | srawi TMP1, CARG1, 31
- | xor TMP2, TMP1, CARG1
- |.if GPR64
- | lus TMP0, 0x8000
- | sub CARG1, TMP2, TMP1
- | cmplw CARG1, TMP0
- | beq >1
- |.else
- | sub. CARG1, TMP2, TMP1
- | blt >1
- |.endif
- |->fff_resi:
- | lwz PC, FRAME_PC(BASE)
- | la RA, -8(BASE)
- | stw TISNUM, -8(BASE)
- | stw CRET1, -4(BASE)
- | b ->fff_res1
- |1:
- | lus CARG3, 0x41e0 // 2^31.
- | li CARG1, 0
- | b ->fff_restv
- |2:
- |.endif
- | bge ->fff_fallback
- | rlwinm CARG3, CARG3, 0, 1, 31
- | // Fallthrough.
- |
- |->fff_restv:
- | // CARG3/CARG1 = TValue result.
- | lwz PC, FRAME_PC(BASE)
- | stw CARG3, -8(BASE)
- | la RA, -8(BASE)
- | stw CARG1, -4(BASE)
- |->fff_res1:
- | // RA = results, PC = return.
- | li RD, (1+1)*8
- |->fff_res:
- | // RA = results, RD = (nresults+1)*8, PC = return.
- | andix. TMP0, PC, FRAME_TYPE
- | mr MULTRES, RD
- | bney ->vm_return
- | lwz INS, -4(PC)
- | decode_RB8 RB, INS
- |5:
- | cmplw RB, RD // More results expected?
- | decode_RA8 TMP0, INS
- | bgt >6
- | ins_next1
- | // Adjust BASE. KBASE is assumed to be set for the calling frame.
- | sub BASE, RA, TMP0
- | ins_next2
- |
- |6: // Fill up results with nil.
- | subi TMP1, RD, 8
- | addi RD, RD, 8
- | stwx TISNIL, RA, TMP1
- | b <5
- |
- |.macro math_extern, func
- | .ffunc_n math_ .. func
- | blex func
- | b ->fff_resn
- |.endmacro
- |
- |.macro math_extern2, func
- | .ffunc_nn math_ .. func
- | blex func
- | b ->fff_resn
- |.endmacro
- |
- |.macro math_round, func
- | .ffunc_1 math_ .. func
- | checknum CARG3; beqy ->fff_restv
- | rlwinm TMP2, CARG3, 12, 21, 31
- | bge ->fff_fallback
- | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
- | cmplwi cr1, TMP2, 31 // 0 <= exp < 31?
- | subfic TMP0, TMP2, 31
- | blt >3
- | slwi TMP1, CARG3, 11
- | srwi TMP3, CARG1, 21
- | oris TMP1, TMP1, 0x8000
- | addi TMP2, TMP2, 1
- | or TMP1, TMP1, TMP3
- | slwi CARG2, CARG1, 11
- | bge cr1, >4
- | slw TMP3, TMP1, TMP2
- | srw RD, TMP1, TMP0
- | or TMP3, TMP3, CARG2
- | srawi TMP2, CARG3, 31
- |.if "func" == "floor"
- | and TMP1, TMP3, TMP2
- | addic TMP0, TMP1, -1
- | subfe TMP1, TMP0, TMP1
- | add CARG1, RD, TMP1
- | xor CARG1, CARG1, TMP2
- | sub CARG1, CARG1, TMP2
- | b ->fff_resi
- |.else
- | andc TMP1, TMP3, TMP2
- | addic TMP0, TMP1, -1
- | subfe TMP1, TMP0, TMP1
- | add CARG1, RD, TMP1
- | cmpw CARG1, RD
- | xor CARG1, CARG1, TMP2
- | sub CARG1, CARG1, TMP2
- | bge ->fff_resi
- | // Overflow to 2^31.
- | lus CARG3, 0x41e0 // 2^31.
- | li CARG1, 0
- | b ->fff_restv
- |.endif
- |3: // |x| < 1
- | slwi TMP2, CARG3, 1
- | srawi TMP1, CARG3, 31
- | or TMP2, CARG1, TMP2 // ztest = (hi+hi) | lo
- |.if "func" == "floor"
- | and TMP1, TMP2, TMP1 // (ztest & sign) == 0 ? 0 : -1
- | subfic TMP2, TMP1, 0
- | subfe CARG1, CARG1, CARG1
- |.else
- | andc TMP1, TMP2, TMP1 // (ztest & ~sign) == 0 ? 0 : 1
- | addic TMP2, TMP1, -1
- | subfe CARG1, TMP2, TMP1
- |.endif
- | b ->fff_resi
- |4: // exp >= 31. Check for -(2^31).
- | xoris TMP1, TMP1, 0x8000
- | srawi TMP2, CARG3, 31
- |.if "func" == "floor"
- | or TMP1, TMP1, CARG2
- |.endif
- |.if PPE
- | orc TMP1, TMP1, TMP2
- | cmpwi TMP1, 0
- |.else
- | orc. TMP1, TMP1, TMP2
- |.endif
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
- | lus CARG1, 0x8000 // -(2^31).
- | beqy ->fff_resi
- |5:
- | lfd FARG1, 0(BASE)
- | blex func
- | b ->fff_resn
- |.endmacro
- |
- |.if DUALNUM
- | math_round floor
- | math_round ceil
- |.else
- | // NYI: use internal implementation.
- | math_extern floor
- | math_extern ceil
- |.endif
- |
- |.if SQRT
- |.ffunc_n math_sqrt
- | fsqrt FARG1, FARG1
- | b ->fff_resn
- |.else
- | math_extern sqrt
- |.endif
- |
- |.ffunc math_log
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- | lfd FARG1, 0(BASE)
- | bne ->fff_fallback // Need exactly 1 argument.
- | checknum CARG3; bge ->fff_fallback
- | blex log
- | b ->fff_resn
- |
- | math_extern log10
- | math_extern exp
- | math_extern sin
- | math_extern cos
- | math_extern tan
- | math_extern asin
- | math_extern acos
- | math_extern atan
- | math_extern sinh
- | math_extern cosh
- | math_extern tanh
- | math_extern2 pow
- | math_extern2 atan2
- | math_extern2 fmod
- |
- |->ff_math_deg:
- |.ffunc_n math_rad
- | lfd FARG2, CFUNC:RB->upvalue[0]
- | fmul FARG1, FARG1, FARG2
- | b ->fff_resn
- |
- |.if DUALNUM
- |.ffunc math_ldexp
- | cmplwi NARGS8:RC, 16
- | lwz CARG3, 0(BASE)
- | lfd FARG1, 0(BASE)
- | lwz CARG4, 8(BASE)
- |.if GPR64
- | lwz CARG2, 12(BASE)
- |.else
- | lwz CARG1, 12(BASE)
- |.endif
- | blt ->fff_fallback
- | checknum CARG3; bge ->fff_fallback
- | checknum CARG4; bne ->fff_fallback
- |.else
- |.ffunc_nn math_ldexp
- |.if GPR64
- | toint CARG2, FARG2
- |.else
- | toint CARG1, FARG2
- |.endif
- |.endif
- | blex ldexp
- | b ->fff_resn
- |
- |.ffunc_n math_frexp
- |.if GPR64
- | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
- |.else
- | la CARG1, DISPATCH_GL(tmptv)(DISPATCH)
- |.endif
- | lwz PC, FRAME_PC(BASE)
- | blex frexp
- | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
- | la RA, -8(BASE)
- |.if not DUALNUM
- | tonum_i FARG2, TMP1
- |.endif
- | stfd FARG1, 0(RA)
- | li RD, (2+1)*8
- |.if DUALNUM
- | stw TISNUM, 8(RA)
- | stw TMP1, 12(RA)
- |.else
- | stfd FARG2, 8(RA)
- |.endif
- | b ->fff_res
- |
- |.ffunc_n math_modf
- |.if GPR64
- | la CARG2, -8(BASE)
- |.else
- | la CARG1, -8(BASE)
- |.endif
- | lwz PC, FRAME_PC(BASE)
- | blex modf
- | la RA, -8(BASE)
- | stfd FARG1, 0(BASE)
- | li RD, (2+1)*8
- | b ->fff_res
- |
- |.macro math_minmax, name, ismax
- |.if DUALNUM
- | .ffunc_1 name
- | checknum CARG3
- | addi TMP1, BASE, 8
- | add TMP2, BASE, NARGS8:RC
- | bne >4
- |1: // Handle integers.
- | lwz CARG4, 0(TMP1)
- | cmplw cr1, TMP1, TMP2
- | lwz CARG2, 4(TMP1)
- | bge cr1, ->fff_resi
- | checknum CARG4
- | xoris TMP0, CARG1, 0x8000
- | xoris TMP3, CARG2, 0x8000
- | bne >3
- | subfc TMP3, TMP3, TMP0
- | subfe TMP0, TMP0, TMP0
- |.if ismax
- | andc TMP3, TMP3, TMP0
- |.else
- | and TMP3, TMP3, TMP0
- |.endif
- | add CARG1, TMP3, CARG2
- |.if GPR64
- | rldicl CARG1, CARG1, 0, 32
- |.endif
- | addi TMP1, TMP1, 8
- | b <1
- |3:
- | bge ->fff_fallback
- | // Convert intermediate result to number and continue below.
- | tonum_i FARG1, CARG1
- | lfd FARG2, 0(TMP1)
- | b >6
- |4:
- | lfd FARG1, 0(BASE)
- | bge ->fff_fallback
- |5: // Handle numbers.
- | lwz CARG4, 0(TMP1)
- | cmplw cr1, TMP1, TMP2
- | lfd FARG2, 0(TMP1)
- | bge cr1, ->fff_resn
- | checknum CARG4; bge >7
- |6:
- | fsub f0, FARG1, FARG2
- | addi TMP1, TMP1, 8
- |.if ismax
- | fsel FARG1, f0, FARG1, FARG2
- |.else
- | fsel FARG1, f0, FARG2, FARG1
- |.endif
- | b <5
- |7: // Convert integer to number and continue above.
- | lwz CARG2, 4(TMP1)
- | bne ->fff_fallback
- | tonum_i FARG2, CARG2
- | b <6
- |.else
- | .ffunc_n name
- | li TMP1, 8
- |1:
- | lwzx CARG2, BASE, TMP1
- | lfdx FARG2, BASE, TMP1
- | cmplw cr1, TMP1, NARGS8:RC
- | checknum CARG2
- | bge cr1, ->fff_resn
- | bge ->fff_fallback
- | fsub f0, FARG1, FARG2
- | addi TMP1, TMP1, 8
- |.if ismax
- | fsel FARG1, f0, FARG1, FARG2
- |.else
- | fsel FARG1, f0, FARG2, FARG1
- |.endif
- | b <1
- |.endif
- |.endmacro
- |
- | math_minmax math_min, 0
- | math_minmax math_max, 1
- |
- |//-- String library -----------------------------------------------------
- |
- |.ffunc_1 string_len
- | checkstr CARG3; bne ->fff_fallback
- | lwz CRET1, STR:CARG1->len
- | b ->fff_resi
- |
- |.ffunc string_byte // Only handle the 1-arg case here.
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- | lwz STR:CARG1, 4(BASE)
- | bne ->fff_fallback // Need exactly 1 argument.
- | checkstr CARG3
- | bne ->fff_fallback
- | lwz TMP0, STR:CARG1->len
- |.if DUALNUM
- | lbz CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
- | li RD, (0+1)*8
- | lwz PC, FRAME_PC(BASE)
- | cmplwi TMP0, 0
- | la RA, -8(BASE)
- | beqy ->fff_res
- | b ->fff_resi
- |.else
- | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
- | addic TMP3, TMP0, -1 // RD = ((str->len != 0)+1)*8
- | subfe RD, TMP3, TMP0
- | stw TMP1, TONUM_LO // Inlined tonum_u f0, TMP1.
- | addi RD, RD, 1
- | lfd f0, TONUM_D
- | la RA, -8(BASE)
- | lwz PC, FRAME_PC(BASE)
- | fsub f0, f0, TOBIT
- | slwi RD, RD, 3
- | stfd f0, 0(RA)
- | b ->fff_res
- |.endif
- |
- |.ffunc string_char // Only handle the 1-arg case here.
- | ffgccheck
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- |.if DUALNUM
- | lwz TMP0, 4(BASE)
- | bne ->fff_fallback // Exactly 1 argument.
- | checknum CARG3; bne ->fff_fallback
- | la CARG2, 7(BASE)
- |.else
- | lfd FARG1, 0(BASE)
- | bne ->fff_fallback // Exactly 1 argument.
- | checknum CARG3; bge ->fff_fallback
- | toint TMP0, FARG1
- | la CARG2, TMPD_BLO
- |.endif
- | li CARG3, 1
- | cmplwi TMP0, 255; bgt ->fff_fallback
- |->fff_newstr:
- | mr CARG1, L
- | stp BASE, L->base
- | stw PC, SAVE_PC
- | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
- | // Returns GCstr *.
- | lp BASE, L->base
- | li CARG3, LJ_TSTR
- | b ->fff_restv
- |
- |.ffunc string_sub
- | ffgccheck
- | cmplwi NARGS8:RC, 16
- | lwz CARG3, 16(BASE)
- |.if not DUALNUM
- | lfd f0, 16(BASE)
- |.endif
- | lwz TMP0, 0(BASE)
- | lwz STR:CARG1, 4(BASE)
- | blt ->fff_fallback
- | lwz CARG2, 8(BASE)
- |.if DUALNUM
- | lwz TMP1, 12(BASE)
- |.else
- | lfd f1, 8(BASE)
- |.endif
- | li TMP2, -1
- | beq >1
- |.if DUALNUM
- | checknum CARG3
- | lwz TMP2, 20(BASE)
- | bne ->fff_fallback
- |1:
- | checknum CARG2; bne ->fff_fallback
- |.else
- | checknum CARG3; bge ->fff_fallback
- | toint TMP2, f0
- |1:
- | checknum CARG2; bge ->fff_fallback
- |.endif
- | checkstr TMP0; bne ->fff_fallback
- |.if not DUALNUM
- | toint TMP1, f1
- |.endif
- | lwz TMP0, STR:CARG1->len
- | cmplw TMP0, TMP2 // len < end? (unsigned compare)
- | addi TMP3, TMP2, 1
- | blt >5
- |2:
- | cmpwi TMP1, 0 // start <= 0?
- | add TMP3, TMP1, TMP0
- | ble >7
- |3:
- | sub CARG3, TMP2, TMP1
- | addi CARG2, STR:CARG1, #STR-1
- | srawi TMP0, CARG3, 31
- | addi CARG3, CARG3, 1
- | add CARG2, CARG2, TMP1
- | andc CARG3, CARG3, TMP0
- |.if GPR64
- | rldicl CARG2, CARG2, 0, 32
- | rldicl CARG3, CARG3, 0, 32
- |.endif
- | b ->fff_newstr
- |
- |5: // Negative end or overflow.
- | cmpw TMP0, TMP2 // len >= end? (signed compare)
- | add TMP2, TMP0, TMP3 // Negative end: end = end+len+1.
- | bge <2
- | mr TMP2, TMP0 // Overflow: end = len.
- | b <2
- |
- |7: // Negative start or underflow.
- | .gpr64 extsw TMP1, TMP1
- | addic CARG3, TMP1, -1
- | subfe CARG3, CARG3, CARG3
- | srawi CARG2, TMP3, 31 // Note: modifies carry.
- | andc TMP3, TMP3, CARG3
- | andc TMP1, TMP3, CARG2
- | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
- | b <3
- |
- |.ffunc string_rep // Only handle the 1-char case inline.
- | ffgccheck
- | cmplwi NARGS8:RC, 16
- | lwz TMP0, 0(BASE)
- | lwz STR:CARG1, 4(BASE)
- | lwz CARG4, 8(BASE)
- |.if DUALNUM
- | lwz CARG3, 12(BASE)
- |.else
- | lfd FARG2, 8(BASE)
- |.endif
- | bne ->fff_fallback // Exactly 2 arguments.
- | checkstr TMP0; bne ->fff_fallback
- |.if DUALNUM
- | checknum CARG4; bne ->fff_fallback
- |.else
- | checknum CARG4; bge ->fff_fallback
- | toint CARG3, FARG2
- |.endif
- | lwz TMP0, STR:CARG1->len
- | cmpwi CARG3, 0
- | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | ble >2 // Count <= 0? (or non-int)
- | cmplwi TMP0, 1
- | subi TMP2, CARG3, 1
- | blt >2 // Zero length string?
- | cmplw cr1, TMP1, CARG3
- | bne ->fff_fallback // Fallback for > 1-char strings.
- | lbz TMP0, STR:CARG1[1]
- | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | blt cr1, ->fff_fallback
- |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
- | cmplwi TMP2, 0
- | stbx TMP0, CARG2, TMP2
- | subi TMP2, TMP2, 1
- | bne <1
- | b ->fff_newstr
- |2: // Return empty string.
- | la STR:CARG1, DISPATCH_GL(strempty)(DISPATCH)
- | li CARG3, LJ_TSTR
- | b ->fff_restv
- |
- |.ffunc string_reverse
- | ffgccheck
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- | lwz STR:CARG1, 4(BASE)
- | blt ->fff_fallback
- | checkstr CARG3
- | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | bne ->fff_fallback
- | lwz CARG3, STR:CARG1->len
- | la CARG1, #STR(STR:CARG1)
- | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | li TMP2, 0
- | cmplw TMP1, CARG3
- | subi TMP3, CARG3, 1
- | blt ->fff_fallback
- |1: // Reverse string copy.
- | cmpwi TMP3, 0
- | lbzx TMP1, CARG1, TMP2
- | blty ->fff_newstr
- | stbx TMP1, CARG2, TMP3
- | subi TMP3, TMP3, 1
- | addi TMP2, TMP2, 1
- | b <1
- |
- |.macro ffstring_case, name, lo
- | .ffunc name
- | ffgccheck
- | cmplwi NARGS8:RC, 8
- | lwz CARG3, 0(BASE)
- | lwz STR:CARG1, 4(BASE)
- | blt ->fff_fallback
- | checkstr CARG3
- | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | bne ->fff_fallback
- | lwz CARG3, STR:CARG1->len
- | la CARG1, #STR(STR:CARG1)
- | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | cmplw TMP1, CARG3
- | li TMP2, 0
- | blt ->fff_fallback
- |1: // ASCII case conversion.
- | cmplw TMP2, CARG3
- | lbzx TMP1, CARG1, TMP2
- | bgey ->fff_newstr
- | subi TMP0, TMP1, lo
- | xori TMP3, TMP1, 0x20
- | addic TMP0, TMP0, -26
- | subfe TMP3, TMP3, TMP3
- | rlwinm TMP3, TMP3, 0, 26, 26 // x &= 0x20.
- | xor TMP1, TMP1, TMP3
- | stbx TMP1, CARG2, TMP2
- | addi TMP2, TMP2, 1
- | b <1
- |.endmacro
- |
- |ffstring_case string_lower, 65
- |ffstring_case string_upper, 97
- |
- |//-- Table library ------------------------------------------------------
- |
- |.ffunc_1 table_getn
- | checktab CARG3; bne ->fff_fallback
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | b ->fff_resi
- |
- |//-- Bit library --------------------------------------------------------
- |
- |.macro .ffunc_bit, name
- |.if DUALNUM
- | .ffunc_1 bit_..name
- | checknum CARG3; bnel ->fff_tobit_fb
- |.else
- | .ffunc_n bit_..name
- | fadd FARG1, FARG1, TOBIT
- | stfd FARG1, TMPD
- | lwz CARG1, TMPD_LO
- |.endif
- |.endmacro
- |
- |.macro .ffunc_bit_op, name, ins
- | .ffunc_bit name
- | addi TMP1, BASE, 8
- | add TMP2, BASE, NARGS8:RC
- |1:
- | lwz CARG4, 0(TMP1)
- | cmplw cr1, TMP1, TMP2
- |.if DUALNUM
- | lwz CARG2, 4(TMP1)
- |.else
- | lfd FARG1, 0(TMP1)
- |.endif
- | bgey cr1, ->fff_resi
- | checknum CARG4
- |.if DUALNUM
- | bnel ->fff_bitop_fb
- |.else
- | fadd FARG1, FARG1, TOBIT
- | bge ->fff_fallback
- | stfd FARG1, TMPD
- | lwz CARG2, TMPD_LO
- |.endif
- | ins CARG1, CARG1, CARG2
- | addi TMP1, TMP1, 8
- | b <1
- |.endmacro
- |
- |.ffunc_bit_op band, and
- |.ffunc_bit_op bor, or
- |.ffunc_bit_op bxor, xor
- |
- |.ffunc_bit bswap
- | rotlwi TMP0, CARG1, 8
- | rlwimi TMP0, CARG1, 24, 0, 7
- | rlwimi TMP0, CARG1, 24, 16, 23
- | mr CRET1, TMP0
- | b ->fff_resi
- |
- |.ffunc_bit bnot
- | not CRET1, CARG1
- | b ->fff_resi
- |
- |.macro .ffunc_bit_sh, name, ins, shmod
- |.if DUALNUM
- | .ffunc_2 bit_..name
- | checknum CARG3; bnel ->fff_tobit_fb
- | // Note: no inline conversion from number for 2nd argument!
- | checknum CARG4; bne ->fff_fallback
- |.else
- | .ffunc_nn bit_..name
- | fadd FARG1, FARG1, TOBIT
- | fadd FARG2, FARG2, TOBIT
- | stfd FARG1, TMPD
- | lwz CARG1, TMPD_LO
- | stfd FARG2, TMPD
- | lwz CARG2, TMPD_LO
- |.endif
- |.if shmod == 1
- | rlwinm CARG2, CARG2, 0, 27, 31
- |.elif shmod == 2
- | neg CARG2, CARG2
- |.endif
- | ins CRET1, CARG1, CARG2
- | b ->fff_resi
- |.endmacro
- |
- |.ffunc_bit_sh lshift, slw, 1
- |.ffunc_bit_sh rshift, srw, 1
- |.ffunc_bit_sh arshift, sraw, 1
- |.ffunc_bit_sh rol, rotlw, 0
- |.ffunc_bit_sh ror, rotlw, 2
- |
- |.ffunc_bit tobit
- |.if DUALNUM
- | b ->fff_resi
- |.else
- |->fff_resi:
- | tonum_i FARG1, CRET1
- |.endif
- |->fff_resn:
- | lwz PC, FRAME_PC(BASE)
- | la RA, -8(BASE)
- | stfd FARG1, -8(BASE)
- | b ->fff_res1
- |
- |// Fallback FP number to bit conversion.
- |->fff_tobit_fb:
- |.if DUALNUM
- | lfd FARG1, 0(BASE)
- | bgt ->fff_fallback
- | fadd FARG1, FARG1, TOBIT
- | stfd FARG1, TMPD
- | lwz CARG1, TMPD_LO
- | blr
- |.endif
- |->fff_bitop_fb:
- |.if DUALNUM
- | lfd FARG1, 0(TMP1)
- | bgt ->fff_fallback
- | fadd FARG1, FARG1, TOBIT
- | stfd FARG1, TMPD
- | lwz CARG2, TMPD_LO
- | blr
- |.endif
- |
- |//-----------------------------------------------------------------------
- |
- |->fff_fallback: // Call fast function fallback handler.
- | // BASE = new base, RB = CFUNC, RC = nargs*8
- | lp TMP3, CFUNC:RB->f
- | add TMP1, BASE, NARGS8:RC
- | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
- | addi TMP0, TMP1, 8*LUA_MINSTACK
- | lwz TMP2, L->maxstack
- | stw PC, SAVE_PC // Redundant (but a defined value).
- | .toc lp TMP3, 0(TMP3)
- | cmplw TMP0, TMP2
- | stp BASE, L->base
- | stp TMP1, L->top
- | mr CARG1, L
- | bgt >5 // Need to grow stack.
- | mtctr TMP3
- | bctrl // (lua_State *L)
- | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
- | lp BASE, L->base
- | cmpwi CRET1, 0
- | slwi RD, CRET1, 3
- | la RA, -8(BASE)
- | bgt ->fff_res // Returned nresults+1?
- |1: // Returned 0 or -1: retry fast path.
- | lp TMP0, L->top
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | sub NARGS8:RC, TMP0, BASE
- | bne ->vm_call_tail // Returned -1?
- | ins_callt // Returned 0: retry fast path.
- |
- |// Reconstruct previous base for vmeta_call during tailcall.
- |->vm_call_tail:
- | andix. TMP0, PC, FRAME_TYPE
- | rlwinm TMP1, PC, 0, 0, 28
- | bne >3
- | lwz INS, -4(PC)
- | decode_RA8 TMP1, INS
- | addi TMP1, TMP1, 8
- |3:
- | sub TMP2, BASE, TMP1
- | b ->vm_call_dispatch // Resolve again for tailcall.
- |
- |5: // Grow stack for fallback handler.
- | li CARG2, LUA_MINSTACK
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lp BASE, L->base
- | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
- | b <1
- |
- |->fff_gcstep: // Call GC step function.
- | // BASE = new base, RC = nargs*8
- | mflr SAVE0
- | stp BASE, L->base
- | add TMP0, BASE, NARGS8:RC
- | stw PC, SAVE_PC // Redundant (but a defined value).
- | stp TMP0, L->top
- | mr CARG1, L
- | bl extern lj_gc_step // (lua_State *L)
- | lp BASE, L->base
- | mtlr SAVE0
- | lp TMP0, L->top
- | sub NARGS8:RC, TMP0, BASE
- | lwz CFUNC:RB, FRAME_FUNC(BASE)
- | blr
- |
- |//-----------------------------------------------------------------------
- |//-- Special dispatch targets -------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_record: // Dispatch target for recording phase.
- |.if JIT
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | andix. TMP0, TMP3, HOOK_VMEVENT // No recording while in vmevent.
- | bne >5
- | // Decrement the hookcount for consistency, but always do the call.
- | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | andix. TMP0, TMP3, HOOK_ACTIVE
- | bne >1
- | subi TMP2, TMP2, 1
- | andi. TMP0, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
- | beqy >1
- | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | b >1
- |.endif
- |
- |->vm_rethook: // Dispatch target for return hooks.
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
- | beq >1
- |5: // Re-dispatch to static ins.
- | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OPP TMP1, INS.
- | lpx TMP0, DISPATCH, TMP1
- | mtctr TMP0
- | bctr
- |
- |->vm_inshook: // Dispatch target for instr/line hooks.
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
- | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
- | bne <5
- |
- | cmpwi cr1, TMP0, 0
- | addic. TMP2, TMP2, -1
- | beq cr1, <5
- | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | beq >1
- | bge cr1, <5
- |1:
- | mr CARG1, L
- | stw MULTRES, SAVE_MULTRES
- | mr CARG2, PC
- | stp BASE, L->base
- | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
- | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
- |3:
- | lp BASE, L->base
- |4: // Re-dispatch to static ins.
- | lwz INS, -4(PC)
- | decode_OPP TMP1, INS
- | decode_RB8 RB, INS
- | addi TMP1, TMP1, GG_DISP2STATIC
- | decode_RD8 RD, INS
- | lpx TMP0, DISPATCH, TMP1
- | decode_RA8 RA, INS
- | decode_RC8 RC, INS
- | mtctr TMP0
- | bctr
- |
- |->cont_hook: // Continue from hook yield.
- | addi PC, PC, 4
- | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
- | b <4
- |
- |->vm_hotloop: // Hot loop counter underflow.
- |.if JIT
- | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
- | addi CARG1, DISPATCH, GG_DISP2J
- | stw PC, SAVE_PC
- | lwz TMP1, LFUNC:TMP1->pc
- | mr CARG2, PC
- | stw L, DISPATCH_J(L)(DISPATCH)
- | lbz TMP1, PC2PROTO(framesize)(TMP1)
- | stp BASE, L->base
- | slwi TMP1, TMP1, 3
- | add TMP1, BASE, TMP1
- | stp TMP1, L->top
- | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
- | b <3
- |.endif
- |
- |->vm_callhook: // Dispatch target for call hooks.
- | mr CARG2, PC
- |.if JIT
- | b >1
- |.endif
- |
- |->vm_hotcall: // Hot call counter underflow.
- |.if JIT
- | ori CARG2, PC, 1
- |1:
- |.endif
- | add TMP0, BASE, RC
- | stw PC, SAVE_PC
- | mr CARG1, L
- | stp BASE, L->base
- | sub RA, RA, BASE
- | stp TMP0, L->top
- | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
- | // Returns ASMFunction.
- | lp BASE, L->base
- | lp TMP0, L->top
- | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
- | sub NARGS8:RC, TMP0, BASE
- | add RA, BASE, RA
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | lwz INS, -4(PC)
- | mtctr CRET1
- | bctr
- |
- |//-----------------------------------------------------------------------
- |//-- Trace exit handler -------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |.macro savex_, a, b, c, d
- | stfd f..a, 16+a*8(sp)
- | stfd f..b, 16+b*8(sp)
- | stfd f..c, 16+c*8(sp)
- | stfd f..d, 16+d*8(sp)
- |.endmacro
- |
- |->vm_exit_handler:
- |.if JIT
- | addi sp, sp, -(16+32*8+32*4)
- | stmw r2, 16+32*8+2*4(sp)
- | addi DISPATCH, JGL, -GG_DISP2G-32768
- | li CARG2, ~LJ_VMST_EXIT
- | lwz CARG1, 16+32*8+32*4(sp) // Get stack chain.
- | stw CARG2, DISPATCH_GL(vmstate)(DISPATCH)
- | savex_ 0,1,2,3
- | stw CARG1, 0(sp) // Store extended stack chain.
- | clrso TMP1
- | savex_ 4,5,6,7
- | addi CARG2, sp, 16+32*8+32*4 // Recompute original value of sp.
- | savex_ 8,9,10,11
- | stw CARG2, 16+32*8+1*4(sp) // Store sp in RID_SP.
- | savex_ 12,13,14,15
- | mflr CARG3
- | li TMP1, 0
- | savex_ 16,17,18,19
- | stw TMP1, 16+32*8+0*4(sp) // Clear RID_TMP.
- | savex_ 20,21,22,23
- | lhz CARG4, 2(CARG3) // Load trace number.
- | savex_ 24,25,26,27
- | lwz L, DISPATCH_GL(jit_L)(DISPATCH)
- | savex_ 28,29,30,31
- | sub CARG3, TMP0, CARG3 // Compute exit number.
- | lp BASE, DISPATCH_GL(jit_base)(DISPATCH)
- | srwi CARG3, CARG3, 2
- | stw L, DISPATCH_J(L)(DISPATCH)
- | subi CARG3, CARG3, 2
- | stw TMP1, DISPATCH_GL(jit_L)(DISPATCH)
- | stw CARG4, DISPATCH_J(parent)(DISPATCH)
- | stp BASE, L->base
- | addi CARG1, DISPATCH, GG_DISP2J
- | stw CARG3, DISPATCH_J(exitno)(DISPATCH)
- | addi CARG2, sp, 16
- | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
- | // Returns MULTRES (unscaled) or negated error code.
- | lp TMP1, L->cframe
- | lwz TMP2, 0(sp)
- | lp BASE, L->base
- |.if GPR64
- | rldicr sp, TMP1, 0, 61
- |.else
- | rlwinm sp, TMP1, 0, 0, 29
- |.endif
- | lwz PC, SAVE_PC // Get SAVE_PC.
- | stw TMP2, 0(sp)
- | stw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
- | b >1
- |.endif
- |->vm_exit_interp:
- |.if JIT
- | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set.
- | lwz L, SAVE_L
- | addi DISPATCH, JGL, -GG_DISP2G-32768
- |1:
- | cmpwi CARG1, 0
- | blt >3 // Check for error from exit.
- | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
- | slwi MULTRES, CARG1, 3
- | li TMP2, 0
- | stw MULTRES, SAVE_MULTRES
- | lwz TMP1, LFUNC:TMP1->pc
- | stw TMP2, DISPATCH_GL(jit_L)(DISPATCH)
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | // Setup type comparison constants.
- | li TISNUM, LJ_TISNUM
- | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | stw TMP3, TMPD
- | li ZERO, 0
- | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
- | lfs TOBIT, TMPD
- | stw TMP3, TMPD
- | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
- | li TISNIL, LJ_TNIL
- | stw TMP0, TONUM_HI
- | lfs TONUM, TMPD
- | // Modified copy of ins_next which handles function header dispatch, too.
- | lwz INS, 0(PC)
- | addi PC, PC, 4
- | // Assumes TISNIL == ~LJ_VMST_INTERP == -1.
- | stw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
- | decode_OPP TMP1, INS
- | decode_RA8 RA, INS
- | lpx TMP0, DISPATCH, TMP1
- | mtctr TMP0
- | cmplwi TMP1, BC_FUNCF*4 // Function header?
- | bge >2
- | decode_RB8 RB, INS
- | decode_RD8 RD, INS
- | decode_RC8 RC, INS
- | bctr
- |2:
- | subi RC, MULTRES, 8
- | add RA, RA, BASE
- | bctr
- |
- |3: // Rethrow error from the right C frame.
- | neg CARG2, CARG1
- | mr CARG1, L
- | bl extern lj_err_throw // (lua_State *L, int errcode)
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Math helper functions ----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// NYI: Use internal implementations of floor, ceil, trunc.
- |
- |->vm_modi:
- | divwo. TMP0, CARG1, CARG2
- | bso >1
- |.if GPR64
- | xor CARG3, CARG1, CARG2
- | cmpwi CARG3, 0
- |.else
- | xor. CARG3, CARG1, CARG2
- |.endif
- | mullw TMP0, TMP0, CARG2
- | sub CARG1, CARG1, TMP0
- | bgelr
- | cmpwi CARG1, 0; beqlr
- | add CARG1, CARG1, CARG2
- | blr
- |1:
- | cmpwi CARG2, 0
- | li CARG1, 0
- | beqlr
- | clrso TMP0 // Clear SO for -2147483648 % -1 and return 0.
- | blr
- |
- |//-----------------------------------------------------------------------
- |//-- Miscellaneous functions --------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// void lj_vm_cachesync(void *start, void *end)
- |// Flush D-Cache and invalidate I-Cache. Assumes 32 byte cache line size.
- |// This is a good lower bound, except for very ancient PPC models.
- |->vm_cachesync:
- |.if JIT or FFI
- | // Compute start of first cache line and number of cache lines.
- | rlwinm CARG1, CARG1, 0, 0, 26
- | sub CARG2, CARG2, CARG1
- | addi CARG2, CARG2, 31
- | rlwinm. CARG2, CARG2, 27, 5, 31
- | beqlr
- | mtctr CARG2
- | mr CARG3, CARG1
- |1: // Flush D-Cache.
- | dcbst r0, CARG1
- | addi CARG1, CARG1, 32
- | bdnz <1
- | sync
- | mtctr CARG2
- |1: // Invalidate I-Cache.
- | icbi r0, CARG3
- | addi CARG3, CARG3, 32
- | bdnz <1
- | isync
- | blr
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- FFI helper functions -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// Handler for callback functions. Callback slot number in r11, g in r12.
- |->vm_ffi_callback:
- |.if FFI
- |.type CTSTATE, CTState, PC
- | saveregs
- | lwz CTSTATE, GL:r12->ctype_state
- | addi DISPATCH, r12, GG_G2DISP
- | stw r11, CTSTATE->cb.slot
- | stw r3, CTSTATE->cb.gpr[0]
- | stfd f1, CTSTATE->cb.fpr[0]
- | stw r4, CTSTATE->cb.gpr[1]
- | stfd f2, CTSTATE->cb.fpr[1]
- | stw r5, CTSTATE->cb.gpr[2]
- | stfd f3, CTSTATE->cb.fpr[2]
- | stw r6, CTSTATE->cb.gpr[3]
- | stfd f4, CTSTATE->cb.fpr[3]
- | stw r7, CTSTATE->cb.gpr[4]
- | stfd f5, CTSTATE->cb.fpr[4]
- | stw r8, CTSTATE->cb.gpr[5]
- | stfd f6, CTSTATE->cb.fpr[5]
- | stw r9, CTSTATE->cb.gpr[6]
- | stfd f7, CTSTATE->cb.fpr[6]
- | stw r10, CTSTATE->cb.gpr[7]
- | stfd f8, CTSTATE->cb.fpr[7]
- | addi TMP0, sp, CFRAME_SPACE+8
- | stw TMP0, CTSTATE->cb.stack
- | mr CARG1, CTSTATE
- | stw CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
- | mr CARG2, sp
- | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
- | // Returns lua_State *.
- | lp BASE, L:CRET1->base
- | li TISNUM, LJ_TISNUM // Setup type comparison constants.
- | lp RC, L:CRET1->top
- | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
- | li ZERO, 0
- | mr L, CRET1
- | stw TMP3, TMPD
- | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
- | stw TMP0, TONUM_HI
- | li TISNIL, LJ_TNIL
- | li_vmstate INTERP
- | lfs TOBIT, TMPD
- | stw TMP3, TMPD
- | sub RC, RC, BASE
- | st_vmstate
- | lfs TONUM, TMPD
- | ins_callt
- |.endif
- |
- |->cont_ffi_callback: // Return from FFI callback.
- |.if FFI
- | lwz CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
- | stp BASE, L->base
- | stp RB, L->top
- | stp L, CTSTATE->L
- | mr CARG1, CTSTATE
- | mr CARG2, RA
- | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
- | lwz CRET1, CTSTATE->cb.gpr[0]
- | lfd FARG1, CTSTATE->cb.fpr[0]
- | lwz CRET2, CTSTATE->cb.gpr[1]
- | b ->vm_leave_unw
- |.endif
- |
- |->vm_ffi_call: // Call C function via FFI.
- | // Caveat: needs special frame unwinding, see below.
- |.if FFI
- | .type CCSTATE, CCallState, CARG1
- | lwz TMP1, CCSTATE->spadj
- | mflr TMP0
- | lbz CARG2, CCSTATE->nsp
- | lbz CARG3, CCSTATE->nfpr
- | neg TMP1, TMP1
- | stw TMP0, 4(sp)
- | cmpwi cr1, CARG3, 0
- | mr TMP2, sp
- | addic. CARG2, CARG2, -1
- | stwux sp, sp, TMP1
- | crnot 4*cr1+eq, 4*cr1+eq // For vararg calls.
- | stw r14, -4(TMP2)
- | stw CCSTATE, -8(TMP2)
- | mr r14, TMP2
- | la TMP1, CCSTATE->stack
- | slwi CARG2, CARG2, 2
- | blty >2
- | la TMP2, 8(sp)
- |1:
- | lwzx TMP0, TMP1, CARG2
- | stwx TMP0, TMP2, CARG2
- | addic. CARG2, CARG2, -4
- | bge <1
- |2:
- | bney cr1, >3
- | lfd f1, CCSTATE->fpr[0]
- | lfd f2, CCSTATE->fpr[1]
- | lfd f3, CCSTATE->fpr[2]
- | lfd f4, CCSTATE->fpr[3]
- | lfd f5, CCSTATE->fpr[4]
- | lfd f6, CCSTATE->fpr[5]
- | lfd f7, CCSTATE->fpr[6]
- | lfd f8, CCSTATE->fpr[7]
- |3:
- | lp TMP0, CCSTATE->func
- | lwz CARG2, CCSTATE->gpr[1]
- | lwz CARG3, CCSTATE->gpr[2]
- | lwz CARG4, CCSTATE->gpr[3]
- | lwz CARG5, CCSTATE->gpr[4]
- | mtctr TMP0
- | lwz r8, CCSTATE->gpr[5]
- | lwz r9, CCSTATE->gpr[6]
- | lwz r10, CCSTATE->gpr[7]
- | lwz CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
- | bctrl
- | lwz CCSTATE:TMP1, -8(r14)
- | lwz TMP2, -4(r14)
- | lwz TMP0, 4(r14)
- | stw CARG1, CCSTATE:TMP1->gpr[0]
- | stfd FARG1, CCSTATE:TMP1->fpr[0]
- | stw CARG2, CCSTATE:TMP1->gpr[1]
- | mtlr TMP0
- | stw CARG3, CCSTATE:TMP1->gpr[2]
- | mr sp, r14
- | stw CARG4, CCSTATE:TMP1->gpr[3]
- | mr r14, TMP2
- | blr
- |.endif
- |// Note: vm_ffi_call must be the last function in this object file!
- |
- |//-----------------------------------------------------------------------
-}
-
-/* Generate the code for a single instruction. */
-static void build_ins(BuildCtx *ctx, BCOp op, int defop)
-{
- int vk = 0;
- |=>defop:
-
- switch (op) {
-
- /* -- Comparison ops ---------------------------------------------------- */
-
- /* Remember: all ops branch for a true comparison, fall through otherwise. */
-
- case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
- | // RA = src1*8, RD = src2*8, JMP with RD = target
- |.if DUALNUM
- | lwzux TMP0, RA, BASE
- | addi PC, PC, 4
- | lwz CARG2, 4(RA)
- | lwzux TMP1, RD, BASE
- | lwz TMP2, -4(PC)
- | checknum cr0, TMP0
- | lwz CARG3, 4(RD)
- | decode_RD4 TMP2, TMP2
- | checknum cr1, TMP1
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | bne cr0, >7
- | bne cr1, >8
- | cmpw CARG2, CARG3
- if (op == BC_ISLT) {
- | bge >2
- } else if (op == BC_ISGE) {
- | blt >2
- } else if (op == BC_ISLE) {
- | bgt >2
- } else {
- | ble >2
- }
- |1:
- | add PC, PC, TMP2
- |2:
- | ins_next
- |
- |7: // RA is not an integer.
- | bgt cr0, ->vmeta_comp
- | // RA is a number.
- | lfd f0, 0(RA)
- | bgt cr1, ->vmeta_comp
- | blt cr1, >4
- | // RA is a number, RD is an integer.
- | tonum_i f1, CARG3
- | b >5
- |
- |8: // RA is an integer, RD is not an integer.
- | bgt cr1, ->vmeta_comp
- | // RA is an integer, RD is a number.
- | tonum_i f0, CARG2
- |4:
- | lfd f1, 0(RD)
- |5:
- | fcmpu cr0, f0, f1
- if (op == BC_ISLT) {
- | bge <2
- } else if (op == BC_ISGE) {
- | blt <2
- } else if (op == BC_ISLE) {
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
- | bge <2
- } else {
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
- | blt <2
- }
- | b <1
- |.else
- | lwzx TMP0, BASE, RA
- | addi PC, PC, 4
- | lfdx f0, BASE, RA
- | lwzx TMP1, BASE, RD
- | checknum cr0, TMP0
- | lwz TMP2, -4(PC)
- | lfdx f1, BASE, RD
- | checknum cr1, TMP1
- | decode_RD4 TMP2, TMP2
- | bge cr0, ->vmeta_comp
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | bge cr1, ->vmeta_comp
- | fcmpu cr0, f0, f1
- if (op == BC_ISLT) {
- | bge >1
- } else if (op == BC_ISGE) {
- | blt >1
- } else if (op == BC_ISLE) {
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
- | bge >1
- } else {
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
- | blt >1
- }
- | add PC, PC, TMP2
- |1:
- | ins_next
- |.endif
- break;
-
- case BC_ISEQV: case BC_ISNEV:
- vk = op == BC_ISEQV;
- | // RA = src1*8, RD = src2*8, JMP with RD = target
- |.if DUALNUM
- | lwzux TMP0, RA, BASE
- | addi PC, PC, 4
- | lwz CARG2, 4(RA)
- | lwzux TMP1, RD, BASE
- | checknum cr0, TMP0
- | lwz TMP2, -4(PC)
- | checknum cr1, TMP1
- | decode_RD4 TMP2, TMP2
- | lwz CARG3, 4(RD)
- | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- if (vk) {
- | ble cr7, ->BC_ISEQN_Z
- } else {
- | ble cr7, ->BC_ISNEN_Z
- }
- |.else
- | lwzux TMP0, RA, BASE
- | lwz TMP2, 0(PC)
- | lfd f0, 0(RA)
- | addi PC, PC, 4
- | lwzux TMP1, RD, BASE
- | checknum cr0, TMP0
- | decode_RD4 TMP2, TMP2
- | lfd f1, 0(RD)
- | checknum cr1, TMP1
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | bge cr0, >5
- | bge cr1, >5
- | fcmpu cr0, f0, f1
- if (vk) {
- | bne >1
- | add PC, PC, TMP2
- } else {
- | beq >1
- | add PC, PC, TMP2
- }
- |1:
- | ins_next
- |.endif
- |5: // Either or both types are not numbers.
- |.if not DUALNUM
- | lwz CARG2, 4(RA)
- | lwz CARG3, 4(RD)
- |.endif
- |.if FFI
- | cmpwi cr7, TMP0, LJ_TCDATA
- | cmpwi cr5, TMP1, LJ_TCDATA
- |.endif
- | not TMP3, TMP0
- | cmplw TMP0, TMP1
- | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
- |.if FFI
- | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq
- |.endif
- | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
- |.if FFI
- | beq cr7, ->vmeta_equal_cd
- |.endif
- | cmplw cr5, CARG2, CARG3
- | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive.
- | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type.
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv.
- | mr SAVE0, PC
- | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2.
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2.
- if (vk) {
- | bne cr0, >6
- | add PC, PC, TMP2
- |6:
- } else {
- | beq cr0, >6
- | add PC, PC, TMP2
- |6:
- }
- |.if DUALNUM
- | bge cr0, >2 // Done if 1 or 2.
- |1:
- | ins_next
- |2:
- |.else
- | blt cr0, <1 // Done if 1 or 2.
- |.endif
- | blt cr6, <1 // Done if not tab/ud.
- |
- | // Different tables or userdatas. Need to check __eq metamethod.
- | // Field metatable must be at same offset for GCtab and GCudata!
- | lwz TAB:TMP2, TAB:CARG2->metatable
- | li CARG4, 1-vk // ne = 0 or 1.
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable?
- | lbz TMP2, TAB:TMP2->nomm
- | andix. TMP2, TMP2, 1<<MM_eq
- | bne <1 // Or 'no __eq' flag set?
- | mr PC, SAVE0 // Restore old PC.
- | b ->vmeta_equal // Handle __eq metamethod.
- break;
-
- case BC_ISEQS: case BC_ISNES:
- vk = op == BC_ISEQS;
- | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
- | lwzux TMP0, RA, BASE
- | srwi RD, RD, 1
- | lwz STR:TMP3, 4(RA)
- | lwz TMP2, 0(PC)
- | subfic RD, RD, -4
- | addi PC, PC, 4
- |.if FFI
- | cmpwi TMP0, LJ_TCDATA
- |.endif
- | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
- | .gpr64 extsw TMP0, TMP0
- | subfic TMP0, TMP0, LJ_TSTR
- |.if FFI
- | beq ->vmeta_equal_cd
- |.endif
- | sub TMP1, STR:TMP1, STR:TMP3
- | or TMP0, TMP0, TMP1
- | decode_RD4 TMP2, TMP2
- | subfic TMP0, TMP0, 0
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | subfe TMP1, TMP1, TMP1
- if (vk) {
- | andc TMP2, TMP2, TMP1
- } else {
- | and TMP2, TMP2, TMP1
- }
- | add PC, PC, TMP2
- | ins_next
- break;
-
- case BC_ISEQN: case BC_ISNEN:
- vk = op == BC_ISEQN;
- | // RA = src*8, RD = num_const*8, JMP with RD = target
- |.if DUALNUM
- | lwzux TMP0, RA, BASE
- | addi PC, PC, 4
- | lwz CARG2, 4(RA)
- | lwzux TMP1, RD, KBASE
- | checknum cr0, TMP0
- | lwz TMP2, -4(PC)
- | checknum cr1, TMP1
- | decode_RD4 TMP2, TMP2
- | lwz CARG3, 4(RD)
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- if (vk) {
- |->BC_ISEQN_Z:
- } else {
- |->BC_ISNEN_Z:
- }
- | bne cr0, >7
- | bne cr1, >8
- | cmpw CARG2, CARG3
- |4:
- |.else
- if (vk) {
- |->BC_ISEQN_Z: // Dummy label.
- } else {
- |->BC_ISNEN_Z: // Dummy label.
- }
- | lwzx TMP0, BASE, RA
- | addi PC, PC, 4
- | lfdx f0, BASE, RA
- | lwz TMP2, -4(PC)
- | lfdx f1, KBASE, RD
- | decode_RD4 TMP2, TMP2
- | checknum TMP0
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | bge >3
- | fcmpu cr0, f0, f1
- |.endif
- if (vk) {
- | bne >1
- | add PC, PC, TMP2
- |1:
- |.if not FFI
- |3:
- |.endif
- } else {
- | beq >2
- |1:
- |.if not FFI
- |3:
- |.endif
- | add PC, PC, TMP2
- |2:
- }
- | ins_next
- |.if FFI
- |3:
- | cmpwi TMP0, LJ_TCDATA
- | beq ->vmeta_equal_cd
- | b <1
- |.endif
- |.if DUALNUM
- |7: // RA is not an integer.
- | bge cr0, <3
- | // RA is a number.
- | lfd f0, 0(RA)
- | blt cr1, >1
- | // RA is a number, RD is an integer.
- | tonum_i f1, CARG3
- | b >2
- |
- |8: // RA is an integer, RD is a number.
- | tonum_i f0, CARG2
- |1:
- | lfd f1, 0(RD)
- |2:
- | fcmpu cr0, f0, f1
- | b <4
- |.endif
- break;
-
- case BC_ISEQP: case BC_ISNEP:
- vk = op == BC_ISEQP;
- | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
- | lwzx TMP0, BASE, RA
- | srwi TMP1, RD, 3
- | lwz TMP2, 0(PC)
- | not TMP1, TMP1
- | addi PC, PC, 4
- |.if FFI
- | cmpwi TMP0, LJ_TCDATA
- |.endif
- | sub TMP0, TMP0, TMP1
- |.if FFI
- | beq ->vmeta_equal_cd
- |.endif
- | decode_RD4 TMP2, TMP2
- | .gpr64 extsw TMP0, TMP0
- | addic TMP0, TMP0, -1
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- | subfe TMP1, TMP1, TMP1
- if (vk) {
- | and TMP2, TMP2, TMP1
- } else {
- | andc TMP2, TMP2, TMP1
- }
- | add PC, PC, TMP2
- | ins_next
- break;
-
- /* -- Unary test and copy ops ------------------------------------------- */
-
- case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
- | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
- | lwzx TMP0, BASE, RD
- | lwz INS, 0(PC)
- | addi PC, PC, 4
- if (op == BC_IST || op == BC_ISF) {
- | .gpr64 extsw TMP0, TMP0
- | subfic TMP0, TMP0, LJ_TTRUE
- | decode_RD4 TMP2, INS
- | subfe TMP1, TMP1, TMP1
- | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
- if (op == BC_IST) {
- | andc TMP2, TMP2, TMP1
- } else {
- | and TMP2, TMP2, TMP1
- }
- | add PC, PC, TMP2
- } else {
- | li TMP1, LJ_TFALSE
- | lfdx f0, BASE, RD
- | cmplw TMP0, TMP1
- if (op == BC_ISTC) {
- | bge >1
- } else {
- | blt >1
- }
- | addis PC, PC, -(BCBIAS_J*4 >> 16)
- | decode_RD4 TMP2, INS
- | stfdx f0, BASE, RA
- | add PC, PC, TMP2
- |1:
- }
- | ins_next
- break;
-
- /* -- Unary ops --------------------------------------------------------- */
-
- case BC_MOV:
- | // RA = dst*8, RD = src*8
- | ins_next1
- | lfdx f0, BASE, RD
- | stfdx f0, BASE, RA
- | ins_next2
- break;
- case BC_NOT:
- | // RA = dst*8, RD = src*8
- | ins_next1
- | lwzx TMP0, BASE, RD
- | .gpr64 extsw TMP0, TMP0
- | subfic TMP1, TMP0, LJ_TTRUE
- | adde TMP0, TMP0, TMP1
- | stwx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_UNM:
- | // RA = dst*8, RD = src*8
- | lwzux TMP1, RD, BASE
- | lwz TMP0, 4(RD)
- | checknum TMP1
- |.if DUALNUM
- | bne >5
- |.if GPR64
- | lus TMP2, 0x8000
- | neg TMP0, TMP0
- | cmplw TMP0, TMP2
- | beq >4
- |.else
- | nego. TMP0, TMP0
- | bso >4
- |1:
- |.endif
- | ins_next1
- | stwux TISNUM, RA, BASE
- | stw TMP0, 4(RA)
- |3:
- | ins_next2
- |4:
- |.if not GPR64
- | // Potential overflow.
- | checkov TMP1, <1 // Ignore unrelated overflow.
- |.endif
- | lus TMP1, 0x41e0 // 2^31.
- | li TMP0, 0
- | b >7
- |.endif
- |5:
- | bge ->vmeta_unm
- | xoris TMP1, TMP1, 0x8000
- |7:
- | ins_next1
- | stwux TMP1, RA, BASE
- | stw TMP0, 4(RA)
- |.if DUALNUM
- | b <3
- |.else
- | ins_next2
- |.endif
- break;
- case BC_LEN:
- | // RA = dst*8, RD = src*8
- | lwzux TMP0, RD, BASE
- | lwz CARG1, 4(RD)
- | checkstr TMP0; bne >2
- | lwz CRET1, STR:CARG1->len
- |1:
- |.if DUALNUM
- | ins_next1
- | stwux TISNUM, RA, BASE
- | stw CRET1, 4(RA)
- |.else
- | tonum_u f0, CRET1 // Result is a non-negative integer.
- | ins_next1
- | stfdx f0, BASE, RA
- |.endif
- | ins_next2
- |2:
- | checktab TMP0; bne ->vmeta_len
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | cmplwi TAB:TMP2, 0
- | bne >9
- |3:
-#endif
- |->BC_LEN_Z:
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | b <1
-#if LJ_52
- |9:
- | lbz TMP0, TAB:TMP2->nomm
- | andix. TMP0, TMP0, 1<<MM_len
- | bne <3 // 'no __len' flag set: done.
- | b ->vmeta_len
-#endif
- break;
-
- /* -- Binary ops -------------------------------------------------------- */
-
- |.macro ins_arithpre
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- ||switch (vk) {
- ||case 0:
- | lwzx TMP1, BASE, RB
- | .if DUALNUM
- | lwzx TMP2, KBASE, RC
- | .endif
- | lfdx f14, BASE, RB
- | lfdx f15, KBASE, RC
- | .if DUALNUM
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_vn
- | .else
- | checknum TMP1; bge ->vmeta_arith_vn
- | .endif
- || break;
- ||case 1:
- | lwzx TMP1, BASE, RB
- | .if DUALNUM
- | lwzx TMP2, KBASE, RC
- | .endif
- | lfdx f15, BASE, RB
- | lfdx f14, KBASE, RC
- | .if DUALNUM
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_nv
- | .else
- | checknum TMP1; bge ->vmeta_arith_nv
- | .endif
- || break;
- ||default:
- | lwzx TMP1, BASE, RB
- | lwzx TMP2, BASE, RC
- | lfdx f14, BASE, RB
- | lfdx f15, BASE, RC
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_vv
- || break;
- ||}
- |.endmacro
- |
- |.macro ins_arithfallback, ins
- ||switch (vk) {
- ||case 0:
- | ins ->vmeta_arith_vn2
- || break;
- ||case 1:
- | ins ->vmeta_arith_nv2
- || break;
- ||default:
- | ins ->vmeta_arith_vv2
- || break;
- ||}
- |.endmacro
- |
- |.macro intmod, a, b, c
- | bl ->vm_modi
- |.endmacro
- |
- |.macro fpmod, a, b, c
- |->BC_MODVN_Z:
- | fdiv FARG1, b, c
- | // NYI: Use internal implementation of floor.
- | blex floor // floor(b/c)
- | fmul a, FARG1, c
- | fsub a, b, a // b - floor(b/c)*c
- |.endmacro
- |
- |.macro ins_arithfp, fpins
- | ins_arithpre
- |.if "fpins" == "fpmod_"
- | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
- |.else
- | fpins f0, f14, f15
- | ins_next1
- | stfdx f0, BASE, RA
- | ins_next2
- |.endif
- |.endmacro
- |
- |.macro ins_arithdn, intins, fpins
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- ||switch (vk) {
- ||case 0:
- | lwzux TMP1, RB, BASE
- | lwzux TMP2, RC, KBASE
- | lwz CARG1, 4(RB)
- | checknum cr0, TMP1
- | lwz CARG2, 4(RC)
- || break;
- ||case 1:
- | lwzux TMP1, RB, BASE
- | lwzux TMP2, RC, KBASE
- | lwz CARG2, 4(RB)
- | checknum cr0, TMP1
- | lwz CARG1, 4(RC)
- || break;
- ||default:
- | lwzux TMP1, RB, BASE
- | lwzux TMP2, RC, BASE
- | lwz CARG1, 4(RB)
- | checknum cr0, TMP1
- | lwz CARG2, 4(RC)
- || break;
- ||}
- | checknum cr1, TMP2
- | bne >5
- | bne cr1, >5
- | intins CARG1, CARG1, CARG2
- | bso >4
- |1:
- | ins_next1
- | stwux TISNUM, RA, BASE
- | stw CARG1, 4(RA)
- |2:
- | ins_next2
- |4: // Overflow.
- | checkov TMP0, <1 // Ignore unrelated overflow.
- | ins_arithfallback b
- |5: // FP variant.
- ||if (vk == 1) {
- | lfd f15, 0(RB)
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | lfd f14, 0(RC)
- ||} else {
- | lfd f14, 0(RB)
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | lfd f15, 0(RC)
- ||}
- | ins_arithfallback bge
- |.if "fpins" == "fpmod_"
- | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
- |.else
- | fpins f0, f14, f15
- | ins_next1
- | stfdx f0, BASE, RA
- | b <2
- |.endif
- |.endmacro
- |
- |.macro ins_arith, intins, fpins
- |.if DUALNUM
- | ins_arithdn intins, fpins
- |.else
- | ins_arithfp fpins
- |.endif
- |.endmacro
-
- case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
- |.if GPR64
- |.macro addo32., y, a, b
- | // Need to check overflow for (a<<32) + (b<<32).
- | rldicr TMP0, a, 32, 31
- | rldicr TMP3, b, 32, 31
- | addo. TMP0, TMP0, TMP3
- | add y, a, b
- |.endmacro
- | ins_arith addo32., fadd
- |.else
- | ins_arith addo., fadd
- |.endif
- break;
- case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
- |.if GPR64
- |.macro subo32., y, a, b
- | // Need to check overflow for (a<<32) - (b<<32).
- | rldicr TMP0, a, 32, 31
- | rldicr TMP3, b, 32, 31
- | subo. TMP0, TMP0, TMP3
- | sub y, a, b
- |.endmacro
- | ins_arith subo32., fsub
- |.else
- | ins_arith subo., fsub
- |.endif
- break;
- case BC_MULVN: case BC_MULNV: case BC_MULVV:
- | ins_arith mullwo., fmul
- break;
- case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
- | ins_arithfp fdiv
- break;
- case BC_MODVN:
- | ins_arith intmod, fpmod
- break;
- case BC_MODNV: case BC_MODVV:
- | ins_arith intmod, fpmod_
- break;
- case BC_POW:
- | // NYI: (partial) integer arithmetic.
- | lwzx TMP1, BASE, RB
- | lfdx FARG1, BASE, RB
- | lwzx TMP2, BASE, RC
- | lfdx FARG2, BASE, RC
- | checknum cr0, TMP1
- | checknum cr1, TMP2
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | bge ->vmeta_arith_vv
- | blex pow
- | ins_next1
- | stfdx FARG1, BASE, RA
- | ins_next2
- break;
-
- case BC_CAT:
- | // RA = dst*8, RB = src_start*8, RC = src_end*8
- | sub CARG3, RC, RB
- | stp BASE, L->base
- | add CARG2, BASE, RC
- | mr SAVE0, RB
- |->BC_CAT_Z:
- | stw PC, SAVE_PC
- | mr CARG1, L
- | srwi CARG3, CARG3, 3
- | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
- | // Returns NULL (finished) or TValue * (metamethod).
- | cmplwi CRET1, 0
- | lp BASE, L->base
- | bne ->vmeta_binop
- | ins_next1
- | lfdx f0, BASE, SAVE0 // Copy result from RB to RA.
- | stfdx f0, BASE, RA
- | ins_next2
- break;
-
- /* -- Constant ops ------------------------------------------------------ */
-
- case BC_KSTR:
- | // RA = dst*8, RD = str_const*8 (~)
- | srwi TMP1, RD, 1
- | subfic TMP1, TMP1, -4
- | ins_next1
- | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
- | li TMP2, LJ_TSTR
- | stwux TMP2, RA, BASE
- | stw TMP0, 4(RA)
- | ins_next2
- break;
- case BC_KCDATA:
- |.if FFI
- | // RA = dst*8, RD = cdata_const*8 (~)
- | srwi TMP1, RD, 1
- | subfic TMP1, TMP1, -4
- | ins_next1
- | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
- | li TMP2, LJ_TCDATA
- | stwux TMP2, RA, BASE
- | stw TMP0, 4(RA)
- | ins_next2
- |.endif
- break;
- case BC_KSHORT:
- | // RA = dst*8, RD = int16_literal*8
- |.if DUALNUM
- | slwi RD, RD, 13
- | srawi RD, RD, 16
- | ins_next1
- | stwux TISNUM, RA, BASE
- | stw RD, 4(RA)
- | ins_next2
- |.else
- | // The soft-float approach is faster.
- | slwi RD, RD, 13
- | srawi TMP1, RD, 31
- | xor TMP2, TMP1, RD
- | sub TMP2, TMP2, TMP1 // TMP2 = abs(x)
- | cntlzw TMP3, TMP2
- | subfic TMP1, TMP3, 0x40d // TMP1 = exponent-1
- | slw TMP2, TMP2, TMP3 // TMP2 = left aligned mantissa
- | subfic TMP3, RD, 0
- | slwi TMP1, TMP1, 20
- | rlwimi RD, TMP2, 21, 1, 31 // hi = sign(x) | (mantissa>>11)
- | subfe TMP0, TMP0, TMP0
- | add RD, RD, TMP1 // hi = hi + exponent-1
- | and RD, RD, TMP0 // hi = x == 0 ? 0 : hi
- | ins_next1
- | stwux RD, RA, BASE
- | stw ZERO, 4(RA)
- | ins_next2
- |.endif
- break;
- case BC_KNUM:
- | // RA = dst*8, RD = num_const*8
- | ins_next1
- | lfdx f0, KBASE, RD
- | stfdx f0, BASE, RA
- | ins_next2
- break;
- case BC_KPRI:
- | // RA = dst*8, RD = primitive_type*8 (~)
- | srwi TMP1, RD, 3
- | not TMP0, TMP1
- | ins_next1
- | stwx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_KNIL:
- | // RA = base*8, RD = end*8
- | stwx TISNIL, BASE, RA
- | addi RA, RA, 8
- |1:
- | stwx TISNIL, BASE, RA
- | cmpw RA, RD
- | addi RA, RA, 8
- | blt <1
- | ins_next_
- break;
-
- /* -- Upvalue and function ops ------------------------------------------ */
-
- case BC_UGET:
- | // RA = dst*8, RD = uvnum*8
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RD, RD, 1
- | addi RD, RD, offsetof(GCfuncL, uvptr)
- | lwzx UPVAL:RB, LFUNC:RB, RD
- | ins_next1
- | lwz TMP1, UPVAL:RB->v
- | lfd f0, 0(TMP1)
- | stfdx f0, BASE, RA
- | ins_next2
- break;
- case BC_USETV:
- | // RA = uvnum*8, RD = src*8
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RA, RA, 1
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | lfdux f0, RD, BASE
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | lbz TMP3, UPVAL:RB->marked
- | lwz CARG2, UPVAL:RB->v
- | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
- | lbz TMP0, UPVAL:RB->closed
- | lwz TMP2, 0(RD)
- | stfd f0, 0(CARG2)
- | cmplwi cr1, TMP0, 0
- | lwz TMP1, 4(RD)
- | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
- | subi TMP2, TMP2, (LJ_TNUMX+1)
- | bne >2 // Upvalue is closed and black?
- |1:
- | ins_next
- |
- |2: // Check if new value is collectable.
- | cmplwi TMP2, LJ_TISGCV - (LJ_TNUMX+1)
- | bge <1 // tvisgcv(v)
- | lbz TMP3, GCOBJ:TMP1->gch.marked
- | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
- | la CARG1, GG_DISP2G(DISPATCH)
- | // Crossed a write barrier. Move the barrier forward.
- | beq <1
- | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- | b <1
- break;
- case BC_USETS:
- | // RA = uvnum*8, RD = str_const*8 (~)
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi TMP1, RD, 1
- | srwi RA, RA, 1
- | subfic TMP1, TMP1, -4
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | lbz TMP3, UPVAL:RB->marked
- | lwz CARG2, UPVAL:RB->v
- | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
- | lbz TMP3, STR:TMP1->marked
- | lbz TMP2, UPVAL:RB->closed
- | li TMP0, LJ_TSTR
- | stw STR:TMP1, 4(CARG2)
- | stw TMP0, 0(CARG2)
- | bne >2
- |1:
- | ins_next
- |
- |2: // Check if string is white and ensure upvalue is closed.
- | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
- | cmplwi cr1, TMP2, 0
- | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
- | la CARG1, GG_DISP2G(DISPATCH)
- | // Crossed a write barrier. Move the barrier forward.
- | beq <1
- | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- | b <1
- break;
- case BC_USETN:
- | // RA = uvnum*8, RD = num_const*8
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RA, RA, 1
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | lfdx f0, KBASE, RD
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | ins_next1
- | lwz TMP1, UPVAL:RB->v
- | stfd f0, 0(TMP1)
- | ins_next2
- break;
- case BC_USETP:
- | // RA = uvnum*8, RD = primitive_type*8 (~)
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RA, RA, 1
- | srwi TMP0, RD, 3
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | not TMP0, TMP0
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | ins_next1
- | lwz TMP1, UPVAL:RB->v
- | stw TMP0, 0(TMP1)
- | ins_next2
- break;
-
- case BC_UCLO:
- | // RA = level*8, RD = target
- | lwz TMP1, L->openupval
- | branch_RD // Do this first since RD is not saved.
- | stp BASE, L->base
- | cmplwi TMP1, 0
- | mr CARG1, L
- | beq >1
- | add CARG2, BASE, RA
- | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
- | lp BASE, L->base
- |1:
- | ins_next
- break;
-
- case BC_FNEW:
- | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
- | srwi TMP1, RD, 1
- | stp BASE, L->base
- | subfic TMP1, TMP1, -4
- | stw PC, SAVE_PC
- | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
- | mr CARG1, L
- | lwz CARG3, FRAME_FUNC(BASE)
- | // (lua_State *L, GCproto *pt, GCfuncL *parent)
- | bl extern lj_func_newL_gc
- | // Returns GCfuncL *.
- | lp BASE, L->base
- | li TMP0, LJ_TFUNC
- | stwux TMP0, RA, BASE
- | stw LFUNC:CRET1, 4(RA)
- | ins_next
- break;
-
- /* -- Table ops --------------------------------------------------------- */
-
- case BC_TNEW:
- case BC_TDUP:
- | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
- | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
- | mr CARG1, L
- | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
- | stp BASE, L->base
- | cmplw TMP0, TMP1
- | stw PC, SAVE_PC
- | bge >5
- |1:
- if (op == BC_TNEW) {
- | rlwinm CARG2, RD, 29, 21, 31
- | rlwinm CARG3, RD, 18, 27, 31
- | cmpwi CARG2, 0x7ff; beq >3
- |2:
- | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
- | // Returns Table *.
- } else {
- | srwi TMP1, RD, 1
- | subfic TMP1, TMP1, -4
- | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
- | bl extern lj_tab_dup // (lua_State *L, Table *kt)
- | // Returns Table *.
- }
- | lp BASE, L->base
- | li TMP0, LJ_TTAB
- | stwux TMP0, RA, BASE
- | stw TAB:CRET1, 4(RA)
- | ins_next
- if (op == BC_TNEW) {
- |3:
- | li CARG2, 0x801
- | b <2
- }
- |5:
- | mr SAVE0, RD
- | bl extern lj_gc_step_fixtop // (lua_State *L)
- | mr RD, SAVE0
- | mr CARG1, L
- | b <1
- break;
-
- case BC_GGET:
- | // RA = dst*8, RD = str_const*8 (~)
- case BC_GSET:
- | // RA = src*8, RD = str_const*8 (~)
- | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
- | srwi TMP1, RD, 1
- | lwz TAB:RB, LFUNC:TMP2->env
- | subfic TMP1, TMP1, -4
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- if (op == BC_GGET) {
- | b ->BC_TGETS_Z
- } else {
- | b ->BC_TSETS_Z
- }
- break;
-
- case BC_TGETV:
- | // RA = dst*8, RB = table*8, RC = key*8
- | lwzux CARG1, RB, BASE
- | lwzux CARG2, RC, BASE
- | lwz TAB:RB, 4(RB)
- |.if DUALNUM
- | lwz RC, 4(RC)
- |.else
- | lfd f0, 0(RC)
- |.endif
- | checktab CARG1
- | checknum cr1, CARG2
- | bne ->vmeta_tgetv
- |.if DUALNUM
- | lwz TMP0, TAB:RB->asize
- | bne cr1, >5
- | lwz TMP1, TAB:RB->array
- | cmplw TMP0, RC
- | slwi TMP2, RC, 3
- |.else
- | bge cr1, >5
- | // Convert number key to integer, check for integerness and range.
- | fctiwz f1, f0
- | fadd f2, f0, TOBIT
- | stfd f1, TMPD
- | lwz TMP0, TAB:RB->asize
- | fsub f2, f2, TOBIT
- | lwz TMP2, TMPD_LO
- | lwz TMP1, TAB:RB->array
- | fcmpu cr1, f0, f2
- | cmplw cr0, TMP0, TMP2
- | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
- | slwi TMP2, TMP2, 3
- |.endif
- | ble ->vmeta_tgetv // Integer key and in array part?
- | lwzx TMP0, TMP1, TMP2
- | lfdx f14, TMP1, TMP2
- | checknil TMP0; beq >2
- |1:
- | ins_next1
- | stfdx f14, BASE, RA
- | ins_next2
- |
- |2: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP0, TAB:TMP2->nomm
- | andix. TMP0, TMP0, 1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetv
- |
- |5:
- | checkstr CARG2; bne ->vmeta_tgetv
- |.if not DUALNUM
- | lwz STR:RC, 4(RC)
- |.endif
- | b ->BC_TGETS_Z // String key?
- break;
- case BC_TGETS:
- | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
- | lwzux CARG1, RB, BASE
- | srwi TMP1, RC, 1
- | lwz TAB:RB, 4(RB)
- | subfic TMP1, TMP1, -4
- | checktab CARG1
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- | bne ->vmeta_tgets1
- |->BC_TGETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
- | lwz TMP0, TAB:RB->hmask
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | lwz CARG1, NODE:TMP2->key
- | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
- | lwz CARG2, NODE:TMP2->val
- | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
- | checkstr CARG1; bne >4
- | cmpw TMP0, STR:RC; bne >4
- | checknil CARG2; beq >5 // Key found, but nil value?
- |3:
- | stwux CARG2, RA, BASE
- | stw TMP1, 4(RA)
- | ins_next
- |
- |4: // Follow hash chain.
- | lwz NODE:TMP2, NODE:TMP2->next
- | cmplwi NODE:TMP2, 0
- | bne <1
- | // End of hash chain: key not found, nil result.
- | li CARG2, LJ_TNIL
- |
- |5: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <3 // No metatable: done.
- | lbz TMP0, TAB:TMP2->nomm
- | andix. TMP0, TMP0, 1<<MM_index
- | bne <3 // 'no __index' flag set: done.
- | b ->vmeta_tgets
- break;
- case BC_TGETB:
- | // RA = dst*8, RB = table*8, RC = index*8
- | lwzux CARG1, RB, BASE
- | srwi TMP0, RC, 3
- | lwz TAB:RB, 4(RB)
- | checktab CARG1; bne ->vmeta_tgetb
- | lwz TMP1, TAB:RB->asize
- | lwz TMP2, TAB:RB->array
- | cmplw TMP0, TMP1; bge ->vmeta_tgetb
- | lwzx TMP1, TMP2, RC
- | lfdx f0, TMP2, RC
- | checknil TMP1; beq >5
- |1:
- | ins_next1
- | stfdx f0, BASE, RA
- | ins_next2
- |
- |5: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP2, TAB:TMP2->nomm
- | andix. TMP2, TMP2, 1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetb // Caveat: preserve TMP0!
- break;
-
- case BC_TSETV:
- | // RA = src*8, RB = table*8, RC = key*8
- | lwzux CARG1, RB, BASE
- | lwzux CARG2, RC, BASE
- | lwz TAB:RB, 4(RB)
- |.if DUALNUM
- | lwz RC, 4(RC)
- |.else
- | lfd f0, 0(RC)
- |.endif
- | checktab CARG1
- | checknum cr1, CARG2
- | bne ->vmeta_tsetv
- |.if DUALNUM
- | lwz TMP0, TAB:RB->asize
- | bne cr1, >5
- | lwz TMP1, TAB:RB->array
- | cmplw TMP0, RC
- | slwi TMP0, RC, 3
- |.else
- | bge cr1, >5
- | // Convert number key to integer, check for integerness and range.
- | fctiwz f1, f0
- | fadd f2, f0, TOBIT
- | stfd f1, TMPD
- | lwz TMP0, TAB:RB->asize
- | fsub f2, f2, TOBIT
- | lwz TMP2, TMPD_LO
- | lwz TMP1, TAB:RB->array
- | fcmpu cr1, f0, f2
- | cmplw cr0, TMP0, TMP2
- | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
- | slwi TMP0, TMP2, 3
- |.endif
- | ble ->vmeta_tsetv // Integer key and in array part?
- | lwzx TMP2, TMP1, TMP0
- | lbz TMP3, TAB:RB->marked
- | lfdx f14, BASE, RA
- | checknil TMP2; beq >3
- |1:
- | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
- | stfdx f14, TMP1, TMP0
- | bne >7
- |2:
- | ins_next
- |
- |3: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP2, TAB:TMP2->nomm
- | andix. TMP2, TMP2, 1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | b ->vmeta_tsetv
- |
- |5:
- | checkstr CARG2; bne ->vmeta_tsetv
- |.if not DUALNUM
- | lwz STR:RC, 4(RC)
- |.endif
- | b ->BC_TSETS_Z // String key?
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <2
- break;
- case BC_TSETS:
- | // RA = src*8, RB = table*8, RC = str_const*8 (~)
- | lwzux CARG1, RB, BASE
- | srwi TMP1, RC, 1
- | lwz TAB:RB, 4(RB)
- | subfic TMP1, TMP1, -4
- | checktab CARG1
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- | bne ->vmeta_tsets1
- |->BC_TSETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
- | lwz TMP0, TAB:RB->hmask
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | lfdx f14, BASE, RA
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | lbz TMP3, TAB:RB->marked
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | lwz CARG1, NODE:TMP2->key
- | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
- | lwz CARG2, NODE:TMP2->val
- | lwz NODE:TMP1, NODE:TMP2->next
- | checkstr CARG1; bne >5
- | cmpw TMP0, STR:RC; bne >5
- | checknil CARG2; beq >4 // Key found, but nil value?
- |2:
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | stfd f14, NODE:TMP2->val
- | bne >7
- |3:
- | ins_next
- |
- |4: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP1, TAB:RB->metatable
- | cmplwi TAB:TMP1, 0
- | beq <2 // No metatable: done.
- | lbz TMP0, TAB:TMP1->nomm
- | andix. TMP0, TMP0, 1<<MM_newindex
- | bne <2 // 'no __newindex' flag set: done.
- | b ->vmeta_tsets
- |
- |5: // Follow hash chain.
- | cmplwi NODE:TMP1, 0
- | mr NODE:TMP2, NODE:TMP1
- | bne <1
- | // End of hash chain: key not found, add a new one.
- |
- | // But check for __newindex first.
- | lwz TAB:TMP1, TAB:RB->metatable
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | stw PC, SAVE_PC
- | mr CARG1, L
- | cmplwi TAB:TMP1, 0
- | stp BASE, L->base
- | beq >6 // No metatable: continue.
- | lbz TMP0, TAB:TMP1->nomm
- | andix. TMP0, TMP0, 1<<MM_newindex
- | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
- |6:
- | li TMP0, LJ_TSTR
- | stw STR:RC, 4(CARG3)
- | mr CARG2, TAB:RB
- | stw TMP0, 0(CARG3)
- | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
- | // Returns TValue *.
- | lp BASE, L->base
- | stfd f14, 0(CRET1)
- | b <3 // No 2nd write barrier needed.
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <3
- break;
- case BC_TSETB:
- | // RA = src*8, RB = table*8, RC = index*8
- | lwzux CARG1, RB, BASE
- | srwi TMP0, RC, 3
- | lwz TAB:RB, 4(RB)
- | checktab CARG1; bne ->vmeta_tsetb
- | lwz TMP1, TAB:RB->asize
- | lwz TMP2, TAB:RB->array
- | lbz TMP3, TAB:RB->marked
- | cmplw TMP0, TMP1
- | lfdx f14, BASE, RA
- | bge ->vmeta_tsetb
- | lwzx TMP1, TMP2, RC
- | checknil TMP1; beq >5
- |1:
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | stfdx f14, TMP2, RC
- | bne >7
- |2:
- | ins_next
- |
- |5: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP1, TAB:RB->metatable
- | cmplwi TAB:TMP1, 0
- | beq <1 // No metatable: done.
- | lbz TMP1, TAB:TMP1->nomm
- | andix. TMP1, TMP1, 1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | b ->vmeta_tsetb // Caveat: preserve TMP0!
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <2
- break;
-
- case BC_TSETM:
- | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
- | add RA, BASE, RA
- |1:
- | add TMP3, KBASE, RD
- | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
- | addic. TMP0, MULTRES, -8
- | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
- | srwi CARG3, TMP0, 3
- | beq >4 // Nothing to copy?
- | add CARG3, CARG3, TMP3
- | lwz TMP2, TAB:CARG2->asize
- | slwi TMP1, TMP3, 3
- | lbz TMP3, TAB:CARG2->marked
- | cmplw CARG3, TMP2
- | add TMP2, RA, TMP0
- | lwz TMP0, TAB:CARG2->array
- | bgt >5
- | add TMP1, TMP1, TMP0
- | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- |3: // Copy result slots to table.
- | lfd f0, 0(RA)
- | addi RA, RA, 8
- | cmpw cr1, RA, TMP2
- | stfd f0, 0(TMP1)
- | addi TMP1, TMP1, 8
- | blt cr1, <3
- | bne >7
- |4:
- | ins_next
- |
- |5: // Need to resize array part.
- | stp BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | mr SAVE0, RD
- | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
- | // Must not reallocate the stack.
- | mr RD, SAVE0
- | b <1
- |
- |7: // Possible table write barrier for any value. Skip valiswhite check.
- | barrierback TAB:CARG2, TMP3, TMP0
- | b <4
- break;
-
- /* -- Calls and vararg handling ----------------------------------------- */
-
- case BC_CALLM:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
- | add NARGS8:RC, NARGS8:RC, MULTRES
- | // Fall through. Assumes BC_CALL follows.
- break;
- case BC_CALL:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
- | mr TMP2, BASE
- | lwzux TMP0, BASE, RA
- | lwz LFUNC:RB, 4(BASE)
- | subi NARGS8:RC, NARGS8:RC, 8
- | addi BASE, BASE, 8
- | checkfunc TMP0; bne ->vmeta_call
- | ins_call
- break;
-
- case BC_CALLMT:
- | // RA = base*8, (RB = 0,) RC = extra_nargs*8
- | add NARGS8:RC, NARGS8:RC, MULTRES
- | // Fall through. Assumes BC_CALLT follows.
- break;
- case BC_CALLT:
- | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
- | lwzux TMP0, RA, BASE
- | lwz LFUNC:RB, 4(RA)
- | subi NARGS8:RC, NARGS8:RC, 8
- | lwz TMP1, FRAME_PC(BASE)
- | checkfunc TMP0
- | addi RA, RA, 8
- | bne ->vmeta_callt
- |->BC_CALLT_Z:
- | andix. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
- | lbz TMP3, LFUNC:RB->ffid
- | xori TMP2, TMP1, FRAME_VARG
- | cmplwi cr1, NARGS8:RC, 0
- | bne >7
- |1:
- | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
- | li TMP2, 0
- | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
- | beq cr1, >3
- |2:
- | addi TMP3, TMP2, 8
- | lfdx f0, RA, TMP2
- | cmplw cr1, TMP3, NARGS8:RC
- | stfdx f0, BASE, TMP2
- | mr TMP2, TMP3
- | bne cr1, <2
- |3:
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
- | beq >5
- |4:
- | ins_callt
- |
- |5: // Tailcall to a fast function with a Lua frame below.
- | lwz INS, -4(TMP1)
- | decode_RA8 RA, INS
- | sub TMP1, BASE, RA
- | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
- | lwz TMP1, LFUNC:TMP1->pc
- | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
- | b <4
- |
- |7: // Tailcall from a vararg function.
- | andix. TMP0, TMP2, FRAME_TYPEP
- | bne <1 // Vararg frame below?
- | sub BASE, BASE, TMP2 // Relocate BASE down.
- | lwz TMP1, FRAME_PC(BASE)
- | andix. TMP0, TMP1, FRAME_TYPE
- | b <1
- break;
-
- case BC_ITERC:
- | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
- | mr TMP2, BASE
- | add BASE, BASE, RA
- | lwz TMP1, -24(BASE)
- | lwz LFUNC:RB, -20(BASE)
- | lfd f1, -8(BASE)
- | lfd f0, -16(BASE)
- | stw TMP1, 0(BASE) // Copy callable.
- | stw LFUNC:RB, 4(BASE)
- | checkfunc TMP1
- | stfd f1, 16(BASE) // Copy control var.
- | li NARGS8:RC, 16 // Iterators get 2 arguments.
- | stfdu f0, 8(BASE) // Copy state.
- | bne ->vmeta_call
- | ins_call
- break;
-
- case BC_ITERN:
- | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
- |.if JIT
- | // NYI: add hotloop, record BC_ITERN.
- |.endif
- | add RA, BASE, RA
- | lwz TAB:RB, -12(RA)
- | lwz RC, -4(RA) // Get index from control var.
- | lwz TMP0, TAB:RB->asize
- | lwz TMP1, TAB:RB->array
- | addi PC, PC, 4
- |1: // Traverse array part.
- | cmplw RC, TMP0
- | slwi TMP3, RC, 3
- | bge >5 // Index points after array part?
- | lwzx TMP2, TMP1, TMP3
- | lfdx f0, TMP1, TMP3
- | checknil TMP2
- | lwz INS, -4(PC)
- | beq >4
- |.if DUALNUM
- | stw RC, 4(RA)
- | stw TISNUM, 0(RA)
- |.else
- | tonum_u f1, RC
- |.endif
- | addi RC, RC, 1
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | stfd f0, 8(RA)
- | decode_RD4 TMP1, INS
- | stw RC, -4(RA) // Update control var.
- | add PC, TMP1, TMP3
- |.if not DUALNUM
- | stfd f1, 0(RA)
- |.endif
- |3:
- | ins_next
- |
- |4: // Skip holes in array part.
- | addi RC, RC, 1
- | b <1
- |
- |5: // Traverse hash part.
- | lwz TMP1, TAB:RB->hmask
- | sub RC, RC, TMP0
- | lwz TMP2, TAB:RB->node
- |6:
- | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
- | slwi TMP3, RC, 5
- | bgty <3
- | slwi RB, RC, 3
- | sub TMP3, TMP3, RB
- | lwzx RB, TMP2, TMP3
- | lfdx f0, TMP2, TMP3
- | add NODE:TMP3, TMP2, TMP3
- | checknil RB
- | lwz INS, -4(PC)
- | beq >7
- | lfd f1, NODE:TMP3->key
- | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
- | stfd f0, 8(RA)
- | add RC, RC, TMP0
- | decode_RD4 TMP1, INS
- | stfd f1, 0(RA)
- | addi RC, RC, 1
- | add PC, TMP1, TMP2
- | stw RC, -4(RA) // Update control var.
- | b <3
- |
- |7: // Skip holes in hash part.
- | addi RC, RC, 1
- | b <6
- break;
-
- case BC_ISNEXT:
- | // RA = base*8, RD = target (points to ITERN)
- | add RA, BASE, RA
- | lwz TMP0, -24(RA)
- | lwz CFUNC:TMP1, -20(RA)
- | lwz TMP2, -16(RA)
- | lwz TMP3, -8(RA)
- | cmpwi cr0, TMP2, LJ_TTAB
- | cmpwi cr1, TMP0, LJ_TFUNC
- | cmpwi cr6, TMP3, LJ_TNIL
- | bne cr1, >5
- | lbz TMP1, CFUNC:TMP1->ffid
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
- | cmpwi cr7, TMP1, FF_next_N
- | srwi TMP0, RD, 1
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
- | add TMP3, PC, TMP0
- | bne cr0, >5
- | lus TMP1, 0xfffe
- | ori TMP1, TMP1, 0x7fff
- | stw ZERO, -4(RA) // Initialize control var.
- | stw TMP1, -8(RA)
- | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
- |1:
- | ins_next
- |5: // Despecialize bytecode if any of the checks fail.
- | li TMP0, BC_JMP
- | li TMP1, BC_ITERC
- | stb TMP0, -1(PC)
- | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
- | stb TMP1, 3(PC)
- | b <1
- break;
-
- case BC_VARG:
- | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
- | lwz TMP0, FRAME_PC(BASE)
- | add RC, BASE, RC
- | add RA, BASE, RA
- | addi RC, RC, FRAME_VARG
- | add TMP2, RA, RB
- | subi TMP3, BASE, 8 // TMP3 = vtop
- | sub RC, RC, TMP0 // RC = vbase
- | // Note: RC may now be even _above_ BASE if nargs was < numparams.
- | cmplwi cr1, RB, 0
- |.if PPE
- | sub TMP1, TMP3, RC
- | cmpwi TMP1, 0
- |.else
- | sub. TMP1, TMP3, RC
- |.endif
- | beq cr1, >5 // Copy all varargs?
- | subi TMP2, TMP2, 16
- | ble >2 // No vararg slots?
- |1: // Copy vararg slots to destination slots.
- | lfd f0, 0(RC)
- | addi RC, RC, 8
- | stfd f0, 0(RA)
- | cmplw RA, TMP2
- | cmplw cr1, RC, TMP3
- | bge >3 // All destination slots filled?
- | addi RA, RA, 8
- | blt cr1, <1 // More vararg slots?
- |2: // Fill up remainder with nil.
- | stw TISNIL, 0(RA)
- | cmplw RA, TMP2
- | addi RA, RA, 8
- | blt <2
- |3:
- | ins_next
- |
- |5: // Copy all varargs.
- | lwz TMP0, L->maxstack
- | li MULTRES, 8 // MULTRES = (0+1)*8
- | bley <3 // No vararg slots?
- | add TMP2, RA, TMP1
- | cmplw TMP2, TMP0
- | addi MULTRES, TMP1, 8
- | bgt >7
- |6:
- | lfd f0, 0(RC)
- | addi RC, RC, 8
- | stfd f0, 0(RA)
- | cmplw RC, TMP3
- | addi RA, RA, 8
- | blt <6 // More vararg slots?
- | b <3
- |
- |7: // Grow stack for varargs.
- | mr CARG1, L
- | stp RA, L->top
- | sub SAVE0, RC, BASE // Need delta, because BASE may change.
- | stp BASE, L->base
- | sub RA, RA, BASE
- | stw PC, SAVE_PC
- | srwi CARG2, TMP1, 3
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lp BASE, L->base
- | add RA, BASE, RA
- | add RC, BASE, SAVE0
- | subi TMP3, BASE, 8
- | b <6
- break;
-
- /* -- Returns ----------------------------------------------------------- */
-
- case BC_RETM:
- | // RA = results*8, RD = extra_nresults*8
- | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
- | // Fall through. Assumes BC_RET follows.
- break;
-
- case BC_RET:
- | // RA = results*8, RD = (nresults+1)*8
- | lwz PC, FRAME_PC(BASE)
- | add RA, BASE, RA
- | mr MULTRES, RD
- |1:
- | andix. TMP0, PC, FRAME_TYPE
- | xori TMP1, PC, FRAME_VARG
- | bne ->BC_RETV_Z
- |
- |->BC_RET_Z:
- | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
- | lwz INS, -4(PC)
- | cmpwi RD, 8
- | subi TMP2, BASE, 8
- | subi RC, RD, 8
- | decode_RB8 RB, INS
- | beq >3
- | li TMP1, 0
- |2:
- | addi TMP3, TMP1, 8
- | lfdx f0, RA, TMP1
- | cmpw TMP3, RC
- | stfdx f0, TMP2, TMP1
- | beq >3
- | addi TMP1, TMP3, 8
- | lfdx f1, RA, TMP3
- | cmpw TMP1, RC
- | stfdx f1, TMP2, TMP3
- | bne <2
- |3:
- |5:
- | cmplw RB, RD
- | decode_RA8 RA, INS
- | bgt >6
- | sub BASE, TMP2, RA
- | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
- | ins_next1
- | lwz TMP1, LFUNC:TMP1->pc
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | ins_next2
- |
- |6: // Fill up results with nil.
- | subi TMP1, RD, 8
- | addi RD, RD, 8
- | stwx TISNIL, TMP2, TMP1
- | b <5
- |
- |->BC_RETV_Z: // Non-standard return case.
- | andix. TMP2, TMP1, FRAME_TYPEP
- | bne ->vm_return
- | // Return from vararg function: relocate BASE down.
- | sub BASE, BASE, TMP1
- | lwz PC, FRAME_PC(BASE)
- | b <1
- break;
-
- case BC_RET0: case BC_RET1:
- | // RA = results*8, RD = (nresults+1)*8
- | lwz PC, FRAME_PC(BASE)
- | add RA, BASE, RA
- | mr MULTRES, RD
- | andix. TMP0, PC, FRAME_TYPE
- | xori TMP1, PC, FRAME_VARG
- | bney ->BC_RETV_Z
- |
- | lwz INS, -4(PC)
- | subi TMP2, BASE, 8
- | decode_RB8 RB, INS
- if (op == BC_RET1) {
- | lfd f0, 0(RA)
- | stfd f0, 0(TMP2)
- }
- |5:
- | cmplw RB, RD
- | decode_RA8 RA, INS
- | bgt >6
- | sub BASE, TMP2, RA
- | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
- | ins_next1
- | lwz TMP1, LFUNC:TMP1->pc
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | ins_next2
- |
- |6: // Fill up results with nil.
- | subi TMP1, RD, 8
- | addi RD, RD, 8
- | stwx TISNIL, TMP2, TMP1
- | b <5
- break;
-
- /* -- Loops and branches ------------------------------------------------ */
-
- case BC_FORL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IFORL follows.
- break;
-
- case BC_JFORI:
- case BC_JFORL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_FORI:
- case BC_IFORL:
- | // RA = base*8, RD = target (after end of loop or start of loop)
- vk = (op == BC_IFORL || op == BC_JFORL);
- |.if DUALNUM
- | // Integer loop.
- | lwzux TMP1, RA, BASE
- | lwz CARG1, FORL_IDX*8+4(RA)
- | cmplw cr0, TMP1, TISNUM
- if (vk) {
- | lwz CARG3, FORL_STEP*8+4(RA)
- | bne >9
- |.if GPR64
- | // Need to check overflow for (a<<32) + (b<<32).
- | rldicr TMP0, CARG1, 32, 31
- | rldicr TMP2, CARG3, 32, 31
- | add CARG1, CARG1, CARG3
- | addo. TMP0, TMP0, TMP2
- |.else
- | addo. CARG1, CARG1, CARG3
- |.endif
- | cmpwi cr6, CARG3, 0
- | lwz CARG2, FORL_STOP*8+4(RA)
- | bso >6
- |4:
- | stw CARG1, FORL_IDX*8+4(RA)
- } else {
- | lwz TMP3, FORL_STEP*8(RA)
- | lwz CARG3, FORL_STEP*8+4(RA)
- | lwz TMP2, FORL_STOP*8(RA)
- | lwz CARG2, FORL_STOP*8+4(RA)
- | cmplw cr7, TMP3, TISNUM
- | cmplw cr1, TMP2, TISNUM
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
- | cmpwi cr6, CARG3, 0
- | bne >9
- }
- | blt cr6, >5
- | cmpw CARG1, CARG2
- |1:
- | stw TISNUM, FORL_EXT*8(RA)
- if (op != BC_JFORL) {
- | srwi RD, RD, 1
- }
- | stw CARG1, FORL_EXT*8+4(RA)
- if (op != BC_JFORL) {
- | add RD, PC, RD
- }
- if (op == BC_FORI) {
- | bgt >3 // See FP loop below.
- } else if (op == BC_JFORI) {
- | addis PC, RD, -(BCBIAS_J*4 >> 16)
- | bley >7
- } else if (op == BC_IFORL) {
- | bgt >2
- | addis PC, RD, -(BCBIAS_J*4 >> 16)
- } else {
- | bley =>BC_JLOOP
- }
- |2:
- | ins_next
- |5: // Invert check for negative step.
- | cmpw CARG2, CARG1
- | b <1
- if (vk) {
- |6: // Potential overflow.
- | checkov TMP0, <4 // Ignore unrelated overflow.
- | b <2
- }
- |.endif
- if (vk) {
- |.if DUALNUM
- |9: // FP loop.
- | lfd f1, FORL_IDX*8(RA)
- |.else
- | lfdux f1, RA, BASE
- |.endif
- | lfd f3, FORL_STEP*8(RA)
- | lfd f2, FORL_STOP*8(RA)
- | lwz TMP3, FORL_STEP*8(RA)
- | fadd f1, f1, f3
- | stfd f1, FORL_IDX*8(RA)
- } else {
- |.if DUALNUM
- |9: // FP loop.
- |.else
- | lwzux TMP1, RA, BASE
- | lwz TMP3, FORL_STEP*8(RA)
- | lwz TMP2, FORL_STOP*8(RA)
- | cmplw cr0, TMP1, TISNUM
- | cmplw cr7, TMP3, TISNUM
- | cmplw cr1, TMP2, TISNUM
- |.endif
- | lfd f1, FORL_IDX*8(RA)
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
- | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | lfd f2, FORL_STOP*8(RA)
- | bge ->vmeta_for
- }
- | cmpwi cr6, TMP3, 0
- if (op != BC_JFORL) {
- | srwi RD, RD, 1
- }
- | stfd f1, FORL_EXT*8(RA)
- if (op != BC_JFORL) {
- | add RD, PC, RD
- }
- | fcmpu cr0, f1, f2
- if (op == BC_JFORI) {
- | addis PC, RD, -(BCBIAS_J*4 >> 16)
- }
- | blt cr6, >5
- if (op == BC_FORI) {
- | bgt >3
- } else if (op == BC_IFORL) {
- |.if DUALNUM
- | bgty <2
- |.else
- | bgt >2
- |.endif
- |1:
- | addis PC, RD, -(BCBIAS_J*4 >> 16)
- } else if (op == BC_JFORI) {
- | bley >7
- } else {
- | bley =>BC_JLOOP
- }
- |.if DUALNUM
- | b <2
- |.else
- |2:
- | ins_next
- |.endif
- |5: // Negative step.
- if (op == BC_FORI) {
- | bge <2
- |3: // Used by integer loop, too.
- | addis PC, RD, -(BCBIAS_J*4 >> 16)
- } else if (op == BC_IFORL) {
- | bgey <1
- } else if (op == BC_JFORI) {
- | bgey >7
- } else {
- | bgey =>BC_JLOOP
- }
- | b <2
- if (op == BC_JFORI) {
- |7:
- | lwz INS, -4(PC)
- | decode_RD8 RD, INS
- | b =>BC_JLOOP
- }
- break;
-
- case BC_ITERL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IITERL follows.
- break;
-
- case BC_JITERL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IITERL:
- | // RA = base*8, RD = target
- | lwzux TMP1, RA, BASE
- | lwz TMP2, 4(RA)
- | checknil TMP1; beq >1 // Stop if iterator returned nil.
- if (op == BC_JITERL) {
- | stw TMP1, -8(RA)
- | stw TMP2, -4(RA)
- | b =>BC_JLOOP
- } else {
- | branch_RD // Otherwise save control var + branch.
- | stw TMP1, -8(RA)
- | stw TMP2, -4(RA)
- }
- |1:
- | ins_next
- break;
-
- case BC_LOOP:
- | // RA = base*8, RD = target (loop extent)
- | // Note: RA/RD is only used by trace recorder to determine scope/extent
- | // This opcode does NOT jump, its only purpose is to detect a hot loop.
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_ILOOP follows.
- break;
-
- case BC_ILOOP:
- | // RA = base*8, RD = target (loop extent)
- | ins_next
- break;
-
- case BC_JLOOP:
- |.if JIT
- | // RA = base*8 (ignored), RD = traceno*8
- | lwz TMP1, DISPATCH_J(trace)(DISPATCH)
- | srwi RD, RD, 1
- | // Traces on PPC don't store the trace number, so use 0.
- | stw ZERO, DISPATCH_GL(vmstate)(DISPATCH)
- | lwzx TRACE:TMP2, TMP1, RD
- | clrso TMP1
- | lp TMP2, TRACE:TMP2->mcode
- | stw BASE, DISPATCH_GL(jit_base)(DISPATCH)
- | mtctr TMP2
- | stw L, DISPATCH_GL(jit_L)(DISPATCH)
- | addi JGL, DISPATCH, GG_DISP2G+32768
- | bctr
- |.endif
- break;
-
- case BC_JMP:
- | // RA = base*8 (only used by trace recorder), RD = target
- | branch_RD
- | ins_next
- break;
-
- /* -- Function headers -------------------------------------------------- */
-
- case BC_FUNCF:
- |.if JIT
- | hotcall
- |.endif
- case BC_FUNCV: /* NYI: compiled vararg functions. */
- | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
- break;
-
- case BC_JFUNCF:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IFUNCF:
- | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
- | lwz TMP2, L->maxstack
- | lbz TMP1, -4+PC2PROTO(numparams)(PC)
- | lwz KBASE, -4+PC2PROTO(k)(PC)
- | cmplw RA, TMP2
- | slwi TMP1, TMP1, 3
- | bgt ->vm_growstack_l
- if (op != BC_JFUNCF) {
- | ins_next1
- }
- |2:
- | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
- | blt >3
- if (op == BC_JFUNCF) {
- | decode_RD8 RD, INS
- | b =>BC_JLOOP
- } else {
- | ins_next2
- }
- |
- |3: // Clear missing parameters.
- | stwx TISNIL, BASE, NARGS8:RC
- | addi NARGS8:RC, NARGS8:RC, 8
- | b <2
- break;
-
- case BC_JFUNCV:
-#if !LJ_HASJIT
- break;
-#endif
- | NYI // NYI: compiled vararg functions
- break; /* NYI: compiled vararg functions. */
-
- case BC_IFUNCV:
- | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
- | lwz TMP2, L->maxstack
- | add TMP1, BASE, RC
- | add TMP0, RA, RC
- | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
- | addi TMP3, RC, 8+FRAME_VARG
- | lwz KBASE, -4+PC2PROTO(k)(PC)
- | cmplw TMP0, TMP2
- | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
- | bge ->vm_growstack_l
- | lbz TMP2, -4+PC2PROTO(numparams)(PC)
- | mr RA, BASE
- | mr RC, TMP1
- | ins_next1
- | cmpwi TMP2, 0
- | addi BASE, TMP1, 8
- | beq >3
- |1:
- | cmplw RA, RC // Less args than parameters?
- | lwz TMP0, 0(RA)
- | lwz TMP3, 4(RA)
- | bge >4
- | stw TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
- | addi RA, RA, 8
- |2:
- | addic. TMP2, TMP2, -1
- | stw TMP0, 8(TMP1)
- | stw TMP3, 12(TMP1)
- | addi TMP1, TMP1, 8
- | bne <1
- |3:
- | ins_next2
- |
- |4: // Clear missing parameters.
- | li TMP0, LJ_TNIL
- | b <2
- break;
-
- case BC_FUNCC:
- case BC_FUNCCW:
- | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
- if (op == BC_FUNCC) {
- | lp RD, CFUNC:RB->f
- } else {
- | lp RD, DISPATCH_GL(wrapf)(DISPATCH)
- }
- | add TMP1, RA, NARGS8:RC
- | lwz TMP2, L->maxstack
- | .toc lp TMP3, 0(RD)
- | add RC, BASE, NARGS8:RC
- | stp BASE, L->base
- | cmplw TMP1, TMP2
- | stp RC, L->top
- | li_vmstate C
- |.if TOC
- | mtctr TMP3
- |.else
- | mtctr RD
- |.endif
- if (op == BC_FUNCCW) {
- | lp CARG2, CFUNC:RB->f
- }
- | mr CARG1, L
- | bgt ->vm_growstack_c // Need to grow stack.
- | .toc lp TOCREG, TOC_OFS(RD)
- | .tocenv lp ENVREG, ENV_OFS(RD)
- | st_vmstate
- | bctrl // (lua_State *L [, lua_CFunction f])
- | // Returns nresults.
- | lp BASE, L->base
- | .toc ld TOCREG, SAVE_TOC
- | slwi RD, CRET1, 3
- | lp TMP1, L->top
- | li_vmstate INTERP
- | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
- | sub RA, TMP1, RD // RA = L->top - nresults*8
- | st_vmstate
- | b ->vm_returnc
- break;
-
- /* ---------------------------------------------------------------------- */
-
- default:
- fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
- exit(2);
- break;
- }
-}
-
-static int build_backend(BuildCtx *ctx)
-{
- int op;
-
- dasm_growpc(Dst, BC__MAX);
-
- build_subroutines(ctx);
-
- |.code_op
- for (op = 0; op < BC__MAX; op++)
- build_ins(ctx, (BCOp)op, op);
-
- return BC__MAX;
-}
-
-/* Emit pseudo frame-info for all assembler functions. */
-static void emit_asm_debug(BuildCtx *ctx)
-{
- int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
- int i;
- switch (ctx->mode) {
- case BUILD_elfasm:
- fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
- fprintf(ctx->fp,
- ".Lframe0:\n"
- "\t.long .LECIE0-.LSCIE0\n"
- ".LSCIE0:\n"
- "\t.long 0xffffffff\n"
- "\t.byte 0x1\n"
- "\t.string \"\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 65\n"
- "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE0:\n\n");
- fprintf(ctx->fp,
- ".LSFDE0:\n"
- "\t.long .LEFDE0-.LASFDE0\n"
- ".LASFDE0:\n"
- "\t.long .Lframe0\n"
- "\t.long .Lbegin\n"
- "\t.long %d\n"
- "\t.byte 0xe\n\t.uleb128 %d\n"
- "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
- "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
- fcofs, CFRAME_SIZE);
- for (i = 14; i <= 31; i++)
- fprintf(ctx->fp,
- "\t.byte %d\n\t.uleb128 %d\n"
- "\t.byte %d\n\t.uleb128 %d\n",
- 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE0:\n\n");
-#if LJ_HASFFI
- fprintf(ctx->fp,
- ".LSFDE1:\n"
- "\t.long .LEFDE1-.LASFDE1\n"
- ".LASFDE1:\n"
- "\t.long .Lframe0\n"
-#if LJ_TARGET_PS3
- "\t.long .lj_vm_ffi_call\n"
-#else
- "\t.long lj_vm_ffi_call\n"
-#endif
- "\t.long %d\n"
- "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
- "\t.byte 0x8e\n\t.uleb128 2\n"
- "\t.byte 0xd\n\t.uleb128 0xe\n"
- "\t.align 2\n"
- ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
-#endif
-#if !LJ_NO_UNWIND
- fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
- fprintf(ctx->fp,
- ".Lframe1:\n"
- "\t.long .LECIE1-.LSCIE1\n"
- ".LSCIE1:\n"
- "\t.long 0\n"
- "\t.byte 0x1\n"
- "\t.string \"zPR\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 65\n"
- "\t.uleb128 6\n" /* augmentation length */
- "\t.byte 0x1b\n" /* pcrel|sdata4 */
- "\t.long lj_err_unwind_dwarf-.\n"
- "\t.byte 0x1b\n" /* pcrel|sdata4 */
- "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE1:\n\n");
- fprintf(ctx->fp,
- ".LSFDE2:\n"
- "\t.long .LEFDE2-.LASFDE2\n"
- ".LASFDE2:\n"
- "\t.long .LASFDE2-.Lframe1\n"
- "\t.long .Lbegin-.\n"
- "\t.long %d\n"
- "\t.uleb128 0\n" /* augmentation length */
- "\t.byte 0xe\n\t.uleb128 %d\n"
- "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
- "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
- fcofs, CFRAME_SIZE);
- for (i = 14; i <= 31; i++)
- fprintf(ctx->fp,
- "\t.byte %d\n\t.uleb128 %d\n"
- "\t.byte %d\n\t.uleb128 %d\n",
- 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE2:\n\n");
-#if LJ_HASFFI
- fprintf(ctx->fp,
- ".Lframe2:\n"
- "\t.long .LECIE2-.LSCIE2\n"
- ".LSCIE2:\n"
- "\t.long 0\n"
- "\t.byte 0x1\n"
- "\t.string \"zR\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 65\n"
- "\t.uleb128 1\n" /* augmentation length */
- "\t.byte 0x1b\n" /* pcrel|sdata4 */
- "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE2:\n\n");
- fprintf(ctx->fp,
- ".LSFDE3:\n"
- "\t.long .LEFDE3-.LASFDE3\n"
- ".LASFDE3:\n"
- "\t.long .LASFDE3-.Lframe2\n"
- "\t.long lj_vm_ffi_call-.\n"
- "\t.long %d\n"
- "\t.uleb128 0\n" /* augmentation length */
- "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
- "\t.byte 0x8e\n\t.uleb128 2\n"
- "\t.byte 0xd\n\t.uleb128 0xe\n"
- "\t.align 2\n"
- ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
-#endif
-#endif
- break;
- default:
- break;
- }
-}
-
+|// Low-level VM code for PowerPC CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch ppc
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// DynASM defines used by the PPC port:
+|//
+|// P64 64 bit pointers (only for GPR64 testing).
+|// Note: a full PPC64 _LP64 port is not planned.
+|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3).
+|// Affects reg saves, stack layout, carry/overflow/dot flags etc.
+|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360).
+|// TOC Need table of contents (64 bit or 32 bit variant, e.g. PS3).
+|// Function pointers are really a struct: code, TOC, env (optional).
+|// TOCENV Function pointers have an environment pointer, too (not on PS3).
+|// PPE Power Processor Element of Cell (PS3) or Xenon (Xbox 360).
+|// Must avoid (slow) micro-coded instructions.
+|
+|.if P64
+|.define TOC, 1
+|.define TOCENV, 1
+|.macro lpx, a, b, c; ldx a, b, c; .endmacro
+|.macro lp, a, b; ld a, b; .endmacro
+|.macro stp, a, b; std a, b; .endmacro
+|.define decode_OPP, decode_OP8
+|.if FFI
+|// Missing: Calling conventions, 64 bit regs, TOC.
+|.error lib_ffi not yet implemented for PPC64
+|.endif
+|.else
+|.macro lpx, a, b, c; lwzx a, b, c; .endmacro
+|.macro lp, a, b; lwz a, b; .endmacro
+|.macro stp, a, b; stw a, b; .endmacro
+|.define decode_OPP, decode_OP4
+|.endif
+|
+|// Convenience macros for TOC handling.
+|.if TOC
+|// Linker needs a TOC patch area for every external call relocation.
+|.macro blex, target; bl extern target@plt; nop; .endmacro
+|.macro .toc, a, b; a, b; .endmacro
+|.if P64
+|.define TOC_OFS, 8
+|.define ENV_OFS, 16
+|.else
+|.define TOC_OFS, 4
+|.define ENV_OFS, 8
+|.endif
+|.else // No TOC.
+|.macro blex, target; bl extern target@plt; .endmacro
+|.macro .toc, a, b; .endmacro
+|.endif
+|.macro .tocenv, a, b; .if TOCENV; a, b; .endif; .endmacro
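
Where TOC is defined, a C function pointer is not the code address itself but a small descriptor; the .toc/.tocenv helpers above pull the entry point, the callee's TOC value and (optionally) an environment pointer out of it. A rough sketch of the layout implied by TOC_OFS/ENV_OFS, 64-bit slot sizes shown (the 32-bit PS3 variant uses 4-byte slots and has no environment word); the struct name is purely illustrative:

    struct func_descriptor {   /* hypothetical name, for illustration only */
      void *entry;             /* +0:       code address ("lp TMP3, 0(RD)") */
      void *toc;               /* +TOC_OFS: r2/TOC value for the callee     */
      void *env;               /* +ENV_OFS: environment (TOCENV only)       */
    };
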
+|
+|.macro .gpr64, a, b; .if GPR64; a, b; .endif; .endmacro
+|
+|.macro andix., y, a, i
+|.if PPE
+| rlwinm y, a, 0, 31-lj_fls(i), 31-lj_ffs(i)
+| cmpwi y, 0
+|.else
+| andi. y, a, i
+|.endif
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA)
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r14 // Base of current Lua stack frame.
+|.define KBASE, r15 // Constants of current Lua function.
+|.define PC, r16 // Next PC.
+|.define DISPATCH, r17 // Opcode dispatch table.
+|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
+|.define JGL, r31 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNUM, r22
+|.define TISNIL, r23
+|.define ZERO, r24
+|.define TOBIT, f30 // 2^52 + 2^51.
+|.define TONUM, f31 // 2^52 + 2^51 + 2^31.
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r20 // Callee-save.
+|.define RB, r10
+|.define RC, r11
+|.define RD, r12
+|.define INS, r7 // Overlaps CARG5.
+|
+|.define TMP0, r0
+|.define TMP1, r8
+|.define TMP2, r9
+|.define TMP3, r6 // Overlaps CARG4.
+|
+|// Saved temporaries.
+|.define SAVE0, r21
+|
+|// Calling conventions.
+|.define CARG1, r3
+|.define CARG2, r4
+|.define CARG3, r5
+|.define CARG4, r6 // Overlaps TMP3.
+|.define CARG5, r7 // Overlaps INS.
+|
+|.define FARG1, f1
+|.define FARG2, f2
+|
+|.define CRET1, r3
+|.define CRET2, r4
+|
+|.define TOCREG, r2 // TOC register (only used by C code).
+|.define ENVREG, r11 // Environment pointer (nested C functions).
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.if GPR64
+|.if FRAME32
+|
+|// 456(sp) // \ 32/64 bit C frame info
+|.define TONUM_LO, 452(sp) // |
+|.define TONUM_HI, 448(sp) // |
+|.define TMPD_LO, 444(sp) // |
+|.define TMPD_HI, 440(sp) // |
+|.define SAVE_CR, 432(sp) // | 64 bit CR save.
+|.define SAVE_ERRF, 424(sp) // > Parameter save area.
+|.define SAVE_NRES, 420(sp) // |
+|.define SAVE_L, 416(sp) // |
+|.define SAVE_PC, 412(sp) // |
+|.define SAVE_MULTRES, 408(sp) // |
+|.define SAVE_CFRAME, 400(sp) // / 64 bit C frame chain.
+|// 392(sp) // Reserved.
+|.define CFRAME_SPACE, 384 // Delta for sp.
+|// Back chain for sp: 384(sp) <-- sp entering interpreter
+|.define SAVE_LR, 376(sp) // 32 bit LR stored in hi-part.
+|.define SAVE_GPR_, 232 // .. 232+18*8: 64 bit GPR saves.
+|.define SAVE_FPR_, 88 // .. 88+18*8: 64 bit FPR saves.
+|// 80(sp) // Needed for 16 byte stack frame alignment.
+|// 16(sp) // Callee parameter save area (ABI mandated).
+|// 8(sp) // Reserved
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|// 32 bit sp stored in hi-part of 0(sp).
+|
+|.define TMPD_BLO, 447(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|
+|.else
+|
+|// 508(sp) // \ 32 bit C frame info.
+|.define SAVE_ERRF, 472(sp) // |
+|.define SAVE_NRES, 468(sp) // |
+|.define SAVE_L, 464(sp) // > Parameter save area.
+|.define SAVE_PC, 460(sp) // |
+|.define SAVE_MULTRES, 456(sp) // |
+|.define SAVE_CFRAME, 448(sp) // / 64 bit C frame chain.
+|.define SAVE_LR, 416(sp)
+|.define CFRAME_SPACE, 400 // Delta for sp.
+|// Back chain for sp: 400(sp) <-- sp entering interpreter
+|.define SAVE_FPR_, 256 // .. 256+18*8: 64 bit FPR saves.
+|.define SAVE_GPR_, 112 // .. 112+18*8: 64 bit GPR saves.
+|// 48(sp) // Callee parameter save area (ABI mandated).
+|.define SAVE_TOC, 40(sp) // TOC save area.
+|.define TMPD_LO, 36(sp) // \ Link editor temp (ABI mandated).
+|.define TMPD_HI, 32(sp) // /
+|.define TONUM_LO, 28(sp) // \ Compiler temp (ABI mandated).
+|.define TONUM_HI, 24(sp) // /
+|// Next frame lr: 16(sp)
+|.define SAVE_CR, 8(sp) // 64 bit CR save.
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|
+|.define TMPD_BLO, 39(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|
+|.endif
+|.else
+|
+|.define SAVE_LR, 276(sp)
+|.define CFRAME_SPACE, 272 // Delta for sp.
+|// Back chain for sp: 272(sp) <-- sp entering interpreter
+|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves.
+|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves.
+|.define SAVE_CR, 52(sp) // 32 bit CR save.
+|.define SAVE_ERRF, 48(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 44(sp)
+|.define SAVE_CFRAME, 40(sp)
+|.define SAVE_L, 36(sp)
+|.define SAVE_PC, 32(sp)
+|.define SAVE_MULTRES, 28(sp)
+|.define UNUSED1, 24(sp)
+|.define TMPD_LO, 20(sp)
+|.define TMPD_HI, 16(sp)
+|.define TONUM_LO, 12(sp)
+|.define TONUM_HI, 8(sp)
+|// Next frame lr: 4(sp)
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|
+|.define TMPD_BLO, 23(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|
+|.endif
+|
+|.macro save_, reg
+|.if GPR64
+| std r..reg, SAVE_GPR_+(reg-14)*8(sp)
+|.else
+| stw r..reg, SAVE_GPR_+(reg-14)*4(sp)
+|.endif
+| stfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
+|.endmacro
+|.macro rest_, reg
+|.if GPR64
+| ld r..reg, SAVE_GPR_+(reg-14)*8(sp)
+|.else
+| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp)
+|.endif
+| lfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
+|.endmacro
+|
+|.macro saveregs
+|.if GPR64 and not FRAME32
+| stdu sp, -CFRAME_SPACE(sp)
+|.else
+| stwu sp, -CFRAME_SPACE(sp)
+|.endif
+| save_ 14; save_ 15; save_ 16
+| mflr r0
+| save_ 17; save_ 18; save_ 19; save_ 20; save_ 21; save_ 22
+|.if GPR64 and not FRAME32
+| std r0, SAVE_LR
+|.else
+| stw r0, SAVE_LR
+|.endif
+| save_ 23; save_ 24; save_ 25
+| mfcr r0
+| save_ 26; save_ 27; save_ 28; save_ 29; save_ 30; save_ 31
+|.if GPR64
+| std r0, SAVE_CR
+|.else
+| stw r0, SAVE_CR
+|.endif
+| .toc std TOCREG, SAVE_TOC
+|.endmacro
+|
+|.macro restoreregs
+|.if GPR64 and not FRAME32
+| ld r0, SAVE_LR
+|.else
+| lwz r0, SAVE_LR
+|.endif
+|.if GPR64
+| ld r12, SAVE_CR
+|.else
+| lwz r12, SAVE_CR
+|.endif
+| rest_ 14; rest_ 15; rest_ 16; rest_ 17; rest_ 18; rest_ 19
+| mtlr r0;
+|.if PPE; mtocrf 0x20, r12; .else; mtcrf 0x38, r12; .endif
+| rest_ 20; rest_ 21; rest_ 22; rest_ 23; rest_ 24; rest_ 25
+|.if PPE; mtocrf 0x10, r12; .endif
+| rest_ 26; rest_ 27; rest_ 28; rest_ 29; rest_ 30; rest_ 31
+|.if PPE; mtocrf 0x08, r12; .endif
+| addi sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// These basic macros should really be part of DynASM.
+|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
+|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
+|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
+|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
+|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; tw 4, sp, sp; .endmacro
+|
+|// int/FP conversions.
+|.macro tonum_i, freg, reg
+| xoris reg, reg, 0x8000
+| stw reg, TONUM_LO
+| lfd freg, TONUM_D
+| fsub freg, freg, TONUM
+|.endmacro
+|
+|.macro tonum_u, freg, reg
+| stw reg, TONUM_LO
+| lfd freg, TONUM_D
+| fsub freg, freg, TOBIT
+|.endmacro
+|
+|.macro toint, reg, freg, tmpfreg
+| fctiwz tmpfreg, freg
+| stfd tmpfreg, TMPD
+| lwz reg, TMPD_LO
+|.endmacro
+|
+|.macro toint, reg, freg
+| toint reg, freg, freg
+|.endmacro
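
The tonum_i/tonum_u macros use the classic PPC bias trick instead of a GPR-to-FPR integer conversion: the (sign-flipped) integer becomes the low word of a double whose high word is 0x43380000, which is exactly 2^52+2^51+x, and subtracting the magic constant leaves the original value. A minimal C sketch of tonum_i, assuming big-endian word order as on PPC (the function name is just for illustration):

    #include <stdint.h>
    static double tonum_i_sketch(int32_t i)
    {
      union { double d; uint32_t w[2]; } u;
      u.w[0] = 0x43380000u;                /* high word of 2^52 + 2^51 */
      u.w[1] = (uint32_t)i ^ 0x80000000u;  /* low word: i with sign bit flipped */
      return u.d - (6755399441055744.0 + 2147483648.0);  /* TONUM = 2^52+2^51+2^31 */
    }
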
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -4
+|
+|// Instruction decode.
+|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
+|.macro decode_OP8, dst, ins; rlwinm dst, ins, 3, 21, 28; .endmacro
+|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
+|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
+|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
+|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
+|
+|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
+|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
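
Each decode_* macro is a single rlwinm rotate-and-mask that extracts one bytecode operand and returns it already scaled by 4 or 8, ready to be used as a byte offset into the dispatch table or the stack. In plain C they amount to the following shifts and masks, mirroring the LuaJIT instruction layout (OP in the low byte, A in the next byte, B/C or the 16-bit D field in the upper half); a sketch with illustrative names:

    #include <stdint.h>
    static uint32_t decode_OP4(uint32_t ins) { return (ins & 0xffu) << 2; }
    static uint32_t decode_OP8(uint32_t ins) { return (ins & 0xffu) << 3; }
    static uint32_t decode_RA8(uint32_t ins) { return ((ins >>  8) & 0xffu)   << 3; }
    static uint32_t decode_RB8(uint32_t ins) { return ((ins >> 24) & 0xffu)   << 3; }
    static uint32_t decode_RC8(uint32_t ins) { return ((ins >> 16) & 0xffu)   << 3; }
    static uint32_t decode_RD8(uint32_t ins) { return ((ins >> 16) & 0xffffu) << 3; }
    static uint32_t decode_RD4(uint32_t ins) { return ((ins >> 16) & 0xffffu) << 2; }
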
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch. Note: optimized for e300!
+|.macro ins_NEXT2
+| decode_OPP TMP1, INS
+| lpx TMP0, DISPATCH, TMP1
+| mtctr TMP0
+| decode_RB8 RB, INS
+| decode_RD8 RD, INS
+| decode_RA8 RA, INS
+| decode_RC8 RC, INS
+| bctr
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lwz PC, LFUNC:RB->pc
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+| decode_OPP TMP1, INS
+| decode_RA8 RA, INS
+| lpx TMP0, DISPATCH, TMP1
+| add RA, RA, BASE
+| mtctr TMP0
+| bctr
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| stw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checknum, reg; cmplw reg, TISNUM; .endmacro
+|.macro checknum, cr, reg; cmplw cr, reg, TISNUM; .endmacro
+|.macro checkstr, reg; cmpwi reg, LJ_TSTR; .endmacro
+|.macro checktab, reg; cmpwi reg, LJ_TTAB; .endmacro
+|.macro checkfunc, reg; cmpwi reg, LJ_TFUNC; .endmacro
+|.macro checknil, reg; cmpwi reg, LJ_TNIL; .endmacro
+|
+|.macro branch_RD
+| srwi TMP0, RD, 1
+| addis PC, PC, -(BCBIAS_J*4 >> 16)
+| add PC, PC, TMP0
+|.endmacro
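
branch_RD turns the pre-scaled jump operand (RD = D*8) into a PC-relative branch: the 16-bit D field is biased by BCBIAS_J (0x8000), and PC already points past the jump instruction when the opcode executes, so the new PC is PC + (D - 0x8000)*4. Roughly, in C (sketch only):

    #include <stdint.h>
    static const uint32_t *branch_RD_sketch(const uint32_t *pc, uint32_t rd8)
    {
      return pc + ((int32_t)(rd8 >> 3) - 0x8000);  /* pc is past the branch already */
    }
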
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro hotcheck, delta, target
+| rlwinm TMP1, PC, 31, 25, 30
+| addi TMP1, TMP1, GG_DISP2HOT
+| lhzx TMP2, DISPATCH, TMP1
+| addic. TMP2, TMP2, -delta
+| sthx TMP2, DISPATCH, TMP1
+| blt target
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
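
hotcheck does the hot-path detection: the rlwinm hashes the bytecode PC into one of 64 halfword counters stored at GG_DISP2HOT in front of the dispatch table, decrements the counter by the loop/call weight and branches to vm_hotloop/vm_hotcall once it underflows. A rough C rendering (the 64-slot, 16-bit layout is what the (PC>>1)&0x7e byte offset above implies; the function name is illustrative):

    #include <stdint.h>
    static int hotcheck_sketch(uint16_t *hotcount, uintptr_t pc, int delta)
    {
      unsigned idx = (unsigned)(pc >> 2) & 63u;  /* same slot as (PC>>1) & 0x7e bytes */
      int n = (int)hotcount[idx] - delta;
      hotcount[idx] = (uint16_t)n;
      return n < 0;  /* underflow: record this loop/call as hot */
    }
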
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| // Assumes LJ_GC_BLACK is 0x04.
+| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
+| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| stb mark, tab->marked
+| stw tmp, tab->gclist
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andix. TMP0, PC, FRAME_P
+ | li TMP1, LJ_TTRUE
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | mr BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
+ |
+ |->vm_returnc:
+ | addi RD, RD, 8 // RD = (nresults+1)*8.
+ | andix. TMP0, PC, FRAME_TYPE
+ | cmpwi cr1, RD, 0
+ | li CRET1, LUA_YIELD
+ | beq cr1, ->vm_unwind_c_eh
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | cmpwi TMP0, FRAME_C
+ | rlwinm TMP2, PC, 0, 0, 28
+ | li_vmstate C
+ | sub TMP2, BASE, TMP2 // TMP2 = previous base.
+ | bney ->vm_returnp
+ |
+ | addic. TMP1, RD, -8
+ | stp TMP2, L->base
+ | lwz TMP2, SAVE_NRES
+ | subi BASE, BASE, 8
+ | st_vmstate
+ | slwi TMP2, TMP2, 3
+ | beq >2
+ |1:
+ | addic. TMP1, TMP1, -8
+ | lfd f0, 0(RA)
+ | addi RA, RA, 8
+ | stfd f0, 0(BASE)
+ | addi BASE, BASE, 8
+ | bney <1
+ |
+ |2:
+ | cmpw TMP2, RD // More/less results wanted?
+ | bne >6
+ |3:
+ | stp BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lp TMP0, SAVE_CFRAME // Restore previous C frame.
+ | li CRET1, 0 // Ok return status for vm_pcall.
+ | stp TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | blr
+ |
+ |6:
+ | ble >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | lwz TMP1, L->maxstack
+ | cmplw BASE, TMP1
+ | bge >8
+ | stw TISNIL, 0(BASE)
+ | addi RD, RD, 8
+ | addi BASE, BASE, 8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | subfic TMP3, TMP2, 0 // LUA_MULTRET+1 case?
+ | sub TMP0, RD, TMP2
+ | subfe TMP1, TMP1, TMP1 // TMP1 = TMP2 == 0 ? 0 : -1
+ | and TMP0, TMP0, TMP1
+ | sub BASE, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | stp BASE, L->top // Save current top held in BASE (yes).
+ | mr SAVE0, RD
+ | mr CARG2, TMP2
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz TMP2, SAVE_NRES
+ | mr RD, SAVE0
+ | slwi TMP2, TMP2, 3
+ | lp BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mr sp, CARG1
+ | mr CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | .toc ld TOCREG, SAVE_TOC
+ | li TMP0, ~LJ_VMST_C
+ | lwz GL:TMP1, L->glref
+ | stw TMP0, GL:TMP1->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ |.if GPR64
+ | rldicr sp, CARG1, 0, 61
+ |.else
+ | rlwinm sp, CARG1, 0, 0, 29
+ |.endif
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | .toc ld TOCREG, SAVE_TOC
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp BASE, L->base
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li ZERO, 0
+ | stw TMP3, TMPD
+ | li TMP1, LJ_TFALSE
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | li TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | lfs TOBIT, TMPD
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | la RA, -8(BASE) // Results start at BASE-8.
+ | stw TMP3, TMPD
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw TMP1, 0(RA) // Prepend false to error message.
+ | li RD, 16 // 2 results: false + error message.
+ | st_vmstate
+ | lfs TONUM, TMPD
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | li CARG2, LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | stp BASE, L->base
+ | addi PC, PC, 4 // Must point after first instruction.
+ | stp RC, L->top
+ | srwi CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lp BASE, L->base
+ | lp RC, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mr L, CARG1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mr BASE, CARG2
+ | lbz TMP1, L->status
+ | stw L, SAVE_L
+ | li PC, FRAME_CP
+ | addi TMP0, sp, CFRAME_RESUME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw CARG3, SAVE_NRES
+ | cmplwi TMP1, 0
+ | stw CARG3, SAVE_ERRF
+ | stp TMP0, L->cframe
+ | stp CARG3, SAVE_CFRAME
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | mr RA, BASE
+ | lp BASE, L->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp TMP1, L->top
+ | lwz PC, FRAME_PC(BASE)
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | stb CARG3, L->status
+ | stw TMP3, TMPD
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | lfs TOBIT, TMPD
+ | sub RD, TMP1, BASE
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | addi RD, RD, 8
+ | stw TMP0, TONUM_HI
+ | li_vmstate INTERP
+ | li ZERO, 0
+ | st_vmstate
+ | andix. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | lfs TONUM, TMPD
+ | li TISNIL, LJ_TNIL
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | li PC, FRAME_CP
+ | stw CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lp TMP1, L:CARG1->cframe
+ | stw CARG3, SAVE_NRES
+ | mr L, CARG1
+ | stw CARG1, SAVE_L
+ | mr BASE, CARG2
+ | stp sp, L->cframe // Add our C frame to cframe chain.
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | stp TMP1, SAVE_CFRAME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | lp TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp TMP1, L->top
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | add PC, PC, BASE
+ | stw TMP3, TMPD
+ | li ZERO, 0
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | lfs TOBIT, TMPD
+ | sub PC, PC, TMP2 // PC = frame delta + frame type
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | sub NARGS8:RC, TMP1, BASE
+ | stw TMP0, TONUM_HI
+ | li_vmstate INTERP
+ | lfs TONUM, TMPD
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | lwz TMP0, FRAME_PC(BASE)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | checkfunc TMP0; bne ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mr L, CARG1
+ | lwz TMP0, L:CARG1->stack
+ | stw CARG1, SAVE_L
+ | lp TMP1, L->top
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lp TMP1, L->cframe
+ | stp sp, L->cframe // Add our C frame to cframe chain.
+ | .toc lp CARG4, 0(CARG4)
+ | li TMP2, 0
+ | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | stw TMP2, SAVE_ERRF // No error function.
+ | stp TMP1, SAVE_CFRAME
+ | mtctr CARG4
+ | bctrl // (lua_State *L, lua_CFunction func, void *ud)
+ |.if PPE
+ | mr BASE, CRET1
+ | cmpwi CRET1, 0
+ |.else
+ | mr. BASE, CRET1
+ |.endif
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li PC, FRAME_CP
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lwz TMP0, -12(BASE) // Continuation.
+ | mr RB, BASE
+ | mr BASE, TMP2 // Restore caller BASE.
+ | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
+ |.if FFI
+ | cmplwi TMP0, 1
+ |.endif
+ | lwz PC, -16(RB) // Restore PC from [cont|PC].
+ | subi TMP2, RD, 8
+ | lwz TMP1, LFUNC:TMP1->pc
+ | stwx TISNIL, RA, TMP2 // Ensure one valid arg.
+ |.if FFI
+ | ble >1
+ |.endif
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // BASE = base, RA = resultptr, RB = meta base
+ | mtctr TMP0
+ | bctr // Jump to continuation.
+ |
+ |.if FFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | subi TMP1, RB, 16
+ | sub RC, TMP1, BASE
+ | b ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lwz INS, -4(PC)
+ | subi CARG2, RB, 16
+ | decode_RB8 SAVE0, INS
+ | lfd f0, 0(RA)
+ | add TMP1, BASE, SAVE0
+ | stp BASE, L->base
+ | cmplw TMP1, CARG2
+ | sub CARG3, CARG2, TMP1
+ | decode_RA8 RA, INS
+ | stfd f0, 0(CARG2)
+ | bney ->BC_CAT_Z
+ | stfdx f0, BASE, RA
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TSTR
+ | decode_RB8 RB, INS
+ | stw STR:RC, 4(CARG3)
+ | add CARG2, BASE, RB
+ | stw TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgets:
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TTAB
+ | stw TAB:RB, 4(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | stw TMP0, 0(CARG2)
+ | li TMP1, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | stw TMP1, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ |.if not DUALNUM
+ | tonum_u f0, TMP0
+ |.endif
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ |.if DUALNUM
+ | stw TISNUM, 0(CARG3)
+ | stw TMP0, 4(CARG3)
+ |.else
+ | stfd f0, 0(CARG3)
+ |.endif
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | beq >3
+ | lfd f0, 0(CRET1)
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | subfic TMP1, BASE, FRAME_CONT
+ | lp BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 16 // 2 args for func(t, k).
+ | b ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TSTR
+ | decode_RB8 RB, INS
+ | stw STR:RC, 4(CARG3)
+ | add CARG2, BASE, RB
+ | stw TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsets:
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TTAB
+ | stw TAB:RB, 4(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | stw TMP0, 0(CARG2)
+ | li TMP1, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | stw TMP1, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ |.if not DUALNUM
+ | tonum_u f0, TMP0
+ |.endif
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ |.if DUALNUM
+ | stw TISNUM, 0(CARG3)
+ | stw TMP0, 4(CARG3)
+ |.else
+ | stfd f0, 0(CARG3)
+ |.endif
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | lfdx f0, BASE, RA
+ | beq >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | stfd f0, 0(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | subfic TMP1, BASE, FRAME_CONT
+ | lp BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ | stfd f0, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mr CARG1, L
+ | subi PC, PC, 4
+ |.if DUALNUM
+ | mr CARG2, RA
+ |.else
+ | add CARG2, BASE, RA
+ |.endif
+ | stw PC, SAVE_PC
+ |.if DUALNUM
+ | mr CARG3, RD
+ |.else
+ | add CARG3, BASE, RD
+ |.endif
+ | stp BASE, L->base
+ | decode_OP1 CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmplwi CRET1, 1
+ | bgt ->vmeta_binop
+ | subfic CRET1, CRET1, 0
+ |4:
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | decode_RD4 TMP2, INS
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | and TMP2, TMP2, CRET1
+ | add PC, PC, TMP2
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lwz INS, -4(PC)
+ | lfd f0, 0(RA)
+ | decode_RA8 TMP1, INS
+ | stfdx f0, BASE, TMP1
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is true.
+ | subfe CRET1, CRET1, CRET1
+ | not CRET1, CRET1
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is false.
+ | subfe CRET1, CRET1, CRET1
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | subi PC, PC, 4
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | mr CARG2, INS
+ | subi PC, PC, 4
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |.endif
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_nv:
+ | add CARG3, KBASE, RC
+ | add CARG4, BASE, RB
+ | b >1
+ |->vmeta_arith_nv2:
+ |.if DUALNUM
+ | mr CARG3, RC
+ | mr CARG4, RB
+ | b >1
+ |.endif
+ |
+ |->vmeta_unm:
+ | mr CARG3, RD
+ | mr CARG4, RD
+ | b >1
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |.if DUALNUM
+ | b >1
+ |.endif
+ |->vmeta_arith_vn2:
+ |->vmeta_arith_vv2:
+ |.if DUALNUM
+ | mr CARG3, RB
+ | mr CARG4, RC
+ |.endif
+ |1:
+ | add CARG2, BASE, RA
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | stw PC, -16(CRET1) // [cont|PC]
+ | mr TMP2, BASE
+ | addi PC, TMP1, FRAME_CONT
+ | mr BASE, CRET1
+ | li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+#if LJ_52
+ | mr SAVE0, CARG1
+#endif
+ | mr CARG2, RD
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | cmplwi CRET1, 0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | mr CARG1, SAVE0
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | mr CARG1, L
+ | stp TMP2, L->base // This is the caller's base!
+ | subi CARG2, BASE, 8
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mr CARG1, L
+ | stp BASE, L->base
+ | subi CARG2, RA, 8
+ | stw PC, SAVE_PC
+ | add CARG3, RA, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz TMP1, FRAME_PC(BASE)
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mr CARG1, L
+ | stp BASE, L->base
+ | mr CARG2, RA
+ | stw PC, SAVE_PC
+ | mr SAVE0, INS
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ |.if JIT
+ | decode_OP1 TMP0, SAVE0
+ |.endif
+ | decode_RA8 RA, SAVE0
+ |.if JIT
+ | cmpwi TMP0, BC_JFORI
+ |.endif
+ | decode_RD8 RD, SAVE0
+ |.if JIT
+ | beqy =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz CARG1, 4(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ | lwz CARG1, 4(BASE)
+ | lwz CARG2, 12(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ | lfd FARG2, 8(BASE)
+ | blt ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ | checknum CARG4; bge ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
+ |.macro ffgccheck
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | cmplw TMP0, TMP1
+ | bgel ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | li TMP1, LJ_TFALSE
+ | la RA, -8(BASE)
+ | cmplw cr1, CARG3, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | bge cr1, ->fff_fallback
+ | stw CARG3, 0(RA)
+ | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | stw CARG1, 4(RA)
+ | beq ->fff_res // Done if exactly 1 argument.
+ | li TMP1, 8
+ | subi RC, RC, 8
+ |1:
+ | cmplw TMP1, RC
+ | lfdx f0, BASE, TMP1
+ | stfdx f0, RA, TMP1
+ | addi TMP1, TMP1, 8
+ | bney <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | .gpr64 extsw CARG1, CARG1
+ | subfc TMP0, TISNUM, CARG1
+ | subfe TMP2, CARG1, CARG1
+ | orc TMP1, TMP2, TMP0
+ | addi TMP1, TMP1, ~LJ_TISNUM+1
+ | slwi TMP1, TMP1, 3
+ | la TMP2, CFUNC:RB->upvalue
+ | lfdx FARG1, TMP2, TMP1
+ | b ->fff_resn
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktab CARG3; bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:CARG1, TAB:CARG1->metatable
+ |2:
+ | li CARG3, LJ_TNIL
+ | cmplwi TAB:CARG1, 0
+ | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beq ->fff_restv
+ | lwz TMP0, TAB:CARG1->hmask
+ | li CARG3, LJ_TTAB // Use metatable as default result.
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:CARG1->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | lwz CARG4, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
+ | checkstr CARG4; bne >4
+ | cmpw TMP0, STR:RC; beq >5
+ |4:
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | beq ->fff_restv // Not found, keep default result.
+ | b <3
+ |5:
+ | checknil CARG2
+ | beq ->fff_restv // Ditto for nil value.
+ | mr CARG3, CARG2 // Return value of mt.__metatable.
+ | mr CARG1, TMP1
+ | b ->fff_restv
+ |
+ |6:
+ | cmpwi CARG3, LJ_TUDATA; beq <1
+ | .gpr64 extsw CARG3, CARG3
+ | subfc TMP0, TISNUM, CARG3
+ | subfe TMP2, CARG3, CARG3
+ | orc TMP1, TMP2, TMP0
+ | addi TMP1, TMP1, ~LJ_TISNUM+1
+ | slwi TMP1, TMP1, 2
+ | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
+ | lwzx TAB:CARG1, TMP2, TMP1
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktab CARG3; bne ->fff_fallback
+ | lwz TAB:TMP1, TAB:CARG1->metatable
+ | checktab CARG4; bne ->fff_fallback
+ | cmplwi TAB:TMP1, 0
+ | lbz TMP3, TAB:CARG1->marked
+ | bne ->fff_fallback
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stw TAB:CARG2, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, TMP3, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG4, 0(BASE)
+ | lwz TAB:CARG2, 4(BASE)
+ | blt ->fff_fallback
+ | checktab CARG4; bne ->fff_fallback
+ | la CARG3, 8(BASE)
+ | mr CARG1, L
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | lfd FARG1, 0(CRET1)
+ | b ->fff_resn
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly one argument.
+ | checknum CARG1; bgt ->fff_fallback
+ | b ->fff_resn
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checkstr CARG3
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | checknum CARG3
+ | cmplwi cr1, TMP0, 0
+ | stp BASE, L->base // Add frame since C call can throw.
+ | crorc 4*cr0+eq, 4*cr0+gt, 4*cr1+eq
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | beq ->fff_fallback
+ | ffgccheck
+ | mr CARG1, L
+ | mr CARG2, BASE
+ |.if DUALNUM
+ | bl extern lj_str_fromnumber // (lua_State *L, cTValue *o)
+ |.else
+ | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
+ |.endif
+ | // Returns GCstr *.
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | lwz TAB:CARG2, 4(BASE)
+ | blt ->fff_fallback
+ | stwx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
+ | checktab CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+ | stp BASE, L->base // Add frame since C call can throw.
+ | mr CARG1, L
+ | stp BASE, L->top // Dummy frame length is ok.
+ | la CARG3, 8(BASE)
+ | stw PC, SAVE_PC
+ | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Returns 0 at end of traversal.
+ | cmplwi CRET1, 0
+ | li CARG3, LJ_TNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | lfd f0, 8(BASE) // Copy key and value to results.
+ | la RA, -8(BASE)
+ | lfd f1, 16(BASE)
+ | stfd f0, 0(RA)
+ | li RD, (2+1)*8
+ | stfd f1, 8(RA)
+ | b ->fff_res
+ |
+ |.ffunc_1 pairs
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ | stw TISNIL, 8(BASE)
+ | li RD, (3+1)*8
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ |
+ |.ffunc ipairs_aux
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lwz TAB:CARG1, 4(BASE)
+ | lwz CARG4, 8(BASE)
+ |.if DUALNUM
+ | lwz TMP2, 12(BASE)
+ |.else
+ | lfd FARG2, 8(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | checktab CARG3
+ | checknum cr1, CARG4
+ | lwz PC, FRAME_PC(BASE)
+ |.if DUALNUM
+ | bne ->fff_fallback
+ | bne cr1, ->fff_fallback
+ |.else
+ | lus TMP0, 0x3ff0
+ | stw ZERO, TMPD_LO
+ | bne ->fff_fallback
+ | stw TMP0, TMPD_HI
+ | bge cr1, ->fff_fallback
+ | lfd FARG1, TMPD
+ | toint TMP2, FARG2, f0
+ |.endif
+ | lwz TMP0, TAB:CARG1->asize
+ | lwz TMP1, TAB:CARG1->array
+ |.if not DUALNUM
+ | fadd FARG2, FARG2, FARG1
+ |.endif
+ | addi TMP2, TMP2, 1
+ | la RA, -8(BASE)
+ | cmplw TMP0, TMP2
+ |.if DUALNUM
+ | stw TISNUM, 0(RA)
+ | slwi TMP3, TMP2, 3
+ | stw TMP2, 4(RA)
+ |.else
+ | slwi TMP3, TMP2, 3
+ | stfd FARG2, 0(RA)
+ |.endif
+ | ble >2 // Not in array part?
+ | lwzx TMP2, TMP1, TMP3
+ | lfdx f0, TMP1, TMP3
+ |1:
+ | checknil TMP2
+ | li RD, (0+1)*8
+ | beq ->fff_res // End of iteration, return 0 results.
+ | li RD, (2+1)*8
+ | stfd f0, 8(RA)
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lwz TMP0, TAB:CARG1->hmask
+ | cmplwi TMP0, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | mr CARG2, TMP2
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cmplwi CRET1, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | lwz TMP2, 0(CRET1)
+ | lfd f0, 0(CRET1)
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | lfd f0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ |.if DUALNUM
+ | stw TISNUM, 8(BASE)
+ |.else
+ | stw ZERO, 8(BASE)
+ |.endif
+ | stw ZERO, 12(BASE)
+ | li RD, (3+1)*8
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | cmplwi NARGS8:RC, 8
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | blt ->fff_fallback
+ | mr TMP2, BASE
+ | la BASE, 8(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |.ffunc xpcall
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG4, 8(BASE)
+ | lfd FARG2, 8(BASE)
+ | lfd FARG1, 0(BASE)
+ | blt ->fff_fallback
+ | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | mr TMP2, BASE
+ | checkfunc CARG4; bne ->fff_fallback // Traceback must be a function.
+ | la BASE, 16(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | stfd FARG2, 0(TMP2) // Swap function and traceback.
+ | subi NARGS8:RC, NARGS8:RC, 16
+ | stfd FARG1, 8(TMP2)
+ | addi PC, TMP1, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | cmpwi CARG3, LJ_TTHREAD; bne ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | lbz TMP0, L:CARG1->status
+ | lp TMP1, L:CARG1->cframe
+ | lp CARG2, L:CARG1->top
+ | cmplwi cr0, TMP0, LUA_YIELD
+ | lp TMP2, L:CARG1->base
+ | cmplwi cr1, TMP1, 0
+ | lwz TMP0, L:CARG1->maxstack
+ | cmplw cr7, CARG2, TMP2
+ | lwz PC, FRAME_PC(BASE)
+ | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
+ | add TMP2, CARG2, NARGS8:RC
+ | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
+ | cmplw cr1, TMP2, TMP0
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
+ | stw PC, SAVE_PC
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
+ | stp BASE, L->base
+ | blt cr6, ->fff_fallback
+ |1:
+ |.if resume
+ | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | subi TMP2, TMP2, 8
+ |.endif
+ | stp TMP2, L:CARG1->top
+ | li TMP1, 0
+ | stp BASE, L->top
+ |2: // Move args to coroutine.
+ | cmpw TMP1, NARGS8:RC
+ | lfdx f0, BASE, TMP1
+ | beq >3
+ | stfdx f0, CARG2, TMP1
+ | addi TMP1, TMP1, 8
+ | b <2
+ |3:
+ | li CARG3, 0
+ | mr L:SAVE0, L:CARG1
+ | li CARG4, 0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | lp TMP2, L:SAVE0->base
+ | cmplwi CRET1, LUA_YIELD
+ | lp TMP3, L:SAVE0->top
+ | li_vmstate INTERP
+ | lp BASE, L->base
+ | st_vmstate
+ | bgt >8
+ | sub RD, TMP3, TMP2
+ | lwz TMP0, L->maxstack
+ | cmplwi RD, 0
+ | add TMP1, BASE, RD
+ | beq >6 // No results?
+ | cmplw TMP1, TMP0
+ | li TMP1, 0
+ | bgt >9 // Need to grow stack?
+ |
+ | subi TMP3, RD, 8
+ | stp TMP2, L:SAVE0->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | cmplw TMP1, TMP3
+ | lfdx f0, TMP2, TMP1
+ | stfdx f0, BASE, TMP1
+ | addi TMP1, TMP1, 8
+ | bne <5
+ |6:
+ | andix. TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | la RA, -8(BASE)
+ | stw TMP1, -8(BASE) // Prepend true to results.
+ | addi RD, RD, 16
+ |.else
+ | mr RA, BASE
+ | addi RD, RD, 8
+ |.endif
+ |7:
+ | stw PC, SAVE_PC
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | andix. TMP0, PC, FRAME_TYPE
+ | la TMP3, -8(TMP3)
+ | li TMP1, LJ_TFALSE
+ | lfd f0, 0(TMP3)
+ | stp TMP3, L:SAVE0->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | stw TMP1, -8(BASE) // Prepend false to results.
+ | la RA, -8(BASE)
+ | stfd f0, 0(BASE) // Copy error message.
+ | b <7
+ |.else
+ | mr CARG1, L
+ | mr CARG2, L:SAVE0
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mr CARG1, L
+ | srwi CARG2, RD, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | li CRET1, 0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
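
The crorc/crandc/cror sequence in coroutine_resume_wrap folds four resumability tests into one condition-register bit before the single blt. Spelled out in C, the fallback is taken when any of the following holds (a sketch with illustrative names; nargs8 is the byte-scaled argument count from NARGS8:RC):

    static int resume_fallback_sketch(int status, void *cframe, char *base,
                                      char *top, char *maxstack, unsigned nargs8)
    {
      return status > 1 /* > LUA_YIELD: dead or errored   */ ||
             cframe != 0 /* still has an active C frame    */ ||
             (top == base && status != 1) /* nothing to run and not yielded */ ||
             top + nargs8 > maxstack;     /* arguments would overflow the stack */
    }
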
+ |
+ |.ffunc coroutine_yield
+ | lp TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | stp BASE, L->base
+ | andix. TMP0, TMP0, CFRAME_RESUME
+ | stp TMP1, L->top
+ | li CRET1, LUA_YIELD
+ | beq ->fff_fallback
+ | stp ZERO, L->cframe
+ | stb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_1 math_abs
+ | checknum CARG3
+ |.if DUALNUM
+ | bne >2
+ | srawi TMP1, CARG1, 31
+ | xor TMP2, TMP1, CARG1
+ |.if GPR64
+ | lus TMP0, 0x8000
+ | sub CARG1, TMP2, TMP1
+ | cmplw CARG1, TMP0
+ | beq >1
+ |.else
+ | sub. CARG1, TMP2, TMP1
+ | blt >1
+ |.endif
+ |->fff_resi:
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | stw TISNUM, -8(BASE)
+ | stw CRET1, -4(BASE)
+ | b ->fff_res1
+ |1:
+ | lus CARG3, 0x41e0 // 2^31.
+ | li CARG1, 0
+ | b ->fff_restv
+ |2:
+ |.endif
+ | bge ->fff_fallback
+ | rlwinm CARG3, CARG3, 0, 1, 31
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG3/CARG1 = TValue result.
+ | lwz PC, FRAME_PC(BASE)
+ | stw CARG3, -8(BASE)
+ | la RA, -8(BASE)
+ | stw CARG1, -4(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andix. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | bney ->vm_return
+ | lwz INS, -4(PC)
+ | decode_RB8 RB, INS
+ |5:
+ | cmplw RB, RD // More results expected?
+ | decode_RA8 TMP0, INS
+ | bgt >6
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, RA, TMP1
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | blex func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | blex func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc_1 math_ .. func
+ | checknum CARG3; beqy ->fff_restv
+ | rlwinm TMP2, CARG3, 12, 21, 31
+ | bge ->fff_fallback
+ | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
+ | cmplwi cr1, TMP2, 31 // 0 <= exp < 31?
+ | subfic TMP0, TMP2, 31
+ | blt >3
+ | slwi TMP1, CARG3, 11
+ | srwi TMP3, CARG1, 21
+ | oris TMP1, TMP1, 0x8000
+ | addi TMP2, TMP2, 1
+ | or TMP1, TMP1, TMP3
+ | slwi CARG2, CARG1, 11
+ | bge cr1, >4
+ | slw TMP3, TMP1, TMP2
+ | srw RD, TMP1, TMP0
+ | or TMP3, TMP3, CARG2
+ | srawi TMP2, CARG3, 31
+ |.if "func" == "floor"
+ | and TMP1, TMP3, TMP2
+ | addic TMP0, TMP1, -1
+ | subfe TMP1, TMP0, TMP1
+ | add CARG1, RD, TMP1
+ | xor CARG1, CARG1, TMP2
+ | sub CARG1, CARG1, TMP2
+ | b ->fff_resi
+ |.else
+ | andc TMP1, TMP3, TMP2
+ | addic TMP0, TMP1, -1
+ | subfe TMP1, TMP0, TMP1
+ | add CARG1, RD, TMP1
+ | cmpw CARG1, RD
+ | xor CARG1, CARG1, TMP2
+ | sub CARG1, CARG1, TMP2
+ | bge ->fff_resi
+ | // Overflow to 2^31.
+ | lus CARG3, 0x41e0 // 2^31.
+ | li CARG1, 0
+ | b ->fff_restv
+ |.endif
+ |3: // |x| < 1
+ | slwi TMP2, CARG3, 1
+ | srawi TMP1, CARG3, 31
+ | or TMP2, CARG1, TMP2 // ztest = (hi+hi) | lo
+ |.if "func" == "floor"
+ | and TMP1, TMP2, TMP1 // (ztest & sign) == 0 ? 0 : -1
+ | subfic TMP2, TMP1, 0
+ | subfe CARG1, CARG1, CARG1
+ |.else
+ | andc TMP1, TMP2, TMP1 // (ztest & ~sign) == 0 ? 0 : 1
+ | addic TMP2, TMP1, -1
+ | subfe CARG1, TMP2, TMP1
+ |.endif
+ | b ->fff_resi
+ |4: // exp >= 31. Check for -(2^31).
+ | xoris TMP1, TMP1, 0x8000
+ | srawi TMP2, CARG3, 31
+ |.if "func" == "floor"
+ | or TMP1, TMP1, CARG2
+ |.endif
+ |.if PPE
+ | orc TMP1, TMP1, TMP2
+ | cmpwi TMP1, 0
+ |.else
+ | orc. TMP1, TMP1, TMP2
+ |.endif
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | lus CARG1, 0x8000 // -(2^31).
+ | beqy ->fff_resi
+ |5:
+ | lfd FARG1, 0(BASE)
+ | blex func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.if DUALNUM
+ | math_round floor
+ | math_round ceil
+ |.else
+ | // NYI: use internal implementation.
+ | math_extern floor
+ | math_extern ceil
+ |.endif
+ |
+ |.if SQRT
+ |.ffunc_n math_sqrt
+ | fsqrt FARG1, FARG1
+ | b ->fff_resn
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |.ffunc math_log
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checknum CARG3; bge ->fff_fallback
+ | blex log
+ | b ->fff_resn
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ | lfd FARG2, CFUNC:RB->upvalue[0]
+ | fmul FARG1, FARG1, FARG2
+ | b ->fff_resn
+ |
+ |.if DUALNUM
+ |.ffunc math_ldexp
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lfd FARG1, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ |.if GPR64
+ | lwz CARG2, 12(BASE)
+ |.else
+ | lwz CARG1, 12(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ | checknum CARG4; bne ->fff_fallback
+ |.else
+ |.ffunc_nn math_ldexp
+ |.if GPR64
+ | toint CARG2, FARG2
+ |.else
+ | toint CARG1, FARG2
+ |.endif
+ |.endif
+ | blex ldexp
+ | b ->fff_resn
+ |
+ |.ffunc_n math_frexp
+ |.if GPR64
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ |.else
+ | la CARG1, DISPATCH_GL(tmptv)(DISPATCH)
+ |.endif
+ | lwz PC, FRAME_PC(BASE)
+ | blex frexp
+ | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | la RA, -8(BASE)
+ |.if not DUALNUM
+ | tonum_i FARG2, TMP1
+ |.endif
+ | stfd FARG1, 0(RA)
+ | li RD, (2+1)*8
+ |.if DUALNUM
+ | stw TISNUM, 8(RA)
+ | stw TMP1, 12(RA)
+ |.else
+ | stfd FARG2, 8(RA)
+ |.endif
+ | b ->fff_res
+ |
+ |.ffunc_n math_modf
+ |.if GPR64
+ | la CARG2, -8(BASE)
+ |.else
+ | la CARG1, -8(BASE)
+ |.endif
+ | lwz PC, FRAME_PC(BASE)
+ | blex modf
+ | la RA, -8(BASE)
+ | stfd FARG1, 0(BASE)
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, ismax
+ |.if DUALNUM
+ | .ffunc_1 name
+ | checknum CARG3
+ | addi TMP1, BASE, 8
+ | add TMP2, BASE, NARGS8:RC
+ | bne >4
+ |1: // Handle integers.
+ | lwz CARG4, 0(TMP1)
+ | cmplw cr1, TMP1, TMP2
+ | lwz CARG2, 4(TMP1)
+ | bge cr1, ->fff_resi
+ | checknum CARG4
+ | xoris TMP0, CARG1, 0x8000
+ | xoris TMP3, CARG2, 0x8000
+ | bne >3
+ | subfc TMP3, TMP3, TMP0
+ | subfe TMP0, TMP0, TMP0
+ |.if ismax
+ | andc TMP3, TMP3, TMP0
+ |.else
+ | and TMP3, TMP3, TMP0
+ |.endif
+ | add CARG1, TMP3, CARG2
+ |.if GPR64
+ | rldicl CARG1, CARG1, 0, 32
+ |.endif
+ | addi TMP1, TMP1, 8
+ | b <1
+ |3:
+ | bge ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ | tonum_i FARG1, CARG1
+ | lfd FARG2, 0(TMP1)
+ | b >6
+ |4:
+ | lfd FARG1, 0(BASE)
+ | bge ->fff_fallback
+ |5: // Handle numbers.
+ | lwz CARG4, 0(TMP1)
+ | cmplw cr1, TMP1, TMP2
+ | lfd FARG2, 0(TMP1)
+ | bge cr1, ->fff_resn
+ | checknum CARG4; bge >7
+ |6:
+ | fsub f0, FARG1, FARG2
+ | addi TMP1, TMP1, 8
+ |.if ismax
+ | fsel FARG1, f0, FARG1, FARG2
+ |.else
+ | fsel FARG1, f0, FARG2, FARG1
+ |.endif
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | lwz CARG2, 4(TMP1)
+ | bne ->fff_fallback
+ | tonum_i FARG2, CARG2
+ | b <6
+ |.else
+ | .ffunc_n name
+ | li TMP1, 8
+ |1:
+ | lwzx CARG2, BASE, TMP1
+ | lfdx FARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_resn
+ | bge ->fff_fallback
+ | fsub f0, FARG1, FARG2
+ | addi TMP1, TMP1, 8
+ |.if ismax
+ | fsel FARG1, f0, FARG1, FARG2
+ |.else
+ | fsel FARG1, f0, FARG2, FARG1
+ |.endif
+ | b <1
+ |.endif
+ |.endmacro
+ |
+ | math_minmax math_min, 0
+ | math_minmax math_max, 1
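The math_minmax reduction above leans on fsel: each step keeps one operand or the other based on the sign of their difference. A minimal C sketch of one step (illustration only; note that fsel's treatment of NaN differs from fmin/fmax):

/* One reduction step of math.min/math.max, mirroring
 * "fsub f0, FARG1, FARG2" followed by fsel. */
static double minmax_step(double x, double y, int ismax)
{
  double d = x - y;
  if (ismax)
    return d >= 0.0 ? x : y;   /* fsel FARG1, f0, FARG1, FARG2 */
  return d >= 0.0 ? y : x;     /* fsel FARG1, f0, FARG2, FARG1 */
}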
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | checkstr CARG3; bne ->fff_fallback
+ | lwz CRET1, STR:CARG1->len
+ | b ->fff_resi
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checkstr CARG3
+ | bne ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ |.if DUALNUM
+ | lbz CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | li RD, (0+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | cmplwi TMP0, 0
+ | la RA, -8(BASE)
+ | beqy ->fff_res
+ | b ->fff_resi
+ |.else
+ | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addic TMP3, TMP0, -1 // RD = ((str->len != 0)+1)*8
+ | subfe RD, TMP3, TMP0
+ | stw TMP1, TONUM_LO // Inlined tonum_u f0, TMP1.
+ | addi RD, RD, 1
+ | lfd f0, TONUM_D
+ | la RA, -8(BASE)
+ | lwz PC, FRAME_PC(BASE)
+ | fsub f0, f0, TOBIT
+ | slwi RD, RD, 3
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ |.endif
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ |.if DUALNUM
+ | lwz TMP0, 4(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG3; bne ->fff_fallback
+ | la CARG2, 7(BASE)
+ |.else
+ | lfd FARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG3; bge ->fff_fallback
+ | toint TMP0, FARG1
+ | la CARG2, TMPD_BLO
+ |.endif
+ | li CARG3, 1
+ | cmplwi TMP0, 255; bgt ->fff_fallback
+ |->fff_newstr:
+ | mr CARG1, L
+ | stp BASE, L->base
+ | stw PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // Returns GCstr *.
+ | lp BASE, L->base
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 16(BASE)
+ |.if not DUALNUM
+ | lfd f0, 16(BASE)
+ |.endif
+ | lwz TMP0, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | lwz CARG2, 8(BASE)
+ |.if DUALNUM
+ | lwz TMP1, 12(BASE)
+ |.else
+ | lfd f1, 8(BASE)
+ |.endif
+ | li TMP2, -1
+ | beq >1
+ |.if DUALNUM
+ | checknum CARG3
+ | lwz TMP2, 20(BASE)
+ | bne ->fff_fallback
+ |1:
+ | checknum CARG2; bne ->fff_fallback
+ |.else
+ | checknum CARG3; bge ->fff_fallback
+ | toint TMP2, f0
+ |1:
+ | checknum CARG2; bge ->fff_fallback
+ |.endif
+ | checkstr TMP0; bne ->fff_fallback
+ |.if not DUALNUM
+ | toint TMP1, f1
+ |.endif
+ | lwz TMP0, STR:CARG1->len
+ | cmplw TMP0, TMP2 // len < end? (unsigned compare)
+ | addi TMP3, TMP2, 1
+ | blt >5
+ |2:
+ | cmpwi TMP1, 0 // start <= 0?
+ | add TMP3, TMP1, TMP0
+ | ble >7
+ |3:
+ | sub CARG3, TMP2, TMP1
+ | addi CARG2, STR:CARG1, #STR-1
+ | srawi TMP0, CARG3, 31
+ | addi CARG3, CARG3, 1
+ | add CARG2, CARG2, TMP1
+ | andc CARG3, CARG3, TMP0
+ |.if GPR64
+ | rldicl CARG2, CARG2, 0, 32
+ | rldicl CARG3, CARG3, 0, 32
+ |.endif
+ | b ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | cmpw TMP0, TMP2 // len >= end? (signed compare)
+ | add TMP2, TMP0, TMP3 // Negative end: end = end+len+1.
+ | bge <2
+ | mr TMP2, TMP0 // Overflow: end = len.
+ | b <2
+ |
+ |7: // Negative start or underflow.
+ | .gpr64 extsw TMP1, TMP1
+ | addic CARG3, TMP1, -1
+ | subfe CARG3, CARG3, CARG3
+ | srawi CARG2, TMP3, 31 // Note: modifies carry.
+ | andc TMP3, TMP3, CARG3
+ | andc TMP1, TMP3, CARG2
+ | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
+ | b <3
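Labels 5 and 7 above normalize string.sub's indices. A minimal C sketch of the same normalization (illustration only; the helper name is made up):

#include <stdint.h>

/* string.sub index handling as done above: negative indices count from the
 * end, end is clamped to the length, start is clamped to 1, and an empty
 * range gives length 0. Produces the byte offset and count to copy. */
static void sub_range(int32_t len, int32_t start, int32_t end,
                      int32_t *ofs, int32_t *n)
{
  if (end < 0) end += len + 1;         /* negative end (label 5) */
  else if (end > len) end = len;       /* overflow: end = len */
  if (start < 0) start += len + 1;     /* negative start (label 7) */
  if (start < 1) start = 1;            /* start = 1 + (start ? start+len : 0) */
  *ofs = start - 1;
  *n = end - start + 1;
  if (*n < 0) *n = 0;
}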
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | lwz TMP0, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | lwz CARG4, 8(BASE)
+ |.if DUALNUM
+ | lwz CARG3, 12(BASE)
+ |.else
+ | lfd FARG2, 8(BASE)
+ |.endif
+ | bne ->fff_fallback // Exactly 2 arguments.
+ | checkstr TMP0; bne ->fff_fallback
+ |.if DUALNUM
+ | checknum CARG4; bne ->fff_fallback
+ |.else
+ | checknum CARG4; bge ->fff_fallback
+ | toint CARG3, FARG2
+ |.endif
+ | lwz TMP0, STR:CARG1->len
+ | cmpwi CARG3, 0
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | ble >2 // Count <= 0? (or non-int)
+ | cmplwi TMP0, 1
+ | subi TMP2, CARG3, 1
+ | blt >2 // Zero length string?
+ | cmplw cr1, TMP1, CARG3
+ | bne ->fff_fallback // Fallback for > 1-char strings.
+ | lbz TMP0, STR:CARG1[1]
+ | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | blt cr1, ->fff_fallback
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | cmplwi TMP2, 0
+ | stbx TMP0, CARG2, TMP2
+ | subi TMP2, TMP2, 1
+ | bne <1
+ | b ->fff_newstr
+ |2: // Return empty string.
+ | la STR:CARG1, DISPATCH_GL(strempty)(DISPATCH)
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | checkstr CARG3
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | bne ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | li TMP2, 0
+ | cmplw TMP1, CARG3
+ | subi TMP3, CARG3, 1
+ | blt ->fff_fallback
+ |1: // Reverse string copy.
+ | cmpwi TMP3, 0
+ | lbzx TMP1, CARG1, TMP2
+ | blty ->fff_newstr
+ | stbx TMP1, CARG2, TMP3
+ | subi TMP3, TMP3, 1
+ | addi TMP2, TMP2, 1
+ | b <1
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | checkstr CARG3
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | bne ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lp CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | cmplw TMP1, CARG3
+ | li TMP2, 0
+ | blt ->fff_fallback
+ |1: // ASCII case conversion.
+ | cmplw TMP2, CARG3
+ | lbzx TMP1, CARG1, TMP2
+ | bgey ->fff_newstr
+ | subi TMP0, TMP1, lo
+ | xori TMP3, TMP1, 0x20
+ | addic TMP0, TMP0, -26
+ | subfe TMP3, TMP3, TMP3
+ | rlwinm TMP3, TMP3, 0, 26, 26 // x &= 0x20.
+ | xor TMP1, TMP1, TMP3
+ | stbx TMP1, CARG2, TMP2
+ | addi TMP2, TMP2, 1
+ | b <1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
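The per-byte step in ffstring_case is branchless: the carry/mask sequence turns "is the byte within 26 of lo" into a 0x20-or-0 mask that is then XORed in. A minimal C sketch of that step (illustration only):

#include <stdint.h>

/* Branchless ASCII case flip: XOR with 0x20 only when c is in [lo, lo+25],
 * i.e. 'A'..'Z' for string.lower (lo = 65) or 'a'..'z' for string.upper
 * (lo = 97), mirroring the subi/addic/subfe/rlwinm/xor sequence above. */
static uint8_t case_flip(uint8_t c, uint8_t lo)
{
  uint32_t mask = ((uint32_t)(c - lo) < 26u) ? 0x20u : 0u;
  return (uint8_t)(c ^ mask);
}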
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | checktab CARG3; bne ->fff_fallback
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b ->fff_resi
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name
+ |.if DUALNUM
+ | .ffunc_1 bit_..name
+ | checknum CARG3; bnel ->fff_tobit_fb
+ |.else
+ | .ffunc_n bit_..name
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ |.endif
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | addi TMP1, BASE, 8
+ | add TMP2, BASE, NARGS8:RC
+ |1:
+ | lwz CARG4, 0(TMP1)
+ | cmplw cr1, TMP1, TMP2
+ |.if DUALNUM
+ | lwz CARG2, 4(TMP1)
+ |.else
+ | lfd FARG1, 0(TMP1)
+ |.endif
+ | bgey cr1, ->fff_resi
+ | checknum CARG4
+ |.if DUALNUM
+ | bnel ->fff_bitop_fb
+ |.else
+ | fadd FARG1, FARG1, TOBIT
+ | bge ->fff_fallback
+ | stfd FARG1, TMPD
+ | lwz CARG2, TMPD_LO
+ |.endif
+ | ins CARG1, CARG1, CARG2
+ | addi TMP1, TMP1, 8
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | rotlwi TMP0, CARG1, 8
+ | rlwimi TMP0, CARG1, 24, 0, 7
+ | rlwimi TMP0, CARG1, 24, 16, 23
+ | mr CRET1, TMP0
+ | b ->fff_resi
+ |
+ |.ffunc_bit bnot
+ | not CRET1, CARG1
+ | b ->fff_resi
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ |.if DUALNUM
+ | .ffunc_2 bit_..name
+ | checknum CARG3; bnel ->fff_tobit_fb
+ | // Note: no inline conversion from number for 2nd argument!
+ | checknum CARG4; bne ->fff_fallback
+ |.else
+ | .ffunc_nn bit_..name
+ | fadd FARG1, FARG1, TOBIT
+ | fadd FARG2, FARG2, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ | stfd FARG2, TMPD
+ | lwz CARG2, TMPD_LO
+ |.endif
+ |.if shmod == 1
+ | rlwinm CARG2, CARG2, 0, 27, 31
+ |.elif shmod == 2
+ | neg CARG2, CARG2
+ |.endif
+ | ins CRET1, CARG1, CARG2
+ | b ->fff_resi
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, slw, 1
+ |.ffunc_bit_sh rshift, srw, 1
+ |.ffunc_bit_sh arshift, sraw, 1
+ |.ffunc_bit_sh rol, rotlw, 0
+ |.ffunc_bit_sh ror, rotlw, 2
+ |
+ |.ffunc_bit tobit
+ |.if DUALNUM
+ | b ->fff_resi
+ |.else
+ |->fff_resi:
+ | tonum_i FARG1, CRET1
+ |.endif
+ |->fff_resn:
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | stfd FARG1, -8(BASE)
+ | b ->fff_res1
+ |
+ |// Fallback FP number to bit conversion.
+ |->fff_tobit_fb:
+ |.if DUALNUM
+ | lfd FARG1, 0(BASE)
+ | bgt ->fff_fallback
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ | blr
+ |.endif
+ |->fff_bitop_fb:
+ |.if DUALNUM
+ | lfd FARG1, 0(TMP1)
+ | bgt ->fff_fallback
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG2, TMPD_LO
+ | blr
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lp TMP3, CFUNC:RB->f
+ | add TMP1, BASE, NARGS8:RC
+ | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addi TMP0, TMP1, 8*LUA_MINSTACK
+ | lwz TMP2, L->maxstack
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | .toc lp TMP3, 0(TMP3)
+ | cmplw TMP0, TMP2
+ | stp BASE, L->base
+ | stp TMP1, L->top
+ | mr CARG1, L
+ | bgt >5 // Need to grow stack.
+ | mtctr TMP3
+ | bctrl // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lp BASE, L->base
+ | cmpwi CRET1, 0
+ | slwi RD, CRET1, 3
+ | la RA, -8(BASE)
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | lp TMP0, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub NARGS8:RC, TMP0, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andix. TMP0, PC, FRAME_TYPE
+ | rlwinm TMP1, PC, 0, 0, 28
+ | bne >3
+ | lwz INS, -4(PC)
+ | decode_RA8 TMP1, INS
+ | addi TMP1, TMP1, 8
+ |3:
+ | sub TMP2, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | li CARG2, LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lp BASE, L->base
+ | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mflr SAVE0
+ | stp BASE, L->base
+ | add TMP0, BASE, NARGS8:RC
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | stp TMP0, L->top
+ | mr CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | lp BASE, L->base
+ | mtlr SAVE0
+ | lp TMP0, L->top
+ | sub NARGS8:RC, TMP0, BASE
+ | lwz CFUNC:RB, FRAME_FUNC(BASE)
+ | blr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_ACTIVE
+ | bne >1
+ | subi TMP2, TMP2, 1
+ | andi. TMP0, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqy >1
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | b >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OPP TMP1, INS.
+ | lpx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | bctr
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
+ | bne <5
+ |
+ | cmpwi cr1, TMP0, 0
+ | addic. TMP2, TMP2, -1
+ | beq cr1, <5
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | beq >1
+ | bge cr1, <5
+ |1:
+ | mr CARG1, L
+ | stw MULTRES, SAVE_MULTRES
+ | mr CARG2, PC
+ | stp BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | lp BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lwz INS, -4(PC)
+ | decode_OPP TMP1, INS
+ | decode_RB8 RB, INS
+ | addi TMP1, TMP1, GG_DISP2STATIC
+ | decode_RD8 RD, INS
+ | lpx TMP0, DISPATCH, TMP1
+ | decode_RA8 RA, INS
+ | decode_RC8 RC, INS
+ | mtctr TMP0
+ | bctr
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addi PC, PC, 4
+ | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | stw PC, SAVE_PC
+ | lwz TMP1, LFUNC:TMP1->pc
+ | mr CARG2, PC
+ | stw L, DISPATCH_J(L)(DISPATCH)
+ | lbz TMP1, PC2PROTO(framesize)(TMP1)
+ | stp BASE, L->base
+ | slwi TMP1, TMP1, 3
+ | add TMP1, BASE, TMP1
+ | stp TMP1, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mr CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | add TMP0, BASE, RC
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | stp BASE, L->base
+ | sub RA, RA, BASE
+ | stp TMP0, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | lp BASE, L->base
+ | lp TMP0, L->top
+ | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
+ | sub NARGS8:RC, TMP0, BASE
+ | add RA, BASE, RA
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | lwz INS, -4(PC)
+ | mtctr CRET1
+ | bctr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b, c, d
+ | stfd f..a, 16+a*8(sp)
+ | stfd f..b, 16+b*8(sp)
+ | stfd f..c, 16+c*8(sp)
+ | stfd f..d, 16+d*8(sp)
+ |.endmacro
+ |
+ |->vm_exit_handler:
+ |.if JIT
+ | addi sp, sp, -(16+32*8+32*4)
+ | stmw r2, 16+32*8+2*4(sp)
+ | addi DISPATCH, JGL, -GG_DISP2G-32768
+ | li CARG2, ~LJ_VMST_EXIT
+ | lwz CARG1, 16+32*8+32*4(sp) // Get stack chain.
+ | stw CARG2, DISPATCH_GL(vmstate)(DISPATCH)
+ | savex_ 0,1,2,3
+ | stw CARG1, 0(sp) // Store extended stack chain.
+ | mcrxr cr0 // Clear SO flag.
+ | savex_ 4,5,6,7
+ | addi CARG2, sp, 16+32*8+32*4 // Recompute original value of sp.
+ | savex_ 8,9,10,11
+ | stw CARG2, 16+32*8+1*4(sp) // Store sp in RID_SP.
+ | savex_ 12,13,14,15
+ | mflr CARG3
+ | li TMP1, 0
+ | savex_ 16,17,18,19
+ | stw TMP1, 16+32*8+0*4(sp) // Clear RID_TMP.
+ | savex_ 20,21,22,23
+ | lhz CARG4, 2(CARG3) // Load trace number.
+ | savex_ 24,25,26,27
+ | lwz L, DISPATCH_GL(jit_L)(DISPATCH)
+ | savex_ 28,29,30,31
+ | sub CARG3, TMP0, CARG3 // Compute exit number.
+ | lp BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | srwi CARG3, CARG3, 2
+ | stw L, DISPATCH_J(L)(DISPATCH)
+ | subi CARG3, CARG3, 2
+ | stw TMP1, DISPATCH_GL(jit_L)(DISPATCH)
+ | stw CARG4, DISPATCH_J(parent)(DISPATCH)
+ | stp BASE, L->base
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | stw CARG3, DISPATCH_J(exitno)(DISPATCH)
+ | addi CARG2, sp, 16
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | lp TMP1, L->cframe
+ | lwz TMP2, 0(sp)
+ | lp BASE, L->base
+ |.if GPR64
+ | rldicr sp, TMP1, 0, 61
+ |.else
+ | rlwinm sp, TMP1, 0, 0, 29
+ |.endif
+ | lwz PC, SAVE_PC // Get SAVE_PC.
+ | stw TMP2, 0(sp)
+ | stw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | lwz L, SAVE_L
+ | addi DISPATCH, JGL, -GG_DISP2G-32768
+ |1:
+ | cmpwi CARG1, 0
+ | blt >3 // Check for error from exit.
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | slwi MULTRES, CARG1, 3
+ | li TMP2, 0
+ | stw MULTRES, SAVE_MULTRES
+ | lwz TMP1, LFUNC:TMP1->pc
+ | stw TMP2, DISPATCH_GL(jit_L)(DISPATCH)
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // Setup type comparison constants.
+ | li TISNUM, LJ_TISNUM
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | stw TMP3, TMPD
+ | li ZERO, 0
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | lfs TOBIT, TMPD
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | li TISNIL, LJ_TNIL
+ | stw TMP0, TONUM_HI
+ | lfs TONUM, TMPD
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1.
+ | stw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OPP TMP1, INS
+ | decode_RA8 RA, INS
+ | lpx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | cmplwi TMP1, BC_FUNCF*4 // Function header?
+ | bge >2
+ | decode_RB8 RB, INS
+ | decode_RD8 RD, INS
+ | decode_RC8 RC, INS
+ | bctr
+ |2:
+ | subi RC, MULTRES, 8
+ | add RA, RA, BASE
+ | bctr
+ |
+ |3: // Rethrow error from the right C frame.
+ | neg CARG2, CARG1
+ | mr CARG1, L
+ | bl extern lj_err_throw // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// NYI: Use internal implementations of floor, ceil, trunc.
+ |
+ |->vm_modi:
+ | divwo. TMP0, CARG1, CARG2
+ | bso >1
+ |.if GPR64
+ | xor CARG3, CARG1, CARG2
+ | cmpwi CARG3, 0
+ |.else
+ | xor. CARG3, CARG1, CARG2
+ |.endif
+ | mullw TMP0, TMP0, CARG2
+ | sub CARG1, CARG1, TMP0
+ | bgelr
+ | cmpwi CARG1, 0; beqlr
+ | add CARG1, CARG1, CARG2
+ | blr
+ |1:
+ | cmpwi CARG2, 0
+ | li CARG1, 0
+ | beqlr
+ | mcrxr cr0 // Clear SO for -2147483648 % -1 and return 0.
+ | blr
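->vm_modi implements Lua's integer modulo, where the remainder takes the sign of the divisor; the bso path covers a zero divisor and the one overflowing divide (-2^31 / -1), and the routine returns 0 for both. A minimal C sketch (illustration only; the helper name is made up):

#include <stdint.h>

/* Lua-style integer modulo as computed by ->vm_modi: truncating remainder,
 * shifted toward the divisor when the signs differ. */
static int32_t modi(int32_t a, int32_t b)
{
  int32_t r;
  if (b == 0 || (a == INT32_MIN && b == -1))
    return 0;                    /* the divwo./bso cases above */
  r = a % b;
  if (r != 0 && ((r ^ b) < 0))   /* signs differ */
    r += b;
  return r;
}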
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// void lj_vm_cachesync(void *start, void *end)
+ |// Flush D-Cache and invalidate I-Cache. Assumes 32 byte cache line size.
+ |// This is a good lower bound, except for very ancient PPC models.
+ |->vm_cachesync:
+ |.if JIT or FFI
+ | // Compute start of first cache line and number of cache lines.
+ | rlwinm CARG1, CARG1, 0, 0, 26
+ | sub CARG2, CARG2, CARG1
+ | addi CARG2, CARG2, 31
+ | rlwinm. CARG2, CARG2, 27, 5, 31
+ | beqlr
+ | mtctr CARG2
+ | mr CARG3, CARG1
+ |1: // Flush D-Cache.
+ | dcbst r0, CARG1
+ | addi CARG1, CARG1, 32
+ | bdnz <1
+ | sync
+ | mtctr CARG2
+ |1: // Invalidate I-Cache.
+ | icbi r0, CARG3
+ | addi CARG3, CARG3, 32
+ | bdnz <1
+ | isync
+ | blr
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r11, g in r12.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | lwz CTSTATE, GL:r12->ctype_state
+ | addi DISPATCH, r12, GG_G2DISP
+ | stw r11, CTSTATE->cb.slot
+ | stw r3, CTSTATE->cb.gpr[0]
+ | stfd f1, CTSTATE->cb.fpr[0]
+ | stw r4, CTSTATE->cb.gpr[1]
+ | stfd f2, CTSTATE->cb.fpr[1]
+ | stw r5, CTSTATE->cb.gpr[2]
+ | stfd f3, CTSTATE->cb.fpr[2]
+ | stw r6, CTSTATE->cb.gpr[3]
+ | stfd f4, CTSTATE->cb.fpr[3]
+ | stw r7, CTSTATE->cb.gpr[4]
+ | stfd f5, CTSTATE->cb.fpr[4]
+ | stw r8, CTSTATE->cb.gpr[5]
+ | stfd f6, CTSTATE->cb.fpr[5]
+ | stw r9, CTSTATE->cb.gpr[6]
+ | stfd f7, CTSTATE->cb.fpr[6]
+ | stw r10, CTSTATE->cb.gpr[7]
+ | stfd f8, CTSTATE->cb.fpr[7]
+ | addi TMP0, sp, CFRAME_SPACE+8
+ | stw TMP0, CTSTATE->cb.stack
+ | mr CARG1, CTSTATE
+ | stw CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | mr CARG2, sp
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | lp BASE, L:CRET1->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp RC, L:CRET1->top
+ | lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li ZERO, 0
+ | mr L, CRET1
+ | stw TMP3, TMPD
+ | lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | stw TMP0, TONUM_HI
+ | li TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | lfs TOBIT, TMPD
+ | stw TMP3, TMPD
+ | sub RC, RC, BASE
+ | st_vmstate
+ | lfs TONUM, TMPD
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | lwz CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | stp BASE, L->base
+ | stp RB, L->top
+ | stp L, CTSTATE->L
+ | mr CARG1, CTSTATE
+ | mr CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | lwz CRET1, CTSTATE->cb.gpr[0]
+ | lfd FARG1, CTSTATE->cb.fpr[0]
+ | lwz CRET2, CTSTATE->cb.gpr[1]
+ | b ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ | .type CCSTATE, CCallState, CARG1
+ | lwz TMP1, CCSTATE->spadj
+ | mflr TMP0
+ | lbz CARG2, CCSTATE->nsp
+ | lbz CARG3, CCSTATE->nfpr
+ | neg TMP1, TMP1
+ | stw TMP0, 4(sp)
+ | cmpwi cr1, CARG3, 0
+ | mr TMP2, sp
+ | addic. CARG2, CARG2, -1
+ | stwux sp, sp, TMP1
+ | crnot 4*cr1+eq, 4*cr1+eq // For vararg calls.
+ | stw r14, -4(TMP2)
+ | stw CCSTATE, -8(TMP2)
+ | mr r14, TMP2
+ | la TMP1, CCSTATE->stack
+ | slwi CARG2, CARG2, 2
+ | blty >2
+ | la TMP2, 8(sp)
+ |1:
+ | lwzx TMP0, TMP1, CARG2
+ | stwx TMP0, TMP2, CARG2
+ | addic. CARG2, CARG2, -4
+ | bge <1
+ |2:
+ | bney cr1, >3
+ | lfd f1, CCSTATE->fpr[0]
+ | lfd f2, CCSTATE->fpr[1]
+ | lfd f3, CCSTATE->fpr[2]
+ | lfd f4, CCSTATE->fpr[3]
+ | lfd f5, CCSTATE->fpr[4]
+ | lfd f6, CCSTATE->fpr[5]
+ | lfd f7, CCSTATE->fpr[6]
+ | lfd f8, CCSTATE->fpr[7]
+ |3:
+ | lp TMP0, CCSTATE->func
+ | lwz CARG2, CCSTATE->gpr[1]
+ | lwz CARG3, CCSTATE->gpr[2]
+ | lwz CARG4, CCSTATE->gpr[3]
+ | lwz CARG5, CCSTATE->gpr[4]
+ | mtctr TMP0
+ | lwz r8, CCSTATE->gpr[5]
+ | lwz r9, CCSTATE->gpr[6]
+ | lwz r10, CCSTATE->gpr[7]
+ | lwz CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | bctrl
+ | lwz CCSTATE:TMP1, -8(r14)
+ | lwz TMP2, -4(r14)
+ | lwz TMP0, 4(r14)
+ | stw CARG1, CCSTATE:TMP1->gpr[0]
+ | stfd FARG1, CCSTATE:TMP1->fpr[0]
+ | stw CARG2, CCSTATE:TMP1->gpr[1]
+ | mtlr TMP0
+ | stw CARG3, CCSTATE:TMP1->gpr[2]
+ | mr sp, r14
+ | stw CARG4, CCSTATE:TMP1->gpr[3]
+ | mr r14, TMP2
+ | blr
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ |.if DUALNUM
+ | lwzux TMP0, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux TMP1, RD, BASE
+ | lwz TMP2, -4(PC)
+ | checknum cr0, TMP0
+ | lwz CARG3, 4(RD)
+ | decode_RD4 TMP2, TMP2
+ | checknum cr1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bne cr0, >7
+ | bne cr1, >8
+ | cmpw CARG2, CARG3
+ if (op == BC_ISLT) {
+ | bge >2
+ } else if (op == BC_ISGE) {
+ | blt >2
+ } else if (op == BC_ISLE) {
+ | bgt >2
+ } else {
+ | ble >2
+ }
+ |1:
+ | add PC, PC, TMP2
+ |2:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | bgt cr0, ->vmeta_comp
+ | // RA is a number.
+ | lfd f0, 0(RA)
+ | bgt cr1, ->vmeta_comp
+ | blt cr1, >4
+ | // RA is a number, RD is an integer.
+ | tonum_i f1, CARG3
+ | b >5
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | bgt cr1, ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ | tonum_i f0, CARG2
+ |4:
+ | lfd f1, 0(RD)
+ |5:
+ | fcmpu cr0, f0, f1
+ if (op == BC_ISLT) {
+ | bge <2
+ } else if (op == BC_ISGE) {
+ | blt <2
+ } else if (op == BC_ISLE) {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | bge <2
+ } else {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | blt <2
+ }
+ | b <1
+ |.else
+ | lwzx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | lfdx f0, BASE, RA
+ | lwzx TMP1, BASE, RD
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | lfdx f1, BASE, RD
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | bge cr0, ->vmeta_comp
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge cr1, ->vmeta_comp
+ | fcmpu cr0, f0, f1
+ if (op == BC_ISLT) {
+ | bge >1
+ } else if (op == BC_ISGE) {
+ | blt >1
+ } else if (op == BC_ISLE) {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | bge >1
+ } else {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | blt >1
+ }
+ | add PC, PC, TMP2
+ |1:
+ | ins_next
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ |.if DUALNUM
+ | lwzux TMP0, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux TMP1, RD, BASE
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | lwz CARG3, 4(RD)
+ | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (vk) {
+ | ble cr7, ->BC_ISEQN_Z
+ } else {
+ | ble cr7, ->BC_ISNEN_Z
+ }
+ |.else
+ | lwzux TMP0, RA, BASE
+ | lwz TMP2, 0(PC)
+ | lfd f0, 0(RA)
+ | addi PC, PC, 4
+ | lwzux TMP1, RD, BASE
+ | checknum cr0, TMP0
+ | decode_RD4 TMP2, TMP2
+ | lfd f1, 0(RD)
+ | checknum cr1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge cr0, >5
+ | bge cr1, >5
+ | fcmpu cr0, f0, f1
+ if (vk) {
+ | bne >1
+ | add PC, PC, TMP2
+ } else {
+ | beq >1
+ | add PC, PC, TMP2
+ }
+ |1:
+ | ins_next
+ |.endif
+ |5: // Either or both types are not numbers.
+ |.if not DUALNUM
+ | lwz CARG2, 4(RA)
+ | lwz CARG3, 4(RD)
+ |.endif
+ |.if FFI
+ | cmpwi cr7, TMP0, LJ_TCDATA
+ | cmpwi cr5, TMP1, LJ_TCDATA
+ |.endif
+ | not TMP3, TMP0
+ | cmplw TMP0, TMP1
+ | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
+ |.if FFI
+ | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq
+ |.endif
+ | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
+ |.if FFI
+ | beq cr7, ->vmeta_equal_cd
+ |.endif
+ | cmplw cr5, CARG2, CARG3
+ | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive.
+ | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type.
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv.
+ | mr SAVE0, PC
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2.
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2.
+ if (vk) {
+ | bne cr0, >6
+ | add PC, PC, TMP2
+ |6:
+ } else {
+ | beq cr0, >6
+ | add PC, PC, TMP2
+ |6:
+ }
+ |.if DUALNUM
+ | bge cr0, >2 // Done if 1 or 2.
+ |1:
+ | ins_next
+ |2:
+ |.else
+ | blt cr0, <1 // Done if 1 or 2.
+ |.endif
+ | blt cr6, <1 // Done if not tab/ud.
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:TMP2, TAB:CARG2->metatable
+ | li CARG4, 1-vk // ne = 0 or 1.
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable?
+ | lbz TMP2, TAB:TMP2->nomm
+ | andix. TMP2, TMP2, 1<<MM_eq
+ | bne <1 // Or 'no __eq' flag set?
+ | mr PC, SAVE0 // Restore old PC.
+ | b ->vmeta_equal // Handle __eq metamethod.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | lwzux TMP0, RA, BASE
+ | srwi RD, RD, 1
+ | lwz STR:TMP3, 4(RA)
+ | lwz TMP2, 0(PC)
+ | subfic RD, RD, -4
+ | addi PC, PC, 4
+ |.if FFI
+ | cmpwi TMP0, LJ_TCDATA
+ |.endif
+ | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TSTR
+ |.if FFI
+ | beq ->vmeta_equal_cd
+ |.endif
+ | sub TMP1, STR:TMP1, STR:TMP3
+ | or TMP0, TMP0, TMP1
+ | decode_RD4 TMP2, TMP2
+ | subfic TMP0, TMP0, 0
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | subfe TMP1, TMP1, TMP1
+ if (vk) {
+ | andc TMP2, TMP2, TMP1
+ } else {
+ | and TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ |.if DUALNUM
+ | lwzux TMP0, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux TMP1, RD, KBASE
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | lwz CARG3, 4(RD)
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | bne cr0, >7
+ | bne cr1, >8
+ | cmpw CARG2, CARG3
+ |4:
+ |.else
+ if (vk) {
+ |->BC_ISEQN_Z: // Dummy label.
+ } else {
+ |->BC_ISNEN_Z: // Dummy label.
+ }
+ | lwzx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | lfdx f0, BASE, RA
+ | lwz TMP2, -4(PC)
+ | lfdx f1, KBASE, RD
+ | decode_RD4 TMP2, TMP2
+ | checknum TMP0
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge >3
+ | fcmpu cr0, f0, f1
+ |.endif
+ if (vk) {
+ | bne >1
+ | add PC, PC, TMP2
+ |1:
+ |.if not FFI
+ |3:
+ |.endif
+ } else {
+ | beq >2
+ |1:
+ |.if not FFI
+ |3:
+ |.endif
+ | add PC, PC, TMP2
+ |2:
+ }
+ | ins_next
+ |.if FFI
+ |3:
+ | cmpwi TMP0, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ | b <1
+ |.endif
+ |.if DUALNUM
+ |7: // RA is not an integer.
+ | bge cr0, <3
+ | // RA is a number.
+ | lfd f0, 0(RA)
+ | blt cr1, >1
+ | // RA is a number, RD is an integer.
+ | tonum_i f1, CARG3
+ | b >2
+ |
+ |8: // RA is an integer, RD is a number.
+ | tonum_i f0, CARG2
+ |1:
+ | lfd f1, 0(RD)
+ |2:
+ | fcmpu cr0, f0, f1
+ | b <4
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | lwzx TMP0, BASE, RA
+ | srwi TMP1, RD, 3
+ | lwz TMP2, 0(PC)
+ | not TMP1, TMP1
+ | addi PC, PC, 4
+ |.if FFI
+ | cmpwi TMP0, LJ_TCDATA
+ |.endif
+ | sub TMP0, TMP0, TMP1
+ |.if FFI
+ | beq ->vmeta_equal_cd
+ |.endif
+ | decode_RD4 TMP2, TMP2
+ | .gpr64 extsw TMP0, TMP0
+ | addic TMP0, TMP0, -1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | subfe TMP1, TMP1, TMP1
+ if (vk) {
+ | and TMP2, TMP2, TMP1
+ } else {
+ | andc TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | lwzx TMP0, BASE, RD
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TTRUE
+ | decode_RD4 TMP2, INS
+ | subfe TMP1, TMP1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (op == BC_IST) {
+ | andc TMP2, TMP2, TMP1
+ } else {
+ | and TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ } else {
+ | li TMP1, LJ_TFALSE
+ | lfdx f0, BASE, RD
+ | cmplw TMP0, TMP1
+ if (op == BC_ISTC) {
+ | bge >1
+ } else {
+ | blt >1
+ }
+ | addis PC, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | stfdx f0, BASE, RA
+ | add PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lfdx f0, BASE, RD
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lwzx TMP0, BASE, RD
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP1, TMP0, LJ_TTRUE
+ | adde TMP0, TMP0, TMP1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | lwzux TMP1, RD, BASE
+ | lwz TMP0, 4(RD)
+ | checknum TMP1
+ |.if DUALNUM
+ | bne >5
+ |.if GPR64
+ | lus TMP2, 0x8000
+ | neg TMP0, TMP0
+ | cmplw TMP0, TMP2
+ | beq >4
+ |.else
+ | nego. TMP0, TMP0
+ | bso >4
+ |1:
+ |.endif
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw TMP0, 4(RA)
+ |3:
+ | ins_next2
+ |4:
+ |.if not GPR64
+ | // Potential overflow.
+ | mcrxr cr0; bley <1 // Ignore unrelated overflow.
+ |.endif
+ | lus TMP1, 0x41e0 // 2^31.
+ | li TMP0, 0
+ | b >7
+ |.endif
+ |5:
+ | bge ->vmeta_unm
+ | xoris TMP1, TMP1, 0x8000
+ |7:
+ | ins_next1
+ | stwux TMP1, RA, BASE
+ | stw TMP0, 4(RA)
+ |.if DUALNUM
+ | b <3
+ |.else
+ | ins_next2
+ |.endif
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | lwzux TMP0, RD, BASE
+ | lwz CARG1, 4(RD)
+ | checkstr TMP0; bne >2
+ | lwz CRET1, STR:CARG1->len
+ |1:
+ |.if DUALNUM
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw CRET1, 4(RA)
+ |.else
+ | tonum_u f0, CRET1 // Result is a non-negative integer.
+ | ins_next1
+ | stfdx f0, BASE, RA
+ |.endif
+ | ins_next2
+ |2:
+ | checktab TMP0; bne ->vmeta_len
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | cmplwi TAB:TMP2, 0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+#if LJ_52
+ |9:
+ | lbz TMP0, TAB:TMP2->nomm
+ | andix. TMP0, TMP0, 1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | lwzx TMP1, BASE, RB
+ | .if DUALNUM
+ | lwzx TMP2, KBASE, RC
+ | .endif
+ | lfdx f14, BASE, RB
+ | lfdx f15, KBASE, RC
+ | .if DUALNUM
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vn
+ | .else
+ | checknum TMP1; bge ->vmeta_arith_vn
+ | .endif
+ || break;
+ ||case 1:
+ | lwzx TMP1, BASE, RB
+ | .if DUALNUM
+ | lwzx TMP2, KBASE, RC
+ | .endif
+ | lfdx f15, BASE, RB
+ | lfdx f14, KBASE, RC
+ | .if DUALNUM
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_nv
+ | .else
+ | checknum TMP1; bge ->vmeta_arith_nv
+ | .endif
+ || break;
+ ||default:
+ | lwzx TMP1, BASE, RB
+ | lwzx TMP2, BASE, RC
+ | lfdx f14, BASE, RB
+ | lfdx f15, BASE, RC
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn2
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv2
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv2
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro intmod, a, b, c
+ | bl ->vm_modi
+ |.endmacro
+ |
+ |.macro fpmod, a, b, c
+ |->BC_MODVN_Z:
+ | fdiv FARG1, b, c
+ | // NYI: Use internal implementation of floor.
+ | blex floor // floor(b/c)
+ | fmul a, FARG1, c
+ | fsub a, b, a // b - floor(b/c)*c
+ |.endmacro
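As the comments note, fpmod computes Lua's number modulo as b - floor(b/c)*c, calling the external floor for now. The same formula as a one-line C sketch (illustration only):

#include <math.h>

/* Lua's floating-point modulo, the exact sequence in fpmod/->BC_MODVN_Z. */
static double fpmod_sketch(double b, double c)
{
  return b - floor(b / c) * c;
}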
+ |
+ |.macro ins_arithfp, fpins
+ | ins_arithpre
+ |.if "fpins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |.else
+ | fpins f0, f14, f15
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpins
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | lwzux TMP1, RB, BASE
+ | lwzux TMP2, RC, KBASE
+ | lwz CARG1, 4(RB)
+ | checknum cr0, TMP1
+ | lwz CARG2, 4(RC)
+ || break;
+ ||case 1:
+ | lwzux TMP1, RB, BASE
+ | lwzux TMP2, RC, KBASE
+ | lwz CARG2, 4(RB)
+ | checknum cr0, TMP1
+ | lwz CARG1, 4(RC)
+ || break;
+ ||default:
+ | lwzux TMP1, RB, BASE
+ | lwzux TMP2, RC, BASE
+ | lwz CARG1, 4(RB)
+ | checknum cr0, TMP1
+ | lwz CARG2, 4(RC)
+ || break;
+ ||}
+ | checknum cr1, TMP2
+ | bne >5
+ | bne cr1, >5
+ | intins CARG1, CARG1, CARG2
+ | bso >4
+ |1:
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw CARG1, 4(RA)
+ |2:
+ | ins_next2
+ |4: // Overflow.
+ | mcrxr cr0; bley <1 // Ignore unrelated overflow.
+ | ins_arithfallback b
+ |5: // FP variant.
+ ||if (vk == 1) {
+ | lfd f15, 0(RB)
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lfd f14, 0(RC)
+ ||} else {
+ | lfd f14, 0(RB)
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lfd f15, 0(RC)
+ ||}
+ | ins_arithfallback bge
+ |.if "fpins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |.else
+ | fpins f0, f14, f15
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | b <2
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arith, intins, fpins
+ |.if DUALNUM
+ | ins_arithdn intins, fpins
+ |.else
+ | ins_arithfp fpins
+ |.endif
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ |.if GPR64
+ |.macro addo32., y, a, b
+ | // Need to check overflow for (a<<32) + (b<<32).
+ | rldicr TMP0, a, 32, 31
+ | rldicr TMP3, b, 32, 31
+ | addo. TMP0, TMP0, TMP3
+ | add y, a, b
+ |.endmacro
+ | ins_arith addo32., fadd
+ |.else
+ | ins_arith addo., fadd
+ |.endif
+ break;
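On GPR64 the addo32. macro above cannot rely on a plain 32-bit overflow flag, so it shifts both operands into the upper word and lets the 64-bit add set OV exactly when the 32-bit add would overflow. A minimal C sketch of the check it performs (illustration only; written with explicit arithmetic instead of XER[OV]):

#include <stdint.h>

/* 32-bit signed add with overflow detection: on a 64-bit machine the sum is
 * exact, so overflow means the result no longer fits in int32_t. */
static int add32_ovf(int32_t a, int32_t b, int32_t *out)
{
  int64_t s = (int64_t)a + (int64_t)b;      /* exact 64-bit sum */
  if (s < INT32_MIN || s > INT32_MAX)
    return 1;                               /* 32-bit add would overflow */
  *out = (int32_t)s;
  return 0;
}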
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ |.if GPR64
+ |.macro subo32., y, a, b
+ | // Need to check overflow for (a<<32) - (b<<32).
+ | rldicr TMP0, a, 32, 31
+ | rldicr TMP3, b, 32, 31
+ | subo. TMP0, TMP0, TMP3
+ | sub y, a, b
+ |.endmacro
+ | ins_arith subo32., fsub
+ |.else
+ | ins_arith subo., fsub
+ |.endif
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mullwo., fmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp fdiv
+ break;
+ case BC_MODVN:
+ | ins_arith intmod, fpmod
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arith intmod, fpmod_
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | lwzx TMP1, BASE, RB
+ | lfdx FARG1, BASE, RB
+ | lwzx TMP2, BASE, RC
+ | lfdx FARG2, BASE, RC
+ | checknum cr0, TMP1
+ | checknum cr1, TMP2
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vv
+ | blex pow
+ | ins_next1
+ | stfdx FARG1, BASE, RA
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | sub CARG3, RC, RB
+ | stp BASE, L->base
+ | add CARG2, BASE, RC
+ | mr SAVE0, RB
+ |->BC_CAT_Z:
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | srwi CARG3, CARG3, 3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | lp BASE, L->base
+ | bne ->vmeta_binop
+ | ins_next1
+ | lfdx f0, BASE, SAVE0 // Copy result from RB to RA.
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | ins_next1
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
+ | li TMP2, LJ_TSTR
+ | stwux TMP2, RA, BASE
+ | stw TMP0, 4(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | ins_next1
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
+ | li TMP2, LJ_TCDATA
+ | stwux TMP2, RA, BASE
+ | stw TMP0, 4(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ |.if DUALNUM
+ | slwi RD, RD, 13
+ | srawi RD, RD, 16
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw RD, 4(RA)
+ | ins_next2
+ |.else
+ | // The soft-float approach is faster.
+ | slwi RD, RD, 13
+ | srawi TMP1, RD, 31
+ | xor TMP2, TMP1, RD
+ | sub TMP2, TMP2, TMP1 // TMP2 = abs(x)
+ | cntlzw TMP3, TMP2
+ | subfic TMP1, TMP3, 0x40d // TMP1 = exponent-1
+ | slw TMP2, TMP2, TMP3 // TMP2 = left aligned mantissa
+ | subfic TMP3, RD, 0
+ | slwi TMP1, TMP1, 20
+ | rlwimi RD, TMP2, 21, 1, 31 // hi = sign(x) | (mantissa>>11)
+ | subfe TMP0, TMP0, TMP0
+ | add RD, RD, TMP1 // hi = hi + exponent-1
+ | and RD, RD, TMP0 // hi = x == 0 ? 0 : hi
+ | ins_next1
+ | stwux RD, RA, BASE
+ | stw ZERO, 4(RA)
+ | ins_next2
+ |.endif
+ break;
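The non-DUALNUM BC_KSHORT path above assembles the double for a 16-bit literal by hand (sign, biased exponent, shifted mantissa) rather than doing an int-to-FP conversion through memory. A minimal C sketch of the value those two stores encode (illustration only; the helper name is made up):

#include <stdint.h>

/* The hand-built hi word plus the zero lo word encode exactly the double
 * equal to the sign-extended 16-bit literal; a 16-bit magnitude always fits
 * in the hi word's 20 mantissa bits, so the lo word stays 0. */
static uint64_t kshort_bits(int16_t k)
{
  union { double d; uint64_t u; } v;
  v.d = (double)k;
  return v.u;
}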
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | ins_next1
+ | lfdx f0, KBASE, RD
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srwi TMP1, RD, 3
+ | not TMP0, TMP1
+ | ins_next1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | stwx TISNIL, BASE, RA
+ | addi RA, RA, 8
+ |1:
+ | stwx TISNIL, BASE, RA
+ | cmpw RA, RD
+ | addi RA, RA, 8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RD, RD, 1
+ | addi RD, RD, offsetof(GCfuncL, uvptr)
+ | lwzx UPVAL:RB, LFUNC:RB, RD
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | lfd f0, 0(TMP1)
+ | stfdx f0, BASE, RA
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lfdux f0, RD, BASE
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP0, UPVAL:RB->closed
+ | lwz TMP2, 0(RD)
+ | stfd f0, 0(CARG2)
+ | cmplwi cr1, TMP0, 0
+ | lwz TMP1, 4(RD)
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | subi TMP2, TMP2, (LJ_TISNUM+1)
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1)
+ | bge <1 // tvisgcv(v)
+ | lbz TMP3, GCOBJ:TMP1->gch.marked
+ | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | srwi RA, RA, 1
+ | subfic TMP1, TMP1, -4
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP3, STR:TMP1->marked
+ | lbz TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | stw STR:TMP1, 4(CARG2)
+ | stw TMP0, 0(CARG2)
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
+ | cmplwi cr1, TMP2, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lfdx f0, KBASE, RD
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | stfd f0, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | srwi TMP0, RD, 3
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | not TMP0, TMP0
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | stw TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lwz TMP1, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | stp BASE, L->base
+ | cmplwi TMP1, 0
+ | mr CARG1, L
+ | beq >1
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | lp BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srwi TMP1, RD, 1
+ | stp BASE, L->base
+ | subfic TMP1, TMP1, -4
+ | stw PC, SAVE_PC
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | mr CARG1, L
+ | lwz CARG3, FRAME_FUNC(BASE)
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | lp BASE, L->base
+ | li TMP0, LJ_TFUNC
+ | stwux TMP0, RA, BASE
+ | stw LFUNC:CRET1, 4(RA)
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | mr CARG1, L
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | stp BASE, L->base
+ | cmplw TMP0, TMP1
+ | stw PC, SAVE_PC
+ | bge >5
+ |1:
+ if (op == BC_TNEW) {
+ | rlwinm CARG2, RD, 29, 21, 31
+ | rlwinm CARG3, RD, 18, 27, 31
+ | cmpwi CARG2, 0x7ff; beq >3
+ |2:
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns Table *.
+ } else {
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns Table *.
+ }
+ | lp BASE, L->base
+ | li TMP0, LJ_TTAB
+ | stwux TMP0, RA, BASE
+ | stw TAB:CRET1, 4(RA)
+ | ins_next
+ if (op == BC_TNEW) {
+ |3:
+ | li CARG2, 0x801
+ | b <2
+ }
+ |5:
+ | mr SAVE0, RD
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mr RD, SAVE0
+ | mr CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | lwz TAB:RB, LFUNC:TMP2->env
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG2, RC, BASE
+ | lwz TAB:RB, 4(RB)
+ |.if DUALNUM
+ | lwz RC, 4(RC)
+ |.else
+ | lfd f0, 0(RC)
+ |.endif
+ | checktab CARG1
+ | checknum cr1, CARG2
+ | bne ->vmeta_tgetv
+ |.if DUALNUM
+ | lwz TMP0, TAB:RB->asize
+ | bne cr1, >5
+ | lwz TMP1, TAB:RB->array
+ | cmplw TMP0, RC
+ | slwi TMP2, RC, 3
+ |.else
+ | bge cr1, >5
+ | // Convert number key to integer, check for integerness and range.
+ | fctiwz f1, f0
+ | fadd f2, f0, TOBIT
+ | stfd f1, TMPD
+ | lwz TMP0, TAB:RB->asize
+ | fsub f2, f2, TOBIT
+ | lwz TMP2, TMPD_LO
+ | lwz TMP1, TAB:RB->array
+ | fcmpu cr1, f0, f2
+ | cmplw cr0, TMP0, TMP2
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
+ | slwi TMP2, TMP2, 3
+ |.endif
+ | ble ->vmeta_tgetv // Integer key and in array part?
+ | lwzx TMP0, TMP1, TMP2
+ | lfdx f14, TMP1, TMP2
+ | checknil TMP0; beq >2
+ |1:
+ | ins_next1
+ | stfdx f14, BASE, RA
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andix. TMP0, TMP0, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |5:
+ | checkstr CARG2; bne ->vmeta_tgetv
+ |.if not DUALNUM
+ | lwz STR:RC, 4(RC)
+ |.endif
+ | b ->BC_TGETS_Z // String key?
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | lwzux CARG1, RB, BASE
+ | srwi TMP1, RC, 1
+ | lwz TAB:RB, 4(RB)
+ | subfic TMP1, TMP1, -4
+ | checktab CARG1
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | bne ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lwz CARG1, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
+ | checkstr CARG1; bne >4
+ | cmpw TMP0, STR:RC; bne >4
+ | checknil CARG2; beq >5 // Key found, but nil value?
+ |3:
+ | stwux CARG2, RA, BASE
+ | stw TMP1, 4(RA)
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ | li CARG2, LJ_TNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <3 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andix. TMP0, TMP0, 1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | lwzux CARG1, RB, BASE
+ | srwi TMP0, RC, 3
+ | lwz TAB:RB, 4(RB)
+ | checktab CARG1; bne ->vmeta_tgetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | cmplw TMP0, TMP1; bge ->vmeta_tgetb
+ | lwzx TMP1, TMP2, RC
+ | lfdx f0, TMP2, RC
+ | checknil TMP1; beq >5
+ |1:
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andix. TMP2, TMP2, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG2, RC, BASE
+ | lwz TAB:RB, 4(RB)
+ |.if DUALNUM
+ | lwz RC, 4(RC)
+ |.else
+ | lfd f0, 0(RC)
+ |.endif
+ | checktab CARG1
+ | checknum cr1, CARG2
+ | bne ->vmeta_tsetv
+ |.if DUALNUM
+ | lwz TMP0, TAB:RB->asize
+ | bne cr1, >5
+ | lwz TMP1, TAB:RB->array
+ | cmplw TMP0, RC
+ | slwi TMP0, RC, 3
+ |.else
+ | bge cr1, >5
+ | // Convert number key to integer, check for integerness and range.
+ | fctiwz f1, f0
+ | fadd f2, f0, TOBIT
+ | stfd f1, TMPD
+ | lwz TMP0, TAB:RB->asize
+ | fsub f2, f2, TOBIT
+ | lwz TMP2, TMPD_LO
+ | lwz TMP1, TAB:RB->array
+ | fcmpu cr1, f0, f2
+ | cmplw cr0, TMP0, TMP2
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
+ | slwi TMP0, TMP2, 3
+ |.endif
+ | ble ->vmeta_tsetv // Integer key and in array part?
+ | lwzx TMP2, TMP1, TMP0
+ | lbz TMP3, TAB:RB->marked
+ | lfdx f14, BASE, RA
+ | checknil TMP2; beq >3
+ |1:
+ | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
+ | stfdx f14, TMP1, TMP0
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andix. TMP2, TMP2, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |5:
+ | checkstr CARG2; bne ->vmeta_tsetv
+ |.if not DUALNUM
+ | lwz STR:RC, 4(RC)
+ |.endif
+ | b ->BC_TSETS_Z // String key?
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | lwzux CARG1, RB, BASE
+ | srwi TMP1, RC, 1
+ | lwz TAB:RB, 4(RB)
+ | subfic TMP1, TMP1, -4
+ | checktab CARG1
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | bne ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | lfdx f14, BASE, RA
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | lbz TMP3, TAB:RB->marked
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lwz CARG1, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz NODE:TMP1, NODE:TMP2->next
+ | checkstr CARG1; bne >5
+ | cmpw TMP0, STR:RC; bne >5
+ | checknil CARG2; beq >4 // Key found, but nil value?
+ |2:
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stfd f14, NODE:TMP2->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <2 // No metatable: done.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andix. TMP0, TMP0, 1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | cmplwi NODE:TMP1, 0
+ | mr NODE:TMP2, NODE:TMP1
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | cmplwi TAB:TMP1, 0
+ | stp BASE, L->base
+ | beq >6 // No metatable: continue.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andix. TMP0, TMP0, 1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | li TMP0, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | mr CARG2, TAB:RB
+ | stw TMP0, 0(CARG3)
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | lp BASE, L->base
+ | stfd f14, 0(CRET1)
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | lwzux CARG1, RB, BASE
+ | srwi TMP0, RC, 3
+ | lwz TAB:RB, 4(RB)
+ | checktab CARG1; bne ->vmeta_tsetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | lbz TMP3, TAB:RB->marked
+ | cmplw TMP0, TMP1
+ | lfdx f14, BASE, RA
+ | bge ->vmeta_tsetb
+ | lwzx TMP1, TMP2, RC
+ | checknil TMP1; beq >5
+ |1:
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stfdx f14, TMP2, RC
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP1, TAB:TMP1->nomm
+ | andix. TMP1, TMP1, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | add RA, BASE, RA
+ |1:
+ | add TMP3, KBASE, RD
+ | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
+ | addic. TMP0, MULTRES, -8
+ | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
+ | srwi CARG3, TMP0, 3
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG3, TMP3
+ | lwz TMP2, TAB:CARG2->asize
+ | slwi TMP1, TMP3, 3
+ | lbz TMP3, TAB:CARG2->marked
+ | cmplw CARG3, TMP2
+ | add TMP2, RA, TMP0
+ | lwz TMP0, TAB:CARG2->array
+ | bgt >5
+ | add TMP1, TMP1, TMP0
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | lfd f0, 0(RA)
+ | addi RA, RA, 8
+ | cmpw cr1, RA, TMP2
+ | stfd f0, 0(TMP1)
+ | addi TMP1, TMP1, 8
+ | blt cr1, <3
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | mr SAVE0, RD
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | mr RD, SAVE0
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALL follows.
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | mr TMP2, BASE
+ | lwzux TMP0, BASE, RA
+ | lwz LFUNC:RB, 4(BASE)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi BASE, BASE, 8
+ | checkfunc TMP0; bne ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | lwzux TMP0, RA, BASE
+ | lwz LFUNC:RB, 4(RA)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | lwz TMP1, FRAME_PC(BASE)
+ | checkfunc TMP0
+ | addi RA, RA, 8
+ | bne ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andix. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
+ | lbz TMP3, LFUNC:RB->ffid
+ | xori TMP2, TMP1, FRAME_VARG
+ | cmplwi cr1, NARGS8:RC, 0
+ | bne >7
+ |1:
+ | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | li TMP2, 0
+ | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
+ | beq cr1, >3
+ |2:
+ | addi TMP3, TMP2, 8
+ | lfdx f0, RA, TMP2
+ | cmplw cr1, TMP3, NARGS8:RC
+ | stfdx f0, BASE, TMP2
+ | mr TMP2, TMP3
+ | bne cr1, <2
+ |3:
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
+ | beq >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lwz INS, -4(TMP1)
+ | decode_RA8 RA, INS
+ | sub TMP1, BASE, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | andix. TMP0, TMP2, FRAME_TYPEP
+ | bne <1 // Vararg frame below?
+ | sub BASE, BASE, TMP2 // Relocate BASE down.
+ | lwz TMP1, FRAME_PC(BASE)
+ | andix. TMP0, TMP1, FRAME_TYPE
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | mr TMP2, BASE
+ | add BASE, BASE, RA
+ | lwz TMP1, -24(BASE)
+ | lwz LFUNC:RB, -20(BASE)
+ | lfd f1, -8(BASE)
+ | lfd f0, -16(BASE)
+ | stw TMP1, 0(BASE) // Copy callable.
+ | stw LFUNC:RB, 4(BASE)
+ | checkfunc TMP1
+ | stfd f1, 16(BASE) // Copy control var.
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | stfdu f0, 8(BASE) // Copy state.
+ | bne ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+ |.if JIT
+ | // NYI: add hotloop, record BC_ITERN.
+ |.endif
+ | add RA, BASE, RA
+ | lwz TAB:RB, -12(RA)
+ | lwz RC, -4(RA) // Get index from control var.
+ | lwz TMP0, TAB:RB->asize
+ | lwz TMP1, TAB:RB->array
+ | addi PC, PC, 4
+ |1: // Traverse array part.
+ | cmplw RC, TMP0
+ | slwi TMP3, RC, 3
+ | bge >5 // Index points after array part?
+ | lwzx TMP2, TMP1, TMP3
+ | lfdx f0, TMP1, TMP3
+ | checknil TMP2
+ | lwz INS, -4(PC)
+ | beq >4
+ |.if DUALNUM
+ | stw RC, 4(RA)
+ | stw TISNUM, 0(RA)
+ |.else
+ | tonum_u f1, RC
+ |.endif
+ | addi RC, RC, 1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | stfd f0, 8(RA)
+ | decode_RD4 TMP1, INS
+ | stw RC, -4(RA) // Update control var.
+ | add PC, TMP1, TMP3
+ |.if not DUALNUM
+ | stfd f1, 0(RA)
+ |.endif
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | addi RC, RC, 1
+ | b <1
+ |
+ |5: // Traverse hash part.
+ | lwz TMP1, TAB:RB->hmask
+ | sub RC, RC, TMP0
+ | lwz TMP2, TAB:RB->node
+ |6:
+ | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
+ | slwi TMP3, RC, 5
+ | bgty <3
+ | slwi RB, RC, 3
+ | sub TMP3, TMP3, RB
+ | lwzx RB, TMP2, TMP3
+ | lfdx f0, TMP2, TMP3
+ | add NODE:TMP3, TMP2, TMP3
+ | checknil RB
+ | lwz INS, -4(PC)
+ | beq >7
+ | lfd f1, NODE:TMP3->key
+ | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
+ | stfd f0, 8(RA)
+ | add RC, RC, TMP0
+ | decode_RD4 TMP1, INS
+ | stfd f1, 0(RA)
+ | addi RC, RC, 1
+ | add PC, TMP1, TMP2
+ | stw RC, -4(RA) // Update control var.
+ | b <3
+ |
+ |7: // Skip holes in hash part.
+ | addi RC, RC, 1
+ | b <6
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | add RA, BASE, RA
+ | lwz TMP0, -24(RA)
+ | lwz CFUNC:TMP1, -20(RA)
+ | lwz TMP2, -16(RA)
+ | lwz TMP3, -8(RA)
+ | cmpwi cr0, TMP2, LJ_TTAB
+ | cmpwi cr1, TMP0, LJ_TFUNC
+ | cmpwi cr6, TMP3, LJ_TNIL
+ | bne cr1, >5
+ | lbz TMP1, CFUNC:TMP1->ffid
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
+ | cmpwi cr7, TMP1, FF_next_N
+ | srwi TMP0, RD, 1
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | add TMP3, PC, TMP0
+ | bne cr0, >5
+ | lus TMP1, 0xfffe
+ | ori TMP1, TMP1, 0x7fff
+ | stw ZERO, -4(RA) // Initialize control var.
+ | stw TMP1, -8(RA)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP0, BC_JMP
+ | li TMP1, BC_ITERC
+ | stb TMP0, -1(PC)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ | stb TMP1, 3(PC)
+ | b <1
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lwz TMP0, FRAME_PC(BASE)
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | addi RC, RC, FRAME_VARG
+ | add TMP2, RA, RB
+ | subi TMP3, BASE, 8 // TMP3 = vtop
+ | sub RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | cmplwi cr1, RB, 0
+ |.if PPE
+ | sub TMP1, TMP3, RC
+ | cmpwi TMP1, 0
+ |.else
+ | sub. TMP1, TMP3, RC
+ |.endif
+ | beq cr1, >5 // Copy all varargs?
+ | subi TMP2, TMP2, 16
+ | ble >2 // No vararg slots?
+ |1: // Copy vararg slots to destination slots.
+ | lfd f0, 0(RC)
+ | addi RC, RC, 8
+ | stfd f0, 0(RA)
+ | cmplw RA, TMP2
+ | cmplw cr1, RC, TMP3
+ | bge >3 // All destination slots filled?
+ | addi RA, RA, 8
+ | blt cr1, <1 // More vararg slots?
+ |2: // Fill up remainder with nil.
+ | stw TISNIL, 0(RA)
+ | cmplw RA, TMP2
+ | addi RA, RA, 8
+ | blt <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lwz TMP0, L->maxstack
+ | li MULTRES, 8 // MULTRES = (0+1)*8
+ | bley <3 // No vararg slots?
+ | add TMP2, RA, TMP1
+ | cmplw TMP2, TMP0
+ | addi MULTRES, TMP1, 8
+ | bgt >7
+ |6:
+ | lfd f0, 0(RC)
+ | addi RC, RC, 8
+ | stfd f0, 0(RA)
+ | cmplw RC, TMP3
+ | addi RA, RA, 8
+ | blt <6 // More vararg slots?
+ | b <3
+ |
+ |7: // Grow stack for varargs.
+ | mr CARG1, L
+ | stp RA, L->top
+ | sub SAVE0, RC, BASE // Need delta, because BASE may change.
+ | stp BASE, L->base
+ | sub RA, RA, BASE
+ | stw PC, SAVE_PC
+ | srwi CARG2, TMP1, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lp BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, SAVE0
+ | subi TMP3, BASE, 8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ |1:
+ | andix. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lwz INS, -4(PC)
+ | cmpwi RD, 8
+ | subi TMP2, BASE, 8
+ | subi RC, RD, 8
+ | decode_RB8 RB, INS
+ | beq >3
+ | li TMP1, 0
+ |2:
+ | addi TMP3, TMP1, 8
+ | lfdx f0, RA, TMP1
+ | cmpw TMP3, RC
+ | stfdx f0, TMP2, TMP1
+ | beq >3
+ | addi TMP1, TMP3, 8
+ | lfdx f1, RA, TMP3
+ | cmpw TMP1, RC
+ | stfdx f1, TMP2, TMP3
+ | bne <2
+ |3:
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, TMP2, TMP1
+ | b <5
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andix. TMP2, TMP1, FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ | andix. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bney ->BC_RETV_Z
+ |
+ | lwz INS, -4(PC)
+ | subi TMP2, BASE, 8
+ | decode_RB8 RB, INS
+ if (op == BC_RET1) {
+ | lfd f0, 0(RA)
+ | stfd f0, 0(TMP2)
+ }
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, TMP2, TMP1
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ |.if DUALNUM
+ | // Integer loop.
+ | lwzux TMP1, RA, BASE
+ | lwz CARG1, FORL_IDX*8+4(RA)
+ | cmplw cr0, TMP1, TISNUM
+ if (vk) {
+ | lwz CARG3, FORL_STEP*8+4(RA)
+ | bne >9
+ |.if GPR64
+ | // Need to check overflow for (a<<32) + (b<<32).
+ | rldicr TMP0, CARG1, 32, 31
+ | rldicr TMP2, CARG3, 32, 31
+ | add CARG1, CARG1, CARG3
+ | addo. TMP0, TMP0, TMP2
+ |.else
+ | addo. CARG1, CARG1, CARG3
+ |.endif
+ | cmpwi cr6, CARG3, 0
+ | lwz CARG2, FORL_STOP*8+4(RA)
+ | bso >6
+ |4:
+ | stw CARG1, FORL_IDX*8+4(RA)
+ } else {
+ | lwz TMP3, FORL_STEP*8(RA)
+ | lwz CARG3, FORL_STEP*8+4(RA)
+ | lwz TMP2, FORL_STOP*8(RA)
+ | lwz CARG2, FORL_STOP*8+4(RA)
+ | cmplw cr7, TMP3, TISNUM
+ | cmplw cr1, TMP2, TISNUM
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | cmpwi cr6, CARG3, 0
+ | bne >9
+ }
+ | blt cr6, >5
+ | cmpw CARG1, CARG2
+ |1:
+ | stw TISNUM, FORL_EXT*8(RA)
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ }
+ | stw CARG1, FORL_EXT*8+4(RA)
+ if (op != BC_JFORL) {
+ | add RD, PC, RD
+ }
+ if (op == BC_FORI) {
+ | bgt >3 // See FP loop below.
+ } else if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ | bley >7
+ } else if (op == BC_IFORL) {
+ | bgt >2
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else {
+ | bley =>BC_JLOOP
+ }
+ |2:
+ | ins_next
+ |5: // Invert check for negative step.
+ | cmpw CARG2, CARG1
+ | b <1
+ if (vk) {
+ |6: // Potential overflow.
+ | mcrxr cr0; bley <4 // Ignore unrelated overflow.
+ | b <2
+ }
+ |.endif
+ if (vk) {
+ |.if DUALNUM
+ |9: // FP loop.
+ | lfd f1, FORL_IDX*8(RA)
+ |.else
+ | lfdux f1, RA, BASE
+ |.endif
+ | lfd f3, FORL_STEP*8(RA)
+ | lfd f2, FORL_STOP*8(RA)
+ | lwz TMP3, FORL_STEP*8(RA)
+ | fadd f1, f1, f3
+ | stfd f1, FORL_IDX*8(RA)
+ } else {
+ |.if DUALNUM
+ |9: // FP loop.
+ |.else
+ | lwzux TMP1, RA, BASE
+ | lwz TMP3, FORL_STEP*8(RA)
+ | lwz TMP2, FORL_STOP*8(RA)
+ | cmplw cr0, TMP1, TISNUM
+ | cmplw cr7, TMP3, TISNUM
+ | cmplw cr1, TMP2, TISNUM
+ |.endif
+ | lfd f1, FORL_IDX*8(RA)
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lfd f2, FORL_STOP*8(RA)
+ | bge ->vmeta_for
+ }
+ | cmpwi cr6, TMP3, 0
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ }
+ | stfd f1, FORL_EXT*8(RA)
+ if (op != BC_JFORL) {
+ | add RD, PC, RD
+ }
+ | fcmpu cr0, f1, f2
+ if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ }
+ | blt cr6, >5
+ if (op == BC_FORI) {
+ | bgt >3
+ } else if (op == BC_IFORL) {
+ |.if DUALNUM
+ | bgty <2
+ |.else
+ | bgt >2
+ |.endif
+ |1:
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else if (op == BC_JFORI) {
+ | bley >7
+ } else {
+ | bley =>BC_JLOOP
+ }
+ |.if DUALNUM
+ | b <2
+ |.else
+ |2:
+ | ins_next
+ |.endif
+ |5: // Negative step.
+ if (op == BC_FORI) {
+ | bge <2
+ |3: // Used by integer loop, too.
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else if (op == BC_IFORL) {
+ | bgey <1
+ } else if (op == BC_JFORI) {
+ | bgey >7
+ } else {
+ | bgey =>BC_JLOOP
+ }
+ | b <2
+ if (op == BC_JFORI) {
+ |7:
+ | lwz INS, -4(PC)
+ | decode_RD8 RD, INS
+ | b =>BC_JLOOP
+ }
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | lwzux TMP1, RA, BASE
+ | lwz TMP2, 4(RA)
+ | checknil TMP1; beq >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | stw TMP1, -8(RA)
+ | stw TMP2, -4(RA)
+ | b =>BC_JLOOP
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | stw TMP1, -8(RA)
+ | stw TMP2, -4(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by trace recorder to determine scope/extent
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | lwz TMP1, DISPATCH_J(trace)(DISPATCH)
+ | srwi RD, RD, 1
+ | // Traces on PPC don't store the trace number, so use 0.
+ | stw ZERO, DISPATCH_GL(vmstate)(DISPATCH)
+ | lwzx TRACE:TMP2, TMP1, RD
+ | mcrxr cr0 // Clear SO flag.
+ | lp TMP2, TRACE:TMP2->mcode
+ | stw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | mtctr TMP2
+ | stw L, DISPATCH_GL(jit_L)(DISPATCH)
+ | addi JGL, DISPATCH, GG_DISP2G+32768
+ | bctr
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | lbz TMP1, -4+PC2PROTO(numparams)(PC)
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw RA, TMP2
+ | slwi TMP1, TMP1, 3
+ | bgt ->vm_growstack_l
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
+ | blt >3
+ if (op == BC_JFUNCF) {
+ | decode_RD8 RD, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | stwx TISNIL, BASE, NARGS8:RC
+ | addi NARGS8:RC, NARGS8:RC, 8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | add TMP1, BASE, RC
+ | add TMP0, RA, RC
+ | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
+ | addi TMP3, RC, 8+FRAME_VARG
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw TMP0, TMP2
+ | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
+ | bge ->vm_growstack_l
+ | lbz TMP2, -4+PC2PROTO(numparams)(PC)
+ | mr RA, BASE
+ | mr RC, TMP1
+ | ins_next1
+ | cmpwi TMP2, 0
+ | addi BASE, TMP1, 8
+ | beq >3
+ |1:
+ | cmplw RA, RC // Less args than parameters?
+ | lwz TMP0, 0(RA)
+ | lwz TMP3, 4(RA)
+ | bge >4
+ | stw TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
+ | addi RA, RA, 8
+ |2:
+ | addic. TMP2, TMP2, -1
+ | stw TMP0, 8(TMP1)
+ | stw TMP3, 12(TMP1)
+ | addi TMP1, TMP1, 8
+ | bne <1
+ |3:
+ | ins_next2
+ |
+ |4: // Clear missing parameters.
+ | li TMP0, LJ_TNIL
+ | b <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lp RD, CFUNC:RB->f
+ } else {
+ | lp RD, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | add TMP1, RA, NARGS8:RC
+ | lwz TMP2, L->maxstack
+ | .toc lp TMP3, 0(RD)
+ | add RC, BASE, NARGS8:RC
+ | stp BASE, L->base
+ | cmplw TMP1, TMP2
+ | stp RC, L->top
+ | li_vmstate C
+ |.if TOC
+ | mtctr TMP3
+ |.else
+ | mtctr RD
+ |.endif
+ if (op == BC_FUNCCW) {
+ | lp CARG2, CFUNC:RB->f
+ }
+ | mr CARG1, L
+ | bgt ->vm_growstack_c // Need to grow stack.
+ | .toc lp TOCREG, TOC_OFS(RD)
+ | .tocenv lp ENVREG, ENV_OFS(RD)
+ | st_vmstate
+ | bctrl // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | lp BASE, L->base
+ | .toc ld TOCREG, SAVE_TOC
+ | slwi RD, CRET1, 3
+ | lp TMP1, L->top
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | sub RA, TMP1, RD // RA = L->top - nresults*8
+ | st_vmstate
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_TARGET_PS3
+ "\t.long .lj_vm_ffi_call\n"
+#else
+ "\t.long lj_vm_ffi_call\n"
+#endif
+ "\t.long %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/3rdparty/lua/src/vm_ppcspe.dasc b/3rdparty/lua/src/vm_ppcspe.dasc
index a484930..293e391 100644
--- a/3rdparty/lua/src/vm_ppcspe.dasc
+++ b/3rdparty/lua/src/vm_ppcspe.dasc
@@ -1,3691 +1,3691 @@
-|// Low-level VM code for PowerPC/e500 CPUs.
-|// Bytecode interpreter, fast functions and helper functions.
-|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
-|
-|.arch ppc
-|.section code_op, code_sub
-|
-|.actionlist build_actionlist
-|.globals GLOB_
-|.globalnames globnames
-|.externnames extnames
-|
-|// Note: The ragged indentation of the instructions is intentional.
-|// The starting columns indicate data dependencies.
-|
-|//-----------------------------------------------------------------------
-|
-|// Fixed register assignments for the interpreter.
-|// Don't use: r1 = sp, r2 and r13 = reserved and/or small data area ptr
-|
-|// The following must be C callee-save (but BASE is often refetched).
-|.define BASE, r14 // Base of current Lua stack frame.
-|.define KBASE, r15 // Constants of current Lua function.
-|.define PC, r16 // Next PC.
-|.define DISPATCH, r17 // Opcode dispatch table.
-|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
-|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
-|
-|// Constants for vectorized type-comparisons (hi+low GPR). C callee-save.
-|.define TISNUM, r22
-|.define TISSTR, r23
-|.define TISTAB, r24
-|.define TISFUNC, r25
-|.define TISNIL, r26
-|.define TOBIT, r27
-|.define ZERO, TOBIT // Zero in lo word.
-|
-|// The following temporaries are not saved across C calls, except for RA.
-|.define RA, r20 // Callee-save.
-|.define RB, r10
-|.define RC, r11
-|.define RD, r12
-|.define INS, r7 // Overlaps CARG5.
-|
-|.define TMP0, r0
-|.define TMP1, r8
-|.define TMP2, r9
-|.define TMP3, r6 // Overlaps CARG4.
-|
-|// Saved temporaries.
-|.define SAVE0, r21
-|
-|// Calling conventions.
-|.define CARG1, r3
-|.define CARG2, r4
-|.define CARG3, r5
-|.define CARG4, r6 // Overlaps TMP3.
-|.define CARG5, r7 // Overlaps INS.
-|
-|.define CRET1, r3
-|.define CRET2, r4
-|
-|// Stack layout while in interpreter. Must match with lj_frame.h.
-|.define SAVE_LR, 188(sp)
-|.define CFRAME_SPACE, 184 // Delta for sp.
-|// Back chain for sp: 184(sp) <-- sp entering interpreter
-|.define SAVE_r31, 176(sp) // 64 bit register saves.
-|.define SAVE_r30, 168(sp)
-|.define SAVE_r29, 160(sp)
-|.define SAVE_r28, 152(sp)
-|.define SAVE_r27, 144(sp)
-|.define SAVE_r26, 136(sp)
-|.define SAVE_r25, 128(sp)
-|.define SAVE_r24, 120(sp)
-|.define SAVE_r23, 112(sp)
-|.define SAVE_r22, 104(sp)
-|.define SAVE_r21, 96(sp)
-|.define SAVE_r20, 88(sp)
-|.define SAVE_r19, 80(sp)
-|.define SAVE_r18, 72(sp)
-|.define SAVE_r17, 64(sp)
-|.define SAVE_r16, 56(sp)
-|.define SAVE_r15, 48(sp)
-|.define SAVE_r14, 40(sp)
-|.define SAVE_CR, 36(sp)
-|.define UNUSED1, 32(sp)
-|.define SAVE_ERRF, 28(sp) // 32 bit C frame info.
-|.define SAVE_NRES, 24(sp)
-|.define SAVE_CFRAME, 20(sp)
-|.define SAVE_L, 16(sp)
-|.define SAVE_PC, 12(sp)
-|.define SAVE_MULTRES, 8(sp)
-|// Next frame lr: 4(sp)
-|// Back chain for sp: 0(sp) <-- sp while in interpreter
-|
-|.macro save_, reg; evstdd reg, SAVE_..reg; .endmacro
-|.macro rest_, reg; evldd reg, SAVE_..reg; .endmacro
-|
-|.macro saveregs
-| stwu sp, -CFRAME_SPACE(sp)
-| save_ r14; save_ r15; save_ r16; save_ r17; save_ r18; save_ r19
-| mflr r0; mfcr r12
-| save_ r20; save_ r21; save_ r22; save_ r23; save_ r24; save_ r25
-| stw r0, SAVE_LR; stw r12, SAVE_CR
-| save_ r26; save_ r27; save_ r28; save_ r29; save_ r30; save_ r31
-|.endmacro
-|
-|.macro restoreregs
-| lwz r0, SAVE_LR; lwz r12, SAVE_CR
-| rest_ r14; rest_ r15; rest_ r16; rest_ r17; rest_ r18; rest_ r19
-| mtlr r0; mtcrf 0x38, r12
-| rest_ r20; rest_ r21; rest_ r22; rest_ r23; rest_ r24; rest_ r25
-| rest_ r26; rest_ r27; rest_ r28; rest_ r29; rest_ r30; rest_ r31
-| addi sp, sp, CFRAME_SPACE
-|.endmacro
-|
-|// Type definitions. Some of these are only used for documentation.
-|.type L, lua_State, LREG
-|.type GL, global_State
-|.type TVALUE, TValue
-|.type GCOBJ, GCobj
-|.type STR, GCstr
-|.type TAB, GCtab
-|.type LFUNC, GCfuncL
-|.type CFUNC, GCfuncC
-|.type PROTO, GCproto
-|.type UPVAL, GCupval
-|.type NODE, Node
-|.type NARGS8, int
-|.type TRACE, GCtrace
-|
-|//-----------------------------------------------------------------------
-|
-|// These basic macros should really be part of DynASM.
-|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
-|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
-|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
-|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
-|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
-|
-|// Trap for not-yet-implemented parts.
-|.macro NYI; tw 4, sp, sp; .endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Access to frame relative to BASE.
-|.define FRAME_PC, -8
-|.define FRAME_FUNC, -4
-|
-|// Instruction decode.
-|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
-|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
-|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
-|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
-|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
-|
-|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
-|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
-|
-|// Instruction fetch.
-|.macro ins_NEXT1
-| lwz INS, 0(PC)
-| addi PC, PC, 4
-|.endmacro
-|// Instruction decode+dispatch.
-|.macro ins_NEXT2
-| decode_OP4 TMP1, INS
-| decode_RB8 RB, INS
-| decode_RD8 RD, INS
-| lwzx TMP0, DISPATCH, TMP1
-| decode_RA8 RA, INS
-| decode_RC8 RC, INS
-| mtctr TMP0
-| bctr
-|.endmacro
-|.macro ins_NEXT
-| ins_NEXT1
-| ins_NEXT2
-|.endmacro
-|
-|// Instruction footer.
-|.if 1
-| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
-| .define ins_next, ins_NEXT
-| .define ins_next_, ins_NEXT
-| .define ins_next1, ins_NEXT1
-| .define ins_next2, ins_NEXT2
-|.else
-| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
-| // Affects only certain kinds of benchmarks (and only with -j off).
-| .macro ins_next
-| b ->ins_next
-| .endmacro
-| .macro ins_next1
-| .endmacro
-| .macro ins_next2
-| b ->ins_next
-| .endmacro
-| .macro ins_next_
-| ->ins_next:
-| ins_NEXT
-| .endmacro
-|.endif
-|
-|// Call decode and dispatch.
-|.macro ins_callt
-| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
-| lwz PC, LFUNC:RB->pc
-| lwz INS, 0(PC)
-| addi PC, PC, 4
-| decode_OP4 TMP1, INS
-| decode_RA8 RA, INS
-| lwzx TMP0, DISPATCH, TMP1
-| add RA, RA, BASE
-| mtctr TMP0
-| bctr
-|.endmacro
-|
-|.macro ins_call
-| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
-| stw PC, FRAME_PC(BASE)
-| ins_callt
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-|
-|// Macros to test operand types.
-|.macro checknum, reg; evcmpltu reg, TISNUM; .endmacro
-|.macro checkstr, reg; evcmpeq reg, TISSTR; .endmacro
-|.macro checktab, reg; evcmpeq reg, TISTAB; .endmacro
-|.macro checkfunc, reg; evcmpeq reg, TISFUNC; .endmacro
-|.macro checknil, reg; evcmpeq reg, TISNIL; .endmacro
-|.macro checkok, label; blt label; .endmacro
-|.macro checkfail, label; bge label; .endmacro
-|.macro checkanyfail, label; bns label; .endmacro
-|.macro checkallok, label; bso label; .endmacro
-|
-|.macro branch_RD
-| srwi TMP0, RD, 1
-| add PC, PC, TMP0
-| addis PC, PC, -(BCBIAS_J*4 >> 16)
-|.endmacro
-|
-|// Assumes DISPATCH is relative to GL.
-#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
-#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
-|
-#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
-|
-|.macro hotloop
-| NYI
-|.endmacro
-|
-|.macro hotcall
-| NYI
-|.endmacro
-|
-|// Set current VM state. Uses TMP0.
-|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
-|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
-|
-|// Move table write barrier back. Overwrites mark and tmp.
-|.macro barrierback, tab, mark, tmp
-| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
-| // Assumes LJ_GC_BLACK is 0x04.
-| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
-| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
-| stb mark, tab->marked
-| stw tmp, tab->gclist
-|.endmacro
-|
-|//-----------------------------------------------------------------------
-
-/* Generate subroutines used by opcodes and other parts of the VM. */
-/* The .code_sub section should be last to help static branch prediction. */
-static void build_subroutines(BuildCtx *ctx)
-{
- |.code_sub
- |
- |//-----------------------------------------------------------------------
- |//-- Return handling ----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_returnp:
- | // See vm_return. Also: TMP2 = previous base.
- | andi. TMP0, PC, FRAME_P
- | evsplati TMP1, LJ_TTRUE
- | beq ->cont_dispatch
- |
- | // Return from pcall or xpcall fast func.
- | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
- | mr BASE, TMP2 // Restore caller base.
- | // Prepending may overwrite the pcall frame, so do it at the end.
- | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
- |
- |->vm_returnc:
- | addi RD, RD, 8 // RD = (nresults+1)*8.
- | andi. TMP0, PC, FRAME_TYPE
- | cmpwi cr1, RD, 0
- | li CRET1, LUA_YIELD
- | beq cr1, ->vm_unwind_c_eh
- | mr MULTRES, RD
- | beq ->BC_RET_Z // Handle regular return to Lua.
- |
- |->vm_return:
- | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
- | // TMP0 = PC & FRAME_TYPE
- | cmpwi TMP0, FRAME_C
- | rlwinm TMP2, PC, 0, 0, 28
- | li_vmstate C
- | sub TMP2, BASE, TMP2 // TMP2 = previous base.
- | bne ->vm_returnp
- |
- | addic. TMP1, RD, -8
- | stw TMP2, L->base
- | lwz TMP2, SAVE_NRES
- | subi BASE, BASE, 8
- | st_vmstate
- | slwi TMP2, TMP2, 3
- | beq >2
- |1:
- | addic. TMP1, TMP1, -8
- | evldd TMP0, 0(RA)
- | addi RA, RA, 8
- | evstdd TMP0, 0(BASE)
- | addi BASE, BASE, 8
- | bne <1
- |
- |2:
- | cmpw TMP2, RD // More/less results wanted?
- | bne >6
- |3:
- | stw BASE, L->top // Store new top.
- |
- |->vm_leave_cp:
- | lwz TMP0, SAVE_CFRAME // Restore previous C frame.
- | li CRET1, 0 // Ok return status for vm_pcall.
- | stw TMP0, L->cframe
- |
- |->vm_leave_unw:
- | restoreregs
- | blr
- |
- |6:
- | ble >7 // Less results wanted?
- | // More results wanted. Check stack size and fill up results with nil.
- | lwz TMP1, L->maxstack
- | cmplw BASE, TMP1
- | bge >8
- | evstdd TISNIL, 0(BASE)
- | addi RD, RD, 8
- | addi BASE, BASE, 8
- | b <2
- |
- |7: // Less results wanted.
- | sub TMP0, RD, TMP2
- | cmpwi TMP2, 0 // LUA_MULTRET+1 case?
- | sub TMP0, BASE, TMP0 // Subtract the difference.
- | iseleq BASE, BASE, TMP0 // Either keep top or shrink it.
- | b <3
- |
- |8: // Corner case: need to grow stack for filling up results.
- | // This can happen if:
- | // - A C function grows the stack (a lot).
- | // - The GC shrinks the stack in between.
- | // - A return back from a lua_call() with (high) nresults adjustment.
- | stw BASE, L->top // Save current top held in BASE (yes).
- | mr SAVE0, RD
- | mr CARG2, TMP2
- | mr CARG1, L
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lwz TMP2, SAVE_NRES
- | mr RD, SAVE0
- | slwi TMP2, TMP2, 3
- | lwz BASE, L->top // Need the (realloced) L->top in BASE.
- | b <2
- |
- |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
- | // (void *cframe, int errcode)
- | mr sp, CARG1
- | mr CRET1, CARG2
- |->vm_unwind_c_eh: // Landing pad for external unwinder.
- | lwz L, SAVE_L
- | li TMP0, ~LJ_VMST_C
- | lwz GL:TMP1, L->glref
- | stw TMP0, GL:TMP1->vmstate
- | b ->vm_leave_unw
- |
- |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
- | // (void *cframe)
- | rlwinm sp, CARG1, 0, 0, 29
- |->vm_unwind_ff_eh: // Landing pad for external unwinder.
- | lwz L, SAVE_L
- | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
- | evsplati TISFUNC, LJ_TFUNC
- | lus TOBIT, 0x4338
- | evsplati TISTAB, LJ_TTAB
- | li TMP0, 0
- | lwz BASE, L->base
- | evmergelo TOBIT, TOBIT, TMP0
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | evsplati TISSTR, LJ_TSTR
- | li TMP1, LJ_TFALSE
- | evsplati TISNIL, LJ_TNIL
- | li_vmstate INTERP
- | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
- | la RA, -8(BASE) // Results start at BASE-8.
- | addi DISPATCH, DISPATCH, GG_G2DISP
- | stw TMP1, 0(RA) // Prepend false to error message.
- | li RD, 16 // 2 results: false + error message.
- | st_vmstate
- | b ->vm_returnc
- |
- |//-----------------------------------------------------------------------
- |//-- Grow stack for calls -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_growstack_c: // Grow stack for C function.
- | li CARG2, LUA_MINSTACK
- | b >2
- |
- |->vm_growstack_l: // Grow stack for Lua function.
- | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
- | add RC, BASE, RC
- | sub RA, RA, BASE
- | stw BASE, L->base
- | addi PC, PC, 4 // Must point after first instruction.
- | stw RC, L->top
- | srwi CARG2, RA, 3
- |2:
- | // L->base = new base, L->top = top
- | stw PC, SAVE_PC
- | mr CARG1, L
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lwz BASE, L->base
- | lwz RC, L->top
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | sub RC, RC, BASE
- | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
- | ins_callt // Just retry the call.
- |
- |//-----------------------------------------------------------------------
- |//-- Entry points into the assembler VM ---------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_resume: // Setup C frame and resume thread.
- | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
- | saveregs
- | mr L, CARG1
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | mr BASE, CARG2
- | lbz TMP1, L->status
- | stw L, SAVE_L
- | li PC, FRAME_CP
- | addi TMP0, sp, CFRAME_RESUME
- | addi DISPATCH, DISPATCH, GG_G2DISP
- | stw CARG3, SAVE_NRES
- | cmplwi TMP1, 0
- | stw CARG3, SAVE_ERRF
- | stw TMP0, L->cframe
- | stw CARG3, SAVE_CFRAME
- | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | beq >3
- |
- | // Resume after yield (like a return).
- | mr RA, BASE
- | lwz BASE, L->base
- | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
- | lwz TMP1, L->top
- | evsplati TISFUNC, LJ_TFUNC
- | lus TOBIT, 0x4338
- | evsplati TISTAB, LJ_TTAB
- | lwz PC, FRAME_PC(BASE)
- | li TMP2, 0
- | evsplati TISSTR, LJ_TSTR
- | sub RD, TMP1, BASE
- | evmergelo TOBIT, TOBIT, TMP2
- | stb CARG3, L->status
- | andi. TMP0, PC, FRAME_TYPE
- | li_vmstate INTERP
- | addi RD, RD, 8
- | evsplati TISNIL, LJ_TNIL
- | mr MULTRES, RD
- | st_vmstate
- | beq ->BC_RET_Z
- | b ->vm_return
- |
- |->vm_pcall: // Setup protected C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
- | saveregs
- | li PC, FRAME_CP
- | stw CARG4, SAVE_ERRF
- | b >1
- |
- |->vm_call: // Setup C frame and enter VM.
- | // (lua_State *L, TValue *base, int nres1)
- | saveregs
- | li PC, FRAME_C
- |
- |1: // Entry point for vm_pcall above (PC = ftype).
- | lwz TMP1, L:CARG1->cframe
- | stw CARG3, SAVE_NRES
- | mr L, CARG1
- | stw CARG1, SAVE_L
- | mr BASE, CARG2
- | stw sp, L->cframe // Add our C frame to cframe chain.
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | stw TMP1, SAVE_CFRAME
- | addi DISPATCH, DISPATCH, GG_G2DISP
- |
- |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
- | lwz TMP2, L->base // TMP2 = old base (used in vmeta_call).
- | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
- | lwz TMP1, L->top
- | evsplati TISFUNC, LJ_TFUNC
- | add PC, PC, BASE
- | evsplati TISTAB, LJ_TTAB
- | lus TOBIT, 0x4338
- | li TMP0, 0
- | sub PC, PC, TMP2 // PC = frame delta + frame type
- | evsplati TISSTR, LJ_TSTR
- | sub NARGS8:RC, TMP1, BASE
- | evmergelo TOBIT, TOBIT, TMP0
- | li_vmstate INTERP
- | evsplati TISNIL, LJ_TNIL
- | st_vmstate
- |
- |->vm_call_dispatch:
- | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
- | li TMP0, -8
- | evlddx LFUNC:RB, BASE, TMP0
- | checkfunc LFUNC:RB
- | checkfail ->vmeta_call
- |
- |->vm_call_dispatch_f:
- | ins_call
- | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
- |
- |->vm_cpcall: // Setup protected C frame, call C.
- | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
- | saveregs
- | mr L, CARG1
- | lwz TMP0, L:CARG1->stack
- | stw CARG1, SAVE_L
- | lwz TMP1, L->top
- | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
- | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
- | lwz TMP1, L->cframe
- | stw sp, L->cframe // Add our C frame to cframe chain.
- | li TMP2, 0
- | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
- | stw TMP2, SAVE_ERRF // No error function.
- | stw TMP1, SAVE_CFRAME
- | mtctr CARG4
- | bctrl // (lua_State *L, lua_CFunction func, void *ud)
- | mr. BASE, CRET1
- | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
- | li PC, FRAME_CP
- | addi DISPATCH, DISPATCH, GG_G2DISP
- | bne <3 // Else continue with the call.
- | b ->vm_leave_cp // No base? Just remove C frame.
- |
- |//-----------------------------------------------------------------------
- |//-- Metamethod handling ------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
- |// stack, so BASE doesn't need to be reloaded across these calls.
- |
- |//-- Continuation dispatch ----------------------------------------------
- |
- |->cont_dispatch:
- | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
- | lwz TMP0, -12(BASE) // Continuation.
- | mr RB, BASE
- | mr BASE, TMP2 // Restore caller BASE.
- | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
- | cmplwi TMP0, 0
- | lwz PC, -16(RB) // Restore PC from [cont|PC].
- | beq >1
- | subi TMP2, RD, 8
- | lwz TMP1, LFUNC:TMP1->pc
- | evstddx TISNIL, RA, TMP2 // Ensure one valid arg.
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | // BASE = base, RA = resultptr, RB = meta base
- | mtctr TMP0
- | bctr // Jump to continuation.
- |
- |1: // Tail call from C function.
- | subi TMP1, RB, 16
- | sub RC, TMP1, BASE
- | b ->vm_call_tail
- |
- |->cont_cat: // RA = resultptr, RB = meta base
- | lwz INS, -4(PC)
- | subi CARG2, RB, 16
- | decode_RB8 SAVE0, INS
- | evldd TMP0, 0(RA)
- | add TMP1, BASE, SAVE0
- | stw BASE, L->base
- | cmplw TMP1, CARG2
- | sub CARG3, CARG2, TMP1
- | decode_RA8 RA, INS
- | evstdd TMP0, 0(CARG2)
- | bne ->BC_CAT_Z
- | evstddx TMP0, BASE, RA
- | b ->cont_nop
- |
- |//-- Table indexing metamethods -----------------------------------------
- |
- |->vmeta_tgets1:
- | evmergelo STR:RC, TISSTR, STR:RC
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | decode_RB8 RB, INS
- | evstdd STR:RC, 0(CARG3)
- | add CARG2, BASE, RB
- | b >1
- |
- |->vmeta_tgets:
- | evmergelo TAB:RB, TISTAB, TAB:RB
- | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
- | evmergelo STR:RC, TISSTR, STR:RC
- | evstdd TAB:RB, 0(CARG2)
- | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
- | evstdd STR:RC, 0(CARG3)
- | b >1
- |
- |->vmeta_tgetb: // TMP0 = index
- | efdcfsi TMP0, TMP0
- | decode_RB8 RB, INS
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | add CARG2, BASE, RB
- | evstdd TMP0, 0(CARG3)
- | b >1
- |
- |->vmeta_tgetv:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG2, BASE, RB
- | add CARG3, BASE, RC
- |1:
- | stw BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
- | // Returns TValue * (finished) or NULL (metamethod).
- | cmplwi CRET1, 0
- | beq >3
- | evldd TMP0, 0(CRET1)
- | evstddx TMP0, BASE, RA
- | ins_next
- |
- |3: // Call __index metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k
- | subfic TMP1, BASE, FRAME_CONT
- | lwz BASE, L->top
- | stw PC, -16(BASE) // [cont|PC]
- | add PC, TMP1, BASE
- | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | li NARGS8:RC, 16 // 2 args for func(t, k).
- | b ->vm_call_dispatch_f
- |
- |//-----------------------------------------------------------------------
- |
- |->vmeta_tsets1:
- | evmergelo STR:RC, TISSTR, STR:RC
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | decode_RB8 RB, INS
- | evstdd STR:RC, 0(CARG3)
- | add CARG2, BASE, RB
- | b >1
- |
- |->vmeta_tsets:
- | evmergelo TAB:RB, TISTAB, TAB:RB
- | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
- | evmergelo STR:RC, TISSTR, STR:RC
- | evstdd TAB:RB, 0(CARG2)
- | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
- | evstdd STR:RC, 0(CARG3)
- | b >1
- |
- |->vmeta_tsetb: // TMP0 = index
- | efdcfsi TMP0, TMP0
- | decode_RB8 RB, INS
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | add CARG2, BASE, RB
- | evstdd TMP0, 0(CARG3)
- | b >1
- |
- |->vmeta_tsetv:
- | decode_RB8 RB, INS
- | decode_RC8 RC, INS
- | add CARG2, BASE, RB
- | add CARG3, BASE, RC
- |1:
- | stw BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
- | // Returns TValue * (finished) or NULL (metamethod).
- | cmplwi CRET1, 0
- | evlddx TMP0, BASE, RA
- | beq >3
- | // NOBARRIER: lj_meta_tset ensures the table is not black.
- | evstdd TMP0, 0(CRET1)
- | ins_next
- |
- |3: // Call __newindex metamethod.
- | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
- | subfic TMP1, BASE, FRAME_CONT
- | lwz BASE, L->top
- | stw PC, -16(BASE) // [cont|PC]
- | add PC, TMP1, BASE
- | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | li NARGS8:RC, 24 // 3 args for func(t, k, v)
- | evstdd TMP0, 16(BASE) // Copy value to third argument.
- | b ->vm_call_dispatch_f
- |
- |//-- Comparison metamethods ---------------------------------------------
- |
- |->vmeta_comp:
- | mr CARG1, L
- | subi PC, PC, 4
- | add CARG2, BASE, RA
- | stw PC, SAVE_PC
- | add CARG3, BASE, RD
- | stw BASE, L->base
- | decode_OP1 CARG4, INS
- | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
- | // Returns 0/1 or TValue * (metamethod).
- |3:
- | cmplwi CRET1, 1
- | bgt ->vmeta_binop
- |4:
- | lwz INS, 0(PC)
- | addi PC, PC, 4
- | decode_RD4 TMP2, INS
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | add TMP2, TMP2, TMP3
- | isellt PC, PC, TMP2
- |->cont_nop:
- | ins_next
- |
- |->cont_ra: // RA = resultptr
- | lwz INS, -4(PC)
- | evldd TMP0, 0(RA)
- | decode_RA8 TMP1, INS
- | evstddx TMP0, BASE, TMP1
- | b ->cont_nop
- |
- |->cont_condt: // RA = resultptr
- | lwz TMP0, 0(RA)
- | li TMP1, LJ_TTRUE
- | cmplw TMP1, TMP0 // Branch if result is true.
- | b <4
- |
- |->cont_condf: // RA = resultptr
- | lwz TMP0, 0(RA)
- | li TMP1, LJ_TFALSE
- | cmplw TMP0, TMP1 // Branch if result is false.
- | b <4
- |
- |->vmeta_equal:
- | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
- | subi PC, PC, 4
- | stw BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
- | // Returns 0/1 or TValue * (metamethod).
- | b <3
- |
- |//-- Arithmetic metamethods ---------------------------------------------
- |
- |->vmeta_arith_vn:
- | add CARG3, BASE, RB
- | add CARG4, KBASE, RC
- | b >1
- |
- |->vmeta_arith_nv:
- | add CARG3, KBASE, RC
- | add CARG4, BASE, RB
- | b >1
- |
- |->vmeta_unm:
- | add CARG3, BASE, RD
- | mr CARG4, CARG3
- | b >1
- |
- |->vmeta_arith_vv:
- | add CARG3, BASE, RB
- | add CARG4, BASE, RC
- |1:
- | add CARG2, BASE, RA
- | stw BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
- | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
- | // Returns NULL (finished) or TValue * (metamethod).
- | cmplwi CRET1, 0
- | beq ->cont_nop
- |
- | // Call metamethod for binary op.
- |->vmeta_binop:
- | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
- | sub TMP1, CRET1, BASE
- | stw PC, -16(CRET1) // [cont|PC]
- | mr TMP2, BASE
- | addi PC, TMP1, FRAME_CONT
- | mr BASE, CRET1
- | li NARGS8:RC, 16 // 2 args for func(o1, o2).
- | b ->vm_call_dispatch
- |
- |->vmeta_len:
-#if LJ_52
- | mr SAVE0, CARG1
-#endif
- | add CARG2, BASE, RD
- | stw BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | bl extern lj_meta_len // (lua_State *L, TValue *o)
- | // Returns NULL (retry) or TValue * (metamethod base).
-#if LJ_52
- | cmplwi CRET1, 0
- | bne ->vmeta_binop // Binop call for compatibility.
- | mr CARG1, SAVE0
- | b ->BC_LEN_Z
-#else
- | b ->vmeta_binop // Binop call for compatibility.
-#endif
- |
- |//-- Call metamethod ----------------------------------------------------
- |
- |->vmeta_call: // Resolve and call __call metamethod.
- | // TMP2 = old base, BASE = new base, RC = nargs*8
- | mr CARG1, L
- | stw TMP2, L->base // This is the caller's base!
- | subi CARG2, BASE, 8
- | stw PC, SAVE_PC
- | add CARG3, BASE, RC
- | mr SAVE0, NARGS8:RC
- | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
- | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
- | ins_call
- |
- |->vmeta_callt: // Resolve __call for BC_CALLT.
- | // BASE = old base, RA = new base, RC = nargs*8
- | mr CARG1, L
- | stw BASE, L->base
- | subi CARG2, RA, 8
- | stw PC, SAVE_PC
- | add CARG3, RA, RC
- | mr SAVE0, NARGS8:RC
- | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
- | lwz TMP1, FRAME_PC(BASE)
- | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
- | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
- | b ->BC_CALLT_Z
- |
- |//-- Argument coercion for 'for' statement ------------------------------
- |
- |->vmeta_for:
- | mr CARG1, L
- | stw BASE, L->base
- | mr CARG2, RA
- | stw PC, SAVE_PC
- | mr SAVE0, INS
- | bl extern lj_meta_for // (lua_State *L, TValue *base)
- |.if JIT
- | decode_OP1 TMP0, SAVE0
- |.endif
- | decode_RA8 RA, SAVE0
- |.if JIT
- | cmpwi TMP0, BC_JFORI
- |.endif
- | decode_RD8 RD, SAVE0
- |.if JIT
- | beq =>BC_JFORI
- |.endif
- | b =>BC_FORI
- |
- |//-----------------------------------------------------------------------
- |//-- Fast functions -----------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |.macro .ffunc, name
- |->ff_ .. name:
- |.endmacro
- |
- |.macro .ffunc_1, name
- |->ff_ .. name:
- | cmplwi NARGS8:RC, 8
- | evldd CARG1, 0(BASE)
- | blt ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_2, name
- |->ff_ .. name:
- | cmplwi NARGS8:RC, 16
- | evldd CARG1, 0(BASE)
- | evldd CARG2, 8(BASE)
- | blt ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_n, name
- | .ffunc_1 name
- | checknum CARG1
- | checkfail ->fff_fallback
- |.endmacro
- |
- |.macro .ffunc_nn, name
- | .ffunc_2 name
- | evmergehi TMP0, CARG1, CARG2
- | checknum TMP0
- | checkanyfail ->fff_fallback
- |.endmacro
- |
- |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
- |.macro ffgccheck
- | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
- | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
- | cmplw TMP0, TMP1
- | bgel ->fff_gcstep
- |.endmacro
- |
- |//-- Base library: checks -----------------------------------------------
- |
- |.ffunc assert
- | cmplwi NARGS8:RC, 8
- | evldd TMP0, 0(BASE)
- | blt ->fff_fallback
- | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
- | la RA, -8(BASE)
- | evcmpltu cr1, TMP0, TMP1
- | lwz PC, FRAME_PC(BASE)
- | bge cr1, ->fff_fallback
- | evstdd TMP0, 0(RA)
- | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
- | beq ->fff_res // Done if exactly 1 argument.
- | li TMP1, 8
- | subi RC, RC, 8
- |1:
- | cmplw TMP1, RC
- | evlddx TMP0, BASE, TMP1
- | evstddx TMP0, RA, TMP1
- | addi TMP1, TMP1, 8
- | bne <1
- | b ->fff_res
- |
- |.ffunc type
- | cmplwi NARGS8:RC, 8
- | lwz CARG1, 0(BASE)
- | blt ->fff_fallback
- | li TMP2, ~LJ_TNUMX
- | cmplw CARG1, TISNUM
- | not TMP1, CARG1
- | isellt TMP1, TMP2, TMP1
- | slwi TMP1, TMP1, 3
- | la TMP2, CFUNC:RB->upvalue
- | evlddx STR:CRET1, TMP2, TMP1
- | b ->fff_restv
- |
- |//-- Base library: getters and setters ---------------------------------
- |
- |.ffunc_1 getmetatable
- | checktab CARG1
- | evmergehi TMP1, CARG1, CARG1
- | checkfail >6
- |1: // Field metatable must be at same offset for GCtab and GCudata!
- | lwz TAB:RB, TAB:CARG1->metatable
- |2:
- | evmr CRET1, TISNIL
- | cmplwi TAB:RB, 0
- | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
- | beq ->fff_restv
- | lwz TMP0, TAB:RB->hmask
- | evmergelo CRET1, TISTAB, TAB:RB // Use metatable as default result.
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | evmergelo STR:RC, TISSTR, STR:RC
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |3: // Rearranged logic, because we expect _not_ to find the key.
- | evldd TMP0, NODE:TMP2->key
- | evldd TMP1, NODE:TMP2->val
- | evcmpeq TMP0, STR:RC
- | lwz NODE:TMP2, NODE:TMP2->next
- | checkallok >5
- | cmplwi NODE:TMP2, 0
- | beq ->fff_restv // Not found, keep default result.
- | b <3
- |5:
- | checknil TMP1
- | checkok ->fff_restv // Ditto for nil value.
- | evmr CRET1, TMP1 // Return value of mt.__metatable.
- | b ->fff_restv
- |
- |6:
- | cmpwi TMP1, LJ_TUDATA
- | not TMP1, TMP1
- | beq <1
- | checknum CARG1
- | slwi TMP1, TMP1, 2
- | li TMP2, 4*~LJ_TNUMX
- | isellt TMP1, TMP2, TMP1
- | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
- | lwzx TAB:RB, TMP2, TMP1
- | b <2
- |
- |.ffunc_2 setmetatable
- | // Fast path: no mt for table yet and not clearing the mt.
- | evmergehi TMP0, TAB:CARG1, TAB:CARG2
- | checktab TMP0
- | checkanyfail ->fff_fallback
- | lwz TAB:TMP1, TAB:CARG1->metatable
- | cmplwi TAB:TMP1, 0
- | lbz TMP3, TAB:CARG1->marked
- | bne ->fff_fallback
- | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | stw TAB:CARG2, TAB:CARG1->metatable
- | beq ->fff_restv
- | barrierback TAB:CARG1, TMP3, TMP0
- | b ->fff_restv
- |
- |.ffunc rawget
- | cmplwi NARGS8:RC, 16
- | evldd CARG2, 0(BASE)
- | blt ->fff_fallback
- | checktab CARG2
- | la CARG3, 8(BASE)
- | checkfail ->fff_fallback
- | mr CARG1, L
- | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
- | // Returns cTValue *.
- | evldd CRET1, 0(CRET1)
- | b ->fff_restv
- |
- |//-- Base library: conversions ------------------------------------------
- |
- |.ffunc tonumber
- | // Only handles the number case inline (without a base argument).
- | cmplwi NARGS8:RC, 8
- | evldd CARG1, 0(BASE)
- | bne ->fff_fallback // Exactly one argument.
- | checknum CARG1
- | checkok ->fff_restv
- | b ->fff_fallback
- |
- |.ffunc_1 tostring
- | // Only handles the string or number case inline.
- | checkstr CARG1
- | // A __tostring method in the string base metatable is ignored.
- | checkok ->fff_restv // String key?
- | // Handle numbers inline, unless a number base metatable is present.
- | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
- | checknum CARG1
- | cmplwi cr1, TMP0, 0
- | stw BASE, L->base // Add frame since C call can throw.
- | crand 4*cr0+eq, 4*cr0+lt, 4*cr1+eq
- | stw PC, SAVE_PC // Redundant (but a defined value).
- | bne ->fff_fallback
- | ffgccheck
- | mr CARG1, L
- | mr CARG2, BASE
- | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
- | // Returns GCstr *.
- | evmergelo STR:CRET1, TISSTR, STR:CRET1
- | b ->fff_restv
- |
- |//-- Base library: iterators -------------------------------------------
- |
- |.ffunc next
- | cmplwi NARGS8:RC, 8
- | evldd CARG2, 0(BASE)
- | blt ->fff_fallback
- | evstddx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
- | checktab TAB:CARG2
- | lwz PC, FRAME_PC(BASE)
- | checkfail ->fff_fallback
- | stw BASE, L->base // Add frame since C call can throw.
- | mr CARG1, L
- | stw BASE, L->top // Dummy frame length is ok.
- | la CARG3, 8(BASE)
- | stw PC, SAVE_PC
- | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
- | // Returns 0 at end of traversal.
- | cmplwi CRET1, 0
- | evmr CRET1, TISNIL
- | beq ->fff_restv // End of traversal: return nil.
- | evldd TMP0, 8(BASE) // Copy key and value to results.
- | la RA, -8(BASE)
- | evldd TMP1, 16(BASE)
- | evstdd TMP0, 0(RA)
- | li RD, (2+1)*8
- | evstdd TMP1, 8(RA)
- | b ->fff_res
- |
- |.ffunc_1 pairs
- | checktab TAB:CARG1
- | lwz PC, FRAME_PC(BASE)
- | checkfail ->fff_fallback
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
- | cmplwi TAB:TMP2, 0
- | la RA, -8(BASE)
- | bne ->fff_fallback
-#else
- | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
- | la RA, -8(BASE)
-#endif
- | evstdd TISNIL, 8(BASE)
- | li RD, (3+1)*8
- | evstdd CFUNC:TMP0, 0(RA)
- | b ->fff_res
- |
- |.ffunc_2 ipairs_aux
- | checktab TAB:CARG1
- | lwz PC, FRAME_PC(BASE)
- | checkfail ->fff_fallback
- | checknum CARG2
- | lus TMP3, 0x3ff0
- | checkfail ->fff_fallback
- | efdctsi TMP2, CARG2
- | lwz TMP0, TAB:CARG1->asize
- | evmergelo TMP3, TMP3, ZERO
- | lwz TMP1, TAB:CARG1->array
- | efdadd CARG2, CARG2, TMP3
- | addi TMP2, TMP2, 1
- | la RA, -8(BASE)
- | cmplw TMP0, TMP2
- | slwi TMP3, TMP2, 3
- | evstdd CARG2, 0(RA)
- | ble >2 // Not in array part?
- | evlddx TMP1, TMP1, TMP3
- |1:
- | checknil TMP1
- | li RD, (0+1)*8
- | checkok ->fff_res // End of iteration, return 0 results.
- | li RD, (2+1)*8
- | evstdd TMP1, 8(RA)
- | b ->fff_res
- |2: // Check for empty hash part first. Otherwise call C function.
- | lwz TMP0, TAB:CARG1->hmask
- | cmplwi TMP0, 0
- | li RD, (0+1)*8
- | beq ->fff_res
- | mr CARG2, TMP2
- | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
- | // Returns cTValue * or NULL.
- | cmplwi CRET1, 0
- | li RD, (0+1)*8
- | beq ->fff_res
- | evldd TMP1, 0(CRET1)
- | b <1
- |
- |.ffunc_1 ipairs
- | checktab TAB:CARG1
- | lwz PC, FRAME_PC(BASE)
- | checkfail ->fff_fallback
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
- | cmplwi TAB:TMP2, 0
- | la RA, -8(BASE)
- | bne ->fff_fallback
-#else
- | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
- | la RA, -8(BASE)
-#endif
- | evsplati TMP1, 0
- | li RD, (3+1)*8
- | evstdd TMP1, 8(BASE)
- | evstdd CFUNC:TMP0, 0(RA)
- | b ->fff_res
- |
- |//-- Base library: catch errors ----------------------------------------
- |
- |.ffunc pcall
- | cmplwi NARGS8:RC, 8
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | blt ->fff_fallback
- | mr TMP2, BASE
- | la BASE, 8(BASE)
- | // Remember active hook before pcall.
- | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
- | subi NARGS8:RC, NARGS8:RC, 8
- | addi PC, TMP3, 8+FRAME_PCALL
- | b ->vm_call_dispatch
- |
- |.ffunc_2 xpcall
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | mr TMP2, BASE
- | checkfunc CARG2 // Traceback must be a function.
- | checkfail ->fff_fallback
- | la BASE, 16(BASE)
- | // Remember active hook before pcall.
- | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
- | evstdd CARG2, 0(TMP2) // Swap function and traceback.
- | subi NARGS8:RC, NARGS8:RC, 16
- | evstdd CARG1, 8(TMP2)
- | addi PC, TMP3, 16+FRAME_PCALL
- | b ->vm_call_dispatch
- |
- |//-- Coroutine library --------------------------------------------------
- |
- |.macro coroutine_resume_wrap, resume
- |.if resume
- |.ffunc_1 coroutine_resume
- | evmergehi TMP0, L:CARG1, L:CARG1
- |.else
- |.ffunc coroutine_wrap_aux
- | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
- |.endif
- |.if resume
- | cmpwi TMP0, LJ_TTHREAD
- | bne ->fff_fallback
- |.endif
- | lbz TMP0, L:CARG1->status
- | lwz TMP1, L:CARG1->cframe
- | lwz CARG2, L:CARG1->top
- | cmplwi cr0, TMP0, LUA_YIELD
- | lwz TMP2, L:CARG1->base
- | cmplwi cr1, TMP1, 0
- | lwz TMP0, L:CARG1->maxstack
- | cmplw cr7, CARG2, TMP2
- | lwz PC, FRAME_PC(BASE)
- | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
- | add TMP2, CARG2, NARGS8:RC
- | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
- | cmplw cr1, TMP2, TMP0
- | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
- | stw PC, SAVE_PC
- | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
- | stw BASE, L->base
- | blt cr6, ->fff_fallback
- |1:
- |.if resume
- | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
- | subi NARGS8:RC, NARGS8:RC, 8
- | subi TMP2, TMP2, 8
- |.endif
- | stw TMP2, L:CARG1->top
- | li TMP1, 0
- | stw BASE, L->top
- |2: // Move args to coroutine.
- | cmpw TMP1, NARGS8:RC
- | evlddx TMP0, BASE, TMP1
- | beq >3
- | evstddx TMP0, CARG2, TMP1
- | addi TMP1, TMP1, 8
- | b <2
- |3:
- | li CARG3, 0
- | mr L:SAVE0, L:CARG1
- | li CARG4, 0
- | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
- | // Returns thread status.
- |4:
- | lwz TMP2, L:SAVE0->base
- | cmplwi CRET1, LUA_YIELD
- | lwz TMP3, L:SAVE0->top
- | li_vmstate INTERP
- | lwz BASE, L->base
- | st_vmstate
- | bgt >8
- | sub RD, TMP3, TMP2
- | lwz TMP0, L->maxstack
- | cmplwi RD, 0
- | add TMP1, BASE, RD
- | beq >6 // No results?
- | cmplw TMP1, TMP0
- | li TMP1, 0
- | bgt >9 // Need to grow stack?
- |
- | subi TMP3, RD, 8
- | stw TMP2, L:SAVE0->top // Clear coroutine stack.
- |5: // Move results from coroutine.
- | cmplw TMP1, TMP3
- | evlddx TMP0, TMP2, TMP1
- | evstddx TMP0, BASE, TMP1
- | addi TMP1, TMP1, 8
- | bne <5
- |6:
- | andi. TMP0, PC, FRAME_TYPE
- |.if resume
- | li TMP1, LJ_TTRUE
- | la RA, -8(BASE)
- | stw TMP1, -8(BASE) // Prepend true to results.
- | addi RD, RD, 16
- |.else
- | mr RA, BASE
- | addi RD, RD, 8
- |.endif
- |7:
- | stw PC, SAVE_PC
- | mr MULTRES, RD
- | beq ->BC_RET_Z
- | b ->vm_return
- |
- |8: // Coroutine returned with error (at co->top-1).
- |.if resume
- | andi. TMP0, PC, FRAME_TYPE
- | la TMP3, -8(TMP3)
- | li TMP1, LJ_TFALSE
- | evldd TMP0, 0(TMP3)
- | stw TMP3, L:SAVE0->top // Remove error from coroutine stack.
- | li RD, (2+1)*8
- | stw TMP1, -8(BASE) // Prepend false to results.
- | la RA, -8(BASE)
- | evstdd TMP0, 0(BASE) // Copy error message.
- | b <7
- |.else
- | mr CARG1, L
- | mr CARG2, L:SAVE0
- | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
- |.endif
- |
- |9: // Handle stack expansion on return from yield.
- | mr CARG1, L
- | srwi CARG2, RD, 3
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | li CRET1, 0
- | b <4
- |.endmacro
- |
- | coroutine_resume_wrap 1 // coroutine.resume
- | coroutine_resume_wrap 0 // coroutine.wrap
- |
- |.ffunc coroutine_yield
- | lwz TMP0, L->cframe
- | add TMP1, BASE, NARGS8:RC
- | stw BASE, L->base
- | andi. TMP0, TMP0, CFRAME_RESUME
- | stw TMP1, L->top
- | li CRET1, LUA_YIELD
- | beq ->fff_fallback
- | stw ZERO, L->cframe
- | stb CRET1, L->status
- | b ->vm_leave_unw
- |
- |//-- Math library -------------------------------------------------------
- |
- |.ffunc_n math_abs
- | efdabs CRET1, CARG1
- | // Fallthrough.
- |
- |->fff_restv:
- | // CRET1 = TValue result.
- | lwz PC, FRAME_PC(BASE)
- | la RA, -8(BASE)
- | evstdd CRET1, 0(RA)
- |->fff_res1:
- | // RA = results, PC = return.
- | li RD, (1+1)*8
- |->fff_res:
- | // RA = results, RD = (nresults+1)*8, PC = return.
- | andi. TMP0, PC, FRAME_TYPE
- | mr MULTRES, RD
- | bne ->vm_return
- | lwz INS, -4(PC)
- | decode_RB8 RB, INS
- |5:
- | cmplw RB, RD // More results expected?
- | decode_RA8 TMP0, INS
- | bgt >6
- | ins_next1
- | // Adjust BASE. KBASE is assumed to be set for the calling frame.
- | sub BASE, RA, TMP0
- | ins_next2
- |
- |6: // Fill up results with nil.
- | subi TMP1, RD, 8
- | addi RD, RD, 8
- | evstddx TISNIL, RA, TMP1
- | b <5
- |
- |.macro math_extern, func
- | .ffunc math_ .. func
- | cmplwi NARGS8:RC, 8
- | evldd CARG2, 0(BASE)
- | blt ->fff_fallback
- | checknum CARG2
- | evmergehi CARG1, CARG2, CARG2
- | checkfail ->fff_fallback
- | bl extern func@plt
- | evmergelo CRET1, CRET1, CRET2
- | b ->fff_restv
- |.endmacro
- |
- |.macro math_extern2, func
- | .ffunc math_ .. func
- | cmplwi NARGS8:RC, 16
- | evldd CARG2, 0(BASE)
- | evldd CARG4, 8(BASE)
- | blt ->fff_fallback
- | evmergehi CARG1, CARG4, CARG2
- | checknum CARG1
- | evmergehi CARG3, CARG4, CARG4
- | checkanyfail ->fff_fallback
- | bl extern func@plt
- | evmergelo CRET1, CRET1, CRET2
- | b ->fff_restv
- |.endmacro
- |
- |.macro math_round, func
- | .ffunc math_ .. func
- | cmplwi NARGS8:RC, 8
- | evldd CARG2, 0(BASE)
- | blt ->fff_fallback
- | checknum CARG2
- | evmergehi CARG1, CARG2, CARG2
- | checkfail ->fff_fallback
- | lwz PC, FRAME_PC(BASE)
- | bl ->vm_..func.._hilo;
- | la RA, -8(BASE)
- | evstdd CRET2, 0(RA)
- | b ->fff_res1
- |.endmacro
- |
- | math_round floor
- | math_round ceil
- |
- | math_extern sqrt
- |
- |.ffunc math_log
- | cmplwi NARGS8:RC, 8
- | evldd CARG2, 0(BASE)
- | bne ->fff_fallback // Need exactly 1 argument.
- | checknum CARG2
- | evmergehi CARG1, CARG2, CARG2
- | checkfail ->fff_fallback
- | bl extern log@plt
- | evmergelo CRET1, CRET1, CRET2
- | b ->fff_restv
- |
- | math_extern log10
- | math_extern exp
- | math_extern sin
- | math_extern cos
- | math_extern tan
- | math_extern asin
- | math_extern acos
- | math_extern atan
- | math_extern sinh
- | math_extern cosh
- | math_extern tanh
- | math_extern2 pow
- | math_extern2 atan2
- | math_extern2 fmod
- |
- |->ff_math_deg:
- |.ffunc_n math_rad
- | evldd CARG2, CFUNC:RB->upvalue[0]
- | efdmul CRET1, CARG1, CARG2
- | b ->fff_restv
- |
- |.ffunc math_ldexp
- | cmplwi NARGS8:RC, 16
- | evldd CARG2, 0(BASE)
- | evldd CARG4, 8(BASE)
- | blt ->fff_fallback
- | evmergehi CARG1, CARG4, CARG2
- | checknum CARG1
- | checkanyfail ->fff_fallback
- | efdctsi CARG3, CARG4
- | bl extern ldexp@plt
- | evmergelo CRET1, CRET1, CRET2
- | b ->fff_restv
- |
- |.ffunc math_frexp
- | cmplwi NARGS8:RC, 8
- | evldd CARG2, 0(BASE)
- | blt ->fff_fallback
- | checknum CARG2
- | evmergehi CARG1, CARG2, CARG2
- | checkfail ->fff_fallback
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | lwz PC, FRAME_PC(BASE)
- | bl extern frexp@plt
- | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
- | evmergelo CRET1, CRET1, CRET2
- | efdcfsi CRET2, TMP1
- | la RA, -8(BASE)
- | evstdd CRET1, 0(RA)
- | li RD, (2+1)*8
- | evstdd CRET2, 8(RA)
- | b ->fff_res
- |
- |.ffunc math_modf
- | cmplwi NARGS8:RC, 8
- | evldd CARG2, 0(BASE)
- | blt ->fff_fallback
- | checknum CARG2
- | evmergehi CARG1, CARG2, CARG2
- | checkfail ->fff_fallback
- | la CARG3, -8(BASE)
- | lwz PC, FRAME_PC(BASE)
- | bl extern modf@plt
- | evmergelo CRET1, CRET1, CRET2
- | la RA, -8(BASE)
- | evstdd CRET1, 0(BASE)
- | li RD, (2+1)*8
- | b ->fff_res
- |
- |.macro math_minmax, name, cmpop
- | .ffunc_1 name
- | checknum CARG1
- | li TMP1, 8
- | checkfail ->fff_fallback
- |1:
- | evlddx CARG2, BASE, TMP1
- | cmplw cr1, TMP1, NARGS8:RC
- | checknum CARG2
- | bge cr1, ->fff_restv // Ok, since CRET1 = CARG1.
- | checkfail ->fff_fallback
- | cmpop CARG2, CARG1
- | addi TMP1, TMP1, 8
- | crmove 4*cr0+lt, 4*cr0+gt
- | evsel CARG1, CARG2, CARG1
- | b <1
- |.endmacro
- |
- | math_minmax math_min, efdtstlt
- | math_minmax math_max, efdtstgt
- |
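The math_minmax macro instantiated above keeps the current minimum (or maximum) in CARG1 and scans the remaining arguments left to right, bailing out to ->fff_fallback when a non-number appears. A plain C equivalent of the happy path, with the fallback omitted; math.max uses the same loop with the comparison reversed:

static double math_min_sketch(const double *args, int nargs)
{
  double best = args[0];              /* .ffunc_1 guarantees at least one arg */
  for (int i = 1; i < nargs; i++) {
    if (args[i] < best)               /* efdtstlt + evsel in the assembly */
      best = args[i];
  }
  return best;
}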
- |//-- String library -----------------------------------------------------
- |
- |.ffunc_1 string_len
- | checkstr STR:CARG1
- | checkfail ->fff_fallback
- | lwz TMP0, STR:CARG1->len
- | efdcfsi CRET1, TMP0
- | b ->fff_restv
- |
- |.ffunc string_byte // Only handle the 1-arg case here.
- | cmplwi NARGS8:RC, 8
- | evldd STR:CARG1, 0(BASE)
- | bne ->fff_fallback // Need exactly 1 argument.
- | checkstr STR:CARG1
- | la RA, -8(BASE)
- | checkfail ->fff_fallback
- | lwz TMP0, STR:CARG1->len
- | li RD, (0+1)*8
- | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
- | li TMP2, (1+1)*8
- | cmplwi TMP0, 0
- | lwz PC, FRAME_PC(BASE)
- | efdcfsi CRET1, TMP1
- | iseleq RD, RD, TMP2
- | evstdd CRET1, 0(RA)
- | b ->fff_res
- |
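A short sketch of the 1-argument string.byte fast path above: the empty string yields no results, otherwise the code of the first byte is returned; the assembly can load that byte unconditionally because strings are NUL-terminated, and the iseleq picks the result count afterwards.

static int string_byte1_sketch(const char *s, unsigned len, double *out)
{
  if (len == 0) return 0;                    /* zero results */
  *out = (double)(unsigned char)s[0];
  return 1;                                  /* one numeric result */
}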
- |.ffunc string_char // Only handle the 1-arg case here.
- | ffgccheck
- | cmplwi NARGS8:RC, 8
- | evldd CARG1, 0(BASE)
- | bne ->fff_fallback // Exactly 1 argument.
- | checknum CARG1
- | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
- | checkfail ->fff_fallback
- | efdctsiz TMP0, CARG1
- | li CARG3, 1
- | cmplwi TMP0, 255
- | stb TMP0, 0(CARG2)
- | bgt ->fff_fallback
- |->fff_newstr:
- | mr CARG1, L
- | stw BASE, L->base
- | stw PC, SAVE_PC
- | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
- | // Returns GCstr *.
- | lwz BASE, L->base
- | evmergelo STR:CRET1, TISSTR, STR:CRET1
- | b ->fff_restv
- |
- |.ffunc string_sub
- | ffgccheck
- | cmplwi NARGS8:RC, 16
- | evldd CARG3, 16(BASE)
- | evldd STR:CARG1, 0(BASE)
- | blt ->fff_fallback
- | evldd CARG2, 8(BASE)
- | li TMP2, -1
- | beq >1
- | checknum CARG3
- | checkfail ->fff_fallback
- | efdctsiz TMP2, CARG3
- |1:
- | checknum CARG2
- | checkfail ->fff_fallback
- | checkstr STR:CARG1
- | efdctsiz TMP1, CARG2
- | checkfail ->fff_fallback
- | lwz TMP0, STR:CARG1->len
- | cmplw TMP0, TMP2 // len < end? (unsigned compare)
- | add TMP3, TMP2, TMP0
- | blt >5
- |2:
- | cmpwi TMP1, 0 // start <= 0?
- | add TMP3, TMP1, TMP0
- | ble >7
- |3:
- | sub. CARG3, TMP2, TMP1
- | addi CARG2, STR:CARG1, #STR-1
- | addi CARG3, CARG3, 1
- | add CARG2, CARG2, TMP1
- | isellt CARG3, r0, CARG3
- | b ->fff_newstr
- |
- |5: // Negative end or overflow.
- | cmpw TMP0, TMP2
- | addi TMP3, TMP3, 1
- | iselgt TMP2, TMP3, TMP0 // end = end > len ? len : end+len+1
- | b <2
- |
- |7: // Negative start or underflow.
- | cmpwi cr1, TMP3, 0
- | iseleq TMP1, r0, TMP3
- | isel TMP1, r0, TMP1, 4*cr1+lt
- | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
- | b <3
- |
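The string_sub fast path above spends most of its instructions normalizing the two indices: the missing second index defaults to -1 (the `li TMP2, -1`), negative indices count from the end, out-of-range values are clamped, and start > end yields the empty string. A minimal C sketch of that normalization under standard Lua string.sub semantics; returning a malloc'd copy is only for illustration, the assembly instead hands the pointer/length pair to lj_str_new.

#include <stdlib.h>
#include <string.h>

static char *string_sub_sketch(const char *s, int len, int start, int end,
                               int *outlen)
{
  char *r;
  if (end > len) end = len;                 /* clamp a too-large end */
  else if (end < 0) end += len + 1;         /* negative end counts from the end */
  if (start < 0) start += len + 1;          /* negative start counts from the end */
  if (start < 1) start = 1;                 /* clamp to the first character */
  *outlen = (end >= start) ? end - start + 1 : 0;
  r = malloc((size_t)*outlen + 1);
  if (r == NULL) return NULL;
  if (*outlen > 0)
    memcpy(r, s + start - 1, (size_t)*outlen);
  r[*outlen] = '\0';
  return r;
}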
- |.ffunc string_rep // Only handle the 1-char case inline.
- | ffgccheck
- | cmplwi NARGS8:RC, 16
- | evldd CARG1, 0(BASE)
- | evldd CARG2, 8(BASE)
- | bne ->fff_fallback // Exactly 2 arguments.
- | checknum CARG2
- | checkfail ->fff_fallback
- | checkstr STR:CARG1
- | efdctsiz CARG3, CARG2
- | checkfail ->fff_fallback
- | lwz TMP0, STR:CARG1->len
- | cmpwi CARG3, 0
- | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | ble >2 // Count <= 0? (or non-int)
- | cmplwi TMP0, 1
- | subi TMP2, CARG3, 1
- | blt >2 // Zero length string?
- | cmplw cr1, TMP1, CARG3
- | bne ->fff_fallback // Fallback for > 1-char strings.
- | lbz TMP0, STR:CARG1[1]
- | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | blt cr1, ->fff_fallback
- |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
- | cmplwi TMP2, 0
- | stbx TMP0, CARG2, TMP2
- | subi TMP2, TMP2, 1
- | bne <1
- | b ->fff_newstr
- |2: // Return empty string.
- | la STR:CRET1, DISPATCH_GL(strempty)(DISPATCH)
- | evmergelo CRET1, TISSTR, STR:CRET1
- | b ->fff_restv
- |
- |.ffunc string_reverse
- | ffgccheck
- | cmplwi NARGS8:RC, 8
- | evldd CARG1, 0(BASE)
- | blt ->fff_fallback
- | checkstr STR:CARG1
- | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | checkfail ->fff_fallback
- | lwz CARG3, STR:CARG1->len
- | la CARG1, #STR(STR:CARG1)
- | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | li TMP2, 0
- | cmplw TMP1, CARG3
- | subi TMP3, CARG3, 1
- | blt ->fff_fallback
- |1: // Reverse string copy.
- | cmpwi TMP3, 0
- | lbzx TMP1, CARG1, TMP2
- | blt ->fff_newstr
- | stbx TMP1, CARG2, TMP3
- | subi TMP3, TMP3, 1
- | addi TMP2, TMP2, 1
- | b <1
- |
- |.macro ffstring_case, name, lo
- | .ffunc name
- | ffgccheck
- | cmplwi NARGS8:RC, 8
- | evldd CARG1, 0(BASE)
- | blt ->fff_fallback
- | checkstr STR:CARG1
- | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
- | checkfail ->fff_fallback
- | lwz CARG3, STR:CARG1->len
- | la CARG1, #STR(STR:CARG1)
- | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
- | cmplw TMP1, CARG3
- | li TMP2, 0
- | blt ->fff_fallback
- |1: // ASCII case conversion.
- | cmplw TMP2, CARG3
- | lbzx TMP1, CARG1, TMP2
- | bge ->fff_newstr
- | subi TMP0, TMP1, lo
- | xori TMP3, TMP1, 0x20
- | cmplwi TMP0, 26
- | isellt TMP1, TMP3, TMP1
- | stbx TMP1, CARG2, TMP2
- | addi TMP2, TMP2, 1
- | b <1
- |.endmacro
- |
- |ffstring_case string_lower, 65
- |ffstring_case string_upper, 97
- |
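ffstring_case above converts one byte per iteration without branching on the character class result: a byte inside the 26-letter window starting at the `lo` constant (65, i.e. 'A', for string.lower; 97, i.e. 'a', for string.upper) gets bit 0x20 toggled, everything else is copied unchanged. A C rendering of the same test:

#include <stddef.h>

static void ascii_case_sketch(char *dst, const char *src, size_t n, unsigned lo)
{
  for (size_t i = 0; i < n; i++) {
    unsigned c = (unsigned char)src[i];
    if (c - lo < 26u)            /* single unsigned compare, as cmplwi above */
      c ^= 0x20;                 /* 'A' ^ 0x20 == 'a', 'a' ^ 0x20 == 'A' */
    dst[i] = (char)c;
  }
}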
- |//-- Table library ------------------------------------------------------
- |
- |.ffunc_1 table_getn
- | checktab CARG1
- | checkfail ->fff_fallback
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | efdcfsi CRET1, CRET1
- | b ->fff_restv
- |
- |//-- Bit library --------------------------------------------------------
- |
- |.macro .ffunc_bit, name
- | .ffunc_n bit_..name
- | efdadd CARG1, CARG1, TOBIT
- |.endmacro
- |
- |.ffunc_bit tobit
- |->fff_resbit:
- | efdcfsi CRET1, CARG1
- | b ->fff_restv
- |
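A sketch of the number-to-bit conversion behind `efdadd CARG1, CARG1, TOBIT` above, assuming TOBIT holds the usual 2^52 + 2^51 bias constant (that value is loaded elsewhere in this file and is an assumption here): adding the bias rounds the operand to an integer and leaves its value, modulo 2^32, in the low word of the double for magnitudes up to about 2^51; the 32-bit and/or/xor/shift instructions then operate on that word before efdcfsi turns it back into a number.

#include <stdint.h>
#include <string.h>

static int32_t tobit_sketch(double x)
{
  double biased = x + 6755399441055744.0;    /* 2^52 + 2^51, assumed bias */
  uint64_t bits;
  memcpy(&bits, &biased, sizeof(bits));      /* reinterpret without UB */
  return (int32_t)(uint32_t)bits;            /* low 32 bits, two's complement */
}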
- |.macro .ffunc_bit_op, name, ins
- | .ffunc_bit name
- | li TMP1, 8
- |1:
- | evlddx CARG2, BASE, TMP1
- | cmplw cr1, TMP1, NARGS8:RC
- | checknum CARG2
- | bge cr1, ->fff_resbit
- | checkfail ->fff_fallback
- | efdadd CARG2, CARG2, TOBIT
- | ins CARG1, CARG1, CARG2
- | addi TMP1, TMP1, 8
- | b <1
- |.endmacro
- |
- |.ffunc_bit_op band, and
- |.ffunc_bit_op bor, or
- |.ffunc_bit_op bxor, xor
- |
- |.ffunc_bit bswap
- | rotlwi TMP0, CARG1, 8
- | rlwimi TMP0, CARG1, 24, 0, 7
- | rlwimi TMP0, CARG1, 24, 16, 23
- | efdcfsi CRET1, TMP0
- | b ->fff_restv
- |
- |.ffunc_bit bnot
- | not TMP0, CARG1
- | efdcfsi CRET1, TMP0
- | b ->fff_restv
- |
- |.macro .ffunc_bit_sh, name, ins, shmod
- | .ffunc_nn bit_..name
- | efdadd CARG2, CARG2, TOBIT
- | efdadd CARG1, CARG1, TOBIT
- |.if shmod == 1
- | rlwinm CARG2, CARG2, 0, 27, 31
- |.elif shmod == 2
- | neg CARG2, CARG2
- |.endif
- | ins TMP0, CARG1, CARG2
- | efdcfsi CRET1, TMP0
- | b ->fff_restv
- |.endmacro
- |
- |.ffunc_bit_sh lshift, slw, 1
- |.ffunc_bit_sh rshift, srw, 1
- |.ffunc_bit_sh arshift, sraw, 1
- |.ffunc_bit_sh rol, rotlw, 0
- |.ffunc_bit_sh ror, rotlw, 2
- |
- |//-----------------------------------------------------------------------
- |
- |->fff_fallback: // Call fast function fallback handler.
- | // BASE = new base, RB = CFUNC, RC = nargs*8
- | lwz TMP3, CFUNC:RB->f
- | add TMP1, BASE, NARGS8:RC
- | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
- | addi TMP0, TMP1, 8*LUA_MINSTACK
- | lwz TMP2, L->maxstack
- | stw PC, SAVE_PC // Redundant (but a defined value).
- | cmplw TMP0, TMP2
- | stw BASE, L->base
- | stw TMP1, L->top
- | mr CARG1, L
- | bgt >5 // Need to grow stack.
- | mtctr TMP3
- | bctrl // (lua_State *L)
- | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
- | lwz BASE, L->base
- | cmpwi CRET1, 0
- | slwi RD, CRET1, 3
- | la RA, -8(BASE)
- | bgt ->fff_res // Returned nresults+1?
- |1: // Returned 0 or -1: retry fast path.
- | lwz TMP0, L->top
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | sub NARGS8:RC, TMP0, BASE
- | bne ->vm_call_tail // Returned -1?
- | ins_callt // Returned 0: retry fast path.
- |
- |// Reconstruct previous base for vmeta_call during tailcall.
- |->vm_call_tail:
- | andi. TMP0, PC, FRAME_TYPE
- | rlwinm TMP1, PC, 0, 0, 28
- | bne >3
- | lwz INS, -4(PC)
- | decode_RA8 TMP1, INS
- | addi TMP1, TMP1, 8
- |3:
- | sub TMP2, BASE, TMP1
- | b ->vm_call_dispatch // Resolve again for tailcall.
- |
- |5: // Grow stack for fallback handler.
- | li CARG2, LUA_MINSTACK
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lwz BASE, L->base
- | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
- | b <1
- |
- |->fff_gcstep: // Call GC step function.
- | // BASE = new base, RC = nargs*8
- | mflr SAVE0
- | stw BASE, L->base
- | add TMP0, BASE, NARGS8:RC
- | stw PC, SAVE_PC // Redundant (but a defined value).
- | stw TMP0, L->top
- | mr CARG1, L
- | bl extern lj_gc_step // (lua_State *L)
- | lwz BASE, L->base
- | mtlr SAVE0
- | lwz TMP0, L->top
- | sub NARGS8:RC, TMP0, BASE
- | lwz CFUNC:RB, FRAME_FUNC(BASE)
- | blr
- |
- |//-----------------------------------------------------------------------
- |//-- Special dispatch targets -------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_record: // Dispatch target for recording phase.
- |.if JIT
- | NYI
- |.endif
- |
- |->vm_rethook: // Dispatch target for return hooks.
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
- | beq >1
- |5: // Re-dispatch to static ins.
- | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OP4 TMP1, INS.
- | lwzx TMP0, DISPATCH, TMP1
- | mtctr TMP0
- | bctr
- |
- |->vm_inshook: // Dispatch target for instr/line hooks.
- | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
- | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
- | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
- | bne <5
- |
- | cmpwi cr1, TMP0, 0
- | addic. TMP2, TMP2, -1
- | beq cr1, <5
- | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
- | beq >1
- | bge cr1, <5
- |1:
- | mr CARG1, L
- | stw MULTRES, SAVE_MULTRES
- | mr CARG2, PC
- | stw BASE, L->base
- | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
- | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
- |3:
- | lwz BASE, L->base
- |4: // Re-dispatch to static ins.
- | lwz INS, -4(PC)
- | decode_OP4 TMP1, INS
- | decode_RB8 RB, INS
- | addi TMP1, TMP1, GG_DISP2STATIC
- | decode_RD8 RD, INS
- | lwzx TMP0, DISPATCH, TMP1
- | decode_RA8 RA, INS
- | decode_RC8 RC, INS
- | mtctr TMP0
- | bctr
- |
- |->cont_hook: // Continue from hook yield.
- | addi PC, PC, 4
- | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
- | b <4
- |
- |->vm_hotloop: // Hot loop counter underflow.
- |.if JIT
- | NYI
- |.endif
- |
- |->vm_callhook: // Dispatch target for call hooks.
- | mr CARG2, PC
- |.if JIT
- | b >1
- |.endif
- |
- |->vm_hotcall: // Hot call counter underflow.
- |.if JIT
- | ori CARG2, PC, 1
- |1:
- |.endif
- | add TMP0, BASE, RC
- | stw PC, SAVE_PC
- | mr CARG1, L
- | stw BASE, L->base
- | sub RA, RA, BASE
- | stw TMP0, L->top
- | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
- | // Returns ASMFunction.
- | lwz BASE, L->base
- | lwz TMP0, L->top
- | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
- | sub NARGS8:RC, TMP0, BASE
- | add RA, BASE, RA
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | mtctr CRET1
- | bctr
- |
- |//-----------------------------------------------------------------------
- |//-- Trace exit handler -------------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_exit_handler:
- |.if JIT
- | NYI
- |.endif
- |->vm_exit_interp:
- |.if JIT
- | NYI
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Math helper functions ----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |// FP value rounding. Called by math.floor/math.ceil fast functions
- |// and from JIT code.
- |//
- |// This can be inlined if the CPU has the frin/friz/frip/frim instructions.
- |// The alternative hard-float approaches have a deep dependency chain.
- |// The resulting latency is at least 3x-7x the double-precision FP latency
- |// (e500v2: 6cy, e600: 5cy, Cell: 10cy) or around 20-70 cycles.
- |//
- |// The soft-float approach is tedious, but much faster (e500v2: ~11cy/~6cy).
- |// However it relies on a fast way to transfer the FP value to GPRs
- |// (e500v2: 0cy for lo-word, 1cy for hi-word).
- |//
- |.macro vm_round, name, mode
- | // Used temporaries: TMP0, TMP1, TMP2, TMP3.
- |->name.._efd: // Input: CARG2, output: CRET2
- | evmergehi CARG1, CARG2, CARG2
- |->name.._hilo:
- | // Input: CARG1 (hi), CARG2 (hi, lo), output: CRET2
- | rlwinm TMP2, CARG1, 12, 21, 31
- | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
- | li TMP1, -1
- | cmplwi cr1, TMP2, 51 // 0 <= exp <= 51?
- | subfic TMP0, TMP2, 52
- | bgt cr1, >1
- | lus TMP3, 0xfff0
- | slw TMP0, TMP1, TMP0 // lomask = -1 << (52-exp)
- | sraw TMP1, TMP3, TMP2 // himask = (int32_t)0xfff00000 >> exp
- |.if mode == 2 // trunc(x):
- | evmergelo TMP0, TMP1, TMP0
- | evand CRET2, CARG2, TMP0 // hi &= himask, lo &= lomask
- |.else
- | andc TMP2, CARG2, TMP0
- | andc TMP3, CARG1, TMP1
- | or TMP2, TMP2, TMP3 // ztest = (hi&~himask) | (lo&~lomask)
- | srawi TMP3, CARG1, 31 // signmask = (int32_t)hi >> 31
- |.if mode == 0 // floor(x):
- | and. TMP2, TMP2, TMP3 // iszero = ((ztest & signmask) == 0)
- |.else // ceil(x):
- | andc. TMP2, TMP2, TMP3 // iszero = ((ztest & ~signmask) == 0)
- |.endif
- | and CARG2, CARG2, TMP0 // lo &= lomask
- | and CARG1, CARG1, TMP1 // hi &= himask
- | subc TMP0, CARG2, TMP0
- | iseleq TMP0, CARG2, TMP0 // lo = iszero ? lo : lo-lomask
- | sube TMP1, CARG1, TMP1
- | iseleq TMP1, CARG1, TMP1 // hi = iszero ? hi : hi-himask+carry
- | evmergelo CRET2, TMP1, TMP0
- |.endif
- | blr
- |1:
- | bgtlr // Already done if >=2^52, +-inf or nan.
- |.if mode == 2 // trunc(x):
- | rlwinm TMP1, CARG1, 0, 0, 0 // hi = sign(x)
- | li TMP0, 0
- | evmergelo CRET2, TMP1, TMP0
- |.else
- | rlwinm TMP2, CARG1, 0, 1, 31
- | srawi TMP0, CARG1, 31 // signmask = (int32_t)hi >> 31
- | or TMP2, TMP2, CARG2 // ztest = abs(hi) | lo
- | lus TMP1, 0x3ff0
- |.if mode == 0 // floor(x):
- | and. TMP2, TMP2, TMP0 // iszero = ((ztest & signmask) == 0)
- |.else // ceil(x):
- | andc. TMP2, TMP2, TMP0 // iszero = ((ztest & ~signmask) == 0)
- |.endif
- | li TMP0, 0
- | iseleq TMP1, r0, TMP1
- | rlwimi CARG1, TMP1, 0, 1, 31 // hi = sign(x) | (iszero ? 0.0 : 1.0)
- | evmergelo CRET2, CARG1, TMP0
- |.endif
- | blr
- |.endmacro
- |
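The vm_round macro above rounds purely with integer masks on the two halves of the double, as its comment block describes: the fractional mantissa bits are cleared, and for floor/ceil an extra step toward the next integer is taken when bits were discarded and the sign calls for it. The C below models the same idea for floor on a single 64-bit pattern; it illustrates the mask trick under those assumptions and is not the exact register-level sequence.

#include <stdint.h>
#include <string.h>

static double floor_sketch(double x)
{
  uint64_t bits, frac_mask;
  int exp;
  memcpy(&bits, &x, sizeof(bits));
  exp = (int)((bits >> 52) & 0x7ff) - 1023;          /* unbiased exponent */
  if (exp >= 52) return x;                           /* integral, inf or NaN */
  if (exp < 0)                                       /* |x| < 1 */
    return ((bits >> 63) && x != 0.0) ? -1.0 : x * 0.0;
  frac_mask = (UINT64_C(1) << (52 - exp)) - 1;       /* fractional mantissa bits */
  if ((bits & frac_mask) && (bits >> 63))            /* bits discarded and x < 0 */
    bits += frac_mask + 1;                           /* step magnitude up one unit */
  bits &= ~frac_mask;                                /* drop the fraction */
  memcpy(&x, &bits, sizeof(x));
  return x;
}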
- |->vm_floor:
- | mflr CARG3
- | evmergelo CARG2, CARG1, CARG2
- | bl ->vm_floor_hilo
- | mtlr CARG3
- | evmergehi CRET1, CRET2, CRET2
- | blr
- |
- | vm_round vm_floor, 0
- | vm_round vm_ceil, 1
- |.if JIT
- | vm_round vm_trunc, 2
- |.else
- |->vm_trunc_efd:
- |->vm_trunc_hilo:
- |.endif
- |
- |//-----------------------------------------------------------------------
- |//-- Miscellaneous functions --------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |//-----------------------------------------------------------------------
- |//-- FFI helper functions -----------------------------------------------
- |//-----------------------------------------------------------------------
- |
- |->vm_ffi_call:
- |.if FFI
- | NYI
- |.endif
- |
- |//-----------------------------------------------------------------------
-}
-
-/* Generate the code for a single instruction. */
-static void build_ins(BuildCtx *ctx, BCOp op, int defop)
-{
- int vk = 0;
- |=>defop:
-
- switch (op) {
-
- /* -- Comparison ops ---------------------------------------------------- */
-
- /* Remember: all ops branch for a true comparison, fall through otherwise. */
-
- case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
- | // RA = src1*8, RD = src2*8, JMP with RD = target
- | evlddx TMP0, BASE, RA
- | addi PC, PC, 4
- | evlddx TMP1, BASE, RD
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | lwz TMP2, -4(PC)
- | evmergehi RB, TMP0, TMP1
- | decode_RD4 TMP2, TMP2
- | checknum RB
- | add TMP2, TMP2, TMP3
- | checkanyfail ->vmeta_comp
- | efdcmplt TMP0, TMP1
- if (op == BC_ISLE || op == BC_ISGT) {
- | efdcmpeq cr1, TMP0, TMP1
- | cror 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
- }
- if (op == BC_ISLT || op == BC_ISLE) {
- | iselgt PC, TMP2, PC
- } else {
- | iselgt PC, PC, TMP2
- }
- | ins_next
- break;
-
- case BC_ISEQV: case BC_ISNEV:
- vk = op == BC_ISEQV;
- | // RA = src1*8, RD = src2*8, JMP with RD = target
- | evlddx CARG2, BASE, RA
- | addi PC, PC, 4
- | evlddx CARG3, BASE, RD
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | lwz TMP2, -4(PC)
- | evmergehi RB, CARG2, CARG3
- | decode_RD4 TMP2, TMP2
- | checknum RB
- | add TMP2, TMP2, TMP3
- | checkanyfail >5
- | efdcmpeq CARG2, CARG3
- if (vk) {
- | iselgt PC, TMP2, PC
- } else {
- | iselgt PC, PC, TMP2
- }
- |1:
- | ins_next
- |
- |5: // Either or both types are not numbers.
- | evcmpeq CARG2, CARG3
- | not TMP3, RB
- | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
- | crorc 4*cr7+lt, 4*cr0+so, 4*cr0+lt // 1: Same tv or different type.
- | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
- | crandc 4*cr7+gt, 4*cr0+lt, 4*cr1+gt // 2: Same type and primitive.
- | mr SAVE0, PC
- if (vk) {
- | isel PC, TMP2, PC, 4*cr7+gt
- } else {
- | isel TMP2, PC, TMP2, 4*cr7+gt
- }
- | cror 4*cr7+lt, 4*cr7+lt, 4*cr7+gt // 1 or 2.
- if (vk) {
- | isel PC, TMP2, PC, 4*cr0+so
- } else {
- | isel PC, PC, TMP2, 4*cr0+so
- }
- | blt cr7, <1 // Done if 1 or 2.
- | blt cr6, <1 // Done if not tab/ud.
- |
- | // Different tables or userdatas. Need to check __eq metamethod.
- | // Field metatable must be at same offset for GCtab and GCudata!
- | lwz TAB:TMP2, TAB:CARG2->metatable
- | li CARG4, 1-vk // ne = 0 or 1.
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable?
- | lbz TMP2, TAB:TMP2->nomm
- | andi. TMP2, TMP2, 1<<MM_eq
- | bne <1 // Or 'no __eq' flag set?
- | mr PC, SAVE0 // Restore old PC.
- | b ->vmeta_equal // Handle __eq metamethod.
- break;
-
- case BC_ISEQS: case BC_ISNES:
- vk = op == BC_ISEQS;
- | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
- | evlddx TMP0, BASE, RA
- | srwi RD, RD, 1
- | lwz INS, 0(PC)
- | subfic RD, RD, -4
- | addi PC, PC, 4
- | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | decode_RD4 TMP2, INS
- | evmergelo STR:TMP1, TISSTR, STR:TMP1
- | add TMP2, TMP2, TMP3
- | evcmpeq TMP0, STR:TMP1
- if (vk) {
- | isel PC, TMP2, PC, 4*cr0+so
- } else {
- | isel PC, PC, TMP2, 4*cr0+so
- }
- | ins_next
- break;
-
- case BC_ISEQN: case BC_ISNEN:
- vk = op == BC_ISEQN;
- | // RA = src*8, RD = num_const*8, JMP with RD = target
- | evlddx TMP0, BASE, RA
- | addi PC, PC, 4
- | evlddx TMP1, KBASE, RD
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | lwz INS, -4(PC)
- | checknum TMP0
- | checkfail >5
- | efdcmpeq TMP0, TMP1
- |1:
- | decode_RD4 TMP2, INS
- | add TMP2, TMP2, TMP3
- if (vk) {
- | iselgt PC, TMP2, PC
- |5:
- } else {
- | iselgt PC, PC, TMP2
- }
- |3:
- | ins_next
- if (!vk) {
- |5:
- | decode_RD4 TMP2, INS
- | add PC, TMP2, TMP3
- | b <3
- }
- break;
-
- case BC_ISEQP: case BC_ISNEP:
- vk = op == BC_ISEQP;
- | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
- | lwzx TMP0, BASE, RA
- | srwi TMP1, RD, 3
- | lwz INS, 0(PC)
- | addi PC, PC, 4
- | not TMP1, TMP1
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | cmplw TMP0, TMP1
- | decode_RD4 TMP2, INS
- | add TMP2, TMP2, TMP3
- if (vk) {
- | iseleq PC, TMP2, PC
- } else {
- | iseleq PC, PC, TMP2
- }
- | ins_next
- break;
-
- /* -- Unary test and copy ops ------------------------------------------- */
-
- case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
- | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
- | evlddx TMP0, BASE, RD
- | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
- | lwz INS, 0(PC)
- | evcmpltu TMP0, TMP1
- | addi PC, PC, 4
- if (op == BC_IST || op == BC_ISF) {
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | decode_RD4 TMP2, INS
- | add TMP2, TMP2, TMP3
- if (op == BC_IST) {
- | isellt PC, TMP2, PC
- } else {
- | isellt PC, PC, TMP2
- }
- } else {
- if (op == BC_ISTC) {
- | checkfail >1
- } else {
- | checkok >1
- }
- | addis PC, PC, -(BCBIAS_J*4 >> 16)
- | decode_RD4 TMP2, INS
- | evstddx TMP0, BASE, RA
- | add PC, PC, TMP2
- |1:
- }
- | ins_next
- break;
-
- /* -- Unary ops --------------------------------------------------------- */
-
- case BC_MOV:
- | // RA = dst*8, RD = src*8
- | ins_next1
- | evlddx TMP0, BASE, RD
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_NOT:
- | // RA = dst*8, RD = src*8
- | ins_next1
- | lwzx TMP0, BASE, RD
- | subfic TMP1, TMP0, LJ_TTRUE
- | adde TMP0, TMP0, TMP1
- | stwx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_UNM:
- | // RA = dst*8, RD = src*8
- | evlddx TMP0, BASE, RD
- | checknum TMP0
- | checkfail ->vmeta_unm
- | efdneg TMP0, TMP0
- | ins_next1
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_LEN:
- | // RA = dst*8, RD = src*8
- | evlddx CARG1, BASE, RD
- | checkstr CARG1
- | checkfail >2
- | lwz CRET1, STR:CARG1->len
- |1:
- | ins_next1
- | efdcfsi TMP0, CRET1
- | evstddx TMP0, BASE, RA
- | ins_next2
- |2:
- | checktab CARG1
- | checkfail ->vmeta_len
-#if LJ_52
- | lwz TAB:TMP2, TAB:CARG1->metatable
- | cmplwi TAB:TMP2, 0
- | bne >9
- |3:
-#endif
- |->BC_LEN_Z:
- | bl extern lj_tab_len // (GCtab *t)
- | // Returns uint32_t (but less than 2^31).
- | b <1
-#if LJ_52
- |9:
- | lbz TMP0, TAB:TMP2->nomm
- | andi. TMP0, TMP0, 1<<MM_len
- | bne <3 // 'no __len' flag set: done.
- | b ->vmeta_len
-#endif
- break;
-
- /* -- Binary ops -------------------------------------------------------- */
-
- |.macro ins_arithpre, t0, t1
- | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
- ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
- ||switch (vk) {
- ||case 0:
- | evlddx t0, BASE, RB
- | checknum t0
- | evlddx t1, KBASE, RC
- | checkfail ->vmeta_arith_vn
- || break;
- ||case 1:
- | evlddx t1, BASE, RB
- | checknum t1
- | evlddx t0, KBASE, RC
- | checkfail ->vmeta_arith_nv
- || break;
- ||default:
- | evlddx t0, BASE, RB
- | evlddx t1, BASE, RC
- | evmergehi TMP2, t0, t1
- | checknum TMP2
- | checkanyfail ->vmeta_arith_vv
- || break;
- ||}
- |.endmacro
- |
- |.macro ins_arith, ins
- | ins_arithpre TMP0, TMP1
- | ins_next1
- | ins TMP0, TMP0, TMP1
- | evstddx TMP0, BASE, RA
- | ins_next2
- |.endmacro
-
- case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
- | ins_arith efdadd
- break;
- case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
- | ins_arith efdsub
- break;
- case BC_MULVN: case BC_MULNV: case BC_MULVV:
- | ins_arith efdmul
- break;
- case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
- | ins_arith efddiv
- break;
- case BC_MODVN:
- | ins_arithpre RD, SAVE0
- |->BC_MODVN_Z:
- | efddiv CARG2, RD, SAVE0
- | bl ->vm_floor_efd // floor(b/c)
- | efdmul TMP0, CRET2, SAVE0
- | ins_next1
- | efdsub TMP0, RD, TMP0 // b - floor(b/c)*c
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_MODNV: case BC_MODVV:
- | ins_arithpre RD, SAVE0
- | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
- break;
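BC_MODVN_Z above derives the Lua modulo from the floor helper rather than from the C library. A one-line C sketch of that definition:

#include <math.h>

static double lua_mod_sketch(double b, double c)
{
  return b - floor(b / c) * c;   /* the same b - floor(b/c)*c as the assembly */
}

For example lua_mod_sketch(5, -3) is -1 while C's fmod(5, -3) is 2: the floor-based form takes the sign of the divisor, which is what Lua specifies.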
- case BC_POW:
- | evlddx CARG2, BASE, RB
- | evlddx CARG4, BASE, RC
- | evmergehi CARG1, CARG4, CARG2
- | checknum CARG1
- | evmergehi CARG3, CARG4, CARG4
- | checkanyfail ->vmeta_arith_vv
- | bl extern pow@plt
- | evmergelo CRET2, CRET1, CRET2
- | evstddx CRET2, BASE, RA
- | ins_next
- break;
-
- case BC_CAT:
- | // RA = dst*8, RB = src_start*8, RC = src_end*8
- | sub CARG3, RC, RB
- | stw BASE, L->base
- | add CARG2, BASE, RC
- | mr SAVE0, RB
- |->BC_CAT_Z:
- | stw PC, SAVE_PC
- | mr CARG1, L
- | srwi CARG3, CARG3, 3
- | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
- | // Returns NULL (finished) or TValue * (metamethod).
- | cmplwi CRET1, 0
- | lwz BASE, L->base
- | bne ->vmeta_binop
- | evlddx TMP0, BASE, SAVE0 // Copy result from RB to RA.
- | evstddx TMP0, BASE, RA
- | ins_next
- break;
-
- /* -- Constant ops ------------------------------------------------------ */
-
- case BC_KSTR:
- | // RA = dst*8, RD = str_const*8 (~)
- | ins_next1
- | srwi TMP1, RD, 1
- | subfic TMP1, TMP1, -4
- | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
- | evmergelo TMP0, TISSTR, TMP0
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_KCDATA:
- |.if FFI
- | // RA = dst*8, RD = cdata_const*8 (~)
- | ins_next1
- | srwi TMP1, RD, 1
- | subfic TMP1, TMP1, -4
- | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
- | li TMP2, LJ_TCDATA
- | evmergelo TMP0, TMP2, TMP0
- | evstddx TMP0, BASE, RA
- | ins_next2
- |.endif
- break;
- case BC_KSHORT:
- | // RA = dst*8, RD = int16_literal*8
- | srwi TMP1, RD, 3
- | extsh TMP1, TMP1
- | ins_next1
- | efdcfsi TMP0, TMP1
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_KNUM:
- | // RA = dst*8, RD = num_const*8
- | evlddx TMP0, KBASE, RD
- | ins_next1
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_KPRI:
- | // RA = dst*8, RD = primitive_type*8 (~)
- | srwi TMP1, RD, 3
- | not TMP0, TMP1
- | ins_next1
- | stwx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_KNIL:
- | // RA = base*8, RD = end*8
- | evstddx TISNIL, BASE, RA
- | addi RA, RA, 8
- |1:
- | evstddx TISNIL, BASE, RA
- | cmpw RA, RD
- | addi RA, RA, 8
- | blt <1
- | ins_next_
- break;
-
- /* -- Upvalue and function ops ------------------------------------------ */
-
- case BC_UGET:
- | // RA = dst*8, RD = uvnum*8
- | ins_next1
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RD, RD, 1
- | addi RD, RD, offsetof(GCfuncL, uvptr)
- | lwzx UPVAL:RB, LFUNC:RB, RD
- | lwz TMP1, UPVAL:RB->v
- | evldd TMP0, 0(TMP1)
- | evstddx TMP0, BASE, RA
- | ins_next2
- break;
- case BC_USETV:
- | // RA = uvnum*8, RD = src*8
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RA, RA, 1
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | evlddx TMP1, BASE, RD
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | lbz TMP3, UPVAL:RB->marked
- | lwz CARG2, UPVAL:RB->v
- | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
- | lbz TMP0, UPVAL:RB->closed
- | evmergehi TMP2, TMP1, TMP1
- | evstdd TMP1, 0(CARG2)
- | cmplwi cr1, TMP0, 0
- | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
- | subi TMP2, TMP2, (LJ_TISNUM+1)
- | bne >2 // Upvalue is closed and black?
- |1:
- | ins_next
- |
- |2: // Check if new value is collectable.
- | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1)
- | bge <1 // tvisgcv(v)
- | lbz TMP3, GCOBJ:TMP1->gch.marked
- | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
- | la CARG1, GG_DISP2G(DISPATCH)
- | // Crossed a write barrier. Move the barrier forward.
- | beq <1
- | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- | b <1
- break;
- case BC_USETS:
- | // RA = uvnum*8, RD = str_const*8 (~)
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi TMP1, RD, 1
- | srwi RA, RA, 1
- | subfic TMP1, TMP1, -4
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | evmergelo STR:TMP1, TISSTR, STR:TMP1
- | lbz TMP3, UPVAL:RB->marked
- | lwz CARG2, UPVAL:RB->v
- | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
- | lbz TMP3, STR:TMP1->marked
- | lbz TMP2, UPVAL:RB->closed
- | evstdd STR:TMP1, 0(CARG2)
- | bne >2
- |1:
- | ins_next
- |
- |2: // Check if string is white and ensure upvalue is closed.
- | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
- | cmplwi cr1, TMP2, 0
- | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
- | la CARG1, GG_DISP2G(DISPATCH)
- | // Crossed a write barrier. Move the barrier forward.
- | beq <1
- | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
- | b <1
- break;
- case BC_USETN:
- | // RA = uvnum*8, RD = num_const*8
- | ins_next1
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RA, RA, 1
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | evlddx TMP0, KBASE, RD
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | lwz TMP1, UPVAL:RB->v
- | evstdd TMP0, 0(TMP1)
- | ins_next2
- break;
- case BC_USETP:
- | // RA = uvnum*8, RD = primitive_type*8 (~)
- | ins_next1
- | lwz LFUNC:RB, FRAME_FUNC(BASE)
- | srwi RA, RA, 1
- | addi RA, RA, offsetof(GCfuncL, uvptr)
- | srwi TMP0, RD, 3
- | lwzx UPVAL:RB, LFUNC:RB, RA
- | not TMP0, TMP0
- | lwz TMP1, UPVAL:RB->v
- | stw TMP0, 0(TMP1)
- | ins_next2
- break;
-
- case BC_UCLO:
- | // RA = level*8, RD = target
- | lwz TMP1, L->openupval
- | branch_RD // Do this first since RD is not saved.
- | stw BASE, L->base
- | cmplwi TMP1, 0
- | mr CARG1, L
- | beq >1
- | add CARG2, BASE, RA
- | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
- | lwz BASE, L->base
- |1:
- | ins_next
- break;
-
- case BC_FNEW:
- | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
- | srwi TMP1, RD, 1
- | stw BASE, L->base
- | subfic TMP1, TMP1, -4
- | stw PC, SAVE_PC
- | lwzx CARG2, KBASE, TMP1 // KBASE-4-proto_const*4
- | mr CARG1, L
- | lwz CARG3, FRAME_FUNC(BASE)
- | // (lua_State *L, GCproto *pt, GCfuncL *parent)
- | bl extern lj_func_newL_gc
- | // Returns GCfuncL *.
- | lwz BASE, L->base
- | evmergelo LFUNC:CRET1, TISFUNC, LFUNC:CRET1
- | evstddx LFUNC:CRET1, BASE, RA
- | ins_next
- break;
-
- /* -- Table ops --------------------------------------------------------- */
-
- case BC_TNEW:
- case BC_TDUP:
- | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
- | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
- | mr CARG1, L
- | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
- | stw BASE, L->base
- | cmplw TMP0, TMP1
- | stw PC, SAVE_PC
- | bge >5
- |1:
- if (op == BC_TNEW) {
- | rlwinm CARG2, RD, 29, 21, 31
- | rlwinm CARG3, RD, 18, 27, 31
- | cmpwi CARG2, 0x7ff
- | li TMP1, 0x801
- | iseleq CARG2, TMP1, CARG2
- | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
- | // Returns Table *.
- } else {
- | srwi TMP1, RD, 1
- | subfic TMP1, TMP1, -4
- | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
- | bl extern lj_tab_dup // (lua_State *L, Table *kt)
- | // Returns Table *.
- }
- | lwz BASE, L->base
- | evmergelo TAB:CRET1, TISTAB, TAB:CRET1
- | evstddx TAB:CRET1, BASE, RA
- | ins_next
- |5:
- | mr SAVE0, RD
- | bl extern lj_gc_step_fixtop // (lua_State *L)
- | mr RD, SAVE0
- | mr CARG1, L
- | b <1
- break;
-
- case BC_GGET:
- | // RA = dst*8, RD = str_const*8 (~)
- case BC_GSET:
- | // RA = src*8, RD = str_const*8 (~)
- | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
- | srwi TMP1, RD, 1
- | lwz TAB:RB, LFUNC:TMP2->env
- | subfic TMP1, TMP1, -4
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- if (op == BC_GGET) {
- | b ->BC_TGETS_Z
- } else {
- | b ->BC_TSETS_Z
- }
- break;
-
- case BC_TGETV:
- | // RA = dst*8, RB = table*8, RC = key*8
- | evlddx TAB:RB, BASE, RB
- | evlddx RC, BASE, RC
- | checktab TAB:RB
- | checkfail ->vmeta_tgetv
- | checknum RC
- | checkfail >5
- | // Convert number key to integer
- | efdctsi TMP2, RC
- | lwz TMP0, TAB:RB->asize
- | efdcfsi TMP1, TMP2
- | cmplw cr0, TMP0, TMP2
- | efdcmpeq cr1, RC, TMP1
- | lwz TMP1, TAB:RB->array
- | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
- | slwi TMP2, TMP2, 3
- | ble ->vmeta_tgetv // Integer key and in array part?
- | evlddx TMP1, TMP1, TMP2
- | checknil TMP1
- | checkok >2
- |1:
- | evstddx TMP1, BASE, RA
- | ins_next
- |
- |2: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP0, TAB:TMP2->nomm
- | andi. TMP0, TMP0, 1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetv
- |
- |5:
- | checkstr STR:RC // String key?
- | checkok ->BC_TGETS_Z
- | b ->vmeta_tgetv
- break;
- case BC_TGETS:
- | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
- | evlddx TAB:RB, BASE, RB
- | srwi TMP1, RC, 1
- | checktab TAB:RB
- | subfic TMP1, TMP1, -4
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- | checkfail ->vmeta_tgets1
- |->BC_TGETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
- | lwz TMP0, TAB:RB->hmask
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | evmergelo STR:RC, TISSTR, STR:RC
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | evldd TMP0, NODE:TMP2->key
- | evldd TMP1, NODE:TMP2->val
- | evcmpeq TMP0, STR:RC
- | checkanyfail >4
- | checknil TMP1
- | checkok >5 // Key found, but nil value?
- |3:
- | evstddx TMP1, BASE, RA
- | ins_next
- |
- |4: // Follow hash chain.
- | lwz NODE:TMP2, NODE:TMP2->next
- | cmplwi NODE:TMP2, 0
- | bne <1
- | // End of hash chain: key not found, nil result.
- | evmr TMP1, TISNIL
- |
- |5: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <3 // No metatable: done.
- | lbz TMP0, TAB:TMP2->nomm
- | andi. TMP0, TMP0, 1<<MM_index
- | bne <3 // 'no __index' flag set: done.
- | b ->vmeta_tgets
- break;
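BC_TGETS_Z above addresses the hash slot as node + idx*24, computing the byte offset as idx*32 - idx*8 to avoid a multiply, and then walks the ->next chain comparing the tagged key. A C sketch of that lookup; Node and Table are illustrative stand-ins whose 24-byte stride happens to match the assembly's, not LuaJIT's real layout.

#include <stdint.h>
#include <stddef.h>

typedef struct Node {
  uint64_t key, val;              /* stand-ins for the two 8-byte TValues */
  struct Node *next;              /* hash chain link */
} Node;                           /* sizeof is 24 here, matching idx*24 */

typedef struct { Node *node; uint32_t hmask; } Table;

static const Node *hash_lookup_sketch(const Table *t, uint64_t taggedkey,
                                      uint32_t strhash)
{
  uint32_t idx = strhash & t->hmask;                      /* hmask = 2^n - 1 */
  size_t ofs = ((size_t)idx << 5) - ((size_t)idx << 3);   /* idx*32 - idx*8 */
  const Node *n = (const Node *)((const char *)t->node + ofs);
  do {
    if (n->key == taggedkey) return n;                    /* key found */
    n = n->next;
  } while (n != NULL);
  return NULL;                                            /* absent: nil result */
}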
- case BC_TGETB:
- | // RA = dst*8, RB = table*8, RC = index*8
- | evlddx TAB:RB, BASE, RB
- | srwi TMP0, RC, 3
- | checktab TAB:RB
- | checkfail ->vmeta_tgetb
- | lwz TMP1, TAB:RB->asize
- | lwz TMP2, TAB:RB->array
- | cmplw TMP0, TMP1
- | bge ->vmeta_tgetb
- | evlddx TMP1, TMP2, RC
- | checknil TMP1
- | checkok >5
- |1:
- | ins_next1
- | evstddx TMP1, BASE, RA
- | ins_next2
- |
- |5: // Check for __index if table value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP2, TAB:TMP2->nomm
- | andi. TMP2, TMP2, 1<<MM_index
- | bne <1 // 'no __index' flag set: done.
- | b ->vmeta_tgetb // Caveat: preserve TMP0!
- break;
-
- case BC_TSETV:
- | // RA = src*8, RB = table*8, RC = key*8
- | evlddx TAB:RB, BASE, RB
- | evlddx RC, BASE, RC
- | checktab TAB:RB
- | checkfail ->vmeta_tsetv
- | checknum RC
- | checkfail >5
- | // Convert number key to integer
- | efdctsi TMP2, RC
- | evlddx SAVE0, BASE, RA
- | lwz TMP0, TAB:RB->asize
- | efdcfsi TMP1, TMP2
- | cmplw cr0, TMP0, TMP2
- | efdcmpeq cr1, RC, TMP1
- | lwz TMP1, TAB:RB->array
- | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
- | slwi TMP0, TMP2, 3
- | ble ->vmeta_tsetv // Integer key and in array part?
- | lbz TMP3, TAB:RB->marked
- | evlddx TMP2, TMP1, TMP0
- | checknil TMP2
- | checkok >3
- |1:
- | andi. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
- | evstddx SAVE0, TMP1, TMP0
- | bne >7
- |2:
- | ins_next
- |
- |3: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP2, TAB:RB->metatable
- | cmplwi TAB:TMP2, 0
- | beq <1 // No metatable: done.
- | lbz TMP2, TAB:TMP2->nomm
- | andi. TMP2, TMP2, 1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | b ->vmeta_tsetv
- |
- |5:
- | checkstr STR:RC // String key?
- | checkok ->BC_TSETS_Z
- | b ->vmeta_tsetv
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <2
- break;
- case BC_TSETS:
- | // RA = src*8, RB = table*8, RC = str_const*8 (~)
- | evlddx TAB:RB, BASE, RB
- | srwi TMP1, RC, 1
- | checktab TAB:RB
- | subfic TMP1, TMP1, -4
- | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
- | checkfail ->vmeta_tsets1
- |->BC_TSETS_Z:
- | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
- | lwz TMP0, TAB:RB->hmask
- | lwz TMP1, STR:RC->hash
- | lwz NODE:TMP2, TAB:RB->node
- | evmergelo STR:RC, TISSTR, STR:RC
- | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
- | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
- | evlddx SAVE0, BASE, RA
- | slwi TMP0, TMP1, 5
- | slwi TMP1, TMP1, 3
- | sub TMP1, TMP0, TMP1
- | lbz TMP3, TAB:RB->marked
- | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
- |1:
- | evldd TMP0, NODE:TMP2->key
- | evldd TMP1, NODE:TMP2->val
- | evcmpeq TMP0, STR:RC
- | checkanyfail >5
- | checknil TMP1
- | checkok >4 // Key found, but nil value?
- |2:
- | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | evstdd SAVE0, NODE:TMP2->val
- | bne >7
- |3:
- | ins_next
- |
- |4: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP1, TAB:RB->metatable
- | cmplwi TAB:TMP1, 0
- | beq <2 // No metatable: done.
- | lbz TMP0, TAB:TMP1->nomm
- | andi. TMP0, TMP0, 1<<MM_newindex
- | bne <2 // 'no __newindex' flag set: done.
- | b ->vmeta_tsets
- |
- |5: // Follow hash chain.
- | lwz NODE:TMP2, NODE:TMP2->next
- | cmplwi NODE:TMP2, 0
- | bne <1
- | // End of hash chain: key not found, add a new one.
- |
- | // But check for __newindex first.
- | lwz TAB:TMP1, TAB:RB->metatable
- | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
- | stw PC, SAVE_PC
- | mr CARG1, L
- | cmplwi TAB:TMP1, 0
- | stw BASE, L->base
- | beq >6 // No metatable: continue.
- | lbz TMP0, TAB:TMP1->nomm
- | andi. TMP0, TMP0, 1<<MM_newindex
- | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
- |6:
- | mr CARG2, TAB:RB
- | evstdd STR:RC, 0(CARG3)
- | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
- | // Returns TValue *.
- | lwz BASE, L->base
- | evstdd SAVE0, 0(CRET1)
- | b <3 // No 2nd write barrier needed.
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <3
- break;
- case BC_TSETB:
- | // RA = src*8, RB = table*8, RC = index*8
- | evlddx TAB:RB, BASE, RB
- | srwi TMP0, RC, 3
- | checktab TAB:RB
- | checkfail ->vmeta_tsetb
- | lwz TMP1, TAB:RB->asize
- | lwz TMP2, TAB:RB->array
- | lbz TMP3, TAB:RB->marked
- | cmplw TMP0, TMP1
- | evlddx SAVE0, BASE, RA
- | bge ->vmeta_tsetb
- | evlddx TMP1, TMP2, RC
- | checknil TMP1
- | checkok >5
- |1:
- | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- | evstddx SAVE0, TMP2, RC
- | bne >7
- |2:
- | ins_next
- |
- |5: // Check for __newindex if previous value is nil.
- | lwz TAB:TMP1, TAB:RB->metatable
- | cmplwi TAB:TMP1, 0
- | beq <1 // No metatable: done.
- | lbz TMP1, TAB:TMP1->nomm
- | andi. TMP1, TMP1, 1<<MM_newindex
- | bne <1 // 'no __newindex' flag set: done.
- | b ->vmeta_tsetb // Caveat: preserve TMP0!
- |
- |7: // Possible table write barrier for the value. Skip valiswhite check.
- | barrierback TAB:RB, TMP3, TMP0
- | b <2
- break;
-
- case BC_TSETM:
- | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
- | add RA, BASE, RA
- |1:
- | add TMP3, KBASE, RD
- | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
- | addic. TMP0, MULTRES, -8
- | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
- | srwi CARG3, TMP0, 3
- | beq >4 // Nothing to copy?
- | add CARG3, CARG3, TMP3
- | lwz TMP2, TAB:CARG2->asize
- | slwi TMP1, TMP3, 3
- | lbz TMP3, TAB:CARG2->marked
- | cmplw CARG3, TMP2
- | add TMP2, RA, TMP0
- | lwz TMP0, TAB:CARG2->array
- | bgt >5
- | add TMP1, TMP1, TMP0
- | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
- |3: // Copy result slots to table.
- | evldd TMP0, 0(RA)
- | addi RA, RA, 8
- | cmpw cr1, RA, TMP2
- | evstdd TMP0, 0(TMP1)
- | addi TMP1, TMP1, 8
- | blt cr1, <3
- | bne >7
- |4:
- | ins_next
- |
- |5: // Need to resize array part.
- | stw BASE, L->base
- | mr CARG1, L
- | stw PC, SAVE_PC
- | mr SAVE0, RD
- | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
- | // Must not reallocate the stack.
- | mr RD, SAVE0
- | b <1
- |
- |7: // Possible table write barrier for any value. Skip valiswhite check.
- | barrierback TAB:CARG2, TMP3, TMP0
- | b <4
- break;
-
- /* -- Calls and vararg handling ----------------------------------------- */
-
- case BC_CALLM:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
- | add NARGS8:RC, NARGS8:RC, MULTRES
- | // Fall through. Assumes BC_CALL follows.
- break;
- case BC_CALL:
- | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
- | evlddx LFUNC:RB, BASE, RA
- | mr TMP2, BASE
- | add BASE, BASE, RA
- | subi NARGS8:RC, NARGS8:RC, 8
- | checkfunc LFUNC:RB
- | addi BASE, BASE, 8
- | checkfail ->vmeta_call
- | ins_call
- break;
-
- case BC_CALLMT:
- | // RA = base*8, (RB = 0,) RC = extra_nargs*8
- | add NARGS8:RC, NARGS8:RC, MULTRES
- | // Fall through. Assumes BC_CALLT follows.
- break;
- case BC_CALLT:
- | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
- | evlddx LFUNC:RB, BASE, RA
- | add RA, BASE, RA
- | lwz TMP1, FRAME_PC(BASE)
- | subi NARGS8:RC, NARGS8:RC, 8
- | checkfunc LFUNC:RB
- | addi RA, RA, 8
- | checkfail ->vmeta_callt
- |->BC_CALLT_Z:
- | andi. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
- | lbz TMP3, LFUNC:RB->ffid
- | xori TMP2, TMP1, FRAME_VARG
- | cmplwi cr1, NARGS8:RC, 0
- | bne >7
- |1:
- | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
- | li TMP2, 0
- | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
- | beq cr1, >3
- |2:
- | addi TMP3, TMP2, 8
- | evlddx TMP0, RA, TMP2
- | cmplw cr1, TMP3, NARGS8:RC
- | evstddx TMP0, BASE, TMP2
- | mr TMP2, TMP3
- | bne cr1, <2
- |3:
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
- | beq >5
- |4:
- | ins_callt
- |
- |5: // Tailcall to a fast function with a Lua frame below.
- | lwz INS, -4(TMP1)
- | decode_RA8 RA, INS
- | sub TMP1, BASE, RA
- | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
- | lwz TMP1, LFUNC:TMP1->pc
- | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
- | b <4
- |
- |7: // Tailcall from a vararg function.
- | andi. TMP0, TMP2, FRAME_TYPEP
- | bne <1 // Vararg frame below?
- | sub BASE, BASE, TMP2 // Relocate BASE down.
- | lwz TMP1, FRAME_PC(BASE)
- | andi. TMP0, TMP1, FRAME_TYPE
- | b <1
- break;
-
- case BC_ITERC:
- | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
- | subi RA, RA, 24 // evldd doesn't support neg. offsets.
- | mr TMP2, BASE
- | evlddx LFUNC:RB, BASE, RA
- | add BASE, BASE, RA
- | evldd TMP0, 8(BASE)
- | evldd TMP1, 16(BASE)
- | evstdd LFUNC:RB, 24(BASE) // Copy callable.
- | checkfunc LFUNC:RB
- | evstdd TMP0, 32(BASE) // Copy state.
- | li NARGS8:RC, 16 // Iterators get 2 arguments.
- | evstdd TMP1, 40(BASE) // Copy control var.
- | addi BASE, BASE, 32
- | checkfail ->vmeta_call
- | ins_call
- break;
-
- case BC_ITERN:
- | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
- |.if JIT
- | // NYI: add hotloop, record BC_ITERN.
- |.endif
- | add RA, BASE, RA
- | lwz TAB:RB, -12(RA)
- | lwz RC, -4(RA) // Get index from control var.
- | lwz TMP0, TAB:RB->asize
- | lwz TMP1, TAB:RB->array
- | addi PC, PC, 4
- |1: // Traverse array part.
- | cmplw RC, TMP0
- | slwi TMP3, RC, 3
- | bge >5 // Index points after array part?
- | evlddx TMP2, TMP1, TMP3
- | checknil TMP2
- | lwz INS, -4(PC)
- | checkok >4
- | efdcfsi TMP0, RC
- | addi RC, RC, 1
- | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
- | evstdd TMP2, 8(RA)
- | decode_RD4 TMP1, INS
- | stw RC, -4(RA) // Update control var.
- | add PC, TMP1, TMP3
- | evstdd TMP0, 0(RA)
- |3:
- | ins_next
- |
- |4: // Skip holes in array part.
- | addi RC, RC, 1
- | b <1
- |
- |5: // Traverse hash part.
- | lwz TMP1, TAB:RB->hmask
- | sub RC, RC, TMP0
- | lwz TMP2, TAB:RB->node
- |6:
- | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
- | slwi TMP3, RC, 5
- | bgt <3
- | slwi RB, RC, 3
- | sub TMP3, TMP3, RB
- | evlddx RB, TMP2, TMP3
- | add NODE:TMP3, TMP2, TMP3
- | checknil RB
- | lwz INS, -4(PC)
- | checkok >7
- | evldd TMP3, NODE:TMP3->key
- | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
- | evstdd RB, 8(RA)
- | add RC, RC, TMP0
- | decode_RD4 TMP1, INS
- | evstdd TMP3, 0(RA)
- | addi RC, RC, 1
- | add PC, TMP1, TMP2
- | stw RC, -4(RA) // Update control var.
- | b <3
- |
- |7: // Skip holes in hash part.
- | addi RC, RC, 1
- | b <6
- break;
-
- case BC_ISNEXT:
- | // RA = base*8, RD = target (points to ITERN)
- | add RA, BASE, RA
- | li TMP2, -24
- | evlddx CFUNC:TMP1, RA, TMP2
- | lwz TMP2, -16(RA)
- | lwz TMP3, -8(RA)
- | evmergehi TMP0, CFUNC:TMP1, CFUNC:TMP1
- | cmpwi cr0, TMP2, LJ_TTAB
- | cmpwi cr1, TMP0, LJ_TFUNC
- | cmpwi cr6, TMP3, LJ_TNIL
- | bne cr1, >5
- | lbz TMP1, CFUNC:TMP1->ffid
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
- | cmpwi cr7, TMP1, FF_next_N
- | srwi TMP0, RD, 1
- | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
- | add TMP3, PC, TMP0
- | bne cr0, >5
- | lus TMP1, 0xfffe
- | ori TMP1, TMP1, 0x7fff
- | stw ZERO, -4(RA) // Initialize control var.
- | stw TMP1, -8(RA)
- | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
- |1:
- | ins_next
- |5: // Despecialize bytecode if any of the checks fail.
- | li TMP0, BC_JMP
- | li TMP1, BC_ITERC
- | stb TMP0, -1(PC)
- | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
- | stb TMP1, 3(PC)
- | b <1
- break;
-
- case BC_VARG:
- | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
- | lwz TMP0, FRAME_PC(BASE)
- | add RC, BASE, RC
- | add RA, BASE, RA
- | addi RC, RC, FRAME_VARG
- | add TMP2, RA, RB
- | subi TMP3, BASE, 8 // TMP3 = vtop
- | sub RC, RC, TMP0 // RC = vbase
- | // Note: RC may now be even _above_ BASE if nargs was < numparams.
- | cmplwi cr1, RB, 0
- | sub. TMP1, TMP3, RC
- | beq cr1, >5 // Copy all varargs?
- | subi TMP2, TMP2, 16
- | ble >2 // No vararg slots?
- |1: // Copy vararg slots to destination slots.
- | evldd TMP0, 0(RC)
- | addi RC, RC, 8
- | evstdd TMP0, 0(RA)
- | cmplw RA, TMP2
- | cmplw cr1, RC, TMP3
- | bge >3 // All destination slots filled?
- | addi RA, RA, 8
- | blt cr1, <1 // More vararg slots?
- |2: // Fill up remainder with nil.
- | evstdd TISNIL, 0(RA)
- | cmplw RA, TMP2
- | addi RA, RA, 8
- | blt <2
- |3:
- | ins_next
- |
- |5: // Copy all varargs.
- | lwz TMP0, L->maxstack
- | li MULTRES, 8 // MULTRES = (0+1)*8
- | ble <3 // No vararg slots?
- | add TMP2, RA, TMP1
- | cmplw TMP2, TMP0
- | addi MULTRES, TMP1, 8
- | bgt >7
- |6:
- | evldd TMP0, 0(RC)
- | addi RC, RC, 8
- | evstdd TMP0, 0(RA)
- | cmplw RC, TMP3
- | addi RA, RA, 8
- | blt <6 // More vararg slots?
- | b <3
- |
- |7: // Grow stack for varargs.
- | mr CARG1, L
- | stw RA, L->top
- | sub SAVE0, RC, BASE // Need delta, because BASE may change.
- | stw BASE, L->base
- | sub RA, RA, BASE
- | stw PC, SAVE_PC
- | srwi CARG2, TMP1, 3
- | bl extern lj_state_growstack // (lua_State *L, int n)
- | lwz BASE, L->base
- | add RA, BASE, RA
- | add RC, BASE, SAVE0
- | subi TMP3, BASE, 8
- | b <6
- break;
-
- /* -- Returns ----------------------------------------------------------- */
-
- case BC_RETM:
- | // RA = results*8, RD = extra_nresults*8
- | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
- | // Fall through. Assumes BC_RET follows.
- break;
-
- case BC_RET:
- | // RA = results*8, RD = (nresults+1)*8
- | lwz PC, FRAME_PC(BASE)
- | add RA, BASE, RA
- | mr MULTRES, RD
- |1:
- | andi. TMP0, PC, FRAME_TYPE
- | xori TMP1, PC, FRAME_VARG
- | bne ->BC_RETV_Z
- |
- |->BC_RET_Z:
- | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
- | lwz INS, -4(PC)
- | cmpwi RD, 8
- | subi TMP2, BASE, 8
- | subi RC, RD, 8
- | decode_RB8 RB, INS
- | beq >3
- | li TMP1, 0
- |2:
- | addi TMP3, TMP1, 8
- | evlddx TMP0, RA, TMP1
- | cmpw TMP3, RC
- | evstddx TMP0, TMP2, TMP1
- | beq >3
- | addi TMP1, TMP3, 8
- | evlddx TMP0, RA, TMP3
- | cmpw TMP1, RC
- | evstddx TMP0, TMP2, TMP3
- | bne <2
- |3:
- |5:
- | cmplw RB, RD
- | decode_RA8 RA, INS
- | bgt >6
- | sub BASE, TMP2, RA
- | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
- | ins_next1
- | lwz TMP1, LFUNC:TMP1->pc
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | ins_next2
- |
- |6: // Fill up results with nil.
- | subi TMP1, RD, 8
- | addi RD, RD, 8
- | evstddx TISNIL, TMP2, TMP1
- | b <5
- |
- |->BC_RETV_Z: // Non-standard return case.
- | andi. TMP2, TMP1, FRAME_TYPEP
- | bne ->vm_return
- | // Return from vararg function: relocate BASE down.
- | sub BASE, BASE, TMP1
- | lwz PC, FRAME_PC(BASE)
- | b <1
- break;
-
- case BC_RET0: case BC_RET1:
- | // RA = results*8, RD = (nresults+1)*8
- | lwz PC, FRAME_PC(BASE)
- | add RA, BASE, RA
- | mr MULTRES, RD
- | andi. TMP0, PC, FRAME_TYPE
- | xori TMP1, PC, FRAME_VARG
- | bne ->BC_RETV_Z
- |
- | lwz INS, -4(PC)
- | subi TMP2, BASE, 8
- | decode_RB8 RB, INS
- if (op == BC_RET1) {
- | evldd TMP0, 0(RA)
- | evstdd TMP0, 0(TMP2)
- }
- |5:
- | cmplw RB, RD
- | decode_RA8 RA, INS
- | bgt >6
- | sub BASE, TMP2, RA
- | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
- | ins_next1
- | lwz TMP1, LFUNC:TMP1->pc
- | lwz KBASE, PC2PROTO(k)(TMP1)
- | ins_next2
- |
- |6: // Fill up results with nil.
- | subi TMP1, RD, 8
- | addi RD, RD, 8
- | evstddx TISNIL, TMP2, TMP1
- | b <5
- break;
-
- /* -- Loops and branches ------------------------------------------------ */
-
- case BC_FORL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IFORL follows.
- break;
-
- case BC_JFORI:
- case BC_JFORL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_FORI:
- case BC_IFORL:
- | // RA = base*8, RD = target (after end of loop or start of loop)
- vk = (op == BC_IFORL || op == BC_JFORL);
- | add RA, BASE, RA
- | evldd TMP1, FORL_IDX*8(RA)
- | evldd TMP3, FORL_STEP*8(RA)
- | evldd TMP2, FORL_STOP*8(RA)
- if (!vk) {
- | evcmpgtu cr0, TMP1, TISNUM
- | evcmpgtu cr7, TMP3, TISNUM
- | evcmpgtu cr1, TMP2, TISNUM
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
- | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
- | blt ->vmeta_for
- }
- if (vk) {
- | efdadd TMP1, TMP1, TMP3
- | evstdd TMP1, FORL_IDX*8(RA)
- }
- | evcmpgts TMP3, TISNIL
- | evstdd TMP1, FORL_EXT*8(RA)
- | bge >2
- | efdcmpgt TMP1, TMP2
- |1:
- if (op != BC_JFORL) {
- | srwi RD, RD, 1
- | add RD, PC, RD
- if (op == BC_JFORI) {
- | addis PC, RD, -(BCBIAS_J*4 >> 16)
- } else {
- | addis RD, RD, -(BCBIAS_J*4 >> 16)
- }
- }
- if (op == BC_FORI) {
- | iselgt PC, RD, PC
- } else if (op == BC_IFORL) {
- | iselgt PC, PC, RD
- } else {
- | ble =>BC_JLOOP
- }
- | ins_next
- |2:
- | efdcmpgt TMP2, TMP1
- | b <1
- break;
-
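The FORI/IFORL pair above implements Lua's numeric for loop: FORI type-checks the three control slots and performs the initial bounds test, IFORL adds the step and re-tests, and the comparison direction follows the sign of the step. In plain C the loop semantics amount to:

static void numeric_for_sketch(double start, double stop, double step,
                               void (*body)(double))
{
  for (double i = start;
       (step >= 0.0) ? (i <= stop) : (i >= stop);
       i += step)
    body(i);
}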
- case BC_ITERL:
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_IITERL follows.
- break;
-
- case BC_JITERL:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IITERL:
- | // RA = base*8, RD = target
- | evlddx TMP1, BASE, RA
- | subi RA, RA, 8
- | checknil TMP1
- | checkok >1 // Stop if iterator returned nil.
- if (op == BC_JITERL) {
- | NYI
- } else {
- | branch_RD // Otherwise save control var + branch.
- | evstddx TMP1, BASE, RA
- }
- |1:
- | ins_next
- break;
-
- case BC_LOOP:
- | // RA = base*8, RD = target (loop extent)
- | // Note: RA/RD is only used by trace recorder to determine scope/extent
- | // This opcode does NOT jump; its only purpose is to detect a hot loop.
- |.if JIT
- | hotloop
- |.endif
- | // Fall through. Assumes BC_ILOOP follows.
- break;
-
- case BC_ILOOP:
- | // RA = base*8, RD = target (loop extent)
- | ins_next
- break;
-
- case BC_JLOOP:
- |.if JIT
- | NYI
- |.endif
- break;
-
- case BC_JMP:
- | // RA = base*8 (only used by trace recorder), RD = target
- | branch_RD
- | ins_next
- break;
-
- /* -- Function headers -------------------------------------------------- */
-
- case BC_FUNCF:
- |.if JIT
- | hotcall
- |.endif
- case BC_FUNCV: /* NYI: compiled vararg functions. */
- | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
- break;
-
- case BC_JFUNCF:
-#if !LJ_HASJIT
- break;
-#endif
- case BC_IFUNCF:
- | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
- | lwz TMP2, L->maxstack
- | lbz TMP1, -4+PC2PROTO(numparams)(PC)
- | lwz KBASE, -4+PC2PROTO(k)(PC)
- | cmplw RA, TMP2
- | slwi TMP1, TMP1, 3
- | bgt ->vm_growstack_l
- | ins_next1
- |2:
- | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
- | ble >3
- if (op == BC_JFUNCF) {
- | NYI
- } else {
- | ins_next2
- }
- |
- |3: // Clear missing parameters.
- | evstddx TISNIL, BASE, NARGS8:RC
- | addi NARGS8:RC, NARGS8:RC, 8
- | b <2
- break;
-
- case BC_JFUNCV:
-#if !LJ_HASJIT
- break;
-#endif
- | NYI // NYI: compiled vararg functions
- break; /* NYI: compiled vararg functions. */
-
- case BC_IFUNCV:
- | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
- | lwz TMP2, L->maxstack
- | add TMP1, BASE, RC
- | add TMP0, RA, RC
- | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
- | addi TMP3, RC, 8+FRAME_VARG
- | lwz KBASE, -4+PC2PROTO(k)(PC)
- | cmplw TMP0, TMP2
- | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
- | bge ->vm_growstack_l
- | lbz TMP2, -4+PC2PROTO(numparams)(PC)
- | mr RA, BASE
- | mr RC, TMP1
- | ins_next1
- | cmpwi TMP2, 0
- | addi BASE, TMP1, 8
- | beq >3
- |1:
- | cmplw RA, RC // Less args than parameters?
- | evldd TMP0, 0(RA)
- | bge >4
- | evstdd TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
- | addi RA, RA, 8
- |2:
- | addic. TMP2, TMP2, -1
- | evstdd TMP0, 8(TMP1)
- | addi TMP1, TMP1, 8
- | bne <1
- |3:
- | ins_next2
- |
- |4: // Clear missing parameters.
- | evmr TMP0, TISNIL
- | b <2
- break;
-
- case BC_FUNCC:
- case BC_FUNCCW:
- | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
- if (op == BC_FUNCC) {
- | lwz TMP3, CFUNC:RB->f
- } else {
- | lwz TMP3, DISPATCH_GL(wrapf)(DISPATCH)
- }
- | add TMP1, RA, NARGS8:RC
- | lwz TMP2, L->maxstack
- | add RC, BASE, NARGS8:RC
- | stw BASE, L->base
- | cmplw TMP1, TMP2
- | stw RC, L->top
- | li_vmstate C
- | mtctr TMP3
- if (op == BC_FUNCCW) {
- | lwz CARG2, CFUNC:RB->f
- }
- | mr CARG1, L
- | bgt ->vm_growstack_c // Need to grow stack.
- | st_vmstate
- | bctrl // (lua_State *L [, lua_CFunction f])
- | // Returns nresults.
- | lwz TMP1, L->top
- | slwi RD, CRET1, 3
- | lwz BASE, L->base
- | li_vmstate INTERP
- | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
- | sub RA, TMP1, RD // RA = L->top - nresults*8
- | st_vmstate
- | b ->vm_returnc
- break;
-
- /* ---------------------------------------------------------------------- */
-
- default:
- fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
- exit(2);
- break;
- }
-}
-
-static int build_backend(BuildCtx *ctx)
-{
- int op;
-
- dasm_growpc(Dst, BC__MAX);
-
- build_subroutines(ctx);
-
- |.code_op
- for (op = 0; op < BC__MAX; op++)
- build_ins(ctx, (BCOp)op, op);
-
- return BC__MAX;
-}
-
-/* Emit pseudo frame-info for all assembler functions. */
-static void emit_asm_debug(BuildCtx *ctx)
-{
- int i;
- switch (ctx->mode) {
- case BUILD_elfasm:
- fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
- fprintf(ctx->fp,
- ".Lframe0:\n"
- "\t.long .LECIE0-.LSCIE0\n"
- ".LSCIE0:\n"
- "\t.long 0xffffffff\n"
- "\t.byte 0x1\n"
- "\t.string \"\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 65\n"
- "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE0:\n\n");
- fprintf(ctx->fp,
- ".LSFDE0:\n"
- "\t.long .LEFDE0-.LASFDE0\n"
- ".LASFDE0:\n"
- "\t.long .Lframe0\n"
- "\t.long .Lbegin\n"
- "\t.long %d\n"
- "\t.byte 0xe\n\t.uleb128 %d\n"
- "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
- "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
- (int)ctx->codesz, CFRAME_SIZE);
- for (i = 14; i <= 31; i++)
- fprintf(ctx->fp,
- "\t.byte %d\n\t.uleb128 %d\n"
- "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
- 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE0:\n\n");
- fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
- fprintf(ctx->fp,
- ".Lframe1:\n"
- "\t.long .LECIE1-.LSCIE1\n"
- ".LSCIE1:\n"
- "\t.long 0\n"
- "\t.byte 0x1\n"
- "\t.string \"zPR\"\n"
- "\t.uleb128 0x1\n"
- "\t.sleb128 -4\n"
- "\t.byte 65\n"
- "\t.uleb128 6\n" /* augmentation length */
- "\t.byte 0x1b\n" /* pcrel|sdata4 */
- "\t.long lj_err_unwind_dwarf-.\n"
- "\t.byte 0x1b\n" /* pcrel|sdata4 */
- "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
- "\t.align 2\n"
- ".LECIE1:\n\n");
- fprintf(ctx->fp,
- ".LSFDE1:\n"
- "\t.long .LEFDE1-.LASFDE1\n"
- ".LASFDE1:\n"
- "\t.long .LASFDE1-.Lframe1\n"
- "\t.long .Lbegin-.\n"
- "\t.long %d\n"
- "\t.uleb128 0\n" /* augmentation length */
- "\t.byte 0xe\n\t.uleb128 %d\n"
- "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
- "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
- (int)ctx->codesz, CFRAME_SIZE);
- for (i = 14; i <= 31; i++)
- fprintf(ctx->fp,
- "\t.byte %d\n\t.uleb128 %d\n"
- "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
- 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
- fprintf(ctx->fp,
- "\t.align 2\n"
- ".LEFDE1:\n\n");
- break;
- default:
- break;
- }
-}
-
+|// Low-level VM code for PowerPC/e500 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch ppc
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r1 = sp, r2 and r13 = reserved and/or small data area ptr
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r14 // Base of current Lua stack frame.
+|.define KBASE, r15 // Constants of current Lua function.
+|.define PC, r16 // Next PC.
+|.define DISPATCH, r17 // Opcode dispatch table.
+|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
+|
+|// Constants for vectorized type-comparisons (hi+low GPR). C callee-save.
+|.define TISNUM, r22
+|.define TISSTR, r23
+|.define TISTAB, r24
+|.define TISFUNC, r25
+|.define TISNIL, r26
+|.define TOBIT, r27
+|.define ZERO, TOBIT // Zero in lo word.
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r20 // Callee-save.
+|.define RB, r10
+|.define RC, r11
+|.define RD, r12
+|.define INS, r7 // Overlaps CARG5.
+|
+|.define TMP0, r0
+|.define TMP1, r8
+|.define TMP2, r9
+|.define TMP3, r6 // Overlaps CARG4.
+|
+|// Saved temporaries.
+|.define SAVE0, r21
+|
+|// Calling conventions.
+|.define CARG1, r3
+|.define CARG2, r4
+|.define CARG3, r5
+|.define CARG4, r6 // Overlaps TMP3.
+|.define CARG5, r7 // Overlaps INS.
+|
+|.define CRET1, r3
+|.define CRET2, r4
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define SAVE_LR, 188(sp)
+|.define CFRAME_SPACE, 184 // Delta for sp.
+|// Back chain for sp: 184(sp) <-- sp entering interpreter
+|.define SAVE_r31, 176(sp) // 64 bit register saves.
+|.define SAVE_r30, 168(sp)
+|.define SAVE_r29, 160(sp)
+|.define SAVE_r28, 152(sp)
+|.define SAVE_r27, 144(sp)
+|.define SAVE_r26, 136(sp)
+|.define SAVE_r25, 128(sp)
+|.define SAVE_r24, 120(sp)
+|.define SAVE_r23, 112(sp)
+|.define SAVE_r22, 104(sp)
+|.define SAVE_r21, 96(sp)
+|.define SAVE_r20, 88(sp)
+|.define SAVE_r19, 80(sp)
+|.define SAVE_r18, 72(sp)
+|.define SAVE_r17, 64(sp)
+|.define SAVE_r16, 56(sp)
+|.define SAVE_r15, 48(sp)
+|.define SAVE_r14, 40(sp)
+|.define SAVE_CR, 36(sp)
+|.define UNUSED1, 32(sp)
+|.define SAVE_ERRF, 28(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 24(sp)
+|.define SAVE_CFRAME, 20(sp)
+|.define SAVE_L, 16(sp)
+|.define SAVE_PC, 12(sp)
+|.define SAVE_MULTRES, 8(sp)
+|// Next frame lr: 4(sp)
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
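+|// Note: SAVE_LR sits at 188(sp) = CFRAME_SPACE+4, i.e. 4 bytes above the sp
+|// the interpreter was entered with, so the return address lands in the
+|// caller's frame header as usual; a callee of the interpreter uses 4(sp).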
+|
+|.macro save_, reg; evstdd reg, SAVE_..reg; .endmacro
+|.macro rest_, reg; evldd reg, SAVE_..reg; .endmacro
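+|// save_/rest_ use evstdd/evldd, so the full 64-bit SPE GPRs (not just the
+|// low words) are written to and read back from the 8-byte SAVE_r* slots.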
+|
+|.macro saveregs
+| stwu sp, -CFRAME_SPACE(sp)
+| save_ r14; save_ r15; save_ r16; save_ r17; save_ r18; save_ r19
+| mflr r0; mfcr r12
+| save_ r20; save_ r21; save_ r22; save_ r23; save_ r24; save_ r25
+| stw r0, SAVE_LR; stw r12, SAVE_CR
+| save_ r26; save_ r27; save_ r28; save_ r29; save_ r30; save_ r31
+|.endmacro
+|
+|.macro restoreregs
+| lwz r0, SAVE_LR; lwz r12, SAVE_CR
+| rest_ r14; rest_ r15; rest_ r16; rest_ r17; rest_ r18; rest_ r19
+| mtlr r0; mtcrf 0x38, r12
+| rest_ r20; rest_ r21; rest_ r22; rest_ r23; rest_ r24; rest_ r25
+| rest_ r26; rest_ r27; rest_ r28; rest_ r29; rest_ r30; rest_ r31
+| addi sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|
+|//-----------------------------------------------------------------------
+|
+|// These basic macros should really be part of DynASM.
+|.macro srwi, rx, ry, n; rlwinm rx, ry, 32-n, n, 31; .endmacro
+|.macro slwi, rx, ry, n; rlwinm rx, ry, n, 0, 31-n; .endmacro
+|.macro rotlwi, rx, ry, n; rlwinm rx, ry, n, 0, 31; .endmacro
+|.macro rotlw, rx, ry, rn; rlwnm rx, ry, rn, 0, 31; .endmacro
+|.macro subi, rx, ry, i; addi rx, ry, -i; .endmacro
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; tw 4, sp, sp; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -4
+|
+|// Instruction decode.
+|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
+|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
+|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
+|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
+|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
+|
+|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
+|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
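+|// Example: decode_OP4 rotates INS left by 2 and masks bits 22..29, which
+|// extracts the opcode byte already multiplied by 4 -- the byte offset into
+|// the dispatch table of 4-byte handler addresses. The RA8/RB8/RC8/RD8 forms
+|// likewise yield the operands pre-multiplied by 8, the size of a stack slot.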
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP4 TMP1, INS
+| decode_RB8 RB, INS
+| decode_RD8 RD, INS
+| lwzx TMP0, DISPATCH, TMP1
+| decode_RA8 RA, INS
+| decode_RC8 RC, INS
+| mtctr TMP0
+| bctr
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
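+|// I.e. dispatch loads the next 32-bit bytecode word, pre-decodes all operands
+|// into RA/RB/RC/RD, indexes DISPATCH with OP*4 and branches to the handler
+|// through CTR.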
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lwz PC, LFUNC:RB->pc
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+| decode_OP4 TMP1, INS
+| decode_RA8 RA, INS
+| lwzx TMP0, DISPATCH, TMP1
+| add RA, RA, BASE
+| mtctr TMP0
+| bctr
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| stw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checknum, reg; evcmpltu reg, TISNUM; .endmacro
+|.macro checkstr, reg; evcmpeq reg, TISSTR; .endmacro
+|.macro checktab, reg; evcmpeq reg, TISTAB; .endmacro
+|.macro checkfunc, reg; evcmpeq reg, TISFUNC; .endmacro
+|.macro checknil, reg; evcmpeq reg, TISNIL; .endmacro
+|.macro checkok, label; blt label; .endmacro
+|.macro checkfail, label; bge label; .endmacro
+|.macro checkanyfail, label; bns label; .endmacro
+|.macro checkallok, label; bso label; .endmacro
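+|// Each evcmp* above sets cr0; checkok/checkfail test the high-word result,
+|// while checkanyfail/checkallok test the combined bits, so one compare can
+|// validate two type words merged into a single register (see .ffunc_nn).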
+|
+|.macro branch_RD
+| srwi TMP0, RD, 1
+| add PC, PC, TMP0
+| addis PC, PC, -(BCBIAS_J*4 >> 16)
+|.endmacro
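+|// RD arrives as (biased jump target)*8, so RD>>1 is the byte offset times 4;
+|// BCBIAS_J*4 is a multiple of 0x10000, hence a single addis removes the bias.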
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro hotloop
+| NYI
+|.endmacro
+|
+|.macro hotcall
+| NYI
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| // Assumes LJ_GC_BLACK is 0x04.
+| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
+| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| stb mark, tab->marked
+| stw tmp, tab->gclist
+|.endmacro
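+|// I.e. the table is turned gray again and pushed onto the gc.grayagain list,
+|// so the GC re-traverses it later instead of breaking the invariant.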
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi. TMP0, PC, FRAME_P
+ | evsplati TMP1, LJ_TTRUE
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | mr BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
+ |
+ |->vm_returnc:
+ | addi RD, RD, 8 // RD = (nresults+1)*8.
+ | andi. TMP0, PC, FRAME_TYPE
+ | cmpwi cr1, RD, 0
+ | li CRET1, LUA_YIELD
+ | beq cr1, ->vm_unwind_c_eh
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | cmpwi TMP0, FRAME_C
+ | rlwinm TMP2, PC, 0, 0, 28
+ | li_vmstate C
+ | sub TMP2, BASE, TMP2 // TMP2 = previous base.
+ | bne ->vm_returnp
+ |
+ | addic. TMP1, RD, -8
+ | stw TMP2, L->base
+ | lwz TMP2, SAVE_NRES
+ | subi BASE, BASE, 8
+ | st_vmstate
+ | slwi TMP2, TMP2, 3
+ | beq >2
+ |1:
+ | addic. TMP1, TMP1, -8
+ | evldd TMP0, 0(RA)
+ | addi RA, RA, 8
+ | evstdd TMP0, 0(BASE)
+ | addi BASE, BASE, 8
+ | bne <1
+ |
+ |2:
+ | cmpw TMP2, RD // More/fewer results wanted?
+ | bne >6
+ |3:
+ | stw BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lwz TMP0, SAVE_CFRAME // Restore previous C frame.
+ | li CRET1, 0 // Ok return status for vm_pcall.
+ | stw TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | blr
+ |
+ |6:
+ | ble >7 // Fewer results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | lwz TMP1, L->maxstack
+ | cmplw BASE, TMP1
+ | bge >8
+ | evstdd TISNIL, 0(BASE)
+ | addi RD, RD, 8
+ | addi BASE, BASE, 8
+ | b <2
+ |
+ |7: // Fewer results wanted.
+ | sub TMP0, RD, TMP2
+ | cmpwi TMP2, 0 // LUA_MULTRET+1 case?
+ | sub TMP0, BASE, TMP0 // Subtract the difference.
+ | iseleq BASE, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | stw BASE, L->top // Save current top held in BASE (yes).
+ | mr SAVE0, RD
+ | mr CARG2, TMP2
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz TMP2, SAVE_NRES
+ | mr RD, SAVE0
+ | slwi TMP2, TMP2, 3
+ | lwz BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mr sp, CARG1
+ | mr CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | lwz GL:TMP1, L->glref
+ | stw TMP0, GL:TMP1->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | rlwinm sp, CARG1, 0, 0, 29
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
+ | evsplati TISFUNC, LJ_TFUNC
+ | lus TOBIT, 0x4338
+ | evsplati TISTAB, LJ_TTAB
+ | li TMP0, 0
+ | lwz BASE, L->base
+ | evmergelo TOBIT, TOBIT, TMP0
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | evsplati TISSTR, LJ_TSTR
+ | li TMP1, LJ_TFALSE
+ | evsplati TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | la RA, -8(BASE) // Results start at BASE-8.
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw TMP1, 0(RA) // Prepend false to error message.
+ | li RD, 16 // 2 results: false + error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | li CARG2, LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | stw BASE, L->base
+ | addi PC, PC, 4 // Must point after first instruction.
+ | stw RC, L->top
+ | srwi CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | lwz RC, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mr L, CARG1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mr BASE, CARG2
+ | lbz TMP1, L->status
+ | stw L, SAVE_L
+ | li PC, FRAME_CP
+ | addi TMP0, sp, CFRAME_RESUME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw CARG3, SAVE_NRES
+ | cmplwi TMP1, 0
+ | stw CARG3, SAVE_ERRF
+ | stw TMP0, L->cframe
+ | stw CARG3, SAVE_CFRAME
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | mr RA, BASE
+ | lwz BASE, L->base
+ | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
+ | lwz TMP1, L->top
+ | evsplati TISFUNC, LJ_TFUNC
+ | lus TOBIT, 0x4338
+ | evsplati TISTAB, LJ_TTAB
+ | lwz PC, FRAME_PC(BASE)
+ | li TMP2, 0
+ | evsplati TISSTR, LJ_TSTR
+ | sub RD, TMP1, BASE
+ | evmergelo TOBIT, TOBIT, TMP2
+ | stb CARG3, L->status
+ | andi. TMP0, PC, FRAME_TYPE
+ | li_vmstate INTERP
+ | addi RD, RD, 8
+ | evsplati TISNIL, LJ_TNIL
+ | mr MULTRES, RD
+ | st_vmstate
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | li PC, FRAME_CP
+ | stw CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lwz TMP1, L:CARG1->cframe
+ | stw CARG3, SAVE_NRES
+ | mr L, CARG1
+ | stw CARG1, SAVE_L
+ | mr BASE, CARG2
+ | stw sp, L->cframe // Add our C frame to cframe chain.
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | stw TMP1, SAVE_CFRAME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | lwz TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | evsplati TISNUM, LJ_TISNUM+1 // Setup type comparison constants.
+ | lwz TMP1, L->top
+ | evsplati TISFUNC, LJ_TFUNC
+ | add PC, PC, BASE
+ | evsplati TISTAB, LJ_TTAB
+ | lus TOBIT, 0x4338
+ | li TMP0, 0
+ | sub PC, PC, TMP2 // PC = frame delta + frame type
+ | evsplati TISSTR, LJ_TSTR
+ | sub NARGS8:RC, TMP1, BASE
+ | evmergelo TOBIT, TOBIT, TMP0
+ | li_vmstate INTERP
+ | evsplati TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | li TMP0, -8
+ | evlddx LFUNC:RB, BASE, TMP0
+ | checkfunc LFUNC:RB
+ | checkfail ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mr L, CARG1
+ | lwz TMP0, L:CARG1->stack
+ | stw CARG1, SAVE_L
+ | lwz TMP1, L->top
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lwz TMP1, L->cframe
+ | stw sp, L->cframe // Add our C frame to cframe chain.
+ | li TMP2, 0
+ | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | stw TMP2, SAVE_ERRF // No error function.
+ | stw TMP1, SAVE_CFRAME
+ | mtctr CARG4
+ | bctrl // (lua_State *L, lua_CFunction func, void *ud)
+ | mr. BASE, CRET1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li PC, FRAME_CP
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lwz TMP0, -12(BASE) // Continuation.
+ | mr RB, BASE
+ | mr BASE, TMP2 // Restore caller BASE.
+ | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
+ | cmplwi TMP0, 0
+ | lwz PC, -16(RB) // Restore PC from [cont|PC].
+ | beq >1
+ | subi TMP2, RD, 8
+ | lwz TMP1, LFUNC:TMP1->pc
+ | evstddx TISNIL, RA, TMP2 // Ensure one valid arg.
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // BASE = base, RA = resultptr, RB = meta base
+ | mtctr TMP0
+ | bctr // Jump to continuation.
+ |
+ |1: // Tail call from C function.
+ | subi TMP1, RB, 16
+ | sub RC, TMP1, BASE
+ | b ->vm_call_tail
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lwz INS, -4(PC)
+ | subi CARG2, RB, 16
+ | decode_RB8 SAVE0, INS
+ | evldd TMP0, 0(RA)
+ | add TMP1, BASE, SAVE0
+ | stw BASE, L->base
+ | cmplw TMP1, CARG2
+ | sub CARG3, CARG2, TMP1
+ | decode_RA8 RA, INS
+ | evstdd TMP0, 0(CARG2)
+ | bne ->BC_CAT_Z
+ | evstddx TMP0, BASE, RA
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | decode_RB8 RB, INS
+ | evstdd STR:RC, 0(CARG3)
+ | add CARG2, BASE, RB
+ | b >1
+ |
+ |->vmeta_tgets:
+ | evmergelo TAB:RB, TISTAB, TAB:RB
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | evstdd TAB:RB, 0(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | evstdd STR:RC, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | efdcfsi TMP0, TMP0
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ | evstdd TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | beq >3
+ | evldd TMP0, 0(CRET1)
+ | evstddx TMP0, BASE, RA
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | subfic TMP1, BASE, FRAME_CONT
+ | lwz BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 16 // 2 args for func(t, k).
+ | b ->vm_call_dispatch_f
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | decode_RB8 RB, INS
+ | evstdd STR:RC, 0(CARG3)
+ | add CARG2, BASE, RB
+ | b >1
+ |
+ |->vmeta_tsets:
+ | evmergelo TAB:RB, TISTAB, TAB:RB
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | evstdd TAB:RB, 0(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | evstdd STR:RC, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | efdcfsi TMP0, TMP0
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ | evstdd TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | evlddx TMP0, BASE, RA
+ | beq >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | evstdd TMP0, 0(CRET1)
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | subfic TMP1, BASE, FRAME_CONT
+ | lwz BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ | evstdd TMP0, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mr CARG1, L
+ | subi PC, PC, 4
+ | add CARG2, BASE, RA
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RD
+ | stw BASE, L->base
+ | decode_OP1 CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmplwi CRET1, 1
+ | bgt ->vmeta_binop
+ |4:
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | decode_RD4 TMP2, INS
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | add TMP2, TMP2, TMP3
+ | isellt PC, PC, TMP2
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lwz INS, -4(PC)
+ | evldd TMP0, 0(RA)
+ | decode_RA8 TMP1, INS
+ | evstddx TMP0, BASE, TMP1
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | li TMP1, LJ_TTRUE
+ | cmplw TMP1, TMP0 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | li TMP1, LJ_TFALSE
+ | cmplw TMP0, TMP1 // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | subi PC, PC, 4
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | add CARG3, KBASE, RC
+ | add CARG4, BASE, RB
+ | b >1
+ |
+ |->vmeta_unm:
+ | add CARG3, BASE, RD
+ | mr CARG4, CARG3
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |1:
+ | add CARG2, BASE, RA
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | stw PC, -16(CRET1) // [cont|PC]
+ | mr TMP2, BASE
+ | addi PC, TMP1, FRAME_CONT
+ | mr BASE, CRET1
+ | li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+#if LJ_52
+ | mr SAVE0, CARG1
+#endif
+ | add CARG2, BASE, RD
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | cmplwi CRET1, 0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | mr CARG1, SAVE0
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | mr CARG1, L
+ | stw TMP2, L->base // This is the caller's base!
+ | subi CARG2, BASE, 8
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mr CARG1, L
+ | stw BASE, L->base
+ | subi CARG2, RA, 8
+ | stw PC, SAVE_PC
+ | add CARG3, RA, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz TMP1, FRAME_PC(BASE)
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mr CARG1, L
+ | stw BASE, L->base
+ | mr CARG2, RA
+ | stw PC, SAVE_PC
+ | mr SAVE0, INS
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ |.if JIT
+ | decode_OP1 TMP0, SAVE0
+ |.endif
+ | decode_RA8 RA, SAVE0
+ |.if JIT
+ | cmpwi TMP0, BC_JFORI
+ |.endif
+ | decode_RD8 RD, SAVE0
+ |.if JIT
+ | beq =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG1, 0(BASE)
+ | evldd CARG2, 8(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_1 name
+ | checknum CARG1
+ | checkfail ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | evmergehi TMP0, CARG1, CARG2
+ | checknum TMP0
+ | checkanyfail ->fff_fallback
+ |.endmacro
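+ |// NARGS8:RC holds nargs*8 on entry, so the cmplwi against 8/16 above checks
+ |// the argument count; too few arguments (or a failed type check) branch to
+ |// the generic ->fff_fallback handler.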
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
+ |.macro ffgccheck
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | cmplw TMP0, TMP1
+ | bgel ->fff_gcstep
+ |.endmacro
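+ |// bgel branches (and links) to ->fff_gcstep only if gc.total >= gc.threshold;
+ |// otherwise the fast function falls through without a GC step.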
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc assert
+ | cmplwi NARGS8:RC, 8
+ | evldd TMP0, 0(BASE)
+ | blt ->fff_fallback
+ | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
+ | la RA, -8(BASE)
+ | evcmpltu cr1, TMP0, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | bge cr1, ->fff_fallback
+ | evstdd TMP0, 0(RA)
+ | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | beq ->fff_res // Done if exactly 1 argument.
+ | li TMP1, 8
+ | subi RC, RC, 8
+ |1:
+ | cmplw TMP1, RC
+ | evlddx TMP0, BASE, TMP1
+ | evstddx TMP0, RA, TMP1
+ | addi TMP1, TMP1, 8
+ | bne <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | li TMP2, ~LJ_TNUMX
+ | cmplw CARG1, TISNUM
+ | not TMP1, CARG1
+ | isellt TMP1, TMP2, TMP1
+ | slwi TMP1, TMP1, 3
+ | la TMP2, CFUNC:RB->upvalue
+ | evlddx STR:CRET1, TMP2, TMP1
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktab CARG1
+ | evmergehi TMP1, CARG1, CARG1
+ | checkfail >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:RB, TAB:CARG1->metatable
+ |2:
+ | evmr CRET1, TISNIL
+ | cmplwi TAB:RB, 0
+ | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beq ->fff_restv
+ | lwz TMP0, TAB:RB->hmask
+ | evmergelo CRET1, TISTAB, TAB:RB // Use metatable as default result.
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | evldd TMP0, NODE:TMP2->key
+ | evldd TMP1, NODE:TMP2->val
+ | evcmpeq TMP0, STR:RC
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | checkallok >5
+ | cmplwi NODE:TMP2, 0
+ | beq ->fff_restv // Not found, keep default result.
+ | b <3
+ |5:
+ | checknil TMP1
+ | checkok ->fff_restv // Ditto for nil value.
+ | evmr CRET1, TMP1 // Return value of mt.__metatable.
+ | b ->fff_restv
+ |
+ |6:
+ | cmpwi TMP1, LJ_TUDATA
+ | not TMP1, TMP1
+ | beq <1
+ | checknum CARG1
+ | slwi TMP1, TMP1, 2
+ | li TMP2, 4*~LJ_TNUMX
+ | isellt TMP1, TMP2, TMP1
+ | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
+ | lwzx TAB:RB, TMP2, TMP1
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | evmergehi TMP0, TAB:CARG1, TAB:CARG2
+ | checktab TMP0
+ | checkanyfail ->fff_fallback
+ | lwz TAB:TMP1, TAB:CARG1->metatable
+ | cmplwi TAB:TMP1, 0
+ | lbz TMP3, TAB:CARG1->marked
+ | bne ->fff_fallback
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stw TAB:CARG2, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, TMP3, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checktab CARG2
+ | la CARG3, 8(BASE)
+ | checkfail ->fff_fallback
+ | mr CARG1, L
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | evldd CRET1, 0(CRET1)
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly one argument.
+ | checknum CARG1
+ | checkok ->fff_restv
+ | b ->fff_fallback
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checkstr CARG1
+ | // A __tostring method in the string base metatable is ignored.
+ | checkok ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | checknum CARG1
+ | cmplwi cr1, TMP0, 0
+ | stw BASE, L->base // Add frame since C call can throw.
+ | crand 4*cr0+eq, 4*cr0+lt, 4*cr1+eq
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | bne ->fff_fallback
+ | ffgccheck
+ | mr CARG1, L
+ | mr CARG2, BASE
+ | bl extern lj_str_fromnum // (lua_State *L, lua_Number *np)
+ | // Returns GCstr *.
+ | evmergelo STR:CRET1, TISSTR, STR:CRET1
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | evstddx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
+ | checktab TAB:CARG2
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+ | stw BASE, L->base // Add frame since C call can throw.
+ | mr CARG1, L
+ | stw BASE, L->top // Dummy frame length is ok.
+ | la CARG3, 8(BASE)
+ | stw PC, SAVE_PC
+ | bl extern lj_tab_next // (lua_State *L, GCtab *t, TValue *key)
+ | // Returns 0 at end of traversal.
+ | cmplwi CRET1, 0
+ | evmr CRET1, TISNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | evldd TMP0, 8(BASE) // Copy key and value to results.
+ | la RA, -8(BASE)
+ | evldd TMP1, 16(BASE)
+ | evstdd TMP0, 0(RA)
+ | li RD, (2+1)*8
+ | evstdd TMP1, 8(RA)
+ | b ->fff_res
+ |
+ |.ffunc_1 pairs
+ | checktab TAB:CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ | evstdd TISNIL, 8(BASE)
+ | li RD, (3+1)*8
+ | evstdd CFUNC:TMP0, 0(RA)
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktab TAB:CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+ | checknum CARG2
+ | lus TMP3, 0x3ff0
+ | checkfail ->fff_fallback
+ | efdctsi TMP2, CARG2
+ | lwz TMP0, TAB:CARG1->asize
+ | evmergelo TMP3, TMP3, ZERO
+ | lwz TMP1, TAB:CARG1->array
+ | efdadd CARG2, CARG2, TMP3
+ | addi TMP2, TMP2, 1
+ | la RA, -8(BASE)
+ | cmplw TMP0, TMP2
+ | slwi TMP3, TMP2, 3
+ | evstdd CARG2, 0(RA)
+ | ble >2 // Not in array part?
+ | evlddx TMP1, TMP1, TMP3
+ |1:
+ | checknil TMP1
+ | li RD, (0+1)*8
+ | checkok ->fff_res // End of iteration, return 0 results.
+ | li RD, (2+1)*8
+ | evstdd TMP1, 8(RA)
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lwz TMP0, TAB:CARG1->hmask
+ | cmplwi TMP0, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | mr CARG2, TMP2
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cmplwi CRET1, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | evldd TMP1, 0(CRET1)
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab TAB:CARG1
+ | lwz PC, FRAME_PC(BASE)
+ | checkfail ->fff_fallback
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ | evldd CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | la RA, -8(BASE)
+#endif
+ | evsplati TMP1, 0
+ | li RD, (3+1)*8
+ | evstdd TMP1, 8(BASE)
+ | evstdd CFUNC:TMP0, 0(RA)
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | cmplwi NARGS8:RC, 8
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | blt ->fff_fallback
+ | mr TMP2, BASE
+ | la BASE, 8(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | mr TMP2, BASE
+ | checkfunc CARG2 // Traceback must be a function.
+ | checkfail ->fff_fallback
+ | la BASE, 16(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | evstdd CARG2, 0(TMP2) // Swap function and traceback.
+ | subi NARGS8:RC, NARGS8:RC, 16
+ | evstdd CARG1, 8(TMP2)
+ | addi PC, TMP3, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | evmergehi TMP0, L:CARG1, L:CARG1
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ |.if resume
+ | cmpwi TMP0, LJ_TTHREAD
+ | bne ->fff_fallback
+ |.endif
+ | lbz TMP0, L:CARG1->status
+ | lwz TMP1, L:CARG1->cframe
+ | lwz CARG2, L:CARG1->top
+ | cmplwi cr0, TMP0, LUA_YIELD
+ | lwz TMP2, L:CARG1->base
+ | cmplwi cr1, TMP1, 0
+ | lwz TMP0, L:CARG1->maxstack
+ | cmplw cr7, CARG2, TMP2
+ | lwz PC, FRAME_PC(BASE)
+ | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
+ | add TMP2, CARG2, NARGS8:RC
+ | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
+ | cmplw cr1, TMP2, TMP0
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
+ | stw PC, SAVE_PC
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
+ | stw BASE, L->base
+ | blt cr6, ->fff_fallback
+ |1:
+ |.if resume
+ | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | subi TMP2, TMP2, 8
+ |.endif
+ | stw TMP2, L:CARG1->top
+ | li TMP1, 0
+ | stw BASE, L->top
+ |2: // Move args to coroutine.
+ | cmpw TMP1, NARGS8:RC
+ | evlddx TMP0, BASE, TMP1
+ | beq >3
+ | evstddx TMP0, CARG2, TMP1
+ | addi TMP1, TMP1, 8
+ | b <2
+ |3:
+ | li CARG3, 0
+ | mr L:SAVE0, L:CARG1
+ | li CARG4, 0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | lwz TMP2, L:SAVE0->base
+ | cmplwi CRET1, LUA_YIELD
+ | lwz TMP3, L:SAVE0->top
+ | li_vmstate INTERP
+ | lwz BASE, L->base
+ | st_vmstate
+ | bgt >8
+ | sub RD, TMP3, TMP2
+ | lwz TMP0, L->maxstack
+ | cmplwi RD, 0
+ | add TMP1, BASE, RD
+ | beq >6 // No results?
+ | cmplw TMP1, TMP0
+ | li TMP1, 0
+ | bgt >9 // Need to grow stack?
+ |
+ | subi TMP3, RD, 8
+ | stw TMP2, L:SAVE0->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | cmplw TMP1, TMP3
+ | evlddx TMP0, TMP2, TMP1
+ | evstddx TMP0, BASE, TMP1
+ | addi TMP1, TMP1, 8
+ | bne <5
+ |6:
+ | andi. TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | la RA, -8(BASE)
+ | stw TMP1, -8(BASE) // Prepend true to results.
+ | addi RD, RD, 16
+ |.else
+ | mr RA, BASE
+ | addi RD, RD, 8
+ |.endif
+ |7:
+ | stw PC, SAVE_PC
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | andi. TMP0, PC, FRAME_TYPE
+ | la TMP3, -8(TMP3)
+ | li TMP1, LJ_TFALSE
+ | evldd TMP0, 0(TMP3)
+ | stw TMP3, L:SAVE0->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | stw TMP1, -8(BASE) // Prepend false to results.
+ | la RA, -8(BASE)
+ | evstdd TMP0, 0(BASE) // Copy error message.
+ | b <7
+ |.else
+ | mr CARG1, L
+ | mr CARG2, L:SAVE0
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mr CARG1, L
+ | srwi CARG2, RD, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | li CRET1, 0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lwz TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | stw BASE, L->base
+ | andi. TMP0, TMP0, CFRAME_RESUME
+ | stw TMP1, L->top
+ | li CRET1, LUA_YIELD
+ | beq ->fff_fallback
+ | stw ZERO, L->cframe
+ | stb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_n math_abs
+ | efdabs CRET1, CARG1
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CRET1 = TValue result.
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | evstdd CRET1, 0(RA)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | bne ->vm_return
+ | lwz INS, -4(PC)
+ | decode_RB8 RB, INS
+ |5:
+ | cmplw RB, RD // More results expected?
+ | decode_RA8 TMP0, INS
+ | bgt >6
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | evstddx TISNIL, RA, TMP1
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc math_ .. func
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | bl extern func@plt
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc math_ .. func
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG2, 0(BASE)
+ | evldd CARG4, 8(BASE)
+ | blt ->fff_fallback
+ | evmergehi CARG1, CARG4, CARG2
+ | checknum CARG1
+ | evmergehi CARG3, CARG4, CARG4
+ | checkanyfail ->fff_fallback
+ | bl extern func@plt
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc math_ .. func
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | lwz PC, FRAME_PC(BASE)
+ | bl ->vm_..func.._hilo;
+ | la RA, -8(BASE)
+ | evstdd CRET2, 0(RA)
+ | b ->fff_res1
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ | math_extern sqrt
+ |
+ |.ffunc math_log
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | bl extern log@plt
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |->ff_math_deg:
+ |.ffunc_n math_rad
+ | evldd CARG2, CFUNC:RB->upvalue[0]
+ | efdmul CRET1, CARG1, CARG2
+ | b ->fff_restv
+ |
+ |.ffunc math_ldexp
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG2, 0(BASE)
+ | evldd CARG4, 8(BASE)
+ | blt ->fff_fallback
+ | evmergehi CARG1, CARG4, CARG2
+ | checknum CARG1
+ | checkanyfail ->fff_fallback
+ | efdctsi CARG3, CARG4
+ | bl extern ldexp@plt
+ | evmergelo CRET1, CRET1, CRET2
+ | b ->fff_restv
+ |
+ |.ffunc math_frexp
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | lwz PC, FRAME_PC(BASE)
+ | bl extern frexp@plt
+ | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | evmergelo CRET1, CRET1, CRET2
+ | efdcfsi CRET2, TMP1
+ | la RA, -8(BASE)
+ | evstdd CRET1, 0(RA)
+ | li RD, (2+1)*8
+ | evstdd CRET2, 8(RA)
+ | b ->fff_res
+ |
+ |.ffunc math_modf
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG2, 0(BASE)
+ | blt ->fff_fallback
+ | checknum CARG2
+ | evmergehi CARG1, CARG2, CARG2
+ | checkfail ->fff_fallback
+ | la CARG3, -8(BASE)
+ | lwz PC, FRAME_PC(BASE)
+ | bl extern modf@plt
+ | evmergelo CRET1, CRET1, CRET2
+ | la RA, -8(BASE)
+ | evstdd CRET1, 0(BASE)
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, cmpop
+ | .ffunc_1 name
+ | checknum CARG1
+ | li TMP1, 8
+ | checkfail ->fff_fallback
+ |1:
+ | evlddx CARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_restv // Ok, since CRET1 = CARG1.
+ | checkfail ->fff_fallback
+ | cmpop CARG2, CARG1
+ | addi TMP1, TMP1, 8
+ | crmove 4*cr0+lt, 4*cr0+gt
+ | evsel CARG1, CARG2, CARG1
+ | b <1
+ |.endmacro
+ |
+ | math_minmax math_min, efdtstlt
+ | math_minmax math_max, efdtstgt
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc_1 string_len
+ | checkstr STR:CARG1
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmplwi NARGS8:RC, 8
+ | evldd STR:CARG1, 0(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checkstr STR:CARG1
+ | la RA, -8(BASE)
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | li RD, (0+1)*8
+ | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | li TMP2, (1+1)*8
+ | cmplwi TMP0, 0
+ | lwz PC, FRAME_PC(BASE)
+ | efdcfsi CRET1, TMP1
+ | iseleq RD, RD, TMP2
+ | evstdd CRET1, 0(RA)
+ | b ->fff_res
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG1
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | checkfail ->fff_fallback
+ | efdctsiz TMP0, CARG1
+ | li CARG3, 1
+ | cmplwi TMP0, 255
+ | stb TMP0, 0(CARG2)
+ | bgt ->fff_fallback
+ |->fff_newstr:
+ | mr CARG1, L
+ | stw BASE, L->base
+ | stw PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ | // Returns GCstr *.
+ | lwz BASE, L->base
+ | evmergelo STR:CRET1, TISSTR, STR:CRET1
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG3, 16(BASE)
+ | evldd STR:CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | evldd CARG2, 8(BASE)
+ | li TMP2, -1
+ | beq >1
+ | checknum CARG3
+ | checkfail ->fff_fallback
+ | efdctsiz TMP2, CARG3
+ |1:
+ | checknum CARG2
+ | checkfail ->fff_fallback
+ | checkstr STR:CARG1
+ | efdctsiz TMP1, CARG2
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | cmplw TMP0, TMP2 // len < end? (unsigned compare)
+ | add TMP3, TMP2, TMP0
+ | blt >5
+ |2:
+ | cmpwi TMP1, 0 // start <= 0?
+ | add TMP3, TMP1, TMP0
+ | ble >7
+ |3:
+ | sub. CARG3, TMP2, TMP1
+ | addi CARG2, STR:CARG1, #STR-1
+ | addi CARG3, CARG3, 1
+ | add CARG2, CARG2, TMP1
+ | isellt CARG3, r0, CARG3
+ | b ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | cmpw TMP0, TMP2
+ | addi TMP3, TMP3, 1
+ | iselgt TMP2, TMP3, TMP0 // end = end > len ? len : end+len+1
+ | b <2
+ |
+ |7: // Negative start or underflow.
+ | cmpwi cr1, TMP3, 0
+ | iseleq TMP1, r0, TMP3
+ | isel TMP1, r0, TMP1, 4*cr1+lt
+ | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
+ | b <3
+ |
+ |.ffunc string_rep // Only handle the 1-char case inline.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | evldd CARG1, 0(BASE)
+ | evldd CARG2, 8(BASE)
+ | bne ->fff_fallback // Exactly 2 arguments.
+ | checknum CARG2
+ | checkfail ->fff_fallback
+ | checkstr STR:CARG1
+ | efdctsiz CARG3, CARG2
+ | checkfail ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ | cmpwi CARG3, 0
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | ble >2 // Count <= 0? (or non-int)
+ | cmplwi TMP0, 1
+ | subi TMP2, CARG3, 1
+ | blt >2 // Zero length string?
+ | cmplw cr1, TMP1, CARG3
+ | bne ->fff_fallback // Fallback for > 1-char strings.
+ | lbz TMP0, STR:CARG1[1]
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | blt cr1, ->fff_fallback
+ |1: // Fill buffer with char. Yes, this is suboptimal code (do you care?).
+ | cmplwi TMP2, 0
+ | stbx TMP0, CARG2, TMP2
+ | subi TMP2, TMP2, 1
+ | bne <1
+ | b ->fff_newstr
+ |2: // Return empty string.
+ | la STR:CRET1, DISPATCH_GL(strempty)(DISPATCH)
+ | evmergelo CRET1, TISSTR, STR:CRET1
+ | b ->fff_restv
+ |
+ |.ffunc string_reverse
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | checkstr STR:CARG1
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | checkfail ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | li TMP2, 0
+ | cmplw TMP1, CARG3
+ | subi TMP3, CARG3, 1
+ | blt ->fff_fallback
+ |1: // Reverse string copy.
+ | cmpwi TMP3, 0
+ | lbzx TMP1, CARG1, TMP2
+ | blt ->fff_newstr
+ | stbx TMP1, CARG2, TMP3
+ | subi TMP3, TMP3, 1
+ | addi TMP2, TMP2, 1
+ | b <1
+ |
+ |.macro ffstring_case, name, lo
+ | .ffunc name
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | evldd CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | checkstr STR:CARG1
+ | lwz TMP1, DISPATCH_GL(tmpbuf.sz)(DISPATCH)
+ | checkfail ->fff_fallback
+ | lwz CARG3, STR:CARG1->len
+ | la CARG1, #STR(STR:CARG1)
+ | lwz CARG2, DISPATCH_GL(tmpbuf.buf)(DISPATCH)
+ | cmplw TMP1, CARG3
+ | li TMP2, 0
+ | blt ->fff_fallback
+ |1: // ASCII case conversion.
+ | cmplw TMP2, CARG3
+ | lbzx TMP1, CARG1, TMP2
+ | bge ->fff_newstr
+ | subi TMP0, TMP1, lo
+ | xori TMP3, TMP1, 0x20
+ | cmplwi TMP0, 26
+ | isellt TMP1, TMP3, TMP1
+ | stbx TMP1, CARG2, TMP2
+ | addi TMP2, TMP2, 1
+ | b <1
+ |.endmacro
+ |
+ |ffstring_case string_lower, 65
+ |ffstring_case string_upper, 97
+ |
+ |//-- Table library ------------------------------------------------------
+ |
+ |.ffunc_1 table_getn
+ | checktab CARG1
+ | checkfail ->fff_fallback
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | efdcfsi CRET1, CRET1
+ | b ->fff_restv
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_n bit_..name
+ | efdadd CARG1, CARG1, TOBIT
+ |.endmacro
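+ |// Adding TOBIT (2^52+2^51) leaves the argument's int32 value in the low word
+ |// of the GPR; the bit operations work on that word directly and ->fff_resbit
+ |// converts it back to a double with efdcfsi.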
+ |
+ |.ffunc_bit tobit
+ |->fff_resbit:
+ | efdcfsi CRET1, CARG1
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | li TMP1, 8
+ |1:
+ | evlddx CARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_resbit
+ | checkfail ->fff_fallback
+ | efdadd CARG2, CARG2, TOBIT
+ | ins CARG1, CARG1, CARG2
+ | addi TMP1, TMP1, 8
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | rotlwi TMP0, CARG1, 8
+ | rlwimi TMP0, CARG1, 24, 0, 7
+ | rlwimi TMP0, CARG1, 24, 16, 23
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc_bit bnot
+ | not TMP0, CARG1
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc_nn bit_..name
+ | efdadd CARG2, CARG2, TOBIT
+ | efdadd CARG1, CARG1, TOBIT
+ |.if shmod == 1
+ | rlwinm CARG2, CARG2, 0, 27, 31
+ |.elif shmod == 2
+ | neg CARG2, CARG2
+ |.endif
+ | ins TMP0, CARG1, CARG2
+ | efdcfsi CRET1, TMP0
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, slw, 1
+ |.ffunc_bit_sh rshift, srw, 1
+ |.ffunc_bit_sh arshift, sraw, 1
+ |.ffunc_bit_sh rol, rotlw, 0
+ |.ffunc_bit_sh ror, rotlw, 2
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lwz TMP3, CFUNC:RB->f
+ | add TMP1, BASE, NARGS8:RC
+ | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addi TMP0, TMP1, 8*LUA_MINSTACK
+ | lwz TMP2, L->maxstack
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | cmplw TMP0, TMP2
+ | stw BASE, L->base
+ | stw TMP1, L->top
+ | mr CARG1, L
+ | bgt >5 // Need to grow stack.
+ | mtctr TMP3
+ | bctrl // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lwz BASE, L->base
+ | cmpwi CRET1, 0
+ | slwi RD, CRET1, 3
+ | la RA, -8(BASE)
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | lwz TMP0, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub NARGS8:RC, TMP0, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi. TMP0, PC, FRAME_TYPE
+ | rlwinm TMP1, PC, 0, 0, 28
+ | bne >3
+ | lwz INS, -4(PC)
+ | decode_RA8 TMP1, INS
+ | addi TMP1, TMP1, 8
+ |3:
+ | sub TMP2, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | li CARG2, LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mflr SAVE0
+ | stw BASE, L->base
+ | add TMP0, BASE, NARGS8:RC
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | stw TMP0, L->top
+ | mr CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | lwz BASE, L->base
+ | mtlr SAVE0
+ | lwz TMP0, L->top
+ | sub NARGS8:RC, TMP0, BASE
+ | lwz CFUNC:RB, FRAME_FUNC(BASE)
+ | blr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | NYI
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OP4 TMP1, INS.
+ | lwzx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | bctr
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
+ | bne <5
+ |
+ | cmpwi cr1, TMP0, 0
+ | addic. TMP2, TMP2, -1
+ | beq cr1, <5
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | beq >1
+ | bge cr1, <5
+ |1:
+ | mr CARG1, L
+ | stw MULTRES, SAVE_MULTRES
+ | mr CARG2, PC
+ | stw BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | lwz BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lwz INS, -4(PC)
+ | decode_OP4 TMP1, INS
+ | decode_RB8 RB, INS
+ | addi TMP1, TMP1, GG_DISP2STATIC
+ | decode_RD8 RD, INS
+ | lwzx TMP0, DISPATCH, TMP1
+ | decode_RA8 RA, INS
+ | decode_RC8 RC, INS
+ | mtctr TMP0
+ | bctr
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addi PC, PC, 4
+ | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | NYI
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mr CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | add TMP0, BASE, RC
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | stw BASE, L->base
+ | sub RA, RA, BASE
+ | stw TMP0, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | lwz BASE, L->base
+ | lwz TMP0, L->top
+ | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
+ | sub NARGS8:RC, TMP0, BASE
+ | add RA, BASE, RA
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | mtctr CRET1
+ | bctr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_exit_handler:
+ |.if JIT
+ | NYI
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | NYI
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called by math.floor/math.ceil fast functions
+ |// and from JIT code.
+ |//
+ |// This can be inlined if the CPU has the frin/friz/frip/frim instructions.
+ |// The alternative hard-float approaches have a deep dependency chain.
+ |// The resulting latency is at least 3x-7x the double-precision FP latency
+ |// (e500v2: 6cy, e600: 5cy, Cell: 10cy) or around 20-70 cycles.
+ |//
+ |// The soft-float approach is tedious, but much faster (e500v2: ~11cy/~6cy).
+ |// However it relies on a fast way to transfer the FP value to GPRs
+ |// (e500v2: 0cy for lo-word, 1cy for hi-word).
+ |//
+ |.macro vm_round, name, mode
+ | // Used temporaries: TMP0, TMP1, TMP2, TMP3.
+ |->name.._efd: // Input: CARG2, output: CRET2
+ | evmergehi CARG1, CARG2, CARG2
+ |->name.._hilo:
+ | // Input: CARG1 (hi), CARG2 (hi, lo), output: CRET2
+ | rlwinm TMP2, CARG1, 12, 21, 31
+ | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
+ | li TMP1, -1
+ | cmplwi cr1, TMP2, 51 // 0 <= exp <= 51?
+ | subfic TMP0, TMP2, 52
+ | bgt cr1, >1
+ | lus TMP3, 0xfff0
+ | slw TMP0, TMP1, TMP0 // lomask = -1 << (52-exp)
+ | sraw TMP1, TMP3, TMP2 // himask = (int32_t)0xfff00000 >> exp
+ |.if mode == 2 // trunc(x):
+ | evmergelo TMP0, TMP1, TMP0
+ | evand CRET2, CARG2, TMP0 // hi &= himask, lo &= lomask
+ |.else
+ | andc TMP2, CARG2, TMP0
+ | andc TMP3, CARG1, TMP1
+ | or TMP2, TMP2, TMP3 // ztest = (hi&~himask) | (lo&~lomask)
+ | srawi TMP3, CARG1, 31 // signmask = (int32_t)hi >> 31
+ |.if mode == 0 // floor(x):
+ | and. TMP2, TMP2, TMP3 // iszero = ((ztest & signmask) == 0)
+ |.else // ceil(x):
+ | andc. TMP2, TMP2, TMP3 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | and CARG2, CARG2, TMP0 // lo &= lomask
+ | and CARG1, CARG1, TMP1 // hi &= himask
+ | subc TMP0, CARG2, TMP0
+ | iseleq TMP0, CARG2, TMP0 // lo = iszero ? lo : lo-lomask
+ | sube TMP1, CARG1, TMP1
+ | iseleq TMP1, CARG1, TMP1 // hi = iszero ? hi : hi-himask+carry
+ | evmergelo CRET2, TMP1, TMP0
+ |.endif
+ | blr
+ |1:
+ | bgtlr // Already done if >=2^52, +-inf or nan.
+ |.if mode == 2 // trunc(x):
+ | rlwinm TMP1, CARG1, 0, 0, 0 // hi = sign(x)
+ | li TMP0, 0
+ | evmergelo CRET2, TMP1, TMP0
+ |.else
+ | rlwinm TMP2, CARG1, 0, 1, 31
+ | srawi TMP0, CARG1, 31 // signmask = (int32_t)hi >> 31
+ | or TMP2, TMP2, CARG2 // ztest = abs(hi) | lo
+ | lus TMP1, 0x3ff0
+ |.if mode == 0 // floor(x):
+ | and. TMP2, TMP2, TMP0 // iszero = ((ztest & signmask) == 0)
+ |.else // ceil(x):
+ | andc. TMP2, TMP2, TMP0 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | li TMP0, 0
+ | iseleq TMP1, r0, TMP1
+ | rlwimi CARG1, TMP1, 0, 1, 31 // hi = sign(x) | (iszero ? 0.0 : 1.0)
+ | evmergelo CRET2, CARG1, TMP0
+ |.endif
+ | blr
+ |.endmacro
+ |
+ |->vm_floor:
+ | mflr CARG3
+ | evmergelo CARG2, CARG1, CARG2
+ | bl ->vm_floor_hilo
+ | mtlr CARG3
+ | evmergehi CRET1, CRET2, CRET2
+ | blr
+ |
+ | vm_round vm_floor, 0
+ | vm_round vm_ceil, 1
+ |.if JIT
+ | vm_round vm_trunc, 2
+ |.else
+ |->vm_trunc_efd:
+ |->vm_trunc_hilo:
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_ffi_call:
+ |.if FFI
+ | NYI
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | evlddx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | evlddx TMP1, BASE, RD
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | lwz TMP2, -4(PC)
+ | evmergehi RB, TMP0, TMP1
+ | decode_RD4 TMP2, TMP2
+ | checknum RB
+ | add TMP2, TMP2, TMP3
+ | checkanyfail ->vmeta_comp
+ | efdcmplt TMP0, TMP1
+ if (op == BC_ISLE || op == BC_ISGT) {
+ | efdcmpeq cr1, TMP0, TMP1
+ | cror 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
+ }
+ if (op == BC_ISLT || op == BC_ISLE) {
+ | iselgt PC, TMP2, PC
+ } else {
+ | iselgt PC, PC, TMP2
+ }
+ | ins_next
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | evlddx CARG2, BASE, RA
+ | addi PC, PC, 4
+ | evlddx CARG3, BASE, RD
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | lwz TMP2, -4(PC)
+ | evmergehi RB, CARG2, CARG3
+ | decode_RD4 TMP2, TMP2
+ | checknum RB
+ | add TMP2, TMP2, TMP3
+ | checkanyfail >5
+ | efdcmpeq CARG2, CARG3
+ if (vk) {
+ | iselgt PC, TMP2, PC
+ } else {
+ | iselgt PC, PC, TMP2
+ }
+ |1:
+ | ins_next
+ |
+ |5: // Either or both types are not numbers.
+ | evcmpeq CARG2, CARG3
+ | not TMP3, RB
+ | cmplwi cr1, TMP3, ~LJ_TISPRI // Primitive?
+ | crorc 4*cr7+lt, 4*cr0+so, 4*cr0+lt // 1: Same tv or different type.
+ | cmplwi cr6, TMP3, ~LJ_TISTABUD // Table or userdata?
+ | crandc 4*cr7+gt, 4*cr0+lt, 4*cr1+gt // 2: Same type and primitive.
+ | mr SAVE0, PC
+ if (vk) {
+ | isel PC, TMP2, PC, 4*cr7+gt
+ } else {
+ | isel TMP2, PC, TMP2, 4*cr7+gt
+ }
+ | cror 4*cr7+lt, 4*cr7+lt, 4*cr7+gt // 1 or 2.
+ if (vk) {
+ | isel PC, TMP2, PC, 4*cr0+so
+ } else {
+ | isel PC, PC, TMP2, 4*cr0+so
+ }
+ | blt cr7, <1 // Done if 1 or 2.
+ | blt cr6, <1 // Done if not tab/ud.
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:TMP2, TAB:CARG2->metatable
+ | li CARG4, 1-vk // ne = 0 or 1.
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable?
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_eq
+ | bne <1 // Or 'no __eq' flag set?
+ | mr PC, SAVE0 // Restore old PC.
+ | b ->vmeta_equal // Handle __eq metamethod.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | evlddx TMP0, BASE, RA
+ | srwi RD, RD, 1
+ | lwz INS, 0(PC)
+ | subfic RD, RD, -4
+ | addi PC, PC, 4
+ | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | evmergelo STR:TMP1, TISSTR, STR:TMP1
+ | add TMP2, TMP2, TMP3
+ | evcmpeq TMP0, STR:TMP1
+ if (vk) {
+ | isel PC, TMP2, PC, 4*cr0+so
+ } else {
+ | isel PC, PC, TMP2, 4*cr0+so
+ }
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | evlddx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | evlddx TMP1, KBASE, RD
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | lwz INS, -4(PC)
+ | checknum TMP0
+ | checkfail >5
+ | efdcmpeq TMP0, TMP1
+ |1:
+ | decode_RD4 TMP2, INS
+ | add TMP2, TMP2, TMP3
+ if (vk) {
+ | iselgt PC, TMP2, PC
+ |5:
+ } else {
+ | iselgt PC, PC, TMP2
+ }
+ |3:
+ | ins_next
+ if (!vk) {
+ |5:
+ | decode_RD4 TMP2, INS
+ | add PC, TMP2, TMP3
+ | b <3
+ }
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | lwzx TMP0, BASE, RA
+ | srwi TMP1, RD, 3
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | not TMP1, TMP1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | cmplw TMP0, TMP1
+ | decode_RD4 TMP2, INS
+ | add TMP2, TMP2, TMP3
+ if (vk) {
+ | iseleq PC, TMP2, PC
+ } else {
+ | iseleq PC, PC, TMP2
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | evlddx TMP0, BASE, RD
+ | evaddw TMP1, TISNIL, TISNIL // Synthesize LJ_TFALSE.
+ | lwz INS, 0(PC)
+ | evcmpltu TMP0, TMP1
+ | addi PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | add TMP2, TMP2, TMP3
+ if (op == BC_IST) {
+ | isellt PC, TMP2, PC
+ } else {
+ | isellt PC, PC, TMP2
+ }
+ } else {
+ if (op == BC_ISTC) {
+ | checkfail >1
+ } else {
+ | checkok >1
+ }
+ | addis PC, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ | evstddx TMP0, BASE, RA
+ | add PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | evlddx TMP0, BASE, RD
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lwzx TMP0, BASE, RD
+ | subfic TMP1, TMP0, LJ_TTRUE
+ | adde TMP0, TMP0, TMP1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | evlddx TMP0, BASE, RD
+ | checknum TMP0
+ | checkfail ->vmeta_unm
+ | efdneg TMP0, TMP0
+ | ins_next1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | evlddx CARG1, BASE, RD
+ | checkstr CARG1
+ | checkfail >2
+ | lwz CRET1, STR:CARG1->len
+ |1:
+ | ins_next1
+ | efdcfsi TMP0, CRET1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ |2:
+ | checktab CARG1
+ | checkfail ->vmeta_len
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | cmplwi TAB:TMP2, 0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+#if LJ_52
+ |9:
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre, t0, t1
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | evlddx t0, BASE, RB
+ | checknum t0
+ | evlddx t1, KBASE, RC
+ | checkfail ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | evlddx t1, BASE, RB
+ | checknum t1
+ | evlddx t0, KBASE, RC
+ | checkfail ->vmeta_arith_nv
+ || break;
+ ||default:
+ | evlddx t0, BASE, RB
+ | evlddx t1, BASE, RC
+ | evmergehi TMP2, t0, t1
+ | checknum TMP2
+ | checkanyfail ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arith, ins
+ | ins_arithpre TMP0, TMP1
+ | ins_next1
+ | ins TMP0, TMP0, TMP1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith efdadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith efdsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith efdmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith efddiv
+ break;
+ case BC_MODVN:
+ | ins_arithpre RD, SAVE0
+ |->BC_MODVN_Z:
+ | efddiv CARG2, RD, SAVE0
+ | bl ->vm_floor_efd // floor(b/c)
+ | efdmul TMP0, CRET2, SAVE0
+ | ins_next1
+ | efdsub TMP0, RD, TMP0 // b - floor(b/c)*c
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre RD, SAVE0
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ break;
+ case BC_POW:
+ | evlddx CARG2, BASE, RB
+ | evlddx CARG4, BASE, RC
+ | evmergehi CARG1, CARG4, CARG2
+ | checknum CARG1
+ | evmergehi CARG3, CARG4, CARG4
+ | checkanyfail ->vmeta_arith_vv
+ | bl extern pow@plt
+ | evmergelo CRET2, CRET1, CRET2
+ | evstddx CRET2, BASE, RA
+ | ins_next
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | sub CARG3, RC, RB
+ | stw BASE, L->base
+ | add CARG2, BASE, RC
+ | mr SAVE0, RB
+ |->BC_CAT_Z:
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | srwi CARG3, CARG3, 3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | lwz BASE, L->base
+ | bne ->vmeta_binop
+ | evlddx TMP0, BASE, SAVE0 // Copy result from RB to RA.
+ | evstddx TMP0, BASE, RA
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | ins_next1
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
+ | evmergelo TMP0, TISSTR, TMP0
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | ins_next1
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
+ | li TMP2, LJ_TCDATA
+ | evmergelo TMP0, TMP2, TMP0
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | srwi TMP1, RD, 3
+ | extsh TMP1, TMP1
+ | ins_next1
+ | efdcfsi TMP0, TMP1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | evlddx TMP0, KBASE, RD
+ | ins_next1
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srwi TMP1, RD, 3
+ | not TMP0, TMP1
+ | ins_next1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | evstddx TISNIL, BASE, RA
+ | addi RA, RA, 8
+ |1:
+ | evstddx TISNIL, BASE, RA
+ | cmpw RA, RD
+ | addi RA, RA, 8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | ins_next1
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RD, RD, 1
+ | addi RD, RD, offsetof(GCfuncL, uvptr)
+ | lwzx UPVAL:RB, LFUNC:RB, RD
+ | lwz TMP1, UPVAL:RB->v
+ | evldd TMP0, 0(TMP1)
+ | evstddx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | evlddx TMP1, BASE, RD
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP0, UPVAL:RB->closed
+ | evmergehi TMP2, TMP1, TMP1
+ | evstdd TMP1, 0(CARG2)
+ | cmplwi cr1, TMP0, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | subi TMP2, TMP2, (LJ_TISNUM+1)
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmplwi TMP2, LJ_TISGCV - (LJ_TISNUM+1)
+ | bge <1 // tvisgcv(v)
+ | lbz TMP3, GCOBJ:TMP1->gch.marked
+ | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | srwi RA, RA, 1
+ | subfic TMP1, TMP1, -4
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | evmergelo STR:TMP1, TISSTR, STR:TMP1
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andi. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP3, STR:TMP1->marked
+ | lbz TMP2, UPVAL:RB->closed
+ | evstdd STR:TMP1, 0(CARG2)
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | andi. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
+ | cmplwi cr1, TMP2, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | ins_next1
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | evlddx TMP0, KBASE, RD
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lwz TMP1, UPVAL:RB->v
+ | evstdd TMP0, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | ins_next1
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | srwi TMP0, RD, 3
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | not TMP0, TMP0
+ | lwz TMP1, UPVAL:RB->v
+ | stw TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lwz TMP1, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | stw BASE, L->base
+ | cmplwi TMP1, 0
+ | mr CARG1, L
+ | beq >1
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | lwz BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srwi TMP1, RD, 1
+ | stw BASE, L->base
+ | subfic TMP1, TMP1, -4
+ | stw PC, SAVE_PC
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | mr CARG1, L
+ | lwz CARG3, FRAME_FUNC(BASE)
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | lwz BASE, L->base
+ | evmergelo LFUNC:CRET1, TISFUNC, LFUNC:CRET1
+ | evstddx LFUNC:CRET1, BASE, RA
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | mr CARG1, L
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | stw BASE, L->base
+ | cmplw TMP0, TMP1
+ | stw PC, SAVE_PC
+ | bge >5
+ |1:
+ if (op == BC_TNEW) {
+ | rlwinm CARG2, RD, 29, 21, 31
+ | rlwinm CARG3, RD, 18, 27, 31
+ | cmpwi CARG2, 0x7ff
+ | li TMP1, 0x801
+ | iseleq CARG2, TMP1, CARG2
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns Table *.
+ } else {
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns Table *.
+ }
+ | lwz BASE, L->base
+ | evmergelo TAB:CRET1, TISTAB, TAB:CRET1
+ | evstddx TAB:CRET1, BASE, RA
+ | ins_next
+ |5:
+ | mr SAVE0, RD
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mr RD, SAVE0
+ | mr CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | lwz TAB:RB, LFUNC:TMP2->env
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | evlddx TAB:RB, BASE, RB
+ | evlddx RC, BASE, RC
+ | checktab TAB:RB
+ | checkfail ->vmeta_tgetv
+ | checknum RC
+ | checkfail >5
+ | // Convert number key to integer
+ | efdctsi TMP2, RC
+ | lwz TMP0, TAB:RB->asize
+ | efdcfsi TMP1, TMP2
+ | cmplw cr0, TMP0, TMP2
+ | efdcmpeq cr1, RC, TMP1
+ | lwz TMP1, TAB:RB->array
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
+ | slwi TMP2, TMP2, 3
+ | ble ->vmeta_tgetv // Integer key and in array part?
+ | evlddx TMP1, TMP1, TMP2
+ | checknil TMP1
+ | checkok >2
+ |1:
+ | evstddx TMP1, BASE, RA
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |5:
+ | checkstr STR:RC // String key?
+ | checkok ->BC_TGETS_Z
+ | b ->vmeta_tgetv
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP1, RC, 1
+ | checktab TAB:RB
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | checkfail ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | evldd TMP0, NODE:TMP2->key
+ | evldd TMP1, NODE:TMP2->val
+ | evcmpeq TMP0, STR:RC
+ | checkanyfail >4
+ | checknil TMP1
+ | checkok >5 // Key found, but nil value?
+ |3:
+ | evstddx TMP1, BASE, RA
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ | evmr TMP1, TISNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <3 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andi. TMP0, TMP0, 1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP0, RC, 3
+ | checktab TAB:RB
+ | checkfail ->vmeta_tgetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | cmplw TMP0, TMP1
+ | bge ->vmeta_tgetb
+ | evlddx TMP1, TMP2, RC
+ | checknil TMP1
+ | checkok >5
+ |1:
+ | ins_next1
+ | evstddx TMP1, BASE, RA
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | evlddx TAB:RB, BASE, RB
+ | evlddx RC, BASE, RC
+ | checktab TAB:RB
+ | checkfail ->vmeta_tsetv
+ | checknum RC
+ | checkfail >5
+ | // Convert number key to integer
+ | efdctsi TMP2, RC
+ | evlddx SAVE0, BASE, RA
+ | lwz TMP0, TAB:RB->asize
+ | efdcfsi TMP1, TMP2
+ | cmplw cr0, TMP0, TMP2
+ | efdcmpeq cr1, RC, TMP1
+ | lwz TMP1, TAB:RB->array
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+gt
+ | slwi TMP0, TMP2, 3
+ | ble ->vmeta_tsetv // Integer key and in array part?
+ | lbz TMP3, TAB:RB->marked
+ | evlddx TMP2, TMP1, TMP0
+ | checknil TMP2
+ | checkok >3
+ |1:
+ | andi. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
+ | evstddx SAVE0, TMP1, TMP0
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andi. TMP2, TMP2, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |5:
+ | checkstr STR:RC // String key?
+ | checkok ->BC_TSETS_Z
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP1, RC, 1
+ | checktab TAB:RB
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | checkfail ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->hash
+ | lwz NODE:TMP2, TAB:RB->node
+ | evmergelo STR:RC, TISSTR, STR:RC
+ | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->hash & tab->hmask
+ | evlddx SAVE0, BASE, RA
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | lbz TMP3, TAB:RB->marked
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | evldd TMP0, NODE:TMP2->key
+ | evldd TMP1, NODE:TMP2->val
+ | evcmpeq TMP0, STR:RC
+ | checkanyfail >5
+ | checknil TMP1
+ | checkok >4 // Key found, but nil value?
+ |2:
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | evstdd SAVE0, NODE:TMP2->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <2 // No metatable: done.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andi. TMP0, TMP0, 1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | cmplwi TAB:TMP1, 0
+ | stw BASE, L->base
+ | beq >6 // No metatable: continue.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andi. TMP0, TMP0, 1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mr CARG2, TAB:RB
+ | evstdd STR:RC, 0(CARG3)
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | lwz BASE, L->base
+ | evstdd SAVE0, 0(CRET1)
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | evlddx TAB:RB, BASE, RB
+ | srwi TMP0, RC, 3
+ | checktab TAB:RB
+ | checkfail ->vmeta_tsetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | lbz TMP3, TAB:RB->marked
+ | cmplw TMP0, TMP1
+ | evlddx SAVE0, BASE, RA
+ | bge ->vmeta_tsetb
+ | evlddx TMP1, TMP2, RC
+ | checknil TMP1
+ | checkok >5
+ |1:
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | evstddx SAVE0, TMP2, RC
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP1, TAB:TMP1->nomm
+ | andi. TMP1, TMP1, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | add RA, BASE, RA
+ |1:
+ | add TMP3, KBASE, RD
+ | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
+ | addic. TMP0, MULTRES, -8
+ | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
+ | srwi CARG3, TMP0, 3
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG3, TMP3
+ | lwz TMP2, TAB:CARG2->asize
+ | slwi TMP1, TMP3, 3
+ | lbz TMP3, TAB:CARG2->marked
+ | cmplw CARG3, TMP2
+ | add TMP2, RA, TMP0
+ | lwz TMP0, TAB:CARG2->array
+ | bgt >5
+ | add TMP1, TMP1, TMP0
+ | andi. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | evldd TMP0, 0(RA)
+ | addi RA, RA, 8
+ | cmpw cr1, RA, TMP2
+ | evstdd TMP0, 0(TMP1)
+ | addi TMP1, TMP1, 8
+ | blt cr1, <3
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | stw BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | mr SAVE0, RD
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | mr RD, SAVE0
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALL follows.
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | evlddx LFUNC:RB, BASE, RA
+ | mr TMP2, BASE
+ | add BASE, BASE, RA
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | checkfunc LFUNC:RB
+ | addi BASE, BASE, 8
+ | checkfail ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | evlddx LFUNC:RB, BASE, RA
+ | add RA, BASE, RA
+ | lwz TMP1, FRAME_PC(BASE)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | checkfunc LFUNC:RB
+ | addi RA, RA, 8
+ | checkfail ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andi. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
+ | lbz TMP3, LFUNC:RB->ffid
+ | xori TMP2, TMP1, FRAME_VARG
+ | cmplwi cr1, NARGS8:RC, 0
+ | bne >7
+ |1:
+ | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | li TMP2, 0
+ | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
+ | beq cr1, >3
+ |2:
+ | addi TMP3, TMP2, 8
+ | evlddx TMP0, RA, TMP2
+ | cmplw cr1, TMP3, NARGS8:RC
+ | evstddx TMP0, BASE, TMP2
+ | mr TMP2, TMP3
+ | bne cr1, <2
+ |3:
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
+ | beq >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lwz INS, -4(TMP1)
+ | decode_RA8 RA, INS
+ | sub TMP1, BASE, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | andi. TMP0, TMP2, FRAME_TYPEP
+ | bne <1 // Vararg frame below?
+ | sub BASE, BASE, TMP2 // Relocate BASE down.
+ | lwz TMP1, FRAME_PC(BASE)
+ | andi. TMP0, TMP1, FRAME_TYPE
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | subi RA, RA, 24 // evldd doesn't support neg. offsets.
+ | mr TMP2, BASE
+ | evlddx LFUNC:RB, BASE, RA
+ | add BASE, BASE, RA
+ | evldd TMP0, 8(BASE)
+ | evldd TMP1, 16(BASE)
+ | evstdd LFUNC:RB, 24(BASE) // Copy callable.
+ | checkfunc LFUNC:RB
+ | evstdd TMP0, 32(BASE) // Copy state.
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | evstdd TMP1, 40(BASE) // Copy control var.
+ | addi BASE, BASE, 32
+ | checkfail ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+ |.if JIT
+ | // NYI: add hotloop, record BC_ITERN.
+ |.endif
+ | add RA, BASE, RA
+ | lwz TAB:RB, -12(RA)
+ | lwz RC, -4(RA) // Get index from control var.
+ | lwz TMP0, TAB:RB->asize
+ | lwz TMP1, TAB:RB->array
+ | addi PC, PC, 4
+ |1: // Traverse array part.
+ | cmplw RC, TMP0
+ | slwi TMP3, RC, 3
+ | bge >5 // Index points after array part?
+ | evlddx TMP2, TMP1, TMP3
+ | checknil TMP2
+ | lwz INS, -4(PC)
+ | checkok >4
+ | efdcfsi TMP0, RC
+ | addi RC, RC, 1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ | evstdd TMP2, 8(RA)
+ | decode_RD4 TMP1, INS
+ | stw RC, -4(RA) // Update control var.
+ | add PC, TMP1, TMP3
+ | evstdd TMP0, 0(RA)
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | addi RC, RC, 1
+ | b <1
+ |
+ |5: // Traverse hash part.
+ | lwz TMP1, TAB:RB->hmask
+ | sub RC, RC, TMP0
+ | lwz TMP2, TAB:RB->node
+ |6:
+ | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
+ | slwi TMP3, RC, 5
+ | bgt <3
+ | slwi RB, RC, 3
+ | sub TMP3, TMP3, RB
+ | evlddx RB, TMP2, TMP3
+ | add NODE:TMP3, TMP2, TMP3
+ | checknil RB
+ | lwz INS, -4(PC)
+ | checkok >7
+ | evldd TMP3, NODE:TMP3->key
+ | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
+ | evstdd RB, 8(RA)
+ | add RC, RC, TMP0
+ | decode_RD4 TMP1, INS
+ | evstdd TMP3, 0(RA)
+ | addi RC, RC, 1
+ | add PC, TMP1, TMP2
+ | stw RC, -4(RA) // Update control var.
+ | b <3
+ |
+ |7: // Skip holes in hash part.
+ | addi RC, RC, 1
+ | b <6
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | add RA, BASE, RA
+ | li TMP2, -24
+ | evlddx CFUNC:TMP1, RA, TMP2
+ | lwz TMP2, -16(RA)
+ | lwz TMP3, -8(RA)
+ | evmergehi TMP0, CFUNC:TMP1, CFUNC:TMP1
+ | cmpwi cr0, TMP2, LJ_TTAB
+ | cmpwi cr1, TMP0, LJ_TFUNC
+ | cmpwi cr6, TMP3, LJ_TNIL
+ | bne cr1, >5
+ | lbz TMP1, CFUNC:TMP1->ffid
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
+ | cmpwi cr7, TMP1, FF_next_N
+ | srwi TMP0, RD, 1
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | add TMP3, PC, TMP0
+ | bne cr0, >5
+ | lus TMP1, 0xfffe
+ | ori TMP1, TMP1, 0x7fff
+ | stw ZERO, -4(RA) // Initialize control var.
+ | stw TMP1, -8(RA)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP0, BC_JMP
+ | li TMP1, BC_ITERC
+ | stb TMP0, -1(PC)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ | stb TMP1, 3(PC)
+ | b <1
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lwz TMP0, FRAME_PC(BASE)
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | addi RC, RC, FRAME_VARG
+ | add TMP2, RA, RB
+ | subi TMP3, BASE, 8 // TMP3 = vtop
+ | sub RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | cmplwi cr1, RB, 0
+ | sub. TMP1, TMP3, RC
+ | beq cr1, >5 // Copy all varargs?
+ | subi TMP2, TMP2, 16
+ | ble >2 // No vararg slots?
+ |1: // Copy vararg slots to destination slots.
+ | evldd TMP0, 0(RC)
+ | addi RC, RC, 8
+ | evstdd TMP0, 0(RA)
+ | cmplw RA, TMP2
+ | cmplw cr1, RC, TMP3
+ | bge >3 // All destination slots filled?
+ | addi RA, RA, 8
+ | blt cr1, <1 // More vararg slots?
+ |2: // Fill up remainder with nil.
+ | evstdd TISNIL, 0(RA)
+ | cmplw RA, TMP2
+ | addi RA, RA, 8
+ | blt <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lwz TMP0, L->maxstack
+ | li MULTRES, 8 // MULTRES = (0+1)*8
+ | ble <3 // No vararg slots?
+ | add TMP2, RA, TMP1
+ | cmplw TMP2, TMP0
+ | addi MULTRES, TMP1, 8
+ | bgt >7
+ |6:
+ | evldd TMP0, 0(RC)
+ | addi RC, RC, 8
+ | evstdd TMP0, 0(RA)
+ | cmplw RC, TMP3
+ | addi RA, RA, 8
+ | blt <6 // More vararg slots?
+ | b <3
+ |
+ |7: // Grow stack for varargs.
+ | mr CARG1, L
+ | stw RA, L->top
+ | sub SAVE0, RC, BASE // Need delta, because BASE may change.
+ | stw BASE, L->base
+ | sub RA, RA, BASE
+ | stw PC, SAVE_PC
+ | srwi CARG2, TMP1, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, SAVE0
+ | subi TMP3, BASE, 8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ |1:
+ | andi. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lwz INS, -4(PC)
+ | cmpwi RD, 8
+ | subi TMP2, BASE, 8
+ | subi RC, RD, 8
+ | decode_RB8 RB, INS
+ | beq >3
+ | li TMP1, 0
+ |2:
+ | addi TMP3, TMP1, 8
+ | evlddx TMP0, RA, TMP1
+ | cmpw TMP3, RC
+ | evstddx TMP0, TMP2, TMP1
+ | beq >3
+ | addi TMP1, TMP3, 8
+ | evlddx TMP0, RA, TMP3
+ | cmpw TMP1, RC
+ | evstddx TMP0, TMP2, TMP3
+ | bne <2
+ |3:
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | evstddx TISNIL, TMP2, TMP1
+ | b <5
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi. TMP2, TMP1, FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ | andi. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ | lwz INS, -4(PC)
+ | subi TMP2, BASE, 8
+ | decode_RB8 RB, INS
+ if (op == BC_RET1) {
+ | evldd TMP0, 0(RA)
+ | evstdd TMP0, 0(TMP2)
+ }
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | evstddx TISNIL, TMP2, TMP1
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | add RA, BASE, RA
+ | evldd TMP1, FORL_IDX*8(RA)
+ | evldd TMP3, FORL_STEP*8(RA)
+ | evldd TMP2, FORL_STOP*8(RA)
+ if (!vk) {
+ | evcmpgtu cr0, TMP1, TISNUM
+ | evcmpgtu cr7, TMP3, TISNUM
+ | evcmpgtu cr1, TMP2, TISNUM
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | blt ->vmeta_for
+ }
+ if (vk) {
+ | efdadd TMP1, TMP1, TMP3
+ | evstdd TMP1, FORL_IDX*8(RA)
+ }
+ | evcmpgts TMP3, TISNIL
+ | evstdd TMP1, FORL_EXT*8(RA)
+ | bge >2
+ | efdcmpgt TMP1, TMP2
+ |1:
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ | add RD, PC, RD
+ if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else {
+ | addis RD, RD, -(BCBIAS_J*4 >> 16)
+ }
+ }
+ if (op == BC_FORI) {
+ | iselgt PC, RD, PC
+ } else if (op == BC_IFORL) {
+ | iselgt PC, PC, RD
+ } else {
+ | ble =>BC_JLOOP
+ }
+ | ins_next
+ |2:
+ | efdcmpgt TMP2, TMP1
+ | b <1
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | evlddx TMP1, BASE, RA
+ | subi RA, RA, 8
+ | checknil TMP1
+ | checkok >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | NYI
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | evstddx TMP1, BASE, RA
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by trace recorder to determine scope/extent
+  | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | NYI
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | lbz TMP1, -4+PC2PROTO(numparams)(PC)
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw RA, TMP2
+ | slwi TMP1, TMP1, 3
+ | bgt ->vm_growstack_l
+ | ins_next1
+ |2:
+ | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
+ | ble >3
+ if (op == BC_JFUNCF) {
+ | NYI
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | evstddx TISNIL, BASE, NARGS8:RC
+ | addi NARGS8:RC, NARGS8:RC, 8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | add TMP1, BASE, RC
+ | add TMP0, RA, RC
+ | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
+ | addi TMP3, RC, 8+FRAME_VARG
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw TMP0, TMP2
+ | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
+ | bge ->vm_growstack_l
+ | lbz TMP2, -4+PC2PROTO(numparams)(PC)
+ | mr RA, BASE
+ | mr RC, TMP1
+ | ins_next1
+ | cmpwi TMP2, 0
+ | addi BASE, TMP1, 8
+ | beq >3
+ |1:
+ | cmplw RA, RC // Less args than parameters?
+ | evldd TMP0, 0(RA)
+ | bge >4
+ | evstdd TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
+ | addi RA, RA, 8
+ |2:
+ | addic. TMP2, TMP2, -1
+ | evstdd TMP0, 8(TMP1)
+ | addi TMP1, TMP1, 8
+ | bne <1
+ |3:
+ | ins_next2
+ |
+ |4: // Clear missing parameters.
+ | evmr TMP0, TISNIL
+ | b <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lwz TMP3, CFUNC:RB->f
+ } else {
+ | lwz TMP3, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | add TMP1, RA, NARGS8:RC
+ | lwz TMP2, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | stw BASE, L->base
+ | cmplw TMP1, TMP2
+ | stw RC, L->top
+ | li_vmstate C
+ | mtctr TMP3
+ if (op == BC_FUNCCW) {
+ | lwz CARG2, CFUNC:RB->f
+ }
+ | mr CARG1, L
+ | bgt ->vm_growstack_c // Need to grow stack.
+ | st_vmstate
+ | bctrl // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | lwz TMP1, L->top
+ | slwi RD, CRET1, 3
+ | lwz BASE, L->base
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | sub RA, TMP1, RD // RA = L->top - nresults*8
+ | st_vmstate
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
+ (int)ctx->codesz, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
+ 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .LASFDE1-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.sleb128 37\n",
+ (int)ctx->codesz, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte 5\n\t.uleb128 %d\n\t.uleb128 %d\n",
+ 0x80+i, 1+2*(31-i), 1200+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE1:\n\n");
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/3rdparty/lua/src/vm_x86.dasc b/3rdparty/lua/src/vm_x86.dasc
index 6cdb8cb..f25dfd3 100644
--- a/3rdparty/lua/src/vm_x86.dasc
+++ b/3rdparty/lua/src/vm_x86.dasc
@@ -1,6 +1,6 @@
|// Low-level VM code for x86 CPUs.
|// Bytecode interpreter, fast functions and helper functions.
-|// Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
+|// Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
|
|.if P64
|.arch x64
@@ -1617,7 +1617,7 @@ static void build_subroutines(BuildCtx *ctx)
| mov RD, 1+3
| jmp ->fff_res
|
- |.ffunc_2 ipairs_aux
+ |.ffunc_1 ipairs_aux
| cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
| cmp dword [BASE+12], LJ_TISNUM
|.if DUALNUM
@@ -2198,7 +2198,7 @@ static void build_subroutines(BuildCtx *ctx)
|.endif
|
|.ffunc_nnr math_fmod
- |1: ; fprem; fnstsw ax; and ax, 0x400; jnz <1
+ |1: ; fprem; fnstsw ax; sahf; jp <1
| fpop1
| jmp ->fff_resn
|
@@ -2564,8 +2564,8 @@ static void build_subroutines(BuildCtx *ctx)
|
|.define TOBIT_BIAS, 0x59c00000 // 2^52 + 2^51 (float, not double!).
|
- |.macro .ffunc_bit, name, kind, fdef
- | fdef name
+ |.macro .ffunc_bit, name, kind
+ | .ffunc_1 name
|.if kind == 2
|.if SSE
| sseconst_tobit xmm1, RBa
@@ -2608,10 +2608,6 @@ static void build_subroutines(BuildCtx *ctx)
|2:
|.endmacro
|
- |.macro .ffunc_bit, name, kind
- | .ffunc_bit name, kind, .ffunc_1
- |.endmacro
- |
|.ffunc_bit bit_tobit, 0
|.if DUALNUM or SSE
|.if not SSE
@@ -2685,7 +2681,7 @@ static void build_subroutines(BuildCtx *ctx)
|
|.macro .ffunc_bit_sh, name, ins
|.if DUALNUM
- | .ffunc_bit name, 1, .ffunc_2
+ | .ffunc_bit name, 1
| // Note: no inline conversion from number for 2nd argument!
| cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback
| mov RA, dword [BASE+8]
@@ -4656,7 +4652,7 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
|
|2: // Upvalue is black. Check if new value is collectable and white.
| sub RD, LJ_TISGCV
- | cmp RD, LJ_TNUMX - LJ_TISGCV // tvisgcv(v)
+ | cmp RD, LJ_TISNUM - LJ_TISGCV // tvisgcv(v)
| jbe <1
| test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v)
| jz <1
@@ -5324,7 +5320,8 @@ static void build_ins(BuildCtx *ctx, BCOp op, int defop)
| jnz <4
| movzx RA, PC_RA
| not RAa
- | mov LFUNC:KBASE, [BASE+RA*8-8] // Need to prepare KBASE.
+ | lea RA, [BASE+RA*8]
+ | mov LFUNC:KBASE, [RA-8] // Need to prepare KBASE.
| mov KBASE, LFUNC:KBASE->pc
| mov KBASE, [KBASE+PC2PROTO(k)]
| jmp <4
@@ -6347,12 +6344,12 @@ static void emit_asm_debug(BuildCtx *ctx)
#if LJ_64
"\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
"\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
- "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
"\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
#else
"\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */
"\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O)*/
- "\t.byte 0xd\n\t.byte 0x4\n" /* def_cfa_register ebp */
+ "\t.byte 0xd\n\t.uleb128 0x4\n" /* def_cfa_register ebp */
"\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */
#endif
"\t.align " BSZPTR "\n"
diff --git a/3rdparty/lua/src/xedkbuild.bat b/3rdparty/lua/src/xedkbuild.bat
index 240ec87..375f195 100644
--- a/3rdparty/lua/src/xedkbuild.bat
+++ b/3rdparty/lua/src/xedkbuild.bat
@@ -56,11 +56,11 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
@rem ---- Cross compiler ----
@set LJCOMPILE="%XEDK%\bin\win32\cl" /nologo /c /MT /O2 /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /DNDEBUG /D_XBOX /D_LIB /DLUAJIT_USE_SYSMALLOC
@set LJLIB="%XEDK%\bin\win32\lib" /nologo
-@set "INCLUDE=%XEDK%\include\xbox"
+@set INCLUDE="%XEDK%\include\xbox"
@if "%1" neq "debug" goto :NODEBUG
@shift
-@set "LJCOMPILE=%LJCOMPILE% /Zi"
+@set LJCOMPILE="%LJCOMPILE%" /Zi
:NODEBUG
@if "%1"=="amalg" goto :AMALG
%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
diff --git a/3rdparty/sqlite3/shell.c b/3rdparty/sqlite3/shell.c
index f790871..480ec5b 100644
--- a/3rdparty/sqlite3/shell.c
+++ b/3rdparty/sqlite3/shell.c
@@ -18,20 +18,6 @@
#endif
/*
-** If requested, include the SQLite compiler options file for MSVC.
-*/
-#if defined(INCLUDE_MSVC_H)
-#include "msvc.h"
-#endif
-
-/*
-** No support for loadable extensions in VxWorks.
-*/
-#if (defined(__RTP__) || defined(_WRS_KERNEL)) && !SQLITE_OMIT_LOAD_EXTENSION
-# define SQLITE_OMIT_LOAD_EXTENSION 1
-#endif
-
-/*
** Enable large-file support for fopen() and friends on unix.
*/
#ifndef SQLITE_DISABLE_LFS
@@ -47,9 +33,6 @@
#include <stdio.h>
#include <assert.h>
#include "sqlite3.h"
-#if SQLITE_USER_AUTHENTICATION
-# include "sqlite3userauth.h"
-#endif
#include <ctype.h>
#include <stdarg.h>
@@ -62,65 +45,37 @@
# include <sys/types.h>
#endif
-#if HAVE_READLINE
+#ifdef HAVE_EDITLINE
+# include <editline/editline.h>
+#endif
+#if defined(HAVE_READLINE) && HAVE_READLINE==1
# include <readline/readline.h>
# include <readline/history.h>
#endif
-
-#if HAVE_EDITLINE
-# include <editline/readline.h>
-#endif
-
-#if HAVE_EDITLINE || HAVE_READLINE
-
-# define shell_add_history(X) add_history(X)
-# define shell_read_history(X) read_history(X)
-# define shell_write_history(X) write_history(X)
-# define shell_stifle_history(X) stifle_history(X)
-# define shell_readline(X) readline(X)
-
-#elif HAVE_LINENOISE
-
-# include "linenoise.h"
-# define shell_add_history(X) linenoiseHistoryAdd(X)
-# define shell_read_history(X) linenoiseHistoryLoad(X)
-# define shell_write_history(X) linenoiseHistorySave(X)
-# define shell_stifle_history(X) linenoiseHistorySetMaxLen(X)
-# define shell_readline(X) linenoise(X)
-
-#else
-
-# define shell_read_history(X)
-# define shell_write_history(X)
-# define shell_stifle_history(X)
-
-# define SHELL_USE_LOCAL_GETLINE 1
+#if !defined(HAVE_EDITLINE) && (!defined(HAVE_READLINE) || HAVE_READLINE!=1)
+# define add_history(X)
+# define read_history(X)
+# define write_history(X)
+# define stifle_history(X)
#endif
-
#if defined(_WIN32) || defined(WIN32)
# include <io.h>
-# include <fcntl.h>
-# define isatty(h) _isatty(h)
-# ifndef access
-# define access(f,m) _access((f),(m))
-# endif
-# undef popen
-# define popen _popen
-# undef pclose
-# define pclose _pclose
+#define isatty(h) _isatty(h)
+#define access(f,m) _access((f),(m))
+#undef popen
+#define popen _popen
+#undef pclose
+#define pclose _pclose
#else
- /* Make sure isatty() has a prototype. */
- extern int isatty(int);
+/* Make sure isatty() has a prototype.
+*/
+extern int isatty(int);
-# if !defined(__RTP__) && !defined(_WRS_KERNEL)
- /* popen and pclose are not C89 functions and so are
- ** sometimes omitted from the <stdio.h> header */
- extern FILE *popen(const char*,const char*);
- extern int pclose(FILE*);
-# else
-# define SQLITE_OMIT_POPEN 1
-# endif
+/* popen and pclose are not C89 functions and so are sometimes omitted from
+** the <stdio.h> header */
+extern FILE *popen(const char*,const char*);
+extern int pclose(FILE*);
#endif
#if defined(_WIN32_WCE)
@@ -136,26 +91,6 @@
#define IsDigit(X) isdigit((unsigned char)X)
#define ToLower(X) (char)tolower((unsigned char)X)
-/* On Windows, we normally run with output mode of TEXT so that \n characters
-** are automatically translated into \r\n. However, this behavior needs
-** to be disabled in some cases (ex: when generating CSV output and when
-** rendering quoted strings that contain \n characters). The following
-** routines take care of that.
-*/
-#if defined(_WIN32) || defined(WIN32)
-static void setBinaryMode(FILE *out){
- fflush(out);
- _setmode(_fileno(out), _O_BINARY);
-}
-static void setTextMode(FILE *out){
- fflush(out);
- _setmode(_fileno(out), _O_TEXT);
-}
-#else
-# define setBinaryMode(X)
-# define setTextMode(X)
-#endif
-
/* True if the timer is enabled */
static int enableTimer = 0;
@@ -175,19 +110,11 @@ static sqlite3_int64 timeOfDay(void){
return t;
}
-#if !defined(_WIN32) && !defined(WIN32) && !defined(__minux)
+#if !defined(_WIN32) && !defined(WIN32) && !defined(_WRS_KERNEL) \
+ && !defined(__minux)
#include <sys/time.h>
#include <sys/resource.h>
-/* VxWorks does not support getrusage() as far as we can determine */
-#if defined(_WRS_KERNEL) || defined(__RTP__)
-struct rusage {
- struct timeval ru_utime; /* user CPU time used */
- struct timeval ru_stime; /* system CPU time used */
-};
-#define getrusage(A,B) memset(B,0,sizeof(*B))
-#endif
-
/* Saved resource information for the beginning of an operation */
static struct rusage sBegin; /* CPU time at start */
static sqlite3_int64 iBegin; /* Wall-clock time at start */
@@ -213,8 +140,8 @@ static double timeDiff(struct timeval *pStart, struct timeval *pEnd){
*/
static void endTimer(void){
if( enableTimer ){
- sqlite3_int64 iEnd = timeOfDay();
struct rusage sEnd;
+ sqlite3_int64 iEnd = timeOfDay();
getrusage(RUSAGE_SELF, &sEnd);
printf("Run Time: real %.3f user %f sys %f\n",
(iEnd - iBegin)*0.001,
@@ -236,8 +163,7 @@ static HANDLE hProcess;
static FILETIME ftKernelBegin;
static FILETIME ftUserBegin;
static sqlite3_int64 ftWallBegin;
-typedef BOOL (WINAPI *GETPROCTIMES)(HANDLE, LPFILETIME, LPFILETIME,
- LPFILETIME, LPFILETIME);
+typedef BOOL (WINAPI *GETPROCTIMES)(HANDLE, LPFILETIME, LPFILETIME, LPFILETIME, LPFILETIME);
static GETPROCTIMES getProcessTimesAddr = NULL;
/*
@@ -248,16 +174,15 @@ static int hasTimer(void){
if( getProcessTimesAddr ){
return 1;
} else {
- /* GetProcessTimes() isn't supported in WIN95 and some other Windows
- ** versions. See if the version we are running on has it, and if it
- ** does, save off a pointer to it and the current process handle.
+ /* GetProcessTimes() isn't supported in WIN95 and some other Windows versions.
+ ** See if the version we are running on has it, and if it does, save off
+ ** a pointer to it and the current process handle.
*/
hProcess = GetCurrentProcess();
if( hProcess ){
HINSTANCE hinstLib = LoadLibrary(TEXT("Kernel32.dll"));
if( NULL != hinstLib ){
- getProcessTimesAddr =
- (GETPROCTIMES) GetProcAddress(hinstLib, "GetProcessTimes");
+ getProcessTimesAddr = (GETPROCTIMES) GetProcAddress(hinstLib, "GetProcessTimes");
if( NULL != getProcessTimesAddr ){
return 1;
}
@@ -274,8 +199,7 @@ static int hasTimer(void){
static void beginTimer(void){
if( enableTimer && getProcessTimesAddr ){
FILETIME ftCreation, ftExit;
- getProcessTimesAddr(hProcess,&ftCreation,&ftExit,
- &ftKernelBegin,&ftUserBegin);
+ getProcessTimesAddr(hProcess, &ftCreation, &ftExit, &ftKernelBegin, &ftUserBegin);
ftWallBegin = timeOfDay();
}
}
@@ -294,7 +218,7 @@ static void endTimer(void){
if( enableTimer && getProcessTimesAddr){
FILETIME ftCreation, ftExit, ftKernelEnd, ftUserEnd;
sqlite3_int64 ftWallEnd = timeOfDay();
- getProcessTimesAddr(hProcess,&ftCreation,&ftExit,&ftKernelEnd,&ftUserEnd);
+ getProcessTimesAddr(hProcess, &ftCreation, &ftExit, &ftKernelEnd, &ftUserEnd);
printf("Run Time: real %.3f user %f sys %f\n",
(ftWallEnd - ftWallBegin)*0.001,
timeDiff(&ftUserBegin, &ftUserEnd),
@@ -334,7 +258,7 @@ static int stdin_is_interactive = 1;
** to this database a static variable so that it can be accessed
** by the SIGINT handler to interrupt database processing.
*/
-static sqlite3 *globalDb = 0;
+static sqlite3 *db = 0;
/*
** True if an interrupt (Control-C) has been received.
@@ -368,7 +292,7 @@ static FILE *iotrace = 0;
** is written to iotrace.
*/
#ifdef SQLITE_ENABLE_IOTRACE
-static void SQLITE_CDECL iotracePrintf(const char *zFormat, ...){
+static void iotracePrintf(const char *zFormat, ...){
va_list ap;
char *z;
if( iotrace==0 ) return;
@@ -489,44 +413,35 @@ static char *one_input_line(FILE *in, char *zPrior, int isContinuation){
zResult = local_getline(zPrior, in);
}else{
zPrompt = isContinuation ? continuePrompt : mainPrompt;
-#if SHELL_USE_LOCAL_GETLINE
+#if defined(HAVE_READLINE) && HAVE_READLINE==1
+ free(zPrior);
+ zResult = readline(zPrompt);
+ if( zResult && *zResult ) add_history(zResult);
+#else
printf("%s", zPrompt);
fflush(stdout);
zResult = local_getline(zPrior, stdin);
-#else
- free(zPrior);
- zResult = shell_readline(zPrompt);
- if( zResult && *zResult ) shell_add_history(zResult);
#endif
}
return zResult;
}
-/*
-** Shell output mode information from before ".explain on",
-** saved so that it can be restored by ".explain off"
-*/
-typedef struct SavedModeInfo SavedModeInfo;
-struct SavedModeInfo {
- int valid; /* Is there legit data in here? */
- int mode; /* Mode prior to ".explain on" */
- int showHeader; /* The ".header" setting prior to ".explain on" */
- int colWidth[100]; /* Column widths prior to ".explain on" */
+struct previous_mode_data {
+ int valid; /* Is there legit data in here? */
+ int mode;
+ int showHeader;
+ int colWidth[100];
};
/*
-** State information about the database connection is contained in an
-** instance of the following structure.
+** A pointer to an instance of this structure is passed from
+** the main program to the callback. This is used to communicate
+** state and mode information.
*/
-typedef struct ShellState ShellState;
-struct ShellState {
+struct callback_data {
sqlite3 *db; /* The database */
int echoOn; /* True to echo input commands */
- int autoEQP; /* Run EXPLAIN QUERY PLAN prior to each SQL stmt */
int statsOn; /* True to display memory stats before each finalize */
- int scanstatsOn; /* True to display scan stats before each finalize */
- int backslashOn; /* Resolve C-style \x escapes in SQL input text */
- int outCount; /* Revert to stdout when reaching zero */
int cnt; /* Number of records displayed so far */
FILE *out; /* Write results here */
FILE *traceOut; /* Output for sqlite3_trace() */
@@ -534,15 +449,15 @@ struct ShellState {
int mode; /* An output mode setting */
int writableSchema; /* True if PRAGMA writable_schema=ON */
int showHeader; /* True to show column names in List or Column mode */
- unsigned shellFlgs; /* Various flags */
char *zDestTable; /* Name of destination table when MODE_Insert */
- char colSeparator[20]; /* Column separator character for several modes */
- char rowSeparator[20]; /* Row separator character for MODE_Ascii */
+ char separator[20]; /* Separator character for MODE_List */
int colWidth[100]; /* Requested width of each column when in column mode*/
int actualWidth[100]; /* Actual width of each column */
- char nullValue[20]; /* The text to print when a NULL comes back from
+ char nullvalue[20]; /* The text to print when a NULL comes back from
** the database */
- SavedModeInfo normalMode;/* Holds the mode just before .explain ON */
+ struct previous_mode_data explainPrev;
+ /* Holds the mode information just before
+ ** .explain ON */
char outfile[FILENAME_MAX]; /* Filename for *out */
const char *zDbFilename; /* name of the database file */
char *zFreeOnClose; /* Filename to free when closing */
@@ -555,13 +470,6 @@ struct ShellState {
};
/*
-** These are the allowed shellFlgs values
-*/
-#define SHFLG_Scratch 0x00001 /* The --scratch option is used */
-#define SHFLG_Pagecache 0x00002 /* The --pagecache option is used */
-#define SHFLG_Lookaside 0x00004 /* Lookaside memory is used */
-
-/*
** These are the allowed modes.
*/
#define MODE_Line 0 /* One column per line. Blank line between records */
@@ -573,7 +481,6 @@ struct ShellState {
#define MODE_Tcl 6 /* Generate ANSI-C or TCL quoted elements */
#define MODE_Csv 7 /* Quote strings, numbers are plain */
#define MODE_Explain 8 /* Like MODE_Column, but do not truncate data */
-#define MODE_Ascii 9 /* Use ASCII unit and record separators (0x1F/0x1E) */
static const char *modeDescr[] = {
"line",
@@ -585,23 +492,9 @@ static const char *modeDescr[] = {
"tcl",
"csv",
"explain",
- "ascii",
};
/*
-** These are the column/row/line separators used by the various
-** import/export modes.
-*/
-#define SEP_Column "|"
-#define SEP_Row "\n"
-#define SEP_Tab "\t"
-#define SEP_Space " "
-#define SEP_Comma ","
-#define SEP_CrLf "\r\n"
-#define SEP_Unit "\x1F"
-#define SEP_Record "\x1E"
-
-/*
** Number of elements in an array
*/
#define ArraySize(X) (int)(sizeof(X)/sizeof(X[0]))
@@ -620,7 +513,7 @@ static int strlen30(const char *z){
** A callback for the sqlite3_log() interface.
*/
static void shellLog(void *pArg, int iErrCode, const char *zMsg){
- ShellState *p = (ShellState*)pArg;
+ struct callback_data *p = (struct callback_data*)pArg;
if( p->pLog==0 ) return;
fprintf(p->pLog, "(%d) %s\n", iErrCode, zMsg);
fflush(p->pLog);
@@ -643,7 +536,6 @@ static void output_hex_blob(FILE *out, const void *pBlob, int nBlob){
static void output_quoted_string(FILE *out, const char *z){
int i;
int nSingle = 0;
- setBinaryMode(out);
for(i=0; z[i]; i++){
if( z[i]=='\'' ) nSingle++;
}
@@ -666,7 +558,6 @@ static void output_quoted_string(FILE *out, const char *z){
}
fprintf(out,"'");
}
- setTextMode(out);
}
/*
@@ -706,7 +597,6 @@ static void output_c_string(FILE *out, const char *z){
*/
static void output_html_string(FILE *out, const char *z){
int i;
- if( z==0 ) z = "";
while( *z ){
for(i=0; z[i]
&& z[i]!='<'
@@ -759,22 +649,21 @@ static const char needCsvQuote[] = {
};
/*
-** Output a single term of CSV. Actually, p->colSeparator is used for
-** the separator, which may or may not be a comma. p->nullValue is
-** the null value. Strings are quoted if necessary. The separator
-** is only issued if bSep is true.
+** Output a single term of CSV. Actually, p->separator is used for
+** the separator, which may or may not be a comma. p->nullvalue is
+** the null value. Strings are quoted if necessary.
*/
-static void output_csv(ShellState *p, const char *z, int bSep){
+static void output_csv(struct callback_data *p, const char *z, int bSep){
FILE *out = p->out;
if( z==0 ){
- fprintf(out,"%s",p->nullValue);
+ fprintf(out,"%s",p->nullvalue);
}else{
int i;
- int nSep = strlen30(p->colSeparator);
+ int nSep = strlen30(p->separator);
for(i=0; z[i]; i++){
if( needCsvQuote[((unsigned char*)z)[i]]
- || (z[i]==p->colSeparator[0] &&
- (nSep==1 || memcmp(z, p->colSeparator, nSep)==0)) ){
+ || (z[i]==p->separator[0] &&
+ (nSep==1 || memcmp(z, p->separator, nSep)==0)) ){
i = 0;
break;
}
@@ -791,7 +680,7 @@ static void output_csv(ShellState *p, const char *z, int bSep){
}
}
if( bSep ){
- fprintf(p->out, "%s", p->colSeparator);
+ fprintf(p->out, "%s", p->separator);
}
}
@@ -801,9 +690,8 @@ static void output_csv(ShellState *p, const char *z, int bSep){
*/
static void interrupt_handler(int NotUsed){
UNUSED_PARAMETER(NotUsed);
- seenInterrupt++;
- if( seenInterrupt>2 ) exit(1);
- if( globalDb ) sqlite3_interrupt(globalDb);
+ seenInterrupt = 1;
+ if( db ) sqlite3_interrupt(db);
}
#endif
@@ -811,15 +699,9 @@ static void interrupt_handler(int NotUsed){
** This is the callback routine that the shell
** invokes for each row of a query result.
*/
-static int shell_callback(
- void *pArg,
- int nArg, /* Number of result columns */
- char **azArg, /* Text of each result column */
- char **azCol, /* Column names */
- int *aiType /* Column types */
-){
+static int shell_callback(void *pArg, int nArg, char **azArg, char **azCol, int *aiType){
int i;
- ShellState *p = (ShellState*)pArg;
+ struct callback_data *p = (struct callback_data*)pArg;
switch( p->mode ){
case MODE_Line: {
@@ -829,10 +711,10 @@ static int shell_callback(
int len = strlen30(azCol[i] ? azCol[i] : "");
if( len>w ) w = len;
}
- if( p->cnt++>0 ) fprintf(p->out, "%s", p->rowSeparator);
+ if( p->cnt++>0 ) fprintf(p->out,"\n");
for(i=0; i<nArg; i++){
- fprintf(p->out,"%*s = %s%s", w, azCol[i],
- azArg[i] ? azArg[i] : p->nullValue, p->rowSeparator);
+ fprintf(p->out,"%*s = %s\n", w, azCol[i],
+ azArg[i] ? azArg[i] : p->nullvalue);
}
break;
}
@@ -849,7 +731,7 @@ static int shell_callback(
if( w==0 ){
w = strlen30(azCol[i] ? azCol[i] : "");
if( w<10 ) w = 10;
- n = strlen30(azArg && azArg[i] ? azArg[i] : p->nullValue);
+ n = strlen30(azArg && azArg[i] ? azArg[i] : p->nullvalue);
if( w<n ) w = n;
}
if( i<ArraySize(p->actualWidth) ){
@@ -857,11 +739,9 @@ static int shell_callback(
}
if( p->showHeader ){
if( w<0 ){
- fprintf(p->out,"%*.*s%s",-w,-w,azCol[i],
- i==nArg-1 ? p->rowSeparator : " ");
+ fprintf(p->out,"%*.*s%s",-w,-w,azCol[i], i==nArg-1 ? "\n": " ");
}else{
- fprintf(p->out,"%-*.*s%s",w,w,azCol[i],
- i==nArg-1 ? p->rowSeparator : " ");
+ fprintf(p->out,"%-*.*s%s",w,w,azCol[i], i==nArg-1 ? "\n": " ");
}
}
}
@@ -876,7 +756,7 @@ static int shell_callback(
}
fprintf(p->out,"%-*.*s%s",w,w,"-----------------------------------"
"----------------------------------------------------------",
- i==nArg-1 ? p->rowSeparator : " ");
+ i==nArg-1 ? "\n": " ");
}
}
}
@@ -899,12 +779,10 @@ static int shell_callback(
}
if( w<0 ){
fprintf(p->out,"%*.*s%s",-w,-w,
- azArg[i] ? azArg[i] : p->nullValue,
- i==nArg-1 ? p->rowSeparator : " ");
+ azArg[i] ? azArg[i] : p->nullvalue, i==nArg-1 ? "\n": " ");
}else{
fprintf(p->out,"%-*.*s%s",w,w,
- azArg[i] ? azArg[i] : p->nullValue,
- i==nArg-1 ? p->rowSeparator : " ");
+ azArg[i] ? azArg[i] : p->nullvalue, i==nArg-1 ? "\n": " ");
}
}
break;
@@ -913,21 +791,20 @@ static int shell_callback(
case MODE_List: {
if( p->cnt++==0 && p->showHeader ){
for(i=0; i<nArg; i++){
- fprintf(p->out,"%s%s",azCol[i],
- i==nArg-1 ? p->rowSeparator : p->colSeparator);
+ fprintf(p->out,"%s%s",azCol[i], i==nArg-1 ? "\n" : p->separator);
}
}
if( azArg==0 ) break;
for(i=0; i<nArg; i++){
char *z = azArg[i];
- if( z==0 ) z = p->nullValue;
+ if( z==0 ) z = p->nullvalue;
fprintf(p->out, "%s", z);
if( i<nArg-1 ){
- fprintf(p->out, "%s", p->colSeparator);
+ fprintf(p->out, "%s", p->separator);
}else if( p->mode==MODE_Semi ){
- fprintf(p->out, ";%s", p->rowSeparator);
+ fprintf(p->out, ";\n");
}else{
- fprintf(p->out, "%s", p->rowSeparator);
+ fprintf(p->out, "\n");
}
}
break;
@@ -946,7 +823,7 @@ static int shell_callback(
fprintf(p->out,"<TR>");
for(i=0; i<nArg; i++){
fprintf(p->out,"<TD>");
- output_html_string(p->out, azArg[i] ? azArg[i] : p->nullValue);
+ output_html_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
fprintf(p->out,"</TD>\n");
}
fprintf(p->out,"</TR>\n");
@@ -956,48 +833,36 @@ static int shell_callback(
if( p->cnt++==0 && p->showHeader ){
for(i=0; i<nArg; i++){
output_c_string(p->out,azCol[i] ? azCol[i] : "");
- if(i<nArg-1) fprintf(p->out, "%s", p->colSeparator);
+ if(i<nArg-1) fprintf(p->out, "%s", p->separator);
}
- fprintf(p->out, "%s", p->rowSeparator);
+ fprintf(p->out,"\n");
}
if( azArg==0 ) break;
for(i=0; i<nArg; i++){
- output_c_string(p->out, azArg[i] ? azArg[i] : p->nullValue);
- if(i<nArg-1) fprintf(p->out, "%s", p->colSeparator);
+ output_c_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
+ if(i<nArg-1) fprintf(p->out, "%s", p->separator);
}
- fprintf(p->out, "%s", p->rowSeparator);
+ fprintf(p->out,"\n");
break;
}
case MODE_Csv: {
- setBinaryMode(p->out);
if( p->cnt++==0 && p->showHeader ){
for(i=0; i<nArg; i++){
output_csv(p, azCol[i] ? azCol[i] : "", i<nArg-1);
}
- fprintf(p->out, "%s", p->rowSeparator);
+ fprintf(p->out,"\n");
}
- if( nArg>0 ){
- for(i=0; i<nArg; i++){
- output_csv(p, azArg[i], i<nArg-1);
- }
- fprintf(p->out, "%s", p->rowSeparator);
+ if( azArg==0 ) break;
+ for(i=0; i<nArg; i++){
+ output_csv(p, azArg[i], i<nArg-1);
}
- setTextMode(p->out);
+ fprintf(p->out,"\n");
break;
}
case MODE_Insert: {
p->cnt++;
if( azArg==0 ) break;
- fprintf(p->out,"INSERT INTO %s",p->zDestTable);
- if( p->showHeader ){
- fprintf(p->out,"(");
- for(i=0; i<nArg; i++){
- char *zSep = i>0 ? ",": "";
- fprintf(p->out, "%s%s", zSep, azCol[i]);
- }
- fprintf(p->out,")");
- }
- fprintf(p->out," VALUES(");
+ fprintf(p->out,"INSERT INTO %s VALUES(",p->zDestTable);
for(i=0; i<nArg; i++){
char *zSep = i>0 ? ",": "";
if( (azArg[i]==0) || (aiType && aiType[i]==SQLITE_NULL) ){
@@ -1005,8 +870,7 @@ static int shell_callback(
}else if( aiType && aiType[i]==SQLITE_TEXT ){
if( zSep[0] ) fprintf(p->out,"%s",zSep);
output_quoted_string(p->out, azArg[i]);
- }else if( aiType && (aiType[i]==SQLITE_INTEGER
- || aiType[i]==SQLITE_FLOAT) ){
+ }else if( aiType && (aiType[i]==SQLITE_INTEGER || aiType[i]==SQLITE_FLOAT) ){
fprintf(p->out,"%s%s",zSep, azArg[i]);
}else if( aiType && aiType[i]==SQLITE_BLOB && p->pStmt ){
const void *pBlob = sqlite3_column_blob(p->pStmt, i);
@@ -1023,22 +887,6 @@ static int shell_callback(
fprintf(p->out,");\n");
break;
}
- case MODE_Ascii: {
- if( p->cnt++==0 && p->showHeader ){
- for(i=0; i<nArg; i++){
- if( i>0 ) fprintf(p->out, "%s", p->colSeparator);
- fprintf(p->out,"%s",azCol[i] ? azCol[i] : "");
- }
- fprintf(p->out, "%s", p->rowSeparator);
- }
- if( azArg==0 ) break;
- for(i=0; i<nArg; i++){
- if( i>0 ) fprintf(p->out, "%s", p->colSeparator);
- fprintf(p->out,"%s",azArg[i] ? azArg[i] : p->nullValue);
- }
- fprintf(p->out, "%s", p->rowSeparator);
- break;
- }
}
return 0;
}
@@ -1053,11 +901,11 @@ static int callback(void *pArg, int nArg, char **azArg, char **azCol){
}
/*
-** Set the destination table field of the ShellState structure to
+** Set the destination table field of the callback_data structure to
** the name of the table given. Escape any quote characters in the
** table name.
*/
-static void set_table_name(ShellState *p, const char *zName){
+static void set_table_name(struct callback_data *p, const char *zName){
int i, n;
int needQuote;
char *z;
@@ -1147,7 +995,7 @@ static char *appendText(char *zIn, char const *zAppend, char quote){
** won't consume the semicolon terminator.
*/
static int run_table_dump_query(
- ShellState *p, /* Query context */
+ struct callback_data *p, /* Query context */
const char *zSelect, /* SELECT statement to extract content */
const char *zFirstRow /* Print before first row, if not NULL */
){
@@ -1156,7 +1004,7 @@ static int run_table_dump_query(
int nResult;
int i;
const char *z;
- rc = sqlite3_prepare_v2(p->db, zSelect, -1, &pSelect, 0);
+ rc = sqlite3_prepare(p->db, zSelect, -1, &pSelect, 0);
if( rc!=SQLITE_OK || !pSelect ){
fprintf(p->out, "/**** ERROR: (%d) %s *****/\n", rc, sqlite3_errmsg(p->db));
if( (rc&0xff)!=SQLITE_CORRUPT ) p->nErr++;
@@ -1198,7 +1046,7 @@ static char *save_err_msg(
sqlite3 *db /* Database to query */
){
int nErrMsg = 1+strlen30(sqlite3_errmsg(db));
- char *zErrMsg = sqlite3_malloc64(nErrMsg);
+ char *zErrMsg = sqlite3_malloc(nErrMsg);
if( zErrMsg ){
memcpy(zErrMsg, sqlite3_errmsg(db), nErrMsg);
}
@@ -1210,7 +1058,7 @@ static char *save_err_msg(
*/
static int display_stats(
sqlite3 *db, /* Database to query */
- ShellState *pArg, /* Pointer to ShellState */
+ struct callback_data *pArg, /* Pointer to struct callback_data */
int bReset /* True to reset the stats */
){
int iCur;
@@ -1220,77 +1068,57 @@ static int display_stats(
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Memory Used: %d (max %d) bytes\n",
- iCur, iHiwtr);
+ fprintf(pArg->out, "Memory Used: %d (max %d) bytes\n", iCur, iHiwtr);
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_MALLOC_COUNT, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Number of Outstanding Allocations: %d (max %d)\n",
- iCur, iHiwtr);
- if( pArg->shellFlgs & SHFLG_Pagecache ){
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Number of Pcache Pages Used: %d (max %d) pages\n",
- iCur, iHiwtr);
- }
+ fprintf(pArg->out, "Number of Outstanding Allocations: %d (max %d)\n", iCur, iHiwtr);
+/*
+** Not currently used by the CLI.
+** iHiwtr = iCur = -1;
+** sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset);
+** fprintf(pArg->out, "Number of Pcache Pages Used: %d (max %d) pages\n", iCur, iHiwtr);
+*/
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Number of Pcache Overflow Bytes: %d (max %d) bytes\n",
- iCur, iHiwtr);
- if( pArg->shellFlgs & SHFLG_Scratch ){
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Number of Scratch Allocations Used: %d (max %d)\n",
- iCur, iHiwtr);
- }
+ fprintf(pArg->out, "Number of Pcache Overflow Bytes: %d (max %d) bytes\n", iCur, iHiwtr);
+/*
+** Not currently used by the CLI.
+** iHiwtr = iCur = -1;
+** sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset);
+** fprintf(pArg->out, "Number of Scratch Allocations Used: %d (max %d)\n", iCur, iHiwtr);
+*/
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_SCRATCH_OVERFLOW, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Number of Scratch Overflow Bytes: %d (max %d) bytes\n",
- iCur, iHiwtr);
+ fprintf(pArg->out, "Number of Scratch Overflow Bytes: %d (max %d) bytes\n", iCur, iHiwtr);
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Largest Allocation: %d bytes\n",
- iHiwtr);
+ fprintf(pArg->out, "Largest Allocation: %d bytes\n", iHiwtr);
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_PAGECACHE_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Largest Pcache Allocation: %d bytes\n",
- iHiwtr);
+ fprintf(pArg->out, "Largest Pcache Allocation: %d bytes\n", iHiwtr);
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_SCRATCH_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Largest Scratch Allocation: %d bytes\n",
- iHiwtr);
+ fprintf(pArg->out, "Largest Scratch Allocation: %d bytes\n", iHiwtr);
#ifdef YYTRACKMAXSTACKDEPTH
iHiwtr = iCur = -1;
sqlite3_status(SQLITE_STATUS_PARSER_STACK, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Deepest Parser Stack: %d (max %d)\n",
- iCur, iHiwtr);
+ fprintf(pArg->out, "Deepest Parser Stack: %d (max %d)\n", iCur, iHiwtr);
#endif
}
if( pArg && pArg->out && db ){
- if( pArg->shellFlgs & SHFLG_Lookaside ){
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED,
- &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Lookaside Slots Used: %d (max %d)\n",
- iCur, iHiwtr);
- sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT,
- &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Successful lookaside attempts: %d\n", iHiwtr);
- sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
- &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Lookaside failures due to size: %d\n", iHiwtr);
- sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
- &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Lookaside failures due to OOM: %d\n", iHiwtr);
- }
iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Pager Heap Usage: %d bytes\n",iCur);
+ sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &iCur, &iHiwtr, bReset);
+ fprintf(pArg->out, "Lookaside Slots Used: %d (max %d)\n", iCur, iHiwtr);
+ sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT, &iCur, &iHiwtr, bReset);
+ fprintf(pArg->out, "Successful lookaside attempts: %d\n", iHiwtr);
+ sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, &iCur, &iHiwtr, bReset);
+ fprintf(pArg->out, "Lookaside failures due to size: %d\n", iHiwtr);
+ sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, &iCur, &iHiwtr, bReset);
+ fprintf(pArg->out, "Lookaside failures due to OOM: %d\n", iHiwtr);
iHiwtr = iCur = -1;
+ sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset);
+ fprintf(pArg->out, "Pager Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1;
sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1);
fprintf(pArg->out, "Page cache hits: %d\n", iCur);
iHiwtr = iCur = -1;
@@ -1301,78 +1129,27 @@ static int display_stats(
fprintf(pArg->out, "Page cache writes: %d\n", iCur);
iHiwtr = iCur = -1;
sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Schema Heap Usage: %d bytes\n",iCur);
+ fprintf(pArg->out, "Schema Heap Usage: %d bytes\n", iCur);
iHiwtr = iCur = -1;
sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Statement Heap/Lookaside Usage: %d bytes\n",iCur);
+ fprintf(pArg->out, "Statement Heap/Lookaside Usage: %d bytes\n", iCur);
}
if( pArg && pArg->out && db && pArg->pStmt ){
- iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP,
- bReset);
+ iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, bReset);
fprintf(pArg->out, "Fullscan Steps: %d\n", iCur);
iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_SORT, bReset);
fprintf(pArg->out, "Sort Operations: %d\n", iCur);
- iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_AUTOINDEX,bReset);
+ iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_AUTOINDEX, bReset);
fprintf(pArg->out, "Autoindex Inserts: %d\n", iCur);
iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_VM_STEP, bReset);
fprintf(pArg->out, "Virtual Machine Steps: %d\n", iCur);
}
- /* Do not remove this machine readable comment: extra-stats-output-here */
-
return 0;
}
/*
-** Display scan stats.
-*/
-static void display_scanstats(
- sqlite3 *db, /* Database to query */
- ShellState *pArg /* Pointer to ShellState */
-){
-#ifndef SQLITE_ENABLE_STMT_SCANSTATUS
- UNUSED_PARAMETER(db);
- UNUSED_PARAMETER(pArg);
-#else
- int i, k, n, mx;
- fprintf(pArg->out, "-------- scanstats --------\n");
- mx = 0;
- for(k=0; k<=mx; k++){
- double rEstLoop = 1.0;
- for(i=n=0; 1; i++){
- sqlite3_stmt *p = pArg->pStmt;
- sqlite3_int64 nLoop, nVisit;
- double rEst;
- int iSid;
- const char *zExplain;
- if( sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_NLOOP, (void*)&nLoop) ){
- break;
- }
- sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_SELECTID, (void*)&iSid);
- if( iSid>mx ) mx = iSid;
- if( iSid!=k ) continue;
- if( n==0 ){
- rEstLoop = (double)nLoop;
- if( k>0 ) fprintf(pArg->out, "-------- subquery %d -------\n", k);
- }
- n++;
- sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_NVISIT, (void*)&nVisit);
- sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_EST, (void*)&rEst);
- sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_EXPLAIN, (void*)&zExplain);
- fprintf(pArg->out, "Loop %2d: %s\n", n, zExplain);
- rEstLoop *= rEst;
- fprintf(pArg->out,
- " nLoop=%-8lld nRow=%-8lld estRow=%-8lld estRow/Loop=%-8g\n",
- nLoop, nVisit, (sqlite3_int64)(rEstLoop+0.5), rEst
- );
- }
- }
- fprintf(pArg->out, "---------------------------\n");
-#endif
-}
-
-/*
** Parameter azArray points to a zero-terminated array of strings. zStr
** points to a single nul-terminated string. Return non-zero if zStr
** is equal, according to strcmp(), to any of the strings in the array.
@@ -1388,7 +1165,7 @@ static int str_in_array(const char *zStr, const char **azArray){
/*
** If compiled statement pSql appears to be an EXPLAIN statement, allocate
-** and populate the ShellState.aiIndent[] array with the number of
+** and populate the callback_data.aiIndent[] array with the number of
** spaces each opcode should be indented before it is output.
**
** The indenting rules are:
@@ -1399,22 +1176,19 @@ static int str_in_array(const char *zStr, const char **azArray){
**
** * For each "Goto", if the jump destination is earlier in the program
** and ends on one of:
-** Yield SeekGt SeekLt RowSetRead Rewind
-** or if the P1 parameter is one instead of zero,
+** Yield SeekGt SeekLt RowSetRead
** then indent all opcodes between the earlier instruction
** and "Goto" by 2 spaces.
*/
-static void explain_data_prepare(ShellState *p, sqlite3_stmt *pSql){
+static void explain_data_prepare(struct callback_data *p, sqlite3_stmt *pSql){
const char *zSql; /* The text of the SQL statement */
const char *z; /* Used to check if this is an EXPLAIN */
int *abYield = 0; /* True if op is an OP_Yield */
int nAlloc = 0; /* Allocated size of p->aiIndent[], abYield */
int iOp; /* Index of operation in p->aiIndent[] */
- const char *azNext[] = { "Next", "Prev", "VPrev", "VNext", "SorterNext",
- "NextIfOpen", "PrevIfOpen", 0 };
- const char *azYield[] = { "Yield", "SeekLT", "SeekGT", "RowSetRead",
- "Rewind", 0 };
+ const char *azNext[] = { "Next", "Prev", "VPrev", "VNext", "SorterNext", 0 };
+ const char *azYield[] = { "Yield", "SeekLt", "SeekGt", "RowSetRead", 0 };
const char *azGoto[] = { "Goto", 0 };
/* Try to figure out if this is really an EXPLAIN statement. If this
@@ -1440,8 +1214,8 @@ static void explain_data_prepare(ShellState *p, sqlite3_stmt *pSql){
/* Grow the p->aiIndent array as required */
if( iOp>=nAlloc ){
nAlloc += 100;
- p->aiIndent = (int*)sqlite3_realloc64(p->aiIndent, nAlloc*sizeof(int));
- abYield = (int*)sqlite3_realloc64(abYield, nAlloc*sizeof(int));
+ p->aiIndent = (int*)sqlite3_realloc(p->aiIndent, nAlloc*sizeof(int));
+ abYield = (int*)sqlite3_realloc(abYield, nAlloc*sizeof(int));
}
abYield[iOp] = str_in_array(zOp, azYield);
p->aiIndent[iOp] = 0;
@@ -1450,10 +1224,8 @@ static void explain_data_prepare(ShellState *p, sqlite3_stmt *pSql){
if( str_in_array(zOp, azNext) ){
for(i=p2op; i<iOp; i++) p->aiIndent[i] += 2;
}
- if( str_in_array(zOp, azGoto) && p2op<p->nIndent
- && (abYield[p2op] || sqlite3_column_int(pSql, 2))
- ){
- for(i=p2op+1; i<iOp; i++) p->aiIndent[i] += 2;
+ if( str_in_array(zOp, azGoto) && p2op<p->nIndent && abYield[p2op] ){
+ for(i=p2op; i<iOp; i++) p->aiIndent[i] += 2;
}
}
@@ -1465,7 +1237,7 @@ static void explain_data_prepare(ShellState *p, sqlite3_stmt *pSql){
/*
** Free the array allocated by explain_data_prepare().
*/
-static void explain_data_delete(ShellState *p){
+static void explain_data_delete(struct callback_data *p){
sqlite3_free(p->aiIndent);
p->aiIndent = 0;
p->nIndent = 0;
@@ -1482,12 +1254,12 @@ static void explain_data_delete(ShellState *p){
** and callback data argument.
*/
static int shell_exec(
- sqlite3 *db, /* An open database */
- const char *zSql, /* SQL to be evaluated */
+ sqlite3 *db, /* An open database */
+ const char *zSql, /* SQL to be evaluated */
int (*xCallback)(void*,int,char**,char**,int*), /* Callback function */
- /* (not the same as sqlite3_exec) */
- ShellState *pArg, /* Pointer to ShellState */
- char **pzErrMsg /* Error msg written here */
+ /* (not the same as sqlite3_exec) */
+ struct callback_data *pArg, /* Pointer to struct callback_data */
+ char **pzErrMsg /* Error msg written here */
){
sqlite3_stmt *pStmt = NULL; /* Statement to execute. */
int rc = SQLITE_OK; /* Return Code */
@@ -1524,22 +1296,13 @@ static int shell_exec(
fprintf(pArg->out, "%s\n", zStmtSql ? zStmtSql : zSql);
}
- /* Show the EXPLAIN QUERY PLAN if .eqp is on */
- if( pArg && pArg->autoEQP ){
- sqlite3_stmt *pExplain;
- char *zEQP = sqlite3_mprintf("EXPLAIN QUERY PLAN %s",
- sqlite3_sql(pStmt));
- rc = sqlite3_prepare_v2(db, zEQP, -1, &pExplain, 0);
- if( rc==SQLITE_OK ){
- while( sqlite3_step(pExplain)==SQLITE_ROW ){
- fprintf(pArg->out,"--EQP-- %d,", sqlite3_column_int(pExplain, 0));
- fprintf(pArg->out,"%d,", sqlite3_column_int(pExplain, 1));
- fprintf(pArg->out,"%d,", sqlite3_column_int(pExplain, 2));
- fprintf(pArg->out,"%s\n", sqlite3_column_text(pExplain, 3));
- }
+ /* Output TESTCTRL_EXPLAIN text if requested */
+ if( pArg && pArg->mode==MODE_Explain ){
+ const char *zExplain = 0;
+ sqlite3_test_control(SQLITE_TESTCTRL_EXPLAIN_STMT, pStmt, &zExplain);
+ if( zExplain && zExplain[0] ){
+ fprintf(pArg->out, "%s", zExplain);
}
- sqlite3_finalize(pExplain);
- sqlite3_free(zEQP);
}
/* If the shell is currently in ".explain" mode, gather the extra
@@ -1558,7 +1321,7 @@ static int shell_exec(
if( xCallback ){
/* allocate space for col name ptr, value ptr, and type */
int nCol = sqlite3_column_count(pStmt);
- void *pData = sqlite3_malloc64(3*nCol*sizeof(const char*) + 1);
+ void *pData = sqlite3_malloc(3*nCol*sizeof(const char*) + 1);
if( !pData ){
rc = SQLITE_NOMEM;
}else{
@@ -1612,11 +1375,6 @@ static int shell_exec(
display_stats(db, pArg, 0);
}
- /* print loop-counters if required */
- if( pArg && pArg->scanstatsOn ){
- display_scanstats(db, pArg);
- }
-
/* Finalize the statement just executed. If this fails, save a
** copy of the error message. Otherwise, set zSql to point to the
** next statement to execute. */
@@ -1652,7 +1410,7 @@ static int dump_callback(void *pArg, int nArg, char **azArg, char **azCol){
const char *zType;
const char *zSql;
const char *zPrepStmt = 0;
- ShellState *p = (ShellState *)pArg;
+ struct callback_data *p = (struct callback_data *)pArg;
UNUSED_PARAMETER(azCol);
if( nArg!=3 ) return 1;
@@ -1694,7 +1452,7 @@ static int dump_callback(void *pArg, int nArg, char **azArg, char **azCol){
zTableInfo = appendText(zTableInfo, zTable, '"');
zTableInfo = appendText(zTableInfo, ");", 0);
- rc = sqlite3_prepare_v2(p->db, zTableInfo, -1, &pTableInfo, 0);
+ rc = sqlite3_prepare(p->db, zTableInfo, -1, &pTableInfo, 0);
free(zTableInfo);
if( rc!=SQLITE_OK || !pTableInfo ){
return 1;
@@ -1748,7 +1506,7 @@ static int dump_callback(void *pArg, int nArg, char **azArg, char **azCol){
** "ORDER BY rowid DESC" to the end.
*/
static int run_schema_dump_query(
- ShellState *p,
+ struct callback_data *p,
const char *zQuery
){
int rc;
@@ -1783,210 +1541,115 @@ static int run_schema_dump_query(
*/
static char zHelp[] =
".backup ?DB? FILE Backup DB (default \"main\") to FILE\n"
- ".bail on|off Stop after hitting an error. Default OFF\n"
- ".binary on|off Turn binary output on or off. Default OFF\n"
- ".clone NEWDB Clone data into NEWDB from the existing database\n"
+ ".bail ON|OFF Stop after hitting an error. Default OFF\n"
".databases List names and files of attached databases\n"
- ".dbinfo ?DB? Show status information about the database\n"
".dump ?TABLE? ... Dump the database in an SQL text format\n"
" If TABLE specified, only dump tables matching\n"
" LIKE pattern TABLE.\n"
- ".echo on|off Turn command echo on or off\n"
- ".eqp on|off Enable or disable automatic EXPLAIN QUERY PLAN\n"
+ ".echo ON|OFF Turn command echo on or off\n"
".exit Exit this program\n"
- ".explain ?on|off? Turn output mode suitable for EXPLAIN on or off.\n"
+ ".explain ?ON|OFF? Turn output mode suitable for EXPLAIN on or off.\n"
" With no args, it turns EXPLAIN on.\n"
- ".fullschema Show schema and the content of sqlite_stat tables\n"
- ".headers on|off Turn display of headers on or off\n"
+ ".header(s) ON|OFF Turn display of headers on or off\n"
".help Show this message\n"
".import FILE TABLE Import data from FILE into TABLE\n"
- ".indexes ?TABLE? Show names of all indexes\n"
- " If TABLE specified, only show indexes for tables\n"
+ ".indices ?TABLE? Show names of all indices\n"
+ " If TABLE specified, only show indices for tables\n"
" matching LIKE pattern TABLE.\n"
#ifdef SQLITE_ENABLE_IOTRACE
".iotrace FILE Enable I/O diagnostic logging to FILE\n"
#endif
- ".limit ?LIMIT? ?VAL? Display or change the value of an SQLITE_LIMIT\n"
#ifndef SQLITE_OMIT_LOAD_EXTENSION
".load FILE ?ENTRY? Load an extension library\n"
#endif
".log FILE|off Turn logging on or off. FILE can be stderr/stdout\n"
".mode MODE ?TABLE? Set output mode where MODE is one of:\n"
- " ascii Columns/rows delimited by 0x1F and 0x1E\n"
" csv Comma-separated values\n"
" column Left-aligned columns. (See .width)\n"
" html HTML <table> code\n"
" insert SQL insert statements for TABLE\n"
" line One value per line\n"
- " list Values delimited by .separator strings\n"
+ " list Values delimited by .separator string\n"
" tabs Tab-separated values\n"
" tcl TCL list elements\n"
".nullvalue STRING Use STRING in place of NULL values\n"
- ".once FILENAME Output for the next SQL command only to FILENAME\n"
".open ?FILENAME? Close existing database and reopen FILENAME\n"
- ".output ?FILENAME? Send output to FILENAME or stdout\n"
+ ".output FILENAME Send output to FILENAME\n"
+ ".output stdout Send output to the screen\n"
".print STRING... Print literal STRING\n"
".prompt MAIN CONTINUE Replace the standard prompts\n"
".quit Exit this program\n"
".read FILENAME Execute SQL in FILENAME\n"
".restore ?DB? FILE Restore content of DB (default \"main\") from FILE\n"
- ".save FILE Write in-memory database into FILE\n"
- ".scanstats on|off Turn sqlite3_stmt_scanstatus() metrics on or off\n"
".schema ?TABLE? Show the CREATE statements\n"
" If TABLE specified, only show tables matching\n"
" LIKE pattern TABLE.\n"
- ".separator COL ?ROW? Change the column separator and optionally the row\n"
- " separator for both the output mode and .import\n"
- ".shell CMD ARGS... Run CMD ARGS... in a system shell\n"
+ ".separator STRING Change separator used by output mode and .import\n"
".show Show the current values for various settings\n"
- ".stats on|off Turn stats on or off\n"
- ".system CMD ARGS... Run CMD ARGS... in a system shell\n"
+ ".stats ON|OFF Turn stats on or off\n"
".tables ?TABLE? List names of tables\n"
" If TABLE specified, only list tables matching\n"
" LIKE pattern TABLE.\n"
".timeout MS Try opening locked tables for MS milliseconds\n"
- ".timer on|off Turn SQL timer on or off\n"
".trace FILE|off Output each SQL statement as it is run\n"
".vfsname ?AUX? Print the name of the VFS stack\n"
".width NUM1 NUM2 ... Set column widths for \"column\" mode\n"
- " Negative values right-justify\n"
;
-/* Forward reference */
-static int process_input(ShellState *p, FILE *in);
-/*
-** Implementation of the "readfile(X)" SQL function. The entire content
-** of the file named X is read and returned as a BLOB. NULL is returned
-** if the file does not exist or is unreadable.
-*/
-static void readfileFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- const char *zName;
- FILE *in;
- long nIn;
- void *pBuf;
-
- UNUSED_PARAMETER(argc);
- zName = (const char*)sqlite3_value_text(argv[0]);
- if( zName==0 ) return;
- in = fopen(zName, "rb");
- if( in==0 ) return;
- fseek(in, 0, SEEK_END);
- nIn = ftell(in);
- rewind(in);
- pBuf = sqlite3_malloc64( nIn );
- if( pBuf && 1==fread(pBuf, nIn, 1, in) ){
- sqlite3_result_blob(context, pBuf, nIn, sqlite3_free);
- }else{
- sqlite3_free(pBuf);
- }
- fclose(in);
-}
-
-/*
-** Implementation of the "writefile(X,Y)" SQL function. The argument Y
-** is written into file X. The number of bytes written is returned. Or
-** NULL is returned if something goes wrong, such as being unable to open
-** file X for writing.
-*/
-static void writefileFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- FILE *out;
- const char *z;
- sqlite3_int64 rc;
- const char *zFile;
+static char zTimerHelp[] =
+ ".timer ON|OFF Turn the CPU timer measurement on or off\n"
+;
- UNUSED_PARAMETER(argc);
- zFile = (const char*)sqlite3_value_text(argv[0]);
- if( zFile==0 ) return;
- out = fopen(zFile, "wb");
- if( out==0 ) return;
- z = (const char*)sqlite3_value_blob(argv[1]);
- if( z==0 ){
- rc = 0;
- }else{
- rc = fwrite(z, 1, sqlite3_value_bytes(argv[1]), out);
- }
- fclose(out);
- sqlite3_result_int64(context, rc);
-}
+/* Forward reference */
+static int process_input(struct callback_data *p, FILE *in);
/*
** Make sure the database is open. If it is not, then open it. If
** the database fails to open, print an error message and exit.
*/
-static void open_db(ShellState *p, int keepAlive){
+static void open_db(struct callback_data *p, int keepAlive){
if( p->db==0 ){
sqlite3_initialize();
sqlite3_open(p->zDbFilename, &p->db);
- globalDb = p->db;
- if( p->db && sqlite3_errcode(p->db)==SQLITE_OK ){
- sqlite3_create_function(p->db, "shellstatic", 0, SQLITE_UTF8, 0,
+ db = p->db;
+ if( db && sqlite3_errcode(db)==SQLITE_OK ){
+ sqlite3_create_function(db, "shellstatic", 0, SQLITE_UTF8, 0,
shellstaticFunc, 0, 0);
}
- if( p->db==0 || SQLITE_OK!=sqlite3_errcode(p->db) ){
+ if( db==0 || SQLITE_OK!=sqlite3_errcode(db) ){
fprintf(stderr,"Error: unable to open database \"%s\": %s\n",
- p->zDbFilename, sqlite3_errmsg(p->db));
+ p->zDbFilename, sqlite3_errmsg(db));
if( keepAlive ) return;
exit(1);
}
#ifndef SQLITE_OMIT_LOAD_EXTENSION
sqlite3_enable_load_extension(p->db, 1);
#endif
- sqlite3_create_function(p->db, "readfile", 1, SQLITE_UTF8, 0,
- readfileFunc, 0, 0);
- sqlite3_create_function(p->db, "writefile", 2, SQLITE_UTF8, 0,
- writefileFunc, 0, 0);
}
}
/*
** Do C-language style dequoting.
**
-** \a -> alarm
-** \b -> backspace
** \t -> tab
** \n -> newline
-** \v -> vertical tab
-** \f -> form feed
** \r -> carriage return
-** \s -> space
** \" -> "
-** \' -> '
-** \\ -> backslash
** \NNN -> ascii character NNN in octal
+** \\ -> backslash
*/
static void resolve_backslashes(char *z){
int i, j;
char c;
- while( *z && *z!='\\' ) z++;
for(i=j=0; (c = z[i])!=0; i++, j++){
- if( c=='\\' && z[i+1]!=0 ){
+ if( c=='\\' ){
c = z[++i];
- if( c=='a' ){
- c = '\a';
- }else if( c=='b' ){
- c = '\b';
+ if( c=='n' ){
+ c = '\n';
}else if( c=='t' ){
c = '\t';
- }else if( c=='n' ){
- c = '\n';
- }else if( c=='v' ){
- c = '\v';
- }else if( c=='f' ){
- c = '\f';
}else if( c=='r' ){
c = '\r';
- }else if( c=='"' ){
- c = '"';
- }else if( c=='\'' ){
- c = '\'';
}else if( c=='\\' ){
c = '\\';
}else if( c>='0' && c<='7' ){
@@ -2003,7 +1666,7 @@ static void resolve_backslashes(char *z){
}
z[j] = c;
}
- if( j<i ) z[j] = 0;
+ z[j] = 0;
}
/*
@@ -2120,11 +1783,7 @@ static FILE *output_file_open(const char *zFile){
*/
static void sql_trace_callback(void *pArg, const char *z){
FILE *f = (FILE*)pArg;
- if( f ){
- int i = (int)strlen(z);
- while( i>0 && z[i-1]==';' ){ i--; }
- fprintf(f, "%.*s;\n", i, z);
- }
+ if( f ) fprintf(f, "%s\n", z);
}
/*
@@ -2137,10 +1796,10 @@ static void test_breakpoint(void){
}
/*
-** An object used to read a CSV and other files for import.
+** An object used to read a CSV file
*/
-typedef struct ImportCtx ImportCtx;
-struct ImportCtx {
+typedef struct CSVReader CSVReader;
+struct CSVReader {
const char *zFile; /* Name of the input file */
FILE *in; /* Read the CSV text from this input stream */
char *z; /* Accumulated text for a field */
@@ -2148,15 +1807,14 @@ struct ImportCtx {
int nAlloc; /* Space allocated for z[] */
int nLine; /* Current line number */
int cTerm; /* Character that terminated the most recent field */
- int cColSep; /* The column separator character. (Usually ",") */
- int cRowSep; /* The row separator character. (Usually "\n") */
+ int cSeparator; /* The separator character. (Usually ",") */
};
/* Append a single byte to z[] */
-static void import_append_char(ImportCtx *p, int c){
+static void csv_append_char(CSVReader *p, int c){
if( p->n+1>=p->nAlloc ){
p->nAlloc += p->nAlloc + 100;
- p->z = sqlite3_realloc64(p->z, p->nAlloc);
+ p->z = sqlite3_realloc(p->z, p->nAlloc);
if( p->z==0 ){
fprintf(stderr, "out of memory\n");
exit(1);
@@ -2170,18 +1828,16 @@ static void import_append_char(ImportCtx *p, int c){
**
** + Input comes from p->in.
** + Store results in p->z of length p->n. Space to hold p->z comes
-** from sqlite3_malloc64().
-** + Use p->cSep as the column separator. The default is ",".
-** + Use p->rSep as the row separator. The default is "\n".
+** from sqlite3_malloc().
+** + Use p->cSep as the separator. The default is ",".
** + Keep track of the line number in p->nLine.
** + Store the character that terminates the field in p->cTerm. Store
** EOF on end-of-file.
** + Report syntax errors on stderr
*/
-static char *SQLITE_CDECL csv_read_one_field(ImportCtx *p){
- int c;
- int cSep = p->cColSep;
- int rSep = p->cRowSep;
+static char *csv_read_one_field(CSVReader *p){
+ int c, pc;
+ int cSep = p->cSeparator;
p->n = 0;
c = fgetc(p->in);
if( c==EOF || seenInterrupt ){
@@ -2189,13 +1845,12 @@ static char *SQLITE_CDECL csv_read_one_field(ImportCtx *p){
return 0;
}
if( c=='"' ){
- int pc, ppc;
int startLine = p->nLine;
int cQuote = c;
- pc = ppc = 0;
+ pc = 0;
while( 1 ){
c = fgetc(p->in);
- if( c==rSep ) p->nLine++;
+ if( c=='\n' ) p->nLine++;
if( c==cQuote ){
if( pc==cQuote ){
pc = 0;
@@ -2203,8 +1858,8 @@ static char *SQLITE_CDECL csv_read_one_field(ImportCtx *p){
}
}
if( (c==cSep && pc==cQuote)
- || (c==rSep && pc==cQuote)
- || (c==rSep && pc=='\r' && ppc==cQuote)
+ || (c=='\n' && pc==cQuote)
+ || (c=='\n' && pc=='\r' && p->n>=2 && p->z[p->n-2]==cQuote)
|| (c==EOF && pc==cQuote)
){
do{ p->n--; }while( p->z[p->n]!=cQuote );
@@ -2218,21 +1873,20 @@ static char *SQLITE_CDECL csv_read_one_field(ImportCtx *p){
if( c==EOF ){
fprintf(stderr, "%s:%d: unterminated %c-quoted field\n",
p->zFile, startLine, cQuote);
- p->cTerm = c;
+ p->cTerm = EOF;
break;
}
- import_append_char(p, c);
- ppc = pc;
+ csv_append_char(p, c);
pc = c;
}
}else{
- while( c!=EOF && c!=cSep && c!=rSep ){
- import_append_char(p, c);
+ while( c!=EOF && c!=cSep && c!='\n' ){
+ csv_append_char(p, c);
c = fgetc(p->in);
}
- if( c==rSep ){
+ if( c=='\n' ){
p->nLine++;
- if( p->n>0 && p->z[p->n-1]=='\r' ) p->n--;
+ if( p->n>1 && p->z[p->n-1]=='\r' ) p->n--;
}
p->cTerm = c;
}
@@ -2240,403 +1894,14 @@ static char *SQLITE_CDECL csv_read_one_field(ImportCtx *p){
return p->z;
}
-/* Read a single field of ASCII delimited text.
-**
-** + Input comes from p->in.
-** + Store results in p->z of length p->n. Space to hold p->z comes
-** from sqlite3_malloc64().
-** + Use p->cSep as the column separator. The default is "\x1F".
-** + Use p->rSep as the row separator. The default is "\x1E".
-** + Keep track of the row number in p->nLine.
-** + Store the character that terminates the field in p->cTerm. Store
-** EOF on end-of-file.
-** + Report syntax errors on stderr
-*/
-static char *SQLITE_CDECL ascii_read_one_field(ImportCtx *p){
- int c;
- int cSep = p->cColSep;
- int rSep = p->cRowSep;
- p->n = 0;
- c = fgetc(p->in);
- if( c==EOF || seenInterrupt ){
- p->cTerm = EOF;
- return 0;
- }
- while( c!=EOF && c!=cSep && c!=rSep ){
- import_append_char(p, c);
- c = fgetc(p->in);
- }
- if( c==rSep ){
- p->nLine++;
- }
- p->cTerm = c;
- if( p->z ) p->z[p->n] = 0;
- return p->z;
-}
-
-/*
-** Try to transfer data for table zTable. If an error is seen while
-** moving forward, try to go backwards. The backwards movement won't
-** work for WITHOUT ROWID tables.
-*/
-static void tryToCloneData(
- ShellState *p,
- sqlite3 *newDb,
- const char *zTable
-){
- sqlite3_stmt *pQuery = 0;
- sqlite3_stmt *pInsert = 0;
- char *zQuery = 0;
- char *zInsert = 0;
- int rc;
- int i, j, n;
- int nTable = (int)strlen(zTable);
- int k = 0;
- int cnt = 0;
- const int spinRate = 10000;
-
- zQuery = sqlite3_mprintf("SELECT * FROM \"%w\"", zTable);
- rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
- if( rc ){
- fprintf(stderr, "Error %d: %s on [%s]\n",
- sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
- zQuery);
- goto end_data_xfer;
- }
- n = sqlite3_column_count(pQuery);
- zInsert = sqlite3_malloc64(200 + nTable + n*3);
- if( zInsert==0 ){
- fprintf(stderr, "out of memory\n");
- goto end_data_xfer;
- }
- sqlite3_snprintf(200+nTable,zInsert,
- "INSERT OR IGNORE INTO \"%s\" VALUES(?", zTable);
- i = (int)strlen(zInsert);
- for(j=1; j<n; j++){
- memcpy(zInsert+i, ",?", 2);
- i += 2;
- }
- memcpy(zInsert+i, ");", 3);
- rc = sqlite3_prepare_v2(newDb, zInsert, -1, &pInsert, 0);
- if( rc ){
- fprintf(stderr, "Error %d: %s on [%s]\n",
- sqlite3_extended_errcode(newDb), sqlite3_errmsg(newDb),
- zQuery);
- goto end_data_xfer;
- }
- for(k=0; k<2; k++){
- while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
- for(i=0; i<n; i++){
- switch( sqlite3_column_type(pQuery, i) ){
- case SQLITE_NULL: {
- sqlite3_bind_null(pInsert, i+1);
- break;
- }
- case SQLITE_INTEGER: {
- sqlite3_bind_int64(pInsert, i+1, sqlite3_column_int64(pQuery,i));
- break;
- }
- case SQLITE_FLOAT: {
- sqlite3_bind_double(pInsert, i+1, sqlite3_column_double(pQuery,i));
- break;
- }
- case SQLITE_TEXT: {
- sqlite3_bind_text(pInsert, i+1,
- (const char*)sqlite3_column_text(pQuery,i),
- -1, SQLITE_STATIC);
- break;
- }
- case SQLITE_BLOB: {
- sqlite3_bind_blob(pInsert, i+1, sqlite3_column_blob(pQuery,i),
- sqlite3_column_bytes(pQuery,i),
- SQLITE_STATIC);
- break;
- }
- }
- } /* End for */
- rc = sqlite3_step(pInsert);
- if( rc!=SQLITE_OK && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
- fprintf(stderr, "Error %d: %s\n", sqlite3_extended_errcode(newDb),
- sqlite3_errmsg(newDb));
- }
- sqlite3_reset(pInsert);
- cnt++;
- if( (cnt%spinRate)==0 ){
- printf("%c\b", "|/-\\"[(cnt/spinRate)%4]);
- fflush(stdout);
- }
- } /* End while */
- if( rc==SQLITE_DONE ) break;
- sqlite3_finalize(pQuery);
- sqlite3_free(zQuery);
- zQuery = sqlite3_mprintf("SELECT * FROM \"%w\" ORDER BY rowid DESC;",
- zTable);
- rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
- if( rc ){
- fprintf(stderr, "Warning: cannot step \"%s\" backwards", zTable);
- break;
- }
- } /* End for(k=0...) */
-
-end_data_xfer:
- sqlite3_finalize(pQuery);
- sqlite3_finalize(pInsert);
- sqlite3_free(zQuery);
- sqlite3_free(zInsert);
-}
-
-
-/*
-** Try to transfer all rows of the schema that match zWhere. For
-** each row, invoke xForEach() on the object defined by that row.
-** If an error is encountered while moving forward through the
-** sqlite_master table, try again moving backwards.
-*/
-static void tryToCloneSchema(
- ShellState *p,
- sqlite3 *newDb,
- const char *zWhere,
- void (*xForEach)(ShellState*,sqlite3*,const char*)
-){
- sqlite3_stmt *pQuery = 0;
- char *zQuery = 0;
- int rc;
- const unsigned char *zName;
- const unsigned char *zSql;
- char *zErrMsg = 0;
-
- zQuery = sqlite3_mprintf("SELECT name, sql FROM sqlite_master"
- " WHERE %s", zWhere);
- rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
- if( rc ){
- fprintf(stderr, "Error: (%d) %s on [%s]\n",
- sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
- zQuery);
- goto end_schema_xfer;
- }
- while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
- zName = sqlite3_column_text(pQuery, 0);
- zSql = sqlite3_column_text(pQuery, 1);
- printf("%s... ", zName); fflush(stdout);
- sqlite3_exec(newDb, (const char*)zSql, 0, 0, &zErrMsg);
- if( zErrMsg ){
- fprintf(stderr, "Error: %s\nSQL: [%s]\n", zErrMsg, zSql);
- sqlite3_free(zErrMsg);
- zErrMsg = 0;
- }
- if( xForEach ){
- xForEach(p, newDb, (const char*)zName);
- }
- printf("done\n");
- }
- if( rc!=SQLITE_DONE ){
- sqlite3_finalize(pQuery);
- sqlite3_free(zQuery);
- zQuery = sqlite3_mprintf("SELECT name, sql FROM sqlite_master"
- " WHERE %s ORDER BY rowid DESC", zWhere);
- rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
- if( rc ){
- fprintf(stderr, "Error: (%d) %s on [%s]\n",
- sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
- zQuery);
- goto end_schema_xfer;
- }
- while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
- zName = sqlite3_column_text(pQuery, 0);
- zSql = sqlite3_column_text(pQuery, 1);
- printf("%s... ", zName); fflush(stdout);
- sqlite3_exec(newDb, (const char*)zSql, 0, 0, &zErrMsg);
- if( zErrMsg ){
- fprintf(stderr, "Error: %s\nSQL: [%s]\n", zErrMsg, zSql);
- sqlite3_free(zErrMsg);
- zErrMsg = 0;
- }
- if( xForEach ){
- xForEach(p, newDb, (const char*)zName);
- }
- printf("done\n");
- }
- }
-end_schema_xfer:
- sqlite3_finalize(pQuery);
- sqlite3_free(zQuery);
-}
-
-/*
-** Open a new database file named "zNewDb". Try to recover as much information
-** as possible out of the main database (which might be corrupt) and write it
-** into zNewDb.
-*/
-static void tryToClone(ShellState *p, const char *zNewDb){
- int rc;
- sqlite3 *newDb = 0;
- if( access(zNewDb,0)==0 ){
- fprintf(stderr, "File \"%s\" already exists.\n", zNewDb);
- return;
- }
- rc = sqlite3_open(zNewDb, &newDb);
- if( rc ){
- fprintf(stderr, "Cannot create output database: %s\n",
- sqlite3_errmsg(newDb));
- }else{
- sqlite3_exec(p->db, "PRAGMA writable_schema=ON;", 0, 0, 0);
- sqlite3_exec(newDb, "BEGIN EXCLUSIVE;", 0, 0, 0);
- tryToCloneSchema(p, newDb, "type='table'", tryToCloneData);
- tryToCloneSchema(p, newDb, "type!='table'", 0);
- sqlite3_exec(newDb, "COMMIT;", 0, 0, 0);
- sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0);
- }
- sqlite3_close(newDb);
-}
-
-/*
-** Change the output file back to stdout
-*/
-static void output_reset(ShellState *p){
- if( p->outfile[0]=='|' ){
-#ifndef SQLITE_OMIT_POPEN
- pclose(p->out);
-#endif
- }else{
- output_file_close(p->out);
- }
- p->outfile[0] = 0;
- p->out = stdout;
-}
-
-/*
-** Run an SQL command and return the single integer result.
-*/
-static int db_int(ShellState *p, const char *zSql){
- sqlite3_stmt *pStmt;
- int res = 0;
- sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
- if( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){
- res = sqlite3_column_int(pStmt,0);
- }
- sqlite3_finalize(pStmt);
- return res;
-}
-
-/*
-** Convert a 2-byte or 4-byte big-endian integer into a native integer
-*/
-unsigned int get2byteInt(unsigned char *a){
- return (a[0]<<8) + a[1];
-}
-unsigned int get4byteInt(unsigned char *a){
- return (a[0]<<24) + (a[1]<<16) + (a[2]<<8) + a[3];
-}
-
-/*
-** Implementation of the ".info" command.
-**
-** Return 1 on error, 2 to exit, and 0 otherwise.
-*/
-static int shell_dbinfo_command(ShellState *p, int nArg, char **azArg){
- static const struct { const char *zName; int ofst; } aField[] = {
- { "file change counter:", 24 },
- { "database page count:", 28 },
- { "freelist page count:", 36 },
- { "schema cookie:", 40 },
- { "schema format:", 44 },
- { "default cache size:", 48 },
- { "autovacuum top root:", 52 },
- { "incremental vacuum:", 64 },
- { "text encoding:", 56 },
- { "user version:", 60 },
- { "application id:", 68 },
- { "software version:", 96 },
- };
- static const struct { const char *zName; const char *zSql; } aQuery[] = {
- { "number of tables:",
- "SELECT count(*) FROM %s WHERE type='table'" },
- { "number of indexes:",
- "SELECT count(*) FROM %s WHERE type='index'" },
- { "number of triggers:",
- "SELECT count(*) FROM %s WHERE type='trigger'" },
- { "number of views:",
- "SELECT count(*) FROM %s WHERE type='view'" },
- { "schema size:",
- "SELECT total(length(sql)) FROM %s" },
- };
- sqlite3_file *pFile;
- int i;
- char *zSchemaTab;
- char *zDb = nArg>=2 ? azArg[1] : "main";
- unsigned char aHdr[100];
- open_db(p, 0);
- if( p->db==0 ) return 1;
- sqlite3_file_control(p->db, zDb, SQLITE_FCNTL_FILE_POINTER, &pFile);
- if( pFile==0 || pFile->pMethods==0 || pFile->pMethods->xRead==0 ){
- return 1;
- }
- i = pFile->pMethods->xRead(pFile, aHdr, 100, 0);
- if( i!=SQLITE_OK ){
- fprintf(stderr, "unable to read database header\n");
- return 1;
- }
- i = get2byteInt(aHdr+16);
- if( i==1 ) i = 65536;
- fprintf(p->out, "%-20s %d\n", "database page size:", i);
- fprintf(p->out, "%-20s %d\n", "write format:", aHdr[18]);
- fprintf(p->out, "%-20s %d\n", "read format:", aHdr[19]);
- fprintf(p->out, "%-20s %d\n", "reserved bytes:", aHdr[20]);
- for(i=0; i<ArraySize(aField); i++){
- int ofst = aField[i].ofst;
- unsigned int val = get4byteInt(aHdr + ofst);
- fprintf(p->out, "%-20s %u", aField[i].zName, val);
- switch( ofst ){
- case 56: {
- if( val==1 ) fprintf(p->out, " (utf8)");
- if( val==2 ) fprintf(p->out, " (utf16le)");
- if( val==3 ) fprintf(p->out, " (utf16be)");
- }
- }
- fprintf(p->out, "\n");
- }
- if( zDb==0 ){
- zSchemaTab = sqlite3_mprintf("main.sqlite_master");
- }else if( strcmp(zDb,"temp")==0 ){
- zSchemaTab = sqlite3_mprintf("%s", "sqlite_temp_master");
- }else{
- zSchemaTab = sqlite3_mprintf("\"%w\".sqlite_master", zDb);
- }
- for(i=0; i<ArraySize(aQuery); i++){
- char *zSql = sqlite3_mprintf(aQuery[i].zSql, zSchemaTab);
- int val = db_int(p, zSql);
- sqlite3_free(zSql);
- fprintf(p->out, "%-20s %d\n", aQuery[i].zName, val);
- }
- sqlite3_free(zSchemaTab);
- return 0;
-}
-
-/*
-** Print the current sqlite3_errmsg() value to stderr and return 1.
-*/
-static int shellDatabaseError(sqlite3 *db){
- const char *zErr = sqlite3_errmsg(db);
- fprintf(stderr, "Error: %s\n", zErr);
- return 1;
-}
-
-/*
-** Print an out-of-memory message to stderr and return 1.
-*/
-static int shellNomemError(void){
- fprintf(stderr, "Error: out of memory\n");
- return 1;
-}
-
/*
** If an input line begins with "." then invoke this routine to
** process that line.
**
** Return 1 on error, 2 to exit, and 0 otherwise.
*/
-static int do_meta_command(char *zLine, ShellState *p){
- int h = 1;
+static int do_meta_command(char *zLine, struct callback_data *p){
+ int i = 1;
int nArg = 0;
int n, c;
int rc = 0;
@@ -2644,24 +1909,24 @@ static int do_meta_command(char *zLine, ShellState *p){
/* Parse the input line into tokens.
*/
- while( zLine[h] && nArg<ArraySize(azArg) ){
- while( IsSpace(zLine[h]) ){ h++; }
- if( zLine[h]==0 ) break;
- if( zLine[h]=='\'' || zLine[h]=='"' ){
- int delim = zLine[h++];
- azArg[nArg++] = &zLine[h];
- while( zLine[h] && zLine[h]!=delim ){
- if( zLine[h]=='\\' && delim=='"' && zLine[h+1]!=0 ) h++;
- h++;
- }
- if( zLine[h]==delim ){
- zLine[h++] = 0;
+ while( zLine[i] && nArg<ArraySize(azArg) ){
+ while( IsSpace(zLine[i]) ){ i++; }
+ if( zLine[i]==0 ) break;
+ if( zLine[i]=='\'' || zLine[i]=='"' ){
+ int delim = zLine[i++];
+ azArg[nArg++] = &zLine[i];
+ while( zLine[i] && zLine[i]!=delim ){
+ if( zLine[i]=='\\' && delim=='"' && zLine[i+1]!=0 ) i++;
+ i++;
+ }
+ if( zLine[i]==delim ){
+ zLine[i++] = 0;
}
if( delim=='"' ) resolve_backslashes(azArg[nArg-1]);
}else{
- azArg[nArg++] = &zLine[h];
- while( zLine[h] && !IsSpace(zLine[h]) ){ h++; }
- if( zLine[h] ) zLine[h++] = 0;
+ azArg[nArg++] = &zLine[i];
+ while( zLine[i] && !IsSpace(zLine[i]) ){ i++; }
+ if( zLine[i] ) zLine[i++] = 0;
resolve_backslashes(azArg[nArg-1]);
}
}
@@ -2671,9 +1936,7 @@ static int do_meta_command(char *zLine, ShellState *p){
if( nArg==0 ) return 0; /* no tokens, no error */
n = strlen30(azArg[0]);
c = azArg[0][0];
- if( (c=='b' && n>=3 && strncmp(azArg[0], "backup", n)==0)
- || (c=='s' && n>=3 && strncmp(azArg[0], "save", n)==0)
- ){
+ if( c=='b' && n>=3 && strncmp(azArg[0], "backup", n)==0 ){
const char *zDestFile = 0;
const char *zDb = 0;
sqlite3 *pDest;
@@ -2727,26 +1990,8 @@ static int do_meta_command(char *zLine, ShellState *p){
sqlite3_close(pDest);
}else
- if( c=='b' && n>=3 && strncmp(azArg[0], "bail", n)==0 ){
- if( nArg==2 ){
- bail_on_error = booleanValue(azArg[1]);
- }else{
- fprintf(stderr, "Usage: .bail on|off\n");
- rc = 1;
- }
- }else
-
- if( c=='b' && n>=3 && strncmp(azArg[0], "binary", n)==0 ){
- if( nArg==2 ){
- if( booleanValue(azArg[1]) ){
- setBinaryMode(p->out);
- }else{
- setTextMode(p->out);
- }
- }else{
- fprintf(stderr, "Usage: .binary on|off\n");
- rc = 1;
- }
+ if( c=='b' && n>=3 && strncmp(azArg[0], "bail", n)==0 && nArg>1 && nArg<3 ){
+ bail_on_error = booleanValue(azArg[1]);
}else
/* The undocumented ".breakpoint" command causes a call to the no-op
@@ -2756,17 +2001,8 @@ static int do_meta_command(char *zLine, ShellState *p){
test_breakpoint();
}else
- if( c=='c' && strncmp(azArg[0], "clone", n)==0 ){
- if( nArg==2 ){
- tryToClone(p, azArg[1]);
- }else{
- fprintf(stderr, "Usage: .clone FILENAME\n");
- rc = 1;
- }
- }else
-
- if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 ){
- ShellState data;
+ if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 && nArg==1 ){
+ struct callback_data data;
char *zErrMsg = 0;
open_db(p, 0);
memcpy(&data, p, sizeof(data));
@@ -2784,20 +2020,11 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
- if( c=='d' && strncmp(azArg[0], "dbinfo", n)==0 ){
- rc = shell_dbinfo_command(p, nArg, azArg);
- }else
-
- if( c=='d' && strncmp(azArg[0], "dump", n)==0 ){
+ if( c=='d' && strncmp(azArg[0], "dump", n)==0 && nArg<3 ){
open_db(p, 0);
/* When playing back a "dump", the content might appear in an order
** which causes immediate foreign key constraints to be violated.
** So disable foreign-key constraint enforcement to prevent problems. */
- if( nArg!=1 && nArg!=2 ){
- fprintf(stderr, "Usage: .dump ?LIKE-PATTERN?\n");
- rc = 1;
- goto meta_command_exit;
- }
fprintf(p->out, "PRAGMA foreign_keys=OFF;\n");
fprintf(p->out, "BEGIN TRANSACTION;\n");
p->writableSchema = 0;
@@ -2842,22 +2069,8 @@ static int do_meta_command(char *zLine, ShellState *p){
fprintf(p->out, p->nErr ? "ROLLBACK; -- due to errors\n" : "COMMIT;\n");
}else
- if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){
- if( nArg==2 ){
- p->echoOn = booleanValue(azArg[1]);
- }else{
- fprintf(stderr, "Usage: .echo on|off\n");
- rc = 1;
- }
- }else
-
- if( c=='e' && strncmp(azArg[0], "eqp", n)==0 ){
- if( nArg==2 ){
- p->autoEQP = booleanValue(azArg[1]);
- }else{
- fprintf(stderr, "Usage: .eqp on|off\n");
- rc = 1;
- }
+ if( c=='e' && strncmp(azArg[0], "echo", n)==0 && nArg>1 && nArg<3 ){
+ p->echoOn = booleanValue(azArg[1]);
}else
if( c=='e' && strncmp(azArg[0], "exit", n)==0 ){
@@ -2865,14 +2078,14 @@ static int do_meta_command(char *zLine, ShellState *p){
rc = 2;
}else
- if( c=='e' && strncmp(azArg[0], "explain", n)==0 ){
+ if( c=='e' && strncmp(azArg[0], "explain", n)==0 && nArg<3 ){
int val = nArg>=2 ? booleanValue(azArg[1]) : 1;
if(val == 1) {
- if(!p->normalMode.valid) {
- p->normalMode.valid = 1;
- p->normalMode.mode = p->mode;
- p->normalMode.showHeader = p->showHeader;
- memcpy(p->normalMode.colWidth,p->colWidth,sizeof(p->colWidth));
+ if(!p->explainPrev.valid) {
+ p->explainPrev.valid = 1;
+ p->explainPrev.mode = p->mode;
+ p->explainPrev.showHeader = p->showHeader;
+ memcpy(p->explainPrev.colWidth,p->colWidth,sizeof(p->colWidth));
}
/* We could put this code under the !p->explainValid
** condition so that it does not execute if we are already in
@@ -2892,177 +2105,88 @@ static int do_meta_command(char *zLine, ShellState *p){
p->colWidth[5] = 13; /* P4 */
p->colWidth[6] = 2; /* P5 */
p->colWidth[7] = 13; /* Comment */
- }else if (p->normalMode.valid) {
- p->normalMode.valid = 0;
- p->mode = p->normalMode.mode;
- p->showHeader = p->normalMode.showHeader;
- memcpy(p->colWidth,p->normalMode.colWidth,sizeof(p->colWidth));
+ }else if (p->explainPrev.valid) {
+ p->explainPrev.valid = 0;
+ p->mode = p->explainPrev.mode;
+ p->showHeader = p->explainPrev.showHeader;
+ memcpy(p->colWidth,p->explainPrev.colWidth,sizeof(p->colWidth));
}
}else
- if( c=='f' && strncmp(azArg[0], "fullschema", n)==0 ){
- ShellState data;
- char *zErrMsg = 0;
- int doStats = 0;
- if( nArg!=1 ){
- fprintf(stderr, "Usage: .fullschema\n");
- rc = 1;
- goto meta_command_exit;
- }
- open_db(p, 0);
- memcpy(&data, p, sizeof(data));
- data.showHeader = 0;
- data.mode = MODE_Semi;
- rc = sqlite3_exec(p->db,
- "SELECT sql FROM"
- " (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x"
- " FROM sqlite_master UNION ALL"
- " SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
- "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
- "ORDER BY rowid",
- callback, &data, &zErrMsg
- );
- if( rc==SQLITE_OK ){
- sqlite3_stmt *pStmt;
- rc = sqlite3_prepare_v2(p->db,
- "SELECT rowid FROM sqlite_master"
- " WHERE name GLOB 'sqlite_stat[134]'",
- -1, &pStmt, 0);
- doStats = sqlite3_step(pStmt)==SQLITE_ROW;
- sqlite3_finalize(pStmt);
- }
- if( doStats==0 ){
- fprintf(p->out, "/* No STAT tables available */\n");
- }else{
- fprintf(p->out, "ANALYZE sqlite_master;\n");
- sqlite3_exec(p->db, "SELECT 'ANALYZE sqlite_master'",
- callback, &data, &zErrMsg);
- data.mode = MODE_Insert;
- data.zDestTable = "sqlite_stat1";
- shell_exec(p->db, "SELECT * FROM sqlite_stat1",
- shell_callback, &data,&zErrMsg);
- data.zDestTable = "sqlite_stat3";
- shell_exec(p->db, "SELECT * FROM sqlite_stat3",
- shell_callback, &data,&zErrMsg);
- data.zDestTable = "sqlite_stat4";
- shell_exec(p->db, "SELECT * FROM sqlite_stat4",
- shell_callback, &data, &zErrMsg);
- fprintf(p->out, "ANALYZE sqlite_master;\n");
- }
- }else
-
- if( c=='h' && strncmp(azArg[0], "headers", n)==0 ){
- if( nArg==2 ){
- p->showHeader = booleanValue(azArg[1]);
- }else{
- fprintf(stderr, "Usage: .headers on|off\n");
- rc = 1;
- }
+ if( c=='h' && (strncmp(azArg[0], "header", n)==0 ||
+ strncmp(azArg[0], "headers", n)==0) && nArg>1 && nArg<3 ){
+ p->showHeader = booleanValue(azArg[1]);
}else
if( c=='h' && strncmp(azArg[0], "help", n)==0 ){
- fprintf(p->out, "%s", zHelp);
+ fprintf(stderr,"%s",zHelp);
+ if( HAS_TIMER ){
+ fprintf(stderr,"%s",zTimerHelp);
+ }
}else
- if( c=='i' && strncmp(azArg[0], "import", n)==0 ){
- char *zTable; /* Insert data into this table */
- char *zFile; /* Name of file to extract content from */
+ if( c=='i' && strncmp(azArg[0], "import", n)==0 && nArg==3 ){
+ char *zTable = azArg[2]; /* Insert data into this table */
+ char *zFile = azArg[1]; /* Name of file to extract content from */
sqlite3_stmt *pStmt = NULL; /* A statement */
int nCol; /* Number of columns in the table */
int nByte; /* Number of bytes in an SQL string */
int i, j; /* Loop counters */
int needCommit; /* True to COMMIT or ROLLBACK at end */
- int nSep; /* Number of bytes in p->colSeparator[] */
+ int nSep; /* Number of bytes in p->separator[] */
char *zSql; /* An SQL statement */
- ImportCtx sCtx; /* Reader context */
- char *(SQLITE_CDECL *xRead)(ImportCtx*); /* Func to read one value */
- int (SQLITE_CDECL *xCloser)(FILE*); /* Func to close file */
+ CSVReader sCsv; /* Reader context */
+ int (*xCloser)(FILE*); /* Procedure to close the connection */
- if( nArg!=3 ){
- fprintf(stderr, "Usage: .import FILE TABLE\n");
- goto meta_command_exit;
- }
- zFile = azArg[1];
- zTable = azArg[2];
seenInterrupt = 0;
- memset(&sCtx, 0, sizeof(sCtx));
+ memset(&sCsv, 0, sizeof(sCsv));
open_db(p, 0);
- nSep = strlen30(p->colSeparator);
+ nSep = strlen30(p->separator);
if( nSep==0 ){
- fprintf(stderr, "Error: non-null column separator required for import\n");
+ fprintf(stderr, "Error: non-null separator required for import\n");
return 1;
}
if( nSep>1 ){
- fprintf(stderr, "Error: multi-character column separators not allowed"
- " for import\n");
- return 1;
- }
- nSep = strlen30(p->rowSeparator);
- if( nSep==0 ){
- fprintf(stderr, "Error: non-null row separator required for import\n");
- return 1;
- }
- if( nSep==2 && p->mode==MODE_Csv && strcmp(p->rowSeparator, SEP_CrLf)==0 ){
- /* When importing CSV (only), if the row separator is set to the
- ** default output row separator, change it to the default input
- ** row separator. This avoids having to maintain different input
- ** and output row separators. */
- sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row);
- nSep = strlen30(p->rowSeparator);
- }
- if( nSep>1 ){
- fprintf(stderr, "Error: multi-character row separators not allowed"
+ fprintf(stderr, "Error: multi-character separators not allowed"
" for import\n");
return 1;
}
- sCtx.zFile = zFile;
- sCtx.nLine = 1;
- if( sCtx.zFile[0]=='|' ){
-#ifdef SQLITE_OMIT_POPEN
- fprintf(stderr, "Error: pipes are not supported in this OS\n");
- return 1;
-#else
- sCtx.in = popen(sCtx.zFile+1, "r");
- sCtx.zFile = "<pipe>";
+ sCsv.zFile = zFile;
+ sCsv.nLine = 1;
+ if( sCsv.zFile[0]=='|' ){
+ sCsv.in = popen(sCsv.zFile+1, "r");
+ sCsv.zFile = "<pipe>";
xCloser = pclose;
-#endif
}else{
- sCtx.in = fopen(sCtx.zFile, "rb");
+ sCsv.in = fopen(sCsv.zFile, "rb");
xCloser = fclose;
}
- if( p->mode==MODE_Ascii ){
- xRead = ascii_read_one_field;
- }else{
- xRead = csv_read_one_field;
- }
- if( sCtx.in==0 ){
+ if( sCsv.in==0 ){
fprintf(stderr, "Error: cannot open \"%s\"\n", zFile);
return 1;
}
- sCtx.cColSep = p->colSeparator[0];
- sCtx.cRowSep = p->rowSeparator[0];
+ sCsv.cSeparator = p->separator[0];
zSql = sqlite3_mprintf("SELECT * FROM %s", zTable);
if( zSql==0 ){
fprintf(stderr, "Error: out of memory\n");
- xCloser(sCtx.in);
+ xCloser(sCsv.in);
return 1;
}
nByte = strlen30(zSql);
- rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
- import_append_char(&sCtx, 0); /* To ensure sCtx.z is allocated */
- if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(p->db))==0 ){
+ rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0);
+ if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(db))==0 ){
char *zCreate = sqlite3_mprintf("CREATE TABLE %s", zTable);
char cSep = '(';
- while( xRead(&sCtx) ){
- zCreate = sqlite3_mprintf("%z%c\n \"%s\" TEXT", zCreate, cSep, sCtx.z);
+ while( csv_read_one_field(&sCsv) ){
+ zCreate = sqlite3_mprintf("%z%c\n \"%s\" TEXT", zCreate, cSep, sCsv.z);
cSep = ',';
- if( sCtx.cTerm!=sCtx.cColSep ) break;
+ if( sCsv.cTerm!=sCsv.cSeparator ) break;
}
if( cSep=='(' ){
sqlite3_free(zCreate);
- sqlite3_free(sCtx.z);
- xCloser(sCtx.in);
- fprintf(stderr,"%s: empty file\n", sCtx.zFile);
+ sqlite3_free(sCsv.z);
+ xCloser(sCsv.in);
+ fprintf(stderr,"%s: empty file\n", sCsv.zFile);
return 1;
}
zCreate = sqlite3_mprintf("%z\n)", zCreate);
@@ -3070,28 +2194,28 @@ static int do_meta_command(char *zLine, ShellState *p){
sqlite3_free(zCreate);
if( rc ){
fprintf(stderr, "CREATE TABLE %s(...) failed: %s\n", zTable,
- sqlite3_errmsg(p->db));
- sqlite3_free(sCtx.z);
- xCloser(sCtx.in);
+ sqlite3_errmsg(db));
+ sqlite3_free(sCsv.z);
+ xCloser(sCsv.in);
return 1;
}
- rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+ rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0);
}
sqlite3_free(zSql);
if( rc ){
if (pStmt) sqlite3_finalize(pStmt);
- fprintf(stderr,"Error: %s\n", sqlite3_errmsg(p->db));
- xCloser(sCtx.in);
+ fprintf(stderr,"Error: %s\n", sqlite3_errmsg(db));
+ xCloser(sCsv.in);
return 1;
}
nCol = sqlite3_column_count(pStmt);
sqlite3_finalize(pStmt);
pStmt = 0;
if( nCol==0 ) return 0; /* no columns, no error */
- zSql = sqlite3_malloc64( nByte*2 + 20 + nCol*2 );
+ zSql = sqlite3_malloc( nByte*2 + 20 + nCol*2 );
if( zSql==0 ){
fprintf(stderr, "Error: out of memory\n");
- xCloser(sCtx.in);
+ xCloser(sCsv.in);
return 1;
}
sqlite3_snprintf(nByte+20, zSql, "INSERT INTO \"%w\" VALUES(?", zTable);
@@ -3102,68 +2226,57 @@ static int do_meta_command(char *zLine, ShellState *p){
}
zSql[j++] = ')';
zSql[j] = 0;
- rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+ rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0);
sqlite3_free(zSql);
if( rc ){
- fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
+ fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db));
if (pStmt) sqlite3_finalize(pStmt);
- xCloser(sCtx.in);
+ xCloser(sCsv.in);
return 1;
}
- needCommit = sqlite3_get_autocommit(p->db);
- if( needCommit ) sqlite3_exec(p->db, "BEGIN", 0, 0, 0);
+ needCommit = sqlite3_get_autocommit(db);
+ if( needCommit ) sqlite3_exec(db, "BEGIN", 0, 0, 0);
do{
- int startLine = sCtx.nLine;
+ int startLine = sCsv.nLine;
for(i=0; i<nCol; i++){
- char *z = xRead(&sCtx);
- /*
- ** Did we reach end-of-file before finding any columns?
- ** If so, stop instead of NULL filling the remaining columns.
- */
+ char *z = csv_read_one_field(&sCsv);
if( z==0 && i==0 ) break;
- /*
- ** Did we reach end-of-file OR end-of-line before finding any
- ** columns in ASCII mode? If so, stop instead of NULL filling
- ** the remaining columns.
- */
- if( p->mode==MODE_Ascii && (z==0 || z[0]==0) && i==0 ) break;
sqlite3_bind_text(pStmt, i+1, z, -1, SQLITE_TRANSIENT);
- if( i<nCol-1 && sCtx.cTerm!=sCtx.cColSep ){
+ if( i<nCol-1 && sCsv.cTerm!=sCsv.cSeparator ){
fprintf(stderr, "%s:%d: expected %d columns but found %d - "
"filling the rest with NULL\n",
- sCtx.zFile, startLine, nCol, i+1);
- i += 2;
- while( i<=nCol ){ sqlite3_bind_null(pStmt, i); i++; }
+ sCsv.zFile, startLine, nCol, i+1);
+ i++;
+ while( i<nCol ){ sqlite3_bind_null(pStmt, i); i++; }
}
}
- if( sCtx.cTerm==sCtx.cColSep ){
+ if( sCsv.cTerm==sCsv.cSeparator ){
do{
- xRead(&sCtx);
+ csv_read_one_field(&sCsv);
i++;
- }while( sCtx.cTerm==sCtx.cColSep );
+ }while( sCsv.cTerm==sCsv.cSeparator );
fprintf(stderr, "%s:%d: expected %d columns but found %d - "
"extras ignored\n",
- sCtx.zFile, startLine, nCol, i);
+ sCsv.zFile, startLine, nCol, i);
}
if( i>=nCol ){
sqlite3_step(pStmt);
rc = sqlite3_reset(pStmt);
if( rc!=SQLITE_OK ){
- fprintf(stderr, "%s:%d: INSERT failed: %s\n", sCtx.zFile, startLine,
- sqlite3_errmsg(p->db));
+ fprintf(stderr, "%s:%d: INSERT failed: %s\n", sCsv.zFile, startLine,
+ sqlite3_errmsg(db));
}
}
- }while( sCtx.cTerm!=EOF );
+ }while( sCsv.cTerm!=EOF );
- xCloser(sCtx.in);
- sqlite3_free(sCtx.z);
+ xCloser(sCsv.in);
+ sqlite3_free(sCsv.z);
sqlite3_finalize(pStmt);
- if( needCommit ) sqlite3_exec(p->db, "COMMIT", 0, 0, 0);
+ if( needCommit ) sqlite3_exec(db, "COMMIT", 0, 0, 0);
}else
- if( c=='i' && (strncmp(azArg[0], "indices", n)==0
- || strncmp(azArg[0], "indexes", n)==0) ){
- ShellState data;
+ if( c=='i' && strncmp(azArg[0], "indices", n)==0 && nArg<3 ){
+ struct callback_data data;
char *zErrMsg = 0;
open_db(p, 0);
memcpy(&data, p, sizeof(data));
@@ -3179,7 +2292,7 @@ static int do_meta_command(char *zLine, ShellState *p){
"ORDER BY 1",
callback, &data, &zErrMsg
);
- }else if( nArg==2 ){
+ }else{
zShellStatic = azArg[1];
rc = sqlite3_exec(p->db,
"SELECT name FROM sqlite_master "
@@ -3191,10 +2304,6 @@ static int do_meta_command(char *zLine, ShellState *p){
callback, &data, &zErrMsg
);
zShellStatic = 0;
- }else{
- fprintf(stderr, "Usage: .indexes ?LIKE-PATTERN?\n");
- rc = 1;
- goto meta_command_exit;
}
if( zErrMsg ){
fprintf(stderr,"Error: %s\n", zErrMsg);
@@ -3208,7 +2317,7 @@ static int do_meta_command(char *zLine, ShellState *p){
#ifdef SQLITE_ENABLE_IOTRACE
if( c=='i' && strncmp(azArg[0], "iotrace", n)==0 ){
- SQLITE_API extern void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...);
+ extern void (*sqlite3IoTrace)(const char*, ...);
if( iotrace && iotrace!=stdout ) fclose(iotrace);
iotrace = 0;
if( nArg<2 ){
@@ -3228,74 +2337,11 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
#endif
- if( c=='l' && n>=5 && strncmp(azArg[0], "limits", n)==0 ){
- static const struct {
- const char *zLimitName; /* Name of a limit */
- int limitCode; /* Integer code for that limit */
- } aLimit[] = {
- { "length", SQLITE_LIMIT_LENGTH },
- { "sql_length", SQLITE_LIMIT_SQL_LENGTH },
- { "column", SQLITE_LIMIT_COLUMN },
- { "expr_depth", SQLITE_LIMIT_EXPR_DEPTH },
- { "compound_select", SQLITE_LIMIT_COMPOUND_SELECT },
- { "vdbe_op", SQLITE_LIMIT_VDBE_OP },
- { "function_arg", SQLITE_LIMIT_FUNCTION_ARG },
- { "attached", SQLITE_LIMIT_ATTACHED },
- { "like_pattern_length", SQLITE_LIMIT_LIKE_PATTERN_LENGTH },
- { "variable_number", SQLITE_LIMIT_VARIABLE_NUMBER },
- { "trigger_depth", SQLITE_LIMIT_TRIGGER_DEPTH },
- { "worker_threads", SQLITE_LIMIT_WORKER_THREADS },
- };
- int i, n2;
- open_db(p, 0);
- if( nArg==1 ){
- for(i=0; i<ArraySize(aLimit); i++){
- printf("%20s %d\n", aLimit[i].zLimitName,
- sqlite3_limit(p->db, aLimit[i].limitCode, -1));
- }
- }else if( nArg>3 ){
- fprintf(stderr, "Usage: .limit NAME ?NEW-VALUE?\n");
- rc = 1;
- goto meta_command_exit;
- }else{
- int iLimit = -1;
- n2 = strlen30(azArg[1]);
- for(i=0; i<ArraySize(aLimit); i++){
- if( sqlite3_strnicmp(aLimit[i].zLimitName, azArg[1], n2)==0 ){
- if( iLimit<0 ){
- iLimit = i;
- }else{
- fprintf(stderr, "ambiguous limit: \"%s\"\n", azArg[1]);
- rc = 1;
- goto meta_command_exit;
- }
- }
- }
- if( iLimit<0 ){
- fprintf(stderr, "unknown limit: \"%s\"\n"
- "enter \".limits\" with no arguments for a list.\n",
- azArg[1]);
- rc = 1;
- goto meta_command_exit;
- }
- if( nArg==3 ){
- sqlite3_limit(p->db, aLimit[iLimit].limitCode,
- (int)integerValue(azArg[2]));
- }
- printf("%20s %d\n", aLimit[iLimit].zLimitName,
- sqlite3_limit(p->db, aLimit[iLimit].limitCode, -1));
- }
- }else
#ifndef SQLITE_OMIT_LOAD_EXTENSION
- if( c=='l' && strncmp(azArg[0], "load", n)==0 ){
+ if( c=='l' && strncmp(azArg[0], "load", n)==0 && nArg>=2 ){
const char *zFile, *zProc;
char *zErrMsg = 0;
- if( nArg<2 ){
- fprintf(stderr, "Usage: .load FILE ?ENTRYPOINT?\n");
- rc = 1;
- goto meta_command_exit;
- }
zFile = azArg[1];
zProc = nArg>=3 ? azArg[2] : 0;
open_db(p, 0);
@@ -3308,70 +2354,70 @@ static int do_meta_command(char *zLine, ShellState *p){
}else
#endif
- if( c=='l' && strncmp(azArg[0], "log", n)==0 ){
- if( nArg!=2 ){
- fprintf(stderr, "Usage: .log FILENAME\n");
- rc = 1;
- }else{
- const char *zFile = azArg[1];
- output_file_close(p->pLog);
- p->pLog = output_file_open(zFile);
- }
+ if( c=='l' && strncmp(azArg[0], "log", n)==0 && nArg>=2 ){
+ const char *zFile = azArg[1];
+ output_file_close(p->pLog);
+ p->pLog = output_file_open(zFile);
}else
- if( c=='m' && strncmp(azArg[0], "mode", n)==0 ){
- const char *zMode = nArg>=2 ? azArg[1] : "";
- int n2 = (int)strlen(zMode);
- int c2 = zMode[0];
- if( c2=='l' && n2>2 && strncmp(azArg[1],"lines",n2)==0 ){
+ if( c=='m' && strncmp(azArg[0], "mode", n)==0 && nArg==2 ){
+ int n2 = strlen30(azArg[1]);
+ if( (n2==4 && strncmp(azArg[1],"line",n2)==0)
+ ||
+ (n2==5 && strncmp(azArg[1],"lines",n2)==0) ){
p->mode = MODE_Line;
- }else if( c2=='c' && strncmp(azArg[1],"columns",n2)==0 ){
+ }else if( (n2==6 && strncmp(azArg[1],"column",n2)==0)
+ ||
+ (n2==7 && strncmp(azArg[1],"columns",n2)==0) ){
p->mode = MODE_Column;
- }else if( c2=='l' && n2>2 && strncmp(azArg[1],"list",n2)==0 ){
+ }else if( n2==4 && strncmp(azArg[1],"list",n2)==0 ){
p->mode = MODE_List;
- }else if( c2=='h' && strncmp(azArg[1],"html",n2)==0 ){
+ }else if( n2==4 && strncmp(azArg[1],"html",n2)==0 ){
p->mode = MODE_Html;
- }else if( c2=='t' && strncmp(azArg[1],"tcl",n2)==0 ){
+ }else if( n2==3 && strncmp(azArg[1],"tcl",n2)==0 ){
p->mode = MODE_Tcl;
- sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Space);
- }else if( c2=='c' && strncmp(azArg[1],"csv",n2)==0 ){
+ sqlite3_snprintf(sizeof(p->separator), p->separator, " ");
+ }else if( n2==3 && strncmp(azArg[1],"csv",n2)==0 ){
p->mode = MODE_Csv;
- sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Comma);
- sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_CrLf);
- }else if( c2=='t' && strncmp(azArg[1],"tabs",n2)==0 ){
+ sqlite3_snprintf(sizeof(p->separator), p->separator, ",");
+ }else if( n2==4 && strncmp(azArg[1],"tabs",n2)==0 ){
p->mode = MODE_List;
- sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Tab);
- }else if( c2=='i' && strncmp(azArg[1],"insert",n2)==0 ){
+ sqlite3_snprintf(sizeof(p->separator), p->separator, "\t");
+ }else if( n2==6 && strncmp(azArg[1],"insert",n2)==0 ){
p->mode = MODE_Insert;
- set_table_name(p, nArg>=3 ? azArg[2] : "table");
- }else if( c2=='a' && strncmp(azArg[1],"ascii",n2)==0 ){
- p->mode = MODE_Ascii;
- sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Unit);
- sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Record);
+ set_table_name(p, "table");
}else {
fprintf(stderr,"Error: mode should be one of: "
- "ascii column csv html insert line list tabs tcl\n");
+ "column csv html insert line list tabs tcl\n");
rc = 1;
}
}else
- if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 ){
- if( nArg==2 ){
- sqlite3_snprintf(sizeof(p->nullValue), p->nullValue,
- "%.*s", (int)ArraySize(p->nullValue)-1, azArg[1]);
- }else{
- fprintf(stderr, "Usage: .nullvalue STRING\n");
+ if( c=='m' && strncmp(azArg[0], "mode", n)==0 && nArg==3 ){
+ int n2 = strlen30(azArg[1]);
+ if( n2==6 && strncmp(azArg[1],"insert",n2)==0 ){
+ p->mode = MODE_Insert;
+ set_table_name(p, azArg[2]);
+ }else {
+ fprintf(stderr, "Error: invalid arguments: "
+ " \"%s\". Enter \".help\" for help\n", azArg[2]);
rc = 1;
}
}else
+ if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 && nArg==2 ) {
+ sqlite3_snprintf(sizeof(p->nullvalue), p->nullvalue,
+ "%.*s", (int)ArraySize(p->nullvalue)-1, azArg[1]);
+ }else
+
if( c=='o' && strncmp(azArg[0], "open", n)==0 && n>=2 ){
sqlite3 *savedDb = p->db;
const char *zSavedFilename = p->zDbFilename;
char *zNewFilename = 0;
p->db = 0;
- if( nArg>=2 ) zNewFilename = sqlite3_mprintf("%s", azArg[1]);
- p->zDbFilename = zNewFilename;
+ if( nArg>=2 ){
+ p->zDbFilename = zNewFilename = sqlite3_mprintf("%s", azArg[1]);
+ }
open_db(p, 1);
if( p->db!=0 ){
sqlite3_close(savedDb);
@@ -3384,51 +2430,32 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
- if( c=='o'
- && (strncmp(azArg[0], "output", n)==0 || strncmp(azArg[0], "once", n)==0)
- ){
- const char *zFile = nArg>=2 ? azArg[1] : "stdout";
- if( nArg>2 ){
- fprintf(stderr, "Usage: .%s FILE\n", azArg[0]);
- rc = 1;
- goto meta_command_exit;
- }
- if( n>1 && strncmp(azArg[0], "once", n)==0 ){
- if( nArg<2 ){
- fprintf(stderr, "Usage: .once FILE\n");
- rc = 1;
- goto meta_command_exit;
- }
- p->outCount = 2;
+ if( c=='o' && strncmp(azArg[0], "output", n)==0 && nArg==2 ){
+ if( p->outfile[0]=='|' ){
+ pclose(p->out);
}else{
- p->outCount = 0;
+ output_file_close(p->out);
}
- output_reset(p);
- if( zFile[0]=='|' ){
-#ifdef SQLITE_OMIT_POPEN
- fprintf(stderr,"Error: pipes are not supported in this OS\n");
- rc = 1;
- p->out = stdout;
-#else
- p->out = popen(zFile + 1, "w");
+ p->outfile[0] = 0;
+ if( azArg[1][0]=='|' ){
+ p->out = popen(&azArg[1][1], "w");
if( p->out==0 ){
- fprintf(stderr,"Error: cannot open pipe \"%s\"\n", zFile + 1);
+ fprintf(stderr,"Error: cannot open pipe \"%s\"\n", &azArg[1][1]);
p->out = stdout;
rc = 1;
}else{
- sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", zFile);
+ sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", azArg[1]);
}
-#endif
}else{
- p->out = output_file_open(zFile);
+ p->out = output_file_open(azArg[1]);
if( p->out==0 ){
- if( strcmp(zFile,"off")!=0 ){
- fprintf(stderr,"Error: cannot write to \"%s\"\n", zFile);
+ if( strcmp(azArg[1],"off")!=0 ){
+ fprintf(stderr,"Error: cannot write to \"%s\"\n", azArg[1]);
}
p->out = stdout;
rc = 1;
} else {
- sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", zFile);
+ sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", azArg[1]);
}
}
}else
@@ -3442,7 +2469,7 @@ static int do_meta_command(char *zLine, ShellState *p){
fprintf(p->out, "\n");
}else
- if( c=='p' && strncmp(azArg[0], "prompt", n)==0 ){
+ if( c=='p' && strncmp(azArg[0], "prompt", n)==0 && (nArg==2 || nArg==3)){
if( nArg >= 2) {
strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1);
}
@@ -3451,18 +2478,12 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
- if( c=='q' && strncmp(azArg[0], "quit", n)==0 ){
+ if( c=='q' && strncmp(azArg[0], "quit", n)==0 && nArg==1 ){
rc = 2;
}else
- if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 ){
- FILE *alt;
- if( nArg!=2 ){
- fprintf(stderr, "Usage: .read FILE\n");
- rc = 1;
- goto meta_command_exit;
- }
- alt = fopen(azArg[1], "rb");
+ if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 && nArg==2 ){
+ FILE *alt = fopen(azArg[1], "rb");
if( alt==0 ){
fprintf(stderr,"Error: cannot open \"%s\"\n", azArg[1]);
rc = 1;
@@ -3472,7 +2493,7 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
- if( c=='r' && n>=3 && strncmp(azArg[0], "restore", n)==0 ){
+ if( c=='r' && n>=3 && strncmp(azArg[0], "restore", n)==0 && nArg>1 && nArg<4){
const char *zSrcFile;
const char *zDb;
sqlite3 *pSrc;
@@ -3482,13 +2503,9 @@ static int do_meta_command(char *zLine, ShellState *p){
if( nArg==2 ){
zSrcFile = azArg[1];
zDb = "main";
- }else if( nArg==3 ){
+ }else{
zSrcFile = azArg[2];
zDb = azArg[1];
- }else{
- fprintf(stderr, "Usage: .restore ?DB? FILE\n");
- rc = 1;
- goto meta_command_exit;
}
rc = sqlite3_open(zSrcFile, &pSrc);
if( rc!=SQLITE_OK ){
@@ -3523,27 +2540,14 @@ static int do_meta_command(char *zLine, ShellState *p){
sqlite3_close(pSrc);
}else
-
- if( c=='s' && strncmp(azArg[0], "scanstats", n)==0 ){
- if( nArg==2 ){
- p->scanstatsOn = booleanValue(azArg[1]);
-#ifndef SQLITE_ENABLE_STMT_SCANSTATUS
- fprintf(stderr, "Warning: .scanstats not available in this build.\n");
-#endif
- }else{
- fprintf(stderr, "Usage: .scanstats on|off\n");
- rc = 1;
- }
- }else
-
- if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){
- ShellState data;
+ if( c=='s' && strncmp(azArg[0], "schema", n)==0 && nArg<3 ){
+ struct callback_data data;
char *zErrMsg = 0;
open_db(p, 0);
memcpy(&data, p, sizeof(data));
data.showHeader = 0;
data.mode = MODE_Semi;
- if( nArg==2 ){
+ if( nArg>1 ){
int i;
for(i=0; azArg[1][i]; i++) azArg[1][i] = ToLower(azArg[1][i]);
if( strcmp(azArg[1],"sqlite_master")==0 ){
@@ -3587,20 +2591,16 @@ static int do_meta_command(char *zLine, ShellState *p){
callback, &data, &zErrMsg);
zShellStatic = 0;
}
- }else if( nArg==1 ){
+ }else{
rc = sqlite3_exec(p->db,
"SELECT sql FROM "
" (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x"
" FROM sqlite_master UNION ALL"
" SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
- "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
+ "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%'"
"ORDER BY rowid",
callback, &data, &zErrMsg
);
- }else{
- fprintf(stderr, "Usage: .schema ?LIKE-PATTERN?\n");
- rc = 1;
- goto meta_command_exit;
}
if( zErrMsg ){
fprintf(stderr,"Error: %s\n", zErrMsg);
@@ -3614,15 +2614,6 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
-
-#if defined(SQLITE_DEBUG) && defined(SQLITE_ENABLE_SELECTTRACE)
- if( c=='s' && n==11 && strncmp(azArg[0], "selecttrace", n)==0 ){
- extern int sqlite3SelectTrace;
- sqlite3SelectTrace = integerValue(azArg[1]);
- }else
-#endif
-
-
#ifdef SQLITE_DEBUG
/* Undocumented commands for internal testing. Subject to change
** without notice. */
@@ -3639,89 +2630,45 @@ static int do_meta_command(char *zLine, ShellState *p){
for(i=1; i<nArg; i++){
char zBuf[200];
v = integerValue(azArg[i]);
- sqlite3_snprintf(sizeof(zBuf),zBuf,"%s: %lld 0x%llx\n", azArg[i],v,v);
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%s: %lld 0x%llx\n", azArg[i], v, v);
fprintf(p->out, "%s", zBuf);
}
}
}else
#endif
- if( c=='s' && strncmp(azArg[0], "separator", n)==0 ){
- if( nArg<2 || nArg>3 ){
- fprintf(stderr, "Usage: .separator COL ?ROW?\n");
- rc = 1;
- }
- if( nArg>=2 ){
- sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator,
- "%.*s", (int)ArraySize(p->colSeparator)-1, azArg[1]);
- }
- if( nArg>=3 ){
- sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator,
- "%.*s", (int)ArraySize(p->rowSeparator)-1, azArg[2]);
- }
+ if( c=='s' && strncmp(azArg[0], "separator", n)==0 && nArg==2 ){
+ sqlite3_snprintf(sizeof(p->separator), p->separator,
+ "%.*s", (int)sizeof(p->separator)-1, azArg[1]);
}else
- if( c=='s'
- && (strncmp(azArg[0], "shell", n)==0 || strncmp(azArg[0],"system",n)==0)
- ){
- char *zCmd;
- int i, x;
- if( nArg<2 ){
- fprintf(stderr, "Usage: .system COMMAND\n");
- rc = 1;
- goto meta_command_exit;
- }
- zCmd = sqlite3_mprintf(strchr(azArg[1],' ')==0?"%s":"\"%s\"", azArg[1]);
- for(i=2; i<nArg; i++){
- zCmd = sqlite3_mprintf(strchr(azArg[i],' ')==0?"%z %s":"%z \"%s\"",
- zCmd, azArg[i]);
- }
- x = system(zCmd);
- sqlite3_free(zCmd);
- if( x ) fprintf(stderr, "System command returns %d\n", x);
- }else
-
- if( c=='s' && strncmp(azArg[0], "show", n)==0 ){
+ if( c=='s' && strncmp(azArg[0], "show", n)==0 && nArg==1 ){
int i;
- if( nArg!=1 ){
- fprintf(stderr, "Usage: .show\n");
- rc = 1;
- goto meta_command_exit;
- }
- fprintf(p->out,"%12.12s: %s\n","echo", p->echoOn ? "on" : "off");
- fprintf(p->out,"%12.12s: %s\n","eqp", p->autoEQP ? "on" : "off");
- fprintf(p->out,"%9.9s: %s\n","explain", p->normalMode.valid ? "on" :"off");
- fprintf(p->out,"%12.12s: %s\n","headers", p->showHeader ? "on" : "off");
- fprintf(p->out,"%12.12s: %s\n","mode", modeDescr[p->mode]);
- fprintf(p->out,"%12.12s: ", "nullvalue");
- output_c_string(p->out, p->nullValue);
+ fprintf(p->out,"%9.9s: %s\n","echo", p->echoOn ? "on" : "off");
+ fprintf(p->out,"%9.9s: %s\n","explain", p->explainPrev.valid ? "on" :"off");
+ fprintf(p->out,"%9.9s: %s\n","headers", p->showHeader ? "on" : "off");
+ fprintf(p->out,"%9.9s: %s\n","mode", modeDescr[p->mode]);
+ fprintf(p->out,"%9.9s: ", "nullvalue");
+ output_c_string(p->out, p->nullvalue);
fprintf(p->out, "\n");
- fprintf(p->out,"%12.12s: %s\n","output",
+ fprintf(p->out,"%9.9s: %s\n","output",
strlen30(p->outfile) ? p->outfile : "stdout");
- fprintf(p->out,"%12.12s: ", "colseparator");
- output_c_string(p->out, p->colSeparator);
- fprintf(p->out, "\n");
- fprintf(p->out,"%12.12s: ", "rowseparator");
- output_c_string(p->out, p->rowSeparator);
+ fprintf(p->out,"%9.9s: ", "separator");
+ output_c_string(p->out, p->separator);
fprintf(p->out, "\n");
- fprintf(p->out,"%12.12s: %s\n","stats", p->statsOn ? "on" : "off");
- fprintf(p->out,"%12.12s: ","width");
+ fprintf(p->out,"%9.9s: %s\n","stats", p->statsOn ? "on" : "off");
+ fprintf(p->out,"%9.9s: ","width");
for (i=0;i<(int)ArraySize(p->colWidth) && p->colWidth[i] != 0;i++) {
fprintf(p->out,"%d ",p->colWidth[i]);
}
fprintf(p->out,"\n");
}else
- if( c=='s' && strncmp(azArg[0], "stats", n)==0 ){
- if( nArg==2 ){
- p->statsOn = booleanValue(azArg[1]);
- }else{
- fprintf(stderr, "Usage: .stats on|off\n");
- rc = 1;
- }
+ if( c=='s' && strncmp(azArg[0], "stats", n)==0 && nArg>1 && nArg<3 ){
+ p->statsOn = booleanValue(azArg[1]);
}else
- if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 ){
+ if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 && nArg<3 ){
sqlite3_stmt *pStmt;
char **azResult;
int nRow, nAlloc;
@@ -3729,17 +2676,13 @@ static int do_meta_command(char *zLine, ShellState *p){
int ii;
open_db(p, 0);
rc = sqlite3_prepare_v2(p->db, "PRAGMA database_list", -1, &pStmt, 0);
- if( rc ) return shellDatabaseError(p->db);
-
- /* Create an SQL statement to query for the list of tables in the
- ** main and all attached databases where the table name matches the
- ** LIKE pattern bound to variable "?1". */
+ if( rc ) return rc;
zSql = sqlite3_mprintf(
"SELECT name FROM sqlite_master"
" WHERE type IN ('table','view')"
" AND name NOT LIKE 'sqlite_%%'"
" AND name LIKE ?1");
- while( zSql && sqlite3_step(pStmt)==SQLITE_ROW ){
+ while( sqlite3_step(pStmt)==SQLITE_ROW ){
const char *zDbName = (const char*)sqlite3_column_text(pStmt, 1);
if( zDbName==0 || strcmp(zDbName,"main")==0 ) continue;
if( strcmp(zDbName,"temp")==0 ){
@@ -3758,17 +2701,11 @@ static int do_meta_command(char *zLine, ShellState *p){
" AND name LIKE ?1", zSql, zDbName, zDbName);
}
}
- rc = sqlite3_finalize(pStmt);
- if( zSql && rc==SQLITE_OK ){
- zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
- if( zSql ) rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
- }
+ sqlite3_finalize(pStmt);
+ zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
+ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
sqlite3_free(zSql);
- if( !zSql ) return shellNomemError();
- if( rc ) return shellDatabaseError(p->db);
-
- /* Run the SQL statement prepared by the above block. Store the results
- ** as an array of nul-terminated strings in azResult[]. */
+ if( rc ) return rc;
nRow = nAlloc = 0;
azResult = 0;
if( nArg>1 ){
@@ -3779,28 +2716,20 @@ static int do_meta_command(char *zLine, ShellState *p){
while( sqlite3_step(pStmt)==SQLITE_ROW ){
if( nRow>=nAlloc ){
char **azNew;
- int n2 = nAlloc*2 + 10;
- azNew = sqlite3_realloc64(azResult, sizeof(azResult[0])*n2);
+ int n = nAlloc*2 + 10;
+ azNew = sqlite3_realloc(azResult, sizeof(azResult[0])*n);
if( azNew==0 ){
- rc = shellNomemError();
+ fprintf(stderr, "Error: out of memory\n");
break;
}
- nAlloc = n2;
+ nAlloc = n;
azResult = azNew;
}
azResult[nRow] = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 0));
- if( 0==azResult[nRow] ){
- rc = shellNomemError();
- break;
- }
- nRow++;
+ if( azResult[nRow] ) nRow++;
}
- if( sqlite3_finalize(pStmt)!=SQLITE_OK ){
- rc = shellDatabaseError(p->db);
- }
-
- /* Pretty-print the contents of array azResult[] to the output */
- if( rc==0 && nRow>0 ){
+ sqlite3_finalize(pStmt);
+ if( nRow>0 ){
int len, maxlen = 0;
int i, j;
int nPrintCol, nPrintRow;
@@ -3814,12 +2743,11 @@ static int do_meta_command(char *zLine, ShellState *p){
for(i=0; i<nPrintRow; i++){
for(j=i; j<nRow; j+=nPrintRow){
char *zSp = j<nPrintRow ? "" : " ";
- fprintf(p->out, "%s%-*s", zSp, maxlen, azResult[j] ? azResult[j]:"");
+ fprintf(p->out, "%s%-*s", zSp, maxlen, azResult[j] ? azResult[j] : "");
}
fprintf(p->out, "\n");
}
}
-
for(ii=0; ii<nRow; ii++) sqlite3_free(azResult[ii]);
sqlite3_free(azResult);
}else
@@ -3842,20 +2770,17 @@ static int do_meta_command(char *zLine, ShellState *p){
{ "optimizations", SQLITE_TESTCTRL_OPTIMIZATIONS },
{ "iskeyword", SQLITE_TESTCTRL_ISKEYWORD },
{ "scratchmalloc", SQLITE_TESTCTRL_SCRATCHMALLOC },
- { "byteorder", SQLITE_TESTCTRL_BYTEORDER },
- { "never_corrupt", SQLITE_TESTCTRL_NEVER_CORRUPT },
- { "imposter", SQLITE_TESTCTRL_IMPOSTER },
};
int testctrl = -1;
- int rc2 = 0;
- int i, n2;
+ int rc = 0;
+ int i, n;
open_db(p, 0);
/* convert testctrl text option to value. allow any unique prefix
** of the option name, or a numerical value. */
- n2 = strlen30(azArg[1]);
- for(i=0; i<ArraySize(aCtrl); i++){
- if( strncmp(azArg[1], aCtrl[i].zCtrlName, n2)==0 ){
+ n = strlen30(azArg[1]);
+ for(i=0; i<(int)(sizeof(aCtrl)/sizeof(aCtrl[0])); i++){
+ if( strncmp(azArg[1], aCtrl[i].zCtrlName, n)==0 ){
if( testctrl<0 ){
testctrl = aCtrl[i].ctrlCode;
}else{
@@ -3876,8 +2801,8 @@ static int do_meta_command(char *zLine, ShellState *p){
case SQLITE_TESTCTRL_RESERVE:
if( nArg==3 ){
int opt = (int)strtol(azArg[2], 0, 0);
- rc2 = sqlite3_test_control(testctrl, p->db, opt);
- fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
+ rc = sqlite3_test_control(testctrl, p->db, opt);
+ fprintf(p->out, "%d (0x%08x)\n", rc, rc);
} else {
fprintf(stderr,"Error: testctrl %s takes a single int option\n",
azArg[1]);
@@ -3885,13 +2810,12 @@ static int do_meta_command(char *zLine, ShellState *p){
break;
/* sqlite3_test_control(int) */
- case SQLITE_TESTCTRL_PRNG_SAVE:
- case SQLITE_TESTCTRL_PRNG_RESTORE:
+ case SQLITE_TESTCTRL_PRNG_SAVE:
+ case SQLITE_TESTCTRL_PRNG_RESTORE:
case SQLITE_TESTCTRL_PRNG_RESET:
- case SQLITE_TESTCTRL_BYTEORDER:
if( nArg==2 ){
- rc2 = sqlite3_test_control(testctrl);
- fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
+ rc = sqlite3_test_control(testctrl);
+ fprintf(p->out, "%d (0x%08x)\n", rc, rc);
} else {
fprintf(stderr,"Error: testctrl %s takes no options\n", azArg[1]);
}
@@ -3901,8 +2825,8 @@ static int do_meta_command(char *zLine, ShellState *p){
case SQLITE_TESTCTRL_PENDING_BYTE:
if( nArg==3 ){
unsigned int opt = (unsigned int)integerValue(azArg[2]);
- rc2 = sqlite3_test_control(testctrl, opt);
- fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
+ rc = sqlite3_test_control(testctrl, opt);
+ fprintf(p->out, "%d (0x%08x)\n", rc, rc);
} else {
fprintf(stderr,"Error: testctrl %s takes a single unsigned"
" int option\n", azArg[1]);
@@ -3911,12 +2835,11 @@ static int do_meta_command(char *zLine, ShellState *p){
/* sqlite3_test_control(int, int) */
case SQLITE_TESTCTRL_ASSERT:
- case SQLITE_TESTCTRL_ALWAYS:
- case SQLITE_TESTCTRL_NEVER_CORRUPT:
+ case SQLITE_TESTCTRL_ALWAYS:
if( nArg==3 ){
int opt = booleanValue(azArg[2]);
- rc2 = sqlite3_test_control(testctrl, opt);
- fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
+ rc = sqlite3_test_control(testctrl, opt);
+ fprintf(p->out, "%d (0x%08x)\n", rc, rc);
} else {
fprintf(stderr,"Error: testctrl %s takes a single int option\n",
azArg[1]);
@@ -3928,8 +2851,8 @@ static int do_meta_command(char *zLine, ShellState *p){
case SQLITE_TESTCTRL_ISKEYWORD:
if( nArg==3 ){
const char *opt = azArg[2];
- rc2 = sqlite3_test_control(testctrl, opt);
- fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
+ rc = sqlite3_test_control(testctrl, opt);
+ fprintf(p->out, "%d (0x%08x)\n", rc, rc);
} else {
fprintf(stderr,"Error: testctrl %s takes a single char * option\n",
azArg[1]);
@@ -3937,18 +2860,6 @@ static int do_meta_command(char *zLine, ShellState *p){
break;
#endif
- case SQLITE_TESTCTRL_IMPOSTER:
- if( nArg==5 ){
- rc2 = sqlite3_test_control(testctrl, p->db,
- azArg[2],
- integerValue(azArg[3]),
- integerValue(azArg[4]));
- fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
- }else{
- fprintf(stderr,"Usage: .testctrl imposter dbName onoff tnum\n");
- }
- break;
-
case SQLITE_TESTCTRL_BITVEC_TEST:
case SQLITE_TESTCTRL_FAULT_INSTALL:
case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS:
@@ -3961,31 +2872,19 @@ static int do_meta_command(char *zLine, ShellState *p){
}
}else
- if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 ){
+ if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 && nArg==2 ){
open_db(p, 0);
- sqlite3_busy_timeout(p->db, nArg>=2 ? (int)integerValue(azArg[1]) : 0);
+ sqlite3_busy_timeout(p->db, (int)integerValue(azArg[1]));
}else
- if( c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0 ){
- if( nArg==2 ){
- enableTimer = booleanValue(azArg[1]);
- if( enableTimer && !HAS_TIMER ){
- fprintf(stderr, "Error: timer not available on this system.\n");
- enableTimer = 0;
- }
- }else{
- fprintf(stderr, "Usage: .timer on|off\n");
- rc = 1;
- }
+ if( HAS_TIMER && c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0
+ && nArg==2
+ ){
+ enableTimer = booleanValue(azArg[1]);
}else
- if( c=='t' && strncmp(azArg[0], "trace", n)==0 ){
+ if( c=='t' && strncmp(azArg[0], "trace", n)==0 && nArg>1 ){
open_db(p, 0);
- if( nArg!=2 ){
- fprintf(stderr, "Usage: .trace FILE|off\n");
- rc = 1;
- goto meta_command_exit;
- }
output_file_close(p->traceOut);
p->traceOut = output_file_open(azArg[1]);
#if !defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_OMIT_FLOATING_POINT)
@@ -3997,71 +2896,6 @@ static int do_meta_command(char *zLine, ShellState *p){
#endif
}else
-#if SQLITE_USER_AUTHENTICATION
- if( c=='u' && strncmp(azArg[0], "user", n)==0 ){
- if( nArg<2 ){
- fprintf(stderr, "Usage: .user SUBCOMMAND ...\n");
- rc = 1;
- goto meta_command_exit;
- }
- open_db(p, 0);
- if( strcmp(azArg[1],"login")==0 ){
- if( nArg!=4 ){
- fprintf(stderr, "Usage: .user login USER PASSWORD\n");
- rc = 1;
- goto meta_command_exit;
- }
- rc = sqlite3_user_authenticate(p->db, azArg[2], azArg[3],
- (int)strlen(azArg[3]));
- if( rc ){
- fprintf(stderr, "Authentication failed for user %s\n", azArg[2]);
- rc = 1;
- }
- }else if( strcmp(azArg[1],"add")==0 ){
- if( nArg!=5 ){
- fprintf(stderr, "Usage: .user add USER PASSWORD ISADMIN\n");
- rc = 1;
- goto meta_command_exit;
- }
- rc = sqlite3_user_add(p->db, azArg[2],
- azArg[3], (int)strlen(azArg[3]),
- booleanValue(azArg[4]));
- if( rc ){
- fprintf(stderr, "User-Add failed: %d\n", rc);
- rc = 1;
- }
- }else if( strcmp(azArg[1],"edit")==0 ){
- if( nArg!=5 ){
- fprintf(stderr, "Usage: .user edit USER PASSWORD ISADMIN\n");
- rc = 1;
- goto meta_command_exit;
- }
- rc = sqlite3_user_change(p->db, azArg[2],
- azArg[3], (int)strlen(azArg[3]),
- booleanValue(azArg[4]));
- if( rc ){
- fprintf(stderr, "User-Edit failed: %d\n", rc);
- rc = 1;
- }
- }else if( strcmp(azArg[1],"delete")==0 ){
- if( nArg!=3 ){
- fprintf(stderr, "Usage: .user delete USER\n");
- rc = 1;
- goto meta_command_exit;
- }
- rc = sqlite3_user_delete(p->db, azArg[2]);
- if( rc ){
- fprintf(stderr, "User-Delete failed: %d\n", rc);
- rc = 1;
- }
- }else{
- fprintf(stderr, "Usage: .user login|add|edit|delete ...\n");
- rc = 1;
- goto meta_command_exit;
- }
- }else
-#endif /* SQLITE_USER_AUTHENTICATION */
-
if( c=='v' && strncmp(azArg[0], "version", n)==0 ){
fprintf(p->out, "SQLite %s %s\n" /*extra-version-info*/,
sqlite3_libversion(), sqlite3_sourceid());
@@ -4082,11 +2916,11 @@ static int do_meta_command(char *zLine, ShellState *p){
#if defined(SQLITE_DEBUG) && defined(SQLITE_ENABLE_WHERETRACE)
if( c=='w' && strncmp(azArg[0], "wheretrace", n)==0 ){
extern int sqlite3WhereTrace;
- sqlite3WhereTrace = nArg>=2 ? booleanValue(azArg[1]) : 0xff;
+ sqlite3WhereTrace = booleanValue(azArg[1]);
}else
#endif
- if( c=='w' && strncmp(azArg[0], "width", n)==0 ){
+ if( c=='w' && strncmp(azArg[0], "width", n)==0 && nArg>1 ){
int j;
assert( nArg<=ArraySize(azArg) );
for(j=1; j<nArg && j<ArraySize(p->colWidth); j++){
@@ -4100,11 +2934,6 @@ static int do_meta_command(char *zLine, ShellState *p){
rc = 1;
}
-meta_command_exit:
- if( p->outCount ){
- p->outCount--;
- if( p->outCount==0 ) output_reset(p);
- }
return rc;
}
@@ -4182,7 +3011,7 @@ static int line_is_complete(char *zSql, int nSql){
**
** Return the number of errors.
*/
-static int process_input(ShellState *p, FILE *in){
+static int process_input(struct callback_data *p, FILE *in){
char *zLine = 0; /* A single input line */
char *zSql = 0; /* Accumulated SQL text */
int nLine; /* Length of current line */
@@ -4208,10 +3037,7 @@ static int process_input(ShellState *p, FILE *in){
seenInterrupt = 0;
}
lineno++;
- if( nSql==0 && _all_whitespace(zLine) ){
- if( p->echoOn ) printf("%s\n", zLine);
- continue;
- }
+ if( nSql==0 && _all_whitespace(zLine) ) continue;
if( zLine && zLine[0]=='.' && nSql==0 ){
if( p->echoOn ) printf("%s\n", zLine);
rc = do_meta_command(zLine, p);
@@ -4251,7 +3077,6 @@ static int process_input(ShellState *p, FILE *in){
&& sqlite3_complete(zSql) ){
p->cnt = 0;
open_db(p, 0);
- if( p->backslashOn ) resolve_backslashes(zSql);
BEGIN_TIMER;
rc = shell_exec(p->db, zSql, shell_callback, p, &zErrMsg);
END_TIMER;
@@ -4273,22 +3098,16 @@ static int process_input(ShellState *p, FILE *in){
errCnt++;
}
nSql = 0;
- if( p->outCount ){
- output_reset(p);
- p->outCount = 0;
- }
}else if( nSql && _all_whitespace(zSql) ){
- if( p->echoOn ) printf("%s\n", zSql);
nSql = 0;
}
}
if( nSql ){
if( !_all_whitespace(zSql) ){
fprintf(stderr, "Error: incomplete SQL: %s\n", zSql);
- errCnt++;
}
+ free(zSql);
}
- free(zSql);
free(zLine);
return errCnt>0;
}
@@ -4301,8 +3120,7 @@ static char *find_home_dir(void){
static char *home_dir = NULL;
if( home_dir ) return home_dir;
-#if !defined(_WIN32) && !defined(WIN32) && !defined(_WIN32_WCE) \
- && !defined(__RTP__) && !defined(_WRS_KERNEL)
+#if !defined(_WIN32) && !defined(WIN32) && !defined(_WIN32_WCE) && !defined(__RTP__) && !defined(_WRS_KERNEL)
{
struct passwd *pwent;
uid_t uid = getuid();
@@ -4363,21 +3181,23 @@ static char *find_home_dir(void){
**
** Returns the number of errors.
*/
-static void process_sqliterc(
- ShellState *p, /* Configuration data */
+static int process_sqliterc(
+ struct callback_data *p, /* Configuration data */
const char *sqliterc_override /* Name of config file. NULL to use default */
){
char *home_dir = NULL;
const char *sqliterc = sqliterc_override;
char *zBuf = 0;
FILE *in = NULL;
+ int rc = 0;
if (sqliterc == NULL) {
home_dir = find_home_dir();
if( home_dir==0 ){
- fprintf(stderr, "-- warning: cannot find home directory;"
- " cannot read ~/.sqliterc\n");
- return;
+#if !defined(__RTP__) && !defined(_WRS_KERNEL)
+ fprintf(stderr,"%s: Error: cannot locate your home directory\n", Argv0);
+#endif
+ return 1;
}
sqlite3_initialize();
zBuf = sqlite3_mprintf("%s/.sqliterc",home_dir);
@@ -4388,17 +3208,17 @@ static void process_sqliterc(
if( stdin_is_interactive ){
fprintf(stderr,"-- Loading resources from %s\n",sqliterc);
}
- process_input(p,in);
+ rc = process_input(p,in);
fclose(in);
}
sqlite3_free(zBuf);
+ return rc;
}
/*
** Show available command line options
*/
static const char zOptions[] =
- " -ascii set output mode to 'ascii'\n"
" -bail stop after hitting an error\n"
" -batch force batch I/O\n"
" -column set output mode to 'column'\n"
@@ -4415,16 +3235,12 @@ static const char zOptions[] =
" -interactive force interactive I/O\n"
" -line set output mode to 'line'\n"
" -list set output mode to 'list'\n"
- " -lookaside SIZE N use N entries of SZ bytes for lookaside memory\n"
" -mmap N default mmap size set to N\n"
#ifdef SQLITE_ENABLE_MULTIPLEX
" -multiplex enable the multiplexor VFS\n"
#endif
- " -newline SEP set output row separator. Default: '\\n'\n"
" -nullvalue TEXT set text string for NULL values. Default ''\n"
- " -pagecache SIZE N use N slots of SZ bytes each for page cache memory\n"
- " -scratch SIZE N use N slots of SZ bytes each for scratch memory\n"
- " -separator SEP set output column separator. Default: '|'\n"
+ " -separator SEP set output field separator. Default: '|'\n"
" -stats print memory stats before each finalize\n"
" -version show SQLite version\n"
" -vfs NAME use NAME as the default VFS\n"
@@ -4448,41 +3264,19 @@ static void usage(int showDetail){
/*
** Initialize the state information in data
*/
-static void main_init(ShellState *data) {
+static void main_init(struct callback_data *data) {
memset(data, 0, sizeof(*data));
data->mode = MODE_List;
- memcpy(data->colSeparator,SEP_Column, 2);
- memcpy(data->rowSeparator,SEP_Row, 2);
+ memcpy(data->separator,"|", 2);
data->showHeader = 0;
- data->shellFlgs = SHFLG_Lookaside;
sqlite3_config(SQLITE_CONFIG_URI, 1);
sqlite3_config(SQLITE_CONFIG_LOG, shellLog, data);
- sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
sqlite3_snprintf(sizeof(mainPrompt), mainPrompt,"sqlite> ");
sqlite3_snprintf(sizeof(continuePrompt), continuePrompt," ...> ");
+ sqlite3_config(SQLITE_CONFIG_SINGLETHREAD);
}
/*
-** Output text to the console in a font that attracts extra attention.
-*/
-#ifdef _WIN32
-static void printBold(const char *zText){
- HANDLE out = GetStdHandle(STD_OUTPUT_HANDLE);
- CONSOLE_SCREEN_BUFFER_INFO defaultScreenInfo;
- GetConsoleScreenBufferInfo(out, &defaultScreenInfo);
- SetConsoleTextAttribute(out,
- FOREGROUND_RED|FOREGROUND_INTENSITY
- );
- printf("%s", zText);
- SetConsoleTextAttribute(out, defaultScreenInfo.wAttributes);
-}
-#else
-static void printBold(const char *zText){
- printf("\033[1m%s\033[0m", zText);
-}
-#endif
-
-/*
** Get the argument to an --option. Throw an error and die if no argument
** is available.
*/
@@ -4495,26 +3289,19 @@ static char *cmdline_option_value(int argc, char **argv, int i){
return argv[i];
}
-int SQLITE_CDECL main(int argc, char **argv){
+int main(int argc, char **argv){
char *zErrMsg = 0;
- ShellState data;
+ struct callback_data data;
const char *zInitFile = 0;
+ char *zFirstCmd = 0;
int i;
int rc = 0;
- int warnInmemoryDb = 0;
- int readStdin = 1;
- int nCmd = 0;
- char **azCmd = 0;
-#if USE_SYSTEM_SQLITE+0!=1
if( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)!=0 ){
fprintf(stderr, "SQLite header and source version mismatch\n%s\n%s\n",
sqlite3_sourceid(), SQLITE_SOURCE_ID);
exit(1);
}
-#endif
- setBinaryMode(stdin);
- setvbuf(stderr, 0, _IONBF, 0); /* Make sure stderr is unbuffered */
Argv0 = argv[0];
main_init(&data);
stdin_is_interactive = isatty(0);
@@ -4526,18 +3313,6 @@ int SQLITE_CDECL main(int argc, char **argv){
signal(SIGINT, interrupt_handler);
#endif
-#ifdef SQLITE_SHELL_DBNAME_PROC
- {
- /* If the SQLITE_SHELL_DBNAME_PROC macro is defined, then it is the name
- ** of a C-function that will provide the name of the database file. Use
- ** this compile-time option to embed this shell program in larger
- ** applications. */
- extern void SQLITE_SHELL_DBNAME_PROC(const char**);
- SQLITE_SHELL_DBNAME_PROC(&data.zDbFilename);
- warnInmemoryDb = 0;
- }
-#endif
-
/* Do an initial pass through the command-line argument to locate
** the name of the database file, the name of the initialization file,
** the size of the alternative malloc heap,
@@ -4549,23 +3324,19 @@ int SQLITE_CDECL main(int argc, char **argv){
if( z[0]!='-' ){
if( data.zDbFilename==0 ){
data.zDbFilename = z;
- }else{
- /* Excess arguments are interpreted as SQL (or dot-commands) and
- ** mean that nothing is read from stdin */
- readStdin = 0;
- nCmd++;
- azCmd = realloc(azCmd, sizeof(azCmd[0])*nCmd);
- if( azCmd==0 ){
- fprintf(stderr, "out of memory\n");
- exit(1);
- }
- azCmd[nCmd-1] = z;
+ continue;
+ }
+ if( zFirstCmd==0 ){
+ zFirstCmd = z;
+ continue;
}
+ fprintf(stderr,"%s: Error: too many options: \"%s\"\n", Argv0, argv[i]);
+ fprintf(stderr,"Use -help for a list of options.\n");
+ return 1;
}
if( z[1]=='-' ) z++;
if( strcmp(z,"-separator")==0
|| strcmp(z,"-nullvalue")==0
- || strcmp(z,"-newline")==0
|| strcmp(z,"-cmd")==0
){
(void)cmdline_option_value(argc, argv, ++i);
@@ -4587,33 +3358,6 @@ int SQLITE_CDECL main(int argc, char **argv){
if( szHeap>0x7fff0000 ) szHeap = 0x7fff0000;
sqlite3_config(SQLITE_CONFIG_HEAP, malloc((int)szHeap), (int)szHeap, 64);
#endif
- }else if( strcmp(z,"-scratch")==0 ){
- int n, sz;
- sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
- if( sz>400000 ) sz = 400000;
- if( sz<2500 ) sz = 2500;
- n = (int)integerValue(cmdline_option_value(argc,argv,++i));
- if( n>10 ) n = 10;
- if( n<1 ) n = 1;
- sqlite3_config(SQLITE_CONFIG_SCRATCH, malloc(n*sz+1), sz, n);
- data.shellFlgs |= SHFLG_Scratch;
- }else if( strcmp(z,"-pagecache")==0 ){
- int n, sz;
- sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
- if( sz>70000 ) sz = 70000;
- if( sz<800 ) sz = 800;
- n = (int)integerValue(cmdline_option_value(argc,argv,++i));
- if( n<10 ) n = 10;
- sqlite3_config(SQLITE_CONFIG_PAGECACHE, malloc(n*sz+1), sz, n);
- data.shellFlgs |= SHFLG_Pagecache;
- }else if( strcmp(z,"-lookaside")==0 ){
- int n, sz;
- sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
- if( sz<0 ) sz = 0;
- n = (int)integerValue(cmdline_option_value(argc,argv,++i));
- if( n<0 ) n = 0;
- sqlite3_config(SQLITE_CONFIG_LOOKASIDE, sz, n);
- if( sz*n==0 ) data.shellFlgs &= ~SHFLG_Lookaside;
#ifdef SQLITE_ENABLE_VFSTRACE
}else if( strcmp(z,"-vfstrace")==0 ){
extern int vfstrace_register(
@@ -4646,7 +3390,6 @@ int SQLITE_CDECL main(int argc, char **argv){
if( data.zDbFilename==0 ){
#ifndef SQLITE_OMIT_MEMORYDB
data.zDbFilename = ":memory:";
- warnInmemoryDb = argc==1;
#else
fprintf(stderr,"%s: Error: no database filename specified\n", Argv0);
return 1;
@@ -4667,7 +3410,10 @@ int SQLITE_CDECL main(int argc, char **argv){
** is given on the command line, look for a file named ~/.sqliterc and
** try to process it.
*/
- process_sqliterc(&data,zInitFile);
+ rc = process_sqliterc(&data,zInitFile);
+ if( rc>0 ){
+ return rc;
+ }
/* Make a second pass through the command-line argument and set
** options. This second pass is delayed until after the initialization
@@ -4690,21 +3436,12 @@ int SQLITE_CDECL main(int argc, char **argv){
data.mode = MODE_Column;
}else if( strcmp(z,"-csv")==0 ){
data.mode = MODE_Csv;
- memcpy(data.colSeparator,",",2);
- }else if( strcmp(z,"-ascii")==0 ){
- data.mode = MODE_Ascii;
- sqlite3_snprintf(sizeof(data.colSeparator), data.colSeparator,
- SEP_Unit);
- sqlite3_snprintf(sizeof(data.rowSeparator), data.rowSeparator,
- SEP_Record);
+ memcpy(data.separator,",",2);
}else if( strcmp(z,"-separator")==0 ){
- sqlite3_snprintf(sizeof(data.colSeparator), data.colSeparator,
- "%s",cmdline_option_value(argc,argv,++i));
- }else if( strcmp(z,"-newline")==0 ){
- sqlite3_snprintf(sizeof(data.rowSeparator), data.rowSeparator,
+ sqlite3_snprintf(sizeof(data.separator), data.separator,
"%s",cmdline_option_value(argc,argv,++i));
}else if( strcmp(z,"-nullvalue")==0 ){
- sqlite3_snprintf(sizeof(data.nullValue), data.nullValue,
+ sqlite3_snprintf(sizeof(data.nullvalue), data.nullvalue,
"%s",cmdline_option_value(argc,argv,++i));
}else if( strcmp(z,"-header")==0 ){
data.showHeader = 1;
@@ -4712,19 +3449,8 @@ int SQLITE_CDECL main(int argc, char **argv){
data.showHeader = 0;
}else if( strcmp(z,"-echo")==0 ){
data.echoOn = 1;
- }else if( strcmp(z,"-eqp")==0 ){
- data.autoEQP = 1;
}else if( strcmp(z,"-stats")==0 ){
data.statsOn = 1;
- }else if( strcmp(z,"-scanstats")==0 ){
- data.scanstatsOn = 1;
- }else if( strcmp(z,"-backslash")==0 ){
- /* Undocumented command-line option: -backslash
- ** Causes C-style backslash escapes to be evaluated in SQL statements
- ** prior to sending the SQL into SQLite. Useful for injecting
- ** crazy bytes in the middle of SQL statements for testing and debugging.
- */
- data.backslashOn = 1;
}else if( strcmp(z,"-bail")==0 ){
bail_on_error = 1;
}else if( strcmp(z,"-version")==0 ){
@@ -4736,12 +3462,6 @@ int SQLITE_CDECL main(int argc, char **argv){
stdin_is_interactive = 0;
}else if( strcmp(z,"-heap")==0 ){
i++;
- }else if( strcmp(z,"-scratch")==0 ){
- i+=2;
- }else if( strcmp(z,"-pagecache")==0 ){
- i+=2;
- }else if( strcmp(z,"-lookaside")==0 ){
- i+=2;
}else if( strcmp(z,"-mmap")==0 ){
i++;
}else if( strcmp(z,"-vfs")==0 ){
@@ -4757,10 +3477,6 @@ int SQLITE_CDECL main(int argc, char **argv){
}else if( strcmp(z,"-help")==0 ){
usage(1);
}else if( strcmp(z,"-cmd")==0 ){
- /* Run commands that follow -cmd first and separately from commands
- ** that simply appear on the command-line. This seems goofy. It would
- ** be better if all commands ran in the order that they appear. But
- ** we retain the goofy behavior for historical compatibility. */
if( i==argc-1 ) break;
z = cmdline_option_value(argc,argv,++i);
if( z[0]=='.' ){
@@ -4784,28 +3500,23 @@ int SQLITE_CDECL main(int argc, char **argv){
}
}
- if( !readStdin ){
- /* Run all arguments that do not begin with '-' as if they were separate
- ** command-line inputs, except for the argToSkip argument which contains
- ** the database filename.
+ if( zFirstCmd ){
+ /* Run just the command that follows the database name
*/
- for(i=0; i<nCmd; i++){
- if( azCmd[i][0]=='.' ){
- rc = do_meta_command(azCmd[i], &data);
- if( rc ) return rc==2 ? 0 : rc;
- }else{
- open_db(&data, 0);
- rc = shell_exec(data.db, azCmd[i], shell_callback, &data, &zErrMsg);
- if( zErrMsg!=0 ){
- fprintf(stderr,"Error: %s\n", zErrMsg);
- return rc!=0 ? rc : 1;
- }else if( rc!=0 ){
- fprintf(stderr,"Error: unable to process SQL: %s\n", azCmd[i]);
- return rc;
- }
+ if( zFirstCmd[0]=='.' ){
+ rc = do_meta_command(zFirstCmd, &data);
+ if( rc==2 ) rc = 0;
+ }else{
+ open_db(&data, 0);
+ rc = shell_exec(data.db, zFirstCmd, shell_callback, &data, &zErrMsg);
+ if( zErrMsg!=0 ){
+ fprintf(stderr,"Error: %s\n", zErrMsg);
+ return rc!=0 ? rc : 1;
+ }else if( rc!=0 ){
+ fprintf(stderr,"Error: unable to process SQL \"%s\"\n", zFirstCmd);
+ return rc;
}
}
- free(azCmd);
}else{
/* Run commands received from standard input
*/
@@ -4815,15 +3526,10 @@ int SQLITE_CDECL main(int argc, char **argv){
int nHistory;
printf(
"SQLite version %s %.19s\n" /*extra-version-info*/
- "Enter \".help\" for usage hints.\n",
+ "Enter \".help\" for instructions\n"
+ "Enter SQL statements terminated with a \";\"\n",
sqlite3_libversion(), sqlite3_sourceid()
);
- if( warnInmemoryDb ){
- printf("Connected to a ");
- printBold("transient in-memory database");
- printf(".\nUse \".open FILENAME\" to reopen on a "
- "persistent database.\n");
- }
zHome = find_home_dir();
if( zHome ){
nHistory = strlen30(zHome) + 20;
@@ -4831,11 +3537,13 @@ int SQLITE_CDECL main(int argc, char **argv){
sqlite3_snprintf(nHistory, zHistory,"%s/.sqlite_history", zHome);
}
}
- if( zHistory ){ shell_read_history(zHistory); }
+#if defined(HAVE_READLINE) && HAVE_READLINE==1
+ if( zHistory ) read_history(zHistory);
+#endif
rc = process_input(&data, 0);
if( zHistory ){
- shell_stifle_history(100);
- shell_write_history(zHistory);
+ stifle_history(100);
+ write_history(zHistory);
free(zHistory);
}
}else{
diff --git a/3rdparty/sqlite3/sqlite3.c b/3rdparty/sqlite3/sqlite3.c
index 0ae407d..98c3b04 100644
--- a/3rdparty/sqlite3/sqlite3.c
+++ b/3rdparty/sqlite3/sqlite3.c
@@ -1,6 +1,6 @@
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.9.2. By combining all the individual C code files into this
+** version 3.8.2. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -22,195 +22,9 @@
#ifndef SQLITE_PRIVATE
# define SQLITE_PRIVATE static
#endif
-/************** Begin file sqliteInt.h ***************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** Internal interface definitions for SQLite.
-**
-*/
-#ifndef _SQLITEINT_H_
-#define _SQLITEINT_H_
-
-/*
-** Include the header file used to customize the compiler options for MSVC.
-** This should be done first so that it can successfully prevent spurious
-** compiler warnings due to subsequent content in this file and other files
-** that are included by this file.
-*/
-/************** Include msvc.h in the middle of sqliteInt.h ******************/
-/************** Begin file msvc.h ********************************************/
-/*
-** 2015 January 12
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains code that is specific to MSVC.
-*/
-#ifndef _MSVC_H_
-#define _MSVC_H_
-
-#if defined(_MSC_VER)
-#pragma warning(disable : 4054)
-#pragma warning(disable : 4055)
-#pragma warning(disable : 4100)
-#pragma warning(disable : 4127)
-#pragma warning(disable : 4130)
-#pragma warning(disable : 4152)
-#pragma warning(disable : 4189)
-#pragma warning(disable : 4206)
-#pragma warning(disable : 4210)
-#pragma warning(disable : 4232)
-#pragma warning(disable : 4244)
-#pragma warning(disable : 4305)
-#pragma warning(disable : 4306)
-#pragma warning(disable : 4702)
-#pragma warning(disable : 4706)
-#endif /* defined(_MSC_VER) */
-
-#endif /* _MSVC_H_ */
-
-/************** End of msvc.h ************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/*
-** Special setup for VxWorks
-*/
-/************** Include vxworks.h in the middle of sqliteInt.h ***************/
-/************** Begin file vxworks.h *****************************************/
-/*
-** 2015-03-02
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains code that is specific to Wind River's VxWorks
-*/
-#if defined(__RTP__) || defined(_WRS_KERNEL)
-/* This is VxWorks. Set up things specially for that OS
-*/
-#include <vxWorks.h>
-#include <pthread.h> /* amalgamator: dontcache */
-#define OS_VXWORKS 1
-#define SQLITE_OS_OTHER 0
-#define SQLITE_HOMEGROWN_RECURSIVE_MUTEX 1
-#define SQLITE_OMIT_LOAD_EXTENSION 1
-#define SQLITE_ENABLE_LOCKING_STYLE 0
-#define HAVE_UTIME 1
-#else
-/* This is not VxWorks. */
-#define OS_VXWORKS 0
-#endif /* defined(_WRS_KERNEL) */
-
-/************** End of vxworks.h *********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/*
-** These #defines should enable >2GB file support on POSIX if the
-** underlying operating system supports it. If the OS lacks
-** large file support, or if the OS is windows, these should be no-ops.
-**
-** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any
-** system #includes. Hence, this block of code must be the very first
-** code in all source files.
-**
-** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch
-** on the compiler command line. This is necessary if you are compiling
-** on a recent machine (ex: Red Hat 7.2) but you want your code to work
-** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2
-** without this option, LFS is enable. But LFS does not exist in the kernel
-** in Red Hat 6.0, so the code won't work. Hence, for maximum binary
-** portability you should omit LFS.
-**
-** The previous paragraph was written in 2005. (This paragraph is written
-** on 2008-11-28.) These days, all Linux kernels support large files, so
-** you should probably leave LFS enabled. But some embedded platforms might
-** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful.
-**
-** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later.
-*/
-#ifndef SQLITE_DISABLE_LFS
-# define _LARGE_FILE 1
-# ifndef _FILE_OFFSET_BITS
-# define _FILE_OFFSET_BITS 64
-# endif
-# define _LARGEFILE_SOURCE 1
-#endif
-
-/* What version of GCC is being used. 0 means GCC is not being used */
-#ifdef __GNUC__
-# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
-#else
-# define GCC_VERSION 0
-#endif
-
-/* Needed for various definitions... */
-#if defined(__GNUC__) && !defined(_GNU_SOURCE)
-# define _GNU_SOURCE
-#endif
-
-#if defined(__OpenBSD__) && !defined(_BSD_SOURCE)
-# define _BSD_SOURCE
-#endif
-
-/*
-** For MinGW, check to see if we can include the header file containing its
-** version information, among other things. Normally, this internal MinGW
-** header file would [only] be included automatically by other MinGW header
-** files; however, the contained version information is now required by this
-** header file to work around binary compatibility issues (see below) and
-** this is the only known way to reliably obtain it. This entire #if block
-** would be completely unnecessary if there was any other way of detecting
-** MinGW via their preprocessor (e.g. if they customized their GCC to define
-** some MinGW-specific macros). When compiling for MinGW, either the
-** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be
-** defined; otherwise, detection of conditions specific to MinGW will be
-** disabled.
-*/
-#if defined(_HAVE_MINGW_H)
-# include "mingw.h"
-#elif defined(_HAVE__MINGW_H)
-# include "_mingw.h"
-#endif
-
-/*
-** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T
-** define is required to maintain binary compatibility with the MSVC runtime
-** library in use (e.g. for Windows XP).
-*/
-#if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \
- defined(_WIN32) && !defined(_WIN64) && \
- defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \
- defined(__MSVCRT__)
-# define _USE_32BIT_TIME_T
+#ifndef SQLITE_API
+# define SQLITE_API
#endif
-
-/* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear
-** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for
-** MinGW.
-*/
-/************** Include sqlite3.h in the middle of sqliteInt.h ***************/
/************** Begin file sqlite3.h *****************************************/
/*
** 2001 September 15
@@ -237,7 +51,7 @@
**
** The official C-language API documentation for SQLite is derived
** from comments in this file. This file is the authoritative source
-** on how SQLite interfaces are supposed to operate.
+** on how SQLite interfaces are suppose to operate.
**
** The name of this file under configuration management is "sqlite.h.in".
** The makefile makes some minor changes to this file (such as inserting
@@ -257,25 +71,21 @@ extern "C" {
/*
-** Provide the ability to override linkage features of the interface.
+** Add the ability to override 'extern'
*/
#ifndef SQLITE_EXTERN
# define SQLITE_EXTERN extern
#endif
+
#ifndef SQLITE_API
# define SQLITE_API
#endif
-#ifndef SQLITE_CDECL
-# define SQLITE_CDECL
-#endif
-#ifndef SQLITE_STDCALL
-# define SQLITE_STDCALL
-#endif
+
/*
** These no-op macros are used in front of interfaces to mark those
** interfaces as either deprecated or experimental. New applications
-** should not use deprecated interfaces - they are supported for backwards
+** should not use deprecated interfaces - they are support for backwards
** compatibility only. Application writers should be aware that
** experimental interfaces are subject to change in point releases.
**
@@ -325,9 +135,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.9.2"
-#define SQLITE_VERSION_NUMBER 3009002
-#define SQLITE_SOURCE_ID "2015-11-02 18:31:45 bda77dda9697c463c3d0704014d51627fceee328"
+#define SQLITE_VERSION "3.8.2"
+#define SQLITE_VERSION_NUMBER 3008002
+#define SQLITE_SOURCE_ID "2013-12-06 14:53:30 27392118af4c38c5203a04b8013e1afdb1cebd0d"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -338,7 +148,7 @@ extern "C" {
** but are associated with the library instead of the header file. ^(Cautious
** programmers might include assert() statements in their application to
** verify that values returned by these interfaces match the macros in
-** the header, and thus ensure that the application is
+** the header, and thus insure that the application is
** compiled with matching library and header files.
**
** <blockquote><pre>
@@ -360,9 +170,9 @@ extern "C" {
** See also: [sqlite_version()] and [sqlite_source_id()].
*/
SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
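As the paragraph above suggests, a cautious application can assert at startup that the header it was compiled against matches the library it links at run time. A minimal sketch (illustrative, not part of the amalgamation):

#include <assert.h>
#include <string.h>
#include <stdio.h>
#include "sqlite3.h"

static void check_sqlite_version(void){
  /* A header/library mismatch is a build problem; fail fast. */
  assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
  assert( strcmp(sqlite3_sourceid(), SQLITE_SOURCE_ID)==0 );
  assert( strcmp(sqlite3_libversion(), SQLITE_VERSION)==0 );
  printf("SQLite %s (%d)\n", sqlite3_libversion(), sqlite3_libversion_number());
}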
/*
** CAPI3REF: Run-Time Library Compilation Options Diagnostics
@@ -387,8 +197,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
** [sqlite_compileoption_get()] and the [compile_options pragma].
*/
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
#endif
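A small sketch of how these diagnostics might be used to log the build configuration; sqlite3_compileoption_get() returns NULL once N runs past the last option, and both routines are absent when SQLITE_OMIT_COMPILEOPTION_DIAGS is defined:

#include <stdio.h>
#include "sqlite3.h"

static void dump_compile_options(void){
  int i;
  const char *zOpt;
  for(i=0; (zOpt = sqlite3_compileoption_get(i))!=0; i++){
    printf("compile option %d: %s\n", i, zOpt);
  }
  /* The SQLITE_ prefix may be omitted from the option name. */
  printf("THREADSAFE used: %d\n", sqlite3_compileoption_used("THREADSAFE"));
}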
/*
@@ -419,7 +229,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but
** can be fully or partially disabled using a call to [sqlite3_config()]
** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD],
-** or [SQLITE_CONFIG_SERIALIZED]. ^(The return value of the
+** or [SQLITE_CONFIG_MUTEX]. ^(The return value of the
** sqlite3_threadsafe() function shows only the compile-time setting of
** thread safety, not any run-time changes to that setting made by
** sqlite3_config(). In other words, the return value from sqlite3_threadsafe()
@@ -427,7 +237,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
**
** See the [threading mode] documentation for additional information.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void);
+SQLITE_API int sqlite3_threadsafe(void);
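A usage sketch: the value reflects only the compile-time SQLITE_THREADSAFE setting, so an application that relies on mutexes can refuse to continue when the library was built without them:

#include <stdio.h>
#include "sqlite3.h"

static int require_threadsafe_build(void){
  if( sqlite3_threadsafe()==0 ){
    fprintf(stderr, "SQLite was built with SQLITE_THREADSAFE=0\n");
    return 1;               /* caller should not start worker threads */
  }
  return 0;
}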
/*
** CAPI3REF: Database Connection Handle
@@ -484,11 +294,10 @@ typedef sqlite_uint64 sqlite3_uint64;
/*
** CAPI3REF: Closing A Database Connection
-** DESTRUCTOR: sqlite3
**
** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors
** for the [sqlite3] object.
-** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if
+** ^Calls to sqlite3_close() and sqlite3_close_v2() return SQLITE_OK if
** the [sqlite3] object is successfully destroyed and all associated
** resources are deallocated.
**
@@ -496,7 +305,7 @@ typedef sqlite_uint64 sqlite3_uint64;
** statements or unfinished sqlite3_backup objects then sqlite3_close()
** will leave the database connection open and return [SQLITE_BUSY].
** ^If sqlite3_close_v2() is called with unfinalized prepared statements
-** and/or unfinished sqlite3_backups, then the database connection becomes
+** and unfinished sqlite3_backups, then the database connection becomes
** an unusable "zombie" which will automatically be deallocated when the
** last prepared statement is finalized or the last sqlite3_backup is
** finished. The sqlite3_close_v2() interface is intended for use with
@@ -509,7 +318,7 @@ typedef sqlite_uint64 sqlite3_uint64;
** with the [sqlite3] object prior to attempting to close the object. ^If
** sqlite3_close_v2() is called on a [database connection] that still has
** outstanding [prepared statements], [BLOB handles], and/or
-** [sqlite3_backup] objects then it returns [SQLITE_OK] and the deallocation
+** [sqlite3_backup] objects then it returns SQLITE_OK but the deallocation
** of resources is deferred until all [prepared statements], [BLOB handles],
** and [sqlite3_backup] objects are also destroyed.
**
@@ -524,8 +333,8 @@ typedef sqlite_uint64 sqlite3_uint64;
** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
** argument is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3*);
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3*);
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
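A sketch of an orderly shutdown under the sqlite3_close() rules above: finalize whatever prepared statements are still outstanding (sqlite3_next_stmt() is declared later in this header) so the close call can return SQLITE_OK rather than SQLITE_BUSY:

#include "sqlite3.h"

static int shutdown_db(sqlite3 *db){
  sqlite3_stmt *pStmt;
  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    sqlite3_finalize(pStmt);        /* release each outstanding statement */
  }
  return sqlite3_close(db);         /* SQLITE_OK once nothing is left open */
}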
/*
** The type for a callback function.
@@ -536,7 +345,6 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
/*
** CAPI3REF: One-Step Query Execution Interface
-** METHOD: sqlite3
**
** The sqlite3_exec() interface is a convenience wrapper around
** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()],
@@ -588,7 +396,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** Restrictions:
**
** <ul>
-** <li> The application must ensure that the 1st parameter to sqlite3_exec()
+** <li> The application must insure that the 1st parameter to sqlite3_exec()
** is a valid and open [database connection].
** <li> The application must not close the [database connection] specified by
** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
@@ -596,7 +404,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** </ul>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
+SQLITE_API int sqlite3_exec(
sqlite3*, /* An open database */
const char *sql, /* SQL to be evaluated */
int (*callback)(void*,int,char**,char**), /* Callback function */
@@ -606,14 +414,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
/*
** CAPI3REF: Result Codes
-** KEYWORDS: {result code definitions}
+** KEYWORDS: SQLITE_OK {error code} {error codes}
+** KEYWORDS: {result code} {result codes}
**
** Many SQLite functions return an integer result code from the set shown
** here in order to indicate success or failure.
**
** New error codes may be added in future versions of SQLite.
**
-** See also: [extended result code definitions]
+** See also: [SQLITE_IOERR_READ | extended result codes],
+** [sqlite3_vtab_on_conflict()] [SQLITE_ROLLBACK | result codes].
*/
#define SQLITE_OK 0 /* Successful result */
/* beginning-of-error-codes */
@@ -651,19 +461,26 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
/*
** CAPI3REF: Extended Result Codes
-** KEYWORDS: {extended result code definitions}
+** KEYWORDS: {extended error code} {extended error codes}
+** KEYWORDS: {extended result code} {extended result codes}
**
-** In its default configuration, SQLite API routines return one of 30 integer
-** [result codes]. However, experience has shown that many of
+** In its default configuration, SQLite API routines return one of 26 integer
+** [SQLITE_OK | result codes]. However, experience has shown that many of
** these result codes are too coarse-grained. They do not provide as
** much information about problems as programmers might like. In an effort to
** address this, newer versions of SQLite (version 3.3.8 and later) include
** support for additional result codes that provide more detailed information
-** about errors. These [extended result codes] are enabled or disabled
+** about errors. The extended result codes are enabled or disabled
** on a per database connection basis using the
-** [sqlite3_extended_result_codes()] API. Or, the extended code for
-** the most recent error can be obtained using
-** [sqlite3_extended_errcode()].
+** [sqlite3_extended_result_codes()] API.
+**
+** Some of the available extended result codes are listed here.
+** One may expect the number of extended result codes will increase
+** over time. Software that uses extended result codes should expect
+** to see new result codes in future releases of SQLite.
+**
+** The SQLITE_OK result code will never be extended. It will always
+** be exactly zero.
*/
#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
@@ -691,7 +508,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8))
#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8))
-#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8))
@@ -703,7 +519,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8))
#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8))
-#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8))
#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8))
#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8))
#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8))
@@ -718,7 +533,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
-#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
/*
** CAPI3REF: Flags For File Open Operations
@@ -772,11 +586,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
** after reboot following a crash or power loss, the only bytes in a
** file that were written at the application level might have changed
** and that adjacent bytes, even bytes within the same sector are
-** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
-** flag indicate that a file cannot be deleted when open. The
-** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
-** read-only media and cannot be changed even by processes with
-** elevated privileges.
+** guaranteed to be unchanged.
*/
#define SQLITE_IOCAP_ATOMIC 0x00000001
#define SQLITE_IOCAP_ATOMIC512 0x00000002
@@ -791,7 +601,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_IOCAP_SEQUENTIAL 0x00000400
#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800
#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000
-#define SQLITE_IOCAP_IMMUTABLE 0x00002000
/*
** CAPI3REF: File Locking Levels
@@ -898,7 +707,7 @@ struct sqlite3_file {
** locking strategy (for example to use dot-file locks), to inquire
** about the status of a lock, or to break stale locks. The SQLite
** core reserves all opcodes less than 100 for its own use.
-** A [file control opcodes | list of opcodes] less than 100 is available.
+** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available.
** Applications that define a custom xFileControl method should use opcodes
** greater than 100 to avoid conflicts. VFS implementations should
** return [SQLITE_NOTFOUND] for file control opcodes that they do not
@@ -971,22 +780,19 @@ struct sqlite3_io_methods {
/*
** CAPI3REF: Standard File Control Opcodes
-** KEYWORDS: {file control opcodes} {file control opcode}
**
** These integer constants are opcodes for the xFileControl method
** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()]
** interface.
**
-** <ul>
-** <li>[[SQLITE_FCNTL_LOCKSTATE]]
** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This
** opcode causes the xFileControl method to write the current state of
** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED],
** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE])
** into an integer that the pArg argument points to. This capability
-** is used during testing and is only available when the SQLITE_TEST
-** compile-time option is used.
-**
+** is used during testing and only needs to be supported when SQLITE_TEST
+** is defined.
+** <ul>
** <li>[[SQLITE_FCNTL_SIZE_HINT]]
** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
** layer a hint of how large the database file will grow to be during the
@@ -1011,29 +817,15 @@ struct sqlite3_io_methods {
** additional information.
**
** <li>[[SQLITE_FCNTL_SYNC_OMITTED]]
-** No longer in use.
-**
-** <li>[[SQLITE_FCNTL_SYNC]]
-** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
-** sent to the VFS immediately before the xSync method is invoked on a
-** database file descriptor. Or, if the xSync method is not invoked
-** because the user has configured SQLite with
-** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place
-** of the xSync method. In most cases, the pointer argument passed with
-** this file-control is NULL. However, if the database file is being synced
-** as part of a multi-database commit, the argument points to a nul-terminated
-** string containing the transactions master-journal file name. VFSes that
-** do not need this signal should silently ignore this opcode. Applications
-** should not call [sqlite3_file_control()] with this opcode as doing so may
-** disrupt the operation of the specialized VFSes that do require it.
-**
-** <li>[[SQLITE_FCNTL_COMMIT_PHASETWO]]
-** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite
-** and sent to the VFS after a transaction has been committed immediately
-** but before the database is unlocked. VFSes that do not need this signal
-** should silently ignore this opcode. Applications should not call
-** [sqlite3_file_control()] with this opcode as doing so may disrupt the
-** operation of the specialized VFSes that do require it.
+** ^(The [SQLITE_FCNTL_SYNC_OMITTED] opcode is generated internally by
+** SQLite and sent to all VFSes in place of a call to the xSync method
+** when the database connection has [PRAGMA synchronous] set to OFF.)^
+** Some specialized VFSes need this signal in order to operate correctly
+** when [PRAGMA synchronous | PRAGMA synchronous=OFF] is set, but most
+** VFSes do not need this signal and should silently ignore this opcode.
+** Applications should not call [sqlite3_file_control()] with this
+** opcode as doing so may disrupt the operation of the specialized VFSes
+** that do require it.
**
** <li>[[SQLITE_FCNTL_WIN32_AV_RETRY]]
** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
@@ -1111,9 +903,7 @@ struct sqlite3_io_methods {
** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA]
** file control returns [SQLITE_OK], then the parser assumes that the
** VFS has handled the PRAGMA itself and the parser generates a no-op
-** prepared statement if result string is NULL, or that returns a copy
-** of the result string if the string is non-NULL.
-** ^If the [SQLITE_FCNTL_PRAGMA] file control returns
+** prepared statement. ^If the [SQLITE_FCNTL_PRAGMA] file control returns
** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means
** that the VFS encountered an error while handling the [PRAGMA] and the
** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA]
@@ -1159,39 +949,12 @@ struct sqlite3_io_methods {
** SQLite stack may generate instances of this file control if
** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled.
**
-** <li>[[SQLITE_FCNTL_HAS_MOVED]]
-** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a
-** pointer to an integer and it writes a boolean into that integer depending
-** on whether or not the file has been renamed, moved, or deleted since it
-** was first opened.
-**
-** <li>[[SQLITE_FCNTL_WIN32_SET_HANDLE]]
-** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
-** opcode causes the xFileControl method to swap the file handle with the one
-** pointed to by the pArg argument. This capability is used during testing
-** and only needs to be supported when SQLITE_TEST is defined.
-**
-** <li>[[SQLITE_FCNTL_WAL_BLOCK]]
-** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might
-** be advantageous to block on the next WAL lock if the lock is not immediately
-** available. The WAL subsystem issues this signal during rare
-** circumstances in order to fix a problem with priority inversion.
-** Applications should <em>not</em> use this file-control.
-**
-** <li>[[SQLITE_FCNTL_ZIPVFS]]
-** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other
-** VFS should return SQLITE_NOTFOUND for this opcode.
-**
-** <li>[[SQLITE_FCNTL_RBU]]
-** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by
-** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for
-** this opcode.
** </ul>
*/
#define SQLITE_FCNTL_LOCKSTATE 1
-#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2
-#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3
-#define SQLITE_FCNTL_LAST_ERRNO 4
+#define SQLITE_GET_LOCKPROXYFILE 2
+#define SQLITE_SET_LOCKPROXYFILE 3
+#define SQLITE_LAST_ERRNO 4
#define SQLITE_FCNTL_SIZE_HINT 5
#define SQLITE_FCNTL_CHUNK_SIZE 6
#define SQLITE_FCNTL_FILE_POINTER 7
@@ -1206,19 +969,6 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_TEMPFILENAME 16
#define SQLITE_FCNTL_MMAP_SIZE 18
#define SQLITE_FCNTL_TRACE 19
-#define SQLITE_FCNTL_HAS_MOVED 20
-#define SQLITE_FCNTL_SYNC 21
-#define SQLITE_FCNTL_COMMIT_PHASETWO 22
-#define SQLITE_FCNTL_WIN32_SET_HANDLE 23
-#define SQLITE_FCNTL_WAL_BLOCK 24
-#define SQLITE_FCNTL_ZIPVFS 25
-#define SQLITE_FCNTL_RBU 26
-
-/* deprecated names */
-#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
-#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE
-#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO
-
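A usage sketch for one of the opcodes above, passed through sqlite3_file_control() (declared later in this header): ask the VFS of the "main" database to grow the file in 1 MiB chunks. A VFS that does not implement the opcode returns SQLITE_NOTFOUND, which is harmless to ignore here:

#include "sqlite3.h"

static void set_chunk_size(sqlite3 *db){
  int szChunk = 1024*1024;          /* preallocate in 1 MiB steps */
  (void)sqlite3_file_control(db, "main", SQLITE_FCNTL_CHUNK_SIZE, &szChunk);
}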
/*
** CAPI3REF: Mutex Handle
@@ -1470,7 +1220,7 @@ struct sqlite3_vfs {
** </ul>
**
** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as
-** was given on the corresponding lock.
+** was given no the corresponding lock.
**
** The xShmLock method can transition between unlocked and SHARED or
** between unlocked and EXCLUSIVE. It cannot transition between SHARED
@@ -1567,10 +1317,10 @@ struct sqlite3_vfs {
** must return [SQLITE_OK] on success and some other [error code] upon
** failure.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
+SQLITE_API int sqlite3_initialize(void);
+SQLITE_API int sqlite3_shutdown(void);
+SQLITE_API int sqlite3_os_init(void);
+SQLITE_API int sqlite3_os_end(void);
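Explicit use of the initializer pair is optional, since sqlite3_open() initializes the library lazily, but a sketch of the symmetric form looks like this:

#include "sqlite3.h"

int main(void){
  if( sqlite3_initialize()!=SQLITE_OK ) return 1;
  /* ... open connections, run statements, close connections ... */
  sqlite3_shutdown();               /* release global resources on exit */
  return 0;
}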
/*
** CAPI3REF: Configuring The SQLite Library
@@ -1581,11 +1331,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
** applications and so this routine is usually not necessary. It is
** provided to support rare applications with unusual needs.
**
-** <b>The sqlite3_config() interface is not threadsafe. The application
-** must ensure that no other SQLite interfaces are invoked by other
-** threads while sqlite3_config() is running.</b>
-**
-** The sqlite3_config() interface
+** The sqlite3_config() interface is not threadsafe. The application
+** must insure that no other SQLite interfaces are invoked by other
+** threads while sqlite3_config() is running. Furthermore, sqlite3_config()
** may only be invoked prior to library initialization using
** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()].
** ^If sqlite3_config() is called after [sqlite3_initialize()] and before
@@ -1603,11 +1351,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
** ^If the option is unknown or SQLite is unable to set the option
** then this routine returns a non-zero [error code].
*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
+SQLITE_API int sqlite3_config(int, ...);
/*
** CAPI3REF: Configure database connections
-** METHOD: sqlite3
**
** The sqlite3_db_config() interface is used to make configuration
** changes to a [database connection]. The interface is similar to
@@ -1622,7 +1369,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
** the call is considered successful.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...);
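A per-connection tuning sketch using the SQLITE_DBCONFIG_LOOKASIDE verb referenced later in this header: a NULL buffer pointer asks SQLite to allocate the lookaside arena itself, followed by the slot size and slot count:

#include "sqlite3.h"

static int tune_connection(sqlite3 *db){
  /* 500 slots of 256 bytes each, allocated by SQLite. */
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 256, 500);
}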
/*
** CAPI3REF: Memory Allocation Routines
@@ -1756,33 +1503,31 @@ struct sqlite3_mem_methods {
** SQLITE_CONFIG_SERIALIZED configuration option.</dd>
**
** [[SQLITE_CONFIG_MALLOC]] <dt>SQLITE_CONFIG_MALLOC</dt>
-** <dd> ^(The SQLITE_CONFIG_MALLOC option takes a single argument which is
-** a pointer to an instance of the [sqlite3_mem_methods] structure.
-** The argument specifies
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mem_methods] structure. The argument specifies
** alternative low-level memory allocation routines to be used in place of
** the memory allocation routines built into SQLite.)^ ^SQLite makes
** its own private copy of the content of the [sqlite3_mem_methods] structure
** before the [sqlite3_config()] call returns.</dd>
**
** [[SQLITE_CONFIG_GETMALLOC]] <dt>SQLITE_CONFIG_GETMALLOC</dt>
-** <dd> ^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which
-** is a pointer to an instance of the [sqlite3_mem_methods] structure.
-** The [sqlite3_mem_methods]
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mem_methods] structure. The [sqlite3_mem_methods]
** structure is filled with the currently defined memory allocation routines.)^
** This option can be used to overload the default memory allocation
** routines with a wrapper that simulates memory allocation failure or
** tracks memory usage, for example. </dd>
**
** [[SQLITE_CONFIG_MEMSTATUS]] <dt>SQLITE_CONFIG_MEMSTATUS</dt>
-** <dd> ^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int,
-** interpreted as a boolean, which enables or disables the collection of
-** memory allocation statistics. ^(When memory allocation statistics are
-** disabled, the following SQLite interfaces become non-operational:
+** <dd> ^This option takes single argument of type int, interpreted as a
+** boolean, which enables or disables the collection of memory allocation
+** statistics. ^(When memory allocation statistics are disabled, the
+** following SQLite interfaces become non-operational:
** <ul>
** <li> [sqlite3_memory_used()]
** <li> [sqlite3_memory_highwater()]
** <li> [sqlite3_soft_heap_limit64()]
-** <li> [sqlite3_status64()]
+** <li> [sqlite3_status()]
** </ul>)^
** ^Memory allocation statistics are enabled by default unless SQLite is
** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory
@@ -1790,67 +1535,53 @@ struct sqlite3_mem_methods {
** </dd>
**
** [[SQLITE_CONFIG_SCRATCH]] <dt>SQLITE_CONFIG_SCRATCH</dt>
-** <dd> ^The SQLITE_CONFIG_SCRATCH option specifies a static memory buffer
-** that SQLite can use for scratch memory. ^(There are three arguments
-** to SQLITE_CONFIG_SCRATCH: A pointer an 8-byte
+** <dd> ^This option specifies a static memory buffer that SQLite can use for
+** scratch memory. There are three arguments: A pointer an 8-byte
** aligned memory buffer from which the scratch allocations will be
** drawn, the size of each scratch allocation (sz),
-** and the maximum number of scratch allocations (N).)^
+** and the maximum number of scratch allocations (N). The sz
+** argument must be a multiple of 16.
** The first argument must be a pointer to an 8-byte aligned buffer
** of at least sz*N bytes of memory.
-** ^SQLite will not use more than one scratch buffers per thread.
-** ^SQLite will never request a scratch buffer that is more than 6
-** times the database page size.
-** ^If SQLite needs needs additional
+** ^SQLite will use no more than two scratch buffers per thread. So
+** N should be set to twice the expected maximum number of threads.
+** ^SQLite will never require a scratch buffer that is more than 6
+** times the database page size. ^If SQLite needs needs additional
** scratch memory beyond what is provided by this configuration option, then
-** [sqlite3_malloc()] will be used to obtain the memory needed.<p>
-** ^When the application provides any amount of scratch memory using
-** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large
-** [sqlite3_malloc|heap allocations].
-** This can help [Robson proof|prevent memory allocation failures] due to heap
-** fragmentation in low-memory embedded systems.
-** </dd>
+** [sqlite3_malloc()] will be used to obtain the memory needed.</dd>
**
** [[SQLITE_CONFIG_PAGECACHE]] <dt>SQLITE_CONFIG_PAGECACHE</dt>
-** <dd> ^The SQLITE_CONFIG_PAGECACHE option specifies a static memory buffer
-** that SQLite can use for the database page cache with the default page
-** cache implementation.
+** <dd> ^This option specifies a static memory buffer that SQLite can use for
+** the database page cache with the default page cache implementation.
** This configuration should not be used if an application-defined page
-** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2]
-** configuration option.
-** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
-** 8-byte aligned
+** cache implementation is loaded using the SQLITE_CONFIG_PCACHE2 option.
+** There are three arguments to this option: A pointer to 8-byte aligned
** memory, the size of each page buffer (sz), and the number of pages (N).
** The sz argument should be the size of the largest database page
-** (a power of two between 512 and 65536) plus some extra bytes for each
-** page header. ^The number of extra bytes needed by the page header
-** can be determined using the [SQLITE_CONFIG_PCACHE_HDRSZ] option
-** to [sqlite3_config()].
-** ^It is harmless, apart from the wasted memory,
-** for the sz parameter to be larger than necessary. The first
-** argument should pointer to an 8-byte aligned block of memory that
-** is at least sz*N bytes of memory, otherwise subsequent behavior is
-** undefined.
+** (a power of two between 512 and 32768) plus a little extra for each
+** page header. ^The page header size is 20 to 40 bytes depending on
+** the host architecture. ^It is harmless, apart from the wasted memory,
+** to make sz a little too large. The first
+** argument should point to an allocation of at least sz*N bytes of memory.
** ^SQLite will use the memory provided by the first argument to satisfy its
** memory needs for the first N pages that it adds to cache. ^If additional
** page cache memory is needed beyond what is provided by this option, then
-** SQLite goes to [sqlite3_malloc()] for the additional storage space.</dd>
+** SQLite goes to [sqlite3_malloc()] for the additional storage space.
+** The pointer in the first argument must
+** be aligned to an 8-byte boundary or subsequent behavior of SQLite
+** will be undefined.</dd>
**
** [[SQLITE_CONFIG_HEAP]] <dt>SQLITE_CONFIG_HEAP</dt>
-** <dd> ^The SQLITE_CONFIG_HEAP option specifies a static memory buffer
-** that SQLite will use for all of its dynamic memory allocation needs
-** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and
-** [SQLITE_CONFIG_PAGECACHE].
-** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled
-** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns
-** [SQLITE_ERROR] if invoked otherwise.
-** ^There are three arguments to SQLITE_CONFIG_HEAP:
-** An 8-byte aligned pointer to the memory,
+** <dd> ^This option specifies a static memory buffer that SQLite will use
+** for all of its dynamic memory allocation needs beyond those provided
+** for by [SQLITE_CONFIG_SCRATCH] and [SQLITE_CONFIG_PAGECACHE].
+** There are three arguments: An 8-byte aligned pointer to the memory,
** the number of bytes in the memory buffer, and the minimum allocation size.
** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts
** to using its default memory allocator (the system malloc() implementation),
** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the
-** memory pointer is not NULL then the alternative memory
+** memory pointer is not NULL and either [SQLITE_ENABLE_MEMSYS3] or
+** [SQLITE_ENABLE_MEMSYS5] are defined, then the alternative memory
** allocator is engaged to handle all of SQLite's memory allocation needs.
** The first pointer (the memory pointer) must be aligned to an 8-byte
** boundary or subsequent behavior of SQLite will be undefined.
@@ -1858,11 +1589,11 @@ struct sqlite3_mem_methods {
** for the minimum allocation size are 2**5 through 2**8.</dd>
**
** [[SQLITE_CONFIG_MUTEX]] <dt>SQLITE_CONFIG_MUTEX</dt>
-** <dd> ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a
-** pointer to an instance of the [sqlite3_mutex_methods] structure.
-** The argument specifies alternative low-level mutex routines to be used
-** in place the mutex routines built into SQLite.)^ ^SQLite makes a copy of
-** the content of the [sqlite3_mutex_methods] structure before the call to
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mutex_methods] structure. The argument specifies
+** alternative low-level mutex routines to be used in place
+** the mutex routines built into SQLite.)^ ^SQLite makes a copy of the
+** content of the [sqlite3_mutex_methods] structure before the call to
** [sqlite3_config()] returns. ^If SQLite is compiled with
** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
** the entire mutexing subsystem is omitted from the build and hence calls to
@@ -1870,8 +1601,8 @@ struct sqlite3_mem_methods {
** return [SQLITE_ERROR].</dd>
**
** [[SQLITE_CONFIG_GETMUTEX]] <dt>SQLITE_CONFIG_GETMUTEX</dt>
-** <dd> ^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which
-** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mutex_methods] structure. The
** [sqlite3_mutex_methods]
** structure is filled with the currently defined mutex routines.)^
** This option can be used to overload the default mutex allocation
@@ -1883,25 +1614,25 @@ struct sqlite3_mem_methods {
** return [SQLITE_ERROR].</dd>
**
** [[SQLITE_CONFIG_LOOKASIDE]] <dt>SQLITE_CONFIG_LOOKASIDE</dt>
-** <dd> ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine
-** the default size of lookaside memory on each [database connection].
-** The first argument is the
+** <dd> ^(This option takes two arguments that determine the default
+** memory allocation for the lookaside memory allocator on each
+** [database connection]. The first argument is the
** size of each lookaside buffer slot and the second is the number of
-** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE
-** sets the <i>default</i> lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
-** option to [sqlite3_db_config()] can be used to change the lookaside
+** slots allocated to each database connection.)^ ^(This option sets the
+** <i>default</i> lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
+** verb to [sqlite3_db_config()] can be used to change the lookaside
** configuration on individual connections.)^ </dd>
**
** [[SQLITE_CONFIG_PCACHE2]] <dt>SQLITE_CONFIG_PCACHE2</dt>
-** <dd> ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
-** a pointer to an [sqlite3_pcache_methods2] object. This object specifies
-** the interface to a custom page cache implementation.)^
-** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.</dd>
+** <dd> ^(This option takes a single argument which is a pointer to
+** an [sqlite3_pcache_methods2] object. This object specifies the interface
+** to a custom page cache implementation.)^ ^SQLite makes a copy of the
+** object and uses it for page cache memory allocations.</dd>
**
** [[SQLITE_CONFIG_GETPCACHE2]] <dt>SQLITE_CONFIG_GETPCACHE2</dt>
-** <dd> ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which
-** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies of
-** the current page cache implementation into that object.)^ </dd>
+** <dd> ^(This option takes a single argument which is a pointer to an
+** [sqlite3_pcache_methods2] object. SQLite copies of the current
+** page cache implementation into that object.)^ </dd>
**
** [[SQLITE_CONFIG_LOG]] <dt>SQLITE_CONFIG_LOG</dt>
** <dd> The SQLITE_CONFIG_LOG option is used to configure the SQLite
@@ -1924,11 +1655,10 @@ struct sqlite3_mem_methods {
** function must be threadsafe. </dd>
**
** [[SQLITE_CONFIG_URI]] <dt>SQLITE_CONFIG_URI
-** <dd>^(The SQLITE_CONFIG_URI option takes a single argument of type int.
-** If non-zero, then URI handling is globally enabled. If the parameter is zero,
-** then URI handling is globally disabled.)^ ^If URI handling is globally
-** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()],
-** [sqlite3_open16()] or
+** <dd>^(This option takes a single argument of type int. If non-zero, then
+** URI handling is globally enabled. If the parameter is zero, then URI handling
+** is globally disabled.)^ ^If URI handling is globally enabled, all filenames
+** passed to [sqlite3_open()], [sqlite3_open_v2()], [sqlite3_open16()] or
** specified as part of [ATTACH] commands are interpreted as URIs, regardless
** of whether or not the [SQLITE_OPEN_URI] flag is set when the database
** connection is opened. ^If it is globally disabled, filenames are
@@ -1938,10 +1668,9 @@ struct sqlite3_mem_methods {
** [SQLITE_USE_URI] symbol defined.)^
**
** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]] <dt>SQLITE_CONFIG_COVERING_INDEX_SCAN
-** <dd>^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer
-** argument which is interpreted as a boolean in order to enable or disable
-** the use of covering indices for full table scans in the query optimizer.
-** ^The default setting is determined
+** <dd>^This option takes a single integer argument which is interpreted as
+** a boolean in order to enable or disable the use of covering indices for
+** full table scans in the query optimizer. ^The default setting is determined
** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on"
** if that compile-time option is omitted.
** The ability to disable the use of covering indices for full table scans
@@ -1981,37 +1710,18 @@ struct sqlite3_mem_methods {
** ^The default setting can be overridden by each database connection using
** either the [PRAGMA mmap_size] command, or by using the
** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size
-** will be silently truncated if necessary so that it does not exceed the
-** compile-time maximum mmap size set by the
+** cannot be changed at run-time. Nor may the maximum allowed mmap size
+** exceed the compile-time maximum mmap size set by the
** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^
** ^If either argument to this option is negative, then that argument is
** changed to its compile-time default.
**
** [[SQLITE_CONFIG_WIN32_HEAPSIZE]]
** <dt>SQLITE_CONFIG_WIN32_HEAPSIZE
-** <dd>^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is
-** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro
-** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
+** <dd>^This option is only available if SQLite is compiled for Windows
+** with the [SQLITE_WIN32_MALLOC] pre-processor macro defined.
+** SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
** that specifies the maximum size of the created heap.
-**
-** [[SQLITE_CONFIG_PCACHE_HDRSZ]]
-** <dt>SQLITE_CONFIG_PCACHE_HDRSZ
-** <dd>^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which
-** is a pointer to an integer and writes into that integer the number of extra
-** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE].
-** The amount of extra space required can change depending on the compiler,
-** target platform, and SQLite version.
-**
-** [[SQLITE_CONFIG_PMASZ]]
-** <dt>SQLITE_CONFIG_PMASZ
-** <dd>^The SQLITE_CONFIG_PMASZ option takes a single parameter which
-** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded
-** sorter to that integer. The default minimum PMA Size is set by the
-** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched
-** to help with sort operations when multithreaded sorting
-** is enabled (using the [PRAGMA threads] command) and the amount of content
-** to be sorted exceeds the page size times the minimum of the
-** [PRAGMA cache_size] setting and this value.
** </dl>
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@@ -2037,8 +1747,6 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */
-#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */
-#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */
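A sketch of how several of the options above are combined in practice; all of these calls must happen before sqlite3_initialize() (and therefore before the first connection is opened), and the log callback name here is purely illustrative:

#include <stdio.h>
#include "sqlite3.h"

static void log_cb(void *pArg, int errCode, const char *zMsg){
  (void)pArg;
  fprintf(stderr, "sqlite(%d): %s\n", errCode, zMsg);
}

static int configure_sqlite(void){
  sqlite3_config(SQLITE_CONFIG_SERIALIZED);          /* fully serialized mode */
  sqlite3_config(SQLITE_CONFIG_LOG, log_cb, (void*)0);
  sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);        /* skip allocation stats */
  return sqlite3_initialize();
}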
/*
** CAPI3REF: Database Connection Configuration Options
@@ -2105,17 +1813,15 @@ struct sqlite3_mem_methods {
/*
** CAPI3REF: Enable Or Disable Extended Result Codes
-** METHOD: sqlite3
**
** ^The sqlite3_extended_result_codes() routine enables or disables the
** [extended result codes] feature of SQLite. ^The extended result
** codes are disabled by default for historical compatibility.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff);
+SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff);
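A sketch of turning the feature on right after opening a connection, so that later failures carry codes such as SQLITE_IOERR_READ instead of the bare SQLITE_IOERR:

#include <stdio.h>
#include "sqlite3.h"

static int open_with_extended_codes(const char *zFile, sqlite3 **ppDb){
  int rc = sqlite3_open(zFile, ppDb);
  if( rc==SQLITE_OK ){
    sqlite3_extended_result_codes(*ppDb, 1);
  }else{
    fprintf(stderr, "open failed, extended code %d\n",
            sqlite3_extended_errcode(*ppDb));
  }
  return rc;
}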
/*
** CAPI3REF: Last Insert Rowid
-** METHOD: sqlite3
**
** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables)
** has a unique 64-bit signed
@@ -2163,51 +1869,52 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff)
** unpredictable and might not equal either the old or the new
** last insert [rowid].
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
+SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
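A minimal sketch; the table name is hypothetical and the rowid is read on the same connection that performed the INSERT:

#include <stdio.h>
#include "sqlite3.h"

static void insert_and_report(sqlite3 *db){
  if( sqlite3_exec(db, "INSERT INTO t(x) VALUES(42);", 0, 0, 0)==SQLITE_OK ){
    printf("new rowid: %lld\n", (long long)sqlite3_last_insert_rowid(db));
  }
}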
/*
** CAPI3REF: Count The Number Of Rows Modified
-** METHOD: sqlite3
-**
-** ^This function returns the number of rows modified, inserted or
-** deleted by the most recently completed INSERT, UPDATE or DELETE
-** statement on the database connection specified by the only parameter.
-** ^Executing any other type of SQL statement does not modify the value
-** returned by this function.
**
-** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are
-** considered - auxiliary changes caused by [CREATE TRIGGER | triggers],
-** [foreign key actions] or [REPLACE] constraint resolution are not counted.
-**
-** Changes to a view that are intercepted by
-** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value
-** returned by sqlite3_changes() immediately after an INSERT, UPDATE or
-** DELETE statement run on a view is always zero. Only changes made to real
-** tables are counted.
-**
-** Things are more complicated if the sqlite3_changes() function is
-** executed while a trigger program is running. This may happen if the
-** program uses the [changes() SQL function], or if some other callback
-** function invokes sqlite3_changes() directly. Essentially:
-**
-** <ul>
-** <li> ^(Before entering a trigger program the value returned by
-** sqlite3_changes() function is saved. After the trigger program
-** has finished, the original value is restored.)^
-**
-** <li> ^(Within a trigger program each INSERT, UPDATE and DELETE
-** statement sets the value returned by sqlite3_changes()
-** upon completion as normal. Of course, this value will not include
-** any changes performed by sub-triggers, as the sqlite3_changes()
-** value will be saved and restored after each sub-trigger has run.)^
-** </ul>
-**
-** ^This means that if the changes() SQL function (or similar) is used
-** by the first INSERT, UPDATE or DELETE statement within a trigger, it
-** returns the value as set when the calling statement began executing.
-** ^If it is used by the second or subsequent such statement within a trigger
-** program, the value returned reflects the number of rows modified by the
-** previous INSERT, UPDATE or DELETE statement within the same trigger.
+** ^This function returns the number of database rows that were changed
+** or inserted or deleted by the most recently completed SQL statement
+** on the [database connection] specified by the first parameter.
+** ^(Only changes that are directly specified by the [INSERT], [UPDATE],
+** or [DELETE] statement are counted. Auxiliary changes caused by
+** triggers or [foreign key actions] are not counted.)^ Use the
+** [sqlite3_total_changes()] function to find the total number of changes
+** including changes caused by triggers and foreign key actions.
+**
+** ^Changes to a view that are simulated by an [INSTEAD OF trigger]
+** are not counted. Only real table changes are counted.
+**
+** ^(A "row change" is a change to a single row of a single table
+** caused by an INSERT, DELETE, or UPDATE statement. Rows that
+** are changed as side effects of [REPLACE] constraint resolution,
+** rollback, ABORT processing, [DROP TABLE], or by any other
+** mechanisms do not count as direct row changes.)^
+**
+** A "trigger context" is a scope of execution that begins and
+** ends with the script of a [CREATE TRIGGER | trigger].
+** Most SQL statements are
+** evaluated outside of any trigger. This is the "top level"
+** trigger context. If a trigger fires from the top level, a
+** new trigger context is entered for the duration of that one
+** trigger. Subtriggers create subcontexts for their duration.
+**
+** ^Calling [sqlite3_exec()] or [sqlite3_step()] recursively does
+** not create a new trigger context.
+**
+** ^This function returns the number of direct row changes in the
+** most recent INSERT, UPDATE, or DELETE statement within the same
+** trigger context.
+**
+** ^Thus, when called from the top level, this function returns the
+** number of changes in the most recent INSERT, UPDATE, or DELETE
+** that also occurred at the top level. ^(Within the body of a trigger,
+** the sqlite3_changes() interface can be called to find the number of
+** changes in the most recently completed INSERT, UPDATE, or DELETE
+** statement within the body of the same trigger.
+** However, the number returned does not include changes
+** caused by subtriggers since those have their own context.)^
**
** See also the [sqlite3_total_changes()] interface, the
** [count_changes pragma], and the [changes() SQL function].
@@ -2216,23 +1923,25 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
** while [sqlite3_changes()] is running then the value returned
** is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
+SQLITE_API int sqlite3_changes(sqlite3*);
/*
** CAPI3REF: Total Number Of Rows Modified
-** METHOD: sqlite3
**
-** ^This function returns the total number of rows inserted, modified or
-** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed
-** since the database connection was opened, including those executed as
-** part of trigger programs. ^Executing any other type of SQL statement
-** does not affect the value returned by sqlite3_total_changes().
-**
-** ^Changes made as part of [foreign key actions] are included in the
-** count, but those made as part of REPLACE constraint resolution are
-** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
-** are not counted.
-**
+** ^This function returns the number of row changes caused by [INSERT],
+** [UPDATE] or [DELETE] statements since the [database connection] was opened.
+** ^(The count returned by sqlite3_total_changes() includes all changes
+** from all [CREATE TRIGGER | trigger] contexts and changes made by
+** [foreign key actions]. However,
+** the count does not include changes used to implement [REPLACE] constraints,
+** do rollbacks or ABORT processing, or [DROP TABLE] processing. The
+** count does not include rows of views that fire an [INSTEAD OF trigger],
+** though if the INSTEAD OF trigger makes changes of its own, those changes
+** are counted.)^
+** ^The sqlite3_total_changes() function counts the changes as soon as
+** the statement that makes them is completed (when the statement handle
+** is passed to [sqlite3_reset()] or [sqlite3_finalize()]).
+**
** See also the [sqlite3_changes()] interface, the
** [count_changes pragma], and the [total_changes() SQL function].
**
@@ -2240,11 +1949,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
** while [sqlite3_total_changes()] is running then the value
** returned is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
+SQLITE_API int sqlite3_total_changes(sqlite3*);
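A sketch contrasting the two counters; the table name is hypothetical. sqlite3_changes() reports only the rows touched directly by the statement that just finished, while sqlite3_total_changes() accumulates over the life of the connection:

#include <stdio.h>
#include "sqlite3.h"

static void report_changes(sqlite3 *db){
  sqlite3_exec(db, "UPDATE t SET x = x+1;", 0, 0, 0);
  printf("rows changed by that UPDATE: %d\n", sqlite3_changes(db));
  printf("rows changed since open:     %d\n", sqlite3_total_changes(db));
}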
/*
** CAPI3REF: Interrupt A Long-Running Query
-** METHOD: sqlite3
**
** ^This function causes any pending database operation to abort and
** return at its earliest opportunity. This routine is typically
@@ -2280,7 +1988,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
** If the database connection closes while [sqlite3_interrupt()]
** is running then bad things will likely happen.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
+SQLITE_API void sqlite3_interrupt(sqlite3*);
/*
** CAPI3REF: Determine If An SQL Statement Is Complete
@@ -2315,41 +2023,33 @@ SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
** The input to [sqlite3_complete16()] must be a zero-terminated
** UTF-16 string in native byte order.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *sql);
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
+SQLITE_API int sqlite3_complete(const char *sql);
+SQLITE_API int sqlite3_complete16(const void *sql);
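A sketch of the typical shell-style use: keep appending input lines to a buffer until sqlite3_complete() reports that the text ends with a complete statement, then hand the buffer to the prepare/step machinery:

#include "sqlite3.h"

static int ready_to_run(const char *zAccumulated){
  /* Non-zero once the buffered text forms one or more complete statements. */
  return zAccumulated[0]!=0 && sqlite3_complete(zAccumulated);
}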
/*
** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
-** KEYWORDS: {busy-handler callback} {busy handler}
-** METHOD: sqlite3
-**
-** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X
-** that might be invoked with argument P whenever
-** an attempt is made to access a database table associated with
-** [database connection] D when another thread
-** or process has the table locked.
-** The sqlite3_busy_handler() interface is used to implement
-** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout].
-**
-** ^If the busy callback is NULL, then [SQLITE_BUSY]
+**
+** ^This routine sets a callback function that might be invoked whenever
+** an attempt is made to open a database table that another thread
+** or process has locked.
+**
+** ^If the busy callback is NULL, then [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]
** is returned immediately upon encountering the lock. ^If the busy callback
** is not NULL, then the callback might be invoked with two arguments.
**
** ^The first argument to the busy handler is a copy of the void* pointer which
** is the third argument to sqlite3_busy_handler(). ^The second argument to
** the busy handler callback is the number of times that the busy handler has
-** been invoked previously for the same locking event. ^If the
+** been invoked for this locking event. ^If the
** busy callback returns 0, then no additional attempts are made to
-** access the database and [SQLITE_BUSY] is returned
-** to the application.
+** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned.
** ^If the callback returns non-zero, then another attempt
-** is made to access the database and the cycle repeats.
+** is made to open the database for reading and the cycle repeats.
**
** The presence of a busy handler does not guarantee that it will be invoked
** when there is lock contention. ^If SQLite determines that invoking the busy
** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY]
-** to the application instead of invoking the
-** busy handler.
+** or [SQLITE_IOERR_BLOCKED] instead of invoking the busy handler.
** Consider a scenario where one process is holding a read lock that
** it is trying to promote to a reserved lock and
** a second process is holding a reserved lock that it is trying
@@ -2363,48 +2063,57 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
**
** ^The default busy callback is NULL.
**
+** ^The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED]
+** when SQLite is in the middle of a large transaction where all the
+** changes will not fit into the in-memory cache. SQLite will
+** already hold a RESERVED lock on the database file, but it needs
+** to promote this lock to EXCLUSIVE so that it can spill cache
+** pages into the database file without harm to concurrent
+** readers. ^If it is unable to promote the lock, then the in-memory
+** cache will be left in an inconsistent state and so the error
+** code is promoted from the relatively benign [SQLITE_BUSY] to
+** the more severe [SQLITE_IOERR_BLOCKED]. ^This error code promotion
+** forces an automatic rollback of the changes. See the
+** <a href="/cvstrac/wiki?p=CorruptionFollowingBusyError">
+** CorruptionFollowingBusyError</a> wiki page for a discussion of why
+** this is important.
+**
** ^(There can only be a single busy handler defined for each
** [database connection]. Setting a new busy handler clears any
** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()]
-** or evaluating [PRAGMA busy_timeout=N] will change the
-** busy handler and thus clear any previously set busy handler.
+** will also set or clear the busy handler.
**
** The busy callback should not take any actions which modify the
-** database connection that invoked the busy handler. In other words,
-** the busy handler is not reentrant. Any such actions
+** database connection that invoked the busy handler. Any such actions
** result in undefined behavior.
**
** A busy handler must not close the database connection
** or [prepared statement] that invoked the busy handler.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*);
+SQLITE_API int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*);
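A hand-rolled handler sketch: retry up to ten times, sleeping 50 ms between attempts via sqlite3_sleep(); returning 0 tells SQLite to stop retrying and report the busy condition to the caller:

#include "sqlite3.h"

static int retry_busy(void *pArg, int nPriorCalls){
  (void)pArg;
  if( nPriorCalls>=10 ) return 0;   /* give up; caller sees SQLITE_BUSY */
  sqlite3_sleep(50);                /* wait 50 ms, then ask for a retry */
  return 1;
}

/* Installed with:  sqlite3_busy_handler(db, retry_busy, 0); */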
/*
** CAPI3REF: Set A Busy Timeout
-** METHOD: sqlite3
**
** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps
** for a specified amount of time when a table is locked. ^The handler
** will sleep multiple times until at least "ms" milliseconds of sleeping
** have accumulated. ^After at least "ms" milliseconds of sleeping,
** the handler returns 0 which causes [sqlite3_step()] to return
-** [SQLITE_BUSY].
+** [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED].
**
** ^Calling this routine with an argument less than or equal to zero
** turns off all busy handlers.
**
** ^(There can only be a single busy handler for a particular
-** [database connection] at any given moment. If another busy handler
+** [database connection] any any given moment. If another busy handler
** was defined (using [sqlite3_busy_handler()]) prior to calling
** this routine, that other busy handler is cleared.)^
-**
-** See also: [PRAGMA busy_timeout]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
+SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
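For example, assuming the same open handle db:

    sqlite3_busy_timeout(db, 2000);   /* keep retrying for roughly 2000 ms */
    sqlite3_busy_timeout(db, 0);      /* zero or negative clears all busy handlers */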
/*
** CAPI3REF: Convenience Routines For Running Queries
-** METHOD: sqlite3
**
** This is a legacy interface that is preserved for backwards compatibility.
** Use of this interface is not recommended.
@@ -2475,7 +2184,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
** reflected in subsequent calls to [sqlite3_errcode()] or
** [sqlite3_errmsg()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
+SQLITE_API int sqlite3_get_table(
sqlite3 *db, /* An open database */
const char *zSql, /* SQL to be evaluated */
char ***pazResult, /* Results of the query */
@@ -2483,17 +2192,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
int *pnColumn, /* Number of result columns written here */
char **pzErrmsg /* Error msg written here */
);
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
+SQLITE_API void sqlite3_free_table(char **result);
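A minimal sketch of the legacy interface, assuming an open handle db, <stdio.h>, and C99; row 0 of the (nRow+1)*nCol result array holds the column names:

    char **azResult = 0;  char *zErr = 0;  int nRow = 0, nCol = 0;
    int rc = sqlite3_get_table(db, "SELECT name FROM sqlite_master",
                               &azResult, &nRow, &nCol, &zErr);
    if( rc==SQLITE_OK ){
      for(int i=1; i<=nRow; i++) printf("%s\n", azResult[i*nCol]);
      sqlite3_free_table(azResult);   /* frees the whole result array */
    }else{
      fprintf(stderr, "get_table failed: %s\n", zErr ? zErr : "out of memory");
      sqlite3_free(zErr);
    }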
/*
** CAPI3REF: Formatted String Printing Functions
**
** These routines are work-alikes of the "printf()" family of functions
** from the standard C library.
-** These routines understand most of the common K&R formatting options,
-** plus some additional non-standard formats, detailed below.
-** Note that some of the more obscure formatting options from recent
-** C-library standards are omitted from this implementation.
**
** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their
** results into memory obtained from [sqlite3_malloc()].
@@ -2526,7 +2231,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
** These routines all implement some additional formatting
** options that are useful for constructing SQL statements.
** All of the usual printf() formatting options apply. In addition, there
-** is are "%q", "%Q", "%w" and "%z" options.
+** are "%q", "%Q", and "%z" options.
**
** ^(The %q option works like %s in that it substitutes a nul-terminated
** string from the argument list. But %q also doubles every '\'' character.
@@ -2579,20 +2284,14 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
** The code above will render a correct SQL statement in the zSQL
** variable even if the zText variable is a NULL pointer.
**
-** ^(The "%w" formatting option is like "%q" except that it expects to
-** be contained within double-quotes instead of single quotes, and it
-** escapes the double-quote character instead of the single-quote
-** character.)^ The "%w" formatting option is intended for safely inserting
-** table and column names into a constructed SQL statement.
-**
** ^(The "%z" formatting option works like "%s" but with the
** addition that after the string has been read and copied into
** the result, [sqlite3_free()] is called on the input string.)^
*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char*,...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char*, va_list);
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int,char*,const char*, ...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list);
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
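A short sketch of why %q matters; the people table and the zName input are hypothetical:

    const char *zName = "O'Reilly";              /* untrusted input containing a quote */
    char *zSql = sqlite3_mprintf(
        "INSERT INTO people(name) VALUES('%q')", zName);
    if( zSql ){
      /* zSql is now: INSERT INTO people(name) VALUES('O''Reilly') */
      /* ... prepare or execute zSql here ...                      */
      sqlite3_free(zSql);             /* the buffer came from sqlite3_malloc() */
    }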
/*
** CAPI3REF: Memory Allocation Subsystem
@@ -2609,10 +2308,6 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
** a NULL pointer.
**
-** ^The sqlite3_malloc64(N) routine works just like
-** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead
-** of a signed 32-bit integer.
-**
** ^Calling sqlite3_free() with a pointer previously returned
** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
** that it might be reused. ^The sqlite3_free() routine is
@@ -2624,38 +2319,24 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** might result if sqlite3_free() is called with a non-NULL pointer that
** was not obtained from sqlite3_malloc() or sqlite3_realloc().
**
-** ^The sqlite3_realloc(X,N) interface attempts to resize a
-** prior memory allocation X to be at least N bytes.
-** ^If the X parameter to sqlite3_realloc(X,N)
+** ^(The sqlite3_realloc() interface attempts to resize a
+** prior memory allocation to be at least N bytes, where N is the
+** second parameter. The memory allocation to be resized is the first
+** parameter.)^ ^If the first parameter to sqlite3_realloc()
** is a NULL pointer then its behavior is identical to calling
-** sqlite3_malloc(N).
-** ^If the N parameter to sqlite3_realloc(X,N) is zero or
+** sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc().
+** ^If the second parameter to sqlite3_realloc() is zero or
** negative then the behavior is exactly the same as calling
-** sqlite3_free(X).
-** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation
-** of at least N bytes in size or NULL if insufficient memory is available.
+** sqlite3_free(P) where P is the first parameter to sqlite3_realloc().
+** ^sqlite3_realloc() returns a pointer to a memory allocation
+** of at least N bytes in size or NULL if sufficient memory is unavailable.
** ^If M is the size of the prior allocation, then min(N,M) bytes
** of the prior allocation are copied into the beginning of buffer returned
-** by sqlite3_realloc(X,N) and the prior allocation is freed.
-** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
-** prior allocation is not freed.
-**
-** ^The sqlite3_realloc64(X,N) interfaces works the same as
-** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
-** of a 32-bit signed integer.
-**
-** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
-** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
-** sqlite3_msize(X) returns the size of that memory allocation in bytes.
-** ^The value returned by sqlite3_msize(X) might be larger than the number
-** of bytes requested when X was allocated. ^If X is a NULL pointer then
-** sqlite3_msize(X) returns zero. If X points to something that is not
-** the beginning of memory allocation, or if it points to a formerly
-** valid memory allocation that has now been freed, then the behavior
-** of sqlite3_msize(X) is undefined and possibly harmful.
-**
-** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
-** sqlite3_malloc64(), and sqlite3_realloc64()
+** by sqlite3_realloc() and the prior allocation is freed.
+** ^If sqlite3_realloc() returns NULL, then the prior allocation
+** is not freed.
+**
+** ^The memory returned by sqlite3_malloc() and sqlite3_realloc()
** is always aligned to at least an 8 byte boundary, or to a
** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
** option is used.
@@ -2682,12 +2363,9 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** a block of memory after it has been released using
** [sqlite3_free()] or [sqlite3_realloc()].
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void*, int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void*, sqlite3_uint64);
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void*);
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void sqlite3_free(void*);
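A sketch of the ownership rules spelled out above (the sizes are arbitrary):

    char *p = sqlite3_malloc(64);
    if( p ){
      char *pNew = sqlite3_realloc(p, 256);  /* try to grow the allocation */
      if( pNew==0 ){
        sqlite3_free(p);                     /* on failure the old block is still valid */
      }else{
        p = pNew;                            /* the old pointer must no longer be used */
        sqlite3_free(p);
      }
    }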
/*
** CAPI3REF: Memory Allocator Statistics
@@ -2712,8 +2390,8 @@ SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
** by [sqlite3_memory_highwater(1)] is the high-water mark
** prior to the reset.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
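For example, assuming <stdio.h> (the casts only keep printf portable):

    printf("in use: %lld bytes, high-water: %lld bytes\n",
           (long long)sqlite3_memory_used(),
           (long long)sqlite3_memory_highwater(0));
    sqlite3_memory_highwater(1);      /* reset the high-water mark */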
/*
** CAPI3REF: Pseudo-Random Number Generator
@@ -2725,22 +2403,18 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
** applications to access the same PRNG for other purposes.
**
** ^A call to this routine stores N bytes of randomness into buffer P.
-** ^The P parameter can be a NULL pointer.
-**
-** ^If this routine has not been previously called or if the previous
-** call had N less than one or a NULL pointer for P, then the PRNG is
-** seeded using randomness obtained from the xRandomness method of
-** the default [sqlite3_vfs] object.
-** ^If the previous call to this routine had an N of 1 or more and a
-** non-NULL P then the pseudo-randomness is generated
+**
+** ^The first time this routine is invoked (either internally or by
+** the application) the PRNG is seeded using randomness obtained
+** from the xRandomness method of the default [sqlite3_vfs] object.
+** ^On all subsequent invocations, the pseudo-randomness is generated
** internally and without recourse to the [sqlite3_vfs] xRandomness
** method.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
+SQLITE_API void sqlite3_randomness(int N, void *P);
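For example, to obtain 16 bytes of randomness for use as a salt (the buffer name is arbitrary):

    unsigned char aSalt[16];
    sqlite3_randomness((int)sizeof(aSalt), aSalt);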
/*
** CAPI3REF: Compile-Time Authorization Callbacks
-** METHOD: sqlite3
**
** ^This routine registers an authorizer callback with a particular
** [database connection], supplied in the first argument.
@@ -2819,7 +2493,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
** as stated in the previous paragraph, sqlite3_step() invokes
** sqlite3_prepare_v2() to reprepare a statement after a schema change.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
+SQLITE_API int sqlite3_set_authorizer(
sqlite3*,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pUserData
@@ -2834,8 +2508,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
** [sqlite3_set_authorizer | authorizer documentation] for additional
** information.
**
-** Note that SQLITE_IGNORE is also used as a [conflict resolution mode]
-** returned from the [sqlite3_vtab_on_conflict()] interface.
+** Note that SQLITE_IGNORE is also used as a [SQLITE_ROLLBACK | return code]
+** from the [sqlite3_vtab_on_conflict()] interface.
*/
#define SQLITE_DENY 1 /* Abort the SQL statement with an error */
#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */
@@ -2893,11 +2567,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
#define SQLITE_FUNCTION 31 /* NULL Function Name */
#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */
#define SQLITE_COPY 0 /* No longer used */
-#define SQLITE_RECURSIVE 33 /* NULL NULL */
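A minimal sketch of an authorizer that refuses DELETE statements and allows everything else; db is assumed to be an open connection:

    static int denyDelete(void *pArg, int op, const char *z1, const char *z2,
                          const char *z3, const char *z4){
      (void)pArg; (void)z1; (void)z2; (void)z3; (void)z4;
      return op==SQLITE_DELETE ? SQLITE_DENY : SQLITE_OK;
    }

    /* After opening the connection:  sqlite3_set_authorizer(db, denyDelete, 0); */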
/*
** CAPI3REF: Tracing And Profiling Functions
-** METHOD: sqlite3
**
** These routines register callback functions that can be used for
** tracing and profiling the execution of SQL statements.
@@ -2924,13 +2596,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
** sqlite3_profile() function is considered experimental and is
** subject to change in future versions of SQLite.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
-SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
+SQLITE_API void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
+SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*,
void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
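A minimal tracing sketch, assuming <stdio.h>; stderr is just one possible destination:

    static void traceCallback(void *pNotUsed, const char *zSql){
      (void)pNotUsed;
      fprintf(stderr, "SQL: %s\n", zSql);
    }

    /* sqlite3_trace(db, traceCallback, 0); */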
/*
** CAPI3REF: Query Progress Callbacks
-** METHOD: sqlite3
**
** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
** function X to be invoked periodically during long running calls to
@@ -2960,11 +2631,10 @@ SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
** database connections for the meaning of "modify" in this paragraph.
**
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
+SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
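A sketch of the usual cancellation pattern; how cancelRequested gets set (another thread, a UI event) is outside the scope of this header, and real code must arrange the cross-thread visibility itself:

    static int cancelRequested = 0;   /* set elsewhere when the user cancels */

    static int checkCancel(void *pArg){
      return *(int*)pArg;             /* non-zero => the statement stops with SQLITE_INTERRUPT */
    }

    /* sqlite3_progress_handler(db, 1000, checkCancel, &cancelRequested); */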
/*
** CAPI3REF: Opening A New Database Connection
-** CONSTRUCTOR: sqlite3
**
** ^These routines open an SQLite database file as specified by the
** filename argument. ^The filename argument is interpreted as UTF-8 for
@@ -2979,9 +2649,9 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** an English language description of the error following a failure of any
** of the sqlite3_open() routines.
**
-** ^The default encoding will be UTF-8 for databases created using
-** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases
-** created using sqlite3_open16() will be UTF-16 in the native byte order.
+** ^The default encoding for the database will be UTF-8 if
+** sqlite3_open() or sqlite3_open_v2() is called and
+** UTF-16 in the native byte order if sqlite3_open16() is used.
**
** Whether or not an error occurs when it is opened, resources
** associated with the [database connection] handle should be released by
@@ -3069,14 +2739,13 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** then it is interpreted as an absolute path. ^If the path does not begin
** with a '/' (meaning that the authority section is omitted from the URI)
** then the path is interpreted as a relative path.
-** ^(On windows, the first component of an absolute path
-** is a drive specification (e.g. "C:").)^
+** ^On windows, the first component of an absolute path
+** is a drive specification (e.g. "C:").
**
** [[core URI query parameters]]
** The query component of a URI may contain parameters that are interpreted
** either by SQLite itself, or by a [VFS | custom VFS implementation].
-** SQLite and its built-in [VFSes] interpret the
-** following query parameters:
+** SQLite interprets the following three query parameters:
**
** <ul>
** <li> <b>vfs</b>: ^The "vfs" parameter may be used to specify the name of
@@ -3110,28 +2779,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** ^If sqlite3_open_v2() is used and the "cache" parameter is present in
** a URI filename, its value overrides any behavior requested by setting
** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
-**
-** <li> <b>psow</b>: ^The psow parameter indicates whether or not the
-** [powersafe overwrite] property does or does not apply to the
-** storage media on which the database file resides.
-**
-** <li> <b>nolock</b>: ^The nolock parameter is a boolean query parameter
-** which if set disables file locking in rollback journal modes. This
-** is useful for accessing a database on a filesystem that does not
-** support locking. Caution: Database corruption might result if two
-** or more processes write to the same database and any one of those
-** processes uses nolock=1.
-**
-** <li> <b>immutable</b>: ^The immutable parameter is a boolean query
-** parameter that indicates that the database file is stored on
-** read-only media. ^When immutable is set, SQLite assumes that the
-** database file cannot be changed, even by a process with higher
-** privilege, and so the database is opened read-only and all locking
-** and change detection is disabled. Caution: Setting the immutable
-** property on a database file that does in fact change can result
-** in incorrect query results and/or [SQLITE_CORRUPT] errors.
-** See also: [SQLITE_IOCAP_IMMUTABLE].
-**
** </ul>
**
** ^Specifying an unknown parameter in the query component of a URI is not an
@@ -3161,9 +2808,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** Open file "data.db" in the current directory for read-only access.
** Regardless of whether or not shared-cache mode is enabled by
** default, use a private cache.
-** <tr><td> file:/home/fred/data.db?vfs=unix-dotfile <td>
-** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
-** that uses dot-files in place of posix advisory locking.
+** <tr><td> file:/home/fred/data.db?vfs=unix-nolock <td>
+** Open file "/home/fred/data.db". Use the special VFS "unix-nolock".
** <tr><td> file:data.db?mode=readonly <td>
** An error. "readonly" is not a valid option for the "mode" parameter.
** </table>
@@ -3189,15 +2835,15 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
**
** See also: [sqlite3_temp_directory]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
+SQLITE_API int sqlite3_open(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
+SQLITE_API int sqlite3_open16(
const void *filename, /* Database filename (UTF-16) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
+SQLITE_API int sqlite3_open_v2(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb, /* OUT: SQLite db handle */
int flags, /* Flags */
@@ -3243,22 +2889,19 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
** VFS method, then the behavior of this routine is undefined and probably
** undesirable.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam);
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
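A sketch of opening with a URI filename; the flag combination and query parameters shown are just one valid choice, and the sqlite3_uri_* helpers above expect the filename exactly as the VFS received it, so they are normally used from inside a VFS rather than on an arbitrary string:

    sqlite3 *db = 0;
    int rc = sqlite3_open_v2("file:data.db?mode=rwc&cache=private", &db,
                             SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
                             SQLITE_OPEN_URI, 0);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
    }
    sqlite3_close(db);                /* required even after a failed open */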
/*
** CAPI3REF: Error Codes And Messages
-** METHOD: sqlite3
-**
-** ^If the most recent sqlite3_* API call associated with
-** [database connection] D failed, then the sqlite3_errcode(D) interface
-** returns the numeric [result code] or [extended result code] for that
-** API call.
-** If the most recent API call was successful,
-** then the return value from sqlite3_errcode() is undefined.
-** ^The sqlite3_extended_errcode()
+**
+** ^The sqlite3_errcode() interface returns the numeric [result code] or
+** [extended result code] for the most recent failed sqlite3_* API call
+** associated with a [database connection]. If a prior API call failed
+** but the most recent API call succeeded, the return value from
+** sqlite3_errcode() is undefined. ^The sqlite3_extended_errcode()
** interface is the same except that it always returns the
** [extended result code] even when extended result codes are
** disabled.
@@ -3289,41 +2932,40 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const cha
** was invoked incorrectly by the application. In that case, the
** error code and message may or may not be set.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db);
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3*);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int);
+SQLITE_API int sqlite3_errcode(sqlite3 *db);
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db);
+SQLITE_API const char *sqlite3_errmsg(sqlite3*);
+SQLITE_API const void *sqlite3_errmsg16(sqlite3*);
+SQLITE_API const char *sqlite3_errstr(int);
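For example, after any failed call on the connection (the CREATE TABLE text is arbitrary, <stdio.h> assumed):

    if( sqlite3_exec(db, "CREATE TABLE t1(a,b)", 0, 0, 0)!=SQLITE_OK ){
      fprintf(stderr, "error %d (%s): %s\n",
              sqlite3_errcode(db),
              sqlite3_errstr(sqlite3_errcode(db)),
              sqlite3_errmsg(db));
    }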
/*
-** CAPI3REF: Prepared Statement Object
+** CAPI3REF: SQL Statement Object
** KEYWORDS: {prepared statement} {prepared statements}
**
-** An instance of this object represents a single SQL statement that
-** has been compiled into binary form and is ready to be evaluated.
-**
-** Think of each SQL statement as a separate computer program. The
-** original SQL text is source code. A prepared statement object
-** is the compiled object code. All SQL must be converted into a
-** prepared statement before it can be run.
+** An instance of this object represents a single SQL statement.
+** This object is variously known as a "prepared statement" or a
+** "compiled SQL statement" or simply as a "statement".
**
-** The life-cycle of a prepared statement object usually goes like this:
+** The life of a statement object goes something like this:
**
** <ol>
-** <li> Create the prepared statement object using [sqlite3_prepare_v2()].
-** <li> Bind values to [parameters] using the sqlite3_bind_*()
+** <li> Create the object using [sqlite3_prepare_v2()] or a related
+** function.
+** <li> Bind values to [host parameters] using the sqlite3_bind_*()
** interfaces.
** <li> Run the SQL by calling [sqlite3_step()] one or more times.
-** <li> Reset the prepared statement using [sqlite3_reset()] then go back
+** <li> Reset the statement using [sqlite3_reset()] then go back
** to step 2. Do this zero or more times.
** <li> Destroy the object using [sqlite3_finalize()].
** </ol>
+**
+** Refer to documentation on individual methods above for additional
+** information.
*/
typedef struct sqlite3_stmt sqlite3_stmt;
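A minimal sketch of that life cycle, assuming <sqlite3.h> and <stdio.h> are included, db is an open connection, and a hypothetical users(name, age) table exists:

    static int listUsers(sqlite3 *db, int minAge){
      sqlite3_stmt *pStmt = 0;
      const char *zSql = "SELECT name FROM users WHERE age >= ?1";
      /* Passing strlen(zSql)+1 instead of -1 would spare SQLite a copy. */
      int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);  /* step 1 */
      if( rc!=SQLITE_OK ) return rc;
      sqlite3_bind_int(pStmt, 1, minAge);                    /* step 2 */
      while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){       /* step 3 */
        printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
      }
      sqlite3_finalize(pStmt);                               /* step 5 */
      return rc==SQLITE_DONE ? SQLITE_OK : rc;
    }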
/*
** CAPI3REF: Run-time Limits
-** METHOD: sqlite3
**
** ^(This interface allows the size of various constructs to be limited
** on a connection by connection basis. The first parameter is the
@@ -3361,7 +3003,7 @@ typedef struct sqlite3_stmt sqlite3_stmt;
**
** New run-time limit categories may be added in future releases.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
+SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
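For example (the one-megabyte figure is arbitrary):

    int priorLimit = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);
    /* ... run less-trusted SQL ... */
    sqlite3_limit(db, SQLITE_LIMIT_LENGTH, priorLimit);   /* restore the old value */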
/*
** CAPI3REF: Run-Time Limit Categories
@@ -3413,10 +3055,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
**
** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(<dt>SQLITE_LIMIT_TRIGGER_DEPTH</dt>
** <dd>The maximum depth of recursion for triggers.</dd>)^
-**
-** [[SQLITE_LIMIT_WORKER_THREADS]] ^(<dt>SQLITE_LIMIT_WORKER_THREADS</dt>
-** <dd>The maximum number of auxiliary worker threads that a single
-** [prepared statement] may start.</dd>)^
** </dl>
*/
#define SQLITE_LIMIT_LENGTH 0
@@ -3430,13 +3068,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8
#define SQLITE_LIMIT_VARIABLE_NUMBER 9
#define SQLITE_LIMIT_TRIGGER_DEPTH 10
-#define SQLITE_LIMIT_WORKER_THREADS 11
/*
** CAPI3REF: Compiling An SQL Statement
** KEYWORDS: {SQL statement compiler}
-** METHOD: sqlite3
-** CONSTRUCTOR: sqlite3_stmt
**
** To execute an SQL query, it must first be compiled into a byte-code
** program using one of these routines.
@@ -3450,14 +3085,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2()
** use UTF-16.
**
-** ^If the nByte argument is negative, then zSql is read up to the
-** first zero terminator. ^If nByte is positive, then it is the
-** number of bytes read from zSql. ^If nByte is zero, then no prepared
-** statement is generated.
-** If the caller knows that the supplied string is nul-terminated, then
-** there is a small performance advantage to passing an nByte parameter that
-** is the number of bytes in the input string <i>including</i>
-** the nul-terminator.
+** ^If the nByte argument is less than zero, then zSql is read up to the
+** first zero terminator. ^If nByte is non-negative, then it is the maximum
+** number of bytes read from zSql. ^When nByte is non-negative, the
+** zSql string ends at either the first '\000' or '\u0000' character or
+** the nByte-th byte, whichever comes first. If the caller knows
+** that the supplied string is nul-terminated, then there is a small
+** performance advantage to be gained by passing an nByte parameter that
+** is equal to the number of bytes in the input string <i>including</i>
+** the nul-terminator bytes as this saves SQLite from having to
+** make a copy of the input string.
**
** ^If pzTail is not NULL then *pzTail is made to point to the first byte
** past the end of the first SQL statement in zSql. These routines only
@@ -3513,28 +3150,28 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
** </li>
** </ol>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
+SQLITE_API int sqlite3_prepare(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
+SQLITE_API int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
+SQLITE_API int sqlite3_prepare16(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const void **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
+SQLITE_API int sqlite3_prepare16_v2(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
@@ -3544,17 +3181,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
/*
** CAPI3REF: Retrieving Statement SQL
-** METHOD: sqlite3_stmt
**
** ^This interface can be used to retrieve a saved copy of the original
** SQL text used to create a [prepared statement] if that statement was
** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If An SQL Statement Writes The Database
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if
** and only if the [prepared statement] X makes no direct changes to
@@ -3582,16 +3217,14 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
** change the configuration of a database connection, they do not make
** changes to the content of the database files on disk.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the
** [prepared statement] S has been stepped at least once using
-** [sqlite3_step(S)] but has neither run to completion (returned
-** [SQLITE_DONE] from [sqlite3_step(S)]) nor
+** [sqlite3_step(S)] but has not run to completion and/or has not
** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S)
** interface returns false if S is a NULL pointer. If S is not a
** NULL pointer and is not a pointer to a valid [prepared statement]
@@ -3603,7 +3236,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
** for example, in diagnostic routines to search for prepared
** statements that are holding a transaction open.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*);
/*
** CAPI3REF: Dynamically Typed Value Object
@@ -3618,9 +3251,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
** Some interfaces require a protected sqlite3_value. Other interfaces
** will accept either a protected or an unprotected sqlite3_value.
** Every interface that accepts sqlite3_value arguments specifies
-** whether or not it requires a protected sqlite3_value. The
-** [sqlite3_value_dup()] interface can be used to construct a new
-** protected sqlite3_value from an unprotected sqlite3_value.
+** whether or not it requires a protected sqlite3_value.
**
** The terms "protected" and "unprotected" refer to whether or not
** a mutex is held. An internal mutex is held for a protected
@@ -3664,7 +3295,6 @@ typedef struct sqlite3_context sqlite3_context;
** CAPI3REF: Binding Values To Prepared Statements
** KEYWORDS: {host parameter} {host parameters} {host parameter name}
** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding}
-** METHOD: sqlite3_stmt
**
** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
** literals may be replaced by a [parameter] that matches one of following
@@ -3711,18 +3341,18 @@ typedef struct sqlite3_context sqlite3_context;
** If the fourth parameter to sqlite3_bind_blob() is negative, then
** the behavior is undefined.
** If a non-negative fourth parameter is provided to sqlite3_bind_text()
-** or sqlite3_bind_text16() or sqlite3_bind_text64() then
-** that parameter must be the byte offset
+** or sqlite3_bind_text16() then that parameter must be the byte offset
** where the NUL terminator would occur assuming the string were NUL
** terminated. If any NUL characters occur at byte offsets less than
** the value of the fourth parameter then the resulting string value will
** contain embedded NULs. The result of expressions involving strings
** with embedded NULs is undefined.
**
-** ^The fifth argument to the BLOB and string binding interfaces
-** is a destructor used to dispose of the BLOB or
+** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and
+** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or
** string after SQLite has finished with it. ^The destructor is called
-** to dispose of the BLOB or string even if the call to bind API fails.
+** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(),
+** sqlite3_bind_text(), or sqlite3_bind_text16() fails.
** ^If the fifth argument is
** the special value [SQLITE_STATIC], then SQLite assumes that the
** information is in static, unmanaged space and does not need to be freed.
@@ -3730,14 +3360,6 @@ typedef struct sqlite3_context sqlite3_context;
** SQLite makes its own private copy of the data immediately, before
** the sqlite3_bind_*() routine returns.
**
-** ^The sixth argument to sqlite3_bind_text64() must be one of
-** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]
-** to specify the encoding of the text in the third parameter. If
-** the sixth argument to sqlite3_bind_text64() is not one of the
-** allowed values shown above, or if the text encoding is different
-** from the encoding specified by the sixth parameter, then the behavior
-** is undefined.
-**
** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that
** is filled with zeroes. ^A zeroblob uses a fixed amount of memory
** (just an integer to hold its size) while it is being processed.
@@ -3758,33 +3380,24 @@ typedef struct sqlite3_context sqlite3_context;
**
** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an
** [error code] if anything goes wrong.
-** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB
-** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or
-** [SQLITE_MAX_LENGTH].
** ^[SQLITE_RANGE] is returned if the parameter
** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails.
**
** See also: [sqlite3_bind_parameter_count()],
** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
- void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt*, int, double);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt*, int, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt*, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
- void(*)(void*), unsigned char encoding);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
+SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double);
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int);
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int);
+SQLITE_API int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
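A sketch of the destructor choices, assuming pStmt was prepared elsewhere with at least two parameters:

    char zBuf[32];
    sqlite3_snprintf((int)sizeof(zBuf), zBuf, "user-%d", 7);
    /* zBuf is about to change or go out of scope, so request a private copy: */
    sqlite3_bind_text(pStmt, 1, zBuf, -1, SQLITE_TRANSIENT);
    /* A string literal never moves, so no copy is necessary: */
    sqlite3_bind_text(pStmt, 2, "pending", -1, SQLITE_STATIC);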
/*
** CAPI3REF: Number Of SQL Parameters
-** METHOD: sqlite3_stmt
**
** ^This routine can be used to find the number of [SQL parameters]
** in a [prepared statement]. SQL parameters are tokens of the
@@ -3801,11 +3414,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite
** [sqlite3_bind_parameter_name()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*);
/*
** CAPI3REF: Name Of A Host Parameter
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_bind_parameter_name(P,N) interface returns
** the name of the N-th [SQL parameter] in the [prepared statement] P.
@@ -3829,11 +3441,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*, int);
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
/*
** CAPI3REF: Index Of A Parameter With A Given Name
-** METHOD: sqlite3_stmt
**
** ^Return the index of an SQL parameter given its name. ^The
** index value returned is suitable for use as the second
@@ -3844,23 +3455,21 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*,
**
** See also: [sqlite3_bind_blob|sqlite3_bind()],
** [sqlite3_bind_parameter_count()], and
-** [sqlite3_bind_parameter_name()].
+** [sqlite3_bind_parameter_name()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
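For example, with named parameters (the table t is hypothetical, db an open handle):

    sqlite3_stmt *pStmt = 0;
    sqlite3_prepare_v2(db, "SELECT * FROM t WHERE a = :alpha AND b = :beta",
                       -1, &pStmt, 0);
    int iAlpha = sqlite3_bind_parameter_index(pStmt, ":alpha");  /* prefix included */
    if( iAlpha ) sqlite3_bind_int(pStmt, iAlpha, 42);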
/*
** CAPI3REF: Reset All Bindings On A Prepared Statement
-** METHOD: sqlite3_stmt
**
** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset
** the [sqlite3_bind_blob | bindings] on a [prepared statement].
** ^Use this routine to reset all host parameters to NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
/*
** CAPI3REF: Number Of Columns In A Result Set
-** METHOD: sqlite3_stmt
**
** ^Return the number of columns in the result set returned by the
** [prepared statement]. ^This routine returns 0 if pStmt is an SQL
@@ -3868,11 +3477,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
**
** See also: [sqlite3_data_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Column Names In A Result Set
-** METHOD: sqlite3_stmt
**
** ^These routines return the name assigned to a particular column
** in the result set of a [SELECT] statement. ^The sqlite3_column_name()
@@ -3897,12 +3505,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
** then the name of the column is unspecified and may change from
** one release of SQLite to the next.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt*, int N);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N);
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N);
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N);
/*
** CAPI3REF: Source Of Data In A Query Result
-** METHOD: sqlite3_stmt
**
** ^These routines provide a means to determine the database, table, and
** table column that is the origin of a particular result column in
@@ -3946,16 +3553,15 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N
** for the same [prepared statement] and result column
** at the same time then the results are undefined.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
/*
** CAPI3REF: Declared Datatype Of A Query Result
-** METHOD: sqlite3_stmt
**
** ^(The first parameter is a [prepared statement].
** If this statement is a [SELECT] statement and the Nth column of the
@@ -3983,12 +3589,11 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*
** is associated with individual values, not with the containers
** used to hold those values.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
/*
** CAPI3REF: Evaluate An SQL Statement
-** METHOD: sqlite3_stmt
**
** After a [prepared statement] has been prepared using either
** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy
@@ -4064,11 +3669,10 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,in
** then the more specific [error codes] are returned directly
** by sqlite3_step(). The use of the "v2" interface is recommended.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
/*
** CAPI3REF: Number of columns in a result set
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_data_count(P) interface returns the number of columns in the
** current row of the result set of [prepared statement] P.
@@ -4085,7 +3689,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
**
** See also: [sqlite3_column_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Fundamental Datatypes
@@ -4122,7 +3726,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Result Values From A Query
** KEYWORDS: {column access functions}
-** METHOD: sqlite3_stmt
+**
+** These routines form the "result set" interface.
**
** ^These routines return information about a single column of the current
** result row of a query. ^In every case the first argument is a pointer
@@ -4183,14 +3788,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** even empty strings, are always zero-terminated. ^The return
** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer.
**
-** <b>Warning:</b> ^The object returned by [sqlite3_column_value()] is an
-** [unprotected sqlite3_value] object. In a multithreaded environment,
-** an unprotected sqlite3_value object may only be used safely with
-** [sqlite3_bind_value()] and [sqlite3_result_value()].
+** ^The object returned by [sqlite3_column_value()] is an
+** [unprotected sqlite3_value] object. An unprotected sqlite3_value object
+** may only be used with [sqlite3_bind_value()] and [sqlite3_result_value()].
** If the [unprotected sqlite3_value] object returned by
** [sqlite3_column_value()] is used in any other way, including calls
** to routines like [sqlite3_value_int()], [sqlite3_value_text()],
-** or [sqlite3_value_bytes()], the behavior is not threadsafe.
+** or [sqlite3_value_bytes()], then the behavior is undefined.
**
** These routines attempt to convert the value where appropriate. ^For
** example, if the internal representation is FLOAT and a text result
@@ -4221,6 +3825,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** </table>
** </blockquote>)^
**
+** The table above makes reference to standard C library functions atoi()
+** and atof(). SQLite does not really use these functions. It has its
+** own equivalent internal routines. The atoi() and atof() names are
+** used in the table for brevity and because they are familiar to most
+** C programmers.
+**
** Note that when type conversions occur, pointers returned by prior
** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or
** sqlite3_column_text16() may be invalidated.
@@ -4245,7 +3855,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** of conversion are done in place when it is possible, but sometimes they
** are not possible and in those cases prior pointers are invalidated.
**
-** The safest policy is to invoke these routines
+** The safest and easiest to remember policy is to invoke these routines
** in one of the following ways:
**
** <ul>
@@ -4265,7 +3875,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** ^The pointers returned are valid until a type conversion occurs as
** described above, or until [sqlite3_step()] or [sqlite3_reset()] or
** [sqlite3_finalize()] is called. ^The memory space used to hold strings
-** and BLOBs is freed automatically. Do <em>not</em> pass the pointers returned
+** and BLOBs is freed automatically. Do <b>not</b> pass the pointers returned
** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
** [sqlite3_free()].
**
@@ -4275,20 +3885,19 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** pointer. Subsequent calls to [sqlite3_errcode()] will return
** [SQLITE_NOMEM].)^
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt*, int iCol);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt*, int iCol);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
+SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
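A sketch of the recommended ordering, assuming pStmt has just been positioned on a row by sqlite3_step():

    const unsigned char *zTxt = sqlite3_column_text(pStmt, 0);   /* text first...      */
    int nTxt = sqlite3_column_bytes(pStmt, 0);                   /* ...then its length */
    const void *pBlob = sqlite3_column_blob(pStmt, 1);           /* BLOB first...      */
    int nBlob = sqlite3_column_bytes(pStmt, 1);                  /* ...then its size   */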
/*
** CAPI3REF: Destroy A Prepared Statement Object
-** DESTRUCTOR: sqlite3_stmt
**
** ^The sqlite3_finalize() function is called to delete a [prepared statement].
** ^If the most recent evaluation of the statement encountered no errors
@@ -4312,11 +3921,10 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int
** statement after it has been finalized can result in undefined and
** undesirable behavior such as segfaults and heap corruption.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Reset A Prepared Statement Object
-** METHOD: sqlite3_stmt
**
** The sqlite3_reset() function is called to reset a [prepared statement]
** object back to its initial state, ready to be re-executed.
@@ -4339,14 +3947,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
** KEYWORDS: {application-defined SQL function}
** KEYWORDS: {application-defined SQL functions}
-** METHOD: sqlite3
**
** ^These functions (collectively known as "function creation routines")
** are used to add SQL functions or aggregates or to redefine the behavior
@@ -4378,24 +3985,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
**
** ^The fourth parameter, eTextRep, specifies what
** [SQLITE_UTF8 | text encoding] this SQL function prefers for
-** its parameters. The application should set this parameter to
-** [SQLITE_UTF16LE] if the function implementation invokes
-** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the
-** implementation invokes [sqlite3_value_text16be()] on an input, or
-** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8]
-** otherwise. ^The same SQL function may be registered multiple times using
-** different preferred text encodings, with different implementations for
-** each encoding.
+** its parameters. Every SQL function implementation must be able to work
+** with UTF-8, UTF-16le, or UTF-16be. But some implementations may be
+** more efficient with one encoding than another. ^An application may
+** invoke sqlite3_create_function() or sqlite3_create_function16() multiple
+** times with the same function but with different values of eTextRep.
** ^When multiple implementations of the same function are available, SQLite
** will pick the one that involves the least amount of data conversion.
-**
-** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC]
-** to signal that the function will always return the same result given
-** the same inputs within a single SQL statement. Most SQL functions are
-** deterministic. The built-in [random()] SQL function is an example of a
-** function that is not deterministic. The SQLite query planner is able to
-** perform additional optimizations on deterministic functions, so use
-** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
+** If there is only a single implementation which does not care what text
+** encoding is used, then the fourth argument should be [SQLITE_ANY].
**
** ^(The fifth parameter is an arbitrary pointer. The implementation of the
** function can gain access to this pointer using [sqlite3_user_data()].)^
@@ -4439,7 +4037,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
** close the database connection nor finalize or reset the prepared
** statement in which the function is running.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4449,7 +4047,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
+SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
const void *zFunctionName,
int nArg,
@@ -4459,7 +4057,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
+SQLITE_API int sqlite3_create_function_v2(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4477,50 +4075,39 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
** These constant define integer codes that represent the various
** text encodings supported by SQLite.
*/
-#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */
-#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */
-#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */
+#define SQLITE_UTF8 1
+#define SQLITE_UTF16LE 2
+#define SQLITE_UTF16BE 3
#define SQLITE_UTF16 4 /* Use native byte order */
-#define SQLITE_ANY 5 /* Deprecated */
+#define SQLITE_ANY 5 /* sqlite3_create_function only */
#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */
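A minimal sketch of a scalar function half(X); the choice of SQLITE_UTF8 simply reflects how this implementation reads its argument:

    static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
    }

    static int registerHalf(sqlite3 *db){
      return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                     halfFunc, 0, 0);
    }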
/*
-** CAPI3REF: Function Flags
-**
-** These constants may be ORed together with the
-** [SQLITE_UTF8 | preferred text encoding] as the fourth argument
-** to [sqlite3_create_function()], [sqlite3_create_function16()], or
-** [sqlite3_create_function_v2()].
-*/
-#define SQLITE_DETERMINISTIC 0x800
-
-/*
** CAPI3REF: Deprecated Functions
** DEPRECATED
**
** These functions are [deprecated]. In order to maintain
** backwards compatibility with older code, these functions continue
** to be supported. However, new applications should avoid
-** the use of these functions. To encourage programmers to avoid
-** these functions, we will not explain what they do.
+** the use of these functions. To help encourage people to avoid
+** using these functions, we are not going to tell you what they do.
*/
#ifndef SQLITE_OMIT_DEPRECATED
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_global_recover(void);
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_thread_cleanup(void);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
+SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
void*,sqlite3_int64);
#endif
/*
-** CAPI3REF: Obtaining SQL Values
-** METHOD: sqlite3_value
+** CAPI3REF: Obtaining SQL Function Parameter Values
**
** The C-language implementation of SQL functions and aggregates uses
** this set of interface routines to access the parameter values on
-** the function or aggregate.
+** the function or aggregate.
**
** The xFunc (for scalar functions) or xStep (for aggregates) parameters
** to [sqlite3_create_function()] and [sqlite3_create_function16()]
@@ -4535,7 +4122,7 @@ SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(voi
** object results in undefined behavior.
**
** ^These routines work just like the corresponding [column access functions]
-** except that these routines take a single [protected sqlite3_value] object
+** except that these routines take a single [protected sqlite3_value] object
** pointer instead of a [sqlite3_stmt*] pointer and an integer column number.
**
** ^The sqlite3_value_text16() interface extracts a UTF-16 string
@@ -4560,55 +4147,21 @@ SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(voi
** These routines must be called from the same thread as
** the SQL function that supplied the [sqlite3_value*] parameters.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value*);
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value*);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value*);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
-
-/*
-** CAPI3REF: Finding The Subtype Of SQL Values
-** METHOD: sqlite3_value
-**
-** The sqlite3_value_subtype(V) function returns the subtype for
-** an [application-defined SQL function] argument V. The subtype
-** information can be used to pass a limited amount of context from
-** one SQL function to another. Use the [sqlite3_result_subtype()]
-** routine to set the subtype for the return value of an SQL function.
-**
-** SQLite makes no use of subtype itself. It merely passes the subtype
-** from the result of one [application-defined SQL function] into the
-** input of another.
-*/
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
-
-/*
-** CAPI3REF: Copy And Free SQL Values
-** METHOD: sqlite3_value
-**
-** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
-** object D and returns a pointer to that copy. ^The [sqlite3_value] returned
-** is a [protected sqlite3_value] object even if the input is not.
-** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
-** memory allocation fails.
-**
-** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
-** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
-** then sqlite3_value_free(V) is a harmless no-op.
-*/
-SQLITE_API SQLITE_EXPERIMENTAL sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
-SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
+SQLITE_API double sqlite3_value_double(sqlite3_value*);
+SQLITE_API int sqlite3_value_int(sqlite3_value*);
+SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
+SQLITE_API int sqlite3_value_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
/*
** CAPI3REF: Obtain Aggregate Function Context
-** METHOD: sqlite3_context
**
** Implementations of aggregate SQL functions use this
** routine to allocate memory for storing their state.
@@ -4649,11 +4202,10 @@ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_value_free(sqlite3_va
** This routine must be called from the same thread in which
** the aggregate SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int nBytes);
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
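A minimal aggregate sketch, sumlen(X), which adds up the byte lengths of its arguments; the struct and names are illustrative only:

    typedef struct SumLenCtx { sqlite3_int64 nTotal; } SumLenCtx;

    static void sumlenStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      SumLenCtx *p = sqlite3_aggregate_context(ctx, (int)sizeof(*p)); /* zeroed on first use */
      (void)argc;
      if( p ) p->nTotal += sqlite3_value_bytes(argv[0]);
    }

    static void sumlenFinal(sqlite3_context *ctx){
      SumLenCtx *p = sqlite3_aggregate_context(ctx, 0);               /* 0: never allocates */
      sqlite3_result_int64(ctx, p ? p->nTotal : 0);
    }

    /* Registered with:
    ** sqlite3_create_function(db, "sumlen", 1, SQLITE_UTF8, 0, 0, sumlenStep, sumlenFinal); */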
/*
** CAPI3REF: User Data For Functions
-** METHOD: sqlite3_context
**
** ^The sqlite3_user_data() interface returns a copy of
** the pointer that was the pUserData parameter (the 5th parameter)
@@ -4664,11 +4216,10 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int
** This routine must be called from the same thread in which
** the application-defined function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
+SQLITE_API void *sqlite3_user_data(sqlite3_context*);
/*
** CAPI3REF: Database Connection For Functions
-** METHOD: sqlite3_context
**
** ^The sqlite3_context_db_handle() interface returns a copy of
** the pointer to the [database connection] (the 1st parameter)
@@ -4676,11 +4227,10 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
** and [sqlite3_create_function16()] routines that originally
** registered the application defined function.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
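A hypothetical scalar function (sketch only; tagFunc and its "tag" user data are not part of the header) showing both lookups from inside a function callback: the registration-time pointer via sqlite3_user_data() and the owning connection via sqlite3_context_db_handle().

static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const char *zTag = (const char*)sqlite3_user_data(ctx); /* 5th arg of sqlite3_create_function() */
  sqlite3 *db = sqlite3_context_db_handle(ctx);           /* connection that registered the function */
  if( zTag==0 ) zTag = sqlite3_db_filename(db, "main");
  sqlite3_result_text(ctx, zTag, -1, SQLITE_TRANSIENT);
  (void)argc; (void)argv;
}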
/*
** CAPI3REF: Function Auxiliary Data
-** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
** associate metadata with argument values. If the same value is passed to
@@ -4729,8 +4279,8 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
** These routines must be called from the same thread in which
** the SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context*, int N);
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
+SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
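The canonical use is caching an expensive-to-derive object for a constant argument across calls within one statement. A sketch, assuming hypothetical compile_pattern()/free_pattern() helpers (they are not SQLite APIs):

extern void *compile_pattern(const char *zPattern);  /* hypothetical helper */
extern void free_pattern(void *pCompiled);           /* hypothetical helper */

static void matchFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  void *pCompiled = sqlite3_get_auxdata(ctx, 0);
  if( pCompiled==0 ){
    pCompiled = compile_pattern((const char*)sqlite3_value_text(argv[0]));
    if( pCompiled==0 ){ sqlite3_result_error(ctx, "invalid pattern", -1); return; }
    /* SQLite invokes free_pattern() whenever it discards this metadata. */
    sqlite3_set_auxdata(ctx, 0, pCompiled, free_pattern);
  }
  /* ... evaluate argv[1] against pCompiled and set a result via sqlite3_result_*() ... */
  (void)argc;
}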
/*
@@ -4753,7 +4303,6 @@ typedef void (*sqlite3_destructor_type)(void*);
/*
** CAPI3REF: Setting The Result Of An SQL Function
-** METHOD: sqlite3_context
**
** These routines are used by the xFunc or xFinal callbacks that
** implement SQL functions and aggregates. See
@@ -4769,9 +4318,9 @@ typedef void (*sqlite3_destructor_type)(void*);
** to by the second parameter and which is N bytes long where N is the
** third parameter.
**
-** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
-** interfaces set the result of the application-defined function to be
-** a BLOB containing all zero bytes and N bytes in size.
+** ^The sqlite3_result_zeroblob() interfaces set the result of
+** the application-defined function to be a BLOB containing all zero
+** bytes and N bytes in size, where N is the value of the 2nd parameter.
**
** ^The sqlite3_result_double() interface sets the result from
** an application-defined function to be a floating point value specified
@@ -4820,10 +4369,6 @@ typedef void (*sqlite3_destructor_type)(void*);
** set the return value of the application-defined function to be
** a text string which is represented as UTF-8, UTF-16 native byte order,
** UTF-16 little endian, or UTF-16 big endian, respectively.
-** ^The sqlite3_result_text64() interface sets the return value of an
-** application-defined function to be a text string in an encoding
-** specified by the fifth (and last) parameter, which must be one
-** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
** ^SQLite takes the text result from the application from
** the 2nd parameter of the sqlite3_result_text* interfaces.
** ^If the 3rd parameter to the sqlite3_result_text* interfaces
@@ -4853,7 +4398,7 @@ typedef void (*sqlite3_destructor_type)(void*);
** from [sqlite3_malloc()] before it returns.
**
** ^The sqlite3_result_value() interface sets the result of
-** the application-defined function to be a copy of the
+** the application-defined function to be a copy the
** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The
** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
** so that the [sqlite3_value] specified in the parameter may change or
@@ -4866,46 +4411,25 @@ typedef void (*sqlite3_destructor_type)(void*);
** than the one containing the application-defined function that received
** the [sqlite3_context] pointer, the results are undefined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(sqlite3_context*,const void*,
- sqlite3_uint64,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context*, double);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context*, const char*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context*, const void*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
- void(*)(void*), unsigned char encoding);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
-
-
-/*
-** CAPI3REF: Setting The Subtype Of An SQL Function
-** METHOD: sqlite3_context
-**
-** The sqlite3_result_subtype(C,T) function causes the subtype of
-** the result from the [application-defined SQL function] with
-** [sqlite3_context] C to be the value T. Only the lower 8 bits
-** of the subtype T are preserved in current versions of SQLite;
-** higher order bits are discarded.
-** The number of subtype bytes preserved by SQLite might increase
-** in future releases of SQLite.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned int);
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
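As a minimal sketch (the "half" function is illustrative, not part of the header), a scalar implementation picks exactly one sqlite3_result_*() call per invocation and is registered with sqlite3_create_function(), declared earlier in this header:

static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);               /* propagate NULL */
  }else{
    sqlite3_result_double(ctx, sqlite3_value_double(argv[0])/2.0);
  }
  (void)argc;
}
/* registration: sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0); */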
/*
** CAPI3REF: Define New Collating Sequences
-** METHOD: sqlite3
**
** ^These functions add, remove, or modify a [collation] associated
** with the [database connection] specified as the first argument.
@@ -4983,14 +4507,14 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned
**
** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
+SQLITE_API int sqlite3_create_collation(
sqlite3*,
const char *zName,
int eTextRep,
void *pArg,
int(*xCompare)(void*,int,const void*,int,const void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
+SQLITE_API int sqlite3_create_collation_v2(
sqlite3*,
const char *zName,
int eTextRep,
@@ -4998,7 +4522,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
int(*xCompare)(void*,int,const void*,int,const void*),
void(*xDestroy)(void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
+SQLITE_API int sqlite3_create_collation16(
sqlite3*,
const void *zName,
int eTextRep,
@@ -5008,7 +4532,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
/*
** CAPI3REF: Collation Needed Callbacks
-** METHOD: sqlite3
**
** ^To avoid having to register all collation sequences before a database
** can be used, a single callback function may be registered with the
@@ -5033,12 +4556,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
** [sqlite3_create_collation_v2()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
+SQLITE_API int sqlite3_collation_needed(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const char*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
+SQLITE_API int sqlite3_collation_needed16(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const void*)
@@ -5052,11 +4575,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_key(
+SQLITE_API int sqlite3_key(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
+SQLITE_API int sqlite3_key_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The key */
@@ -5070,11 +4593,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey(
+SQLITE_API int sqlite3_rekey(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The new key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
+SQLITE_API int sqlite3_rekey_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The new key */
@@ -5084,7 +4607,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
** Specify the activation key for a SEE database. Unless
** activated, none of the SEE routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
+SQLITE_API void sqlite3_activate_see(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -5094,7 +4617,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
** Specify the activation key for a CEROD database. Unless
** activated, none of the CEROD routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
+SQLITE_API void sqlite3_activate_cerod(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -5116,7 +4639,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
** all, then the behavior of sqlite3_sleep() may deviate from the description
** in the previous paragraphs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
+SQLITE_API int sqlite3_sleep(int);
/*
** CAPI3REF: Name Of The Folder Holding Temporary Files
@@ -5128,13 +4651,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
** is a NULL pointer, then SQLite performs a search for an appropriate
** temporary file directory.
**
-** Applications are strongly discouraged from using this global variable.
-** It is required to set a temporary folder on Windows Runtime (WinRT).
-** But for all other platforms, it is highly recommended that applications
-** neither read nor write this variable. This global variable is a relic
-** that exists for backwards compatibility of legacy applications and should
-** be avoided in new projects.
-**
** It is not safe to read or modify this variable in more than one
** thread at a time. It is not safe to read or modify this variable
** if a [database connection] is being used at the same time in a separate
@@ -5153,11 +4669,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
** Hence, if this variable is modified directly, either it should be
** made NULL or made to point to memory obtained from [sqlite3_malloc]
** or else the use of the [temp_store_directory pragma] should be avoided.
-** Except when requested by the [temp_store_directory pragma], SQLite
-** does not free the memory that sqlite3_temp_directory points to. If
-** the application wants that memory to be freed, it must do
-** so itself, taking care to only do so after all [database connection]
-** objects have been destroyed.
**
** <b>Note to Windows Runtime users:</b> The temporary directory must be set
** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various
@@ -5216,7 +4727,6 @@ SQLITE_API char *sqlite3_data_directory;
/*
** CAPI3REF: Test For Auto-Commit Mode
** KEYWORDS: {autocommit mode}
-** METHOD: sqlite3
**
** ^The sqlite3_get_autocommit() interface returns non-zero or
** zero if the given database connection is or is not in autocommit mode,
@@ -5235,11 +4745,10 @@ SQLITE_API char *sqlite3_data_directory;
** connection while this routine is running, then the return value
** is undefined.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
+SQLITE_API int sqlite3_get_autocommit(sqlite3*);
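A small sketch of a common use, assuming db is an open connection: only issue an explicit BEGIN when no transaction is already active.

static int beginIfNeeded(sqlite3 *db){
  /* Non-zero return from sqlite3_get_autocommit() means no explicit transaction is open. */
  if( sqlite3_get_autocommit(db) ){
    return sqlite3_exec(db, "BEGIN", 0, 0, 0);
  }
  return SQLITE_OK;
}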
/*
** CAPI3REF: Find The Database Handle Of A Prepared Statement
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_db_handle interface returns the [database connection] handle
** to which a [prepared statement] belongs. ^The [database connection]
@@ -5248,11 +4757,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
** create the statement in the first place.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
/*
** CAPI3REF: Return The Filename For A Database Connection
-** METHOD: sqlite3
**
** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename
** associated with database N of connection D. ^The main database file
@@ -5265,21 +4773,19 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
** will be an absolute pathname, even if the filename used
** to open the database originally was a URI or relative pathname.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Determine if a database is read-only
-** METHOD: sqlite3
**
** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N
** of connection D is read-only, 0 if it is read/write, or -1 if N is not
** the name of a database on connection D.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Find the next prepared statement
-** METHOD: sqlite3
**
** ^This interface returns a pointer to the next [prepared statement] after
** pStmt associated with the [database connection] pDb. ^If pStmt is NULL
@@ -5291,11 +4797,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbNa
** [sqlite3_next_stmt(D,S)] must refer to an open database
** connection and in particular must not be a NULL pointer.
*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
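A sketch of the usual cleanup loop built on this interface: walk every prepared statement still attached to a connection and finalize it (for example, before calling sqlite3_close()).

static void finalizeAll(sqlite3 *db){
  sqlite3_stmt *pStmt;
  /* Passing NULL returns the first statement; a NULL result means none remain. */
  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    sqlite3_finalize(pStmt);
  }
}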
/*
** CAPI3REF: Commit And Rollback Notification Callbacks
-** METHOD: sqlite3
**
** ^The sqlite3_commit_hook() interface registers a callback
** function to be invoked whenever a transaction is [COMMIT | committed].
@@ -5340,12 +4845,11 @@ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_
**
** See also the [sqlite3_update_hook()] interface.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
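A sketch of registering both hooks (onCommit/onRollback/installTxHooks are hypothetical names): the commit hook may veto the commit by returning non-zero, while the rollback hook is purely a notification.

static int onCommit(void *pArg){
  (void)pArg;
  return 0;               /* returning non-zero would turn the COMMIT into a ROLLBACK */
}
static void onRollback(void *pArg){
  (void)pArg;             /* e.g. invalidate application-side caches here */
}
static void installTxHooks(sqlite3 *db){
  sqlite3_commit_hook(db, onCommit, 0);     /* each call returns the pArg of any prior hook */
  sqlite3_rollback_hook(db, onRollback, 0);
}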
/*
** CAPI3REF: Data Change Notification Callbacks
-** METHOD: sqlite3
**
** ^The sqlite3_update_hook() interface registers a callback function
** with the [database connection] identified by the first argument
@@ -5392,7 +4896,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *),
** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()]
** interfaces.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
+SQLITE_API void *sqlite3_update_hook(
sqlite3*,
void(*)(void *,int ,char const *,char const *,sqlite3_int64),
void*
@@ -5422,17 +4926,12 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
** future releases of SQLite. Applications that care about shared
** cache setting should set it explicitly.
**
-** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0
-** and will always return SQLITE_MISUSE. On those systems,
-** shared cache mode should be enabled per-database connection via
-** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE].
-**
** This interface is threadsafe on processors where writing a
** 32-bit integer is atomic.
**
** See Also: [SQLite Shared-Cache Mode]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
+SQLITE_API int sqlite3_enable_shared_cache(int);
/*
** CAPI3REF: Attempt To Free Heap Memory
@@ -5448,11 +4947,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
**
** See also: [sqlite3_db_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
+SQLITE_API int sqlite3_release_memory(int);
/*
** CAPI3REF: Free Memory Used By A Database Connection
-** METHOD: sqlite3
**
** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
** memory as possible from database connection D. Unlike the
@@ -5462,7 +4960,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
**
** See also: [sqlite3_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
/*
** CAPI3REF: Impose A Limit On Heap Size
@@ -5514,7 +5012,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
** The circumstances under which SQLite will enforce the soft heap limit may
** changes in future releases of SQLite.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 N);
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
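A short sketch: a positive argument sets the advisory cap and returns the previous limit, while a negative argument only reports the current limit. The 64 MiB figure is an arbitrary example.

static void capHeap(void){
  sqlite3_int64 prev = sqlite3_soft_heap_limit64(64*1024*1024);  /* set an ~64 MiB advisory cap */
  sqlite3_int64 cur  = sqlite3_soft_heap_limit64(-1);            /* negative N: query without changing */
  (void)prev; (void)cur;
}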
/*
** CAPI3REF: Deprecated Soft Heap Limit Interface
@@ -5525,34 +5023,26 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64
** only. All new applications should use the
** [sqlite3_soft_heap_limit64()] interface rather than this one.
*/
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
/*
** CAPI3REF: Extract Metadata About A Column Of A Table
-** METHOD: sqlite3
-**
-** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns
-** information about column C of table T in database D
-** on [database connection] X.)^ ^The sqlite3_table_column_metadata()
-** interface returns SQLITE_OK and fills in the non-NULL pointers in
-** the final five arguments with appropriate values if the specified
-** column exists. ^The sqlite3_table_column_metadata() interface returns
-** SQLITE_ERROR and if the specified column does not exist.
-** ^If the column-name parameter to sqlite3_table_column_metadata() is a
-** NULL pointer, then this routine simply checks for the existance of the
-** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it
-** does not.
+**
+** ^This routine returns metadata about a specific column of a specific
+** database table accessible using the [database connection] handle
+** passed as the first function argument.
**
** ^The column is identified by the second, third and fourth parameters to
-** this function. ^(The second parameter is either the name of the database
+** this function. ^The second parameter is either the name of the database
** (i.e. "main", "temp", or an attached database) containing the specified
-** table or NULL.)^ ^If it is NULL, then all attached databases are searched
+** table or NULL. ^If it is NULL, then all attached databases are searched
** for the table using the same algorithm used by the database engine to
** resolve unqualified table references.
**
** ^The third and fourth parameters to this function are the table and column
-** name of the desired column, respectively.
+** name of the desired column, respectively. Neither of these parameters
+** may be NULL.
**
** ^Metadata is returned by writing to the memory locations passed as the 5th
** and subsequent parameters to this function. ^Any of these arguments may be
@@ -5571,17 +5061,16 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
** </blockquote>)^
**
** ^The memory pointed to by the character pointers returned for the
-** declaration type and collation sequence is valid until the next
+** declaration type and collation sequence is valid only until the next
** call to any SQLite API function.
**
** ^If the specified table is actually a view, an [error code] is returned.
**
-** ^If the specified column is "rowid", "oid" or "_rowid_" and the table
-** is not a [WITHOUT ROWID] table and an
+** ^If the specified column is "rowid", "oid" or "_rowid_" and an
** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output
** parameters are set for the explicitly declared column. ^(If there is no
-** [INTEGER PRIMARY KEY] column, then the outputs
-** for the [rowid] are set as follows:
+** explicitly declared [INTEGER PRIMARY KEY] column, then the output
+** parameters are set as follows:
**
** <pre>
** data type: "INTEGER"
@@ -5591,11 +5080,15 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
** auto increment: 0
** </pre>)^
**
-** ^This function causes all database schemas to be read from disk and
-** parsed, if that has not already been done, and returns an error if
-** any errors are encountered while loading the schema.
+** ^(This function may load one or more schemas from database files. If an
+** error occurs during this process, or if the requested table or column
+** cannot be found, an [error code] is returned and an error message left
+** in the [database connection] (to be retrieved using sqlite3_errmsg()).)^
+**
+** ^This API is only available if the library was compiled with the
+** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
+SQLITE_API int sqlite3_table_column_metadata(
sqlite3 *db, /* Connection handle */
const char *zDbName, /* Database name or NULL */
const char *zTableName, /* Table name */
@@ -5609,7 +5102,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
/*
** CAPI3REF: Load An Extension
-** METHOD: sqlite3
**
** ^This interface loads an SQLite extension library from the named file.
**
@@ -5642,7 +5134,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
**
** See also the [load_extension() SQL function].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
+SQLITE_API int sqlite3_load_extension(
sqlite3 *db, /* Load the extension into this database connection */
const char *zFile, /* Name of the shared library containing extension */
const char *zProc, /* Entry point. Derived from zFile if 0 */
@@ -5651,7 +5143,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
/*
** CAPI3REF: Enable Or Disable Extension Loading
-** METHOD: sqlite3
**
** ^So as not to open security holes in older applications that are
** unprepared to deal with [extension loading], and as a means of disabling
@@ -5663,7 +5154,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
** to turn extension loading on and call it with onoff==0 to turn
** it back off again.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
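A sketch of the recommended pattern (loadOneExtension and the caller-supplied filename are hypothetical): enable loading only around the call, and free any error message with sqlite3_free().

static int loadOneExtension(sqlite3 *db, const char *zFile){
  char *zErr = 0;
  int rc;
  sqlite3_enable_load_extension(db, 1);               /* extension loading is off by default */
  rc = sqlite3_load_extension(db, zFile, 0, &zErr);   /* NULL zProc: entry point derived from zFile */
  if( rc!=SQLITE_OK ){
    sqlite3_free(zErr);   /* error message, if any, was obtained from sqlite3_malloc() */
  }
  sqlite3_enable_load_extension(db, 0);               /* close the hole again when done */
  return rc;
}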
/*
** CAPI3REF: Automatically Load Statically Linked Extensions
@@ -5701,7 +5192,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
** See also: [sqlite3_reset_auto_extension()]
** and [sqlite3_cancel_auto_extension()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
+SQLITE_API int sqlite3_auto_extension(void (*xEntryPoint)(void));
/*
** CAPI3REF: Cancel Automatic Extension Loading
@@ -5713,7 +5204,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
** unregistered and it returns 0 if X was not on the list of initialization
** routines.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(void));
+SQLITE_API int sqlite3_cancel_auto_extension(void (*xEntryPoint)(void));
/*
** CAPI3REF: Reset Automatic Extension Loading
@@ -5721,7 +5212,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(
** ^This interface disables all automatic extensions previously
** registered using [sqlite3_auto_extension()].
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void);
+SQLITE_API void sqlite3_reset_auto_extension(void);
/*
** The interface to the virtual-table mechanism is currently considered
@@ -5848,31 +5339,13 @@ struct sqlite3_module {
** ^The estimatedRows value is an estimate of the number of rows that
** will be returned by the strategy.
**
-** The xBestIndex method may optionally populate the idxFlags field with a
-** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
-** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
-** assumes that the strategy may visit at most one row.
-**
-** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
-** SQLite also assumes that if a call to the xUpdate() method is made as
-** part of the same statement to delete or update a virtual table row and the
-** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
-** any database changes. In other words, if the xUpdate() returns
-** SQLITE_CONSTRAINT, the database contents must be exactly as they were
-** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
-** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
-** the xUpdate method are automatically rolled back by SQLite.
-**
** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
** structure for SQLite version 3.8.2. If a virtual table extension is
** used with an SQLite version earlier than 3.8.2, the results of attempting
** to read or write the estimatedRows field are undefined (but are likely
** to included crashing the application). The estimatedRows field should
** therefore only be used if [sqlite3_libversion_number()] returns a
-** value greater than or equal to 3008002. Similarly, the idxFlags field
-** was added for version 3.9.0. It may therefore only be used if
-** sqlite3_libversion_number() returns a value greater than or equal to
-** 3009000.
+** value greater than or equal to 3008002.
*/
struct sqlite3_index_info {
/* Inputs */
@@ -5900,16 +5373,9 @@ struct sqlite3_index_info {
double estimatedCost; /* Estimated cost of using this index */
/* Fields below are only available in SQLite 3.8.2 and later */
sqlite3_int64 estimatedRows; /* Estimated number of rows returned */
- /* Fields below are only available in SQLite 3.9.0 and later */
- int idxFlags; /* Mask of SQLITE_INDEX_SCAN_* flags */
};
/*
-** CAPI3REF: Virtual Table Scan Flags
-*/
-#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */
-
-/*
** CAPI3REF: Virtual Table Constraint Operator Codes
**
** These macros defined the allowed values for the
@@ -5926,7 +5392,6 @@ struct sqlite3_index_info {
/*
** CAPI3REF: Register A Virtual Table Implementation
-** METHOD: sqlite3
**
** ^These routines are used to register a new [virtual table module] name.
** ^Module names must be registered before
@@ -5950,13 +5415,13 @@ struct sqlite3_index_info {
** interface is equivalent to sqlite3_create_module_v2() with a NULL
** destructor.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
+SQLITE_API int sqlite3_create_module(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
void *pClientData /* Client data for xCreate/xConnect */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
+SQLITE_API int sqlite3_create_module_v2(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
@@ -5984,7 +5449,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
*/
struct sqlite3_vtab {
const sqlite3_module *pModule; /* The module for this virtual table */
- int nRef; /* Number of open cursors */
+ int nRef; /* NO LONGER USED */
char *zErrMsg; /* Error message from sqlite3_mprintf() */
/* Virtual table implementations will typically add additional fields */
};
@@ -6019,11 +5484,10 @@ struct sqlite3_vtab_cursor {
** to declare the format (the names and datatypes of the columns) of
** the virtual tables they implement.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
+SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
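A sketch of only the xConnect/xCreate step of a module; the myConnect name and the three-column schema string are illustrative, and a real module would also supply xBestIndex, xOpen, xFilter, and the other methods of sqlite3_module.

static int myConnect(
  sqlite3 *db, void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab, char **pzErr
){
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b, c)");
  if( rc==SQLITE_OK ){
    sqlite3_vtab *pNew = (sqlite3_vtab*)sqlite3_malloc(sizeof(*pNew));
    if( pNew==0 ) return SQLITE_NOMEM;
    pNew->pModule = 0;   /* filled in by the SQLite core */
    pNew->nRef = 0;
    pNew->zErrMsg = 0;
    *ppVtab = pNew;
  }
  (void)pAux; (void)argc; (void)argv; (void)pzErr;
  return rc;
}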
/*
** CAPI3REF: Overload A Function For A Virtual Table
-** METHOD: sqlite3
**
** ^(Virtual tables can provide alternative implementations of functions
** using the [xFindFunction] method of the [virtual table module].
@@ -6038,7 +5502,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
** purpose is to be a placeholder function that can be overloaded
** by a [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
+SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
/*
** The interface to the virtual-table mechanism defined above (back up
@@ -6066,8 +5530,6 @@ typedef struct sqlite3_blob sqlite3_blob;
/*
** CAPI3REF: Open A BLOB For Incremental I/O
-** METHOD: sqlite3
-** CONSTRUCTOR: sqlite3_blob
**
** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located
** in row iRow, column zColumn, table zTable in database zDb;
@@ -6077,42 +5539,26 @@ typedef struct sqlite3_blob sqlite3_blob;
** SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
** </pre>)^
**
-** ^(Parameter zDb is not the filename that contains the database, but
-** rather the symbolic name of the database. For attached databases, this is
-** the name that appears after the AS keyword in the [ATTACH] statement.
-** For the main database file, the database name is "main". For TEMP
-** tables, the database name is "temp".)^
-**
** ^If the flags parameter is non-zero, then the BLOB is opened for read
-** and write access. ^If the flags parameter is zero, the BLOB is opened for
-** read-only access.
-**
-** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored
-** in *ppBlob. Otherwise an [error code] is returned and, unless the error
-** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
-** the API is not misused, it is always safe to call [sqlite3_blob_close()]
-** on *ppBlob after this function it returns.
-**
-** This function fails with SQLITE_ERROR if any of the following are true:
-** <ul>
-** <li> ^(Database zDb does not exist)^,
-** <li> ^(Table zTable does not exist within database zDb)^,
-** <li> ^(Table zTable is a WITHOUT ROWID table)^,
-** <li> ^(Column zColumn does not exist)^,
-** <li> ^(Row iRow is not present in the table)^,
-** <li> ^(The specified column of row iRow contains a value that is not
-** a TEXT or BLOB value)^,
-** <li> ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
-** constraint and the blob is being opened for read/write access)^,
-** <li> ^([foreign key constraints | Foreign key constraints] are enabled,
-** column zColumn is part of a [child key] definition and the blob is
-** being opened for read/write access)^.
-** </ul>
-**
-** ^Unless it returns SQLITE_MISUSE, this function sets the
-** [database connection] error code and message accessible via
-** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
-**
+** and write access. ^If it is zero, the BLOB is opened for read access.
+** ^It is not possible to open a column that is part of an index or primary
+** key for writing. ^If [foreign key constraints] are enabled, it is
+** not possible to open a column that is part of a [child key] for writing.
+**
+** ^Note that the database name is not the filename that contains
+** the database but rather the symbolic name of the database that
+** appears after the AS keyword when the database is connected using [ATTACH].
+** ^For the main database file, the database name is "main".
+** ^For TEMP tables, the database name is "temp".
+**
+** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is written
+** to *ppBlob. Otherwise an [error code] is returned and *ppBlob is set
+** to be a null pointer.)^
+** ^This function sets the [database connection] error code and message
+** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()] and related
+** functions. ^Note that the *ppBlob variable is always initialized in a
+** way that makes it safe to invoke [sqlite3_blob_close()] on *ppBlob
+** regardless of the success or failure of this routine.
**
** ^(If the row that a BLOB handle points to is modified by an
** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
@@ -6130,14 +5576,18 @@ typedef struct sqlite3_blob sqlite3_blob;
** interface. Use the [UPDATE] SQL command to change the size of a
** blob.
**
+** ^The [sqlite3_blob_open()] interface will fail for a [WITHOUT ROWID]
+** table. Incremental BLOB I/O is not possible on [WITHOUT ROWID] tables.
+**
** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces
-** and the built-in [zeroblob] SQL function may be used to create a
-** zero-filled blob to read or write using the incremental-blob interface.
+** and the built-in [zeroblob] SQL function can be used, if desired,
+** to create an empty, zero-filled blob in which to read or write using
+** this interface.
**
** To avoid a resource leak, every open [BLOB handle] should eventually
** be released by a call to [sqlite3_blob_close()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
+SQLITE_API int sqlite3_blob_open(
sqlite3*,
const char *zDb,
const char *zTable,
@@ -6149,7 +5599,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
/*
** CAPI3REF: Move a BLOB Handle to a New Row
-** METHOD: sqlite3_blob
**
** ^This function is used to move an existing blob handle so that it points
** to a different row of the same database table. ^The new row is identified
@@ -6170,34 +5619,34 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
**
** ^This function sets the database handle error code and message.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
/*
** CAPI3REF: Close A BLOB Handle
-** DESTRUCTOR: sqlite3_blob
**
-** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed
-** unconditionally. Even if this routine returns an error code, the
-** handle is still closed.)^
+** ^Closes an open [BLOB handle].
+**
+** ^Closing a BLOB shall cause the current transaction to commit
+** if there are no other BLOBs, no pending prepared statements, and the
+** database connection is in [autocommit mode].
+** ^If any writes were made to the BLOB, they might be held in cache
+** until the close operation if they will fit.
**
-** ^If the blob handle being closed was opened for read-write access, and if
-** the database is in auto-commit mode and there are no other open read-write
-** blob handles or active write statements, the current transaction is
-** committed. ^If an error occurs while committing the transaction, an error
-** code is returned and the transaction rolled back.
+** ^(Closing the BLOB often forces the changes
+** out to disk and so if any I/O errors occur, they will likely occur
+** at the time when the BLOB is closed. Any errors that occur during
+** closing are reported as a non-zero return value.)^
**
-** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
-** with a null pointer (such as would be returned by a failed call to
-** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
-** is passed a valid open blob handle, the values returned by the
-** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
+** ^(The BLOB is closed unconditionally. Even if this routine returns
+** an error code, the BLOB is still closed.)^
+**
+** ^Calling this routine with a null pointer (such as would be returned
+** by a failed call to [sqlite3_blob_open()]) is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
/*
** CAPI3REF: Return The Size Of An Open BLOB
-** METHOD: sqlite3_blob
**
** ^Returns the size in bytes of the BLOB accessible via the
** successfully opened [BLOB handle] in its only argument. ^The
@@ -6209,11 +5658,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
** been closed by [sqlite3_blob_close()]. Passing any other pointer in
** to this routine results in undefined and probably undesirable behavior.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
/*
** CAPI3REF: Read Data From A BLOB Incrementally
-** METHOD: sqlite3_blob
**
** ^(This function is used to read data from an open [BLOB handle] into a
** caller-supplied buffer. N bytes of data are copied into buffer Z
@@ -6238,33 +5686,26 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
**
** See also: [sqlite3_blob_write()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
/*
** CAPI3REF: Write Data Into A BLOB Incrementally
-** METHOD: sqlite3_blob
**
-** ^(This function is used to write data into an open [BLOB handle] from a
-** caller-supplied buffer. N bytes of data are copied from the buffer Z
-** into the open BLOB, starting at offset iOffset.)^
-**
-** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
-** Otherwise, an [error code] or an [extended error code] is returned.)^
-** ^Unless SQLITE_MISUSE is returned, this function sets the
-** [database connection] error code and message accessible via
-** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
+** ^This function is used to write data into an open [BLOB handle] from a
+** caller-supplied buffer. ^N bytes of data are copied from the buffer Z
+** into the open BLOB, starting at offset iOffset.
**
** ^If the [BLOB handle] passed as the first argument was not opened for
** writing (the flags parameter to [sqlite3_blob_open()] was zero),
** this function returns [SQLITE_READONLY].
**
-** This function may only modify the contents of the BLOB; it is
+** ^This function may only modify the contents of the BLOB; it is
** not possible to increase the size of a BLOB using this API.
** ^If offset iOffset is less than N bytes from the end of the BLOB,
-** [SQLITE_ERROR] is returned and no data is written. The size of the
-** BLOB (and hence the maximum value of N+iOffset) can be determined
-** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less
-** than zero [SQLITE_ERROR] is returned and no data is written.
+** [SQLITE_ERROR] is returned and no data is written. ^If N is
+** less than zero [SQLITE_ERROR] is returned and no data is written.
+** The size of the BLOB (and hence the maximum value of N+iOffset)
+** can be determined using the [sqlite3_blob_bytes()] interface.
**
** ^An attempt to write to an expired [BLOB handle] fails with an
** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred
@@ -6273,6 +5714,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N,
** have been overwritten by the statement that expired the BLOB handle
** or by other independent statements.
**
+** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+**
** This routine only works on a [BLOB handle] which has been created
** by a prior successful call to [sqlite3_blob_open()] and which has not
** been closed by [sqlite3_blob_close()]. Passing any other pointer in
@@ -6280,7 +5724,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N,
**
** See also: [sqlite3_blob_read()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
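A sketch tying the incremental-I/O calls together; the "doc" table and "body" column are hypothetical. The handle is opened read/write (non-zero flags), the size is checked with sqlite3_blob_bytes(), and sqlite3_blob_close() is safe to call even when the open failed.

static int touchBlobPrefix(sqlite3 *db, sqlite3_int64 iRow){
  sqlite3_blob *pBlob = 0;
  char buf[32];
  int rc = sqlite3_blob_open(db, "main", "doc", "body", iRow, 1, &pBlob);
  if( rc==SQLITE_OK && sqlite3_blob_bytes(pBlob)>=(int)sizeof(buf) ){
    rc = sqlite3_blob_read(pBlob, buf, (int)sizeof(buf), 0);      /* read the first 32 bytes */
    if( rc==SQLITE_OK ){
      rc = sqlite3_blob_write(pBlob, buf, (int)sizeof(buf), 0);   /* overwrite them in place */
    }
  }
  sqlite3_blob_close(pBlob);   /* harmless no-op if pBlob is still NULL */
  return rc;
}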
/*
** CAPI3REF: Virtual File System Objects
@@ -6311,9 +5755,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z,
** ^(If the default VFS is unregistered, another VFS is chosen as
** the default. The choice for the new VFS is arbitrary.)^
*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfsName);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
/*
** CAPI3REF: Mutexes
@@ -6325,51 +5769,45 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** The SQLite source code contains multiple implementations
** of these mutex routines. An appropriate implementation
-** is selected automatically at compile-time. The following
+** is selected automatically at compile-time. ^(The following
** implementations are available in the SQLite core:
**
** <ul>
** <li> SQLITE_MUTEX_PTHREADS
** <li> SQLITE_MUTEX_W32
** <li> SQLITE_MUTEX_NOOP
-** </ul>
+** </ul>)^
**
-** The SQLITE_MUTEX_NOOP implementation is a set of routines
+** ^The SQLITE_MUTEX_NOOP implementation is a set of routines
** that does no real locking and is appropriate for use in
-** a single-threaded application. The SQLITE_MUTEX_PTHREADS and
+** a single-threaded application. ^The SQLITE_MUTEX_PTHREADS and
** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix
** and Windows.
**
-** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
+** ^(If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex
** implementation is included with the library. In this case the
** application must supply a custom mutex implementation using the
** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function
** before calling sqlite3_initialize() or any other public sqlite3_
-** function that calls sqlite3_initialize().
+** function that calls sqlite3_initialize().)^
**
** ^The sqlite3_mutex_alloc() routine allocates a new
-** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc()
-** routine returns NULL if it is unable to allocate the requested
-** mutex. The argument to sqlite3_mutex_alloc() must one of these
-** integer constants:
+** mutex and returns a pointer to it. ^If it returns NULL
+** that means that a mutex could not be allocated. ^SQLite
+** will unwind its stack and return an error. ^(The argument
+** to sqlite3_mutex_alloc() is one of these integer constants:
**
** <ul>
** <li> SQLITE_MUTEX_FAST
** <li> SQLITE_MUTEX_RECURSIVE
** <li> SQLITE_MUTEX_STATIC_MASTER
** <li> SQLITE_MUTEX_STATIC_MEM
-** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_MEM2
** <li> SQLITE_MUTEX_STATIC_PRNG
** <li> SQLITE_MUTEX_STATIC_LRU
-** <li> SQLITE_MUTEX_STATIC_PMEM
-** <li> SQLITE_MUTEX_STATIC_APP1
-** <li> SQLITE_MUTEX_STATIC_APP2
-** <li> SQLITE_MUTEX_STATIC_APP3
-** <li> SQLITE_MUTEX_STATIC_VFS1
-** <li> SQLITE_MUTEX_STATIC_VFS2
-** <li> SQLITE_MUTEX_STATIC_VFS3
-** </ul>
+** <li> SQLITE_MUTEX_STATIC_LRU2
+** </ul>)^
**
** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE)
** cause sqlite3_mutex_alloc() to create
@@ -6377,14 +5815,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
** The mutex implementation does not need to make a distinction
** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
-** not want to. SQLite will only request a recursive mutex in
-** cases where it really needs one. If a faster non-recursive mutex
+** not want to. ^SQLite will only request a recursive mutex in
+** cases where it really needs one. ^If a faster non-recursive mutex
** implementation is available on the host platform, the mutex subsystem
** might return such a mutex in response to SQLITE_MUTEX_FAST.
**
** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other
** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return
-** a pointer to a static preexisting mutex. ^Nine static mutexes are
+** a pointer to a static preexisting mutex. ^Six static mutexes are
** used by the current version of SQLite. Future versions of SQLite
** may add additional static mutexes. Static mutexes are for internal
** use by SQLite only. Applications that use SQLite mutexes should
@@ -6393,13 +5831,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
-** returns a different mutex on every call. ^For the static
+** returns a different mutex on every call. ^But for the static
** mutex types, the same mutex is returned on every call that has
** the same type number.
**
** ^The sqlite3_mutex_free() routine deallocates a previously
-** allocated dynamic mutex. Attempting to deallocate a static
-** mutex results in undefined behavior.
+** allocated dynamic mutex. ^SQLite is careful to deallocate every
+** dynamic mutex that it allocates. The dynamic mutexes must not be in
+** use when they are deallocated. Attempting to deallocate a static
+** mutex results in undefined behavior. ^SQLite never deallocates
+** a static mutex.
**
** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
** to enter a mutex. ^If another thread is already within the mutex,
@@ -6407,21 +5848,23 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK]
** upon successful entry. ^(Mutexes created using
** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread.
-** In such cases, the
+** In such cases the,
** mutex must be exited an equal number of times before another thread
-** can enter.)^ If the same thread tries to enter any mutex other
-** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined.
+** can enter.)^ ^(If the same thread tries to enter any other
+** kind of mutex more than once, the behavior is undefined.
+** SQLite will never exhibit
+** such behavior in its own use of mutexes.)^
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. The SQLite core only ever uses
+** sqlite3_mutex_try() as an optimization so this is acceptable behavior.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
-** previously entered by the same thread. The behavior
+** previously entered by the same thread. ^(The behavior
** is undefined if the mutex is not currently entered by the
-** calling thread or is not currently allocated.
+** calling thread or is not currently allocated. SQLite will
+** never do either.)^
**
** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or
** sqlite3_mutex_leave() is a NULL pointer, then all three routines
@@ -6429,11 +5872,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int);
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*);
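A sketch of the dynamic-mutex lifecycle (withRecursiveMutex is a hypothetical name): allocate, enter, optionally re-enter because the mutex is recursive, then leave and free. Only dynamic mutexes may be freed.

static void withRecursiveMutex(void){
  sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
  if( p==0 ) return;                        /* allocation can fail */
  sqlite3_mutex_enter(p);
  if( sqlite3_mutex_try(p)==SQLITE_OK ){    /* recursive: the same thread may re-enter */
    sqlite3_mutex_leave(p);
  }
  sqlite3_mutex_leave(p);
  sqlite3_mutex_free(p);                    /* never call this on a static mutex */
}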
/*
** CAPI3REF: Mutex Methods Object
@@ -6442,9 +5885,9 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
** used to allocate and use mutexes.
**
** Usually, the default mutex implementations provided by SQLite are
-** sufficient, however the application has the option of substituting a custom
+** sufficient, however the user has the option of substituting a custom
** implementation for specialized deployments or systems for which SQLite
-** does not provide a suitable implementation. In this case, the application
+** does not provide a suitable implementation. In this case, the user
** creates and populates an instance of this structure to pass
** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
** Additionally, an instance of this structure can be used as an
@@ -6485,13 +5928,13 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
** (i.e. it is acceptable to provide an implementation that segfaults if
** it is passed a NULL pointer).
**
-** The xMutexInit() method must be threadsafe. It must be harmless to
+** The xMutexInit() method must be threadsafe. ^It must be harmless to
** invoke xMutexInit() multiple times within the same process and without
** intervening calls to xMutexEnd(). Second and subsequent calls to
** xMutexInit() must be no-ops.
**
-** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
-** and its associates). Similarly, xMutexAlloc() must not use SQLite memory
+** ^xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
+** and its associates). ^Similarly, xMutexAlloc() must not use SQLite memory
** allocation for a static mutex. ^However xMutexAlloc() may use SQLite
** memory allocation for a fast or recursive mutex.
**
@@ -6517,34 +5960,34 @@ struct sqlite3_mutex_methods {
** CAPI3REF: Mutex Verification Routines
**
** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines
-** are intended for use inside assert() statements. The SQLite core
+** are intended for use inside assert() statements. ^The SQLite core
** never uses these routines except inside an assert() and applications
-** are advised to follow the lead of the core. The SQLite core only
+** are advised to follow the lead of the core. ^The SQLite core only
** provides implementations for these routines when it is compiled
-** with the SQLITE_DEBUG flag. External mutex implementations
+** with the SQLITE_DEBUG flag. ^External mutex implementations
** are only required to provide these routines if SQLITE_DEBUG is
** defined and if NDEBUG is not defined.
**
-** These routines should return true if the mutex in their argument
+** ^These routines should return true if the mutex in their argument
** is held or not held, respectively, by the calling thread.
**
-** The implementation is not required to provide versions of these
+** ^The implementation is not required to provide versions of these
** routines that actually work. If the implementation does not provide working
** versions of these routines, it should at least provide stubs that always
** return true so that one does not get spurious assertion failures.
**
-** If the argument to sqlite3_mutex_held() is a NULL pointer then
+** ^If the argument to sqlite3_mutex_held() is a NULL pointer then
** the routine should return 1. This seems counter-intuitive since
** clearly the mutex cannot be held if it does not exist. But
** the reason the mutex does not exist is because the build is not
** using mutexes. And we do not want the assert() containing the
** call to sqlite3_mutex_held() to fail, so a non-zero return is
-** the appropriate thing to do. The sqlite3_mutex_notheld()
+** the appropriate thing to do. ^The sqlite3_mutex_notheld()
** interface should also return 1 when given a NULL pointer.
*/
#ifndef NDEBUG
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
#endif
/*
@@ -6567,16 +6010,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
-#define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */
-#define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */
-#define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */
-#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */
-#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */
-#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */
/*
** CAPI3REF: Retrieve the mutex for a database connection
-** METHOD: sqlite3
**
** ^This interface returns a pointer the [sqlite3_mutex] object that
** serializes access to the [database connection] given in the argument
@@ -6584,11 +6020,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
** ^If the [threading mode] is Single-thread or Multi-thread then this
** routine returns a NULL pointer.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
/*
** CAPI3REF: Low-Level Control Of Database Files
-** METHOD: sqlite3
**
** ^The [sqlite3_file_control()] interface makes a direct call to the
** xFileControl method for the [sqlite3_io_methods] object associated
@@ -6619,7 +6054,7 @@ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
**
** See also: [SQLITE_FCNTL_LOCKSTATE]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
/*
** CAPI3REF: Testing Interface
@@ -6638,7 +6073,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName
** Unlike most of the SQLite API, this function is not guaranteed to
** operate consistently from one release to the next.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
+SQLITE_API int sqlite3_test_control(int op, ...);
/*
** CAPI3REF: Testing Interface Operation Codes
@@ -6666,19 +6101,14 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ISKEYWORD 16
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
-#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
+#define SQLITE_TESTCTRL_EXPLAIN_STMT 19
#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
-#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
-#define SQLITE_TESTCTRL_BYTEORDER 22
-#define SQLITE_TESTCTRL_ISINIT 23
-#define SQLITE_TESTCTRL_SORTER_MMAP 24
-#define SQLITE_TESTCTRL_IMPOSTER 25
-#define SQLITE_TESTCTRL_LAST 25
+#define SQLITE_TESTCTRL_LAST 20
/*
** CAPI3REF: SQLite Runtime Status
**
-** ^These interfaces are used to retrieve runtime status information
+** ^This interface is used to retrieve runtime status information
** about the performance of SQLite, and optionally to reset various
** highwater marks. ^The first argument is an integer code for
** the specific parameter to measure. ^(Recognized integer codes
@@ -6692,22 +6122,19 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
** ^(Other parameters record only the highwater mark and not the current
** value. For these latter parameters nothing is written into *pCurrent.)^
**
-** ^The sqlite3_status() and sqlite3_status64() routines return
-** SQLITE_OK on success and a non-zero [error code] on failure.
+** ^The sqlite3_status() routine returns SQLITE_OK on success and a
+** non-zero [error code] on failure.
**
-** If either the current value or the highwater mark is too large to
-** be represented by a 32-bit integer, then the values returned by
-** sqlite3_status() are undefined.
+** This routine is threadsafe but is not atomic. This routine can be
+** called while other threads are running the same or different SQLite
+** interfaces. However the values returned in *pCurrent and
+** *pHighwater reflect the status of SQLite at different points in time
+** and it is possible that another thread might change the parameter
+** in between the times when *pCurrent and *pHighwater are written.
**
** See also: [sqlite3_db_status()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
- int op,
- sqlite3_int64 *pCurrent,
- sqlite3_int64 *pHighwater,
- int resetFlag
-);
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
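To make the in/out parameters concrete, a small sketch of calling the 32-bit sqlite3_status() interface kept by this revert. SQLITE_STATUS_MEMORY_USED is one of the status verbs defined elsewhere in this header; passing 1 as the last argument resets the high-water mark.

#include <stdio.h>
#include <sqlite3.h>

/* Report current heap usage and the peak since the last reset. */
static void report_memory_status(void){
  int cur = 0, hiwtr = 0;
  if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hiwtr, 1)==SQLITE_OK ){
    printf("memory used: %d bytes (peak %d bytes)\n", cur, hiwtr);
  }
}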
/*
@@ -6805,7 +6232,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
/*
** CAPI3REF: Database Connection Status
-** METHOD: sqlite3
**
** ^This interface is used to retrieve runtime status information
** about a single [database connection]. ^The first argument is the
@@ -6826,7 +6252,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
**
** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
+SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
/*
** CAPI3REF: Status Parameters for database connections
@@ -6868,12 +6294,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
** the current value is always zero.)^
**
** [[SQLITE_DBSTATUS_CACHE_USED]] ^(<dt>SQLITE_DBSTATUS_CACHE_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap
+** <dd>This parameter returns the approximate number of bytes of heap
** memory used by all pager caches associated with the database connection.)^
** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
**
** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap
+** <dd>This parameter returns the approximate number of bytes of heap
** memory used to store the schema for all databases associated
** with the connection - main, temp, and any [ATTACH]-ed databases.)^
** ^The full amount of memory used by the schemas is reported, even if the
@@ -6882,7 +6308,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
**
** [[SQLITE_DBSTATUS_STMT_USED]] ^(<dt>SQLITE_DBSTATUS_STMT_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap
+** <dd>This parameter returns the approximate number of bytes of heap
** and lookaside memory used by all prepared statements associated with
** the database connection.)^
** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
@@ -6934,7 +6360,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
/*
** CAPI3REF: Prepared Statement Status
-** METHOD: sqlite3_stmt
**
** ^(Each prepared statement maintains various
** [SQLITE_STMTSTATUS counters] that measure the number
@@ -6956,7 +6381,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
**
** See also: [sqlite3_status()] and [sqlite3_db_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
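As an illustration of the per-statement counters described above, a sketch that reads one counter after a query has run. SQLITE_STMTSTATUS_FULLSCAN_STEP is one of the counter codes defined later in this header; the final 0 means the counter is not reset.

#include <stdio.h>
#include <sqlite3.h>

/* A large value here usually suggests a missing index on the queried table. */
static void report_fullscan_steps(sqlite3_stmt *pStmt){
  int nFullScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
  printf("full-scan steps: %d\n", nFullScan);
}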
/*
** CAPI3REF: Status Parameters for prepared statements
@@ -7283,10 +6708,6 @@ typedef struct sqlite3_backup sqlite3_backup;
** must be different or else sqlite3_backup_init(D,N,S,M) will fail with
** an error.
**
-** ^A call to sqlite3_backup_init() will fail, returning SQLITE_ERROR, if
-** there is already a read or read-write transaction open on the
-** destination database.
-**
** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is
** returned and an error code and error message are stored in the
** destination [database connection] D.
@@ -7379,20 +6800,20 @@ typedef struct sqlite3_backup sqlite3_backup;
** is not a permanent error and does not affect the return value of
** sqlite3_backup_finish().
**
-** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
+** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
** <b>sqlite3_backup_remaining() and sqlite3_backup_pagecount()</b>
**
-** ^The sqlite3_backup_remaining() routine returns the number of pages still
-** to be backed up at the conclusion of the most recent sqlite3_backup_step().
-** ^The sqlite3_backup_pagecount() routine returns the total number of pages
-** in the source database at the conclusion of the most recent
-** sqlite3_backup_step().
-** ^(The values returned by these functions are only updated by
-** sqlite3_backup_step(). If the source database is modified in a way that
-** changes the size of the source database or the number of pages remaining,
-** those changes are not reflected in the output of sqlite3_backup_pagecount()
-** and sqlite3_backup_remaining() until after the next
-** sqlite3_backup_step().)^
+** ^Each call to sqlite3_backup_step() sets two values inside
+** the [sqlite3_backup] object: the number of pages still to be backed
+** up and the total number of pages in the source database file.
+** The sqlite3_backup_remaining() and sqlite3_backup_pagecount() interfaces
+** retrieve these two values, respectively.
+**
+** ^The values returned by these functions are only updated by
+** sqlite3_backup_step(). ^If the source database is modified during a backup
+** operation, then the values are not updated to account for any extra
+** pages that need to be updated or the size of the source database file
+** changing.
**
** <b>Concurrent Usage of Database Handles</b>
**
@@ -7425,20 +6846,19 @@ typedef struct sqlite3_backup sqlite3_backup;
** same time as another thread is invoking sqlite3_backup_step() it is
** possible that they return invalid values.
*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
sqlite3 *pDest, /* Destination database handle */
const char *zDestName, /* Destination database name */
sqlite3 *pSource, /* Source database handle */
const char *zSourceName /* Source database name */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
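For orientation, a minimal sketch of the backup loop these declarations support, assuming both connections are already open. The helper name and the 64-page step size are arbitrary illustrative choices, and error handling is abbreviated; a real application would usually sleep briefly before retrying on SQLITE_BUSY or SQLITE_LOCKED.

#include <sqlite3.h>

/* Copy the "main" database of pSrc into the "main" database of pDest. */
static int backup_whole_db(sqlite3 *pDest, sqlite3 *pSrc){
  sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSrc, "main");
  int rc;
  if( p==0 ) return sqlite3_errcode(pDest);
  do{
    rc = sqlite3_backup_step(p, 64);   /* copy up to 64 pages per step */
    /* sqlite3_backup_remaining(p) and sqlite3_backup_pagecount(p) could
    ** drive a progress indicator here; both reflect the most recent step. */
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  sqlite3_backup_finish(p);            /* release resources */
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}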
/*
** CAPI3REF: Unlock Notification
-** METHOD: sqlite3
**
** ^When running in shared-cache mode, a database operation may fail with
** an [SQLITE_LOCKED] error if the required locks on the shared-cache or
@@ -7551,7 +6971,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
** the special "DROP TABLE/INDEX" case, the extended error code is just
** SQLITE_LOCKED.)^
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
+SQLITE_API int sqlite3_unlock_notify(
sqlite3 *pBlocked, /* Waiting connection */
void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
void *pNotifyArg /* Argument to pass to xNotify */
@@ -7566,8 +6986,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
** strings in a case-independent fashion, using the same definition of "case
** independence" that SQLite uses internally when comparing identifiers.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *);
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
+SQLITE_API int sqlite3_stricmp(const char *, const char *);
+SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
/*
** CAPI3REF: String Globbing
@@ -7582,7 +7002,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
** Note that this routine returns zero on a match and non-zero if the strings
** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr);
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
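A tiny, purely illustrative sketch of the string helpers declared above. Note the convention stated in the comment: zero means "match" for sqlite3_strglob(), just as zero means "equal" for the comparison routines.

#include <assert.h>
#include <sqlite3.h>

static void string_helper_examples(void){
  assert( sqlite3_stricmp("ABC", "abc")==0 );           /* case-independent equal */
  assert( sqlite3_strnicmp("ABCdef", "abcXYZ", 3)==0 ); /* compare first 3 bytes  */
  assert( sqlite3_strglob("sqlite*", "sqlite3.h")==0 ); /* 0 == pattern matches   */
  assert( sqlite3_strglob("sqlite*", "README")!=0 );
}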
/*
** CAPI3REF: Error Logging Interface
@@ -7605,17 +7025,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zSt
** a few hundred characters, it will be truncated to the length of the
** buffer.
*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...);
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...);
/*
** CAPI3REF: Write-Ahead Log Commit Hook
-** METHOD: sqlite3
**
** ^The [sqlite3_wal_hook()] function is used to register a callback that
-** is invoked each time data is committed to a database in wal mode.
+** will be invoked each time a database connection commits data to a
+** [write-ahead log] (i.e. whenever a transaction is committed in
+** [journal_mode | journal_mode=WAL mode]).
**
-** ^(The callback is invoked by SQLite after the commit has taken place and
-** the associated write-lock on the database released)^, so the implementation
+** ^The callback is invoked by SQLite after the commit has taken place and
+** the associated write-lock on the database released, so the implementation
** may read, write or [checkpoint] the database as required.
**
** ^The first parameter passed to the callback function when it is invoked
@@ -7641,7 +7062,7 @@ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...)
** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
** overwrite any prior [sqlite3_wal_hook()] settings.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
+SQLITE_API void *sqlite3_wal_hook(
sqlite3*,
int(*)(void *,sqlite3*,const char*,int),
void*
@@ -7649,7 +7070,6 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
/*
** CAPI3REF: Configure an auto-checkpoint
-** METHOD: sqlite3
**
** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around
** [sqlite3_wal_hook()] that causes any database on [database connection] D
@@ -7667,132 +7087,103 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
** ^The [wal_autocheckpoint pragma] can be used to invoke this interface
** from SQL.
**
-** ^Checkpoints initiated by this mechanism are
-** [sqlite3_wal_checkpoint_v2|PASSIVE].
-**
** ^Every new [database connection] defaults to having the auto-checkpoint
** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT]
** pages. The use of this interface
** is only necessary if the default setting is found to be suboptimal
** for a particular application.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
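By way of example, a sketch of configuring the auto-checkpoint threshold on a freshly opened connection. The helper name and the value 100 are arbitrary illustrative choices; 1000 remains the default described above.

#include <sqlite3.h>

/* Open a database, switch it to WAL mode, and checkpoint roughly every
** 100 frames instead of the default 1000. */
static int open_wal_db(const char *zFilename, sqlite3 **ppDb){
  int rc = sqlite3_open(zFilename, ppDb);
  if( rc==SQLITE_OK ) rc = sqlite3_exec(*ppDb, "PRAGMA journal_mode=WAL;", 0, 0, 0);
  if( rc==SQLITE_OK ) rc = sqlite3_wal_autocheckpoint(*ppDb, 100);
  return rc;
}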
/*
** CAPI3REF: Checkpoint a database
-** METHOD: sqlite3
**
-** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to
-** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^
+** ^The [sqlite3_wal_checkpoint(D,X)] interface causes database named X
+** on [database connection] D to be [checkpointed]. ^If X is NULL or an
+** empty string, then a checkpoint is run on all databases of
+** connection D. ^If the database connection D is not in
+** [WAL | write-ahead log mode] then this interface is a harmless no-op.
**
-** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the
-** [write-ahead log] for database X on [database connection] D to be
-** transferred into the database file and for the write-ahead log to
-** be reset. See the [checkpointing] documentation for additional
-** information.
+** ^The [wal_checkpoint pragma] can be used to invoke this interface
+** from SQL. ^The [sqlite3_wal_autocheckpoint()] interface and the
+** [wal_autocheckpoint pragma] can be used to cause this interface to be
+** run whenever the WAL reaches a certain size threshold.
**
-** This interface used to be the only way to cause a checkpoint to
-** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()]
-** interface was added. This interface is retained for backwards
-** compatibility and as a convenience for applications that need to manually
-** start a callback but which do not need the full power (and corresponding
-** complication) of [sqlite3_wal_checkpoint_v2()].
+** See also: [sqlite3_wal_checkpoint_v2()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
/*
** CAPI3REF: Checkpoint a database
-** METHOD: sqlite3
**
-** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint
-** operation on database X of [database connection] D in mode M. Status
-** information is written back into integers pointed to by L and C.)^
-** ^(The M parameter must be a valid [checkpoint mode]:)^
+** Run a checkpoint operation on WAL database zDb attached to database
+** handle db. The specific operation is determined by the value of the
+** eMode parameter:
**
** <dl>
** <dt>SQLITE_CHECKPOINT_PASSIVE<dd>
-** ^Checkpoint as many frames as possible without waiting for any database
-** readers or writers to finish, then sync the database file if all frames
-** in the log were checkpointed. ^The [busy-handler callback]
-** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode.
-** ^On the other hand, passive mode might leave the checkpoint unfinished
-** if there are concurrent readers or writers.
+** Checkpoint as many frames as possible without waiting for any database
+** readers or writers to finish. Sync the db file if all frames in the log
+** are checkpointed. This mode is the same as calling
+** sqlite3_wal_checkpoint(). The busy-handler callback is never invoked.
**
** <dt>SQLITE_CHECKPOINT_FULL<dd>
-** ^This mode blocks (it invokes the
-** [sqlite3_busy_handler|busy-handler callback]) until there is no
+** This mode blocks (calls the busy-handler callback) until there is no
** database writer and all readers are reading from the most recent database
-** snapshot. ^It then checkpoints all frames in the log file and syncs the
-** database file. ^This mode blocks new database writers while it is pending,
-** but new database readers are allowed to continue unimpeded.
+** snapshot. It then checkpoints all frames in the log file and syncs the
+** database file. This call blocks database writers while it is running,
+** but not database readers.
**
** <dt>SQLITE_CHECKPOINT_RESTART<dd>
-** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition
-** that after checkpointing the log file it blocks (calls the
-** [busy-handler callback])
-** until all readers are reading from the database file only. ^This ensures
-** that the next writer will restart the log file from the beginning.
-** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new
-** database writer attempts while it is pending, but does not impede readers.
-**
-** <dt>SQLITE_CHECKPOINT_TRUNCATE<dd>
-** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the
-** addition that it also truncates the log file to zero bytes just prior
-** to a successful return.
+** This mode works the same way as SQLITE_CHECKPOINT_FULL, except after
+** checkpointing the log file it blocks (calls the busy-handler callback)
+** until all readers are reading from the database file only. This ensures
+** that the next client to write to the database file restarts the log file
+** from the beginning. This call blocks database writers while it is running,
+** but not database readers.
** </dl>
**
-** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in
-** the log file or to -1 if the checkpoint could not run because
-** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not
-** NULL,then *pnCkpt is set to the total number of checkpointed frames in the
-** log file (including any that were already checkpointed before the function
-** was called) or to -1 if the checkpoint could not run due to an error or
-** because the database is not in WAL mode. ^Note that upon successful
-** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been
-** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero.
-**
-** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If
+** If pnLog is not NULL, then *pnLog is set to the total number of frames in
+** the log file before returning. If pnCkpt is not NULL, then *pnCkpt is set to
+** the total number of checkpointed frames (including any that were already
+** checkpointed when this function is called). *pnLog and *pnCkpt may be
+** populated even if sqlite3_wal_checkpoint_v2() returns other than SQLITE_OK.
+** If no values are available because of an error, they are both set to -1
+** before returning to communicate this to the caller.
+**
+** All calls obtain an exclusive "checkpoint" lock on the database file. If
** any other process is running a checkpoint operation at the same time, the
-** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a
+** lock cannot be obtained and SQLITE_BUSY is returned. Even if there is a
** busy-handler configured, it will not be invoked in this case.
**
-** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the
-** exclusive "writer" lock on the database file. ^If the writer lock cannot be
-** obtained immediately, and a busy-handler is configured, it is invoked and
-** the writer lock retried until either the busy-handler returns 0 or the lock
-** is successfully obtained. ^The busy-handler is also invoked while waiting for
-** database readers as described above. ^If the busy-handler returns 0 before
+** The SQLITE_CHECKPOINT_FULL and RESTART modes also obtain the exclusive
+** "writer" lock on the database file. If the writer lock cannot be obtained
+** immediately, and a busy-handler is configured, it is invoked and the writer
+** lock retried until either the busy-handler returns 0 or the lock is
+** successfully obtained. The busy-handler is also invoked while waiting for
+** database readers as described above. If the busy-handler returns 0 before
** the writer lock is obtained or while waiting for database readers, the
** checkpoint operation proceeds from that point in the same way as
** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible
-** without blocking any further. ^SQLITE_BUSY is returned in this case.
+** without blocking any further. SQLITE_BUSY is returned in this case.
**
-** ^If parameter zDb is NULL or points to a zero length string, then the
-** specified operation is attempted on all WAL databases [attached] to
-** [database connection] db. In this case the
-** values written to output parameters *pnLog and *pnCkpt are undefined. ^If
+** If parameter zDb is NULL or points to a zero length string, then the
+** specified operation is attempted on all WAL databases. In this case the
+** values written to output parameters *pnLog and *pnCkpt are undefined. If
** an SQLITE_BUSY error is encountered when processing one or more of the
** attached WAL databases, the operation is still attempted on any remaining
-** attached databases and SQLITE_BUSY is returned at the end. ^If any other
+** attached databases and SQLITE_BUSY is returned to the caller. If any other
** error occurs while processing an attached database, processing is abandoned
-** and the error code is returned to the caller immediately. ^If no error
+** and the error code returned to the caller immediately. If no error
** (SQLITE_BUSY or otherwise) is encountered while processing the attached
** databases, SQLITE_OK is returned.
**
-** ^If database zDb is the name of an attached database that is not in WAL
-** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. ^If
+** If database zDb is the name of an attached database that is not in WAL
+** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. If
** zDb is not NULL (or a zero length string) and is not the name of any
** attached database, SQLITE_ERROR is returned to the caller.
-**
-** ^Unless it returns SQLITE_MISUSE,
-** the sqlite3_wal_checkpoint_v2() interface
-** sets the error information that is queried by
-** [sqlite3_errcode()] and [sqlite3_errmsg()].
-**
-** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
-** from SQL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
+SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of attached database (or NULL) */
int eMode, /* SQLITE_CHECKPOINT_* value */
@@ -7801,18 +7192,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
);
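To make the output parameters above concrete, a sketch of a blocking checkpoint of the "main" database that reports the values written through pnLog and pnCkpt; illustrative only, with no busy-handler installed.

#include <stdio.h>
#include <sqlite3.h>

static int checkpoint_main(sqlite3 *db){
  int nLog = -1, nCkpt = -1;
  int rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_FULL,
                                     &nLog, &nCkpt);
  printf("frames in WAL: %d, checkpointed: %d, rc=%d\n", nLog, nCkpt, rc);
  return rc;
}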
/*
-** CAPI3REF: Checkpoint Mode Values
-** KEYWORDS: {checkpoint mode}
+** CAPI3REF: Checkpoint operation parameters
**
-** These constants define all valid values for the "checkpoint mode" passed
-** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface.
-** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the
-** meaning of each of these checkpoint modes.
+** These constants can be used as the 3rd parameter to
+** [sqlite3_wal_checkpoint_v2()]. See the [sqlite3_wal_checkpoint_v2()]
+** documentation for additional information about the meaning and use of
+** each of these values.
*/
-#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */
-#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */
-#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */
-#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */
+#define SQLITE_CHECKPOINT_PASSIVE 0
+#define SQLITE_CHECKPOINT_FULL 1
+#define SQLITE_CHECKPOINT_RESTART 2
/*
** CAPI3REF: Virtual Table Interface Configuration
@@ -7828,7 +7217,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options
** may be added in the future.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Virtual Table Configuration Options
@@ -7881,11 +7270,10 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
** of the SQL statement that triggered the call to the [xUpdate] method of the
** [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
/*
** CAPI3REF: Conflict resolution modes
-** KEYWORDS: {conflict resolution mode}
**
** These constants are returned by [sqlite3_vtab_on_conflict()] to
** inform a [virtual table] implementation what the [ON CONFLICT] mode
@@ -7901,108 +7289,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
/* #define SQLITE_ABORT 4 // Also an error code */
#define SQLITE_REPLACE 5
-/*
-** CAPI3REF: Prepared Statement Scan Status Opcodes
-** KEYWORDS: {scanstatus options}
-**
-** The following constants can be used for the T parameter to the
-** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a
-** different metric for sqlite3_stmt_scanstatus() to return.
-**
-** When the value returned to V is a string, space to hold that string is
-** managed by the prepared statement S and will be automatically freed when
-** S is finalized.
-**
-** <dl>
-** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt>
-** <dd>^The [sqlite3_int64] variable pointed to by the T parameter will be
-** set to the total number of times that the X-th loop has run.</dd>
-**
-** [[SQLITE_SCANSTAT_NVISIT]] <dt>SQLITE_SCANSTAT_NVISIT</dt>
-** <dd>^The [sqlite3_int64] variable pointed to by the T parameter will be set
-** to the total number of rows examined by all iterations of the X-th loop.</dd>
-**
-** [[SQLITE_SCANSTAT_EST]] <dt>SQLITE_SCANSTAT_EST</dt>
-** <dd>^The "double" variable pointed to by the T parameter will be set to the
-** query planner's estimate for the average number of rows output from each
-** iteration of the X-th loop. If the query planner's estimate was accurate,
-** then this value will approximate the quotient NVISIT/NLOOP and the
-** product of this value for all prior loops with the same SELECTID will
-** be the NLOOP value for the current loop.
-**
-** [[SQLITE_SCANSTAT_NAME]] <dt>SQLITE_SCANSTAT_NAME</dt>
-** <dd>^The "const char *" variable pointed to by the T parameter will be set
-** to a zero-terminated UTF-8 string containing the name of the index or table
-** used for the X-th loop.
-**
-** [[SQLITE_SCANSTAT_EXPLAIN]] <dt>SQLITE_SCANSTAT_EXPLAIN</dt>
-** <dd>^The "const char *" variable pointed to by the T parameter will be set
-** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
-** description for the X-th loop.
-**
-** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt>
-** <dd>^The "int" variable pointed to by the T parameter will be set to the
-** "select-id" for the X-th loop. The select-id identifies which query or
-** subquery the loop is part of. The main query has a select-id of zero.
-** The select-id is the same value as is output in the first column
-** of an [EXPLAIN QUERY PLAN] query.
-** </dl>
-*/
-#define SQLITE_SCANSTAT_NLOOP 0
-#define SQLITE_SCANSTAT_NVISIT 1
-#define SQLITE_SCANSTAT_EST 2
-#define SQLITE_SCANSTAT_NAME 3
-#define SQLITE_SCANSTAT_EXPLAIN 4
-#define SQLITE_SCANSTAT_SELECTID 5
-
-/*
-** CAPI3REF: Prepared Statement Scan Status
-** METHOD: sqlite3_stmt
-**
-** This interface returns information about the predicted and measured
-** performance for pStmt. Advanced applications can use this
-** interface to compare the predicted and the measured performance and
-** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
-**
-** Since this interface is expected to be rarely used, it is only
-** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS]
-** compile-time option.
-**
-** The "iScanStatusOp" parameter determines which status information to return.
-** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
-** of this interface is undefined.
-** ^The requested measurement is written into a variable pointed to by
-** the "pOut" parameter.
-** Parameter "idx" identifies the specific loop to retrieve statistics for.
-** Loops are numbered starting from zero. ^If idx is out of range - less than
-** zero or greater than or equal to the total number of loops used to implement
-** the statement - a non-zero value is returned and the variable that pOut
-** points to is unchanged.
-**
-** ^Statistics might not be available for all loops in all statements. ^In cases
-** where there exist loops with no available statistics, this function behaves
-** as if the loop did not exist - it returns non-zero and leaves the variable
-** that pOut points to unchanged.
-**
-** See also: [sqlite3_stmt_scanstatus_reset()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
- sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
- int idx, /* Index of loop to report on */
- int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
- void *pOut /* Result written here */
-);
-
-/*
-** CAPI3REF: Zero Scan-Status Counters
-** METHOD: sqlite3_stmt
-**
-** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
-**
-** This API is only available if the library is built with pre-processor
-** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
/*
@@ -8040,16 +7326,6 @@ extern "C" {
#endif
typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry;
-typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
-
-/* The double-precision datatype used by RTree depends on the
-** SQLITE_RTREE_INT_ONLY compile-time option.
-*/
-#ifdef SQLITE_RTREE_INT_ONLY
- typedef sqlite3_int64 sqlite3_rtree_dbl;
-#else
- typedef double sqlite3_rtree_dbl;
-#endif
/*
** Register a geometry callback named zGeom that can be used as part of an
@@ -8057,10 +7333,14 @@ typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
**
** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zGeom(... params ...)
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
+SQLITE_API int sqlite3_rtree_geometry_callback(
sqlite3 *db,
const char *zGeom,
- int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
+#ifdef SQLITE_RTREE_INT_ONLY
+ int (*xGeom)(sqlite3_rtree_geometry*, int n, sqlite3_int64 *a, int *pRes),
+#else
+ int (*xGeom)(sqlite3_rtree_geometry*, int n, double *a, int *pRes),
+#endif
void *pContext
);
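As a rough sketch of the registration above (assuming a build without SQLITE_RTREE_INT_ONLY, so the callback receives doubles): a hypothetical "circle" geometry whose three parameters are taken to be x, y and radius. A real callback would test the whole bounding box; this one only tests one corner, which is enough to show the plumbing.

#include <math.h>
#include <sqlite3.h>

/* Hypothetical xGeom callback: a[] holds the R-Tree cell bounds
** (x-min, x-max, y-min, y-max); set *pRes non-zero to keep the cell. */
static int circleGeom(sqlite3_rtree_geometry *p, int n, double *a, int *pRes){
  double cx = p->aParam[0], cy = p->aParam[1], r = p->aParam[2];
  double dx = a[0]-cx, dy = a[2]-cy;
  *pRes = (p->nParam==3 && n==4 && sqrt(dx*dx + dy*dy)<=r);
  return SQLITE_OK;
}

/* Registered once per connection, then used as in the comment above:
**   SELECT ... FROM <rtree> WHERE <rtree col> MATCH circle(45.3, 22.9, 5.0); */
static int register_circle(sqlite3 *db){
  return sqlite3_rtree_geometry_callback(db, "circle", circleGeom, 0);
}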
@@ -8072,62 +7352,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
struct sqlite3_rtree_geometry {
void *pContext; /* Copy of pContext passed to s_r_g_c() */
int nParam; /* Size of array aParam[] */
- sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */
+ double *aParam; /* Parameters passed to SQL geom function */
void *pUser; /* Callback implementation user data */
void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */
};
-/*
-** Register a 2nd-generation geometry callback named zScore that can be
-** used as part of an R-Tree geometry query as follows:
-**
-** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zQueryFunc(... params ...)
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
- sqlite3 *db,
- const char *zQueryFunc,
- int (*xQueryFunc)(sqlite3_rtree_query_info*),
- void *pContext,
- void (*xDestructor)(void*)
-);
-
-
-/*
-** A pointer to a structure of the following type is passed as the
-** argument to scored geometry callback registered using
-** sqlite3_rtree_query_callback().
-**
-** Note that the first 5 fields of this structure are identical to
-** sqlite3_rtree_geometry. This structure is a subclass of
-** sqlite3_rtree_geometry.
-*/
-struct sqlite3_rtree_query_info {
- void *pContext; /* pContext from when function registered */
- int nParam; /* Number of function parameters */
- sqlite3_rtree_dbl *aParam; /* value of function parameters */
- void *pUser; /* callback can use this, if desired */
- void (*xDelUser)(void*); /* function to free pUser */
- sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */
- unsigned int *anQueue; /* Number of pending entries in the queue */
- int nCoord; /* Number of coordinates */
- int iLevel; /* Level of current node or entry */
- int mxLevel; /* The largest iLevel value in the tree */
- sqlite3_int64 iRowid; /* Rowid for current entry */
- sqlite3_rtree_dbl rParentScore; /* Score of parent node */
- int eParentWithin; /* Visibility of parent node */
- int eWithin; /* OUT: Visibility */
- sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
- /* The following fields are only available in 3.8.11 and later */
- sqlite3_value **apSqlParam; /* Original SQL values of parameters */
-};
-
-/*
-** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin.
-*/
-#define NOT_WITHIN 0 /* Object completely outside of query region */
-#define PARTLY_WITHIN 1 /* Object partially overlaps query region */
-#define FULLY_WITHIN 2 /* Object fully contained within query region */
-
#if 0
} /* end of the 'extern "C"' block */
@@ -8135,8 +7364,11 @@ struct sqlite3_rtree_query_info {
#endif /* ifndef _SQLITE3RTREE_H_ */
+
+/************** End of sqlite3.h *********************************************/
+/************** Begin file sqliteInt.h ***************************************/
/*
-** 2014 May 31
+** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
@@ -8145,520 +7377,40 @@ struct sqlite3_rtree_query_info {
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
-******************************************************************************
-**
-** Interfaces to extend FTS5. Using the interfaces defined in this file,
-** FTS5 may be extended with:
-**
-** * custom tokenizers, and
-** * custom auxiliary functions.
-*/
-
-
-#ifndef _FTS5_H
-#define _FTS5_H
-
-
-#if 0
-extern "C" {
-#endif
-
-/*************************************************************************
-** CUSTOM AUXILIARY FUNCTIONS
+*************************************************************************
+** Internal interface definitions for SQLite.
**
-** Virtual table implementations may overload SQL functions by implementing
-** the sqlite3_module.xFindFunction() method.
*/
-
-typedef struct Fts5ExtensionApi Fts5ExtensionApi;
-typedef struct Fts5Context Fts5Context;
-typedef struct Fts5PhraseIter Fts5PhraseIter;
-
-typedef void (*fts5_extension_function)(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-);
-
-struct Fts5PhraseIter {
- const unsigned char *a;
- const unsigned char *b;
-};
+#ifndef _SQLITEINT_H_
+#define _SQLITEINT_H_
/*
-** EXTENSION API FUNCTIONS
-**
-** xUserData(pFts):
-** Return a copy of the context pointer the extension function was
-** registered with.
-**
-** xColumnTotalSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the FTS5 table. Or, if iCol is
-** non-negative but less than the number of columns in the table, return
-** the total number of tokens in column iCol, considering all rows in
-** the FTS5 table.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnCount(pFts):
-** Return the number of columns in the table.
-**
-** xColumnSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the current row. Or, if iCol is
-** non-negative but less than the number of columns in the table, set
-** *pnToken to the number of tokens in column iCol of the current row.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
-** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
-** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
-** if an error occurs, an SQLite error code is returned and the final values
-** of (*pz) and (*pn) are undefined.
-**
-** xPhraseCount:
-** Returns the number of phrases in the current query expression.
-**
-** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
-**
-** xInstCount:
-** Set *pnInst to the total number of occurrences of all phrases within
-** the query within the current row. Return SQLITE_OK if successful, or
-** an error code (i.e. SQLITE_NOMEM) if an error occurs.
-**
-** xInst:
-** Query for the details of phrase match iIdx within the current row.
-** Phrase matches are numbered starting from zero, so the iIdx argument
-** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
-**
-** Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
-** if an error occurs.
-**
-** xRowid:
-** Returns the rowid of the current row.
-**
-** xTokenize:
-** Tokenize text using the tokenizer belonging to the FTS5 table.
-**
-** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
-** This API function is used to query the FTS table for phrase iPhrase
-** of the current query. Specifically, a query equivalent to:
-**
-** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
-**
-** with $p set to a phrase equivalent to the phrase iPhrase of the
-** current query is executed. For each row visited, the callback function
-** passed as the fourth argument is invoked. The context and API objects
-** passed to the callback function may be used to access the properties of
-** each matched row. Invoking Api.xUserData() returns a copy of the pointer
-** passed as the third argument to pUserData.
-**
-** If the callback function returns any value other than SQLITE_OK, the
-** query is abandoned and the xQueryPhrase function returns immediately.
-** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
-** Otherwise, the error code is propagated upwards.
-**
-** If the query runs to completion without incident, SQLITE_OK is returned.
-** Or, if some error occurs before the query completes or is aborted by
-** the callback, an SQLite error code is returned.
-**
-**
-** xSetAuxdata(pFts5, pAux, xDelete)
-**
-** Save the pointer passed as the second argument as the extension functions
-** "auxiliary data". The pointer may then be retrieved by the current or any
-** future invocation of the same fts5 extension function made as part of
-** the same MATCH query using the xGetAuxdata() API.
-**
-** Each extension function is allocated a single auxiliary data slot for
-** each FTS query (MATCH expression). If the extension function is invoked
-** more than once for a single FTS query, then all invocations share a
-** single auxiliary data context.
-**
-** If there is already an auxiliary data pointer when this function is
-** invoked, then it is replaced by the new pointer. If an xDelete callback
-** was specified along with the original pointer, it is invoked at this
-** point.
-**
-** The xDelete callback, if one is specified, is also invoked on the
-** auxiliary data pointer after the FTS5 query has finished.
-**
-** If an error (e.g. an OOM condition) occurs within this function, an
-** the auxiliary data is set to NULL and an error code returned. If the
-** xDelete parameter was not NULL, it is invoked on the auxiliary data
-** pointer before returning.
-**
-**
-** xGetAuxdata(pFts5, bClear)
-**
-** Returns the current auxiliary data pointer for the fts5 extension
-** function. See the xSetAuxdata() method for details.
-**
-** If the bClear argument is non-zero, then the auxiliary data is cleared
-** (set to NULL) before this function returns. In this case the xDelete,
-** if any, is not invoked.
-**
-**
-** xRowCount(pFts5, pnRow)
-**
-** This function is used to retrieve the total number of rows in the table.
-** In other words, the same value that would be returned by:
-**
-** SELECT count(*) FROM ftstable;
-**
-** xPhraseFirst()
-** This function is used, along with type Fts5PhraseIter and the xPhraseNext
-** method, to iterate through all instances of a single query phrase within
-** the current row. This is the same information as is accessible via the
-** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
-** to use, this API may be faster under some circumstances. To iterate
-** through instances of phrase iPhrase, use the following code:
+** These #defines should enable >2GB file support on POSIX if the
+** underlying operating system supports it. If the OS lacks
+** large file support, or if the OS is Windows, these should be no-ops.
**
-** Fts5PhraseIter iter;
-** int iCol, iOff;
-** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
-** iOff>=0;
-** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
-** ){
-** // An instance of phrase iPhrase at offset iOff of column iCol
-** }
+** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any
+** system #includes. Hence, this block of code must be the very first
+** code in all source files.
**
-** The Fts5PhraseIter structure is defined above. Applications should not
-** modify this structure directly - it should only be used as shown above
-** with the xPhraseFirst() and xPhraseNext() API methods.
+** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch
+** on the compiler command line. This is necessary if you are compiling
+** on a recent machine (ex: Red Hat 7.2) but you want your code to work
+** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2
+** without this option, LFS is enabled. But LFS does not exist in the kernel
+** in Red Hat 6.0, so the code won't work. Hence, for maximum binary
+** portability you should omit LFS.
**
-** xPhraseNext()
-** See xPhraseFirst above.
+** The same is true for Mac OS X: LFS is only supported on Mac OS X 9 and later.
*/
-struct Fts5ExtensionApi {
- int iVersion; /* Currently always set to 1 */
-
- void *(*xUserData)(Fts5Context*);
-
- int (*xColumnCount)(Fts5Context*);
- int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
- int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
-
- int (*xTokenize)(Fts5Context*,
- const char *pText, int nText, /* Text to tokenize */
- void *pCtx, /* Context passed to xToken() */
- int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
- );
-
- int (*xPhraseCount)(Fts5Context*);
- int (*xPhraseSize)(Fts5Context*, int iPhrase);
-
- int (*xInstCount)(Fts5Context*, int *pnInst);
- int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
-
- sqlite3_int64 (*xRowid)(Fts5Context*);
- int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
- int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
-
- int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
- int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
- );
- int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
- void *(*xGetAuxdata)(Fts5Context*, int bClear);
-
- void (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
- void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
-};
-
-/*
-** CUSTOM AUXILIARY FUNCTIONS
-*************************************************************************/
-
-/*************************************************************************
-** CUSTOM TOKENIZERS
-**
-** Applications may also register custom tokenizer types. A tokenizer
-** is registered by providing fts5 with a populated instance of the
-** following structure. All structure methods must be defined, setting
-** any member of the fts5_tokenizer struct to NULL leads to undefined
-** behaviour. The structure methods are expected to function as follows:
-**
-** xCreate:
-** This function is used to allocate and initialize a tokenizer instance.
-** A tokenizer instance is required to actually tokenize text.
-**
-** The first argument passed to this function is a copy of the (void*)
-** pointer provided by the application when the fts5_tokenizer object
-** was registered with FTS5 (the third argument to xCreateTokenizer()).
-** The second and third arguments are an array of nul-terminated strings
-** containing the tokenizer arguments, if any, specified following the
-** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
-** to create the FTS5 table.
-**
-** The final argument is an output variable. If successful, (*ppOut)
-** should be set to point to the new tokenizer handle and SQLITE_OK
-** returned. If an error occurs, some value other than SQLITE_OK should
-** be returned. In this case, fts5 assumes that the final value of *ppOut
-** is undefined.
-**
-** xDelete:
-** This function is invoked to delete a tokenizer handle previously
-** allocated using xCreate(). Fts5 guarantees that this function will
-** be invoked exactly once for each successful call to xCreate().
-**
-** xTokenize:
-** This function is expected to tokenize the nText byte string indicated
-** by argument pText. pText may or may not be nul-terminated. The first
-** argument passed to this function is a pointer to an Fts5Tokenizer object
-** returned by an earlier call to xCreate().
-**
-** The second argument indicates the reason that FTS5 is requesting
-** tokenization of the supplied text. This is always one of the following
-** four values:
-**
-** <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
-** or removed from the FTS table. The tokenizer is being invoked to
-** determine the set of tokens to add to (or delete from) the
-** FTS index.
-**
-** <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed
-** against the FTS index. The tokenizer is being called to tokenize
-** a bareword or quoted string specified as part of the query.
-**
-** <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
-** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
-** followed by a "*" character, indicating that the last token
-** returned by the tokenizer will be treated as a token prefix.
-**
-** <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to
-** satisfy an fts5_api.xTokenize() request made by an auxiliary
-** function. Or an fts5_api.xColumnSize() request made by the same
-** on a columnsize=0 database.
-** </ul>
-**
-** For each token in the input string, the supplied callback xToken() must
-** be invoked. The first argument to it should be a copy of the pointer
-** passed as the second argument to xTokenize(). The third and fourth
-** arguments are a pointer to a buffer containing the token text, and the
-** size of the token in bytes. The 4th and 5th arguments are the byte offsets
-** of the first byte of and first byte immediately following the text from
-** which the token is derived within the input.
-**
-** The second argument passed to the xToken() callback ("tflags") should
-** normally be set to 0. The exception is if the tokenizer supports
-** synonyms. In this case see the discussion below for details.
-**
-** FTS5 assumes the xToken() callback is invoked for each token in the
-** order that they occur within the input text.
-**
-** If an xToken() callback returns any value other than SQLITE_OK, then
-** the tokenization should be abandoned and the xTokenize() method should
-** immediately return a copy of the xToken() return value. Or, if the
-** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
-** if an error occurs with the xTokenize() implementation itself, it
-** may abandon the tokenization and return any error code other than
-** SQLITE_OK or SQLITE_DONE.
-**
-** SYNONYM SUPPORT
-**
-** Custom tokenizers may also support synonyms. Consider a case in which a
-** user wishes to query for a phrase such as "first place". Using the
-** built-in tokenizers, the FTS5 query 'first + place' will match instances
-** of "first place" within the document set, but not alternative forms
-** such as "1st place". In some applications, it would be better to match
-** all instances of "first place" or "1st place" regardless of which form
-** the user specified in the MATCH query text.
-**
-** There are several ways to approach this in FTS5:
-**
-** <ol><li> By mapping all synonyms to a single token. In this case, the
-** In the above example, this means that the tokenizer returns the
-** same token for inputs "first" and "1st". Say that token is in
-** fact "first", so that when the user inserts the document "I won
-** 1st place" entries are added to the index for tokens "i", "won",
-** "first" and "place". If the user then queries for '1st + place',
-** the tokenizer substitutes "first" for "1st" and the query works
-** as expected.
-**
-** <li> By adding multiple synonyms for a single term to the FTS index.
-** In this case, when tokenizing query text, the tokenizer may
-** provide multiple synonyms for a single term within the document.
-** FTS5 then queries the index for each synonym individually. For
-** example, faced with the query:
-**
-** <codeblock>
-** ... MATCH 'first place'</codeblock>
-**
-** the tokenizer offers both "1st" and "first" as synonyms for the
-** first token in the MATCH query and FTS5 effectively runs a query
-** similar to:
-**
-** <codeblock>
-** ... MATCH '(first OR 1st) place'</codeblock>
-**
-** except that, for the purposes of auxiliary functions, the query
-** still appears to contain just two phrases - "(first OR 1st)"
-** being treated as a single phrase.
-**
-** <li> By adding multiple synonyms for a single term to the FTS index.
-** Using this method, when tokenizing document text, the tokenizer
-** provides multiple synonyms for each token. So that when a
-** document such as "I won first place" is tokenized, entries are
-** added to the FTS index for "i", "won", "first", "1st" and
-** "place".
-**
-** This way, even if the tokenizer does not provide synonyms
-** when tokenizing query text (it should not - to do so would be
-** inefficient), it doesn't matter if the user queries for
-** 'first + place' or '1st + place', as there are entries in the
-** FTS index corresponding to both forms of the first token.
-** </ol>
-**
-** Whether it is parsing document or query text, any call to xToken that
-** specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit
-** is considered to supply a synonym for the previous token. For example,
-** when parsing the document "I won first place", a tokenizer that supports
-** synonyms would call xToken() 5 times, as follows:
-**
-** <codeblock>
-** xToken(pCtx, 0, "i", 1, 0, 1);
-** xToken(pCtx, 0, "won", 3, 2, 5);
-** xToken(pCtx, 0, "first", 5, 6, 11);
-** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
-** xToken(pCtx, 0, "place", 5, 12, 17);
-**</codeblock>
-**
-** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
-** xToken() is called. Multiple synonyms may be specified for a single token
-** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
-** There is no limit to the number of synonyms that may be provided for a
-** single token.
-**
-** In many cases, method (1) above is the best approach. It does not add
-** extra data to the FTS index or require FTS5 to query for multiple terms,
-** so it is efficient in terms of disk space and query speed. However, it
-** does not support prefix queries very well. If, as suggested above, the
-** token "first" is subsituted for "1st" by the tokenizer, then the query:
-**
-** <codeblock>
-** ... MATCH '1s*'</codeblock>
-**
-** will not match documents that contain the token "1st" (as the tokenizer
-** will probably not map "1s" to any prefix of "first").
-**
-** For full prefix support, method (3) may be preferred. In this case,
-** because the index contains entries for both "first" and "1st", prefix
-** queries such as 'fi*' or '1s*' will match correctly. However, because
-** extra entries are added to the FTS index, this method uses more space
-** within the database.
-**
-** Method (2) offers a midpoint between (1) and (3). Using this method,
-** a query such as '1s*' will match documents that contain the literal
-** token "1st", but not "first" (assuming the tokenizer is not able to
-** provide synonyms for prefixes). However, a non-prefix query like '1st'
-** will match against "1st" and "first". This method does not require
-** extra disk space, as no extra entries are added to the FTS index.
-** On the other hand, it may require more CPU cycles to run MATCH queries,
-** as separate queries of the FTS index are required for each synonym.
-**
-** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
-** inefficient.
-*/
-typedef struct Fts5Tokenizer Fts5Tokenizer;
-typedef struct fts5_tokenizer fts5_tokenizer;
-struct fts5_tokenizer {
- int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
- void (*xDelete)(Fts5Tokenizer*);
- int (*xTokenize)(Fts5Tokenizer*,
- void *pCtx,
- int flags, /* Mask of FTS5_TOKENIZE_* flags */
- const char *pText, int nText,
- int (*xToken)(
- void *pCtx, /* Copy of 2nd argument to xTokenize() */
- int tflags, /* Mask of FTS5_TOKEN_* flags */
- const char *pToken, /* Pointer to buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Byte offset of token within input text */
- int iEnd /* Byte offset of end of token within input text */
- )
- );
-};
-
-/* Flags that may be passed as the third argument to xTokenize() */
-#define FTS5_TOKENIZE_QUERY 0x0001
-#define FTS5_TOKENIZE_PREFIX 0x0002
-#define FTS5_TOKENIZE_DOCUMENT 0x0004
-#define FTS5_TOKENIZE_AUX 0x0008
-
-/* Flags that may be passed by the tokenizer implementation back to FTS5
-** as the third argument to the supplied xToken callback. */
-#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
-
-/*
-** END OF CUSTOM TOKENIZERS
-*************************************************************************/
-
-/*************************************************************************
-** FTS5 EXTENSION REGISTRATION API
-*/
-typedef struct fts5_api fts5_api;
-struct fts5_api {
- int iVersion; /* Currently always set to 2 */
-
- /* Create a new tokenizer */
- int (*xCreateTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_tokenizer *pTokenizer,
- void (*xDestroy)(void*)
- );
-
- /* Find an existing tokenizer */
- int (*xFindTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void **ppContext,
- fts5_tokenizer *pTokenizer
- );
-
- /* Create a new auxiliary function */
- int (*xCreateFunction)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_extension_function xFunction,
- void (*xDestroy)(void*)
- );
-};
-
-/*
-** END OF REGISTRATION API
-*************************************************************************/
-
-#if 0
-} /* end of the 'extern "C"' block */
+#ifndef SQLITE_DISABLE_LFS
+# define _LARGE_FILE 1
+# ifndef _FILE_OFFSET_BITS
+# define _FILE_OFFSET_BITS 64
+# endif
+# define _LARGEFILE_SOURCE 1
#endif
-#endif /* _FTS5_H */
-
-
-
-/************** End of sqlite3.h *********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
/*
** Include the configuration header output by 'configure' if we're using the
** autoconf-based build
@@ -8769,17 +7521,15 @@ struct fts5_api {
#endif
/*
-** The suggested maximum number of in-memory pages to use for
-** the main database table and for temporary tables.
-**
-** IMPLEMENTATION-OF: R-31093-59126 The default suggested cache size
-** is 2000 pages.
-** IMPLEMENTATION-OF: R-48205-43578 The default suggested cache size can be
-** altered using the SQLITE_DEFAULT_CACHE_SIZE compile-time options.
+** The maximum number of in-memory pages to use for the main database
+** table and for temporary tables. The SQLITE_DEFAULT_CACHE_SIZE and
+** SQLITE_DEFAULT_TEMP_CACHE_SIZE macros below supply the default values.
*/
#ifndef SQLITE_DEFAULT_CACHE_SIZE
# define SQLITE_DEFAULT_CACHE_SIZE 2000
#endif
+#ifndef SQLITE_DEFAULT_TEMP_CACHE_SIZE
+# define SQLITE_DEFAULT_TEMP_CACHE_SIZE 500
+#endif
/*
** The default number of frames to accumulate in the log file before
@@ -8892,6 +7642,15 @@ struct fts5_api {
#pragma warn -spa /* Suspicious pointer arithmetic */
#endif
+/* Needed for various definitions... */
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+
+#if defined(__OpenBSD__) && !defined(_BSD_SOURCE)
+# define _BSD_SOURCE
+#endif
+
/*
** Include standard header files as necessary
*/
@@ -8933,36 +7692,6 @@ struct fts5_api {
#endif
/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define SQLITE_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define SQLITE_NOINLINE __declspec(noinline)
-#else
-# define SQLITE_NOINLINE
-#endif
-
-/*
-** Make sure that the compiler intrinsics we desire are enabled when
-** compiling with an appropriate version of MSVC unless prevented by
-** the SQLITE_DISABLE_INTRINSIC define.
-*/
-#if !defined(SQLITE_DISABLE_INTRINSIC)
-# if defined(_MSC_VER) && _MSC_VER>=1300
-# if !defined(_WIN32_WCE)
-# include <intrin.h>
-# pragma intrinsic(_byteswap_ushort)
-# pragma intrinsic(_byteswap_ulong)
-# pragma intrinsic(_ReadWriteBarrier)
-# else
-# include <cmnintrin.h>
-# endif
-# endif
-#endif
-
-/*
** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
** 0 means mutexes are permanently disable and the library is never
** threadsafe. 1 means the library is serialized which is the highest
@@ -8990,9 +7719,10 @@ struct fts5_api {
#endif
/*
-** EVIDENCE-OF: R-25715-37072 Memory allocation statistics are enabled by
-** default unless SQLite is compiled with SQLITE_DEFAULT_MEMSTATUS=0 in
-** which case memory allocation statistics are disabled by default.
+** The SQLITE_DEFAULT_MEMSTATUS macro must be defined as either 0 or 1.
+** It determines whether or not the features related to
+** SQLITE_CONFIG_MEMSTATUS are available by default or not. This value can
+** be overridden at runtime using the sqlite3_config() API.
*/
#if !defined(SQLITE_DEFAULT_MEMSTATUS)
# define SQLITE_DEFAULT_MEMSTATUS 1
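The runtime override mentioned in the comment goes through sqlite3_config(), which must be called before the library is initialized; a minimal sketch:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  /* Disable memory-allocation statistics before any other SQLite call. */
  int rc = sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "sqlite3_config failed: %d\n", rc);
    return 1;
  }
  sqlite3_initialize();
  /* ... use the library as usual ... */
  sqlite3_shutdown();
  return 0;
}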
@@ -9147,33 +7877,7 @@ SQLITE_PRIVATE void sqlite3Coverage(int);
#endif
/*
-** Declarations used for tracing the operating system interfaces.
-*/
-#if defined(SQLITE_FORCE_OS_TRACE) || defined(SQLITE_TEST) || \
- (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
- extern int sqlite3OSTrace;
-# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X
-# define SQLITE_HAVE_OS_TRACE
-#else
-# define OSTRACE(X)
-# undef SQLITE_HAVE_OS_TRACE
-#endif
-
-/*
-** Is the sqlite3ErrName() function needed in the build? Currently,
-** it is needed by "mutex_w32.c" (when debugging), "os_win.c" (when
-** OSTRACE is enabled), and by several "test*.c" files (which are
-** compiled using SQLITE_TEST).
-*/
-#if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \
- (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
-# define SQLITE_NEED_ERR_NAME
-#else
-# undef SQLITE_NEED_ERR_NAME
-#endif
-
-/*
-** Return true (non-zero) if the input is an integer that is too large
+** Return true (non-zero) if the input is a integer that is too large
** to fit in 32-bits. This macro is used inside of various testcase()
** macros to verify that we have tested SQLite for large-file support.
*/
@@ -9252,15 +7956,15 @@ struct Hash {
struct HashElem {
HashElem *next, *prev; /* Next and previous elements in the table */
void *data; /* Data associated with this element */
- const char *pKey; /* Key associated with this element */
+ const char *pKey; int nKey; /* Key associated with this element */
};
/*
** Access routines. To delete, insert a NULL pointer.
*/
SQLITE_PRIVATE void sqlite3HashInit(Hash*);
-SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, void *pData);
-SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey);
+SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, int nKey, void *pData);
+SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey, int nKey);
SQLITE_PRIVATE void sqlite3HashClear(Hash*);
/*
@@ -9292,165 +7996,163 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include parse.h in the middle of sqliteInt.h *****************/
/************** Begin file parse.h *******************************************/
-#define TK_SEMI 1
-#define TK_EXPLAIN 2
-#define TK_QUERY 3
-#define TK_PLAN 4
-#define TK_BEGIN 5
-#define TK_TRANSACTION 6
-#define TK_DEFERRED 7
-#define TK_IMMEDIATE 8
-#define TK_EXCLUSIVE 9
-#define TK_COMMIT 10
-#define TK_END 11
-#define TK_ROLLBACK 12
-#define TK_SAVEPOINT 13
-#define TK_RELEASE 14
-#define TK_TO 15
-#define TK_TABLE 16
-#define TK_CREATE 17
-#define TK_IF 18
-#define TK_NOT 19
-#define TK_EXISTS 20
-#define TK_TEMP 21
-#define TK_LP 22
-#define TK_RP 23
-#define TK_AS 24
-#define TK_WITHOUT 25
-#define TK_COMMA 26
-#define TK_ID 27
-#define TK_INDEXED 28
-#define TK_ABORT 29
-#define TK_ACTION 30
-#define TK_AFTER 31
-#define TK_ANALYZE 32
-#define TK_ASC 33
-#define TK_ATTACH 34
-#define TK_BEFORE 35
-#define TK_BY 36
-#define TK_CASCADE 37
-#define TK_CAST 38
-#define TK_COLUMNKW 39
-#define TK_CONFLICT 40
-#define TK_DATABASE 41
-#define TK_DESC 42
-#define TK_DETACH 43
-#define TK_EACH 44
-#define TK_FAIL 45
-#define TK_FOR 46
-#define TK_IGNORE 47
-#define TK_INITIALLY 48
-#define TK_INSTEAD 49
-#define TK_LIKE_KW 50
-#define TK_MATCH 51
-#define TK_NO 52
-#define TK_KEY 53
-#define TK_OF 54
-#define TK_OFFSET 55
-#define TK_PRAGMA 56
-#define TK_RAISE 57
-#define TK_RECURSIVE 58
-#define TK_REPLACE 59
-#define TK_RESTRICT 60
-#define TK_ROW 61
-#define TK_TRIGGER 62
-#define TK_VACUUM 63
-#define TK_VIEW 64
-#define TK_VIRTUAL 65
-#define TK_WITH 66
-#define TK_REINDEX 67
-#define TK_RENAME 68
-#define TK_CTIME_KW 69
-#define TK_ANY 70
-#define TK_OR 71
-#define TK_AND 72
-#define TK_IS 73
-#define TK_BETWEEN 74
-#define TK_IN 75
-#define TK_ISNULL 76
-#define TK_NOTNULL 77
-#define TK_NE 78
-#define TK_EQ 79
-#define TK_GT 80
-#define TK_LE 81
-#define TK_LT 82
-#define TK_GE 83
-#define TK_ESCAPE 84
-#define TK_BITAND 85
-#define TK_BITOR 86
-#define TK_LSHIFT 87
-#define TK_RSHIFT 88
-#define TK_PLUS 89
-#define TK_MINUS 90
-#define TK_STAR 91
-#define TK_SLASH 92
-#define TK_REM 93
-#define TK_CONCAT 94
-#define TK_COLLATE 95
-#define TK_BITNOT 96
-#define TK_STRING 97
-#define TK_JOIN_KW 98
-#define TK_CONSTRAINT 99
-#define TK_DEFAULT 100
-#define TK_NULL 101
-#define TK_PRIMARY 102
-#define TK_UNIQUE 103
-#define TK_CHECK 104
-#define TK_REFERENCES 105
-#define TK_AUTOINCR 106
-#define TK_ON 107
-#define TK_INSERT 108
-#define TK_DELETE 109
-#define TK_UPDATE 110
-#define TK_SET 111
-#define TK_DEFERRABLE 112
-#define TK_FOREIGN 113
-#define TK_DROP 114
-#define TK_UNION 115
-#define TK_ALL 116
-#define TK_EXCEPT 117
-#define TK_INTERSECT 118
-#define TK_SELECT 119
-#define TK_VALUES 120
-#define TK_DISTINCT 121
-#define TK_DOT 122
-#define TK_FROM 123
-#define TK_JOIN 124
-#define TK_USING 125
-#define TK_ORDER 126
-#define TK_GROUP 127
-#define TK_HAVING 128
-#define TK_LIMIT 129
-#define TK_WHERE 130
-#define TK_INTO 131
-#define TK_INTEGER 132
-#define TK_FLOAT 133
-#define TK_BLOB 134
-#define TK_VARIABLE 135
-#define TK_CASE 136
-#define TK_WHEN 137
-#define TK_THEN 138
-#define TK_ELSE 139
-#define TK_INDEX 140
-#define TK_ALTER 141
-#define TK_ADD 142
-#define TK_TO_TEXT 143
-#define TK_TO_BLOB 144
-#define TK_TO_NUMERIC 145
-#define TK_TO_INT 146
-#define TK_TO_REAL 147
-#define TK_ISNOT 148
-#define TK_END_OF_FILE 149
-#define TK_ILLEGAL 150
-#define TK_SPACE 151
-#define TK_UNCLOSED_STRING 152
-#define TK_FUNCTION 153
-#define TK_COLUMN 154
-#define TK_AGG_FUNCTION 155
-#define TK_AGG_COLUMN 156
-#define TK_UMINUS 157
-#define TK_UPLUS 158
-#define TK_REGISTER 159
+#define TK_SEMI 1
+#define TK_EXPLAIN 2
+#define TK_QUERY 3
+#define TK_PLAN 4
+#define TK_BEGIN 5
+#define TK_TRANSACTION 6
+#define TK_DEFERRED 7
+#define TK_IMMEDIATE 8
+#define TK_EXCLUSIVE 9
+#define TK_COMMIT 10
+#define TK_END 11
+#define TK_ROLLBACK 12
+#define TK_SAVEPOINT 13
+#define TK_RELEASE 14
+#define TK_TO 15
+#define TK_TABLE 16
+#define TK_CREATE 17
+#define TK_IF 18
+#define TK_NOT 19
+#define TK_EXISTS 20
+#define TK_TEMP 21
+#define TK_LP 22
+#define TK_RP 23
+#define TK_AS 24
+#define TK_WITHOUT 25
+#define TK_COMMA 26
+#define TK_ID 27
+#define TK_INDEXED 28
+#define TK_ABORT 29
+#define TK_ACTION 30
+#define TK_AFTER 31
+#define TK_ANALYZE 32
+#define TK_ASC 33
+#define TK_ATTACH 34
+#define TK_BEFORE 35
+#define TK_BY 36
+#define TK_CASCADE 37
+#define TK_CAST 38
+#define TK_COLUMNKW 39
+#define TK_CONFLICT 40
+#define TK_DATABASE 41
+#define TK_DESC 42
+#define TK_DETACH 43
+#define TK_EACH 44
+#define TK_FAIL 45
+#define TK_FOR 46
+#define TK_IGNORE 47
+#define TK_INITIALLY 48
+#define TK_INSTEAD 49
+#define TK_LIKE_KW 50
+#define TK_MATCH 51
+#define TK_NO 52
+#define TK_KEY 53
+#define TK_OF 54
+#define TK_OFFSET 55
+#define TK_PRAGMA 56
+#define TK_RAISE 57
+#define TK_REPLACE 58
+#define TK_RESTRICT 59
+#define TK_ROW 60
+#define TK_TRIGGER 61
+#define TK_VACUUM 62
+#define TK_VIEW 63
+#define TK_VIRTUAL 64
+#define TK_REINDEX 65
+#define TK_RENAME 66
+#define TK_CTIME_KW 67
+#define TK_ANY 68
+#define TK_OR 69
+#define TK_AND 70
+#define TK_IS 71
+#define TK_BETWEEN 72
+#define TK_IN 73
+#define TK_ISNULL 74
+#define TK_NOTNULL 75
+#define TK_NE 76
+#define TK_EQ 77
+#define TK_GT 78
+#define TK_LE 79
+#define TK_LT 80
+#define TK_GE 81
+#define TK_ESCAPE 82
+#define TK_BITAND 83
+#define TK_BITOR 84
+#define TK_LSHIFT 85
+#define TK_RSHIFT 86
+#define TK_PLUS 87
+#define TK_MINUS 88
+#define TK_STAR 89
+#define TK_SLASH 90
+#define TK_REM 91
+#define TK_CONCAT 92
+#define TK_COLLATE 93
+#define TK_BITNOT 94
+#define TK_STRING 95
+#define TK_JOIN_KW 96
+#define TK_CONSTRAINT 97
+#define TK_DEFAULT 98
+#define TK_NULL 99
+#define TK_PRIMARY 100
+#define TK_UNIQUE 101
+#define TK_CHECK 102
+#define TK_REFERENCES 103
+#define TK_AUTOINCR 104
+#define TK_ON 105
+#define TK_INSERT 106
+#define TK_DELETE 107
+#define TK_UPDATE 108
+#define TK_SET 109
+#define TK_DEFERRABLE 110
+#define TK_FOREIGN 111
+#define TK_DROP 112
+#define TK_UNION 113
+#define TK_ALL 114
+#define TK_EXCEPT 115
+#define TK_INTERSECT 116
+#define TK_SELECT 117
+#define TK_DISTINCT 118
+#define TK_DOT 119
+#define TK_FROM 120
+#define TK_JOIN 121
+#define TK_USING 122
+#define TK_ORDER 123
+#define TK_GROUP 124
+#define TK_HAVING 125
+#define TK_LIMIT 126
+#define TK_WHERE 127
+#define TK_INTO 128
+#define TK_VALUES 129
+#define TK_INTEGER 130
+#define TK_FLOAT 131
+#define TK_BLOB 132
+#define TK_REGISTER 133
+#define TK_VARIABLE 134
+#define TK_CASE 135
+#define TK_WHEN 136
+#define TK_THEN 137
+#define TK_ELSE 138
+#define TK_INDEX 139
+#define TK_ALTER 140
+#define TK_ADD 141
+#define TK_TO_TEXT 142
+#define TK_TO_BLOB 143
+#define TK_TO_NUMERIC 144
+#define TK_TO_INT 145
+#define TK_TO_REAL 146
+#define TK_ISNOT 147
+#define TK_END_OF_FILE 148
+#define TK_ILLEGAL 149
+#define TK_SPACE 150
+#define TK_UNCLOSED_STRING 151
+#define TK_FUNCTION 152
+#define TK_COLUMN 153
+#define TK_AGG_FUNCTION 154
+#define TK_AGG_COLUMN 155
+#define TK_UMINUS 156
+#define TK_UPLUS 157
/************** End of parse.h ***********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
@@ -9520,37 +8222,6 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
#endif
/*
-** If no value has been provided for SQLITE_MAX_WORKER_THREADS, or if
-** SQLITE_TEMP_STORE is set to 3 (never use temporary files), set it
-** to zero.
-*/
-#if SQLITE_TEMP_STORE==3 || SQLITE_THREADSAFE==0
-# undef SQLITE_MAX_WORKER_THREADS
-# define SQLITE_MAX_WORKER_THREADS 0
-#endif
-#ifndef SQLITE_MAX_WORKER_THREADS
-# define SQLITE_MAX_WORKER_THREADS 8
-#endif
-#ifndef SQLITE_DEFAULT_WORKER_THREADS
-# define SQLITE_DEFAULT_WORKER_THREADS 0
-#endif
-#if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS
-# undef SQLITE_MAX_WORKER_THREADS
-# define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS
-#endif
-
-/*
-** The default initial allocation for the pagecache when using separate
-** pagecaches for each database connection. A positive number is the
-** number of pages. A negative number N translations means that a buffer
-** of -1024*N bytes is allocated and used for as many pages as it will hold.
-*/
-#ifndef SQLITE_DEFAULT_PCACHE_INITSZ
-# define SQLITE_DEFAULT_PCACHE_INITSZ 100
-#endif
-
-
-/*
** GCC does not define the offsetof() macro so we'll have to do it
** ourselves.
*/
@@ -9565,11 +8236,6 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
#define MAX(A,B) ((A)>(B)?(A):(B))
/*
-** Swap two objects of type TYPE.
-*/
-#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
-
-/*
** Check to see if this machine uses EBCDIC. (Yes, believe it or
** not, there are still machines out there that use EBCDIC.)
*/
@@ -9658,10 +8324,10 @@ typedef INT8_TYPE i8; /* 1-byte signed integer */
** gives a possible range of values of approximately 1.0e986 to 1e-986.
** But the allowed values are "grainy". Not every value is representable.
** For example, quantities 16 and 17 are both represented by a LogEst
-** of 40. However, since LogEst quantities are suppose to be estimates,
+** of 40. However, since LogEst quantatites are suppose to be estimates,
** not exact values, this imprecision is not a problem.
**
-** "LogEst" is short for "Logarithmic Estimate".
+** "LogEst" is short for "Logarithimic Estimate".
**
** Examples:
** 1 -> 0 20 -> 43 10000 -> 132
@@ -9678,54 +8344,23 @@ typedef INT8_TYPE i8; /* 1-byte signed integer */
typedef INT16_TYPE LogEst;
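The encoding described above is roughly 10*log2(x); the library's own sqlite3LogEst() uses a small lookup table, so the naive floating-point stand-in below (self-contained, hypothetical names, link with -lm) can differ from the quoted values by about 1:

#include <math.h>
#include <stdio.h>

static short toLogEst(double x){ return (short)(10.0*log2(x) + 0.5); }
static double fromLogEst(short e){ return pow(2.0, e/10.0); }

int main(void){
  printf("1 -> %d, 20 -> %d, 10000 -> %d\n",
         toLogEst(1), toLogEst(20), toLogEst(10000));   /* ~0, ~43, ~133 */
  printf("LogEst 40 ~ %.1f\n", fromLogEst(40));         /* ~16, i.e. the 16/17 range */
  return 0;
}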
/*
-** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer
-*/
-#ifndef SQLITE_PTRSIZE
-# if defined(__SIZEOF_POINTER__)
-# define SQLITE_PTRSIZE __SIZEOF_POINTER__
-# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
- defined(_M_ARM) || defined(__arm__) || defined(__x86)
-# define SQLITE_PTRSIZE 4
-# else
-# define SQLITE_PTRSIZE 8
-# endif
-#endif
-
-/*
** Macros to determine whether the machine is big or little endian,
-** and whether or not that determination is run-time or compile-time.
-**
-** For best performance, an attempt is made to guess at the byte-order
-** using C-preprocessor macros. If that is unsuccessful, or if
-** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
-** at run-time.
+** evaluated at runtime.
*/
#ifdef SQLITE_AMALGAMATION
SQLITE_PRIVATE const int sqlite3one = 1;
#else
SQLITE_PRIVATE const int sqlite3one;
#endif
-#if (defined(i386) || defined(__i386__) || defined(_M_IX86) || \
- defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
- defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
- defined(__arm__)) && !defined(SQLITE_RUNTIME_BYTEORDER)
-# define SQLITE_BYTEORDER 1234
+#if defined(i386) || defined(__i386__) || defined(_M_IX86)\
+ || defined(__x86_64) || defined(__x86_64__)
# define SQLITE_BIGENDIAN 0
# define SQLITE_LITTLEENDIAN 1
# define SQLITE_UTF16NATIVE SQLITE_UTF16LE
-#endif
-#if (defined(sparc) || defined(__ppc__)) \
- && !defined(SQLITE_RUNTIME_BYTEORDER)
-# define SQLITE_BYTEORDER 4321
-# define SQLITE_BIGENDIAN 1
-# define SQLITE_LITTLEENDIAN 0
-# define SQLITE_UTF16NATIVE SQLITE_UTF16BE
-#endif
-#if !defined(SQLITE_BYTEORDER)
-# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */
+#else
# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0)
# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1)
-# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE)
+# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE)
#endif
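The runtime branch restored here boils down to inspecting the first byte of a known integer; as a stand-alone illustration (demo constant, not the real sqlite3one):

#include <stdio.h>

static const int one = 1;   /* same trick as the sqlite3one constant above */

int main(void){
  int isLittle = (*(const char*)&one == 1);
  printf("this build is %s-endian\n", isLittle ? "little" : "big");
  return 0;
}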
/*
@@ -9753,7 +8388,7 @@ SQLITE_PRIVATE const int sqlite3one;
** all alignment restrictions correct.
**
** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the
-** underlying malloc() implementation might return us 4-byte aligned
+** underlying malloc() implemention might return us 4-byte aligned
** pointers. In that case, only verify 4-byte alignment.
*/
#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
@@ -9784,9 +8419,7 @@ SQLITE_PRIVATE const int sqlite3one;
# if defined(__linux__) \
|| defined(_WIN32) \
|| (defined(__APPLE__) && defined(__MACH__)) \
- || defined(__sun) \
- || defined(__FreeBSD__) \
- || defined(__DragonFly__)
+ || defined(__sun)
# define SQLITE_MAX_MMAP_SIZE 0x7fff0000 /* 2147418112 */
# else
# define SQLITE_MAX_MMAP_SIZE 0
@@ -9823,16 +8456,6 @@ SQLITE_PRIVATE const int sqlite3one;
#endif
/*
-** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not
-** the Select query generator tracing logic is turned on.
-*/
-#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_SELECTTRACE)
-# define SELECTTRACE_ENABLED 1
-#else
-# define SELECTTRACE_ENABLED 0
-#endif
-
-/*
** An instance of the following structure is used to store the busy-handler
** callback for a given sqlite handle.
**
@@ -9905,8 +8528,8 @@ struct BusyHandler {
#define SQLITE_WSD const
#define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v)))
#define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config)
-SQLITE_API int SQLITE_STDCALL sqlite3_wsd_init(int N, int J);
-SQLITE_API void *SQLITE_STDCALL sqlite3_wsd_find(void *K, int L);
+SQLITE_API int sqlite3_wsd_init(int N, int J);
+SQLITE_API void *sqlite3_wsd_find(void *K, int L);
#else
#define SQLITE_WSD
#define GLOBAL(t,v) v
@@ -9960,18 +8583,15 @@ typedef struct LookasideSlot LookasideSlot;
typedef struct Module Module;
typedef struct NameContext NameContext;
typedef struct Parse Parse;
-typedef struct PrintfArguments PrintfArguments;
typedef struct RowSet RowSet;
typedef struct Savepoint Savepoint;
typedef struct Select Select;
-typedef struct SQLiteThread SQLiteThread;
typedef struct SelectDest SelectDest;
typedef struct SrcList SrcList;
typedef struct StrAccum StrAccum;
typedef struct Table Table;
typedef struct TableLock TableLock;
typedef struct Token Token;
-typedef struct TreeView TreeView;
typedef struct Trigger Trigger;
typedef struct TriggerPrg TriggerPrg;
typedef struct TriggerStep TriggerStep;
@@ -9980,7 +8600,6 @@ typedef struct VTable VTable;
typedef struct VtabCtx VtabCtx;
typedef struct Walker Walker;
typedef struct WhereInfo WhereInfo;
-typedef struct With With;
/*
** Defer sourcing vdbe.h and btree.h until after the "u8" and
@@ -10010,7 +8629,7 @@ typedef struct With With;
/* TODO: This definition is just included so other modules compile. It
** needs to be revisited.
*/
-#define SQLITE_N_BTREE_META 16
+#define SQLITE_N_BTREE_META 10
/*
** If defined as non-zero, auto-vacuum is enabled by default. Otherwise
@@ -10054,9 +8673,7 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
SQLITE_PRIVATE int sqlite3BtreeClose(Btree*);
SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int);
-#if SQLITE_MAX_MMAP_SIZE>0
-SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64);
-#endif
+SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64);
SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(Btree*,unsigned);
SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree*);
SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix);
@@ -10064,15 +8681,17 @@ SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*);
SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree*,int);
SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree*);
SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int);
-SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeGetReserve(Btree*);
+#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_DEBUG)
SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p);
+#endif
SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int);
SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *);
SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int);
SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster);
SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int);
SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int,int);
+SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int);
SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int);
SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, int*, int flags);
SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*);
@@ -10104,8 +8723,7 @@ SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *);
SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*);
SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*);
-SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree*, int, int);
+SQLITE_PRIVATE void sqlite3BtreeTripAllCursors(Btree*, int);
SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue);
SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value);
@@ -10123,11 +8741,6 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p);
** For example, the free-page-count field is located at byte offset 36 of
** the database file header. The incr-vacuum-flag field is located at
** byte offset 64 (== 36+4*7).
-**
-** The BTREE_DATA_VERSION value is not really a value stored in the header.
-** It is a read-only number computed by the pager. But we merge it with
-** the header value access routines since its access pattern is the same.
-** Call it a "virtual meta value".
*/
#define BTREE_FREE_PAGE_COUNT 0
#define BTREE_SCHEMA_VERSION 1
@@ -10138,23 +8751,12 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p);
#define BTREE_USER_VERSION 6
#define BTREE_INCR_VACUUM 7
#define BTREE_APPLICATION_ID 8
-#define BTREE_DATA_VERSION 15 /* A virtual meta-value */
/*
** Values that may be OR'd together to form the second argument of an
** sqlite3BtreeCursorHints() call.
-**
-** The BTREE_BULKLOAD flag is set on index cursors when the index is going
-** to be filled with content that is already in sorted order.
-**
-** The BTREE_SEEK_EQ flag is set on cursors that will get OP_SeekGE or
-** OP_SeekLE opcodes for a range search, but where the range of entries
-** selected will all have the same key. In other words, the cursor will
-** be used only for equality key searches.
-**
*/
-#define BTREE_BULKLOAD 0x00000001 /* Used to full index in sorted order */
-#define BTREE_SEEK_EQ 0x00000002 /* EQ seeks only - no range seeks */
+#define BTREE_BULKLOAD 0x00000001
SQLITE_PRIVATE int sqlite3BtreeCursor(
Btree*, /* BTree containing table to open */
@@ -10174,9 +8776,8 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
int bias,
int *pRes
);
-SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*);
-SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, int);
+SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*, int*);
+SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*);
SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey,
const void *pData, int nData,
int nZero, int bias, int seekResult);
@@ -10191,20 +8792,17 @@ SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor*, u32 *pAmt);
SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor*, u32 *pAmt);
SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor*, u32 *pSize);
SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE void sqlite3BtreeSetCachedRowid(BtCursor*, sqlite3_int64);
+SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeGetCachedRowid(BtCursor*);
SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
-SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *);
+SQLITE_PRIVATE void sqlite3BtreeCacheOverflow(BtCursor *);
SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *);
SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion);
SQLITE_PRIVATE void sqlite3BtreeCursorHints(BtCursor *, unsigned int mask);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask);
-#endif
-SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt);
-SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void);
#ifndef NDEBUG
SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*);
@@ -10316,14 +8914,13 @@ struct VdbeOp {
int p1; /* First operand */
int p2; /* Second parameter (often the jump destination) */
int p3; /* The third parameter */
- union p4union { /* fourth parameter */
+ union { /* fourth parameter */
int i; /* Integer value if p4type==P4_INT32 */
void *p; /* Generic pointer */
char *z; /* Pointer to data for string (char array) types */
i64 *pI64; /* Used when p4type is P4_INT64 */
double *pReal; /* Used when p4type is P4_REAL */
FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */
- sqlite3_context *pCtx; /* Used when p4type is P4_FUNCCTX */
CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */
Mem *pMem; /* Used when p4type is P4_MEM */
VTable *pVtab; /* Used when p4type is P4_VTAB */
@@ -10336,12 +8933,9 @@ struct VdbeOp {
char *zComment; /* Comment to improve readability */
#endif
#ifdef VDBE_PROFILE
- u32 cnt; /* Number of times this instruction was executed */
+ int cnt; /* Number of times this instruction was executed */
u64 cycles; /* Total time spent executing this instruction */
#endif
-#ifdef SQLITE_VDBE_COVERAGE
- int iSrcLine; /* Source-code line that generated this opcode */
-#endif
};
typedef struct VdbeOp VdbeOp;
@@ -10390,7 +8984,6 @@ typedef struct VdbeOpList VdbeOpList;
#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */
#define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */
#define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */
-#define P4_FUNCCTX (-20) /* P4 is a pointer to an sqlite3_context object */
/* Error message codes for OP_Halt */
#define P5_ConstraintNotNull 1
@@ -10433,166 +9026,158 @@ typedef struct VdbeOpList VdbeOpList;
/************** Begin file opcodes.h *****************************************/
/* Automatically generated. Do not edit */
/* See the mkopcodeh.awk script for details */
-#define OP_Savepoint 1
-#define OP_AutoCommit 2
-#define OP_Transaction 3
-#define OP_SorterNext 4
-#define OP_PrevIfOpen 5
-#define OP_NextIfOpen 6
-#define OP_Prev 7
-#define OP_Next 8
-#define OP_Checkpoint 9
-#define OP_JournalMode 10
-#define OP_Vacuum 11
-#define OP_VFilter 12 /* synopsis: iplan=r[P3] zplan='P4' */
-#define OP_VUpdate 13 /* synopsis: data=r[P3@P2] */
-#define OP_Goto 14
-#define OP_Gosub 15
-#define OP_Return 16
-#define OP_InitCoroutine 17
-#define OP_EndCoroutine 18
+#define OP_Function 1 /* synopsis: r[P3]=func(r[P2@P5]) */
+#define OP_Savepoint 2
+#define OP_AutoCommit 3
+#define OP_Transaction 4
+#define OP_SorterNext 5
+#define OP_PrevIfOpen 6
+#define OP_NextIfOpen 7
+#define OP_Prev 8
+#define OP_Next 9
+#define OP_AggStep 10 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_Checkpoint 11
+#define OP_JournalMode 12
+#define OP_Vacuum 13
+#define OP_VFilter 14 /* synopsis: iPlan=r[P3] zPlan='P4' */
+#define OP_VUpdate 15 /* synopsis: data=r[P3@P2] */
+#define OP_Goto 16
+#define OP_Gosub 17
+#define OP_Return 18
#define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */
#define OP_Yield 20
-#define OP_HaltIfNull 21 /* synopsis: if r[P3]=null halt */
+#define OP_HaltIfNull 21 /* synopsis: if r[P3] null then halt */
#define OP_Halt 22
#define OP_Integer 23 /* synopsis: r[P2]=P1 */
#define OP_Int64 24 /* synopsis: r[P2]=P4 */
#define OP_String 25 /* synopsis: r[P2]='P4' (len=P1) */
#define OP_Null 26 /* synopsis: r[P2..P3]=NULL */
-#define OP_SoftNull 27 /* synopsis: r[P1]=NULL */
-#define OP_Blob 28 /* synopsis: r[P2]=P4 (len=P1) */
-#define OP_Variable 29 /* synopsis: r[P2]=parameter(P1,P4) */
-#define OP_Move 30 /* synopsis: r[P2@P3]=r[P1@P3] */
-#define OP_Copy 31 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
-#define OP_SCopy 32 /* synopsis: r[P2]=r[P1] */
-#define OP_ResultRow 33 /* synopsis: output=r[P1@P2] */
-#define OP_CollSeq 34
-#define OP_Function0 35 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_Function 36 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_AddImm 37 /* synopsis: r[P1]=r[P1]+P2 */
-#define OP_MustBeInt 38
-#define OP_RealAffinity 39
-#define OP_Cast 40 /* synopsis: affinity(r[P1]) */
-#define OP_Permutation 41
-#define OP_Compare 42 /* synopsis: r[P1@P3] <-> r[P2@P3] */
-#define OP_Jump 43
-#define OP_Once 44
-#define OP_If 45
-#define OP_IfNot 46
-#define OP_Column 47 /* synopsis: r[P3]=PX */
-#define OP_Affinity 48 /* synopsis: affinity(r[P1@P2]) */
-#define OP_MakeRecord 49 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
-#define OP_Count 50 /* synopsis: r[P2]=count() */
-#define OP_ReadCookie 51
-#define OP_SetCookie 52
-#define OP_ReopenIdx 53 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenRead 54 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenWrite 55 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenAutoindex 56 /* synopsis: nColumn=P2 */
-#define OP_OpenEphemeral 57 /* synopsis: nColumn=P2 */
-#define OP_SorterOpen 58
-#define OP_SequenceTest 59 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
-#define OP_OpenPseudo 60 /* synopsis: P3 columns in r[P2] */
-#define OP_Close 61
-#define OP_ColumnsUsed 62
-#define OP_SeekLT 63 /* synopsis: key=r[P3@P4] */
-#define OP_SeekLE 64 /* synopsis: key=r[P3@P4] */
-#define OP_SeekGE 65 /* synopsis: key=r[P3@P4] */
-#define OP_SeekGT 66 /* synopsis: key=r[P3@P4] */
-#define OP_Seek 67 /* synopsis: intkey=r[P2] */
-#define OP_NoConflict 68 /* synopsis: key=r[P3@P4] */
-#define OP_NotFound 69 /* synopsis: key=r[P3@P4] */
-#define OP_Found 70 /* synopsis: key=r[P3@P4] */
-#define OP_Or 71 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
-#define OP_And 72 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
-#define OP_NotExists 73 /* synopsis: intkey=r[P3] */
-#define OP_Sequence 74 /* synopsis: r[P2]=cursor[P1].ctr++ */
-#define OP_NewRowid 75 /* synopsis: r[P2]=rowid */
-#define OP_IsNull 76 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
-#define OP_NotNull 77 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
-#define OP_Ne 78 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */
-#define OP_Eq 79 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */
-#define OP_Gt 80 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */
-#define OP_Le 81 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */
-#define OP_Lt 82 /* same as TK_LT, synopsis: if r[P1]<r[P3] goto P2 */
-#define OP_Ge 83 /* same as TK_GE, synopsis: if r[P1]>=r[P3] goto P2 */
-#define OP_Insert 84 /* synopsis: intkey=r[P3] data=r[P2] */
-#define OP_BitAnd 85 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
-#define OP_BitOr 86 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
-#define OP_ShiftLeft 87 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
-#define OP_ShiftRight 88 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
-#define OP_Add 89 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
-#define OP_Subtract 90 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
-#define OP_Multiply 91 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
-#define OP_Divide 92 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
-#define OP_Remainder 93 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
-#define OP_Concat 94 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
-#define OP_InsertInt 95 /* synopsis: intkey=P3 data=r[P2] */
-#define OP_BitNot 96 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */
-#define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */
-#define OP_Delete 98
-#define OP_ResetCount 99
-#define OP_SorterCompare 100 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
-#define OP_SorterData 101 /* synopsis: r[P2]=data */
-#define OP_RowKey 102 /* synopsis: r[P2]=key */
-#define OP_RowData 103 /* synopsis: r[P2]=data */
-#define OP_Rowid 104 /* synopsis: r[P2]=rowid */
-#define OP_NullRow 105
-#define OP_Last 106
-#define OP_SorterSort 107
-#define OP_Sort 108
-#define OP_Rewind 109
-#define OP_SorterInsert 110
-#define OP_IdxInsert 111 /* synopsis: key=r[P2] */
-#define OP_IdxDelete 112 /* synopsis: key=r[P2@P3] */
-#define OP_IdxRowid 113 /* synopsis: r[P2]=rowid */
-#define OP_IdxLE 114 /* synopsis: key=r[P3@P4] */
-#define OP_IdxGT 115 /* synopsis: key=r[P3@P4] */
-#define OP_IdxLT 116 /* synopsis: key=r[P3@P4] */
-#define OP_IdxGE 117 /* synopsis: key=r[P3@P4] */
-#define OP_Destroy 118
-#define OP_Clear 119
-#define OP_ResetSorter 120
-#define OP_CreateIndex 121 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_CreateTable 122 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_ParseSchema 123
-#define OP_LoadAnalysis 124
-#define OP_DropTable 125
-#define OP_DropIndex 126
-#define OP_DropTrigger 127
-#define OP_IntegrityCk 128
-#define OP_RowSetAdd 129 /* synopsis: rowset(P1)=r[P2] */
-#define OP_RowSetRead 130 /* synopsis: r[P3]=rowset(P1) */
-#define OP_RowSetTest 131 /* synopsis: if r[P3] in rowset(P1) goto P2 */
-#define OP_Program 132
-#define OP_Real 133 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
-#define OP_Param 134
-#define OP_FkCounter 135 /* synopsis: fkctr[P1]+=P2 */
-#define OP_FkIfZero 136 /* synopsis: if fkctr[P1]==0 goto P2 */
-#define OP_MemMax 137 /* synopsis: r[P1]=max(r[P1],r[P2]) */
-#define OP_IfPos 138 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
-#define OP_SetIfNotPos 139 /* synopsis: if r[P1]<=0 then r[P2]=P3 */
-#define OP_IfNotZero 140 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */
-#define OP_DecrJumpZero 141 /* synopsis: if (--r[P1])==0 goto P2 */
-#define OP_JumpZeroIncr 142 /* synopsis: if (r[P1]++)==0 ) goto P2 */
-#define OP_AggStep0 143 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggStep 144 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggFinal 145 /* synopsis: accum=r[P1] N=P2 */
-#define OP_IncrVacuum 146
-#define OP_Expire 147
-#define OP_TableLock 148 /* synopsis: iDb=P1 root=P2 write=P3 */
-#define OP_VBegin 149
-#define OP_VCreate 150
-#define OP_VDestroy 151
-#define OP_VOpen 152
-#define OP_VColumn 153 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VNext 154
-#define OP_VRename 155
-#define OP_Pagecount 156
-#define OP_MaxPgcnt 157
-#define OP_Init 158 /* synopsis: Start at P2 */
-#define OP_Noop 159
-#define OP_Explain 160
+#define OP_Blob 27 /* synopsis: r[P2]=P4 (len=P1) */
+#define OP_Variable 28 /* synopsis: r[P2]=parameter(P1,P4) */
+#define OP_Move 29 /* synopsis: r[P2@P3]=r[P1@P3] */
+#define OP_Copy 30 /* synopsis: r[P2@P3]=r[P1@P3] */
+#define OP_SCopy 31 /* synopsis: r[P2]=r[P1] */
+#define OP_ResultRow 32 /* synopsis: output=r[P1@P2] */
+#define OP_CollSeq 33
+#define OP_AddImm 34 /* synopsis: r[P1]=r[P1]+P2 */
+#define OP_MustBeInt 35
+#define OP_RealAffinity 36
+#define OP_Permutation 37
+#define OP_Compare 38
+#define OP_Jump 39
+#define OP_Once 40
+#define OP_If 41
+#define OP_IfNot 42
+#define OP_Column 43 /* synopsis: r[P3]=PX */
+#define OP_Affinity 44 /* synopsis: affinity(r[P1@P2]) */
+#define OP_MakeRecord 45 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
+#define OP_Count 46 /* synopsis: r[P2]=count() */
+#define OP_ReadCookie 47
+#define OP_SetCookie 48
+#define OP_VerifyCookie 49
+#define OP_OpenRead 50 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenWrite 51 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenAutoindex 52 /* synopsis: nColumn=P2 */
+#define OP_OpenEphemeral 53 /* synopsis: nColumn=P2 */
+#define OP_SorterOpen 54
+#define OP_OpenPseudo 55 /* synopsis: content in r[P2@P3] */
+#define OP_Close 56
+#define OP_SeekLt 57 /* synopsis: key=r[P3@P4] */
+#define OP_SeekLe 58 /* synopsis: key=r[P3@P4] */
+#define OP_SeekGe 59 /* synopsis: key=r[P3@P4] */
+#define OP_SeekGt 60 /* synopsis: key=r[P3@P4] */
+#define OP_Seek 61 /* synopsis: intkey=r[P2] */
+#define OP_NoConflict 62 /* synopsis: key=r[P3@P4] */
+#define OP_NotFound 63 /* synopsis: key=r[P3@P4] */
+#define OP_Found 64 /* synopsis: key=r[P3@P4] */
+#define OP_NotExists 65 /* synopsis: intkey=r[P3] */
+#define OP_Sequence 66 /* synopsis: r[P2]=rowid */
+#define OP_NewRowid 67 /* synopsis: r[P2]=rowid */
+#define OP_Insert 68 /* synopsis: intkey=r[P3] data=r[P2] */
+#define OP_Or 69 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
+#define OP_And 70 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
+#define OP_InsertInt 71 /* synopsis: intkey=P3 data=r[P2] */
+#define OP_Delete 72
+#define OP_ResetCount 73
+#define OP_IsNull 74 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
+#define OP_NotNull 75 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
+#define OP_Ne 76 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */
+#define OP_Eq 77 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */
+#define OP_Gt 78 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */
+#define OP_Le 79 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */
+#define OP_Lt 80 /* same as TK_LT, synopsis: if r[P1]<r[P3] goto P2 */
+#define OP_Ge 81 /* same as TK_GE, synopsis: if r[P1]>=r[P3] goto P2 */
+#define OP_SorterCompare 82 /* synopsis: if key(P1)!=rtrim(r[P3],P4) goto P2 */
+#define OP_BitAnd 83 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
+#define OP_BitOr 84 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
+#define OP_ShiftLeft 85 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
+#define OP_ShiftRight 86 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
+#define OP_Add 87 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
+#define OP_Subtract 88 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
+#define OP_Multiply 89 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
+#define OP_Divide 90 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
+#define OP_Remainder 91 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
+#define OP_Concat 92 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
+#define OP_SorterData 93 /* synopsis: r[P2]=data */
+#define OP_BitNot 94 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */
+#define OP_String8 95 /* same as TK_STRING, synopsis: r[P2]='P4' */
+#define OP_RowKey 96 /* synopsis: r[P2]=key */
+#define OP_RowData 97 /* synopsis: r[P2]=data */
+#define OP_Rowid 98 /* synopsis: r[P2]=rowid */
+#define OP_NullRow 99
+#define OP_Last 100
+#define OP_SorterSort 101
+#define OP_Sort 102
+#define OP_Rewind 103
+#define OP_SorterInsert 104
+#define OP_IdxInsert 105 /* synopsis: key=r[P2] */
+#define OP_IdxDelete 106 /* synopsis: key=r[P2@P3] */
+#define OP_IdxRowid 107 /* synopsis: r[P2]=rowid */
+#define OP_IdxLT 108 /* synopsis: key=r[P3@P4] */
+#define OP_IdxGE 109 /* synopsis: key=r[P3@P4] */
+#define OP_Destroy 110
+#define OP_Clear 111
+#define OP_CreateIndex 112 /* synopsis: r[P2]=root iDb=P1 */
+#define OP_CreateTable 113 /* synopsis: r[P2]=root iDb=P1 */
+#define OP_ParseSchema 114
+#define OP_LoadAnalysis 115
+#define OP_DropTable 116
+#define OP_DropIndex 117
+#define OP_DropTrigger 118
+#define OP_IntegrityCk 119
+#define OP_RowSetAdd 120 /* synopsis: rowset(P1)=r[P2] */
+#define OP_RowSetRead 121 /* synopsis: r[P3]=rowset(P1) */
+#define OP_RowSetTest 122 /* synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Program 123
+#define OP_Param 124
+#define OP_FkCounter 125 /* synopsis: fkctr[P1]+=P2 */
+#define OP_FkIfZero 126 /* synopsis: if fkctr[P1]==0 goto P2 */
+#define OP_MemMax 127 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_IfPos 128 /* synopsis: if r[P1]>0 goto P2 */
+#define OP_IfNeg 129 /* synopsis: if r[P1]<0 goto P2 */
+#define OP_IfZero 130 /* synopsis: r[P1]+=P3, if r[P1]==0 goto P2 */
+#define OP_Real 131 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
+#define OP_AggFinal 132 /* synopsis: accum=r[P1] N=P2 */
+#define OP_IncrVacuum 133
+#define OP_Expire 134
+#define OP_TableLock 135 /* synopsis: iDb=P1 root=P2 write=P3 */
+#define OP_VBegin 136
+#define OP_VCreate 137
+#define OP_VDestroy 138
+#define OP_VOpen 139
+#define OP_VColumn 140 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VNext 141
+#define OP_ToText 142 /* same as TK_TO_TEXT */
+#define OP_ToBlob 143 /* same as TK_TO_BLOB */
+#define OP_ToNumeric 144 /* same as TK_TO_NUMERIC */
+#define OP_ToInt 145 /* same as TK_TO_INT */
+#define OP_ToReal 146 /* same as TK_TO_REAL */
+#define OP_VRename 147
+#define OP_Pagecount 148
+#define OP_MaxPgcnt 149
+#define OP_Trace 150
+#define OP_Noop 151
+#define OP_Explain 152
/* Properties such as "out2" or "jump" that are specified in
@@ -10600,33 +9185,33 @@ typedef struct VdbeOpList VdbeOpList;
** are encoded into bitvectors as follows:
*/
#define OPFLG_JUMP 0x0001 /* jump: P2 holds jmp target */
-#define OPFLG_IN1 0x0002 /* in1: P1 is an input */
-#define OPFLG_IN2 0x0004 /* in2: P2 is an input */
-#define OPFLG_IN3 0x0008 /* in3: P3 is an input */
-#define OPFLG_OUT2 0x0010 /* out2: P2 is an output */
-#define OPFLG_OUT3 0x0020 /* out3: P3 is an output */
+#define OPFLG_OUT2_PRERELEASE 0x0002 /* out2-prerelease: */
+#define OPFLG_IN1 0x0004 /* in1: P1 is an input */
+#define OPFLG_IN2 0x0008 /* in2: P2 is an input */
+#define OPFLG_IN3 0x0010 /* in3: P3 is an input */
+#define OPFLG_OUT2 0x0020 /* out2: P2 is an output */
+#define OPFLG_OUT3 0x0040 /* out3: P3 is an output */
#define OPFLG_INITIALIZER {\
-/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,\
-/* 8 */ 0x01, 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01,\
-/* 16 */ 0x02, 0x01, 0x02, 0x12, 0x03, 0x08, 0x00, 0x10,\
-/* 24 */ 0x10, 0x10, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00,\
-/* 32 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x03, 0x02,\
-/* 40 */ 0x02, 0x00, 0x00, 0x01, 0x01, 0x03, 0x03, 0x00,\
-/* 48 */ 0x00, 0x00, 0x10, 0x10, 0x08, 0x00, 0x00, 0x00,\
-/* 56 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,\
-/* 64 */ 0x09, 0x09, 0x09, 0x04, 0x09, 0x09, 0x09, 0x26,\
-/* 72 */ 0x26, 0x09, 0x10, 0x10, 0x03, 0x03, 0x0b, 0x0b,\
-/* 80 */ 0x0b, 0x0b, 0x0b, 0x0b, 0x00, 0x26, 0x26, 0x26,\
-/* 88 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00,\
-/* 96 */ 0x12, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 104 */ 0x10, 0x00, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04,\
-/* 112 */ 0x00, 0x10, 0x01, 0x01, 0x01, 0x01, 0x10, 0x00,\
-/* 120 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 128 */ 0x00, 0x06, 0x23, 0x0b, 0x01, 0x10, 0x10, 0x00,\
-/* 136 */ 0x01, 0x04, 0x03, 0x06, 0x03, 0x03, 0x03, 0x00,\
-/* 144 */ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 152 */ 0x00, 0x00, 0x01, 0x00, 0x10, 0x10, 0x01, 0x00,\
-/* 160 */ 0x00,}
+/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01,\
+/* 8 */ 0x01, 0x01, 0x00, 0x00, 0x02, 0x00, 0x01, 0x00,\
+/* 16 */ 0x01, 0x01, 0x04, 0x24, 0x04, 0x10, 0x00, 0x02,\
+/* 24 */ 0x02, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x20,\
+/* 32 */ 0x00, 0x00, 0x04, 0x05, 0x04, 0x00, 0x00, 0x01,\
+/* 40 */ 0x01, 0x05, 0x05, 0x00, 0x00, 0x00, 0x02, 0x02,\
+/* 48 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 56 */ 0x00, 0x11, 0x11, 0x11, 0x11, 0x08, 0x11, 0x11,\
+/* 64 */ 0x11, 0x11, 0x02, 0x02, 0x00, 0x4c, 0x4c, 0x00,\
+/* 72 */ 0x00, 0x00, 0x05, 0x05, 0x15, 0x15, 0x15, 0x15,\
+/* 80 */ 0x15, 0x15, 0x00, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,\
+/* 88 */ 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x00, 0x24, 0x02,\
+/* 96 */ 0x00, 0x00, 0x02, 0x00, 0x01, 0x01, 0x01, 0x01,\
+/* 104 */ 0x08, 0x08, 0x00, 0x02, 0x01, 0x01, 0x02, 0x00,\
+/* 112 */ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 120 */ 0x0c, 0x45, 0x15, 0x01, 0x02, 0x00, 0x01, 0x08,\
+/* 128 */ 0x05, 0x05, 0x05, 0x02, 0x00, 0x01, 0x00, 0x00,\
+/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x04,\
+/* 144 */ 0x04, 0x04, 0x04, 0x00, 0x02, 0x02, 0x00, 0x00,\
+/* 152 */ 0x00,}
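OPFLG_INITIALIZER is simply a property byte per opcode, indexed by opcode number; a stand-alone version of the same pattern, with hypothetical demo opcodes rather than SQLite's real tables:

#include <stdio.h>

#define DEMO_OP_GOTO  0
#define DEMO_OP_ADD   1
#define DEMO_OP_HALT  2

#define DEMO_FLG_JUMP 0x01   /* P2 holds a jump target */
#define DEMO_FLG_IN1  0x02   /* P1 is an input register */

static const unsigned char demoOpProperty[] = {
  /* GOTO */ DEMO_FLG_JUMP,
  /* ADD  */ DEMO_FLG_IN1,
  /* HALT */ 0x00,
};

int main(void){
  int op = DEMO_OP_GOTO;
  if( demoOpProperty[op] & DEMO_FLG_JUMP ){
    printf("opcode %d is a jump; resolve its P2 target\n", op);
  }
  return 0;
}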
/************** End of opcodes.h *********************************************/
/************** Continuing where we left off in vdbe.h ***********************/
@@ -10635,27 +9220,21 @@ typedef struct VdbeOpList VdbeOpList;
** Prototypes for the VDBE interface. See comments on the implementation
** for a description of what each of these routines does.
*/
-SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse*);
+SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(sqlite3*);
SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int);
SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int);
SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe*,int,const char*);
-SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe*,int,const char*,...);
SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int);
SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(Vdbe*,int,int,int,int,const u8*,int);
SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno);
+SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp);
SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*);
-SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, u32 addr, u8);
SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1);
SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2);
SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3);
SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5);
SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe*, int addr);
-SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op);
SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N);
SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*);
SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int);
@@ -10686,16 +9265,11 @@ SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int);
#ifndef SQLITE_OMIT_TRACE
SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*);
#endif
-SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*);
SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int);
SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **);
-typedef int (*RecordCompare)(int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*);
-
#ifndef SQLITE_OMIT_TRIGGER
SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *);
#endif
@@ -10723,49 +9297,6 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
# define VdbeModuleComment(X)
#endif
-/*
-** The VdbeCoverage macros are used to set a coverage testing point
-** for VDBE branch instructions. The coverage testing points are line
-** numbers in the sqlite3.c source file. VDBE branch coverage testing
-** only works with an amalagmation build. That's ok since a VDBE branch
-** coverage build designed for testing the test suite only. No application
-** should ever ship with VDBE branch coverage measuring turned on.
-**
-** VdbeCoverage(v) // Mark the previously coded instruction
-** // as a branch
-**
-** VdbeCoverageIf(v, conditional) // Mark previous if conditional true
-**
-** VdbeCoverageAlwaysTaken(v) // Previous branch is always taken
-**
-** VdbeCoverageNeverTaken(v) // Previous branch is never taken
-**
-** Every VDBE branch operation must be tagged with one of the macros above.
-** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
-** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch()
-** routine in vdbe.c, alerting the developer to the missed tag.
-*/
-#ifdef SQLITE_VDBE_COVERAGE
-SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int);
-# define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__)
-# define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__)
-# define VdbeCoverageAlwaysTaken(v) sqlite3VdbeSetLineNumber(v,2);
-# define VdbeCoverageNeverTaken(v) sqlite3VdbeSetLineNumber(v,1);
-# define VDBE_OFFSET_LINENO(x) (__LINE__+x)
-#else
-# define VdbeCoverage(v)
-# define VdbeCoverageIf(v,x)
-# define VdbeCoverageAlwaysTaken(v)
-# define VdbeCoverageNeverTaken(v)
-# define VDBE_OFFSET_LINENO(x) 0
-#endif
-
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
-SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*);
-#else
-# define sqlite3VdbeScanStatus(a,b,c,d,e)
-#endif
-
#endif
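Stripped of the VDBE specifics, the coverage tagging removed above is just a macro that stamps the current __LINE__ and compiles away when the feature is off; a stand-alone sketch with hypothetical names:

#include <stdio.h>

#ifdef DEMO_COVERAGE
static void demoSetLine(int iLine){ printf("branch tagged at line %d\n", iLine); }
# define DemoCoverage()        demoSetLine(__LINE__)
# define DemoCoverageIf(x)     if(x) demoSetLine(__LINE__)
#else
# define DemoCoverage()
# define DemoCoverageIf(x)
#endif

int main(void){
  /* With -DDEMO_COVERAGE the tags print; without it they compile to nothing. */
  DemoCoverage();
  DemoCoverageIf(1);
  return 0;
}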
/************** End of vdbe.h ************************************************/
@@ -10892,9 +9423,6 @@ SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*);
/* Functions used to configure a Pager object. */
SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *);
SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int);
-#ifdef SQLITE_HAS_CODEC
-SQLITE_PRIVATE void sqlite3PagerAlignReserve(Pager*,Pager*);
-#endif
SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int);
SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int);
SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64);
@@ -10913,7 +9441,6 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(Pager *pPager, Pgno pgno, DbPage **ppPage
SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno);
SQLITE_PRIVATE void sqlite3PagerRef(DbPage*);
SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*);
-SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage*);
/* Operations on page references. */
SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*);
@@ -10928,7 +9455,7 @@ SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*);
SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int);
SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int);
SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*);
-SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster);
+SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager);
SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*);
SQLITE_PRIVATE int sqlite3PagerRollback(Pager*);
SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n);
@@ -10949,10 +9476,7 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager);
/* Functions used to query pager state and configuration. */
SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*);
-SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager*);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*);
-#endif
+SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*);
SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*);
SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager*, int);
SQLITE_PRIVATE const sqlite3_vfs *sqlite3PagerVfs(Pager*);
@@ -10968,8 +9492,6 @@ SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *);
/* Functions used to truncate the database file. */
SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno);
-SQLITE_PRIVATE void sqlite3PagerRekey(DbPage*, Pgno, u16);
-
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL)
SQLITE_PRIVATE void *sqlite3PagerCodec(DbPage *);
#endif
@@ -11043,14 +9565,14 @@ struct PgHdr {
};
/* Bit values for PgHdr.flags */
-#define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */
-#define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */
-#define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */
-#define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before
- ** writing this page to the database */
-#define PGHDR_NEED_READ 0x010 /* Content is unread */
-#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */
-#define PGHDR_MMAP 0x040 /* This is an mmap page object */
+#define PGHDR_DIRTY 0x002 /* Page has changed */
+#define PGHDR_NEED_SYNC 0x004 /* Fsync the rollback journal before
+ ** writing this page to the database */
+#define PGHDR_NEED_READ 0x008 /* Content is unread */
+#define PGHDR_REUSE_UNLIKELY 0x010 /* A hint that reuse is unlikely */
+#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */
+
+#define PGHDR_MMAP 0x040 /* This is an mmap page object */
/* Initialize and shutdown the page cache subsystem */
SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
@@ -11065,7 +9587,7 @@ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n);
** Under memory stress, invoke xStress to try to make pages clean.
** Only clean and unpinned pages can be reclaimed.
*/
-SQLITE_PRIVATE int sqlite3PcacheOpen(
+SQLITE_PRIVATE void sqlite3PcacheOpen(
int szPage, /* Size of every page */
int szExtra, /* Extra space associated with each page */
int bPurgeable, /* True if pages are on backing store */
@@ -11075,7 +9597,7 @@ SQLITE_PRIVATE int sqlite3PcacheOpen(
);
/* Modify the page-size after the cache has been created. */
-SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *, int);
+SQLITE_PRIVATE void sqlite3PcacheSetPageSize(PCache *, int);
/* Return the size in bytes of a PCache object. Used to preallocate
** storage space.
@@ -11085,9 +9607,7 @@ SQLITE_PRIVATE int sqlite3PcacheSize(void);
/* One release per successful fetch. Page is pinned until released.
** Reference counted.
*/
-SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(PCache*, Pgno, int createFlag);
-SQLITE_PRIVATE int sqlite3PcacheFetchStress(PCache*, Pgno, sqlite3_pcache_page**);
-SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(PCache*, Pgno, sqlite3_pcache_page *pPage);
+SQLITE_PRIVATE int sqlite3PcacheFetch(PCache*, Pgno, int createFlag, PgHdr**);
SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*);
SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */
@@ -11157,10 +9677,6 @@ SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*);
SQLITE_PRIVATE void sqlite3PCacheSetDefault(void);
-/* Return the header size */
-SQLITE_PRIVATE int sqlite3HeaderSizePcache(void);
-SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void);
-
#endif /* _PCACHE_H_ */
/************** End of pcache.h **********************************************/
@@ -11191,71 +9707,83 @@ SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void);
#define _SQLITE_OS_H_
/*
-** Attempt to automatically detect the operating system and setup the
-** necessary pre-processor macros for it.
+** Figure out if we are dealing with Unix, Windows, or some other
+** operating system. After the following block of preprocess macros,
+** all of SQLITE_OS_UNIX, SQLITE_OS_WIN, and SQLITE_OS_OTHER
+** will defined to either 1 or 0. One of the four will be 1. The other
+** three will be 0.
*/
-/************** Include os_setup.h in the middle of os.h *********************/
-/************** Begin file os_setup.h ****************************************/
+#if defined(SQLITE_OS_OTHER)
+# if SQLITE_OS_OTHER==1
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# else
+# undef SQLITE_OS_OTHER
+# endif
+#endif
+#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
+# define SQLITE_OS_OTHER 0
+# ifndef SQLITE_OS_WIN
+# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__)
+# define SQLITE_OS_WIN 1
+# define SQLITE_OS_UNIX 0
+# else
+# define SQLITE_OS_WIN 0
+# define SQLITE_OS_UNIX 1
+# endif
+# else
+# define SQLITE_OS_UNIX 0
+# endif
+#else
+# ifndef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# endif
+#endif
+
+#if SQLITE_OS_WIN
+# include <windows.h>
+#endif
+
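Condensed into a stand-alone program, the platform probe restored above resolves like this (demo macro names, not the real SQLITE_OS_* symbols):

#include <stdio.h>

#if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
    defined(__MINGW32__) || defined(__BORLANDC__)
# define DEMO_OS_WIN 1
# define DEMO_OS_UNIX 0
#else
# define DEMO_OS_WIN 0
# define DEMO_OS_UNIX 1
#endif

int main(void){
  printf("DEMO_OS_WIN=%d DEMO_OS_UNIX=%d\n", DEMO_OS_WIN, DEMO_OS_UNIX);
  return 0;
}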
/*
-** 2013 November 25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
+** Determine if we are dealing with Windows NT.
**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
+** We ought to be able to determine if we are compiling for win98 or winNT
+** using the _WIN32_WINNT macro as follows:
**
-******************************************************************************
+** #if defined(_WIN32_WINNT)
+** # define SQLITE_OS_WINNT 1
+** #else
+** # define SQLITE_OS_WINNT 0
+** #endif
**
-** This file contains pre-processor directives related to operating system
-** detection and/or setup.
+** However, vs2005 does not set _WIN32_WINNT by default, as it ought to,
+** so the above test does not work. We'll just assume that everything is
+** winNT unless the programmer explicitly says otherwise by setting
+** SQLITE_OS_WINNT to 0.
*/
-#ifndef _OS_SETUP_H_
-#define _OS_SETUP_H_
+#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT)
+# define SQLITE_OS_WINNT 1
+#endif
/*
-** Figure out if we are dealing with Unix, Windows, or some other operating
-** system.
-**
-** After the following block of preprocess macros, all of SQLITE_OS_UNIX,
-** SQLITE_OS_WIN, and SQLITE_OS_OTHER will defined to either 1 or 0. One of
-** the three will be 1. The other two will be 0.
+** Determine if we are dealing with WindowsCE - which has a much
+** reduced API.
*/
-#if defined(SQLITE_OS_OTHER)
-# if SQLITE_OS_OTHER==1
-# undef SQLITE_OS_UNIX
-# define SQLITE_OS_UNIX 0
-# undef SQLITE_OS_WIN
-# define SQLITE_OS_WIN 0
-# else
-# undef SQLITE_OS_OTHER
-# endif
-#endif
-#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
-# define SQLITE_OS_OTHER 0
-# ifndef SQLITE_OS_WIN
-# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
- defined(__MINGW32__) || defined(__BORLANDC__)
-# define SQLITE_OS_WIN 1
-# define SQLITE_OS_UNIX 0
-# else
-# define SQLITE_OS_WIN 0
-# define SQLITE_OS_UNIX 1
-# endif
-# else
-# define SQLITE_OS_UNIX 0
-# endif
+#if defined(_WIN32_WCE)
+# define SQLITE_OS_WINCE 1
#else
-# ifndef SQLITE_OS_WIN
-# define SQLITE_OS_WIN 0
-# endif
+# define SQLITE_OS_WINCE 0
#endif
-#endif /* _OS_SETUP_H_ */
-
-/************** End of os_setup.h ********************************************/
-/************** Continuing where we left off in os.h *************************/
+/*
+** Determine if we are dealing with WinRT, which provides only a subset of
+** the full Win32 API.
+*/
+#if !defined(SQLITE_OS_WINRT)
+# define SQLITE_OS_WINRT 0
+#endif
/* If the SET_FULLSYNC macro is not defined above, then make it
** a no-op
@@ -11351,7 +9879,7 @@ SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void);
** shared locks begins at SHARED_FIRST.
**
** The same locking strategy and
-** byte ranges are used for Unix. This leaves open the possibility of having
+** byte ranges are used for Unix. This leaves open the possibility of having
** clients on win95, winNT, and unix all talking to the same shared file
** and all locking correctly. To do so would require that samba (or whatever
** tool is being used for file sharing) implements locks correctly between
@@ -11470,7 +9998,7 @@ SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *);
** Figure out what version of the code to use. The choices are
**
** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The
-** mutexes implementation cannot be overridden
+** mutexes implementation cannot be overridden
** at start-time.
**
** SQLITE_MUTEX_NOOP For single-threaded applications. No
@@ -11559,7 +10087,7 @@ struct Schema {
Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */
u8 file_format; /* Schema format version for this file */
u8 enc; /* Text encoding used by this database */
- u16 schemaFlags; /* Flags associated with this schema */
+ u16 flags; /* Flags associated with this schema */
int cache_size; /* Number of pages to use in the cache */
};
@@ -11567,10 +10095,10 @@ struct Schema {
** These macros can be used to test, set, or clear bits in the
** Db.pSchema->flags field.
*/
-#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))==(P))
-#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))!=0)
-#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags|=(P)
-#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags&=~(P)
+#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->flags&(P))==(P))
+#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->flags&(P))!=0)
+#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->flags|=(P)
+#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->flags&=~(P)
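The four macros restored above are a plain bit-flag pattern over the per-database Schema flags word. A self-contained sketch of the same pattern (not from the diff; the DEMO_* property bits and DemoSchema type are illustrative stand-ins) looks like this:

#include <assert.h>

#define DEMO_SchemaLoaded 0x0001   /* hypothetical property bit */
#define DEMO_Empty        0x0002   /* hypothetical property bit */

struct DemoSchema { unsigned short flags; };

#define DemoHasProperty(S,P)   (((S)->flags & (P))==(P))
#define DemoSetProperty(S,P)   ((S)->flags |= (P))
#define DemoClearProperty(S,P) ((S)->flags &= ~(P))

int main(void){
  struct DemoSchema s = {0};
  DemoSetProperty(&s, DEMO_SchemaLoaded);
  assert(  DemoHasProperty(&s, DEMO_SchemaLoaded) );
  assert( !DemoHasProperty(&s, DEMO_Empty) );
  DemoClearProperty(&s, DEMO_SchemaLoaded);
  assert( !DemoHasProperty(&s, DEMO_SchemaLoaded) );
  return 0;
}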
/*
** Allowed values for the DB.pSchema->flags field.
@@ -11590,7 +10118,7 @@ struct Schema {
** The number of different kinds of things that can be limited
** using the sqlite3_limit() interface.
*/
-#define SQLITE_N_LIMIT (SQLITE_LIMIT_WORKER_THREADS+1)
+#define SQLITE_N_LIMIT (SQLITE_LIMIT_TRIGGER_DEPTH+1)
/*
** Lookaside malloc is a set of fixed-size buffers that can be used
@@ -11637,45 +10165,6 @@ struct FuncDefHash {
FuncDef *a[23]; /* Hash table for functions */
};
-#ifdef SQLITE_USER_AUTHENTICATION
-/*
-** Information held in the "sqlite3" database connection object and used
-** to manage user authentication.
-*/
-typedef struct sqlite3_userauth sqlite3_userauth;
-struct sqlite3_userauth {
- u8 authLevel; /* Current authentication level */
- int nAuthPW; /* Size of the zAuthPW in bytes */
- char *zAuthPW; /* Password used to authenticate */
- char *zAuthUser; /* User name used to authenticate */
-};
-
-/* Allowed values for sqlite3_userauth.authLevel */
-#define UAUTH_Unknown 0 /* Authentication not yet checked */
-#define UAUTH_Fail 1 /* User authentication failed */
-#define UAUTH_User 2 /* Authenticated as a normal user */
-#define UAUTH_Admin 3 /* Authenticated as an administrator */
-
-/* Functions used only by user authorization logic */
-SQLITE_PRIVATE int sqlite3UserAuthTable(const char*);
-SQLITE_PRIVATE int sqlite3UserAuthCheckLogin(sqlite3*,const char*,u8*);
-SQLITE_PRIVATE void sqlite3UserAuthInit(sqlite3*);
-SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**);
-
-#endif /* SQLITE_USER_AUTHENTICATION */
-
-/*
-** typedef for the authorization callback function.
-*/
-#ifdef SQLITE_USER_AUTHENTICATION
- typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
- const char*, const char*);
-#else
- typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
- const char*);
-#endif
-
-
/*
** Each database connection is an instance of the following structure.
*/
@@ -11693,7 +10182,6 @@ struct sqlite3 {
int errCode; /* Most recent error code (SQLITE_*) */
int errMask; /* & result codes with this before returning */
u16 dbOptFlags; /* Flags to enable/disable optimizations */
- u8 enc; /* Text encoding */
u8 autoCommit; /* The auto-commit flag. */
u8 temp_store; /* 1: file 2: memory 0: default */
u8 mallocFailed; /* True if we have seen a malloc failure */
@@ -11707,19 +10195,16 @@ struct sqlite3 {
int nChange; /* Value returned by sqlite3_changes() */
int nTotalChange; /* Value returned by sqlite3_total_changes() */
int aLimit[SQLITE_N_LIMIT]; /* Limits */
- int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */
struct sqlite3InitInfo { /* Information used during initialization */
int newTnum; /* Rootpage of table being initialized */
u8 iDb; /* Which db file is being initialized */
u8 busy; /* TRUE if currently initializing */
u8 orphanTrigger; /* Last statement is orphaned TEMP trigger */
- u8 imposterTable; /* Building an imposter table */
} init;
int nVdbeActive; /* Number of VDBEs currently running */
int nVdbeRead; /* Number of active VDBEs that read or write */
int nVdbeWrite; /* Number of active VDBEs that read and write */
int nVdbeExec; /* Number of nested calls to VdbeExec() */
- int nVDestroy; /* Number of active OP_VDestroy operations */
int nExtension; /* Number of loaded extensions */
void **aExtension; /* Array of shared library handles */
void (*xTrace)(void*,const char*); /* Trace function */
@@ -11746,7 +10231,8 @@ struct sqlite3 {
} u1;
Lookaside lookaside; /* Lookaside malloc configuration */
#ifndef SQLITE_OMIT_AUTHORIZATION
- sqlite3_xauth xAuth; /* Access authorization function */
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*);
+ /* Access authorization function */
void *pAuthArg; /* 1st argument to the access auth function */
#endif
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
@@ -11772,6 +10258,7 @@ struct sqlite3 {
i64 nDeferredCons; /* Net deferred constraints this transaction. */
i64 nDeferredImmCons; /* Net deferred immediate constraints */
int *pnBytesFreed; /* If not NULL, increment this in DbFree() */
+
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
/* The following variables are all protected by the STATIC_MASTER
** mutex, not by sqlite3.mutex. They are used by code in notify.c.
@@ -11789,16 +10276,12 @@ struct sqlite3 {
void (*xUnlockNotify)(void **, int); /* Unlock notify callback */
sqlite3 *pNextBlocked; /* Next in list of all blocked connections */
#endif
-#ifdef SQLITE_USER_AUTHENTICATION
- sqlite3_userauth auth; /* User authentication information */
-#endif
};
/*
** A macro to discover the encoding of a database.
*/
-#define SCHEMA_ENC(db) ((db)->aDb[0].pSchema->enc)
-#define ENC(db) ((db)->enc)
+#define ENC(db) ((db)->aDb[0].pSchema->enc)
/*
** Possible values for the sqlite3.flags.
@@ -11833,8 +10316,6 @@ struct sqlite3 {
#define SQLITE_DeferFKs 0x01000000 /* Defer all FK constraints */
#define SQLITE_QueryOnly 0x02000000 /* Disable database changes */
#define SQLITE_VdbeEQP 0x04000000 /* Debug EXPLAIN QUERY PLAN */
-#define SQLITE_Vacuum 0x08000000 /* Currently in a VACUUM */
-#define SQLITE_CellSizeCk 0x10000000 /* Check btree cell sizes on load */
/*
@@ -11846,14 +10327,15 @@ struct sqlite3 {
#define SQLITE_ColumnCache 0x0002 /* Column cache */
#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */
#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */
-/* not used 0x0010 // Was: SQLITE_IdxRealAsInt */
+#define SQLITE_IdxRealAsInt 0x0010 /* Store REAL as INT in indices */
#define SQLITE_DistinctOpt 0x0020 /* DISTINCT using indexes */
#define SQLITE_CoverIdxScan 0x0040 /* Covering index scans */
#define SQLITE_OrderByIdxJoin 0x0080 /* ORDER BY of joins via index */
#define SQLITE_SubqCoroutine 0x0100 /* Evaluate subqueries as coroutines */
#define SQLITE_Transitive 0x0200 /* Transitive constraints */
#define SQLITE_OmitNoopJoin 0x0400 /* Omit unused tables in joins */
-#define SQLITE_Stat34 0x0800 /* Use STAT3 or STAT4 data */
+#define SQLITE_Stat3 0x0800 /* Use the SQLITE_STAT3 table */
+#define SQLITE_AdjustOutEst 0x1000 /* Adjust output estimates using WHERE */
#define SQLITE_AllOpts 0xffff /* All optimizations */
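Each bit above disables one optimization when set in sqlite3.dbOptFlags; elsewhere in this file the test is wrapped in the OptimizationEnabled() macro (visible in the ConstFactorOk change below). A minimal sketch of the same gating idiom, with hypothetical DEMO_* names rather than the real flag values:

#include <stdio.h>

#define DEMO_QueryFlattener 0x0001
#define DEMO_ColumnCache    0x0002

static unsigned short demoOptFlags = 0;   /* stands in for db->dbOptFlags */

#define DemoOptimizationEnabled(mask) ((demoOptFlags & (mask))==0)

int main(void){
  demoOptFlags |= DEMO_ColumnCache;   /* a set bit disables that optimization */
  printf("flattener enabled: %d\n", DemoOptimizationEnabled(DEMO_QueryFlattener));
  printf("column cache enabled: %d\n", DemoOptimizationEnabled(DEMO_ColumnCache));
  return 0;
}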
/*
@@ -11871,7 +10353,8 @@ struct sqlite3 {
** Return true if it is OK to factor constant expressions into the initialization
** code. The argument is a Parse object for the code generator.
*/
-#define ConstFactorOk(P) ((P)->okConstFactor)
+#define ConstFactorOk(P) \
+ ((P)->cookieGoto>0 && OptimizationEnabled((P)->db,SQLITE_FactorOutConst))
/*
** Possible values for the sqlite.magic field.
@@ -11929,20 +10412,17 @@ struct FuncDestructor {
** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. There
** are assert() statements in the code to verify this.
*/
-#define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */
-#define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */
-#define SQLITE_FUNC_CASE 0x0008 /* Case-sensitive LIKE-type function */
-#define SQLITE_FUNC_EPHEM 0x0010 /* Ephemeral. Delete with VDBE */
-#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/
-#define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */
-#define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */
-#define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */
-#define SQLITE_FUNC_COALESCE 0x0200 /* Built-in coalesce() or ifnull() */
-#define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */
-#define SQLITE_FUNC_CONSTANT 0x0800 /* Constant inputs give a constant output */
-#define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */
-#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
- ** single query - might change over time */
+#define SQLITE_FUNC_ENCMASK 0x003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */
+#define SQLITE_FUNC_LIKE 0x004 /* Candidate for the LIKE optimization */
+#define SQLITE_FUNC_CASE 0x008 /* Case-sensitive LIKE-type function */
+#define SQLITE_FUNC_EPHEM 0x010 /* Ephemeral. Delete with VDBE */
+#define SQLITE_FUNC_NEEDCOLL 0x020 /* sqlite3GetFuncCollSeq() might be called */
+#define SQLITE_FUNC_LENGTH 0x040 /* Built-in length() function */
+#define SQLITE_FUNC_TYPEOF 0x080 /* Built-in typeof() function */
+#define SQLITE_FUNC_COUNT 0x100 /* Built-in count(*) aggregate */
+#define SQLITE_FUNC_COALESCE 0x200 /* Built-in coalesce() or ifnull() */
+#define SQLITE_FUNC_UNLIKELY 0x400 /* Built-in unlikely() function */
+#define SQLITE_FUNC_CONSTANT 0x800 /* Constant inputs give a constant output */
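The comment above this flag list notes that SQLITE_FUNC_LENGTH and SQLITE_FUNC_TYPEOF must keep the same values as OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG (0x40 and 0x80 later in this section), enforced by assert()s in the real code. A standalone sketch of that correspondence check, using copied DEMO_* constants rather than the real macros:

#define DEMO_FUNC_LENGTH      0x040
#define DEMO_FUNC_TYPEOF      0x080
#define DEMO_OPFLAG_LENGTHARG 0x40
#define DEMO_OPFLAG_TYPEOFARG 0x80

/* Compile-time checks: a negative array size fails the build if the values drift. */
typedef char demo_check_length[(DEMO_FUNC_LENGTH==DEMO_OPFLAG_LENGTHARG) ? 1 : -1];
typedef char demo_check_typeof[(DEMO_FUNC_TYPEOF==DEMO_OPFLAG_TYPEOFARG) ? 1 : -1];

int main(void){ return 0; }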
/*
** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
@@ -11958,12 +10438,6 @@ struct FuncDestructor {
** VFUNCTION(zName, nArg, iArg, bNC, xFunc)
** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag.
**
-** DFUNCTION(zName, nArg, iArg, bNC, xFunc)
-** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
-** adds the SQLITE_FUNC_SLOCHNG flag. Used for date & time functions
-** and functions like sqlite_version() that can change, but not during
-** a single query.
-**
** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal)
** Used to create an aggregate function definition implemented by
** the C functions xStep and xFinal. The first four parameters
@@ -11984,14 +10458,11 @@ struct FuncDestructor {
#define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \
{nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0}
-#define DFUNCTION(zName, nArg, iArg, bNC, xFunc) \
- {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0}
#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \
{nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\
SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0}
#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \
- {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
pArg, 0, xFunc, 0, 0, #zName, 0, 0}
#define LIKEFUNC(zName, nArg, arg, flags) \
{nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \
@@ -11999,9 +10470,6 @@ struct FuncDestructor {
#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \
{nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL), \
SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0,0}
-#define AGGREGATE2(zName, nArg, arg, nc, xStep, xFinal, extraFlags) \
- {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|extraFlags, \
- SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0,0}
/*
** All current savepoints are stored in a linked list starting at
@@ -12035,7 +10503,6 @@ struct Module {
const char *zName; /* Name passed to create_module() */
void *pAux; /* pAux passed to create_module() */
void (*xDestroy)(void *); /* Module destructor function */
- Table *pEpoTab; /* Eponymous table for this module */
};
/*
@@ -12081,7 +10548,6 @@ struct CollSeq {
*/
#define SQLITE_SO_ASC 0 /* Sort in ascending order */
#define SQLITE_SO_DESC 1 /* Sort in descending order */
-#define SQLITE_SO_UNDEFINED -1 /* No sort order specified */
/*
** Column affinity types.
@@ -12090,18 +10556,18 @@ struct CollSeq {
** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve
** the speed a little by numbering the values consecutively.
**
-** But rather than start with 0 or 1, we begin with 'A'. That way,
+** But rather than start with 0 or 1, we begin with 'a'. That way,
** when multiple affinity types are concatenated into a string and
** used as the P4 operand, they will be more readable.
**
** Note also that the numeric types are grouped together so that testing
-** for a numeric type is a single comparison. And the BLOB type is first.
+** for a numeric type is a single comparison.
*/
-#define SQLITE_AFF_BLOB 'A'
-#define SQLITE_AFF_TEXT 'B'
-#define SQLITE_AFF_NUMERIC 'C'
-#define SQLITE_AFF_INTEGER 'D'
-#define SQLITE_AFF_REAL 'E'
+#define SQLITE_AFF_TEXT 'a'
+#define SQLITE_AFF_NONE 'b'
+#define SQLITE_AFF_NUMERIC 'c'
+#define SQLITE_AFF_INTEGER 'd'
+#define SQLITE_AFF_REAL 'e'
#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)
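Because the affinity codes restored above are consecutive characters with the numeric kinds last, "is this affinity numeric?" reduces to a single comparison, exactly as sqlite3IsNumericAffinity() does. A runnable sketch (compile as C99; DEMO_* names are illustrative, not the real macros):

#include <stdio.h>

#define DEMO_AFF_TEXT    'a'
#define DEMO_AFF_NONE    'b'
#define DEMO_AFF_NUMERIC 'c'
#define DEMO_AFF_INTEGER 'd'
#define DEMO_AFF_REAL    'e'
#define demoIsNumericAffinity(X) ((X)>=DEMO_AFF_NUMERIC)

int main(void){
  char aff[] = { DEMO_AFF_TEXT, DEMO_AFF_NONE, DEMO_AFF_NUMERIC,
                 DEMO_AFF_INTEGER, DEMO_AFF_REAL };
  for(int i=0; i<5; i++){
    printf("affinity '%c' numeric=%d\n", aff[i], demoIsNumericAffinity(aff[i]));
  }
  return 0;
}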
@@ -12109,21 +10575,15 @@ struct CollSeq {
** The SQLITE_AFF_MASK value masks off the significant bits of an
** affinity value.
*/
-#define SQLITE_AFF_MASK 0x47
+#define SQLITE_AFF_MASK 0x67
/*
** Additional bit values that can be ORed with an affinity without
** changing the affinity.
-**
-** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL.
-** It causes an assert() to fire if either operand to a comparison
-** operator is NULL. It is added to certain comparison operators to
-** prove that the operands are always NOT NULL.
*/
-#define SQLITE_JUMPIFNULL 0x10 /* jumps if either operand is NULL */
-#define SQLITE_STOREP2 0x20 /* Store result in reg[P2] rather than jump */
+#define SQLITE_JUMPIFNULL 0x08 /* jumps if either operand is NULL */
+#define SQLITE_STOREP2 0x10 /* Store result in reg[P2] rather than jump */
#define SQLITE_NULLEQ 0x80 /* NULL=NULL */
-#define SQLITE_NOTNULL 0x90 /* Assert that operands are never NULL */
/*
** An object of this type is created for each virtual table present in
@@ -12178,8 +10638,34 @@ struct VTable {
};
/*
-** The schema for each SQL table and view is represented in memory
-** by an instance of the following structure.
+** Each SQL table is represented in memory by an instance of the
+** following structure.
+**
+** Table.zName is the name of the table. The case of the original
+** CREATE TABLE statement is stored, but case is not significant for
+** comparisons.
+**
+** Table.nCol is the number of columns in this table. Table.aCol is a
+** pointer to an array of Column structures, one for each column.
+**
+** If the table has an INTEGER PRIMARY KEY, then Table.iPKey is the index of
+** the column that is that key. Otherwise Table.iPKey is negative. Note
+** that the datatype of the PRIMARY KEY must be INTEGER for this field to
+** be set. An INTEGER PRIMARY KEY is used as the rowid for each row of
+** the table. If a table has no INTEGER PRIMARY KEY, then a random rowid
+** is generated for each row of the table. TF_HasPrimaryKey is set if
+** the table has any PRIMARY KEY, INTEGER or otherwise.
+**
+** Table.tnum is the page number for the root BTree page of the table in the
+** database file. Table.iDb is the index of the database table backend
+** in sqlite.aDb[]. 0 is for the main database and 1 is for the file that
+** holds temporary tables and indices. If TF_Ephemeral is set
+** then the table is stored in a file that is automatically deleted
+** when the VDBE cursor to the table is closed. In this case Table.tnum
+** refers VDBE cursor number that holds the table open, not to the root
+** page number. Transient tables are used to hold the results of a
+** sub-query that appears instead of a real table name in the FROM clause
+** of a SELECT statement.
*/
struct Table {
char *zName; /* Name of the table or view */
@@ -12188,17 +10674,15 @@ struct Table {
Select *pSelect; /* NULL for tables. Points to definition if a view. */
FKey *pFKey; /* Linked list of all foreign keys in this table */
char *zColAff; /* String defining the affinity of each column */
+#ifndef SQLITE_OMIT_CHECK
ExprList *pCheck; /* All CHECK constraints */
- /* ... also used as column name list in a VIEW */
- int tnum; /* Root BTree page for this table */
- i16 iPKey; /* If not negative, use aCol[iPKey] as the rowid */
+#endif
+ tRowcnt nRowEst; /* Estimated rows in table - from sqlite_stat1 table */
+ int tnum; /* Root BTree node for this table (see note above) */
+ i16 iPKey; /* If not negative, use aCol[iPKey] as the primary key */
i16 nCol; /* Number of columns in this table */
u16 nRef; /* Number of pointers to this Table */
- LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */
LogEst szTabRow; /* Estimated size of each table row in bytes */
-#ifdef SQLITE_ENABLE_COSTMULT
- LogEst costMult; /* Cost multiplier for using this table */
-#endif
u8 tabFlags; /* Mask of TF_* values */
u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */
#ifndef SQLITE_OMIT_ALTERTABLE
@@ -12206,7 +10690,7 @@ struct Table {
#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
int nModuleArg; /* Number of arguments to the module */
- char **azModuleArg; /* 0: module 1: schema 2: vtab name 3...: args */
+ char **azModuleArg; /* Text of all module args. [0] is module name */
VTable *pVTable; /* List of VTable objects. */
#endif
Trigger *pTrigger; /* List of triggers stored in pSchema */
@@ -12215,22 +10699,14 @@ struct Table {
};
/*
-** Allowed values for Table.tabFlags.
-**
-** TF_OOOHidden applies to virtual tables that have hidden columns that are
-** followed by non-hidden columns. Example: "CREATE VIRTUAL TABLE x USING
-** vtab1(a HIDDEN, b);". Since "b" is a non-hidden column but "a" is hidden,
-** the TF_OOOHidden attribute would apply in this case. Such tables require
-** special handling during INSERT processing.
+** Allowed values for Table.tabFlags.
*/
#define TF_Readonly 0x01 /* Read-only system table */
#define TF_Ephemeral 0x02 /* An ephemeral table */
#define TF_HasPrimaryKey 0x04 /* Table has a primary key */
#define TF_Autoincrement 0x08 /* Integer primary key is autoincrement */
#define TF_Virtual 0x10 /* Is a virtual table */
-#define TF_WithoutRowid 0x20 /* No rowid. PRIMARY KEY is the key */
-#define TF_NoVisibleRowid 0x40 /* No user-visible "rowid" column */
-#define TF_OOOHidden 0x80 /* Out-of-Order hidden columns */
+#define TF_WithoutRowid 0x20 /* No rowid used. PRIMARY KEY is the key */
/*
@@ -12248,7 +10724,6 @@ struct Table {
/* Does the table have a rowid */
#define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0)
-#define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0)
/*
** Each foreign key constraint is an instance of the following structure.
@@ -12367,20 +10842,19 @@ struct KeyInfo {
**
** This structure holds a record that has already been disassembled
** into its constituent fields.
-**
-** The r1 and r2 member variables are only used by the optimized comparison
-** functions vdbeRecordCompareInt() and vdbeRecordCompareString().
*/
struct UnpackedRecord {
KeyInfo *pKeyInfo; /* Collation and sort-order information */
u16 nField; /* Number of entries in apMem[] */
- i8 default_rc; /* Comparison result if keys are equal */
- u8 errCode; /* Error detected by xRecordCompare (CORRUPT or NOMEM) */
+ u8 flags; /* Boolean settings. UNPACKED_... below */
Mem *aMem; /* Values */
- int r1; /* Value to return if (lhs > rhs) */
- int r2; /* Value to return if (rhs < lhs) */
};
+/*
+** Allowed values of UnpackedRecord.flags
+*/
+#define UNPACKED_INCRKEY 0x01 /* Make this key an epsilon larger */
+#define UNPACKED_PREFIX_MATCH 0x02 /* A prefix match is considered OK */
/*
** Each SQL index is represented in memory by an
@@ -12407,19 +10881,11 @@ struct UnpackedRecord {
** and the value of Index.onError indicates which conflict resolution
** algorithm to employ whenever an attempt is made to insert a non-unique
** element.
-**
-** While parsing a CREATE TABLE or CREATE INDEX statement in order to
-** generate VDBE code (as opposed to parsing one read from an sqlite_master
-** table as part of parsing an existing database schema), transient instances
-** of this structure may be created. In this case the Index.tnum variable is
-** used to store the address of a VDBE instruction, not a database page
-** number (it cannot - the database page is not allocated until the VDBE
-** program is executed). See convertToWithoutRowidTable() for details.
*/
struct Index {
char *zName; /* Name of this index */
i16 *aiColumn; /* Which columns are used by this index. 1st is 0 */
- LogEst *aiRowLogEst; /* From ANALYZE: Est. rows selected by each column */
+ tRowcnt *aiRowEst; /* From ANALYZE: Est. rows selected by each column */
Table *pTable; /* The SQL table being indexed */
char *zColAff; /* String defining the affinity of each column */
Index *pNext; /* The next index associated with the same table */
@@ -12427,48 +10893,26 @@ struct Index {
u8 *aSortOrder; /* for each column: True==DESC, False==ASC */
char **azColl; /* Array of collation sequence names for index */
Expr *pPartIdxWhere; /* WHERE clause for partial indices */
- ExprList *aColExpr; /* Column expressions */
+ KeyInfo *pKeyInfo; /* A KeyInfo object suitable for this index */
int tnum; /* DB Page containing root of this index */
LogEst szIdxRow; /* Estimated average row size in bytes */
u16 nKeyCol; /* Number of columns forming the key */
u16 nColumn; /* Number of columns stored in the index */
u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
- unsigned idxType:2; /* 1==UNIQUE, 2==PRIMARY KEY, 0==CREATE INDEX */
+ unsigned autoIndex:2; /* 1==UNIQUE, 2==PRIMARY KEY, 0==CREATE INDEX */
unsigned bUnordered:1; /* Use this index for == or IN queries only */
unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */
unsigned isResized:1; /* True if resizeIndexObject() has been called */
unsigned isCovering:1; /* True if this is a covering index */
- unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
int nSample; /* Number of elements in aSample[] */
int nSampleCol; /* Size of IndexSample.anEq[] and so on */
tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */
IndexSample *aSample; /* Samples of the left-most key */
- tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */
- tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */
#endif
};
/*
-** Allowed values for Index.idxType
-*/
-#define SQLITE_IDXTYPE_APPDEF 0 /* Created using CREATE INDEX */
-#define SQLITE_IDXTYPE_UNIQUE 1 /* Implements a UNIQUE constraint */
-#define SQLITE_IDXTYPE_PRIMARYKEY 2 /* Is the PRIMARY KEY for the table */
-
-/* Return true if index X is a PRIMARY KEY index */
-#define IsPrimaryKeyIndex(X) ((X)->idxType==SQLITE_IDXTYPE_PRIMARYKEY)
-
-/* Return true if index X is a UNIQUE index */
-#define IsUniqueIndex(X) ((X)->onError!=OE_None)
-
-/* The Index.aiColumn[] values are normally positive integer. But
-** there are some negative values that have special meaning:
-*/
-#define XN_ROWID (-1) /* Indexed column is the rowid */
-#define XN_EXPR (-2) /* Indexed column is an expression */
-
-/*
** Each sample stored in the sqlite_stat3 table is represented in memory
** using a structure of this type. See documentation at the top of the
** analyze.c source file for additional information.
@@ -12515,7 +10959,6 @@ struct AggInfo {
int sortingIdx; /* Cursor number of the sorting index */
int sortingIdxPTab; /* Cursor number of pseudo-table */
int nSortingColumn; /* Number of columns in the sorting index */
- int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */
ExprList *pGroupBy; /* The group by clause */
struct AggInfo_col { /* For each column used in source tables */
Table *pTab; /* Source table */
@@ -12649,7 +11092,7 @@ struct Expr {
int iTable; /* TK_COLUMN: cursor number of table holding column
** TK_REGISTER: register number
** TK_TRIGGER: 1 -> new, 0 -> old
- ** EP_Unlikely: 134217728 times likelihood */
+ ** EP_Unlikely: 1000 times likelihood */
ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.
** TK_VARIABLE: variable number (always >= 1). */
i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */
@@ -12664,7 +11107,7 @@ struct Expr {
/*
** The following are the meanings of bits in the Expr.flags field.
*/
-#define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */
+#define EP_FromJoin 0x000001 /* Originated in ON or USING clause of a join */
#define EP_Agg 0x000002 /* Contains one or more aggregate functions */
#define EP_Resolved 0x000004 /* IDs have been resolved to COLUMNs */
#define EP_Error 0x000008 /* Expression contains one or more errors */
@@ -12672,8 +11115,8 @@ struct Expr {
#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */
#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */
#define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */
-#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */
-#define EP_Generic 0x000200 /* Ignore COLLATE or affinity on this tree */
+#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */
+ /* unused 0x000200 */
#define EP_IntValue 0x000400 /* Integer value contained in u.iValue */
#define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */
#define EP_Skip 0x001000 /* COLLATE, AS, or UNLIKELY */
@@ -12683,15 +11126,7 @@ struct Expr {
#define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */
#define EP_NoReduce 0x020000 /* Cannot EXPRDUP_REDUCE this Expr */
#define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */
-#define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
-#define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */
-#define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */
-#define EP_Alias 0x400000 /* Is an alias for a result set column */
-
-/*
-** Combinations of two or more EP_* flags
-*/
-#define EP_Propagate (EP_Collate|EP_Subquery) /* Propagate these bits up tree */
+#define EP_Constant 0x080000 /* Node is a constant */
/*
** These macros can be used to test, set, or clear bits in the
@@ -12745,6 +11180,7 @@ struct Expr {
*/
struct ExprList {
int nExpr; /* Number of expressions on the list */
+ int iECursor; /* VDBE Cursor associated with this ExprList */
struct ExprList_item { /* For each expression in the list */
Expr *pExpr; /* The list of expressions */
char *zName; /* Token associated with this expression */
@@ -12815,7 +11251,6 @@ typedef u64 Bitmask;
** A bit in a Bitmask
*/
#define MASKBIT(n) (((Bitmask)1)<<(n))
-#define MASKBIT32(n) (((unsigned int)1)<<(n))
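MASKBIT() is the idiom behind SrcList_item.colUsed just below: one bit per referenced column, with bit 63 standing in for "column 63 or later". A self-contained sketch of that usage (not from the diff; the column list and DEMO_* names are hypothetical):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t DemoBitmask;
#define DEMO_MASKBIT(n) (((DemoBitmask)1)<<(n))

int main(void){
  DemoBitmask colUsed = 0;
  int aCol[] = { 0, 5, 62, 70 };           /* columns referenced in a query */
  for(int i=0; i<4; i++){
    int n = aCol[i];
    colUsed |= DEMO_MASKBIT(n>=63 ? 63 : n);   /* clamp high columns to bit 63 */
  }
  printf("colUsed = 0x%016llx\n", (unsigned long long)colUsed);
  return 0;
}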
/*
** The following structure describes the FROM clause of a SELECT statement.
@@ -12837,8 +11272,8 @@ typedef u64 Bitmask;
** contains more than 63 columns and the 64-th or later column is used.
*/
struct SrcList {
- int nSrc; /* Number of tables or subqueries in the FROM clause */
- u32 nAlloc; /* Number of entries allocated in a[] below */
+ u8 nSrc; /* Number of tables or subqueries in the FROM clause */
+ u8 nAlloc; /* Number of entries allocated in a[] below */
struct SrcList_item {
Schema *pSchema; /* Schema to which this item is fixed */
char *zDatabase; /* Name of database holding this table */
@@ -12848,16 +11283,10 @@ struct SrcList {
Select *pSelect; /* A SELECT statement used in place of a table name */
int addrFillSub; /* Address of subroutine to manifest a subquery */
int regReturn; /* Register holding return address of addrFillSub */
- int regResult; /* Registers holding results of a co-routine */
- struct {
- u8 jointype; /* Type of join between this able and the previous */
- unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */
- unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */
- unsigned isTabFunc :1; /* True if table-valued-function syntax */
- unsigned isCorrelated :1; /* True if sub-query is correlated */
- unsigned viaCoroutine :1; /* Implemented as a co-routine */
- unsigned isRecursive :1; /* True for recursive reference in WITH */
- } fg;
+ u8 jointype; /* Type of join between this table and the previous */
+ unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */
+ unsigned isCorrelated :1; /* True if sub-query is correlated */
+ unsigned viaCoroutine :1; /* Implemented as a co-routine */
#ifndef SQLITE_OMIT_EXPLAIN
u8 iSelectId; /* If pSelect!=0, the id of the sub-select in EQP */
#endif
@@ -12865,11 +11294,8 @@ struct SrcList {
Expr *pOn; /* The ON clause of a join */
IdList *pUsing; /* The USING clause of a join */
Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */
- union {
- char *zIndexedBy; /* Identifier from "INDEXED BY <zIndex>" clause */
- ExprList *pFuncArg; /* Arguments to table-valued-function */
- } u1;
- Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */
+ char *zIndex; /* Identifier from "INDEXED BY <zIndex>" clause */
+ Index *pIndex; /* Index structure corresponding to zIndex, if any */
} a[1]; /* One entry for each identifier on the list */
};
@@ -12897,13 +11323,10 @@ struct SrcList {
#define WHERE_OMIT_OPEN_CLOSE 0x0010 /* Table cursors are already open */
#define WHERE_FORCE_TABLE 0x0020 /* Do not use an index-only search */
#define WHERE_ONETABLE_ONLY 0x0040 /* Only code the 1st table in pTabList */
-#define WHERE_NO_AUTOINDEX 0x0080 /* Disallow automatic indexes */
+#define WHERE_AND_ONLY 0x0080 /* Don't use indices for OR terms */
#define WHERE_GROUPBY 0x0100 /* pOrderBy is really a GROUP BY */
#define WHERE_DISTINCTBY 0x0200 /* pOrderby is really a DISTINCT clause */
#define WHERE_WANT_DISTINCT 0x0400 /* All output needs to be distinct */
-#define WHERE_SORTBYGROUP 0x0800 /* Support sqlite3WhereIsSorted() */
-#define WHERE_REOPEN_IDX 0x1000 /* Try to use OP_ReopenIdx */
-#define WHERE_ONEPASS_MULTIROW 0x2000 /* ONEPASS is ok with multiple rows */
/* Allowed return values from sqlite3WhereIsDistinct()
*/
@@ -12941,23 +11364,17 @@ struct NameContext {
NameContext *pNext; /* Next outer name context. NULL for outermost */
int nRef; /* Number of names resolved by this context */
int nErr; /* Number of errors encountered while resolving names */
- u16 ncFlags; /* Zero or more NC_* flags defined below */
+ u8 ncFlags; /* Zero or more NC_* flags defined below */
};
/*
** Allowed values for the NameContext, ncFlags field.
-**
-** Note: NC_MinMaxAgg must have the same value as SF_MinMaxAgg and
-** SQLITE_FUNC_MINMAX.
-**
*/
-#define NC_AllowAgg 0x0001 /* Aggregate functions are allowed here */
-#define NC_HasAgg 0x0002 /* One or more aggregate functions seen */
-#define NC_IsCheck 0x0004 /* True if resolving names in a CHECK constraint */
-#define NC_InAggFunc 0x0008 /* True if analyzing arguments to an agg func */
-#define NC_PartIdx 0x0010 /* True if resolving a partial index WHERE */
-#define NC_IdxExpr 0x0020 /* True if resolving columns of CREATE INDEX */
-#define NC_MinMaxAgg 0x1000 /* min/max aggregates seen. See note above */
+#define NC_AllowAgg 0x01 /* Aggregate functions are allowed here */
+#define NC_HasAgg 0x02 /* One or more aggregate functions seen */
+#define NC_IsCheck 0x04 /* True if resolving names in a CHECK constraint */
+#define NC_InAggFunc 0x08 /* True if analyzing arguments to an agg func */
+#define NC_PartIdx 0x10 /* True if resolving a partial index WHERE */
/*
** An instance of the following structure contains all information
@@ -12984,10 +11401,7 @@ struct Select {
u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */
u16 selFlags; /* Various SF_* values */
int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */
-#if SELECTTRACE_ENABLED
- char zSelName[12]; /* Symbolic name of this SELECT use for debugging */
-#endif
- int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */
+ int addrOpenEphm[3]; /* OP_OpenEphem opcodes related to this select */
u64 nSelectRow; /* Estimated number of result rows */
SrcList *pSrc; /* The FROM clause */
Expr *pWhere; /* The WHERE clause */
@@ -12996,9 +11410,9 @@ struct Select {
ExprList *pOrderBy; /* The ORDER BY clause */
Select *pPrior; /* Prior select in a compound select statement */
Select *pNext; /* Next select to the left in a compound */
+ Select *pRightmost; /* Right-most select in a compound select statement */
Expr *pLimit; /* LIMIT expression. NULL means not used. */
Expr *pOffset; /* OFFSET expression. NULL means not used. */
- With *pWith; /* WITH clause attached to this select. Or NULL. */
};
/*
@@ -13006,116 +11420,47 @@ struct Select {
** "Select Flag".
*/
#define SF_Distinct 0x0001 /* Output should be DISTINCT */
-#define SF_All 0x0002 /* Includes the ALL keyword */
-#define SF_Resolved 0x0004 /* Identifiers have been resolved */
-#define SF_Aggregate 0x0008 /* Contains aggregate functions */
-#define SF_UsesEphemeral 0x0010 /* Uses the OpenEphemeral opcode */
-#define SF_Expanded 0x0020 /* sqlite3SelectExpand() called on this */
-#define SF_HasTypeInfo 0x0040 /* FROM subqueries have Table metadata */
-#define SF_Compound 0x0080 /* Part of a compound query */
-#define SF_Values 0x0100 /* Synthesized from VALUES clause */
-#define SF_MultiValue 0x0200 /* Single VALUES term with multiple rows */
-#define SF_NestedFrom 0x0400 /* Part of a parenthesized FROM clause */
-#define SF_MaybeConvert 0x0800 /* Need convertCompoundSelectToSubquery() */
-#define SF_MinMaxAgg 0x1000 /* Aggregate containing min() or max() */
-#define SF_Recursive 0x2000 /* The recursive part of a recursive CTE */
-#define SF_Converted 0x4000 /* By convertCompoundSelectToSubquery() */
-
-
-/*
-** The results of a SELECT can be distributed in several ways, as defined
-** by one of the following macros. The "SRT" prefix means "SELECT Result
-** Type".
-**
-** SRT_Union Store results as a key in a temporary index
-** identified by pDest->iSDParm.
-**
-** SRT_Except Remove results from the temporary index pDest->iSDParm.
-**
-** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result
-** set is not empty.
-**
-** SRT_Discard Throw the results away. This is used by SELECT
-** statements within triggers whose only purpose is
-** the side-effects of functions.
-**
-** All of the above are free to ignore their ORDER BY clause. Those that
-** follow must honor the ORDER BY clause.
-**
-** SRT_Output Generate a row of output (using the OP_ResultRow
-** opcode) for each row in the result set.
-**
-** SRT_Mem Only valid if the result is a single column.
-** Store the first column of the first result row
-** in register pDest->iSDParm then abandon the rest
-** of the query. This destination implies "LIMIT 1".
-**
-** SRT_Set The result must be a single column. Store each
-** row of result as the key in table pDest->iSDParm.
-** Apply the affinity pDest->affSdst before storing
-** results. Used to implement "IN (SELECT ...)".
-**
-** SRT_EphemTab Create an temporary table pDest->iSDParm and store
-** the result there. The cursor is left open after
-** returning. This is like SRT_Table except that
-** this destination uses OP_OpenEphemeral to create
-** the table first.
-**
-** SRT_Coroutine Generate a co-routine that returns a new row of
-** results each time it is invoked. The entry point
-** of the co-routine is stored in register pDest->iSDParm
-** and the result row is stored in pDest->nDest registers
-** starting with pDest->iSdst.
-**
-** SRT_Table Store results in temporary table pDest->iSDParm.
-** SRT_Fifo This is like SRT_EphemTab except that the table
-** is assumed to already be open. SRT_Fifo has
-** the additional property of being able to ignore
-** the ORDER BY clause.
-**
-** SRT_DistFifo Store results in a temporary table pDest->iSDParm.
-** But also use temporary table pDest->iSDParm+1 as
-** a record of all prior results and ignore any duplicate
-** rows. Name means: "Distinct Fifo".
-**
-** SRT_Queue Store results in priority queue pDest->iSDParm (really
-** an index). Append a sequence number so that all entries
-** are distinct.
-**
-** SRT_DistQueue Store results in priority queue pDest->iSDParm only if
-** the same record has never been stored before. The
-** index at pDest->iSDParm+1 hold all prior stores.
+#define SF_Resolved 0x0002 /* Identifiers have been resolved */
+#define SF_Aggregate 0x0004 /* Contains aggregate functions */
+#define SF_UsesEphemeral 0x0008 /* Uses the OpenEphemeral opcode */
+#define SF_Expanded 0x0010 /* sqlite3SelectExpand() called on this */
+#define SF_HasTypeInfo 0x0020 /* FROM subqueries have Table metadata */
+#define SF_UseSorter 0x0040 /* Sort using a sorter */
+#define SF_Values 0x0080 /* Synthesized from VALUES clause */
+#define SF_Materialize 0x0100 /* Force materialization of views */
+#define SF_NestedFrom 0x0200 /* Part of a parenthesized FROM clause */
+#define SF_MaybeConvert 0x0400 /* Need convertCompoundSelectToSubquery() */
+
+
+/*
+** The results of a select can be distributed in several ways. The
+** "SRT" prefix means "SELECT Result Type".
*/
#define SRT_Union 1 /* Store result as keys in an index */
#define SRT_Except 2 /* Remove result from a UNION index */
#define SRT_Exists 3 /* Store 1 if the result is not empty */
#define SRT_Discard 4 /* Do not save the results anywhere */
-#define SRT_Fifo 5 /* Store result as data with an automatic rowid */
-#define SRT_DistFifo 6 /* Like SRT_Fifo, but unique results only */
-#define SRT_Queue 7 /* Store result in an queue */
-#define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */
/* The ORDER BY clause is ignored for all of the above */
-#define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue)
+#define IgnorableOrderby(X) ((X->eDest)<=SRT_Discard)
-#define SRT_Output 9 /* Output each row of result */
-#define SRT_Mem 10 /* Store result in a memory cell */
-#define SRT_Set 11 /* Store results as keys in an index */
-#define SRT_EphemTab 12 /* Create transient tab and store like SRT_Table */
-#define SRT_Coroutine 13 /* Generate a single row of result */
-#define SRT_Table 14 /* Store result as data with an automatic rowid */
+#define SRT_Output 5 /* Output each row of result */
+#define SRT_Mem 6 /* Store result in a memory cell */
+#define SRT_Set 7 /* Store results as keys in an index */
+#define SRT_Table 8 /* Store result as data with an automatic rowid */
+#define SRT_EphemTab 9 /* Create transient tab and store like SRT_Table */
+#define SRT_Coroutine 10 /* Generate a single row of result */
/*
** An instance of this object describes where to put the results of
** a SELECT statement.
*/
struct SelectDest {
- u8 eDest; /* How to dispose of the results. On of SRT_* above. */
- char affSdst; /* Affinity used when eDest==SRT_Set */
- int iSDParm; /* A parameter used by the eDest disposal method */
- int iSdst; /* Base register where results are written */
- int nSdst; /* Number of registers allocated */
- ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */
+ u8 eDest; /* How to dispose of the results. One of SRT_* above. */
+ char affSdst; /* Affinity used when eDest==SRT_Set */
+ int iSDParm; /* A parameter used by the eDest disposal method */
+ int iSdst; /* Base register where results are written */
+ int nSdst; /* Number of registers allocated */
};
/*
@@ -13171,19 +11516,9 @@ struct TriggerPrg {
** The yDbMask datatype for the bitmask of all attached databases.
*/
#if SQLITE_MAX_ATTACHED>30
- typedef unsigned char yDbMask[(SQLITE_MAX_ATTACHED+9)/8];
-# define DbMaskTest(M,I) (((M)[(I)/8]&(1<<((I)&7)))!=0)
-# define DbMaskZero(M) memset((M),0,sizeof(M))
-# define DbMaskSet(M,I) (M)[(I)/8]|=(1<<((I)&7))
-# define DbMaskAllZero(M) sqlite3DbMaskAllZero(M)
-# define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0)
+ typedef sqlite3_uint64 yDbMask;
#else
typedef unsigned int yDbMask;
-# define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0)
-# define DbMaskZero(M) (M)=0
-# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I))
-# define DbMaskAllZero(M) (M)==0
-# define DbMaskNonZero(M) (M)!=0
#endif
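With the backout, the many-database case falls back to a plain 64-bit integer and the common case stays an unsigned int, with bits set and tested by explicit shifts (the DbMask* helper macros are removed). A small sketch of how such a mask is used for Parse.writeMask-style bookkeeping (not from the diff; the database index and Demo* names are hypothetical):

#include <stdio.h>

typedef unsigned int DemoDbMask;   /* one bit per attached database */

int main(void){
  DemoDbMask writeMask = 0;
  int iDb = 2;                              /* hypothetical database index */
  writeMask |= ((DemoDbMask)1)<<iDb;        /* start a write on database 2 */
  printf("db 2 writable: %d\n", (writeMask & (((DemoDbMask)1)<<2))!=0);
  printf("db 0 writable: %d\n", (writeMask & (((DemoDbMask)1)<<0))!=0);
  return 0;
}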
/*
@@ -13211,10 +11546,12 @@ struct Parse {
u8 checkSchema; /* Causes schema cookie check after an error */
u8 nested; /* Number of nested calls to the parser/code generator */
u8 nTempReg; /* Number of temporary registers in aTempReg[] */
+ u8 nTempInUse; /* Number of aTempReg[] currently checked out */
+ u8 nColCache; /* Number of entries in aColCache[] */
+ u8 iColCache; /* Next entry in aColCache[] to replace */
u8 isMultiWrite; /* True if statement may modify/insert multiple rows */
u8 mayAbort; /* True if statement may throw an ABORT exception */
u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */
- u8 okConstFactor; /* OK to factor out constants */
int aTempReg[8]; /* Holding area for temporary registers */
int nRangeReg; /* Size of the temporary register block */
int iRangeReg; /* First register in temporary register block */
@@ -13223,34 +11560,27 @@ struct Parse {
int nMem; /* Number of memory cells used so far */
int nSet; /* Number of sets used so far */
int nOnce; /* Number of OP_Once instructions so far */
- int nOpAlloc; /* Number of slots allocated for Vdbe.aOp[] */
- int iFixedOp; /* Never back out opcodes iFixedOp-1 or earlier */
int ckBase; /* Base register of data during check constraints */
- int iSelfTab; /* Table of an index whose exprs are being coded */
+ int iPartIdxTab; /* Table corresponding to a partial index */
int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */
int iCacheCnt; /* Counter used to generate aColCache[].lru values */
- int nLabel; /* Number of labels used */
- int *aLabel; /* Space to hold the labels */
struct yColCache {
int iTable; /* Table cursor number */
- i16 iColumn; /* Table column number */
+ int iColumn; /* Table column number */
u8 tempReg; /* iReg is a temp register that needs to be freed */
int iLevel; /* Nesting level */
int iReg; /* Reg with value of this column. 0 means none. */
int lru; /* Least recently used entry has the smallest value */
} aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */
ExprList *pConstExpr;/* Constant expressions */
- Token constraintName;/* Name of the constraint currently being parsed */
yDbMask writeMask; /* Start a write transaction on these databases */
yDbMask cookieMask; /* Bitmask of schema verified databases */
+ int cookieGoto; /* Address of OP_Goto to cookie verifier subroutine */
int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */
int regRowid; /* Register holding rowid of CREATE TABLE entry */
int regRoot; /* Register holding root page number for new objects */
int nMaxArg; /* Max args passed to user function by sub-program */
-#if SELECTTRACE_ENABLED
- int nSelect; /* Number of SELECT statements seen */
- int nSelectIndent; /* How far to indent SELECTTRACE() output */
-#endif
+ Token constraintName;/* Name of the constraint currently being parsed */
#ifndef SQLITE_OMIT_SHARED_CACHE
int nTableLock; /* Number of locks in aTableLock */
TableLock *aTableLock; /* Required table locks for shared-cache mode */
@@ -13261,6 +11591,7 @@ struct Parse {
Parse *pToplevel; /* Parse structure for main program (or NULL) */
Table *pTriggerTab; /* Table triggers are being coded for */
int addrCrTab; /* Address of OP_CreateTable opcode on CREATE TABLE */
+ int addrSkipPK; /* Address of instruction to skip PRIMARY KEY index */
u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
u32 oldmask; /* Mask of old.* columns referenced */
u32 newmask; /* Mask of new.* columns referenced */
@@ -13268,17 +11599,12 @@ struct Parse {
u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
u8 disableTriggers; /* True to disable triggers */
- /************************************************************************
- ** Above is constant between recursions. Below is reset before and after
- ** each recursion. The boundary between these two regions is determined
- ** using offsetof(Parse,nVar) so the nVar field must be the first field
- ** in the recursive region.
- ************************************************************************/
+ /* Above is constant between recursions. Below is reset before and after
+ ** each recursion */
int nVar; /* Number of '?' variables seen in the SQL so far */
int nzVar; /* Number of available slots in azVar[] */
u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */
- u8 bFreeWith; /* True if pWith should be freed with parser */
u8 explain; /* True if the EXPLAIN flag is found on the query */
#ifndef SQLITE_OMIT_VIRTUALTABLE
u8 declareVtab; /* True if inside sqlite3_declare_vtab() */
@@ -13304,7 +11630,6 @@ struct Parse {
#endif
Table *pZombieTab; /* List of Table objects to delete after code gen */
TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */
- With *pWith; /* Current WITH clause, or NULL */
};
/*
@@ -13329,16 +11654,15 @@ struct AuthContext {
** Bitfield flags for P5 value in various opcodes.
*/
#define OPFLAG_NCHANGE 0x01 /* Set to update db->nChange */
-#define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */
#define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */
#define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */
#define OPFLAG_APPEND 0x08 /* This is likely to be an append */
#define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */
+#define OPFLAG_CLEARCACHE 0x20 /* Clear pseudo-table cache in OP_Column */
#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
-#define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */
-#define OPFLAG_P2ISREG 0x04 /* P2 to OP_Open** is a register number */
+#define OPFLAG_P2ISREG 0x02 /* P2 to OP_Open** is a register number */
#define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */
/*
@@ -13397,7 +11721,7 @@ struct Trigger {
* orconf -> stores the ON CONFLICT algorithm
* pSelect -> If this is an INSERT INTO ... SELECT ... statement, then
* this stores a pointer to the SELECT statement. Otherwise NULL.
- * zTarget -> Dequoted name of the table to insert into.
+ * target -> A token holding the quoted name of the table to insert into.
* pExprList -> If this is an INSERT INTO ... VALUES ... statement, then
* this stores values to be inserted. Otherwise NULL.
* pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ...
@@ -13405,12 +11729,12 @@ struct Trigger {
* inserted into.
*
* (op == TK_DELETE)
- * zTarget -> Dequoted name of the table to delete from.
+ * target -> A token holding the quoted name of the table to delete from.
* pWhere -> The WHERE clause of the DELETE statement if one is specified.
* Otherwise NULL.
*
* (op == TK_UPDATE)
- * zTarget -> Dequoted name of the table to update.
+ * target -> A token holding the quoted name of the table to update rows of.
* pWhere -> The WHERE clause of the UPDATE statement if one is specified.
* Otherwise NULL.
* pExprList -> A list of the columns to update and the expressions to update
@@ -13422,10 +11746,10 @@ struct TriggerStep {
u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */
u8 orconf; /* OE_Rollback etc. */
Trigger *pTrig; /* The trigger that this step is a part of */
- Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ... */
- char *zTarget; /* Target table for DELETE, UPDATE, INSERT */
+ Select *pSelect; /* SELECT statement or RHS of INSERT INTO .. SELECT ... */
+ Token target; /* Target table for DELETE, UPDATE, INSERT */
Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */
- ExprList *pExprList; /* SET clause for UPDATE. */
+ ExprList *pExprList; /* SET clause for UPDATE. VALUES clause for INSERT */
IdList *pIdList; /* Column names for INSERT */
TriggerStep *pNext; /* Next in the link-list */
TriggerStep *pLast; /* Last element in link-list. Valid for 1st elem only */
@@ -13456,7 +11780,8 @@ struct StrAccum {
char *zText; /* The string collected so far */
int nChar; /* Length of the string so far */
int nAlloc; /* Amount of space allocated in zText */
- int mxAlloc; /* Maximum allowed allocation. 0 for no malloc usage */
+ int mxAlloc; /* Maximum allowed string length */
+ u8 useMalloc; /* 0: none, 1: sqlite3DbMalloc, 2: sqlite3_malloc */
u8 accError; /* STRACCUM_NOMEM or STRACCUM_TOOBIG */
};
#define STRACCUM_NOMEM 1
@@ -13504,7 +11829,6 @@ struct Sqlite3Config {
int nPage; /* Number of pages in pPage[] */
int mxParserStack; /* maximum depth of the parser stack */
int sharedCacheEnabled; /* true if shared-cache mode enabled */
- u32 szPma; /* Maximum Sorter PMA size */
/* The above might be initialized to non-zero. The following need to always
** initially be zero, however. */
int isInit; /* True after initialization has finished */
@@ -13512,25 +11836,15 @@ struct Sqlite3Config {
int isMutexInit; /* True after mutexes are initialized */
int isMallocInit; /* True after malloc is initialized */
int isPCacheInit; /* True after the page cache is initialized */
- int nRefInitMutex; /* Number of users of pInitMutex */
sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */
+ int nRefInitMutex; /* Number of users of pInitMutex */
void (*xLog)(void*,int,const char*); /* Function for logging */
void *pLogArg; /* First argument to xLog() */
+ int bLocaltimeFault; /* True to fail localtime() calls */
#ifdef SQLITE_ENABLE_SQLLOG
void(*xSqllog)(void*,sqlite3*,const char*, int);
void *pSqllogArg;
#endif
-#ifdef SQLITE_VDBE_COVERAGE
- /* The following callback (if not NULL) is invoked on every VDBE branch
- ** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE.
- */
- void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */
- void *pVdbeBranchArg; /* 1st argument */
-#endif
-#ifndef SQLITE_OMIT_BUILTIN_TEST
- int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */
-#endif
- int bLocaltimeFault; /* True to fail localtime() calls */
};
/*
@@ -13557,14 +11871,12 @@ struct Sqlite3Config {
struct Walker {
int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */
int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */
- void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */
Parse *pParse; /* Parser context. */
int walkerDepth; /* Number of subqueries */
- u8 eCode; /* A small processing code */
+ u8 bSelectDepthFirst; /* Do subqueries first */
union { /* Extra data for callback */
NameContext *pNC; /* Naming context */
- int n; /* A counter */
- int iCur; /* A cursor number */
+ int i; /* Integer value */
SrcList *pSrcList; /* FROM clause */
struct SrcCount *pSrcCount; /* Counting column references */
} u;
@@ -13586,32 +11898,6 @@ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker*, Select*);
#define WRC_Abort 2 /* Abandon the tree walk */
/*
-** An instance of this structure represents a set of one or more CTEs
-** (common table expressions) created by a single WITH clause.
-*/
-struct With {
- int nCte; /* Number of CTEs in the WITH clause */
- With *pOuter; /* Containing WITH clause, or NULL */
- struct Cte { /* For each CTE in the WITH clause.... */
- char *zName; /* Name of this CTE */
- ExprList *pCols; /* List of explicit column names, or NULL */
- Select *pSelect; /* The definition of this CTE */
- const char *zCteErr; /* Error message for circular references */
- } a[1];
-};
-
-#ifdef SQLITE_DEBUG
-/*
-** An instance of the TreeView object is used for printing the content of
-** data structures on sqlite3DebugPrintf() using a tree-like view.
-*/
-struct TreeView {
- int iLevel; /* Which level of the tree we are on */
- u8 bLine[100]; /* Draw vertical in column i if bLine[i] is true */
-};
-#endif /* SQLITE_DEBUG */
-
-/*
** Assuming zIn points to the first byte of a UTF-8 character,
** advance zIn to point to the first byte of the next UTF-8 character.
*/
@@ -13638,11 +11924,11 @@ SQLITE_PRIVATE int sqlite3CantopenError(int);
/*
** FTS4 is really an extension for FTS3. It is enabled using the
-** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also call
-** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
+** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also call
+** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
*/
#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
-# define SQLITE_ENABLE_FTS3 1
+# define SQLITE_ENABLE_FTS3
#endif
/*
@@ -13676,9 +11962,6 @@ SQLITE_PRIVATE int sqlite3CantopenError(int);
# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x))
# define sqlite3Tolower(x) tolower((unsigned char)(x))
#endif
-#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_PRIVATE int sqlite3IsIdChar(u8);
-#endif
/*
** Internal function prototypes
@@ -13689,15 +11972,15 @@ SQLITE_PRIVATE int sqlite3Strlen30(const char*);
SQLITE_PRIVATE int sqlite3MallocInit(void);
SQLITE_PRIVATE void sqlite3MallocEnd(void);
-SQLITE_PRIVATE void *sqlite3Malloc(u64);
-SQLITE_PRIVATE void *sqlite3MallocZero(u64);
-SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, u64);
-SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, u64);
+SQLITE_PRIVATE void *sqlite3Malloc(int);
+SQLITE_PRIVATE void *sqlite3MallocZero(int);
+SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, int);
+SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, int);
SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3*,const char*);
-SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, u64);
-SQLITE_PRIVATE void *sqlite3Realloc(void*, u64);
-SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64);
-SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64);
+SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, int);
+SQLITE_PRIVATE void *sqlite3Realloc(void*, int);
+SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, int);
+SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, int);
SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*);
SQLITE_PRIVATE int sqlite3MallocSize(void*);
SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*);
@@ -13706,9 +11989,7 @@ SQLITE_PRIVATE void sqlite3ScratchFree(void*);
SQLITE_PRIVATE void *sqlite3PageMalloc(int);
SQLITE_PRIVATE void sqlite3PageFree(void*);
SQLITE_PRIVATE void sqlite3MemSetDefault(void);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void));
-#endif
SQLITE_PRIVATE int sqlite3HeapNearlyFull(void);
/*
@@ -13744,58 +12025,54 @@ SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int);
SQLITE_PRIVATE int sqlite3MutexInit(void);
SQLITE_PRIVATE int sqlite3MutexEnd(void);
#endif
-#if !defined(SQLITE_MUTEX_OMIT) && !defined(SQLITE_MUTEX_NOOP)
-SQLITE_PRIVATE void sqlite3MemoryBarrier(void);
-#else
-# define sqlite3MemoryBarrier()
-#endif
-SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int);
-SQLITE_PRIVATE void sqlite3StatusUp(int, int);
-SQLITE_PRIVATE void sqlite3StatusDown(int, int);
+SQLITE_PRIVATE int sqlite3StatusValue(int);
+SQLITE_PRIVATE void sqlite3StatusAdd(int, int);
SQLITE_PRIVATE void sqlite3StatusSet(int, int);
-/* Access to mutexes used by sqlite3_status() */
-SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void);
-SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void);
-
#ifndef SQLITE_OMIT_FLOATING_POINT
SQLITE_PRIVATE int sqlite3IsNaN(double);
#else
# define sqlite3IsNaN(X) 0
#endif
-/*
-** An instance of the following structure holds information about SQL
-** functions arguments that are the parameters to the printf() function.
-*/
-struct PrintfArguments {
- int nArg; /* Total number of arguments */
- int nUsed; /* Number of arguments used so far */
- sqlite3_value **apArg; /* The argument values */
-};
-
-#define SQLITE_PRINTF_INTERNAL 0x01
-#define SQLITE_PRINTF_SQLFUNC 0x02
-SQLITE_PRIVATE void sqlite3VXPrintf(StrAccum*, u32, const char*, va_list);
-SQLITE_PRIVATE void sqlite3XPrintf(StrAccum*, u32, const char*, ...);
+SQLITE_PRIVATE void sqlite3VXPrintf(StrAccum*, int, const char*, va_list);
+#ifndef SQLITE_OMIT_TRACE
+SQLITE_PRIVATE void sqlite3XPrintf(StrAccum*, const char*, ...);
+#endif
SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...);
SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list);
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+SQLITE_PRIVATE char *sqlite3MAppendf(sqlite3*,char*,const char*,...);
+#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
SQLITE_PRIVATE void sqlite3DebugPrintf(const char*, ...);
#endif
#if defined(SQLITE_TEST)
SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*);
#endif
-#if defined(SQLITE_DEBUG)
-SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8);
-SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*);
-SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8);
+/* Output formatting for SQLITE_TESTCTRL_EXPLAIN */
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+SQLITE_PRIVATE void sqlite3ExplainBegin(Vdbe*);
+SQLITE_PRIVATE void sqlite3ExplainPrintf(Vdbe*, const char*, ...);
+SQLITE_PRIVATE void sqlite3ExplainNL(Vdbe*);
+SQLITE_PRIVATE void sqlite3ExplainPush(Vdbe*);
+SQLITE_PRIVATE void sqlite3ExplainPop(Vdbe*);
+SQLITE_PRIVATE void sqlite3ExplainFinish(Vdbe*);
+SQLITE_PRIVATE void sqlite3ExplainSelect(Vdbe*, Select*);
+SQLITE_PRIVATE void sqlite3ExplainExpr(Vdbe*, Expr*);
+SQLITE_PRIVATE void sqlite3ExplainExprList(Vdbe*, ExprList*);
+SQLITE_PRIVATE const char *sqlite3VdbeExplanation(Vdbe*);
+#else
+# define sqlite3ExplainBegin(X)
+# define sqlite3ExplainSelect(A,B)
+# define sqlite3ExplainExpr(A,B)
+# define sqlite3ExplainExprList(A,B)
+# define sqlite3ExplainFinish(X)
+# define sqlite3VdbeExplanation(X) 0
#endif
-SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*);
+SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*, ...);
SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...);
SQLITE_PRIVATE int sqlite3Dequote(char*);
SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int);
@@ -13815,11 +12092,9 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*);
SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*);
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
-SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int);
SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int);
SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*);
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*);
-SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**);
SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int);
@@ -13828,8 +12103,6 @@ SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int);
SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*);
SQLITE_PRIVATE void sqlite3BeginParse(Parse*,int);
SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*);
-SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*);
-SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**);
SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*);
SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *, int);
SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);
@@ -13848,30 +12121,21 @@ SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*,
SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*);
SQLITE_PRIVATE int sqlite3CodeOnce(Parse *);
-#ifdef SQLITE_OMIT_BUILTIN_TEST
-# define sqlite3FaultSim(X) SQLITE_OK
-#else
-SQLITE_PRIVATE int sqlite3FaultSim(int);
-#endif
-
SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32);
SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec*, u32);
-SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec*, u32);
SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32);
SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*);
SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*);
SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*);
-#endif
SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int);
SQLITE_PRIVATE void sqlite3RowSetClear(RowSet*);
SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64);
-SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, int iBatch, i64);
+SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, u8 iBatch, i64);
SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*);
-SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,ExprList*,Select*,int,int);
+SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,Select*,int,int);
#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*);
@@ -13879,9 +12143,6 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*);
# define sqlite3ViewGetColumnNames(A,B) 0
#endif
-#if SQLITE_MAX_ATTACHED>30
-SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask);
-#endif
SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int);
SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int);
SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*);
@@ -13892,7 +12153,8 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse);
# define sqlite3AutoincrementBegin(X)
# define sqlite3AutoincrementEnd(X)
#endif
-SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int);
+SQLITE_PRIVATE int sqlite3CodeCoroutine(Parse*, Select*, SelectDest*);
+SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, ExprList*, Select*, IdList*, int);
SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*);
SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*);
SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*);
@@ -13901,7 +12163,6 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*)
SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*,
Token*, Select*, Expr*, IdList*);
SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *);
-SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*);
SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *);
SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*);
SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*);
@@ -13928,36 +12189,28 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*);
SQLITE_PRIVATE u64 sqlite3WhereOutputRowCount(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*);
-#define ONEPASS_OFF 0 /* Use of ONEPASS not allowed */
-#define ONEPASS_SINGLE 1 /* ONEPASS valid for a single row update */
-#define ONEPASS_MULTI 2 /* ONEPASS is valid for multiple rows */
-SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(Parse*, Index*, int, int, int);
SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8);
SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int);
SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int);
SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse*, int, int, int);
SQLITE_PRIVATE void sqlite3ExprCachePush(Parse*);
-SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*);
+SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*, int);
SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse*, int, int);
SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse*);
SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int);
-SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprCode(Parse*, Expr*, int);
SQLITE_PRIVATE void sqlite3ExprCodeAtInit(Parse*, Expr*, int, u8);
SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*);
SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int);
-SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8);
+SQLITE_PRIVATE int sqlite3ExprCodeAndCache(Parse*, Expr*, int);
+SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, u8);
#define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */
#define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */
-#define SQLITE_ECEL_REF 0x04 /* Use ExprList.u.x.iOrderByCol */
SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int);
SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int);
-SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int);
SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*);
SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,int isView,const char*, const char*);
SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,int isView,struct SrcList_item *);
@@ -13974,10 +12227,9 @@ SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*);
SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);
SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*);
SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
SQLITE_PRIVATE void sqlite3PrngSaveState(void);
SQLITE_PRIVATE void sqlite3PrngRestoreState(void);
-#endif
+SQLITE_PRIVATE void sqlite3PrngResetState(void);
SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3*,int);
SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse*, int);
SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb);
@@ -13989,17 +12241,15 @@ SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *);
SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*);
SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*);
SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*);
-SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8);
-SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int);
+SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*);
SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*);
SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*);
+SQLITE_PRIVATE void sqlite3ExprCodeIsNullJump(Vdbe*, const Expr*, int, int);
SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char);
SQLITE_PRIVATE int sqlite3IsRowid(const char*);
-SQLITE_PRIVATE void sqlite3GenerateRowDelete(
- Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int);
-SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);
-SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,Index*,int);
-SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int);
+SQLITE_PRIVATE void sqlite3GenerateRowDelete(Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8);
+SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*);
+SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*);
SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int,
u8,u8,int,int*);
SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int);
@@ -14015,11 +12265,6 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int);
SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int);
SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*);
SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int);
-#if SELECTTRACE_ENABLED
-SQLITE_PRIVATE void sqlite3SelectSetName(Select*,const char*);
-#else
-# define sqlite3SelectSetName(A,B)
-#endif
SQLITE_PRIVATE void sqlite3FuncDefInsert(FuncDefHash*, FuncDef*);
SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,int,u8,u8);
SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(sqlite3*);
@@ -14048,14 +12293,13 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, i
SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*);
SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*);
SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*,
- Select*,u8);
+ ExprList*,Select*,u8);
SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, u8);
SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*);
SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*);
SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*);
SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Table*,int);
# define sqlite3ParseToplevel(p) ((p)->pToplevel ? (p)->pToplevel : (p))
-# define sqlite3IsToplevel(p) ((p)->pToplevel==0)
#else
# define sqlite3TriggersExist(B,C,D,E,F) 0
# define sqlite3DeleteTrigger(A,B)
@@ -14065,7 +12309,6 @@ SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Tab
# define sqlite3CodeRowTriggerDirect(A,B,C,D,E,F)
# define sqlite3TriggerList(X, Y) 0
# define sqlite3ParseToplevel(p) p
-# define sqlite3IsToplevel(p) 1
# define sqlite3TriggerColmask(A,B,C,D,E,F,G) 0
#endif
@@ -14108,41 +12351,55 @@ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst);
/*
** Routines to read and write variable-length integers. These used to
** be defined locally, but now we use the varint routines in the util.c
-** file.
+** file. Code should use the MACRO forms below, as the Varint32 versions
+** are coded to assume the single byte case is already handled (which
+** the MACRO form does).
*/
SQLITE_PRIVATE int sqlite3PutVarint(unsigned char*, u64);
+SQLITE_PRIVATE int sqlite3PutVarint32(unsigned char*, u32);
SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *, u64 *);
SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *, u32 *);
SQLITE_PRIVATE int sqlite3VarintLen(u64 v);
/*
-** The common case is for a varint to be a single byte. The following
-** macros handle the common case without a procedure call, but then call
-** the procedure for larger varints.
+** The header of a record consists of a sequence of variable-length integers.
+** These integers are almost always small and are encoded as a single byte.
+** The following macros take advantage of this fact to provide a fast encode
+** and decode of the integers in a record header. It is faster for the common
+** case where the integer is a single byte. It is a little slower when the
+** integer is two or more bytes. But overall it is faster.
+**
+** The following expressions are equivalent:
+**
+** x = sqlite3GetVarint32( A, &B );
+** x = sqlite3PutVarint32( A, B );
+**
+** x = getVarint32( A, B );
+** x = putVarint32( A, B );
+**
*/
#define getVarint32(A,B) \
(u8)((*(A)<(u8)0x80)?((B)=(u32)*(A)),1:sqlite3GetVarint32((A),(u32 *)&(B)))
#define putVarint32(A,B) \
(u8)(((u32)(B)<(u32)0x80)?(*(A)=(unsigned char)(B)),1:\
- sqlite3PutVarint((A),(B)))
+ sqlite3PutVarint32((A),(B)))
#define getVarint sqlite3GetVarint
#define putVarint sqlite3PutVarint
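/* Illustrative sketch (not part of the amalgamation): how the macro fast
** path above behaves for the common single-byte case.  Values below 0x80
** are read or written directly; anything larger falls through to the
** sqlite3GetVarint32()/sqlite3PutVarint32() helpers.  The buffer and
** variable names are invented for the example.
*/
static void exampleVarint32RoundTrip(void){
  unsigned char aBuf[9];   /* worst-case varint is 9 bytes */
  u32 vIn = 0x42;          /* < 0x80, so it encodes in one byte */
  u32 vOut = 0;
  int nPut = putVarint32(aBuf, vIn);    /* nPut==1, aBuf[0]==0x42 */
  int nGet = getVarint32(aBuf, vOut);   /* nGet==1, vOut==0x42 */
  (void)nPut; (void)nGet; (void)vOut;
}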
-SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*);
-SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int);
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(Vdbe *, Index *);
+SQLITE_PRIVATE void sqlite3TableAffinityStr(Vdbe *, Table *);
SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2);
SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity);
SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr);
SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8);
-SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*);
-SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);
-SQLITE_PRIVATE void sqlite3Error(sqlite3*,int);
+SQLITE_PRIVATE void sqlite3Error(sqlite3*, int, const char*,...);
SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n);
SQLITE_PRIVATE u8 sqlite3HexToInt(int h);
SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **);
-#if defined(SQLITE_NEED_ERR_NAME)
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) || \
+ defined(SQLITE_DEBUG_OS_TRACE)
SQLITE_PRIVATE const char *sqlite3ErrName(int);
#endif
@@ -14151,7 +12408,7 @@ SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse);
SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int);
SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName);
SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr);
-SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*, int);
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, Token*);
SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*);
SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*);
SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *);
@@ -14166,13 +12423,12 @@ SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*);
#else
# define sqlite3FileSuffix3(X,Y)
#endif
-SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8);
+SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,int);
SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8);
SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8);
SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8,
void(*)(void*));
-SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value*);
SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value*);
SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *);
SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8);
@@ -14198,10 +12454,8 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...);
SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*);
SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *, Expr *, int, int);
SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*);
-SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p);
SQLITE_PRIVATE int sqlite3MatchSpanName(const char*, const char*, const char*, const char*);
SQLITE_PRIVATE int sqlite3ResolveExprNames(NameContext*, Expr*);
-SQLITE_PRIVATE int sqlite3ResolveExprListNames(NameContext*, ExprList*);
SQLITE_PRIVATE void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*);
SQLITE_PRIVATE void sqlite3ResolveSelfReference(Parse*,Table*,int,Expr*,ExprList*);
SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*);
@@ -14238,10 +12492,9 @@ SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *,
SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int);
SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *);
-SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int);
+SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, char*, int, int);
SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum*,const char*,int);
-SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum*,const char*);
-SQLITE_PRIVATE void sqlite3AppendChar(StrAccum*,int,char);
+SQLITE_PRIVATE void sqlite3AppendSpace(StrAccum*,int);
SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*);
SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum*);
SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int);
@@ -14253,15 +12506,13 @@ SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *);
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void);
SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(Parse*,Index*,UnpackedRecord**,Expr*,u8,int,int*);
-SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(Parse*, Expr*, u8, sqlite3_value**);
SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord*);
-SQLITE_PRIVATE int sqlite3Stat4Column(sqlite3*, const void*, int, int, sqlite3_value**);
#endif
/*
** The interface to the LEMON-generated parser
*/
-SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64));
+SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(size_t));
SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*));
SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*);
#ifdef YYTRACKMAXSTACKDEPTH
@@ -14310,8 +12561,6 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*);
SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*);
# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0)
#endif
-SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*);
-SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3*,Module*);
SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse*,Table*);
SQLITE_PRIVATE void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*, int);
SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse*, Token*);
@@ -14336,14 +12585,6 @@ SQLITE_PRIVATE const char *sqlite3JournalModename(int);
SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*);
SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int);
#endif
-#ifndef SQLITE_OMIT_CTE
-SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*);
-SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*);
-SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8);
-#else
-#define sqlite3WithPush(x,y,z)
-#define sqlite3WithDelete(x,y)
-#endif
/* Declarations for functions in fkey.c. All of these are replaced by
** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign
@@ -14394,21 +12635,11 @@ SQLITE_PRIVATE void sqlite3EndBenignMalloc(void);
#define sqlite3EndBenignMalloc()
#endif
-/*
-** Allowed return values from sqlite3FindInIndex()
-*/
-#define IN_INDEX_ROWID 1 /* Search the rowid of the table */
-#define IN_INDEX_EPH 2 /* Search an ephemeral b-tree */
-#define IN_INDEX_INDEX_ASC 3 /* Existing index ASCENDING */
-#define IN_INDEX_INDEX_DESC 4 /* Existing index DESCENDING */
-#define IN_INDEX_NOOP 5 /* No table available. Use comparisons */
-/*
-** Allowed flags for the 3rd parameter to sqlite3FindInIndex().
-*/
-#define IN_INDEX_NOOP_OK 0x0001 /* OK to return IN_INDEX_NOOP */
-#define IN_INDEX_MEMBERSHIP 0x0002 /* IN operator used for membership test */
-#define IN_INDEX_LOOP 0x0004 /* IN operator used as a loop */
-SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*);
+#define IN_INDEX_ROWID 1
+#define IN_INDEX_EPH 2
+#define IN_INDEX_INDEX_ASC 3
+#define IN_INDEX_INDEX_DESC 4
+SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, int*);
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int);
@@ -14424,11 +12655,12 @@ SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *);
SQLITE_PRIVATE int sqlite3MemJournalSize(void);
SQLITE_PRIVATE int sqlite3IsMemJournal(sqlite3_file *);
-SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p);
#if SQLITE_MAX_EXPR_DEPTH>0
+SQLITE_PRIVATE void sqlite3ExprSetHeight(Parse *pParse, Expr *p);
SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *);
SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int);
#else
+ #define sqlite3ExprSetHeight(x,y)
#define sqlite3SelectExprHeight(x) 0
#define sqlite3ExprCheckHeight(x,y)
#endif
@@ -14458,7 +12690,7 @@ SQLITE_PRIVATE void sqlite3ParserTrace(FILE*, char *);
#ifdef SQLITE_ENABLE_IOTRACE
# define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; }
SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe*);
-SQLITE_API SQLITE_EXTERN void (SQLITE_CDECL *sqlite3IoTrace)(const char*,...);
+SQLITE_PRIVATE void (*sqlite3IoTrace)(const char*,...);
#else
# define IOTRACE(A)
# define sqlite3VdbeIOTraceSql(X)
@@ -14502,21 +12734,10 @@ SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8);
# define sqlite3MemdebugNoType(X,Y) 1
#endif
#define MEMTYPE_HEAP 0x01 /* General heap allocations */
-#define MEMTYPE_LOOKASIDE 0x02 /* Heap that might have been lookaside */
+#define MEMTYPE_LOOKASIDE 0x02 /* Might have been lookaside memory */
#define MEMTYPE_SCRATCH 0x04 /* Scratch allocations */
#define MEMTYPE_PCACHE 0x08 /* Page cache allocations */
-
-/*
-** Threading interface
-*/
-#if SQLITE_MAX_WORKER_THREADS>0
-SQLITE_PRIVATE int sqlite3ThreadCreate(SQLiteThread**,void*(*)(void*),void*);
-SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**);
-#endif
-
-#if defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST)
-SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*);
-#endif
+#define MEMTYPE_DB 0x10 /* Uses sqlite3DbMalloc, not sqlite_malloc */
#endif /* _SQLITEINT_H_ */
@@ -14534,9 +12755,8 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*);
**
*************************************************************************
**
-** This file contains definitions of global variables and constants.
+** This file contains definitions of global variables and constants.
*/
-/* #include "sqliteInt.h" */
/* An array to map all upper-case characters into their corresponding
** lower-case character.
@@ -14570,16 +12790,16 @@ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = {
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */
- 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, /* 6x */
- 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, /* 7x */
+ 96, 97, 66, 67, 68, 69, 70, 71, 72, 73,106,107,108,109,110,111, /* 6x */
+ 112, 81, 82, 83, 84, 85, 86, 87, 88, 89,122,123,124,125,126,127, /* 7x */
128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */
- 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, /* 9x */
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,156,159, /* 9x */
160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */
176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */
192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */
208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */
- 224,225,162,163,164,165,166,167,168,169,234,235,236,237,238,239, /* Ex */
- 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, /* Fx */
+ 224,225,162,163,164,165,166,167,168,169,232,203,204,205,206,207, /* Ex */
+ 239,240,241,242,243,244,245,246,247,248,249,219,220,221,222,255, /* Fx */
#endif
};
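/* Illustrative sketch, not part of the SQLite sources: the table above is
** normally consumed by folding each byte of both operands through it, which
** gives a locale-independent case-insensitive comparison without calling
** tolower().  The helper name below is invented for the example.
*/
static int exampleICmpByte(unsigned char a, unsigned char b){
  return sqlite3UpperToLower[a] - sqlite3UpperToLower[b];  /* 0 if equal ignoring case */
}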
@@ -14653,36 +12873,14 @@ SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = {
};
#endif
-/* EVIDENCE-OF: R-02982-34736 In order to maintain full backwards
-** compatibility for legacy applications, the URI filename capability is
-** disabled by default.
-**
-** EVIDENCE-OF: R-38799-08373 URI filenames can be enabled or disabled
-** using the SQLITE_USE_URI=1 or SQLITE_USE_URI=0 compile-time options.
-**
-** EVIDENCE-OF: R-43642-56306 By default, URI handling is globally
-** disabled. The default value may be changed by compiling with the
-** SQLITE_USE_URI symbol defined.
-*/
#ifndef SQLITE_USE_URI
# define SQLITE_USE_URI 0
#endif
-/* EVIDENCE-OF: R-38720-18127 The default setting is determined by the
-** SQLITE_ALLOW_COVERING_INDEX_SCAN compile-time option, or is "on" if
-** that compile-time option is omitted.
-*/
#ifndef SQLITE_ALLOW_COVERING_INDEX_SCAN
# define SQLITE_ALLOW_COVERING_INDEX_SCAN 1
#endif
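/* Illustrative sketch, not part of the SQLite sources: both compile-time
** defaults above can also be overridden at run time, before the library is
** initialized, through the public sqlite3_config() interface.
*/
static void exampleGlobalConfig(void){
  sqlite3_config(SQLITE_CONFIG_URI, 1);                  /* enable URI filenames */
  sqlite3_config(SQLITE_CONFIG_COVERING_INDEX_SCAN, 1);  /* allow covering index scans */
}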
-/* The minimum PMA size is set to this value multiplied by the database
-** page size in bytes.
-*/
-#ifndef SQLITE_SORTER_PMASZ
-# define SQLITE_SORTER_PMASZ 250
-#endif
-
/*
** The following singleton contains the global configuration for
** the SQLite library.
@@ -14710,32 +12908,24 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
0, /* nScratch */
(void*)0, /* pPage */
0, /* szPage */
- SQLITE_DEFAULT_PCACHE_INITSZ, /* nPage */
+ 0, /* nPage */
0, /* mxParserStack */
0, /* sharedCacheEnabled */
- SQLITE_SORTER_PMASZ, /* szPma */
/* All the rest should always be initialized to zero */
0, /* isInit */
0, /* inProgress */
0, /* isMutexInit */
0, /* isMallocInit */
0, /* isPCacheInit */
- 0, /* nRefInitMutex */
0, /* pInitMutex */
+ 0, /* nRefInitMutex */
0, /* xLog */
0, /* pLogArg */
+ 0, /* bLocaltimeFault */
#ifdef SQLITE_ENABLE_SQLLOG
0, /* xSqllog */
- 0, /* pSqllogArg */
-#endif
-#ifdef SQLITE_VDBE_COVERAGE
- 0, /* xVdbeBranch */
- 0, /* pVbeBranchArg */
-#endif
-#ifndef SQLITE_OMIT_BUILTIN_TEST
- 0, /* xTestCallback */
+ 0 /* pSqllogArg */
#endif
- 0 /* bLocaltimeFault */
};
/*
@@ -14769,14 +12959,13 @@ SQLITE_PRIVATE const Token sqlite3IntTokens[] = {
**
** IMPORTANT: Changing the pending byte to any value other than
** 0x40000000 results in an incompatible database file format!
-** Changing the pending byte during operation will result in undefined
-** and incorrect behavior.
+** Changing the pending byte during operation results in undefined
+** and deleterious behavior.
*/
#ifndef SQLITE_OMIT_WSD
SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000;
#endif
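/* Illustrative sketch, not part of the SQLite sources: test harnesses may
** relocate the pending byte through the public test-control interface shown
** below.  Application code should never do this, because any value other
** than 0x40000000 produces database files that other SQLite builds cannot
** read.
*/
static void examplePendingByteForTests(void){
  sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, 0x40000000);
}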
-/* #include "opcodes.h" */
/*
** Properties of opcodes. The OPFLG_INITIALIZER macro is
** created by mkopcodeh.awk during compilation. Data is obtained
@@ -14805,7 +12994,6 @@ SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER;
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-/* #include "sqliteInt.h" */
/*
** An array of names of all compile-time options. This array should
@@ -14822,100 +13010,88 @@ static const char * const azCompileOpt[] = {
#define CTIMEOPT_VAL_(opt) #opt
#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt)
-#if SQLITE_32BIT_ROWID
+#ifdef SQLITE_32BIT_ROWID
"32BIT_ROWID",
#endif
-#if SQLITE_4_BYTE_ALIGNED_MALLOC
+#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
"4_BYTE_ALIGNED_MALLOC",
#endif
-#if SQLITE_CASE_SENSITIVE_LIKE
+#ifdef SQLITE_CASE_SENSITIVE_LIKE
"CASE_SENSITIVE_LIKE",
#endif
-#if SQLITE_CHECK_PAGES
+#ifdef SQLITE_CHECK_PAGES
"CHECK_PAGES",
#endif
-#if SQLITE_COVERAGE_TEST
+#ifdef SQLITE_COVERAGE_TEST
"COVERAGE_TEST",
#endif
-#if SQLITE_DEBUG
+#ifdef SQLITE_DEBUG
"DEBUG",
#endif
-#if SQLITE_DEFAULT_LOCKING_MODE
+#ifdef SQLITE_DEFAULT_LOCKING_MODE
"DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE),
#endif
#if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc)
"DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE),
#endif
-#if SQLITE_DISABLE_DIRSYNC
+#ifdef SQLITE_DISABLE_DIRSYNC
"DISABLE_DIRSYNC",
#endif
-#if SQLITE_DISABLE_LFS
+#ifdef SQLITE_DISABLE_LFS
"DISABLE_LFS",
#endif
-#if SQLITE_ENABLE_API_ARMOR
- "ENABLE_API_ARMOR",
-#endif
-#if SQLITE_ENABLE_ATOMIC_WRITE
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
"ENABLE_ATOMIC_WRITE",
#endif
-#if SQLITE_ENABLE_CEROD
+#ifdef SQLITE_ENABLE_CEROD
"ENABLE_CEROD",
#endif
-#if SQLITE_ENABLE_COLUMN_METADATA
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
"ENABLE_COLUMN_METADATA",
#endif
-#if SQLITE_ENABLE_DBSTAT_VTAB
- "ENABLE_DBSTAT_VTAB",
-#endif
-#if SQLITE_ENABLE_EXPENSIVE_ASSERT
+#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
"ENABLE_EXPENSIVE_ASSERT",
#endif
-#if SQLITE_ENABLE_FTS1
+#ifdef SQLITE_ENABLE_FTS1
"ENABLE_FTS1",
#endif
-#if SQLITE_ENABLE_FTS2
+#ifdef SQLITE_ENABLE_FTS2
"ENABLE_FTS2",
#endif
-#if SQLITE_ENABLE_FTS3
+#ifdef SQLITE_ENABLE_FTS3
"ENABLE_FTS3",
#endif
-#if SQLITE_ENABLE_FTS3_PARENTHESIS
+#ifdef SQLITE_ENABLE_FTS3_PARENTHESIS
"ENABLE_FTS3_PARENTHESIS",
#endif
-#if SQLITE_ENABLE_FTS4
+#ifdef SQLITE_ENABLE_FTS4
"ENABLE_FTS4",
#endif
-#if SQLITE_ENABLE_FTS5
- "ENABLE_FTS5",
-#endif
-#if SQLITE_ENABLE_ICU
+#ifdef SQLITE_ENABLE_ICU
"ENABLE_ICU",
#endif
-#if SQLITE_ENABLE_IOTRACE
+#ifdef SQLITE_ENABLE_IOTRACE
"ENABLE_IOTRACE",
#endif
-#if SQLITE_ENABLE_JSON1
- "ENABLE_JSON1",
-#endif
-#if SQLITE_ENABLE_LOAD_EXTENSION
+#ifdef SQLITE_ENABLE_LOAD_EXTENSION
"ENABLE_LOAD_EXTENSION",
#endif
-#if SQLITE_ENABLE_LOCKING_STYLE
+#ifdef SQLITE_ENABLE_LOCKING_STYLE
"ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE),
#endif
-#if SQLITE_ENABLE_MEMORY_MANAGEMENT
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
"ENABLE_MEMORY_MANAGEMENT",
#endif
-#if SQLITE_ENABLE_MEMSYS3
+#ifdef SQLITE_ENABLE_MEMSYS3
"ENABLE_MEMSYS3",
#endif
-#if SQLITE_ENABLE_MEMSYS5
+#ifdef SQLITE_ENABLE_MEMSYS5
"ENABLE_MEMSYS5",
#endif
-#if SQLITE_ENABLE_OVERSIZE_CELL_CHECK
+#ifdef SQLITE_ENABLE_OVERSIZE_CELL_CHECK
"ENABLE_OVERSIZE_CELL_CHECK",
#endif
-#if SQLITE_ENABLE_RTREE
+#ifdef SQLITE_ENABLE_RTREE
"ENABLE_RTREE",
#endif
#if defined(SQLITE_ENABLE_STAT4)
@@ -14923,31 +13099,31 @@ static const char * const azCompileOpt[] = {
#elif defined(SQLITE_ENABLE_STAT3)
"ENABLE_STAT3",
#endif
-#if SQLITE_ENABLE_UNLOCK_NOTIFY
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
"ENABLE_UNLOCK_NOTIFY",
#endif
-#if SQLITE_ENABLE_UPDATE_DELETE_LIMIT
+#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
"ENABLE_UPDATE_DELETE_LIMIT",
#endif
-#if SQLITE_HAS_CODEC
+#ifdef SQLITE_HAS_CODEC
"HAS_CODEC",
#endif
-#if HAVE_ISNAN || SQLITE_HAVE_ISNAN
+#ifdef SQLITE_HAVE_ISNAN
"HAVE_ISNAN",
#endif
-#if SQLITE_HOMEGROWN_RECURSIVE_MUTEX
+#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
"HOMEGROWN_RECURSIVE_MUTEX",
#endif
-#if SQLITE_IGNORE_AFP_LOCK_ERRORS
+#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS
"IGNORE_AFP_LOCK_ERRORS",
#endif
-#if SQLITE_IGNORE_FLOCK_LOCK_ERRORS
+#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS
"IGNORE_FLOCK_LOCK_ERRORS",
#endif
#ifdef SQLITE_INT64_TYPE
"INT64_TYPE",
#endif
-#if SQLITE_LOCK_TRACE
+#ifdef SQLITE_LOCK_TRACE
"LOCK_TRACE",
#endif
#if defined(SQLITE_MAX_MMAP_SIZE) && !defined(SQLITE_MAX_MMAP_SIZE_xc)
@@ -14956,226 +13132,220 @@ static const char * const azCompileOpt[] = {
#ifdef SQLITE_MAX_SCHEMA_RETRY
"MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY),
#endif
-#if SQLITE_MEMDEBUG
+#ifdef SQLITE_MEMDEBUG
"MEMDEBUG",
#endif
-#if SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
"MIXED_ENDIAN_64BIT_FLOAT",
#endif
-#if SQLITE_NO_SYNC
+#ifdef SQLITE_NO_SYNC
"NO_SYNC",
#endif
-#if SQLITE_OMIT_ALTERTABLE
+#ifdef SQLITE_OMIT_ALTERTABLE
"OMIT_ALTERTABLE",
#endif
-#if SQLITE_OMIT_ANALYZE
+#ifdef SQLITE_OMIT_ANALYZE
"OMIT_ANALYZE",
#endif
-#if SQLITE_OMIT_ATTACH
+#ifdef SQLITE_OMIT_ATTACH
"OMIT_ATTACH",
#endif
-#if SQLITE_OMIT_AUTHORIZATION
+#ifdef SQLITE_OMIT_AUTHORIZATION
"OMIT_AUTHORIZATION",
#endif
-#if SQLITE_OMIT_AUTOINCREMENT
+#ifdef SQLITE_OMIT_AUTOINCREMENT
"OMIT_AUTOINCREMENT",
#endif
-#if SQLITE_OMIT_AUTOINIT
+#ifdef SQLITE_OMIT_AUTOINIT
"OMIT_AUTOINIT",
#endif
-#if SQLITE_OMIT_AUTOMATIC_INDEX
+#ifdef SQLITE_OMIT_AUTOMATIC_INDEX
"OMIT_AUTOMATIC_INDEX",
#endif
-#if SQLITE_OMIT_AUTORESET
+#ifdef SQLITE_OMIT_AUTORESET
"OMIT_AUTORESET",
#endif
-#if SQLITE_OMIT_AUTOVACUUM
+#ifdef SQLITE_OMIT_AUTOVACUUM
"OMIT_AUTOVACUUM",
#endif
-#if SQLITE_OMIT_BETWEEN_OPTIMIZATION
+#ifdef SQLITE_OMIT_BETWEEN_OPTIMIZATION
"OMIT_BETWEEN_OPTIMIZATION",
#endif
-#if SQLITE_OMIT_BLOB_LITERAL
+#ifdef SQLITE_OMIT_BLOB_LITERAL
"OMIT_BLOB_LITERAL",
#endif
-#if SQLITE_OMIT_BTREECOUNT
+#ifdef SQLITE_OMIT_BTREECOUNT
"OMIT_BTREECOUNT",
#endif
-#if SQLITE_OMIT_BUILTIN_TEST
+#ifdef SQLITE_OMIT_BUILTIN_TEST
"OMIT_BUILTIN_TEST",
#endif
-#if SQLITE_OMIT_CAST
+#ifdef SQLITE_OMIT_CAST
"OMIT_CAST",
#endif
-#if SQLITE_OMIT_CHECK
+#ifdef SQLITE_OMIT_CHECK
"OMIT_CHECK",
#endif
-#if SQLITE_OMIT_COMPLETE
+#ifdef SQLITE_OMIT_COMPLETE
"OMIT_COMPLETE",
#endif
-#if SQLITE_OMIT_COMPOUND_SELECT
+#ifdef SQLITE_OMIT_COMPOUND_SELECT
"OMIT_COMPOUND_SELECT",
#endif
-#if SQLITE_OMIT_CTE
- "OMIT_CTE",
-#endif
-#if SQLITE_OMIT_DATETIME_FUNCS
+#ifdef SQLITE_OMIT_DATETIME_FUNCS
"OMIT_DATETIME_FUNCS",
#endif
-#if SQLITE_OMIT_DECLTYPE
+#ifdef SQLITE_OMIT_DECLTYPE
"OMIT_DECLTYPE",
#endif
-#if SQLITE_OMIT_DEPRECATED
+#ifdef SQLITE_OMIT_DEPRECATED
"OMIT_DEPRECATED",
#endif
-#if SQLITE_OMIT_DISKIO
+#ifdef SQLITE_OMIT_DISKIO
"OMIT_DISKIO",
#endif
-#if SQLITE_OMIT_EXPLAIN
+#ifdef SQLITE_OMIT_EXPLAIN
"OMIT_EXPLAIN",
#endif
-#if SQLITE_OMIT_FLAG_PRAGMAS
+#ifdef SQLITE_OMIT_FLAG_PRAGMAS
"OMIT_FLAG_PRAGMAS",
#endif
-#if SQLITE_OMIT_FLOATING_POINT
+#ifdef SQLITE_OMIT_FLOATING_POINT
"OMIT_FLOATING_POINT",
#endif
-#if SQLITE_OMIT_FOREIGN_KEY
+#ifdef SQLITE_OMIT_FOREIGN_KEY
"OMIT_FOREIGN_KEY",
#endif
-#if SQLITE_OMIT_GET_TABLE
+#ifdef SQLITE_OMIT_GET_TABLE
"OMIT_GET_TABLE",
#endif
-#if SQLITE_OMIT_INCRBLOB
+#ifdef SQLITE_OMIT_INCRBLOB
"OMIT_INCRBLOB",
#endif
-#if SQLITE_OMIT_INTEGRITY_CHECK
+#ifdef SQLITE_OMIT_INTEGRITY_CHECK
"OMIT_INTEGRITY_CHECK",
#endif
-#if SQLITE_OMIT_LIKE_OPTIMIZATION
+#ifdef SQLITE_OMIT_LIKE_OPTIMIZATION
"OMIT_LIKE_OPTIMIZATION",
#endif
-#if SQLITE_OMIT_LOAD_EXTENSION
+#ifdef SQLITE_OMIT_LOAD_EXTENSION
"OMIT_LOAD_EXTENSION",
#endif
-#if SQLITE_OMIT_LOCALTIME
+#ifdef SQLITE_OMIT_LOCALTIME
"OMIT_LOCALTIME",
#endif
-#if SQLITE_OMIT_LOOKASIDE
+#ifdef SQLITE_OMIT_LOOKASIDE
"OMIT_LOOKASIDE",
#endif
-#if SQLITE_OMIT_MEMORYDB
+#ifdef SQLITE_OMIT_MEMORYDB
"OMIT_MEMORYDB",
#endif
-#if SQLITE_OMIT_OR_OPTIMIZATION
+#ifdef SQLITE_OMIT_OR_OPTIMIZATION
"OMIT_OR_OPTIMIZATION",
#endif
-#if SQLITE_OMIT_PAGER_PRAGMAS
+#ifdef SQLITE_OMIT_PAGER_PRAGMAS
"OMIT_PAGER_PRAGMAS",
#endif
-#if SQLITE_OMIT_PRAGMA
+#ifdef SQLITE_OMIT_PRAGMA
"OMIT_PRAGMA",
#endif
-#if SQLITE_OMIT_PROGRESS_CALLBACK
+#ifdef SQLITE_OMIT_PROGRESS_CALLBACK
"OMIT_PROGRESS_CALLBACK",
#endif
-#if SQLITE_OMIT_QUICKBALANCE
+#ifdef SQLITE_OMIT_QUICKBALANCE
"OMIT_QUICKBALANCE",
#endif
-#if SQLITE_OMIT_REINDEX
+#ifdef SQLITE_OMIT_REINDEX
"OMIT_REINDEX",
#endif
-#if SQLITE_OMIT_SCHEMA_PRAGMAS
+#ifdef SQLITE_OMIT_SCHEMA_PRAGMAS
"OMIT_SCHEMA_PRAGMAS",
#endif
-#if SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
+#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
"OMIT_SCHEMA_VERSION_PRAGMAS",
#endif
-#if SQLITE_OMIT_SHARED_CACHE
+#ifdef SQLITE_OMIT_SHARED_CACHE
"OMIT_SHARED_CACHE",
#endif
-#if SQLITE_OMIT_SUBQUERY
+#ifdef SQLITE_OMIT_SUBQUERY
"OMIT_SUBQUERY",
#endif
-#if SQLITE_OMIT_TCL_VARIABLE
+#ifdef SQLITE_OMIT_TCL_VARIABLE
"OMIT_TCL_VARIABLE",
#endif
-#if SQLITE_OMIT_TEMPDB
+#ifdef SQLITE_OMIT_TEMPDB
"OMIT_TEMPDB",
#endif
-#if SQLITE_OMIT_TRACE
+#ifdef SQLITE_OMIT_TRACE
"OMIT_TRACE",
#endif
-#if SQLITE_OMIT_TRIGGER
+#ifdef SQLITE_OMIT_TRIGGER
"OMIT_TRIGGER",
#endif
-#if SQLITE_OMIT_TRUNCATE_OPTIMIZATION
+#ifdef SQLITE_OMIT_TRUNCATE_OPTIMIZATION
"OMIT_TRUNCATE_OPTIMIZATION",
#endif
-#if SQLITE_OMIT_UTF16
+#ifdef SQLITE_OMIT_UTF16
"OMIT_UTF16",
#endif
-#if SQLITE_OMIT_VACUUM
+#ifdef SQLITE_OMIT_VACUUM
"OMIT_VACUUM",
#endif
-#if SQLITE_OMIT_VIEW
+#ifdef SQLITE_OMIT_VIEW
"OMIT_VIEW",
#endif
-#if SQLITE_OMIT_VIRTUALTABLE
+#ifdef SQLITE_OMIT_VIRTUALTABLE
"OMIT_VIRTUALTABLE",
#endif
-#if SQLITE_OMIT_WAL
+#ifdef SQLITE_OMIT_WAL
"OMIT_WAL",
#endif
-#if SQLITE_OMIT_WSD
+#ifdef SQLITE_OMIT_WSD
"OMIT_WSD",
#endif
-#if SQLITE_OMIT_XFER_OPT
+#ifdef SQLITE_OMIT_XFER_OPT
"OMIT_XFER_OPT",
#endif
-#if SQLITE_PERFORMANCE_TRACE
+#ifdef SQLITE_PERFORMANCE_TRACE
"PERFORMANCE_TRACE",
#endif
-#if SQLITE_PROXY_DEBUG
+#ifdef SQLITE_PROXY_DEBUG
"PROXY_DEBUG",
#endif
-#if SQLITE_RTREE_INT_ONLY
+#ifdef SQLITE_RTREE_INT_ONLY
"RTREE_INT_ONLY",
#endif
-#if SQLITE_SECURE_DELETE
+#ifdef SQLITE_SECURE_DELETE
"SECURE_DELETE",
#endif
-#if SQLITE_SMALL_STACK
+#ifdef SQLITE_SMALL_STACK
"SMALL_STACK",
#endif
-#if SQLITE_SOUNDEX
+#ifdef SQLITE_SOUNDEX
"SOUNDEX",
#endif
-#if SQLITE_SYSTEM_MALLOC
+#ifdef SQLITE_SYSTEM_MALLOC
"SYSTEM_MALLOC",
#endif
-#if SQLITE_TCL
+#ifdef SQLITE_TCL
"TCL",
#endif
#if defined(SQLITE_TEMP_STORE) && !defined(SQLITE_TEMP_STORE_xc)
"TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE),
#endif
-#if SQLITE_TEST
+#ifdef SQLITE_TEST
"TEST",
#endif
#if defined(SQLITE_THREADSAFE)
"THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE),
#endif
-#if SQLITE_USE_ALLOCA
+#ifdef SQLITE_USE_ALLOCA
"USE_ALLOCA",
#endif
-#if SQLITE_USER_AUTHENTICATION
- "USER_AUTHENTICATION",
-#endif
-#if SQLITE_WIN32_MALLOC
+#ifdef SQLITE_WIN32_MALLOC
"WIN32_MALLOC",
#endif
-#if SQLITE_ZERO_MALLOC
+#ifdef SQLITE_ZERO_MALLOC
"ZERO_MALLOC"
#endif
};
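/* Illustrative note, not part of the SQLite sources: the hunk above switches
** these option tests from "#if" back to "#ifdef".  The distinction matters
** when an option is defined with the value 0: "#ifdef" treats it as enabled
** merely because it is defined, while "#if" evaluates the value.  The macro
** name below is invented for the example.
*/
#define EXAMPLE_OPTION 0
#ifdef EXAMPLE_OPTION
  /* reached: the macro is defined, even though its value is 0 */
#endif
#if EXAMPLE_OPTION
  /* not reached: the value 0 is false */
#else
  /* reached instead */
#endif
#undef EXAMPLE_OPTION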
@@ -15187,15 +13357,8 @@ static const char * const azCompileOpt[] = {
** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix
** is not required for a match.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
int i, n;
-
-#if SQLITE_ENABLE_API_ARMOR
- if( zOptName==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
if( sqlite3StrNICmp(zOptName, "SQLITE_", 7)==0 ) zOptName += 7;
n = sqlite3Strlen30(zOptName);
@@ -15203,7 +13366,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){
** linear search is adequate. No need for a binary search. */
for(i=0; i<ArraySize(azCompileOpt); i++){
if( sqlite3StrNICmp(zOptName, azCompileOpt[i], n)==0
- && sqlite3IsIdChar((unsigned char)azCompileOpt[i][n])==0
+ && sqlite3CtypeMap[(unsigned char)azCompileOpt[i][n]]==0
){
return 1;
}
@@ -15215,7 +13378,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){
** Return the N-th compile-time option string. If N is out of range,
** return a NULL pointer.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N){
+SQLITE_API const char *sqlite3_compileoption_get(int N){
if( N>=0 && N<ArraySize(azCompileOpt) ){
return azCompileOpt[N];
}
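/* Illustrative sketch, not part of the SQLite sources: typical use of the
** two public entry points above.  Option names may be given with or without
** the "SQLITE_" prefix, and iterating sqlite3_compileoption_get() until it
** returns NULL enumerates every option recorded in azCompileOpt[].
*/
static void exampleListCompileOptions(void){
  int i;
  const char *zOpt;
  if( sqlite3_compileoption_used("ENABLE_FTS3") ){
    /* full-text search was compiled in */
  }
  for(i=0; (zOpt = sqlite3_compileoption_get(i))!=0; i++){
    /* zOpt is e.g. "THREADSAFE=1"; log or print it as needed */
    (void)zOpt;
  }
}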
@@ -15241,7 +13404,6 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N){
** This module implements the sqlite3_status() interface and related
** functionality.
*/
-/* #include "sqliteInt.h" */
/************** Include vdbeInt.h in the middle of status.c ******************/
/************** Begin file vdbeInt.h *****************************************/
/*
@@ -15314,24 +13476,19 @@ struct VdbeCursor {
int pseudoTableReg; /* Register holding pseudotable content. */
i16 nField; /* Number of fields in the header */
u16 nHdrParsed; /* Number of header fields parsed so far */
-#ifdef SQLITE_DEBUG
- u8 seekOp; /* Most recent seek operation on this cursor */
-#endif
i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */
u8 nullRow; /* True if pointing to a row with no data */
+ u8 rowidIsValid; /* True if lastRowid is valid */
u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */
- Bool isEphemeral:1; /* True for an ephemeral table */
Bool useRandomRowid:1;/* Generate new record numbers semi-randomly */
Bool isTable:1; /* True if a table requiring integer keys */
Bool isOrdered:1; /* True if the underlying table is BTREE_UNORDERED */
- Pgno pgnoRoot; /* Root page of the open btree cursor */
+ Bool multiPseudo:1; /* Multi-register pseudo-cursor */
sqlite3_vtab_cursor *pVtabCursor; /* The cursor for a virtual table */
i64 seqCount; /* Sequence counter */
i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */
+ i64 lastRowid; /* Rowid being deleted by OP_Delete */
VdbeSorter *pSorter; /* Sorter object for OP_SorterOpen cursors */
-#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
- u64 maskUsed; /* Mask of columns used by this cursor */
-#endif
/* Cached information about the header for the data record that the
** cursor is currently pointing to. Only valid if cacheStatus matches
@@ -15347,7 +13504,6 @@ struct VdbeCursor {
u32 szRow; /* Byte available in aRow */
u32 iHdrOffset; /* Offset to next unparsed byte of the header */
const u8 *aRow; /* Data for the current row, if all on one page */
- u32 *aOffset; /* Pointer to aType[nField] */
u32 aType[1]; /* Type values for all entries in the record */
/* 2*nField extra array elements allocated for aType[], beyond the one
** static element declared in the structure. nField total array slots for
@@ -15381,7 +13537,6 @@ struct VdbeFrame {
Vdbe *v; /* VM this frame belongs to */
VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */
Op *aOp; /* Program instructions for parent frame */
- i64 *anExec; /* Event counters from parent frame */
Mem *aMem; /* Array of memory cells for parent frame */
u8 *aOnceFlag; /* Array of OP_Once flags for parent frame */
VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */
@@ -15394,8 +13549,7 @@ struct VdbeFrame {
int nOnceFlag; /* Number of entries in aOnceFlag */
int nChildMem; /* Number of memory cells for child frame */
int nChildCsr; /* Number of cursors for child frame */
- int nChange; /* Statement changes (Vdbe.nChange) */
- int nDbChange; /* Value of db->nChange */
+ int nChange; /* Statement changes (Vdbe.nChanges) */
};
#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))])
@@ -15411,37 +13565,28 @@ struct VdbeFrame {
** integer etc.) of the same value.
*/
struct Mem {
- union MemValue {
- double r; /* Real value used when MEM_Real is set in flags */
+ sqlite3 *db; /* The associated database connection */
+ char *z; /* String or BLOB value */
+ double r; /* Real value */
+ union {
i64 i; /* Integer value used when MEM_Int is set in flags */
int nZero; /* Used when bit MEM_Zero is set in flags */
FuncDef *pDef; /* Used only when flags==MEM_Agg */
RowSet *pRowSet; /* Used only when flags==MEM_RowSet */
VdbeFrame *pFrame; /* Used when flags==MEM_Frame */
} u;
+ int n; /* Number of characters in string value, excluding '\0' */
u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */
+ u8 type; /* One of SQLITE_NULL, SQLITE_TEXT, SQLITE_INTEGER, etc */
u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */
- u8 eSubtype; /* Subtype for this value */
- int n; /* Number of characters in string value, excluding '\0' */
- char *z; /* String or BLOB value */
- /* ShallowCopy only needs to copy the information above */
- char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */
- int szMalloc; /* Size of the zMalloc allocation */
- u32 uTemp; /* Transient storage for serial_type in OP_MakeRecord */
- sqlite3 *db; /* The associated database connection */
- void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */
#ifdef SQLITE_DEBUG
Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */
void *pFiller; /* So that sizeof(Mem) is a multiple of 8 */
#endif
+ void (*xDel)(void *); /* If not null, call this function to delete Mem.z */
+ char *zMalloc; /* Dynamic buffer allocated by sqlite3_malloc() */
};
-/*
-** Size of struct Mem not including the Mem.zMalloc member or anything that
-** follows.
-*/
-#define MEMCELLSIZE offsetof(Mem,zMalloc)
-
/* One or more of the following flags are set to indicate the validOK
** representations of the value stored in the Mem struct.
**
@@ -15459,10 +13604,9 @@ struct Mem {
#define MEM_Int 0x0004 /* Value is an integer */
#define MEM_Real 0x0008 /* Value is a real number */
#define MEM_Blob 0x0010 /* Value is a BLOB */
-#define MEM_AffMask 0x001f /* Mask of affinity bits */
#define MEM_RowSet 0x0020 /* Value is a RowSet object */
#define MEM_Frame 0x0040 /* Value is a VdbeFrame object */
-#define MEM_Undefined 0x0080 /* Value is undefined */
+#define MEM_Invalid 0x0080 /* Value is undefined */
#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */
#define MEM_TypeMask 0x01ff /* Mask of type bits */
@@ -15473,7 +13617,7 @@ struct Mem {
** string is \000 or \u0000 terminated
*/
#define MEM_Term 0x0200 /* String rep is nul terminated */
-#define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */
+#define MEM_Dyn 0x0400 /* Need to call sqliteFree() on Mem.z */
#define MEM_Static 0x0800 /* Mem.z points to a static string */
#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */
#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */
@@ -15494,11 +13638,11 @@ struct Mem {
** is for use inside assert() statements only.
*/
#ifdef SQLITE_DEBUG
-#define memIsValid(M) ((M)->flags & MEM_Undefined)==0
+#define memIsValid(M) ((M)->flags & MEM_Invalid)==0
#endif
/*
-** Each auxiliary data pointer stored by a user defined function
+** Each auxiliary data pointer stored by a user defined function
** implementation calling sqlite3_set_auxdata() is stored in an instance
** of this structure. All such structures associated with a single VM
** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed
@@ -15513,7 +13657,7 @@ struct AuxData {
};
/*
-** The "context" argument for an installable function. A pointer to an
+** The "context" argument for an installable function.  A pointer to an
** instance of this structure is the first argument to the routines used
** implement the SQL functions.
**
@@ -15526,16 +13670,15 @@ struct AuxData {
** (Mem) which are only defined there.
*/
struct sqlite3_context {
- Mem *pOut; /* The return value is stored here */
- FuncDef *pFunc; /* Pointer to function information */
- Mem *pMem; /* Memory cell used to store aggregate context */
- Vdbe *pVdbe; /* The VM that owns this context */
- int iOp; /* Instruction number of OP_Function */
- int isError; /* Error code returned by the function. */
- u8 skipFlag; /* Skip accumulator loading if true */
- u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */
- u8 argc; /* Number of arguments */
- sqlite3_value *argv[1]; /* Argument set */
+ FuncDef *pFunc; /* Pointer to function information. MUST BE FIRST */
+ Mem s; /* The return value is stored here */
+ Mem *pMem; /* Memory cell used to store aggregate context */
+ CollSeq *pColl; /* Collating sequence */
+ Vdbe *pVdbe; /* The VM that owns this context */
+ int iOp; /* Instruction number of OP_Function */
+ int isError; /* Error code returned by the function. */
+  u8 skipFlag;             /* Skip accumulator loading if true */
+ u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */
};
/*
@@ -15555,22 +13698,20 @@ struct Explain {
*/
typedef unsigned bft; /* Bit Field Type */
-typedef struct ScanStatus ScanStatus;
-struct ScanStatus {
- int addrExplain; /* OP_Explain for loop */
- int addrLoop; /* Address of "loops" counter */
- int addrVisit; /* Address of "rows visited" counter */
- int iSelectID; /* The "Select-ID" for this loop */
- LogEst nEst; /* Estimated output rows per loop */
- char *zName; /* Name of table or index */
-};
-
/*
** An instance of the virtual machine. This structure contains the complete
** state of the virtual machine.
**
** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare()
** is really a pointer to an instance of this structure.
+**
+** The Vdbe.inVtabMethod variable is set to non-zero for the duration of
+** any virtual table method invocations made by the vdbe program. It is
+** set to 2 for xDestroy method calls and 1 for all other methods. This
+** variable is used for two purposes: to allow xDestroy methods to execute
+** "DROP TABLE" statements and to prevent some nasty side effects of
+** malloc failure when SQLite is invoked recursively by a virtual table
+** method function.
*/
struct Vdbe {
sqlite3 *db; /* The database connection that owns this statement */
@@ -15579,9 +13720,12 @@ struct Vdbe {
Mem **apArg; /* Arguments to currently executing user function */
Mem *aColName; /* Column names to return */
Mem *pResultSet; /* Pointer to an array of results */
- Parse *pParse; /* Parsing context used to create this Vdbe */
int nMem; /* Number of memory locations currently allocated */
int nOp; /* Number of instructions in the program */
+ int nOpAlloc; /* Number of slots allocated for aOp[] */
+ int nLabel; /* Number of labels used */
+ int *aLabel; /* Space to hold the labels */
+ u16 nResColumn; /* Number of columns in one row of the result set */
int nCursor; /* Number of slots in apCsr[] */
u32 magic; /* Magic number for sanity checking */
char *zErrMsg; /* Error message written here */
@@ -15594,13 +13738,10 @@ struct Vdbe {
u32 cacheCtr; /* VdbeCursor row cache generation counter */
int pc; /* The program counter */
int rc; /* Value to return */
-#ifdef SQLITE_DEBUG
- int rcApp; /* errcode set by sqlite3_result_error_code() */
-#endif
- u16 nResColumn; /* Number of columns in one row of the result set */
u8 errorAction; /* Recovery action to do in case of an error */
u8 minWriteFileFormat; /* Minimum file format for writable database files */
bft explain:2; /* True if EXPLAIN present on SQL command */
+ bft inVtabMethod:2; /* See comments above */
bft changeCntOn:1; /* True to update the change-counter */
bft expired:1; /* True if the VM needs to be recompiled */
bft runOnlyOnce:1; /* Automatically expire on reset */
@@ -15623,6 +13764,10 @@ struct Vdbe {
i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */
char *zSql; /* Text of the SQL statement that generated this */
void *pFree; /* Free this when deleting the vdbe */
+#ifdef SQLITE_ENABLE_TREE_EXPLAIN
+ Explain *pExplain; /* The explainer */
+ char *zExplain; /* Explanation of data structures */
+#endif
VdbeFrame *pFrame; /* Parent frame */
VdbeFrame *pDelFrame; /* List of frame objects to free on VM reset */
int nFrame; /* Number of frames in pFrame list */
@@ -15631,11 +13776,6 @@ struct Vdbe {
int nOnceFlag; /* Size of array aOnceFlag[] */
u8 *aOnceFlag; /* Flags for OP_Once */
AuxData *pAuxData; /* Linked list of auxdata allocations */
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- i64 *anExec; /* Number of times each op has been executed */
- int nScan; /* Entries in aScan[] */
- ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */
-#endif
};
/*
@@ -15649,23 +13789,22 @@ struct Vdbe {
/*
** Function prototypes
*/
-SQLITE_PRIVATE void sqlite3VdbeError(Vdbe*, const char *, ...);
SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*);
void sqliteVdbePopStack(Vdbe*,int);
SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor*);
-SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*);
#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, Op*);
#endif
SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32);
SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem*, int);
-SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32);
+SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, int, Mem*, int);
SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*);
SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(Vdbe*, int, int);
int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *);
-SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(sqlite3*,VdbeCursor*,UnpackedRecord*,int*);
-SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor*, i64*);
+SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(VdbeCursor*,UnpackedRecord*,int*);
+SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor *, i64 *);
+SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*);
SQLITE_PRIVATE int sqlite3VdbeExec(Vdbe*);
SQLITE_PRIVATE int sqlite3VdbeList(Vdbe*);
SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe*);
@@ -15682,39 +13821,38 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64);
#else
SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double);
#endif
-SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16);
SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int);
SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8);
+SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, int);
SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*);
SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*);
SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*);
-SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8);
SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,int,Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p);
-#define VdbeMemDynamic(X) \
- (((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame))!=0)
+SQLITE_PRIVATE void sqlite3VdbeMemReleaseExternal(Mem *p);
+#define VdbeMemRelease(X) \
+ if((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame)) \
+ sqlite3VdbeMemReleaseExternal(X);
SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*);
SQLITE_PRIVATE const char *sqlite3OpcodeName(int);
SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve);
-SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n);
SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int);
SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*);
SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *);
+SQLITE_PRIVATE void sqlite3VdbeMemStoreType(Mem *pMem);
SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p);
-SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *);
-SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *);
+SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, VdbeCursor *);
SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *);
SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *);
SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *, int *);
-SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *);
-SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *);
+SQLITE_PRIVATE int sqlite3VdbeSorterRewind(sqlite3 *, const VdbeCursor *, int *);
+SQLITE_PRIVATE int sqlite3VdbeSorterWrite(sqlite3 *, const VdbeCursor *, Mem *);
SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *);
#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0
@@ -15727,7 +13865,6 @@ SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe*);
#ifdef SQLITE_DEBUG
SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe*,Mem*);
-SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*);
#endif
#ifndef SQLITE_OMIT_FOREIGN_KEY
@@ -15761,32 +13898,10 @@ SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *);
*/
typedef struct sqlite3StatType sqlite3StatType;
static SQLITE_WSD struct sqlite3StatType {
-#if SQLITE_PTRSIZE>4
- sqlite3_int64 nowValue[10]; /* Current value */
- sqlite3_int64 mxValue[10]; /* Maximum value */
-#else
- u32 nowValue[10]; /* Current value */
- u32 mxValue[10]; /* Maximum value */
-#endif
+ int nowValue[10]; /* Current value */
+ int mxValue[10]; /* Maximum value */
} sqlite3Stat = { {0,}, {0,} };
-/*
-** Elements of sqlite3Stat[] are protected by either the memory allocator
-** mutex, or by the pcache1 mutex. The following array determines which.
-*/
-static const char statMutex[] = {
- 0, /* SQLITE_STATUS_MEMORY_USED */
- 1, /* SQLITE_STATUS_PAGECACHE_USED */
- 1, /* SQLITE_STATUS_PAGECACHE_OVERFLOW */
- 0, /* SQLITE_STATUS_SCRATCH_USED */
- 0, /* SQLITE_STATUS_SCRATCH_OVERFLOW */
- 0, /* SQLITE_STATUS_MALLOC_SIZE */
- 0, /* SQLITE_STATUS_PARSER_STACK */
- 1, /* SQLITE_STATUS_PAGECACHE_SIZE */
- 0, /* SQLITE_STATUS_SCRATCH_SIZE */
- 0, /* SQLITE_STATUS_MALLOC_COUNT */
-};
-
/* The "wsdStat" macro will resolve to the status information
** state vector. If writable static data is unsupported on the target,
@@ -15803,60 +13918,33 @@ static const char statMutex[] = {
#endif
/*
-** Return the current value of a status parameter. The caller must
-** be holding the appropriate mutex.
+** Return the current value of a status parameter.
*/
-SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int op){
+SQLITE_PRIVATE int sqlite3StatusValue(int op){
wsdStatInit;
assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
- assert( op>=0 && op<ArraySize(statMutex) );
- assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
- : sqlite3MallocMutex()) );
return wsdStat.nowValue[op];
}
/*
-** Add N to the value of a status record. The caller must hold the
-** appropriate mutex. (Locking is checked by assert()).
-**
-** The StatusUp() routine can accept positive or negative values for N.
-** The value of N is added to the current status value and the high-water
-** mark is adjusted if necessary.
-**
-** The StatusDown() routine lowers the current value by N. The highwater
-** mark is unchanged. N must be non-negative for StatusDown().
+** Add N to the value of a status record. It is assumed that the
+** caller holds appropriate locks.
*/
-SQLITE_PRIVATE void sqlite3StatusUp(int op, int N){
+SQLITE_PRIVATE void sqlite3StatusAdd(int op, int N){
wsdStatInit;
assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
- assert( op>=0 && op<ArraySize(statMutex) );
- assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
- : sqlite3MallocMutex()) );
wsdStat.nowValue[op] += N;
if( wsdStat.nowValue[op]>wsdStat.mxValue[op] ){
wsdStat.mxValue[op] = wsdStat.nowValue[op];
}
}
-SQLITE_PRIVATE void sqlite3StatusDown(int op, int N){
- wsdStatInit;
- assert( N>=0 );
- assert( op>=0 && op<ArraySize(statMutex) );
- assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
- : sqlite3MallocMutex()) );
- assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
- wsdStat.nowValue[op] -= N;
-}
/*
-** Set the value of a status to X. The highwater mark is adjusted if
-** necessary. The caller must hold the appropriate mutex.
+** Set the value of a status to X.
*/
SQLITE_PRIVATE void sqlite3StatusSet(int op, int X){
wsdStatInit;
assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
- assert( op>=0 && op<ArraySize(statMutex) );
- assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
- : sqlite3MallocMutex()) );
wsdStat.nowValue[op] = X;
if( wsdStat.nowValue[op]>wsdStat.mxValue[op] ){
wsdStat.mxValue[op] = wsdStat.nowValue[op];
@@ -15865,50 +13953,28 @@ SQLITE_PRIVATE void sqlite3StatusSet(int op, int X){
/*
** Query status information.
+**
+** This implementation assumes that reading or writing an aligned
+** 32-bit integer is an atomic operation. If that assumption is not true,
+** then this routine is not threadsafe.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
- int op,
- sqlite3_int64 *pCurrent,
- sqlite3_int64 *pHighwater,
- int resetFlag
-){
- sqlite3_mutex *pMutex;
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
wsdStatInit;
if( op<0 || op>=ArraySize(wsdStat.nowValue) ){
return SQLITE_MISUSE_BKPT;
}
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
-#endif
- pMutex = statMutex[op] ? sqlite3Pcache1Mutex() : sqlite3MallocMutex();
- sqlite3_mutex_enter(pMutex);
*pCurrent = wsdStat.nowValue[op];
*pHighwater = wsdStat.mxValue[op];
if( resetFlag ){
wsdStat.mxValue[op] = wsdStat.nowValue[op];
}
- sqlite3_mutex_leave(pMutex);
- (void)pMutex; /* Prevent warning when SQLITE_THREADSAFE=0 */
return SQLITE_OK;
}
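For illustration only (this is not part of the diff above, and it assumes nothing beyond the public declarations in sqlite3.h; the helper name is invented), an application reads one of these counters through sqlite3_status() like so:

#include <stdio.h>
#include "sqlite3.h"

/* Query the global memory-usage counter and its high-water mark
** without resetting the high-water mark (resetFlag==0). */
static void reportMemoryStatus(void){
  int nCur = 0, nHi = 0;
  if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &nCur, &nHi, 0)==SQLITE_OK ){
    printf("memory in use: %d bytes (high-water: %d)\n", nCur, nHi);
  }
}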
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
- sqlite3_int64 iCur, iHwtr;
- int rc;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
-#endif
- rc = sqlite3_status64(op, &iCur, &iHwtr, resetFlag);
- if( rc==0 ){
- *pCurrent = (int)iCur;
- *pHighwater = (int)iHwtr;
- }
- return rc;
-}
/*
** Query status information for a single database connection
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
+SQLITE_API int sqlite3_db_status(
sqlite3 *db, /* The database connection whose status is desired */
int op, /* Status verb */
int *pCurrent, /* Write current value here */
@@ -15916,11 +13982,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
int resetFlag /* Reset high-water mark if true */
){
int rc = SQLITE_OK; /* Return code */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwater==0 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
switch( op ){
case SQLITE_DBSTATUS_LOOKASIDE_USED: {
@@ -16029,7 +14090,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
}
db->pnBytesFreed = 0;
- *pHighwater = 0; /* IMP: R-64479-57858 */
+ *pHighwater = 0;
*pCurrent = nByte;
break;
@@ -16054,9 +14115,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet);
}
}
- *pHighwater = 0; /* IMP: R-42420-56072 */
- /* IMP: R-54100-20147 */
- /* IMP: R-29431-39229 */
+ *pHighwater = 0;
*pCurrent = nRet;
break;
}
@@ -16066,7 +14125,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
** have been satisfied. The *pHighwater is always set to zero.
*/
case SQLITE_DBSTATUS_DEFERRED_FKS: {
- *pHighwater = 0; /* IMP: R-11967-56545 */
+ *pHighwater = 0;
*pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0;
break;
}
@@ -16099,7 +14158,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
** sqlite3RegisterDateTimeFunctions() found at the bottom of the file.
** All other code has file scope.
**
-** SQLite processes all times and dates as julian day numbers. The
+** SQLite processes all times and dates as Julian Day numbers. The
** dates and times are stored as the number of days since noon
** in Greenwich on November 24, 4714 B.C. according to the Gregorian
** calendar system.
@@ -16107,14 +14166,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
** 1970-01-01 00:00:00 is JD 2440587.5
** 2000-01-01 00:00:00 is JD 2451544.5
**
-** This implementation requires years to be expressed as a 4-digit number
+** This implemention requires years to be expressed as a 4-digit number
** which means that only dates between 0000-01-01 and 9999-12-31 can
** be represented, even though julian day numbers allow a much wider
** range of dates.
**
** The Gregorian calendar system is used for all dates and times,
** even those that predate the Gregorian calendar. Historians usually
-** use the julian calendar for dates prior to 1582-10-15 and for some
+** use the Julian calendar for dates prior to 1582-10-15 and for some
** dates afterwards, depending on locale. Beware of this difference.
**
** The conversion algorithms are implemented based on descriptions
@@ -16126,7 +14185,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
** Willmann-Bell, Inc
** Richmond, Virginia (USA)
*/
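As a worked example of the epoch values quoted above (purely illustrative, not code from the amalgamation; the helper name is invented), a Unix timestamp maps to a Julian Day number by one linear transformation:

#include <time.h>

/* 1970-01-01 00:00:00 UTC is JD 2440587.5 and one day is 86400 seconds,
** so a time_t value t seconds after the Unix epoch corresponds to
** JD 2440587.5 + t/86400. */
static double unixToJulianDay(time_t t){
  return (double)t/86400.0 + 2440587.5;
}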
-/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/* #include <assert.h> */
#include <time.h>
@@ -16387,7 +14445,7 @@ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){
}
/*
-** Attempt to parse the given string into a julian day number. Return
+** Attempt to parse the given string into a Julian Day Number. Return
** the number of errors.
**
** The following are acceptable forms for the input string:
@@ -16438,7 +14496,7 @@ static void computeYMD(DateTime *p){
A = Z + 1 + A - (A/4);
B = A + 1524;
C = (int)((B - 122.1)/365.25);
- D = (36525*(C&32767))/100;
+ D = (36525*C)/100;
E = (int)((B-D)/30.6001);
X1 = (int)(30.6001*E);
p->D = B - D - X1;
@@ -16495,9 +14553,8 @@ static void clearYMD_HMS_TZ(DateTime *p){
** already, check for an MSVC build environment that provides
** localtime_s().
*/
-#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S \
- && defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE)
-#undef HAVE_LOCALTIME_S
+#if !defined(HAVE_LOCALTIME_R) && !defined(HAVE_LOCALTIME_S) && \
+ defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE)
#define HAVE_LOCALTIME_S 1
#endif
@@ -16517,7 +14574,8 @@ static void clearYMD_HMS_TZ(DateTime *p){
*/
static int osLocaltime(time_t *t, struct tm *pTm){
int rc;
-#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S
+#if (!defined(HAVE_LOCALTIME_R) || !HAVE_LOCALTIME_R) \
+ && (!defined(HAVE_LOCALTIME_S) || !HAVE_LOCALTIME_S)
struct tm *pX;
#if SQLITE_THREADSAFE>0
sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
@@ -16534,7 +14592,7 @@ static int osLocaltime(time_t *t, struct tm *pTm){
#ifndef SQLITE_OMIT_BUILTIN_TEST
if( sqlite3GlobalConfig.bLocaltimeFault ) return 1;
#endif
-#if HAVE_LOCALTIME_R
+#if defined(HAVE_LOCALTIME_R) && HAVE_LOCALTIME_R
rc = localtime_r(t, pTm)==0;
#else
rc = localtime_s(pTm, t);
@@ -16958,7 +15016,7 @@ static void dateFunc(
** %f ** fractional seconds SS.SSS
** %H hour 00-24
** %j day of year 000-366
-** %J ** julian day number
+** %J ** Julian day number
** %m month 01-12
** %M minute 00-59
** %s seconds since 1970-01-01
@@ -16978,10 +15036,8 @@ static void strftimeFunc(
size_t i,j;
char *z;
sqlite3 *db;
- const char *zFmt;
+ const char *zFmt = (const char*)sqlite3_value_text(argv[0]);
char zBuf[100];
- if( argc==0 ) return;
- zFmt = (const char*)sqlite3_value_text(argv[0]);
if( zFmt==0 || isDate(context, argc-1, argv+1, &x) ) return;
db = sqlite3_context_db_handle(context);
for(i=0, n=1; zFmt[i]; i++, n++){
@@ -17175,7 +15231,7 @@ static void currentTimeFunc(
iT = sqlite3StmtCurrentTime(context);
if( iT<=0 ) return;
t = iT/1000 - 10000*(sqlite3_int64)21086676;
-#if HAVE_GMTIME_R
+#ifdef HAVE_GMTIME_R
pTm = gmtime_r(&t, &sNow);
#else
sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
@@ -17198,14 +15254,14 @@ static void currentTimeFunc(
SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
static SQLITE_WSD FuncDef aDateTimeFuncs[] = {
#ifndef SQLITE_OMIT_DATETIME_FUNCS
- DFUNCTION(julianday, -1, 0, 0, juliandayFunc ),
- DFUNCTION(date, -1, 0, 0, dateFunc ),
- DFUNCTION(time, -1, 0, 0, timeFunc ),
- DFUNCTION(datetime, -1, 0, 0, datetimeFunc ),
- DFUNCTION(strftime, -1, 0, 0, strftimeFunc ),
- DFUNCTION(current_time, 0, 0, 0, ctimeFunc ),
- DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
- DFUNCTION(current_date, 0, 0, 0, cdateFunc ),
+ FUNCTION(julianday, -1, 0, 0, juliandayFunc ),
+ FUNCTION(date, -1, 0, 0, dateFunc ),
+ FUNCTION(time, -1, 0, 0, timeFunc ),
+ FUNCTION(datetime, -1, 0, 0, datetimeFunc ),
+ FUNCTION(strftime, -1, 0, 0, strftimeFunc ),
+ FUNCTION(current_time, 0, 0, 0, ctimeFunc ),
+ FUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
+ FUNCTION(current_date, 0, 0, 0, cdateFunc ),
#else
STR_FUNCTION(current_time, 0, "%H:%M:%S", 0, currentTimeFunc),
STR_FUNCTION(current_date, 0, "%Y-%m-%d", 0, currentTimeFunc),
@@ -17239,7 +15295,6 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
** architectures.
*/
#define _SQLITE_OS_C_ 1
-/* #include "sqliteInt.h" */
#undef _SQLITE_OS_C_
/*
@@ -17332,21 +15387,7 @@ SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){
** routine has no return value since the return value would be meaningless.
*/
SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){
-#ifdef SQLITE_TEST
- if( op!=SQLITE_FCNTL_COMMIT_PHASETWO ){
- /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite
- ** is using a regular VFS, it is called after the corresponding
- ** transaction has been committed. Injecting a fault at this point
- ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM
- ** but the transaction is committed anyway.
- **
- ** The core must call OsFileControl() though, not OsFileControlHint(),
- ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably
- ** means the commit really has failed and an error should be returned
- ** to the user. */
- DO_OS_MALLOC_TEST(id);
- }
-#endif
+ DO_OS_MALLOC_TEST(id);
return id->pMethods->xFileControl(id, op, pArg);
}
SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){
@@ -17534,7 +15575,7 @@ static sqlite3_vfs * SQLITE_WSD vfsList = 0;
** Locate a VFS by name. If no name is given, simply return the
** first VFS on the list.
*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfs){
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){
sqlite3_vfs *pVfs = 0;
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex;
@@ -17580,16 +15621,12 @@ static void vfsUnlink(sqlite3_vfs *pVfs){
** VFS multiple times. The new VFS becomes the default if makeDflt is
** true.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
MUTEX_LOGIC(sqlite3_mutex *mutex;)
#ifndef SQLITE_OMIT_AUTOINIT
int rc = sqlite3_initialize();
if( rc ) return rc;
#endif
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pVfs==0 ) return SQLITE_MISUSE_BKPT;
-#endif
-
MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
sqlite3_mutex_enter(mutex);
vfsUnlink(pVfs);
@@ -17608,7 +15645,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDf
/*
** Unregister a VFS so that it is no longer accessible.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
#endif
@@ -17646,7 +15683,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
** during a hash table resize is a benign fault.
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_OMIT_BUILTIN_TEST
@@ -17728,7 +15764,6 @@ SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){
** are merely placeholders. Real drivers must be substituted using
** sqlite3_config() before SQLite will operate.
*/
-/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is the default. It is
@@ -17815,7 +15850,6 @@ SQLITE_PRIVATE void sqlite3MemSetDefault(void){
** be necessary when compiling for Delphi,
** for example.
*/
-/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is the default. It is
@@ -17853,9 +15887,9 @@ static malloc_zone_t* _sqliteZone_;
** The malloc.h header file is needed for malloc_usable_size() function
** on some systems (e.g. Linux).
*/
-#if HAVE_MALLOC_H && HAVE_MALLOC_USABLE_SIZE
-# define SQLITE_USE_MALLOC_H 1
-# define SQLITE_USE_MALLOC_USABLE_SIZE 1
+#if defined(HAVE_MALLOC_H) && defined(HAVE_MALLOC_USABLE_SIZE)
+# define SQLITE_USE_MALLOC_H
+# define SQLITE_USE_MALLOC_USABLE_SIZE
/*
** The MSVCRT has malloc_usable_size(), but it is called _msize(). The
** use of _msize() is automatic, but can be disabled by compiling with
@@ -17962,7 +15996,7 @@ static int sqlite3MemSize(void *pPrior){
**
** For this low-level interface, we know that pPrior!=0. Cases where
** pPrior==0 while have been intercepted by higher-level routine and
-** redirected to xMalloc. Similarly, we know that nByte>0 because
+** redirected to xMalloc. Similarly, we know that nByte>0 becauses
** cases where nByte<=0 will have been intercepted by higher-level
** routines and redirected to xFree.
*/
@@ -18091,7 +16125,6 @@ SQLITE_PRIVATE void sqlite3MemSetDefault(void){
** This file contains implementations of the low-level memory allocation
** routines specified in the sqlite3_mem_methods object.
*/
-/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is used only if the
@@ -18466,7 +16499,7 @@ SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){
** This routine is designed for use within an assert() statement, to
** verify the type of an allocation. For example:
**
-** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+** assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) );
*/
SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){
int rc = 1;
@@ -18488,7 +16521,7 @@ SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){
** This routine is designed for use within an assert() statement, to
** verify the type of an allocation. For example:
**
-** assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
+** assert( sqlite3MemdebugNoType(p, MEMTYPE_DB) );
*/
SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){
int rc = 1;
@@ -18626,7 +16659,6 @@ SQLITE_PRIVATE int sqlite3MemdebugMallocCount(){
** This version of the memory allocation subsystem is included
** in the build only if SQLITE_ENABLE_MEMSYS3 is defined.
*/
-/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is only built into the library
@@ -19321,7 +17353,7 @@ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){
** 1. All memory allocations sizes are rounded up to a power of 2.
**
** 2. If two adjacent free blocks are the halves of a larger block,
-** then the two blocks are coalesced into the single larger block.
+** then the two blocks are coalesed into the single larger block.
**
** 3. New memory is allocated from the first available free block.
**
@@ -19341,7 +17373,6 @@ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){
** The sqlite3_status() logic tracks the maximum values of n and M so
** that an application can, at any time, verify this constraint.
*/
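To make property 1 above concrete, the rounding step can be sketched as follows. This is an illustrative helper, not code taken from the diff; memsys5 performs an equivalent computation internally when it sizes a block.

/* Round a request of n bytes (n>0) up to the next power of two, which is
** the block size the memsys5 allocator actually hands out. */
static int memsys5RoundupSketch(int n){
  int iFullSz = 1;
  while( iFullSz<n ) iFullSz *= 2;
  return iFullSz;   /* e.g. 600 -> 1024, 1024 -> 1024 */
}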
-/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is used only when
@@ -19541,7 +17572,7 @@ static void *memsys5MallocUnsafe(int nByte){
** block. If not, then split a block of the next larger power of
** two in order to create a new free block of size iLogsize.
*/
- for(iBin=iLogsize; iBin<=LOGMAX && mem5.aiFreelist[iBin]<0; iBin++){}
+ for(iBin=iLogsize; mem5.aiFreelist[iBin]<0 && iBin<=LOGMAX; iBin++){}
if( iBin>LOGMAX ){
testcase( sqlite3GlobalConfig.xLog!=0 );
sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes", nByte);
@@ -19568,12 +17599,6 @@ static void *memsys5MallocUnsafe(int nByte){
if( mem5.maxCount<mem5.currentCount ) mem5.maxCount = mem5.currentCount;
if( mem5.maxOut<mem5.currentOut ) mem5.maxOut = mem5.currentOut;
-#ifdef SQLITE_DEBUG
- /* Make sure the allocated memory does not assume that it is set to zero
- ** or retains a value from a previous allocation */
- memset(&mem5.zPool[i*mem5.szAtom], 0xAA, iFullSz);
-#endif
-
/* Return a pointer to the allocated memory. */
return (void*)&mem5.zPool[i*mem5.szAtom];
}
@@ -19631,13 +17656,6 @@ static void memsys5FreeUnsafe(void *pOld){
}
size *= 2;
}
-
-#ifdef SQLITE_DEBUG
- /* Overwrite freed memory with the 0x55 bit pattern to verify that it is
- ** not used after being freed */
- memset(&mem5.zPool[iBlock*mem5.szAtom], 0x55, size);
-#endif
-
memsys5Link(iBlock, iLogsize);
}
@@ -19885,7 +17903,6 @@ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){
**
** This file contains code that is common across all mutex implementations.
*/
-/* #include "sqliteInt.h" */
#if defined(SQLITE_DEBUG) && !defined(SQLITE_MUTEX_OMIT)
/*
@@ -19894,7 +17911,7 @@ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){
** allocate a mutex while the system is uninitialized.
*/
static SQLITE_WSD int mutexIsInit = 0;
-#endif /* SQLITE_DEBUG && !defined(SQLITE_MUTEX_OMIT) */
+#endif /* SQLITE_DEBUG */
#ifndef SQLITE_MUTEX_OMIT
@@ -19917,18 +17934,11 @@ SQLITE_PRIVATE int sqlite3MutexInit(void){
}else{
pFrom = sqlite3NoopMutex();
}
- pTo->xMutexInit = pFrom->xMutexInit;
- pTo->xMutexEnd = pFrom->xMutexEnd;
- pTo->xMutexFree = pFrom->xMutexFree;
- pTo->xMutexEnter = pFrom->xMutexEnter;
- pTo->xMutexTry = pFrom->xMutexTry;
- pTo->xMutexLeave = pFrom->xMutexLeave;
- pTo->xMutexHeld = pFrom->xMutexHeld;
- pTo->xMutexNotheld = pFrom->xMutexNotheld;
- sqlite3MemoryBarrier();
+ memcpy(pTo, pFrom, offsetof(sqlite3_mutex_methods, xMutexAlloc));
+ memcpy(&pTo->xMutexFree, &pFrom->xMutexFree,
+ sizeof(*pTo) - offsetof(sqlite3_mutex_methods, xMutexFree));
pTo->xMutexAlloc = pFrom->xMutexAlloc;
}
- assert( sqlite3GlobalConfig.mutex.xMutexInit );
rc = sqlite3GlobalConfig.mutex.xMutexInit();
#ifdef SQLITE_DEBUG
@@ -19958,12 +17968,10 @@ SQLITE_PRIVATE int sqlite3MutexEnd(void){
/*
** Retrieve a pointer to a static mutex or allocate a new dynamic one.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int id){
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int id){
#ifndef SQLITE_OMIT_AUTOINIT
- if( id<=SQLITE_MUTEX_RECURSIVE && sqlite3_initialize() ) return 0;
- if( id>SQLITE_MUTEX_RECURSIVE && sqlite3MutexInit() ) return 0;
+ if( sqlite3_initialize() ) return 0;
#endif
- assert( sqlite3GlobalConfig.mutex.xMutexAlloc );
return sqlite3GlobalConfig.mutex.xMutexAlloc(id);
}
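A hedged usage sketch of the public mutex interface discussed here (not part of the diff; it assumes a threadsafe build and uses only documented entry points, with an invented function name):

#include "sqlite3.h"

static void mutexUsageSketch(void){
  sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
  if( p==0 ) return;            /* allocation can fail */
  sqlite3_mutex_enter(p);       /* block until the mutex is obtained */
  /* ... touch the shared state protected by p ... */
  sqlite3_mutex_leave(p);
  sqlite3_mutex_free(p);        /* only dynamic (FAST/RECURSIVE) mutexes may be freed */
}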
@@ -19972,16 +17980,14 @@ SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){
return 0;
}
assert( GLOBAL(int, mutexIsInit) );
- assert( sqlite3GlobalConfig.mutex.xMutexAlloc );
return sqlite3GlobalConfig.mutex.xMutexAlloc(id);
}
/*
** Free a dynamic mutex.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex *p){
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex *p){
if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexFree );
sqlite3GlobalConfig.mutex.xMutexFree(p);
}
}
@@ -19990,9 +17996,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex *p){
** Obtain the mutex p. If some other thread already has the mutex, block
** until it can be obtained.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex *p){
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex *p){
if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexEnter );
sqlite3GlobalConfig.mutex.xMutexEnter(p);
}
}
@@ -20001,10 +18006,9 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex *p){
** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another
** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex *p){
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex *p){
int rc = SQLITE_OK;
if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexTry );
return sqlite3GlobalConfig.mutex.xMutexTry(p);
}
return rc;
@@ -20016,9 +18020,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex *p){
** is not currently entered. If a NULL pointer is passed as an argument
** this function is a no-op.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex *p){
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex *p){
if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexLeave );
sqlite3GlobalConfig.mutex.xMutexLeave(p);
}
}
@@ -20028,12 +18031,10 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex *p){
** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
** intended for use inside assert() statements.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex *p){
- assert( p==0 || sqlite3GlobalConfig.mutex.xMutexHeld );
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex *p){
return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex *p){
- assert( p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld );
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex *p){
return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p);
}
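As the comment above notes, these two routines exist for use inside assert() statements; a typical, purely illustrative use (pMutex is a placeholder for whatever mutex the surrounding code owns) looks like this:

/* Verify the caller already holds the mutex before touching the state
** it protects... */
assert( sqlite3_mutex_held(pMutex) );
/* ...and, on a code path that must not hold it: */
assert( sqlite3_mutex_notheld(pMutex) );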
#endif
@@ -20069,7 +18070,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex *p){
** that does error checking on mutexes to make sure they are being
** called correctly.
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_MUTEX_OMIT
@@ -20151,7 +18151,7 @@ static int debugMutexEnd(void){ return SQLITE_OK; }
** that means that a mutex could not be allocated.
*/
static sqlite3_mutex *debugMutexAlloc(int id){
- static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1];
+ static sqlite3_debug_mutex aStatic[6];
sqlite3_debug_mutex *pNew = 0;
switch( id ){
case SQLITE_MUTEX_FAST:
@@ -20164,12 +18164,8 @@ static sqlite3_mutex *debugMutexAlloc(int id){
break;
}
default: {
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( id-2<0 || id-2>=ArraySize(aStatic) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+ assert( id-2 >= 0 );
+ assert( id-2 < (int)(sizeof(aStatic)/sizeof(aStatic[0])) );
pNew = &aStatic[id-2];
pNew->id = id;
break;
@@ -20184,13 +18180,8 @@ static sqlite3_mutex *debugMutexAlloc(int id){
static void debugMutexFree(sqlite3_mutex *pX){
sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
assert( p->cnt==0 );
- if( p->id==SQLITE_MUTEX_RECURSIVE || p->id==SQLITE_MUTEX_FAST ){
- sqlite3_free(p);
- }else{
-#ifdef SQLITE_ENABLE_API_ARMOR
- (void)SQLITE_MISUSE_BKPT;
-#endif
- }
+ assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE );
+ sqlite3_free(p);
}
/*
@@ -20273,7 +18264,6 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
*************************************************************************
** This file contains the C functions that implement mutexes for pthreads
*/
-/* #include "sqliteInt.h" */
/*
** The code in this file is only used if we are compiling threadsafe
@@ -20302,10 +18292,8 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
*/
struct sqlite3_mutex {
pthread_mutex_t mutex; /* Mutex controlling the lock */
-#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
- int id; /* Mutex type */
-#endif
#if SQLITE_MUTEX_NREF
+ int id; /* Mutex type */
volatile int nRef; /* Number of entrances */
volatile pthread_t owner; /* Thread that is within this mutex */
int trace; /* True to trace changes */
@@ -20343,19 +18331,6 @@ static int pthreadMutexNotheld(sqlite3_mutex *p){
#endif
/*
-** Try to provide a memory barrier operation, needed for initialization
-** and also for the implementation of xShmBarrier in the VFS in cases
-** where SQLite is compiled without mutexes.
-*/
-SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
-#if defined(SQLITE_MEMORY_BARRIER)
- SQLITE_MEMORY_BARRIER;
-#elif defined(__GNUC__) && GCC_VERSION>=4001000
- __sync_synchronize();
-#endif
-}
-
-/*
** Initialize and deinitialize the mutex subsystem.
*/
static int pthreadMutexInit(void){ return SQLITE_OK; }
@@ -20373,16 +18348,10 @@ static int pthreadMutexEnd(void){ return SQLITE_OK; }
** <li> SQLITE_MUTEX_RECURSIVE
** <li> SQLITE_MUTEX_STATIC_MASTER
** <li> SQLITE_MUTEX_STATIC_MEM
-** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_MEM2
** <li> SQLITE_MUTEX_STATIC_PRNG
** <li> SQLITE_MUTEX_STATIC_LRU
** <li> SQLITE_MUTEX_STATIC_PMEM
-** <li> SQLITE_MUTEX_STATIC_APP1
-** <li> SQLITE_MUTEX_STATIC_APP2
-** <li> SQLITE_MUTEX_STATIC_APP3
-** <li> SQLITE_MUTEX_STATIC_VFS1
-** <li> SQLITE_MUTEX_STATIC_VFS2
-** <li> SQLITE_MUTEX_STATIC_VFS3
** </ul>
**
** The first two constants cause sqlite3_mutex_alloc() to create
@@ -20416,12 +18385,6 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){
SQLITE3_MUTEX_INITIALIZER,
SQLITE3_MUTEX_INITIALIZER,
SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
SQLITE3_MUTEX_INITIALIZER
};
sqlite3_mutex *p;
@@ -20441,30 +18404,32 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){
pthread_mutex_init(&p->mutex, &recursiveAttr);
pthread_mutexattr_destroy(&recursiveAttr);
#endif
+#if SQLITE_MUTEX_NREF
+ p->id = iType;
+#endif
}
break;
}
case SQLITE_MUTEX_FAST: {
p = sqlite3MallocZero( sizeof(*p) );
if( p ){
+#if SQLITE_MUTEX_NREF
+ p->id = iType;
+#endif
pthread_mutex_init(&p->mutex, 0);
}
break;
}
default: {
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( iType-2<0 || iType-2>=ArraySize(staticMutexes) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+ assert( iType-2 >= 0 );
+ assert( iType-2 < ArraySize(staticMutexes) );
p = &staticMutexes[iType-2];
+#if SQLITE_MUTEX_NREF
+ p->id = iType;
+#endif
break;
}
}
-#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
- if( p ) p->id = iType;
-#endif
return p;
}
@@ -20476,18 +18441,9 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){
*/
static void pthreadMutexFree(sqlite3_mutex *p){
assert( p->nRef==0 );
-#if SQLITE_ENABLE_API_ARMOR
- if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE )
-#endif
- {
- pthread_mutex_destroy(&p->mutex);
- sqlite3_free(p);
- }
-#ifdef SQLITE_ENABLE_API_ARMOR
- else{
- (void)SQLITE_MISUSE_BKPT;
- }
-#endif
+ assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE );
+ pthread_mutex_destroy(&p->mutex);
+ sqlite3_free(p);
}
/*
@@ -20659,315 +18615,12 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file contains the C functions that implement mutexes for Win32.
-*/
-/* #include "sqliteInt.h" */
-
-#if SQLITE_OS_WIN
-/*
-** Include code that is common to all os_*.c files
-*/
-/************** Include os_common.h in the middle of mutex_w32.c *************/
-/************** Begin file os_common.h ***************************************/
-/*
-** 2004 May 22
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains macros and a little bit of code that is common to
-** all of the platform-specific files (os_*.c) and is #included into those
-** files.
-**
-** This file should be #included by the os_*.c files only. It is not a
-** general purpose header file.
+** This file contains the C functions that implement mutexes for win32
*/
-#ifndef _OS_COMMON_H_
-#define _OS_COMMON_H_
-
-/*
-** At least two bugs have slipped in because we changed the MEMORY_DEBUG
-** macro to SQLITE_DEBUG and some older makefiles have not yet made the
-** switch. The following code should catch this problem at compile-time.
-*/
-#ifdef MEMORY_DEBUG
-# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
-#endif
-
-/*
-** Macros for performance tracing. Normally turned off. Only works
-** on i486 hardware.
-*/
-#ifdef SQLITE_PERFORMANCE_TRACE
-
-/*
-** hwtime.h contains inline assembler code for implementing
-** high-performance timing routines.
-*/
-/************** Include hwtime.h in the middle of os_common.h ****************/
-/************** Begin file hwtime.h ******************************************/
-/*
-** 2008 May 27
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains inline asm code for retrieving "high-performance"
-** counters for x86 class CPUs.
-*/
-#ifndef _HWTIME_H_
-#define _HWTIME_H_
-
-/*
-** The following routine only works on pentium-class (or newer) processors.
-** It uses the RDTSC opcode to read the cycle count value out of the
-** processor and returns that value. This can be used for high-res
-** profiling.
-*/
-#if (defined(__GNUC__) || defined(_MSC_VER)) && \
- (defined(i386) || defined(__i386__) || defined(_M_IX86))
-
- #if defined(__GNUC__)
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned int lo, hi;
- __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
- return (sqlite_uint64)hi << 32 | lo;
- }
-
- #elif defined(_MSC_VER)
-
- __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
- __asm {
- rdtsc
- ret ; return value at EDX:EAX
- }
- }
-
- #endif
-
-#elif (defined(__GNUC__) && defined(__x86_64__))
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned long val;
- __asm__ __volatile__ ("rdtsc" : "=A" (val));
- return val;
- }
-
-#elif (defined(__GNUC__) && defined(__ppc__))
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned long long retval;
- unsigned long junk;
- __asm__ __volatile__ ("\n\
- 1: mftbu %1\n\
- mftb %L0\n\
- mftbu %0\n\
- cmpw %0,%1\n\
- bne 1b"
- : "=r" (retval), "=r" (junk));
- return retval;
- }
-
-#else
-
- #error Need implementation of sqlite3Hwtime() for your platform.
-
- /*
- ** To compile without implementing sqlite3Hwtime() for your platform,
- ** you can remove the above #error and use the following
- ** stub function. You will lose timing support for many
- ** of the debugging and testing utilities, but it should at
- ** least compile and run.
- */
-SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
-
-#endif
-
-#endif /* !defined(_HWTIME_H_) */
-
-/************** End of hwtime.h **********************************************/
-/************** Continuing where we left off in os_common.h ******************/
-
-static sqlite_uint64 g_start;
-static sqlite_uint64 g_elapsed;
-#define TIMER_START g_start=sqlite3Hwtime()
-#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start
-#define TIMER_ELAPSED g_elapsed
-#else
-#define TIMER_START
-#define TIMER_END
-#define TIMER_ELAPSED ((sqlite_uint64)0)
-#endif
-
-/*
-** If we compile with the SQLITE_TEST macro set, then the following block
-** of code will give us the ability to simulate a disk I/O error. This
-** is used for testing the I/O recovery logic.
-*/
-#ifdef SQLITE_TEST
-SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */
-SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */
-SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */
-SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */
-SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */
-SQLITE_API int sqlite3_diskfull_pending = 0;
-SQLITE_API int sqlite3_diskfull = 0;
-#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X)
-#define SimulateIOError(CODE) \
- if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \
- || sqlite3_io_error_pending-- == 1 ) \
- { local_ioerr(); CODE; }
-static void local_ioerr(){
- IOTRACE(("IOERR\n"));
- sqlite3_io_error_hit++;
- if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++;
-}
-#define SimulateDiskfullError(CODE) \
- if( sqlite3_diskfull_pending ){ \
- if( sqlite3_diskfull_pending == 1 ){ \
- local_ioerr(); \
- sqlite3_diskfull = 1; \
- sqlite3_io_error_hit = 1; \
- CODE; \
- }else{ \
- sqlite3_diskfull_pending--; \
- } \
- }
-#else
-#define SimulateIOErrorBenign(X)
-#define SimulateIOError(A)
-#define SimulateDiskfullError(A)
-#endif
-
-/*
-** When testing, keep a count of the number of open files.
-*/
-#ifdef SQLITE_TEST
-SQLITE_API int sqlite3_open_file_count = 0;
-#define OpenCounter(X) sqlite3_open_file_count+=(X)
-#else
-#define OpenCounter(X)
-#endif
-
-#endif /* !defined(_OS_COMMON_H_) */
-
-/************** End of os_common.h *******************************************/
-/************** Continuing where we left off in mutex_w32.c ******************/
-
-/*
-** Include the header file for the Windows VFS.
-*/
-/************** Include os_win.h in the middle of mutex_w32.c ****************/
-/************** Begin file os_win.h ******************************************/
-/*
-** 2013 November 25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains code that is specific to Windows.
-*/
-#ifndef _OS_WIN_H_
-#define _OS_WIN_H_
-
-/*
-** Include the primary Windows SDK header file.
-*/
-#include "windows.h"
-
-#ifdef __CYGWIN__
-# include <sys/cygwin.h>
-# include <errno.h> /* amalgamator: dontcache */
-#endif
-
-/*
-** Determine if we are dealing with Windows NT.
-**
-** We ought to be able to determine if we are compiling for Windows 9x or
-** Windows NT using the _WIN32_WINNT macro as follows:
-**
-** #if defined(_WIN32_WINNT)
-** # define SQLITE_OS_WINNT 1
-** #else
-** # define SQLITE_OS_WINNT 0
-** #endif
-**
-** However, Visual Studio 2005 does not set _WIN32_WINNT by default, as
-** it ought to, so the above test does not work. We'll just assume that
-** everything is Windows NT unless the programmer explicitly says otherwise
-** by setting SQLITE_OS_WINNT to 0.
-*/
-#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT)
-# define SQLITE_OS_WINNT 1
-#endif
-
-/*
-** Determine if we are dealing with Windows CE - which has a much reduced
-** API.
-*/
-#if defined(_WIN32_WCE)
-# define SQLITE_OS_WINCE 1
-#else
-# define SQLITE_OS_WINCE 0
-#endif
-
-/*
-** Determine if we are dealing with WinRT, which provides only a subset of
-** the full Win32 API.
-*/
-#if !defined(SQLITE_OS_WINRT)
-# define SQLITE_OS_WINRT 0
-#endif
-
-/*
-** For WinCE, some API function parameters do not appear to be declared as
-** volatile.
-*/
-#if SQLITE_OS_WINCE
-# define SQLITE_WIN32_VOLATILE
-#else
-# define SQLITE_WIN32_VOLATILE volatile
-#endif
-
-/*
-** For some Windows sub-platforms, the _beginthreadex() / _endthreadex()
-** functions are not available (e.g. those not using MSVC, Cygwin, etc).
-*/
-#if SQLITE_OS_WIN && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
- SQLITE_THREADSAFE>0 && !defined(__CYGWIN__)
-# define SQLITE_OS_WIN_THREADS 1
-#else
-# define SQLITE_OS_WIN_THREADS 0
-#endif
-
-#endif /* _OS_WIN_H_ */
-
-/************** End of os_win.h **********************************************/
-/************** Continuing where we left off in mutex_w32.c ******************/
-#endif
/*
** The code in this file is only used if we are compiling multithreaded
-** on a Win32 system.
+** on a win32 system.
*/
#ifdef SQLITE_MUTEX_W32
@@ -20980,24 +18633,50 @@ struct sqlite3_mutex {
#ifdef SQLITE_DEBUG
volatile int nRef; /* Number of enterances */
volatile DWORD owner; /* Thread holding this mutex */
- volatile int trace; /* True to trace changes */
+ int trace; /* True to trace changes */
#endif
};
-
-/*
-** These are the initializer values used when declaring a "static" mutex
-** on Win32. It should be noted that all mutexes require initialization
-** on the Win32 platform.
-*/
#define SQLITE_W32_MUTEX_INITIALIZER { 0 }
-
#ifdef SQLITE_DEBUG
-#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, \
- 0L, (DWORD)0, 0 }
+#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, 0L, (DWORD)0, 0 }
#else
#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 }
#endif
+/*
+** Return true (non-zero) if we are running under WinNT, Win2K, WinXP,
+** or WinCE. Return false (zero) for Win95, Win98, or WinME.
+**
+** Here is an interesting observation: Win95, Win98, and WinME lack
+** the LockFileEx() API. But we can still statically link against that
+** API as long as we don't call it win running Win95/98/ME. A call to
+** this routine is used to determine if the host is Win95/98/ME or
+** WinNT/2K/XP so that we will know whether or not we can safely call
+** the LockFileEx() API.
+**
+** mutexIsNT() is only used for the TryEnterCriticalSection() API call,
+** which is only available if your application was compiled with
+** _WIN32_WINNT defined to a value >= 0x0400. Currently, the only
+** call to TryEnterCriticalSection() is #ifdef'ed out, so #ifdef
+** this out as well.
+*/
+#if 0
+#if SQLITE_OS_WINCE || SQLITE_OS_WINRT
+# define mutexIsNT() (1)
+#else
+ static int mutexIsNT(void){
+ static int osType = 0;
+ if( osType==0 ){
+ OSVERSIONINFO sInfo;
+ sInfo.dwOSVersionInfoSize = sizeof(sInfo);
+ GetVersionEx(&sInfo);
+ osType = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1;
+ }
+ return osType==2;
+ }
+#endif /* SQLITE_OS_WINCE || SQLITE_OS_WINRT */
+#endif
+
#ifdef SQLITE_DEBUG
/*
** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
@@ -21006,45 +18685,20 @@ struct sqlite3_mutex {
static int winMutexHeld(sqlite3_mutex *p){
return p->nRef!=0 && p->owner==GetCurrentThreadId();
}
-
static int winMutexNotheld2(sqlite3_mutex *p, DWORD tid){
return p->nRef==0 || p->owner!=tid;
}
-
static int winMutexNotheld(sqlite3_mutex *p){
- DWORD tid = GetCurrentThreadId();
+ DWORD tid = GetCurrentThreadId();
return winMutexNotheld2(p, tid);
}
#endif
-/*
-** Try to provide a memory barrier operation, needed for initialization
-** and also for the xShmBarrier method of the VFS in cases when SQLite is
-** compiled without mutexes (SQLITE_THREADSAFE=0).
-*/
-SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
-#if defined(SQLITE_MEMORY_BARRIER)
- SQLITE_MEMORY_BARRIER;
-#elif defined(__GNUC__)
- __sync_synchronize();
-#elif !defined(SQLITE_DISABLE_INTRINSIC) && \
- defined(_MSC_VER) && _MSC_VER>=1300
- _ReadWriteBarrier();
-#elif defined(MemoryBarrier)
- MemoryBarrier();
-#endif
-}
/*
** Initialize and deinitialize the mutex subsystem.
*/
-static sqlite3_mutex winMutex_staticMutexes[] = {
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
+static sqlite3_mutex winMutex_staticMutexes[6] = {
SQLITE3_MUTEX_INITIALIZER,
SQLITE3_MUTEX_INITIALIZER,
SQLITE3_MUTEX_INITIALIZER,
@@ -21052,20 +18706,17 @@ static sqlite3_mutex winMutex_staticMutexes[] = {
SQLITE3_MUTEX_INITIALIZER,
SQLITE3_MUTEX_INITIALIZER
};
-
static int winMutex_isInit = 0;
-static int winMutex_isNt = -1; /* <0 means "need to query" */
-
-/* As the winMutexInit() and winMutexEnd() functions are called as part
-** of the sqlite3_initialize() and sqlite3_shutdown() processing, the
-** "interlocked" magic used here is probably not strictly necessary.
+/* As winMutexInit() and winMutexEnd() are called as part
+** of the sqlite3_initialize and sqlite3_shutdown()
+** processing, the "interlocked" magic is probably not
+** strictly necessary.
*/
-static LONG SQLITE_WIN32_VOLATILE winMutex_lock = 0;
+static LONG winMutex_lock = 0;
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void); /* os_win.c */
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
-static int winMutexInit(void){
+static int winMutexInit(void){
/* The first to increment to 1 does actual initialization */
if( InterlockedCompareExchange(&winMutex_lock, 1, 0)==0 ){
int i;
@@ -21078,17 +18729,16 @@ static int winMutexInit(void){
}
winMutex_isInit = 1;
}else{
- /* Another thread is (in the process of) initializing the static
- ** mutexes */
+ /* Someone else is in the process of initing the static mutexes */
while( !winMutex_isInit ){
sqlite3_win32_sleep(1);
}
}
- return SQLITE_OK;
+ return SQLITE_OK;
}
-static int winMutexEnd(void){
- /* The first to decrement to 0 does actual shutdown
+static int winMutexEnd(void){
+ /* The first to decrement to 0 does actual shutdown
** (which should be the last to shutdown.) */
if( InterlockedCompareExchange(&winMutex_lock, 0, 1)==1 ){
if( winMutex_isInit==1 ){
@@ -21099,7 +18749,7 @@ static int winMutexEnd(void){
winMutex_isInit = 0;
}
}
- return SQLITE_OK;
+ return SQLITE_OK;
}
/*
@@ -21114,16 +18764,10 @@ static int winMutexEnd(void){
** <li> SQLITE_MUTEX_RECURSIVE
** <li> SQLITE_MUTEX_STATIC_MASTER
** <li> SQLITE_MUTEX_STATIC_MEM
-** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_MEM2
** <li> SQLITE_MUTEX_STATIC_PRNG
** <li> SQLITE_MUTEX_STATIC_LRU
** <li> SQLITE_MUTEX_STATIC_PMEM
-** <li> SQLITE_MUTEX_STATIC_APP1
-** <li> SQLITE_MUTEX_STATIC_APP2
-** <li> SQLITE_MUTEX_STATIC_APP3
-** <li> SQLITE_MUTEX_STATIC_VFS1
-** <li> SQLITE_MUTEX_STATIC_VFS2
-** <li> SQLITE_MUTEX_STATIC_VFS3
** </ul>
**
** The first two constants cause sqlite3_mutex_alloc() to create
@@ -21146,7 +18790,7 @@ static int winMutexEnd(void){
**
** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
-** returns a different mutex on every call. But for the static
+** returns a different mutex on every call. But for the static
** mutex types, the same mutex is returned on every call that has
** the same type number.
*/
@@ -21157,12 +18801,9 @@ static sqlite3_mutex *winMutexAlloc(int iType){
case SQLITE_MUTEX_FAST:
case SQLITE_MUTEX_RECURSIVE: {
p = sqlite3MallocZero( sizeof(*p) );
- if( p ){
- p->id = iType;
+ if( p ){
#ifdef SQLITE_DEBUG
-#ifdef SQLITE_WIN32_MUTEX_TRACE_DYNAMIC
- p->trace = 1;
-#endif
+ p->id = iType;
#endif
#if SQLITE_OS_WINRT
InitializeCriticalSectionEx(&p->mutex, 0, 0);
@@ -21173,18 +18814,12 @@ static sqlite3_mutex *winMutexAlloc(int iType){
break;
}
default: {
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( iType-2<0 || iType-2>=ArraySize(winMutex_staticMutexes) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+ assert( winMutex_isInit==1 );
+ assert( iType-2 >= 0 );
+ assert( iType-2 < ArraySize(winMutex_staticMutexes) );
p = &winMutex_staticMutexes[iType-2];
- p->id = iType;
#ifdef SQLITE_DEBUG
-#ifdef SQLITE_WIN32_MUTEX_TRACE_STATIC
- p->trace = 1;
-#endif
+ p->id = iType;
#endif
break;
}
@@ -21201,14 +18836,9 @@ static sqlite3_mutex *winMutexAlloc(int iType){
static void winMutexFree(sqlite3_mutex *p){
assert( p );
assert( p->nRef==0 && p->owner==0 );
- if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ){
- DeleteCriticalSection(&p->mutex);
- sqlite3_free(p);
- }else{
-#ifdef SQLITE_ENABLE_API_ARMOR
- (void)SQLITE_MISUSE_BKPT;
-#endif
- }
+ assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE );
+ DeleteCriticalSection(&p->mutex);
+ sqlite3_free(p);
}
/*
@@ -21223,39 +18853,30 @@ static void winMutexFree(sqlite3_mutex *p){
** more than once, the behavior is undefined.
*/
static void winMutexEnter(sqlite3_mutex *p){
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- DWORD tid = GetCurrentThreadId();
-#endif
#ifdef SQLITE_DEBUG
- assert( p );
+ DWORD tid = GetCurrentThreadId();
assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) );
-#else
- assert( p );
#endif
- assert( winMutex_isInit==1 );
EnterCriticalSection(&p->mutex);
#ifdef SQLITE_DEBUG
assert( p->nRef>0 || p->owner==0 );
- p->owner = tid;
+ p->owner = tid;
p->nRef++;
if( p->trace ){
- OSTRACE(("ENTER-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n",
- tid, p, p->trace, p->nRef));
+ printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
}
#endif
}
-
static int winMutexTry(sqlite3_mutex *p){
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- DWORD tid = GetCurrentThreadId();
+#ifndef NDEBUG
+ DWORD tid = GetCurrentThreadId();
#endif
int rc = SQLITE_BUSY;
- assert( p );
assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) );
/*
** The sqlite3_mutex_try() routine is very rarely used, and when it
** is used it is merely an optimization. So it is OK for it to always
- ** fail.
+ ** fail.
**
** The TryEnterCriticalSection() interface is only available on WinNT.
** And some windows compilers complain if you try to use it without
@@ -21263,27 +18884,18 @@ static int winMutexTry(sqlite3_mutex *p){
** For that reason, we will omit this optimization for now. See
** ticket #2685.
*/
-#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0400
- assert( winMutex_isInit==1 );
- assert( winMutex_isNt>=-1 && winMutex_isNt<=1 );
- if( winMutex_isNt<0 ){
- winMutex_isNt = sqlite3_win32_is_nt();
- }
- assert( winMutex_isNt==0 || winMutex_isNt==1 );
- if( winMutex_isNt && TryEnterCriticalSection(&p->mutex) ){
-#ifdef SQLITE_DEBUG
+#if 0
+ if( mutexIsNT() && TryEnterCriticalSection(&p->mutex) ){
p->owner = tid;
p->nRef++;
-#endif
rc = SQLITE_OK;
}
#else
UNUSED_PARAMETER(p);
#endif
#ifdef SQLITE_DEBUG
- if( p->trace ){
- OSTRACE(("TRY-MUTEX tid=%lu, mutex=%p (%d), owner=%lu, nRef=%d, rc=%s\n",
- tid, p, p->trace, p->owner, p->nRef, sqlite3ErrName(rc)));
+ if( rc==SQLITE_OK && p->trace ){
+ printf("try mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
}
#endif
return rc;
@@ -21296,23 +18908,18 @@ static int winMutexTry(sqlite3_mutex *p){
** is not currently allocated. SQLite will never do either.
*/
static void winMutexLeave(sqlite3_mutex *p){
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+#ifndef NDEBUG
DWORD tid = GetCurrentThreadId();
-#endif
- assert( p );
-#ifdef SQLITE_DEBUG
assert( p->nRef>0 );
assert( p->owner==tid );
p->nRef--;
if( p->nRef==0 ) p->owner = 0;
assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE );
#endif
- assert( winMutex_isInit==1 );
LeaveCriticalSection(&p->mutex);
#ifdef SQLITE_DEBUG
if( p->trace ){
- OSTRACE(("LEAVE-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n",
- tid, p, p->trace, p->nRef));
+ printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
}
#endif
}
@@ -21334,9 +18941,9 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
0
#endif
};
+
return &sMutex;
}
-
#endif /* SQLITE_MUTEX_W32 */
/************** End of mutex_w32.c *******************************************/
@@ -21355,7 +18962,6 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
**
** Memory allocation functions used throughout sqlite.
*/
-/* #include "sqliteInt.h" */
/* #include <stdarg.h> */
/*
@@ -21363,7 +18969,7 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
** held by SQLite. An example of non-essential memory is memory used to
** cache database pages that are not currently in use.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int n){
+SQLITE_API int sqlite3_release_memory(int n){
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
return sqlite3PcacheReleaseMemory(n);
#else
@@ -21388,7 +18994,16 @@ typedef struct ScratchFreeslot {
*/
static SQLITE_WSD struct Mem0Global {
sqlite3_mutex *mutex; /* Mutex to serialize access */
- sqlite3_int64 alarmThreshold; /* The soft heap limit */
+
+ /*
+ ** The alarm callback and its arguments. The mem0.mutex lock will
+ ** be held while the callback is running. Recursive calls into
+ ** the memory subsystem are allowed, but no new callbacks will be
+ ** issued.
+ */
+ sqlite3_int64 alarmThreshold;
+ void (*alarmCallback)(void*, sqlite3_int64,int);
+ void *alarmArg;
/*
** Pointers to the end of sqlite3GlobalConfig.pScratch memory
@@ -21405,62 +19020,82 @@ static SQLITE_WSD struct Mem0Global {
** sqlite3_soft_heap_limit() setting.
*/
int nearlyFull;
-} mem0 = { 0, 0, 0, 0, 0, 0 };
+} mem0 = { 0, 0, 0, 0, 0, 0, 0, 0 };
#define mem0 GLOBAL(struct Mem0Global, mem0)
/*
-** Return the memory allocator mutex. sqlite3_status() needs it.
+** This routine runs when the memory allocator sees that the
+** total memory allocation is about to exceed the soft heap
+** limit.
*/
-SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void){
- return mem0.mutex;
+static void softHeapLimitEnforcer(
+ void *NotUsed,
+ sqlite3_int64 NotUsed2,
+ int allocSize
+){
+ UNUSED_PARAMETER2(NotUsed, NotUsed2);
+ sqlite3_release_memory(allocSize);
}
-#ifndef SQLITE_OMIT_DEPRECATED
/*
-** Deprecated external interface. It used to set an alarm callback
-** that was invoked when memory usage grew too large. Now it is a
-** no-op.
+** Change the alarm callback
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_memory_alarm(
+static int sqlite3MemoryAlarm(
void(*xCallback)(void *pArg, sqlite3_int64 used,int N),
void *pArg,
sqlite3_int64 iThreshold
){
- (void)xCallback;
- (void)pArg;
- (void)iThreshold;
+ int nUsed;
+ sqlite3_mutex_enter(mem0.mutex);
+ mem0.alarmCallback = xCallback;
+ mem0.alarmArg = pArg;
+ mem0.alarmThreshold = iThreshold;
+ nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
+ mem0.nearlyFull = (iThreshold>0 && iThreshold<=nUsed);
+ sqlite3_mutex_leave(mem0.mutex);
return SQLITE_OK;
}
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Deprecated external interface. Internal/core SQLite code
+** should call sqlite3MemoryAlarm.
+*/
+SQLITE_API int sqlite3_memory_alarm(
+ void(*xCallback)(void *pArg, sqlite3_int64 used,int N),
+ void *pArg,
+ sqlite3_int64 iThreshold
+){
+ return sqlite3MemoryAlarm(xCallback, pArg, iThreshold);
+}
#endif
/*
** Set the soft heap-size limit for the library. Passing a zero or
** negative value indicates no limit.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 n){
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 n){
sqlite3_int64 priorLimit;
sqlite3_int64 excess;
- sqlite3_int64 nUsed;
#ifndef SQLITE_OMIT_AUTOINIT
int rc = sqlite3_initialize();
if( rc ) return -1;
#endif
sqlite3_mutex_enter(mem0.mutex);
priorLimit = mem0.alarmThreshold;
- if( n<0 ){
- sqlite3_mutex_leave(mem0.mutex);
- return priorLimit;
- }
- mem0.alarmThreshold = n;
- nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
- mem0.nearlyFull = (n>0 && n<=nUsed);
sqlite3_mutex_leave(mem0.mutex);
+ if( n<0 ) return priorLimit;
+ if( n>0 ){
+ sqlite3MemoryAlarm(softHeapLimitEnforcer, 0, n);
+ }else{
+ sqlite3MemoryAlarm(0, 0, 0);
+ }
excess = sqlite3_memory_used() - n;
if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff));
return priorLimit;
}
-SQLITE_API void SQLITE_STDCALL sqlite3_soft_heap_limit(int n){
+SQLITE_API void sqlite3_soft_heap_limit(int n){
if( n<0 ) n = 0;
sqlite3_soft_heap_limit64(n);
}
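For illustration (not part of the diff), an application can cap SQLite's heap usage through the 64-bit interface shown above and later restore whatever limit was previously in effect:

/* Limit SQLite to roughly 64 MiB of heap, do some work, then put the
** prior limit back. sqlite3_soft_heap_limit64() returns the old limit. */
sqlite3_int64 prior = sqlite3_soft_heap_limit64(64*1024*1024);
/* ... run queries ... */
sqlite3_soft_heap_limit64(prior);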
@@ -21469,7 +19104,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_soft_heap_limit(int n){
** Initialize the memory allocation subsystem.
*/
SQLITE_PRIVATE int sqlite3MallocInit(void){
- int rc;
if( sqlite3GlobalConfig.m.xMalloc==0 ){
sqlite3MemSetDefault();
}
@@ -21500,13 +19134,12 @@ SQLITE_PRIVATE int sqlite3MallocInit(void){
sqlite3GlobalConfig.nScratch = 0;
}
if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512
- || sqlite3GlobalConfig.nPage<=0 ){
+ || sqlite3GlobalConfig.nPage<1 ){
sqlite3GlobalConfig.pPage = 0;
sqlite3GlobalConfig.szPage = 0;
+ sqlite3GlobalConfig.nPage = 0;
}
- rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData);
- if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0));
- return rc;
+ return sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData);
}
/*
@@ -21531,9 +19164,11 @@ SQLITE_PRIVATE void sqlite3MallocEnd(void){
/*
** Return the amount of memory currently checked out.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void){
- sqlite3_int64 res, mx;
- sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, 0);
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void){
+ int n, mx;
+ sqlite3_int64 res;
+ sqlite3_status(SQLITE_STATUS_MEMORY_USED, &n, &mx, 0);
+ res = (sqlite3_int64)n; /* Work around bug in Borland C. Ticket #3216 */
return res;
}
@@ -21542,20 +19177,31 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void){
** checked out since either the beginning of this process
** or since the most recent reset.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag){
- sqlite3_int64 res, mx;
- sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, resetFlag);
- return mx;
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag){
+ int n, mx;
+ sqlite3_int64 res;
+ sqlite3_status(SQLITE_STATUS_MEMORY_USED, &n, &mx, resetFlag);
+ res = (sqlite3_int64)mx; /* Work around bug in Borland C. Ticket #3216 */
+ return res;
}
/*
** Trigger the alarm
*/
static void sqlite3MallocAlarm(int nByte){
- if( mem0.alarmThreshold<=0 ) return;
+ void (*xCallback)(void*,sqlite3_int64,int);
+ sqlite3_int64 nowUsed;
+ void *pArg;
+ if( mem0.alarmCallback==0 ) return;
+ xCallback = mem0.alarmCallback;
+ nowUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
+ pArg = mem0.alarmArg;
+ mem0.alarmCallback = 0;
sqlite3_mutex_leave(mem0.mutex);
- sqlite3_release_memory(nByte);
+ xCallback(pArg, nowUsed, nByte);
sqlite3_mutex_enter(mem0.mutex);
+ mem0.alarmCallback = xCallback;
+ mem0.alarmArg = pArg;
}
/*
@@ -21568,8 +19214,8 @@ static int mallocWithAlarm(int n, void **pp){
assert( sqlite3_mutex_held(mem0.mutex) );
nFull = sqlite3GlobalConfig.m.xRoundup(n);
sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, n);
- if( mem0.alarmThreshold>0 ){
- sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
+ if( mem0.alarmCallback!=0 ){
+ int nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
if( nUsed >= mem0.alarmThreshold - nFull ){
mem0.nearlyFull = 1;
sqlite3MallocAlarm(nFull);
@@ -21579,15 +19225,15 @@ static int mallocWithAlarm(int n, void **pp){
}
p = sqlite3GlobalConfig.m.xMalloc(nFull);
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
- if( p==0 && mem0.alarmThreshold>0 ){
+ if( p==0 && mem0.alarmCallback ){
sqlite3MallocAlarm(nFull);
p = sqlite3GlobalConfig.m.xMalloc(nFull);
}
#endif
if( p ){
nFull = sqlite3MallocSize(p);
- sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nFull);
- sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, nFull);
+ sqlite3StatusAdd(SQLITE_STATUS_MALLOC_COUNT, 1);
}
*pp = p;
return nFull;
@@ -21597,9 +19243,11 @@ static int mallocWithAlarm(int n, void **pp){
** Allocate memory. This routine is like sqlite3_malloc() except that it
** assumes the memory subsystem has already been initialized.
*/
-SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
+SQLITE_PRIVATE void *sqlite3Malloc(int n){
void *p;
- if( n==0 || n>=0x7fffff00 ){
+ if( n<=0 /* IMP: R-65312-04917 */
+ || n>=0x7fffff00
+ ){
/* A memory allocation of a number of bytes which is near the maximum
** signed integer value might cause an integer overflow inside of the
** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving
@@ -21608,12 +19256,12 @@ SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
p = 0;
}else if( sqlite3GlobalConfig.bMemstat ){
sqlite3_mutex_enter(mem0.mutex);
- mallocWithAlarm((int)n, &p);
+ mallocWithAlarm(n, &p);
sqlite3_mutex_leave(mem0.mutex);
}else{
- p = sqlite3GlobalConfig.m.xMalloc((int)n);
+ p = sqlite3GlobalConfig.m.xMalloc(n);
}
- assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-11148-40995 */
+ assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-04675-44850 */
return p;
}
@@ -21622,13 +19270,7 @@ SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
** First make sure the memory subsystem is initialized, then do the
** allocation.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int n){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return 0;
-#endif
- return n<=0 ? 0 : sqlite3Malloc(n);
-}
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64 n){
+SQLITE_API void *sqlite3_malloc(int n){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
@@ -21659,20 +19301,22 @@ SQLITE_PRIVATE void *sqlite3ScratchMalloc(int n){
assert( n>0 );
sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n);
if( mem0.nScratchFree && sqlite3GlobalConfig.szScratch>=n ){
p = mem0.pScratchFree;
mem0.pScratchFree = mem0.pScratchFree->pNext;
mem0.nScratchFree--;
- sqlite3StatusUp(SQLITE_STATUS_SCRATCH_USED, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_USED, 1);
+ sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n);
sqlite3_mutex_leave(mem0.mutex);
}else{
- sqlite3_mutex_leave(mem0.mutex);
- p = sqlite3Malloc(n);
- if( sqlite3GlobalConfig.bMemstat && p ){
- sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusUp(SQLITE_STATUS_SCRATCH_OVERFLOW, sqlite3MallocSize(p));
+ if( sqlite3GlobalConfig.bMemstat ){
+ sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n);
+ n = mallocWithAlarm(n, &p);
+ if( p ) sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_OVERFLOW, n);
+ sqlite3_mutex_leave(mem0.mutex);
+ }else{
sqlite3_mutex_leave(mem0.mutex);
+ p = sqlite3GlobalConfig.m.xMalloc(n);
}
sqlite3MemdebugSetType(p, MEMTYPE_SCRATCH);
}
@@ -21680,12 +19324,11 @@ SQLITE_PRIVATE void *sqlite3ScratchMalloc(int n){
#if SQLITE_THREADSAFE==0 && !defined(NDEBUG)
- /* EVIDENCE-OF: R-12970-05880 SQLite will not use more than one scratch
- ** buffers per thread.
- **
- ** This can only be checked in single-threaded mode.
- */
- assert( scratchAllocOut==0 );
+ /* Verify that no more than two scratch allocations per thread
+ ** are outstanding at one time. (This is only checked in the
+ ** single-threaded case since checking in the multi-threaded case
+ ** would be much more complicated.) */
+ assert( scratchAllocOut<=1 );
if( p ) scratchAllocOut++;
#endif
@@ -21712,19 +19355,19 @@ SQLITE_PRIVATE void sqlite3ScratchFree(void *p){
mem0.pScratchFree = pSlot;
mem0.nScratchFree++;
assert( mem0.nScratchFree <= (u32)sqlite3GlobalConfig.nScratch );
- sqlite3StatusDown(SQLITE_STATUS_SCRATCH_USED, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_USED, -1);
sqlite3_mutex_leave(mem0.mutex);
}else{
/* Release memory back to the heap */
assert( sqlite3MemdebugHasType(p, MEMTYPE_SCRATCH) );
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_SCRATCH) );
+ assert( sqlite3MemdebugNoType(p, ~MEMTYPE_SCRATCH) );
sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
if( sqlite3GlobalConfig.bMemstat ){
int iSize = sqlite3MallocSize(p);
sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusDown(SQLITE_STATUS_SCRATCH_OVERFLOW, iSize);
- sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, iSize);
- sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_OVERFLOW, -iSize);
+ sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -iSize);
+ sqlite3StatusAdd(SQLITE_STATUS_MALLOC_COUNT, -1);
sqlite3GlobalConfig.m.xFree(p);
sqlite3_mutex_leave(mem0.mutex);
}else{
@@ -21739,7 +19382,7 @@ SQLITE_PRIVATE void sqlite3ScratchFree(void *p){
*/
#ifndef SQLITE_OMIT_LOOKASIDE
static int isLookaside(sqlite3 *db, void *p){
- return p>=db->lookaside.pStart && p<db->lookaside.pEnd;
+ return p && p>=db->lookaside.pStart && p<db->lookaside.pEnd;
}
#else
#define isLookaside(A,B) 0
@@ -21751,42 +19394,32 @@ static int isLookaside(sqlite3 *db, void *p){
*/
SQLITE_PRIVATE int sqlite3MallocSize(void *p){
assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugNoType(p, MEMTYPE_DB) );
return sqlite3GlobalConfig.m.xSize(p);
}
SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){
- if( db==0 || !isLookaside(db,p) ){
-#if SQLITE_DEBUG
- if( db==0 ){
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- }else{
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- }
-#endif
- return sqlite3GlobalConfig.m.xSize(p);
- }else{
- assert( sqlite3_mutex_held(db->mutex) );
+ assert( db==0 || sqlite3_mutex_held(db->mutex) );
+ if( db && isLookaside(db, p) ){
return db->lookaside.sz;
+ }else{
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) );
+ assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
+ return sqlite3GlobalConfig.m.xSize(p);
}
}
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void *p){
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- return (sqlite3_uint64)sqlite3GlobalConfig.m.xSize(p);
-}
/*
** Free memory previously obtained from sqlite3Malloc().
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void *p){
+SQLITE_API void sqlite3_free(void *p){
if( p==0 ) return; /* IMP: R-49053-54554 */
+ assert( sqlite3MemdebugNoType(p, MEMTYPE_DB) );
assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
if( sqlite3GlobalConfig.bMemstat ){
sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, sqlite3MallocSize(p));
- sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -sqlite3MallocSize(p));
+ sqlite3StatusAdd(SQLITE_STATUS_MALLOC_COUNT, -1);
sqlite3GlobalConfig.m.xFree(p);
sqlite3_mutex_leave(mem0.mutex);
}else{
@@ -21795,14 +19428,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free(void *p){
}
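As a hedged usage sketch (not part of the patch), the public allocator entry points touched in this hunk pair up in the familiar malloc/free pattern; the helper below is illustrative only:

  #include "sqlite3.h"
  #include <string.h>

  /* Duplicate a string into a buffer owned by SQLite's allocator.
  ** sqlite3_malloc() returns NULL on failure (or when n<=0), and
  ** sqlite3_free(NULL) is a harmless no-op. */
  static char *dup_with_sqlite_malloc(const char *zSrc){
    int n = (int)strlen(zSrc) + 1;
    char *z = sqlite3_malloc(n);
    if( z ) memcpy(z, zSrc, n);
    return z;              /* caller releases with sqlite3_free(z) */
  }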
/*
-** Add the size of memory allocation "p" to the count in
-** *db->pnBytesFreed.
-*/
-static SQLITE_NOINLINE void measureAllocationSize(sqlite3 *db, void *p){
- *db->pnBytesFreed += sqlite3DbMallocSize(db,p);
-}
-
-/*
** Free memory that might be associated with a particular database
** connection.
*/
@@ -21811,7 +19436,7 @@ SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){
if( p==0 ) return;
if( db ){
if( db->pnBytesFreed ){
- measureAllocationSize(db, p);
+ *db->pnBytesFreed += sqlite3DbMallocSize(db, p);
return;
}
if( isLookaside(db, p) ){
@@ -21826,8 +19451,8 @@ SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){
return;
}
}
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) );
assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
sqlite3_free(p);
@@ -21836,16 +19461,14 @@ SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){
/*
** Change the size of an existing memory allocation
*/
-SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
+SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, int nBytes){
int nOld, nNew, nDiff;
void *pNew;
- assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) );
- assert( sqlite3MemdebugNoType(pOld, (u8)~MEMTYPE_HEAP) );
if( pOld==0 ){
- return sqlite3Malloc(nBytes); /* IMP: R-04300-56712 */
+ return sqlite3Malloc(nBytes); /* IMP: R-28354-25769 */
}
- if( nBytes==0 ){
- sqlite3_free(pOld); /* IMP: R-26507-47431 */
+ if( nBytes<=0 ){
+ sqlite3_free(pOld); /* IMP: R-31593-10574 */
return 0;
}
if( nBytes>=0x7fffff00 ){
@@ -21856,31 +19479,33 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
/* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second
** argument to xRealloc is always a value returned by a prior call to
** xRoundup. */
- nNew = sqlite3GlobalConfig.m.xRoundup((int)nBytes);
+ nNew = sqlite3GlobalConfig.m.xRoundup(nBytes);
if( nOld==nNew ){
pNew = pOld;
}else if( sqlite3GlobalConfig.bMemstat ){
sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes);
+ sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, nBytes);
nDiff = nNew - nOld;
if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >=
mem0.alarmThreshold-nDiff ){
sqlite3MallocAlarm(nDiff);
}
+ assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugNoType(pOld, ~MEMTYPE_HEAP) );
pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
- if( pNew==0 && mem0.alarmThreshold>0 ){
- sqlite3MallocAlarm((int)nBytes);
+ if( pNew==0 && mem0.alarmCallback ){
+ sqlite3MallocAlarm(nBytes);
pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
}
if( pNew ){
nNew = sqlite3MallocSize(pNew);
- sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nNew-nOld);
+ sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, nNew-nOld);
}
sqlite3_mutex_leave(mem0.mutex);
}else{
pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
}
- assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-11148-40995 */
+ assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-04675-44850 */
return pNew;
}
@@ -21888,14 +19513,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
** The public interface to sqlite3Realloc. Make sure that the memory
** subsystem is initialized prior to invoking sqliteRealloc.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void *pOld, int n){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return 0;
-#endif
- if( n<0 ) n = 0; /* IMP: R-26507-47431 */
- return sqlite3Realloc(pOld, n);
-}
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void *pOld, sqlite3_uint64 n){
+SQLITE_API void *sqlite3_realloc(void *pOld, int n){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
@@ -21906,10 +19524,10 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void *pOld, sqlite3_uint64 n){
/*
** Allocate and zero memory.
*/
-SQLITE_PRIVATE void *sqlite3MallocZero(u64 n){
+SQLITE_PRIVATE void *sqlite3MallocZero(int n){
void *p = sqlite3Malloc(n);
if( p ){
- memset(p, 0, (size_t)n);
+ memset(p, 0, n);
}
return p;
}
@@ -21918,10 +19536,10 @@ SQLITE_PRIVATE void *sqlite3MallocZero(u64 n){
 ** Allocate and zero memory. If the allocation fails, set
** the mallocFailed flag in the connection pointer.
*/
-SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, u64 n){
+SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, int n){
void *p = sqlite3DbMallocRaw(db, n);
if( p ){
- memset(p, 0, (size_t)n);
+ memset(p, 0, n);
}
return p;
}
@@ -21944,7 +19562,7 @@ SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, u64 n){
** In other words, if a subsequent malloc (ex: "b") worked, it is assumed
** that all prior mallocs (ex: "a") worked too.
*/
-SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){
+SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, int n){
void *p;
assert( db==0 || sqlite3_mutex_held(db->mutex) );
assert( db==0 || db->pnBytesFreed==0 );
@@ -21979,8 +19597,8 @@ SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){
if( !p && db ){
db->mallocFailed = 1;
}
- sqlite3MemdebugSetType(p,
- (db && db->lookaside.bEnabled) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP);
+ sqlite3MemdebugSetType(p, MEMTYPE_DB |
+ ((db && db->lookaside.bEnabled) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP));
return p;
}
@@ -21988,7 +19606,7 @@ SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){
** Resize the block of memory pointed to by p to n bytes. If the
** resize fails, set the mallocFailed flag in the connection object.
*/
-SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){
+SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, int n){
void *pNew = 0;
assert( db!=0 );
assert( sqlite3_mutex_held(db->mutex) );
@@ -22006,14 +19624,15 @@ SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){
sqlite3DbFree(db, p);
}
}else{
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) );
sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
- pNew = sqlite3_realloc64(p, n);
+ pNew = sqlite3_realloc(p, n);
if( !pNew ){
+ sqlite3MemdebugSetType(p, MEMTYPE_DB|MEMTYPE_HEAP);
db->mallocFailed = 1;
}
- sqlite3MemdebugSetType(pNew,
+ sqlite3MemdebugSetType(pNew, MEMTYPE_DB |
(db->lookaside.bEnabled ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP));
}
}
@@ -22024,7 +19643,7 @@ SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){
** Attempt to reallocate p. If the reallocation fails, then free p
** and set the mallocFailed flag in the database connection.
*/
-SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, u64 n){
+SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, int n){
void *pNew;
pNew = sqlite3DbRealloc(db, p, n);
if( !pNew ){
@@ -22054,7 +19673,7 @@ SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){
}
return zNew;
}
-SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){
+SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, int n){
char *zNew;
if( z==0 ){
return 0;
@@ -22062,28 +19681,28 @@ SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){
assert( (n&0x7fffffff)==n );
zNew = sqlite3DbMallocRaw(db, n+1);
if( zNew ){
- memcpy(zNew, z, (size_t)n);
+ memcpy(zNew, z, n);
zNew[n] = 0;
}
return zNew;
}
/*
-** Free any prior content in *pz and replace it with a copy of zNew.
+** Create a string from the zFormat argument and the va_list that follows.
+** Store the string in memory obtained from sqliteMalloc() and make *pz
+** point to that string.
*/
-SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){
+SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zFormat, ...){
+ va_list ap;
+ char *z;
+
+ va_start(ap, zFormat);
+ z = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
sqlite3DbFree(db, *pz);
- *pz = sqlite3DbStrDup(db, zNew);
+ *pz = z;
}
-/*
-** Take actions at the end of an API call to indicate an OOM error
-*/
-static SQLITE_NOINLINE int apiOomError(sqlite3 *db){
- db->mallocFailed = 0;
- sqlite3Error(db, SQLITE_NOMEM);
- return SQLITE_NOMEM;
-}
/*
** This function must be called before exiting any API function (i.e.
@@ -22094,36 +19713,40 @@ static SQLITE_NOINLINE int apiOomError(sqlite3 *db){
** function. However, if a malloc() failure has occurred since the previous
** invocation SQLITE_NOMEM is returned instead.
**
-** If an OOM as occurred, then the connection error-code (the value
-** returned by sqlite3_errcode()) is set to SQLITE_NOMEM.
+** If the first argument, db, is not NULL and a malloc() error has occurred,
+** then the connection error-code (the value returned by sqlite3_errcode())
+** is set to SQLITE_NOMEM.
*/
SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){
- /* If the db handle must hold the connection handle mutex here.
- ** Otherwise the read (and possible write) of db->mallocFailed
+ /* If the db handle is not NULL, then we must hold the connection handle
+ ** mutex here. Otherwise the read (and possible write) of db->mallocFailed
** is unsafe, as is the call to sqlite3Error().
*/
- assert( db!=0 );
- assert( sqlite3_mutex_held(db->mutex) );
- if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){
- return apiOomError(db);
+ assert( !db || sqlite3_mutex_held(db->mutex) );
+ if( db && (db->mallocFailed || rc==SQLITE_IOERR_NOMEM) ){
+ sqlite3Error(db, SQLITE_NOMEM, 0);
+ db->mallocFailed = 0;
+ rc = SQLITE_NOMEM;
}
- return rc & db->errMask;
+ return rc & (db ? db->errMask : 0xff);
}
/************** End of malloc.c **********************************************/
/************** Begin file printf.c ******************************************/
/*
** The "printf" code that follows dates from the 1980's. It is in
-** the public domain.
+** the public domain. The original comments are included here for
+** completeness. They are very out-of-date but might be useful as
+** an historical reference. Most of the "enhancements" have been backed
+** out so that the functionality is now the same as standard printf().
**
**************************************************************************
**
** This file contains code for a set of "printf"-like routines. These
** routines format strings much like the printf() from the standard C
** library, though the implementation here has enhancements to support
-** SQLite.
+** SQLite.
*/
-/* #include "sqliteInt.h" */
/*
** Conversion types fall into various categories as defined by the
@@ -22246,31 +19869,19 @@ static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){
#endif /* SQLITE_OMIT_FLOATING_POINT */
/*
-** Set the StrAccum object to an error mode.
+** Append N space characters to the given string buffer.
*/
-static void setStrAccumError(StrAccum *p, u8 eError){
- assert( eError==STRACCUM_NOMEM || eError==STRACCUM_TOOBIG );
- p->accError = eError;
- p->nAlloc = 0;
-}
-
-/*
-** Extra argument values from a PrintfArguments object
-*/
-static sqlite3_int64 getIntArg(PrintfArguments *p){
- if( p->nArg<=p->nUsed ) return 0;
- return sqlite3_value_int64(p->apArg[p->nUsed++]);
-}
-static double getDoubleArg(PrintfArguments *p){
- if( p->nArg<=p->nUsed ) return 0.0;
- return sqlite3_value_double(p->apArg[p->nUsed++]);
-}
-static char *getTextArg(PrintfArguments *p){
- if( p->nArg<=p->nUsed ) return 0;
- return (char*)sqlite3_value_text(p->apArg[p->nUsed++]);
+SQLITE_PRIVATE void sqlite3AppendSpace(StrAccum *pAccum, int N){
+ static const char zSpaces[] = " ";
+ while( N>=(int)sizeof(zSpaces)-1 ){
+ sqlite3StrAccumAppend(pAccum, zSpaces, sizeof(zSpaces)-1);
+ N -= sizeof(zSpaces)-1;
+ }
+ if( N>0 ){
+ sqlite3StrAccumAppend(pAccum, zSpaces, N);
+ }
}
-
/*
** On machines with a small stack size, you can redefine the
** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired.
@@ -22284,10 +19895,10 @@ static char *getTextArg(PrintfArguments *p){
** Render a string given by "fmt" into the StrAccum object.
*/
SQLITE_PRIVATE void sqlite3VXPrintf(
- StrAccum *pAccum, /* Accumulate results here */
- u32 bFlags, /* SQLITE_PRINTF_* flags */
- const char *fmt, /* Format string */
- va_list ap /* arguments */
+ StrAccum *pAccum, /* Accumulate results here */
+ int useExtended, /* Allow extended %-conversions */
+ const char *fmt, /* Format string */
+ va_list ap /* arguments */
){
int c; /* Next character in the format string */
char *bufpt; /* Pointer to the conversion buffer */
@@ -22305,15 +19916,13 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
etByte flag_longlong; /* True if the "ll" flag is present */
etByte done; /* Loop termination flag */
etByte xtype = 0; /* Conversion paradigm */
- u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */
- u8 useIntern; /* Ok to use internal conversions (ex: %T) */
char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
sqlite_uint64 longvalue; /* Value for integer types */
LONGDOUBLE_TYPE realvalue; /* Value for real types */
const et_info *infop; /* Pointer to the appropriate info structure */
char *zOut; /* Rendering buffer */
int nOut; /* Size of the rendering buffer */
- char *zExtra = 0; /* Malloced memory used by some conversion */
+ char *zExtra; /* Malloced memory used by some conversion */
#ifndef SQLITE_OMIT_FLOATING_POINT
int exp, e2; /* exponent of real numbers */
int nsd; /* Number of significant digits returned */
@@ -22321,28 +19930,17 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
etByte flag_dp; /* True if decimal point should be shown */
etByte flag_rtz; /* True if trailing zeros should be removed */
#endif
- PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */
char buf[etBUFSIZE]; /* Conversion buffer */
bufpt = 0;
- if( bFlags ){
- if( (bArgList = (bFlags & SQLITE_PRINTF_SQLFUNC))!=0 ){
- pArgList = va_arg(ap, PrintfArguments*);
- }
- useIntern = bFlags & SQLITE_PRINTF_INTERNAL;
- }else{
- bArgList = useIntern = 0;
- }
for(; (c=(*fmt))!=0; ++fmt){
if( c!='%' ){
+ int amt;
bufpt = (char *)fmt;
-#if HAVE_STRCHRNUL
- fmt = strchrnul(fmt, '%');
-#else
- do{ fmt++; }while( *fmt && *fmt != '%' );
-#endif
- sqlite3StrAccumAppend(pAccum, bufpt, (int)(fmt - bufpt));
- if( *fmt==0 ) break;
+ amt = 1;
+ while( (c=(*++fmt))!='%' && c!=0 ) amt++;
+ sqlite3StrAccumAppend(pAccum, bufpt, amt);
+ if( c==0 ) break;
}
if( (c=(*++fmt))==0 ){
sqlite3StrAccumAppend(pAccum, "%", 1);
@@ -22364,48 +19962,33 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
}
}while( !done && (c=(*++fmt))!=0 );
/* Get the field width */
+ width = 0;
if( c=='*' ){
- if( bArgList ){
- width = (int)getIntArg(pArgList);
- }else{
- width = va_arg(ap,int);
- }
+ width = va_arg(ap,int);
if( width<0 ){
flag_leftjustify = 1;
- width = width >= -2147483647 ? -width : 0;
+ width = -width;
}
c = *++fmt;
}else{
- unsigned wx = 0;
while( c>='0' && c<='9' ){
- wx = wx*10 + c - '0';
+ width = width*10 + c - '0';
c = *++fmt;
}
- testcase( wx>0x7fffffff );
- width = wx & 0x7fffffff;
}
-
/* Get the precision */
if( c=='.' ){
+ precision = 0;
c = *++fmt;
if( c=='*' ){
- if( bArgList ){
- precision = (int)getIntArg(pArgList);
- }else{
- precision = va_arg(ap,int);
- }
+ precision = va_arg(ap,int);
+ if( precision<0 ) precision = -precision;
c = *++fmt;
- if( precision<0 ){
- precision = precision >= -2147483647 ? -precision : -1;
- }
}else{
- unsigned px = 0;
while( c>='0' && c<='9' ){
- px = px*10 + c - '0';
+ precision = precision*10 + c - '0';
c = *++fmt;
}
- testcase( px>0x7fffffff );
- precision = px & 0x7fffffff;
}
}else{
precision = -1;
@@ -22429,7 +20012,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
for(idx=0; idx<ArraySize(fmtinfo); idx++){
if( c==fmtinfo[idx].fmttype ){
infop = &fmtinfo[idx];
- if( useIntern || (infop->flags & FLAG_INTERN)==0 ){
+ if( useExtended || (infop->flags & FLAG_INTERN)==0 ){
xtype = infop->type;
}else{
return;
@@ -22437,6 +20020,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
break;
}
}
+ zExtra = 0;
/*
** At this point, variables are initialized as follows:
@@ -22468,9 +20052,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
case etRADIX:
if( infop->flags & FLAG_SIGNED ){
i64 v;
- if( bArgList ){
- v = getIntArg(pArgList);
- }else if( flag_longlong ){
+ if( flag_longlong ){
v = va_arg(ap,i64);
}else if( flag_long ){
v = va_arg(ap,long int);
@@ -22491,9 +20073,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
else prefix = 0;
}
}else{
- if( bArgList ){
- longvalue = (u64)getIntArg(pArgList);
- }else if( flag_longlong ){
+ if( flag_longlong ){
longvalue = va_arg(ap,u64);
}else if( flag_long ){
longvalue = va_arg(ap,unsigned long int);
@@ -22513,7 +20093,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
nOut = precision + 10;
zOut = zExtra = sqlite3Malloc( nOut );
if( zOut==0 ){
- setStrAccumError(pAccum, STRACCUM_NOMEM);
+ pAccum->accError = STRACCUM_NOMEM;
return;
}
}
@@ -22528,8 +20108,10 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
*(--bufpt) = zOrd[x*2];
}
{
- const char *cset = &aDigits[infop->charset];
- u8 base = infop->base;
+ register const char *cset; /* Use registers for speed */
+ register int base;
+ cset = &aDigits[infop->charset];
+ base = infop->base;
do{ /* Convert to ascii */
*(--bufpt) = cset[longvalue%base];
longvalue = longvalue/base;
@@ -22551,11 +20133,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
case etFLOAT:
case etEXP:
case etGENERIC:
- if( bArgList ){
- realvalue = getDoubleArg(pArgList);
- }else{
- realvalue = va_arg(ap,double);
- }
+ realvalue = va_arg(ap,double);
#ifdef SQLITE_OMIT_FLOATING_POINT
length = 0;
#else
@@ -22569,8 +20147,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
else prefix = 0;
}
if( xtype==etGENERIC && precision>0 ) precision--;
- testcase( precision>0xfff );
- for(idx=precision&0xfff, rounder=0.5; idx>0; idx--, rounder*=0.1){}
+ for(idx=precision, rounder=0.5; idx>0; idx--, rounder*=0.1){}
if( xtype==etFLOAT ) realvalue += rounder;
/* Normalize realvalue to within 10.0 > realvalue >= 1.0 */
exp = 0;
@@ -22582,16 +20159,21 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
if( realvalue>0.0 ){
LONGDOUBLE_TYPE scale = 1.0;
while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;}
- while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; }
+ while( realvalue>=1e64*scale && exp<=350 ){ scale *= 1e64; exp+=64; }
+ while( realvalue>=1e8*scale && exp<=350 ){ scale *= 1e8; exp+=8; }
while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; }
realvalue /= scale;
while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; }
while( realvalue<1.0 ){ realvalue *= 10.0; exp--; }
if( exp>350 ){
- bufpt = buf;
- buf[0] = prefix;
- memcpy(buf+(prefix!=0),"Inf",4);
- length = 3+(prefix!=0);
+ if( prefix=='-' ){
+ bufpt = "-Inf";
+ }else if( prefix=='+' ){
+ bufpt = "+Inf";
+ }else{
+ bufpt = "Inf";
+ }
+ length = sqlite3Strlen30(bufpt);
break;
}
}
@@ -22620,11 +20202,10 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
}else{
e2 = exp;
}
- if( MAX(e2,0)+(i64)precision+(i64)width > etBUFSIZE - 15 ){
- bufpt = zExtra
- = sqlite3Malloc( MAX(e2,0)+(i64)precision+(i64)width+15 );
+ if( MAX(e2,0)+precision+width > etBUFSIZE - 15 ){
+ bufpt = zExtra = sqlite3Malloc( MAX(e2,0)+precision+width+15 );
if( bufpt==0 ){
- setStrAccumError(pAccum, STRACCUM_NOMEM);
+ pAccum->accError = STRACCUM_NOMEM;
return;
}
}
@@ -22707,9 +20288,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */
break;
case etSIZE:
- if( !bArgList ){
- *(va_arg(ap,int*)) = pAccum->nChar;
- }
+ *(va_arg(ap,int*)) = pAccum->nChar;
length = width = 0;
break;
case etPERCENT:
@@ -22718,32 +20297,19 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
length = 1;
break;
case etCHARX:
- if( bArgList ){
- bufpt = getTextArg(pArgList);
- c = bufpt ? bufpt[0] : 0;
+ c = va_arg(ap,int);
+ buf[0] = (char)c;
+ if( precision>=0 ){
+ for(idx=1; idx<precision; idx++) buf[idx] = (char)c;
+ length = precision;
}else{
- c = va_arg(ap,int);
- }
- if( precision>1 ){
- width -= precision-1;
- if( width>1 && !flag_leftjustify ){
- sqlite3AppendChar(pAccum, width-1, ' ');
- width = 0;
- }
- sqlite3AppendChar(pAccum, precision-1, c);
+ length = 1;
}
- length = 1;
- buf[0] = c;
bufpt = buf;
break;
case etSTRING:
case etDYNSTRING:
- if( bArgList ){
- bufpt = getTextArg(pArgList);
- xtype = etSTRING;
- }else{
- bufpt = va_arg(ap,char*);
- }
+ bufpt = va_arg(ap,char*);
if( bufpt==0 ){
bufpt = "";
}else if( xtype==etDYNSTRING ){
@@ -22755,20 +20321,14 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
length = sqlite3Strlen30(bufpt);
}
break;
- case etSQLESCAPE: /* Escape ' characters */
- case etSQLESCAPE2: /* Escape ' and enclose in '...' */
- case etSQLESCAPE3: { /* Escape " characters */
+ case etSQLESCAPE:
+ case etSQLESCAPE2:
+ case etSQLESCAPE3: {
int i, j, k, n, isnull;
int needQuote;
char ch;
char q = ((xtype==etSQLESCAPE3)?'"':'\''); /* Quote character */
- char *escarg;
-
- if( bArgList ){
- escarg = getTextArg(pArgList);
- }else{
- escarg = va_arg(ap,char*);
- }
+ char *escarg = va_arg(ap,char*);
isnull = escarg==0;
if( isnull ) escarg = (xtype==etSQLESCAPE2 ? "NULL" : "(NULL)");
k = precision;
@@ -22776,11 +20336,11 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
if( ch==q ) n++;
}
needQuote = !isnull && xtype==etSQLESCAPE2;
- n += i + 3;
+ n += i + 1 + needQuote*2;
if( n>etBUFSIZE ){
bufpt = zExtra = sqlite3Malloc( n );
if( bufpt==0 ){
- setStrAccumError(pAccum, STRACCUM_NOMEM);
+ pAccum->accError = STRACCUM_NOMEM;
return;
}
}else{
@@ -22803,8 +20363,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
}
case etTOKEN: {
Token *pToken = va_arg(ap, Token*);
- assert( bArgList==0 );
- if( pToken && pToken->n ){
+ if( pToken ){
sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n);
}
length = width = 0;
@@ -22814,13 +20373,12 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
SrcList *pSrc = va_arg(ap, SrcList*);
int k = va_arg(ap, int);
struct SrcList_item *pItem = &pSrc->a[k];
- assert( bArgList==0 );
assert( k>=0 && k<pSrc->nSrc );
if( pItem->zDatabase ){
- sqlite3StrAccumAppendAll(pAccum, pItem->zDatabase);
+ sqlite3StrAccumAppend(pAccum, pItem->zDatabase, -1);
sqlite3StrAccumAppend(pAccum, ".", 1);
}
- sqlite3StrAccumAppendAll(pAccum, pItem->zName);
+ sqlite3StrAccumAppend(pAccum, pItem->zName, -1);
length = width = 0;
break;
}
@@ -22834,126 +20392,82 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
** "length" characters long. The field width is "width". Do
** the output.
*/
- width -= length;
- if( width>0 && !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
- sqlite3StrAccumAppend(pAccum, bufpt, length);
- if( width>0 && flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
-
- if( zExtra ){
- sqlite3_free(zExtra);
- zExtra = 0;
+ if( !flag_leftjustify ){
+ register int nspace;
+ nspace = width-length;
+ if( nspace>0 ){
+ sqlite3AppendSpace(pAccum, nspace);
+ }
+ }
+ if( length>0 ){
+ sqlite3StrAccumAppend(pAccum, bufpt, length);
+ }
+ if( flag_leftjustify ){
+ register int nspace;
+ nspace = width-length;
+ if( nspace>0 ){
+ sqlite3AppendSpace(pAccum, nspace);
+ }
}
+ sqlite3_free(zExtra);
}/* End for loop over the format string */
} /* End of function */
/*
-** Enlarge the memory allocation on a StrAccum object so that it is
-** able to accept at least N more bytes of text.
-**
-** Return the number of bytes of text that StrAccum is able to accept
-** after the attempted enlargement. The value returned might be zero.
+** Append N bytes of text from z to the StrAccum object.
*/
-static int sqlite3StrAccumEnlarge(StrAccum *p, int N){
- char *zNew;
- assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */
+SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){
+ assert( z!=0 || N==0 );
if( p->accError ){
testcase(p->accError==STRACCUM_TOOBIG);
testcase(p->accError==STRACCUM_NOMEM);
- return 0;
- }
- if( p->mxAlloc==0 ){
- N = p->nAlloc - p->nChar - 1;
- setStrAccumError(p, STRACCUM_TOOBIG);
- return N;
- }else{
- char *zOld = (p->zText==p->zBase ? 0 : p->zText);
- i64 szNew = p->nChar;
- szNew += N + 1;
- if( szNew+p->nChar<=p->mxAlloc ){
- /* Force exponential buffer size growth as long as it does not overflow,
- ** to avoid having to call this routine too often */
- szNew += p->nChar;
- }
- if( szNew > p->mxAlloc ){
- sqlite3StrAccumReset(p);
- setStrAccumError(p, STRACCUM_TOOBIG);
- return 0;
- }else{
- p->nAlloc = (int)szNew;
- }
- if( p->db ){
- zNew = sqlite3DbRealloc(p->db, zOld, p->nAlloc);
- }else{
- zNew = sqlite3_realloc64(zOld, p->nAlloc);
- }
- if( zNew ){
- assert( p->zText!=0 || p->nChar==0 );
- if( zOld==0 && p->nChar>0 ) memcpy(zNew, p->zText, p->nChar);
- p->zText = zNew;
- p->nAlloc = sqlite3DbMallocSize(p->db, zNew);
- }else{
- sqlite3StrAccumReset(p);
- setStrAccumError(p, STRACCUM_NOMEM);
- return 0;
- }
- }
- return N;
-}
-
-/*
-** Append N copies of character c to the given string buffer.
-*/
-SQLITE_PRIVATE void sqlite3AppendChar(StrAccum *p, int N, char c){
- testcase( p->nChar + (i64)N > 0x7fffffff );
- if( p->nChar+(i64)N >= p->nAlloc && (N = sqlite3StrAccumEnlarge(p, N))<=0 ){
return;
}
- while( (N--)>0 ) p->zText[p->nChar++] = c;
-}
-
-/*
-** The StrAccum "p" is not large enough to accept N new bytes of z[].
-** So enlarge if first, then do the append.
-**
-** This is a helper routine to sqlite3StrAccumAppend() that does special-case
-** work (enlarging the buffer) using tail recursion, so that the
-** sqlite3StrAccumAppend() routine can use fast calling semantics.
-*/
-static void SQLITE_NOINLINE enlargeAndAppend(StrAccum *p, const char *z, int N){
- N = sqlite3StrAccumEnlarge(p, N);
- if( N>0 ){
- memcpy(&p->zText[p->nChar], z, N);
- p->nChar += N;
+ assert( p->zText!=0 || p->nChar==0 );
+ if( N<=0 ){
+ if( N==0 || z[0]==0 ) return;
+ N = sqlite3Strlen30(z);
}
-}
-
-/*
-** Append N bytes of text from z to the StrAccum object. Increase the
-** size of the memory allocation for StrAccum if necessary.
-*/
-SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){
- assert( z!=0 || N==0 );
- assert( p->zText!=0 || p->nChar==0 || p->accError );
- assert( N>=0 );
- assert( p->accError==0 || p->nAlloc==0 );
if( p->nChar+N >= p->nAlloc ){
- enlargeAndAppend(p,z,N);
- }else{
- assert( p->zText );
- p->nChar += N;
- memcpy(&p->zText[p->nChar-N], z, N);
+ char *zNew;
+ if( !p->useMalloc ){
+ p->accError = STRACCUM_TOOBIG;
+ N = p->nAlloc - p->nChar - 1;
+ if( N<=0 ){
+ return;
+ }
+ }else{
+ char *zOld = (p->zText==p->zBase ? 0 : p->zText);
+ i64 szNew = p->nChar;
+ szNew += N + 1;
+ if( szNew > p->mxAlloc ){
+ sqlite3StrAccumReset(p);
+ p->accError = STRACCUM_TOOBIG;
+ return;
+ }else{
+ p->nAlloc = (int)szNew;
+ }
+ if( p->useMalloc==1 ){
+ zNew = sqlite3DbRealloc(p->db, zOld, p->nAlloc);
+ }else{
+ zNew = sqlite3_realloc(zOld, p->nAlloc);
+ }
+ if( zNew ){
+ if( zOld==0 && p->nChar>0 ) memcpy(zNew, p->zText, p->nChar);
+ p->zText = zNew;
+ }else{
+ p->accError = STRACCUM_NOMEM;
+ sqlite3StrAccumReset(p);
+ return;
+ }
+ }
}
+ assert( p->zText );
+ memcpy(&p->zText[p->nChar], z, N);
+ p->nChar += N;
}
/*
-** Append the complete text of zero-terminated string z[] to the p string.
-*/
-SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){
- sqlite3StrAccumAppend(p, z, sqlite3Strlen30(z));
-}
-
-
-/*
** Finish off a string by making sure it is zero-terminated.
** Return a pointer to the resulting string. Return a NULL
** pointer if any kind of error was encountered.
@@ -22961,12 +20475,16 @@ SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){
SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){
if( p->zText ){
p->zText[p->nChar] = 0;
- if( p->mxAlloc>0 && p->zText==p->zBase ){
- p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 );
+ if( p->useMalloc && p->zText==p->zBase ){
+ if( p->useMalloc==1 ){
+ p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 );
+ }else{
+ p->zText = sqlite3_malloc(p->nChar+1);
+ }
if( p->zText ){
memcpy(p->zText, p->zBase, p->nChar+1);
}else{
- setStrAccumError(p, STRACCUM_NOMEM);
+ p->accError = STRACCUM_NOMEM;
}
}
}
@@ -22978,31 +20496,25 @@ SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){
*/
SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum *p){
if( p->zText!=p->zBase ){
- sqlite3DbFree(p->db, p->zText);
+ if( p->useMalloc==1 ){
+ sqlite3DbFree(p->db, p->zText);
+ }else{
+ sqlite3_free(p->zText);
+ }
}
p->zText = 0;
}
/*
-** Initialize a string accumulator.
-**
-** p: The accumulator to be initialized.
-** db: Pointer to a database connection. May be NULL. Lookaside
-** memory is used if not NULL. db->mallocFailed is set appropriately
-** when not NULL.
-** zBase: An initial buffer. May be NULL in which case the initial buffer
-** is malloced.
-** n: Size of zBase in bytes. If total space requirements never exceed
-** n then no memory allocations ever occur.
-** mx: Maximum number of bytes to accumulate. If mx==0 then no memory
-** allocations will ever occur.
+** Initialize a string accumulator
*/
-SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum *p, sqlite3 *db, char *zBase, int n, int mx){
+SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum *p, char *zBase, int n, int mx){
p->zText = p->zBase = zBase;
- p->db = db;
+ p->db = 0;
p->nChar = 0;
p->nAlloc = n;
p->mxAlloc = mx;
+ p->useMalloc = 1;
p->accError = 0;
}
@@ -23015,9 +20527,10 @@ SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list a
char zBase[SQLITE_PRINT_BUF_SIZE];
StrAccum acc;
assert( db!=0 );
- sqlite3StrAccumInit(&acc, db, zBase, sizeof(zBase),
+ sqlite3StrAccumInit(&acc, zBase, sizeof(zBase),
db->aLimit[SQLITE_LIMIT_LENGTH]);
- sqlite3VXPrintf(&acc, SQLITE_PRINTF_INTERNAL, zFormat, ap);
+ acc.db = db;
+ sqlite3VXPrintf(&acc, 1, zFormat, ap);
z = sqlite3StrAccumFinish(&acc);
if( acc.accError==STRACCUM_NOMEM ){
db->mallocFailed = 1;
@@ -23039,24 +20552,36 @@ SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){
}
/*
+** Like sqlite3MPrintf(), but call sqlite3DbFree() on zStr after formatting
+** the string and before returning. This routine is intended to be used
+** to modify an existing string. For example:
+**
+** x = sqlite3MAppendf(db, x, "prefix %s suffix", x);
+**
+*/
+SQLITE_PRIVATE char *sqlite3MAppendf(sqlite3 *db, char *zStr, const char *zFormat, ...){
+ va_list ap;
+ char *z;
+ va_start(ap, zFormat);
+ z = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
+ sqlite3DbFree(db, zStr);
+ return z;
+}
+
+/*
** Print into memory obtained from sqlite3_malloc(). Omit the internal
** %-conversion extensions.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char *zFormat, va_list ap){
+SQLITE_API char *sqlite3_vmprintf(const char *zFormat, va_list ap){
char *z;
char zBase[SQLITE_PRINT_BUF_SIZE];
StrAccum acc;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( zFormat==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
- sqlite3StrAccumInit(&acc, 0, zBase, sizeof(zBase), SQLITE_MAX_LENGTH);
+ sqlite3StrAccumInit(&acc, zBase, sizeof(zBase), SQLITE_MAX_LENGTH);
+ acc.useMalloc = 2;
sqlite3VXPrintf(&acc, 0, zFormat, ap);
z = sqlite3StrAccumFinish(&acc);
return z;
@@ -23066,7 +20591,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char *zFormat, va_list ap
 ** Print into memory obtained from sqlite3_malloc(). Omit the internal
** %-conversion extensions.
*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char *zFormat, ...){
+SQLITE_API char *sqlite3_mprintf(const char *zFormat, ...){
va_list ap;
char *z;
#ifndef SQLITE_OMIT_AUTOINIT
@@ -23091,21 +20616,15 @@ SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char *zFormat, ...){
**
** sqlite3_vsnprintf() is the varargs version.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){
+SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){
StrAccum acc;
if( n<=0 ) return zBuf;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( zBuf==0 || zFormat==0 ) {
- (void)SQLITE_MISUSE_BKPT;
- if( zBuf ) zBuf[0] = 0;
- return zBuf;
- }
-#endif
- sqlite3StrAccumInit(&acc, 0, zBuf, n, 0);
+ sqlite3StrAccumInit(&acc, zBuf, n, 0);
+ acc.useMalloc = 0;
sqlite3VXPrintf(&acc, 0, zFormat, ap);
return sqlite3StrAccumFinish(&acc);
}
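A hedged example of the fixed-buffer formatter sqlite3_snprintf(), defined immediately below (the buffer size is chosen arbitrarily): unlike the C library's snprintf(), the size argument comes first and the result is always zero-terminated, with overflow silently truncated.

  #include "sqlite3.h"

  static void format_example(void){
    char zBuf[32];                                   /* arbitrary size */
    sqlite3_snprintf(sizeof(zBuf), zBuf, "row %d of %d", 7, 42);
    /* zBuf now holds "row 7 of 42" */
  }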
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){
+SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){
char *z;
va_list ap;
va_start(ap,zFormat);
@@ -23122,17 +20641,13 @@ SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int n, char *zBuf, const char *zF
** sqlite3_log() must render into a static buffer. It cannot dynamically
** allocate memory because it might be called while the memory allocator
** mutex is held.
-**
-** sqlite3VXPrintf() might ask for *temporary* memory allocations for
-** certain format characters (%q) or for very large precisions or widths.
-** Care must be taken that any sqlite3_log() calls that occur while the
-** memory mutex is held do not use these mechanisms.
*/
static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){
StrAccum acc; /* String accumulator */
char zMsg[SQLITE_PRINT_BUF_SIZE*3]; /* Complete log message */
- sqlite3StrAccumInit(&acc, 0, zMsg, sizeof(zMsg), 0);
+ sqlite3StrAccumInit(&acc, zMsg, sizeof(zMsg), 0);
+ acc.useMalloc = 0;
sqlite3VXPrintf(&acc, 0, zFormat, ap);
sqlite3GlobalConfig.xLog(sqlite3GlobalConfig.pLogArg, iErrCode,
sqlite3StrAccumFinish(&acc));
@@ -23141,7 +20656,7 @@ static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){
/*
** Format and write a message to the log if logging is enabled.
*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...){
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...){
va_list ap; /* Vararg list */
if( sqlite3GlobalConfig.xLog ){
va_start(ap, zFormat);
@@ -23150,7 +20665,7 @@ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...)
}
}
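A minimal sketch of the logging interface restored here, assuming the application registers a callback through SQLITE_CONFIG_LOG before the library is initialized (the callback name is invented):

  #include "sqlite3.h"
  #include <stdio.h>

  /* Hypothetical log sink; pArg is whatever was passed to sqlite3_config(). */
  static void my_logger(void *pArg, int iErrCode, const char *zMsg){
    (void)pArg;
    fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
  }

  static void setup_logging(void){
    sqlite3_config(SQLITE_CONFIG_LOG, my_logger, (void*)0);  /* before init */
    sqlite3_log(SQLITE_ERROR, "logging enabled for %s", "example");
  }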
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+#if defined(SQLITE_DEBUG)
/*
** A version of printf() that understands %lld. Used for debugging.
 ** The printf() built into some versions of Windows does not understand %lld
@@ -23160,7 +20675,8 @@ SQLITE_PRIVATE void sqlite3DebugPrintf(const char *zFormat, ...){
va_list ap;
StrAccum acc;
char zBuf[500];
- sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ sqlite3StrAccumInit(&acc, zBuf, sizeof(zBuf), 0);
+ acc.useMalloc = 0;
va_start(ap,zFormat);
sqlite3VXPrintf(&acc, 0, zFormat, ap);
va_end(ap);
@@ -23170,464 +20686,19 @@ SQLITE_PRIVATE void sqlite3DebugPrintf(const char *zFormat, ...){
}
#endif
-
+#ifndef SQLITE_OMIT_TRACE
/*
-** variable-argument wrapper around sqlite3VXPrintf(). The bFlags argument
-** can contain the bit SQLITE_PRINTF_INTERNAL enable internal formats.
+** variable-argument wrapper around sqlite3VXPrintf().
*/
-SQLITE_PRIVATE void sqlite3XPrintf(StrAccum *p, u32 bFlags, const char *zFormat, ...){
+SQLITE_PRIVATE void sqlite3XPrintf(StrAccum *p, const char *zFormat, ...){
va_list ap;
va_start(ap,zFormat);
- sqlite3VXPrintf(p, bFlags, zFormat, ap);
- va_end(ap);
-}
-
-/************** End of printf.c **********************************************/
-/************** Begin file treeview.c ****************************************/
-/*
-** 2015-06-08
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains C code to implement the TreeView debugging routines.
-** These routines print a parse tree to standard output for debugging and
-** analysis.
-**
-** The interfaces in this file is only available when compiling
-** with SQLITE_DEBUG.
-*/
-/* #include "sqliteInt.h" */
-#ifdef SQLITE_DEBUG
-
-/*
-** Add a new subitem to the tree. The moreToFollow flag indicates that this
-** is not the last item in the tree.
-*/
-static TreeView *sqlite3TreeViewPush(TreeView *p, u8 moreToFollow){
- if( p==0 ){
- p = sqlite3_malloc64( sizeof(*p) );
- if( p==0 ) return 0;
- memset(p, 0, sizeof(*p));
- }else{
- p->iLevel++;
- }
- assert( moreToFollow==0 || moreToFollow==1 );
- if( p->iLevel<sizeof(p->bLine) ) p->bLine[p->iLevel] = moreToFollow;
- return p;
-}
-
-/*
-** Finished with one layer of the tree
-*/
-static void sqlite3TreeViewPop(TreeView *p){
- if( p==0 ) return;
- p->iLevel--;
- if( p->iLevel<0 ) sqlite3_free(p);
-}
-
-/*
-** Generate a single line of output for the tree, with a prefix that contains
-** all the appropriate tree lines
-*/
-static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
- va_list ap;
- int i;
- StrAccum acc;
- char zBuf[500];
- sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
- if( p ){
- for(i=0; i<p->iLevel && i<sizeof(p->bLine)-1; i++){
- sqlite3StrAccumAppend(&acc, p->bLine[i] ? "| " : " ", 4);
- }
- sqlite3StrAccumAppend(&acc, p->bLine[i] ? "|-- " : "'-- ", 4);
- }
- va_start(ap, zFormat);
- sqlite3VXPrintf(&acc, 0, zFormat, ap);
+ sqlite3VXPrintf(p, 1, zFormat, ap);
va_end(ap);
- if( zBuf[acc.nChar-1]!='\n' ) sqlite3StrAccumAppend(&acc, "\n", 1);
- sqlite3StrAccumFinish(&acc);
- fprintf(stdout,"%s", zBuf);
- fflush(stdout);
}
-
-/*
-** Shorthand for starting a new tree item that consists of a single label
-*/
-static void sqlite3TreeViewItem(TreeView *p, const char *zLabel,u8 moreFollows){
- p = sqlite3TreeViewPush(p, moreFollows);
- sqlite3TreeViewLine(p, "%s", zLabel);
-}
-
-
-/*
-** Generate a human-readable description of a the Select object.
-*/
-SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 moreToFollow){
- int n = 0;
- int cnt = 0;
- pView = sqlite3TreeViewPush(pView, moreToFollow);
- do{
- sqlite3TreeViewLine(pView, "SELECT%s%s (0x%p) selFlags=0x%x",
- ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""),
- ((p->selFlags & SF_Aggregate) ? " agg_flag" : ""), p, p->selFlags
- );
- if( cnt++ ) sqlite3TreeViewPop(pView);
- if( p->pPrior ){
- n = 1000;
- }else{
- n = 0;
- if( p->pSrc && p->pSrc->nSrc ) n++;
- if( p->pWhere ) n++;
- if( p->pGroupBy ) n++;
- if( p->pHaving ) n++;
- if( p->pOrderBy ) n++;
- if( p->pLimit ) n++;
- if( p->pOffset ) n++;
- }
- sqlite3TreeViewExprList(pView, p->pEList, (n--)>0, "result-set");
- if( p->pSrc && p->pSrc->nSrc ){
- int i;
- pView = sqlite3TreeViewPush(pView, (n--)>0);
- sqlite3TreeViewLine(pView, "FROM");
- for(i=0; i<p->pSrc->nSrc; i++){
- struct SrcList_item *pItem = &p->pSrc->a[i];
- StrAccum x;
- char zLine[100];
- sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
- sqlite3XPrintf(&x, 0, "{%d,*}", pItem->iCursor);
- if( pItem->zDatabase ){
- sqlite3XPrintf(&x, 0, " %s.%s", pItem->zDatabase, pItem->zName);
- }else if( pItem->zName ){
- sqlite3XPrintf(&x, 0, " %s", pItem->zName);
- }
- if( pItem->pTab ){
- sqlite3XPrintf(&x, 0, " tabname=%Q", pItem->pTab->zName);
- }
- if( pItem->zAlias ){
- sqlite3XPrintf(&x, 0, " (AS %s)", pItem->zAlias);
- }
- if( pItem->fg.jointype & JT_LEFT ){
- sqlite3XPrintf(&x, 0, " LEFT-JOIN");
- }
- sqlite3StrAccumFinish(&x);
- sqlite3TreeViewItem(pView, zLine, i<p->pSrc->nSrc-1);
- if( pItem->pSelect ){
- sqlite3TreeViewSelect(pView, pItem->pSelect, 0);
- }
- if( pItem->fg.isTabFunc ){
- sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:");
- }
- sqlite3TreeViewPop(pView);
- }
- sqlite3TreeViewPop(pView);
- }
- if( p->pWhere ){
- sqlite3TreeViewItem(pView, "WHERE", (n--)>0);
- sqlite3TreeViewExpr(pView, p->pWhere, 0);
- sqlite3TreeViewPop(pView);
- }
- if( p->pGroupBy ){
- sqlite3TreeViewExprList(pView, p->pGroupBy, (n--)>0, "GROUPBY");
- }
- if( p->pHaving ){
- sqlite3TreeViewItem(pView, "HAVING", (n--)>0);
- sqlite3TreeViewExpr(pView, p->pHaving, 0);
- sqlite3TreeViewPop(pView);
- }
- if( p->pOrderBy ){
- sqlite3TreeViewExprList(pView, p->pOrderBy, (n--)>0, "ORDERBY");
- }
- if( p->pLimit ){
- sqlite3TreeViewItem(pView, "LIMIT", (n--)>0);
- sqlite3TreeViewExpr(pView, p->pLimit, 0);
- sqlite3TreeViewPop(pView);
- }
- if( p->pOffset ){
- sqlite3TreeViewItem(pView, "OFFSET", (n--)>0);
- sqlite3TreeViewExpr(pView, p->pOffset, 0);
- sqlite3TreeViewPop(pView);
- }
- if( p->pPrior ){
- const char *zOp = "UNION";
- switch( p->op ){
- case TK_ALL: zOp = "UNION ALL"; break;
- case TK_INTERSECT: zOp = "INTERSECT"; break;
- case TK_EXCEPT: zOp = "EXCEPT"; break;
- }
- sqlite3TreeViewItem(pView, zOp, 1);
- }
- p = p->pPrior;
- }while( p!=0 );
- sqlite3TreeViewPop(pView);
-}
-
-/*
-** Generate a human-readable explanation of an expression tree.
-*/
-SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 moreToFollow){
- const char *zBinOp = 0; /* Binary operator */
- const char *zUniOp = 0; /* Unary operator */
- char zFlgs[30];
- pView = sqlite3TreeViewPush(pView, moreToFollow);
- if( pExpr==0 ){
- sqlite3TreeViewLine(pView, "nil");
- sqlite3TreeViewPop(pView);
- return;
- }
- if( pExpr->flags ){
- sqlite3_snprintf(sizeof(zFlgs),zFlgs," flags=0x%x",pExpr->flags);
- }else{
- zFlgs[0] = 0;
- }
- switch( pExpr->op ){
- case TK_AGG_COLUMN: {
- sqlite3TreeViewLine(pView, "AGG{%d:%d}%s",
- pExpr->iTable, pExpr->iColumn, zFlgs);
- break;
- }
- case TK_COLUMN: {
- if( pExpr->iTable<0 ){
- /* This only happens when coding check constraints */
- sqlite3TreeViewLine(pView, "COLUMN(%d)%s", pExpr->iColumn, zFlgs);
- }else{
- sqlite3TreeViewLine(pView, "{%d:%d}%s",
- pExpr->iTable, pExpr->iColumn, zFlgs);
- }
- break;
- }
- case TK_INTEGER: {
- if( pExpr->flags & EP_IntValue ){
- sqlite3TreeViewLine(pView, "%d", pExpr->u.iValue);
- }else{
- sqlite3TreeViewLine(pView, "%s", pExpr->u.zToken);
- }
- break;
- }
-#ifndef SQLITE_OMIT_FLOATING_POINT
- case TK_FLOAT: {
- sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken);
- break;
- }
-#endif
- case TK_STRING: {
- sqlite3TreeViewLine(pView,"%Q", pExpr->u.zToken);
- break;
- }
- case TK_NULL: {
- sqlite3TreeViewLine(pView,"NULL");
- break;
- }
-#ifndef SQLITE_OMIT_BLOB_LITERAL
- case TK_BLOB: {
- sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken);
- break;
- }
-#endif
- case TK_VARIABLE: {
- sqlite3TreeViewLine(pView,"VARIABLE(%s,%d)",
- pExpr->u.zToken, pExpr->iColumn);
- break;
- }
- case TK_REGISTER: {
- sqlite3TreeViewLine(pView,"REGISTER(%d)", pExpr->iTable);
- break;
- }
- case TK_ID: {
- sqlite3TreeViewLine(pView,"ID \"%w\"", pExpr->u.zToken);
- break;
- }
-#ifndef SQLITE_OMIT_CAST
- case TK_CAST: {
- /* Expressions of the form: CAST(pLeft AS token) */
- sqlite3TreeViewLine(pView,"CAST %Q", pExpr->u.zToken);
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
- break;
- }
-#endif /* SQLITE_OMIT_CAST */
- case TK_LT: zBinOp = "LT"; break;
- case TK_LE: zBinOp = "LE"; break;
- case TK_GT: zBinOp = "GT"; break;
- case TK_GE: zBinOp = "GE"; break;
- case TK_NE: zBinOp = "NE"; break;
- case TK_EQ: zBinOp = "EQ"; break;
- case TK_IS: zBinOp = "IS"; break;
- case TK_ISNOT: zBinOp = "ISNOT"; break;
- case TK_AND: zBinOp = "AND"; break;
- case TK_OR: zBinOp = "OR"; break;
- case TK_PLUS: zBinOp = "ADD"; break;
- case TK_STAR: zBinOp = "MUL"; break;
- case TK_MINUS: zBinOp = "SUB"; break;
- case TK_REM: zBinOp = "REM"; break;
- case TK_BITAND: zBinOp = "BITAND"; break;
- case TK_BITOR: zBinOp = "BITOR"; break;
- case TK_SLASH: zBinOp = "DIV"; break;
- case TK_LSHIFT: zBinOp = "LSHIFT"; break;
- case TK_RSHIFT: zBinOp = "RSHIFT"; break;
- case TK_CONCAT: zBinOp = "CONCAT"; break;
- case TK_DOT: zBinOp = "DOT"; break;
-
- case TK_UMINUS: zUniOp = "UMINUS"; break;
- case TK_UPLUS: zUniOp = "UPLUS"; break;
- case TK_BITNOT: zUniOp = "BITNOT"; break;
- case TK_NOT: zUniOp = "NOT"; break;
- case TK_ISNULL: zUniOp = "ISNULL"; break;
- case TK_NOTNULL: zUniOp = "NOTNULL"; break;
-
- case TK_COLLATE: {
- sqlite3TreeViewLine(pView, "COLLATE %Q", pExpr->u.zToken);
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
- break;
- }
-
- case TK_AGG_FUNCTION:
- case TK_FUNCTION: {
- ExprList *pFarg; /* List of function arguments */
- if( ExprHasProperty(pExpr, EP_TokenOnly) ){
- pFarg = 0;
- }else{
- pFarg = pExpr->x.pList;
- }
- if( pExpr->op==TK_AGG_FUNCTION ){
- sqlite3TreeViewLine(pView, "AGG_FUNCTION%d %Q",
- pExpr->op2, pExpr->u.zToken);
- }else{
- sqlite3TreeViewLine(pView, "FUNCTION %Q", pExpr->u.zToken);
- }
- if( pFarg ){
- sqlite3TreeViewExprList(pView, pFarg, 0, 0);
- }
- break;
- }
-#ifndef SQLITE_OMIT_SUBQUERY
- case TK_EXISTS: {
- sqlite3TreeViewLine(pView, "EXISTS-expr");
- sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
- break;
- }
- case TK_SELECT: {
- sqlite3TreeViewLine(pView, "SELECT-expr");
- sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
- break;
- }
- case TK_IN: {
- sqlite3TreeViewLine(pView, "IN");
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
- if( ExprHasProperty(pExpr, EP_xIsSelect) ){
- sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
- }else{
- sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0);
- }
- break;
- }
-#endif /* SQLITE_OMIT_SUBQUERY */
-
- /*
- ** x BETWEEN y AND z
- **
- ** This is equivalent to
- **
- ** x>=y AND x<=z
- **
- ** X is stored in pExpr->pLeft.
- ** Y is stored in pExpr->pList->a[0].pExpr.
- ** Z is stored in pExpr->pList->a[1].pExpr.
- */
- case TK_BETWEEN: {
- Expr *pX = pExpr->pLeft;
- Expr *pY = pExpr->x.pList->a[0].pExpr;
- Expr *pZ = pExpr->x.pList->a[1].pExpr;
- sqlite3TreeViewLine(pView, "BETWEEN");
- sqlite3TreeViewExpr(pView, pX, 1);
- sqlite3TreeViewExpr(pView, pY, 1);
- sqlite3TreeViewExpr(pView, pZ, 0);
- break;
- }
- case TK_TRIGGER: {
- /* If the opcode is TK_TRIGGER, then the expression is a reference
- ** to a column in the new.* or old.* pseudo-tables available to
- ** trigger programs. In this case Expr.iTable is set to 1 for the
- ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn
- ** is set to the column of the pseudo-table to read, or to -1 to
- ** read the rowid field.
- */
- sqlite3TreeViewLine(pView, "%s(%d)",
- pExpr->iTable ? "NEW" : "OLD", pExpr->iColumn);
- break;
- }
- case TK_CASE: {
- sqlite3TreeViewLine(pView, "CASE");
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
- sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0);
- break;
- }
-#ifndef SQLITE_OMIT_TRIGGER
- case TK_RAISE: {
- const char *zType = "unk";
- switch( pExpr->affinity ){
- case OE_Rollback: zType = "rollback"; break;
- case OE_Abort: zType = "abort"; break;
- case OE_Fail: zType = "fail"; break;
- case OE_Ignore: zType = "ignore"; break;
- }
- sqlite3TreeViewLine(pView, "RAISE %s(%Q)", zType, pExpr->u.zToken);
- break;
- }
#endif
- default: {
- sqlite3TreeViewLine(pView, "op=%d", pExpr->op);
- break;
- }
- }
- if( zBinOp ){
- sqlite3TreeViewLine(pView, "%s%s", zBinOp, zFlgs);
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
- sqlite3TreeViewExpr(pView, pExpr->pRight, 0);
- }else if( zUniOp ){
- sqlite3TreeViewLine(pView, "%s%s", zUniOp, zFlgs);
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
- }
- sqlite3TreeViewPop(pView);
-}
-/*
-** Generate a human-readable explanation of an expression list.
-*/
-SQLITE_PRIVATE void sqlite3TreeViewExprList(
- TreeView *pView,
- const ExprList *pList,
- u8 moreToFollow,
- const char *zLabel
-){
- int i;
- pView = sqlite3TreeViewPush(pView, moreToFollow);
- if( zLabel==0 || zLabel[0]==0 ) zLabel = "LIST";
- if( pList==0 ){
- sqlite3TreeViewLine(pView, "%s (empty)", zLabel);
- }else{
- sqlite3TreeViewLine(pView, "%s", zLabel);
- for(i=0; i<pList->nExpr; i++){
- int j = pList->a[i].u.x.iOrderByCol;
- if( j ){
- sqlite3TreeViewPush(pView, 0);
- sqlite3TreeViewLine(pView, "iOrderByCol=%d", j);
- }
- sqlite3TreeViewExpr(pView, pList->a[i].pExpr, i<pList->nExpr-1);
- if( j ) sqlite3TreeViewPop(pView);
- }
- }
- sqlite3TreeViewPop(pView);
-}
-
-#endif /* SQLITE_DEBUG */
-
-/************** End of treeview.c ********************************************/
+/************** End of printf.c **********************************************/
/************** Begin file random.c ******************************************/
/*
** 2001 September 15
@@ -23646,7 +20717,6 @@ SQLITE_PRIVATE void sqlite3TreeViewExprList(
** Random numbers are used by some of the database backends in order
** to generate random integer keys for tables or random filenames.
*/
-/* #include "sqliteInt.h" */
/* All threads share a single random number generator.
@@ -23661,7 +20731,7 @@ static SQLITE_WSD struct sqlite3PrngType {
/*
** Return N random bytes.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *pBuf){
+SQLITE_API void sqlite3_randomness(int N, void *pBuf){
unsigned char t;
unsigned char *zBuf = pBuf;
@@ -23679,23 +20749,9 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *pBuf){
#endif
#if SQLITE_THREADSAFE
- sqlite3_mutex *mutex;
-#endif
-
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return;
-#endif
-
-#if SQLITE_THREADSAFE
- mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG);
-#endif
-
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG);
sqlite3_mutex_enter(mutex);
- if( N<=0 || pBuf==0 ){
- wsdPrng.isInit = 0;
- sqlite3_mutex_leave(mutex);
- return;
- }
+#endif
/* Initialize the state of the random number generator once,
** the first time this routine is called. The seed value does
@@ -23724,8 +20780,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *pBuf){
wsdPrng.isInit = 1;
}
- assert( N>0 );
- do{
+ while( N-- ){
wsdPrng.i++;
t = wsdPrng.s[wsdPrng.i];
wsdPrng.j += t;
@@ -23733,7 +20788,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *pBuf){
wsdPrng.s[wsdPrng.j] = t;
t += wsdPrng.s[wsdPrng.i];
*(zBuf++) = wsdPrng.s[t];
- }while( --N );
+ }
sqlite3_mutex_leave(mutex);
}
@@ -23762,286 +20817,12 @@ SQLITE_PRIVATE void sqlite3PrngRestoreState(void){
sizeof(sqlite3Prng)
);
}
+SQLITE_PRIVATE void sqlite3PrngResetState(void){
+ GLOBAL(struct sqlite3PrngType, sqlite3Prng).isInit = 0;
+}
#endif /* SQLITE_OMIT_BUILTIN_TEST */
/************** End of random.c **********************************************/
-/************** Begin file threads.c *****************************************/
-/*
-** 2012 July 21
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file presents a simple cross-platform threading interface for
-** use internally by SQLite.
-**
-** A "thread" can be created using sqlite3ThreadCreate(). This thread
-** runs independently of its creator until it is joined using
-** sqlite3ThreadJoin(), at which point it terminates.
-**
-** Threads do not have to be real. It could be that the work of the
-** "thread" is done by the main thread at either the sqlite3ThreadCreate()
-** or sqlite3ThreadJoin() call. This is, in fact, what happens in
-** single threaded systems. Nothing in SQLite requires multiple threads.
-** This interface exists so that applications that want to take advantage
-** of multiple cores can do so, while also allowing applications to stay
-** single-threaded if desired.
-*/
-/* #include "sqliteInt.h" */
-#if SQLITE_OS_WIN
-/* # include "os_win.h" */
-#endif
-
-#if SQLITE_MAX_WORKER_THREADS>0
-
-/********************************* Unix Pthreads ****************************/
-#if SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) && SQLITE_THREADSAFE>0
-
-#define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */
-/* #include <pthread.h> */
-
-/* A running thread */
-struct SQLiteThread {
- pthread_t tid; /* Thread ID */
- int done; /* Set to true when thread finishes */
- void *pOut; /* Result returned by the thread */
- void *(*xTask)(void*); /* The thread routine */
- void *pIn; /* Argument to the thread */
-};
-
-/* Create a new thread */
-SQLITE_PRIVATE int sqlite3ThreadCreate(
- SQLiteThread **ppThread, /* OUT: Write the thread object here */
- void *(*xTask)(void*), /* Routine to run in a separate thread */
- void *pIn /* Argument passed into xTask() */
-){
- SQLiteThread *p;
- int rc;
-
- assert( ppThread!=0 );
- assert( xTask!=0 );
- /* This routine is never used in single-threaded mode */
- assert( sqlite3GlobalConfig.bCoreMutex!=0 );
-
- *ppThread = 0;
- p = sqlite3Malloc(sizeof(*p));
- if( p==0 ) return SQLITE_NOMEM;
- memset(p, 0, sizeof(*p));
- p->xTask = xTask;
- p->pIn = pIn;
- /* If the SQLITE_TESTCTRL_FAULT_INSTALL callback is registered to a
- ** function that returns SQLITE_ERROR when passed the argument 200, that
- ** forces worker threads to run sequentially and deterministically
- ** for testing purposes. */
- if( sqlite3FaultSim(200) ){
- rc = 1;
- }else{
- rc = pthread_create(&p->tid, 0, xTask, pIn);
- }
- if( rc ){
- p->done = 1;
- p->pOut = xTask(pIn);
- }
- *ppThread = p;
- return SQLITE_OK;
-}
-
-/* Get the results of the thread */
-SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
- int rc;
-
- assert( ppOut!=0 );
- if( NEVER(p==0) ) return SQLITE_NOMEM;
- if( p->done ){
- *ppOut = p->pOut;
- rc = SQLITE_OK;
- }else{
- rc = pthread_join(p->tid, ppOut) ? SQLITE_ERROR : SQLITE_OK;
- }
- sqlite3_free(p);
- return rc;
-}
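/* A minimal usage sketch of the interface described at the top of this file
** (illustrative only; the task function xSumTask and its int[] argument are
** hypothetical and not part of the SQLite sources). */
static void *xSumTask(void *pIn){
  int *a = (int*)pIn;
  a[2] = a[0] + a[1];                  /* the "work" carried out by the thread */
  return (void*)&a[2];
}
static void exampleThreadUsage(void){
  int a[3] = {3, 4, 0};
  SQLiteThread *pThread = 0;
  void *pOut = 0;
  if( sqlite3ThreadCreate(&pThread, xSumTask, a)==SQLITE_OK ){
    sqlite3ThreadJoin(pThread, &pOut); /* waits, if needed, until a[2]==7 is ready */
  }
}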
-
-#endif /* SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) */
-/******************************** End Unix Pthreads *************************/
-
-
-/********************************* Win32 Threads ****************************/
-#if SQLITE_OS_WIN_THREADS
-
-#define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */
-#include <process.h>
-
-/* A running thread */
-struct SQLiteThread {
- void *tid; /* The thread handle */
- unsigned id; /* The thread identifier */
- void *(*xTask)(void*); /* The routine to run as a thread */
- void *pIn; /* Argument to xTask */
- void *pResult; /* Result of xTask */
-};
-
-/* Thread procedure Win32 compatibility shim */
-static unsigned __stdcall sqlite3ThreadProc(
- void *pArg /* IN: Pointer to the SQLiteThread structure */
-){
- SQLiteThread *p = (SQLiteThread *)pArg;
-
- assert( p!=0 );
-#if 0
- /*
- ** This assert appears to trigger spuriously on certain
- ** versions of Windows, possibly due to _beginthreadex()
- ** and/or CreateThread() not fully setting their thread
- ** ID parameter before starting the thread.
- */
- assert( p->id==GetCurrentThreadId() );
-#endif
- assert( p->xTask!=0 );
- p->pResult = p->xTask(p->pIn);
-
- _endthreadex(0);
- return 0; /* NOT REACHED */
-}
-
-/* Create a new thread */
-SQLITE_PRIVATE int sqlite3ThreadCreate(
- SQLiteThread **ppThread, /* OUT: Write the thread object here */
- void *(*xTask)(void*), /* Routine to run in a separate thread */
- void *pIn /* Argument passed into xTask() */
-){
- SQLiteThread *p;
-
- assert( ppThread!=0 );
- assert( xTask!=0 );
- *ppThread = 0;
- p = sqlite3Malloc(sizeof(*p));
- if( p==0 ) return SQLITE_NOMEM;
- /* If the SQLITE_TESTCTRL_FAULT_INSTALL callback is registered to a
- ** function that returns SQLITE_ERROR when passed the argument 200, that
- ** forces worker threads to run sequentially and deterministically
- ** (via the sqlite3FaultSim() term of the conditional) for testing
- ** purposes. */
- if( sqlite3GlobalConfig.bCoreMutex==0 || sqlite3FaultSim(200) ){
- memset(p, 0, sizeof(*p));
- }else{
- p->xTask = xTask;
- p->pIn = pIn;
- p->tid = (void*)_beginthreadex(0, 0, sqlite3ThreadProc, p, 0, &p->id);
- if( p->tid==0 ){
- memset(p, 0, sizeof(*p));
- }
- }
- if( p->xTask==0 ){
- p->id = GetCurrentThreadId();
- p->pResult = xTask(pIn);
- }
- *ppThread = p;
- return SQLITE_OK;
-}
-
-SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject); /* os_win.c */
-
-/* Get the results of the thread */
-SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
- DWORD rc;
- BOOL bRc;
-
- assert( ppOut!=0 );
- if( NEVER(p==0) ) return SQLITE_NOMEM;
- if( p->xTask==0 ){
- /* assert( p->id==GetCurrentThreadId() ); */
- rc = WAIT_OBJECT_0;
- assert( p->tid==0 );
- }else{
- assert( p->id!=0 && p->id!=GetCurrentThreadId() );
- rc = sqlite3Win32Wait((HANDLE)p->tid);
- assert( rc!=WAIT_IO_COMPLETION );
- bRc = CloseHandle((HANDLE)p->tid);
- assert( bRc );
- }
- if( rc==WAIT_OBJECT_0 ) *ppOut = p->pResult;
- sqlite3_free(p);
- return (rc==WAIT_OBJECT_0) ? SQLITE_OK : SQLITE_ERROR;
-}
-
-#endif /* SQLITE_OS_WIN_THREADS */
-/******************************** End Win32 Threads *************************/
-
-
-/********************************* Single-Threaded **************************/
-#ifndef SQLITE_THREADS_IMPLEMENTED
-/*
-** This implementation does not actually create a new thread. It does the
-** work of the thread in the main thread, when either the thread is created
-** or when it is joined
-*/
-
-/* A running thread */
-struct SQLiteThread {
- void *(*xTask)(void*); /* The routine to run as a thread */
- void *pIn; /* Argument to xTask */
- void *pResult; /* Result of xTask */
-};
-
-/* Create a new thread */
-SQLITE_PRIVATE int sqlite3ThreadCreate(
- SQLiteThread **ppThread, /* OUT: Write the thread object here */
- void *(*xTask)(void*), /* Routine to run in a separate thread */
- void *pIn /* Argument passed into xTask() */
-){
- SQLiteThread *p;
-
- assert( ppThread!=0 );
- assert( xTask!=0 );
- *ppThread = 0;
- p = sqlite3Malloc(sizeof(*p));
- if( p==0 ) return SQLITE_NOMEM;
- if( (SQLITE_PTR_TO_INT(p)/17)&1 ){
- p->xTask = xTask;
- p->pIn = pIn;
- }else{
- p->xTask = 0;
- p->pResult = xTask(pIn);
- }
- *ppThread = p;
- return SQLITE_OK;
-}
-
-/* Get the results of the thread */
-SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
-
- assert( ppOut!=0 );
- if( NEVER(p==0) ) return SQLITE_NOMEM;
- if( p->xTask ){
- *ppOut = p->xTask(p->pIn);
- }else{
- *ppOut = p->pResult;
- }
- sqlite3_free(p);
-
-#if defined(SQLITE_TEST)
- {
- void *pTstAlloc = sqlite3Malloc(10);
- if (!pTstAlloc) return SQLITE_NOMEM;
- sqlite3_free(pTstAlloc);
- }
-#endif
-
- return SQLITE_OK;
-}
-
-#endif /* !defined(SQLITE_THREADS_IMPLEMENTED) */
-/****************************** End Single-Threaded *************************/
-#endif /* SQLITE_MAX_WORKER_THREADS>0 */
-
-/************** End of threads.c *********************************************/
/************** Begin file utf.c *********************************************/
/*
** 2004 April 13
@@ -24078,9 +20859,7 @@ SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
** 0xfe 0xff big-endian utf-16 follows
**
*/
-/* #include "sqliteInt.h" */
/* #include <assert.h> */
-/* #include "vdbeInt.h" */
#ifndef SQLITE_AMALGAMATION
/*
@@ -24193,8 +20972,8 @@ static const unsigned char sqlite3Utf8Trans1[] = {
** and rendered as themselves even though they are technically
** invalid characters.
**
-** * This routine accepts over-length UTF8 encodings
-** for unicode values 0x80 and greater. It does not change over-length
+** * This routine accepts an infinite number of different UTF8 encodings
+** for unicode values 0x80 and greater. It does not change over-length
** encodings to 0xfffd as some systems recommend.
*/
#define READ_UTF8(zIn, zTerm, c) \
@@ -24244,7 +21023,7 @@ SQLITE_PRIVATE u32 sqlite3Utf8Read(
** desiredEnc. It is an error if the string is already of the desired
** encoding, or if *pMem does not contain a string value.
*/
-SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){
+SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){
int len; /* Maximum length of output string in bytes */
unsigned char *zOut; /* Output buffer */
unsigned char *zIn; /* Input iterator */
@@ -24359,13 +21138,12 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desired
*z = 0;
assert( (pMem->n+(desiredEnc==SQLITE_UTF8?1:2))<=len );
- c = pMem->flags;
sqlite3VdbeMemRelease(pMem);
- pMem->flags = MEM_Str|MEM_Term|(c&MEM_AffMask);
+ pMem->flags &= ~(MEM_Static|MEM_Dyn|MEM_Ephem);
pMem->enc = desiredEnc;
+ pMem->flags |= (MEM_Term|MEM_Dyn);
pMem->z = (char*)zOut;
pMem->zMalloc = pMem->z;
- pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->z);
translate_out:
#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG)
@@ -24491,6 +21269,7 @@ SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *db, const void *z, int nByte, u8 e
}
assert( (m.flags & MEM_Term)!=0 || db->mallocFailed );
assert( (m.flags & MEM_Str)!=0 || db->mallocFailed );
+ assert( (m.flags & MEM_Dyn)!=0 || db->mallocFailed );
assert( m.z || db->mallocFailed );
return m.z;
}
@@ -24593,9 +21372,8 @@ SQLITE_PRIVATE void sqlite3UtfSelfTest(void){
** strings, and stuff like that.
**
*/
-/* #include "sqliteInt.h" */
/* #include <stdarg.h> */
-#if HAVE_ISNAN || SQLITE_HAVE_ISNAN
+#ifdef SQLITE_HAVE_ISNAN
# include <math.h>
#endif
@@ -24609,24 +21387,6 @@ SQLITE_PRIVATE void sqlite3Coverage(int x){
}
#endif
-/*
-** Give a callback to the test harness that can be used to simulate faults
-** in places where it is difficult or expensive to do so purely by means
-** of inputs.
-**
-** The intent of the integer argument is to let the fault simulator know
-** which of multiple sqlite3FaultSim() calls has been hit.
-**
-** Return whatever integer value the test callback returns, or return
-** SQLITE_OK if no test callback is installed.
-*/
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-SQLITE_PRIVATE int sqlite3FaultSim(int iTest){
- int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback;
- return xCallback ? xCallback(iTest) : SQLITE_OK;
-}
-#endif
-
#ifndef SQLITE_OMIT_FLOATING_POINT
/*
** Return true if the floating point value is Not a Number (NaN).
@@ -24636,7 +21396,7 @@ SQLITE_PRIVATE int sqlite3FaultSim(int iTest){
*/
SQLITE_PRIVATE int sqlite3IsNaN(double x){
  int rc; /* The value to return */
-#if !SQLITE_HAVE_ISNAN && !HAVE_ISNAN
+#if !defined(SQLITE_HAVE_ISNAN)
/*
** Systems that support the isnan() library function should probably
** make use of it by compiling with -DSQLITE_HAVE_ISNAN. But we have
@@ -24666,9 +21426,9 @@ SQLITE_PRIVATE int sqlite3IsNaN(double x){
volatile double y = x;
volatile double z = y;
rc = (y!=z);
-#else /* if HAVE_ISNAN */
+#else /* if defined(SQLITE_HAVE_ISNAN) */
rc = isnan(x);
-#endif /* HAVE_ISNAN */
+#endif /* SQLITE_HAVE_ISNAN */
testcase( rc );
return rc;
}
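/* The trick exploited above, in isolation (a hedged sketch assuming IEEE-754
** doubles and no "fast math" style compiler options): a NaN is the only
** value that compares unequal to itself. */
static int exampleIsNanCheck(void){
  volatile double zero = 0.0;
  volatile double y = zero/zero;   /* evaluates to NaN at run-time */
  volatile double z = y;
  return y!=z;                     /* 1 for NaN, 0 for every ordinary double */
}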
@@ -24683,17 +21443,10 @@ SQLITE_PRIVATE int sqlite3IsNaN(double x){
** than 1GiB) the value returned might be less than the true string length.
*/
SQLITE_PRIVATE int sqlite3Strlen30(const char *z){
+ const char *z2 = z;
if( z==0 ) return 0;
- return 0x3fffffff & (int)strlen(z);
-}
-
-/*
-** Set the current error code to err_code and clear any prior error message.
-*/
-SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){
- assert( db!=0 );
- db->errCode = err_code;
- if( db->pErr ) sqlite3ValueSetNull(db->pErr);
+ while( *z2 ){ z2++; }
+ return 0x3fffffff & (int)(z2 - z);
}
/*
@@ -24717,18 +21470,19 @@ SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){
** should be called with err_code set to SQLITE_OK and zFormat set
** to NULL.
*/
-SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *zFormat, ...){
- assert( db!=0 );
- db->errCode = err_code;
- if( zFormat==0 ){
- sqlite3Error(db, err_code);
- }else if( db->pErr || (db->pErr = sqlite3ValueNew(db))!=0 ){
- char *z;
- va_list ap;
- va_start(ap, zFormat);
- z = sqlite3VMPrintf(db, zFormat, ap);
- va_end(ap);
- sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC);
+SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code, const char *zFormat, ...){
+ if( db && (db->pErr || (db->pErr = sqlite3ValueNew(db))!=0) ){
+ db->errCode = err_code;
+ if( zFormat ){
+ char *z;
+ va_list ap;
+ va_start(ap, zFormat);
+ z = sqlite3VMPrintf(db, zFormat, ap);
+ va_end(ap);
+ sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC);
+ }else{
+ sqlite3ValueSetStr(db->pErr, 0, 0, SQLITE_UTF8, SQLITE_STATIC);
+ }
}
}
@@ -24742,12 +21496,12 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *z
** %T Insert a token
** %S Insert the first element of a SrcList
**
-** This function should be used to report any error that occurs while
+** This function should be used to report any error that occurs whilst
** compiling an SQL statement (i.e. within sqlite3_prepare()). The
** last thing the sqlite3_prepare() function does is copy the error
** stored by this function into the database handle using sqlite3Error().
-** Functions sqlite3Error() or sqlite3ErrorWithMsg() should be used
-** during statement execution (sqlite3_step() etc.).
+** Function sqlite3Error() should be used during statement execution
+** (sqlite3_step() etc.).
*/
SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){
char *zMsg;
@@ -24780,7 +21534,7 @@ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){
** occur.
**
** 2002-Feb-14: This routine is extended to remove MS-Access style
-** brackets from around identifiers. For example: "[a-b-c]" becomes
+** brackets from around identifiers. For example: "[a-b-c]" becomes
** "a-b-c".
*/
SQLITE_PRIVATE int sqlite3Dequote(char *z){
@@ -24825,25 +21579,15 @@ SQLITE_PRIVATE int sqlite3Dequote(char *z){
** case-independent fashion, using the same definition of "case
** independence" that SQLite uses internally when comparing identifiers.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *zLeft, const char *zRight){
+SQLITE_API int sqlite3_stricmp(const char *zLeft, const char *zRight){
register unsigned char *a, *b;
- if( zLeft==0 ){
- return zRight ? -1 : 0;
- }else if( zRight==0 ){
- return 1;
- }
a = (unsigned char *)zLeft;
b = (unsigned char *)zRight;
while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
return UpperToLower[*a] - UpperToLower[*b];
}
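/* Usage sketch (illustrative only): case is significant only outside the
** ASCII range, so both of the calls below return 0.
**
**    sqlite3_stricmp("ROWID", "rowid");
**    sqlite3_strnicmp("Main database", "MAIN", 4);
*/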
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
+SQLITE_API int sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
register unsigned char *a, *b;
- if( zLeft==0 ){
- return zRight ? -1 : 0;
- }else if( zRight==0 ){
- return 1;
- }
a = (unsigned char *)zLeft;
b = (unsigned char *)zRight;
while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
@@ -25070,9 +21814,9 @@ static int compare2pow63(const char *zNum, int incr){
return c;
}
+
/*
-** Convert zNum to a 64-bit signed integer. zNum must be decimal. This
-** routine does *not* accept hexadecimal notation.
+** Convert zNum to a 64-bit signed integer.
**
** If the zNum value is representable as a 64-bit twos-complement
** integer, then write that value into *pNum and return 0.
@@ -25161,43 +21905,9 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc
}
/*
-** Transform a UTF-8 integer literal, in either decimal or hexadecimal,
-** into a 64-bit signed integer. This routine accepts hexadecimal literals,
-** whereas sqlite3Atoi64() does not.
-**
-** Returns:
-**
-** 0 Successful transformation. Fits in a 64-bit signed integer.
-** 1 Integer too large for a 64-bit signed integer or is malformed
-** 2 Special case of 9223372036854775808
-*/
-SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){
-#ifndef SQLITE_OMIT_HEX_INTEGER
- if( z[0]=='0'
- && (z[1]=='x' || z[1]=='X')
- && sqlite3Isxdigit(z[2])
- ){
- u64 u = 0;
- int i, k;
- for(i=2; z[i]=='0'; i++){}
- for(k=i; sqlite3Isxdigit(z[k]); k++){
- u = u*16 + sqlite3HexToInt(z[k]);
- }
- memcpy(pOut, &u, 8);
- return (z[k]==0 && k-i<=16) ? 0 : 1;
- }else
-#endif /* SQLITE_OMIT_HEX_INTEGER */
- {
- return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8);
- }
-}
-
-/*
** If zNum represents an integer that will fit in 32-bits, then set
** *pValue to that integer and return true. Otherwise return false.
**
-** This routine accepts both decimal and hexadecimal notation for integers.
-**
** Any non-numeric characters that follow zNum are ignored.
** This is different from sqlite3Atoi64() which requires the
** input number to be zero-terminated.
@@ -25212,25 +21922,6 @@ SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){
}else if( zNum[0]=='+' ){
zNum++;
}
-#ifndef SQLITE_OMIT_HEX_INTEGER
- else if( zNum[0]=='0'
- && (zNum[1]=='x' || zNum[1]=='X')
- && sqlite3Isxdigit(zNum[2])
- ){
- u32 u = 0;
- zNum += 2;
- while( zNum[0]=='0' ) zNum++;
- for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){
- u = u*16 + sqlite3HexToInt(zNum[i]);
- }
- if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){
- memcpy(pValue, &u, 4);
- return 1;
- }else{
- return 0;
- }
- }
-#endif
while( zNum[0]=='0' ) zNum++;
for(i=0; i<11 && (c = zNum[i] - '0')>=0 && c<=9; i++){
v = v*10 + c;
@@ -25295,7 +21986,7 @@ SQLITE_PRIVATE int sqlite3Atoi(const char *z){
** bit clear. Except, if we get to the 9th byte, it stores the full
** 8 bits and is the last byte.
*/
-static int SQLITE_NOINLINE putVarint64(unsigned char *p, u64 v){
+SQLITE_PRIVATE int sqlite3PutVarint(unsigned char *p, u64 v){
int i, j, n;
u8 buf[10];
if( v & (((u64)0xff000000)<<32) ){
@@ -25319,17 +22010,28 @@ static int SQLITE_NOINLINE putVarint64(unsigned char *p, u64 v){
}
return n;
}
-SQLITE_PRIVATE int sqlite3PutVarint(unsigned char *p, u64 v){
- if( v<=0x7f ){
- p[0] = v&0x7f;
+
+/*
+** This routine is a faster version of sqlite3PutVarint() that only
+** works for 32-bit positive integers and which is optimized for
+** the common case of small integers. A MACRO version, putVarint32,
+** is provided which inlines the single-byte case. All code should use
+** the MACRO version as this function assumes the single-byte case has
+** already been handled.
+*/
+SQLITE_PRIVATE int sqlite3PutVarint32(unsigned char *p, u32 v){
+#ifndef putVarint32
+ if( (v & ~0x7f)==0 ){
+ p[0] = v;
return 1;
}
- if( v<=0x3fff ){
- p[0] = ((v>>7)&0x7f)|0x80;
- p[1] = v&0x7f;
+#endif
+ if( (v & ~0x3fff)==0 ){
+ p[0] = (u8)((v>>7) | 0x80);
+ p[1] = (u8)(v & 0x7f);
return 2;
}
- return putVarint64(p,v);
+ return sqlite3PutVarint(p, v);
}
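/* A worked instance of the encoding described above (hedged example): the
** value 300 does not fit in 7 bits, so two bytes are emitted, 0x82 0x2C,
** i.e. (300>>7)|0x80 followed by 300&0x7f; decoding reverses this:
** ((0x82&0x7f)<<7) | 0x2C == 300. Since 300 is a multi-byte case, calling
** the function directly (rather than the putVarint32 macro) is fine here. */
static void exampleVarintEncode(void){
  unsigned char aBuf[9];                   /* 9 bytes is the worst case */
  int n = sqlite3PutVarint32(aBuf, 300);   /* n==2; aBuf[0]==0x82, aBuf[1]==0x2c */
  (void)n;
}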
/*
@@ -25643,8 +22345,11 @@ SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){
** 64-bit integer.
*/
SQLITE_PRIVATE int sqlite3VarintLen(u64 v){
- int i;
- for(i=1; (v >>= 7)!=0; i++){ assert( i<9 ); }
+ int i = 0;
+ do{
+ i++;
+ v >>= 7;
+ }while( v!=0 && ALWAYS(i<9) );
return i;
}
@@ -25653,40 +22358,13 @@ SQLITE_PRIVATE int sqlite3VarintLen(u64 v){
** Read or write a four-byte big-endian integer value.
*/
SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){
-#if SQLITE_BYTEORDER==4321
- u32 x;
- memcpy(&x,p,4);
- return x;
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(__GNUC__) && GCC_VERSION>=4003000
- u32 x;
- memcpy(&x,p,4);
- return __builtin_bswap32(x);
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(_MSC_VER) && _MSC_VER>=1300
- u32 x;
- memcpy(&x,p,4);
- return _byteswap_ulong(x);
-#else
- testcase( p[0]&0x80 );
- return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
-#endif
+ return (p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
}
SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){
-#if SQLITE_BYTEORDER==4321
- memcpy(p,&v,4);
-#elif SQLITE_BYTEORDER==1234 && defined(__GNUC__) && GCC_VERSION>=4003000
- u32 x = __builtin_bswap32(v);
- memcpy(p,&x,4);
-#elif SQLITE_BYTEORDER==1234 && defined(_MSC_VER) && _MSC_VER>=1300
- u32 x = _byteswap_ulong(v);
- memcpy(p,&x,4);
-#else
p[0] = (u8)(v>>24);
p[1] = (u8)(v>>16);
p[2] = (u8)(v>>8);
p[3] = (u8)v;
-#endif
}
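/* Round-trip sketch for the two helpers above (illustrative only): the byte
** layout is big-endian regardless of the host byte order. */
static void example4byteRoundTrip(void){
  unsigned char a[4];
  u32 v;
  sqlite3Put4byte(a, 0x11223344);   /* a[] == {0x11,0x22,0x33,0x44} */
  v = sqlite3Get4byte(a);           /* v == 0x11223344 again */
  (void)v;
}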
@@ -25801,12 +22479,13 @@ SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){
testcase( iA>0 && LARGEST_INT64 - iA == iB );
testcase( iA>0 && LARGEST_INT64 - iA == iB - 1 );
if( iA>0 && LARGEST_INT64 - iA < iB ) return 1;
+ *pA += iB;
}else{
testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 1 );
testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 2 );
if( iA<0 && -(iA + LARGEST_INT64) > iB + 1 ) return 1;
+ *pA += iB;
}
- *pA += iB;
return 0;
}
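/* A small illustration of the overflow guard above (hedged sketch): an
** in-range sum is applied, an overflowing one is refused and the target is
** left unchanged. */
static void exampleAddInt64(void){
  i64 x = 40;
  i64 y = LARGEST_INT64;
  int rc1 = sqlite3AddInt64(&x, 2);   /* rc1==0, x==42 */
  int rc2 = sqlite3AddInt64(&y, 1);   /* rc2==1 (overflow), y unchanged */
  (void)rc1; (void)rc2;
}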
SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){
@@ -25830,18 +22509,9 @@ SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){
iA0 = iA % TWOPOWER32;
iB1 = iB/TWOPOWER32;
iB0 = iB % TWOPOWER32;
- if( iA1==0 ){
- if( iB1==0 ){
- *pA *= iB;
- return 0;
- }
- r = iA0*iB1;
- }else if( iB1==0 ){
- r = iA1*iB0;
- }else{
- /* If both iA1 and iB1 are non-zero, overflow will result */
- return 1;
- }
+ if( iA1*iB1 != 0 ) return 1;
+ assert( iA1*iB0==0 || iA0*iB1==0 );
+ r = iA1*iB0 + iA0*iB1;
testcase( r==(-TWOPOWER31)-1 );
testcase( r==(-TWOPOWER31) );
testcase( r==TWOPOWER31 );
@@ -25924,8 +22594,8 @@ SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst a, LogEst b){
}
/*
-** Convert an integer into a LogEst. In other words, compute an
-** approximation for 10*log2(x).
+** Convert an integer into a LogEst. In other words, compute a
+** good approximation for 10*log2(x).
*/
SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){
static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 };
@@ -25989,7 +22659,6 @@ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){
** This is the implementation of generic hash-tables
** used in SQLite.
*/
-/* #include "sqliteInt.h" */
/* #include <assert.h> */
/* Turn bulk memory into a hash table object by initializing the
@@ -26029,11 +22698,12 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){
/*
** The hashing function.
*/
-static unsigned int strHash(const char *z){
- unsigned int h = 0;
- unsigned char c;
- while( (c = (unsigned char)*z++)!=0 ){
- h = (h<<3) ^ h ^ sqlite3UpperToLower[c];
+static unsigned int strHash(const char *z, int nKey){
+ int h = 0;
+ assert( nKey>=0 );
+ while( nKey > 0 ){
+ h = (h<<3) ^ h ^ sqlite3UpperToLower[(unsigned char)*z++];
+ nKey--;
}
return h;
}
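/* Note (hedged observation): because every byte is folded through
** sqlite3UpperToLower[] before being mixed in, the hash is case-insensitive
** for ASCII; e.g. strHash("rowid",5) and strHash("ROWID",5) collide by
** design, matching the case-insensitive comparison used when probing a
** bucket below. */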
@@ -26105,7 +22775,7 @@ static int rehash(Hash *pH, unsigned int new_size){
pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht);
memset(new_ht, 0, new_size*sizeof(struct _ht));
for(elem=pH->first, pH->first=0; elem; elem = next_elem){
- unsigned int h = strHash(elem->pKey) % new_size;
+ unsigned int h = strHash(elem->pKey, elem->nKey) % new_size;
next_elem = elem->next;
insertElement(pH, &new_ht[h], elem);
}
@@ -26113,33 +22783,28 @@ static int rehash(Hash *pH, unsigned int new_size){
}
/* This function (for internal use only) locates an element in an
-** hash table that matches the given key. The hash for this key is
-** also computed and returned in the *pH parameter.
+** hash table that matches the given key. The hash for this key has
+** already been computed and is passed as the 4th parameter.
*/
-static HashElem *findElementWithHash(
+static HashElem *findElementGivenHash(
const Hash *pH, /* The pH to be searched */
const char *pKey, /* The key we are searching for */
- unsigned int *pHash /* Write the hash value here */
+ int nKey, /* Bytes in key (not counting zero terminator) */
+ unsigned int h /* The hash for this key. */
){
HashElem *elem; /* Used to loop thru the element list */
int count; /* Number of elements left to test */
- unsigned int h; /* The computed hash */
if( pH->ht ){
- struct _ht *pEntry;
- h = strHash(pKey) % pH->htsize;
- pEntry = &pH->ht[h];
+ struct _ht *pEntry = &pH->ht[h];
elem = pEntry->chain;
count = pEntry->count;
}else{
- h = 0;
elem = pH->first;
count = pH->count;
}
- *pHash = h;
- while( count-- ){
- assert( elem!=0 );
- if( sqlite3StrICmp(elem->pKey,pKey)==0 ){
+ while( count-- && ALWAYS(elem) ){
+ if( elem->nKey==nKey && sqlite3StrNICmp(elem->pKey,pKey,nKey)==0 ){
return elem;
}
elem = elem->next;
@@ -26182,20 +22847,26 @@ static void removeElementGivenHash(
}
/* Attempt to locate an element of the hash table pH with a key
-** that matches pKey. Return the data for this element if it is
+** that matches pKey,nKey. Return the data for this element if it is
** found, or NULL if there is no match.
*/
-SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey){
+SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey, int nKey){
HashElem *elem; /* The element that matches key */
unsigned int h; /* A hash on key */
assert( pH!=0 );
assert( pKey!=0 );
- elem = findElementWithHash(pH, pKey, &h);
+ assert( nKey>=0 );
+ if( pH->ht ){
+ h = strHash(pKey, nKey) % pH->htsize;
+ }else{
+ h = 0;
+ }
+ elem = findElementGivenHash(pH, pKey, nKey, h);
return elem ? elem->data : 0;
}
-/* Insert an element into the hash table pH. The key is pKey
+/* Insert an element into the hash table pH. The key is pKey,nKey
** and the data is "data".
**
** If no element exists with a matching key, then a new
@@ -26209,14 +22880,20 @@ SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey){
** If the "data" parameter to this function is NULL, then the
** element corresponding to "key" is removed from the hash table.
*/
-SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
+SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, int nKey, void *data){
unsigned int h; /* the hash of the key modulo hash table size */
HashElem *elem; /* Used to loop thru the element list */
HashElem *new_elem; /* New element added to the pH */
assert( pH!=0 );
assert( pKey!=0 );
- elem = findElementWithHash(pH,pKey,&h);
+ assert( nKey>=0 );
+ if( pH->htsize ){
+ h = strHash(pKey, nKey) % pH->htsize;
+ }else{
+ h = 0;
+ }
+ elem = findElementGivenHash(pH,pKey,nKey,h);
if( elem ){
void *old_data = elem->data;
if( data==0 ){
@@ -26224,6 +22901,7 @@ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
}else{
elem->data = data;
elem->pKey = pKey;
+ assert(nKey==elem->nKey);
}
return old_data;
}
@@ -26231,15 +22909,20 @@ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) );
if( new_elem==0 ) return data;
new_elem->pKey = pKey;
+ new_elem->nKey = nKey;
new_elem->data = data;
pH->count++;
if( pH->count>=10 && pH->count > 2*pH->htsize ){
if( rehash(pH, pH->count*2) ){
assert( pH->htsize>0 );
- h = strHash(pKey) % pH->htsize;
+ h = strHash(pKey, nKey) % pH->htsize;
}
}
- insertElement(pH, pH->ht ? &pH->ht[h] : 0, new_elem);
+ if( pH->ht ){
+ insertElement(pH, &pH->ht[h], new_elem);
+ }else{
+ insertElement(pH, 0, new_elem);
+ }
return 0;
}
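/* A minimal usage sketch of the nKey-based interface restored above
** (illustrative only; pPayload is a placeholder for whatever the caller
** wants to associate with the key). */
static void exampleHashUsage(void *pPayload){
  Hash h;
  void *pFound;
  sqlite3HashInit(&h);
  sqlite3HashInsert(&h, "temp_store", 10, pPayload);  /* store under the key */
  pFound = sqlite3HashFind(&h, "TEMP_STORE", 10);     /* lookup ignores case */
  (void)pFound;                                       /* == pPayload */
  sqlite3HashClear(&h);
}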
@@ -26255,166 +22938,158 @@ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
#endif
SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
static const char *const azName[] = { "?",
- /* 1 */ "Savepoint" OpHelp(""),
- /* 2 */ "AutoCommit" OpHelp(""),
- /* 3 */ "Transaction" OpHelp(""),
- /* 4 */ "SorterNext" OpHelp(""),
- /* 5 */ "PrevIfOpen" OpHelp(""),
- /* 6 */ "NextIfOpen" OpHelp(""),
- /* 7 */ "Prev" OpHelp(""),
- /* 8 */ "Next" OpHelp(""),
- /* 9 */ "Checkpoint" OpHelp(""),
- /* 10 */ "JournalMode" OpHelp(""),
- /* 11 */ "Vacuum" OpHelp(""),
- /* 12 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"),
- /* 13 */ "VUpdate" OpHelp("data=r[P3@P2]"),
- /* 14 */ "Goto" OpHelp(""),
- /* 15 */ "Gosub" OpHelp(""),
- /* 16 */ "Return" OpHelp(""),
- /* 17 */ "InitCoroutine" OpHelp(""),
- /* 18 */ "EndCoroutine" OpHelp(""),
+ /* 1 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"),
+ /* 2 */ "Savepoint" OpHelp(""),
+ /* 3 */ "AutoCommit" OpHelp(""),
+ /* 4 */ "Transaction" OpHelp(""),
+ /* 5 */ "SorterNext" OpHelp(""),
+ /* 6 */ "PrevIfOpen" OpHelp(""),
+ /* 7 */ "NextIfOpen" OpHelp(""),
+ /* 8 */ "Prev" OpHelp(""),
+ /* 9 */ "Next" OpHelp(""),
+ /* 10 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 11 */ "Checkpoint" OpHelp(""),
+ /* 12 */ "JournalMode" OpHelp(""),
+ /* 13 */ "Vacuum" OpHelp(""),
+ /* 14 */ "VFilter" OpHelp("iPlan=r[P3] zPlan='P4'"),
+ /* 15 */ "VUpdate" OpHelp("data=r[P3@P2]"),
+ /* 16 */ "Goto" OpHelp(""),
+ /* 17 */ "Gosub" OpHelp(""),
+ /* 18 */ "Return" OpHelp(""),
/* 19 */ "Not" OpHelp("r[P2]= !r[P1]"),
/* 20 */ "Yield" OpHelp(""),
- /* 21 */ "HaltIfNull" OpHelp("if r[P3]=null halt"),
+ /* 21 */ "HaltIfNull" OpHelp("if r[P3] null then halt"),
/* 22 */ "Halt" OpHelp(""),
/* 23 */ "Integer" OpHelp("r[P2]=P1"),
/* 24 */ "Int64" OpHelp("r[P2]=P4"),
/* 25 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
/* 26 */ "Null" OpHelp("r[P2..P3]=NULL"),
- /* 27 */ "SoftNull" OpHelp("r[P1]=NULL"),
- /* 28 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
- /* 29 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
- /* 30 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
- /* 31 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
- /* 32 */ "SCopy" OpHelp("r[P2]=r[P1]"),
- /* 33 */ "ResultRow" OpHelp("output=r[P1@P2]"),
- /* 34 */ "CollSeq" OpHelp(""),
- /* 35 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"),
- /* 36 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"),
- /* 37 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
- /* 38 */ "MustBeInt" OpHelp(""),
- /* 39 */ "RealAffinity" OpHelp(""),
- /* 40 */ "Cast" OpHelp("affinity(r[P1])"),
- /* 41 */ "Permutation" OpHelp(""),
- /* 42 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
- /* 43 */ "Jump" OpHelp(""),
- /* 44 */ "Once" OpHelp(""),
- /* 45 */ "If" OpHelp(""),
- /* 46 */ "IfNot" OpHelp(""),
- /* 47 */ "Column" OpHelp("r[P3]=PX"),
- /* 48 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
- /* 49 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
- /* 50 */ "Count" OpHelp("r[P2]=count()"),
- /* 51 */ "ReadCookie" OpHelp(""),
- /* 52 */ "SetCookie" OpHelp(""),
- /* 53 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
- /* 54 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
- /* 55 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
- /* 56 */ "OpenAutoindex" OpHelp("nColumn=P2"),
- /* 57 */ "OpenEphemeral" OpHelp("nColumn=P2"),
- /* 58 */ "SorterOpen" OpHelp(""),
- /* 59 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"),
- /* 60 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"),
- /* 61 */ "Close" OpHelp(""),
- /* 62 */ "ColumnsUsed" OpHelp(""),
- /* 63 */ "SeekLT" OpHelp("key=r[P3@P4]"),
- /* 64 */ "SeekLE" OpHelp("key=r[P3@P4]"),
- /* 65 */ "SeekGE" OpHelp("key=r[P3@P4]"),
- /* 66 */ "SeekGT" OpHelp("key=r[P3@P4]"),
- /* 67 */ "Seek" OpHelp("intkey=r[P2]"),
- /* 68 */ "NoConflict" OpHelp("key=r[P3@P4]"),
- /* 69 */ "NotFound" OpHelp("key=r[P3@P4]"),
- /* 70 */ "Found" OpHelp("key=r[P3@P4]"),
- /* 71 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"),
- /* 72 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"),
- /* 73 */ "NotExists" OpHelp("intkey=r[P3]"),
- /* 74 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"),
- /* 75 */ "NewRowid" OpHelp("r[P2]=rowid"),
- /* 76 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"),
- /* 77 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"),
- /* 78 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"),
- /* 79 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"),
- /* 80 */ "Gt" OpHelp("if r[P1]>r[P3] goto P2"),
- /* 81 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"),
- /* 82 */ "Lt" OpHelp("if r[P1]<r[P3] goto P2"),
- /* 83 */ "Ge" OpHelp("if r[P1]>=r[P3] goto P2"),
- /* 84 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
- /* 85 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
- /* 86 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
- /* 87 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<<r[P1]"),
- /* 88 */ "ShiftRight" OpHelp("r[P3]=r[P2]>>r[P1]"),
- /* 89 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"),
- /* 90 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"),
- /* 91 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"),
- /* 92 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"),
- /* 93 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"),
- /* 94 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"),
- /* 95 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"),
- /* 96 */ "BitNot" OpHelp("r[P1]= ~r[P1]"),
- /* 97 */ "String8" OpHelp("r[P2]='P4'"),
- /* 98 */ "Delete" OpHelp(""),
- /* 99 */ "ResetCount" OpHelp(""),
- /* 100 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
- /* 101 */ "SorterData" OpHelp("r[P2]=data"),
- /* 102 */ "RowKey" OpHelp("r[P2]=key"),
- /* 103 */ "RowData" OpHelp("r[P2]=data"),
- /* 104 */ "Rowid" OpHelp("r[P2]=rowid"),
- /* 105 */ "NullRow" OpHelp(""),
- /* 106 */ "Last" OpHelp(""),
- /* 107 */ "SorterSort" OpHelp(""),
- /* 108 */ "Sort" OpHelp(""),
- /* 109 */ "Rewind" OpHelp(""),
- /* 110 */ "SorterInsert" OpHelp(""),
- /* 111 */ "IdxInsert" OpHelp("key=r[P2]"),
- /* 112 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
- /* 113 */ "IdxRowid" OpHelp("r[P2]=rowid"),
- /* 114 */ "IdxLE" OpHelp("key=r[P3@P4]"),
- /* 115 */ "IdxGT" OpHelp("key=r[P3@P4]"),
- /* 116 */ "IdxLT" OpHelp("key=r[P3@P4]"),
- /* 117 */ "IdxGE" OpHelp("key=r[P3@P4]"),
- /* 118 */ "Destroy" OpHelp(""),
- /* 119 */ "Clear" OpHelp(""),
- /* 120 */ "ResetSorter" OpHelp(""),
- /* 121 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"),
- /* 122 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"),
- /* 123 */ "ParseSchema" OpHelp(""),
- /* 124 */ "LoadAnalysis" OpHelp(""),
- /* 125 */ "DropTable" OpHelp(""),
- /* 126 */ "DropIndex" OpHelp(""),
- /* 127 */ "DropTrigger" OpHelp(""),
- /* 128 */ "IntegrityCk" OpHelp(""),
- /* 129 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
- /* 130 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
- /* 131 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
- /* 132 */ "Program" OpHelp(""),
- /* 133 */ "Real" OpHelp("r[P2]=P4"),
- /* 134 */ "Param" OpHelp(""),
- /* 135 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
- /* 136 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
- /* 137 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
- /* 138 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
- /* 139 */ "SetIfNotPos" OpHelp("if r[P1]<=0 then r[P2]=P3"),
- /* 140 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"),
- /* 141 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
- /* 142 */ "JumpZeroIncr" OpHelp("if (r[P1]++)==0 ) goto P2"),
- /* 143 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 144 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 145 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
- /* 146 */ "IncrVacuum" OpHelp(""),
- /* 147 */ "Expire" OpHelp(""),
- /* 148 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
- /* 149 */ "VBegin" OpHelp(""),
- /* 150 */ "VCreate" OpHelp(""),
- /* 151 */ "VDestroy" OpHelp(""),
- /* 152 */ "VOpen" OpHelp(""),
- /* 153 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 154 */ "VNext" OpHelp(""),
- /* 155 */ "VRename" OpHelp(""),
- /* 156 */ "Pagecount" OpHelp(""),
- /* 157 */ "MaxPgcnt" OpHelp(""),
- /* 158 */ "Init" OpHelp("Start at P2"),
- /* 159 */ "Noop" OpHelp(""),
- /* 160 */ "Explain" OpHelp(""),
+ /* 27 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
+ /* 28 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
+ /* 29 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
+ /* 30 */ "Copy" OpHelp("r[P2@P3]=r[P1@P3]"),
+ /* 31 */ "SCopy" OpHelp("r[P2]=r[P1]"),
+ /* 32 */ "ResultRow" OpHelp("output=r[P1@P2]"),
+ /* 33 */ "CollSeq" OpHelp(""),
+ /* 34 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
+ /* 35 */ "MustBeInt" OpHelp(""),
+ /* 36 */ "RealAffinity" OpHelp(""),
+ /* 37 */ "Permutation" OpHelp(""),
+ /* 38 */ "Compare" OpHelp(""),
+ /* 39 */ "Jump" OpHelp(""),
+ /* 40 */ "Once" OpHelp(""),
+ /* 41 */ "If" OpHelp(""),
+ /* 42 */ "IfNot" OpHelp(""),
+ /* 43 */ "Column" OpHelp("r[P3]=PX"),
+ /* 44 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
+ /* 45 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
+ /* 46 */ "Count" OpHelp("r[P2]=count()"),
+ /* 47 */ "ReadCookie" OpHelp(""),
+ /* 48 */ "SetCookie" OpHelp(""),
+ /* 49 */ "VerifyCookie" OpHelp(""),
+ /* 50 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
+ /* 51 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
+ /* 52 */ "OpenAutoindex" OpHelp("nColumn=P2"),
+ /* 53 */ "OpenEphemeral" OpHelp("nColumn=P2"),
+ /* 54 */ "SorterOpen" OpHelp(""),
+ /* 55 */ "OpenPseudo" OpHelp("content in r[P2@P3]"),
+ /* 56 */ "Close" OpHelp(""),
+ /* 57 */ "SeekLt" OpHelp("key=r[P3@P4]"),
+ /* 58 */ "SeekLe" OpHelp("key=r[P3@P4]"),
+ /* 59 */ "SeekGe" OpHelp("key=r[P3@P4]"),
+ /* 60 */ "SeekGt" OpHelp("key=r[P3@P4]"),
+ /* 61 */ "Seek" OpHelp("intkey=r[P2]"),
+ /* 62 */ "NoConflict" OpHelp("key=r[P3@P4]"),
+ /* 63 */ "NotFound" OpHelp("key=r[P3@P4]"),
+ /* 64 */ "Found" OpHelp("key=r[P3@P4]"),
+ /* 65 */ "NotExists" OpHelp("intkey=r[P3]"),
+ /* 66 */ "Sequence" OpHelp("r[P2]=rowid"),
+ /* 67 */ "NewRowid" OpHelp("r[P2]=rowid"),
+ /* 68 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
+ /* 69 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"),
+ /* 70 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"),
+ /* 71 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"),
+ /* 72 */ "Delete" OpHelp(""),
+ /* 73 */ "ResetCount" OpHelp(""),
+ /* 74 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"),
+ /* 75 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"),
+ /* 76 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"),
+ /* 77 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"),
+ /* 78 */ "Gt" OpHelp("if r[P1]>r[P3] goto P2"),
+ /* 79 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"),
+ /* 80 */ "Lt" OpHelp("if r[P1]<r[P3] goto P2"),
+ /* 81 */ "Ge" OpHelp("if r[P1]>=r[P3] goto P2"),
+ /* 82 */ "SorterCompare" OpHelp("if key(P1)!=rtrim(r[P3],P4) goto P2"),
+ /* 83 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
+ /* 84 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
+ /* 85 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<<r[P1]"),
+ /* 86 */ "ShiftRight" OpHelp("r[P3]=r[P2]>>r[P1]"),
+ /* 87 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"),
+ /* 88 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"),
+ /* 89 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"),
+ /* 90 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"),
+ /* 91 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"),
+ /* 92 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"),
+ /* 93 */ "SorterData" OpHelp("r[P2]=data"),
+ /* 94 */ "BitNot" OpHelp("r[P1]= ~r[P1]"),
+ /* 95 */ "String8" OpHelp("r[P2]='P4'"),
+ /* 96 */ "RowKey" OpHelp("r[P2]=key"),
+ /* 97 */ "RowData" OpHelp("r[P2]=data"),
+ /* 98 */ "Rowid" OpHelp("r[P2]=rowid"),
+ /* 99 */ "NullRow" OpHelp(""),
+ /* 100 */ "Last" OpHelp(""),
+ /* 101 */ "SorterSort" OpHelp(""),
+ /* 102 */ "Sort" OpHelp(""),
+ /* 103 */ "Rewind" OpHelp(""),
+ /* 104 */ "SorterInsert" OpHelp(""),
+ /* 105 */ "IdxInsert" OpHelp("key=r[P2]"),
+ /* 106 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
+ /* 107 */ "IdxRowid" OpHelp("r[P2]=rowid"),
+ /* 108 */ "IdxLT" OpHelp("key=r[P3@P4]"),
+ /* 109 */ "IdxGE" OpHelp("key=r[P3@P4]"),
+ /* 110 */ "Destroy" OpHelp(""),
+ /* 111 */ "Clear" OpHelp(""),
+ /* 112 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"),
+ /* 113 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"),
+ /* 114 */ "ParseSchema" OpHelp(""),
+ /* 115 */ "LoadAnalysis" OpHelp(""),
+ /* 116 */ "DropTable" OpHelp(""),
+ /* 117 */ "DropIndex" OpHelp(""),
+ /* 118 */ "DropTrigger" OpHelp(""),
+ /* 119 */ "IntegrityCk" OpHelp(""),
+ /* 120 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
+ /* 121 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
+ /* 122 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
+ /* 123 */ "Program" OpHelp(""),
+ /* 124 */ "Param" OpHelp(""),
+ /* 125 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
+ /* 126 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
+ /* 127 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
+ /* 128 */ "IfPos" OpHelp("if r[P1]>0 goto P2"),
+ /* 129 */ "IfNeg" OpHelp("if r[P1]<0 goto P2"),
+ /* 130 */ "IfZero" OpHelp("r[P1]+=P3, if r[P1]==0 goto P2"),
+ /* 131 */ "Real" OpHelp("r[P2]=P4"),
+ /* 132 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
+ /* 133 */ "IncrVacuum" OpHelp(""),
+ /* 134 */ "Expire" OpHelp(""),
+ /* 135 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
+ /* 136 */ "VBegin" OpHelp(""),
+ /* 137 */ "VCreate" OpHelp(""),
+ /* 138 */ "VDestroy" OpHelp(""),
+ /* 139 */ "VOpen" OpHelp(""),
+ /* 140 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 141 */ "VNext" OpHelp(""),
+ /* 142 */ "ToText" OpHelp(""),
+ /* 143 */ "ToBlob" OpHelp(""),
+ /* 144 */ "ToNumeric" OpHelp(""),
+ /* 145 */ "ToInt" OpHelp(""),
+ /* 146 */ "ToReal" OpHelp(""),
+ /* 147 */ "VRename" OpHelp(""),
+ /* 148 */ "Pagecount" OpHelp(""),
+ /* 149 */ "MaxPgcnt" OpHelp(""),
+ /* 150 */ "Trace" OpHelp(""),
+ /* 151 */ "Noop" OpHelp(""),
+ /* 152 */ "Explain" OpHelp(""),
};
return azName[i];
}
@@ -26467,7 +23142,6 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
** * Definitions of sqlite3_vfs objects for all locking methods
** plus implementations of sqlite3_os_init() and sqlite3_os_end().
*/
-/* #include "sqliteInt.h" */
#if SQLITE_OS_UNIX /* This file is used on unix only */
/*
@@ -26496,6 +23170,44 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
#endif
/*
+** Define the OS_VXWORKS pre-processor macro to 1 if building on
+** vxworks, or 0 otherwise.
+*/
+#ifndef OS_VXWORKS
+# if defined(__RTP__) || defined(_WRS_KERNEL)
+# define OS_VXWORKS 1
+# else
+# define OS_VXWORKS 0
+# endif
+#endif
+
+/*
+** These #defines should enable >2GB file support on Posix if the
+** underlying operating system supports it. If the OS lacks
+** large file support, these should be no-ops.
+**
+** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch
+** on the compiler command line. This is necessary if you are compiling
+** on a recent machine (ex: RedHat 7.2) but you want your code to work
+** on an older machine (ex: RedHat 6.0). If you compile on RedHat 7.2
+** without this option, LFS is enabled. But LFS does not exist in the kernel
+** in RedHat 6.0, so the code won't work. Hence, for maximum binary
+** portability you should omit LFS.
+**
+** The previous paragraph was written in 2005. (This paragraph is written
+** on 2008-11-28.) These days, all Linux kernels support large files, so
+** you should probably leave LFS enabled. But some embedded platforms might
+** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful.
+*/
+#ifndef SQLITE_DISABLE_LFS
+# define _LARGE_FILE 1
+# ifndef _FILE_OFFSET_BITS
+# define _FILE_OFFSET_BITS 64
+# endif
+# define _LARGEFILE_SOURCE 1
+#endif
+
+/*
** standard include files.
*/
#include <sys/types.h>
@@ -26506,33 +23218,22 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
#include <sys/time.h>
#include <errno.h>
#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
-# include <sys/mman.h>
+#include <sys/mman.h>
#endif
+
#if SQLITE_ENABLE_LOCKING_STYLE
# include <sys/ioctl.h>
-# include <sys/file.h>
-# include <sys/param.h>
+# if OS_VXWORKS
+# include <semaphore.h>
+# include <limits.h>
+# else
+# include <sys/file.h>
+# include <sys/param.h>
+# endif
#endif /* SQLITE_ENABLE_LOCKING_STYLE */
-#if defined(__APPLE__) && ((__MAC_OS_X_VERSION_MIN_REQUIRED > 1050) || \
- (__IPHONE_OS_VERSION_MIN_REQUIRED > 2000))
-# if (!defined(TARGET_OS_EMBEDDED) || (TARGET_OS_EMBEDDED==0)) \
- && (!defined(TARGET_IPHONE_SIMULATOR) || (TARGET_IPHONE_SIMULATOR==0))
-# define HAVE_GETHOSTUUID 1
-# else
-# warning "gethostuuid() is disabled."
-# endif
-#endif
-
-
-#if OS_VXWORKS
-/* # include <sys/ioctl.h> */
-# include <semaphore.h>
-# include <limits.h>
-#endif /* OS_VXWORKS */
-
-#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE
+#if defined(__APPLE__) || (SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS)
# include <sys/mount.h>
#endif
@@ -26573,10 +23274,6 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
*/
#define MAX_PATHNAME 512
-/* Always cast the getpid() return type for compatibility with
-** kernel modules in VxWorks. */
-#define osGetpid(X) (pid_t)getpid()
-
/*
** Only set the lastErrno if the error code is a real error and not
** a normal expected return code of SQLITE_BUSY or SQLITE_OK
@@ -26661,12 +23358,6 @@ struct unixFile {
#endif
};
-/* This variable holds the process id (pid) from when the xRandomness()
-** method was called. If xOpen() is called from a different process id,
-** indicating that a fork() has occurred, the PRNG will be reset.
-*/
-static pid_t randomnessPid = 0;
-
/*
** Allowed values for the unixFile.ctrlFlags bitmask:
*/
@@ -26682,8 +23373,7 @@ static pid_t randomnessPid = 0;
#define UNIXFILE_DELETE 0x20 /* Delete on close */
#define UNIXFILE_URI 0x40 /* Filename might have query parameters */
#define UNIXFILE_NOLOCK 0x80 /* Do no file locking */
-#define UNIXFILE_WARNED 0x0100 /* verifyDbFile() warnings issued */
-#define UNIXFILE_BLOCK 0x0200 /* Next SHM lock might block */
+#define UNIXFILE_WARNED 0x0100 /* verifyDbFile() warnings have been issued */
/*
** Include code that is common to all os_*.c files
@@ -26721,6 +23411,16 @@ static pid_t randomnessPid = 0;
# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
#endif
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+# ifndef SQLITE_DEBUG_OS_TRACE
+# define SQLITE_DEBUG_OS_TRACE 0
+# endif
+ int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE;
+# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X
+#else
+# define OSTRACE(X)
+#endif
+
/*
** Macros for performance tracing. Normally turned off. Only works
** on i486 hardware.
@@ -26927,14 +23627,6 @@ SQLITE_API int sqlite3_open_file_count = 0;
#endif
/*
-** Explicitly call the 64-bit version of lseek() on Android. Otherwise, lseek()
-** is the 32-bit version, even if _FILE_OFFSET_BITS=64 is defined.
-*/
-#ifdef __ANDROID__
-# define lseek lseek64
-#endif
-
-/*
** Different Unix systems declare open() in different ways. Some use
** open(const char*,int,mode_t). Others use open(const char*,int,...).
** The difference is important when using a pointer to the function.
@@ -26952,16 +23644,11 @@ static int posixOpen(const char *zFile, int flags, int mode){
** we are not running as root.
*/
static int posixFchown(int fd, uid_t uid, gid_t gid){
-#if OS_VXWORKS
- return 0;
-#else
return geteuid() ? 0 : fchown(fd,uid,gid);
-#endif
}
/* Forward reference */
static int openDirectory(const char*, int*);
-static int unixGetpagesize(void);
/*
** Many system calls are accessed through pointer-to-functions so that
@@ -27083,9 +23770,6 @@ static struct unix_syscall {
{ "mremap", (sqlite3_syscall_ptr)0, 0 },
#endif
#define osMremap ((void*(*)(void*,size_t,size_t,int,...))aSyscall[23].pCurrent)
- { "getpagesize", (sqlite3_syscall_ptr)unixGetpagesize, 0 },
-#define osGetpagesize ((int(*)(void))aSyscall[24].pCurrent)
-
#endif
}; /* End of the overrideable system calls */
@@ -27251,22 +23935,22 @@ static int robust_open(const char *z, int f, mode_t m){
** unixEnterLeave()
*/
static void unixEnterMutex(void){
- sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
}
static void unixLeaveMutex(void){
- sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
}
#ifdef SQLITE_DEBUG
static int unixMutexHeld(void) {
- return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
+ return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
}
#endif
-#ifdef SQLITE_HAVE_OS_TRACE
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
/*
** Helper function for printing out trace information from debugging
-** binaries. This returns the string representation of the supplied
+** binaries. This returns the string representation of the supplied
** integer lock-type.
*/
static const char *azFileLock(int eFileLock){
@@ -27343,22 +24027,9 @@ static int lockTrace(int fd, int op, struct flock *p){
/*
** Retry ftruncate() calls that fail due to EINTR
-**
-** All calls to ftruncate() within this file should be made through
-** this wrapper. On the Android platform, bypassing the logic below
-** could lead to a corrupt database.
*/
static int robust_ftruncate(int h, sqlite3_int64 sz){
int rc;
-#ifdef __ANDROID__
- /* On Android, ftruncate() always uses 32-bit offsets, even if
- ** _FILE_OFFSET_BITS=64 is defined. This means it is unsafe to attempt to
- ** truncate a file to any size larger than 2GiB. Silently ignore any
- ** such attempts. */
- if( sz>(sqlite3_int64)0x7FFFFFFF ){
- rc = SQLITE_OK;
- }else
-#endif
do{ rc = osFtruncate(h,sz); }while( rc<0 && errno==EINTR );
return rc;
}
@@ -27412,6 +24083,16 @@ static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) {
case EPERM:
return SQLITE_PERM;
+ /* EDEADLK is only possible if a call to fcntl(F_SETLKW) is made. And
+ ** this module never makes such a call. And the code in SQLite itself
+ ** asserts that SQLITE_IOERR_BLOCKED is never returned. For these reasons
+ ** this case is also commented out. If the system does set errno to EDEADLK,
+ ** the default SQLITE_IOERR_XXX code will be returned. */
+#if 0
+ case EDEADLK:
+ return SQLITE_IOERR_BLOCKED;
+#endif
+
#if EOPNOTSUPP!=ENOTSUP
case EOPNOTSUPP:
/* something went terribly awry, unless during file system support
@@ -27526,7 +24207,7 @@ static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){
assert( zAbsoluteName[0]=='/' );
n = (int)strlen(zAbsoluteName);
- pNew = sqlite3_malloc64( sizeof(*pNew) + (n+1) );
+ pNew = sqlite3_malloc( sizeof(*pNew) + (n+1) );
if( pNew==0 ) return 0;
pNew->zCanonicalName = (char*)&pNew[1];
memcpy(pNew->zCanonicalName, zAbsoluteName, n+1);
@@ -27806,14 +24487,6 @@ static void robust_close(unixFile *pFile, int h, int lineno){
}
/*
-** Set the pFile->lastErrno. Do this in a subroutine as that provides
-** a convenient place to set a breakpoint.
-*/
-static void storeLastErrno(unixFile *pFile, int error){
- pFile->lastErrno = error;
-}
-
-/*
** Close all file descriptors accumulated in the unixInodeInfo->pUnused list.
*/
static void closePendingFds(unixFile *pFile){
@@ -27886,7 +24559,7 @@ static int findInodeInfo(
fd = pFile->h;
rc = osFstat(fd, &statbuf);
if( rc!=0 ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
#ifdef EOVERFLOW
if( pFile->lastErrno==EOVERFLOW ) return SQLITE_NOLFS;
#endif
@@ -27907,12 +24580,12 @@ static int findInodeInfo(
if( statbuf.st_size==0 && (pFile->fsFlags & SQLITE_FSFLAGS_IS_MSDOS)!=0 ){
do{ rc = osWrite(fd, "S", 1); }while( rc<0 && errno==EINTR );
if( rc!=1 ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return SQLITE_IOERR;
}
rc = osFstat(fd, &statbuf);
if( rc!=0 ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return SQLITE_IOERR;
}
}
@@ -27930,7 +24603,7 @@ static int findInodeInfo(
pInode = pInode->pNext;
}
if( pInode==0 ){
- pInode = sqlite3_malloc64( sizeof(*pInode) );
+ pInode = sqlite3_malloc( sizeof(*pInode) );
if( pInode==0 ){
return SQLITE_NOMEM;
}
@@ -27948,19 +24621,6 @@ static int findInodeInfo(
return SQLITE_OK;
}
-/*
-** Return TRUE if pFile has been renamed or unlinked since it was first opened.
-*/
-static int fileHasMoved(unixFile *pFile){
-#if OS_VXWORKS
- return pFile->pInode!=0 && pFile->pId!=pFile->pInode->fileId.pId;
-#else
- struct stat buf;
- return pFile->pInode!=0 &&
- (osStat(pFile->zPath, &buf)!=0 || buf.st_ino!=pFile->pInode->fileId.ino);
-#endif
-}
-
/*
** Check a unixFile that is a database. Verify the following:
@@ -27995,7 +24655,10 @@ static void verifyDbFile(unixFile *pFile){
pFile->ctrlFlags |= UNIXFILE_WARNED;
return;
}
- if( fileHasMoved(pFile) ){
+ if( pFile->pInode!=0
+ && ((rc = osStat(pFile->zPath, &buf))!=0
+ || buf.st_ino!=pFile->pInode->fileId.ino)
+ ){
sqlite3_log(SQLITE_WARNING, "file renamed while open: %s", pFile->zPath);
pFile->ctrlFlags |= UNIXFILE_WARNED;
return;
@@ -28035,7 +24698,7 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){
lock.l_type = F_WRLCK;
if( osFcntl(pFile->h, F_GETLK, &lock) ){
rc = SQLITE_IOERR_CHECKRESERVEDLOCK;
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
} else if( lock.l_type!=F_UNLCK ){
reserved = 1;
}
@@ -28168,8 +24831,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
assert( pFile );
OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h,
azFileLock(eFileLock), azFileLock(pFile->eFileLock),
- azFileLock(pFile->pInode->eFileLock), pFile->pInode->nShared,
- osGetpid(0)));
+ azFileLock(pFile->pInode->eFileLock), pFile->pInode->nShared , getpid()));
/* If there is already a lock of this type or more restrictive on the
** unixFile, do nothing. Don't use the end_lock: exit path, as
@@ -28236,7 +24898,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
tErrno = errno;
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
if( rc!=SQLITE_BUSY ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
goto end_lock;
}
@@ -28271,7 +24933,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
if( rc ){
if( rc!=SQLITE_BUSY ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
goto end_lock;
}else{
@@ -28304,7 +24966,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
tErrno = errno;
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
if( rc!=SQLITE_BUSY ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
}
}
@@ -28377,7 +25039,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
assert( pFile );
OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, eFileLock,
pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared,
- osGetpid(0)));
+ getpid()));
assert( eFileLock<=SHARED_LOCK );
if( pFile->eFileLock<=eFileLock ){
@@ -28411,6 +25073,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
** 4: [RRRR.]
*/
if( eFileLock==SHARED_LOCK ){
+
#if !defined(__APPLE__) || !SQLITE_ENABLE_LOCKING_STYLE
(void)handleNFSUnlock;
assert( handleNFSUnlock==0 );
@@ -28428,7 +25091,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
tErrno = errno;
rc = SQLITE_IOERR_UNLOCK;
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
goto end_unlock;
}
@@ -28440,7 +25103,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
tErrno = errno;
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_RDLOCK);
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
goto end_unlock;
}
@@ -28452,7 +25115,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
tErrno = errno;
rc = SQLITE_IOERR_UNLOCK;
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
goto end_unlock;
}
@@ -28471,7 +25134,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
** SQLITE_BUSY would confuse the upper layer (in practice it causes
** an assert to fail). */
rc = SQLITE_IOERR_RDLOCK;
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
goto end_unlock;
}
}
@@ -28484,7 +25147,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
pInode->eFileLock = SHARED_LOCK;
}else{
rc = SQLITE_IOERR_UNLOCK;
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
goto end_unlock;
}
}
@@ -28502,7 +25165,7 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
pInode->eFileLock = NO_LOCK;
}else{
rc = SQLITE_IOERR_UNLOCK;
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
pInode->eFileLock = NO_LOCK;
pFile->eFileLock = NO_LOCK;
}
@@ -28572,13 +25235,6 @@ static int closeUnixFile(sqlite3_file *id){
pFile->pId = 0;
}
#endif
-#ifdef SQLITE_UNLINK_AFTER_CLOSE
- if( pFile->ctrlFlags & UNIXFILE_DELETE ){
- osUnlink(pFile->zPath);
- sqlite3_free(*(char**)&pFile->zPath);
- pFile->zPath = 0;
- }
-#endif
OSTRACE(("CLOSE %-3d\n", pFile->h));
OpenCounter(-1);
sqlite3_free(pFile->pUnused);
@@ -28777,7 +25433,7 @@ static int dotlockLock(sqlite3_file *id, int eFileLock) {
} else {
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
}
return rc;
@@ -28804,7 +25460,7 @@ static int dotlockUnlock(sqlite3_file *id, int eFileLock) {
assert( pFile );
OSTRACE(("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, eFileLock,
- pFile->eFileLock, osGetpid(0)));
+ pFile->eFileLock, getpid()));
assert( eFileLock<=SHARED_LOCK );
/* no-op if possible */
@@ -28831,7 +25487,7 @@ static int dotlockUnlock(sqlite3_file *id, int eFileLock) {
rc = SQLITE_IOERR_UNLOCK;
}
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
return rc;
}
@@ -28867,9 +25523,10 @@ static int dotlockClose(sqlite3_file *id) {
** still works when you do this, but concurrency is reduced since
** only a single process can be reading the database at a time.
**
-** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off
+** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off or if
+** compiling for VXWORKS.
*/
-#if SQLITE_ENABLE_LOCKING_STYLE
+#if SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS
/*
** Retry flock() calls that fail with EINTR
@@ -28917,7 +25574,7 @@ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){
/* unlock failed with an error */
lrc = SQLITE_IOERR_UNLOCK;
if( IS_LOCK_ERROR(lrc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
rc = lrc;
}
}
@@ -28927,7 +25584,7 @@ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){
/* someone else might have it reserved */
lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
if( IS_LOCK_ERROR(lrc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
rc = lrc;
}
}
@@ -28993,7 +25650,7 @@ static int flockLock(sqlite3_file *id, int eFileLock) {
/* didn't get, must be busy */
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
} else {
/* got it, set the type and return ok */
@@ -29022,7 +25679,7 @@ static int flockUnlock(sqlite3_file *id, int eFileLock) {
assert( pFile );
OSTRACE(("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, eFileLock,
- pFile->eFileLock, osGetpid(0)));
+ pFile->eFileLock, getpid()));
assert( eFileLock<=SHARED_LOCK );
/* no-op if possible */
@@ -29083,7 +25740,7 @@ static int flockClose(sqlite3_file *id) {
** to a non-zero value otherwise *pResOut is set to zero. The return value
** is set to SQLITE_OK unless an I/O error occurs during lock checking.
*/
-static int semXCheckReservedLock(sqlite3_file *id, int *pResOut) {
+static int semCheckReservedLock(sqlite3_file *id, int *pResOut) {
int rc = SQLITE_OK;
int reserved = 0;
unixFile *pFile = (unixFile*)id;
@@ -29100,12 +25757,13 @@ static int semXCheckReservedLock(sqlite3_file *id, int *pResOut) {
/* Otherwise see if some other process holds it. */
if( !reserved ){
sem_t *pSem = pFile->pInode->pSem;
+ struct stat statBuf;
if( sem_trywait(pSem)==-1 ){
int tErrno = errno;
if( EAGAIN != tErrno ){
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK);
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
} else {
/* someone else has the lock when we are in NO_LOCK */
reserved = (pFile->eFileLock < SHARED_LOCK);
@@ -29150,8 +25808,9 @@ static int semXCheckReservedLock(sqlite3_file *id, int *pResOut) {
** This routine will only increase a lock. Use the sqlite3OsUnlock()
** routine to lower a locking level.
*/
-static int semXLock(sqlite3_file *id, int eFileLock) {
+static int semLock(sqlite3_file *id, int eFileLock) {
unixFile *pFile = (unixFile*)id;
+ int fd;
sem_t *pSem = pFile->pInode->pSem;
int rc = SQLITE_OK;
@@ -29183,14 +25842,14 @@ static int semXLock(sqlite3_file *id, int eFileLock) {
** If the locking level of the file descriptor is already at or below
** the requested locking level, this routine is a no-op.
*/
-static int semXUnlock(sqlite3_file *id, int eFileLock) {
+static int semUnlock(sqlite3_file *id, int eFileLock) {
unixFile *pFile = (unixFile*)id;
sem_t *pSem = pFile->pInode->pSem;
assert( pFile );
assert( pSem );
OSTRACE(("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, eFileLock,
- pFile->eFileLock, osGetpid(0)));
+ pFile->eFileLock, getpid()));
assert( eFileLock<=SHARED_LOCK );
/* no-op if possible */
@@ -29209,7 +25868,7 @@ static int semXUnlock(sqlite3_file *id, int eFileLock) {
int rc, tErrno = errno;
rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK);
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
return rc;
}
@@ -29220,10 +25879,10 @@ static int semXUnlock(sqlite3_file *id, int eFileLock) {
/*
** Close a file.
*/
-static int semXClose(sqlite3_file *id) {
+static int semClose(sqlite3_file *id) {
if( id ){
unixFile *pFile = (unixFile*)id;
- semXUnlock(id, NO_LOCK);
+ semUnlock(id, NO_LOCK);
assert( pFile );
unixEnterMutex();
releaseInodeInfo(pFile);
@@ -29311,7 +25970,7 @@ static int afpSetLock(
setLockFlag ? SQLITE_IOERR_LOCK : SQLITE_IOERR_UNLOCK);
#endif /* SQLITE_IGNORE_AFP_LOCK_ERRORS */
if( IS_LOCK_ERROR(rc) ){
- storeLastErrno(pFile, tErrno);
+ pFile->lastErrno = tErrno;
}
return rc;
} else {
@@ -29404,7 +26063,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
assert( pFile );
OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h,
azFileLock(eFileLock), azFileLock(pFile->eFileLock),
- azFileLock(pInode->eFileLock), pInode->nShared , osGetpid(0)));
+ azFileLock(pInode->eFileLock), pInode->nShared , getpid()));
/* If there is already a lock of this type or more restrictive on the
** unixFile, do nothing. Don't use the afp_end_lock: exit path, as
@@ -29494,7 +26153,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
lrc2 = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0);
if( IS_LOCK_ERROR(lrc1) ) {
- storeLastErrno(pFile, lrc1Errno);
+ pFile->lastErrno = lrc1Errno;
rc = lrc1;
goto afp_end_lock;
} else if( IS_LOCK_ERROR(lrc2) ){
@@ -29590,7 +26249,7 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
assert( pFile );
OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock,
pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared,
- osGetpid(0)));
+ getpid()));
assert( eFileLock<=SHARED_LOCK );
if( pFile->eFileLock<=eFileLock ){
@@ -29753,7 +26412,7 @@ static int nfsUnlock(sqlite3_file *id, int eFileLock){
** NB: If you define USE_PREAD or USE_PREAD64, then it might also
** be necessary to define _XOPEN_SOURCE to be 500. This varies from
** one system to another. Since SQLite does not define USE_PREAD
-** in any form by default, we will not attempt to define _XOPEN_SOURCE.
+** in any form by default, we will not attempt to define _XOPEN_SOURCE.
** See tickets #2741 and #2681.
**
** To avoid stomping the errno value on a failed read the lastErrno value
@@ -29768,6 +26427,7 @@ static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){
TIMER_START;
assert( cnt==(cnt&0x1ffff) );
assert( id->h>2 );
+ cnt &= 0x1ffff;
do{
#if defined(USE_PREAD)
got = osPread(id->h, pBuf, cnt, offset);
@@ -29780,9 +26440,9 @@ static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){
SimulateIOError( newOffset-- );
if( newOffset!=offset ){
if( newOffset == -1 ){
- storeLastErrno((unixFile*)id, errno);
+ ((unixFile*)id)->lastErrno = errno;
}else{
- storeLastErrno((unixFile*)id, 0);
+ ((unixFile*)id)->lastErrno = 0;
}
return -1;
}
@@ -29792,7 +26452,7 @@ static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){
if( got<0 ){
if( errno==EINTR ){ got = 1; continue; }
prior = 0;
- storeLastErrno((unixFile*)id, errno);
+ ((unixFile*)id)->lastErrno = errno;
break;
}else if( got>0 ){
cnt -= got;
@@ -29857,7 +26517,7 @@ static int unixRead(
/* lastErrno set by seekAndRead */
return SQLITE_IOERR_READ;
}else{
- storeLastErrno(pFile, 0); /* not a system error */
+ pFile->lastErrno = 0; /* not a system error */
/* Unread parts of the buffer must be zero-filled */
memset(&((char*)pBuf)[got], 0, amt-got);
return SQLITE_IOERR_SHORT_READ;
@@ -29886,9 +26546,9 @@ static int seekAndWriteFd(
TIMER_START;
#if defined(USE_PREAD)
- do{ rc = (int)osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR );
+ do{ rc = osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR );
#elif defined(USE_PREAD64)
- do{ rc = (int)osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR);
+ do{ rc = osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR);
#else
do{
i64 iSeek = lseek(fd, iOff, SEEK_SET);
@@ -29984,8 +26644,8 @@ static int unixWrite(
}
}
#endif
-
- while( (wrote = seekAndWrite(pFile, offset, pBuf, amt))<amt && wrote>0 ){
+
+ while( amt>0 && (wrote = seekAndWrite(pFile, offset, pBuf, amt))>0 ){
amt -= wrote;
offset += wrote;
pBuf = &((char*)pBuf)[wrote];
@@ -29993,12 +26653,12 @@ static int unixWrite(
SimulateIOError(( wrote=(-1), amt=1 ));
SimulateDiskfullError(( wrote=0, amt=1 ));
- if( amt>wrote ){
+ if( amt>0 ){
if( wrote<0 && pFile->lastErrno!=ENOSPC ){
/* lastErrno set by seekAndWrite */
return SQLITE_IOERR_WRITE;
}else{
- storeLastErrno(pFile, 0); /* not a system error */
+ pFile->lastErrno = 0; /* not a system error */
return SQLITE_FULL;
}
}
@@ -30019,9 +26679,9 @@ SQLITE_API int sqlite3_fullsync_count = 0;
** We do not trust systems to provide a working fdatasync(). Some do.
** Others do not. To be safe, we will stick with the (slightly slower)
** fsync(). If you know that your system does support fdatasync() correctly,
-** then simply compile with -Dfdatasync=fdatasync or -DHAVE_FDATASYNC
+** then simply compile with -Dfdatasync=fdatasync
*/
-#if !defined(fdatasync) && !HAVE_FDATASYNC
+#if !defined(fdatasync)
# define fdatasync fsync
#endif
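
The hunk above restores the conservative default: unless the build explicitly trusts fdatasync() (for example by compiling with -Dfdatasync=fdatasync), the name is mapped to fsync(). A tiny standalone illustration of the same trick; sync_file() is an invented wrapper name, not part of the patch.

#include <unistd.h>

/* Unless something has already mapped fdatasync to a trusted call,
** fall back to the slower but safer fsync(). */
#if !defined(fdatasync)
# define fdatasync fsync
#endif

static int sync_file(int fd){
  return fdatasync(fd);   /* resolves to fsync(fd) when the fallback is used */
}
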
@@ -30207,7 +26867,7 @@ static int unixSync(sqlite3_file *id, int flags){
rc = full_fsync(pFile->h, isFullsync, isDataOnly);
SimulateIOError( rc=1 );
if( rc ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath);
}
@@ -30249,9 +26909,9 @@ static int unixTruncate(sqlite3_file *id, i64 nByte){
nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk;
}
- rc = robust_ftruncate(pFile->h, nByte);
+ rc = robust_ftruncate(pFile->h, (off_t)nByte);
if( rc ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
}else{
#ifdef SQLITE_DEBUG
@@ -30291,7 +26951,7 @@ static int unixFileSize(sqlite3_file *id, i64 *pSize){
rc = osFstat(((unixFile*)id)->h, &buf);
SimulateIOError( rc=1 );
if( rc!=0 ){
- storeLastErrno((unixFile*)id, errno);
+ ((unixFile*)id)->lastErrno = errno;
return SQLITE_IOERR_FSTAT;
}
*pSize = buf.st_size;
@@ -30327,9 +26987,7 @@ static int fcntlSizeHint(unixFile *pFile, i64 nByte){
i64 nSize; /* Required file size */
struct stat buf; /* Used to hold return values of fstat() */
- if( osFstat(pFile->h, &buf) ){
- return SQLITE_IOERR_FSTAT;
- }
+ if( osFstat(pFile->h, &buf) ) return SQLITE_IOERR_FSTAT;
nSize = ((nByte+pFile->szChunk-1) / pFile->szChunk) * pFile->szChunk;
if( nSize>(i64)buf.st_size ){
@@ -30344,28 +27002,24 @@ static int fcntlSizeHint(unixFile *pFile, i64 nByte){
}while( err==EINTR );
if( err ) return SQLITE_IOERR_WRITE;
#else
- /* If the OS does not have posix_fallocate(), fake it. Write a
- ** single byte to the last byte in each block that falls entirely
- ** within the extended region. Then, if required, a single byte
- ** at offset (nSize-1), to set the size of the file correctly.
- ** This is a similar technique to that used by glibc on systems
- ** that do not have a real fallocate() call.
+ /* If the OS does not have posix_fallocate(), fake it. First use
+ ** ftruncate() to set the file size, then write a single byte to
+ ** the last byte in each block within the extended region. This
+ ** is the same technique used by glibc to implement posix_fallocate()
+ ** on systems that do not have a real fallocate() system call.
*/
int nBlk = buf.st_blksize; /* File-system block size */
- int nWrite = 0; /* Number of bytes written by seekAndWrite */
i64 iWrite; /* Next offset to write to */
- iWrite = ((buf.st_size + 2*nBlk - 1)/nBlk)*nBlk-1;
- assert( iWrite>=buf.st_size );
- assert( (iWrite/nBlk)==((buf.st_size+nBlk-1)/nBlk) );
- assert( ((iWrite+1)%nBlk)==0 );
- for(/*no-op*/; iWrite<nSize; iWrite+=nBlk ){
- nWrite = seekAndWrite(pFile, iWrite, "", 1);
- if( nWrite!=1 ) return SQLITE_IOERR_WRITE;
+ if( robust_ftruncate(pFile->h, nSize) ){
+ pFile->lastErrno = errno;
+ return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
}
- if( nWrite==0 || (nSize%nBlk) ){
- nWrite = seekAndWrite(pFile, nSize-1, "", 1);
+ iWrite = ((buf.st_size + 2*nBlk - 1)/nBlk)*nBlk-1;
+ while( iWrite<nSize ){
+ int nWrite = seekAndWrite(pFile, iWrite, "", 1);
if( nWrite!=1 ) return SQLITE_IOERR_WRITE;
+ iWrite += nBlk;
}
#endif
}
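
The restored comment and loop above describe how posix_fallocate() is faked when it is unavailable: ftruncate() first sets the logical size, then one byte is written at the last byte of every filesystem block inside the newly extended region so that the blocks are actually allocated. A standalone sketch of that technique under a hypothetical fake_fallocate() name:

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Grow the file on descriptor fd to nSize bytes, allocating the new blocks. */
static int fake_fallocate(int fd, long long nSize){
  struct stat st;
  long long nBlk, iWrite;
  if( fstat(fd, &st)!=0 ) return -1;
  if( st.st_size>=(off_t)nSize ) return 0;          /* already big enough */
  nBlk = st.st_blksize;                             /* filesystem block size */
  if( ftruncate(fd, (off_t)nSize)!=0 ) return -1;   /* set the logical size */
  /* Last byte of the first whole block past the old end of the file. */
  iWrite = ((st.st_size + 2*nBlk - 1)/nBlk)*nBlk - 1;
  while( iWrite<nSize ){
    if( pwrite(fd, "", 1, (off_t)iWrite)!=1 ) return -1;
    iWrite += nBlk;                                 /* touch one byte per block */
  }
  return 0;
}
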
@@ -30376,7 +27030,7 @@ static int fcntlSizeHint(unixFile *pFile, i64 nByte){
int rc;
if( pFile->szChunk<=0 ){
if( robust_ftruncate(pFile->h, nByte) ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
}
}
@@ -30390,7 +27044,7 @@ static int fcntlSizeHint(unixFile *pFile, i64 nByte){
}
/*
-** If *pArg is initially negative then this is a query. Set *pArg to
+** If *pArg is initially negative then this is a query. Set *pArg to
** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set.
**
** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
@@ -30414,15 +27068,11 @@ static int unixGetTempname(int nBuf, char *zBuf);
static int unixFileControl(sqlite3_file *id, int op, void *pArg){
unixFile *pFile = (unixFile*)id;
switch( op ){
- case SQLITE_FCNTL_WAL_BLOCK: {
- /* pFile->ctrlFlags |= UNIXFILE_BLOCK; // Deferred feature */
- return SQLITE_OK;
- }
case SQLITE_FCNTL_LOCKSTATE: {
*(int*)pArg = pFile->eFileLock;
return SQLITE_OK;
}
- case SQLITE_FCNTL_LAST_ERRNO: {
+ case SQLITE_LAST_ERRNO: {
*(int*)pArg = pFile->lastErrno;
return SQLITE_OK;
}
@@ -30450,17 +27100,13 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
return SQLITE_OK;
}
case SQLITE_FCNTL_TEMPFILENAME: {
- char *zTFile = sqlite3_malloc64( pFile->pVfs->mxPathname );
+ char *zTFile = sqlite3_malloc( pFile->pVfs->mxPathname );
if( zTFile ){
unixGetTempname(pFile->pVfs->mxPathname, zTFile);
*(char**)pArg = zTFile;
}
return SQLITE_OK;
}
- case SQLITE_FCNTL_HAS_MOVED: {
- *(int*)pArg = fileHasMoved(pFile);
- return SQLITE_OK;
- }
#if SQLITE_MAX_MMAP_SIZE>0
case SQLITE_FCNTL_MMAP_SIZE: {
i64 newLimit = *(i64*)pArg;
@@ -30491,8 +27137,8 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
}
#endif
#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
- case SQLITE_FCNTL_SET_LOCKPROXYFILE:
- case SQLITE_FCNTL_GET_LOCKPROXYFILE: {
+ case SQLITE_SET_LOCKPROXYFILE:
+ case SQLITE_GET_LOCKPROXYFILE: {
return proxyFileControl(id,op,pArg);
}
#endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */
@@ -30601,7 +27247,7 @@ static int unixSectorSize(sqlite3_file *id){
** Return the device characteristics for the file.
**
** This VFS is set up to return SQLITE_IOCAP_POWERSAFE_OVERWRITE by default.
-** However, that choice is controversial since technically the underlying
+** However, that choice is controversial since technically the underlying
** file system does not always provide powersafe overwrites. (In other
** words, after a power-loss event, parts of the file that were never
** written might end up being altered.) However, non-PSOW behavior is very,
@@ -30623,28 +27269,9 @@ static int unixDeviceCharacteristics(sqlite3_file *id){
return rc;
}
-#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
-
-/*
-** Return the system page size.
-**
-** This function should not be called directly by other code in this file.
-** Instead, it should be called via macro osGetpagesize().
-*/
-static int unixGetpagesize(void){
-#if OS_VXWORKS
- return 1024;
-#elif defined(_BSD_SOURCE)
- return getpagesize();
-#else
- return (int)sysconf(_SC_PAGESIZE);
-#endif
-}
-
-#endif /* !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 */
-
#ifndef SQLITE_OMIT_WAL
+
/*
** Object used to represent a shared memory buffer.
**
@@ -30727,17 +27354,15 @@ struct unixShm {
** otherwise.
*/
static int unixShmSystemLock(
- unixFile *pFile, /* Open connection to the WAL file */
+ unixShmNode *pShmNode, /* Apply locks to this open shared-memory segment */
int lockType, /* F_UNLCK, F_RDLCK, or F_WRLCK */
int ofst, /* First byte of the locking range */
int n /* Number of bytes to lock */
){
- unixShmNode *pShmNode; /* Apply locks to this open shared-memory segment */
- struct flock f; /* The posix advisory locking structure */
- int rc = SQLITE_OK; /* Result code form fcntl() */
+ struct flock f; /* The posix advisory locking structure */
+  int rc = SQLITE_OK;   /* Result code from fcntl() */
/* Access to the unixShmNode object is serialized by the caller */
- pShmNode = pFile->pInode->pShmNode;
assert( sqlite3_mutex_held(pShmNode->mutex) || pShmNode->nRef==0 );
/* Shared locks never span more than one byte */
@@ -30747,7 +27372,6 @@ static int unixShmSystemLock(
assert( n>=1 && n<SQLITE_SHM_NLOCK );
if( pShmNode->h>=0 ){
- int lkType;
/* Initialize the locking parameters */
memset(&f, 0, sizeof(f));
f.l_type = lockType;
@@ -30755,17 +27379,15 @@ static int unixShmSystemLock(
f.l_start = ofst;
f.l_len = n;
- lkType = (pFile->ctrlFlags & UNIXFILE_BLOCK)!=0 ? F_SETLKW : F_SETLK;
- rc = osFcntl(pShmNode->h, lkType, &f);
+ rc = osFcntl(pShmNode->h, F_SETLK, &f);
rc = (rc!=(-1)) ? SQLITE_OK : SQLITE_BUSY;
- pFile->ctrlFlags &= ~UNIXFILE_BLOCK;
}
/* Update the global lock state and do debug tracing */
#ifdef SQLITE_DEBUG
{ u16 mask;
OSTRACE(("SHM-LOCK "));
- mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<<ofst);
+ mask = ofst>31 ? 0xffffffff : (1<<(ofst+n)) - (1<<ofst);
if( rc==SQLITE_OK ){
if( lockType==F_UNLCK ){
OSTRACE(("unlock %d ok", ofst));
@@ -30799,22 +27421,6 @@ static int unixShmSystemLock(
return rc;
}
-/*
-** Return the minimum number of 32KB shm regions that should be mapped at
-** a time, assuming that each mapping must be an integer multiple of the
-** current system page-size.
-**
-** Usually, this is 1. The exception seems to be systems that are configured
-** to use 64KB pages - in this case each mapping must cover at least two
-** shm regions.
-*/
-static int unixShmRegionPerMap(void){
- int shmsz = 32*1024; /* SHM region size */
- int pgsz = osGetpagesize(); /* System page size */
- assert( ((pgsz-1)&pgsz)==0 ); /* Page size must be a power of 2 */
- if( pgsz<shmsz ) return 1;
- return pgsz/shmsz;
-}
/*
** Purge the unixShmNodeList list of all entries with unixShmNode.nRef==0.
@@ -30826,11 +27432,10 @@ static void unixShmPurge(unixFile *pFd){
unixShmNode *p = pFd->pInode->pShmNode;
assert( unixMutexHeld() );
if( p && p->nRef==0 ){
- int nShmPerMap = unixShmRegionPerMap();
int i;
assert( p->pInode==pFd->pInode );
sqlite3_mutex_free(p->mutex);
- for(i=0; i<p->nRegion; i+=nShmPerMap){
+ for(i=0; i<p->nRegion; i++){
if( p->h>=0 ){
osMunmap(p->apRegion[i], p->szRegion);
}else{
@@ -30891,7 +27496,7 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
int nShmFilename; /* Size of the SHM filename in bytes */
/* Allocate space for the new unixShm object. */
- p = sqlite3_malloc64( sizeof(*p) );
+ p = sqlite3_malloc( sizeof(*p) );
if( p==0 ) return SQLITE_NOMEM;
memset(p, 0, sizeof(*p));
assert( pDbFd->pShm==0 );
@@ -30904,9 +27509,6 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
pShmNode = pInode->pShmNode;
if( pShmNode==0 ){
struct stat sStat; /* fstat() info for database file */
-#ifndef SQLITE_SHM_DIRECTORY
- const char *zBasePath = pDbFd->zPath;
-#endif
/* Call fstat() to figure out the permissions on the database file. If
** a new *-shm file is created, an attempt will be made to create it
@@ -30920,9 +27522,9 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
#ifdef SQLITE_SHM_DIRECTORY
nShmFilename = sizeof(SQLITE_SHM_DIRECTORY) + 31;
#else
- nShmFilename = 6 + (int)strlen(zBasePath);
+ nShmFilename = 6 + (int)strlen(pDbFd->zPath);
#endif
- pShmNode = sqlite3_malloc64( sizeof(*pShmNode) + nShmFilename );
+ pShmNode = sqlite3_malloc( sizeof(*pShmNode) + nShmFilename );
if( pShmNode==0 ){
rc = SQLITE_NOMEM;
goto shm_open_err;
@@ -30934,7 +27536,7 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
SQLITE_SHM_DIRECTORY "/sqlite-shm-%x-%x",
(u32)sStat.st_ino, (u32)sStat.st_dev);
#else
- sqlite3_snprintf(nShmFilename, zShmFilename, "%s-shm", zBasePath);
+ sqlite3_snprintf(nShmFilename, zShmFilename, "%s-shm", pDbFd->zPath);
sqlite3FileSuffix3(pDbFd->zPath, zShmFilename);
#endif
pShmNode->h = -1;
@@ -30968,13 +27570,13 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
** If not, truncate the file to zero length.
*/
rc = SQLITE_OK;
- if( unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1)==SQLITE_OK ){
+ if( unixShmSystemLock(pShmNode, F_WRLCK, UNIX_SHM_DMS, 1)==SQLITE_OK ){
if( robust_ftruncate(pShmNode->h, 0) ){
rc = unixLogError(SQLITE_IOERR_SHMOPEN, "ftruncate", zShmFilename);
}
}
if( rc==SQLITE_OK ){
- rc = unixShmSystemLock(pDbFd, F_RDLCK, UNIX_SHM_DMS, 1);
+ rc = unixShmSystemLock(pShmNode, F_RDLCK, UNIX_SHM_DMS, 1);
}
if( rc ) goto shm_open_err;
}
@@ -31040,8 +27642,6 @@ static int unixShmMap(
unixShm *p;
unixShmNode *pShmNode;
int rc = SQLITE_OK;
- int nShmPerMap = unixShmRegionPerMap();
- int nReqRegion;
/* If the shared-memory file has not yet been opened, open it now. */
if( pDbFd->pShm==0 ){
@@ -31057,12 +27657,9 @@ static int unixShmMap(
assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 );
assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 );
- /* Minimum number of regions required to be mapped. */
- nReqRegion = ((iRegion+nShmPerMap) / nShmPerMap) * nShmPerMap;
-
- if( pShmNode->nRegion<nReqRegion ){
+ if( pShmNode->nRegion<=iRegion ){
char **apNew; /* New apRegion[] array */
- int nByte = nReqRegion*szRegion; /* Minimum required file size */
+ int nByte = (iRegion+1)*szRegion; /* Minimum required file size */
struct stat sStat; /* Used by fstat() */
pShmNode->szRegion = szRegion;
@@ -31111,19 +27708,17 @@ static int unixShmMap(
/* Map the requested memory region into this processes address space. */
apNew = (char **)sqlite3_realloc(
- pShmNode->apRegion, nReqRegion*sizeof(char *)
+ pShmNode->apRegion, (iRegion+1)*sizeof(char *)
);
if( !apNew ){
rc = SQLITE_IOERR_NOMEM;
goto shmpage_out;
}
pShmNode->apRegion = apNew;
- while( pShmNode->nRegion<nReqRegion ){
- int nMap = szRegion*nShmPerMap;
- int i;
+ while(pShmNode->nRegion<=iRegion){
void *pMem;
if( pShmNode->h>=0 ){
- pMem = osMmap(0, nMap,
+ pMem = osMmap(0, szRegion,
pShmNode->isReadonly ? PROT_READ : PROT_READ|PROT_WRITE,
MAP_SHARED, pShmNode->h, szRegion*(i64)pShmNode->nRegion
);
@@ -31132,18 +27727,15 @@ static int unixShmMap(
goto shmpage_out;
}
}else{
- pMem = sqlite3_malloc64(szRegion);
+ pMem = sqlite3_malloc(szRegion);
if( pMem==0 ){
rc = SQLITE_NOMEM;
goto shmpage_out;
}
memset(pMem, 0, szRegion);
}
-
- for(i=0; i<nShmPerMap; i++){
- pShmNode->apRegion[pShmNode->nRegion+i] = &((char*)pMem)[szRegion*i];
- }
- pShmNode->nRegion += nShmPerMap;
+ pShmNode->apRegion[pShmNode->nRegion] = pMem;
+ pShmNode->nRegion++;
}
}
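
The loop above maps shared-memory regions one at a time with mmap(), falling back to heap memory when there is no file descriptor. For reference, a minimal sketch of mapping the iRegion'th fixed-size region of a shm file that has already been extended to the required length; the helper name and parameters are illustrative only.

#include <sys/mman.h>
#include <sys/types.h>
#include <stddef.h>

/* Map region number iRegion (each szRegion bytes long) of the file on fd.
** The file must already be at least (iRegion+1)*szRegion bytes in size.
** Returns NULL if the mapping fails. */
static void *map_shm_region(int fd, int szRegion, int iRegion, int isReadonly){
  void *p = mmap(0, (size_t)szRegion,
                 isReadonly ? PROT_READ : PROT_READ|PROT_WRITE,
                 MAP_SHARED, fd, (off_t)szRegion*(off_t)iRegion);
  return p==MAP_FAILED ? NULL : p;
}
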
@@ -31206,7 +27798,7 @@ static int unixShmLock(
/* Unlock the system-level locks */
if( (mask & allMask)==0 ){
- rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+ rc = unixShmSystemLock(pShmNode, F_UNLCK, ofst+UNIX_SHM_BASE, n);
}else{
rc = SQLITE_OK;
}
@@ -31234,7 +27826,7 @@ static int unixShmLock(
/* Get shared locks at the system level, if necessary */
if( rc==SQLITE_OK ){
if( (allShared & mask)==0 ){
- rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
+ rc = unixShmSystemLock(pShmNode, F_RDLCK, ofst+UNIX_SHM_BASE, n);
}else{
rc = SQLITE_OK;
}
@@ -31259,7 +27851,7 @@ static int unixShmLock(
** also mark the local connection as being locked.
*/
if( rc==SQLITE_OK ){
- rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
+ rc = unixShmSystemLock(pShmNode, F_WRLCK, ofst+UNIX_SHM_BASE, n);
if( rc==SQLITE_OK ){
assert( (p->sharedMask & mask)==0 );
p->exclMask |= mask;
@@ -31268,7 +27860,7 @@ static int unixShmLock(
}
sqlite3_mutex_leave(pShmNode->mutex);
OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n",
- p->id, osGetpid(0), p->sharedMask, p->exclMask));
+ p->id, getpid(), p->sharedMask, p->exclMask));
return rc;
}
@@ -31282,8 +27874,7 @@ static void unixShmBarrier(
sqlite3_file *fd /* Database file holding the shared memory */
){
UNUSED_PARAMETER(fd);
- sqlite3MemoryBarrier(); /* compiler-defined memory barrier */
- unixEnterMutex(); /* Also mutex, for redundancy */
+ unixEnterMutex();
unixLeaveMutex();
}
@@ -31328,9 +27919,7 @@ static int unixShmUnmap(
assert( pShmNode->nRef>0 );
pShmNode->nRef--;
if( pShmNode->nRef==0 ){
- if( deleteFlag && pShmNode->h>=0 ){
- osUnlink(pShmNode->zFilename);
- }
+ if( deleteFlag && pShmNode->h>=0 ) osUnlink(pShmNode->zFilename);
unixShmPurge(pDbFd);
}
unixLeaveMutex();
@@ -31361,6 +27950,19 @@ static void unixUnmapfile(unixFile *pFd){
}
/*
+** Return the system page size.
+*/
+static int unixGetPagesize(void){
+#if HAVE_MREMAP
+ return 512;
+#elif defined(_BSD_SOURCE)
+ return getpagesize();
+#else
+ return (int)sysconf(_SC_PAGESIZE);
+#endif
+}
+
+/*
** Attempt to set the size of the memory mapping maintained by file
** descriptor pFd to nNew bytes. Any existing mapping is discarded.
**
@@ -31396,12 +27998,8 @@ static void unixRemapfile(
if( (pFd->ctrlFlags & UNIXFILE_RDONLY)==0 ) flags |= PROT_WRITE;
if( pOrig ){
-#if HAVE_MREMAP
- i64 nReuse = pFd->mmapSize;
-#else
- const int szSyspage = osGetpagesize();
+ const int szSyspage = unixGetPagesize();
i64 nReuse = (pFd->mmapSize & ~(szSyspage-1));
-#endif
u8 *pReq = &pOrig[nReuse];
/* Unmap any pages of the existing mapping that cannot be reused. */
@@ -31540,10 +28138,10 @@ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
** may now be invalid and should be unmapped.
*/
static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){
-#if SQLITE_MAX_MMAP_SIZE>0
unixFile *pFd = (unixFile *)fd; /* The underlying database file */
UNUSED_PARAMETER(iOff);
+#if SQLITE_MAX_MMAP_SIZE>0
/* If p==0 (unmap the entire file) then there must be no outstanding
** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
** then there must be at least one outstanding. */
@@ -31559,10 +28157,6 @@ static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){
}
assert( pFd->nFetchOut>=0 );
-#else
- UNUSED_PARAMETER(fd);
- UNUSED_PARAMETER(p);
- UNUSED_PARAMETER(iOff);
#endif
return SQLITE_OK;
}
@@ -31586,7 +28180,7 @@ static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){
** looks at the filesystem type and tries to guess the best locking
** strategy from that.
**
-** For finder-function F, two objects are created:
+** For finder-function F, two objects are created:
**
** (1) The real finder-function named "FImpl()".
**
@@ -31607,7 +28201,7 @@ static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){
** * An I/O method finder function called FINDER that returns a pointer
** to the METHOD object in the previous bullet.
*/
-#define IOMETHODS(FINDER,METHOD,VERSION,CLOSE,LOCK,UNLOCK,CKLOCK,SHMMAP) \
+#define IOMETHODS(FINDER, METHOD, VERSION, CLOSE, LOCK, UNLOCK, CKLOCK) \
static const sqlite3_io_methods METHOD = { \
VERSION, /* iVersion */ \
CLOSE, /* xClose */ \
@@ -31622,7 +28216,7 @@ static const sqlite3_io_methods METHOD = { \
unixFileControl, /* xFileControl */ \
unixSectorSize, /* xSectorSize */ \
unixDeviceCharacteristics, /* xDeviceCapabilities */ \
- SHMMAP, /* xShmMap */ \
+ unixShmMap, /* xShmMap */ \
unixShmLock, /* xShmLock */ \
unixShmBarrier, /* xShmBarrier */ \
unixShmUnmap, /* xShmUnmap */ \
@@ -31648,18 +28242,16 @@ IOMETHODS(
unixClose, /* xClose method */
unixLock, /* xLock method */
unixUnlock, /* xUnlock method */
- unixCheckReservedLock, /* xCheckReservedLock method */
- unixShmMap /* xShmMap method */
+ unixCheckReservedLock /* xCheckReservedLock method */
)
IOMETHODS(
nolockIoFinder, /* Finder function name */
nolockIoMethods, /* sqlite3_io_methods object name */
- 3, /* shared memory is disabled */
+ 1, /* shared memory is disabled */
nolockClose, /* xClose method */
nolockLock, /* xLock method */
nolockUnlock, /* xUnlock method */
- nolockCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ nolockCheckReservedLock /* xCheckReservedLock method */
)
IOMETHODS(
dotlockIoFinder, /* Finder function name */
@@ -31668,11 +28260,10 @@ IOMETHODS(
dotlockClose, /* xClose method */
dotlockLock, /* xLock method */
dotlockUnlock, /* xUnlock method */
- dotlockCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ dotlockCheckReservedLock /* xCheckReservedLock method */
)
-#if SQLITE_ENABLE_LOCKING_STYLE
+#if SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS
IOMETHODS(
flockIoFinder, /* Finder function name */
flockIoMethods, /* sqlite3_io_methods object name */
@@ -31680,8 +28271,7 @@ IOMETHODS(
flockClose, /* xClose method */
flockLock, /* xLock method */
flockUnlock, /* xUnlock method */
- flockCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ flockCheckReservedLock /* xCheckReservedLock method */
)
#endif
@@ -31690,11 +28280,10 @@ IOMETHODS(
semIoFinder, /* Finder function name */
semIoMethods, /* sqlite3_io_methods object name */
1, /* shared memory is disabled */
- semXClose, /* xClose method */
- semXLock, /* xLock method */
- semXUnlock, /* xUnlock method */
- semXCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ semClose, /* xClose method */
+ semLock, /* xLock method */
+ semUnlock, /* xUnlock method */
+ semCheckReservedLock /* xCheckReservedLock method */
)
#endif
@@ -31706,8 +28295,7 @@ IOMETHODS(
afpClose, /* xClose method */
afpLock, /* xLock method */
afpUnlock, /* xUnlock method */
- afpCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ afpCheckReservedLock /* xCheckReservedLock method */
)
#endif
@@ -31732,8 +28320,7 @@ IOMETHODS(
proxyClose, /* xClose method */
proxyLock, /* xLock method */
proxyUnlock, /* xUnlock method */
- proxyCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ proxyCheckReservedLock /* xCheckReservedLock method */
)
#endif
@@ -31746,8 +28333,7 @@ IOMETHODS(
unixClose, /* xClose method */
unixLock, /* xLock method */
nfsUnlock, /* xUnlock method */
- unixCheckReservedLock, /* xCheckReservedLock method */
- 0 /* xShmMap method */
+ unixCheckReservedLock /* xCheckReservedLock method */
)
#endif
@@ -31817,13 +28403,15 @@ static const sqlite3_io_methods
#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */
-#if OS_VXWORKS
-/*
-** This "finder" function for VxWorks checks to see if posix advisory
-** locking works. If it does, then that is what is used. If it does not
-** work, then fallback to named semaphore locking.
+#if OS_VXWORKS && SQLITE_ENABLE_LOCKING_STYLE
+/*
+** This "finder" function attempts to determine the best locking strategy
+** for the database file "filePath". It then returns the sqlite3_io_methods
+** object that implements that strategy.
+**
+** This is for VXWorks only.
*/
-static const sqlite3_io_methods *vxworksIoFinderImpl(
+static const sqlite3_io_methods *autolockIoFinderImpl(
const char *filePath, /* name of the database file */
unixFile *pNew /* the open file object */
){
@@ -31849,12 +28437,12 @@ static const sqlite3_io_methods *vxworksIoFinderImpl(
}
}
static const sqlite3_io_methods
- *(*const vxworksIoFinder)(const char*,unixFile*) = vxworksIoFinderImpl;
+ *(*const autolockIoFinder)(const char*,unixFile*) = autolockIoFinderImpl;
-#endif /* OS_VXWORKS */
+#endif /* OS_VXWORKS && SQLITE_ENABLE_LOCKING_STYLE */
/*
-** An abstract type for a pointer to an IO method finder function:
+** An abstract type for a pointer to an IO method finder function:
*/
typedef const sqlite3_io_methods *(*finder_type)(const char*,unixFile*);
@@ -31970,7 +28558,7 @@ static int fillInUnixFile(
** the afpLockingContext.
*/
afpLockingContext *pCtx;
- pNew->lockingContext = pCtx = sqlite3_malloc64( sizeof(*pCtx) );
+ pNew->lockingContext = pCtx = sqlite3_malloc( sizeof(*pCtx) );
if( pCtx==0 ){
rc = SQLITE_NOMEM;
}else{
@@ -32000,7 +28588,7 @@ static int fillInUnixFile(
int nFilename;
assert( zFilename!=0 );
nFilename = (int)strlen(zFilename) + 6;
- zLockFile = (char *)sqlite3_malloc64(nFilename);
+ zLockFile = (char *)sqlite3_malloc(nFilename);
if( zLockFile==0 ){
rc = SQLITE_NOMEM;
}else{
@@ -32033,7 +28621,7 @@ static int fillInUnixFile(
}
#endif
- storeLastErrno(pNew, 0);
+ pNew->lastErrno = 0;
#if OS_VXWORKS
if( rc!=SQLITE_OK ){
if( h>=0 ) robust_close(pNew, h, __LINE__);
@@ -32168,7 +28756,7 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
** descriptor on the same path, fail, and return an error to SQLite.
**
** Even if a subsequent open() call does succeed, the consequences of
- ** not searching for a reusable file descriptor are not dire. */
+  ** not searching for a reusable file descriptor are not dire. */
if( 0==osStat(zPath, &sStat) ){
unixInodeInfo *pInode;
@@ -32199,7 +28787,7 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
** written to *pMode. If an IO error occurs, an SQLite error code is
** returned and the value of *pMode is not modified.
**
-** In most cases, this routine sets *pMode to 0, which will become
+** In most cases, this routine sets *pMode to 0, which will become
** an indication to robust_open() to create the file using
** SQLITE_DEFAULT_FILE_PERMISSIONS adjusted by the umask.
** But if the file being opened is a WAL or regular journal file, then
@@ -32359,16 +28947,6 @@ static int unixOpen(
|| eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL
);
- /* Detect a pid change and reset the PRNG. There is a race condition
- ** here such that two or more threads all trying to open databases at
- ** the same instant might all reset the PRNG. But multiple resets
- ** are harmless.
- */
- if( randomnessPid!=osGetpid(0) ){
- randomnessPid = osGetpid(0);
- sqlite3_randomness(0,0);
- }
-
memset(p, 0, sizeof(unixFile));
if( eType==SQLITE_OPEN_MAIN_DB ){
@@ -32377,7 +28955,7 @@ static int unixOpen(
if( pUnused ){
fd = pUnused->fd;
}else{
- pUnused = sqlite3_malloc64(sizeof(*pUnused));
+ pUnused = sqlite3_malloc(sizeof(*pUnused));
if( !pUnused ){
return SQLITE_NOMEM;
}
@@ -32460,12 +29038,6 @@ static int unixOpen(
if( isDelete ){
#if OS_VXWORKS
zPath = zName;
-#elif defined(SQLITE_UNLINK_AFTER_CLOSE)
- zPath = sqlite3_mprintf("%s", zName);
- if( zPath==0 ){
- robust_close(p, fd, __LINE__);
- return SQLITE_NOMEM;
- }
#else
osUnlink(zName);
#endif
@@ -32481,16 +29053,13 @@ static int unixOpen(
#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE
if( fstatfs(fd, &fsInfo) == -1 ){
- storeLastErrno(p, errno);
+ ((unixFile*)pFile)->lastErrno = errno;
robust_close(p, fd, __LINE__);
return SQLITE_IOERR_ACCESS;
}
if (0 == strncmp("msdos", fsInfo.f_fstypename, 5)) {
((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS;
}
- if (0 == strncmp("exfat", fsInfo.f_fstypename, 5)) {
- ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS;
- }
#endif
/* Set up appropriate ctrlFlags */
@@ -32513,6 +29082,19 @@ static int unixOpen(
if( envforce!=NULL ){
useProxy = atoi(envforce)>0;
}else{
+ if( statfs(zPath, &fsInfo) == -1 ){
+ /* In theory, the close(fd) call is sub-optimal. If the file opened
+ ** with fd is a database file, and there are other connections open
+ ** on that file that are currently holding advisory locks on it,
+ ** then the call to close() will cancel those locks. In practice,
+ ** we're assuming that statfs() doesn't fail very often. At least
+ ** not while other file descriptors opened by the same process on
+ ** the same file are working. */
+ p->lastErrno = errno;
+ robust_close(p, fd, __LINE__);
+ rc = SQLITE_IOERR_ACCESS;
+ goto open_finished;
+ }
useProxy = !(fsInfo.f_flags&MNT_LOCAL);
}
if( useProxy ){
@@ -32556,11 +29138,7 @@ static int unixDelete(
UNUSED_PARAMETER(NotUsed);
SimulateIOError(return SQLITE_IOERR_DELETE);
if( osUnlink(zPath)==(-1) ){
- if( errno==ENOENT
-#if OS_VXWORKS
- || osAccess(zPath,0)!=0
-#endif
- ){
+ if( errno==ENOENT ){
rc = SQLITE_IOERR_DELETE_NOENT;
}else{
rc = unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath);
@@ -32756,18 +29334,18 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){
** tests repeatable.
*/
memset(zBuf, 0, nBuf);
- randomnessPid = osGetpid(0);
-#if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS)
+#if !defined(SQLITE_TEST)
{
- int fd, got;
+ int pid, fd, got;
fd = robust_open("/dev/urandom", O_RDONLY, 0);
if( fd<0 ){
time_t t;
time(&t);
memcpy(zBuf, &t, sizeof(t));
- memcpy(&zBuf[sizeof(t)], &randomnessPid, sizeof(randomnessPid));
- assert( sizeof(t)+sizeof(randomnessPid)<=(size_t)nBuf );
- nBuf = sizeof(t) + sizeof(randomnessPid);
+ pid = getpid();
+ memcpy(&zBuf[sizeof(t)], &pid, sizeof(pid));
+ assert( sizeof(t)+sizeof(pid)<=(size_t)nBuf );
+ nBuf = sizeof(t) + sizeof(pid);
}else{
do{ got = osRead(fd, zBuf, nBuf); }while( got<0 && errno==EINTR );
robust_close(0, fd, __LINE__);
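
The hunk above reverts unixRandomness() to seeding directly from /dev/urandom, with the current time and process id as a last resort when the device cannot be opened. A hedged, standalone sketch of that strategy; get_seed() is an invented name and the return value is simply the number of seed bytes produced.

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static int get_seed(char *zBuf, int nBuf){
  int fd = open("/dev/urandom", O_RDONLY);
  memset(zBuf, 0, (size_t)nBuf);
  if( fd<0 ){
    /* No random device: fall back to time-of-day plus process id so the
    ** buffer is at least not all zeros. */
    time_t t = time(0);
    pid_t pid = getpid();
    if( (size_t)nBuf>=sizeof(t)+sizeof(pid) ){
      memcpy(zBuf, &t, sizeof(t));
      memcpy(&zBuf[sizeof(t)], &pid, sizeof(pid));
    }
    return (int)(sizeof(t)+sizeof(pid));
  }else{
    ssize_t got;
    do{ got = read(fd, zBuf, (size_t)nBuf); }while( got<0 && errno==EINTR );
    close(fd);
    return got<0 ? 0 : (int)got;
  }
}
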
@@ -32938,10 +29516,9 @@ static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){
**
** C APIs
**
-** sqlite3_file_control(db, dbname, SQLITE_FCNTL_SET_LOCKPROXYFILE,
+** sqlite3_file_control(db, dbname, SQLITE_SET_LOCKPROXYFILE,
** <proxy_path> | ":auto:");
-** sqlite3_file_control(db, dbname, SQLITE_FCNTL_GET_LOCKPROXYFILE,
-** &<proxy_path>);
+** sqlite3_file_control(db, dbname, SQLITE_GET_LOCKPROXYFILE, &<proxy_path>);
**
**
** SQL pragmas
@@ -32982,7 +29559,7 @@ static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){
** proxy path against the values stored in the conch. The conch file is
** stored in the same directory as the database file and the file name
** is patterned after the database file name as ".<databasename>-conch".
-** If the conch file does not exist, or its contents do not match the
+** If the conch file does not exist, or its contents do not match the
** host ID and/or proxy path, then the lock is escalated to an exclusive
** lock and the conch file contents is updated with the host ID and proxy
** path and the lock is downgraded to a shared lock again. If the conch
@@ -33034,7 +29611,7 @@ static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){
** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will
** force proxy locking to be used for every database file opened, and 0
** will force automatic proxy locking to be disabled for all database
-** files (explicitly calling the SQLITE_FCNTL_SET_LOCKPROXYFILE pragma or
+** files (explicitly calling the SQLITE_SET_LOCKPROXYFILE pragma or
** sqlite_file_control API is not affected by SQLITE_FORCE_PROXY_LOCKING).
*/
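
The comment block above explains that setting SQLITE_FORCE_PROXY_LOCKING=1 forces proxy locking for every database file while 0 disables the automatic behaviour. A small sketch of how such an environment override could be consulted; use_proxy_locking() and its parameter are illustrative only.

#include <stdlib.h>

/* Decide whether proxy locking should be used, honouring the documented
** environment override when it is present. */
static int use_proxy_locking(int defaultUseProxy){
  const char *zForce = getenv("SQLITE_FORCE_PROXY_LOCKING");
  if( zForce==0 ) return defaultUseProxy;   /* no override set */
  return atoi(zForce)>0;                    /* "1" forces on, "0" forces off */
}
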
@@ -33055,7 +29632,6 @@ struct proxyLockingContext {
char *lockProxyPath; /* Name of the proxy lock file */
char *dbPath; /* Name of the open file */
int conchHeld; /* 1 if the conch is held, -1 if lockless */
- int nFails; /* Number of conch taking failures */
void *oldLockingContext; /* Original lockingcontext to restore on close */
sqlite3_io_methods const *pOldMethod; /* Original I/O methods for close */
};
@@ -33077,7 +29653,7 @@ static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){
{
if( !confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen) ){
OSTRACE(("GETLOCKPATH failed %s errno=%d pid=%d\n",
- lPath, errno, osGetpid(0)));
+ lPath, errno, getpid()));
return SQLITE_IOERR_LOCK;
}
len = strlcat(lPath, "sqliteplocks", maxLen);
@@ -33099,7 +29675,7 @@ static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){
}
lPath[i+len]='\0';
strlcat(lPath, ":auto:", maxLen);
- OSTRACE(("GETLOCKPATH proxy lock path=%s pid=%d\n", lPath, osGetpid(0)));
+ OSTRACE(("GETLOCKPATH proxy lock path=%s pid=%d\n", lPath, getpid()));
return SQLITE_OK;
}
@@ -33126,7 +29702,7 @@ static int proxyCreateLockPath(const char *lockPath){
if( err!=EEXIST ) {
OSTRACE(("CREATELOCKPATH FAILED creating %s, "
"'%s' proxy lock path=%s pid=%d\n",
- buf, strerror(err), lockPath, osGetpid(0)));
+ buf, strerror(err), lockPath, getpid()));
return err;
}
}
@@ -33135,7 +29711,7 @@ static int proxyCreateLockPath(const char *lockPath){
}
buf[i] = lockPath[i];
}
- OSTRACE(("CREATELOCKPATH proxy lock path=%s pid=%d\n", lockPath, osGetpid(0)));
+ OSTRACE(("CREATELOCKPATH proxy lock path=%s pid=%d\n", lockPath, getpid()));
return 0;
}
@@ -33169,7 +29745,7 @@ static int proxyCreateUnixFile(
if( pUnused ){
fd = pUnused->fd;
}else{
- pUnused = sqlite3_malloc64(sizeof(*pUnused));
+ pUnused = sqlite3_malloc(sizeof(*pUnused));
if( !pUnused ){
return SQLITE_NOMEM;
}
@@ -33202,7 +29778,7 @@ static int proxyCreateUnixFile(
}
}
- pNew = (unixFile *)sqlite3_malloc64(sizeof(*pNew));
+ pNew = (unixFile *)sqlite3_malloc(sizeof(*pNew));
if( pNew==NULL ){
rc = SQLITE_NOMEM;
goto end_create_proxy;
@@ -33235,10 +29811,8 @@ SQLITE_API int sqlite3_hostid_num = 0;
#define PROXY_HOSTIDLEN 16 /* conch file host id length */
-#ifdef HAVE_GETHOSTUUID
/* Not always defined in the headers as it ought to be */
extern int gethostuuid(uuid_t id, const struct timespec *wait);
-#endif
/* get the host ID via gethostuuid(), pHostID must point to PROXY_HOSTIDLEN
** bytes of writable memory.
@@ -33246,9 +29820,10 @@ extern int gethostuuid(uuid_t id, const struct timespec *wait);
static int proxyGetHostID(unsigned char *pHostID, int *pError){
assert(PROXY_HOSTIDLEN == sizeof(uuid_t));
memset(pHostID, 0, PROXY_HOSTIDLEN);
-#ifdef HAVE_GETHOSTUUID
+#if defined(__MAX_OS_X_VERSION_MIN_REQUIRED)\
+ && __MAC_OS_X_VERSION_MIN_REQUIRED<1050
{
- struct timespec timeout = {1, 0}; /* 1 sec timeout */
+ static const struct timespec timeout = {1, 0}; /* 1 sec timeout */
if( gethostuuid(pHostID, &timeout) ){
int err = errno;
if( pError ){
@@ -33363,7 +29938,7 @@ static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){
*/
struct stat buf;
if( osFstat(conchFile->h, &buf) ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return SQLITE_IOERR_LOCK;
}
@@ -33383,7 +29958,7 @@ static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){
char tBuf[PROXY_MAXCONCHLEN];
int len = osPread(conchFile->h, tBuf, PROXY_MAXCONCHLEN, 0);
if( len<0 ){
- storeLastErrno(pFile, errno);
+ pFile->lastErrno = errno;
return SQLITE_IOERR_LOCK;
}
if( len>PROXY_PATHINDEX && tBuf[0]==(char)PROXY_CONCHVERSION){
@@ -33403,7 +29978,7 @@ static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){
if( 0==proxyBreakConchLock(pFile, myHostID) ){
rc = SQLITE_OK;
if( lockType==EXCLUSIVE_LOCK ){
- rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK);
+ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK);
}
if( !rc ){
rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType);
@@ -33441,12 +30016,11 @@ static int proxyTakeConch(unixFile *pFile){
int forceNewLockPath = 0;
OSTRACE(("TAKECONCH %d for %s pid=%d\n", conchFile->h,
- (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"),
- osGetpid(0)));
+ (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), getpid()));
rc = proxyGetHostID(myHostID, &pError);
if( (rc&0xff)==SQLITE_IOERR ){
- storeLastErrno(pFile, pError);
+ pFile->lastErrno = pError;
goto end_takeconch;
}
rc = proxyConchLock(pFile, myHostID, SHARED_LOCK);
@@ -33457,7 +30031,7 @@ static int proxyTakeConch(unixFile *pFile){
readLen = seekAndRead((unixFile*)conchFile, 0, readBuf, PROXY_MAXCONCHLEN);
if( readLen<0 ){
/* I/O error: lastErrno set by seekAndRead */
- storeLastErrno(pFile, conchFile->lastErrno);
+ pFile->lastErrno = conchFile->lastErrno;
rc = SQLITE_IOERR_READ;
goto end_takeconch;
}else if( readLen<=(PROXY_HEADERLEN+PROXY_HOSTIDLEN) ||
@@ -33530,7 +30104,7 @@ static int proxyTakeConch(unixFile *pFile){
rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK);
}
}else{
- rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK);
+ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, EXCLUSIVE_LOCK);
}
if( rc==SQLITE_OK ){
char writeBuffer[PROXY_MAXCONCHLEN];
@@ -33539,8 +30113,7 @@ static int proxyTakeConch(unixFile *pFile){
writeBuffer[0] = (char)PROXY_CONCHVERSION;
memcpy(&writeBuffer[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN);
if( pCtx->lockProxyPath!=NULL ){
- strlcpy(&writeBuffer[PROXY_PATHINDEX], pCtx->lockProxyPath,
- MAXPATHLEN);
+ strlcpy(&writeBuffer[PROXY_PATHINDEX], pCtx->lockProxyPath, MAXPATHLEN);
}else{
strlcpy(&writeBuffer[PROXY_PATHINDEX], tempLockPath, MAXPATHLEN);
}
@@ -33652,7 +30225,7 @@ static int proxyReleaseConch(unixFile *pFile){
conchFile = pCtx->conchFile;
OSTRACE(("RELEASECONCH %d for %s pid=%d\n", conchFile->h,
(pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"),
- osGetpid(0)));
+ getpid()));
if( pCtx->conchHeld>0 ){
rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK);
}
@@ -33664,7 +30237,7 @@ static int proxyReleaseConch(unixFile *pFile){
/*
** Given the name of a database file, compute the name of its conch file.
-** Store the conch filename in memory obtained from sqlite3_malloc64().
+** Store the conch filename in memory obtained from sqlite3_malloc().
** Make *pConchPath point to the new name. Return SQLITE_OK on success
** or SQLITE_NOMEM if unable to obtain memory.
**
@@ -33680,7 +30253,7 @@ static int proxyCreateConchPathname(char *dbPath, char **pConchPath){
/* Allocate space for the conch filename and initialize the name to
** the name of the original database file. */
- *pConchPath = conchPath = (char *)sqlite3_malloc64(len + 8);
+ *pConchPath = conchPath = (char *)sqlite3_malloc(len + 8);
if( conchPath==0 ){
return SQLITE_NOMEM;
}
@@ -33752,8 +30325,7 @@ static int proxyGetDbPathForUnixFile(unixFile *pFile, char *dbPath){
/* afp style keeps a reference to the db path in the filePath field
** of the struct */
assert( (int)strlen((char*)pFile->lockingContext)<=MAXPATHLEN );
- strlcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath,
- MAXPATHLEN);
+ strlcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath, MAXPATHLEN);
} else
#endif
if( pFile->pMethod == &dotlockIoMethods ){
@@ -33794,9 +30366,9 @@ static int proxyTransformUnixFile(unixFile *pFile, const char *path) {
}
OSTRACE(("TRANSPROXY %d for %s pid=%d\n", pFile->h,
- (lockPath ? lockPath : ":auto:"), osGetpid(0)));
+ (lockPath ? lockPath : ":auto:"), getpid()));
- pCtx = sqlite3_malloc64( sizeof(*pCtx) );
+ pCtx = sqlite3_malloc( sizeof(*pCtx) );
if( pCtx==0 ){
return SQLITE_NOMEM;
}
@@ -33866,7 +30438,7 @@ static int proxyTransformUnixFile(unixFile *pFile, const char *path) {
*/
static int proxyFileControl(sqlite3_file *id, int op, void *pArg){
switch( op ){
- case SQLITE_FCNTL_GET_LOCKPROXYFILE: {
+ case SQLITE_GET_LOCKPROXYFILE: {
unixFile *pFile = (unixFile*)id;
if( pFile->pMethod == &proxyIoMethods ){
proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext;
@@ -33881,16 +30453,13 @@ static int proxyFileControl(sqlite3_file *id, int op, void *pArg){
}
return SQLITE_OK;
}
- case SQLITE_FCNTL_SET_LOCKPROXYFILE: {
+ case SQLITE_SET_LOCKPROXYFILE: {
unixFile *pFile = (unixFile*)id;
int rc = SQLITE_OK;
int isProxyStyle = (pFile->pMethod == &proxyIoMethods);
if( pArg==NULL || (const char *)pArg==0 ){
if( isProxyStyle ){
- /* turn off proxy locking - not supported. If support is added for
- ** switching proxy locking mode off then it will need to fail if
- ** the journal mode is WAL mode.
- */
+ /* turn off proxy locking - not supported */
rc = SQLITE_ERROR /*SQLITE_PROTOCOL? SQLITE_MISUSE?*/;
}else{
/* turn off proxy locking - already off - NOOP */
@@ -34081,7 +30650,7 @@ static int proxyClose(sqlite3_file *id) {
** necessarily been initialized when this routine is called, and so they
** should not be used.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
+SQLITE_API int sqlite3_os_init(void){
/*
** The following macro defines an initializer for an sqlite3_vfs object.
** The name of the VFS is NAME. The pAppData is a pointer to a pointer
@@ -34135,10 +30704,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
** array cannot be const.
*/
static sqlite3_vfs aVfs[] = {
-#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+#if SQLITE_ENABLE_LOCKING_STYLE && (OS_VXWORKS || defined(__APPLE__))
UNIXVFS("unix", autolockIoFinder ),
-#elif OS_VXWORKS
- UNIXVFS("unix", vxworksIoFinder ),
#else
UNIXVFS("unix", posixIoFinder ),
#endif
@@ -34148,12 +30715,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
#if OS_VXWORKS
UNIXVFS("unix-namedsem", semIoFinder ),
#endif
-#if SQLITE_ENABLE_LOCKING_STYLE || OS_VXWORKS
- UNIXVFS("unix-posix", posixIoFinder ),
-#endif
#if SQLITE_ENABLE_LOCKING_STYLE
+ UNIXVFS("unix-posix", posixIoFinder ),
+#if !OS_VXWORKS
UNIXVFS("unix-flock", flockIoFinder ),
#endif
+#endif
#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
UNIXVFS("unix-afp", afpIoFinder ),
UNIXVFS("unix-nfs", nfsIoFinder ),
@@ -34164,7 +30731,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
/* Double-check that the aSyscall[] array has been constructed
** correctly. See ticket [bb3a86e890c8e96ab] */
- assert( ArraySize(aSyscall)==25 );
+ assert( ArraySize(aSyscall)==24 );
/* Register all VFSes defined in the aVfs[] array */
for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){
@@ -34180,7 +30747,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
** to release dynamically allocated objects. But not on unix.
** This routine is a no-op for unix.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
+SQLITE_API int sqlite3_os_end(void){
return SQLITE_OK;
}
@@ -34202,9 +30769,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
**
** This file contains code that is specific to Windows.
*/
-/* #include "sqliteInt.h" */
#if SQLITE_OS_WIN /* This file is used for Windows only */
+#ifdef __CYGWIN__
+# include <sys/cygwin.h>
+# include <errno.h> /* amalgamator: keep */
+#endif
+
/*
** Include code that is common to all os_*.c files
*/
@@ -34241,6 +30812,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
#endif
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+# ifndef SQLITE_DEBUG_OS_TRACE
+# define SQLITE_DEBUG_OS_TRACE 0
+# endif
+ int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE;
+# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X
+#else
+# define OSTRACE(X)
+#endif
+
/*
** Macros for performance tracing. Normally turned off. Only works
** on i486 hardware.
@@ -34409,11 +30990,6 @@ SQLITE_API int sqlite3_open_file_count = 0;
/************** Continuing where we left off in os_win.c *********************/
/*
-** Include the header file for the Windows VFS.
-*/
-/* #include "os_win.h" */
-
-/*
** Compiling and using WAL mode requires several APIs that are only
** available in Windows platforms based on the NT kernel.
*/
@@ -34422,11 +30998,6 @@ SQLITE_API int sqlite3_open_file_count = 0;
with SQLITE_OMIT_WAL."
#endif
-#if !SQLITE_OS_WINNT && SQLITE_MAX_MMAP_SIZE>0
-# error "Memory mapped files require support from the Windows NT kernel,\
- compile with SQLITE_MAX_MMAP_SIZE=0."
-#endif
-
/*
** Are most of the Win32 ANSI APIs available (i.e. with certain exceptions
** based on the sub-platform)?
@@ -34465,14 +31036,18 @@ SQLITE_API int sqlite3_open_file_count = 0;
#endif
/*
-** Check to see if the GetVersionEx[AW] functions are deprecated on the
-** target system. GetVersionEx was first deprecated in Win8.1.
+** Check if the GetVersionEx[AW] functions should be considered deprecated
+** and avoid using them in that case. It should be noted here that if the
+** value of the SQLITE_WIN32_GETVERSIONEX pre-processor macro is zero
+** (whether via this block or via being manually specified), that implies
+** the underlying operating system will always be based on the Windows NT
+** Kernel.
*/
#ifndef SQLITE_WIN32_GETVERSIONEX
# if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINBLUE
-# define SQLITE_WIN32_GETVERSIONEX 0 /* GetVersionEx() is deprecated */
+# define SQLITE_WIN32_GETVERSIONEX 0
# else
-# define SQLITE_WIN32_GETVERSIONEX 1 /* GetVersionEx() is current */
+# define SQLITE_WIN32_GETVERSIONEX 1
# endif
#endif
@@ -34544,7 +31119,7 @@ SQLITE_API int sqlite3_open_file_count = 0;
** [sometimes] not used by the code (e.g. via conditional compilation).
*/
#ifndef UNUSED_VARIABLE_VALUE
-# define UNUSED_VARIABLE_VALUE(x) (void)(x)
+# define UNUSED_VARIABLE_VALUE(x) (void)(x)
#endif
/*
@@ -34556,11 +31131,10 @@ SQLITE_API int sqlite3_open_file_count = 0;
/*
** Do we need to manually define the Win32 file mapping APIs for use with WAL
-** mode or memory mapped files (e.g. these APIs are available in the Windows
-** CE SDK; however, they are not present in the header file)?
+** mode (e.g. these APIs are available in the Windows CE SDK; however, they
+** are not present in the header file)?
*/
-#if SQLITE_WIN32_FILEMAPPING_API && \
- (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+#if SQLITE_WIN32_FILEMAPPING_API && !defined(SQLITE_OMIT_WAL)
/*
** Two of the file mapping APIs are different under WinRT. Figure out which
** set we need.
@@ -34585,18 +31159,16 @@ WINBASEAPI LPVOID WINAPI MapViewOfFile(HANDLE, DWORD, DWORD, DWORD, SIZE_T);
#endif /* SQLITE_OS_WINRT */
/*
-** These file mapping APIs are common to both Win32 and WinRT.
+** This file mapping API is common to both Win32 and WinRT.
*/
-
-WINBASEAPI BOOL WINAPI FlushViewOfFile(LPCVOID, SIZE_T);
WINBASEAPI BOOL WINAPI UnmapViewOfFile(LPCVOID);
-#endif /* SQLITE_WIN32_FILEMAPPING_API */
+#endif /* SQLITE_WIN32_FILEMAPPING_API && !defined(SQLITE_OMIT_WAL) */
/*
** Some Microsoft compilers lack this definition.
*/
#ifndef INVALID_FILE_ATTRIBUTES
-# define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
+# define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
#endif
#ifndef FILE_FLAG_MASK
@@ -34646,7 +31218,7 @@ struct winFile {
int szChunk; /* Chunk size configured by FCNTL_CHUNK_SIZE */
#if SQLITE_OS_WINCE
LPWSTR zDeleteOnClose; /* Name of file to delete when closing */
- HANDLE hMutex; /* Mutex used to control access to shared lock */
+ HANDLE hMutex; /* Mutex used to control access to shared lock */
HANDLE hShared; /* Shared memory segment used for locking */
winceLock local; /* Locks obtained by this instance of winFile */
winceLock *shared; /* Global shared lock memory for the file */
@@ -34806,9 +31378,10 @@ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void);
** can manually set this value to 1 to emulate Win98 behavior.
*/
#ifdef SQLITE_TEST
-SQLITE_API LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0;
-#else
-static LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0;
+SQLITE_API int sqlite3_os_type = 0;
+#elif !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
+ defined(SQLITE_WIN32_HAS_ANSI) && defined(SQLITE_WIN32_HAS_WIDE)
+static int sqlite3_os_type = 0;
#endif
#ifndef SYSCALL
@@ -34883,7 +31456,7 @@ static struct win_syscall {
LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[5].pCurrent)
#if (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_ANSI) && \
- (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0))
+ !defined(SQLITE_OMIT_WAL))
{ "CreateFileMappingA", (SYSCALL)CreateFileMappingA, 0 },
#else
{ "CreateFileMappingA", (SYSCALL)0, 0 },
@@ -34893,7 +31466,7 @@ static struct win_syscall {
DWORD,DWORD,DWORD,LPCSTR))aSyscall[6].pCurrent)
#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \
- (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0))
+ !defined(SQLITE_OMIT_WAL))
{ "CreateFileMappingW", (SYSCALL)CreateFileMappingW, 0 },
#else
{ "CreateFileMappingW", (SYSCALL)0, 0 },
@@ -35233,8 +31806,7 @@ static struct win_syscall {
LPOVERLAPPED))aSyscall[48].pCurrent)
#endif
-#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && \
- (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0))
+#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && !defined(SQLITE_OMIT_WAL))
{ "MapViewOfFile", (SYSCALL)MapViewOfFile, 0 },
#else
{ "MapViewOfFile", (SYSCALL)0, 0 },
@@ -35304,7 +31876,7 @@ static struct win_syscall {
#define osUnlockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \
LPOVERLAPPED))aSyscall[58].pCurrent)
-#if SQLITE_OS_WINCE || !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+#if SQLITE_OS_WINCE || !defined(SQLITE_OMIT_WAL)
{ "UnmapViewOfFile", (SYSCALL)UnmapViewOfFile, 0 },
#else
{ "UnmapViewOfFile", (SYSCALL)0, 0 },
@@ -35340,7 +31912,7 @@ static struct win_syscall {
#define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \
DWORD))aSyscall[63].pCurrent)
-#if !SQLITE_OS_WINCE
+#if SQLITE_OS_WINRT
{ "WaitForSingleObjectEx", (SYSCALL)WaitForSingleObjectEx, 0 },
#else
{ "WaitForSingleObjectEx", (SYSCALL)0, 0 },
@@ -35367,7 +31939,7 @@ static struct win_syscall {
#define osGetFileInformationByHandleEx ((BOOL(WINAPI*)(HANDLE, \
FILE_INFO_BY_HANDLE_CLASS,LPVOID,DWORD))aSyscall[66].pCurrent)
-#if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_WAL)
{ "MapViewOfFileFromApp", (SYSCALL)MapViewOfFileFromApp, 0 },
#else
{ "MapViewOfFileFromApp", (SYSCALL)0, 0 },
@@ -35431,7 +32003,7 @@ static struct win_syscall {
#define osGetProcessHeap ((HANDLE(WINAPI*)(VOID))aSyscall[74].pCurrent)
-#if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_WAL)
{ "CreateFileMappingFromApp", (SYSCALL)CreateFileMappingFromApp, 0 },
#else
{ "CreateFileMappingFromApp", (SYSCALL)0, 0 },
@@ -35440,48 +32012,6 @@ static struct win_syscall {
#define osCreateFileMappingFromApp ((HANDLE(WINAPI*)(HANDLE, \
LPSECURITY_ATTRIBUTES,ULONG,ULONG64,LPCWSTR))aSyscall[75].pCurrent)
-/*
-** NOTE: On some sub-platforms, the InterlockedCompareExchange "function"
-** is really just a macro that uses a compiler intrinsic (e.g. x64).
-** So do not try to make this is into a redefinable interface.
-*/
-#if defined(InterlockedCompareExchange)
- { "InterlockedCompareExchange", (SYSCALL)0, 0 },
-
-#define osInterlockedCompareExchange InterlockedCompareExchange
-#else
- { "InterlockedCompareExchange", (SYSCALL)InterlockedCompareExchange, 0 },
-
-#define osInterlockedCompareExchange ((LONG(WINAPI*)(LONG \
- SQLITE_WIN32_VOLATILE*, LONG,LONG))aSyscall[76].pCurrent)
-#endif /* defined(InterlockedCompareExchange) */
-
-#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
- { "UuidCreate", (SYSCALL)UuidCreate, 0 },
-#else
- { "UuidCreate", (SYSCALL)0, 0 },
-#endif
-
-#define osUuidCreate ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[77].pCurrent)
-
-#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
- { "UuidCreateSequential", (SYSCALL)UuidCreateSequential, 0 },
-#else
- { "UuidCreateSequential", (SYSCALL)0, 0 },
-#endif
-
-#define osUuidCreateSequential \
- ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[78].pCurrent)
-
-#if !defined(SQLITE_NO_SYNC) && SQLITE_MAX_MMAP_SIZE>0
- { "FlushViewOfFile", (SYSCALL)FlushViewOfFile, 0 },
-#else
- { "FlushViewOfFile", (SYSCALL)0, 0 },
-#endif
-
-#define osFlushViewOfFile \
- ((BOOL(WINAPI*)(LPCVOID,SIZE_T))aSyscall[79].pCurrent)
-
}; /* End of the overrideable system calls */
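The entries edited above all live in the overrideable aSyscall[] table: a list of named function pointers whose current slot can be swapped out for testing, consulted only through macros. As a rough, hypothetical sketch of that mechanism (not SQLite's code; the names are made up):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>
#include <string.h>

typedef void (*SYSCALLPTR)(void);

static struct demo_syscall {
  const char *zName;      /* name of the system call              */
  SYSCALLPTR pCurrent;    /* current value, possibly an override  */
  SYSCALLPTR pDefault;    /* default value, used to restore       */
} aDemoSyscall[] = {
  { "GetTickCount", (SYSCALLPTR)GetTickCount, (SYSCALLPTR)GetTickCount },
};

/* Callers go through the macro, never through the OS routine directly. */
#define demoGetTickCount ((DWORD(WINAPI*)(VOID))aDemoSyscall[0].pCurrent)

/* Install pNew for zName, or restore the default when pNew is NULL. */
static int demoSetSyscall(const char *zName, SYSCALLPTR pNew){
  unsigned int i;
  for(i=0; i<sizeof(aDemoSyscall)/sizeof(aDemoSyscall[0]); i++){
    if( strcmp(zName, aDemoSyscall[i].zName)==0 ){
      aDemoSyscall[i].pCurrent = pNew ? pNew : aDemoSyscall[i].pDefault;
      return 0;
    }
  }
  return -1;   /* unknown name */
}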
/*
@@ -35575,7 +32105,7 @@ static const char *winNextSystemCall(sqlite3_vfs *p, const char *zName){
** "pnLargest" argument, if non-zero, will be used to return the size of the
** largest committed free block in the heap, in bytes.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_compact_heap(LPUINT pnLargest){
+SQLITE_API int sqlite3_win32_compact_heap(LPUINT pnLargest){
int rc = SQLITE_OK;
UINT nLargest = 0;
HANDLE hHeap;
@@ -35615,12 +32145,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_win32_compact_heap(LPUINT pnLargest){
** the sqlite3_memory_used() function does not return zero, SQLITE_BUSY will
** be returned and no changes will be made to the Win32 native heap.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_reset_heap(){
+SQLITE_API int sqlite3_win32_reset_heap(){
int rc;
MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */
MUTEX_LOGIC( sqlite3_mutex *pMem; ) /* The memsys static mutex */
- MUTEX_LOGIC( pMaster = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); )
- MUTEX_LOGIC( pMem = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM); )
+ MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
+ MUTEX_LOGIC( pMem = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); )
sqlite3_mutex_enter(pMaster);
sqlite3_mutex_enter(pMem);
winMemAssertMagic();
@@ -35660,7 +32190,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_win32_reset_heap(){
** (if available).
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_write_debug(const char *zBuf, int nBuf){
+SQLITE_API void sqlite3_win32_write_debug(const char *zBuf, int nBuf){
char zDbgBuf[SQLITE_WIN32_DBG_BUF_SIZE];
int nMin = MIN(nBuf, (SQLITE_WIN32_DBG_BUF_SIZE - 1)); /* may be negative. */
if( nMin<-1 ) nMin = -1; /* all negative values become -1. */
@@ -35700,7 +32230,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_win32_write_debug(const char *zBuf, int n
static HANDLE sleepObj = NULL;
#endif
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds){
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds){
#if SQLITE_OS_WINRT
if ( sleepObj==NULL ){
sleepObj = osCreateEventExW(NULL, NULL, CREATE_EVENT_MANUAL_RESET,
@@ -35713,16 +32243,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds){
#endif
}
-#if SQLITE_MAX_WORKER_THREADS>0 && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
- SQLITE_THREADSAFE>0
-SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject){
- DWORD rc;
- while( (rc = osWaitForSingleObjectEx(hObject, INFINITE,
- TRUE))==WAIT_IO_COMPLETION ){}
- return rc;
-}
-#endif
-
/*
** Return true (non-zero) if we are running under WinNT, Win2K, WinXP,
** or WinCE. Return false (zero) for Win95, Win98, or WinME.
@@ -35742,47 +32262,22 @@ SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject){
#elif !defined(SQLITE_WIN32_HAS_WIDE)
# define osIsNT() (0)
#else
-# define osIsNT() ((sqlite3_os_type==2) || sqlite3_win32_is_nt())
-#endif
-
-/*
-** This function determines if the machine is running a version of Windows
-** based on the NT kernel.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void){
-#if SQLITE_OS_WINRT
- /*
- ** NOTE: The WinRT sub-platform is always assumed to be based on the NT
- ** kernel.
- */
- return 1;
-#elif defined(SQLITE_WIN32_GETVERSIONEX) && SQLITE_WIN32_GETVERSIONEX
- if( osInterlockedCompareExchange(&sqlite3_os_type, 0, 0)==0 ){
-#if defined(SQLITE_WIN32_HAS_ANSI)
- OSVERSIONINFOA sInfo;
- sInfo.dwOSVersionInfoSize = sizeof(sInfo);
- osGetVersionExA(&sInfo);
- osInterlockedCompareExchange(&sqlite3_os_type,
- (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0);
-#elif defined(SQLITE_WIN32_HAS_WIDE)
- OSVERSIONINFOW sInfo;
- sInfo.dwOSVersionInfoSize = sizeof(sInfo);
- osGetVersionExW(&sInfo);
- osInterlockedCompareExchange(&sqlite3_os_type,
- (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0);
+ static int osIsNT(void){
+ if( sqlite3_os_type==0 ){
+#if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WIN8
+ OSVERSIONINFOW sInfo;
+ sInfo.dwOSVersionInfoSize = sizeof(sInfo);
+ osGetVersionExW(&sInfo);
+#else
+ OSVERSIONINFOA sInfo;
+ sInfo.dwOSVersionInfoSize = sizeof(sInfo);
+ osGetVersionExA(&sInfo);
#endif
+ sqlite3_os_type = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1;
+ }
+ return sqlite3_os_type==2;
}
- return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2;
-#elif SQLITE_TEST
- return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2;
-#else
- /*
- ** NOTE: All sub-platforms where the GetVersionEx[AW] functions are
- ** deprecated are always assumed to be based on the NT kernel.
- */
- return 1;
#endif
-}
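The removed sqlite3_win32_is_nt() above caches the detected platform through InterlockedCompareExchange() so that no mutex is needed. A standalone sketch of that detect-once pattern (not taken from this diff; GetVersionExW() is deprecated on recent SDKs and appears here only because the diffed code relies on it):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>

static volatile LONG g_osType = 0;   /* 0 = unknown, 1 = Win9x, 2 = NT */

static int isNtKernel(void){
  /* InterlockedCompareExchange(p, 0, 0) is an atomic read of *p. */
  if( InterlockedCompareExchange(&g_osType, 0, 0)==0 ){
    OSVERSIONINFOW info;
    info.dwOSVersionInfoSize = sizeof(info);
    GetVersionExW(&info);            /* deprecated API, used for illustration */
    /* Publish the result only if no other thread got there first. */
    InterlockedCompareExchange(&g_osType,
        (info.dwPlatformId==VER_PLATFORM_WIN32_NT) ? 2 : 1, 0);
  }
  return InterlockedCompareExchange(&g_osType, 2, 2)==2;
}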
#ifdef SQLITE_WIN32_MALLOC
/*
@@ -35990,7 +32485,7 @@ SQLITE_PRIVATE void sqlite3MemSetDefault(void){
#endif /* SQLITE_WIN32_MALLOC */
/*
-** Convert a UTF-8 string to Microsoft Unicode (UTF-16?).
+** Convert a UTF-8 string to Microsoft Unicode (UTF-16?).
**
** Space to hold the returned string is obtained from malloc.
*/
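The winUtf8ToUnicode() helper referenced here rests on the usual two-call MultiByteToWideChar() idiom: the first call measures, the second converts. A minimal sketch under that assumption (not the SQLite implementation; it returns a malloc()'d buffer the caller must free):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>
#include <stdlib.h>

static LPWSTR utf8ToWide(const char *zUtf8){
  int nChar = MultiByteToWideChar(CP_UTF8, 0, zUtf8, -1, NULL, 0);
  LPWSTR zWide;
  if( nChar==0 ) return NULL;                 /* invalid input               */
  zWide = (LPWSTR)malloc(nChar*sizeof(WCHAR));
  if( zWide==NULL ) return NULL;              /* out of memory               */
  if( MultiByteToWideChar(CP_UTF8, 0, zUtf8, -1, zWide, nChar)==0 ){
    free(zWide);                              /* conversion failed           */
    return NULL;
  }
  return zWide;                               /* NUL-terminated UTF-16       */
}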
@@ -36043,7 +32538,7 @@ static char *winUnicodeToUtf8(LPCWSTR zWideFilename){
/*
** Convert an ANSI string to Microsoft Unicode, based on the
** current codepage settings for file apis.
-**
+**
** Space to hold the returned string is obtained
** from sqlite3_malloc.
*/
@@ -36103,7 +32598,7 @@ static char *winUnicodeToMbcs(LPCWSTR zWideFilename){
** Convert multibyte character string to UTF-8. Space to hold the
** returned string is obtained from sqlite3_malloc().
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zFilename){
+SQLITE_API char *sqlite3_win32_mbcs_to_utf8(const char *zFilename){
char *zFilenameUtf8;
LPWSTR zTmpWide;
@@ -36117,10 +32612,10 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zFilename
}
/*
-** Convert UTF-8 to multibyte character string. Space to hold the
+** Convert UTF-8 to multibyte character string. Space to hold the
** returned string is obtained from sqlite3_malloc().
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zFilename){
+SQLITE_API char *sqlite3_win32_utf8_to_mbcs(const char *zFilename){
char *zFilenameMbcs;
LPWSTR zTmpWide;
@@ -36140,7 +32635,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zFilename
** argument is the name of the directory to use. The return value will be
** SQLITE_OK if successful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){
+SQLITE_API int sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){
char **ppDirectory = 0;
#ifndef SQLITE_OMIT_AUTOINIT
int rc = sqlite3_initialize();
@@ -36257,11 +32752,11 @@ static int winGetLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){
**
** This routine is invoked after an error occurs in an OS function.
** It logs a message using sqlite3_log() containing the current value of
-** error code and, if possible, the human-readable equivalent from
+** error code and, if possible, the human-readable equivalent from
** FormatMessage.
**
** The first argument passed to the macro should be the error code that
-** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN).
+** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN).
** The two subsequent arguments should be the name of the OS function that
** failed and the associated file-system path, if any.
*/
@@ -36292,7 +32787,7 @@ static int winLogErrorAtLine(
/*
** The number of times that a ReadFile(), WriteFile(), and DeleteFile()
-** will be retried following a locking error - probably caused by
+** will be retried following a locking error - probably caused by
** antivirus software. Also the initial delay before the first retry.
** The delay increases linearly with each retry.
*/
@@ -36306,32 +32801,6 @@ static int winIoerrRetry = SQLITE_WIN32_IOERR_RETRY;
static int winIoerrRetryDelay = SQLITE_WIN32_IOERR_RETRY_DELAY;
/*
-** The "winIoerrCanRetry1" macro is used to determine if a particular I/O
-** error code obtained via GetLastError() is eligible to be retried. It
-** must accept the error code DWORD as its only argument and should return
-** non-zero if the error code is transient in nature and the operation
-** responsible for generating the original error might succeed upon being
-** retried. The argument to this macro should be a variable.
-**
-** Additionally, a macro named "winIoerrCanRetry2" may be defined. If it
-** is defined, it will be consulted only when the macro "winIoerrCanRetry1"
-** returns zero. The "winIoerrCanRetry2" macro is completely optional and
-** may be used to include additional error codes in the set that should
-** result in the failing I/O operation being retried by the caller. If
-** defined, the "winIoerrCanRetry2" macro must exhibit external semantics
-** identical to those of the "winIoerrCanRetry1" macro.
-*/
-#if !defined(winIoerrCanRetry1)
-#define winIoerrCanRetry1(a) (((a)==ERROR_ACCESS_DENIED) || \
- ((a)==ERROR_SHARING_VIOLATION) || \
- ((a)==ERROR_LOCK_VIOLATION) || \
- ((a)==ERROR_DEV_NOT_EXIST) || \
- ((a)==ERROR_NETNAME_DELETED) || \
- ((a)==ERROR_SEM_TIMEOUT) || \
- ((a)==ERROR_NETWORK_UNREACHABLE))
-#endif
-
-/*
** If a ReadFile() or WriteFile() error occurs, invoke this routine
** to see if it should be retried. Return TRUE to retry. Return FALSE
** to give up with an error.
@@ -36344,18 +32813,13 @@ static int winRetryIoerr(int *pnRetry, DWORD *pError){
}
return 0;
}
- if( winIoerrCanRetry1(e) ){
+ if( e==ERROR_ACCESS_DENIED ||
+ e==ERROR_LOCK_VIOLATION ||
+ e==ERROR_SHARING_VIOLATION ){
sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry));
++*pnRetry;
return 1;
}
-#if defined(winIoerrCanRetry2)
- else if( winIoerrCanRetry2(e) ){
- sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry));
- ++*pnRetry;
- return 1;
- }
-#endif
if( pError ){
*pError = e;
}
@@ -36365,11 +32829,11 @@ static int winRetryIoerr(int *pnRetry, DWORD *pError){
/*
** Log a I/O error retry episode.
*/
-static void winLogIoerr(int nRetry, int lineno){
+static void winLogIoerr(int nRetry){
if( nRetry ){
- sqlite3_log(SQLITE_NOTICE,
- "delayed %dms for lock/sharing conflict at line %d",
- winIoerrRetryDelay*nRetry*(nRetry+1)/2, lineno
+ sqlite3_log(SQLITE_IOERR,
+ "delayed %dms for lock/sharing conflict",
+ winIoerrRetryDelay*nRetry*(nRetry+1)/2
);
}
}
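The winRetryIoerr()/winLogIoerr() pair above implements a bounded retry with a linearly growing delay for transient lock and sharing errors, typically caused by antivirus scanners. A self-contained sketch of the same policy, with hypothetical limits that merely echo the SQLITE_WIN32_IOERR_RETRY defaults:

/* Illustrative sketch only -- not part of this diff or of SQLITE. */
#include <windows.h>

#define MAX_RETRY    10    /* attempts before giving up            */
#define RETRY_DELAY  25    /* milliseconds before the first retry  */

static BOOL deleteWithRetry(LPCWSTR zPath){
  int i;
  for(i=0; i<=MAX_RETRY; i++){
    if( DeleteFileW(zPath) ) return TRUE;
    switch( GetLastError() ){
      case ERROR_ACCESS_DENIED:
      case ERROR_LOCK_VIOLATION:
      case ERROR_SHARING_VIOLATION:
        Sleep(RETRY_DELAY*(i+1));    /* linear back-off between attempts */
        continue;
      default:
        return FALSE;                /* non-transient error: fail at once */
    }
  }
  return FALSE;                      /* still failing after MAX_RETRY */
}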
@@ -36461,17 +32925,17 @@ static int winceCreateLock(const char *zFilename, winFile *pFile){
/* Acquire the mutex before continuing */
winceMutexAcquire(pFile->hMutex);
-
- /* Since the names of named mutexes, semaphores, file mappings etc are
+
+ /* Since the names of named mutexes, semaphores, file mappings etc are
** case-sensitive, take advantage of that by uppercasing the mutex name
** and using that as the shared filemapping name.
*/
osCharUpperW(zName);
pFile->hShared = osCreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
PAGE_READWRITE, 0, sizeof(winceLock),
- zName);
+ zName);
- /* Set a flag that indicates we're the first to create the memory so it
+ /* Set a flag that indicates we're the first to create the memory so it
** must be zero-initialized */
lastErrno = osGetLastError();
if (lastErrno == ERROR_ALREADY_EXISTS){
@@ -36482,7 +32946,7 @@ static int winceCreateLock(const char *zFilename, winFile *pFile){
/* If we succeeded in making the shared memory handle, map it. */
if( pFile->hShared ){
- pFile->shared = (winceLock*)osMapViewOfFile(pFile->hShared,
+ pFile->shared = (winceLock*)osMapViewOfFile(pFile->hShared,
FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock));
/* If mapping failed, close the shared memory handle and erase it */
if( !pFile->shared ){
@@ -36508,7 +32972,7 @@ static int winceCreateLock(const char *zFilename, winFile *pFile){
pFile->hMutex = NULL;
return SQLITE_IOERR;
}
-
+
/* Initialize the shared memory if we're supposed to */
if( bInit ){
memset(pFile->shared, 0, sizeof(winceLock));
@@ -36546,13 +33010,13 @@ static void winceDestroyLock(winFile *pFile){
osCloseHandle(pFile->hShared);
/* Done with the mutex */
- winceMutexRelease(pFile->hMutex);
+ winceMutexRelease(pFile->hMutex);
osCloseHandle(pFile->hMutex);
pFile->hMutex = NULL;
}
}
-/*
+/*
** An implementation of the LockFile() API of Windows for CE
*/
static BOOL winceLockFile(
@@ -36763,8 +33227,8 @@ static BOOL winUnlockFile(
#endif
/*
-** Move the current position of the file handle passed as the first
-** argument to offset iOffset within the file. If successful, return 0.
+** Move the current position of the file handle passed as the first
+** argument to offset iOffset within the file. If successful, return 0.
** Otherwise, set pFile->lastErrno and return non-zero.
*/
static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){
@@ -36779,11 +33243,11 @@ static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){
upperBits = (LONG)((iOffset>>32) & 0x7fffffff);
lowerBits = (LONG)(iOffset & 0xffffffff);
- /* API oddity: If successful, SetFilePointer() returns a dword
+ /* API oddity: If successful, SetFilePointer() returns a dword
** containing the lower 32-bits of the new file-offset. Or, if it fails,
- ** it returns INVALID_SET_FILE_POINTER. However according to MSDN,
- ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine
- ** whether an error has actually occurred, it is also necessary to call
+ ** it returns INVALID_SET_FILE_POINTER. However according to MSDN,
+ ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine
+ ** whether an error has actually occurred, it is also necessary to call
** GetLastError().
*/
dwRet = osSetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN);
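As the comment notes, SetFilePointer() can legitimately return INVALID_SET_FILE_POINTER, so GetLastError() decides whether the call actually failed. A small sketch of that disambiguation (illustrative only, not the SQLite routine):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>

/* Seek hFile to absolute offset iOffset; return 0 on success, -1 on error. */
static int seek64(HANDLE hFile, LONGLONG iOffset){
  LONG lo = (LONG)(iOffset & 0xffffffff);
  LONG hi = (LONG)((iOffset>>32) & 0x7fffffff);
  DWORD dwRet = SetFilePointer(hFile, lo, &hi, FILE_BEGIN);
  if( dwRet==INVALID_SET_FILE_POINTER && GetLastError()!=NO_ERROR ){
    return -1;   /* a real failure, not just an unlucky new offset */
  }
  return 0;
}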
@@ -36849,8 +33313,7 @@ static int winClose(sqlite3_file *id){
assert( pFile->pShm==0 );
#endif
assert( pFile->h!=NULL && pFile->h!=INVALID_HANDLE_VALUE );
- OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("CLOSE file=%p\n", pFile->h));
#if SQLITE_MAX_MMAP_SIZE>0
winUnmapfile(pFile);
@@ -36867,7 +33330,7 @@ static int winClose(sqlite3_file *id){
int cnt = 0;
while(
osDeleteFileW(pFile->zDeleteOnClose)==0
- && osGetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff
+ && osGetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff
&& cnt++ < WINCE_DELETION_ATTEMPTS
){
sqlite3_win32_sleep(100); /* Wait a little before trying again */
@@ -36879,8 +33342,7 @@ static int winClose(sqlite3_file *id){
pFile->h = NULL;
}
OpenCounter(-1);
- OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p, rc=%s\n",
- osGetCurrentProcessId(), pFile, pFile->h, rc ? "ok" : "failed"));
+ OSTRACE(("CLOSE file=%p, rc=%s\n", pFile->h, rc ? "ok" : "failed"));
return rc ? SQLITE_OK
: winLogError(SQLITE_IOERR_CLOSE, osGetLastError(),
"winClose", pFile->zPath);
@@ -36897,7 +33359,7 @@ static int winRead(
int amt, /* Number of bytes to read */
sqlite3_int64 offset /* Begin reading at this offset */
){
-#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if !SQLITE_OS_WINCE
OVERLAPPED overlapped; /* The offset for ReadFile. */
#endif
winFile *pFile = (winFile*)id; /* file handle */
@@ -36908,8 +33370,7 @@ static int winRead(
assert( amt>0 );
assert( offset>=0 );
SimulateIOError(return SQLITE_IOERR_READ);
- OSTRACE(("READ pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, "
- "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,
+ OSTRACE(("READ file=%p, buffer=%p, amount=%d, offset=%lld, lock=%d\n",
pFile->h, pBuf, amt, offset, pFile->locktype));
#if SQLITE_MAX_MMAP_SIZE>0
@@ -36918,8 +33379,7 @@ static int winRead(
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
- OSTRACE(("READ-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("READ-MMAP file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}else{
int nCopy = (int)(pFile->mmapSize - offset);
@@ -36931,10 +33391,9 @@ static int winRead(
}
#endif
-#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if SQLITE_OS_WINCE
if( winSeekFile(pFile, offset) ){
- OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("READ file=%p, rc=SQLITE_FULL\n", pFile->h));
return SQLITE_FULL;
}
while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){
@@ -36948,22 +33407,19 @@ static int winRead(
DWORD lastErrno;
if( winRetryIoerr(&nRetry, &lastErrno) ) continue;
pFile->lastErrno = lastErrno;
- OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_READ\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("READ file=%p, rc=SQLITE_IOERR_READ\n", pFile->h));
return winLogError(SQLITE_IOERR_READ, pFile->lastErrno,
"winRead", pFile->zPath);
}
- winLogIoerr(nRetry, __LINE__);
+ winLogIoerr(nRetry);
if( nRead<(DWORD)amt ){
/* Unread parts of the buffer must be zero-filled */
memset(&((char*)pBuf)[nRead], 0, amt-nRead);
- OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_SHORT_READ\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("READ file=%p, rc=SQLITE_IOERR_SHORT_READ\n", pFile->h));
return SQLITE_IOERR_SHORT_READ;
}
- OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("READ file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
@@ -36986,8 +33442,7 @@ static int winWrite(
SimulateIOError(return SQLITE_IOERR_WRITE);
SimulateDiskfullError(return SQLITE_FULL);
- OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, "
- "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,
+ OSTRACE(("WRITE file=%p, buffer=%p, amount=%d, offset=%lld, lock=%d\n",
pFile->h, pBuf, amt, offset, pFile->locktype));
#if SQLITE_MAX_MMAP_SIZE>0
@@ -36996,8 +33451,7 @@ static int winWrite(
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
- OSTRACE(("WRITE-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("WRITE-MMAP file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}else{
int nCopy = (int)(pFile->mmapSize - offset);
@@ -37009,13 +33463,13 @@ static int winWrite(
}
#endif
-#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if SQLITE_OS_WINCE
rc = winSeekFile(pFile, offset);
if( rc==0 ){
#else
{
#endif
-#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if !SQLITE_OS_WINCE
OVERLAPPED overlapped; /* The offset for WriteFile. */
#endif
u8 *aRem = (u8 *)pBuf; /* Data yet to be written */
@@ -37023,14 +33477,14 @@ static int winWrite(
DWORD nWrite; /* Bytes written by each WriteFile() call */
DWORD lastErrno = NO_ERROR; /* Value returned by GetLastError() */
-#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if !SQLITE_OS_WINCE
memset(&overlapped, 0, sizeof(OVERLAPPED));
overlapped.Offset = (LONG)(offset & 0xffffffff);
overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff);
#endif
while( nRem>0 ){
-#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if SQLITE_OS_WINCE
if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, 0) ){
#else
if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, &overlapped) ){
@@ -37043,7 +33497,7 @@ static int winWrite(
lastErrno = osGetLastError();
break;
}
-#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+#if !SQLITE_OS_WINCE
offset += nWrite;
overlapped.Offset = (LONG)(offset & 0xffffffff);
overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff);
@@ -37060,20 +33514,17 @@ static int winWrite(
if( rc ){
if( ( pFile->lastErrno==ERROR_HANDLE_DISK_FULL )
|| ( pFile->lastErrno==ERROR_DISK_FULL )){
- OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("WRITE file=%p, rc=SQLITE_FULL\n", pFile->h));
return winLogError(SQLITE_FULL, pFile->lastErrno,
"winWrite1", pFile->zPath);
}
- OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_WRITE\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("WRITE file=%p, rc=SQLITE_IOERR_WRITE\n", pFile->h));
return winLogError(SQLITE_IOERR_WRITE, pFile->lastErrno,
"winWrite2", pFile->zPath);
}else{
- winLogIoerr(nRetry, __LINE__);
+ winLogIoerr(nRetry);
}
- OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("WRITE file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
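On non-CE builds the diffed winWrite() positions each WriteFile() call with an OVERLAPPED structure rather than seeking first. A minimal sketch of a positioned write on a synchronous handle (not the SQLite routine):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>
#include <string.h>

static BOOL writeAt(HANDLE h, const void *pBuf, DWORD nBuf, LONGLONG iOfst){
  OVERLAPPED ov;
  DWORD nWritten = 0;
  memset(&ov, 0, sizeof(ov));
  ov.Offset     = (DWORD)(iOfst & 0xffffffff);   /* low 32 bits of offset  */
  ov.OffsetHigh = (DWORD)(iOfst >> 32);          /* high 32 bits of offset */
  if( !WriteFile(h, pBuf, nBuf, &nWritten, &ov) ) return FALSE;
  return nWritten==nBuf;                         /* short writes are errors */
}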
@@ -37087,8 +33538,8 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
assert( pFile );
SimulateIOError(return SQLITE_IOERR_TRUNCATE);
- OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, size=%lld, lock=%d\n",
- osGetCurrentProcessId(), pFile, pFile->h, nByte, pFile->locktype));
+ OSTRACE(("TRUNCATE file=%p, size=%lld, lock=%d\n",
+ pFile->h, nByte, pFile->locktype));
/* If the user has configured a chunk-size for this file, truncate the
** file so that it consists of an integer number of chunks (i.e. the
@@ -37120,8 +33571,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
}
#endif
- OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, rc=%s\n",
- osGetCurrentProcessId(), pFile, pFile->h, sqlite3ErrName(rc)));
+ OSTRACE(("TRUNCATE file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc)));
return rc;
}
@@ -37145,7 +33595,7 @@ static int winSync(sqlite3_file *id, int flags){
BOOL rc;
#endif
#if !defined(NDEBUG) || !defined(SQLITE_NO_SYNC) || \
- defined(SQLITE_HAVE_OS_TRACE)
+ (defined(SQLITE_TEST) && defined(SQLITE_DEBUG))
/*
** Used when SQLITE_NO_SYNC is not defined and by the assert() and/or
** OSTRACE() macros.
@@ -37166,9 +33616,8 @@ static int winSync(sqlite3_file *id, int flags){
*/
SimulateDiskfullError( return SQLITE_FULL );
- OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, flags=%x, lock=%d\n",
- osGetCurrentProcessId(), pFile, pFile->h, flags,
- pFile->locktype));
+ OSTRACE(("SYNC file=%p, flags=%x, lock=%d\n",
+ pFile->h, flags, pFile->locktype));
#ifndef SQLITE_TEST
UNUSED_PARAMETER(flags);
@@ -37183,38 +33632,19 @@ static int winSync(sqlite3_file *id, int flags){
** no-op
*/
#ifdef SQLITE_NO_SYNC
- OSTRACE(("SYNC-NOP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("SYNC-NOP file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
#else
-#if SQLITE_MAX_MMAP_SIZE>0
- if( pFile->pMapRegion ){
- if( osFlushViewOfFile(pFile->pMapRegion, 0) ){
- OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, "
- "rc=SQLITE_OK\n", osGetCurrentProcessId(),
- pFile, pFile->pMapRegion));
- }else{
- pFile->lastErrno = osGetLastError();
- OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, "
- "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(),
- pFile, pFile->pMapRegion));
- return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
- "winSync1", pFile->zPath);
- }
- }
-#endif
rc = osFlushFileBuffers(pFile->h);
SimulateIOError( rc=FALSE );
if( rc ){
- OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("SYNC file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}else{
pFile->lastErrno = osGetLastError();
- OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_FSYNC\n",
- osGetCurrentProcessId(), pFile, pFile->h));
+ OSTRACE(("SYNC file=%p, rc=SQLITE_IOERR_FSYNC\n", pFile->h));
return winLogError(SQLITE_IOERR_FSYNC, pFile->lastErrno,
- "winSync2", pFile->zPath);
+ "winSync", pFile->zPath);
}
#endif
}
@@ -37328,7 +33758,7 @@ static int winGetReadLock(winFile *pFile){
pFile->lastErrno = osGetLastError();
/* No need to log a failure to lock */
}
- OSTRACE(("READ-LOCK file=%p, result=%d\n", pFile->h, res));
+ OSTRACE(("READ-LOCK file=%p, rc=%s\n", pFile->h, sqlite3ErrName(res)));
return res;
}
@@ -37352,7 +33782,7 @@ static int winUnlockReadLock(winFile *pFile){
winLogError(SQLITE_IOERR_UNLOCK, pFile->lastErrno,
"winUnlockReadLock", pFile->zPath);
}
- OSTRACE(("READ-UNLOCK file=%p, result=%d\n", pFile->h, res));
+ OSTRACE(("READ-UNLOCK file=%p, rc=%s\n", pFile->h, sqlite3ErrName(res)));
return res;
}
@@ -37403,12 +33833,6 @@ static int winLock(sqlite3_file *id, int locktype){
return SQLITE_OK;
}
- /* Do not allow any kind of write-lock on a read-only database
- */
- if( (pFile->ctrlFlags & WINFILE_RDONLY)!=0 && locktype>=RESERVED_LOCK ){
- return SQLITE_IOERR_LOCK;
- }
-
/* Make sure the locking sequence is correct
*/
assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK );
@@ -37433,16 +33857,8 @@ static int winLock(sqlite3_file *id, int locktype){
** If you are using this code as a model for alternative VFSes, do not
** copy this retry logic. It is a hack intended for Windows only.
*/
- lastErrno = osGetLastError();
- OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n",
- pFile->h, cnt, res));
- if( lastErrno==ERROR_INVALID_HANDLE ){
- pFile->lastErrno = lastErrno;
- rc = SQLITE_IOERR_LOCK;
- OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n",
- pFile->h, cnt, sqlite3ErrName(rc)));
- return rc;
- }
+ OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, rc=%s\n",
+ pFile->h, cnt, sqlite3ErrName(res)));
if( cnt ) sqlite3_win32_sleep(1);
}
gotPendingLock = res;
@@ -37527,7 +33943,7 @@ static int winLock(sqlite3_file *id, int locktype){
** non-zero, otherwise zero.
*/
static int winCheckReservedLock(sqlite3_file *id, int *pResOut){
- int res;
+ int rc;
winFile *pFile = (winFile*)id;
SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
@@ -37535,17 +33951,17 @@ static int winCheckReservedLock(sqlite3_file *id, int *pResOut){
assert( id!=0 );
if( pFile->locktype>=RESERVED_LOCK ){
- res = 1;
- OSTRACE(("TEST-WR-LOCK file=%p, result=%d (local)\n", pFile->h, res));
+ rc = 1;
+ OSTRACE(("TEST-WR-LOCK file=%p, rc=%d (local)\n", pFile->h, rc));
}else{
- res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE, 0, 1, 0);
- if( res ){
+ rc = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE, 0, 1, 0);
+ if( rc ){
winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0);
}
- res = !res;
- OSTRACE(("TEST-WR-LOCK file=%p, result=%d (remote)\n", pFile->h, res));
+ rc = !rc;
+ OSTRACE(("TEST-WR-LOCK file=%p, rc=%d (remote)\n", pFile->h, rc));
}
- *pResOut = res;
+ *pResOut = rc;
OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n",
pFile->h, pResOut, *pResOut));
return SQLITE_OK;
@@ -37596,7 +34012,7 @@ static int winUnlock(sqlite3_file *id, int locktype){
}
/*
-** If *pArg is initially negative then this is a query. Set *pArg to
+** If *pArg is inititially negative then this is a query. Set *pArg to
** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set.
**
** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
@@ -37667,7 +34083,7 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){
return SQLITE_OK;
}
case SQLITE_FCNTL_VFSNAME: {
- *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName);
+ *(char**)pArg = sqlite3_mprintf("win32");
OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
@@ -37686,17 +34102,6 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){
OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
-#ifdef SQLITE_TEST
- case SQLITE_FCNTL_WIN32_SET_HANDLE: {
- LPHANDLE phFile = (LPHANDLE)pArg;
- HANDLE hOldFile = pFile->h;
- pFile->h = *phFile;
- *phFile = hOldFile;
- OSTRACE(("FCNTL oldFile=%p, newFile=%p, rc=SQLITE_OK\n",
- hOldFile, pFile->h));
- return SQLITE_OK;
- }
-#endif
case SQLITE_FCNTL_TEMPFILENAME: {
char *zTFile = 0;
int rc = winGetTempname(pFile->pVfs, &zTFile);
@@ -37754,23 +34159,23 @@ static int winDeviceCharacteristics(sqlite3_file *id){
((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0);
}
-/*
+/*
** Windows will only let you create file view mappings
** on allocation size granularity boundaries.
** During sqlite3_os_init() we do a GetSystemInfo()
** to get the granularity size.
*/
-static SYSTEM_INFO winSysInfo;
+SYSTEM_INFO winSysInfo;
#ifndef SQLITE_OMIT_WAL
/*
** Helper functions to obtain and relinquish the global mutex. The
-** global mutex is used to protect the winLockInfo objects used by
+** global mutex is used to protect the winLockInfo objects used by
** this file, all of which may be shared by multiple threads.
**
-** Function winShmMutexHeld() is used to assert() that the global mutex
-** is held when required. This function is only used as part of assert()
+** Function winShmMutexHeld() is used to assert() that the global mutex
+** is held when required. This function is only used as part of assert()
** statements. e.g.
**
** winShmEnterMutex()
@@ -37778,14 +34183,14 @@ static SYSTEM_INFO winSysInfo;
** winShmLeaveMutex()
*/
static void winShmEnterMutex(void){
- sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
}
static void winShmLeaveMutex(void){
- sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
}
-#ifndef NDEBUG
+#ifdef SQLITE_DEBUG
static int winShmMutexHeld(void) {
- return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
+ return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
}
#endif
@@ -37800,10 +34205,10 @@ static int winShmMutexHeld(void) {
** this object or while reading or writing the following fields:
**
** nRef
-** pNext
+** pNext
**
** The following fields are read-only after the object is created:
-**
+**
** fid
** zFilename
**
@@ -37828,7 +34233,7 @@ struct winShmNode {
int nRef; /* Number of winShm objects pointing to this */
winShm *pFirst; /* All winShm objects pointing to this */
winShmNode *pNext; /* Next in list of all winShmNode objects */
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+#ifdef SQLITE_DEBUG
u8 nextShmId; /* Next available winShm.id value */
#endif
};
@@ -37859,7 +34264,7 @@ struct winShm {
u8 hasMutex; /* True if holding the winShmNode mutex */
u16 sharedMask; /* Mask of shared locks held */
u16 exclMask; /* Mask of exclusive locks held */
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+#ifdef SQLITE_DEBUG
u8 id; /* Id of this connection with its winShmNode */
#endif
};
@@ -37899,7 +34304,7 @@ static int winShmSystemLock(
if( lockType == _SHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK;
rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0);
}
-
+
if( rc!= 0 ){
rc = SQLITE_OK;
}else{
@@ -37995,7 +34400,7 @@ static int winOpenSharedMemory(winFile *pDbFd){
}
pNew->zFilename = (char*)&pNew[1];
sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath);
- sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename);
+ sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename);
/* Look to see if there is an existing winShmNode that can be used.
** If no matching winShmNode currently exists, create a new one.
@@ -38032,7 +34437,7 @@ static int winOpenSharedMemory(winFile *pDbFd){
}
/* Check to see if another process is holding the dead-man switch.
- ** If not, truncate the file to zero length.
+ ** If not, truncate the file to zero length.
*/
if( winShmSystemLock(pShmNode, _SHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){
rc = winTruncate((sqlite3_file *)&pShmNode->hFile, 0);
@@ -38050,7 +34455,7 @@ static int winOpenSharedMemory(winFile *pDbFd){
/* Make the new connection a child of the winShmNode */
p->pShmNode = pShmNode;
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+#ifdef SQLITE_DEBUG
p->id = pShmNode->nextShmId++;
#endif
pShmNode->nRef++;
@@ -38061,7 +34466,7 @@ static int winOpenSharedMemory(winFile *pDbFd){
** the cover of the winShmEnterMutex() mutex and the pointer from the
** new (struct winShm) object to the pShmNode has been set. All that is
** left to do is to link the new object into the linked list starting
- ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex
+ ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex
** mutex.
*/
sqlite3_mutex_enter(pShmNode->mutex);
@@ -38081,7 +34486,7 @@ shm_open_err:
}
/*
-** Close a connection to shared-memory. Delete the underlying
+** Close a connection to shared-memory. Delete the underlying
** storage if deleteFlag is true.
*/
static int winShmUnmap(
@@ -38170,7 +34575,7 @@ static int winShmLock(
if( rc==SQLITE_OK ){
p->exclMask &= ~mask;
p->sharedMask &= ~mask;
- }
+ }
}else if( flags & SQLITE_SHM_SHARED ){
u16 allShared = 0; /* Union of locks held by connections other than "p" */
@@ -38209,7 +34614,7 @@ static int winShmLock(
break;
}
}
-
+
/* Get the exclusive locks at the system level. Then if successful
** also mark the local connection as being locked.
*/
@@ -38229,7 +34634,7 @@ static int winShmLock(
}
/*
-** Implement a memory barrier or memory fence on shared memory.
+** Implement a memory barrier or memory fence on shared memory.
**
** All loads and stores begun before the barrier must complete before
** any load or store begun after the barrier.
@@ -38238,28 +34643,28 @@ static void winShmBarrier(
sqlite3_file *fd /* Database holding the shared memory */
){
UNUSED_PARAMETER(fd);
- sqlite3MemoryBarrier(); /* compiler-defined memory barrier */
- winShmEnterMutex(); /* Also mutex, for redundancy */
+ /* MemoryBarrier(); // does not work -- do not know why not */
+ winShmEnterMutex();
winShmLeaveMutex();
}
/*
-** This function is called to obtain a pointer to region iRegion of the
-** shared-memory associated with the database file fd. Shared-memory regions
-** are numbered starting from zero. Each shared-memory region is szRegion
+** This function is called to obtain a pointer to region iRegion of the
+** shared-memory associated with the database file fd. Shared-memory regions
+** are numbered starting from zero. Each shared-memory region is szRegion
** bytes in size.
**
** If an error occurs, an error code is returned and *pp is set to NULL.
**
** Otherwise, if the isWrite parameter is 0 and the requested shared-memory
** region has not been allocated (by any client, including one running in a
-** separate process), then *pp is set to NULL and SQLITE_OK returned. If
-** isWrite is non-zero and the requested shared-memory region has not yet
+** separate process), then *pp is set to NULL and SQLITE_OK returned. If
+** isWrite is non-zero and the requested shared-memory region has not yet
** been allocated, it is allocated by this function.
**
** If the shared-memory region has already been allocated or is allocated by
-** this call as described above, then it is mapped into this processes
-** address space (if it is not already), *pp is set to point to the mapped
+** this call as described above, then it is mapped into this processes
+** address space (if it is not already), *pp is set to point to the mapped
** memory and SQLITE_OK returned.
*/
static int winShmMap(
@@ -38270,16 +34675,16 @@ static int winShmMap(
void volatile **pp /* OUT: Mapped memory */
){
winFile *pDbFd = (winFile*)fd;
- winShm *pShm = pDbFd->pShm;
+ winShm *p = pDbFd->pShm;
winShmNode *pShmNode;
int rc = SQLITE_OK;
- if( !pShm ){
+ if( !p ){
rc = winOpenSharedMemory(pDbFd);
if( rc!=SQLITE_OK ) return rc;
- pShm = pDbFd->pShm;
+ p = pDbFd->pShm;
}
- pShmNode = pShm->pShmNode;
+ pShmNode = p->pShmNode;
sqlite3_mutex_enter(pShmNode->mutex);
assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 );
@@ -38319,7 +34724,7 @@ static int winShmMap(
}
/* Map the requested memory region into this processes address space. */
- apNew = (struct ShmRegion *)sqlite3_realloc64(
+ apNew = (struct ShmRegion *)sqlite3_realloc(
pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0])
);
if( !apNew ){
@@ -38331,17 +34736,17 @@ static int winShmMap(
while( pShmNode->nRegion<=iRegion ){
HANDLE hMap = NULL; /* file-mapping handle */
void *pMap = 0; /* Mapped memory region */
-
+
#if SQLITE_OS_WINRT
hMap = osCreateFileMappingFromApp(pShmNode->hFile.h,
NULL, PAGE_READWRITE, nByte, NULL
);
#elif defined(SQLITE_WIN32_HAS_WIDE)
- hMap = osCreateFileMappingW(pShmNode->hFile.h,
+ hMap = osCreateFileMappingW(pShmNode->hFile.h,
NULL, PAGE_READWRITE, 0, nByte, NULL
);
#elif defined(SQLITE_WIN32_HAS_ANSI)
- hMap = osCreateFileMappingA(pShmNode->hFile.h,
+ hMap = osCreateFileMappingA(pShmNode->hFile.h,
NULL, PAGE_READWRITE, 0, nByte, NULL
);
#endif
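winShmMap() grows the -shm file and then maps each region through one of the CreateFileMapping variants shown above followed by MapViewOfFile(). A bare-bones sketch of that sequence, with hypothetical names and no WinRT handling:

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>

/* Map the first nByte bytes of an open, writable file handle. */
static void *mapRegion(HANDLE hFile, DWORD nByte, HANDLE *phMap){
  void *p = NULL;
  *phMap = CreateFileMappingW(hFile, NULL, PAGE_READWRITE, 0, nByte, NULL);
  if( *phMap ){
    p = MapViewOfFile(*phMap, FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, nByte);
    if( p==NULL ){
      CloseHandle(*phMap);   /* keep handle and view lifetimes consistent */
      *phMap = NULL;
    }
  }
  return p;   /* release with UnmapViewOfFile(), then CloseHandle(*phMap) */
}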
@@ -38438,14 +34843,14 @@ static int winUnmapfile(winFile *pFile){
/*
** Memory map or remap the file opened by file-descriptor pFd (if the file
-** is already mapped, the existing mapping is replaced by the new). Or, if
-** there already exists a mapping for this file, and there are still
+** is already mapped, the existing mapping is replaced by the new). Or, if
+** there already exists a mapping for this file, and there are still
** outstanding xFetch() references to it, this function is a no-op.
**
-** If parameter nByte is non-negative, then it is the requested size of
-** the mapping to create. Otherwise, if nByte is less than zero, then the
+** If parameter nByte is non-negative, then it is the requested size of
+** the mapping to create. Otherwise, if nByte is less than zero, then the
** requested size is the size of the file on disk. The actual size of the
-** created mapping is either the requested size or the value configured
+** created mapping is either the requested size or the value configured
** using SQLITE_FCNTL_MMAP_SIZE, whichever is smaller.
**
** SQLITE_OK is returned if no error occurs (even if the mapping is not
@@ -38474,7 +34879,7 @@ static int winMapfile(winFile *pFd, sqlite3_int64 nByte){
nMap = pFd->mmapSizeMax;
}
nMap &= ~(sqlite3_int64)(winSysInfo.dwPageSize - 1);
-
+
if( nMap==0 && pFd->mmapSize>0 ){
winUnmapfile(pFd);
}
@@ -38546,7 +34951,7 @@ static int winMapfile(winFile *pFd, sqlite3_int64 nByte){
** Finally, if an error does occur, return an SQLite error code. The final
** value of *pp is undefined in this case.
**
-** If this function does return a pointer, the caller must eventually
+** If this function does return a pointer, the caller must eventually
** release the reference by calling winUnfetch().
*/
static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
@@ -38581,20 +34986,20 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
}
/*
-** If the third argument is non-NULL, then this function releases a
+** If the third argument is non-NULL, then this function releases a
** reference obtained by an earlier call to winFetch(). The second
** argument passed to this function must be the same as the corresponding
-** argument that was passed to the winFetch() invocation.
+** argument that was passed to the winFetch() invocation.
**
-** Or, if the third argument is NULL, then this function is being called
-** to inform the VFS layer that, according to POSIX, any existing mapping
+** Or, if the third argument is NULL, then this function is being called
+** to inform the VFS layer that, according to POSIX, any existing mapping
** may now be invalid and should be unmapped.
*/
static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){
#if SQLITE_MAX_MMAP_SIZE>0
winFile *pFd = (winFile*)fd; /* The underlying database file */
- /* If p==0 (unmap the entire file) then there must be no outstanding
+ /* If p==0 (unmap the entire file) then there must be no outstanding
** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
** then there must be at least one outstanding. */
assert( (p==0)==(pFd->nFetchOut==0) );
@@ -38610,7 +35015,7 @@ static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){
}else{
/* FIXME: If Windows truly always prevents truncating or deleting a
** file while a mapping is held, then the following winUnmapfile() call
- ** is unnecessary can be omitted - potentially improving
+ ** is unnecessary can can be omitted - potentially improving
** performance. */
winUnmapfile(pFd);
}
@@ -38740,7 +35145,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
/* It's odd to simulate an io-error here, but really this is just
** using the io-error infrastructure to test that SQLite handles this
- ** function failing.
+ ** function failing.
*/
SimulateIOError( return SQLITE_IOERR );
@@ -38922,7 +35327,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
}
/*
- ** Check that the output buffer is large enough for the temporary file
+ ** Check that the output buffer is large enough for the temporary file
** name in the following format:
**
** "<temporary_directory>/etilqs_XXXXXXXXXXXXXXX\0\0"
@@ -39025,8 +35430,8 @@ static int winOpen(
#ifndef NDEBUG
int isOpenJournal = (isCreate && (
- eType==SQLITE_OPEN_MASTER_JOURNAL
- || eType==SQLITE_OPEN_MAIN_JOURNAL
+ eType==SQLITE_OPEN_MASTER_JOURNAL
+ || eType==SQLITE_OPEN_MAIN_JOURNAL
|| eType==SQLITE_OPEN_WAL
));
#endif
@@ -39034,9 +35439,9 @@ static int winOpen(
OSTRACE(("OPEN name=%s, pFile=%p, flags=%x, pOutFlags=%p\n",
zUtf8Name, id, flags, pOutFlags));
- /* Check the following statements are true:
+ /* Check the following statements are true:
**
- ** (a) Exactly one of the READWRITE and READONLY flags must be set, and
+ ** (a) Exactly one of the READWRITE and READONLY flags must be set, and
** (b) if CREATE is set, then READWRITE must also be set, and
** (c) if EXCLUSIVE is set, then CREATE must also be set.
** (d) if DELETEONCLOSE is set, then CREATE must also be set.
@@ -39046,7 +35451,7 @@ static int winOpen(
assert(isExclusive==0 || isCreate);
assert(isDelete==0 || isCreate);
- /* The main DB, main journal, WAL file and master journal are never
+ /* The main DB, main journal, WAL file and master journal are never
** automatically deleted. Nor are they ever temporary files. */
assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB );
assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL );
@@ -39054,9 +35459,9 @@ static int winOpen(
assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL );
/* Assert that the upper layer has set one of the "file-type" flags. */
- assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB
- || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL
- || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL
+ assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB
+ || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL
+ || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL
|| eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL
);
@@ -39071,8 +35476,8 @@ static int winOpen(
}
#endif
- /* If the second argument to this function is NULL, generate a
- ** temporary file name to use
+ /* If the second argument to this function is NULL, generate a
+ ** temporary file name to use
*/
if( !zUtf8Name ){
assert( isDelete && !isOpenJournal );
@@ -39112,8 +35517,8 @@ static int winOpen(
dwDesiredAccess = GENERIC_READ;
}
- /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is
- ** created. SQLite doesn't use it to indicate "exclusive access"
+ /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is
+ ** created. SQLite doesn't use it to indicate "exclusive access"
** as it is usually understood.
*/
if( isExclusive ){
@@ -39191,7 +35596,7 @@ static int winOpen(
}
}
#endif
- winLogIoerr(cnt, __LINE__);
+ winLogIoerr(cnt);
OSTRACE(("OPEN file=%p, name=%s, access=%lx, rc=%s\n", h, zUtf8Name,
dwDesiredAccess, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok"));
@@ -39202,7 +35607,7 @@ static int winOpen(
sqlite3_free(zConverted);
sqlite3_free(zTmpname);
if( isReadWrite && !isExclusive ){
- return winOpen(pVfs, zName, id,
+ return winOpen(pVfs, zName, id,
((flags|SQLITE_OPEN_READONLY) &
~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE)),
pOutFlags);
@@ -39375,7 +35780,7 @@ static int winDelete(
if( rc && rc!=SQLITE_IOERR_DELETE_NOENT ){
rc = winLogError(SQLITE_IOERR_DELETE, lastErrno, "winDelete", zFilename);
}else{
- winLogIoerr(cnt, __LINE__);
+ winLogIoerr(cnt);
}
sqlite3_free(zConverted);
OSTRACE(("DELETE name=%s, rc=%s\n", zFilename, sqlite3ErrName(rc)));
@@ -39411,21 +35816,21 @@ static int winAccess(
WIN32_FILE_ATTRIBUTE_DATA sAttrData;
memset(&sAttrData, 0, sizeof(sAttrData));
while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted,
- GetFileExInfoStandard,
+ GetFileExInfoStandard,
&sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){}
if( rc ){
/* For an SQLITE_ACCESS_EXISTS query, treat a zero-length file
** as if it does not exist.
*/
if( flags==SQLITE_ACCESS_EXISTS
- && sAttrData.nFileSizeHigh==0
+ && sAttrData.nFileSizeHigh==0
&& sAttrData.nFileSizeLow==0 ){
attr = INVALID_FILE_ATTRIBUTES;
}else{
attr = sAttrData.dwFileAttributes;
}
}else{
- winLogIoerr(cnt, __LINE__);
+ winLogIoerr(cnt);
if( lastErrno!=ERROR_FILE_NOT_FOUND && lastErrno!=ERROR_PATH_NOT_FOUND ){
sqlite3_free(zConverted);
return winLogError(SQLITE_IOERR_ACCESS, lastErrno, "winAccess",
@@ -39517,7 +35922,7 @@ static int winFullPathname(
int nFull, /* Size of output buffer in bytes */
char *zFull /* Output buffer */
){
-
+
#if defined(__CYGWIN__)
SimulateIOError( return SQLITE_ERROR );
UNUSED_PARAMETER(nFull);
@@ -39694,29 +36099,15 @@ static int winFullPathname(
** Interfaces for opening a shared library, finding entry points
** within the shared library, and closing the shared library.
*/
+/*
+** Interfaces for opening a shared library, finding entry points
+** within the shared library, and closing the shared library.
+*/
static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){
HANDLE h;
-#if defined(__CYGWIN__)
- int nFull = pVfs->mxPathname+1;
- char *zFull = sqlite3MallocZero( nFull );
- void *zConverted = 0;
- if( zFull==0 ){
- OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
- return 0;
- }
- if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){
- sqlite3_free(zFull);
- OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
- return 0;
- }
- zConverted = winConvertFromUtf8Filename(zFull);
- sqlite3_free(zFull);
-#else
void *zConverted = winConvertFromUtf8Filename(zFilename);
UNUSED_PARAMETER(pVfs);
-#endif
if( zConverted==0 ){
- OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
return 0;
}
if( osIsNT() ){
@@ -39731,7 +36122,6 @@ static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){
h = osLoadLibraryA((char*)zConverted);
}
#endif
- OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)h));
sqlite3_free(zConverted);
return (void*)h;
}
@@ -39740,17 +36130,12 @@ static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){
winGetLastErrorMsg(osGetLastError(), nBuf, zBufOut);
}
static void (*winDlSym(sqlite3_vfs *pVfs,void *pH,const char *zSym))(void){
- FARPROC proc;
UNUSED_PARAMETER(pVfs);
- proc = osGetProcAddressA((HANDLE)pH, zSym);
- OSTRACE(("DLSYM handle=%p, symbol=%s, address=%p\n",
- (void*)pH, zSym, (void*)proc));
- return (void(*)(void))proc;
+ return (void(*)(void))osGetProcAddressA((HANDLE)pH, zSym);
}
static void winDlClose(sqlite3_vfs *pVfs, void *pHandle){
UNUSED_PARAMETER(pVfs);
osFreeLibrary((HANDLE)pHandle);
- OSTRACE(("DLCLOSE handle=%p\n", (void*)pHandle));
}
#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */
#define winDlOpen 0
@@ -39766,7 +36151,7 @@ static void winDlClose(sqlite3_vfs *pVfs, void *pHandle){
static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
int n = 0;
UNUSED_PARAMETER(pVfs);
-#if defined(SQLITE_TEST) || defined(SQLITE_OMIT_RANDOMNESS)
+#if defined(SQLITE_TEST)
n = nBuf;
memset(zBuf, 0, nBuf);
#else
@@ -39800,23 +36185,7 @@ static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
memcpy(&zBuf[n], &i, sizeof(i));
n += sizeof(i);
}
-#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
- if( sizeof(UUID)<=nBuf-n ){
- UUID id;
- memset(&id, 0, sizeof(UUID));
- osUuidCreate(&id);
- memcpy(&zBuf[n], &id, sizeof(UUID));
- n += sizeof(UUID);
- }
- if( sizeof(UUID)<=nBuf-n ){
- UUID id;
- memset(&id, 0, sizeof(UUID));
- osUuidCreateSequential(&id);
- memcpy(&zBuf[n], &id, sizeof(UUID));
- n += sizeof(UUID);
- }
#endif
-#endif /* defined(SQLITE_TEST) || defined(SQLITE_ZERO_PRNG_SEED) */
return n;
}
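The lines removed from winRandomness() folded freshly generated UUIDs into the seed material. A standalone sketch of that idea (assumes linking against Rpcrt4.lib; the helper name is invented):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>
#include <rpc.h>
#include <string.h>

static int addUuidEntropy(unsigned char *zBuf, int nBuf){
  UUID id;
  if( nBuf<(int)sizeof(UUID) ) return 0;
  memset(&id, 0, sizeof(UUID));
  UuidCreate(&id);            /* return value deliberately ignored, as in the
                              ** removed code: even RPC_S_UUID_LOCAL_ONLY still
                              ** yields usable bytes */
  memcpy(zBuf, &id, sizeof(UUID));
  return (int)sizeof(UUID);   /* number of bytes contributed to the seed */
}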
@@ -39846,12 +36215,12 @@ SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1
** epoch of noon in Greenwich on November 24, 4714 B.C according to the
** proleptic Gregorian calendar.
**
-** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date
+** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date
** cannot be found.
*/
static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){
- /* FILETIME structure is a 64-bit value representing the number of
- 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5).
+ /* FILETIME structure is a 64-bit value representing the number of
+ 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5).
*/
FILETIME ft;
static const sqlite3_int64 winFiletimeEpoch = 23058135*(sqlite3_int64)8640000;
@@ -39859,7 +36228,7 @@ static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){
static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000;
#endif
/* 2^32 - to avoid use of LL and warnings in gcc */
- static const sqlite3_int64 max32BitValue =
+ static const sqlite3_int64 max32BitValue =
(sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 +
(sqlite3_int64)294967296;
@@ -39875,7 +36244,7 @@ static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){
#endif
*piNow = winFiletimeEpoch +
- ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) +
+ ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) +
(sqlite3_int64)ft.dwLowDateTime)/(sqlite3_int64)10000;
#ifdef SQLITE_TEST
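The arithmetic in winCurrentTimeInt64() converts FILETIME's 100-nanosecond ticks since 1601-01-01 (Julian Day 2305813.5) into milliseconds on SQLite's Julian Day scale. A sketch that reproduces just that conversion for the current time (illustrative only):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <windows.h>

static long long julianDayMillisNow(void){
  /* Milliseconds from JD 0 to the FILETIME epoch: 2305813.5 days * 86400000 */
  static const long long filetimeEpoch = 23058135LL*8640000LL;
  FILETIME ft;
  ULARGE_INTEGER t;
  GetSystemTimeAsFileTime(&ft);
  t.LowPart  = ft.dwLowDateTime;
  t.HighPart = ft.dwHighDateTime;
  return filetimeEpoch + (long long)(t.QuadPart/10000);   /* 100ns -> ms */
}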
@@ -39940,7 +36309,7 @@ static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
/*
** Initialize and deinitialize the operating system interface.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
+SQLITE_API int sqlite3_os_init(void){
static sqlite3_vfs winVfs = {
3, /* iVersion */
sizeof(winFile), /* szOsFile */
@@ -39994,7 +36363,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
/* Double-check that the aSyscall[] array has been constructed
** correctly. See ticket [bb3a86e890c8e96ab] */
- assert( ArraySize(aSyscall)==80 );
+ assert( ArraySize(aSyscall)==76 );
/* get memory map allocation granularity */
memset(&winSysInfo, 0, sizeof(SYSTEM_INFO));
@@ -40012,10 +36381,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
sqlite3_vfs_register(&winLongPathVfs, 0);
#endif
- return SQLITE_OK;
+ return SQLITE_OK;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
+SQLITE_API int sqlite3_os_end(void){
#if SQLITE_OS_WINRT
if( sleepObj!=NULL ){
osCloseHandle(sleepObj);
@@ -40065,7 +36434,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
** start of a transaction, and is thus usually less than a few thousand,
** but can be as large as 2 billion for a really big database.
*/
-/* #include "sqliteInt.h" */
/* Size of the Bitvec structure in bytes. */
#define BITVEC_SZ 512
@@ -40157,10 +36525,10 @@ SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32 iSize){
** If p is NULL (if the bitmap has not been created) or if
** i is out of range, then return false.
*/
-SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec *p, u32 i){
- assert( p!=0 );
+SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec *p, u32 i){
+ if( p==0 ) return 0;
+ if( i>p->iSize || i==0 ) return 0;
i--;
- if( i>=p->iSize ) return 0;
while( p->iDivisor ){
u32 bin = i/p->iDivisor;
i = i%p->iDivisor;
@@ -40180,9 +36548,6 @@ SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec *p, u32 i){
return 0;
}
}
-SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec *p, u32 i){
- return p!=0 && sqlite3BitvecTestNotNull(p,i);
-}
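sqlite3BitvecTest() above exposes a 1-based interface over a bitmap that may recurse into sub-bitmaps or fall back to a hash. A flat, fixed-size sketch of the same test/set interface (no hashing or sub-bitmaps; not the SQLite structure):

/* Illustrative sketch only -- not part of this diff or of SQLite. */
#include <stdlib.h>

typedef struct MiniBitvec MiniBitvec;
struct MiniBitvec {
  unsigned int iSize;      /* number of usable bits, indexed 1..iSize */
  unsigned char aBit[1];   /* the bits themselves (allocated larger)  */
};

static MiniBitvec *miniBitvecCreate(unsigned int iSize){
  MiniBitvec *p = (MiniBitvec*)calloc(1, sizeof(*p) + iSize/8 + 1);
  if( p ) p->iSize = iSize;
  return p;
}
static int miniBitvecTest(MiniBitvec *p, unsigned int i){
  if( p==0 || i==0 || i>p->iSize ) return 0;   /* out of range is "clear" */
  i--;
  return (p->aBit[i/8] >> (i&7)) & 1;
}
static void miniBitvecSet(MiniBitvec *p, unsigned int i){
  if( p && i>=1 && i<=p->iSize ){
    i--;
    p->aBit[i/8] |= (unsigned char)(1 << (i&7));
  }
}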
/*
** Set the i-th bit. Return 0 on success and an error code if
@@ -40375,7 +36740,7 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){
** bits to act as the reference */
pBitvec = sqlite3BitvecCreate( sz );
pV = sqlite3MallocZero( (sz+7)/8 + 1 );
- pTmpSpace = sqlite3_malloc64(BITVEC_SZ);
+ pTmpSpace = sqlite3_malloc(BITVEC_SZ);
if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end;
/* NULL pBitvec tests */
@@ -40455,7 +36820,6 @@ bitvec_end:
*************************************************************************
** This file implements that page cache.
*/
-/* #include "sqliteInt.h" */
/*
** A complete page cache is an instance of this structure.
@@ -40463,112 +36827,119 @@ bitvec_end:
struct PCache {
PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */
PgHdr *pSynced; /* Last synced page in dirty page list */
- int nRefSum; /* Sum of ref counts over all pages */
+ int nRef; /* Number of referenced pages */
int szCache; /* Configured cache size */
int szPage; /* Size of every page in this cache */
int szExtra; /* Size of extra space for each page */
- u8 bPurgeable; /* True if pages are on backing store */
- u8 eCreate; /* eCreate value for for xFetch() */
+ int bPurgeable; /* True if pages are on backing store */
int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */
void *pStress; /* Argument to xStress */
sqlite3_pcache *pCache; /* Pluggable cache module */
+ PgHdr *pPage1; /* Reference to page 1 */
};
+/*
+** Some of the assert() macros in this code are too expensive to run
+** even during normal debugging. Use them only rarely on long-running
+** tests. Enable the expensive asserts using the
+** -DSQLITE_ENABLE_EXPENSIVE_ASSERT=1 compile-time option.
+*/
+#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
+# define expensive_assert(X) assert(X)
+#else
+# define expensive_assert(X)
+#endif
+
/********************************** Linked List Management ********************/
-/* Allowed values for second argument to pcacheManageDirtyList() */
-#define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */
-#define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */
-#define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */
+#if !defined(NDEBUG) && defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
+/*
+** Check that the pCache->pSynced variable is set correctly. If it
+** is not, either fail an assert or return zero. Otherwise, return
+** non-zero. This is only used in debugging builds, as follows:
+**
+** expensive_assert( pcacheCheckSynced(pCache) );
+*/
+static int pcacheCheckSynced(PCache *pCache){
+ PgHdr *p;
+ for(p=pCache->pDirtyTail; p!=pCache->pSynced; p=p->pDirtyPrev){
+ assert( p->nRef || (p->flags&PGHDR_NEED_SYNC) );
+ }
+ return (p==0 || p->nRef || (p->flags&PGHDR_NEED_SYNC)==0);
+}
+#endif /* !NDEBUG && SQLITE_ENABLE_EXPENSIVE_ASSERT */
/*
-** Manage pPage's participation on the dirty list. Bits of the addRemove
-** argument determine what operation to do. The 0x01 bit means first
-** remove pPage from the dirty list. The 0x02 bit means add pPage back to
-** the dirty list. Doing both moves pPage to the front of the dirty list.
+** Remove page pPage from the list of dirty pages.
*/
-static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
+static void pcacheRemoveFromDirtyList(PgHdr *pPage){
PCache *p = pPage->pCache;
- if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
- assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
- assert( pPage->pDirtyPrev || pPage==p->pDirty );
-
- /* Update the PCache1.pSynced variable if necessary. */
- if( p->pSynced==pPage ){
- PgHdr *pSynced = pPage->pDirtyPrev;
- while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){
- pSynced = pSynced->pDirtyPrev;
- }
- p->pSynced = pSynced;
- }
-
- if( pPage->pDirtyNext ){
- pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
- }else{
- assert( pPage==p->pDirtyTail );
- p->pDirtyTail = pPage->pDirtyPrev;
- }
- if( pPage->pDirtyPrev ){
- pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
- }else{
- assert( pPage==p->pDirty );
- p->pDirty = pPage->pDirtyNext;
- if( p->pDirty==0 && p->bPurgeable ){
- assert( p->eCreate==1 );
- p->eCreate = 2;
- }
+ assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
+ assert( pPage->pDirtyPrev || pPage==p->pDirty );
+
+ /* Update the PCache1.pSynced variable if necessary. */
+ if( p->pSynced==pPage ){
+ PgHdr *pSynced = pPage->pDirtyPrev;
+ while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){
+ pSynced = pSynced->pDirtyPrev;
}
- pPage->pDirtyNext = 0;
- pPage->pDirtyPrev = 0;
+ p->pSynced = pSynced;
}
- if( addRemove & PCACHE_DIRTYLIST_ADD ){
- assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage );
-
- pPage->pDirtyNext = p->pDirty;
- if( pPage->pDirtyNext ){
- assert( pPage->pDirtyNext->pDirtyPrev==0 );
- pPage->pDirtyNext->pDirtyPrev = pPage;
- }else{
- p->pDirtyTail = pPage;
- if( p->bPurgeable ){
- assert( p->eCreate==2 );
- p->eCreate = 1;
- }
- }
- p->pDirty = pPage;
- if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){
- p->pSynced = pPage;
- }
+
+ if( pPage->pDirtyNext ){
+ pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
+ }else{
+ assert( pPage==p->pDirtyTail );
+ p->pDirtyTail = pPage->pDirtyPrev;
+ }
+ if( pPage->pDirtyPrev ){
+ pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
+ }else{
+ assert( pPage==p->pDirty );
+ p->pDirty = pPage->pDirtyNext;
}
+ pPage->pDirtyNext = 0;
+ pPage->pDirtyPrev = 0;
+
+ expensive_assert( pcacheCheckSynced(p) );
}
/*
-** Wrapper around the pluggable caches xUnpin method. If the cache is
-** being used for an in-memory database, this function is a no-op.
+** Add page pPage to the head of the dirty list (PCache1.pDirty is set to
+** pPage).
*/
-static void pcacheUnpin(PgHdr *p){
- if( p->pCache->bPurgeable ){
- sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
+static void pcacheAddToDirtyList(PgHdr *pPage){
+ PCache *p = pPage->pCache;
+
+ assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage );
+
+ pPage->pDirtyNext = p->pDirty;
+ if( pPage->pDirtyNext ){
+ assert( pPage->pDirtyNext->pDirtyPrev==0 );
+ pPage->pDirtyNext->pDirtyPrev = pPage;
+ }
+ p->pDirty = pPage;
+ if( !p->pDirtyTail ){
+ p->pDirtyTail = pPage;
+ }
+ if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){
+ p->pSynced = pPage;
}
+ expensive_assert( pcacheCheckSynced(p) );
}
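The remove/add pair restored above is how the pager moves a page to the front of the dirty list: unlink it, then push it onto the head, fixing the head and tail pointers as needed. A minimal standalone sketch of that doubly linked list discipline (illustrative types, not the real PgHdr/PCache structures):

#include <stdio.h>

typedef struct Node Node;
struct Node { int id; Node *pPrev, *pNext; };
typedef struct { Node *pHead, *pTail; } List;

/* Unlink n from the list, fixing head/tail as needed. */
static void list_remove(List *l, Node *n){
  if( n->pNext ) n->pNext->pPrev = n->pPrev; else l->pTail = n->pPrev;
  if( n->pPrev ) n->pPrev->pNext = n->pNext; else l->pHead = n->pNext;
  n->pPrev = n->pNext = 0;
}

/* Push n onto the head of the list. */
static void list_add_head(List *l, Node *n){
  n->pNext = l->pHead;
  if( l->pHead ) l->pHead->pPrev = n; else l->pTail = n;
  l->pHead = n;
}

int main(void){
  Node a = {1,0,0}, b = {2,0,0}, c = {3,0,0};
  List l = {0,0};
  list_add_head(&l, &a); list_add_head(&l, &b); list_add_head(&l, &c); /* 3 2 1 */
  list_remove(&l, &a); list_add_head(&l, &a);                          /* move 1 to front */
  for(Node *p=l.pHead; p; p=p->pNext) printf("%d ", p->id);            /* 1 3 2 */
  printf("\n");
  return 0;
}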
/*
-** Compute the number of pages of cache requested. p->szCache is the
-** cache size requested by the "PRAGMA cache_size" statement.
-**
-**
+** Wrapper around the pluggable caches xUnpin method. If the cache is
+** being used for an in-memory database, this function is a no-op.
*/
-static int numberOfCachePages(PCache *p){
- if( p->szCache>=0 ){
- /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
- ** suggested cache size is set to N. */
- return p->szCache;
- }else{
- /* IMPLEMENTATION-OF: R-61436-13639 If the argument N is negative, then
- ** the number of cache pages is adjusted to use approximately abs(N*1024)
- ** bytes of memory. */
- return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
+static void pcacheUnpin(PgHdr *p){
+ PCache *pCache = p->pCache;
+ if( pCache->bPurgeable ){
+ if( p->pgno==1 ){
+ pCache->pPage1 = 0;
+ }
+ sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, p->pPage, 0);
}
}
@@ -40604,7 +36975,7 @@ SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); }
** The caller discovers how much space needs to be allocated by
** calling sqlite3PcacheSize().
*/
-SQLITE_PRIVATE int sqlite3PcacheOpen(
+SQLITE_PRIVATE void sqlite3PcacheOpen(
int szPage, /* Size of every page */
int szExtra, /* Extra space associated with each page */
int bPurgeable, /* True if pages are on backing store */
@@ -40613,206 +36984,156 @@ SQLITE_PRIVATE int sqlite3PcacheOpen(
PCache *p /* Preallocated space for the PCache */
){
memset(p, 0, sizeof(PCache));
- p->szPage = 1;
+ p->szPage = szPage;
p->szExtra = szExtra;
p->bPurgeable = bPurgeable;
- p->eCreate = 2;
p->xStress = xStress;
p->pStress = pStress;
p->szCache = 100;
- return sqlite3PcacheSetPageSize(p, szPage);
}
/*
** Change the page size for PCache object. The caller must ensure that there
** are no outstanding page references when this function is called.
*/
-SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
- assert( pCache->nRefSum==0 && pCache->pDirty==0 );
- if( pCache->szPage ){
- sqlite3_pcache *pNew;
- pNew = sqlite3GlobalConfig.pcache2.xCreate(
- szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
- pCache->bPurgeable
- );
- if( pNew==0 ) return SQLITE_NOMEM;
- sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
- if( pCache->pCache ){
- sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
- }
- pCache->pCache = pNew;
- pCache->szPage = szPage;
+SQLITE_PRIVATE void sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
+ assert( pCache->nRef==0 && pCache->pDirty==0 );
+ if( pCache->pCache ){
+ sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+ pCache->pCache = 0;
+ pCache->pPage1 = 0;
+ }
+ pCache->szPage = szPage;
+}
+
+/*
+** Compute the number of pages of cache requested.
+*/
+static int numberOfCachePages(PCache *p){
+ if( p->szCache>=0 ){
+ return p->szCache;
+ }else{
+ return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
}
- return SQLITE_OK;
}
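numberOfCachePages() implements the documented PRAGMA cache_size convention: a non-negative value is a page count, while a negative value N asks for roughly abs(N)*1024 bytes divided into entries of szPage+szExtra bytes. A minimal standalone sketch of the same arithmetic; the names and the 72-byte extra-space figure are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* szCache: the PRAGMA cache_size value; szPage/szExtra: bytes per cache entry. */
static int cache_pages(int szCache, int szPage, int szExtra){
  if( szCache>=0 ) return szCache;                         /* N>=0: N pages      */
  return (int)((-1024*(int64_t)szCache)/(szPage+szExtra)); /* N<0: ~abs(N) KiB   */
}

int main(void){
  /* PRAGMA cache_size = -2000 with 4096-byte pages and 72 bytes of extra
  ** space works out to roughly 491 pages. */
  printf("%d\n", cache_pages(-2000, 4096, 72));
  printf("%d\n", cache_pages(100, 4096, 72));
  return 0;
}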
/*
** Try to obtain a page from the cache.
-**
-** This routine returns a pointer to an sqlite3_pcache_page object if
-** such an object is already in cache, or if a new one is created.
-** This routine returns a NULL pointer if the object was not in cache
-** and could not be created.
-**
-** The createFlags should be 0 to check for existing pages and should
-** be 3 (not 1, but 3) to try to create a new page.
-**
-** If the createFlag is 0, then NULL is always returned if the page
-** is not already in the cache. If createFlag is 1, then a new page
-** is created only if that can be done without spilling dirty pages
-** and without exceeding the cache size limit.
-**
-** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
-** initialize the sqlite3_pcache_page object and convert it into a
-** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
-** routines are split this way for performance reasons. When separated
-** they can both (usually) operate without having to push values to
-** the stack on entry and pop them back off on exit, which saves a
-** lot of pushing and popping.
*/
-SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
+SQLITE_PRIVATE int sqlite3PcacheFetch(
PCache *pCache, /* Obtain the page from this cache */
Pgno pgno, /* Page number to obtain */
- int createFlag /* If true, create page if it does not exist already */
+ int createFlag, /* If true, create page if it does not exist already */
+ PgHdr **ppPage /* Write the page here */
){
+ sqlite3_pcache_page *pPage = 0;
+ PgHdr *pPgHdr = 0;
int eCreate;
assert( pCache!=0 );
- assert( pCache->pCache!=0 );
- assert( createFlag==3 || createFlag==0 );
+ assert( createFlag==1 || createFlag==0 );
assert( pgno>0 );
- /* eCreate defines what to do if the page does not exist.
- ** 0 Do not allocate a new page. (createFlag==0)
- ** 1 Allocate a new page if doing so is inexpensive.
- ** (createFlag==1 AND bPurgeable AND pDirty)
- ** 2 Allocate a new page even if doing so is difficult.
- ** (createFlag==1 AND !(bPurgeable AND pDirty))
+ /* If the pluggable cache (sqlite3_pcache*) has not been allocated,
+ ** allocate it now.
*/
- eCreate = createFlag & pCache->eCreate;
- assert( eCreate==0 || eCreate==1 || eCreate==2 );
- assert( createFlag==0 || pCache->eCreate==eCreate );
- assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
- return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
-}
+ if( !pCache->pCache && createFlag ){
+ sqlite3_pcache *p;
+ p = sqlite3GlobalConfig.pcache2.xCreate(
+ pCache->szPage, pCache->szExtra + sizeof(PgHdr), pCache->bPurgeable
+ );
+ if( !p ){
+ return SQLITE_NOMEM;
+ }
+ sqlite3GlobalConfig.pcache2.xCachesize(p, numberOfCachePages(pCache));
+ pCache->pCache = p;
+ }
-/*
-** If the sqlite3PcacheFetch() routine is unable to allocate a new
-** page because new clean pages are available for reuse and the cache
-** size limit has been reached, then this routine can be invoked to
-** try harder to allocate a page. This routine might invoke the stress
-** callback to spill dirty pages to the journal. It will then try to
-** allocate the new page and will only fail to allocate a new page on
-** an OOM error.
-**
-** This routine should be invoked only after sqlite3PcacheFetch() fails.
-*/
-SQLITE_PRIVATE int sqlite3PcacheFetchStress(
- PCache *pCache, /* Obtain the page from this cache */
- Pgno pgno, /* Page number to obtain */
- sqlite3_pcache_page **ppPage /* Write result here */
-){
- PgHdr *pPg;
- if( pCache->eCreate==2 ) return 0;
+ eCreate = createFlag * (1 + (!pCache->bPurgeable || !pCache->pDirty));
+ if( pCache->pCache ){
+ pPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
+ }
+ if( !pPage && eCreate==1 ){
+ PgHdr *pPg;
- /* Find a dirty page to write-out and recycle. First try to find a
- ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
- ** cleared), but if that is not possible settle for any other
- ** unreferenced dirty page.
- */
- for(pPg=pCache->pSynced;
- pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
- pPg=pPg->pDirtyPrev
- );
- pCache->pSynced = pPg;
- if( !pPg ){
- for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
- }
- if( pPg ){
- int rc;
+ /* Find a dirty page to write-out and recycle. First try to find a
+ ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
+ ** cleared), but if that is not possible settle for any other
+ ** unreferenced dirty page.
+ */
+ expensive_assert( pcacheCheckSynced(pCache) );
+ for(pPg=pCache->pSynced;
+ pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
+ pPg=pPg->pDirtyPrev
+ );
+ pCache->pSynced = pPg;
+ if( !pPg ){
+ for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
+ }
+ if( pPg ){
+ int rc;
#ifdef SQLITE_LOG_CACHE_SPILL
- sqlite3_log(SQLITE_FULL,
- "spill page %d making room for %d - cache used: %d/%d",
- pPg->pgno, pgno,
- sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache),
- numberOfCachePages(pCache));
-#endif
- rc = pCache->xStress(pCache->pStress, pPg);
- if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
- return rc;
+ sqlite3_log(SQLITE_FULL,
+ "spill page %d making room for %d - cache used: %d/%d",
+ pPg->pgno, pgno,
+ sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache),
+ numberOfCachePages(pCache));
+#endif
+ rc = pCache->xStress(pCache->pStress, pPg);
+ if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+ return rc;
+ }
}
- }
- *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
- return *ppPage==0 ? SQLITE_NOMEM : SQLITE_OK;
-}
-
-/*
-** This is a helper routine for sqlite3PcacheFetchFinish()
-**
-** In the uncommon case where the page being fetched has not been
-** initialized, this routine is invoked to do the initialization.
-** This routine is broken out into a separate function since it
-** requires extra stack manipulation that can be avoided in the common
-** case.
-*/
-static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
- PCache *pCache, /* Obtain the page from this cache */
- Pgno pgno, /* Page number obtained */
- sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
-){
- PgHdr *pPgHdr;
- assert( pPage!=0 );
- pPgHdr = (PgHdr*)pPage->pExtra;
- assert( pPgHdr->pPage==0 );
- memset(pPgHdr, 0, sizeof(PgHdr));
- pPgHdr->pPage = pPage;
- pPgHdr->pData = pPage->pBuf;
- pPgHdr->pExtra = (void *)&pPgHdr[1];
- memset(pPgHdr->pExtra, 0, pCache->szExtra);
- pPgHdr->pCache = pCache;
- pPgHdr->pgno = pgno;
- pPgHdr->flags = PGHDR_CLEAN;
- return sqlite3PcacheFetchFinish(pCache,pgno,pPage);
-}
-/*
-** This routine converts the sqlite3_pcache_page object returned by
-** sqlite3PcacheFetch() into an initialized PgHdr object. This routine
-** must be called after sqlite3PcacheFetch() in order to get a usable
-** result.
-*/
-SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(
- PCache *pCache, /* Obtain the page from this cache */
- Pgno pgno, /* Page number obtained */
- sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
-){
- PgHdr *pPgHdr;
-
- assert( pPage!=0 );
- pPgHdr = (PgHdr *)pPage->pExtra;
+ pPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
+ }
- if( !pPgHdr->pPage ){
- return pcacheFetchFinishWithInit(pCache, pgno, pPage);
+ if( pPage ){
+ pPgHdr = (PgHdr *)pPage->pExtra;
+
+ if( !pPgHdr->pPage ){
+ memset(pPgHdr, 0, sizeof(PgHdr));
+ pPgHdr->pPage = pPage;
+ pPgHdr->pData = pPage->pBuf;
+ pPgHdr->pExtra = (void *)&pPgHdr[1];
+ memset(pPgHdr->pExtra, 0, pCache->szExtra);
+ pPgHdr->pCache = pCache;
+ pPgHdr->pgno = pgno;
+ }
+ assert( pPgHdr->pCache==pCache );
+ assert( pPgHdr->pgno==pgno );
+ assert( pPgHdr->pData==pPage->pBuf );
+ assert( pPgHdr->pExtra==(void *)&pPgHdr[1] );
+
+ if( 0==pPgHdr->nRef ){
+ pCache->nRef++;
+ }
+ pPgHdr->nRef++;
+ if( pgno==1 ){
+ pCache->pPage1 = pPgHdr;
+ }
}
- pCache->nRefSum++;
- pPgHdr->nRef++;
- return pPgHdr;
+ *ppPage = pPgHdr;
+ return (pPgHdr==0 && eCreate) ? SQLITE_NOMEM : SQLITE_OK;
}
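Both the removed and the restored fetch paths reduce createFlag plus the cache state to a small eCreate code: 0 means never allocate, 1 means allocate only if it is cheap, and 2 means allocate even if it is difficult. A standalone sketch of the expression used by the restored code above; the variable names are illustrative.

#include <stdio.h>

/* eCreate: 0 = never allocate, 1 = allocate only if cheap, 2 = allocate anyway. */
static int compute_eCreate(int createFlag, int bPurgeable, int hasDirtyPages){
  /* Mirrors: createFlag * (1 + (!pCache->bPurgeable || !pCache->pDirty)) */
  return createFlag * (1 + (!bPurgeable || !hasDirtyPages));
}

int main(void){
  printf("%d\n", compute_eCreate(0, 1, 1));  /* 0: lookup only                 */
  printf("%d\n", compute_eCreate(1, 1, 1));  /* 1: purgeable cache with dirty  */
  printf("%d\n", compute_eCreate(1, 0, 0));  /* 2: create even if difficult    */
  return 0;
}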
/*
** Decrement the reference count on a page. If the page is clean and the
-** reference count drops to 0, then it is made eligible for recycling.
+** reference count drops to 0, then it is made eligible for recycling.
*/
-SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
+SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr *p){
assert( p->nRef>0 );
- p->pCache->nRefSum--;
- if( (--p->nRef)==0 ){
- if( p->flags&PGHDR_CLEAN ){
+ p->nRef--;
+ if( p->nRef==0 ){
+ PCache *pCache = p->pCache;
+ pCache->nRef--;
+ if( (p->flags&PGHDR_DIRTY)==0 ){
pcacheUnpin(p);
- }else if( p->pDirtyPrev!=0 ){
+ }else{
/* Move the page to the head of the dirty list. */
- pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+ pcacheRemoveFromDirtyList(p);
+ pcacheAddToDirtyList(p);
}
}
}
@@ -40823,7 +37144,6 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){
assert(p->nRef>0);
p->nRef++;
- p->pCache->nRefSum++;
}
/*
@@ -40832,12 +37152,17 @@ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){
** page pointed to by p is invalid.
*/
SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
+ PCache *pCache;
assert( p->nRef==1 );
if( p->flags&PGHDR_DIRTY ){
- pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
+ pcacheRemoveFromDirtyList(p);
}
- p->pCache->nRefSum--;
- sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
+ pCache = p->pCache;
+ pCache->nRef--;
+ if( p->pgno==1 ){
+ pCache->pPage1 = 0;
+ }
+ sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, p->pPage, 1);
}
/*
@@ -40845,14 +37170,11 @@ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
** make it so.
*/
SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
+ p->flags &= ~PGHDR_DONT_WRITE;
assert( p->nRef>0 );
- if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){
- p->flags &= ~PGHDR_DONT_WRITE;
- if( p->flags & PGHDR_CLEAN ){
- p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
- assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
- pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
- }
+ if( 0==(p->flags & PGHDR_DIRTY) ){
+ p->flags |= PGHDR_DIRTY;
+ pcacheAddToDirtyList( p);
}
}
@@ -40862,10 +37184,8 @@ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
*/
SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){
if( (p->flags & PGHDR_DIRTY) ){
- assert( (p->flags & PGHDR_CLEAN)==0 );
- pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
- p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
- p->flags |= PGHDR_CLEAN;
+ pcacheRemoveFromDirtyList(p);
+ p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC);
if( p->nRef==0 ){
pcacheUnpin(p);
}
@@ -40903,7 +37223,8 @@ SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
p->pgno = newPgno;
if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
- pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+ pcacheRemoveFromDirtyList(p);
+ pcacheAddToDirtyList(p);
}
}
@@ -40932,14 +37253,9 @@ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
sqlite3PcacheMakeClean(p);
}
}
- if( pgno==0 && pCache->nRefSum ){
- sqlite3_pcache_page *pPage1;
- pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0);
- if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because
- ** pCache->nRefSum>0 */
- memset(pPage1->pBuf, 0, pCache->szPage);
- pgno = 1;
- }
+ if( pgno==0 && pCache->pPage1 ){
+ memset(pCache->pPage1->pData, 0, pCache->szPage);
+ pgno = 1;
}
sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1);
}
@@ -40949,8 +37265,9 @@ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
** Close a cache.
*/
SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){
- assert( pCache->pCache!=0 );
- sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+ if( pCache->pCache ){
+ sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+ }
}
/*
@@ -41042,13 +37359,10 @@ SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
}
/*
-** Return the total number of references to all pages held by the cache.
-**
-** This is not the total number of pages referenced, but the sum of the
-** reference count for all pages.
+** Return the total number of referenced pages held by the cache.
*/
SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){
- return pCache->nRefSum;
+ return pCache->nRef;
}
/*
@@ -41062,8 +37376,11 @@ SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){
** Return the total number of pages in the cache.
*/
SQLITE_PRIVATE int sqlite3PcachePagecount(PCache *pCache){
- assert( pCache->pCache!=0 );
- return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
+ int nPage = 0;
+ if( pCache->pCache ){
+ nPage = sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
+ }
+ return nPage;
}
#ifdef SQLITE_TEST
@@ -41079,27 +37396,22 @@ SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *pCache){
** Set the suggested cache-size value.
*/
SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
- assert( pCache->pCache!=0 );
pCache->szCache = mxPage;
- sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
- numberOfCachePages(pCache));
+ if( pCache->pCache ){
+ sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
+ numberOfCachePages(pCache));
+ }
}
/*
** Free up as much memory as possible from the page cache.
*/
SQLITE_PRIVATE void sqlite3PcacheShrink(PCache *pCache){
- assert( pCache->pCache!=0 );
- sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
+ if( pCache->pCache ){
+ sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
+ }
}
-/*
-** Return the size of the header added by this middleware layer
-** in the page-cache hierarchy.
-*/
-SQLITE_PRIVATE int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }
-
-
#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
/*
** For all dirty pages currently in the cache, invoke the specified
@@ -41131,100 +37443,18 @@ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHd
** This file implements the default page cache implementation (the
** sqlite3_pcache interface). It also contains part of the implementation
** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features.
-** If the default page cache implementation is overridden, then neither of
+** If the default page cache implementation is overridden, then neither of
** these two features are available.
-**
-** A Page cache line looks like this:
-**
-** -------------------------------------------------------------
-** | database page content | PgHdr1 | MemPage | PgHdr |
-** -------------------------------------------------------------
-**
-** The database page content is up front (so that buffer overreads tend to
-** flow harmlessly into the PgHdr1, MemPage, and PgHdr extensions). MemPage
-** is the extension added by the btree.c module containing information such
-** as the database page number and how that database page is used. PgHdr
-** is added by the pcache.c layer and contains information used to keep track
-** of which pages are "dirty". PgHdr1 is an extension added by this
-** module (pcache1.c). The PgHdr1 header is a subclass of sqlite3_pcache_page.
-** PgHdr1 contains information needed to look up a page by its page number.
-** The superclass sqlite3_pcache_page.pBuf points to the start of the
-** database page content and sqlite3_pcache_page.pExtra points to PgHdr.
-**
-** The size of the extension (MemPage+PgHdr+PgHdr1) can be determined at
-** runtime using sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &size). The
-** sizes of the extensions sum to 272 bytes on x64 for 3.8.10, but this
-** size can vary according to architecture, compile-time options, and
-** SQLite library version number.
-**
-** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained
-** using a separate memory allocation from the database page content. This
-** seeks to overcome the "clownshoe" problem (also called "internal
-** fragmentation" in academic literature) of allocating a few bytes more
-** than a power of two with the memory allocator rounding up to the next
-** power of two, and leaving the rounded-up space unused.
-**
-** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates
-** with this module. Information is passed back and forth as PgHdr1 pointers.
-**
-** The pcache.c and pager.c modules deal with pointers to PgHdr objects.
-** The btree.c module deals with pointers to MemPage objects.
-**
-** SOURCE OF PAGE CACHE MEMORY:
-**
-** Memory for a page might come from any of three sources:
-**
-** (1) The general-purpose memory allocator - sqlite3Malloc()
-** (2) Global page-cache memory provided using sqlite3_config() with
-** SQLITE_CONFIG_PAGECACHE.
-** (3) PCache-local bulk allocation.
-**
-** The third case is a chunk of heap memory (defaulting to 100 pages worth)
-** that is allocated when the page cache is created. The size of the local
-** bulk allocation can be adjusted using
-**
-** sqlite3_config(SQLITE_CONFIG_PAGECACHE, 0, 0, N).
-**
-** If N is positive, then N pages worth of memory are allocated using a single
-** sqlite3Malloc() call and that memory is used for the first N pages allocated.
-** Or if N is negative, then -1024*N bytes of memory are allocated and used
-** for as many pages as can be accommodated.
-**
-** Only one of (2) or (3) can be used. Once the memory available to (2) or
-** (3) is exhausted, subsequent allocations fail over to the general-purpose
-** memory allocator (1).
-**
-** Earlier versions of SQLite used only methods (1) and (2). But experiments
-** show that method (3) with N==100 provides about a 5% performance boost for
-** common workloads.
*/
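The memory sources described above are steered from the public API with sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N). A rough usage sketch follows, assuming the standard sqlite3.h header and library are available; SQLITE_CONFIG_PCACHE_HDRSZ (mentioned above) only exists in newer releases, and the 4 KiB page / 200-slot sizing is purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "sqlite3.h"

int main(void){
  int szHdr = 0;
  /* Ask the library how much per-page header space it needs. */
  sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &szHdr);

  int szSlot = 4096 + szHdr;        /* one 4 KiB page plus the header space    */
  int nSlot  = 200;                 /* number of pre-allocated cache slots     */
  void *pBuf = malloc((size_t)szSlot * nSlot);

  /* Must be done before the library is initialized / the first connection.   */
  int rc = sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, szSlot, nSlot);
  printf("SQLITE_CONFIG_PAGECACHE rc=%d (0 is SQLITE_OK)\n", rc);

  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)==SQLITE_OK ) sqlite3_close(db);
  free(pBuf);
  return 0;
}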
-/* #include "sqliteInt.h" */
+
typedef struct PCache1 PCache1;
typedef struct PgHdr1 PgHdr1;
typedef struct PgFreeslot PgFreeslot;
typedef struct PGroup PGroup;
-/*
-** Each cache entry is represented by an instance of the following
-** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of
-** PgHdr1.pCache->szPage bytes is allocated directly before this structure
-** in memory.
-*/
-struct PgHdr1 {
- sqlite3_pcache_page page; /* Base class. Must be first. pBuf & pExtra */
- unsigned int iKey; /* Key value (page number) */
- u8 isPinned; /* Page in use, not on the LRU list */
- u8 isBulkLocal; /* This page from bulk local storage */
- u8 isAnchor; /* This is the PGroup.lru element */
- PgHdr1 *pNext; /* Next in hash table chain */
- PCache1 *pCache; /* Cache that currently owns this page */
- PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */
- PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */
-};
-
/* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set
-** of one or more PCaches that are able to recycle each other's unpinned
+** of one or more PCaches that are able to recycle each other's unpinned
** pages when they are under memory pressure. A PGroup is an instance of
** the following object.
**
@@ -41251,7 +37481,7 @@ struct PGroup {
unsigned int nMinPage; /* Sum of nMin for purgeable caches */
unsigned int mxPinned; /* nMaxpage + 10 - nMinPage */
unsigned int nCurrentPage; /* Number of purgeable pages allocated */
- PgHdr1 lru; /* The beginning and end of the LRU list */
+ PgHdr1 *pLruHead, *pLruTail; /* LRU list of unpinned pages */
};
/* Each page cache is an instance of the following object. Every
@@ -41269,9 +37499,8 @@ struct PCache1 {
** The PGroup mutex must be held when accessing nMax.
*/
PGroup *pGroup; /* PGroup this cache belongs to */
- int szPage; /* Size of database content section */
- int szExtra; /* sizeof(MemPage)+sizeof(PgHdr) */
- int szAlloc; /* Total size of one pcache line */
+ int szPage; /* Size of allocated pages in bytes */
+ int szExtra; /* Size of extra space in bytes */
int bPurgeable; /* True if cache is purgeable */
unsigned int nMin; /* Minimum number of pages reserved */
unsigned int nMax; /* Configured "cache_size" value */
@@ -41285,13 +37514,26 @@ struct PCache1 {
unsigned int nPage; /* Total number of pages in apHash */
unsigned int nHash; /* Number of slots in apHash[] */
PgHdr1 **apHash; /* Hash table for fast lookup by key */
- PgHdr1 *pFree; /* List of unused pcache-local pages */
- void *pBulk; /* Bulk memory used by pcache-local */
};
/*
-** Free slots in the allocator used to divide up the global page cache
-** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism.
+** Each cache entry is represented by an instance of the following
+** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of
+** PgHdr1.pCache->szPage bytes is allocated directly before this structure
+** in memory.
+*/
+struct PgHdr1 {
+ sqlite3_pcache_page page;
+ unsigned int iKey; /* Key value (page number) */
+ PgHdr1 *pNext; /* Next in hash table chain */
+ PCache1 *pCache; /* Cache that currently owns this page */
+ PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */
+ PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */
+};
+
+/*
+** Free slots in the allocator used to divide up the buffer provided using
+** the SQLITE_CONFIG_PAGECACHE mechanism.
*/
struct PgFreeslot {
PgFreeslot *pNext; /* Next free slot */
@@ -41309,12 +37551,10 @@ static SQLITE_WSD struct PCacheGlobal {
** The nFreeSlot and pFree values do require mutex protection.
*/
int isInit; /* True if initialized */
- int separateCache; /* Use a new PGroup for each PCache */
- int nInitPage; /* Initial bulk allocation size */
int szSlot; /* Size of each free slot */
int nSlot; /* The number of pcache slots */
int nReserve; /* Try to keep nFreeSlot above this */
- void *pStart, *pEnd; /* Bounds of global page cache memory */
+ void *pStart, *pEnd; /* Bounds of pagecache malloc range */
/* Above requires no mutex. Use mutex below for variable that follow. */
sqlite3_mutex *mutex; /* Mutex for accessing the following: */
PgFreeslot *pFree; /* Free page blocks */
@@ -41336,20 +37576,12 @@ static SQLITE_WSD struct PCacheGlobal {
/*
** Macros to enter and leave the PCache LRU mutex.
*/
-#if !defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0
-# define pcache1EnterMutex(X) assert((X)->mutex==0)
-# define pcache1LeaveMutex(X) assert((X)->mutex==0)
-# define PCACHE1_MIGHT_USE_GROUP_MUTEX 0
-#else
-# define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex)
-# define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex)
-# define PCACHE1_MIGHT_USE_GROUP_MUTEX 1
-#endif
+#define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex)
+#define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex)
/******************************************************************************/
/******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/
-
/*
** This function is called during initialization if a static buffer is
** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE
@@ -41362,7 +37594,6 @@ static SQLITE_WSD struct PCacheGlobal {
SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
if( pcache1.isInit ){
PgFreeslot *p;
- if( pBuf==0 ) sz = n = 0;
sz = ROUNDDOWN8(sz);
pcache1.szSlot = sz;
pcache1.nSlot = pcache1.nFreeSlot = n;
@@ -41381,44 +37612,6 @@ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
}
/*
-** Try to initialize the pCache->pFree and pCache->pBulk fields. Return
-** true if pCache->pFree ends up containing one or more free pages.
-*/
-static int pcache1InitBulk(PCache1 *pCache){
- i64 szBulk;
- char *zBulk;
- if( pcache1.nInitPage==0 ) return 0;
- /* Do not bother with a bulk allocation if the cache size very small */
- if( pCache->nMax<3 ) return 0;
- sqlite3BeginBenignMalloc();
- if( pcache1.nInitPage>0 ){
- szBulk = pCache->szAlloc * (i64)pcache1.nInitPage;
- }else{
- szBulk = -1024 * (i64)pcache1.nInitPage;
- }
- if( szBulk > pCache->szAlloc*(i64)pCache->nMax ){
- szBulk = pCache->szAlloc*pCache->nMax;
- }
- zBulk = pCache->pBulk = sqlite3Malloc( szBulk );
- sqlite3EndBenignMalloc();
- if( zBulk ){
- int nBulk = sqlite3MallocSize(zBulk)/pCache->szAlloc;
- int i;
- for(i=0; i<nBulk; i++){
- PgHdr1 *pX = (PgHdr1*)&zBulk[pCache->szPage];
- pX->page.pBuf = zBulk;
- pX->page.pExtra = &pX[1];
- pX->isBulkLocal = 1;
- pX->isAnchor = 0;
- pX->pNext = pCache->pFree;
- pCache->pFree = pX;
- zBulk += pCache->szAlloc;
- }
- }
- return pCache->pFree!=0;
-}
-
-/*
** Malloc function used within this file to allocate space from the buffer
** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no
** such buffer exists or there is no space left in it, this function falls
@@ -41430,6 +37623,7 @@ static int pcache1InitBulk(PCache1 *pCache){
static void *pcache1Alloc(int nByte){
void *p = 0;
assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
+ sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
if( nByte<=pcache1.szSlot ){
sqlite3_mutex_enter(pcache1.mutex);
p = (PgHdr1 *)pcache1.pFree;
@@ -41438,8 +37632,7 @@ static void *pcache1Alloc(int nByte){
pcache1.nFreeSlot--;
pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
assert( pcache1.nFreeSlot>=0 );
- sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
- sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, 1);
}
sqlite3_mutex_leave(pcache1.mutex);
}
@@ -41452,8 +37645,7 @@ static void *pcache1Alloc(int nByte){
if( p ){
int sz = sqlite3MallocSize(p);
sqlite3_mutex_enter(pcache1.mutex);
- sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
- sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz);
+ sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz);
sqlite3_mutex_leave(pcache1.mutex);
}
#endif
@@ -41465,13 +37657,13 @@ static void *pcache1Alloc(int nByte){
/*
** Free an allocated buffer obtained from pcache1Alloc().
*/
-static void pcache1Free(void *p){
+static int pcache1Free(void *p){
int nFreed = 0;
- if( p==0 ) return;
+ if( p==0 ) return 0;
if( p>=pcache1.pStart && p<pcache1.pEnd ){
PgFreeslot *pSlot;
sqlite3_mutex_enter(pcache1.mutex);
- sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_USED, 1);
+ sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, -1);
pSlot = (PgFreeslot*)p;
pSlot->pNext = pcache1.pFree;
pcache1.pFree = pSlot;
@@ -41482,14 +37674,15 @@ static void pcache1Free(void *p){
}else{
assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) );
sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
-#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
nFreed = sqlite3MallocSize(p);
+#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
sqlite3_mutex_enter(pcache1.mutex);
- sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_OVERFLOW, nFreed);
+ sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, -nFreed);
sqlite3_mutex_leave(pcache1.mutex);
#endif
sqlite3_free(p);
}
+ return nFreed;
}
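pcache1Alloc()/pcache1Free() above serve small requests from fixed-size slots carved out of the SQLITE_CONFIG_PAGECACHE buffer, keep the free slots on a singly linked list, and fall back to the heap otherwise. A compact standalone sketch of that free-list discipline (illustrative only, without the mutexing or the statistics counters):

#include <stdio.h>
#include <stdlib.h>

typedef struct Slot Slot;
struct Slot { Slot *pNext; };          /* a free slot doubles as its own list link */

static char *gStart, *gEnd;            /* bounds of the slot arena                 */
static size_t gSzSlot;                 /* size of each slot                        */
static Slot *gFree;                    /* head of the free-slot list               */

static void arena_init(void *pBuf, size_t szSlot, int nSlot){
  gStart = pBuf; gEnd = gStart + szSlot*nSlot; gSzSlot = szSlot; gFree = 0;
  for(int i=0; i<nSlot; i++){          /* thread every slot onto the free list     */
    Slot *p = (Slot*)(gStart + i*szSlot);
    p->pNext = gFree; gFree = p;
  }
}

static void *arena_alloc(size_t n){
  if( n<=gSzSlot && gFree ){           /* small enough: pop a slot                 */
    Slot *p = gFree; gFree = p->pNext; return p;
  }
  return malloc(n);                    /* otherwise fall back to the heap          */
}

static void arena_free(void *p){
  if( (char*)p>=gStart && (char*)p<gEnd ){  /* came from the arena: push it back   */
    Slot *pSlot = p; pSlot->pNext = gFree; gFree = pSlot;
  }else{
    free(p);
  }
}

int main(void){
  static char buf[8*64];
  arena_init(buf, 64, 8);
  void *a = arena_alloc(48);           /* served from the arena                    */
  void *b = arena_alloc(4096);         /* too large: served by malloc()            */
  printf("a in arena: %d\n", (char*)a>=gStart && (char*)a<gEnd);
  printf("b in arena: %d\n", (char*)b>=gStart && (char*)b<gEnd);
  arena_free(a); arena_free(b);
  return 0;
}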
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
@@ -41513,72 +37706,58 @@ static int pcache1MemSize(void *p){
/*
** Allocate a new page object initially associated with cache pCache.
*/
-static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){
+static PgHdr1 *pcache1AllocPage(PCache1 *pCache){
PgHdr1 *p = 0;
void *pPg;
+ /* The group mutex must be released before pcache1Alloc() is called. This
+ ** is because it may call sqlite3_release_memory(), which assumes that
+ ** this mutex is not held. */
assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
- if( pCache->pFree || (pCache->nPage==0 && pcache1InitBulk(pCache)) ){
- p = pCache->pFree;
- pCache->pFree = p->pNext;
- p->pNext = 0;
- }else{
-#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
- /* The group mutex must be released before pcache1Alloc() is called. This
- ** is because it might call sqlite3_release_memory(), which assumes that
- ** this mutex is not held. */
- assert( pcache1.separateCache==0 );
- assert( pCache->pGroup==&pcache1.grp );
- pcache1LeaveMutex(pCache->pGroup);
-#endif
- if( benignMalloc ){ sqlite3BeginBenignMalloc(); }
+ pcache1LeaveMutex(pCache->pGroup);
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
- pPg = pcache1Alloc(pCache->szPage);
- p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
- if( !pPg || !p ){
- pcache1Free(pPg);
- sqlite3_free(p);
- pPg = 0;
- }
+ pPg = pcache1Alloc(pCache->szPage);
+ p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
+ if( !pPg || !p ){
+ pcache1Free(pPg);
+ sqlite3_free(p);
+ pPg = 0;
+ }
#else
- pPg = pcache1Alloc(pCache->szAlloc);
- p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
+ pPg = pcache1Alloc(sizeof(PgHdr1) + pCache->szPage + pCache->szExtra);
+ p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
#endif
- if( benignMalloc ){ sqlite3EndBenignMalloc(); }
-#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
- pcache1EnterMutex(pCache->pGroup);
-#endif
- if( pPg==0 ) return 0;
+ pcache1EnterMutex(pCache->pGroup);
+
+ if( pPg ){
p->page.pBuf = pPg;
p->page.pExtra = &p[1];
- p->isBulkLocal = 0;
- p->isAnchor = 0;
- }
- if( pCache->bPurgeable ){
- pCache->pGroup->nCurrentPage++;
+ if( pCache->bPurgeable ){
+ pCache->pGroup->nCurrentPage++;
+ }
+ return p;
}
- return p;
+ return 0;
}
/*
** Free a page object allocated by pcache1AllocPage().
+**
+** The pointer is allowed to be NULL, which is prudent. But it turns out
+** that the current implementation happens to never call this routine
+** with a NULL pointer, so we mark the NULL test with ALWAYS().
*/
static void pcache1FreePage(PgHdr1 *p){
- PCache1 *pCache;
- assert( p!=0 );
- pCache = p->pCache;
- assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) );
- if( p->isBulkLocal ){
- p->pNext = pCache->pFree;
- pCache->pFree = p;
- }else{
+ if( ALWAYS(p) ){
+ PCache1 *pCache = p->pCache;
+ assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) );
pcache1Free(p->page.pBuf);
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
sqlite3_free(p);
#endif
- }
- if( pCache->bPurgeable ){
- pCache->pGroup->nCurrentPage--;
+ if( pCache->bPurgeable ){
+ pCache->pGroup->nCurrentPage--;
+ }
}
}
@@ -41632,7 +37811,7 @@ static int pcache1UnderMemoryPressure(PCache1 *pCache){
**
** The PCache mutex must be held when this function is called.
*/
-static void pcache1ResizeHash(PCache1 *p){
+static int pcache1ResizeHash(PCache1 *p){
PgHdr1 **apNew;
unsigned int nNew;
unsigned int i;
@@ -41664,6 +37843,8 @@ static void pcache1ResizeHash(PCache1 *p){
p->apHash = apNew;
p->nHash = nNew;
}
+
+ return (p->apHash ? SQLITE_OK : SQLITE_NOMEM);
}
/*
@@ -41672,36 +37853,44 @@ static void pcache1ResizeHash(PCache1 *p){
** LRU list, then this function is a no-op.
**
** The PGroup mutex must be held when this function is called.
+**
+** If pPage is NULL then this routine is a no-op.
*/
-static PgHdr1 *pcache1PinPage(PgHdr1 *pPage){
+static void pcache1PinPage(PgHdr1 *pPage){
PCache1 *pCache;
+ PGroup *pGroup;
- assert( pPage!=0 );
- assert( pPage->isPinned==0 );
+ if( pPage==0 ) return;
pCache = pPage->pCache;
- assert( pPage->pLruNext );
- assert( pPage->pLruPrev );
- assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
- pPage->pLruPrev->pLruNext = pPage->pLruNext;
- pPage->pLruNext->pLruPrev = pPage->pLruPrev;
- pPage->pLruNext = 0;
- pPage->pLruPrev = 0;
- pPage->isPinned = 1;
- assert( pPage->isAnchor==0 );
- assert( pCache->pGroup->lru.isAnchor==1 );
- pCache->nRecyclable--;
- return pPage;
+ pGroup = pCache->pGroup;
+ assert( sqlite3_mutex_held(pGroup->mutex) );
+ if( pPage->pLruNext || pPage==pGroup->pLruTail ){
+ if( pPage->pLruPrev ){
+ pPage->pLruPrev->pLruNext = pPage->pLruNext;
+ }
+ if( pPage->pLruNext ){
+ pPage->pLruNext->pLruPrev = pPage->pLruPrev;
+ }
+ if( pGroup->pLruHead==pPage ){
+ pGroup->pLruHead = pPage->pLruNext;
+ }
+ if( pGroup->pLruTail==pPage ){
+ pGroup->pLruTail = pPage->pLruPrev;
+ }
+ pPage->pLruNext = 0;
+ pPage->pLruPrev = 0;
+ pPage->pCache->nRecyclable--;
+ }
}
/*
** Remove the page supplied as an argument from the hash table
** (PCache1.apHash structure) that it is currently stored in.
-** Also free the page if freePage is true.
**
** The PGroup mutex must be held when this function is called.
*/
-static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag){
+static void pcache1RemoveFromHash(PgHdr1 *pPage){
unsigned int h;
PCache1 *pCache = pPage->pCache;
PgHdr1 **pp;
@@ -41712,28 +37901,20 @@ static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag){
*pp = (*pp)->pNext;
pCache->nPage--;
- if( freeFlag ) pcache1FreePage(pPage);
}
/*
** If there are currently more than nMaxPage pages allocated, try
** to recycle pages to reduce the number allocated to nMaxPage.
*/
-static void pcache1EnforceMaxPage(PCache1 *pCache){
- PGroup *pGroup = pCache->pGroup;
- PgHdr1 *p;
+static void pcache1EnforceMaxPage(PGroup *pGroup){
assert( sqlite3_mutex_held(pGroup->mutex) );
- while( pGroup->nCurrentPage>pGroup->nMaxPage
- && (p=pGroup->lru.pLruPrev)->isAnchor==0
- ){
+ while( pGroup->nCurrentPage>pGroup->nMaxPage && pGroup->pLruTail ){
+ PgHdr1 *p = pGroup->pLruTail;
assert( p->pCache->pGroup==pGroup );
- assert( p->isPinned==0 );
pcache1PinPage(p);
- pcache1RemoveFromHash(p, 1);
- }
- if( pCache->nPage==0 && pCache->pBulk ){
- sqlite3_free(pCache->pBulk);
- pCache->pBulk = pCache->pFree = 0;
+ pcache1RemoveFromHash(p);
+ pcache1FreePage(p);
}
}
@@ -41758,7 +37939,7 @@ static void pcache1TruncateUnsafe(
if( pPage->iKey>=iLimit ){
pCache->nPage--;
*pp = pPage->pNext;
- if( !pPage->isPinned ) pcache1PinPage(pPage);
+ pcache1PinPage(pPage);
pcache1FreePage(pPage);
}else{
pp = &pPage->pNext;
@@ -41779,45 +37960,10 @@ static int pcache1Init(void *NotUsed){
UNUSED_PARAMETER(NotUsed);
assert( pcache1.isInit==0 );
memset(&pcache1, 0, sizeof(pcache1));
-
-
- /*
- ** The pcache1.separateCache variable is true if each PCache has its own
- ** private PGroup (mode-1). pcache1.separateCache is false if the single
- ** PGroup in pcache1.grp is used for all page caches (mode-2).
- **
- ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
- **
- ** * Use a unified cache in single-threaded applications that have
- ** configured a start-time buffer for use as page-cache memory using
- ** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL
- ** pBuf argument.
- **
- ** * Otherwise use separate caches (mode-1)
- */
-#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT)
- pcache1.separateCache = 0;
-#elif SQLITE_THREADSAFE
- pcache1.separateCache = sqlite3GlobalConfig.pPage==0
- || sqlite3GlobalConfig.bCoreMutex>0;
-#else
- pcache1.separateCache = sqlite3GlobalConfig.pPage==0;
-#endif
-
-#if SQLITE_THREADSAFE
if( sqlite3GlobalConfig.bCoreMutex ){
pcache1.grp.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU);
pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PMEM);
}
-#endif
- if( pcache1.separateCache
- && sqlite3GlobalConfig.nPage!=0
- && sqlite3GlobalConfig.pPage==0
- ){
- pcache1.nInitPage = sqlite3GlobalConfig.nPage;
- }else{
- pcache1.nInitPage = 0;
- }
pcache1.grp.mxPinned = 10;
pcache1.isInit = 1;
return SQLITE_OK;
@@ -41834,9 +37980,6 @@ static void pcache1Shutdown(void *NotUsed){
memset(&pcache1, 0, sizeof(pcache1));
}
-/* forward declaration */
-static void pcache1Destroy(sqlite3_pcache *p);
-
/*
** Implementation of the sqlite3_pcache.xCreate method.
**
@@ -41847,38 +37990,46 @@ static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){
PGroup *pGroup; /* The group the new page cache will belong to */
int sz; /* Bytes of memory required to allocate the new cache */
+ /*
+ ** The separateCache variable is true if each PCache has its own private
+ ** PGroup. In other words, separateCache is true for mode (1) where no
+ ** mutexing is required.
+ **
+ ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
+ **
+ ** * Always use a unified cache in single-threaded applications
+ **
+ ** * Otherwise (if multi-threaded and ENABLE_MEMORY_MANAGEMENT is off)
+ ** use separate caches (mode-1)
+ */
+#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0
+ const int separateCache = 0;
+#else
+ int separateCache = sqlite3GlobalConfig.bCoreMutex>0;
+#endif
+
assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 );
assert( szExtra < 300 );
- sz = sizeof(PCache1) + sizeof(PGroup)*pcache1.separateCache;
+ sz = sizeof(PCache1) + sizeof(PGroup)*separateCache;
pCache = (PCache1 *)sqlite3MallocZero(sz);
if( pCache ){
- if( pcache1.separateCache ){
+ if( separateCache ){
pGroup = (PGroup*)&pCache[1];
pGroup->mxPinned = 10;
}else{
pGroup = &pcache1.grp;
}
- if( pGroup->lru.isAnchor==0 ){
- pGroup->lru.isAnchor = 1;
- pGroup->lru.pLruPrev = pGroup->lru.pLruNext = &pGroup->lru;
- }
pCache->pGroup = pGroup;
pCache->szPage = szPage;
pCache->szExtra = szExtra;
- pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1));
pCache->bPurgeable = (bPurgeable ? 1 : 0);
- pcache1EnterMutex(pGroup);
- pcache1ResizeHash(pCache);
if( bPurgeable ){
pCache->nMin = 10;
+ pcache1EnterMutex(pGroup);
pGroup->nMinPage += pCache->nMin;
pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
- }
- pcache1LeaveMutex(pGroup);
- if( pCache->nHash==0 ){
- pcache1Destroy((sqlite3_pcache*)pCache);
- pCache = 0;
+ pcache1LeaveMutex(pGroup);
}
}
return (sqlite3_pcache *)pCache;
@@ -41898,7 +38049,7 @@ static void pcache1Cachesize(sqlite3_pcache *p, int nMax){
pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
pCache->nMax = nMax;
pCache->n90pct = pCache->nMax*9/10;
- pcache1EnforceMaxPage(pCache);
+ pcache1EnforceMaxPage(pGroup);
pcache1LeaveMutex(pGroup);
}
}
@@ -41916,7 +38067,7 @@ static void pcache1Shrink(sqlite3_pcache *p){
pcache1EnterMutex(pGroup);
savedMaxPage = pGroup->nMaxPage;
pGroup->nMaxPage = 0;
- pcache1EnforceMaxPage(pCache);
+ pcache1EnforceMaxPage(pGroup);
pGroup->nMaxPage = savedMaxPage;
pcache1LeaveMutex(pGroup);
}
@@ -41934,84 +38085,6 @@ static int pcache1Pagecount(sqlite3_pcache *p){
return n;
}
-
-/*
-** Implement steps 3, 4, and 5 of the pcache1Fetch() algorithm described
-** in the header of the pcache1Fetch() procedure.
-**
-** These steps are broken out into a separate procedure because they are
-** usually not needed, and by avoiding the stack initialization required
-** for these steps, the main pcache1Fetch() procedure can run faster.
-*/
-static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
- PCache1 *pCache,
- unsigned int iKey,
- int createFlag
-){
- unsigned int nPinned;
- PGroup *pGroup = pCache->pGroup;
- PgHdr1 *pPage = 0;
-
- /* Step 3: Abort if createFlag is 1 but the cache is nearly full */
- assert( pCache->nPage >= pCache->nRecyclable );
- nPinned = pCache->nPage - pCache->nRecyclable;
- assert( pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage );
- assert( pCache->n90pct == pCache->nMax*9/10 );
- if( createFlag==1 && (
- nPinned>=pGroup->mxPinned
- || nPinned>=pCache->n90pct
- || (pcache1UnderMemoryPressure(pCache) && pCache->nRecyclable<nPinned)
- )){
- return 0;
- }
-
- if( pCache->nPage>=pCache->nHash ) pcache1ResizeHash(pCache);
- assert( pCache->nHash>0 && pCache->apHash );
-
- /* Step 4. Try to recycle a page. */
- if( pCache->bPurgeable
- && !pGroup->lru.pLruPrev->isAnchor
- && ((pCache->nPage+1>=pCache->nMax) || pcache1UnderMemoryPressure(pCache))
- ){
- PCache1 *pOther;
- pPage = pGroup->lru.pLruPrev;
- assert( pPage->isPinned==0 );
- pcache1RemoveFromHash(pPage, 0);
- pcache1PinPage(pPage);
- pOther = pPage->pCache;
- if( pOther->szAlloc != pCache->szAlloc ){
- pcache1FreePage(pPage);
- pPage = 0;
- }else{
- pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable);
- }
- }
-
- /* Step 5. If a usable page buffer has still not been found,
- ** attempt to allocate a new one.
- */
- if( !pPage ){
- pPage = pcache1AllocPage(pCache, createFlag==1);
- }
-
- if( pPage ){
- unsigned int h = iKey % pCache->nHash;
- pCache->nPage++;
- pPage->iKey = iKey;
- pPage->pNext = pCache->apHash[h];
- pPage->pCache = pCache;
- pPage->pLruPrev = 0;
- pPage->pLruNext = 0;
- pPage->isPinned = 1;
- *(void **)pPage->page.pExtra = 0;
- pCache->apHash[h] = pPage;
- if( iKey>pCache->iMaxKey ){
- pCache->iMaxKey = iKey;
- }
- }
- return pPage;
-}
-
/*
** Implementation of the sqlite3_pcache.xFetch method.
**
@@ -42065,80 +38138,118 @@ static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
** proceed to step 5.
**
** 5. Otherwise, allocate and return a new page buffer.
-**
-** There are two versions of this routine. pcache1FetchWithMutex() is
-** the general case. pcache1FetchNoMutex() is a faster implementation for
-** the common case where pGroup->mutex is NULL. The pcache1Fetch() wrapper
-** invokes the appropriate routine.
*/
-static PgHdr1 *pcache1FetchNoMutex(
+static sqlite3_pcache_page *pcache1Fetch(
sqlite3_pcache *p,
unsigned int iKey,
int createFlag
){
+ unsigned int nPinned;
PCache1 *pCache = (PCache1 *)p;
+ PGroup *pGroup;
PgHdr1 *pPage = 0;
+ assert( pCache->bPurgeable || createFlag!=1 );
+ assert( pCache->bPurgeable || pCache->nMin==0 );
+ assert( pCache->bPurgeable==0 || pCache->nMin==10 );
+ assert( pCache->nMin==0 || pCache->bPurgeable );
+ pcache1EnterMutex(pGroup = pCache->pGroup);
+
/* Step 1: Search the hash table for an existing entry. */
- pPage = pCache->apHash[iKey % pCache->nHash];
- while( pPage && pPage->iKey!=iKey ){ pPage = pPage->pNext; }
+ if( pCache->nHash>0 ){
+ unsigned int h = iKey % pCache->nHash;
+ for(pPage=pCache->apHash[h]; pPage&&pPage->iKey!=iKey; pPage=pPage->pNext);
+ }
- /* Step 2: If the page was found in the hash table, then return it.
- ** If the page was not in the hash table and createFlag is 0, abort.
- ** Otherwise (page not in hash and createFlag!=0) continue with
- ** subsequent steps to try to create the page. */
- if( pPage ){
- if( !pPage->isPinned ){
- return pcache1PinPage(pPage);
+ /* Step 2: Abort if no existing page is found and createFlag is 0 */
+ if( pPage || createFlag==0 ){
+ pcache1PinPage(pPage);
+ goto fetch_out;
+ }
+
+ /* The pGroup local variable will normally be initialized by the
+ ** pcache1EnterMutex() macro above. But if SQLITE_MUTEX_OMIT is defined,
+ ** then pcache1EnterMutex() is a no-op, so we have to initialize the
+ ** local variable here. Delaying the initialization of pGroup is an
+ ** optimization: The common case is to exit the module before reaching
+ ** this point.
+ */
+#ifdef SQLITE_MUTEX_OMIT
+ pGroup = pCache->pGroup;
+#endif
+
+ /* Step 3: Abort if createFlag is 1 but the cache is nearly full */
+ assert( pCache->nPage >= pCache->nRecyclable );
+ nPinned = pCache->nPage - pCache->nRecyclable;
+ assert( pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage );
+ assert( pCache->n90pct == pCache->nMax*9/10 );
+ if( createFlag==1 && (
+ nPinned>=pGroup->mxPinned
+ || nPinned>=pCache->n90pct
+ || pcache1UnderMemoryPressure(pCache)
+ )){
+ goto fetch_out;
+ }
+
+ if( pCache->nPage>=pCache->nHash && pcache1ResizeHash(pCache) ){
+ goto fetch_out;
+ }
+ assert( pCache->nHash>0 && pCache->apHash );
+
+ /* Step 4. Try to recycle a page. */
+ if( pCache->bPurgeable && pGroup->pLruTail && (
+ (pCache->nPage+1>=pCache->nMax)
+ || pGroup->nCurrentPage>=pGroup->nMaxPage
+ || pcache1UnderMemoryPressure(pCache)
+ )){
+ PCache1 *pOther;
+ pPage = pGroup->pLruTail;
+ pcache1RemoveFromHash(pPage);
+ pcache1PinPage(pPage);
+ pOther = pPage->pCache;
+
+ /* We want to verify that szPage and szExtra are the same for pOther
+ ** and pCache. Assert that we can verify this by comparing sums. */
+ assert( (pCache->szPage & (pCache->szPage-1))==0 && pCache->szPage>=512 );
+ assert( pCache->szExtra<512 );
+ assert( (pOther->szPage & (pOther->szPage-1))==0 && pOther->szPage>=512 );
+ assert( pOther->szExtra<512 );
+
+ if( pOther->szPage+pOther->szExtra != pCache->szPage+pCache->szExtra ){
+ pcache1FreePage(pPage);
+ pPage = 0;
}else{
- return pPage;
+ pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable);
}
- }else if( createFlag ){
- /* Steps 3, 4, and 5 implemented by this subroutine */
- return pcache1FetchStage2(pCache, iKey, createFlag);
- }else{
- return 0;
}
-}
-#if PCACHE1_MIGHT_USE_GROUP_MUTEX
-static PgHdr1 *pcache1FetchWithMutex(
- sqlite3_pcache *p,
- unsigned int iKey,
- int createFlag
-){
- PCache1 *pCache = (PCache1 *)p;
- PgHdr1 *pPage;
- pcache1EnterMutex(pCache->pGroup);
- pPage = pcache1FetchNoMutex(p, iKey, createFlag);
- assert( pPage==0 || pCache->iMaxKey>=iKey );
- pcache1LeaveMutex(pCache->pGroup);
- return pPage;
-}
-#endif
-static sqlite3_pcache_page *pcache1Fetch(
- sqlite3_pcache *p,
- unsigned int iKey,
- int createFlag
-){
-#if PCACHE1_MIGHT_USE_GROUP_MUTEX || defined(SQLITE_DEBUG)
- PCache1 *pCache = (PCache1 *)p;
-#endif
+ /* Step 5. If a usable page buffer has still not been found,
+ ** attempt to allocate a new one.
+ */
+ if( !pPage ){
+ if( createFlag==1 ) sqlite3BeginBenignMalloc();
+ pPage = pcache1AllocPage(pCache);
+ if( createFlag==1 ) sqlite3EndBenignMalloc();
+ }
- assert( offsetof(PgHdr1,page)==0 );
- assert( pCache->bPurgeable || createFlag!=1 );
- assert( pCache->bPurgeable || pCache->nMin==0 );
- assert( pCache->bPurgeable==0 || pCache->nMin==10 );
- assert( pCache->nMin==0 || pCache->bPurgeable );
- assert( pCache->nHash>0 );
-#if PCACHE1_MIGHT_USE_GROUP_MUTEX
- if( pCache->pGroup->mutex ){
- return (sqlite3_pcache_page*)pcache1FetchWithMutex(p, iKey, createFlag);
- }else
-#endif
- {
- return (sqlite3_pcache_page*)pcache1FetchNoMutex(p, iKey, createFlag);
+ if( pPage ){
+ unsigned int h = iKey % pCache->nHash;
+ pCache->nPage++;
+ pPage->iKey = iKey;
+ pPage->pNext = pCache->apHash[h];
+ pPage->pCache = pCache;
+ pPage->pLruPrev = 0;
+ pPage->pLruNext = 0;
+ *(void **)pPage->page.pExtra = 0;
+ pCache->apHash[h] = pPage;
+ }
+
+fetch_out:
+ if( pPage && iKey>pCache->iMaxKey ){
+ pCache->iMaxKey = iKey;
}
+ pcache1LeaveMutex(pGroup);
+ return &pPage->page;
}
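Step 1 of the fetch above is a plain chained-hash lookup: hash the page number with iKey % nHash and walk the pNext chain until the key matches. A small standalone sketch of that lookup (illustrative structures, not PgHdr1, and with a fixed table size):

#include <stdio.h>

#define NHASH 16

typedef struct Entry Entry;
struct Entry { unsigned iKey; Entry *pNext; };

static Entry *apHash[NHASH];           /* chained hash table, bucket = key % NHASH */

static void insert(Entry *p, unsigned iKey){
  unsigned h = iKey % NHASH;
  p->iKey = iKey;
  p->pNext = apHash[h];                /* push onto the front of the chain */
  apHash[h] = p;
}

static Entry *lookup(unsigned iKey){
  Entry *p;
  for(p=apHash[iKey % NHASH]; p && p->iKey!=iKey; p=p->pNext){}
  return p;                            /* NULL if not present */
}

int main(void){
  static Entry a, b;
  insert(&a, 1);
  insert(&b, 17);                      /* collides with key 1 in bucket 1 */
  printf("%u %u %d\n", lookup(1)->iKey, lookup(17)->iKey, lookup(2)==0);
  return 0;
}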
@@ -42163,18 +38274,22 @@ static void pcache1Unpin(
** part of the PGroup LRU list.
*/
assert( pPage->pLruPrev==0 && pPage->pLruNext==0 );
- assert( pPage->isPinned==1 );
+ assert( pGroup->pLruHead!=pPage && pGroup->pLruTail!=pPage );
if( reuseUnlikely || pGroup->nCurrentPage>pGroup->nMaxPage ){
- pcache1RemoveFromHash(pPage, 1);
+ pcache1RemoveFromHash(pPage);
+ pcache1FreePage(pPage);
}else{
/* Add the page to the PGroup LRU list. */
- PgHdr1 **ppFirst = &pGroup->lru.pLruNext;
- pPage->pLruPrev = &pGroup->lru;
- (pPage->pLruNext = *ppFirst)->pLruPrev = pPage;
- *ppFirst = pPage;
+ if( pGroup->pLruHead ){
+ pGroup->pLruHead->pLruPrev = pPage;
+ pPage->pLruNext = pGroup->pLruHead;
+ pGroup->pLruHead = pPage;
+ }else{
+ pGroup->pLruTail = pPage;
+ pGroup->pLruHead = pPage;
+ }
pCache->nRecyclable++;
- pPage->isPinned = 0;
}
pcache1LeaveMutex(pCache->pGroup);
@@ -42249,9 +38364,8 @@ static void pcache1Destroy(sqlite3_pcache *p){
assert( pGroup->nMinPage >= pCache->nMin );
pGroup->nMinPage -= pCache->nMin;
pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
- pcache1EnforceMaxPage(pCache);
+ pcache1EnforceMaxPage(pGroup);
pcache1LeaveMutex(pGroup);
- sqlite3_free(pCache->pBulk);
sqlite3_free(pCache->apHash);
sqlite3_free(pCache);
}
@@ -42280,19 +38394,6 @@ SQLITE_PRIVATE void sqlite3PCacheSetDefault(void){
sqlite3_config(SQLITE_CONFIG_PCACHE2, &defaultMethods);
}
-/*
-** Return the size of the header on each page of this PCACHE implementation.
-*/
-SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void){ return ROUND8(sizeof(PgHdr1)); }
-
-/*
-** Return the global mutex used by this PCACHE implementation. The
-** sqlite3_status() routine needs access to this mutex.
-*/
-SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void){
- return pcache1.mutex;
-}
-
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
/*
** This function is called to free superfluous dynamically allocated memory
@@ -42307,20 +38408,17 @@ SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){
int nFree = 0;
assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
assert( sqlite3_mutex_notheld(pcache1.mutex) );
- if( sqlite3GlobalConfig.nPage==0 ){
+ if( pcache1.pStart==0 ){
PgHdr1 *p;
pcache1EnterMutex(&pcache1.grp);
- while( (nReq<0 || nFree<nReq)
- && (p=pcache1.grp.lru.pLruPrev)!=0
- && p->isAnchor==0
- ){
+ while( (nReq<0 || nFree<nReq) && ((p=pcache1.grp.pLruTail)!=0) ){
nFree += pcache1MemSize(p->page.pBuf);
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
nFree += sqlite3MemSize(p);
#endif
- assert( p->isPinned==0 );
pcache1PinPage(p);
- pcache1RemoveFromHash(p, 1);
+ pcache1RemoveFromHash(p);
+ pcache1FreePage(p);
}
pcache1LeaveMutex(&pcache1.grp);
}
@@ -42341,8 +38439,7 @@ SQLITE_PRIVATE void sqlite3PcacheStats(
){
PgHdr1 *p;
int nRecyclable = 0;
- for(p=pcache1.grp.lru.pLruNext; p && !p->isAnchor; p=p->pLruNext){
- assert( p->isPinned==0 );
+ for(p=pcache1.grp.pLruHead; p; p=p->pLruNext){
nRecyclable++;
}
*pnCurrent = pcache1.grp.nCurrentPage;
@@ -42406,7 +38503,7 @@ SQLITE_PRIVATE void sqlite3PcacheStats(
** No INSERTs may occur after a SMALLEST. An assertion will fail if
** that is attempted.
**
-** The cost of an INSERT is roughly constant. (Sometimes new memory
+** The cost of an INSERT is roughly constant. (Sometimes new memory
** has to be allocated on an INSERT.) The cost of a TEST with a new
** batch number is O(NlogN) where N is the number of elements in the RowSet.
** The cost of a TEST using the same batch number is O(logN). The cost
@@ -42416,7 +38513,6 @@ SQLITE_PRIVATE void sqlite3PcacheStats(
** There is an added cost of O(N) when switching between TEST and
** SMALLEST primitives.
*/
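The batch rule described for sqlite3RowSetTest() is that an entry is only visible to a TEST whose batch number is later than the batch in which the entry was inserted. A rough standalone sketch of that visibility rule, using a flat array and a linear scan purely for illustration (the real RowSet uses sorted lists and binary trees to obtain the costs quoted above):

#include <stdio.h>

typedef struct { long long rowid; int batch; } Entry;

static Entry aEntry[100];
static int nEntry;

static void rowset_insert(long long rowid, int batch){
  aEntry[nEntry].rowid = rowid;
  aEntry[nEntry].batch = batch;
  nEntry++;
}

/* Return 1 if rowid was inserted in any batch earlier than iBatch. */
static int rowset_test(long long rowid, int iBatch){
  for(int i=0; i<nEntry; i++){
    if( aEntry[i].rowid==rowid && aEntry[i].batch<iBatch ) return 1;
  }
  return 0;
}

int main(void){
  rowset_insert(42, 0);
  rowset_insert(7, 1);
  printf("%d\n", rowset_test(42, 1));  /* 1: inserted in an earlier batch */
  printf("%d\n", rowset_test(7, 1));   /* 0: the same batch is not visible */
  return 0;
}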
-/* #include "sqliteInt.h" */
/*
@@ -42468,8 +38564,8 @@ struct RowSet {
struct RowSetEntry *pFresh; /* Source of new entry objects */
struct RowSetEntry *pForest; /* List of binary trees of entries */
u16 nFresh; /* Number of objects on pFresh */
- u16 rsFlags; /* Various flags */
- int iBatch; /* Current insert batch */
+ u8 rsFlags; /* Various flags */
+ u8 iBatch; /* Current insert batch */
};
/*
@@ -42799,11 +38895,11 @@ SQLITE_PRIVATE int sqlite3RowSetNext(RowSet *p, i64 *pRowid){
** Check to see if element iRowid was inserted into the rowset as
** part of any insert batch prior to iBatch. Return 1 or 0.
**
-** If this is the first test of a new batch and if there exist entries
-** on pRowSet->pEntry, then sort those entries into the forest at
+** If this is the first test of a new batch and if there exist entries
+** on pRowSet->pEntry, then sort those entries into the forest at
** pRowSet->pForest so that they can be tested.
*/
-SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){
+SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, u8 iBatch, sqlite3_int64 iRowid){
struct RowSetEntry *p, *pTree;
/* This routine is never called after sqlite3RowSetNext() */
@@ -42886,7 +38982,6 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64
** another is writing.
*/
#ifndef SQLITE_OMIT_DISKIO
-/* #include "sqliteInt.h" */
/************** Include wal.h in the middle of pager.c ***********************/
/************** Begin file wal.h *********************************************/
/*
@@ -42908,7 +39003,6 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64
#ifndef _WAL_H_
#define _WAL_H_
-/* #include "sqliteInt.h" */
/* Additional values that can be added to the sync_flags argument of
** sqlite3WalFrames():
@@ -43084,12 +39178,12 @@ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal);
** Definition: Two databases (or the same database at two points it time)
** are said to be "logically equivalent" if they give the same answer to
** all queries. Note in particular the content of freelist leaf
-** pages can be changed arbitrarily without affecting the logical equivalence
+** pages can be changed arbitrarily without affecting the logical equivalence
** of the database.
**
** (7) At any time, if any subset, including the empty set and the total set,
** of the unsynced changes to a rollback journal are removed and the
-** journal is rolled back, the resulting database file will be logically
+** journal is rolled back, the resulting database file will be logically
** equivalent to the database file at the beginning of the transaction.
**
** (8) When a transaction is rolled back, the xTruncate method of the VFS
@@ -43386,7 +39480,7 @@ int sqlite3PagerTrace=1; /* True to enable tracing */
**
** The exception is when the database file is unlocked as the pager moves
** from ERROR to OPEN state. At this point there may be a hot-journal file
-** in the file-system that needs to be rolled back (as part of an OPEN->SHARED
+** in the file-system that needs to be rolled back (as part of an OPEN->SHARED
** transition, by the same pager or any other). If the call to xUnlock()
** fails at this point and the pager is left holding an EXCLUSIVE lock, this
** can confuse the call to xCheckReservedLock() call made later as part
@@ -43464,12 +39558,12 @@ struct PagerSavepoint {
/*
** Bits of the Pager.doNotSpill flag. See further description below.
*/
-#define SPILLFLAG_OFF 0x01 /* Never spill cache. Set via pragma */
-#define SPILLFLAG_ROLLBACK 0x02 /* Current rolling back, so do not spill */
-#define SPILLFLAG_NOSYNC 0x04 /* Spill is ok, but do not sync */
+#define SPILLFLAG_OFF 0x01 /* Never spill cache. Set via pragma */
+#define SPILLFLAG_ROLLBACK 0x02 /* Current rolling back, so do not spill */
+#define SPILLFLAG_NOSYNC 0x04 /* Spill is ok, but do not sync */
/*
-** An open page cache is an instance of struct Pager. A description of
+** An open page cache is an instance of struct Pager. A description of
** some of the more important member variables follows:
**
** eState
@@ -43548,11 +39642,11 @@ struct PagerSavepoint {
** while it is being traversed by code in pager_playback(). The SPILLFLAG_OFF
** case is a user preference.
**
-** If the SPILLFLAG_NOSYNC bit is set, writing to the database from
-** pagerStress() is permitted, but syncing the journal file is not.
-** This flag is set by sqlite3PagerWrite() when the file-system sector-size
-** is larger than the database page-size in order to prevent a journal sync
-** from happening in between the journalling of two pages on the same sector.
+** If the SPILLFLAG_NOSYNC bit is set, writing to the database from pagerStress()
+** is permitted, but syncing the journal file is not. This flag is set
+** by sqlite3PagerWrite() when the file-system sector-size is larger than
+** the database page-size in order to prevent a journal sync from happening
+** in between the journalling of two pages on the same sector.
**
** subjInMemory
**
@@ -43634,14 +39728,13 @@ struct Pager {
u8 ckptSyncFlags; /* SYNC_NORMAL or SYNC_FULL for checkpoint */
u8 walSyncFlags; /* SYNC_NORMAL or SYNC_FULL for wal writes */
u8 syncFlags; /* SYNC_NORMAL or SYNC_FULL otherwise */
- u8 tempFile; /* zFilename is a temporary or immutable file */
- u8 noLock; /* Do not lock (except in WAL mode) */
+ u8 tempFile; /* zFilename is a temporary file */
u8 readOnly; /* True for a read-only database */
u8 memDb; /* True to inhibit all file I/O */
/**************************************************************************
** The following block contains those class members that change during
- ** routine operation. Class members not in this block are either fixed
+ ** routine operation. Class members not in this block are either fixed
** when the pager is first created or else only change when there is a
** significant mode change (such as changing the page_size, locking_mode,
** or the journal_mode). From another view, these class members describe
@@ -43654,8 +39747,6 @@ struct Pager {
u8 setMaster; /* True if a m-j name has been written to jrnl */
u8 doNotSpill; /* Do not spill the cache when non-zero */
u8 subjInMemory; /* True to use in-memory sub-journals */
- u8 bUseFetch; /* True to use xFetch() */
- u8 hasHeldSharedLock; /* True if a shared lock has ever been held */
Pgno dbSize; /* Number of pages in the database */
Pgno dbOrigSize; /* dbSize before the current transaction */
Pgno dbFileSize; /* Number of pages in the database file */
@@ -43673,9 +39764,9 @@ struct Pager {
sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */
PagerSavepoint *aSavepoint; /* Array of active savepoints */
int nSavepoint; /* Number of elements in aSavepoint[] */
- u32 iDataVersion; /* Changes whenever database content changes */
char dbFileVers[16]; /* Changes whenever database file changes */
+ u8 bUseFetch; /* True to use xFetch() */
int nMmapOut; /* Number of mmap pages currently outstanding */
sqlite3_int64 szMmap; /* Desired maximum mmap size */
PgHdr *pMmapFreelist; /* List of free mmap page headers (pDirty) */
@@ -43816,7 +39907,7 @@ static const unsigned char aJournalMagic[] = {
**
** if( pPager->jfd->pMethods ){ ...
*/
-#define isOpen(pFd) ((pFd)->pMethods!=0)
+#define isOpen(pFd) ((pFd)->pMethods)
/*
** Return true if this pager uses a write-ahead log instead of the usual
@@ -44035,25 +40126,26 @@ static char *print_pager_state(Pager *p){
static int subjRequiresPage(PgHdr *pPg){
Pager *pPager = pPg->pPager;
PagerSavepoint *p;
- Pgno pgno = pPg->pgno;
+ Pgno pgno;
int i;
- for(i=0; i<pPager->nSavepoint; i++){
- p = &pPager->aSavepoint[i];
- if( p->nOrig>=pgno && 0==sqlite3BitvecTestNotNull(p->pInSavepoint, pgno) ){
- return 1;
+ if( pPager->nSavepoint ){
+ pgno = pPg->pgno;
+ for(i=0; i<pPager->nSavepoint; i++){
+ p = &pPager->aSavepoint[i];
+ if( p->nOrig>=pgno && 0==sqlite3BitvecTest(p->pInSavepoint, pgno) ){
+ return 1;
+ }
}
}
return 0;
}
-#ifdef SQLITE_DEBUG
/*
** Return true if the page is already in the journal file.
*/
-static int pageInJournal(Pager *pPager, PgHdr *pPg){
- return sqlite3BitvecTest(pPager->pInJournal, pPg->pgno);
+static int pageInJournal(PgHdr *pPg){
+ return sqlite3BitvecTest(pPg->pPager->pInJournal, pPg->pgno);
}
-#endif
/*
** Read a 32-bit integer from the given file descriptor. Store the integer
@@ -44104,7 +40196,7 @@ static int pagerUnlockDb(Pager *pPager, int eLock){
assert( eLock!=NO_LOCK || pagerUseWal(pPager)==0 );
if( isOpen(pPager->fd) ){
assert( pPager->eLock>=eLock );
- rc = pPager->noLock ? SQLITE_OK : sqlite3OsUnlock(pPager->fd, eLock);
+ rc = sqlite3OsUnlock(pPager->fd, eLock);
if( pPager->eLock!=UNKNOWN_LOCK ){
pPager->eLock = (u8)eLock;
}
@@ -44128,7 +40220,7 @@ static int pagerLockDb(Pager *pPager, int eLock){
assert( eLock==SHARED_LOCK || eLock==RESERVED_LOCK || eLock==EXCLUSIVE_LOCK );
if( pPager->eLock<eLock || pPager->eLock==UNKNOWN_LOCK ){
- rc = pPager->noLock ? SQLITE_OK : sqlite3OsLock(pPager->fd, eLock);
+ rc = sqlite3OsLock(pPager->fd, eLock);
if( rc==SQLITE_OK && (pPager->eLock!=UNKNOWN_LOCK||eLock==EXCLUSIVE_LOCK) ){
pPager->eLock = (u8)eLock;
IOTRACE(("LOCK %p %d\n", pPager, eLock))
@@ -44259,7 +40351,6 @@ static int readMasterJournal(sqlite3_file *pJrnl, char *zMaster, u32 nMaster){
|| szJ<16
|| SQLITE_OK!=(rc = read32bits(pJrnl, szJ-16, &len))
|| len>=nMaster
- || len==0
|| SQLITE_OK!=(rc = read32bits(pJrnl, szJ-12, &cksum))
|| SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8))
|| memcmp(aMagic, aJournalMagic, 8)
@@ -44637,11 +40728,12 @@ static int writeMasterJournal(Pager *pPager, const char *zMaster){
if( !zMaster
|| pPager->journalMode==PAGER_JOURNALMODE_MEMORY
- || !isOpen(pPager->jfd)
+ || pPager->journalMode==PAGER_JOURNALMODE_OFF
){
return SQLITE_OK;
}
pPager->setMaster = 1;
+ assert( isOpen(pPager->jfd) );
assert( pPager->journalHdr <= pPager->journalOff );
/* Calculate the length in bytes and the checksum of zMaster */
@@ -44665,8 +40757,7 @@ static int writeMasterJournal(Pager *pPager, const char *zMaster){
|| (0 != (rc = sqlite3OsWrite(pPager->jfd, zMaster, nMaster, iHdrOff+4)))
|| (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster, nMaster)))
|| (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster+4, cksum)))
- || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8,
- iHdrOff+4+nMaster+8)))
+ || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8, iHdrOff+4+nMaster+8)))
){
return rc;
}
@@ -44691,20 +40782,26 @@ static int writeMasterJournal(Pager *pPager, const char *zMaster){
}
/*
-** Discard the entire contents of the in-memory page-cache.
+** Find a page in the hash table given its page number. Return
+** a pointer to the page or NULL if the requested page is not
+** already in memory.
*/
-static void pager_reset(Pager *pPager){
- pPager->iDataVersion++;
- sqlite3BackupRestart(pPager->pBackup);
- sqlite3PcacheClear(pPager->pPCache);
+static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){
+ PgHdr *p; /* Return value */
+
+ /* It is not possible for a call to PcacheFetch() with createFlag==0 to
+ ** fail, since no attempt to allocate dynamic memory will be made.
+ */
+ (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &p);
+ return p;
}
/*
-** Return the pPager->iDataVersion value
+** Discard the entire contents of the in-memory page-cache.
*/
-SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager *pPager){
- assert( pPager->eState>PAGER_OPEN );
- return pPager->iDataVersion;
+static void pager_reset(Pager *pPager){
+ sqlite3BackupRestart(pPager->pBackup);
+ sqlite3PcacheClear(pPager->pPCache);
}
/*
@@ -44963,14 +41060,6 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){
rc = SQLITE_OK;
}else{
rc = sqlite3OsTruncate(pPager->jfd, 0);
- if( rc==SQLITE_OK && pPager->fullSync ){
- /* Make sure the new file size is written into the inode right away.
- ** Otherwise the journal might resurrect following a power loss and
- ** cause the last transaction to roll back. See
- ** https://bugzilla.mozilla.org/show_bug.cgi?id=1072773
- */
- rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
- }
}
pPager->journalOff = 0;
}else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST
@@ -44999,10 +41088,10 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){
#ifdef SQLITE_CHECK_PAGES
sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash);
if( pPager->dbSize==0 && sqlite3PcacheRefCount(pPager->pPCache)>0 ){
- PgHdr *p = sqlite3PagerLookup(pPager, 1);
+ PgHdr *p = pager_lookup(pPager, 1);
if( p ){
p->pageHash = 0;
- sqlite3PagerUnrefNotNull(p);
+ sqlite3PagerUnref(p);
}
}
#endif
@@ -45031,11 +41120,6 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){
rc = pager_truncate(pPager, pPager->dbSize);
}
- if( rc==SQLITE_OK && bCommit && isOpen(pPager->fd) ){
- rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_COMMIT_PHASETWO, 0);
- if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
- }
-
if( !pPager->exclusiveMode
&& (!pagerUseWal(pPager) || sqlite3WalExclusiveMode(pPager->pWal, 0))
){
@@ -45124,20 +41208,6 @@ static void pagerReportSize(Pager *pPager){
# define pagerReportSize(X) /* No-op if we do not support a codec */
#endif
-#ifdef SQLITE_HAS_CODEC
-/*
-** Make sure the number of reserved bits is the same in the destination
-** pager as it is in the source. This comes up when a VACUUM changes the
-** number of reserved bits to the "optimal" amount.
-*/
-SQLITE_PRIVATE void sqlite3PagerAlignReserve(Pager *pDest, Pager *pSrc){
- if( pDest->nReserve!=pSrc->nReserve ){
- pDest->nReserve = pSrc->nReserve;
- pagerReportSize(pDest);
- }
-}
-#endif
-
/*
** Read a single page from either the journal file (if isMainJrnl==1) or
** from the sub-journal (if isMainJrnl==0) and playback that page.
@@ -45240,7 +41310,7 @@ static int pager_playback_one_page(
}
}
- /* If this page has already been played back before during the current
+ /* If this page has already been played back before during the current
** rollback, then don't bother to play it back again.
*/
if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){
@@ -45292,7 +41362,7 @@ static int pager_playback_one_page(
if( pagerUseWal(pPager) ){
pPg = 0;
}else{
- pPg = sqlite3PagerLookup(pPager, pgno);
+ pPg = pager_lookup(pPager, pgno);
}
assert( pPg || !MEMDB );
assert( pPager->eState!=PAGER_OPEN || pPg==0 );
@@ -45472,7 +41542,7 @@ static int pager_delmaster(Pager *pPager, const char *zMaster){
rc = sqlite3OsFileSize(pMaster, &nMasterJournal);
if( rc!=SQLITE_OK ) goto delmaster_out;
nMasterPtr = pVfs->mxPathname+1;
- zMasterJournal = sqlite3Malloc(nMasterJournal + nMasterPtr + 1);
+ zMasterJournal = sqlite3Malloc((int)nMasterJournal + nMasterPtr + 1);
if( !zMasterJournal ){
rc = SQLITE_NOMEM;
goto delmaster_out;
@@ -45541,7 +41611,7 @@ delmaster_out:
** If the file on disk is currently larger than nPage pages, then use the VFS
** xTruncate() method to truncate it.
**
-** Or, it might be the case that the file on disk is smaller than
+** Or, it might be the case that the file on disk is smaller than
** nPage pages. Some operating system implementations can get confused if
** you try to truncate a file to some size that is larger than it
** currently is, so detect this case and write a single zero byte to
@@ -45600,7 +41670,7 @@ SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *pFile){
/*
** Set the value of the Pager.sectorSize variable for the given
** pager based on the value returned by the xSectorSize method
-** of the open database file. The sector size will be used
+** of the open database file. The sector size will be used
** to determine the size and alignment of journal header and
** master journal pointers within created journal files.
**
@@ -45863,7 +41933,7 @@ end_playback:
if( rc==SQLITE_OK
&& (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)
){
- rc = sqlite3PagerSync(pPager, 0);
+ rc = sqlite3PagerSync(pPager);
}
if( rc==SQLITE_OK ){
rc = pager_end_transaction(pPager, zMaster[0]!='\0', 0);
@@ -45935,7 +42005,7 @@ static int readDbPage(PgHdr *pPg, u32 iFrame){
**
** For an encrypted database, the situation is more complex: bytes
** 24..39 of the database are white noise. But the probability of
- ** white noise equaling 16 bytes of 0xff is vanishingly small so
+ ** white noise equaling 16 bytes of 0xff is vanishingly small so
** we should still be ok.
*/
memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers));
@@ -46009,7 +42079,7 @@ static int pagerUndoCallback(void *pCtx, Pgno iPg){
if( rc==SQLITE_OK ){
pPager->xReiniter(pPg);
}
- sqlite3PagerUnrefNotNull(pPg);
+ sqlite3PagerUnref(pPg);
}
}
@@ -46069,7 +42139,9 @@ static int pagerWalFrames(
){
int rc; /* Return code */
int nList; /* Number of pages in pList */
+#if defined(SQLITE_DEBUG) || defined(SQLITE_CHECK_PAGES)
PgHdr *p; /* For looping over pages */
+#endif
assert( pPager->pWal );
assert( pList );
@@ -46086,6 +42158,7 @@ static int pagerWalFrames(
** any pages with page numbers greater than nTruncate into the WAL file.
** They will never be read by any client. So remove them from the pDirty
** list here. */
+ PgHdr *p;
PgHdr **ppNext = &pList;
nList = 0;
for(p=pList; (*ppNext = p)!=0; p=p->pDirty){
@@ -46105,6 +42178,7 @@ static int pagerWalFrames(
pPager->pageSize, pList, nTruncate, isCommit, pPager->walSyncFlags
);
if( rc==SQLITE_OK && pPager->pBackup ){
+ PgHdr *p;
for(p=pList; p; p=p->pDirty){
sqlite3BackupUpdate(pPager->pBackup, p->pgno, (u8 *)p->pData);
}
@@ -46174,10 +42248,11 @@ static int pagerPagecount(Pager *pPager, Pgno *pnPage){
assert( pPager->eLock>=SHARED_LOCK );
nPage = sqlite3WalDbsize(pPager->pWal);
- /* If the number of pages in the database is not available from the
- ** WAL sub-system, determine the page count based on the size of
- ** the database file. If the size of the database file is not an
- ** integer multiple of the page-size, round up the result.
+ /* If the database size was not available from the WAL sub-system,
+ ** determine it based on the size of the database file. If the size
+ ** of the database file is not an integer multiple of the page-size,
+ ** round down to the nearest page. Except, any file larger than 0
+ ** bytes in size is considered to contain at least one page.
*/
if( nPage==0 ){
i64 n = 0; /* Size of db file in bytes */
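A small stand-alone sketch of the two rounding rules described in the comments above (the 4096-byte page size and 12,289-byte file size are hypothetical, not taken from this diff): the removed comment rounds a partial page up, while the restored comment rounds down but still counts any non-empty file as at least one page.

#include <stdio.h>

int main(void){
  long long n = 12289;       /* hypothetical database file size in bytes */
  int pageSize = 4096;       /* hypothetical page size */
  long long roundUp   = (n + pageSize - 1)/pageSize;   /* newer rule: 4 pages */
  long long roundDown = n/pageSize;                    /* older rule: 3 pages */
  if( roundDown==0 && n>0 ) roundDown = 1;   /* but never 0 for a non-empty file */
  printf("round up: %lld, round down: %lld\n", roundUp, roundDown);
  return 0;
}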
@@ -46657,15 +42732,11 @@ SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nR
if( rc==SQLITE_OK ){
pager_reset(pPager);
- rc = sqlite3PcacheSetPageSize(pPager->pPCache, pageSize);
- }
- if( rc==SQLITE_OK ){
- sqlite3PageFree(pPager->pTmpSpace);
- pPager->pTmpSpace = pNew;
pPager->dbSize = (Pgno)((nByte+pageSize-1)/pageSize);
pPager->pageSize = pageSize;
- }else{
- sqlite3PageFree(pNew);
+ sqlite3PageFree(pPager->pTmpSpace);
+ pPager->pTmpSpace = pNew;
+ sqlite3PcacheSetPageSize(pPager->pPCache, pageSize);
}
}
@@ -46799,7 +42870,7 @@ static int pager_wait_on_lock(Pager *pPager, int locktype){
int rc; /* Return code */
/* Check that this is either a no-op (because the requested lock is
- ** already held), or one of the transitions that the busy-handler
+ ** already held), or one of the transitions that the busy-handler
** may be invoked during, according to the comment above
** sqlite3PagerSetBusyhandler().
*/
@@ -46918,7 +42989,7 @@ static int pagerAcquireMapPage(
PgHdr **ppPage /* OUT: Acquired page object */
){
PgHdr *p; /* Memory mapped page to return */
-
+
if( pPager->pMmapFreelist ){
*ppPage = p = pPager->pMmapFreelist;
pPager->pMmapFreelist = p->pDirty;
@@ -47342,6 +43413,8 @@ static int openSubJournal(Pager *pPager){
/*
** Append a record of the current state of page pPg to the sub-journal.
+** It is the caller's responsibility to use subjRequiresPage() to check
+** that it is really required before calling this function.
**
** If successful, set the bit corresponding to pPg->pgno in the bitvecs
** for all open savepoints before returning.
@@ -47361,7 +43434,7 @@ static int subjournalPage(PgHdr *pPg){
assert( isOpen(pPager->jfd) || pagerUseWal(pPager) );
assert( isOpen(pPager->sjfd) || pPager->nSubRec==0 );
assert( pagerUseWal(pPager)
- || pageInJournal(pPager, pPg)
+ || pageInJournal(pPg)
|| pPg->pgno>pPager->dbOrigSize
);
rc = openSubJournal(pPager);
@@ -47388,13 +43461,6 @@ static int subjournalPage(PgHdr *pPg){
}
return rc;
}
-static int subjournalPageIfRequired(PgHdr *pPg){
- if( subjRequiresPage(pPg) ){
- return subjournalPage(pPg);
- }else{
- return SQLITE_OK;
- }
-}
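With the subjournalPageIfRequired() wrapper removed above, each call site goes back to making the check itself, as the restored comment on subjournalPage() requires; the pattern, which reappears further down in this diff, is simply:

if( subjRequiresPage(pPg) ){
  rc = subjournalPage(pPg);
}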
/*
** This function is called by the pcache layer when it has reached some
@@ -47432,8 +43498,8 @@ static int pagerStress(void *p, PgHdr *pPg){
** a rollback or by user request, respectively.
**
** Spilling is also prohibited when in an error state since that could
- ** lead to database corruption. In the current implementation it
- ** is impossible for sqlite3PcacheFetch() to be called with createFlag==3
+ ** lead to database corruption. In the current implementation it
+ ** is impossible for sqlite3PcacheFetch() to be called with createFlag==1
** while in the error state, hence it is impossible for this routine to
** be called in the error state. Nevertheless, we include a NEVER()
** test for the error state as a safeguard against future changes.
@@ -47452,7 +43518,9 @@ static int pagerStress(void *p, PgHdr *pPg){
pPg->pDirty = 0;
if( pagerUseWal(pPager) ){
/* Write a single frame for this page to the log. */
- rc = subjournalPageIfRequired(pPg);
+ if( subjRequiresPage(pPg) ){
+ rc = subjournalPage(pPg);
+ }
if( rc==SQLITE_OK ){
rc = pagerWalFrames(pPager, pPg, 0, 0);
}
@@ -47465,6 +43533,39 @@ static int pagerStress(void *p, PgHdr *pPg){
rc = syncJournal(pPager, 1);
}
+ /* If the page number of this page is larger than the current size of
+ ** the database image, it may need to be written to the sub-journal.
+ ** This is because the call to pager_write_pagelist() below will not
+ ** actually write data to the file in this case.
+ **
+ ** Consider the following sequence of events:
+ **
+ ** BEGIN;
+ ** <journal page X>
+ ** <modify page X>
+ ** SAVEPOINT sp;
+ ** <shrink database file to Y pages>
+ ** pagerStress(page X)
+ ** ROLLBACK TO sp;
+ **
+ ** If (X>Y), then when pagerStress is called page X will not be written
+ ** out to the database file, but will be dropped from the cache. Then,
+ ** following the "ROLLBACK TO sp" statement, reading page X will read
+ ** data from the database file. This will be the copy of page X as it
+ ** was when the transaction started, not as it was when "SAVEPOINT sp"
+ ** was executed.
+ **
+ ** The solution is to write the current data for page X into the
+ ** sub-journal file now (if it is not already there), so that it will
+ ** be restored to its current value when the "ROLLBACK TO sp" is
+ ** executed.
+ */
+ if( NEVER(
+ rc==SQLITE_OK && pPg->pgno>pPager->dbSize && subjRequiresPage(pPg)
+ ) ){
+ rc = subjournalPage(pPg);
+ }
+
/* Write the contents of the page out to the database file. */
if( rc==SQLITE_OK ){
assert( (pPg->flags&PGHDR_NEED_SYNC)==0 );
@@ -47673,38 +43774,30 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
** + The value returned by sqlite3OsSectorSize()
** + The largest page size that can be written atomically.
*/
- if( rc==SQLITE_OK ){
- int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
- if( !readOnly ){
- setSectorSize(pPager);
- assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE);
- if( szPageDflt<pPager->sectorSize ){
- if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){
- szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE;
- }else{
- szPageDflt = (u32)pPager->sectorSize;
- }
+ if( rc==SQLITE_OK && !readOnly ){
+ setSectorSize(pPager);
+ assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE);
+ if( szPageDflt<pPager->sectorSize ){
+ if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){
+ szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE;
+ }else{
+ szPageDflt = (u32)pPager->sectorSize;
}
+ }
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
- {
- int ii;
- assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
- assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
- assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536);
- for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){
- if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){
- szPageDflt = ii;
- }
+ {
+ int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
+ int ii;
+ assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
+ assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
+ assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536);
+ for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){
+ if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){
+ szPageDflt = ii;
}
}
-#endif
- }
- pPager->noLock = sqlite3_uri_boolean(zFilename, "nolock", 0);
- if( (iDc & SQLITE_IOCAP_IMMUTABLE)!=0
- || sqlite3_uri_boolean(zFilename, "immutable", 0) ){
- vfsFlags |= SQLITE_OPEN_READONLY;
- goto act_like_temp_file;
}
+#endif
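The loop above works because each SQLITE_IOCAP_ATOMICnnn flag is defined as nnn>>8, so iDc & (SQLITE_IOCAP_ATOMIC|(ii>>8)) is non-zero exactly when the VFS advertises either fully atomic writes or atomic writes of ii bytes. A worked example with a hypothetical candidate size of 4096 bytes (illustrative only, not part of this change):

int ii  = 4096;
int bit = ii>>8;                    /* 0x10 == SQLITE_IOCAP_ATOMIC4K */
int iDc = SQLITE_IOCAP_ATOMIC4K;    /* hypothetical xDeviceCharacteristics() result */
if( iDc & (SQLITE_IOCAP_ATOMIC|bit) ){
  /* 4096-byte writes are atomic, so the default page size may grow to 4096 */
}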
}
}else{
/* If a temporary file is requested, it is not opened immediately.
@@ -47714,14 +43807,10 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
** This branch is also run for an in-memory database. An in-memory
** database is the same as a temp-file that is never written out to
** disk and uses an in-memory rollback journal.
- **
- ** This branch also runs for files marked as immutable.
*/
-act_like_temp_file:
tempFile = 1;
- pPager->eState = PAGER_READER; /* Pretend we already have a lock */
- pPager->eLock = EXCLUSIVE_LOCK; /* Pretend we are in EXCLUSIVE mode */
- pPager->noLock = 1; /* Do no locking */
+ pPager->eState = PAGER_READER;
+ pPager->eLock = EXCLUSIVE_LOCK;
readOnly = (vfsFlags&SQLITE_OPEN_READONLY);
}
@@ -47734,23 +43823,22 @@ act_like_temp_file:
testcase( rc!=SQLITE_OK );
}
- /* Initialize the PCache object. */
- if( rc==SQLITE_OK ){
- assert( nExtra<1000 );
- nExtra = ROUND8(nExtra);
- rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb,
- !memDb?pagerStress:0, (void *)pPager, pPager->pPCache);
- }
-
- /* If an error occurred above, free the Pager structure and close the file.
+ /* If an error occurred in either of the blocks above, free the
+ ** Pager structure and close the file.
*/
if( rc!=SQLITE_OK ){
+ assert( !pPager->pTmpSpace );
sqlite3OsClose(pPager->fd);
- sqlite3PageFree(pPager->pTmpSpace);
sqlite3_free(pPager);
return rc;
}
+ /* Initialize the PCache object. */
+ assert( nExtra<1000 );
+ nExtra = ROUND8(nExtra);
+ sqlite3PcacheOpen(szPageDflt, nExtra, !memDb,
+ !memDb?pagerStress:0, (void *)pPager, pPager->pPCache);
+
PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename));
IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename))
@@ -47763,6 +43851,9 @@ act_like_temp_file:
/* pPager->nPage = 0; */
pPager->mxPgno = SQLITE_MAX_PAGE_COUNT;
/* pPager->state = PAGER_UNLOCK; */
+#if 0
+ assert( pPager->state == (tempFile ? PAGER_EXCLUSIVE : PAGER_UNLOCK) );
+#endif
/* pPager->errMask = 0; */
pPager->tempFile = (u8)tempFile;
assert( tempFile==PAGER_LOCKINGMODE_NORMAL
@@ -47808,30 +43899,6 @@ act_like_temp_file:
}
-/* Verify that the database file has not been deleted or renamed out from
-** under the pager. Return SQLITE_OK if the database is still where it ought
-** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error
-** code from sqlite3OsAccess()) if the database has gone missing.
-*/
-static int databaseIsUnmoved(Pager *pPager){
- int bHasMoved = 0;
- int rc;
-
- if( pPager->tempFile ) return SQLITE_OK;
- if( pPager->dbSize==0 ) return SQLITE_OK;
- assert( pPager->zFilename && pPager->zFilename[0] );
- rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved);
- if( rc==SQLITE_NOTFOUND ){
- /* If the HAS_MOVED file-control is unimplemented, assume that the file
- ** has not been moved. That is the historical behavior of SQLite: prior to
- ** version 3.8.3, it never checked */
- rc = SQLITE_OK;
- }else if( rc==SQLITE_OK && bHasMoved ){
- rc = SQLITE_READONLY_DBMOVED;
- }
- return rc;
-}
-
/*
** This function is called after transitioning from PAGER_UNLOCK to
@@ -47897,17 +43964,15 @@ static int hasHotJournal(Pager *pPager, int *pExists){
if( rc==SQLITE_OK && !locked ){
Pgno nPage; /* Number of pages in database file */
+ /* Check the size of the database file. If it consists of 0 pages,
+ ** then delete the journal file. See the header comment above for
+ ** the reasoning here. Delete the obsolete journal file under
+ ** a RESERVED lock to avoid race conditions and to avoid violating
+ ** [H33020].
+ */
rc = pagerPagecount(pPager, &nPage);
if( rc==SQLITE_OK ){
- /* If the database is zero pages in size, that means that either (1) the
- ** journal is a remnant from a prior database with the same name where
- ** the database file but not the journal was deleted, or (2) the initial
- ** transaction that populates a new database is being rolled back.
- ** In either case, the journal file can be deleted. However, take care
- ** not to delete the journal file if it is already open due to
- ** journal_mode=PERSIST.
- */
- if( nPage==0 && !jrnlOpen ){
+ if( nPage==0 ){
sqlite3BeginBenignMalloc();
if( pagerLockDb(pPager, RESERVED_LOCK)==SQLITE_OK ){
sqlite3OsDelete(pVfs, pPager->zJournal, 0);
@@ -47937,7 +44002,7 @@ static int hasHotJournal(Pager *pPager, int *pExists){
*pExists = (first!=0);
}else if( rc==SQLITE_CANTOPEN ){
/* If we cannot open the rollback journal file in order to see if
- ** it has a zero header, that might be due to an I/O error, or
+ ** it has a zero header, that might be due to an I/O error, or
** it might be due to the race condition described above and in
** ticket #3883. Either way, assume that the journal is hot.
** This might be a false positive. But if it is, then the
@@ -48119,14 +44184,18 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
);
}
- if( !pPager->tempFile && pPager->hasHeldSharedLock ){
- /* The shared-lock has just been acquired, so check to
- ** see if the database has been modified. If the database has changed,
- ** flush the cache. The hasHeldSharedLock flag prevents this from
- ** occurring on the very first access to a file, in order to save a
- ** single unnecessary sqlite3OsRead() call at the start-up.
+ if( !pPager->tempFile && (
+ pPager->pBackup
+ || sqlite3PcachePagecount(pPager->pPCache)>0
+ || USEFETCH(pPager)
+ )){
+ /* The shared-lock has just been acquired on the database file
+ ** and there are already pages in the cache (from a previous
+ ** read or write transaction). Check to see if the database
+ ** has been modified. If the database has changed, flush the
+ ** cache.
**
- ** Database changes are detected by looking at 15 bytes beginning
+ ** Database changes are detected by looking at 15 bytes beginning
** at offset 24 into the file. The first 4 of these 16 bytes are
** a 32-bit counter that is incremented with each change. The
** other bytes change randomly with each file change when
@@ -48192,7 +44261,6 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
assert( pPager->eState==PAGER_OPEN );
}else{
pPager->eState = PAGER_READER;
- pPager->hasHeldSharedLock = 1;
}
return rc;
}
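The change-detection test described in the comment block earlier in this function amounts to re-reading the 16-byte version stamp at offset 24 and discarding the cache when it no longer matches the copy kept in Pager.dbFileVers. A minimal sketch (not the exact code in this file; handling of short reads and I/O errors is omitted):

char dbFileVers[16];
rc = sqlite3OsRead(pPager->fd, dbFileVers, sizeof(dbFileVers), 24);
if( rc==SQLITE_OK
 && memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0
){
  pager_reset(pPager);   /* another connection changed the file: drop the cache */
}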
@@ -48276,37 +44344,33 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
** page 1 if there is no write-transaction open or the ACQUIRE_READONLY
** flag was specified by the caller. And so long as the db is not a
** temporary or in-memory database. */
- const int bMmapOk = (pgno>1 && USEFETCH(pPager)
+ const int bMmapOk = (pgno!=1 && USEFETCH(pPager)
&& (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY))
#ifdef SQLITE_HAS_CODEC
&& pPager->xCodec==0
#endif
);
- /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here
- ** allows the compiler optimizer to reuse the results of the "pgno>1"
- ** test in the previous statement, and avoid testing pgno==0 in the
- ** common case where pgno is large. */
- if( pgno<=1 && pgno==0 ){
- return SQLITE_CORRUPT_BKPT;
- }
assert( pPager->eState>=PAGER_READER );
assert( assert_pager_state(pPager) );
assert( noContent==0 || bMmapOk==0 );
- assert( pPager->hasHeldSharedLock==1 );
+ if( pgno==0 ){
+ return SQLITE_CORRUPT_BKPT;
+ }
/* If the pager is in the error state, return an error immediately.
** Otherwise, request the page from the PCache layer. */
if( pPager->errCode!=SQLITE_OK ){
rc = pPager->errCode;
}else{
+
if( bMmapOk && pagerUseWal(pPager) ){
rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
if( rc!=SQLITE_OK ) goto pager_acquire_err;
}
- if( bMmapOk && iFrame==0 ){
+ if( iFrame==0 && bMmapOk ){
void *pData = 0;
rc = sqlite3OsFetch(pPager->fd,
@@ -48315,7 +44379,7 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
if( rc==SQLITE_OK && pData ){
if( pPager->eState>PAGER_READER ){
- pPg = sqlite3PagerLookup(pPager, pgno);
+ (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg);
}
if( pPg==0 ){
rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg);
@@ -48333,21 +44397,7 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
}
}
- {
- sqlite3_pcache_page *pBase;
- pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3);
- if( pBase==0 ){
- rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase);
- if( rc!=SQLITE_OK ) goto pager_acquire_err;
- if( pBase==0 ){
- pPg = *ppPage = 0;
- rc = SQLITE_NOMEM;
- goto pager_acquire_err;
- }
- }
- pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase);
- assert( pPg!=0 );
- }
+ rc = sqlite3PcacheFetch(pPager->pPCache, pgno, 1, ppPage);
}
if( rc!=SQLITE_OK ){
@@ -48357,11 +44407,10 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
pPg = 0;
goto pager_acquire_err;
}
- assert( pPg==(*ppPage) );
- assert( pPg->pgno==pgno );
- assert( pPg->pPager==pPager || pPg->pPager==0 );
+ assert( (*ppPage)->pgno==pgno );
+ assert( (*ppPage)->pPager==pPager || (*ppPage)->pPager==0 );
- if( pPg->pPager && !noContent ){
+ if( (*ppPage)->pPager && !noContent ){
/* In this case the pcache already contains an initialized copy of
** the page. Return without further ado. */
assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) );
@@ -48372,6 +44421,7 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
/* The pager cache has created a new page. Its content needs to
** be initialized. */
+ pPg = *ppPage;
pPg->pPager = pPager;
/* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page
@@ -48444,14 +44494,13 @@ pager_acquire_err:
** has ever happened.
*/
SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){
- sqlite3_pcache_page *pPage;
+ PgHdr *pPg = 0;
assert( pPager!=0 );
assert( pgno!=0 );
assert( pPager->pPCache!=0 );
- pPage = sqlite3PcacheFetch(pPager->pPCache, pgno, 0);
- assert( pPage==0 || pPager->hasHeldSharedLock );
- if( pPage==0 ) return 0;
- return sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pPage);
+ assert( pPager->eState>=PAGER_READER && pPager->eState!=PAGER_ERROR );
+ sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg);
+ return pPg;
}
/*
@@ -48462,19 +44511,16 @@ SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){
** are released, a rollback occurs and the lock on the database is
** removed.
*/
-SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){
- Pager *pPager;
- assert( pPg!=0 );
- pPager = pPg->pPager;
- if( pPg->flags & PGHDR_MMAP ){
- pagerReleaseMapPage(pPg);
- }else{
- sqlite3PcacheRelease(pPg);
- }
- pagerUnlockIfUnused(pPager);
-}
SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){
- if( pPg ) sqlite3PagerUnrefNotNull(pPg);
+ if( pPg ){
+ Pager *pPager = pPg->pPager;
+ if( pPg->flags & PGHDR_MMAP ){
+ pagerReleaseMapPage(pPg);
+ }else{
+ sqlite3PcacheRelease(pPg);
+ }
+ pagerUnlockIfUnused(pPager);
+ }
}
/*
@@ -48529,19 +44575,13 @@ static int pager_open_journal(Pager *pPager){
(SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL):
(SQLITE_OPEN_MAIN_JOURNAL)
);
-
- /* Verify that the database still has the same name as it did when
- ** it was originally opened. */
- rc = databaseIsUnmoved(pPager);
- if( rc==SQLITE_OK ){
-#ifdef SQLITE_ENABLE_ATOMIC_WRITE
- rc = sqlite3JournalOpen(
- pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager)
- );
-#else
- rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0);
-#endif
- }
+ #ifdef SQLITE_ENABLE_ATOMIC_WRITE
+ rc = sqlite3JournalOpen(
+ pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager)
+ );
+ #else
+ rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0);
+ #endif
}
assert( rc!=SQLITE_OK || isOpen(pPager->jfd) );
}
@@ -48655,59 +44695,6 @@ SQLITE_PRIVATE int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory
}
/*
-** Write page pPg onto the end of the rollback journal.
-*/
-static SQLITE_NOINLINE int pagerAddPageToRollbackJournal(PgHdr *pPg){
- Pager *pPager = pPg->pPager;
- int rc;
- u32 cksum;
- char *pData2;
- i64 iOff = pPager->journalOff;
-
- /* We should never write to the journal file the page that
- ** contains the database locks. The following assert verifies
- ** that we do not. */
- assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) );
-
- assert( pPager->journalHdr<=pPager->journalOff );
- CODEC2(pPager, pPg->pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2);
- cksum = pager_cksum(pPager, (u8*)pData2);
-
- /* Even if an IO or diskfull error occurs while journalling the
- ** page in the block above, set the need-sync flag for the page.
- ** Otherwise, when the transaction is rolled back, the logic in
- ** playback_one_page() will think that the page needs to be restored
- ** in the database file. And if an IO error occurs while doing so,
- ** then corruption may follow.
- */
- pPg->flags |= PGHDR_NEED_SYNC;
-
- rc = write32bits(pPager->jfd, iOff, pPg->pgno);
- if( rc!=SQLITE_OK ) return rc;
- rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, iOff+4);
- if( rc!=SQLITE_OK ) return rc;
- rc = write32bits(pPager->jfd, iOff+pPager->pageSize+4, cksum);
- if( rc!=SQLITE_OK ) return rc;
-
- IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno,
- pPager->journalOff, pPager->pageSize));
- PAGER_INCR(sqlite3_pager_writej_count);
- PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n",
- PAGERID(pPager), pPg->pgno,
- ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg)));
-
- pPager->journalOff += 8 + pPager->pageSize;
- pPager->nRec++;
- assert( pPager->pInJournal!=0 );
- rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno);
- testcase( rc==SQLITE_NOMEM );
- assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
- rc |= addToSavepointBitvecs(pPager, pPg->pgno);
- assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
- return rc;
-}
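For reference, the record this removed helper lays out is a 4-byte page number, the page image, and a 4-byte checksum, which is where the journalOff += 8 + pageSize bookkeeping above comes from. With a hypothetical 4096-byte page size (illustrative arithmetic only):

int pageSize   = 4096;                 /* hypothetical page size */
int recordSize = 4 + pageSize + 4;     /* pgno + data + cksum == 8 + pageSize */
/* recordSize == 4104 bytes per journalled page */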
-
-/*
** Mark a single data page as writeable. The page is written into the
** main journal or sub-journal as required. If the page is written into
** one of the journals, the corresponding bit is set in the
@@ -48715,6 +44702,7 @@ static SQLITE_NOINLINE int pagerAddPageToRollbackJournal(PgHdr *pPg){
** of any open savepoints as appropriate.
*/
static int pager_write(PgHdr *pPg){
+ void *pData = pPg->pData;
Pager *pPager = pPg->pPager;
int rc = SQLITE_OK;
@@ -48727,8 +44715,15 @@ static int pager_write(PgHdr *pPg){
|| pPager->eState==PAGER_WRITER_DBMOD
);
assert( assert_pager_state(pPager) );
- assert( pPager->errCode==0 );
- assert( pPager->readOnly==0 );
+
+ /* If an error has been previously detected, report the same error
+ ** again. This should not happen, but the check provides robustness. */
+ if( NEVER(pPager->errCode) ) return pPager->errCode;
+
+ /* Higher-level routines never call this function if the database is not
+ ** writable. But check anyway, just for robustness. */
+ if( NEVER(pPager->readOnly) ) return SQLITE_PERM;
+
CHECK_PAGE(pPg);
/* The journal file needs to be opened. Higher level routines have already
@@ -48747,142 +44742,93 @@ static int pager_write(PgHdr *pPg){
assert( pPager->eState>=PAGER_WRITER_CACHEMOD );
assert( assert_pager_state(pPager) );
- /* Mark the page that is about to be modified as dirty. */
- sqlite3PcacheMakeDirty(pPg);
-
- /* If a rollback journal is in use, then make sure the page that is about
- ** to change is in the rollback journal, or if the page is a new page off
- ** the end of the file, make sure it is marked as PGHDR_NEED_SYNC.
- */
- assert( (pPager->pInJournal!=0) == isOpen(pPager->jfd) );
- if( pPager->pInJournal!=0
- && sqlite3BitvecTestNotNull(pPager->pInJournal, pPg->pgno)==0
- ){
- assert( pagerUseWal(pPager)==0 );
- if( pPg->pgno<=pPager->dbOrigSize ){
- rc = pagerAddPageToRollbackJournal(pPg);
- if( rc!=SQLITE_OK ){
- return rc;
- }
- }else{
- if( pPager->eState!=PAGER_WRITER_DBMOD ){
- pPg->flags |= PGHDR_NEED_SYNC;
- }
- PAGERTRACE(("APPEND %d page %d needSync=%d\n",
- PAGERID(pPager), pPg->pgno,
- ((pPg->flags&PGHDR_NEED_SYNC)?1:0)));
- }
- }
-
- /* The PGHDR_DIRTY bit is set above when the page was added to the dirty-list
- ** and before writing the page into the rollback journal. Wait until now,
- ** after the page has been successfully journalled, before setting the
- ** PGHDR_WRITEABLE bit that indicates that the page can be safely modified.
+ /* Mark the page as dirty. If the page has already been written
+ ** to the journal then we can return right away.
*/
- pPg->flags |= PGHDR_WRITEABLE;
+ sqlite3PcacheMakeDirty(pPg);
+ if( pageInJournal(pPg) && !subjRequiresPage(pPg) ){
+ assert( !pagerUseWal(pPager) );
+ }else{
- /* If the statement journal is open and the page is not in it,
- ** then write the page into the statement journal.
- */
- if( pPager->nSavepoint>0 ){
- rc = subjournalPageIfRequired(pPg);
- }
-
- /* Update the database size and return. */
- if( pPager->dbSize<pPg->pgno ){
- pPager->dbSize = pPg->pgno;
- }
- return rc;
-}
-
-/*
-** This is a variant of sqlite3PagerWrite() that runs when the sector size
-** is larger than the page size. SQLite makes the (reasonable) assumption that
-** all bytes of a sector are written together by hardware. Hence, all bytes of
-** a sector need to be journalled in case of a power loss in the middle of
-** a write.
-**
-** Usually, the sector size is less than or equal to the page size, in which
-** case pages can be individually written. This routine only runs in the
-** exceptional case where the page size is smaller than the sector size.
-*/
-static SQLITE_NOINLINE int pagerWriteLargeSector(PgHdr *pPg){
- int rc = SQLITE_OK; /* Return code */
- Pgno nPageCount; /* Total number of pages in database file */
- Pgno pg1; /* First page of the sector pPg is located on. */
- int nPage = 0; /* Number of pages starting at pg1 to journal */
- int ii; /* Loop counter */
- int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */
- Pager *pPager = pPg->pPager; /* The pager that owns pPg */
- Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
-
- /* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow
- ** a journal header to be written between the pages journaled by
- ** this function.
- */
- assert( !MEMDB );
- assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)==0 );
- pPager->doNotSpill |= SPILLFLAG_NOSYNC;
-
- /* This trick assumes that both the page-size and sector-size are
- ** an integer power of 2. It sets variable pg1 to the identifier
- ** of the first page of the sector pPg is located on.
- */
- pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
+ /* The transaction journal now exists and we have a RESERVED or an
+ ** EXCLUSIVE lock on the main database file. Write the current page to
+ ** the transaction journal if it is not there already.
+ */
+ if( !pageInJournal(pPg) && !pagerUseWal(pPager) ){
+ assert( pagerUseWal(pPager)==0 );
+ if( pPg->pgno<=pPager->dbOrigSize && isOpen(pPager->jfd) ){
+ u32 cksum;
+ char *pData2;
+ i64 iOff = pPager->journalOff;
+
+ /* We should never write to the journal file the page that
+ ** contains the database locks. The following assert verifies
+ ** that we do not. */
+ assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) );
+
+ assert( pPager->journalHdr<=pPager->journalOff );
+ CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2);
+ cksum = pager_cksum(pPager, (u8*)pData2);
+
+ /* Even if an IO or diskfull error occurs while journalling the
+ ** page in the block above, set the need-sync flag for the page.
+ ** Otherwise, when the transaction is rolled back, the logic in
+ ** playback_one_page() will think that the page needs to be restored
+ ** in the database file. And if an IO error occurs while doing so,
+ ** then corruption may follow.
+ */
+ pPg->flags |= PGHDR_NEED_SYNC;
- nPageCount = pPager->dbSize;
- if( pPg->pgno>nPageCount ){
- nPage = (pPg->pgno - pg1)+1;
- }else if( (pg1+nPagePerSector-1)>nPageCount ){
- nPage = nPageCount+1-pg1;
- }else{
- nPage = nPagePerSector;
- }
- assert(nPage>0);
- assert(pg1<=pPg->pgno);
- assert((pg1+nPage)>pPg->pgno);
+ rc = write32bits(pPager->jfd, iOff, pPg->pgno);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, iOff+4);
+ if( rc!=SQLITE_OK ) return rc;
+ rc = write32bits(pPager->jfd, iOff+pPager->pageSize+4, cksum);
+ if( rc!=SQLITE_OK ) return rc;
- for(ii=0; ii<nPage && rc==SQLITE_OK; ii++){
- Pgno pg = pg1+ii;
- PgHdr *pPage;
- if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){
- if( pg!=PAGER_MJ_PGNO(pPager) ){
- rc = sqlite3PagerGet(pPager, pg, &pPage);
- if( rc==SQLITE_OK ){
- rc = pager_write(pPage);
- if( pPage->flags&PGHDR_NEED_SYNC ){
- needSync = 1;
- }
- sqlite3PagerUnrefNotNull(pPage);
+ IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno,
+ pPager->journalOff, pPager->pageSize));
+ PAGER_INCR(sqlite3_pager_writej_count);
+ PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n",
+ PAGERID(pPager), pPg->pgno,
+ ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg)));
+
+ pPager->journalOff += 8 + pPager->pageSize;
+ pPager->nRec++;
+ assert( pPager->pInJournal!=0 );
+ rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno);
+ testcase( rc==SQLITE_NOMEM );
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ rc |= addToSavepointBitvecs(pPager, pPg->pgno);
+ if( rc!=SQLITE_OK ){
+ assert( rc==SQLITE_NOMEM );
+ return rc;
}
+ }else{
+ if( pPager->eState!=PAGER_WRITER_DBMOD ){
+ pPg->flags |= PGHDR_NEED_SYNC;
+ }
+ PAGERTRACE(("APPEND %d page %d needSync=%d\n",
+ PAGERID(pPager), pPg->pgno,
+ ((pPg->flags&PGHDR_NEED_SYNC)?1:0)));
}
- }else if( (pPage = sqlite3PagerLookup(pPager, pg))!=0 ){
- if( pPage->flags&PGHDR_NEED_SYNC ){
- needSync = 1;
- }
- sqlite3PagerUnrefNotNull(pPage);
+ }
+
+ /* If the statement journal is open and the page is not in it,
+ ** then write the current page to the statement journal. Note that
+ ** the statement journal format differs from the standard journal format
+ ** in that it omits the checksums and the header.
+ */
+ if( subjRequiresPage(pPg) ){
+ rc = subjournalPage(pPg);
}
}
- /* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages
- ** starting at pg1, then it needs to be set for all of them. Because
- ** writing to any of these nPage pages may damage the others, the
- ** journal file must contain sync()ed copies of all of them
- ** before any of them can be written out to the database file.
+ /* Update the database size and return.
*/
- if( rc==SQLITE_OK && needSync ){
- assert( !MEMDB );
- for(ii=0; ii<nPage; ii++){
- PgHdr *pPage = sqlite3PagerLookup(pPager, pg1+ii);
- if( pPage ){
- pPage->flags |= PGHDR_NEED_SYNC;
- sqlite3PagerUnrefNotNull(pPage);
- }
- }
+ if( pPager->dbSize<pPg->pgno ){
+ pPager->dbSize = pPg->pgno;
}
-
- assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)!=0 );
- pPager->doNotSpill &= ~SPILLFLAG_NOSYNC;
return rc;
}
@@ -48900,20 +44846,96 @@ static SQLITE_NOINLINE int pagerWriteLargeSector(PgHdr *pPg){
** If an error occurs, SQLITE_NOMEM or an IO error code is returned
** as appropriate. Otherwise, SQLITE_OK.
*/
-SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){
+SQLITE_PRIVATE int sqlite3PagerWrite(DbPage *pDbPage){
+ int rc = SQLITE_OK;
+
+ PgHdr *pPg = pDbPage;
Pager *pPager = pPg->pPager;
+ Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
+
assert( (pPg->flags & PGHDR_MMAP)==0 );
assert( pPager->eState>=PAGER_WRITER_LOCKED );
assert( pPager->eState!=PAGER_ERROR );
assert( assert_pager_state(pPager) );
- if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){
- if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg);
- return SQLITE_OK;
- }else if( pPager->sectorSize > (u32)pPager->pageSize ){
- return pagerWriteLargeSector(pPg);
+
+ if( nPagePerSector>1 ){
+ Pgno nPageCount; /* Total number of pages in database file */
+ Pgno pg1; /* First page of the sector pPg is located on. */
+ int nPage = 0; /* Number of pages starting at pg1 to journal */
+ int ii; /* Loop counter */
+ int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */
+
+ /* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow
+ ** a journal header to be written between the pages journaled by
+ ** this function.
+ */
+ assert( !MEMDB );
+ assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)==0 );
+ pPager->doNotSpill |= SPILLFLAG_NOSYNC;
+
+ /* This trick assumes that both the page-size and sector-size are
+ ** an integer power of 2. It sets variable pg1 to the identifier
+ ** of the first page of the sector pPg is located on.
+ */
+ pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
+
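The mask trick above relies on both the page size and the sector size being powers of two. For a hypothetical 1024-byte page on a 4096-byte sector (so nPagePerSector==4), page 7 maps to the sector that starts at page 5:

Pgno nPagePerSector = 4096/1024;                    /* == 4 */
Pgno pgno = 7;
Pgno pg1  = ((pgno-1) & ~(nPagePerSector-1)) + 1;   /* (6 & ~3)+1 == 5 */
/* pages 5..8 share the sector and must be journalled together */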
+ nPageCount = pPager->dbSize;
+ if( pPg->pgno>nPageCount ){
+ nPage = (pPg->pgno - pg1)+1;
+ }else if( (pg1+nPagePerSector-1)>nPageCount ){
+ nPage = nPageCount+1-pg1;
+ }else{
+ nPage = nPagePerSector;
+ }
+ assert(nPage>0);
+ assert(pg1<=pPg->pgno);
+ assert((pg1+nPage)>pPg->pgno);
+
+ for(ii=0; ii<nPage && rc==SQLITE_OK; ii++){
+ Pgno pg = pg1+ii;
+ PgHdr *pPage;
+ if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){
+ if( pg!=PAGER_MJ_PGNO(pPager) ){
+ rc = sqlite3PagerGet(pPager, pg, &pPage);
+ if( rc==SQLITE_OK ){
+ rc = pager_write(pPage);
+ if( pPage->flags&PGHDR_NEED_SYNC ){
+ needSync = 1;
+ }
+ sqlite3PagerUnref(pPage);
+ }
+ }
+ }else if( (pPage = pager_lookup(pPager, pg))!=0 ){
+ if( pPage->flags&PGHDR_NEED_SYNC ){
+ needSync = 1;
+ }
+ sqlite3PagerUnref(pPage);
+ }
+ }
+
+ /* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages
+ ** starting at pg1, then it needs to be set for all of them. Because
+ ** writing to any of these nPage pages may damage the others, the
+ ** journal file must contain sync()ed copies of all of them
+ ** before any of them can be written out to the database file.
+ */
+ if( rc==SQLITE_OK && needSync ){
+ assert( !MEMDB );
+ for(ii=0; ii<nPage; ii++){
+ PgHdr *pPage = pager_lookup(pPager, pg1+ii);
+ if( pPage ){
+ pPage->flags |= PGHDR_NEED_SYNC;
+ sqlite3PagerUnref(pPage);
+ }
+ }
+ }
+
+ assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)!=0 );
+ pPager->doNotSpill &= ~SPILLFLAG_NOSYNC;
}else{
- return pager_write(pPg);
+ rc = pager_write(pDbPage);
}
+ return rc;
}
/*
@@ -48923,7 +44945,7 @@ SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){
*/
#ifndef NDEBUG
SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage *pPg){
- return pPg->flags & PGHDR_WRITEABLE;
+ return pPg->flags&PGHDR_DIRTY;
}
#endif
@@ -48947,7 +44969,6 @@ SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){
PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)));
IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno))
pPg->flags |= PGHDR_DONT_WRITE;
- pPg->flags &= ~PGHDR_WRITEABLE;
pager_set_pagehash(pPg);
}
}
@@ -49057,17 +45078,17 @@ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){
** If successful, or if called on a pager for which it is a no-op, this
** function returns SQLITE_OK. Otherwise, an IO error code is returned.
*/
-SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster){
+SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager){
int rc = SQLITE_OK;
-
- if( isOpen(pPager->fd) ){
- void *pArg = (void*)zMaster;
- rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg);
- if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
- }
- if( rc==SQLITE_OK && !pPager->noSync ){
+ if( !pPager->noSync ){
assert( !MEMDB );
rc = sqlite3OsSync(pPager->fd, pPager->syncFlags);
+ }else if( isOpen(pPager->fd) ){
+ assert( !MEMDB );
+ rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC_OMITTED, 0);
+ if( rc==SQLITE_NOTFOUND ){
+ rc = SQLITE_OK;
+ }
}
return rc;
}
@@ -49266,7 +45287,7 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
/* Finally, sync the database file. */
if( !noSync ){
- rc = sqlite3PagerSync(pPager, zMaster);
+ rc = sqlite3PagerSync(pPager);
}
IOTRACE(("DBSYNC %p\n", pPager))
}
@@ -49330,7 +45351,6 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager *pPager){
}
PAGERTRACE(("COMMIT %d\n", PAGERID(pPager)));
- pPager->iDataVersion++;
rc = pager_end_transaction(pPager, pPager->setMaster, 1);
return pager_error(pPager, rc);
}
@@ -49396,9 +45416,7 @@ SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){
assert( pPager->eState==PAGER_READER || rc!=SQLITE_OK );
assert( rc==SQLITE_OK || rc==SQLITE_FULL || rc==SQLITE_CORRUPT
- || rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR
- || rc==SQLITE_CANTOPEN
- );
+ || rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR );
/* If an error occurs during a ROLLBACK, we can no longer trust the pager
** cache. So call pager_error() on the way out to make any error persistent.
@@ -49414,14 +45432,12 @@ SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager *pPager){
return pPager->readOnly;
}
-#ifdef SQLITE_DEBUG
/*
-** Return the sum of the reference counts for all pages held by pPager.
+** Return the number of references to the pager.
*/
SQLITE_PRIVATE int sqlite3PagerRefcount(Pager *pPager){
return sqlite3PcacheRefCount(pPager->pPCache);
}
-#endif
/*
** Return the approximate number of bytes of memory currently
@@ -49504,63 +45520,55 @@ SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){
** occurs while opening the sub-journal file, then an IO error code is
** returned. Otherwise, SQLITE_OK.
*/
-static SQLITE_NOINLINE int pagerOpenSavepoint(Pager *pPager, int nSavepoint){
+SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){
int rc = SQLITE_OK; /* Return code */
int nCurrent = pPager->nSavepoint; /* Current number of savepoints */
- int ii; /* Iterator variable */
- PagerSavepoint *aNew; /* New Pager.aSavepoint array */
assert( pPager->eState>=PAGER_WRITER_LOCKED );
assert( assert_pager_state(pPager) );
- assert( nSavepoint>nCurrent && pPager->useJournal );
- /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM
- ** if the allocation fails. Otherwise, zero the new portion in case a
- ** malloc failure occurs while populating it in the for(...) loop below.
- */
- aNew = (PagerSavepoint *)sqlite3Realloc(
- pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint
- );
- if( !aNew ){
- return SQLITE_NOMEM;
- }
- memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint));
- pPager->aSavepoint = aNew;
+ if( nSavepoint>nCurrent && pPager->useJournal ){
+ int ii; /* Iterator variable */
+ PagerSavepoint *aNew; /* New Pager.aSavepoint array */
- /* Populate the PagerSavepoint structures just allocated. */
- for(ii=nCurrent; ii<nSavepoint; ii++){
- aNew[ii].nOrig = pPager->dbSize;
- if( isOpen(pPager->jfd) && pPager->journalOff>0 ){
- aNew[ii].iOffset = pPager->journalOff;
- }else{
- aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager);
- }
- aNew[ii].iSubRec = pPager->nSubRec;
- aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize);
- if( !aNew[ii].pInSavepoint ){
+ /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM
+ ** if the allocation fails. Otherwise, zero the new portion in case a
+ ** malloc failure occurs while populating it in the for(...) loop below.
+ */
+ aNew = (PagerSavepoint *)sqlite3Realloc(
+ pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint
+ );
+ if( !aNew ){
return SQLITE_NOMEM;
}
- if( pagerUseWal(pPager) ){
- sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData);
+ memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint));
+ pPager->aSavepoint = aNew;
+
+ /* Populate the PagerSavepoint structures just allocated. */
+ for(ii=nCurrent; ii<nSavepoint; ii++){
+ aNew[ii].nOrig = pPager->dbSize;
+ if( isOpen(pPager->jfd) && pPager->journalOff>0 ){
+ aNew[ii].iOffset = pPager->journalOff;
+ }else{
+ aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager);
+ }
+ aNew[ii].iSubRec = pPager->nSubRec;
+ aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize);
+ if( !aNew[ii].pInSavepoint ){
+ return SQLITE_NOMEM;
+ }
+ if( pagerUseWal(pPager) ){
+ sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData);
+ }
+ pPager->nSavepoint = ii+1;
}
- pPager->nSavepoint = ii+1;
+ assert( pPager->nSavepoint==nSavepoint );
+ assertTruncateConstraint(pPager);
}
- assert( pPager->nSavepoint==nSavepoint );
- assertTruncateConstraint(pPager);
- return rc;
-}
-SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){
- assert( pPager->eState>=PAGER_WRITER_LOCKED );
- assert( assert_pager_state(pPager) );
- if( nSavepoint>pPager->nSavepoint && pPager->useJournal ){
- return pagerOpenSavepoint(pPager, nSavepoint);
- }else{
- return SQLITE_OK;
- }
+ return rc;
}
-
/*
** This function is called to rollback or release (commit) a savepoint.
** The savepoint to release or rollback need not be the most recently
@@ -49790,8 +45798,9 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i
** one or more savepoint bitvecs. This is the reason this function
** may return SQLITE_NOMEM.
*/
- if( (pPg->flags & PGHDR_DIRTY)!=0
- && SQLITE_OK!=(rc = subjournalPageIfRequired(pPg))
+ if( pPg->flags&PGHDR_DIRTY
+ && subjRequiresPage(pPg)
+ && SQLITE_OK!=(rc = subjournalPage(pPg))
){
return rc;
}
@@ -49810,7 +45819,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i
if( (pPg->flags&PGHDR_NEED_SYNC) && !isCommit ){
needSyncPgno = pPg->pgno;
assert( pPager->journalMode==PAGER_JOURNALMODE_OFF ||
- pageInJournal(pPager, pPg) || pPg->pgno>pPager->dbOrigSize );
+ pageInJournal(pPg) || pPg->pgno>pPager->dbOrigSize );
assert( pPg->flags&PGHDR_DIRTY );
}
@@ -49820,7 +45829,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i
** for the page moved there.
*/
pPg->flags &= ~PGHDR_NEED_SYNC;
- pPgOld = sqlite3PagerLookup(pPager, pgno);
+ pPgOld = pager_lookup(pPager, pgno);
assert( !pPgOld || pPgOld->nRef==1 );
if( pPgOld ){
pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC);
@@ -49844,7 +45853,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i
if( MEMDB ){
assert( pPgOld );
sqlite3PcacheMove(pPgOld, origPgno);
- sqlite3PagerUnrefNotNull(pPgOld);
+ sqlite3PagerUnref(pPgOld);
}
if( needSyncPgno ){
@@ -49873,7 +45882,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i
}
pPgHdr->flags |= PGHDR_NEED_SYNC;
sqlite3PcacheMakeDirty(pPgHdr);
- sqlite3PagerUnrefNotNull(pPgHdr);
+ sqlite3PagerUnref(pPgHdr);
}
return SQLITE_OK;
@@ -49881,18 +45890,6 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i
#endif
/*
-** The page handle passed as the first argument refers to a dirty page
-** with a page number other than iNew. This function changes the page's
-** page number to iNew and sets the value of the PgHdr.flags field to
-** the value passed as the third parameter.
-*/
-SQLITE_PRIVATE void sqlite3PagerRekey(DbPage *pPg, Pgno iNew, u16 flags){
- assert( pPg->pgno!=iNew );
- pPg->flags = flags;
- sqlite3PcacheMove(pPg, iNew);
-}
-
-/*
** Return a pointer to the data for the specified page.
*/
SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *pPg){
@@ -50037,8 +46034,6 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
}
assert( state==pPager->eState );
}
- }else if( eMode==PAGER_JOURNALMODE_OFF ){
- sqlite3OsClose(pPager->jfd);
}
}
@@ -50110,8 +46105,7 @@ SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int eMode, int *pnLog,
int rc = SQLITE_OK;
if( pPager->pWal ){
rc = sqlite3WalCheckpoint(pPager->pWal, eMode,
- (eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler),
- pPager->pBusyHandlerArg,
+ pPager->xBusyHandler, pPager->pBusyHandlerArg,
pPager->ckptSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace,
pnLog, pnCkpt
);
@@ -50288,12 +46282,11 @@ SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){
** is empty, return 0.
*/
SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){
- assert( pPager->eState>=PAGER_READER );
+ assert( pPager->eState==PAGER_READER );
return sqlite3WalFramesize(pPager->pWal);
}
#endif
-
#endif /* SQLITE_OMIT_DISKIO */
/************** End of pager.c ***********************************************/
@@ -50542,7 +46535,6 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){
*/
#ifndef SQLITE_OMIT_WAL
-/* #include "wal.h" */
/*
** Trace output macros
@@ -50728,7 +46720,6 @@ struct Wal {
u8 syncHeader; /* Fsync the WAL header if true */
u8 padToSectorBoundary; /* Pad transactions out to the next sector */
WalIndexHdr hdr; /* Wal-index header for current transaction */
- u32 minFrame; /* Ignore wal frames before this one */
const char *zWalName; /* Name of WAL file */
u32 nCkpt; /* Checkpoint sequence counter in the wal-header */
#ifdef SQLITE_DEBUG
@@ -50823,7 +46814,7 @@ static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){
if( pWal->nWiData<=iPage ){
int nByte = sizeof(u32*)*(iPage+1);
volatile u32 **apNew;
- apNew = (volatile u32 **)sqlite3_realloc64((void *)pWal->apWiData, nByte);
+ apNew = (volatile u32 **)sqlite3_realloc((void *)pWal->apWiData, nByte);
if( !apNew ){
*ppPage = 0;
return SQLITE_NOMEM;
@@ -50875,7 +46866,7 @@ static volatile WalIndexHdr *walIndexHdr(Wal *pWal){
** The argument to this macro must be of type u32. On a little-endian
** architecture, it returns the u32 value that results from interpreting
** the 4 bytes as a big-endian value. On a big-endian architecture, it
-** returns the value that would be produced by interpreting the 4 bytes
+** returns the value that would be produced by interpreting the 4 bytes
** of the input value as a little-endian integer.
*/
#define BYTESWAP32(x) ( \
@@ -50949,9 +46940,9 @@ static void walIndexWriteHdr(Wal *pWal){
pWal->hdr.isInit = 1;
pWal->hdr.iVersion = WALINDEX_MAX_VERSION;
walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum);
- memcpy((void*)&aHdr[1], (const void*)&pWal->hdr, sizeof(WalIndexHdr));
+ memcpy((void *)&aHdr[1], (void *)&pWal->hdr, sizeof(WalIndexHdr));
walShmBarrier(pWal);
- memcpy((void*)&aHdr[0], (const void*)&pWal->hdr, sizeof(WalIndexHdr));
+ memcpy((void *)&aHdr[0], (void *)&pWal->hdr, sizeof(WalIndexHdr));
}
/*
@@ -51089,10 +47080,9 @@ static void walUnlockShared(Wal *pWal, int lockIdx){
SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED);
WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx)));
}
-static int walLockExclusive(Wal *pWal, int lockIdx, int n, int fBlock){
+static int walLockExclusive(Wal *pWal, int lockIdx, int n){
int rc;
if( pWal->exclusiveMode ) return SQLITE_OK;
- if( fBlock ) sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_WAL_BLOCK, 0);
rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE);
WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal,
@@ -51253,13 +47243,13 @@ static void walCleanupHash(Wal *pWal){
** via the hash table even after the cleanup.
*/
if( iLimit ){
- int j; /* Loop counter */
+ int i; /* Loop counter */
int iKey; /* Hash key */
- for(j=1; j<=iLimit; j++){
- for(iKey=walHash(aPgno[j]); aHash[iKey]; iKey=walNextHash(iKey)){
- if( aHash[iKey]==j ) break;
+ for(i=1; i<=iLimit; i++){
+ for(iKey=walHash(aPgno[i]); aHash[iKey]; iKey=walNextHash(iKey)){
+ if( aHash[iKey]==i ) break;
}
- assert( aHash[iKey]==j );
+ assert( aHash[iKey]==i );
}
}
#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */
@@ -51290,7 +47280,7 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
assert( idx <= HASHTABLE_NSLOT/2 + 1 );
/* If this is the first entry to be added to this hash-table, zero the
- ** entire hash table and aPgno[] array before proceeding.
+  ** entire hash table and aPgno[] array before proceeding.
*/
if( idx==1 ){
int nByte = (int)((u8 *)&aHash[HASHTABLE_NSLOT] - (u8 *)&aPgno[1]);
@@ -51378,7 +47368,7 @@ static int walIndexRecover(Wal *pWal){
assert( pWal->writeLock );
iLock = WAL_ALL_BUT_WRITE + pWal->ckptLock;
nLock = SQLITE_SHM_NLOCK - iLock;
- rc = walLockExclusive(pWal, iLock, nLock, 0);
+ rc = walLockExclusive(pWal, iLock, nLock);
if( rc ){
return rc;
}
@@ -51448,7 +47438,7 @@ static int walIndexRecover(Wal *pWal){
/* Malloc a buffer to read frames into. */
szFrame = szPage + WAL_FRAME_HDRSIZE;
- aFrame = (u8 *)sqlite3_malloc64(szFrame);
+ aFrame = (u8 *)sqlite3_malloc(szFrame);
if( !aFrame ){
rc = SQLITE_NOMEM;
goto recovery_error;
@@ -51608,7 +47598,7 @@ SQLITE_PRIVATE int sqlite3WalOpen(
sqlite3OsClose(pRet->pWalFd);
sqlite3_free(pRet);
}else{
- int iDC = sqlite3OsDeviceCharacteristics(pDbFd);
+ int iDC = sqlite3OsDeviceCharacteristics(pRet->pWalFd);
if( iDC & SQLITE_IOCAP_SEQUENTIAL ){ pRet->syncHeader = 0; }
if( iDC & SQLITE_IOCAP_POWERSAFE_OVERWRITE ){
pRet->padToSectorBoundary = 0;
@@ -51761,7 +47751,7 @@ static void walMergesort(
int nMerge = 0; /* Number of elements in list aMerge */
ht_slot *aMerge = 0; /* List to be merged */
int iList; /* Index into input list */
- u32 iSub = 0; /* Index into aSub array */
+ int iSub = 0; /* Index into aSub array */
struct Sublist aSub[13]; /* Array of sub-lists */
memset(aSub, 0, sizeof(aSub));
@@ -51772,9 +47762,7 @@ static void walMergesort(
nMerge = 1;
aMerge = &aList[iList];
for(iSub=0; iList & (1<<iSub); iSub++){
- struct Sublist *p;
- assert( iSub<ArraySize(aSub) );
- p = &aSub[iSub];
+ struct Sublist *p = &aSub[iSub];
assert( p->aList && p->nList<=(1<<iSub) );
assert( p->aList==&aList[iList&~((2<<iSub)-1)] );
walMerge(aContent, p->aList, p->nList, &aMerge, &nMerge, aBuffer);
@@ -51785,9 +47773,7 @@ static void walMergesort(
for(iSub++; iSub<ArraySize(aSub); iSub++){
if( nList & (1<<iSub) ){
- struct Sublist *p;
- assert( iSub<ArraySize(aSub) );
- p = &aSub[iSub];
+ struct Sublist *p = &aSub[iSub];
assert( p->nList<=(1<<iSub) );
assert( p->aList==&aList[nList&~((2<<iSub)-1)] );
walMerge(aContent, p->aList, p->nList, &aMerge, &nMerge, aBuffer);
@@ -51810,7 +47796,7 @@ static void walMergesort(
** Free an iterator allocated by walIteratorInit().
*/
static void walIteratorFree(WalIterator *p){
- sqlite3_free(p);
+ sqlite3ScratchFree(p);
}
/*
@@ -51845,7 +47831,7 @@ static int walIteratorInit(Wal *pWal, WalIterator **pp){
nByte = sizeof(WalIterator)
+ (nSegment-1)*sizeof(struct WalSegment)
+ iLast*sizeof(ht_slot);
- p = (WalIterator *)sqlite3_malloc64(nByte);
+ p = (WalIterator *)sqlite3ScratchMalloc(nByte);
if( !p ){
return SQLITE_NOMEM;
}
@@ -51855,7 +47841,7 @@ static int walIteratorInit(Wal *pWal, WalIterator **pp){
/* Allocate temporary space used by the merge-sort routine. This block
** of memory will be freed before this function returns.
*/
- aTmp = (ht_slot *)sqlite3_malloc64(
+ aTmp = (ht_slot *)sqlite3ScratchMalloc(
sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
);
if( !aTmp ){
@@ -51892,7 +47878,7 @@ static int walIteratorInit(Wal *pWal, WalIterator **pp){
p->aSegment[i].aPgno = (u32 *)aPgno;
}
}
- sqlite3_free(aTmp);
+ sqlite3ScratchFree(aTmp);
if( rc!=SQLITE_OK ){
walIteratorFree(p);
@@ -51916,7 +47902,7 @@ static int walBusyLock(
){
int rc;
do {
- rc = walLockExclusive(pWal, lockIdx, n, 0);
+ rc = walLockExclusive(pWal, lockIdx, n);
}while( xBusy && rc==SQLITE_BUSY && xBusy(pBusyArg) );
return rc;
}
@@ -51930,38 +47916,6 @@ static int walPagesize(Wal *pWal){
}
/*
-** The following is guaranteed when this function is called:
-**
-** a) the WRITER lock is held,
-** b) the entire log file has been checkpointed, and
-** c) any existing readers are reading exclusively from the database
-** file - there are no readers that may attempt to read a frame from
-** the log file.
-**
-** This function updates the shared-memory structures so that the next
-** client to write to the database (which may be this one) does so by
-** writing frames into the start of the log file.
-**
-** The value of parameter salt1 is used as the aSalt[1] value in the
-** new wal-index header. It should be passed a pseudo-random value (i.e.
-** one obtained from sqlite3_randomness()).
-*/
-static void walRestartHdr(Wal *pWal, u32 salt1){
- volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
- int i; /* Loop counter */
- u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */
- pWal->nCkpt++;
- pWal->hdr.mxFrame = 0;
- sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0]));
- memcpy(&pWal->hdr.aSalt[1], &salt1, 4);
- walIndexWriteHdr(pWal);
- pInfo->nBackfill = 0;
- pInfo->aReadMark[1] = 0;
- for(i=2; i<WAL_NREADER; i++) pInfo->aReadMark[i] = READMARK_NOT_USED;
- assert( pInfo->aReadMark[0]==0 );
-}
-
-/*
** Copy as much content as we can from the WAL back into the database file
** in response to an sqlite3_wal_checkpoint() request or the equivalent.
**
@@ -51984,7 +47938,7 @@ static void walRestartHdr(Wal *pWal, u32 salt1){
** database file.
**
** This routine uses and updates the nBackfill field of the wal-index header.
-** This is the only routine that will increase the value of nBackfill.
+** This is the only routine that will increase the value of nBackfill.
** (A WAL reset or recovery will revert nBackfill to zero, but not increase
** its value.)
**
@@ -51995,12 +47949,12 @@ static void walRestartHdr(Wal *pWal, u32 salt1){
static int walCheckpoint(
Wal *pWal, /* Wal connection */
int eMode, /* One of PASSIVE, FULL or RESTART */
- int (*xBusy)(void*), /* Function to call when busy */
+ int (*xBusyCall)(void*), /* Function to call when busy */
void *pBusyArg, /* Context argument for xBusyHandler */
int sync_flags, /* Flags for OsSync() (or 0) */
u8 *zBuf /* Temporary buffer to use */
){
- int rc = SQLITE_OK; /* Return code */
+ int rc; /* Return code */
int szPage; /* Database page-size */
WalIterator *pIter = 0; /* Wal iterator context */
u32 iDbpage = 0; /* Next database page to write */
@@ -52009,154 +47963,123 @@ static int walCheckpoint(
u32 mxPage; /* Max database page to write */
int i; /* Loop counter */
volatile WalCkptInfo *pInfo; /* The checkpoint status information */
+ int (*xBusy)(void*) = 0; /* Function to call when waiting for locks */
szPage = walPagesize(pWal);
testcase( szPage<=32768 );
testcase( szPage>=65536 );
pInfo = walCkptInfo(pWal);
- if( pInfo->nBackfill<pWal->hdr.mxFrame ){
+ if( pInfo->nBackfill>=pWal->hdr.mxFrame ) return SQLITE_OK;
- /* Allocate the iterator */
- rc = walIteratorInit(pWal, &pIter);
- if( rc!=SQLITE_OK ){
- return rc;
- }
- assert( pIter );
+ /* Allocate the iterator */
+ rc = walIteratorInit(pWal, &pIter);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ assert( pIter );
- /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked
- ** in the SQLITE_CHECKPOINT_PASSIVE mode. */
- assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 );
+ if( eMode!=SQLITE_CHECKPOINT_PASSIVE ) xBusy = xBusyCall;
- /* Compute in mxSafeFrame the index of the last frame of the WAL that is
- ** safe to write into the database. Frames beyond mxSafeFrame might
- ** overwrite database pages that are in use by active readers and thus
- ** cannot be backfilled from the WAL.
- */
- mxSafeFrame = pWal->hdr.mxFrame;
- mxPage = pWal->hdr.nPage;
- for(i=1; i<WAL_NREADER; i++){
- /* Thread-sanitizer reports that the following is an unsafe read,
- ** as some other thread may be in the process of updating the value
- ** of the aReadMark[] slot. The assumption here is that if that is
- ** happening, the other client may only be increasing the value,
-      ** not decreasing it. So assuming that either the "old" or
- ** "new" version of the value is read, and not some arbitrary value
- ** that would never be written by a real client, things are still
- ** safe. */
- u32 y = pInfo->aReadMark[i];
- if( mxSafeFrame>y ){
- assert( y<=pWal->hdr.mxFrame );
- rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1);
- if( rc==SQLITE_OK ){
- pInfo->aReadMark[i] = (i==1 ? mxSafeFrame : READMARK_NOT_USED);
- walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
- }else if( rc==SQLITE_BUSY ){
- mxSafeFrame = y;
- xBusy = 0;
- }else{
- goto walcheckpoint_out;
- }
+ /* Compute in mxSafeFrame the index of the last frame of the WAL that is
+ ** safe to write into the database. Frames beyond mxSafeFrame might
+ ** overwrite database pages that are in use by active readers and thus
+ ** cannot be backfilled from the WAL.
+ */
+ mxSafeFrame = pWal->hdr.mxFrame;
+ mxPage = pWal->hdr.nPage;
+ for(i=1; i<WAL_NREADER; i++){
+ u32 y = pInfo->aReadMark[i];
+ if( mxSafeFrame>y ){
+ assert( y<=pWal->hdr.mxFrame );
+ rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1);
+ if( rc==SQLITE_OK ){
+ pInfo->aReadMark[i] = (i==1 ? mxSafeFrame : READMARK_NOT_USED);
+ walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
+ }else if( rc==SQLITE_BUSY ){
+ mxSafeFrame = y;
+ xBusy = 0;
+ }else{
+ goto walcheckpoint_out;
}
}
+ }
- if( pInfo->nBackfill<mxSafeFrame
- && (rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(0),1))==SQLITE_OK
- ){
- i64 nSize; /* Current size of database file */
- u32 nBackfill = pInfo->nBackfill;
+ if( pInfo->nBackfill<mxSafeFrame
+ && (rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(0), 1))==SQLITE_OK
+ ){
+ i64 nSize; /* Current size of database file */
+ u32 nBackfill = pInfo->nBackfill;
- /* Sync the WAL to disk */
- if( sync_flags ){
- rc = sqlite3OsSync(pWal->pWalFd, sync_flags);
- }
+ /* Sync the WAL to disk */
+ if( sync_flags ){
+ rc = sqlite3OsSync(pWal->pWalFd, sync_flags);
+ }
- /* If the database may grow as a result of this checkpoint, hint
- ** about the eventual size of the db file to the VFS layer.
- */
- if( rc==SQLITE_OK ){
- i64 nReq = ((i64)mxPage * szPage);
- rc = sqlite3OsFileSize(pWal->pDbFd, &nSize);
- if( rc==SQLITE_OK && nSize<nReq ){
- sqlite3OsFileControlHint(pWal->pDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq);
- }
+ /* If the database may grow as a result of this checkpoint, hint
+ ** about the eventual size of the db file to the VFS layer.
+ */
+ if( rc==SQLITE_OK ){
+ i64 nReq = ((i64)mxPage * szPage);
+ rc = sqlite3OsFileSize(pWal->pDbFd, &nSize);
+ if( rc==SQLITE_OK && nSize<nReq ){
+ sqlite3OsFileControlHint(pWal->pDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq);
}
+ }
- /* Iterate through the contents of the WAL, copying data to the db file */
- while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
- i64 iOffset;
- assert( walFramePgno(pWal, iFrame)==iDbpage );
- if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){
- continue;
+ /* Iterate through the contents of the WAL, copying data to the db file. */
+ while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
+ i64 iOffset;
+ assert( walFramePgno(pWal, iFrame)==iDbpage );
+ if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ) continue;
+ iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE;
+ /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */
+ rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset);
+ if( rc!=SQLITE_OK ) break;
+ iOffset = (iDbpage-1)*(i64)szPage;
+ testcase( IS_BIG_INT(iOffset) );
+ rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset);
+ if( rc!=SQLITE_OK ) break;
+ }
+
+ /* If work was actually accomplished... */
+ if( rc==SQLITE_OK ){
+ if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){
+ i64 szDb = pWal->hdr.nPage*(i64)szPage;
+ testcase( IS_BIG_INT(szDb) );
+ rc = sqlite3OsTruncate(pWal->pDbFd, szDb);
+ if( rc==SQLITE_OK && sync_flags ){
+ rc = sqlite3OsSync(pWal->pDbFd, sync_flags);
}
- iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE;
- /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */
- rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset);
- if( rc!=SQLITE_OK ) break;
- iOffset = (iDbpage-1)*(i64)szPage;
- testcase( IS_BIG_INT(iOffset) );
- rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset);
- if( rc!=SQLITE_OK ) break;
}
-
- /* If work was actually accomplished... */
if( rc==SQLITE_OK ){
- if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){
- i64 szDb = pWal->hdr.nPage*(i64)szPage;
- testcase( IS_BIG_INT(szDb) );
- rc = sqlite3OsTruncate(pWal->pDbFd, szDb);
- if( rc==SQLITE_OK && sync_flags ){
- rc = sqlite3OsSync(pWal->pDbFd, sync_flags);
- }
- }
- if( rc==SQLITE_OK ){
- pInfo->nBackfill = mxSafeFrame;
- }
+ pInfo->nBackfill = mxSafeFrame;
}
-
- /* Release the reader lock held while backfilling */
- walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1);
}
- if( rc==SQLITE_BUSY ){
- /* Reset the return code so as not to report a checkpoint failure
- ** just because there are active readers. */
- rc = SQLITE_OK;
- }
+ /* Release the reader lock held while backfilling */
+ walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1);
}
- /* If this is an SQLITE_CHECKPOINT_RESTART or TRUNCATE operation, and the
- ** entire wal file has been copied into the database file, then block
- ** until all readers have finished using the wal file. This ensures that
- ** the next process to write to the database restarts the wal file.
+ if( rc==SQLITE_BUSY ){
+ /* Reset the return code so as not to report a checkpoint failure
+ ** just because there are active readers. */
+ rc = SQLITE_OK;
+ }
+
+ /* If this is an SQLITE_CHECKPOINT_RESTART operation, and the entire wal
+ ** file has been copied into the database file, then block until all
+ ** readers have finished using the wal file. This ensures that the next
+ ** process to write to the database restarts the wal file.
*/
if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){
assert( pWal->writeLock );
if( pInfo->nBackfill<pWal->hdr.mxFrame ){
rc = SQLITE_BUSY;
- }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){
- u32 salt1;
- sqlite3_randomness(4, &salt1);
- assert( pInfo->nBackfill==pWal->hdr.mxFrame );
+ }else if( eMode==SQLITE_CHECKPOINT_RESTART ){
+ assert( mxSafeFrame==pWal->hdr.mxFrame );
rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(1), WAL_NREADER-1);
if( rc==SQLITE_OK ){
- if( eMode==SQLITE_CHECKPOINT_TRUNCATE ){
- /* IMPLEMENTATION-OF: R-44699-57140 This mode works the same way as
- ** SQLITE_CHECKPOINT_RESTART with the addition that it also
- ** truncates the log file to zero bytes just prior to a
- ** successful return.
- **
- ** In theory, it might be safe to do this without updating the
- ** wal-index header in shared memory, as all subsequent reader or
- ** writer clients should see that the entire log file has been
- ** checkpointed and behave accordingly. This seems unsafe though,
- ** as it would leave the system in a state where the contents of
- ** the wal-index header do not match the contents of the
- ** file-system. To avoid this, update the wal-index header to
- ** indicate that the log file contains zero valid frames. */
- walRestartHdr(pWal, salt1);
- rc = sqlite3OsTruncate(pWal->pWalFd, 0);
- }
walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
}
}
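/* A standalone sketch of the offset arithmetic behind the backfill loop
** above. It assumes the documented WAL layout -- a 32-byte file header
** followed by frames consisting of a 24-byte frame header plus one page
** image (mirroring WAL_HDRSIZE and WAL_FRAME_HDRSIZE) -- and uses
** arbitrary example values for the page size and frame/page numbers. */
#include <stdio.h>

#define WAL_HDRSIZE       32
#define WAL_FRAME_HDRSIZE 24

int main(void){
  long long szPage  = 4096;              /* assumed database page size */
  long long iFrame  = 3;                 /* example WAL frame to copy */
  long long iDbpage = 7;                 /* database page stored in that frame */
  long long walOff  = WAL_HDRSIZE
                    + (iFrame-1)*(szPage+WAL_FRAME_HDRSIZE)
                    + WAL_FRAME_HDRSIZE; /* start of the page image in the WAL */
  long long dbOff   = (iDbpage-1)*szPage;/* destination offset in the db file */
  printf("read %lld bytes at WAL offset %lld, write them at db offset %lld\n",
         szPage, walOff, dbOff);
  return 0;
}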
@@ -52319,7 +48242,7 @@ static int walIndexTryHdr(Wal *pWal, int *pChanged){
** wal-index from the WAL before returning.
**
** Set *pChanged to 1 if the wal-index header value in pWal->hdr is
-** changed by this operation. If pWal->hdr is unchanged, set *pChanged
+** changed by this operation. If pWal->hdr is unchanged, set *pChanged
** to 0.
**
** If the wal-index header is successfully read, return SQLITE_OK.
@@ -52357,7 +48280,7 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){
walUnlockShared(pWal, WAL_WRITE_LOCK);
rc = SQLITE_READONLY_RECOVERY;
}
- }else if( SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1, 1)) ){
+ }else if( SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) ){
pWal->writeLock = 1;
if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){
badHdr = walIndexTryHdr(pWal, pChanged);
@@ -52465,8 +48388,8 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** calls to sqlite3OsSleep() have a delay of 1 microsecond. Really this
** is more of a scheduler yield than an actual delay. But on the 10th
  ** and subsequent retries, the delays start becoming longer and longer,
- ** so that on the 100th (and last) RETRY we delay for 323 milliseconds.
- ** The total delay time before giving up is less than 10 seconds.
+ ** so that on the 100th (and last) RETRY we delay for 21 milliseconds.
+ ** The total delay time before giving up is less than 1 second.
*/
if( cnt>5 ){
int nDelay = 1; /* Pause time in microseconds */
@@ -52474,7 +48397,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
VVA_ONLY( pWal->lockError = 1; )
return SQLITE_PROTOCOL;
}
- if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+ if( cnt>=10 ) nDelay = (cnt-9)*238; /* Max delay 21ms. Total delay 996ms */
sqlite3OsSleep(pWal->pVfs, nDelay);
}
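/* A back-of-the-envelope check of the two delay schedules described above,
** assuming the retry counter runs to 100 and that the delay only starts
** growing at cnt==10, as in the surrounding code (values in microseconds).
** The quadratic schedule is the one removed by this backout; the linear
** schedule is what the restored code uses. */
#include <stdio.h>

int main(void){
  long linearTotal = 0, quadTotal = 0;
  for(int cnt=10; cnt<=100; cnt++){
    linearTotal += (cnt-9)*238;          /* restored code: ~21.7 ms maximum */
    quadTotal   += (cnt-9)*(cnt-9)*39;   /* backed-out code: ~323 ms maximum */
  }
  printf("linear:    max %d us, total %ld us\n", 91*238, linearTotal);
  printf("quadratic: max %d us, total %ld us\n", 91*91*39, quadTotal);
  return 0;                              /* roughly 0.996 s versus 9.96 s */
}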
@@ -52523,7 +48446,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** may have been appended to the log before READ_LOCK(0) was obtained.
** When holding READ_LOCK(0), the reader ignores the entire log file,
** which implies that the database file contains a trustworthy
- ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from
+  ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from
** happening, this is usually correct.
**
** However, if frames have been appended to the log (or if the log
@@ -52563,7 +48486,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
&& (mxReadMark<pWal->hdr.mxFrame || mxI==0)
){
for(i=1; i<WAL_NREADER; i++){
- rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1, 0);
+ rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1);
if( rc==SQLITE_OK ){
mxReadMark = pInfo->aReadMark[i] = pWal->hdr.mxFrame;
mxI = i;
@@ -52597,27 +48520,12 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry
** instead.
**
- ** Before checking that the live wal-index header has not changed
- ** since it was read, set Wal.minFrame to the first frame in the wal
- ** file that has not yet been checkpointed. This client will not need
- ** to read any frames earlier than minFrame from the wal file - they
- ** can be safely read directly from the database file.
- **
- ** Because a ShmBarrier() call is made between taking the copy of
- ** nBackfill and checking that the wal-header in shared-memory still
- ** matches the one cached in pWal->hdr, it is guaranteed that the
- ** checkpointer that set nBackfill was not working with a wal-index
- ** header newer than that cached in pWal->hdr. If it were, that could
- ** cause a problem. The checkpointer could omit to checkpoint
- ** a version of page X that lies before pWal->minFrame (call that version
- ** A) on the basis that there is a newer version (version B) of the same
-  ** page later in the wal file. But if version B happens to lie past
- ** frame pWal->hdr.mxFrame - then the client would incorrectly assume
- ** that it can read version A from the database file. However, since
- ** we can guarantee that the checkpointer that set nBackfill could not
- ** see any pages past pWal->hdr.mxFrame, this problem does not come up.
+ ** This does not guarantee that the copy of the wal-index header is up to
+ ** date before proceeding. That would not be possible without somehow
+ ** blocking writers. It only guarantees that a dangerous checkpoint or
+ ** log-wrap (either of which would require an exclusive lock on
+ ** WAL_READ_LOCK(mxI)) has not occurred since the snapshot was valid.
*/
- pWal->minFrame = pInfo->nBackfill+1;
walShmBarrier(pWal);
if( pInfo->aReadMark[mxI]!=mxReadMark
|| memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))
@@ -52688,7 +48596,6 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
u32 iRead = 0; /* If !=0, WAL frame to return data from */
u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */
int iHash; /* Used to loop through N hash tables */
- int iMinHash;
/* This routine is only be called from within a read transaction. */
assert( pWal->readLock>=0 || pWal->lockError );
@@ -52729,8 +48636,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
** This condition filters out entries that were added to the hash
** table after the current read-transaction had started.
*/
- iMinHash = walFramePage(pWal->minFrame);
- for(iHash=walFramePage(iLast); iHash>=iMinHash && iRead==0; iHash--){
+ for(iHash=walFramePage(iLast); iHash>=0 && iRead==0; iHash--){
volatile ht_slot *aHash; /* Pointer to hash table */
volatile u32 *aPgno; /* Pointer to array of page numbers */
u32 iZero; /* Frame number corresponding to aPgno[0] */
@@ -52745,8 +48651,8 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
nCollide = HASHTABLE_NSLOT;
for(iKey=walHash(pgno); aHash[iKey]; iKey=walNextHash(iKey)){
u32 iFrame = aHash[iKey] + iZero;
- if( iFrame<=iLast && iFrame>=pWal->minFrame && aPgno[aHash[iKey]]==pgno ){
- assert( iFrame>iRead || CORRUPT_DB );
+ if( iFrame<=iLast && aPgno[aHash[iKey]]==pgno ){
+ /* assert( iFrame>iRead ); -- not true if there is corruption */
iRead = iFrame;
}
if( (nCollide--)==0 ){
@@ -52762,8 +48668,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
{
u32 iRead2 = 0;
u32 iTest;
- assert( pWal->minFrame>0 );
- for(iTest=iLast; iTest>=pWal->minFrame; iTest--){
+ for(iTest=iLast; iTest>0; iTest--){
if( walFramePgno(pWal, iTest)==pgno ){
iRead2 = iTest;
break;
@@ -52837,7 +48742,7 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){
/* Only one writer allowed at a time. Get the write lock. Return
** SQLITE_BUSY if unable.
*/
- rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1, 0);
+ rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
if( rc ){
return rc;
}
@@ -52912,6 +48817,7 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p
}
if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
}
+ assert( rc==SQLITE_OK );
return rc;
}
@@ -52960,6 +48866,7 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
return rc;
}
+
/*
** This function is called just before writing a set of frames to the log
** file (see sqlite3WalFrames()). It checks to see if, instead of appending
@@ -52982,7 +48889,7 @@ static int walRestartLog(Wal *pWal){
if( pInfo->nBackfill>0 ){
u32 salt1;
sqlite3_randomness(4, &salt1);
- rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1, 0);
+ rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
if( rc==SQLITE_OK ){
/* If all readers are using WAL_READ_LOCK(0) (in other words if no
** readers are currently using the WAL), then the transactions
@@ -52992,8 +48899,20 @@ static int walRestartLog(Wal *pWal){
** In theory it would be Ok to update the cache of the header only
** at this point. But updating the actual wal-index header is also
** safe and means there is no special case for sqlite3WalUndo()
- ** to handle if this transaction is rolled back. */
- walRestartHdr(pWal, salt1);
+ ** to handle if this transaction is rolled back.
+ */
+ int i; /* Loop counter */
+ u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */
+
+ pWal->nCkpt++;
+ pWal->hdr.mxFrame = 0;
+ sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0]));
+ aSalt[1] = salt1;
+ walIndexWriteHdr(pWal);
+ pInfo->nBackfill = 0;
+ pInfo->aReadMark[1] = 0;
+ for(i=2; i<WAL_NREADER; i++) pInfo->aReadMark[i] = READMARK_NOT_USED;
+ assert( pInfo->aReadMark[0]==0 );
walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
}else if( rc!=SQLITE_BUSY ){
return rc;
@@ -53050,7 +48969,7 @@ static int walWriteToLog(
iAmt -= iFirstAmt;
pContent = (void*)(iFirstAmt + (char*)pContent);
assert( p->syncFlags & (SQLITE_SYNC_NORMAL|SQLITE_SYNC_FULL) );
- rc = sqlite3OsSync(p->pFd, p->syncFlags & SQLITE_SYNC_MASK);
+ rc = sqlite3OsSync(p->pFd, p->syncFlags);
if( iAmt==0 || rc ) return rc;
}
rc = sqlite3OsWrite(p->pFd, pContent, iAmt, iOffset);
@@ -53195,7 +49114,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(
**
** Padding and syncing only occur if this set of frames complete a
** transaction and if PRAGMA synchronous=FULL. If synchronous==NORMAL
- ** or synchronous==OFF, then no padding or syncing are needed.
+  ** or synchronous==OFF, then no padding or syncing are needed.
**
** If SQLITE_IOCAP_POWERSAFE_OVERWRITE is defined, then padding is not
** needed and only the sync is done. If padding is needed, then the
@@ -53281,7 +49200,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(
*/
SQLITE_PRIVATE int sqlite3WalCheckpoint(
Wal *pWal, /* Wal connection */
- int eMode, /* PASSIVE, FULL, RESTART, or TRUNCATE */
+ int eMode, /* PASSIVE, FULL or RESTART */
int (*xBusy)(void*), /* Function to call when busy */
void *pBusyArg, /* Context argument for xBusyHandler */
int sync_flags, /* Flags to sync db file with (or 0) */
@@ -53293,42 +49212,29 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
int rc; /* Return code */
int isChanged = 0; /* True if a new wal-index header is loaded */
int eMode2 = eMode; /* Mode to pass to walCheckpoint() */
- int (*xBusy2)(void*) = xBusy; /* Busy handler for eMode2 */
assert( pWal->ckptLock==0 );
assert( pWal->writeLock==0 );
- /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked
- ** in the SQLITE_CHECKPOINT_PASSIVE mode. */
- assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 );
-
if( pWal->readOnly ) return SQLITE_READONLY;
WALTRACE(("WAL%p: checkpoint begins\n", pWal));
-
- /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
- ** "checkpoint" lock on the database file. */
- rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1, 0);
+ rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
if( rc ){
- /* EVIDENCE-OF: R-10421-19736 If any other process is running a
- ** checkpoint operation at the same time, the lock cannot be obtained and
- ** SQLITE_BUSY is returned.
- ** EVIDENCE-OF: R-53820-33897 Even if there is a busy-handler configured,
- ** it will not be invoked in this case.
- */
- testcase( rc==SQLITE_BUSY );
- testcase( xBusy!=0 );
+ /* Usually this is SQLITE_BUSY meaning that another thread or process
+ ** is already running a checkpoint, or maybe a recovery. But it might
+ ** also be SQLITE_IOERR. */
return rc;
}
pWal->ckptLock = 1;
- /* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART and
- ** TRUNCATE modes also obtain the exclusive "writer" lock on the database
- ** file.
+ /* If this is a blocking-checkpoint, then obtain the write-lock as well
+ ** to prevent any writers from running while the checkpoint is underway.
+ ** This has to be done before the call to walIndexReadHdr() below.
**
- ** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained
- ** immediately, and a busy-handler is configured, it is invoked and the
- ** writer lock retried until either the busy-handler returns 0 or the
- ** lock is successfully obtained.
+ ** If the writer lock cannot be obtained, then a passive checkpoint is
+ ** run instead. Since the checkpointer is not holding the writer lock,
+ ** there is no point in blocking waiting for any readers. Assuming no
+ ** other error occurs, this function will return SQLITE_BUSY to the caller.
*/
if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){
rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_WRITE_LOCK, 1);
@@ -53336,7 +49242,6 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
pWal->writeLock = 1;
}else if( rc==SQLITE_BUSY ){
eMode2 = SQLITE_CHECKPOINT_PASSIVE;
- xBusy2 = 0;
rc = SQLITE_OK;
}
}
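/* For context, the same checkpoint modes are exposed to applications via
** the public sqlite3_wal_checkpoint_v2() interface. A hedged sketch (the
** database path is illustrative and the program must be linked against
** SQLite): PASSIVE never invokes the busy handler, while FULL and RESTART
** wait for writers and readers as described in the comments above. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  int nLog = 0, nCkpt = 0;
  if( sqlite3_open("example.db", &db)!=SQLITE_OK ) return 1;
  int rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_PASSIVE,
                                     &nLog, &nCkpt);
  printf("rc=%d  wal frames=%d  checkpointed=%d\n", rc, nLog, nCkpt);
  sqlite3_close(db);
  return 0;
}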
@@ -53354,7 +49259,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
rc = SQLITE_CORRUPT_BKPT;
}else{
- rc = walCheckpoint(pWal, eMode2, xBusy2, pBusyArg, sync_flags, zBuf);
+ rc = walCheckpoint(pWal, eMode2, xBusy, pBusyArg, sync_flags, zBuf);
}
/* If no error occurred, set the output variables. */
@@ -53512,7 +49417,7 @@ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file implements an external (disk-based) database using BTrees.
+** This file implements an external (disk-based) database using BTrees.
** For a detailed discussion of BTrees, refer to
**
** Donald E. Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3:
@@ -53638,7 +49543,7 @@ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){
**
** The flags define the format of this btree page. The leaf flag means that
** this page has no children. The zerodata flag means that this page carries
-** only keys and no data. The intkey flag means that the key is an integer
+** only keys and no data. The intkey flag means that the key is an integer
** which is stored in the key size entry of the cell header rather than in
** the payload area.
**
@@ -53716,7 +49621,6 @@ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){
** 4 Number of leaf pointers on this page
** * zero or more pages numbers of leaves
*/
-/* #include "sqliteInt.h" */
/* The following value is the maximum cell size assuming a maximum page
@@ -53734,7 +49638,6 @@ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){
/* Forward declarations */
typedef struct MemPage MemPage;
typedef struct BtLock BtLock;
-typedef struct CellInfo CellInfo;
/*
** This is a magic string that appears at the beginning of every
@@ -53777,14 +49680,12 @@ typedef struct CellInfo CellInfo;
struct MemPage {
u8 isInit; /* True if previously initialized. MUST BE FIRST! */
u8 nOverflow; /* Number of overflow cell bodies in aCell[] */
- u8 intKey; /* True if table b-trees. False for index b-trees */
- u8 intKeyLeaf; /* True if the leaf of an intKey table */
- u8 noPayload; /* True if internal intKey page (thus w/o data) */
- u8 leaf; /* True if a leaf page */
+ u8 intKey; /* True if intkey flag is set */
+ u8 leaf; /* True if leaf flag is set */
+ u8 hasData; /* True if this page stores data */
u8 hdrOffset; /* 100 for page 1. 0 otherwise */
u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */
u8 max1bytePayload; /* min(maxLocal,127) */
- u8 bBusy; /* Prevent endless loops on corrupt database files */
u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */
u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */
u16 cellOffset; /* Index in aData of first cell pointer */
@@ -53798,10 +49699,7 @@ struct MemPage {
u8 *aData; /* Pointer to disk image of the page data */
u8 *aDataEnd; /* One byte past the end of usable data */
u8 *aCellIdx; /* The cell index area */
- u8 *aDataOfst; /* Same as aData for leaves. aData+4 for interior */
DbPage *pDbPage; /* Pager page handle */
- u16 (*xCellSize)(MemPage*,u8*); /* cellSizePtr method */
- void (*xParseCell)(MemPage*,u8*,CellInfo*); /* btreeParseCell method */
Pgno pgno; /* Page number for this page */
};
@@ -53857,10 +49755,8 @@ struct Btree {
u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */
u8 sharable; /* True if we can share pBt with another db */
u8 locked; /* True if db currently has pBt locked */
- u8 hasIncrblobCur; /* True if there are one or more Incrblob cursors */
int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */
int nBackup; /* Number of backup operations reading this btree */
- u32 iDataVersion; /* Combines with pBt->pPager->iDataVersion */
Btree *pNext; /* List of other sharable Btrees from the same db */
Btree *pPrev; /* Back pointer of the same list */
#ifndef SQLITE_OMIT_SHARED_CACHE
@@ -53927,9 +49823,6 @@ struct BtShared {
#endif
u8 inTransaction; /* Transaction state */
u8 max1bytePayload; /* Maximum first byte of cell for a 1-byte payload */
-#ifdef SQLITE_HAS_CODEC
- u8 optimalReserve; /* Desired amount of reserved space per page */
-#endif
u16 btsFlags; /* Boolean parameters. See BTS_* macros below */
u16 maxLocal; /* Maximum local payload in non-LEAFDATA tables */
u16 minLocal; /* Minimum local payload in non-LEAFDATA tables */
@@ -53949,7 +49842,7 @@ struct BtShared {
BtLock *pLock; /* List of locks held on this shared-btree struct */
Btree *pWriter; /* Btree with currently open write transaction */
#endif
- u8 *pTmpSpace; /* Temp space sufficient to hold a single cell */
+ u8 *pTmpSpace; /* BtShared.pageSize bytes of space for tmp use */
};
/*
@@ -53968,11 +49861,14 @@ struct BtShared {
** about a cell. The parseCellPtr() function fills in this structure
** based on information extract from the raw disk page.
*/
+typedef struct CellInfo CellInfo;
struct CellInfo {
- i64 nKey; /* The key for INTKEY tables, or nPayload otherwise */
- u8 *pPayload; /* Pointer to the start of payload */
- u32 nPayload; /* Bytes of payload */
- u16 nLocal; /* Amount of payload held locally, not on overflow */
+ i64 nKey; /* The key for INTKEY tables, or number of bytes in key */
+ u8 *pCell; /* Pointer to the start of cell content */
+ u32 nData; /* Number of bytes of data */
+ u32 nPayload; /* Total amount of payload */
+ u16 nHeader; /* Size of the cell content header in bytes */
+ u16 nLocal; /* Amount of payload held locally */
u16 iOverflow; /* Offset to overflow page number. Zero if no overflow */
u16 nSize; /* Size of the cell content on the main b-tree page */
};
@@ -54001,50 +49897,35 @@ struct CellInfo {
**
** Fields in this structure are accessed under the BtShared.mutex
** found at self->pBt->mutex.
-**
-** skipNext meaning:
-** eState==SKIPNEXT && skipNext>0: Next sqlite3BtreeNext() is no-op.
-** eState==SKIPNEXT && skipNext<0: Next sqlite3BtreePrevious() is no-op.
-** eState==FAULT: Cursor fault with skipNext as error code.
*/
struct BtCursor {
Btree *pBtree; /* The Btree to which this cursor belongs */
BtShared *pBt; /* The BtShared this cursor points to */
- BtCursor *pNext; /* Forms a linked list of all cursors */
+ BtCursor *pNext, *pPrev; /* Forms a linked list of all cursors */
+ struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */
+#ifndef SQLITE_OMIT_INCRBLOB
Pgno *aOverflow; /* Cache of overflow page locations */
- CellInfo info; /* A parse of the cell we are pointing at */
- i64 nKey; /* Size of pKey, or last integer key */
-  void *pKey; /* Saved key that was cursor's last known position */
+#endif
Pgno pgnoRoot; /* The root page of this tree */
- int nOvflAlloc; /* Allocated size of aOverflow[] array */
- int skipNext; /* Prev() is noop if negative. Next() is noop if positive.
- ** Error code if eState==CURSOR_FAULT */
- u8 curFlags; /* zero or more BTCF_* flags defined below */
- u8 curPagerFlags; /* Flags to send to sqlite3PagerAcquire() */
+ sqlite3_int64 cachedRowid; /* Next rowid cache. 0 means not valid */
+ CellInfo info; /* A parse of the cell we are pointing at */
+ i64 nKey; /* Size of pKey, or last integer key */
+ void *pKey; /* Saved key that was cursor's last known position */
+ int skipNext; /* Prev() is noop if negative. Next() is noop if positive */
+ u8 wrFlag; /* True if writable */
+ u8 atLast; /* Cursor pointing to the last entry */
+ u8 validNKey; /* True if info.nKey is valid */
u8 eState; /* One of the CURSOR_XXX constants (see below) */
- u8 hints; /* As configured by CursorSetHints() */
- /* All fields above are zeroed when the cursor is allocated. See
- ** sqlite3BtreeCursorZero(). Fields that follow must be manually
- ** initialized. */
- i8 iPage; /* Index of current page in apPage */
- u8 curIntKey; /* Value of apPage[0]->intKey */
- struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */
- void *padding1; /* Make object size a multiple of 16 */
+#ifndef SQLITE_OMIT_INCRBLOB
+ u8 isIncrblobHandle; /* True if this cursor is an incr. io handle */
+#endif
+ u8 hints; /* As configured by CursorSetHints() */
+ i16 iPage; /* Index of current page in apPage */
u16 aiIdx[BTCURSOR_MAX_DEPTH]; /* Current index in apPage[i] */
MemPage *apPage[BTCURSOR_MAX_DEPTH]; /* Pages from root to current page */
};
/*
-** Legal values for BtCursor.curFlags
-*/
-#define BTCF_WriteFlag 0x01 /* True if a write cursor */
-#define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */
-#define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */
-#define BTCF_AtLast 0x08 /* Cursor is pointing to the last entry */
-#define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */
-#define BTCF_Multiple 0x20 /* Maybe another cursor on the same btree */
-
-/*
** Potential values for BtCursor.eState.
**
** CURSOR_INVALID:
@@ -54068,11 +49949,11 @@ struct BtCursor {
** seek the cursor to the saved position.
**
** CURSOR_FAULT:
-** An unrecoverable error (an I/O error or a malloc failure) has occurred
+** An unrecoverable error (an I/O error or a malloc failure) has occurred
** on a different connection that shares the BtShared cache with this
** cursor. The error has left the cache in an inconsistent state.
** Do nothing else with this cursor. Any attempt to use the cursor
-** should return the error code stored in BtCursor.skipNext
+** should return the error code stored in BtCursor.skipNext
*/
#define CURSOR_INVALID 0
#define CURSOR_VALID 1
@@ -54182,10 +50063,7 @@ struct IntegrityCk {
int mxErr; /* Stop accumulating errors when this reaches zero */
int nErr; /* Number of messages written to zErrMsg so far */
int mallocFailed; /* A memory allocation error has occurred */
- const char *zPfx; /* Error message prefix */
- int v1, v2; /* Values for up to two %d fields in zPfx */
StrAccum errMsg; /* Accumulate the error message text here */
- u32 *heap; /* Min-heap used for analyzing cell coverage */
};
/*
@@ -54196,23 +50074,6 @@ struct IntegrityCk {
#define get4byte sqlite3Get4byte
#define put4byte sqlite3Put4byte
-/*
-** get2byteAligned(), unlike get2byte(), requires that its argument point to a
-** two-byte aligned address. get2byteAligned() is only used for accessing the
-** cell addresses in a btree header.
-*/
-#if SQLITE_BYTEORDER==4321
-# define get2byteAligned(x) (*(u16*)(x))
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && GCC_VERSION>=4008000
-# define get2byteAligned(x) __builtin_bswap16(*(u16*)(x))
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(_MSC_VER) && _MSC_VER>=1300
-# define get2byteAligned(x) _byteswap_ushort(*(u16*)(x))
-#else
-# define get2byteAligned(x) ((x)[0]<<8 | (x)[1])
-#endif
-
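/* A tiny standalone illustration of the 2-byte big-endian read that the
** portable fallback above performs; the removed get2byteAligned() variants
** only add faster, alignment-dependent forms of the same operation. */
#include <stdio.h>

int main(void){
  unsigned char a[2] = { 0x01, 0x2C };   /* 0x012C == 300, stored big-endian */
  unsigned int v = (a[0]<<8) | a[1];     /* same computation as the macro */
  printf("%u\n", v);                     /* prints 300 */
  return 0;
}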
/************** End of btreeInt.h ********************************************/
/************** Continuing where we left off in btmutex.c ********************/
#ifndef SQLITE_OMIT_SHARED_CACHE
@@ -54237,7 +50098,7 @@ static void lockBtreeMutex(Btree *p){
** Release the BtShared mutex associated with B-Tree handle p and
** clear the p->locked boolean.
*/
-static void SQLITE_NOINLINE unlockBtreeMutex(Btree *p){
+static void unlockBtreeMutex(Btree *p){
BtShared *pBt = p->pBt;
assert( p->locked==1 );
assert( sqlite3_mutex_held(pBt->mutex) );
@@ -54248,9 +50109,6 @@ static void SQLITE_NOINLINE unlockBtreeMutex(Btree *p){
p->locked = 0;
}
-/* Forward reference */
-static void SQLITE_NOINLINE btreeLockCarefully(Btree *p);
-
/*
** Enter a mutex on the given BTree object.
**
@@ -54268,6 +50126,8 @@ static void SQLITE_NOINLINE btreeLockCarefully(Btree *p);
** subsequent Btrees that desire a lock.
*/
SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
+ Btree *pLater;
+
/* Some basic sanity checking on the Btree. The list of Btrees
** connected by pNext and pPrev should be in sorted order by
** Btree.pBt value. All elements of the list should belong to
@@ -54292,20 +50152,9 @@ SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
if( !p->sharable ) return;
p->wantToLock++;
if( p->locked ) return;
- btreeLockCarefully(p);
-}
-
-/* This is a helper function for sqlite3BtreeEnter(). By moving
-** complex, but seldom used logic, out of sqlite3BtreeEnter() and
-** into this routine, we avoid unnecessary stack pointer changes
-** and thus help the sqlite3BtreeEnter() routine to run much faster
-** in the common case.
-*/
-static void SQLITE_NOINLINE btreeLockCarefully(Btree *p){
- Btree *pLater;
/* In most cases, we should be able to acquire the lock we
- ** want without having to go through the ascending lock
+    ** want without having to go through the ascending lock
** procedure that follows. Just be sure not to block.
*/
if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){
@@ -54335,12 +50184,10 @@ static void SQLITE_NOINLINE btreeLockCarefully(Btree *p){
}
}
-
/*
** Exit the recursive mutex on a Btree.
*/
SQLITE_PRIVATE void sqlite3BtreeLeave(Btree *p){
- assert( sqlite3_mutex_held(p->db->mutex) );
if( p->sharable ){
assert( p->wantToLock>0 );
p->wantToLock--;
@@ -54512,11 +50359,10 @@ SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file implements an external (disk-based) database using BTrees.
+** This file implements an external (disk-based) database using BTrees.
** See the header comment on "btreeInt.h" for additional information.
** Including a description of file format and an overview of operation.
*/
-/* #include "btreeInt.h" */
/*
** The header string that appears at the beginning of every
@@ -54589,7 +50435,7 @@ static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
** The shared cache setting effects only future calls to
** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int enable){
+SQLITE_API int sqlite3_enable_shared_cache(int enable){
sqlite3GlobalConfig.sharedCacheEnabled = enable;
return SQLITE_OK;
}
@@ -54665,7 +50511,7 @@ static int hasSharedCacheTableLock(
** the correct locks are held. So do not bother - just return true.
** This case does not come up very often anyhow.
*/
- if( isIndex && (!pSchema || (pSchema->schemaFlags&DB_SchemaLoaded)==0) ){
+ if( isIndex && (!pSchema || (pSchema->flags&DB_SchemaLoaded)==0) ){
return 1;
}
@@ -54678,12 +50524,6 @@ static int hasSharedCacheTableLock(
for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){
Index *pIdx = (Index *)sqliteHashData(p);
if( pIdx->tnum==(int)iRoot ){
- if( iTab ){
- /* Two or more indexes share the same root page. There must
- ** be imposter tables. So just return true. The assert is not
- ** useful in that case. */
- return 1;
- }
iTab = pIdx->pTable->tnum;
}
}
@@ -54955,11 +50795,16 @@ static int cursorHoldsMutex(BtCursor *p){
}
#endif
+
+#ifndef SQLITE_OMIT_INCRBLOB
/*
-** Invalidate the overflow cache of the cursor passed as the first argument
-** on the shared btree structure pBt.
+** Invalidate the overflow page-list cache for cursor pCur, if any.
*/
-#define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl)
+static void invalidateOverflowCache(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ sqlite3_free(pCur->aOverflow);
+ pCur->aOverflow = 0;
+}
/*
** Invalidate the overflow page-list cache for all cursors opened
@@ -54973,7 +50818,6 @@ static void invalidateAllOverflowCache(BtShared *pBt){
}
}
-#ifndef SQLITE_OMIT_INCRBLOB
/*
** This function is called before modifying the contents of a table
** to invalidate any incrblob cursors that are open on the
@@ -54993,21 +50837,19 @@ static void invalidateIncrblobCursors(
int isClearTable /* True if all rows are being deleted */
){
BtCursor *p;
- if( pBtree->hasIncrblobCur==0 ) return;
+ BtShared *pBt = pBtree->pBt;
assert( sqlite3BtreeHoldsMutex(pBtree) );
- pBtree->hasIncrblobCur = 0;
- for(p=pBtree->pBt->pCursor; p; p=p->pNext){
- if( (p->curFlags & BTCF_Incrblob)!=0 ){
- pBtree->hasIncrblobCur = 1;
- if( isClearTable || p->info.nKey==iRow ){
- p->eState = CURSOR_INVALID;
- }
+ for(p=pBt->pCursor; p; p=p->pNext){
+ if( p->isIncrblobHandle && (isClearTable || p->info.nKey==iRow) ){
+ p->eState = CURSOR_INVALID;
}
}
}
#else
- /* Stub function when INCRBLOB is omitted */
+ /* Stub functions when INCRBLOB is omitted */
+ #define invalidateOverflowCache(x)
+ #define invalidateAllOverflowCache(x)
#define invalidateIncrblobCursors(x,y,z)
#endif /* SQLITE_OMIT_INCRBLOB */
@@ -55094,21 +50936,17 @@ static void btreeReleaseAllCursorPages(BtCursor *pCur){
pCur->iPage = -1;
}
+
/*
-** The cursor passed as the only argument must point to a valid entry
-** when this function is called (i.e. have eState==CURSOR_VALID). This
-** function saves the current cursor key in variables pCur->nKey and
-** pCur->pKey. SQLITE_OK is returned if successful or an SQLite error
-** code otherwise.
+** Save the current cursor position in the variables BtCursor.nKey
+** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
**
-** If the cursor is open on an intkey table, then the integer key
-** (the rowid) is stored in pCur->nKey and pCur->pKey is left set to
-** NULL. If the cursor is open on a non-intkey table, then pCur->pKey is
-** set to point to a malloced buffer pCur->nKey bytes in size containing
-** the key.
+** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
+** prior to calling this routine.
*/
-static int saveCursorKey(BtCursor *pCur){
+static int saveCursorPosition(BtCursor *pCur){
int rc;
+
assert( CURSOR_VALID==pCur->eState );
assert( 0==pCur->pKey );
assert( cursorHoldsMutex(pCur) );
@@ -55120,9 +50958,10 @@ static int saveCursorKey(BtCursor *pCur){
** stores the integer key in pCur->nKey. In this case this value is
** all that is required. Otherwise, if pCur is not open on an intKey
** table, then malloc space for and store the pCur->nKey bytes of key
- ** data. */
- if( 0==pCur->curIntKey ){
- void *pKey = sqlite3Malloc( pCur->nKey );
+ ** data.
+ */
+ if( 0==pCur->apPage[0]->intKey ){
+ void *pKey = sqlite3Malloc( (int)pCur->nKey );
if( pKey ){
rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey);
if( rc==SQLITE_OK ){
@@ -55134,89 +50973,29 @@ static int saveCursorKey(BtCursor *pCur){
rc = SQLITE_NOMEM;
}
}
- assert( !pCur->curIntKey || !pCur->pKey );
- return rc;
-}
-
-/*
-** Save the current cursor position in the variables BtCursor.nKey
-** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
-**
-** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
-** prior to calling this routine.
-*/
-static int saveCursorPosition(BtCursor *pCur){
- int rc;
-
- assert( CURSOR_VALID==pCur->eState || CURSOR_SKIPNEXT==pCur->eState );
- assert( 0==pCur->pKey );
- assert( cursorHoldsMutex(pCur) );
-
- if( pCur->eState==CURSOR_SKIPNEXT ){
- pCur->eState = CURSOR_VALID;
- }else{
- pCur->skipNext = 0;
- }
+ assert( !pCur->apPage[0]->intKey || !pCur->pKey );
- rc = saveCursorKey(pCur);
if( rc==SQLITE_OK ){
btreeReleaseAllCursorPages(pCur);
pCur->eState = CURSOR_REQUIRESEEK;
}
- pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl|BTCF_AtLast);
+ invalidateOverflowCache(pCur);
return rc;
}
-/* Forward reference */
-static int SQLITE_NOINLINE saveCursorsOnList(BtCursor*,Pgno,BtCursor*);
-
/*
** Save the positions of all cursors (except pExcept) that are open on
-** the table with root-page iRoot. "Saving the cursor position" means that
-** the location in the btree is remembered in such a way that it can be
-** moved back to the same spot after the btree has been modified. This
-** routine is called just before cursor pExcept is used to modify the
-** table, for example in BtreeDelete() or BtreeInsert().
-**
-** If there are two or more cursors on the same btree, then all such
-** cursors should have their BTCF_Multiple flag set. The btreeCursor()
-** routine enforces that rule. This routine only needs to be called in
-** the uncommon case when pExpect has the BTCF_Multiple flag set.
-**
-** If pExpect!=NULL and if no other cursors are found on the same root-page,
-** then the BTCF_Multiple flag on pExpect is cleared, to avoid another
-** pointless call to this routine.
-**
-** Implementation note: This routine merely checks to see if any cursors
-** need to be saved. It calls out to saveCursorsOnList() in the (unusual)
-** event that cursors are in need of being saved.
+** the table with root-page iRoot. Usually, this is called just before cursor
+** pExcept is used to modify the table (BtreeDelete() or BtreeInsert()).
*/
static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){
BtCursor *p;
assert( sqlite3_mutex_held(pBt->mutex) );
assert( pExcept==0 || pExcept->pBt==pBt );
for(p=pBt->pCursor; p; p=p->pNext){
- if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ) break;
- }
- if( p ) return saveCursorsOnList(p, iRoot, pExcept);
- if( pExcept ) pExcept->curFlags &= ~BTCF_Multiple;
- return SQLITE_OK;
-}
-
-/* This helper routine to saveAllCursors does the actual work of saving
-** the cursors if and when a cursor is found that actually requires saving.
-** The common case is that no cursors need to be saved, so this routine is
-** broken out from its caller to avoid unnecessary stack pointer movement.
-*/
-static int SQLITE_NOINLINE saveCursorsOnList(
- BtCursor *p, /* The first cursor that needs saving */
- Pgno iRoot, /* Only save cursor with this iRoot. Save all if zero */
- BtCursor *pExcept /* Do not save this cursor */
-){
- do{
if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){
- if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
+ if( p->eState==CURSOR_VALID ){
int rc = saveCursorPosition(p);
if( SQLITE_OK!=rc ){
return rc;
@@ -55226,8 +51005,7 @@ static int SQLITE_NOINLINE saveCursorsOnList(
btreeReleaseAllCursorPages(p);
}
}
- p = p->pNext;
- }while( p );
+ }
return SQLITE_OK;
}
@@ -55288,19 +51066,17 @@ static int btreeMoveto(
*/
static int btreeRestoreCursorPosition(BtCursor *pCur){
int rc;
- int skipNext;
assert( cursorHoldsMutex(pCur) );
assert( pCur->eState>=CURSOR_REQUIRESEEK );
if( pCur->eState==CURSOR_FAULT ){
return pCur->skipNext;
}
pCur->eState = CURSOR_INVALID;
- rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &skipNext);
+ rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &pCur->skipNext);
if( rc==SQLITE_OK ){
sqlite3_free(pCur->pKey);
pCur->pKey = 0;
assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID );
- pCur->skipNext |= skipNext;
if( pCur->skipNext && pCur->eState==CURSOR_VALID ){
pCur->eState = CURSOR_SKIPNEXT;
}
@@ -55314,49 +51090,25 @@ static int btreeRestoreCursorPosition(BtCursor *pCur){
SQLITE_OK)
/*
-** Determine whether or not a cursor has moved from the position where
-** it was last placed, or has been invalidated for any other reason.
-** Cursors can move when the row they are pointing at is deleted out
-** from under them, for example. Cursor might also move if a btree
-** is rebalanced.
-**
-** Calling this routine with a NULL cursor pointer returns false.
-**
-** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor
-** back to where it ought to be if this routine returns true.
-*/
-SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur){
- return pCur->eState!=CURSOR_VALID;
-}
-
-/*
-** This routine restores a cursor back to its original position after it
-** has been moved by some outside activity (such as a btree rebalance or
-** a row having been deleted out from under the cursor).
-**
-** On success, the *pDifferentRow parameter is false if the cursor is left
-** pointing at exactly the same row. *pDifferentRow is true if the row the
-** cursor was pointing to has been deleted, forcing the cursor to point to
-** some nearby row.
+** Determine whether or not a cursor has moved from the position it
+** was last placed at. Cursors can move when the row they are pointing
+** at is deleted out from under them.
**
-** This routine should only be called for a cursor that just returned
-** TRUE from sqlite3BtreeCursorHasMoved().
+** This routine returns an error code if something goes wrong. The
+** integer *pHasMoved is set to one if the cursor has moved and 0 if not.
*/
-SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow){
+SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur, int *pHasMoved){
int rc;
- assert( pCur!=0 );
- assert( pCur->eState!=CURSOR_VALID );
rc = restoreCursorPosition(pCur);
if( rc ){
- *pDifferentRow = 1;
+ *pHasMoved = 1;
return rc;
}
- if( pCur->eState!=CURSOR_VALID ){
- *pDifferentRow = 1;
+ if( pCur->eState!=CURSOR_VALID || NEVER(pCur->skipNext!=0) ){
+ *pHasMoved = 1;
}else{
- assert( pCur->skipNext==0 );
- *pDifferentRow = 0;
+ *pHasMoved = 0;
}
return SQLITE_OK;
}
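
The cursor-state logic above amounts to a small state machine: any state other
than CURSOR_VALID means the cursor no longer sits on the row it was last placed
on. A self-contained sketch of that test, using stand-in definitions rather
than SQLite's own (the enum ordering here is an assumption):

    #include <stdio.h>

    typedef enum {
      DEMO_CURSOR_INVALID,     /* not pointing at any row */
      DEMO_CURSOR_VALID,       /* pointing at a row */
      DEMO_CURSOR_SKIPNEXT,    /* valid, but the next step must be skipped */
      DEMO_CURSOR_REQUIRESEEK, /* position saved; must re-seek before use */
      DEMO_CURSOR_FAULT        /* unrecoverable error on the shared btree */
    } DemoCursorState;

    /* Mirrors the "has the cursor moved?" test: anything but VALID counts. */
    static int demoCursorHasMoved(DemoCursorState eState){
      return eState != DEMO_CURSOR_VALID;
    }

    int main(void){
      printf("%d\n", demoCursorHasMoved(DEMO_CURSOR_REQUIRESEEK)); /* 1 */
      return 0;
    }
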
@@ -55489,222 +51241,128 @@ static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){
** the page, 1 means the second cell, and so forth) return a pointer
** to the cell content.
**
-** findCellPastPtr() does the same except it skips past the initial
-** 4-byte child pointer found on interior pages, if there is one.
-**
** This routine works only for pages that do not contain overflow cells.
*/
#define findCell(P,I) \
- ((P)->aData + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
-#define findCellPastPtr(P,I) \
- ((P)->aDataOfst + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
+ ((P)->aData + ((P)->maskPage & get2byte(&(P)->aCellIdx[2*(I)])))
+#define findCellv2(D,M,O,I) (D+(M&get2byte(D+(O+2*(I)))))
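
findCell() above is a lookup through the cell pointer array: each entry is a
2-byte big-endian offset, masked with maskPage (pageSize-1 for power-of-two
page sizes). A minimal self-contained sketch of the same lookup, with local
stand-ins for the library macros:

    /* Read a 16-bit big-endian value, the encoding used for cell offsets. */
    static unsigned demo_get2byte(const unsigned char *p){
      return ((unsigned)p[0] << 8) | p[1];
    }

    /* Return a pointer to cell i: aData is the page image, aCellIdx the start
    ** of the cell pointer array, maskPage is pageSize-1. */
    static unsigned char *demo_find_cell(unsigned char *aData,
                                         const unsigned char *aCellIdx,
                                         unsigned maskPage, int i){
      return &aData[maskPage & demo_get2byte(&aCellIdx[2*i])];
    }

    int main(void){
      /* Tiny fake page: the pointer array at offset 0 says cell 0 is at 8. */
      unsigned char page[16] = {0x00, 0x08};
      page[8] = 0x2a;
      return demo_find_cell(page, page, sizeof(page)-1, 0)[0]==0x2a ? 0 : 1;
    }
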
/*
-** This is common tail processing for btreeParseCellPtr() and
-** btreeParseCellPtrIndex() for the case when the cell does not fit entirely
-** on a single B-tree page. Make necessary adjustments to the CellInfo
-** structure.
+** This is a more complex version of findCell() that works for
+** pages that do contain overflow cells.
*/
-static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow(
- MemPage *pPage, /* Page containing the cell */
- u8 *pCell, /* Pointer to the cell text. */
- CellInfo *pInfo /* Fill in this structure */
-){
- /* If the payload will not fit completely on the local page, we have
- ** to decide how much to store locally and how much to spill onto
- ** overflow pages. The strategy is to minimize the amount of unused
- ** space on overflow pages while keeping the amount of local storage
- ** in between minLocal and maxLocal.
- **
- ** Warning: changing the way overflow payload is distributed in any
- ** way will result in an incompatible file format.
- */
- int minLocal; /* Minimum amount of payload held locally */
- int maxLocal; /* Maximum amount of payload held locally */
- int surplus; /* Overflow payload available for local storage */
-
- minLocal = pPage->minLocal;
- maxLocal = pPage->maxLocal;
- surplus = minLocal + (pInfo->nPayload - minLocal)%(pPage->pBt->usableSize-4);
- testcase( surplus==maxLocal );
- testcase( surplus==maxLocal+1 );
- if( surplus <= maxLocal ){
- pInfo->nLocal = (u16)surplus;
- }else{
- pInfo->nLocal = (u16)minLocal;
+static u8 *findOverflowCell(MemPage *pPage, int iCell){
+ int i;
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ for(i=pPage->nOverflow-1; i>=0; i--){
+ int k;
+ k = pPage->aiOvfl[i];
+ if( k<=iCell ){
+ if( k==iCell ){
+ return pPage->apOvfl[i];
+ }
+ iCell--;
+ }
}
- pInfo->iOverflow = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell);
- pInfo->nSize = pInfo->iOverflow + 4;
+ return findCell(pPage, iCell);
}
/*
-** The following routines are implementations of the MemPage.xParseCell()
-** method.
-**
-** Parse a cell content block and fill in the CellInfo structure.
+** Parse a cell content block and fill in the CellInfo structure. There
+** are two versions of this function. btreeParseCell() takes a
+** cell index as the second argument and btreeParseCellPtr()
+** takes a pointer to the body of the cell as its second argument.
**
-** btreeParseCellPtr() => table btree leaf nodes
-** btreeParseCellNoPayload() => table btree internal nodes
-** btreeParseCellPtrIndex() => index btree nodes
-**
-** There is also a wrapper function btreeParseCell() that works for
-** all MemPage types and that references the cell by index rather than
-** by pointer.
+** Within this file, the parseCell() macro can be called instead of
+** btreeParseCellPtr(). Using some compilers, this will be faster.
*/
-static void btreeParseCellPtrNoPayload(
- MemPage *pPage, /* Page containing the cell */
- u8 *pCell, /* Pointer to the cell text. */
- CellInfo *pInfo /* Fill in this structure */
-){
- assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- assert( pPage->leaf==0 );
- assert( pPage->noPayload );
- assert( pPage->childPtrSize==4 );
-#ifndef SQLITE_DEBUG
- UNUSED_PARAMETER(pPage);
-#endif
- pInfo->nSize = 4 + getVarint(&pCell[4], (u64*)&pInfo->nKey);
- pInfo->nPayload = 0;
- pInfo->nLocal = 0;
- pInfo->iOverflow = 0;
- pInfo->pPayload = 0;
- return;
-}
static void btreeParseCellPtr(
MemPage *pPage, /* Page containing the cell */
u8 *pCell, /* Pointer to the cell text. */
CellInfo *pInfo /* Fill in this structure */
){
- u8 *pIter; /* For scanning through pCell */
+ u16 n; /* Number of bytes in cell content header */
u32 nPayload; /* Number of bytes of cell payload */
- u64 iKey; /* Extracted Key value */
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- assert( pPage->leaf==0 || pPage->leaf==1 );
- assert( pPage->intKeyLeaf || pPage->noPayload );
- assert( pPage->noPayload==0 );
- assert( pPage->intKeyLeaf );
- assert( pPage->childPtrSize==0 );
- pIter = pCell;
-
- /* The next block of code is equivalent to:
- **
- ** pIter += getVarint32(pIter, nPayload);
- **
- ** The code is inlined to avoid a function call.
- */
- nPayload = *pIter;
- if( nPayload>=0x80 ){
- u8 *pEnd = &pIter[8];
- nPayload &= 0x7f;
- do{
- nPayload = (nPayload<<7) | (*++pIter & 0x7f);
- }while( (*pIter)>=0x80 && pIter<pEnd );
- }
- pIter++;
- /* The next block of code is equivalent to:
- **
- ** pIter += getVarint(pIter, (u64*)&pInfo->nKey);
- **
- ** The code is inlined to avoid a function call.
- */
- iKey = *pIter;
- if( iKey>=0x80 ){
- u8 *pEnd = &pIter[7];
- iKey &= 0x7f;
- while(1){
- iKey = (iKey<<7) | (*++pIter & 0x7f);
- if( (*pIter)<0x80 ) break;
- if( pIter>=pEnd ){
- iKey = (iKey<<8) | *++pIter;
- break;
- }
+ pInfo->pCell = pCell;
+ assert( pPage->leaf==0 || pPage->leaf==1 );
+ n = pPage->childPtrSize;
+ assert( n==4-4*pPage->leaf );
+ if( pPage->intKey ){
+ if( pPage->hasData ){
+ assert( n==0 );
+ n = getVarint32(pCell, nPayload);
+ }else{
+ nPayload = 0;
}
- }
- pIter++;
-
- pInfo->nKey = *(i64*)&iKey;
- pInfo->nPayload = nPayload;
- pInfo->pPayload = pIter;
- testcase( nPayload==pPage->maxLocal );
- testcase( nPayload==pPage->maxLocal+1 );
- if( nPayload<=pPage->maxLocal ){
- /* This is the (easy) common case where the entire payload fits
- ** on the local page. No overflow is required.
- */
- pInfo->nSize = nPayload + (u16)(pIter - pCell);
- if( pInfo->nSize<4 ) pInfo->nSize = 4;
- pInfo->nLocal = (u16)nPayload;
- pInfo->iOverflow = 0;
+ n += getVarint(&pCell[n], (u64*)&pInfo->nKey);
+ pInfo->nData = nPayload;
}else{
- btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
+ pInfo->nData = 0;
+ n += getVarint32(&pCell[n], nPayload);
+ pInfo->nKey = nPayload;
}
-}
-static void btreeParseCellPtrIndex(
- MemPage *pPage, /* Page containing the cell */
- u8 *pCell, /* Pointer to the cell text. */
- CellInfo *pInfo /* Fill in this structure */
-){
- u8 *pIter; /* For scanning through pCell */
- u32 nPayload; /* Number of bytes of cell payload */
-
- assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- assert( pPage->leaf==0 || pPage->leaf==1 );
- assert( pPage->intKeyLeaf==0 );
- assert( pPage->noPayload==0 );
- pIter = pCell + pPage->childPtrSize;
- nPayload = *pIter;
- if( nPayload>=0x80 ){
- u8 *pEnd = &pIter[8];
- nPayload &= 0x7f;
- do{
- nPayload = (nPayload<<7) | (*++pIter & 0x7f);
- }while( *(pIter)>=0x80 && pIter<pEnd );
- }
- pIter++;
- pInfo->nKey = nPayload;
pInfo->nPayload = nPayload;
- pInfo->pPayload = pIter;
+ pInfo->nHeader = n;
testcase( nPayload==pPage->maxLocal );
testcase( nPayload==pPage->maxLocal+1 );
- if( nPayload<=pPage->maxLocal ){
+ if( likely(nPayload<=pPage->maxLocal) ){
/* This is the (easy) common case where the entire payload fits
** on the local page. No overflow is required.
*/
- pInfo->nSize = nPayload + (u16)(pIter - pCell);
- if( pInfo->nSize<4 ) pInfo->nSize = 4;
+ if( (pInfo->nSize = (u16)(n+nPayload))<4 ) pInfo->nSize = 4;
pInfo->nLocal = (u16)nPayload;
pInfo->iOverflow = 0;
}else{
- btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
+ /* If the payload will not fit completely on the local page, we have
+ ** to decide how much to store locally and how much to spill onto
+ ** overflow pages. The strategy is to minimize the amount of unused
+ ** space on overflow pages while keeping the amount of local storage
+ ** in between minLocal and maxLocal.
+ **
+ ** Warning: changing the way overflow payload is distributed in any
+ ** way will result in an incompatible file format.
+ */
+ int minLocal; /* Minimum amount of payload held locally */
+ int maxLocal; /* Maximum amount of payload held locally */
+ int surplus; /* Overflow payload available for local storage */
+
+ minLocal = pPage->minLocal;
+ maxLocal = pPage->maxLocal;
+ surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize - 4);
+ testcase( surplus==maxLocal );
+ testcase( surplus==maxLocal+1 );
+ if( surplus <= maxLocal ){
+ pInfo->nLocal = (u16)surplus;
+ }else{
+ pInfo->nLocal = (u16)minLocal;
+ }
+ pInfo->iOverflow = (u16)(pInfo->nLocal + n);
+ pInfo->nSize = pInfo->iOverflow + 4;
}
}
+#define parseCell(pPage, iCell, pInfo) \
+ btreeParseCellPtr((pPage), findCell((pPage), (iCell)), (pInfo))
static void btreeParseCell(
MemPage *pPage, /* Page containing the cell */
int iCell, /* The cell index. First cell is 0 */
CellInfo *pInfo /* Fill in this structure */
){
- pPage->xParseCell(pPage, findCell(pPage, iCell), pInfo);
+ parseCell(pPage, iCell, pInfo);
}
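
Both sides of this hunk decode SQLite varints: the removed code inlines the
loop, while the restored code calls getVarint()/getVarint32(). The format is
big-endian, seven payload bits per byte with the high bit as a continuation
flag, and a ninth byte, when reached, contributes a full eight bits. A
standalone decoder for illustration (not the library's own routine):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a varint starting at p; store the value in *pVal and return the
    ** number of bytes consumed (1..9). */
    static int demo_get_varint(const unsigned char *p, uint64_t *pVal){
      uint64_t v = 0;
      int i;
      for(i=0; i<8; i++){
        v = (v<<7) | (p[i] & 0x7f);
        if( (p[i] & 0x80)==0 ){ *pVal = v; return i+1; }
      }
      v = (v<<8) | p[8];      /* ninth byte: all eight bits are payload */
      *pVal = v;
      return 9;
    }

    int main(void){
      unsigned char buf[] = { 0x81, 0x00 };   /* encodes the value 128 */
      uint64_t v;
      int n = demo_get_varint(buf, &v);
      printf("%d bytes -> %llu\n", n, (unsigned long long)v); /* 2 -> 128 */
      return 0;
    }
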
/*
-** The following routines are implementations of the MemPage.xCellSize
-** method.
-**
** Compute the total number of bytes that a Cell needs in the cell
** data area of the btree-page. The return number includes the cell
** data header and the local payload, but not any overflow page or
** the space used by the cell pointer.
-**
-** cellSizePtrNoPayload() => table internal nodes
-** cellSizePtr() => all index nodes & table leaf nodes
*/
static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
- u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */
- u8 *pEnd; /* End mark for a varint */
- u32 nSize; /* Size value to return */
+ u8 *pIter = &pCell[pPage->childPtrSize];
+ u32 nSize;
#ifdef SQLITE_DEBUG
/* The value returned by this function should always be the same as
@@ -55712,32 +51370,29 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
** this function verifies that this invariant is not violated. */
CellInfo debuginfo;
- pPage->xParseCell(pPage, pCell, &debuginfo);
+ btreeParseCellPtr(pPage, pCell, &debuginfo);
#endif
- assert( pPage->noPayload==0 );
- nSize = *pIter;
- if( nSize>=0x80 ){
- pEnd = &pIter[8];
- nSize &= 0x7f;
- do{
- nSize = (nSize<<7) | (*++pIter & 0x7f);
- }while( *(pIter)>=0x80 && pIter<pEnd );
- }
- pIter++;
if( pPage->intKey ){
+ u8 *pEnd;
+ if( pPage->hasData ){
+ pIter += getVarint32(pIter, nSize);
+ }else{
+ nSize = 0;
+ }
+
/* pIter now points at the 64-bit integer key value, a variable length
** integer. The following block moves pIter to point at the first byte
** past the end of the key value. */
pEnd = &pIter[9];
while( (*pIter++)&0x80 && pIter<pEnd );
+ }else{
+ pIter += getVarint32(pIter, nSize);
}
+
testcase( nSize==pPage->maxLocal );
testcase( nSize==pPage->maxLocal+1 );
- if( nSize<=pPage->maxLocal ){
- nSize += (u32)(pIter - pCell);
- if( nSize<4 ) nSize = 4;
- }else{
+ if( nSize>pPage->maxLocal ){
int minLocal = pPage->minLocal;
nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4);
testcase( nSize==pPage->maxLocal );
@@ -55745,39 +51400,24 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
if( nSize>pPage->maxLocal ){
nSize = minLocal;
}
- nSize += 4 + (u16)(pIter - pCell);
+ nSize += 4;
}
- assert( nSize==debuginfo.nSize || CORRUPT_DB );
- return (u16)nSize;
-}
-static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){
- u8 *pIter = pCell + 4; /* For looping over bytes of pCell */
- u8 *pEnd; /* End mark for a varint */
+ nSize += (u32)(pIter - pCell);
-#ifdef SQLITE_DEBUG
- /* The value returned by this function should always be the same as
- ** the (CellInfo.nSize) value found by doing a full parse of the
- ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
- ** this function verifies that this invariant is not violated. */
- CellInfo debuginfo;
- pPage->xParseCell(pPage, pCell, &debuginfo);
-#else
- UNUSED_PARAMETER(pPage);
-#endif
+ /* The minimum size of any cell is 4 bytes. */
+ if( nSize<4 ){
+ nSize = 4;
+ }
- assert( pPage->childPtrSize==4 );
- pEnd = pIter + 9;
- while( (*pIter++)&0x80 && pIter<pEnd );
- assert( debuginfo.nSize==(u16)(pIter - pCell) || CORRUPT_DB );
- return (u16)(pIter - pCell);
+ assert( nSize==debuginfo.nSize );
+ return (u16)nSize;
}
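
Both btreeParseCellPtr() and cellSizePtr() split an oversized payload with the
same arithmetic: surplus = minLocal + (nPayload - minLocal) % (usableSize - 4),
kept on the page only if it does not exceed maxLocal. A small worked example;
the limits used below (minLocal=103, maxLocal=989, usableSize=1024) are
illustrative assumptions for a 1024-byte leaf page, not values computed by
this diff:

    #include <stdio.h>

    static unsigned local_payload(unsigned nPayload, unsigned minLocal,
                                  unsigned maxLocal, unsigned usableSize){
      unsigned surplus = minLocal + (nPayload - minLocal) % (usableSize - 4);
      return surplus <= maxLocal ? surplus : minLocal;
    }

    int main(void){
      /* 2500-byte payload: 460 bytes stay local and the remaining 2040 bytes
      ** fill exactly two 1020-byte overflow pages with no wasted space. */
      printf("%u\n", local_payload(2500, 103, 989, 1024));  /* 460 */
      /* 1103-byte payload: the surplus (1103) would exceed maxLocal, so only
      ** minLocal (103) stays local and 1000 bytes spill to an overflow page. */
      printf("%u\n", local_payload(1103, 103, 989, 1024));  /* 103 */
      return 0;
    }
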
-
#ifdef SQLITE_DEBUG
/* This variation on cellSizePtr() is used inside of assert() statements
** only. */
static u16 cellSize(MemPage *pPage, int iCell){
- return pPage->xCellSize(pPage, findCell(pPage, iCell));
+ return cellSizePtr(pPage, findCell(pPage, iCell));
}
#endif
@@ -55791,7 +51431,8 @@ static void ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell, int *pRC){
CellInfo info;
if( *pRC ) return;
assert( pCell!=0 );
- pPage->xParseCell(pPage, pCell, &info);
+ btreeParseCellPtr(pPage, pCell, &info);
+ assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload );
if( info.iOverflow ){
Pgno ovfl = get4byte(&pCell[info.iOverflow]);
ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC);
@@ -55805,15 +51446,10 @@ static void ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell, int *pRC){
** end of the page and all free space is collected into one
** big FreeBlk that occurs in between the header and cell
** pointer array and the cell content area.
-**
-** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a
-** b-tree page so that there are no freeblocks or fragment bytes, all
-** unused bytes are contained in the unallocated space region, and all
-** cells are packed tightly at the end of the page.
*/
static int defragmentPage(MemPage *pPage){
int i; /* Loop counter */
- int pc; /* Address of the i-th cell */
+ int pc; /* Address of the i-th cell */
int hdr; /* Offset to the page header */
int size; /* Size of a cell */
int usableSize; /* Number of usable bytes on a page */
@@ -55822,7 +51458,6 @@ static int defragmentPage(MemPage *pPage){
int nCell; /* Number of cells on the page */
unsigned char *data; /* The page data */
unsigned char *temp; /* Temp area for cell content */
- unsigned char *src; /* Source of content */
int iCellFirst; /* First allowable cell index */
int iCellLast; /* Last possible cell index */
@@ -55832,13 +51467,15 @@ static int defragmentPage(MemPage *pPage){
assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE );
assert( pPage->nOverflow==0 );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- temp = 0;
- src = data = pPage->aData;
+ temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
+ data = pPage->aData;
hdr = pPage->hdrOffset;
cellOffset = pPage->cellOffset;
nCell = pPage->nCell;
assert( nCell==get2byte(&data[hdr+3]) );
usableSize = pPage->pBt->usableSize;
+ cbrk = get2byte(&data[hdr+5]);
+ memcpy(&temp[cbrk], &data[cbrk], usableSize - cbrk);
cbrk = usableSize;
iCellFirst = cellOffset + 2*nCell;
iCellLast = usableSize - 4;
@@ -55848,31 +51485,31 @@ static int defragmentPage(MemPage *pPage){
pc = get2byte(pAddr);
testcase( pc==iCellFirst );
testcase( pc==iCellLast );
+#if !defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
/* These conditions have already been verified in btreeInitPage()
- ** if PRAGMA cell_size_check=ON.
+ ** if SQLITE_ENABLE_OVERSIZE_CELL_CHECK is defined
*/
if( pc<iCellFirst || pc>iCellLast ){
return SQLITE_CORRUPT_BKPT;
}
+#endif
assert( pc>=iCellFirst && pc<=iCellLast );
- size = pPage->xCellSize(pPage, &src[pc]);
+ size = cellSizePtr(pPage, &temp[pc]);
cbrk -= size;
+#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
+ if( cbrk<iCellFirst ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+#else
if( cbrk<iCellFirst || pc+size>usableSize ){
return SQLITE_CORRUPT_BKPT;
}
+#endif
assert( cbrk+size<=usableSize && cbrk>=iCellFirst );
testcase( cbrk+size==usableSize );
testcase( pc+size==usableSize );
+ memcpy(&data[cbrk], &temp[pc], size);
put2byte(pAddr, cbrk);
- if( temp==0 ){
- int x;
- if( cbrk==pc ) continue;
- temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
- x = get2byte(&data[hdr+5]);
- memcpy(&temp[x], &data[x], (cbrk+size) - x);
- src = temp;
- }
- memcpy(&data[cbrk], &src[pc], size);
}
assert( cbrk>=iCellFirst );
put2byte(&data[hdr+5], cbrk);
@@ -55888,70 +51525,6 @@ static int defragmentPage(MemPage *pPage){
}
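
defragmentPage() and the freelist routines below all index the same page
header fields (hdr+1, hdr+3, hdr+5, hdr+7). For orientation, a stand-in
listing of those offsets in the standard b-tree page format (on page 1 the
header starts at byte 100, so hdr is added to each):

    enum {
      DEMO_HDR_TYPE       = 0, /* 1 byte: b-tree page type flags */
      DEMO_HDR_FREEBLOCK  = 1, /* 2 bytes: offset of first freeblock, 0 if none */
      DEMO_HDR_NCELL      = 3, /* 2 bytes: number of cells on the page */
      DEMO_HDR_CONTENT    = 5, /* 2 bytes: start of cell content area, 0 means 65536 */
      DEMO_HDR_NFRAG      = 7, /* 1 byte: total bytes in fragments */
      DEMO_HDR_RIGHTCHILD = 8  /* 4 bytes: right-most child, interior pages only */
    };
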
/*
-** Search the free-list on page pPg for space to store a cell nByte bytes in
-** size. If one can be found, return a pointer to the space and remove it
-** from the free-list.
-**
-** If no suitable space can be found on the free-list, return NULL.
-**
-** This function may detect corruption within pPg. If corruption is
-** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned.
-**
-** Slots on the free list that are between 1 and 3 bytes larger than nByte
-** will be ignored if adding the extra space to the fragmentation count
-** causes the fragmentation count to exceed 60.
-*/
-static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
- const int hdr = pPg->hdrOffset;
- u8 * const aData = pPg->aData;
- int iAddr = hdr + 1;
- int pc = get2byte(&aData[iAddr]);
- int x;
- int usableSize = pPg->pBt->usableSize;
-
- assert( pc>0 );
- do{
- int size; /* Size of the free slot */
- /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
- ** increasing offset. */
- if( pc>usableSize-4 || pc<iAddr+4 ){
- *pRc = SQLITE_CORRUPT_BKPT;
- return 0;
- }
- /* EVIDENCE-OF: R-22710-53328 The third and fourth bytes of each
- ** freeblock form a big-endian integer which is the size of the freeblock
- ** in bytes, including the 4-byte header. */
- size = get2byte(&aData[pc+2]);
- if( (x = size - nByte)>=0 ){
- testcase( x==4 );
- testcase( x==3 );
- if( pc < pPg->cellOffset+2*pPg->nCell || size+pc > usableSize ){
- *pRc = SQLITE_CORRUPT_BKPT;
- return 0;
- }else if( x<4 ){
- /* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total
- ** number of bytes in fragments may not exceed 60. */
- if( aData[hdr+7]>57 ) return 0;
-
- /* Remove the slot from the free-list. Update the number of
- ** fragmented bytes within the page. */
- memcpy(&aData[iAddr], &aData[pc], 2);
- aData[hdr+7] += (u8)x;
- }else{
- /* The slot remains on the free-list. Reduce its size to account
- ** for the portion used by the new allocation. */
- put2byte(&aData[pc+2], x);
- }
- return &aData[pc + x];
- }
- iAddr = pc;
- pc = get2byte(&aData[pc]);
- }while( pc );
-
- return 0;
-}
-
-/*
** Allocate nByte bytes of space from within the B-Tree page passed
** as the first argument. Write into *pIdx the index into pPage->aData[]
** of the first byte of allocated space. Return either SQLITE_OK or
@@ -55967,9 +51540,11 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */
u8 * const data = pPage->aData; /* Local cache of pPage->aData */
+ int nFrag; /* Number of fragmented bytes on pPage */
int top; /* First byte of cell content area */
- int rc = SQLITE_OK; /* Integer return code */
int gap; /* First byte of gap between cell pointers and cell content */
+ int rc; /* Integer return code */
+ int usableSize; /* Usable size of the page */
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
assert( pPage->pBt );
@@ -55977,50 +51552,62 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
assert( nByte>=0 ); /* Minimum cell size is 4 */
assert( pPage->nFree>=nByte );
assert( pPage->nOverflow==0 );
- assert( nByte < (int)(pPage->pBt->usableSize-8) );
+ usableSize = pPage->pBt->usableSize;
+ assert( nByte < usableSize-8 );
+ nFrag = data[hdr+7];
assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf );
gap = pPage->cellOffset + 2*pPage->nCell;
- assert( gap<=65536 );
- /* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size
- ** and the reserved space is zero (the usual value for reserved space)
- ** then the cell content offset of an empty page wants to be 65536.
- ** However, that integer is too large to be stored in a 2-byte unsigned
- ** integer, so a value of 0 is used in its place. */
- top = get2byte(&data[hdr+5]);
- assert( top<=(int)pPage->pBt->usableSize ); /* Prevent by getAndInitPage() */
- if( gap>top ){
- if( top==0 && pPage->pBt->usableSize==65536 ){
- top = 65536;
- }else{
- return SQLITE_CORRUPT_BKPT;
- }
- }
-
- /* If there is enough space between gap and top for one more cell pointer
- ** array entry offset, and if the freelist is not empty, then search the
- ** freelist looking for a free slot big enough to satisfy the request.
- */
+ top = get2byteNotZero(&data[hdr+5]);
+ if( gap>top ) return SQLITE_CORRUPT_BKPT;
testcase( gap+2==top );
testcase( gap+1==top );
testcase( gap==top );
- if( (data[hdr+2] || data[hdr+1]) && gap+2<=top ){
- u8 *pSpace = pageFindSlot(pPage, nByte, &rc);
- if( pSpace ){
- assert( pSpace>=data && (pSpace - data)<65536 );
- *pIdx = (int)(pSpace - data);
- return SQLITE_OK;
- }else if( rc ){
- return rc;
+
+ if( nFrag>=60 ){
+ /* Always defragment highly fragmented pages */
+ rc = defragmentPage(pPage);
+ if( rc ) return rc;
+ top = get2byteNotZero(&data[hdr+5]);
+ }else if( gap+2<=top ){
+ /* Search the freelist looking for a free slot big enough to satisfy
+ ** the request. The allocation is made from the first free slot in
+ ** the list that is large enough to accommodate it.
+ */
+ int pc, addr;
+ for(addr=hdr+1; (pc = get2byte(&data[addr]))>0; addr=pc){
+ int size; /* Size of the free slot */
+ if( pc>usableSize-4 || pc<addr+4 ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ size = get2byte(&data[pc+2]);
+ if( size>=nByte ){
+ int x = size - nByte;
+ testcase( x==4 );
+ testcase( x==3 );
+ if( x<4 ){
+ /* Remove the slot from the free-list. Update the number of
+ ** fragmented bytes within the page. */
+ memcpy(&data[addr], &data[pc], 2);
+ data[hdr+7] = (u8)(nFrag + x);
+ }else if( size+pc > usableSize ){
+ return SQLITE_CORRUPT_BKPT;
+ }else{
+ /* The slot remains on the free-list. Reduce its size to account
+ ** for the portion used by the new allocation. */
+ put2byte(&data[pc+2], x);
+ }
+ *pIdx = pc + x;
+ return SQLITE_OK;
+ }
}
}
- /* The request could not be fulfilled using a freelist slot. Check
- ** to see if defragmentation is necessary.
+ /* Check to make sure there is enough space in the gap to satisfy
+ ** the allocation. If not, defragment.
*/
testcase( gap+2+nByte==top );
if( gap+2+nByte>top ){
- assert( pPage->nCell>0 || CORRUPT_DB );
rc = defragmentPage(pPage);
if( rc ) return rc;
top = get2byteNotZero(&data[hdr+5]);
@@ -56043,101 +51630,90 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
/*
** Return a section of the pPage->aData to the freelist.
-** The first byte of the new free block is pPage->aData[iStart]
-** and the size of the block is iSize bytes.
-**
-** Adjacent freeblocks are coalesced.
-**
-** Note that even though the freeblock list was checked by btreeInitPage(),
-** that routine will not detect overlap between cells or freeblocks. Nor
-** does it detect cells or freeblocks that encroach into the reserved bytes
-** at the end of the page. So do additional corruption checks inside this
-** routine and return SQLITE_CORRUPT if any problems are found.
-*/
-static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
- u16 iPtr; /* Address of ptr to next freeblock */
- u16 iFreeBlk; /* Address of the next freeblock */
- u8 hdr; /* Page header size. 0 or 100 */
- u8 nFrag = 0; /* Reduction in fragmentation */
- u16 iOrigSize = iSize; /* Original value of iSize */
- u32 iLast = pPage->pBt->usableSize-4; /* Largest possible freeblock offset */
- u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */
- unsigned char *data = pPage->aData; /* Page content */
+** The first byte of the new free block is pPage->aData[start]
+** and the size of the block is "size" bytes.
+**
+** Most of the effort here is involved in coalescing adjacent
+** free blocks into a single big free block.
+*/
+static int freeSpace(MemPage *pPage, int start, int size){
+ int addr, pbegin, hdr;
+ int iLast; /* Largest possible freeblock offset */
+ unsigned char *data = pPage->aData;
assert( pPage->pBt!=0 );
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
- assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize );
- assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize );
+ assert( start>=pPage->hdrOffset+6+pPage->childPtrSize );
+ assert( (start + size) <= (int)pPage->pBt->usableSize );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- assert( iSize>=4 ); /* Minimum cell size is 4 */
- assert( iStart<=iLast );
+ assert( size>=0 ); /* Minimum cell size is 4 */
- /* Overwrite deleted information with zeros when the secure_delete
- ** option is enabled */
if( pPage->pBt->btsFlags & BTS_SECURE_DELETE ){
- memset(&data[iStart], 0, iSize);
+ /* Overwrite deleted information with zeros when the secure_delete
+ ** option is enabled */
+ memset(&data[start], 0, size);
}
- /* The list of freeblocks must be in ascending order. Find the
- ** spot on the list where iStart should be inserted.
+ /* Add the space back into the linked list of freeblocks. Note that
+ ** even though the freeblock list was checked by btreeInitPage(),
+ ** btreeInitPage() did not detect overlapping cells or
+ ** freeblocks that overlapped cells. Nor does it detect when the
+ ** cell content area exceeds the value in the page header. If these
+ ** situations arise, then subsequent insert operations might corrupt
+ ** the freelist. So we do need to check for corruption while scanning
+ ** the freelist.
*/
hdr = pPage->hdrOffset;
- iPtr = hdr + 1;
- if( data[iPtr+1]==0 && data[iPtr]==0 ){
- iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */
- }else{
- while( (iFreeBlk = get2byte(&data[iPtr]))>0 && iFreeBlk<iStart ){
- if( iFreeBlk<iPtr+4 ) return SQLITE_CORRUPT_BKPT;
- iPtr = iFreeBlk;
+ addr = hdr + 1;
+ iLast = pPage->pBt->usableSize - 4;
+ assert( start<=iLast );
+ while( (pbegin = get2byte(&data[addr]))<start && pbegin>0 ){
+ if( pbegin<addr+4 ){
+ return SQLITE_CORRUPT_BKPT;
}
- if( iFreeBlk>iLast ) return SQLITE_CORRUPT_BKPT;
- assert( iFreeBlk>iPtr || iFreeBlk==0 );
-
- /* At this point:
- ** iFreeBlk: First freeblock after iStart, or zero if none
- ** iPtr: The address of a pointer to iFreeBlk
- **
- ** Check to see if iFreeBlk should be coalesced onto the end of iStart.
- */
- if( iFreeBlk && iEnd+3>=iFreeBlk ){
- nFrag = iFreeBlk - iEnd;
- if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_BKPT;
- iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]);
- if( iEnd > pPage->pBt->usableSize ) return SQLITE_CORRUPT_BKPT;
- iSize = iEnd - iStart;
- iFreeBlk = get2byte(&data[iFreeBlk]);
+ addr = pbegin;
+ }
+ if( pbegin>iLast ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( pbegin>addr || pbegin==0 );
+ put2byte(&data[addr], start);
+ put2byte(&data[start], pbegin);
+ put2byte(&data[start+2], size);
+ pPage->nFree = pPage->nFree + (u16)size;
+
+ /* Coalesce adjacent free blocks */
+ addr = hdr + 1;
+ while( (pbegin = get2byte(&data[addr]))>0 ){
+ int pnext, psize, x;
+ assert( pbegin>addr );
+ assert( pbegin <= (int)pPage->pBt->usableSize-4 );
+ pnext = get2byte(&data[pbegin]);
+ psize = get2byte(&data[pbegin+2]);
+ if( pbegin + psize + 3 >= pnext && pnext>0 ){
+ int frag = pnext - (pbegin+psize);
+ if( (frag<0) || (frag>(int)data[hdr+7]) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ data[hdr+7] -= (u8)frag;
+ x = get2byte(&data[pnext]);
+ put2byte(&data[pbegin], x);
+ x = pnext + get2byte(&data[pnext+2]) - pbegin;
+ put2byte(&data[pbegin+2], x);
+ }else{
+ addr = pbegin;
}
-
- /* If iPtr is another freeblock (that is, if iPtr is not the freelist
- ** pointer in the page header) then check to see if iStart should be
- ** coalesced onto the end of iPtr.
- */
- if( iPtr>hdr+1 ){
- int iPtrEnd = iPtr + get2byte(&data[iPtr+2]);
- if( iPtrEnd+3>=iStart ){
- if( iPtrEnd>iStart ) return SQLITE_CORRUPT_BKPT;
- nFrag += iStart - iPtrEnd;
- iSize = iEnd - iPtr;
- iStart = iPtr;
- }
- }
- if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_BKPT;
- data[hdr+7] -= nFrag;
- }
- if( iStart==get2byte(&data[hdr+5]) ){
- /* The new freeblock is at the beginning of the cell content area,
- ** so just extend the cell content area rather than create another
- ** freelist entry */
- if( iPtr!=hdr+1 ) return SQLITE_CORRUPT_BKPT;
- put2byte(&data[hdr+1], iFreeBlk);
- put2byte(&data[hdr+5], iEnd);
- }else{
- /* Insert the new freeblock into the freelist */
- put2byte(&data[iPtr], iStart);
- put2byte(&data[iStart], iFreeBlk);
- put2byte(&data[iStart+2], iSize);
- }
- pPage->nFree += iOrigSize;
+ }
+
+ /* If the cell content area begins with a freeblock, remove it. */
+ if( data[hdr+1]==data[hdr+5] && data[hdr+2]==data[hdr+6] ){
+ int top;
+ pbegin = get2byte(&data[hdr+1]);
+ memcpy(&data[hdr+1], &data[pbegin], 2);
+ top = get2byte(&data[hdr+5]) + get2byte(&data[pbegin+2]);
+ put2byte(&data[hdr+5], top);
+ }
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
return SQLITE_OK;
}
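
Each freeblock referenced above carries a 4-byte header: a 2-byte big-endian
offset to the next freeblock (0 terminates the chain) followed by a 2-byte
size that includes the header itself. A self-contained sketch that walks such
a chain and sums the free space, a simplified model of the loop in
btreeInitPage() rather than the library code:

    #include <stdio.h>

    static int demo_freelist_bytes(const unsigned char *data, int hdr,
                                   int usableSize){
      int total = 0;
      int pc = (data[hdr+1] << 8) | data[hdr+2];    /* first freeblock, 0 if none */
      while( pc>0 ){
        int next, size;
        if( pc > usableSize-4 ) return -1;          /* block off the page */
        next = (data[pc] << 8) | data[pc+1];        /* 2-byte next pointer */
        size = (data[pc+2] << 8) | data[pc+3];      /* 2-byte size incl. header */
        total += size;
        if( next>0 && next < pc+size ) return -1;   /* must be in ascending order */
        pc = next;
      }
      return total;
    }

    int main(void){
      unsigned char page[64] = {0};
      page[2] = 32;                     /* header says: first freeblock at 32 */
      page[35] = 20;                    /* that block is 20 bytes, no successor */
      printf("%d\n", demo_freelist_bytes(page, 0, (int)sizeof(page)));  /* 20 */
      return 0;
    }
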
@@ -56161,44 +51737,18 @@ static int decodeFlags(MemPage *pPage, int flagByte){
pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 );
flagByte &= ~PTF_LEAF;
pPage->childPtrSize = 4-4*pPage->leaf;
- pPage->xCellSize = cellSizePtr;
pBt = pPage->pBt;
if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
- /* EVIDENCE-OF: R-03640-13415 A value of 5 means the page is an interior
- ** table b-tree page. */
- assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
- /* EVIDENCE-OF: R-20501-61796 A value of 13 means the page is a leaf
- ** table b-tree page. */
- assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
pPage->intKey = 1;
- if( pPage->leaf ){
- pPage->intKeyLeaf = 1;
- pPage->noPayload = 0;
- pPage->xParseCell = btreeParseCellPtr;
- }else{
- pPage->intKeyLeaf = 0;
- pPage->noPayload = 1;
- pPage->xCellSize = cellSizePtrNoPayload;
- pPage->xParseCell = btreeParseCellPtrNoPayload;
- }
+ pPage->hasData = pPage->leaf;
pPage->maxLocal = pBt->maxLeaf;
pPage->minLocal = pBt->minLeaf;
}else if( flagByte==PTF_ZERODATA ){
- /* EVIDENCE-OF: R-27225-53936 A value of 2 means the page is an interior
- ** index b-tree page. */
- assert( (PTF_ZERODATA)==2 );
- /* EVIDENCE-OF: R-16571-11615 A value of 10 means the page is a leaf
- ** index b-tree page. */
- assert( (PTF_ZERODATA|PTF_LEAF)==10 );
pPage->intKey = 0;
- pPage->intKeyLeaf = 0;
- pPage->noPayload = 0;
- pPage->xParseCell = btreeParseCellPtrIndex;
+ pPage->hasData = 0;
pPage->maxLocal = pBt->maxLocal;
pPage->minLocal = pBt->minLocal;
}else{
- /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is
- ** an error. */
return SQLITE_CORRUPT_BKPT;
}
pPage->max1bytePayload = pBt->max1bytePayload;
@@ -56217,7 +51767,6 @@ static int decodeFlags(MemPage *pPage, int flagByte){
static int btreeInitPage(MemPage *pPage){
assert( pPage->pBt!=0 );
- assert( pPage->pBt->db!=0 );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) );
assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) );
@@ -56239,34 +51788,21 @@ static int btreeInitPage(MemPage *pPage){
hdr = pPage->hdrOffset;
data = pPage->aData;
- /* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating
- ** the b-tree page type. */
if( decodeFlags(pPage, data[hdr]) ) return SQLITE_CORRUPT_BKPT;
assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
pPage->maskPage = (u16)(pBt->pageSize - 1);
pPage->nOverflow = 0;
usableSize = pBt->usableSize;
- pPage->cellOffset = cellOffset = hdr + 8 + pPage->childPtrSize;
+ pPage->cellOffset = cellOffset = hdr + 12 - 4*pPage->leaf;
pPage->aDataEnd = &data[usableSize];
pPage->aCellIdx = &data[cellOffset];
- pPage->aDataOfst = &data[pPage->childPtrSize];
- /* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates
- ** the start of the cell content area. A zero value for this integer is
- ** interpreted as 65536. */
top = get2byteNotZero(&data[hdr+5]);
- /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
- ** number of cells on the page. */
pPage->nCell = get2byte(&data[hdr+3]);
if( pPage->nCell>MX_CELL(pBt) ){
 /* Too many cells for a single page. The page must be corrupt */
return SQLITE_CORRUPT_BKPT;
}
testcase( pPage->nCell==MX_CELL(pBt) );
- /* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only
- ** possible for a root page of a table that contains no rows) then the
- ** offset to the cell content area will equal the page size minus the
- ** bytes of reserved space. */
- assert( pPage->nCell>0 || top==usableSize || CORRUPT_DB );
/* A malformed database page might cause us to read past the end
** of page when parsing a cell.
@@ -56277,19 +51813,20 @@ static int btreeInitPage(MemPage *pPage){
*/
iCellFirst = cellOffset + 2*pPage->nCell;
iCellLast = usableSize - 4;
- if( pBt->db->flags & SQLITE_CellSizeCk ){
+#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
+ {
int i; /* Index into the cell pointer array */
int sz; /* Size of a cell */
if( !pPage->leaf ) iCellLast--;
for(i=0; i<pPage->nCell; i++){
- pc = get2byteAligned(&data[cellOffset+i*2]);
+ pc = get2byte(&data[cellOffset+i*2]);
testcase( pc==iCellFirst );
testcase( pc==iCellLast );
if( pc<iCellFirst || pc>iCellLast ){
return SQLITE_CORRUPT_BKPT;
}
- sz = pPage->xCellSize(pPage, &data[pc]);
+ sz = cellSizePtr(pPage, &data[pc]);
testcase( pc+sz==usableSize );
if( pc+sz>usableSize ){
return SQLITE_CORRUPT_BKPT;
@@ -56297,21 +51834,15 @@ static int btreeInitPage(MemPage *pPage){
}
if( !pPage->leaf ) iCellLast++;
}
+#endif
- /* Compute the total free space on the page
- ** EVIDENCE-OF: R-23588-34450 The two-byte integer at offset 1 gives the
- ** start of the first freeblock on the page, or is zero if there are no
- ** freeblocks. */
+ /* Compute the total free space on the page */
pc = get2byte(&data[hdr+1]);
- nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */
+ nFree = data[hdr+7] + top;
while( pc>0 ){
u16 next, size;
if( pc<iCellFirst || pc>iCellLast ){
- /* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will
- ** always be at least one cell before the first freeblock.
- **
- ** Or, the freeblock is off the end of the page
- */
+ /* Start of free block is off the page */
return SQLITE_CORRUPT_BKPT;
}
next = get2byte(&data[pc]);
@@ -56360,16 +51891,16 @@ static void zeroPage(MemPage *pPage, int flags){
memset(&data[hdr], 0, pBt->usableSize - hdr);
}
data[hdr] = (char)flags;
- first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8);
+ first = hdr + 8 + 4*((flags&PTF_LEAF)==0 ?1:0);
memset(&data[hdr+1], 0, 4);
data[hdr+7] = 0;
put2byte(&data[hdr+5], pBt->usableSize);
pPage->nFree = (u16)(pBt->usableSize - first);
decodeFlags(pPage, flags);
+ pPage->hdrOffset = hdr;
pPage->cellOffset = first;
pPage->aDataEnd = &data[pBt->usableSize];
pPage->aCellIdx = &data[first];
- pPage->aDataOfst = &data[pPage->childPtrSize];
pPage->nOverflow = 0;
assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
pPage->maskPage = (u16)(pBt->pageSize - 1);
@@ -56388,16 +51919,16 @@ static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){
pPage->pDbPage = pDbPage;
pPage->pBt = pBt;
pPage->pgno = pgno;
- pPage->hdrOffset = pgno==1 ? 100 : 0;
+ pPage->hdrOffset = pPage->pgno==1 ? 100 : 0;
return pPage;
}
/*
** Get a page from the pager. Initialize the MemPage.pBt and
-** MemPage.aData elements if needed. See also: btreeGetUnusedPage().
+** MemPage.aData elements if needed.
**
-** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care
-** about the content of the page at this time. So do not go to the disk
+** If the noContent flag is set, it means that we do not care about
+** the content of the page at this time. So do not go to the disk
** to fetch the content. Just fill in the content with zeros for now.
** If in the future we call sqlite3PagerWrite() on this page, that
** means we have started to be concerned about content and the disk
@@ -56445,66 +51976,39 @@ static Pgno btreePagecount(BtShared *pBt){
SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree *p){
assert( sqlite3BtreeHoldsMutex(p) );
assert( ((p->pBt->nPage)&0x8000000)==0 );
- return btreePagecount(p->pBt);
+ return (int)btreePagecount(p->pBt);
}
/*
-** Get a page from the pager and initialize it.
+** Get a page from the pager and initialize it. This routine is just a
+** convenience wrapper around separate calls to btreeGetPage() and
+** btreeInitPage().
**
-** If pCur!=0 then the page is being fetched as part of a moveToChild()
-** call. Do additional sanity checking on the page in this case.
-** And if the fetch fails, this routine must decrement pCur->iPage.
-**
-** The page is fetched as read-write unless pCur is not NULL and is
-** a read-only cursor.
-**
-** If an error occurs, then *ppPage is undefined. It
+** If an error occurs, then the value that *ppPage is set to is undefined. It
** may remain unchanged, or it may be set to an invalid value.
*/
static int getAndInitPage(
BtShared *pBt, /* The database file */
Pgno pgno, /* Number of the page to get */
MemPage **ppPage, /* Write the page pointer here */
- BtCursor *pCur, /* Cursor to receive the page, or NULL */
- int bReadOnly /* True for a read-only page */
+ int bReadonly /* PAGER_GET_READONLY or 0 */
){
int rc;
- DbPage *pDbPage;
assert( sqlite3_mutex_held(pBt->mutex) );
- assert( pCur==0 || ppPage==&pCur->apPage[pCur->iPage] );
- assert( pCur==0 || bReadOnly==pCur->curPagerFlags );
- assert( pCur==0 || pCur->iPage>0 );
+ assert( bReadonly==PAGER_GET_READONLY || bReadonly==0 );
if( pgno>btreePagecount(pBt) ){
rc = SQLITE_CORRUPT_BKPT;
- goto getAndInitPage_error;
- }
- rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly);
- if( rc ){
- goto getAndInitPage_error;
- }
- *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt);
- if( (*ppPage)->isInit==0 ){
- rc = btreeInitPage(*ppPage);
- if( rc!=SQLITE_OK ){
- releasePage(*ppPage);
- goto getAndInitPage_error;
+ }else{
+ rc = btreeGetPage(pBt, pgno, ppPage, bReadonly);
+ if( rc==SQLITE_OK ){
+ rc = btreeInitPage(*ppPage);
+ if( rc!=SQLITE_OK ){
+ releasePage(*ppPage);
+ }
}
}
- /* If obtaining a child page for a cursor, we must verify that the page is
- ** compatible with the root page. */
- if( pCur
- && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey)
- ){
- rc = SQLITE_CORRUPT_BKPT;
- releasePage(*ppPage);
- goto getAndInitPage_error;
- }
- return SQLITE_OK;
-
-getAndInitPage_error:
- if( pCur ) pCur->iPage--;
testcase( pgno==0 );
assert( pgno!=0 || rc==SQLITE_CORRUPT );
return rc;
@@ -56514,49 +52018,17 @@ getAndInitPage_error:
** Release a MemPage. This should be called once for each prior
** call to btreeGetPage.
*/
-static void releasePageNotNull(MemPage *pPage){
- assert( pPage->aData );
- assert( pPage->pBt );
- assert( pPage->pDbPage!=0 );
- assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
- assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData );
- assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- sqlite3PagerUnrefNotNull(pPage->pDbPage);
-}
static void releasePage(MemPage *pPage){
- if( pPage ) releasePageNotNull(pPage);
-}
-
-/*
-** Get an unused page.
-**
-** This works just like btreeGetPage() with the addition:
-**
-** * If the page is already in use for some other purpose, immediately
-** release it and return an SQLITE_CORRUPT error.
-** * Make sure the isInit flag is clear
-*/
-static int btreeGetUnusedPage(
- BtShared *pBt, /* The btree */
- Pgno pgno, /* Number of the page to fetch */
- MemPage **ppPage, /* Return the page in this parameter */
- int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */
-){
- int rc = btreeGetPage(pBt, pgno, ppPage, flags);
- if( rc==SQLITE_OK ){
- if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){
- releasePage(*ppPage);
- *ppPage = 0;
- return SQLITE_CORRUPT_BKPT;
- }
- (*ppPage)->isInit = 0;
- }else{
- *ppPage = 0;
+ if( pPage ){
+ assert( pPage->aData );
+ assert( pPage->pBt );
+ assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+ assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ sqlite3PagerUnref(pPage->pDbPage);
}
- return rc;
}
-
/*
** During a rollback, when the pager reloads information into the cache
** so that the cache is restored to its original state at the start of
@@ -56679,18 +52151,16 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
*/
if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){
if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){
- int nFilename = sqlite3Strlen30(zFilename)+1;
int nFullPathname = pVfs->mxPathname+1;
- char *zFullPathname = sqlite3Malloc(MAX(nFullPathname,nFilename));
+ char *zFullPathname = sqlite3Malloc(nFullPathname);
MUTEX_LOGIC( sqlite3_mutex *mutexShared; )
-
p->sharable = 1;
if( !zFullPathname ){
sqlite3_free(p);
return SQLITE_NOMEM;
}
if( isMemdb ){
- memcpy(zFullPathname, zFilename, nFilename);
+ memcpy(zFullPathname, zFilename, sqlite3Strlen30(zFilename)+1);
}else{
rc = sqlite3OsFullPathname(pVfs, zFilename,
nFullPathname, zFullPathname);
@@ -56747,8 +52217,8 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
** the right size. This is to guard against size changes that result
** when compiling on a different architecture.
*/
- assert( sizeof(i64)==8 );
- assert( sizeof(u64)==8 );
+ assert( sizeof(i64)==8 || sizeof(i64)==4 );
+ assert( sizeof(u64)==8 || sizeof(u64)==4 );
assert( sizeof(u32)==4 );
assert( sizeof(u16)==2 );
assert( sizeof(Pgno)==4 );
@@ -56778,9 +52248,6 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
#ifdef SQLITE_SECURE_DELETE
pBt->btsFlags |= BTS_SECURE_DELETE;
#endif
- /* EVIDENCE-OF: R-51873-39618 The page size for a database file is
- ** determined by the 2-byte integer located at an offset of 16 bytes from
- ** the beginning of the database file. */
pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16);
if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE
|| ((pBt->pageSize-1)&pBt->pageSize)!=0 ){
@@ -56799,9 +52266,6 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
#endif
nReserve = 0;
}else{
- /* EVIDENCE-OF: R-37497-42412 The size of the reserved region is
- ** determined by the one-byte unsigned integer found at an offset of 20
- ** into the database file header. */
nReserve = zDbHeader[20];
pBt->btsFlags |= BTS_PAGESIZE_FIXED;
#ifndef SQLITE_OMIT_AUTOVACUUM
@@ -56936,8 +52400,7 @@ static int removeFromSharingList(BtShared *pBt){
/*
** Make sure pBt->pTmpSpace points to an allocation of
-** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child
-** pointer.
+** MX_CELL_SIZE(pBt) bytes.
*/
static void allocateTempSpace(BtShared *pBt){
if( !pBt->pTmpSpace ){
@@ -56952,16 +52415,8 @@ static void allocateTempSpace(BtShared *pBt){
** it into a database page. This is not actually a problem, but it
 ** does cause a valgrind error when the 1 or 2 bytes of uninitialized
** data is passed to system call write(). So to avoid this error,
- ** zero the first 4 bytes of temp space here.
- **
- ** Also: Provide four bytes of initialized space before the
- ** beginning of pTmpSpace as an area available to prepend the
- ** left-child pointer to the beginning of a cell.
- */
- if( pBt->pTmpSpace ){
- memset(pBt->pTmpSpace, 0, 8);
- pBt->pTmpSpace += 4;
- }
+ ** zero the first 4 bytes of temp space here. */
+ if( pBt->pTmpSpace ) memset(pBt->pTmpSpace, 0, 4);
}
}
@@ -56969,11 +52424,8 @@ static void allocateTempSpace(BtShared *pBt){
** Free the pBt->pTmpSpace allocation
*/
static void freeTempSpace(BtShared *pBt){
- if( pBt->pTmpSpace ){
- pBt->pTmpSpace -= 4;
- sqlite3PageFree(pBt->pTmpSpace);
- pBt->pTmpSpace = 0;
- }
+ sqlite3PageFree( pBt->pTmpSpace);
+ pBt->pTmpSpace = 0;
}
/*
@@ -56999,7 +52451,7 @@ SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){
** The call to sqlite3BtreeRollback() drops any table-locks held by
** this handle.
*/
- sqlite3BtreeRollback(p, SQLITE_OK, 0);
+ sqlite3BtreeRollback(p, SQLITE_OK);
sqlite3BtreeLeave(p);
/* If there are still other outstanding references to the shared-btree
@@ -57058,7 +52510,6 @@ SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){
return SQLITE_OK;
}
-#if SQLITE_MAX_MMAP_SIZE>0
/*
** Change the limit on the amount of the database file that may be
** memory mapped.
@@ -57071,7 +52522,6 @@ SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){
sqlite3BtreeLeave(p);
return SQLITE_OK;
}
-#endif /* SQLITE_MAX_MMAP_SIZE>0 */
/*
** Change the way data is synced to disk in order to increase or decrease
@@ -57135,9 +52585,6 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve,
BtShared *pBt = p->pBt;
assert( nReserve>=-1 && nReserve<=255 );
sqlite3BtreeEnter(p);
-#if SQLITE_HAS_CODEC
- if( nReserve>pBt->optimalReserve ) pBt->optimalReserve = (u8)nReserve;
-#endif
if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
sqlite3BtreeLeave(p);
return SQLITE_READONLY;
@@ -57149,7 +52596,7 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve,
if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE &&
((pageSize-1)&pageSize)==0 ){
assert( (pageSize & 7)==0 );
- assert( !pBt->pCursor );
+ assert( !pBt->pPage1 && !pBt->pCursor );
pBt->pageSize = (u32)pageSize;
freeTempSpace(pBt);
}
@@ -57167,6 +52614,7 @@ SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){
return p->pBt->pageSize;
}
+#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_DEBUG)
/*
** This function is similar to sqlite3BtreeGetReserve(), except that it
** may only be called if it is guaranteed that the b-tree mutex is already
@@ -57179,33 +52627,25 @@ SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){
** database handle that owns *p, causing undefined behavior.
*/
SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){
- int n;
assert( sqlite3_mutex_held(p->pBt->mutex) );
- n = p->pBt->pageSize - p->pBt->usableSize;
- return n;
+ return p->pBt->pageSize - p->pBt->usableSize;
}
+#endif /* SQLITE_HAS_CODEC || SQLITE_DEBUG */
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM)
/*
** Return the number of bytes of space at the end of every page that
 ** are intentionally left unused. This is the "reserved" space that is
** sometimes used by extensions.
-**
-** If SQLITE_HAS_CODEC is defined then the number returned is the
-** greater of the current reserved space and the maximum requested
-** reserve space.
*/
-SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree *p){
+SQLITE_PRIVATE int sqlite3BtreeGetReserve(Btree *p){
int n;
sqlite3BtreeEnter(p);
- n = sqlite3BtreeGetReserveNoMutex(p);
-#ifdef SQLITE_HAS_CODEC
- if( n<p->pBt->optimalReserve ) n = p->pBt->optimalReserve;
-#endif
+ n = p->pBt->pageSize - p->pBt->usableSize;
sqlite3BtreeLeave(p);
return n;
}
-
/*
** Set the maximum page count for a database if mxPage is positive.
** No changes are made if mxPage is 0 or negative.
@@ -57236,6 +52676,7 @@ SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){
sqlite3BtreeLeave(p);
return b;
}
+#endif /* !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) */
/*
** Change the 'auto-vacuum' property of the database. If the 'autoVacuum'
@@ -57320,9 +52761,6 @@ static int lockBtree(BtShared *pBt){
u32 usableSize;
u8 *page1 = pPage1->aData;
rc = SQLITE_NOTADB;
- /* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins
- ** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d
- ** 61 74 20 33 00. */
if( memcmp(page1, zMagicHeader, 16)!=0 ){
goto page1_init_failed;
}
@@ -57363,21 +52801,15 @@ static int lockBtree(BtShared *pBt){
}
#endif
- /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload
- ** fractions and the leaf payload fraction values must be 64, 32, and 32.
- **
+ /* The maximum embedded fraction must be exactly 25%. And the minimum
+ ** embedded fraction must be 12.5% for both leaf-data and non-leaf-data.
** The original design allowed these amounts to vary, but as of
** version 3.6.0, we require them to be fixed.
*/
if( memcmp(&page1[21], "\100\040\040",3)!=0 ){
goto page1_init_failed;
}
- /* EVIDENCE-OF: R-51873-39618 The page size for a database file is
- ** determined by the 2-byte integer located at an offset of 16 bytes from
- ** the beginning of the database file. */
pageSize = (page1[16]<<8) | (page1[17]<<16);
- /* EVIDENCE-OF: R-25008-21688 The size of a page is a power of two
- ** between 512 and 65536 inclusive. */
if( ((pageSize-1)&pageSize)!=0
|| pageSize>SQLITE_MAX_PAGE_SIZE
|| pageSize<=256
@@ -57385,13 +52817,6 @@ static int lockBtree(BtShared *pBt){
goto page1_init_failed;
}
assert( (pageSize & 7)==0 );
- /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte
- ** integer at offset 20 is the number of bytes of space at the end of
- ** each page to reserve for extensions.
- **
- ** EVIDENCE-OF: R-37497-42412 The size of the reserved region is
- ** determined by the one-byte unsigned integer found at an offset of 20
- ** into the database file header. */
usableSize = pageSize - page1[20];
if( (u32)pageSize!=pBt->pageSize ){
/* After reading the first page of the database assuming a page size
@@ -57412,9 +52837,6 @@ static int lockBtree(BtShared *pBt){
rc = SQLITE_CORRUPT_BKPT;
goto page1_init_failed;
}
- /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to
- ** be less than 480. In other words, if the page size is 512, then the
- ** reserved space size cannot exceed 32. */
if( usableSize<480 ){
goto page1_init_failed;
}
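
The expression (page1[16]<<8) | (page1[17]<<16) above decodes the 2-byte
big-endian page-size field at header offset 16. Shifting the second byte left
by 16 makes the special on-disk value 1 (bytes 00 01) come out as 65536, while
every ordinary power-of-two size has a zero low byte, so its result is
unchanged. A standalone check of that decoding, for illustration only:

    #include <stdio.h>

    static unsigned decode_page_size(unsigned char b16, unsigned char b17){
      return ((unsigned)b16 << 8) | ((unsigned)b17 << 16);
    }

    int main(void){
      printf("%u\n", decode_page_size(0x04, 0x00));  /* bytes 04 00 -> 1024 */
      printf("%u\n", decode_page_size(0x00, 0x01));  /* bytes 00 01 -> 65536 */
      return 0;
    }
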
@@ -57469,15 +52891,14 @@ page1_init_failed:
** false then all cursors are counted.
**
** For the purposes of this routine, a cursor is any cursor that
-** is capable of reading or writing to the database. Cursors that
+** is capable of reading or writing to the database. Cursors that
** have been tripped into the CURSOR_FAULT state are not counted.
*/
static int countValidCursors(BtShared *pBt, int wrOnly){
BtCursor *pCur;
int r = 0;
for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
- if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0)
- && pCur->eState!=CURSOR_FAULT ) r++;
+ if( (wrOnly==0 || pCur->wrFlag) && pCur->eState!=CURSOR_FAULT ) r++;
}
return r;
}
@@ -57495,11 +52916,11 @@ static void unlockBtreeIfUnused(BtShared *pBt){
assert( sqlite3_mutex_held(pBt->mutex) );
assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE );
if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){
- MemPage *pPage1 = pBt->pPage1;
- assert( pPage1->aData );
+ assert( pBt->pPage1->aData );
assert( sqlite3PagerRefcount(pBt->pPager)==1 );
+ assert( pBt->pPage1->aData );
+ releasePage(pBt->pPage1);
pBt->pPage1 = 0;
- releasePageNotNull(pPage1);
}
}
@@ -57804,17 +53225,15 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
u8 isInitOrig = pPage->isInit;
int i;
int nCell;
- int rc;
- rc = btreeInitPage(pPage);
- if( rc ) return rc;
+ btreeInitPage(pPage);
nCell = pPage->nCell;
for(i=0; i<nCell; i++){
u8 *pCell = findCell(pPage, i);
if( eType==PTRMAP_OVERFLOW1 ){
CellInfo info;
- pPage->xParseCell(pPage, pCell, &info);
+ btreeParseCellPtr(pPage, pCell, &info);
if( info.iOverflow
&& pCell+info.iOverflow+3<=pPage->aData+pPage->maskPage
&& iFrom==get4byte(&pCell[info.iOverflow])
@@ -57935,7 +53354,7 @@ static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8);
** calling this function again), return SQLITE_DONE. Or, if an error
** occurs, return some other error code.
**
-** More specifically, this function attempts to re-organize the database so
+** More specifically, this function attempts to re-organize the database so
** that the last page of the file currently in use is no longer in use.
**
** Parameter nFin is the number of pages that this database would contain
@@ -57943,7 +53362,7 @@ static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8);
**
** If the bCommit parameter is non-zero, this function assumes that the
** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE
-** or an error. bCommit is passed true for an auto-vacuum-on-commit
+** or an error. bCommit is passed true for an auto-vacuum-on-commit
** operation, or false for an incremental vacuum.
*/
static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){
@@ -58113,7 +53532,7 @@ SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){
static int autoVacuumCommit(BtShared *pBt){
int rc = SQLITE_OK;
Pager *pPager = pBt->pPager;
- VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); )
+ VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager) );
assert( sqlite3_mutex_held(pBt->mutex) );
invalidateAllOverflowCache(pBt);
@@ -58297,7 +53716,6 @@ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){
sqlite3BtreeLeave(p);
return rc;
}
- p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */
pBt->inTransaction = TRANS_READ;
btreeClearHasContent(pBt);
}
@@ -58323,91 +53741,60 @@ SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){
/*
** This routine sets the state to CURSOR_FAULT and the error
-** code to errCode for every cursor on any BtShared that pBtree
-** references. Or if the writeOnly flag is set to 1, then only
-** trip write cursors and leave read cursors unchanged.
-**
-** Every cursor is a candidate to be tripped, including cursors
-** that belong to other database connections that happen to be
-** sharing the cache with pBtree.
-**
-** This routine gets called when a rollback occurs. If the writeOnly
-** flag is true, then only write-cursors need be tripped - read-only
-** cursors save their current positions so that they may continue
-** following the rollback. Or, if writeOnly is false, all cursors are
-** tripped. In general, writeOnly is false if the transaction being
-** rolled back modified the database schema. In this case b-tree root
-** pages may be moved or deleted from the database altogether, making
-** it unsafe for read cursors to continue.
-**
-** If the writeOnly flag is true and an error is encountered while
-** saving the current position of a read-only cursor, all cursors,
-** including all read-cursors are tripped.
-**
-** SQLITE_OK is returned if successful, or if an error occurs while
-** saving a cursor position, an SQLite error code.
-*/
-SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode, int writeOnly){
+** code to errCode for every cursor on BtShared that pBtree
+** references.
+**
+** Every cursor is tripped, including cursors that belong
+** to other database connections that happen to be sharing
+** the cache with pBtree.
+**
+** This routine gets called when a rollback occurs.
+** All cursors using the same cache must be tripped
+** to prevent them from trying to use the btree after
+** the rollback. The rollback may have deleted tables
+** or moved root pages, so it is not sufficient to
+** save the state of the cursor. The cursor must be
+** invalidated.
+*/
+SQLITE_PRIVATE void sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode){
BtCursor *p;
- int rc = SQLITE_OK;
-
- assert( (writeOnly==0 || writeOnly==1) && BTCF_WriteFlag==1 );
- if( pBtree ){
- sqlite3BtreeEnter(pBtree);
- for(p=pBtree->pBt->pCursor; p; p=p->pNext){
- int i;
- if( writeOnly && (p->curFlags & BTCF_WriteFlag)==0 ){
- if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
- rc = saveCursorPosition(p);
- if( rc!=SQLITE_OK ){
- (void)sqlite3BtreeTripAllCursors(pBtree, rc, 0);
- break;
- }
- }
- }else{
- sqlite3BtreeClearCursor(p);
- p->eState = CURSOR_FAULT;
- p->skipNext = errCode;
- }
- for(i=0; i<=p->iPage; i++){
- releasePage(p->apPage[i]);
- p->apPage[i] = 0;
- }
+ if( pBtree==0 ) return;
+ sqlite3BtreeEnter(pBtree);
+ for(p=pBtree->pBt->pCursor; p; p=p->pNext){
+ int i;
+ sqlite3BtreeClearCursor(p);
+ p->eState = CURSOR_FAULT;
+ p->skipNext = errCode;
+ for(i=0; i<=p->iPage; i++){
+ releasePage(p->apPage[i]);
+ p->apPage[i] = 0;
}
- sqlite3BtreeLeave(pBtree);
}
- return rc;
+ sqlite3BtreeLeave(pBtree);
}
/*
-** Rollback the transaction in progress.
-**
-** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped).
-** Only write cursors are tripped if writeOnly is true but all cursors are
-** tripped if writeOnly is false. Any attempt to use
-** a tripped cursor will result in an error.
+** Rollback the transaction in progress. All cursors will be
+** invalidated by this operation. Any attempt to use a cursor
+** that was open at the beginning of this operation will result
+** in an error.
**
** This will release the write lock on the database file. If there
** are no active cursors, it also releases the read lock.
*/
-SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){
+SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode){
int rc;
BtShared *pBt = p->pBt;
MemPage *pPage1;
- assert( writeOnly==1 || writeOnly==0 );
- assert( tripCode==SQLITE_ABORT_ROLLBACK || tripCode==SQLITE_OK );
sqlite3BtreeEnter(p);
if( tripCode==SQLITE_OK ){
rc = tripCode = saveAllCursors(pBt, 0, 0);
- if( rc ) writeOnly = 0;
}else{
rc = SQLITE_OK;
}
if( tripCode ){
- int rc2 = sqlite3BtreeTripAllCursors(p, tripCode, writeOnly);
- assert( rc==SQLITE_OK || (writeOnly==0 && rc2==SQLITE_OK) );
- if( rc2!=SQLITE_OK ) rc = rc2;
+ sqlite3BtreeTripAllCursors(p, tripCode);
}
btreeIntegrity(p);
@@ -58442,7 +53829,7 @@ SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){
}
/*
-** Start a statement subtransaction. The subtransaction can be rolled
+** Start a statement subtransaction. The subtransaction can be rolled
** back independently of the main transaction. You must start a transaction
** before starting a subtransaction. The subtransaction is ended automatically
** if the main transaction commits or rolls back.
@@ -58555,7 +53942,6 @@ static int btreeCursor(
BtCursor *pCur /* Space for new cursor */
){
BtShared *pBt = p->pBt; /* Shared b-tree handle */
- BtCursor *pX; /* Looping over other all cursors */
assert( sqlite3BtreeHoldsMutex(p) );
assert( wrFlag==0 || wrFlag==1 );
@@ -58571,11 +53957,9 @@ static int btreeCursor(
assert( p->inTrans>TRANS_NONE );
assert( wrFlag==0 || p->inTrans==TRANS_WRITE );
assert( pBt->pPage1 && pBt->pPage1->aData );
- assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 );
- if( wrFlag ){
- allocateTempSpace(pBt);
- if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM;
+ if( NEVER(wrFlag && (pBt->btsFlags & BTS_READ_ONLY)!=0) ){
+ return SQLITE_READONLY;
}
if( iTable==1 && btreePagecount(pBt)==0 ){
assert( wrFlag==0 );
@@ -58589,20 +53973,14 @@ static int btreeCursor(
pCur->pKeyInfo = pKeyInfo;
pCur->pBtree = p;
pCur->pBt = pBt;
- assert( wrFlag==0 || wrFlag==BTCF_WriteFlag );
- pCur->curFlags = wrFlag;
- pCur->curPagerFlags = wrFlag ? 0 : PAGER_GET_READONLY;
- /* If there are two or more cursors on the same btree, then all such
- ** cursors *must* have the BTCF_Multiple flag set. */
- for(pX=pBt->pCursor; pX; pX=pX->pNext){
- if( pX->pgnoRoot==(Pgno)iTable ){
- pX->curFlags |= BTCF_Multiple;
- pCur->curFlags |= BTCF_Multiple;
- }
- }
+ pCur->wrFlag = (u8)wrFlag;
pCur->pNext = pBt->pCursor;
+ if( pCur->pNext ){
+ pCur->pNext->pPrev = pCur;
+ }
pBt->pCursor = pCur;
pCur->eState = CURSOR_INVALID;
+ pCur->cachedRowid = 0;
return SQLITE_OK;
}
SQLITE_PRIVATE int sqlite3BtreeCursor(
@@ -58613,13 +53991,9 @@ SQLITE_PRIVATE int sqlite3BtreeCursor(
BtCursor *pCur /* Write new cursor here */
){
int rc;
- if( iTable<1 ){
- rc = SQLITE_CORRUPT_BKPT;
- }else{
- sqlite3BtreeEnter(p);
- rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur);
- sqlite3BtreeLeave(p);
- }
+ sqlite3BtreeEnter(p);
+ rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur);
+ sqlite3BtreeLeave(p);
return rc;
}
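/* Sketch (illustration only; the helper name openAndRewind is invented):
** the usual way the cursor-opening interface above is combined with
** sqlite3BtreeCursorZero() and sqlite3BtreeFirst() to start a read-only
** scan of a table b-tree.  A real caller must already hold a read
** transaction on the Btree. */
static int openAndRewind(Btree *p, int iTable, BtCursor *pCur, int *pEmpty){
  int rc;
  sqlite3BtreeCursorZero(pCur);                      /* clear any prior state */
  rc = sqlite3BtreeCursor(p, iTable, 0, 0, pCur);    /* wrFlag=0, table b-tree */
  if( rc==SQLITE_OK ){
    rc = sqlite3BtreeFirst(pCur, pEmpty);            /* *pEmpty=1 if no rows */
  }
  return rc;
}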
@@ -58648,6 +54022,36 @@ SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){
}
/*
+** Set the cached rowid value of every cursor in the same database file
+** as pCur and having the same root page number as pCur. The value is
+** set to iRowid.
+**
+** Only positive rowid values are considered valid for this cache.
+** The cache is initialized to zero, indicating an invalid cache.
+** A btree will work fine with zero or negative rowids. We just cannot
+** cache zero or negative rowids, which means tables that use zero or
+** negative rowids might run a little slower. But in practice, zero
+** or negative rowids are very uncommon so this should not be a problem.
+*/
+SQLITE_PRIVATE void sqlite3BtreeSetCachedRowid(BtCursor *pCur, sqlite3_int64 iRowid){
+ BtCursor *p;
+ for(p=pCur->pBt->pCursor; p; p=p->pNext){
+ if( p->pgnoRoot==pCur->pgnoRoot ) p->cachedRowid = iRowid;
+ }
+ assert( pCur->cachedRowid==iRowid );
+}
+
+/*
+** Return the cached rowid for the given cursor. A negative or zero
+** return value indicates that the rowid cache is invalid and should be
+** ignored. If the rowid cache has never before been set, then a
+** zero is returned.
+*/
+SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeGetCachedRowid(BtCursor *pCur){
+ return pCur->cachedRowid;
+}
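+
+/* Sketch of how the rowid cache above can be used (an assumption about the
+** caller, loosely modeled on new-rowid allocation; the helper name
+** newRowidHint is invented and the MAX_ROWID overflow case is ignored). */
+static int newRowidHint(BtCursor *pCur, i64 *pRowid){
+  int rc = SQLITE_OK;
+  i64 v = sqlite3BtreeGetCachedRowid(pCur);
+  if( v<=0 ){                          /* cache invalid: seek to largest key */
+    int res = 0;
+    rc = sqlite3BtreeLast(pCur, &res);
+    if( rc ) return rc;
+    if( res ){
+      v = 0;                           /* table is empty */
+    }else{
+      rc = sqlite3BtreeKeySize(pCur, &v);
+      if( rc ) return rc;
+    }
+  }
+  *pRowid = v+1;
+  sqlite3BtreeSetCachedRowid(pCur, v+1);  /* share the hint with all cursors */
+  return SQLITE_OK;
+}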
+
+/*
** Close a cursor. The read lock on the database file is released
** when the last cursor is closed.
*/
@@ -58658,24 +54062,19 @@ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){
BtShared *pBt = pCur->pBt;
sqlite3BtreeEnter(pBtree);
sqlite3BtreeClearCursor(pCur);
- assert( pBt->pCursor!=0 );
- if( pBt->pCursor==pCur ){
- pBt->pCursor = pCur->pNext;
+ if( pCur->pPrev ){
+ pCur->pPrev->pNext = pCur->pNext;
}else{
- BtCursor *pPrev = pBt->pCursor;
- do{
- if( pPrev->pNext==pCur ){
- pPrev->pNext = pCur->pNext;
- break;
- }
- pPrev = pPrev->pNext;
- }while( ALWAYS(pPrev) );
+ pBt->pCursor = pCur->pNext;
+ }
+ if( pCur->pNext ){
+ pCur->pNext->pPrev = pCur->pPrev;
}
for(i=0; i<=pCur->iPage; i++){
releasePage(pCur->apPage[i]);
}
unlockBtreeIfUnused(pBt);
- sqlite3_free(pCur->aOverflow);
+ invalidateOverflowCache(pCur);
/* sqlite3_free(pCur); */
sqlite3BtreeLeave(pBtree);
}
@@ -58689,6 +54088,13 @@ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){
**
** BtCursor.info is a cache of the information in the current cell.
** Using this cache reduces the number of calls to btreeParseCell().
+**
+** 2007-06-25: There is a bug in some versions of MSVC that causes the
+** compiler to crash when getCellInfo() is implemented as a macro.
+** But there is a measurable speed advantage to using the macro on gcc
+** (when lower optimization settings like -Os or -O0 are used and the
+** compiler is not doing aggressive inlining.) So we use a real function
+** for MSVC and a macro for everything else. Ticket #2457.
*/
#ifndef NDEBUG
static void assertCellInfo(BtCursor *pCur){
@@ -58696,20 +54102,33 @@ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){
int iPage = pCur->iPage;
memset(&info, 0, sizeof(info));
btreeParseCell(pCur->apPage[iPage], pCur->aiIdx[iPage], &info);
- assert( CORRUPT_DB || memcmp(&info, &pCur->info, sizeof(info))==0 );
+ assert( memcmp(&info, &pCur->info, sizeof(info))==0 );
}
#else
#define assertCellInfo(x)
#endif
-static SQLITE_NOINLINE void getCellInfo(BtCursor *pCur){
- if( pCur->info.nSize==0 ){
- int iPage = pCur->iPage;
- pCur->curFlags |= BTCF_ValidNKey;
- btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info);
- }else{
- assertCellInfo(pCur);
+#ifdef _MSC_VER
+ /* Use a real function in MSVC to work around bugs in that compiler. */
+ static void getCellInfo(BtCursor *pCur){
+ if( pCur->info.nSize==0 ){
+ int iPage = pCur->iPage;
+ btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info);
+ pCur->validNKey = 1;
+ }else{
+ assertCellInfo(pCur);
+ }
}
-}
+#else /* if not _MSC_VER */
+ /* Use a macro in all other compilers so that the function is inlined */
+#define getCellInfo(pCur) \
+ if( pCur->info.nSize==0 ){ \
+ int iPage = pCur->iPage; \
+ btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info); \
+ pCur->validNKey = 1; \
+ }else{ \
+ assertCellInfo(pCur); \
+ }
+#endif /* _MSC_VER */
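+
+/* Sketch (helper name invented): regardless of whether getCellInfo() is the
+** MSVC function or the macro above, callers use it as a plain statement and
+** then read the parsed cell out of pCur->info. */
+static i64 currentKey(BtCursor *pCur){
+  getCellInfo(pCur);           /* make sure pCur->info is populated */
+  return pCur->info.nKey;      /* the rowid for intkey (table) b-trees */
+}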
#ifndef NDEBUG /* The next routine used only within assert() statements */
/*
@@ -58736,9 +54155,13 @@ SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){
*/
SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){
assert( cursorHoldsMutex(pCur) );
- assert( pCur->eState==CURSOR_VALID );
- getCellInfo(pCur);
- *pSize = pCur->info.nKey;
+ assert( pCur->eState==CURSOR_INVALID || pCur->eState==CURSOR_VALID );
+ if( pCur->eState!=CURSOR_VALID ){
+ *pSize = 0;
+ }else{
+ getCellInfo(pCur);
+ *pSize = pCur->info.nKey;
+ }
return SQLITE_OK;
}
@@ -58757,11 +54180,8 @@ SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){
SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize){
assert( cursorHoldsMutex(pCur) );
assert( pCur->eState==CURSOR_VALID );
- assert( pCur->iPage>=0 );
- assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
- assert( pCur->apPage[pCur->iPage]->intKeyLeaf==1 );
getCellInfo(pCur);
- *pSize = pCur->info.nPayload;
+ *pSize = pCur->info.nData;
return SQLITE_OK;
}
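/* Sketch (helper name invented; error handling simplified): reading the
** complete data payload of the current entry by combining the size routine
** above with sqlite3BtreeData(), using the allocator from this file. */
static int readWholeData(BtCursor *pCur, void **ppData, u32 *pnData){
  u32 n = 0;
  void *p;
  int rc = sqlite3BtreeDataSize(pCur, &n);
  if( rc ) return rc;
  p = sqlite3_malloc(n>0 ? (int)n : 1);
  if( p==0 ) return SQLITE_NOMEM;
  rc = sqlite3BtreeData(pCur, 0, n, p);     /* copy n bytes from offset 0 */
  if( rc ){
    sqlite3_free(p);
    p = 0;
  }
  *ppData = p;
  *pnData = n;
  return rc;
}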
@@ -58875,12 +54295,10 @@ static int copyPayload(
/*
** This function is used to read or overwrite payload information
-** for the entry that the pCur cursor is pointing to. The eOp
-** argument is interpreted as follows:
-**
-** 0: The operation is a read. Populate the overflow cache.
-** 1: The operation is a write. Populate the overflow cache.
-** 2: The operation is a read. Do not populate the overflow cache.
+** for the entry that the pCur cursor is pointing to. If the eOp
+** parameter is 0, this is a read operation (data copied into
+** buffer pBuf). If it is non-zero, a write (data copied from
+** buffer pBuf).
**
** A total of "amt" bytes are read or written beginning at "offset".
** Data is read to or from the buffer pBuf.
@@ -58888,11 +54306,11 @@ static int copyPayload(
** The content being read or written might appear on the main page
** or be scattered out on multiple overflow pages.
**
-** If the current cursor entry uses one or more overflow pages and the
-** eOp argument is not 2, this function may allocate space for and lazily
-** populates the overflow page-list cache array (BtCursor.aOverflow).
-** Subsequent calls use this cache to make seeking to the supplied offset
-** more efficient.
+** If the BtCursor.isIncrblobHandle flag is set, and the current
+** cursor entry uses one or more overflow pages, this function
+** allocates space for and lazily populates the overflow page-list
+** cache array (BtCursor.aOverflow). Subsequent calls use this
+** cache to make seeking to the supplied offset more efficient.
**
** Once an overflow page-list cache has been allocated, it may be
** invalidated if some other cursor writes to the same table, or if
@@ -58912,28 +54330,23 @@ static int accessPayload(
){
unsigned char *aPayload;
int rc = SQLITE_OK;
+ u32 nKey;
int iIdx = 0;
MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */
BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */
-#ifdef SQLITE_DIRECT_OVERFLOW_READ
- unsigned char * const pBufStart = pBuf;
- int bEnd; /* True if reading to end of data */
-#endif
assert( pPage );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
assert( cursorHoldsMutex(pCur) );
- assert( eOp!=2 || offset==0 ); /* Always start from beginning for eOp==2 */
getCellInfo(pCur);
- aPayload = pCur->info.pPayload;
-#ifdef SQLITE_DIRECT_OVERFLOW_READ
- bEnd = offset+amt==pCur->info.nPayload;
-#endif
- assert( offset+amt <= pCur->info.nPayload );
+ aPayload = pCur->info.pCell + pCur->info.nHeader;
+ nKey = (pPage->intKey ? 0 : (int)pCur->info.nKey);
- if( &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize] ){
+ if( NEVER(offset+amt > nKey+pCur->info.nData)
+ || &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize]
+ ){
/* Trying to read or write past the end of the data is an error */
return SQLITE_CORRUPT_BKPT;
}
@@ -58944,7 +54357,7 @@ static int accessPayload(
if( a+offset>pCur->info.nLocal ){
a = pCur->info.nLocal - offset;
}
- rc = copyPayload(&aPayload[offset], pBuf, a, (eOp & 0x01), pPage->pDbPage);
+ rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage);
offset = 0;
pBuf += a;
amt -= a;
@@ -58952,37 +54365,27 @@ static int accessPayload(
offset -= pCur->info.nLocal;
}
-
if( rc==SQLITE_OK && amt>0 ){
const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */
Pgno nextPage;
nextPage = get4byte(&aPayload[pCur->info.nLocal]);
- /* If the BtCursor.aOverflow[] has not been allocated, allocate it now.
- ** Except, do not allocate aOverflow[] for eOp==2.
- **
- ** The aOverflow[] array is sized at one entry for each overflow page
- ** in the overflow chain. The page number of the first overflow page is
- ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array
- ** means "not yet known" (the cache is lazily populated).
+#ifndef SQLITE_OMIT_INCRBLOB
+ /* If the isIncrblobHandle flag is set and the BtCursor.aOverflow[]
+ ** has not been allocated, allocate it now. The array is sized at
+ ** one entry for each overflow page in the overflow chain. The
+ ** page number of the first overflow page is stored in aOverflow[0],
+ ** etc. A value of 0 in the aOverflow[] array means "not yet known"
+ ** (the cache is lazily populated).
*/
- if( eOp!=2 && (pCur->curFlags & BTCF_ValidOvfl)==0 ){
+ if( pCur->isIncrblobHandle && !pCur->aOverflow ){
int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize;
- if( nOvfl>pCur->nOvflAlloc ){
- Pgno *aNew = (Pgno*)sqlite3Realloc(
- pCur->aOverflow, nOvfl*2*sizeof(Pgno)
- );
- if( aNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- pCur->nOvflAlloc = nOvfl*2;
- pCur->aOverflow = aNew;
- }
- }
- if( rc==SQLITE_OK ){
- memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
- pCur->curFlags |= BTCF_ValidOvfl;
+ pCur->aOverflow = (Pgno *)sqlite3MallocZero(sizeof(Pgno)*nOvfl);
+ /* nOvfl is always positive. If it were zero, fetchPayload would have
+ ** been used instead of this routine. */
+ if( ALWAYS(nOvfl) && !pCur->aOverflow ){
+ rc = SQLITE_NOMEM;
}
}
@@ -58990,21 +54393,22 @@ static int accessPayload(
** entry for the first required overflow page is valid, skip
** directly to it.
*/
- if( (pCur->curFlags & BTCF_ValidOvfl)!=0
- && pCur->aOverflow[offset/ovflSize]
- ){
+ if( pCur->aOverflow && pCur->aOverflow[offset/ovflSize] ){
iIdx = (offset/ovflSize);
nextPage = pCur->aOverflow[iIdx];
offset = (offset%ovflSize);
}
+#endif
for( ; rc==SQLITE_OK && amt>0 && nextPage; iIdx++){
+#ifndef SQLITE_OMIT_INCRBLOB
/* If required, populate the overflow page-list cache. */
- if( (pCur->curFlags & BTCF_ValidOvfl)!=0 ){
+ if( pCur->aOverflow ){
assert(!pCur->aOverflow[iIdx] || pCur->aOverflow[iIdx]==nextPage);
pCur->aOverflow[iIdx] = nextPage;
}
+#endif
if( offset>=ovflSize ){
/* The only reason to read this page is to obtain the page
@@ -59012,18 +54416,13 @@ static int accessPayload(
** data is not required. So first try to lookup the overflow
** page-list cache, if any, then fall back to the getOverflowPage()
** function.
- **
- ** Note that the aOverflow[] array must be allocated because eOp!=2
- ** here. If eOp==2, then offset==0 and this branch is never taken.
*/
- assert( eOp!=2 );
- assert( pCur->curFlags & BTCF_ValidOvfl );
- assert( pCur->pBtree->db==pBt->db );
- if( pCur->aOverflow[iIdx+1] ){
+#ifndef SQLITE_OMIT_INCRBLOB
+ if( pCur->aOverflow && pCur->aOverflow[iIdx+1] ){
nextPage = pCur->aOverflow[iIdx+1];
- }else{
+ } else
+#endif
rc = getOverflowPage(pBt, nextPage, 0, &nextPage);
- }
offset -= ovflSize;
}else{
/* Need to read this page properly. It contains some of the
@@ -59045,24 +54444,19 @@ static int accessPayload(
** 3) the database is file-backed, and
** 4) there is no open write-transaction, and
** 5) the database is not a WAL database,
- ** 6) all data from the page is being read.
- ** 7) at least 4 bytes have already been read into the output buffer
**
** then data can be read directly from the database file into the
** output buffer, bypassing the page-cache altogether. This speeds
** up loading large records that span many overflow pages.
*/
- if( (eOp&0x01)==0 /* (1) */
+ if( eOp==0 /* (1) */
&& offset==0 /* (2) */
- && (bEnd || a==ovflSize) /* (6) */
&& pBt->inTransaction==TRANS_READ /* (4) */
&& (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (3) */
&& pBt->pPage1->aData[19]==0x01 /* (5) */
- && &pBuf[-4]>=pBufStart /* (7) */
){
u8 aSave[4];
u8 *aWrite = &pBuf[-4];
- assert( aWrite>=pBufStart ); /* hence (7) */
memcpy(aSave, aWrite, 4);
rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
nextPage = get4byte(aWrite);
@@ -59073,12 +54467,12 @@ static int accessPayload(
{
DbPage *pDbPage;
rc = sqlite3PagerAcquire(pBt->pPager, nextPage, &pDbPage,
- ((eOp&0x01)==0 ? PAGER_GET_READONLY : 0)
+ (eOp==0 ? PAGER_GET_READONLY : 0)
);
if( rc==SQLITE_OK ){
aPayload = sqlite3PagerGetData(pDbPage);
nextPage = get4byte(aPayload);
- rc = copyPayload(&aPayload[offset+4], pBuf, a, (eOp&0x01), pDbPage);
+ rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage);
sqlite3PagerUnref(pDbPage);
offset = 0;
}
@@ -59097,7 +54491,7 @@ static int accessPayload(
/*
** Read part of the key associated with cursor pCur. Exactly
-** "amt" bytes will be transferred into pBuf[]. The transfer
+** "amt" bytes will be transfered into pBuf[]. The transfer
** begins at "offset".
**
** The caller must ensure that pCur is pointing to a valid row
@@ -59147,10 +54541,10 @@ SQLITE_PRIVATE int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *p
/*
** Return a pointer to payload information from the entry that the
** pCur cursor is pointing to. The pointer is to the beginning of
-** the key if index btrees (pPage->intKey==0) and is the data for
-** table btrees (pPage->intKey==1). The number of bytes of available
-** key/data is written into *pAmt. If *pAmt==0, then the value
-** returned will not be a valid pointer.
+** the key if skipKey==0 and it points to the beginning of data if
+** skipKey==1. The number of bytes of available key/data is written
+** into *pAmt. If *pAmt==0, then the value returned will not be
+** a valid pointer.
**
** This routine is an optimization. It is common for the entire key
** and data to fit on the local page and for there to be no overflow
@@ -59163,23 +54557,41 @@ SQLITE_PRIVATE int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *p
** page of the database. The data might change or move the next time
** any btree routine is called.
*/
-static const void *fetchPayload(
+static const unsigned char *fetchPayload(
BtCursor *pCur, /* Cursor pointing to entry to read from */
- u32 *pAmt /* Write the number of available bytes here */
+ u32 *pAmt, /* Write the number of available bytes here */
+ int skipKey /* read beginning at data if this is true */
){
- u32 amt;
+ unsigned char *aPayload;
+ MemPage *pPage;
+ u32 nKey;
+ u32 nLocal;
+
assert( pCur!=0 && pCur->iPage>=0 && pCur->apPage[pCur->iPage]);
assert( pCur->eState==CURSOR_VALID );
- assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
assert( cursorHoldsMutex(pCur) );
- assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
- assert( pCur->info.nSize>0 );
- assert( pCur->info.pPayload>pCur->apPage[pCur->iPage]->aData || CORRUPT_DB );
- assert( pCur->info.pPayload<pCur->apPage[pCur->iPage]->aDataEnd ||CORRUPT_DB);
- amt = (int)(pCur->apPage[pCur->iPage]->aDataEnd - pCur->info.pPayload);
- if( pCur->info.nLocal<amt ) amt = pCur->info.nLocal;
- *pAmt = amt;
- return (void*)pCur->info.pPayload;
+ pPage = pCur->apPage[pCur->iPage];
+ assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
+ if( pCur->info.nSize==0 ){
+ btreeParseCell(pCur->apPage[pCur->iPage], pCur->aiIdx[pCur->iPage],
+ &pCur->info);
+ }
+ aPayload = pCur->info.pCell;
+ aPayload += pCur->info.nHeader;
+ if( pPage->intKey ){
+ nKey = 0;
+ }else{
+ nKey = (int)pCur->info.nKey;
+ }
+ if( skipKey ){
+ aPayload += nKey;
+ nLocal = pCur->info.nLocal - nKey;
+ }else{
+ nLocal = pCur->info.nLocal;
+ assert( nLocal<=nKey );
+ }
+ *pAmt = nLocal;
+ return aPayload;
}
@@ -59198,10 +54610,22 @@ static const void *fetchPayload(
** in the common case where no overflow pages are used.
*/
SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor *pCur, u32 *pAmt){
- return fetchPayload(pCur, pAmt);
+ const void *p = 0;
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ assert( cursorHoldsMutex(pCur) );
+ if( ALWAYS(pCur->eState==CURSOR_VALID) ){
+ p = (const void*)fetchPayload(pCur, pAmt, 0);
+ }
+ return p;
}
SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor *pCur, u32 *pAmt){
- return fetchPayload(pCur, pAmt);
+ const void *p = 0;
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ assert( cursorHoldsMutex(pCur) );
+ if( ALWAYS(pCur->eState==CURSOR_VALID) ){
+ p = (const void*)fetchPayload(pCur, pAmt, 1);
+ }
+ return p;
}
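/* Sketch of the optimization pattern described above (helper name invented):
** use the zero-copy DataFetch() when the whole payload is local, and fall
** back to the copying sqlite3BtreeData() when it spills to overflow pages. */
static int readData(BtCursor *pCur, u32 nWant, void *pBuf){
  u32 nAvail = 0;
  const void *pLocal = sqlite3BtreeDataFetch(pCur, &nAvail);
  if( pLocal && nAvail>=nWant ){
    memcpy(pBuf, pLocal, nWant);         /* everything is on the local page */
    return SQLITE_OK;
  }
  return sqlite3BtreeData(pCur, 0, nWant, pBuf);   /* spans overflow pages */
}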
@@ -59215,6 +54639,9 @@ SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor *pCur, u32 *pAmt){
** vice-versa).
*/
static int moveToChild(BtCursor *pCur, u32 newPgno){
+ int rc;
+ int i = pCur->iPage;
+ MemPage *pNewPage;
BtShared *pBt = pCur->pBt;
assert( cursorHoldsMutex(pCur) );
@@ -59224,15 +54651,22 @@ static int moveToChild(BtCursor *pCur, u32 newPgno){
if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
return SQLITE_CORRUPT_BKPT;
}
- pCur->info.nSize = 0;
- pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ rc = getAndInitPage(pBt, newPgno, &pNewPage,
+ pCur->wrFlag==0 ? PAGER_GET_READONLY : 0);
+ if( rc ) return rc;
+ pCur->apPage[i+1] = pNewPage;
+ pCur->aiIdx[i+1] = 0;
pCur->iPage++;
- pCur->aiIdx[pCur->iPage] = 0;
- return getAndInitPage(pBt, newPgno, &pCur->apPage[pCur->iPage],
- pCur, pCur->curPagerFlags);
+
+ pCur->info.nSize = 0;
+ pCur->validNKey = 0;
+ if( pNewPage->nCell<1 || pNewPage->intKey!=pCur->apPage[i]->intKey ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ return SQLITE_OK;
}
-#if SQLITE_DEBUG
+#if 0
/*
** Page pParent is an internal (non-leaf) tree page. This function
** asserts that page number iChild is the left-child if the iIdx'th
@@ -59241,8 +54675,6 @@ static int moveToChild(BtCursor *pCur, u32 newPgno){
** the page.
*/
static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){
- if( CORRUPT_DB ) return; /* The conditions tested below might not be true
- ** in a corrupt database */
assert( iIdx<=pParent->nCell );
if( iIdx==pParent->nCell ){
assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild );
@@ -59267,15 +54699,25 @@ static void moveToParent(BtCursor *pCur){
assert( pCur->eState==CURSOR_VALID );
assert( pCur->iPage>0 );
assert( pCur->apPage[pCur->iPage] );
+
+ /* UPDATE: It is actually possible for the condition tested by the assert
+ ** below to be untrue if the database file is corrupt. This can occur if
+ ** one cursor has modified page pParent while a reference to it is held
+ ** by a second cursor, which can only happen if a single page is linked
+ ** into more than one b-tree structure in a corrupt database. */
+#if 0
assertParentIndex(
pCur->apPage[pCur->iPage-1],
pCur->aiIdx[pCur->iPage-1],
pCur->apPage[pCur->iPage]->pgno
);
+#endif
testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell );
+
+ releasePage(pCur->apPage[pCur->iPage]);
+ pCur->iPage--;
pCur->info.nSize = 0;
- pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
- releasePageNotNull(pCur->apPage[pCur->iPage--]);
+ pCur->validNKey = 0;
}
/*
@@ -59302,6 +54744,8 @@ static void moveToParent(BtCursor *pCur){
static int moveToRoot(BtCursor *pCur){
MemPage *pRoot;
int rc = SQLITE_OK;
+ Btree *p = pCur->pBtree;
+ BtShared *pBt = p->pBt;
assert( cursorHoldsMutex(pCur) );
assert( CURSOR_INVALID < CURSOR_REQUIRESEEK );
@@ -59316,56 +54760,56 @@ static int moveToRoot(BtCursor *pCur){
}
if( pCur->iPage>=0 ){
- while( pCur->iPage ){
- assert( pCur->apPage[pCur->iPage]!=0 );
- releasePageNotNull(pCur->apPage[pCur->iPage--]);
+ int i;
+ for(i=1; i<=pCur->iPage; i++){
+ releasePage(pCur->apPage[i]);
}
+ pCur->iPage = 0;
}else if( pCur->pgnoRoot==0 ){
pCur->eState = CURSOR_INVALID;
return SQLITE_OK;
}else{
- assert( pCur->iPage==(-1) );
- rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->apPage[0],
- 0, pCur->curPagerFlags);
+ rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->apPage[0],
+ pCur->wrFlag==0 ? PAGER_GET_READONLY : 0);
if( rc!=SQLITE_OK ){
pCur->eState = CURSOR_INVALID;
return rc;
}
pCur->iPage = 0;
- pCur->curIntKey = pCur->apPage[0]->intKey;
+
+ /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor
+ ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is
+ ** NULL, the caller expects a table b-tree. If this is not the case,
+ ** return an SQLITE_CORRUPT error. */
+ assert( pCur->apPage[0]->intKey==1 || pCur->apPage[0]->intKey==0 );
+ if( (pCur->pKeyInfo==0)!=pCur->apPage[0]->intKey ){
+ return SQLITE_CORRUPT_BKPT;
+ }
}
+
+ /* Assert that the root page is of the correct type. This must be the
+ ** case as the call to this function that loaded the root-page (either
+ ** this call or a previous invocation) would have detected corruption
+ ** if the assumption were not true, and it is not possible for the flags
+ ** byte to have been modified while this cursor is holding a reference
+ ** to the page. */
pRoot = pCur->apPage[0];
assert( pRoot->pgno==pCur->pgnoRoot );
-
- /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor
- ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is
- ** NULL, the caller expects a table b-tree. If this is not the case,
- ** return an SQLITE_CORRUPT error.
- **
- ** Earlier versions of SQLite assumed that this test could not fail
- ** if the root page was already loaded when this function was called (i.e.
- ** if pCur->iPage>=0). But this is not so if the database is corrupted
- ** in such a way that page pRoot is linked into a second b-tree table
- ** (or the freelist). */
- assert( pRoot->intKey==1 || pRoot->intKey==0 );
- if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){
- return SQLITE_CORRUPT_BKPT;
- }
+ assert( pRoot->isInit && (pCur->pKeyInfo==0)==pRoot->intKey );
pCur->aiIdx[0] = 0;
pCur->info.nSize = 0;
- pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl);
+ pCur->atLast = 0;
+ pCur->validNKey = 0;
- if( pRoot->nCell>0 ){
- pCur->eState = CURSOR_VALID;
- }else if( !pRoot->leaf ){
+ if( pRoot->nCell==0 && !pRoot->leaf ){
Pgno subpage;
if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT;
subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]);
pCur->eState = CURSOR_VALID;
rc = moveToChild(pCur, subpage);
}else{
- pCur->eState = CURSOR_INVALID;
+ pCur->eState = ((pRoot->nCell>0)?CURSOR_VALID:CURSOR_INVALID);
}
return rc;
}
@@ -59409,16 +54853,17 @@ static int moveToRightmost(BtCursor *pCur){
assert( cursorHoldsMutex(pCur) );
assert( pCur->eState==CURSOR_VALID );
- while( !(pPage = pCur->apPage[pCur->iPage])->leaf ){
+ while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){
pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
pCur->aiIdx[pCur->iPage] = pPage->nCell;
rc = moveToChild(pCur, pgno);
- if( rc ) return rc;
}
- pCur->aiIdx[pCur->iPage] = pPage->nCell-1;
- assert( pCur->info.nSize==0 );
- assert( (pCur->curFlags & BTCF_ValidNKey)==0 );
- return SQLITE_OK;
+ if( rc==SQLITE_OK ){
+ pCur->aiIdx[pCur->iPage] = pPage->nCell-1;
+ pCur->info.nSize = 0;
+ pCur->validNKey = 0;
+ }
+ return rc;
}
/* Move the cursor to the first entry in the table. Return SQLITE_OK
@@ -59455,7 +54900,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
/* If the cursor already points to the last entry, this is a no-op. */
- if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){
+ if( CURSOR_VALID==pCur->eState && pCur->atLast ){
#ifdef SQLITE_DEBUG
/* This block serves to assert() that the cursor really does point
** to the last entry in the b-tree. */
@@ -59478,12 +54923,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
assert( pCur->eState==CURSOR_VALID );
*pRes = 0;
rc = moveToRightmost(pCur);
- if( rc==SQLITE_OK ){
- pCur->curFlags |= BTCF_AtLast;
- }else{
- pCur->curFlags &= ~BTCF_AtLast;
- }
-
+ pCur->atLast = rc==SQLITE_OK ?1:0;
}
}
return rc;
@@ -59525,7 +54965,6 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
int *pRes /* Write search results here */
){
int rc;
- RecordCompare xRecordCompare;
assert( cursorHoldsMutex(pCur) );
assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
@@ -59534,30 +54973,19 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
/* If the cursor is already positioned at the point we are trying
** to move to, then just return without doing any work */
- if( pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0
- && pCur->curIntKey
+ if( pCur->eState==CURSOR_VALID && pCur->validNKey
+ && pCur->apPage[0]->intKey
){
if( pCur->info.nKey==intKey ){
*pRes = 0;
return SQLITE_OK;
}
- if( (pCur->curFlags & BTCF_AtLast)!=0 && pCur->info.nKey<intKey ){
+ if( pCur->atLast && pCur->info.nKey<intKey ){
*pRes = -1;
return SQLITE_OK;
}
}
- if( pIdxKey ){
- xRecordCompare = sqlite3VdbeFindCompare(pIdxKey);
- pIdxKey->errCode = 0;
- assert( pIdxKey->default_rc==1
- || pIdxKey->default_rc==0
- || pIdxKey->default_rc==-1
- );
- }else{
- xRecordCompare = 0; /* All keys are integers */
- }
-
rc = moveToRoot(pCur);
if( rc ){
return rc;
@@ -59570,8 +54998,7 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
return SQLITE_OK;
}
- assert( pCur->apPage[0]->intKey==pCur->curIntKey );
- assert( pCur->curIntKey || pIdxKey );
+ assert( pCur->apPage[0]->intKey || pIdxKey );
for(;;){
int lwr, upr, idx, c;
Pgno chldPg;
@@ -59591,11 +55018,11 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
assert( biasRight==0 || biasRight==1 );
idx = upr>>(1-biasRight); /* idx = biasRight ? upr : (lwr+upr)/2; */
pCur->aiIdx[pCur->iPage] = (u16)idx;
- if( xRecordCompare==0 ){
+ if( pPage->intKey ){
for(;;){
i64 nCellKey;
- pCell = findCellPastPtr(pPage, idx);
- if( pPage->intKeyLeaf ){
+ pCell = findCell(pPage, idx) + pPage->childPtrSize;
+ if( pPage->hasData ){
while( 0x80 <= *(pCell++) ){
if( pCell>=pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT;
}
@@ -59609,7 +55036,7 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
if( lwr>upr ){ c = +1; break; }
}else{
assert( nCellKey==intKey );
- pCur->curFlags |= BTCF_ValidNKey;
+ pCur->validNKey = 1;
pCur->info.nKey = nCellKey;
pCur->aiIdx[pCur->iPage] = (u16)idx;
if( !pPage->leaf ){
@@ -59626,8 +55053,8 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
}
}else{
for(;;){
- int nCell; /* Size of the pCell cell in bytes */
- pCell = findCellPastPtr(pPage, idx);
+ int nCell;
+ pCell = findCell(pPage, idx) + pPage->childPtrSize;
/* The maximum supported page-size is 65536 bytes. This means that
** the maximum number of record bytes stored on an index B-Tree
@@ -59638,59 +55065,45 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
** 2 bytes of the cell.
*/
nCell = pCell[0];
- if( nCell<=pPage->max1bytePayload ){
+ if( nCell<=pPage->max1bytePayload
+ /* && (pCell+nCell)<pPage->aDataEnd */
+ ){
/* This branch runs if the record-size field of the cell is a
** single byte varint and the record fits entirely on the main
** b-tree page. */
testcase( pCell+nCell+1==pPage->aDataEnd );
- c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey);
+ c = sqlite3VdbeRecordCompare(nCell, (void*)&pCell[1], pIdxKey);
}else if( !(pCell[1] & 0x80)
&& (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal
+ /* && (pCell+nCell+2)<=pPage->aDataEnd */
){
/* The record-size field is a 2 byte varint and the record
** fits entirely on the main b-tree page. */
testcase( pCell+nCell+2==pPage->aDataEnd );
- c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey);
+ c = sqlite3VdbeRecordCompare(nCell, (void*)&pCell[2], pIdxKey);
}else{
/* The record flows over onto one or more overflow pages. In
** this case the whole cell needs to be parsed, a buffer allocated
** and accessPayload() used to retrieve the record into the
- ** buffer before VdbeRecordCompare() can be called.
- **
- ** If the record is corrupt, the xRecordCompare routine may read
- ** up to two varints past the end of the buffer. An extra 18
- ** bytes of padding is allocated at the end of the buffer in
- ** case this happens. */
+ ** buffer before VdbeRecordCompare() can be called. */
void *pCellKey;
u8 * const pCellBody = pCell - pPage->childPtrSize;
- pPage->xParseCell(pPage, pCellBody, &pCur->info);
+ btreeParseCellPtr(pPage, pCellBody, &pCur->info);
nCell = (int)pCur->info.nKey;
- testcase( nCell<0 ); /* True if key size is 2^32 or more */
- testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */
- testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */
- testcase( nCell==2 ); /* Minimum legal index key size */
- if( nCell<2 ){
- rc = SQLITE_CORRUPT_BKPT;
- goto moveto_finish;
- }
- pCellKey = sqlite3Malloc( nCell+18 );
+ pCellKey = sqlite3Malloc( nCell );
if( pCellKey==0 ){
rc = SQLITE_NOMEM;
goto moveto_finish;
}
pCur->aiIdx[pCur->iPage] = (u16)idx;
- rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 2);
+ rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0);
if( rc ){
sqlite3_free(pCellKey);
goto moveto_finish;
}
- c = xRecordCompare(nCell, pCellKey, pIdxKey);
+ c = sqlite3VdbeRecordCompare(nCell, pCellKey, pIdxKey);
sqlite3_free(pCellKey);
}
- assert(
- (pIdxKey->errCode!=SQLITE_CORRUPT || c==0)
- && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed)
- );
if( c<0 ){
lwr = idx+1;
}else if( c>0 ){
@@ -59700,7 +55113,6 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
*pRes = 0;
rc = SQLITE_OK;
pCur->aiIdx[pCur->iPage] = (u16)idx;
- if( pIdxKey->errCode ) rc = SQLITE_CORRUPT;
goto moveto_finish;
}
if( lwr>upr ) break;
@@ -59729,7 +55141,7 @@ moveto_next_layer:
}
moveto_finish:
pCur->info.nSize = 0;
- pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ pCur->validNKey = 0;
return rc;
}
@@ -59754,34 +55166,19 @@ SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor *pCur){
** successful then set *pRes=0. If the cursor
** was already pointing to the last entry in the database before
** this routine was called, then set *pRes=1.
-**
-** The main entry point is sqlite3BtreeNext(). That routine is optimized
-** for the common case of merely incrementing the cell counter BtCursor.aiIdx
-** to the next cell on the current page. The (slower) btreeNext() helper
-** routine is called when it is necessary to move to a different page or
-** to restore the cursor.
-**
-** The calling function will set *pRes to 0 or 1. The initial *pRes value
-** will be 1 if the cursor being stepped corresponds to an SQL index and
-** if this routine could have been skipped if that SQL index had been
-** a unique index. Otherwise the caller will have set *pRes to zero.
-** Zero is the common case. The btree implementation is free to use the
-** initial *pRes value as a hint to improve performance, but the current
-** SQLite btree implementation does not. (Note that the comdb2 btree
-** implementation does use this hint, however.)
-*/
-static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){
+*/
+SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int *pRes){
int rc;
int idx;
MemPage *pPage;
assert( cursorHoldsMutex(pCur) );
+ assert( pRes!=0 );
assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
- assert( *pRes==0 );
if( pCur->eState!=CURSOR_VALID ){
- assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
rc = restoreCursorPosition(pCur);
if( rc!=SQLITE_OK ){
+ *pRes = 0;
return rc;
}
if( CURSOR_INVALID==pCur->eState ){
@@ -59793,6 +55190,7 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){
pCur->eState = CURSOR_VALID;
if( pCur->skipNext>0 ){
pCur->skipNext = 0;
+ *pRes = 0;
return SQLITE_OK;
}
pCur->skipNext = 0;
@@ -59810,11 +55208,18 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){
** page into more than one b-tree structure. */
testcase( idx>pPage->nCell );
+ pCur->info.nSize = 0;
+ pCur->validNKey = 0;
if( idx>=pPage->nCell ){
if( !pPage->leaf ){
rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
- if( rc ) return rc;
- return moveToLeftmost(pCur);
+ if( rc ){
+ *pRes = 0;
+ return rc;
+ }
+ rc = moveToLeftmost(pCur);
+ *pRes = 0;
+ return rc;
}
do{
if( pCur->iPage==0 ){
@@ -59825,75 +55230,44 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){
moveToParent(pCur);
pPage = pCur->apPage[pCur->iPage];
}while( pCur->aiIdx[pCur->iPage]>=pPage->nCell );
+ *pRes = 0;
if( pPage->intKey ){
- return sqlite3BtreeNext(pCur, pRes);
+ rc = sqlite3BtreeNext(pCur, pRes);
}else{
- return SQLITE_OK;
+ rc = SQLITE_OK;
}
+ return rc;
}
- if( pPage->leaf ){
- return SQLITE_OK;
- }else{
- return moveToLeftmost(pCur);
- }
-}
-SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int *pRes){
- MemPage *pPage;
- assert( cursorHoldsMutex(pCur) );
- assert( pRes!=0 );
- assert( *pRes==0 || *pRes==1 );
- assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
- pCur->info.nSize = 0;
- pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
*pRes = 0;
- if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur, pRes);
- pPage = pCur->apPage[pCur->iPage];
- if( (++pCur->aiIdx[pCur->iPage])>=pPage->nCell ){
- pCur->aiIdx[pCur->iPage]--;
- return btreeNext(pCur, pRes);
- }
if( pPage->leaf ){
return SQLITE_OK;
- }else{
- return moveToLeftmost(pCur);
}
+ rc = moveToLeftmost(pCur);
+ return rc;
}
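/* Sketch (illustration only; scanTable and xRow are invented names): the
** standard full-scan loop built from sqlite3BtreeFirst() and the
** sqlite3BtreeNext() routine restored above. */
static int scanTable(BtCursor *pCur, int (*xRow)(BtCursor*)){
  int res = 0;
  int rc = sqlite3BtreeFirst(pCur, &res);      /* res=1 if the table is empty */
  while( rc==SQLITE_OK && res==0 ){
    rc = xRow(pCur);                           /* visit the current entry */
    if( rc==SQLITE_OK ) rc = sqlite3BtreeNext(pCur, &res);  /* res=1 at end */
  }
  return rc;
}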
+
/*
** Step the cursor to the back to the previous entry in the database. If
** successful then set *pRes=0. If the cursor
** was already pointing to the first entry in the database before
** this routine was called, then set *pRes=1.
-**
-** The main entry point is sqlite3BtreePrevious(). That routine is optimized
-** for the common case of merely decrementing the cell counter BtCursor.aiIdx
-** to the previous cell on the current page. The (slower) btreePrevious()
-** helper routine is called when it is necessary to move to a different page
-** or to restore the cursor.
-**
-** The calling function will set *pRes to 0 or 1. The initial *pRes value
-** will be 1 if the cursor being stepped corresponds to an SQL index and
-** if this routine could have been skipped if that SQL index had been
-** a unique index. Otherwise the caller will have set *pRes to zero.
-** Zero is the common case. The btree implementation is free to use the
-** initial *pRes value as a hint to improve performance, but the current
-** SQLite btree implementation does not. (Note that the comdb2 btree
-** implementation does use this hint, however.)
-*/
-static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
+*/
+SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){
int rc;
MemPage *pPage;
assert( cursorHoldsMutex(pCur) );
assert( pRes!=0 );
- assert( *pRes==0 );
assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
- assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 );
- assert( pCur->info.nSize==0 );
+ pCur->atLast = 0;
if( pCur->eState!=CURSOR_VALID ){
- rc = restoreCursorPosition(pCur);
- if( rc!=SQLITE_OK ){
- return rc;
+ if( ALWAYS(pCur->eState>=CURSOR_REQUIRESEEK) ){
+ rc = btreeRestoreCursorPosition(pCur);
+ if( rc!=SQLITE_OK ){
+ *pRes = 0;
+ return rc;
+ }
}
if( CURSOR_INVALID==pCur->eState ){
*pRes = 1;
@@ -59904,6 +55278,7 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
pCur->eState = CURSOR_VALID;
if( pCur->skipNext<0 ){
pCur->skipNext = 0;
+ *pRes = 0;
return SQLITE_OK;
}
pCur->skipNext = 0;
@@ -59915,7 +55290,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
if( !pPage->leaf ){
int idx = pCur->aiIdx[pCur->iPage];
rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
- if( rc ) return rc;
+ if( rc ){
+ *pRes = 0;
+ return rc;
+ }
rc = moveToRightmost(pCur);
}else{
while( pCur->aiIdx[pCur->iPage]==0 ){
@@ -59926,8 +55304,8 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
}
moveToParent(pCur);
}
- assert( pCur->info.nSize==0 );
- assert( (pCur->curFlags & (BTCF_ValidNKey|BTCF_ValidOvfl))==0 );
+ pCur->info.nSize = 0;
+ pCur->validNKey = 0;
pCur->aiIdx[pCur->iPage]--;
pPage = pCur->apPage[pCur->iPage];
@@ -59937,24 +55315,8 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
rc = SQLITE_OK;
}
}
- return rc;
-}
-SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){
- assert( cursorHoldsMutex(pCur) );
- assert( pRes!=0 );
- assert( *pRes==0 || *pRes==1 );
- assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
*pRes = 0;
- pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey);
- pCur->info.nSize = 0;
- if( pCur->eState!=CURSOR_VALID
- || pCur->aiIdx[pCur->iPage]==0
- || pCur->apPage[pCur->iPage]->leaf==0
- ){
- return btreePrevious(pCur, pRes);
- }
- pCur->aiIdx[pCur->iPage]--;
- return SQLITE_OK;
+ return rc;
}
/*
@@ -59966,7 +55328,8 @@ SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){
** sqlite3PagerUnref() on the new page when it is done.
**
** SQLITE_OK is returned on success. Any other return value indicates
-** an error. *ppPage is set to NULL in the event of an error.
+** an error. *ppPage and *pPgno are undefined in the event of an error.
+** Do not invoke sqlite3PagerUnref() on *ppPage if an error is returned.
**
** If the "nearby" parameter is not 0, then an effort is made to
** locate a page close to the page number "nearby". This can be used in an
@@ -59998,8 +55361,6 @@ static int allocateBtreePage(
assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) );
pPage1 = pBt->pPage1;
mxPage = btreePagecount(pBt);
- /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36
- ** stores stores the total number of pages on the freelist. */
n = get4byte(&pPage1->aData[36]);
testcase( n==mxPage-1 );
if( n>=mxPage ){
@@ -60009,7 +55370,6 @@ static int allocateBtreePage(
/* There are pages on the freelist. Reuse one of those pages. */
Pgno iTrunk;
u8 searchList = 0; /* If the free-list must be searched for 'nearby' */
- u32 nSearch = 0; /* Count of the number of search attempts */
/* If eMode==BTALLOC_EXACT and a query of the pointer-map
** shows that the page 'nearby' is somewhere on the free-list, then
@@ -60047,21 +55407,15 @@ static int allocateBtreePage(
do {
pPrevTrunk = pTrunk;
if( pPrevTrunk ){
- /* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page
- ** is the page number of the next freelist trunk page in the list or
- ** zero if this is the last freelist trunk page. */
iTrunk = get4byte(&pPrevTrunk->aData[0]);
}else{
- /* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32
- ** stores the page number of the first page of the freelist, or zero if
- ** the freelist is empty. */
iTrunk = get4byte(&pPage1->aData[32]);
}
testcase( iTrunk==mxPage );
- if( iTrunk>mxPage || nSearch++ > n ){
+ if( iTrunk>mxPage ){
rc = SQLITE_CORRUPT_BKPT;
}else{
- rc = btreeGetUnusedPage(pBt, iTrunk, &pTrunk, 0);
+ rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0);
}
if( rc ){
pTrunk = 0;
@@ -60069,9 +55423,8 @@ static int allocateBtreePage(
}
assert( pTrunk!=0 );
assert( pTrunk->aData!=0 );
- /* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page
- ** is the number of leaf page pointers to follow. */
- k = get4byte(&pTrunk->aData[4]);
+
+ k = get4byte(&pTrunk->aData[4]); /* # of leaves on this trunk page */
if( k==0 && !searchList ){
/* The trunk has no leaves and the list is not being searched.
** So extract the trunk page itself and use it as the newly
@@ -60126,7 +55479,7 @@ static int allocateBtreePage(
goto end_allocate_page;
}
testcase( iNewTrunk==mxPage );
- rc = btreeGetUnusedPage(pBt, iNewTrunk, &pNewTrunk, 0);
+ rc = btreeGetPage(pBt, iNewTrunk, &pNewTrunk, 0);
if( rc!=SQLITE_OK ){
goto end_allocate_page;
}
@@ -60205,13 +55558,12 @@ static int allocateBtreePage(
memcpy(&aData[8+closest*4], &aData[4+k*4], 4);
}
put4byte(&aData[4], k-1);
- noContent = !btreeGetHasContent(pBt, *pPgno)? PAGER_GET_NOCONTENT : 0;
- rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, noContent);
+ noContent = !btreeGetHasContent(pBt, *pPgno) ? PAGER_GET_NOCONTENT : 0;
+ rc = btreeGetPage(pBt, *pPgno, ppPage, noContent);
if( rc==SQLITE_OK ){
rc = sqlite3PagerWrite((*ppPage)->pDbPage);
if( rc!=SQLITE_OK ){
releasePage(*ppPage);
- *ppPage = 0;
}
}
searchList = 0;
@@ -60239,7 +55591,7 @@ static int allocateBtreePage(
** here are confined to those pages that lie between the end of the
** database image and the end of the database file.
*/
- int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate))? PAGER_GET_NOCONTENT:0;
+ int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate)) ? PAGER_GET_NOCONTENT : 0;
rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
if( rc ) return rc;
@@ -60255,7 +55607,7 @@ static int allocateBtreePage(
MemPage *pPg = 0;
TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage));
assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) );
- rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent);
+ rc = btreeGetPage(pBt, pBt->nPage, &pPg, bNoContent);
if( rc==SQLITE_OK ){
rc = sqlite3PagerWrite(pPg->pDbPage);
releasePage(pPg);
@@ -60269,12 +55621,11 @@ static int allocateBtreePage(
*pPgno = pBt->nPage;
assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
- rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, bNoContent);
+ rc = btreeGetPage(pBt, *pPgno, ppPage, bNoContent);
if( rc ) return rc;
rc = sqlite3PagerWrite((*ppPage)->pDbPage);
if( rc!=SQLITE_OK ){
releasePage(*ppPage);
- *ppPage = 0;
}
TRACE(("ALLOCATE: %d from end of file\n", *pPgno));
}
@@ -60284,8 +55635,16 @@ static int allocateBtreePage(
end_allocate_page:
releasePage(pTrunk);
releasePage(pPrevTrunk);
- assert( rc!=SQLITE_OK || sqlite3PagerPageRefcount((*ppPage)->pDbPage)<=1 );
- assert( rc!=SQLITE_OK || (*ppPage)->isInit==0 );
+ if( rc==SQLITE_OK ){
+ if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){
+ releasePage(*ppPage);
+ return SQLITE_CORRUPT_BKPT;
+ }
+ (*ppPage)->isInit = 0;
+ }else{
+ *ppPage = 0;
+ }
+ assert( rc!=SQLITE_OK || sqlite3PagerIswriteable((*ppPage)->pDbPage) );
return rc;
}
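/* Sketch (assumption drawn from the freelist handling above; helper name
** invented): the header layout of a freelist trunk page as consumed by
** allocateBtreePage().  Bytes 0-3 give the next trunk page (zero for the
** last trunk) and bytes 4-7 the number of leaf page numbers that follow. */
static void decodeTrunkHeader(const u8 *aData, Pgno *pNextTrunk, u32 *pnLeaf){
  *pNextTrunk = get4byte(&aData[0]);   /* next freelist trunk page, or 0 */
  *pnLeaf = get4byte(&aData[4]);       /* count of leaf pointers on this trunk */
}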
@@ -60310,10 +55669,9 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
int nFree; /* Initial number of pages on free-list */
assert( sqlite3_mutex_held(pBt->mutex) );
- assert( CORRUPT_DB || iPage>1 );
+ assert( iPage>1 );
assert( !pMemPage || pMemPage->pgno==iPage );
- if( iPage<2 ) return SQLITE_CORRUPT_BKPT;
if( pMemPage ){
pPage = pMemPage;
sqlite3PagerRef(pPage->pDbPage);
@@ -60383,11 +55741,6 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
** for now. At some point in the future (once everyone has upgraded
** to 3.6.0 or later) we should consider fixing the conditional above
** to read "usableSize/4-2" instead of "usableSize/4-8".
- **
- ** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still
- ** avoid using the last six entries in the freelist trunk page array in
- ** order that database files created by newer versions of SQLite can be
- ** read by older versions of SQLite.
*/
rc = sqlite3PagerWrite(pTrunk->pDbPage);
if( rc==SQLITE_OK ){
@@ -60436,15 +55789,9 @@ static void freePage(MemPage *pPage, int *pRC){
}
/*
-** Free any overflow pages associated with the given Cell. Write the
-** local Cell size (the number of bytes on the original page, omitting
-** overflow) into *pnSize.
+** Free any overflow pages associated with the given Cell.
*/
-static int clearCell(
- MemPage *pPage, /* The page that contains the Cell */
- unsigned char *pCell, /* First byte of the Cell */
- u16 *pnSize /* Write the size of the Cell here */
-){
+static int clearCell(MemPage *pPage, unsigned char *pCell){
BtShared *pBt = pPage->pBt;
CellInfo info;
Pgno ovflPgno;
@@ -60453,8 +55800,7 @@ static int clearCell(
u32 ovflPageSize;
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- pPage->xParseCell(pPage, pCell, &info);
- *pnSize = info.nSize;
+ btreeParseCellPtr(pPage, pCell, &info);
if( info.iOverflow==0 ){
return SQLITE_OK; /* No overflow pages. Return without doing anything */
}
@@ -60465,9 +55811,7 @@ static int clearCell(
assert( pBt->usableSize > 4 );
ovflPageSize = pBt->usableSize - 4;
nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize;
- assert( nOvfl>0 ||
- (CORRUPT_DB && (info.nPayload + ovflPageSize)<ovflPageSize)
- );
+ assert( ovflPgno==0 || nOvfl>0 );
while( nOvfl-- ){
Pgno iNext = 0;
MemPage *pOvfl = 0;
@@ -60540,6 +55884,7 @@ static int fillInCell(
BtShared *pBt = pPage->pBt;
Pgno pgnoOvfl = 0;
int nHeader;
+ CellInfo info;
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
@@ -60549,72 +55894,40 @@ static int fillInCell(
|| sqlite3PagerIswriteable(pPage->pDbPage) );
/* Fill in the header. */
- nHeader = pPage->childPtrSize;
- nPayload = nData + nZero;
- if( pPage->intKeyLeaf ){
- nHeader += putVarint32(&pCell[nHeader], nPayload);
+ nHeader = 0;
+ if( !pPage->leaf ){
+ nHeader += 4;
+ }
+ if( pPage->hasData ){
+ nHeader += putVarint(&pCell[nHeader], nData+nZero);
}else{
- assert( nData==0 );
- assert( nZero==0 );
+ nData = nZero = 0;
}
nHeader += putVarint(&pCell[nHeader], *(u64*)&nKey);
+ btreeParseCellPtr(pPage, pCell, &info);
+ assert( info.nHeader==nHeader );
+ assert( info.nKey==nKey );
+ assert( info.nData==(u32)(nData+nZero) );
- /* Fill in the payload size */
+ /* Fill in the payload */
+ nPayload = nData + nZero;
if( pPage->intKey ){
pSrc = pData;
nSrc = nData;
nData = 0;
}else{
- assert( nKey<=0x7fffffff && pKey!=0 );
- nPayload = (int)nKey;
+ if( NEVER(nKey>0x7fffffff || pKey==0) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ nPayload += (int)nKey;
pSrc = pKey;
nSrc = (int)nKey;
}
- if( nPayload<=pPage->maxLocal ){
- n = nHeader + nPayload;
- testcase( n==3 );
- testcase( n==4 );
- if( n<4 ) n = 4;
- *pnSize = n;
- spaceLeft = nPayload;
- pPrior = pCell;
- }else{
- int mn = pPage->minLocal;
- n = mn + (nPayload - mn) % (pPage->pBt->usableSize - 4);
- testcase( n==pPage->maxLocal );
- testcase( n==pPage->maxLocal+1 );
- if( n > pPage->maxLocal ) n = mn;
- spaceLeft = n;
- *pnSize = n + nHeader + 4;
- pPrior = &pCell[nHeader+n];
- }
+ *pnSize = info.nSize;
+ spaceLeft = info.nLocal;
pPayload = &pCell[nHeader];
+ pPrior = &pCell[info.iOverflow];
- /* At this point variables should be set as follows:
- **
- ** nPayload Total payload size in bytes
- ** pPayload Begin writing payload here
- ** spaceLeft Space available at pPayload. If nPayload>spaceLeft,
- ** that means content must spill into overflow pages.
- ** *pnSize Size of the local cell (not counting overflow pages)
- ** pPrior Where to write the pgno of the first overflow page
- **
- ** Use a call to btreeParseCellPtr() to verify that the values above
- ** were computed correctly.
- */
-#if SQLITE_DEBUG
- {
- CellInfo info;
- pPage->xParseCell(pPage, pCell, &info);
- assert( nHeader=(int)(info.pPayload - pCell) );
- assert( info.nKey==nKey );
- assert( *pnSize == info.nSize );
- assert( spaceLeft == info.nLocal );
- assert( pPrior == &pCell[info.iOverflow] );
- }
-#endif
-
- /* Write the payload into the local Cell and any extra into overflow pages */
while( nPayload>0 ){
if( spaceLeft==0 ){
#ifndef SQLITE_OMIT_AUTOVACUUM
@@ -60714,13 +56027,14 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
u32 pc; /* Offset to cell content of cell being deleted */
u8 *data; /* pPage->aData */
u8 *ptr; /* Used to move bytes around within data[] */
+ u8 *endPtr; /* End of loop */
int rc; /* The return code */
int hdr; /* Beginning of the header. 0 most pages. 100 page 1 */
if( *pRC ) return;
assert( idx>=0 && idx<pPage->nCell );
- assert( CORRUPT_DB || sz==cellSize(pPage, idx) );
+ assert( sz==cellSize(pPage, idx) );
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
data = pPage->aData;
@@ -60738,18 +56052,15 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
*pRC = rc;
return;
}
- pPage->nCell--;
- if( pPage->nCell==0 ){
- memset(&data[hdr+1], 0, 4);
- data[hdr+7] = 0;
- put2byte(&data[hdr+5], pPage->pBt->usableSize);
- pPage->nFree = pPage->pBt->usableSize - pPage->hdrOffset
- - pPage->childPtrSize - 8;
- }else{
- memmove(ptr, ptr+2, 2*(pPage->nCell - idx));
- put2byte(&data[hdr+3], pPage->nCell);
- pPage->nFree += 2;
+ endPtr = &pPage->aCellIdx[2*pPage->nCell - 2];
+ assert( (SQLITE_PTR_TO_INT(ptr)&1)==0 ); /* ptr is always 2-byte aligned */
+ while( ptr<endPtr ){
+ *(u16*)ptr = *(u16*)&ptr[2];
+ ptr += 2;
}
+ pPage->nCell--;
+ put2byte(&data[hdr+3], pPage->nCell);
+ pPage->nFree += 2;
}
/*
@@ -60763,6 +56074,11 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
** in pTemp or the original pCell) and also record its index.
** Allocating a new entry in pPage->aCell[] implies that
** pPage->nOverflow is incremented.
+**
+** If nSkip is non-zero, then do not copy the first nSkip bytes of the
+** cell. The caller will overwrite them after this function returns. If
+** nSkip is non-zero, then pCell may point to an invalid memory location
+** (but pCell+nSkip is always valid).
*/
static void insertCell(
MemPage *pPage, /* Page into which we are copying */
@@ -60775,14 +56091,19 @@ static void insertCell(
){
int idx = 0; /* Where to write new cell content in data[] */
int j; /* Loop counter */
+ int end; /* First byte past the last cell pointer in data[] */
+ int ins; /* Index in data[] where new cell pointer is inserted */
+ int cellOffset; /* Address of first cell pointer in data[] */
u8 *data; /* The content of the whole page */
- u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */
+ u8 *ptr; /* Used for moving information around in data[] */
+ u8 *endPtr; /* End of the loop */
+
+ int nSkip = (iChild ? 4 : 0);
if( *pRC ) return;
assert( i>=0 && i<=pPage->nCell+pPage->nOverflow );
- assert( MX_CELL(pPage->pBt)<=10921 );
- assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB );
+ assert( pPage->nCell<=MX_CELL(pPage->pBt) && MX_CELL(pPage->pBt)<=10921 );
assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) );
assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
@@ -60791,10 +56112,10 @@ static void insertCell(
** wanted to be less than 4 but got rounded up to 4 on the leaf, then size
** might be less than 8 (leaf-size + pointer) on the interior node. Hence
** the term after the || in the following assert(). */
- assert( sz==pPage->xCellSize(pPage, pCell) || (sz==8 && iChild>0) );
+ assert( sz==cellSizePtr(pPage, pCell) || (sz==8 && iChild>0) );
if( pPage->nOverflow || sz+2>pPage->nFree ){
if( pTemp ){
- memcpy(pTemp, pCell, sz);
+ memcpy(pTemp+nSkip, pCell+nSkip, sz-nSkip);
pCell = pTemp;
}
if( iChild ){
@@ -60804,14 +56125,6 @@ static void insertCell(
assert( j<(int)(sizeof(pPage->apOvfl)/sizeof(pPage->apOvfl[0])) );
pPage->apOvfl[j] = pCell;
pPage->aiOvfl[j] = (u16)i;
-
- /* When multiple overflows occur, they are always sequential and in
- ** sorted order. This invariants arise because multiple overflows can
- ** only occur when inserting divider cells into the parent page during
- ** balancing, and the dividers are adjacent and sorted.
- */
- assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */
- assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */
}else{
int rc = sqlite3PagerWrite(pPage->pDbPage);
if( rc!=SQLITE_OK ){
@@ -60820,26 +56133,30 @@ static void insertCell(
}
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
data = pPage->aData;
- assert( &data[pPage->cellOffset]==pPage->aCellIdx );
+ cellOffset = pPage->cellOffset;
+ end = cellOffset + 2*pPage->nCell;
+ ins = cellOffset + 2*i;
rc = allocateSpace(pPage, sz, &idx);
if( rc ){ *pRC = rc; return; }
- /* The allocateSpace() routine guarantees the following properties
- ** if it returns successfully */
- assert( idx >= 0 );
- assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB );
+ /* The allocateSpace() routine guarantees the following two properties
+ ** if it returns success */
+ assert( idx >= end+2 );
assert( idx+sz <= (int)pPage->pBt->usableSize );
+ pPage->nCell++;
pPage->nFree -= (u16)(2 + sz);
- memcpy(&data[idx], pCell, sz);
+ memcpy(&data[idx+nSkip], pCell+nSkip, sz-nSkip);
if( iChild ){
put4byte(&data[idx], iChild);
}
- pIns = pPage->aCellIdx + i*2;
- memmove(pIns+2, pIns, 2*(pPage->nCell - i));
- put2byte(pIns, idx);
- pPage->nCell++;
- /* increment the cell count */
- if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++;
- assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell );
+ ptr = &data[end];
+ endPtr = &data[ins];
+ assert( (SQLITE_PTR_TO_INT(ptr)&1)==0 ); /* ptr is always 2-byte aligned */
+ while( ptr>endPtr ){
+ *(u16*)ptr = *(u16*)&ptr[-2];
+ ptr -= 2;
+ }
+ put2byte(&data[ins], idx);
+ put2byte(&data[pPage->hdrOffset+3], pPage->nCell);
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pPage->pBt->autoVacuum ){
/* The cell may contain a pointer to an overflow page. If so, write
@@ -60852,328 +56169,45 @@ static void insertCell(
}
/*
-** A CellArray object contains a cache of pointers and sizes for a
-** consecutive sequence of cells that might be held multiple pages.
-*/
-typedef struct CellArray CellArray;
-struct CellArray {
- int nCell; /* Number of cells in apCell[] */
- MemPage *pRef; /* Reference page */
- u8 **apCell; /* All cells begin balanced */
- u16 *szCell; /* Local size of all cells in apCell[] */
-};
-
-/*
-** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been
-** computed.
-*/
-static void populateCellCache(CellArray *p, int idx, int N){
- assert( idx>=0 && idx+N<=p->nCell );
- while( N>0 ){
- assert( p->apCell[idx]!=0 );
- if( p->szCell[idx]==0 ){
- p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]);
- }else{
- assert( CORRUPT_DB ||
- p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) );
- }
- idx++;
- N--;
- }
-}
-
-/*
-** Return the size of the Nth element of the cell array
-*/
-static SQLITE_NOINLINE u16 computeCellSize(CellArray *p, int N){
- assert( N>=0 && N<p->nCell );
- assert( p->szCell[N]==0 );
- p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]);
- return p->szCell[N];
-}
-static u16 cachedCellSize(CellArray *p, int N){
- assert( N>=0 && N<p->nCell );
- if( p->szCell[N] ) return p->szCell[N];
- return computeCellSize(p, N);
-}
-
-/*
-** Array apCell[] contains pointers to nCell b-tree page cells. The
-** szCell[] array contains the size in bytes of each cell. This function
-** replaces the current contents of page pPg with the contents of the cell
-** array.
-**
-** Some of the cells in apCell[] may currently be stored in pPg. This
-** function works around problems caused by this by making a copy of any
-** such cells before overwriting the page data.
-**
-** The MemPage.nFree field is invalidated by this function. It is the
-** responsibility of the caller to set it correctly.
+** Add a list of cells to a page. The page should be initially empty.
+** The cells are guaranteed to fit on the page.
*/
-static int rebuildPage(
- MemPage *pPg, /* Edit this page */
- int nCell, /* Final number of cells on page */
- u8 **apCell, /* Array of cells */
- u16 *szCell /* Array of cell sizes */
+static void assemblePage(
+ MemPage *pPage, /* The page to be assembled */
+ int nCell, /* The number of cells to add to this page */
+ u8 **apCell, /* Pointers to cell bodies */
+ u16 *aSize /* Sizes of the cells */
){
- const int hdr = pPg->hdrOffset; /* Offset of header on pPg */
- u8 * const aData = pPg->aData; /* Pointer to data for pPg */
- const int usableSize = pPg->pBt->usableSize;
- u8 * const pEnd = &aData[usableSize];
- int i;
- u8 *pCellptr = pPg->aCellIdx;
- u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
- u8 *pData;
-
- i = get2byte(&aData[hdr+5]);
- memcpy(&pTmp[i], &aData[i], usableSize - i);
+ int i; /* Loop counter */
+ u8 *pCellptr; /* Address of next cell pointer */
+ int cellbody; /* Address of next cell body */
+ u8 * const data = pPage->aData; /* Pointer to data for pPage */
+ const int hdr = pPage->hdrOffset; /* Offset of header on pPage */
+ const int nUsable = pPage->pBt->usableSize; /* Usable size of page */
- pData = pEnd;
- for(i=0; i<nCell; i++){
- u8 *pCell = apCell[i];
- if( pCell>aData && pCell<pEnd ){
- pCell = &pTmp[pCell - aData];
- }
- pData -= szCell[i];
- put2byte(pCellptr, (pData - aData));
- pCellptr += 2;
- if( pData < pCellptr ) return SQLITE_CORRUPT_BKPT;
- memcpy(pData, pCell, szCell[i]);
- assert( szCell[i]==pPg->xCellSize(pPg, pCell) || CORRUPT_DB );
- testcase( szCell[i]!=pPg->xCellSize(pPg,pCell) );
- }
-
- /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
- pPg->nCell = nCell;
- pPg->nOverflow = 0;
-
- put2byte(&aData[hdr+1], 0);
- put2byte(&aData[hdr+3], pPg->nCell);
- put2byte(&aData[hdr+5], pData - aData);
- aData[hdr+7] = 0x00;
- return SQLITE_OK;
-}
-
-/*
-** Array apCell[] contains nCell pointers to b-tree cells. Array szCell
-** contains the size in bytes of each such cell. This function attempts to
-** add the cells stored in the array to page pPg. If it cannot (because
-** the page needs to be defragmented before the cells will fit), non-zero
-** is returned. Otherwise, if the cells are added successfully, zero is
-** returned.
-**
-** Argument pCellptr points to the first entry in the cell-pointer array
-** (part of page pPg) to populate. After cell apCell[0] is written to the
-** page body, a 16-bit offset is written to pCellptr. And so on, for each
-** cell in the array. It is the responsibility of the caller to ensure
-** that it is safe to overwrite this part of the cell-pointer array.
-**
-** When this function is called, *ppData points to the start of the
-** content area on page pPg. If the size of the content area is extended,
-** *ppData is updated to point to the new start of the content area
-** before returning.
-**
-** Finally, argument pBegin points to the byte immediately following the
-** end of the space required by this page for the cell-pointer area (for
-** all cells - not just those inserted by the current call). If the content
- ** area must be extended to before this point in order to accommodate all
-** cells in apCell[], then the cells do not fit and non-zero is returned.
-*/
-static int pageInsertArray(
- MemPage *pPg, /* Page to add cells to */
- u8 *pBegin, /* End of cell-pointer array */
- u8 **ppData, /* IN/OUT: Page content-area pointer */
- u8 *pCellptr, /* Pointer to cell-pointer area */
- int iFirst, /* Index of first cell to add */
- int nCell, /* Number of cells to add to pPg */
- CellArray *pCArray /* Array of cells */
-){
- int i;
- u8 *aData = pPg->aData;
- u8 *pData = *ppData;
- int iEnd = iFirst + nCell;
- assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */
- for(i=iFirst; i<iEnd; i++){
- int sz, rc;
- u8 *pSlot;
- sz = cachedCellSize(pCArray, i);
- if( (aData[1]==0 && aData[2]==0) || (pSlot = pageFindSlot(pPg,sz,&rc))==0 ){
- pData -= sz;
- if( pData<pBegin ) return 1;
- pSlot = pData;
- }
- /* pSlot and pCArray->apCell[i] will never overlap on a well-formed
- ** database. But they might for a corrupt database. Hence use memmove()
- ** since memcpy() sends SIGABRT with overlapping buffers on OpenBSD */
- assert( (pSlot+sz)<=pCArray->apCell[i]
- || pSlot>=(pCArray->apCell[i]+sz)
- || CORRUPT_DB );
- memmove(pSlot, pCArray->apCell[i], sz);
- put2byte(pCellptr, (pSlot - aData));
- pCellptr += 2;
- }
- *ppData = pData;
- return 0;
-}
-
-/*
-** Array apCell[] contains nCell pointers to b-tree cells. Array szCell
-** contains the size in bytes of each such cell. This function adds the
-** space associated with each cell in the array that is currently stored
-** within the body of pPg to the pPg free-list. The cell-pointers and other
-** fields of the page are not updated.
-**
-** This function returns the total number of cells added to the free-list.
-*/
-static int pageFreeArray(
- MemPage *pPg, /* Page to edit */
- int iFirst, /* First cell to delete */
- int nCell, /* Cells to delete */
- CellArray *pCArray /* Array of cells */
-){
- u8 * const aData = pPg->aData;
- u8 * const pEnd = &aData[pPg->pBt->usableSize];
- u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize];
- int nRet = 0;
- int i;
- int iEnd = iFirst + nCell;
- u8 *pFree = 0;
- int szFree = 0;
-
- for(i=iFirst; i<iEnd; i++){
- u8 *pCell = pCArray->apCell[i];
- if( pCell>=pStart && pCell<pEnd ){
- int sz;
- /* No need to use cachedCellSize() here. The sizes of all cells that
- ** are to be freed have already been computed while deciding which
- ** cells need freeing */
- sz = pCArray->szCell[i]; assert( sz>0 );
- if( pFree!=(pCell + sz) ){
- if( pFree ){
- assert( pFree>aData && (pFree - aData)<65536 );
- freeSpace(pPg, (u16)(pFree - aData), szFree);
- }
- pFree = pCell;
- szFree = sz;
- if( pFree+sz>pEnd ) return 0;
- }else{
- pFree = pCell;
- szFree += sz;
- }
- nRet++;
- }
- }
- if( pFree ){
- assert( pFree>aData && (pFree - aData)<65536 );
- freeSpace(pPg, (u16)(pFree - aData), szFree);
- }
- return nRet;
-}
+ assert( pPage->nOverflow==0 );
+ assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+ assert( nCell>=0 && nCell<=(int)MX_CELL(pPage->pBt)
+ && (int)MX_CELL(pPage->pBt)<=10921);
+ assert( sqlite3PagerIswriteable(pPage->pDbPage) );
-/*
-** apCell[] and szCell[] contain pointers to and sizes of all cells in the
-** pages being balanced. The current page, pPg, has pPg->nCell cells starting
-** with apCell[iOld]. After balancing, this page should hold nNew cells
-** starting at apCell[iNew].
-**
-** This routine makes the necessary adjustments to pPg so that it contains
-** the correct cells after being balanced.
-**
-** The pPg->nFree field is invalid when this function returns. It is the
-** responsibility of the caller to set it correctly.
-*/
-static int editPage(
- MemPage *pPg, /* Edit this page */
- int iOld, /* Index of first cell currently on page */
- int iNew, /* Index of new first cell on page */
- int nNew, /* Final number of cells on page */
- CellArray *pCArray /* Array of cells and sizes */
-){
- u8 * const aData = pPg->aData;
- const int hdr = pPg->hdrOffset;
- u8 *pBegin = &pPg->aCellIdx[nNew * 2];
- int nCell = pPg->nCell; /* Cells stored on pPg */
- u8 *pData;
- u8 *pCellptr;
- int i;
- int iOldEnd = iOld + pPg->nCell + pPg->nOverflow;
- int iNewEnd = iNew + nNew;
+ /* Check that the page has just been zeroed by zeroPage() */
+ assert( pPage->nCell==0 );
+ assert( get2byteNotZero(&data[hdr+5])==nUsable );
-#ifdef SQLITE_DEBUG
- u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
- memcpy(pTmp, aData, pPg->pBt->usableSize);
-#endif
-
- /* Remove cells from the start and end of the page */
- if( iOld<iNew ){
- int nShift = pageFreeArray(pPg, iOld, iNew-iOld, pCArray);
- memmove(pPg->aCellIdx, &pPg->aCellIdx[nShift*2], nCell*2);
- nCell -= nShift;
- }
- if( iNewEnd < iOldEnd ){
- nCell -= pageFreeArray(pPg, iNewEnd, iOldEnd - iNewEnd, pCArray);
- }
-
- pData = &aData[get2byteNotZero(&aData[hdr+5])];
- if( pData<pBegin ) goto editpage_fail;
-
- /* Add cells to the start of the page */
- if( iNew<iOld ){
- int nAdd = MIN(nNew,iOld-iNew);
- assert( (iOld-iNew)<nNew || nCell==0 || CORRUPT_DB );
- pCellptr = pPg->aCellIdx;
- memmove(&pCellptr[nAdd*2], pCellptr, nCell*2);
- if( pageInsertArray(
- pPg, pBegin, &pData, pCellptr,
- iNew, nAdd, pCArray
- ) ) goto editpage_fail;
- nCell += nAdd;
- }
-
- /* Add any overflow cells */
- for(i=0; i<pPg->nOverflow; i++){
- int iCell = (iOld + pPg->aiOvfl[i]) - iNew;
- if( iCell>=0 && iCell<nNew ){
- pCellptr = &pPg->aCellIdx[iCell * 2];
- memmove(&pCellptr[2], pCellptr, (nCell - iCell) * 2);
- nCell++;
- if( pageInsertArray(
- pPg, pBegin, &pData, pCellptr,
- iCell+iNew, 1, pCArray
- ) ) goto editpage_fail;
- }
+ pCellptr = &pPage->aCellIdx[nCell*2];
+ cellbody = nUsable;
+ for(i=nCell-1; i>=0; i--){
+ u16 sz = aSize[i];
+ pCellptr -= 2;
+ cellbody -= sz;
+ put2byte(pCellptr, cellbody);
+ memcpy(&data[cellbody], apCell[i], sz);
}
-
- /* Append cells to the end of the page */
- pCellptr = &pPg->aCellIdx[nCell*2];
- if( pageInsertArray(
- pPg, pBegin, &pData, pCellptr,
- iNew+nCell, nNew-nCell, pCArray
- ) ) goto editpage_fail;
-
- pPg->nCell = nNew;
- pPg->nOverflow = 0;
-
- put2byte(&aData[hdr+3], pPg->nCell);
- put2byte(&aData[hdr+5], pData - aData);
-
-#ifdef SQLITE_DEBUG
- for(i=0; i<nNew && !CORRUPT_DB; i++){
- u8 *pCell = pCArray->apCell[i+iNew];
- int iOff = get2byteAligned(&pPg->aCellIdx[i*2]);
- if( pCell>=aData && pCell<&aData[pPg->pBt->usableSize] ){
- pCell = &pTmp[pCell - aData];
- }
- assert( 0==memcmp(pCell, &aData[iOff],
- pCArray->pRef->xCellSize(pCArray->pRef, pCArray->apCell[i+iNew])) );
- }
-#endif
-
- return SQLITE_OK;
- editpage_fail:
- /* Unable to edit this page. Rebuild it from scratch instead. */
- populateCellCache(pCArray, iNew, nNew);
- return rebuildPage(pPg, nNew, &pCArray->apCell[iNew], &pCArray->szCell[iNew]);
+ put2byte(&data[hdr+3], nCell);
+ put2byte(&data[hdr+5], cellbody);
+ pPage->nFree -= (nCell*2 + nUsable - cellbody);
+ pPage->nCell = (u16)nCell;
}
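
For orientation, assemblePage() leaves the page with cell pointers growing up from the cell-pointer array and cell bodies packed down from the end of the usable area, with a single free gap in between; the nFree adjustment above subtracts the bytes consumed by the pointer entries and the cell bodies. A small, hypothetical consistency check (it assumes the MemPage fields and the get2byte() macro defined elsewhere in sqlite3.c):

static int pageLayoutLooksSane(MemPage *pPage){
  int nUsable = pPage->pBt->usableSize;
  int top = get2byte(&pPage->aData[pPage->hdrOffset+5]); /* first byte of cell bodies */
  int gap = top - (pPage->cellOffset + 2*pPage->nCell);  /* free gap in the middle */
  return gap>=0 && top<=nUsable;
}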
/*
@@ -61227,7 +56261,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
assert( pPage->nOverflow==1 );
/* This error condition is now caught prior to reaching this function */
- if( NEVER(pPage->nCell==0) ) return SQLITE_CORRUPT_BKPT;
+ if( pPage->nCell==0 ) return SQLITE_CORRUPT_BKPT;
/* Allocate a new page. This page will become the right-sibling of
** pPage. Make the parent page writable, so that the new divider cell
@@ -61239,15 +56273,13 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
u8 *pOut = &pSpace[4];
u8 *pCell = pPage->apOvfl[0];
- u16 szCell = pPage->xCellSize(pPage, pCell);
+ u16 szCell = cellSizePtr(pPage, pCell);
u8 *pStop;
assert( sqlite3PagerIswriteable(pNew->pDbPage) );
assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) );
zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF);
- rc = rebuildPage(pNew, 1, &pCell, &szCell);
- if( NEVER(rc) ) return rc;
- pNew->nFree = pBt->usableSize - pNew->cellOffset - 2 - szCell;
+ assemblePage(pNew, 1, &pCell, &szCell);
/* If this is an auto-vacuum database, update the pointer map
** with entries for the new page, and any pointer from the
@@ -61319,7 +56351,7 @@ static int ptrmapCheckPages(MemPage **apPage, int nPage){
u8 *z;
z = findCell(pPage, j);
- pPage->xParseCell(pPage, z, &info);
+ btreeParseCellPtr(pPage, z, &info);
if( info.iOverflow ){
Pgno ovfl = get4byte(&z[info.iOverflow]);
ptrmapGet(pBt, ovfl, &e, &n);
@@ -61450,6 +56482,7 @@ static int balance_nonroot(
int bBulk /* True if this call is part of a bulk load */
){
BtShared *pBt; /* The whole database */
+ int nCell = 0; /* Number of cells in apCell[] */
int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */
int nNew = 0; /* Number of pages in apNew[] */
int nOld; /* Number of pages in apOld[] */
@@ -61460,27 +56493,22 @@ static int balance_nonroot(
int leafData; /* True if pPage is a leaf of a LEAFDATA tree */
int usableSpace; /* Bytes in pPage beyond the header */
int pageFlags; /* Value of pPage->aData[0] */
+ int subtotal; /* Subtotal of bytes in cells on one page */
int iSpace1 = 0; /* First unused byte of aSpace1[] */
int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */
int szScratch; /* Size of scratch memory requested */
MemPage *apOld[NB]; /* pPage and up to two siblings */
+ MemPage *apCopy[NB]; /* Private copies of apOld[] pages */
MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */
u8 *pRight; /* Location in parent of right-sibling pointer */
u8 *apDiv[NB-1]; /* Divider cells in pParent */
- int cntNew[NB+2]; /* Index in b.paCell[] of cell after i-th page */
- int cntOld[NB+2]; /* Old index in b.apCell[] */
- int szNew[NB+2]; /* Combined size of cells placed on i-th page */
+ int cntNew[NB+2]; /* Index in apCell[] of cell after i-th page */
+ int szNew[NB+2]; /* Combined size of cells placed on i-th page */
+ u8 **apCell = 0; /* All cells being balanced */
+ u16 *szCell; /* Local size of all cells in apCell[] */
 u8 *aSpace1; /* Space for copies of divider cells */
Pgno pgno; /* Temp var to store a page number in */
- u8 abDone[NB+2]; /* True after i'th new page is populated */
- Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */
- Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */
- u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */
- CellArray b; /* Parsed information on cells being balanced */
-
- memset(abDone, 0, sizeof(abDone));
- b.nCell = 0;
- b.apCell = 0;
+
pBt = pParent->pBt;
assert( sqlite3_mutex_held(pBt->mutex) );
assert( sqlite3PagerIswriteable(pParent->pDbPage) );
@@ -61522,6 +56550,7 @@ static int balance_nonroot(
}else if( iParentIdx==i ){
nxDiv = i-2+bBulk;
}else{
+ assert( bBulk==0 );
nxDiv = iParentIdx-1;
}
i = 2-bBulk;
@@ -61534,7 +56563,7 @@ static int balance_nonroot(
}
pgno = get4byte(pRight);
while( 1 ){
- rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0);
+ rc = getAndInitPage(pBt, pgno, &apOld[i], 0);
if( rc ){
memset(apOld, 0, (i+1)*sizeof(MemPage*));
goto balance_cleanup;
@@ -61545,12 +56574,12 @@ static int balance_nonroot(
if( i+nxDiv==pParent->aiOvfl[0] && pParent->nOverflow ){
apDiv[i] = pParent->apOvfl[0];
pgno = get4byte(apDiv[i]);
- szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
+ szNew[i] = cellSizePtr(pParent, apDiv[i]);
pParent->nOverflow = 0;
}else{
apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow);
pgno = get4byte(apDiv[i]);
- szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
+ szNew[i] = cellSizePtr(pParent, apDiv[i]);
/* Drop the cell from the parent page. apDiv[i] still points to
** the cell within the parent, even though it has been dropped.
@@ -61588,209 +56617,138 @@ static int balance_nonroot(
/*
** Allocate space for memory structures
*/
+ k = pBt->pageSize + ROUND8(sizeof(MemPage));
szScratch =
- nMaxCells*sizeof(u8*) /* b.apCell */
- + nMaxCells*sizeof(u16) /* b.szCell */
- + pBt->pageSize; /* aSpace1 */
-
- /* EVIDENCE-OF: R-28375-38319 SQLite will never request a scratch buffer
- ** that is more than 6 times the database page size. */
- assert( szScratch<=6*(int)pBt->pageSize );
- b.apCell = sqlite3ScratchMalloc( szScratch );
- if( b.apCell==0 ){
+ nMaxCells*sizeof(u8*) /* apCell */
+ + nMaxCells*sizeof(u16) /* szCell */
+ + pBt->pageSize /* aSpace1 */
+ + k*nOld; /* Page copies (apCopy) */
+ apCell = sqlite3ScratchMalloc( szScratch );
+ if( apCell==0 ){
rc = SQLITE_NOMEM;
goto balance_cleanup;
}
- b.szCell = (u16*)&b.apCell[nMaxCells];
- aSpace1 = (u8*)&b.szCell[nMaxCells];
+ szCell = (u16*)&apCell[nMaxCells];
+ aSpace1 = (u8*)&szCell[nMaxCells];
assert( EIGHT_BYTE_ALIGNMENT(aSpace1) );
/*
** Load pointers to all cells on sibling pages and the divider cells
- ** into the local b.apCell[] array. Make copies of the divider cells
- ** into space obtained from aSpace1[]. The divider cells have already
- ** been removed from pParent.
+ ** into the local apCell[] array. Make copies of the divider cells
+ ** into space obtained from aSpace1[] and remove the divider cells
+ ** from pParent.
**
** If the siblings are on leaf pages, then the child pointers of the
** divider cells are stripped from the cells before they are copied
- ** into aSpace1[]. In this way, all cells in b.apCell[] are without
+ ** into aSpace1[]. In this way, all cells in apCell[] are without
 ** child pointers. If siblings are not leaves, then all cells in
- ** b.apCell[] include child pointers. Either way, all cells in b.apCell[]
+ ** apCell[] include child pointers. Either way, all cells in apCell[]
** are alike.
**
** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf.
** leafData: 1 if pPage holds key+data and pParent holds only keys.
*/
- b.pRef = apOld[0];
- leafCorrection = b.pRef->leaf*4;
- leafData = b.pRef->intKeyLeaf;
+ leafCorrection = apOld[0]->leaf*4;
+ leafData = apOld[0]->hasData;
for(i=0; i<nOld; i++){
- MemPage *pOld = apOld[i];
- int limit = pOld->nCell;
- u8 *aData = pOld->aData;
- u16 maskPage = pOld->maskPage;
- u8 *piCell = aData + pOld->cellOffset;
- u8 *piEnd;
-
- /* Verify that all sibling pages are of the same "type" (table-leaf,
- ** table-interior, index-leaf, or index-interior).
- */
- if( pOld->aData[0]!=apOld[0]->aData[0] ){
- rc = SQLITE_CORRUPT_BKPT;
- goto balance_cleanup;
- }
-
- /* Load b.apCell[] with pointers to all cells in pOld. If pOld
- ** contains overflow cells, include them in the b.apCell[] array
- ** in the correct spot.
- **
- ** Note that when there are multiple overflow cells, it is always the
- ** case that they are sequential and adjacent. This invariant arises
- ** because multiple overflows can only occur when inserting divider
- ** cells into a parent on a prior balance, and divider cells are always
- ** adjacent and are inserted in order. There is an assert() tagged
- ** with "NOTE 1" in the overflow cell insertion loop to prove this
- ** invariant.
- **
- ** This must be done in advance. Once the balance starts, the cell
- ** offset section of the btree page will be overwritten and we will no
- ** longer be able to find the cells if a pointer to each cell is not saved
- ** first.
- */
- memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*limit);
+ int limit;
+
+ /* Before doing anything else, take a copy of the i'th original sibling.
+ ** The rest of this function will use data from the copies rather
+ ** than the original pages since the original pages will be in the
+ ** process of being overwritten. */
+ MemPage *pOld = apCopy[i] = (MemPage*)&aSpace1[pBt->pageSize + k*i];
+ memcpy(pOld, apOld[i], sizeof(MemPage));
+ pOld->aData = (void*)&pOld[1];
+ memcpy(pOld->aData, apOld[i]->aData, pBt->pageSize);
+
+ limit = pOld->nCell+pOld->nOverflow;
if( pOld->nOverflow>0 ){
- memset(&b.szCell[b.nCell+limit], 0, sizeof(b.szCell[0])*pOld->nOverflow);
- limit = pOld->aiOvfl[0];
for(j=0; j<limit; j++){
- b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell));
- piCell += 2;
- b.nCell++;
+ assert( nCell<nMaxCells );
+ apCell[nCell] = findOverflowCell(pOld, j);
+ szCell[nCell] = cellSizePtr(pOld, apCell[nCell]);
+ nCell++;
}
- for(k=0; k<pOld->nOverflow; k++){
- assert( k==0 || pOld->aiOvfl[k-1]+1==pOld->aiOvfl[k] );/* NOTE 1 */
- b.apCell[b.nCell] = pOld->apOvfl[k];
- b.nCell++;
+ }else{
+ u8 *aData = pOld->aData;
+ u16 maskPage = pOld->maskPage;
+ u16 cellOffset = pOld->cellOffset;
+ for(j=0; j<limit; j++){
+ assert( nCell<nMaxCells );
+ apCell[nCell] = findCellv2(aData, maskPage, cellOffset, j);
+ szCell[nCell] = cellSizePtr(pOld, apCell[nCell]);
+ nCell++;
}
- }
- piEnd = aData + pOld->cellOffset + 2*pOld->nCell;
- while( piCell<piEnd ){
- assert( b.nCell<nMaxCells );
- b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell));
- piCell += 2;
- b.nCell++;
- }
-
- cntOld[i] = b.nCell;
+ }
if( i<nOld-1 && !leafData){
u16 sz = (u16)szNew[i];
u8 *pTemp;
- assert( b.nCell<nMaxCells );
- b.szCell[b.nCell] = sz;
+ assert( nCell<nMaxCells );
+ szCell[nCell] = sz;
pTemp = &aSpace1[iSpace1];
iSpace1 += sz;
assert( sz<=pBt->maxLocal+23 );
assert( iSpace1 <= (int)pBt->pageSize );
memcpy(pTemp, apDiv[i], sz);
- b.apCell[b.nCell] = pTemp+leafCorrection;
+ apCell[nCell] = pTemp+leafCorrection;
assert( leafCorrection==0 || leafCorrection==4 );
- b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection;
+ szCell[nCell] = szCell[nCell] - leafCorrection;
if( !pOld->leaf ){
assert( leafCorrection==0 );
assert( pOld->hdrOffset==0 );
/* The right pointer of the child page pOld becomes the left
** pointer of the divider cell */
- memcpy(b.apCell[b.nCell], &pOld->aData[8], 4);
+ memcpy(apCell[nCell], &pOld->aData[8], 4);
}else{
assert( leafCorrection==4 );
- while( b.szCell[b.nCell]<4 ){
- /* Do not allow any cells smaller than 4 bytes. If a smaller cell
- ** does exist, pad it with 0x00 bytes. */
- assert( b.szCell[b.nCell]==3 || CORRUPT_DB );
- assert( b.apCell[b.nCell]==&aSpace1[iSpace1-3] || CORRUPT_DB );
- aSpace1[iSpace1++] = 0x00;
- b.szCell[b.nCell]++;
+ if( szCell[nCell]<4 ){
+ /* Do not allow any cells smaller than 4 bytes. */
+ szCell[nCell] = 4;
}
}
- b.nCell++;
+ nCell++;
}
}
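
One detail of the loop above worth spelling out: when the siblings are leaves (leafCorrection==4), the divider cell copied out of the parent has its 4-byte left-child pointer stripped so that every entry in apCell[] has the same shape as a leaf cell. A minimal sketch of that copy step (the helper name is hypothetical, and the restored minimum-size-of-4 adjustment is omitted):

static u8 *copyDividerForLeafSiblings(u8 *pDiv, u16 szDiv, u8 *pSpace, u16 *pszOut){
  memcpy(pSpace, pDiv, szDiv);   /* copy the whole divider cell out of the parent */
  *pszOut = szDiv - 4;           /* report its size without the child pointer */
  return pSpace + 4;             /* point just past the stripped child pointer */
}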
/*
- ** Figure out the number of pages needed to hold all b.nCell cells.
+ ** Figure out the number of pages needed to hold all nCell cells.
** Store this number in "k". Also compute szNew[] which is the total
** size of all cells on the i-th page and cntNew[] which is the index
- ** in b.apCell[] of the cell that divides page i from page i+1.
- ** cntNew[k] should equal b.nCell.
+ ** in apCell[] of the cell that divides page i from page i+1.
+ ** cntNew[k] should equal nCell.
**
** Values computed by this block:
**
** k: The total number of sibling pages
 ** szNew[i]: Space used on the i-th sibling page.
- ** cntNew[i]: Index in b.apCell[] and b.szCell[] for the first cell to
+ ** cntNew[i]: Index in apCell[] and szCell[] for the first cell to
** the right of the i-th sibling page.
** usableSpace: Number of bytes of space available on each sibling.
**
*/
usableSpace = pBt->usableSize - 12 + leafCorrection;
- for(i=0; i<nOld; i++){
- MemPage *p = apOld[i];
- szNew[i] = usableSpace - p->nFree;
- if( szNew[i]<0 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; }
- for(j=0; j<p->nOverflow; j++){
- szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]);
- }
- cntNew[i] = cntOld[i];
- }
- k = nOld;
- for(i=0; i<k; i++){
- int sz;
- while( szNew[i]>usableSpace ){
- if( i+1>=k ){
- k = i+2;
- if( k>NB+2 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; }
- szNew[k-1] = 0;
- cntNew[k-1] = b.nCell;
- }
- sz = 2 + cachedCellSize(&b, cntNew[i]-1);
- szNew[i] -= sz;
- if( !leafData ){
- if( cntNew[i]<b.nCell ){
- sz = 2 + cachedCellSize(&b, cntNew[i]);
- }else{
- sz = 0;
- }
- }
- szNew[i+1] += sz;
- cntNew[i]--;
- }
- while( cntNew[i]<b.nCell ){
- sz = 2 + cachedCellSize(&b, cntNew[i]);
- if( szNew[i]+sz>usableSpace ) break;
- szNew[i] += sz;
- cntNew[i]++;
- if( !leafData ){
- if( cntNew[i]<b.nCell ){
- sz = 2 + cachedCellSize(&b, cntNew[i]);
- }else{
- sz = 0;
- }
- }
- szNew[i+1] -= sz;
- }
- if( cntNew[i]>=b.nCell ){
- k = i+1;
- }else if( cntNew[i] <= (i>0 ? cntNew[i-1] : 0) ){
- rc = SQLITE_CORRUPT_BKPT;
- goto balance_cleanup;
- }
- }
+ for(subtotal=k=i=0; i<nCell; i++){
+ assert( i<nMaxCells );
+ subtotal += szCell[i] + 2;
+ if( subtotal > usableSpace ){
+ szNew[k] = subtotal - szCell[i];
+ cntNew[k] = i;
+ if( leafData ){ i--; }
+ subtotal = 0;
+ k++;
+ if( k>NB+1 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; }
+ }
+ }
+ szNew[k] = subtotal;
+ cntNew[k] = nCell;
+ k++;
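
To make the greedy split above concrete: it walks the cell sizes from left to right and starts a new sibling page whenever the running subtotal (cell body plus its 2-byte pointer) would exceed the usable space. The standalone sketch below is hypothetical, ignores the divider-cell handling for non-leafdata trees, and simply lets the overflowing cell open the next page:

static int packCellsGreedy(const int *aSz, int nCell, int usable, int *aCnt){
  int i, k = 0, subtotal = 0;
  for(i=0; i<nCell; i++){
    subtotal += aSz[i] + 2;
    if( subtotal>usable ){
      aCnt[k++] = i;           /* cells to the left of index i fill sibling page k */
      subtotal = aSz[i] + 2;   /* cell i opens the next sibling page */
    }
  }
  aCnt[k++] = nCell;           /* the last sibling page takes whatever remains */
  return k;                    /* number of sibling pages needed */
}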
/*
** The packing computed by the previous block is biased toward the siblings
- ** on the left side (siblings with smaller keys). The left siblings are
- ** always nearly full, while the right-most sibling might be nearly empty.
- ** The next block of code attempts to adjust the packing of siblings to
- ** get a better balance.
+ ** on the left side. The left siblings are always nearly full, while the
+ ** right-most sibling might be nearly empty. This block of code attempts
+ ** to adjust the packing of siblings to get a better balance.
**
** This adjustment is more than an optimization. The packing above might
** be so out of balance as to be illegal. For example, the right-most
@@ -61804,46 +56762,46 @@ static int balance_nonroot(
r = cntNew[i-1] - 1;
d = r + 1 - leafData;
- (void)cachedCellSize(&b, d);
- do{
- assert( d<nMaxCells );
- assert( r<nMaxCells );
- (void)cachedCellSize(&b, r);
- if( szRight!=0
- && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+2)) ){
- break;
- }
- szRight += b.szCell[d] + 2;
- szLeft -= b.szCell[r] + 2;
- cntNew[i-1] = r;
- r--;
- d--;
- }while( r>=0 );
+ assert( d<nMaxCells );
+ assert( r<nMaxCells );
+ while( szRight==0
+ || (!bBulk && szRight+szCell[d]+2<=szLeft-(szCell[r]+2))
+ ){
+ szRight += szCell[d] + 2;
+ szLeft -= szCell[r] + 2;
+ cntNew[i-1]--;
+ r = cntNew[i-1] - 1;
+ d = r + 1 - leafData;
+ }
szNew[i] = szRight;
szNew[i-1] = szLeft;
- if( cntNew[i-1] <= (i>1 ? cntNew[i-2] : 0) ){
- rc = SQLITE_CORRUPT_BKPT;
- goto balance_cleanup;
- }
}
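
The adjustment loop above can be read as: keep moving cells from the tail of the left sibling onto the right sibling for as long as the right sibling would still end up no larger than the left one. A simplified, hypothetical sketch (bulk-load mode is ignored and an explicit r>=0 guard is added purely to keep the sketch in bounds):

static void stealFromLeftSibling(int *aSz, int *aCnt, int *aTot, int i, int leafData){
  int szRight = aTot[i];       /* bytes currently assigned to sibling i */
  int szLeft  = aTot[i-1];     /* bytes currently assigned to sibling i-1 */
  int r = aCnt[i-1] - 1;       /* right-most cell still on sibling i-1 */
  int d = r + 1 - leafData;    /* cell that would move onto sibling i */
  while( r>=0 && (szRight==0 || szRight+aSz[d]+2 <= szLeft-(aSz[r]+2)) ){
    szRight += aSz[d] + 2;
    szLeft  -= aSz[r] + 2;
    aCnt[i-1]--;
    r = aCnt[i-1] - 1;
    d = r + 1 - leafData;
  }
  aTot[i] = szRight;
  aTot[i-1] = szLeft;
}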
- /* Sanity check: For a non-corrupt database file one of the following
- ** must be true:
- ** (1) We found one or more cells (cntNew[0]>0), or
- ** (2) pPage is a virtual root page. A virtual root page is when
- ** the real root page is page 1 and we are the only child of
- ** that page.
+ /* Either we found one or more cells (cntNew[0]>0) or pPage is
+ ** a virtual root page. A virtual root page is when the real root
+ ** page is page 1 and we are the only child of that page.
+ **
+ ** UPDATE: The assert() below is not necessarily true if the database
+ ** file is corrupt. The corruption will be detected and reported later
+ ** in this procedure so there is no need to act upon it now.
*/
- assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB);
- TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n",
- apOld[0]->pgno, apOld[0]->nCell,
- nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0,
- nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0
+#if 0
+ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) );
+#endif
+
+ TRACE(("BALANCE: old: %d %d %d ",
+ apOld[0]->pgno,
+ nOld>=2 ? apOld[1]->pgno : 0,
+ nOld>=3 ? apOld[2]->pgno : 0
));
/*
** Allocate k new pages. Reuse old pages where possible.
*/
+ if( apOld[0]->pgno<=1 ){
+ rc = SQLITE_CORRUPT_BKPT;
+ goto balance_cleanup;
+ }
pageFlags = apOld[0]->aData[0];
for(i=0; i<k; i++){
MemPage *pNew;
@@ -61857,10 +56815,8 @@ static int balance_nonroot(
assert( i>0 );
rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 1 : pgno), 0);
if( rc ) goto balance_cleanup;
- zeroPage(pNew, pageFlags);
apNew[i] = pNew;
nNew++;
- cntOld[i] = b.nCell;
/* Set the pointer-map entry for the new sibling page. */
if( ISAUTOVACUUM ){
@@ -61872,250 +56828,135 @@ static int balance_nonroot(
}
}
+ /* Free any old pages that were not reused as new pages.
+ */
+ while( i<nOld ){
+ freePage(apOld[i], &rc);
+ if( rc ) goto balance_cleanup;
+ releasePage(apOld[i]);
+ apOld[i] = 0;
+ i++;
+ }
+
/*
- ** Reassign page numbers so that the new pages are in ascending order.
- ** This helps to keep entries in the disk file in order so that a scan
- ** of the table is closer to a linear scan through the file. That in turn
- ** helps the operating system to deliver pages from the disk more rapidly.
+ ** Put the new pages in ascending order. This helps to
+ ** keep entries in the disk file in order so that a scan
+ ** of the table is a linear scan through the file. That
+ ** in turn helps the operating system to deliver pages
+ ** from the disk more rapidly.
**
- ** An O(n^2) insertion sort algorithm is used, but since n is never more
- ** than (NB+2) (a small constant), that should not be a problem.
+ ** An O(n^2) insertion sort algorithm is used, but since
+ ** n is never more than NB (a small constant), that should
+ ** not be a problem.
**
- ** When NB==3, this one optimization makes the database about 25% faster
- ** for large insertions and deletions.
+ ** When NB==3, this one optimization makes the database
+ ** about 25% faster for large insertions and deletions.
*/
- for(i=0; i<nNew; i++){
- aPgOrder[i] = aPgno[i] = apNew[i]->pgno;
- aPgFlags[i] = apNew[i]->pDbPage->flags;
- for(j=0; j<i; j++){
- if( aPgno[j]==aPgno[i] ){
- /* This branch is taken if the set of sibling pages somehow contains
- ** duplicate entries. This can happen if the database is corrupt.
- ** It would be simpler to detect this as part of the loop below, but
- ** we do the detection here in order to avoid populating the pager
- ** cache with two separate objects associated with the same
- ** page number. */
- assert( CORRUPT_DB );
- rc = SQLITE_CORRUPT_BKPT;
- goto balance_cleanup;
+ for(i=0; i<k-1; i++){
+ int minV = apNew[i]->pgno;
+ int minI = i;
+ for(j=i+1; j<k; j++){
+ if( apNew[j]->pgno<(unsigned)minV ){
+ minI = j;
+ minV = apNew[j]->pgno;
}
}
- }
- for(i=0; i<nNew; i++){
- int iBest = 0; /* aPgno[] index of page number to use */
- for(j=1; j<nNew; j++){
- if( aPgOrder[j]<aPgOrder[iBest] ) iBest = j;
- }
- pgno = aPgOrder[iBest];
- aPgOrder[iBest] = 0xffffffff;
- if( iBest!=i ){
- if( iBest>i ){
- sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0);
- }
- sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]);
- apNew[i]->pgno = pgno;
+ if( minI>i ){
+ MemPage *pT;
+ pT = apNew[i];
+ apNew[i] = apNew[minI];
+ apNew[minI] = pT;
}
}
-
- TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) "
- "%d(%d nc=%d) %d(%d nc=%d)\n",
- apNew[0]->pgno, szNew[0], cntNew[0],
+ TRACE(("new: %d(%d) %d(%d) %d(%d) %d(%d) %d(%d)\n",
+ apNew[0]->pgno, szNew[0],
nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0,
- nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0,
nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0,
- nNew>=3 ? cntNew[2] - cntNew[1] - !leafData : 0,
nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0,
- nNew>=4 ? cntNew[3] - cntNew[2] - !leafData : 0,
- nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0,
- nNew>=5 ? cntNew[4] - cntNew[3] - !leafData : 0
- ));
+ nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0));
assert( sqlite3PagerIswriteable(pParent->pDbPage) );
put4byte(pRight, apNew[nNew-1]->pgno);
- /* If the sibling pages are not leaves, ensure that the right-child pointer
- ** of the right-most new sibling page is set to the value that was
- ** originally in the same field of the right-most old sibling page. */
- if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){
- MemPage *pOld = (nNew>nOld ? apNew : apOld)[nOld-1];
- memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4);
- }
-
- /* Make any required updates to pointer map entries associated with
- ** cells stored on sibling pages following the balance operation. Pointer
- ** map entries associated with divider cells are set by the insertCell()
- ** routine. The associated pointer map entries are:
- **
- ** a) if the cell contains a reference to an overflow chain, the
- ** entry associated with the first page in the overflow chain, and
- **
- ** b) if the sibling pages are not leaves, the child page associated
- ** with the cell.
- **
- ** If the sibling pages are not leaves, then the pointer map entry
- ** associated with the right-child of each sibling may also need to be
- ** updated. This happens below, after the sibling pages have been
- ** populated, not here.
+ /*
+ ** Evenly distribute the data in apCell[] across the new pages.
+ ** Insert divider cells into pParent as necessary.
*/
- if( ISAUTOVACUUM ){
- MemPage *pNew = apNew[0];
- u8 *aOld = pNew->aData;
- int cntOldNext = pNew->nCell + pNew->nOverflow;
- int usableSize = pBt->usableSize;
- int iNew = 0;
- int iOld = 0;
-
- for(i=0; i<b.nCell; i++){
- u8 *pCell = b.apCell[i];
- if( i==cntOldNext ){
- MemPage *pOld = (++iOld)<nNew ? apNew[iOld] : apOld[iOld];
- cntOldNext += pOld->nCell + pOld->nOverflow + !leafData;
- aOld = pOld->aData;
- }
- if( i==cntNew[iNew] ){
- pNew = apNew[++iNew];
- if( !leafData ) continue;
- }
-
- /* Cell pCell is destined for new sibling page pNew. Originally, it
- ** was either part of sibling page iOld (possibly an overflow cell),
- ** or else the divider cell to the left of sibling page iOld. So,
- ** if sibling page iOld had the same page number as pNew, and if
- ** pCell really was a part of sibling page iOld (not a divider or
- ** overflow cell), we can skip updating the pointer map entries. */
- if( iOld>=nNew
- || pNew->pgno!=aPgno[iOld]
- || pCell<aOld
- || pCell>=&aOld[usableSize]
- ){
- if( !leafCorrection ){
- ptrmapPut(pBt, get4byte(pCell), PTRMAP_BTREE, pNew->pgno, &rc);
- }
- if( cachedCellSize(&b,i)>pNew->minLocal ){
- ptrmapPutOvflPtr(pNew, pCell, &rc);
- }
- if( rc ) goto balance_cleanup;
- }
- }
- }
-
- /* Insert new divider cells into pParent. */
- for(i=0; i<nNew-1; i++){
- u8 *pCell;
- u8 *pTemp;
- int sz;
+ j = 0;
+ for(i=0; i<nNew; i++){
+ /* Assemble the new sibling page. */
MemPage *pNew = apNew[i];
- j = cntNew[i];
-
assert( j<nMaxCells );
- assert( b.apCell[j]!=0 );
- pCell = b.apCell[j];
- sz = b.szCell[j] + leafCorrection;
- pTemp = &aOvflSpace[iOvflSpace];
- if( !pNew->leaf ){
- memcpy(&pNew->aData[8], pCell, 4);
- }else if( leafData ){
- /* If the tree is a leaf-data tree, and the siblings are leaves,
- ** then there is no divider cell in b.apCell[]. Instead, the divider
- ** cell consists of the integer key for the right-most cell of
- ** the sibling-page assembled above only.
- */
- CellInfo info;
- j--;
- pNew->xParseCell(pNew, b.apCell[j], &info);
- pCell = pTemp;
- sz = 4 + putVarint(&pCell[4], info.nKey);
- pTemp = 0;
- }else{
- pCell -= 4;
- /* Obscure case for non-leaf-data trees: If the cell at pCell was
- ** previously stored on a leaf node, and its reported size was 4
- ** bytes, then it may actually be smaller than this
- ** (see btreeParseCellPtr(), 4 bytes is the minimum size of
- ** any cell). But it is important to pass the correct size to
- ** insertCell(), so reparse the cell now.
- **
- ** Note that this can never happen in an SQLite data file, as all
- ** cells are at least 4 bytes. It only happens in b-trees used
- ** to evaluate "IN (SELECT ...)" and similar clauses.
- */
- if( b.szCell[j]==4 ){
- assert(leafCorrection==4);
- sz = pParent->xCellSize(pParent, pCell);
- }
- }
- iOvflSpace += sz;
- assert( sz<=pBt->maxLocal+23 );
- assert( iOvflSpace <= (int)pBt->pageSize );
- insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc);
- if( rc!=SQLITE_OK ) goto balance_cleanup;
- assert( sqlite3PagerIswriteable(pParent->pDbPage) );
- }
-
- /* Now update the actual sibling pages. The order in which they are updated
- ** is important, as this code needs to avoid disrupting any page from which
- ** cells may still need to be read. In practice, this means:
- **
- ** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1])
- ** then it is not safe to update page apNew[iPg] until after
- ** the left-hand sibling apNew[iPg-1] has been updated.
- **
- ** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1])
- ** then it is not safe to update page apNew[iPg] until after
- ** the right-hand sibling apNew[iPg+1] has been updated.
- **
- ** If neither of the above apply, the page is safe to update.
- **
- ** The iPg value in the following loop starts at nNew-1 and goes down
- ** to 0, then back up to nNew-1 again, thus making two passes over
- ** the pages. On the initial downward pass, only condition (1) above
- ** needs to be tested because (2) will always be true from the previous
- ** step. On the upward pass, both conditions are always true, so the
- ** upwards pass simply processes pages that were missed on the downward
- ** pass.
- */
- for(i=1-nNew; i<nNew; i++){
- int iPg = i<0 ? -i : i;
- assert( iPg>=0 && iPg<nNew );
- if( abDone[iPg] ) continue; /* Skip pages already processed */
- if( i>=0 /* On the upwards pass, or... */
- || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */
- ){
- int iNew;
- int iOld;
- int nNewCell;
+ zeroPage(pNew, pageFlags);
+ assemblePage(pNew, cntNew[i]-j, &apCell[j], &szCell[j]);
+ assert( pNew->nCell>0 || (nNew==1 && cntNew[0]==0) );
+ assert( pNew->nOverflow==0 );
- /* Verify condition (1): If cells are moving left, update iPg
- ** only after iPg-1 has already been updated. */
- assert( iPg==0 || cntOld[iPg-1]>=cntNew[iPg-1] || abDone[iPg-1] );
+ j = cntNew[i];
- /* Verify condition (2): If cells are moving right, update iPg
- ** only after iPg+1 has already been updated. */
- assert( cntNew[iPg]>=cntOld[iPg] || abDone[iPg+1] );
+ /* If the sibling page assembled above was not the right-most sibling,
+ ** insert a divider cell into the parent page.
+ */
+ assert( i<nNew-1 || j==nCell );
+ if( j<nCell ){
+ u8 *pCell;
+ u8 *pTemp;
+ int sz;
- if( iPg==0 ){
- iNew = iOld = 0;
- nNewCell = cntNew[0];
+ assert( j<nMaxCells );
+ pCell = apCell[j];
+ sz = szCell[j] + leafCorrection;
+ pTemp = &aOvflSpace[iOvflSpace];
+ if( !pNew->leaf ){
+ memcpy(&pNew->aData[8], pCell, 4);
+ }else if( leafData ){
+ /* If the tree is a leaf-data tree, and the siblings are leaves,
+ ** then there is no divider cell in apCell[]. Instead, the divider
+ ** cell consists of the integer key for the right-most cell of
+ ** the sibling-page assembled above only.
+ */
+ CellInfo info;
+ j--;
+ btreeParseCellPtr(pNew, apCell[j], &info);
+ pCell = pTemp;
+ sz = 4 + putVarint(&pCell[4], info.nKey);
+ pTemp = 0;
}else{
- iOld = iPg<nOld ? (cntOld[iPg-1] + !leafData) : b.nCell;
- iNew = cntNew[iPg-1] + !leafData;
- nNewCell = cntNew[iPg] - iNew;
+ pCell -= 4;
+ /* Obscure case for non-leaf-data trees: If the cell at pCell was
+ ** previously stored on a leaf node, and its reported size was 4
+ ** bytes, then it may actually be smaller than this
+ ** (see btreeParseCellPtr(), 4 bytes is the minimum size of
+ ** any cell). But it is important to pass the correct size to
+ ** insertCell(), so reparse the cell now.
+ **
+ ** Note that this can never happen in an SQLite data file, as all
+ ** cells are at least 4 bytes. It only happens in b-trees used
+ ** to evaluate "IN (SELECT ...)" and similar clauses.
+ */
+ if( szCell[j]==4 ){
+ assert(leafCorrection==4);
+ sz = cellSizePtr(pParent, pCell);
+ }
}
+ iOvflSpace += sz;
+ assert( sz<=pBt->maxLocal+23 );
+ assert( iOvflSpace <= (int)pBt->pageSize );
+ insertCell(pParent, nxDiv, pCell, sz, pTemp, pNew->pgno, &rc);
+ if( rc!=SQLITE_OK ) goto balance_cleanup;
+ assert( sqlite3PagerIswriteable(pParent->pDbPage) );
- rc = editPage(apNew[iPg], iOld, iNew, nNewCell, &b);
- if( rc ) goto balance_cleanup;
- abDone[iPg]++;
- apNew[iPg]->nFree = usableSpace-szNew[iPg];
- assert( apNew[iPg]->nOverflow==0 );
- assert( apNew[iPg]->nCell==nNewCell );
+ j++;
+ nxDiv++;
}
}
-
- /* All pages have been processed exactly once */
- assert( memcmp(abDone, "\01\01\01\01\01", nNew)==0 );
-
+ assert( j==nCell );
assert( nOld>0 );
assert( nNew>0 );
+ if( (pageFlags & PTF_LEAF)==0 ){
+ u8 *zChild = &apCopy[nOld-1]->aData[8];
+ memcpy(&apNew[nNew-1]->aData[8], zChild, 4);
+ }
if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){
/* The root page of the b-tree now contains no cells. The only sibling
@@ -62128,56 +56969,132 @@ static int balance_nonroot(
** sets all pointer-map entries corresponding to database image pages
** for which the pointer is stored within the content being copied.
**
- ** It is critical that the child page be defragmented before being
- ** copied into the parent, because if the parent is page 1 then it will
- ** by smaller than the child due to the database header, and so all the
- ** free space needs to be up front.
- */
- assert( nNew==1 || CORRUPT_DB );
- rc = defragmentPage(apNew[0]);
- testcase( rc!=SQLITE_OK );
+ ** The second assert below verifies that the child page is defragmented
+ ** (it must be, as it was just reconstructed using assemblePage()). This
+ ** is important if the parent page happens to be page 1 of the database
+ ** image. */
+ assert( nNew==1 );
assert( apNew[0]->nFree ==
- (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2)
- || rc!=SQLITE_OK
+ (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2)
);
copyNodeContent(apNew[0], pParent, &rc);
freePage(apNew[0], &rc);
- }else if( ISAUTOVACUUM && !leafCorrection ){
- /* Fix the pointer map entries associated with the right-child of each
- ** sibling page. All other pointer map entries have already been taken
- ** care of. */
- for(i=0; i<nNew; i++){
- u32 key = get4byte(&apNew[i]->aData[8]);
- ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc);
- }
- }
+ }else if( ISAUTOVACUUM ){
+ /* Fix the pointer-map entries for all the cells that were shifted around.
+ ** There are several different types of pointer-map entries that need to
+ ** be dealt with by this routine. Some of these have been set already, but
+ ** many have not. The following is a summary:
+ **
+ ** 1) The entries associated with new sibling pages that were not
+ ** siblings when this function was called. These have already
+ ** been set. We don't need to worry about old siblings that were
+ ** moved to the free-list - the freePage() code has taken care
+ ** of those.
+ **
+ ** 2) The pointer-map entries associated with the first overflow
+ ** page in any overflow chains used by new divider cells. These
+ ** have also already been taken care of by the insertCell() code.
+ **
+ ** 3) If the sibling pages are not leaves, then the child pages of
+ ** cells stored on the sibling pages may need to be updated.
+ **
+ ** 4) If the sibling pages are not internal intkey nodes, then any
+ ** overflow pages used by these cells may need to be updated
+ ** (internal intkey nodes never contain pointers to overflow pages).
+ **
+ ** 5) If the sibling pages are not leaves, then the pointer-map
+ ** entries for the right-child pages of each sibling may need
+ ** to be updated.
+ **
+ ** Cases 1 and 2 are dealt with above by other code. The next
+ ** block deals with cases 3 and 4 and the one after that, case 5. Since
+ ** setting a pointer map entry is a relatively expensive operation, this
+ ** code only sets pointer map entries for child or overflow pages that have
+ ** actually moved between pages. */
+ MemPage *pNew = apNew[0];
+ MemPage *pOld = apCopy[0];
+ int nOverflow = pOld->nOverflow;
+ int iNextOld = pOld->nCell + nOverflow;
+ int iOverflow = (nOverflow ? pOld->aiOvfl[0] : -1);
+ j = 0; /* Current 'old' sibling page */
+ k = 0; /* Current 'new' sibling page */
+ for(i=0; i<nCell; i++){
+ int isDivider = 0;
+ while( i==iNextOld ){
+ /* Cell i is the cell immediately following the last cell on old
+ ** sibling page j. If the siblings are not leaf pages of an
+ ** intkey b-tree, then cell i was a divider cell. */
+ assert( j+1 < ArraySize(apCopy) );
+ assert( j+1 < nOld );
+ pOld = apCopy[++j];
+ iNextOld = i + !leafData + pOld->nCell + pOld->nOverflow;
+ if( pOld->nOverflow ){
+ nOverflow = pOld->nOverflow;
+ iOverflow = i + !leafData + pOld->aiOvfl[0];
+ }
+ isDivider = !leafData;
+ }
+
+ assert(nOverflow>0 || iOverflow<i );
+ assert(nOverflow<2 || pOld->aiOvfl[0]==pOld->aiOvfl[1]-1);
+ assert(nOverflow<3 || pOld->aiOvfl[1]==pOld->aiOvfl[2]-1);
+ if( i==iOverflow ){
+ isDivider = 1;
+ if( (--nOverflow)>0 ){
+ iOverflow++;
+ }
+ }
+
+ if( i==cntNew[k] ){
+ /* Cell i is the cell immediately following the last cell on new
+ ** sibling page k. If the siblings are not leaf pages of an
+ ** intkey b-tree, then cell i is a divider cell. */
+ pNew = apNew[++k];
+ if( !leafData ) continue;
+ }
+ assert( j<nOld );
+ assert( k<nNew );
- assert( pParent->isInit );
- TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n",
- nOld, nNew, b.nCell));
+ /* If the cell was originally a divider cell (and is not now) or
+ ** an overflow cell, or if the cell was located on a different sibling
+ ** page before the balancing, then the pointer map entries associated
+ ** with any child or overflow pages need to be updated. */
+ if( isDivider || pOld->pgno!=pNew->pgno ){
+ if( !leafCorrection ){
+ ptrmapPut(pBt, get4byte(apCell[i]), PTRMAP_BTREE, pNew->pgno, &rc);
+ }
+ if( szCell[i]>pNew->minLocal ){
+ ptrmapPutOvflPtr(pNew, apCell[i], &rc);
+ }
+ }
+ }
- /* Free any old pages that were not reused as new pages.
- */
- for(i=nNew; i<nOld; i++){
- freePage(apOld[i], &rc);
- }
+ if( !leafCorrection ){
+ for(i=0; i<nNew; i++){
+ u32 key = get4byte(&apNew[i]->aData[8]);
+ ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc);
+ }
+ }
#if 0
- if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){
/* The ptrmapCheckPages() contains assert() statements that verify that
** all pointer map pages are set correctly. This is helpful while
** debugging. This is usually disabled because a corrupt database may
** cause an assert() statement to fail. */
ptrmapCheckPages(apNew, nNew);
ptrmapCheckPages(&pParent, 1);
- }
#endif
+ }
+
+ assert( pParent->isInit );
+ TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n",
+ nOld, nNew, nCell));
/*
** Cleanup before returning.
*/
balance_cleanup:
- sqlite3ScratchFree(b.apCell);
+ sqlite3ScratchFree(apCell);
for(i=0; i<nOld; i++){
releasePage(apOld[i]);
}
@@ -62308,7 +57225,7 @@ static int balance(BtCursor *pCur){
rc = sqlite3PagerWrite(pParent->pDbPage);
if( rc==SQLITE_OK ){
#ifndef SQLITE_OMIT_QUICKBALANCE
- if( pPage->intKeyLeaf
+ if( pPage->hasData
&& pPage->nOverflow==1
&& pPage->aiOvfl[0]==pPage->nCell
&& pParent->pgno!=1
@@ -62317,7 +57234,7 @@ static int balance(BtCursor *pCur){
/* Call balance_quick() to create a new sibling of pPage on which
** to store the overflow cell. balance_quick() inserts a new cell
** into pParent, which may cause pParent overflow. If this
- ** happens, the next iteration of the do-loop will balance pParent
+ ** happens, the next iteration of the do-loop will balance pParent
 ** using either balance_nonroot() or balance_deeper(). Until this
** happens, the overflow cell is stored in the aBalanceQuickSpace[]
** buffer.
@@ -62350,8 +57267,7 @@ static int balance(BtCursor *pCur){
** pSpace buffer passed to the latter call to balance_nonroot().
*/
u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize);
- rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1,
- pCur->hints&BTREE_BULKLOAD);
+ rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1, pCur->hints);
if( pFree ){
/* If pFree is not NULL, it points to the pSpace buffer used
** by a previous call to balance_nonroot(). Its contents are
@@ -62372,7 +57288,6 @@ static int balance(BtCursor *pCur){
/* The next iteration of the do-loop balances the parent page. */
releasePage(pPage);
pCur->iPage--;
- assert( pCur->iPage>=0 );
}
}while( rc==SQLITE_OK );
@@ -62396,7 +57311,7 @@ static int balance(BtCursor *pCur){
** MovetoUnpacked() to seek cursor pCur to (pKey, nKey) has already
** been performed. seekResult is the search result returned (a negative
** number if pCur points at an entry that is smaller than (pKey, nKey), or
-** a positive value if pCur points at an entry that is larger than
+** a positive value if pCur points at an entry that is larger than
** (pKey, nKey)).
**
** If the seekResult parameter is non-zero, then the caller guarantees that
@@ -62429,8 +57344,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
}
assert( cursorHoldsMutex(pCur) );
- assert( (pCur->curFlags & BTCF_WriteFlag)!=0
- && pBt->inTransaction==TRANS_WRITE
+ assert( pCur->wrFlag && pBt->inTransaction==TRANS_WRITE
&& (pBt->btsFlags & BTS_READ_ONLY)==0 );
assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
@@ -62452,28 +57366,17 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
** doing any work. To avoid thwarting these optimizations, it is important
** not to clear the cursor here.
*/
- if( pCur->curFlags & BTCF_Multiple ){
- rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
- if( rc ) return rc;
- }
+ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ if( rc ) return rc;
+ /* If this is an insert into a table b-tree, invalidate any incrblob
+ ** cursors open on the row being replaced (assuming this is a replace
+ ** operation - if it is not, the following is a no-op). */
if( pCur->pKeyInfo==0 ){
- assert( pKey==0 );
- /* If this is an insert into a table b-tree, invalidate any incrblob
- ** cursors open on the row being replaced */
invalidateIncrblobCursors(p, nKey, 0);
+ }
- /* If the cursor is currently on the last row and we are appending a
- ** new row onto the end, set the "loc" to avoid an unnecessary
- ** btreeMoveto() call */
- if( (pCur->curFlags&BTCF_ValidNKey)!=0 && nKey>0
- && pCur->info.nKey==nKey-1 ){
- loc = -1;
- }else if( loc==0 ){
- rc = sqlite3BtreeMovetoUnpacked(pCur, 0, nKey, appendBias, &loc);
- if( rc ) return rc;
- }
- }else if( loc==0 ){
+ if( !loc ){
rc = btreeMoveto(pCur, pKey, nKey, appendBias, &loc);
if( rc ) return rc;
}
@@ -62487,11 +57390,12 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
pCur->pgnoRoot, nKey, nData, pPage->pgno,
loc==0 ? "overwrite" : "new entry"));
assert( pPage->isInit );
+ allocateTempSpace(pBt);
newCell = pBt->pTmpSpace;
- assert( newCell!=0 );
+ if( newCell==0 ) return SQLITE_NOMEM;
rc = fillInCell(pPage, newCell, pKey, nKey, pData, nData, nZero, &szNew);
if( rc ) goto end_insert;
- assert( szNew==pPage->xCellSize(pPage, newCell) );
+ assert( szNew==cellSizePtr(pPage, newCell) );
assert( szNew <= MX_CELL_SIZE(pBt) );
idx = pCur->aiIdx[pCur->iPage];
if( loc==0 ){
@@ -62505,7 +57409,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
if( !pPage->leaf ){
memcpy(newCell, oldCell, 4);
}
- rc = clearCell(pPage, oldCell, &szOld);
+ szOld = cellSizePtr(pPage, oldCell);
+ rc = clearCell(pPage, oldCell);
dropCell(pPage, idx, szOld, &rc);
if( rc ) goto end_insert;
}else if( loc<0 && pPage->nCell>0 ){
@@ -62519,7 +57424,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
/* If no error has occurred and pPage has an overflow cell, call balance()
** to redistribute the cells within the tree. Since balance() may move
- ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey
+ ** the cursor, zero the BtCursor.info.nSize and BtCursor.validNKey
** variables.
**
** Previous versions of SQLite called moveToRoot() to move the cursor
@@ -62538,8 +57443,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
** row without seeking the cursor. This can be a big performance boost.
*/
pCur->info.nSize = 0;
+ pCur->validNKey = 0;
if( rc==SQLITE_OK && pPage->nOverflow ){
- pCur->curFlags &= ~(BTCF_ValidNKey);
rc = balance(pCur);
/* Must make sure nOverflow is reset to zero even if the balance()
@@ -62556,15 +57461,10 @@ end_insert:
}
/*
-** Delete the entry that the cursor is pointing to.
-**
-** If the second parameter is zero, then the cursor is left pointing at an
-** arbitrary location after the delete. If it is non-zero, then the cursor
-** is left in a state such that the next call to BtreeNext() or BtreePrev()
-** moves it to the same row as it would if the call to BtreeDelete() had
-** been omitted.
+** Delete the entry that the cursor is pointing to. The cursor
+** is left pointing at an arbitrary location.
*/
-SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, int bPreserve){
+SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur){
Btree *p = pCur->pBtree;
BtShared *pBt = p->pBt;
int rc; /* Return code */
@@ -62572,17 +57472,19 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, int bPreserve){
unsigned char *pCell; /* Pointer to cell to delete */
int iCellIdx; /* Index of cell to delete */
int iCellDepth; /* Depth of node containing pCell */
- u16 szCell; /* Size of the cell being deleted */
- int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */
assert( cursorHoldsMutex(pCur) );
assert( pBt->inTransaction==TRANS_WRITE );
assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );
- assert( pCur->curFlags & BTCF_WriteFlag );
+ assert( pCur->wrFlag );
assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
assert( !hasReadConflicts(p, pCur->pgnoRoot) );
- assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
- assert( pCur->eState==CURSOR_VALID );
+
+ if( NEVER(pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell)
+ || NEVER(pCur->eState!=CURSOR_VALID)
+ ){
+ return SQLITE_ERROR; /* Something has gone awry. */
+ }
iCellDepth = pCur->iPage;
iCellIdx = pCur->aiIdx[iCellDepth];
@@ -62597,17 +57499,18 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, int bPreserve){
** sub-tree headed by the child page of the cell being deleted. This makes
** balancing the tree following the delete operation easier. */
if( !pPage->leaf ){
- int notUsed = 0;
+ int notUsed;
rc = sqlite3BtreePrevious(pCur, &notUsed);
if( rc ) return rc;
}
/* Save the positions of any other cursors open on this table before
- ** making any modifications. */
- if( pCur->curFlags & BTCF_Multiple ){
- rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
- if( rc ) return rc;
- }
+ ** making any modifications. Make the page containing the entry to be
+ ** deleted writable. Then free any overflow pages associated with the
+ ** entry and finally remove the cell itself from within the page.
+ */
+ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ if( rc ) return rc;
/* If this is a delete operation to remove a row from a table b-tree,
** invalidate any incrblob cursors open on the row being deleted. */
@@ -62615,35 +57518,10 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, int bPreserve){
invalidateIncrblobCursors(p, pCur->info.nKey, 0);
}
- /* If the bPreserve flag is set to true, then the cursor position must
- ** be preserved following this delete operation. If the current delete
- ** will cause a b-tree rebalance, then this is done by saving the cursor
- ** key and leaving the cursor in CURSOR_REQUIRESEEK state before
- ** returning.
- **
- ** Or, if the current delete will not cause a rebalance, then the cursor
- ** will be left in CURSOR_SKIPNEXT state pointing to the entry immediately
- ** before or after the deleted entry. In this case set bSkipnext to true. */
- if( bPreserve ){
- if( !pPage->leaf
- || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3)
- ){
- /* A b-tree rebalance will be required after deleting this entry.
- ** Save the cursor key. */
- rc = saveCursorKey(pCur);
- if( rc ) return rc;
- }else{
- bSkipnext = 1;
- }
- }
-
- /* Make the page containing the entry to be deleted writable. Then free any
- ** overflow pages associated with the entry and finally remove the cell
- ** itself from within the page. */
rc = sqlite3PagerWrite(pPage->pDbPage);
if( rc ) return rc;
- rc = clearCell(pPage, pCell, &szCell);
- dropCell(pPage, iCellIdx, szCell, &rc);
+ rc = clearCell(pPage, pCell);
+ dropCell(pPage, iCellIdx, cellSizePtr(pPage, pCell), &rc);
if( rc ) return rc;
/* If the cell deleted was not located on a leaf page, then the cursor
@@ -62658,11 +57536,12 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, int bPreserve){
unsigned char *pTmp;
pCell = findCell(pLeaf, pLeaf->nCell-1);
- if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT;
- nCell = pLeaf->xCellSize(pLeaf, pCell);
+ nCell = cellSizePtr(pLeaf, pCell);
assert( MX_CELL_SIZE(pBt) >= nCell );
+
+ allocateTempSpace(pBt);
pTmp = pBt->pTmpSpace;
- assert( pTmp!=0 );
+
rc = sqlite3PagerWrite(pLeaf->pDbPage);
insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc);
dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc);
@@ -62693,23 +57572,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, int bPreserve){
}
if( rc==SQLITE_OK ){
- if( bSkipnext ){
- assert( bPreserve && pCur->iPage==iCellDepth );
- assert( pPage==pCur->apPage[pCur->iPage] );
- assert( (pPage->nCell>0 || CORRUPT_DB) && iCellIdx<=pPage->nCell );
- pCur->eState = CURSOR_SKIPNEXT;
- if( iCellIdx>=pPage->nCell ){
- pCur->skipNext = -1;
- pCur->aiIdx[iCellDepth] = pPage->nCell-1;
- }else{
- pCur->skipNext = 1;
- }
- }else{
- rc = moveToRoot(pCur);
- if( bPreserve ){
- pCur->eState = CURSOR_REQUIRESEEK;
- }
- }
+ moveToRoot(pCur);
}
return rc;
}
@@ -62767,8 +57630,7 @@ static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){
pgnoRoot==PENDING_BYTE_PAGE(pBt) ){
pgnoRoot++;
}
- assert( pgnoRoot>=3 || CORRUPT_DB );
- testcase( pgnoRoot<3 );
+ assert( pgnoRoot>=3 );
/* Allocate a page. The page that currently resides at pgnoRoot will
** be moved to the allocated page (unless the allocated page happens
@@ -62890,46 +57752,37 @@ static int clearDatabasePage(
int rc;
unsigned char *pCell;
int i;
- int hdr;
- u16 szCell;
assert( sqlite3_mutex_held(pBt->mutex) );
if( pgno>btreePagecount(pBt) ){
return SQLITE_CORRUPT_BKPT;
}
- rc = getAndInitPage(pBt, pgno, &pPage, 0, 0);
+
+ rc = getAndInitPage(pBt, pgno, &pPage, 0);
if( rc ) return rc;
- if( pPage->bBusy ){
- rc = SQLITE_CORRUPT_BKPT;
- goto cleardatabasepage_out;
- }
- pPage->bBusy = 1;
- hdr = pPage->hdrOffset;
for(i=0; i<pPage->nCell; i++){
pCell = findCell(pPage, i);
if( !pPage->leaf ){
rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange);
if( rc ) goto cleardatabasepage_out;
}
- rc = clearCell(pPage, pCell, &szCell);
+ rc = clearCell(pPage, pCell);
if( rc ) goto cleardatabasepage_out;
}
if( !pPage->leaf ){
- rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange);
+ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[8]), 1, pnChange);
if( rc ) goto cleardatabasepage_out;
}else if( pnChange ){
- assert( pPage->intKey || CORRUPT_DB );
- testcase( !pPage->intKey );
+ assert( pPage->intKey );
*pnChange += pPage->nCell;
}
if( freePageFlag ){
freePage(pPage, &rc);
}else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){
- zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF);
+ zeroPage(pPage, pPage->aData[0] | PTF_LEAF);
}
cleardatabasepage_out:
- pPage->bBusy = 0;
releasePage(pPage);
return rc;
}
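/* Illustrative sketch (editorial, not part of the changeset): the traversal
** shape used by clearDatabasePage() above. Interior cells begin with a
** 4-byte child page number and the page header stores the right-most child
** pointer 8 bytes past the header offset; leaf pages only contribute their
** cell count. A hypothetical count-only walk using the same helpers:
*/
static int demoCountRows(BtShared *pBt, Pgno pgno, int *pnRow){
  MemPage *pPage;
  int i;
  int rc = getAndInitPage(pBt, pgno, &pPage, 0);
  if( rc ) return rc;
  if( pPage->leaf ){
    *pnRow += pPage->nCell;
  }else{
    for(i=0; i<pPage->nCell && rc==SQLITE_OK; i++){
      rc = demoCountRows(pBt, get4byte(findCell(pPage, i)), pnRow);
    }
    if( rc==SQLITE_OK ){
      rc = demoCountRows(pBt, get4byte(&pPage->aData[pPage->hdrOffset+8]), pnRow);
    }
  }
  releasePage(pPage);
  return rc;
}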
@@ -62967,15 +57820,6 @@ SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){
}
/*
-** Delete all information from the single table that pCur is open on.
-**
-** This routine only works for pCur on an ephemeral table.
-*/
-SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){
- return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0);
-}
-
-/*
** Erase all information in a table and add the root of the table to
** the freelist. Except, the root of the principal table (the one on
** page 1) is never added to the freelist.
@@ -63119,13 +57963,6 @@ SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){
** The schema layer numbers meta values differently. At the schema
** layer (and the SetCookie and ReadCookie opcodes) the number of
** free pages is not visible. So Cookie[0] is the same as Meta[1].
-**
-** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead
-** of reading the value out of the header, it instead loads the "DataVersion"
-** from the pager. The BTREE_DATA_VERSION value is not actually stored in the
-** database file. It is a number computed by the pager. But its access
-** pattern is the same as header meta values, and so it is convenient to
-** read it from this routine.
*/
SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){
BtShared *pBt = p->pBt;
@@ -63136,11 +57973,7 @@ SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){
assert( pBt->pPage1 );
assert( idx>=0 && idx<=15 );
- if( idx==BTREE_DATA_VERSION ){
- *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion;
- }else{
- *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]);
- }
+ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]);
/* If auto-vacuum is disabled in this build and this is an auto-vacuum
** database, mark the database as read-only. */
@@ -63231,7 +58064,7 @@ SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){
if( pCur->iPage==0 ){
/* All pages of the b-tree have been visited. Return successfully. */
*pnEntry = nEntry;
- return moveToRoot(pCur);
+ return SQLITE_OK;
}
moveToParent(pCur);
}while ( pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell );
@@ -63270,6 +58103,7 @@ SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){
*/
static void checkAppendMsg(
IntegrityCk *pCheck,
+ char *zMsg1,
const char *zFormat,
...
){
@@ -63281,8 +58115,8 @@ static void checkAppendMsg(
if( pCheck->errMsg.nChar ){
sqlite3StrAccumAppend(&pCheck->errMsg, "\n", 1);
}
- if( pCheck->zPfx ){
- sqlite3XPrintf(&pCheck->errMsg, 0, pCheck->zPfx, pCheck->v1, pCheck->v2);
+ if( zMsg1 ){
+ sqlite3StrAccumAppend(&pCheck->errMsg, zMsg1, -1);
}
sqlite3VXPrintf(&pCheck->errMsg, 1, zFormat, ap);
va_end(ap);
@@ -63315,19 +58149,19 @@ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
/*
** Add 1 to the reference count for page iPage. If this is the second
** reference to the page, add an error message to pCheck->zErrMsg.
-** Return 1 if there are 2 or more references to the page and 0 if
+** Return 1 if there are 2 or more references to the page and 0 if
** this is the first reference to the page.
**
** Also check that the page number is in bounds.
*/
-static int checkRef(IntegrityCk *pCheck, Pgno iPage){
+static int checkRef(IntegrityCk *pCheck, Pgno iPage, char *zContext){
if( iPage==0 ) return 1;
if( iPage>pCheck->nPage ){
- checkAppendMsg(pCheck, "invalid page number %d", iPage);
+ checkAppendMsg(pCheck, zContext, "invalid page number %d", iPage);
return 1;
}
if( getPageReferenced(pCheck, iPage) ){
- checkAppendMsg(pCheck, "2nd reference to page %d", iPage);
+ checkAppendMsg(pCheck, zContext, "2nd reference to page %d", iPage);
return 1;
}
setPageReferenced(pCheck, iPage);
@@ -63344,7 +58178,8 @@ static void checkPtrmap(
IntegrityCk *pCheck, /* Integrity check context */
Pgno iChild, /* Child page number */
u8 eType, /* Expected pointer map type */
- Pgno iParent /* Expected pointer map parent page number */
+ Pgno iParent, /* Expected pointer map parent page number */
+ char *zContext /* Context description (used for error msg) */
){
int rc;
u8 ePtrmapType;
@@ -63353,12 +58188,12 @@ static void checkPtrmap(
rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent);
if( rc!=SQLITE_OK ){
if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1;
- checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild);
+ checkAppendMsg(pCheck, zContext, "Failed to read ptrmap key=%d", iChild);
return;
}
if( ePtrmapType!=eType || iPtrmapParent!=iParent ){
- checkAppendMsg(pCheck,
+ checkAppendMsg(pCheck, zContext,
"Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)",
iChild, eType, iParent, ePtrmapType, iPtrmapParent);
}
@@ -63373,7 +58208,8 @@ static void checkList(
IntegrityCk *pCheck, /* Integrity checking context */
int isFreeList, /* True for a freelist. False for overflow page list */
int iPage, /* Page number for first page in the list */
- int N /* Expected number of pages in the list */
+ int N, /* Expected number of pages in the list */
+ char *zContext /* Context for error messages */
){
int i;
int expected = N;
@@ -63382,14 +58218,14 @@ static void checkList(
DbPage *pOvflPage;
unsigned char *pOvflData;
if( iPage<1 ){
- checkAppendMsg(pCheck,
+ checkAppendMsg(pCheck, zContext,
"%d of %d pages missing from overflow list starting at %d",
N+1, expected, iFirst);
break;
}
- if( checkRef(pCheck, iPage) ) break;
+ if( checkRef(pCheck, iPage, zContext) ) break;
if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage) ){
- checkAppendMsg(pCheck, "failed to get page %d", iPage);
+ checkAppendMsg(pCheck, zContext, "failed to get page %d", iPage);
break;
}
pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage);
@@ -63397,11 +58233,11 @@ static void checkList(
int n = get4byte(&pOvflData[4]);
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pCheck->pBt->autoVacuum ){
- checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0);
+ checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0, zContext);
}
#endif
if( n>(int)pCheck->pBt->usableSize/4-2 ){
- checkAppendMsg(pCheck,
+ checkAppendMsg(pCheck, zContext,
"freelist leaf count too big on page %d", iPage);
N--;
}else{
@@ -63409,10 +58245,10 @@ static void checkList(
Pgno iFreePage = get4byte(&pOvflData[8+i*4]);
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pCheck->pBt->autoVacuum ){
- checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0);
+ checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0, zContext);
}
#endif
- checkRef(pCheck, iFreePage);
+ checkRef(pCheck, iFreePage, zContext);
}
N -= n;
}
@@ -63425,71 +58261,16 @@ static void checkList(
*/
if( pCheck->pBt->autoVacuum && N>0 ){
i = get4byte(pOvflData);
- checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage);
+ checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage, zContext);
}
}
#endif
iPage = get4byte(pOvflData);
sqlite3PagerUnref(pOvflPage);
-
- if( isFreeList && N<(iPage!=0) ){
- checkAppendMsg(pCheck, "free-page count in header is too small");
- }
}
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
-/*
-** An implementation of a min-heap.
-**
-** aHeap[0] is the number of elements on the heap. aHeap[1] is the
-** root element. The daughter nodes of aHeap[N] are aHeap[N*2]
-** and aHeap[N*2+1].
-**
-** The heap property is this: Every node is less than or equal to both
-** of its daughter nodes. A consequence of the heap property is that the
-** root node aHeap[1] is always the minimum value currently in the heap.
-**
-** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto
-** the heap, preserving the heap property. The btreeHeapPull() routine
-** removes the root element from the heap (the minimum value in the heap)
-** and then moves other nodes around as necessary to preserve the heap
-** property.
-**
-** This heap is used for cell overlap and coverage testing. Each u32
-** entry represents the span of a cell or freeblock on a btree page.
-** The upper 16 bits are the index of the first byte of a range and the
-** lower 16 bits are the index of the last byte of that range.
-*/
-static void btreeHeapInsert(u32 *aHeap, u32 x){
- u32 j, i = ++aHeap[0];
- aHeap[i] = x;
- while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){
- x = aHeap[j];
- aHeap[j] = aHeap[i];
- aHeap[i] = x;
- i = j;
- }
-}
-static int btreeHeapPull(u32 *aHeap, u32 *pOut){
- u32 j, i, x;
- if( (x = aHeap[0])==0 ) return 0;
- *pOut = aHeap[1];
- aHeap[1] = aHeap[x];
- aHeap[x] = 0xffffffff;
- aHeap[0]--;
- i = 1;
- while( (j = i*2)<=aHeap[0] ){
- if( aHeap[j]>aHeap[j+1] ) j++;
- if( aHeap[i]<aHeap[j] ) break;
- x = aHeap[i];
- aHeap[i] = aHeap[j];
- aHeap[j] = x;
- i = j;
- }
- return 1;
-}
-
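/* Illustrative sketch (editorial, not part of the changeset): the heap
** removed above packs each cell or freeblock span into one u32 as
** (first_byte<<16)|last_byte. Pulling entries in ascending order and
** comparing each start against the previous end exposes overlaps, and any
** gap between consecutive spans counts as fragmentation. A minimal,
** hypothetical helper built on the btreeHeapPull() shown above:
*/
static int demoCheckCoverage(u32 *aHeap, u32 contentOffset, u32 usableSize){
  u32 x, prev = contentOffset - 1;        /* implied entry for the header area */
  int nFrag = 0;
  while( btreeHeapPull(aHeap, &x) ){
    if( (prev&0xffff)>=(x>>16) ) return -1;     /* overlap => corruption */
    nFrag += (x>>16) - (prev&0xffff) - 1;       /* gap => fragmented bytes */
    prev = x;
  }
  return nFrag + usableSize - (prev&0xffff) - 1;  /* gap after the last span */
}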
#ifndef SQLITE_OMIT_INTEGRITY_CHECK
/*
** Do various sanity checks on a single page of a tree. Return
@@ -63500,257 +58281,221 @@ static int btreeHeapPull(u32 *aHeap, u32 *pOut){
**
** 1. Make sure that cells and freeblocks do not overlap
** but combine to completely cover the page.
-** 2. Make sure integer cell keys are in order.
-** 3. Check the integrity of overflow pages.
-** 4. Recursively call checkTreePage on all children.
-** 5. Verify that the depth of all children is the same.
+** NO 2. Make sure cell keys are in order.
+** NO 3. Make sure no key is less than or equal to zLowerBound.
+** NO 4. Make sure no key is greater than or equal to zUpperBound.
+** 5. Check the integrity of overflow pages.
+** 6. Recursively call checkTreePage on all children.
+** 7. Verify that the depth of all children is the same.
+** 8. Make sure this page is at least 33% full or else it is
+** the root of the tree.
*/
static int checkTreePage(
IntegrityCk *pCheck, /* Context for the sanity check */
int iPage, /* Page number of the page to check */
- i64 *piMinKey, /* Write minimum integer primary key here */
- i64 maxKey /* Error if integer primary key greater than this */
+ char *zParentContext, /* Parent context */
+ i64 *pnParentMinKey,
+ i64 *pnParentMaxKey
){
- MemPage *pPage = 0; /* The page being analyzed */
- int i; /* Loop counter */
- int rc; /* Result code from subroutine call */
- int depth = -1, d2; /* Depth of a subtree */
- int pgno; /* Page number */
- int nFrag; /* Number of fragmented bytes on the page */
- int hdr; /* Offset to the page header */
- int cellStart; /* Offset to the start of the cell pointer array */
- int nCell; /* Number of cells */
- int doCoverageCheck = 1; /* True if cell coverage checking should be done */
- int keyCanBeEqual = 1; /* True if IPK can be equal to maxKey
- ** False if IPK must be strictly less than maxKey */
- u8 *data; /* Page content */
- u8 *pCell; /* Cell content */
- u8 *pCellIdx; /* Next element of the cell pointer array */
- BtShared *pBt; /* The BtShared object that owns pPage */
- u32 pc; /* Address of a cell */
- u32 usableSize; /* Usable size of the page */
- u32 contentOffset; /* Offset to the start of the cell content area */
- u32 *heap = 0; /* Min-heap used for checking cell coverage */
- u32 x, prev = 0; /* Next and previous entry on the min-heap */
- const char *saved_zPfx = pCheck->zPfx;
- int saved_v1 = pCheck->v1;
- int saved_v2 = pCheck->v2;
- u8 savedIsInit = 0;
+ MemPage *pPage;
+ int i, rc, depth, d2, pgno, cnt;
+ int hdr, cellStart;
+ int nCell;
+ u8 *data;
+ BtShared *pBt;
+ int usableSize;
+ char zContext[100];
+ char *hit = 0;
+ i64 nMinKey = 0;
+ i64 nMaxKey = 0;
+
+ sqlite3_snprintf(sizeof(zContext), zContext, "Page %d: ", iPage);
/* Check that the page exists
*/
pBt = pCheck->pBt;
usableSize = pBt->usableSize;
if( iPage==0 ) return 0;
- if( checkRef(pCheck, iPage) ) return 0;
- pCheck->zPfx = "Page %d: ";
- pCheck->v1 = iPage;
+ if( checkRef(pCheck, iPage, zParentContext) ) return 0;
if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){
- checkAppendMsg(pCheck,
+ checkAppendMsg(pCheck, zContext,
"unable to get the page. error code=%d", rc);
- goto end_of_check;
+ return 0;
}
/* Clear MemPage.isInit to make sure the corruption detection code in
** btreeInitPage() is executed. */
- savedIsInit = pPage->isInit;
pPage->isInit = 0;
if( (rc = btreeInitPage(pPage))!=0 ){
assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */
- checkAppendMsg(pCheck,
+ checkAppendMsg(pCheck, zContext,
"btreeInitPage() returns error code %d", rc);
- goto end_of_check;
- }
- data = pPage->aData;
- hdr = pPage->hdrOffset;
-
- /* Set up for cell analysis */
- pCheck->zPfx = "On tree page %d cell %d: ";
- contentOffset = get2byteNotZero(&data[hdr+5]);
- assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */
-
- /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
- ** number of cells on the page. */
- nCell = get2byte(&data[hdr+3]);
- assert( pPage->nCell==nCell );
-
- /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page
- ** immediately follows the b-tree page header. */
- cellStart = hdr + 12 - 4*pPage->leaf;
- assert( pPage->aCellIdx==&data[cellStart] );
- pCellIdx = &data[cellStart + 2*(nCell-1)];
-
- if( !pPage->leaf ){
- /* Analyze the right-child page of internal pages */
- pgno = get4byte(&data[hdr+8]);
-#ifndef SQLITE_OMIT_AUTOVACUUM
- if( pBt->autoVacuum ){
- pCheck->zPfx = "On page %d at right child: ";
- checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
- }
-#endif
- depth = checkTreePage(pCheck, pgno, &maxKey, maxKey);
- keyCanBeEqual = 0;
- }else{
- /* For leaf pages, the coverage check will occur in the same loop
- ** as the other cell checks, so initialize the heap. */
- heap = pCheck->heap;
- heap[0] = 0;
+ releasePage(pPage);
+ return 0;
}
- /* EVIDENCE-OF: R-02776-14802 The cell pointer array consists of K 2-byte
- ** integer offsets to the cell contents. */
- for(i=nCell-1; i>=0 && pCheck->mxErr; i--){
+ /* Check out all the cells.
+ */
+ depth = 0;
+ for(i=0; i<pPage->nCell && pCheck->mxErr; i++){
+ u8 *pCell;
+ u32 sz;
CellInfo info;
- /* Check cell size */
- pCheck->v2 = i;
- assert( pCellIdx==&data[cellStart + i*2] );
- pc = get2byteAligned(pCellIdx);
- pCellIdx -= 2;
- if( pc<contentOffset || pc>usableSize-4 ){
- checkAppendMsg(pCheck, "Offset %d out of range %d..%d",
- pc, contentOffset, usableSize-4);
- doCoverageCheck = 0;
- continue;
- }
- pCell = &data[pc];
- pPage->xParseCell(pPage, pCell, &info);
- if( pc+info.nSize>usableSize ){
- checkAppendMsg(pCheck, "Extends off end of page");
- doCoverageCheck = 0;
- continue;
- }
-
- /* Check for integer primary key out of range */
- if( pPage->intKey ){
- if( keyCanBeEqual ? (info.nKey > maxKey) : (info.nKey >= maxKey) ){
- checkAppendMsg(pCheck, "Rowid %lld out of order", info.nKey);
+ /* Check payload overflow pages
+ */
+ sqlite3_snprintf(sizeof(zContext), zContext,
+ "On tree page %d cell %d: ", iPage, i);
+ pCell = findCell(pPage,i);
+ btreeParseCellPtr(pPage, pCell, &info);
+ sz = info.nData;
+ if( !pPage->intKey ) sz += (int)info.nKey;
+ /* For intKey pages, check that the keys are in order.
+ */
+ else if( i==0 ) nMinKey = nMaxKey = info.nKey;
+ else{
+ if( info.nKey <= nMaxKey ){
+ checkAppendMsg(pCheck, zContext,
+ "Rowid %lld out of order (previous was %lld)", info.nKey, nMaxKey);
}
- maxKey = info.nKey;
+ nMaxKey = info.nKey;
}
-
- /* Check the content overflow list */
- if( info.nPayload>info.nLocal ){
- int nPage; /* Number of pages on the overflow chain */
- Pgno pgnoOvfl; /* First page of the overflow chain */
- assert( pc + info.iOverflow <= usableSize );
- nPage = (info.nPayload - info.nLocal + usableSize - 5)/(usableSize - 4);
- pgnoOvfl = get4byte(&pCell[info.iOverflow]);
+ assert( sz==info.nPayload );
+ if( (sz>info.nLocal)
+ && (&pCell[info.iOverflow]<=&pPage->aData[pBt->usableSize])
+ ){
+ int nPage = (sz - info.nLocal + usableSize - 5)/(usableSize - 4);
+ Pgno pgnoOvfl = get4byte(&pCell[info.iOverflow]);
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pBt->autoVacuum ){
- checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage);
+ checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage, zContext);
}
#endif
- checkList(pCheck, 0, pgnoOvfl, nPage);
+ checkList(pCheck, 0, pgnoOvfl, nPage, zContext);
}
+ /* Check sanity of left child page.
+ */
if( !pPage->leaf ){
- /* Check sanity of left child page for internal pages */
pgno = get4byte(pCell);
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pBt->autoVacuum ){
- checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, zContext);
}
#endif
- d2 = checkTreePage(pCheck, pgno, &maxKey, maxKey);
- keyCanBeEqual = 0;
- if( d2!=depth ){
- checkAppendMsg(pCheck, "Child page depth differs");
- depth = d2;
+ d2 = checkTreePage(pCheck, pgno, zContext, &nMinKey, i==0 ? NULL : &nMaxKey);
+ if( i>0 && d2!=depth ){
+ checkAppendMsg(pCheck, zContext, "Child page depth differs");
+ }
+ depth = d2;
+ }
+ }
+
+ if( !pPage->leaf ){
+ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+ sqlite3_snprintf(sizeof(zContext), zContext,
+ "On page %d at right child: ", iPage);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+ if( pBt->autoVacuum ){
+ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, zContext);
+ }
+#endif
+ checkTreePage(pCheck, pgno, zContext, NULL, !pPage->nCell ? NULL : &nMaxKey);
+ }
+
+ /* For intKey leaf pages, check that the min/max keys are in order
+ ** with any left/parent/right pages.
+ */
+ if( pPage->leaf && pPage->intKey ){
+ /* if we are a left child page */
+ if( pnParentMinKey ){
+ /* if we are the left most child page */
+ if( !pnParentMaxKey ){
+ if( nMaxKey > *pnParentMinKey ){
+ checkAppendMsg(pCheck, zContext,
+ "Rowid %lld out of order (max larger than parent min of %lld)",
+ nMaxKey, *pnParentMinKey);
+ }
+ }else{
+ if( nMinKey <= *pnParentMinKey ){
+ checkAppendMsg(pCheck, zContext,
+ "Rowid %lld out of order (min less than parent min of %lld)",
+ nMinKey, *pnParentMinKey);
+ }
+ if( nMaxKey > *pnParentMaxKey ){
+ checkAppendMsg(pCheck, zContext,
+ "Rowid %lld out of order (max larger than parent max of %lld)",
+ nMaxKey, *pnParentMaxKey);
+ }
+ *pnParentMinKey = nMaxKey;
+ }
+ /* else if we're a right child page */
+ } else if( pnParentMaxKey ){
+ if( nMinKey <= *pnParentMaxKey ){
+ checkAppendMsg(pCheck, zContext,
+ "Rowid %lld out of order (min less than parent max of %lld)",
+ nMinKey, *pnParentMaxKey);
}
- }else{
- /* Populate the coverage-checking heap for leaf pages */
- btreeHeapInsert(heap, (pc<<16)|(pc+info.nSize-1));
}
}
- *piMinKey = maxKey;
/* Check for complete coverage of the page
*/
- pCheck->zPfx = 0;
- if( doCoverageCheck && pCheck->mxErr>0 ){
- /* For leaf pages, the min-heap has already been initialized and the
- ** cells have already been inserted. But for internal pages, that has
- ** not yet been done, so do it now */
- if( !pPage->leaf ){
- heap = pCheck->heap;
- heap[0] = 0;
- for(i=nCell-1; i>=0; i--){
- u32 size;
- pc = get2byteAligned(&data[cellStart+i*2]);
- size = pPage->xCellSize(pPage, &data[pc]);
- btreeHeapInsert(heap, (pc<<16)|(pc+size-1));
+ data = pPage->aData;
+ hdr = pPage->hdrOffset;
+ hit = sqlite3PageMalloc( pBt->pageSize );
+ if( hit==0 ){
+ pCheck->mallocFailed = 1;
+ }else{
+ int contentOffset = get2byteNotZero(&data[hdr+5]);
+ assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */
+ memset(hit+contentOffset, 0, usableSize-contentOffset);
+ memset(hit, 1, contentOffset);
+ nCell = get2byte(&data[hdr+3]);
+ cellStart = hdr + 12 - 4*pPage->leaf;
+ for(i=0; i<nCell; i++){
+ int pc = get2byte(&data[cellStart+i*2]);
+ u32 size = 65536;
+ int j;
+ if( pc<=usableSize-4 ){
+ size = cellSizePtr(pPage, &data[pc]);
+ }
+ if( (int)(pc+size-1)>=usableSize ){
+ checkAppendMsg(pCheck, 0,
+ "Corruption detected in cell %d on page %d",i,iPage);
+ }else{
+ for(j=pc+size-1; j>=pc; j--) hit[j]++;
}
}
- /* Add the freeblocks to the min-heap
- **
- ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header
- ** is the offset of the first freeblock, or zero if there are no
- ** freeblocks on the page.
- */
i = get2byte(&data[hdr+1]);
while( i>0 ){
int size, j;
- assert( (u32)i<=usableSize-4 ); /* Enforced by btreeInitPage() */
+ assert( i<=usableSize-4 ); /* Enforced by btreeInitPage() */
size = get2byte(&data[i+2]);
- assert( (u32)(i+size)<=usableSize ); /* Enforced by btreeInitPage() */
- btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1));
- /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a
- ** big-endian integer which is the offset in the b-tree page of the next
- ** freeblock in the chain, or zero if the freeblock is the last on the
- ** chain. */
+ assert( i+size<=usableSize ); /* Enforced by btreeInitPage() */
+ for(j=i+size-1; j>=i; j--) hit[j]++;
j = get2byte(&data[i]);
- /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
- ** increasing offset. */
assert( j==0 || j>i+size ); /* Enforced by btreeInitPage() */
- assert( (u32)j<=usableSize-4 ); /* Enforced by btreeInitPage() */
+ assert( j<=usableSize-4 ); /* Enforced by btreeInitPage() */
i = j;
}
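/* Illustrative sketch (editorial, not part of the changeset): the loop
** above walks the freeblock chain. Offset hdr+1 of the page header holds
** the offset of the first freeblock; each freeblock stores the offset of
** the next one in its first two bytes and its own size in the following
** two, with offsets strictly increasing. A hypothetical helper that only
** totals the free bytes on a page:
*/
static int demoSumFreeblocks(const unsigned char *data, int hdr){
  int i = get2byte(&data[hdr+1]);   /* first freeblock, or 0 if none */
  int nFree = 0;
  while( i>0 ){
    nFree += get2byte(&data[i+2]);  /* 2-byte size stored at offset i+2 */
    i = get2byte(&data[i]);         /* 2-byte offset of the next freeblock */
  }
  return nFree;
}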
- /* Analyze the min-heap looking for overlap between cells and/or
- ** freeblocks, and counting the number of untracked bytes in nFrag.
- **
- ** Each min-heap entry is of the form: (start_address<<16)|end_address.
-    ** There is an implied first entry that covers the page header, the cell
- ** pointer index, and the gap between the cell pointer index and the start
- ** of cell content.
- **
- ** The loop below pulls entries from the min-heap in order and compares
- ** the start_address against the previous end_address. If there is an
- ** overlap, that means bytes are used multiple times. If there is a gap,
- ** that gap is added to the fragmentation count.
- */
- nFrag = 0;
- prev = contentOffset - 1; /* Implied first min-heap entry */
- while( btreeHeapPull(heap,&x) ){
- if( (prev&0xffff)>=(x>>16) ){
- checkAppendMsg(pCheck,
- "Multiple uses for byte %u of page %d", x>>16, iPage);
+ for(i=cnt=0; i<usableSize; i++){
+ if( hit[i]==0 ){
+ cnt++;
+ }else if( hit[i]>1 ){
+ checkAppendMsg(pCheck, 0,
+ "Multiple uses for byte %d of page %d", i, iPage);
break;
- }else{
- nFrag += (x>>16) - (prev&0xffff) - 1;
- prev = x;
}
}
- nFrag += usableSize - (prev&0xffff) - 1;
- /* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments
- ** is stored in the fifth field of the b-tree page header.
- ** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the
- ** number of fragmented free bytes within the cell content area.
- */
- if( heap[0]==0 && nFrag!=data[hdr+7] ){
- checkAppendMsg(pCheck,
+ if( cnt!=data[hdr+7] ){
+ checkAppendMsg(pCheck, 0,
"Fragmentation of %d bytes reported as %d on page %d",
- nFrag, data[hdr+7], iPage);
+ cnt, data[hdr+7], iPage);
}
}
-
-end_of_check:
- if( !doCoverageCheck ) pPage->isInit = savedIsInit;
+ sqlite3PageFree(hit);
releasePage(pPage);
- pCheck->zPfx = saved_zPfx;
- pCheck->v1 = saved_v1;
- pCheck->v2 = saved_v2;
return depth+1;
}
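/* Illustrative sketch (editorial, not part of the changeset): the reverted
** checkTreePage() above verifies coverage with a per-byte tally (the "hit"
** array) instead of the min-heap. A byte counted more than once means two
** cells/freeblocks overlap; a byte never counted is fragmentation. The
** hypothetical helper below condenses the marking step:
*/
static int demoMarkRange(unsigned char *hit, int first, int last){
  int j, overlap = 0;
  for(j=first; j<=last; j++){
    if( hit[j]++ ) overlap = 1;   /* already claimed by another cell/freeblock */
  }
  return overlap;
}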
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
@@ -63777,74 +58522,60 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
int *pnErr /* Write number of errors seen to this variable */
){
Pgno i;
+ int nRef;
IntegrityCk sCheck;
BtShared *pBt = p->pBt;
- int savedDbFlags = pBt->db->flags;
char zErr[100];
- VVA_ONLY( int nRef );
sqlite3BtreeEnter(p);
assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE );
- assert( (nRef = sqlite3PagerRefcount(pBt->pPager))>=0 );
+ nRef = sqlite3PagerRefcount(pBt->pPager);
sCheck.pBt = pBt;
sCheck.pPager = pBt->pPager;
sCheck.nPage = btreePagecount(sCheck.pBt);
sCheck.mxErr = mxErr;
sCheck.nErr = 0;
sCheck.mallocFailed = 0;
- sCheck.zPfx = 0;
- sCheck.v1 = 0;
- sCheck.v2 = 0;
- sCheck.aPgRef = 0;
- sCheck.heap = 0;
- sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
+ *pnErr = 0;
if( sCheck.nPage==0 ){
- goto integrity_ck_cleanup;
+ sqlite3BtreeLeave(p);
+ return 0;
}
sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
if( !sCheck.aPgRef ){
- sCheck.mallocFailed = 1;
- goto integrity_ck_cleanup;
- }
- sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize );
- if( sCheck.heap==0 ){
- sCheck.mallocFailed = 1;
- goto integrity_ck_cleanup;
+ *pnErr = 1;
+ sqlite3BtreeLeave(p);
+ return 0;
}
-
i = PENDING_BYTE_PAGE(pBt);
if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);
+ sqlite3StrAccumInit(&sCheck.errMsg, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
+ sCheck.errMsg.useMalloc = 2;
/* Check the integrity of the freelist
*/
- sCheck.zPfx = "Main freelist: ";
checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]),
- get4byte(&pBt->pPage1->aData[36]));
- sCheck.zPfx = 0;
+ get4byte(&pBt->pPage1->aData[36]), "Main freelist: ");
/* Check all the tables.
*/
- testcase( pBt->db->flags & SQLITE_CellSizeCk );
- pBt->db->flags &= ~SQLITE_CellSizeCk;
for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
- i64 notUsed;
if( aRoot[i]==0 ) continue;
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pBt->autoVacuum && aRoot[i]>1 ){
- checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
+ checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0, 0);
}
#endif
- checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+ checkTreePage(&sCheck, aRoot[i], "List of tree roots: ", NULL, NULL);
}
- pBt->db->flags = savedDbFlags;
/* Make sure every page in the file is referenced
*/
for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){
#ifdef SQLITE_OMIT_AUTOVACUUM
if( getPageReferenced(&sCheck, i)==0 ){
- checkAppendMsg(&sCheck, "Page %d is never used", i);
+ checkAppendMsg(&sCheck, 0, "Page %d is never used", i);
}
#else
/* If the database supports auto-vacuum, make sure no tables contain
@@ -63852,29 +58583,37 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
*/
if( getPageReferenced(&sCheck, i)==0 &&
(PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){
- checkAppendMsg(&sCheck, "Page %d is never used", i);
+ checkAppendMsg(&sCheck, 0, "Page %d is never used", i);
}
if( getPageReferenced(&sCheck, i)!=0 &&
(PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){
- checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i);
+ checkAppendMsg(&sCheck, 0, "Pointer map page %d is referenced", i);
}
#endif
}
+ /* Make sure this analysis did not leave any unref() pages.
+ ** This is an internal consistency check; an integrity check
+ ** of the integrity check.
+ */
+ if( NEVER(nRef != sqlite3PagerRefcount(pBt->pPager)) ){
+ checkAppendMsg(&sCheck, 0,
+ "Outstanding page count goes from %d to %d during this analysis",
+ nRef, sqlite3PagerRefcount(pBt->pPager)
+ );
+ }
+
/* Clean up and report errors.
*/
-integrity_ck_cleanup:
- sqlite3PageFree(sCheck.heap);
+ sqlite3BtreeLeave(p);
sqlite3_free(sCheck.aPgRef);
if( sCheck.mallocFailed ){
sqlite3StrAccumReset(&sCheck.errMsg);
- sCheck.nErr++;
+ *pnErr = sCheck.nErr+1;
+ return 0;
}
*pnErr = sCheck.nErr;
if( sCheck.nErr==0 ) sqlite3StrAccumReset(&sCheck.errMsg);
- /* Make sure this analysis did not leave any unref() pages. */
- assert( nRef==sqlite3PagerRefcount(pBt->pPager) );
- sqlite3BtreeLeave(p);
return sqlite3StrAccumFinish(&sCheck.errMsg);
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
@@ -64039,7 +58778,7 @@ SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void
int rc;
assert( cursorHoldsMutex(pCsr) );
assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) );
- assert( pCsr->curFlags & BTCF_Incrblob );
+ assert( pCsr->isIncrblobHandle );
rc = restoreCursorPosition(pCsr);
if( rc!=SQLITE_OK ){
@@ -64054,7 +58793,7 @@ SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void
** required in case any of them are holding references to an xFetch
** version of the b-tree page modified by the accessPayload call below.
**
- ** Note that pCsr must be open on a INTKEY table and saveCursorPosition()
+ ** Note that pCsr must be open on a BTREE_INTKEY table and saveCursorPosition()
** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, hence
** saveAllCursors can only return SQLITE_OK.
*/
@@ -64068,7 +58807,7 @@ SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void
** (d) there are no conflicting read-locks, and
** (e) the cursor points at a valid row of an intKey table.
*/
- if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){
+ if( !pCsr->wrFlag ){
return SQLITE_READONLY;
}
assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0
@@ -64081,11 +58820,20 @@ SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void
}
/*
-** Mark this cursor as an incremental blob cursor.
+** Set a flag on this cursor to cache the locations of pages from the
+** overflow list for the current row. This is used by cursors opened
+** for incremental blob IO only.
+**
+** This function sets a flag only. The actual page location cache
+** (stored in BtCursor.aOverflow[]) is allocated and used by function
+** accessPayload() (the worker function for sqlite3BtreeData() and
+** sqlite3BtreePutData()).
*/
-SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *pCur){
- pCur->curFlags |= BTCF_Incrblob;
- pCur->pBtree->hasIncrblobCur = 1;
+SQLITE_PRIVATE void sqlite3BtreeCacheOverflow(BtCursor *pCur){
+ assert( cursorHoldsMutex(pCur) );
+ assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+ invalidateOverflowCache(pCur);
+ pCur->isIncrblobHandle = 1;
}
#endif
@@ -64126,35 +58874,14 @@ SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){
}
/*
-** set the mask of hint flags for cursor pCsr.
+** set the mask of hint flags for cursor pCsr. Currently the only valid
+** values are 0 and BTREE_BULKLOAD.
*/
SQLITE_PRIVATE void sqlite3BtreeCursorHints(BtCursor *pCsr, unsigned int mask){
- assert( mask==BTREE_BULKLOAD || mask==BTREE_SEEK_EQ || mask==0 );
+ assert( mask==BTREE_BULKLOAD || mask==0 );
pCsr->hints = mask;
}
-#ifdef SQLITE_DEBUG
-/*
-** Return true if the cursor has a hint specified. This routine is
-** only used from within assert() statements
-*/
-SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor *pCsr, unsigned int mask){
- return (pCsr->hints & mask)!=0;
-}
-#endif
-
-/*
-** Return true if the given Btree is read-only.
-*/
-SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){
- return (p->pBt->btsFlags & BTS_READ_ONLY)!=0;
-}
-
-/*
-** Return the size of the header added to each page by this module.
-*/
-SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); }
-
/************** End of btree.c ***********************************************/
/************** Begin file backup.c ******************************************/
/*
@@ -64171,8 +58898,6 @@ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage));
** This file contains the implementation of the sqlite3_backup_XXX()
** API functions and the related features.
*/
-/* #include "sqliteInt.h" */
-/* #include "btreeInt.h" */
/*
** Structure allocated for each backup operation.
@@ -64246,12 +58971,12 @@ static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){
int rc = 0;
pParse = sqlite3StackAllocZero(pErrorDb, sizeof(*pParse));
if( pParse==0 ){
- sqlite3ErrorWithMsg(pErrorDb, SQLITE_NOMEM, "out of memory");
+ sqlite3Error(pErrorDb, SQLITE_NOMEM, "out of memory");
rc = SQLITE_NOMEM;
}else{
pParse->db = pDb;
if( sqlite3OpenTempDatabase(pParse) ){
- sqlite3ErrorWithMsg(pErrorDb, pParse->rc, "%s", pParse->zErrMsg);
+ sqlite3Error(pErrorDb, pParse->rc, "%s", pParse->zErrMsg);
rc = SQLITE_ERROR;
}
sqlite3DbFree(pErrorDb, pParse->zErrMsg);
@@ -64264,7 +58989,7 @@ static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){
}
if( i<0 ){
- sqlite3ErrorWithMsg(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb);
+ sqlite3Error(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb);
return 0;
}
@@ -64282,20 +59007,6 @@ static int setDestPgsz(sqlite3_backup *p){
}
/*
-** Check that there is no open read-transaction on the b-tree passed as the
-** second argument. If there is not, return SQLITE_OK. Otherwise, if there
-** is an open read-transaction, return SQLITE_ERROR and leave an error
-** message in database handle db.
-*/
-static int checkReadTransaction(sqlite3 *db, Btree *p){
- if( sqlite3BtreeIsInReadTrans(p) ){
- sqlite3ErrorWithMsg(db, SQLITE_ERROR, "destination database is in use");
- return SQLITE_ERROR;
- }
- return SQLITE_OK;
-}
-
-/*
** Create an sqlite3_backup process to copy the contents of zSrcDb from
** connection handle pSrcDb to zDestDb in pDestDb. If successful, return
** a pointer to the new sqlite3_backup object.
@@ -64303,7 +59014,7 @@ static int checkReadTransaction(sqlite3 *db, Btree *p){
** If an error occurs, NULL is returned and an error code and error message
** stored in database handle pDestDb.
*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
sqlite3* pDestDb, /* Database to write to */
const char *zDestDb, /* Name of database within pDestDb */
sqlite3* pSrcDb, /* Database connection to read from */
@@ -64311,13 +59022,6 @@ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
){
sqlite3_backup *p; /* Value to return */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(pSrcDb)||!sqlite3SafetyCheckOk(pDestDb) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
-
/* Lock the source database handle. The destination database
** handle is not locked in this routine, but it is locked in
** sqlite3_backup_step(). The user is required to ensure that no
@@ -64330,7 +59034,7 @@ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
sqlite3_mutex_enter(pDestDb->mutex);
if( pSrcDb==pDestDb ){
- sqlite3ErrorWithMsg(
+ sqlite3Error(
pDestDb, SQLITE_ERROR, "source and destination must be distinct"
);
p = 0;
@@ -64341,7 +59045,7 @@ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
** sqlite3_backup_finish(). */
p = (sqlite3_backup *)sqlite3MallocZero(sizeof(sqlite3_backup));
if( !p ){
- sqlite3Error(pDestDb, SQLITE_NOMEM);
+ sqlite3Error(pDestDb, SQLITE_NOMEM, 0);
}
}
@@ -64354,15 +59058,12 @@ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
p->iNext = 1;
p->isAttached = 0;
- if( 0==p->pSrc || 0==p->pDest
- || setDestPgsz(p)==SQLITE_NOMEM
- || checkReadTransaction(pDestDb, p->pDest)!=SQLITE_OK
- ){
+ if( 0==p->pSrc || 0==p->pDest || setDestPgsz(p)==SQLITE_NOMEM ){
/* One (or both) of the named databases did not exist or an OOM
- ** error was hit. Or there is a transaction open on the destination
- ** database. The error has already been written into the pDestDb
- ** handle. All that is left to do here is free the sqlite3_backup
- ** structure. */
+ ** error was hit. The error has already been written into the
+ ** pDestDb handle. All that is left to do here is free the
+ ** sqlite3_backup structure.
+ */
sqlite3_free(p);
p = 0;
}
@@ -64406,7 +59107,7 @@ static int backupOnePage(
** guaranteed that the shared-mutex is held by this thread, handle
** p->pSrc may not actually be the owner. */
int nSrcReserve = sqlite3BtreeGetReserveNoMutex(p->pSrc);
- int nDestReserve = sqlite3BtreeGetOptimalReserve(p->pDest);
+ int nDestReserve = sqlite3BtreeGetReserve(p->pDest);
#endif
int rc = SQLITE_OK;
i64 iOff;
@@ -64511,15 +59212,12 @@ static void attachBackupObject(sqlite3_backup *p){
/*
** Copy nPage pages from the source b-tree to the destination.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
int rc;
int destMode; /* Destination journal mode */
int pgszSrc = 0; /* Source page size */
int pgszDest = 0; /* Destination page size */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( p==0 ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(p->pSrcDb->mutex);
sqlite3BtreeEnter(p->pSrc);
if( p->pDestDb ){
@@ -64712,7 +59410,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
/* Sync the database file to disk. */
if( rc==SQLITE_OK ){
- rc = sqlite3PagerSync(pDestPager, 0);
+ rc = sqlite3PagerSync(pDestPager);
}
}else{
sqlite3PagerTruncateImage(pDestPager, nDestTruncate);
@@ -64756,7 +59454,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
/*
** Release all resources associated with an sqlite3_backup* handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p){
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p){
sqlite3_backup **pp; /* Ptr to head of pagers backup list */
sqlite3 *pSrcDb; /* Source database connection */
int rc; /* Value to return */
@@ -64783,14 +59481,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p){
}
/* If a transaction is still open on the Btree, roll it back. */
- sqlite3BtreeRollback(p->pDest, SQLITE_OK, 0);
+ sqlite3BtreeRollback(p->pDest, SQLITE_OK);
/* Set the error code of the destination database handle. */
rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc;
- if( p->pDestDb ){
- sqlite3Error(p->pDestDb, rc);
+ sqlite3Error(p->pDestDb, rc, 0);
- /* Exit the mutexes and free the backup context structure. */
+ /* Exit the mutexes and free the backup context structure. */
+ if( p->pDestDb ){
sqlite3LeaveMutexAndCloseZombie(p->pDestDb);
}
sqlite3BtreeLeave(p->pSrc);
@@ -64808,13 +59506,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p){
** Return the number of pages still to be backed up as of the most recent
** call to sqlite3_backup_step().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( p==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p){
return p->nRemaining;
}
@@ -64822,13 +59514,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p){
** Return the total number of pages in the source database as of the most
** recent call to sqlite3_backup_step().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( p==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p){
return p->nPagecount;
}
@@ -64844,13 +59530,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p){
** corresponding to the source database is held when this function is
** called.
*/
-static SQLITE_NOINLINE void backupUpdate(
- sqlite3_backup *p,
- Pgno iPage,
- const u8 *aData
-){
- assert( p!=0 );
- do{
+SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){
+ sqlite3_backup *p; /* Iterator variable */
+ for(p=pBackup; p; p=p->pNext){
assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
if( !isFatalError(p->rc) && iPage<p->iNext ){
/* The backup process p has already copied page iPage. But now it
@@ -64867,10 +59549,7 @@ static SQLITE_NOINLINE void backupUpdate(
p->rc = rc;
}
}
- }while( (p = p->pNext)!=0 );
-}
-SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){
- if( pBackup ) backupUpdate(pBackup, iPage, aData);
+ }
}
/*
@@ -64928,10 +59607,6 @@ SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){
b.pDest = pTo;
b.iNext = 1;
-#ifdef SQLITE_HAS_CODEC
- sqlite3PagerAlignReserve(sqlite3BtreePager(pTo), sqlite3BtreePager(pFrom));
-#endif
-
/* 0x7FFFFFFF is the hard limit for the number of pages in a database
** file. By passing this as the number of pages to copy to
** sqlite3_backup_step(), we can guarantee that the copy finishes
@@ -64975,55 +59650,6 @@ copy_finished:
** only within the VDBE. Interface routines refer to a Mem using the
** name sqlite_value
*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
-
-#ifdef SQLITE_DEBUG
-/*
-** Check invariants on a Mem object.
-**
-** This routine is intended for use inside of assert() statements, like
-** this: assert( sqlite3VdbeCheckMemInvariants(pMem) );
-*/
-SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){
- /* If MEM_Dyn is set then Mem.xDel!=0.
-  ** Mem.xDel might not be initialized if MEM_Dyn is clear.
- */
- assert( (p->flags & MEM_Dyn)==0 || p->xDel!=0 );
-
- /* MEM_Dyn may only be set if Mem.szMalloc==0. In this way we
- ** ensure that if Mem.szMalloc>0 then it is safe to do
- ** Mem.z = Mem.zMalloc without having to check Mem.flags&MEM_Dyn.
- ** That saves a few cycles in inner loops. */
- assert( (p->flags & MEM_Dyn)==0 || p->szMalloc==0 );
-
- /* Cannot be both MEM_Int and MEM_Real at the same time */
- assert( (p->flags & (MEM_Int|MEM_Real))!=(MEM_Int|MEM_Real) );
-
- /* The szMalloc field holds the correct memory allocation size */
- assert( p->szMalloc==0
- || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc) );
-
- /* If p holds a string or blob, the Mem.z must point to exactly
- ** one of the following:
- **
- ** (1) Memory in Mem.zMalloc and managed by the Mem object
- ** (2) Memory to be freed using Mem.xDel
- ** (3) An ephemeral string or blob
- ** (4) A static string or blob
- */
- if( (p->flags & (MEM_Str|MEM_Blob)) && p->n>0 ){
- assert(
- ((p->szMalloc>0 && p->z==p->zMalloc)? 1 : 0) +
- ((p->flags&MEM_Dyn)!=0 ? 1 : 0) +
- ((p->flags&MEM_Ephem)!=0 ? 1 : 0) +
- ((p->flags&MEM_Static)!=0 ? 1 : 0) == 1
- );
- }
- return 1;
-}
-#endif
-
/*
** If pMem is an object with a valid string representation, this routine
@@ -65066,84 +59692,64 @@ SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
/*
** Make sure pMem->z points to a writable allocation of at least
-** min(n,32) bytes.
-**
-** If the bPreserve argument is true, then copy of the content of
-** pMem->z into the new allocation. pMem must be either a string or
-** blob if bPreserve is true. If bPreserve is false, any prior content
-** in pMem->z is discarded.
-*/
-SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){
- assert( sqlite3VdbeCheckMemInvariants(pMem) );
+** n bytes.
+**
+** If the third argument passed to this function is true, then memory
+** cell pMem must contain a string or blob. In this case the content is
+** preserved. Otherwise, if the third parameter to this function is false,
+** any current string or blob value may be discarded.
+**
+** This function sets the MEM_Dyn flag and clears any xDel callback.
+** It also clears MEM_Ephem and MEM_Static. If the preserve flag is
+** not set, Mem.n is zeroed.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve){
+ assert( 1 >=
+ ((pMem->zMalloc && pMem->zMalloc==pMem->z) ? 1 : 0) +
+ (((pMem->flags&MEM_Dyn)&&pMem->xDel) ? 1 : 0) +
+ ((pMem->flags&MEM_Ephem) ? 1 : 0) +
+ ((pMem->flags&MEM_Static) ? 1 : 0)
+ );
assert( (pMem->flags&MEM_RowSet)==0 );
- /* If the bPreserve flag is set to true, then the memory cell must already
+ /* If the preserve flag is set to true, then the memory cell must already
** contain a valid string or blob value. */
- assert( bPreserve==0 || pMem->flags&(MEM_Blob|MEM_Str) );
- testcase( bPreserve && pMem->z==0 );
-
- assert( pMem->szMalloc==0
- || pMem->szMalloc==sqlite3DbMallocSize(pMem->db, pMem->zMalloc) );
- if( pMem->szMalloc<n ){
- if( n<32 ) n = 32;
- if( bPreserve && pMem->szMalloc>0 && pMem->z==pMem->zMalloc ){
+ assert( preserve==0 || pMem->flags&(MEM_Blob|MEM_Str) );
+
+ if( n<32 ) n = 32;
+ if( sqlite3DbMallocSize(pMem->db, pMem->zMalloc)<n ){
+ if( preserve && pMem->z==pMem->zMalloc ){
pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n);
- bPreserve = 0;
+ preserve = 0;
}else{
- if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc);
+ sqlite3DbFree(pMem->db, pMem->zMalloc);
pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n);
}
- if( pMem->zMalloc==0 ){
- sqlite3VdbeMemSetNull(pMem);
- pMem->z = 0;
- pMem->szMalloc = 0;
- return SQLITE_NOMEM;
- }else{
- pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
- }
}
- if( bPreserve && pMem->z && pMem->z!=pMem->zMalloc ){
+ if( pMem->z && preserve && pMem->zMalloc && pMem->z!=pMem->zMalloc ){
memcpy(pMem->zMalloc, pMem->z, pMem->n);
}
- if( (pMem->flags&MEM_Dyn)!=0 ){
- assert( pMem->xDel!=0 && pMem->xDel!=SQLITE_DYNAMIC );
+ if( pMem->flags&MEM_Dyn && pMem->xDel ){
+ assert( pMem->xDel!=SQLITE_DYNAMIC );
pMem->xDel((void *)(pMem->z));
}
pMem->z = pMem->zMalloc;
- pMem->flags &= ~(MEM_Dyn|MEM_Ephem|MEM_Static);
- return SQLITE_OK;
-}
-
-/*
-** Change the pMem->zMalloc allocation to be at least szNew bytes.
-** If pMem->zMalloc already meets or exceeds the requested size, this
-** routine is a no-op.
-**
-** Any prior string or blob content in the pMem object may be discarded.
-** The pMem->xDel destructor is called, if it exists. Though MEM_Str
-** and MEM_Blob values may be discarded, MEM_Int, MEM_Real, and MEM_Null
-** values are preserved.
-**
-** Return SQLITE_OK on success or an error code (probably SQLITE_NOMEM)
-** if unable to complete the resizing.
-*/
-SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
- assert( szNew>0 );
- assert( (pMem->flags & MEM_Dyn)==0 || pMem->szMalloc==0 );
- if( pMem->szMalloc<szNew ){
- return sqlite3VdbeMemGrow(pMem, szNew, 0);
+ if( pMem->z==0 ){
+ pMem->flags = MEM_Null;
+ }else{
+ pMem->flags &= ~(MEM_Ephem|MEM_Static);
}
- assert( (pMem->flags & MEM_Dyn)==0 );
- pMem->z = pMem->zMalloc;
- pMem->flags &= (MEM_Null|MEM_Int|MEM_Real);
- return SQLITE_OK;
+ pMem->xDel = 0;
+ return (pMem->z ? SQLITE_OK : SQLITE_NOMEM);
}
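/* Illustrative sketch (editorial, not part of the changeset): a typical use
** of the grow-with-preserve contract documented above. To append to an
** existing MEM_Str value one grows the buffer with the preserve flag set,
** writes past the old length, and keeps the nul terminator. The helper name
** is hypothetical:
*/
static int demoAppendByte(Mem *pMem, char c){
  /* assumes pMem already holds a string (MEM_Str), as the preserve
  ** path of sqlite3VdbeMemGrow() requires */
  if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){
    return SQLITE_NOMEM;
  }
  pMem->z[pMem->n++] = c;
  pMem->z[pMem->n] = 0;
  return SQLITE_OK;
}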
/*
-** Change pMem so that its MEM_Str or MEM_Blob value is stored in
-** MEM.zMalloc, where it can be safely written.
+** Make the given Mem object MEM_Dyn. In other words, make it so
+** that any TEXT or BLOB content is stored in memory obtained from
+** malloc(). In this way, we know that the memory is safe to be
+** overwritten or altered.
**
** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails.
*/
@@ -65153,18 +59759,17 @@ SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
assert( (pMem->flags&MEM_RowSet)==0 );
ExpandBlob(pMem);
f = pMem->flags;
- if( (f&(MEM_Str|MEM_Blob)) && (pMem->szMalloc==0 || pMem->z!=pMem->zMalloc) ){
+ if( (f&(MEM_Str|MEM_Blob)) && pMem->z!=pMem->zMalloc ){
if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){
return SQLITE_NOMEM;
}
pMem->z[pMem->n] = 0;
pMem->z[pMem->n+1] = 0;
pMem->flags |= MEM_Term;
- }
- pMem->flags &= ~MEM_Ephem;
#ifdef SQLITE_DEBUG
- pMem->pScopyFrom = 0;
+ pMem->pScopyFrom = 0;
#endif
+ }
return SQLITE_OK;
}
@@ -65198,11 +59803,15 @@ SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){
}
#endif
+
/*
-** It is already known that pMem contains an unterminated string.
-** Add the zero terminator.
+** Make sure the given Mem is \u0000 terminated.
*/
-static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){
+SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ if( (pMem->flags & MEM_Term)!=0 || (pMem->flags & MEM_Str)==0 ){
+ return SQLITE_OK; /* Nothing to do */
+ }
if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){
return SQLITE_NOMEM;
}
@@ -65213,34 +59822,20 @@ static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){
}
/*
-** Make sure the given Mem is \u0000 terminated.
-*/
-SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){
- assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
- testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) );
- testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 );
- if( (pMem->flags & (MEM_Term|MEM_Str))!=MEM_Str ){
- return SQLITE_OK; /* Nothing to do */
- }else{
- return vdbeMemAddTerminator(pMem);
- }
-}
-
-/*
** Add MEM_Str to the set of representations for the given Mem. Numbers
** are converted using sqlite3_snprintf(). Converting a BLOB to a string
** is a no-op.
**
-** Existing representations MEM_Int and MEM_Real are invalidated if
-** bForce is true but are retained if bForce is false.
+** Existing representations MEM_Int and MEM_Real are *not* invalidated.
**
** A MEM_Null value will never be passed to this function. This function is
** used for converting values to text for returning to the user (i.e. via
** sqlite3_value_text()), or for ensuring that values to be used as btree
** keys are strings. In the former case a NULL pointer is returned to the
-** user and the latter is an internal programming error.
+** user and the latter is an internal programming error.
*/
-SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
+SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, int enc){
+ int rc = SQLITE_OK;
int fg = pMem->flags;
const int nByte = 32;
@@ -65252,11 +59847,11 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
- if( sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+ if( sqlite3VdbeMemGrow(pMem, nByte, 0) ){
return SQLITE_NOMEM;
}
- /* For a Real or Integer, use sqlite3_snprintf() to produce the UTF-8
+ /* For a Real or Integer, use sqlite3_mprintf() to produce the UTF-8
** string representation of the value. Then, if the required encoding
** is UTF-16le or UTF-16be do a translation.
**
@@ -65266,14 +59861,13 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
sqlite3_snprintf(nByte, pMem->z, "%lld", pMem->u.i);
}else{
assert( fg & MEM_Real );
- sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->u.r);
+ sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->r);
}
pMem->n = sqlite3Strlen30(pMem->z);
pMem->enc = SQLITE_UTF8;
pMem->flags |= MEM_Str|MEM_Term;
- if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real);
sqlite3VdbeChangeEncoding(pMem, enc);
- return SQLITE_OK;
+ return rc;
}
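/* Illustrative sketch (editorial, not part of the changeset): the
** stringify path above always renders into UTF-8 first and re-encodes
** afterwards, using the two formats shown. A stand-alone rendering helper
** (name hypothetical) with the same formats:
*/
static void demoRenderNumber(char *zBuf, int nBuf, int isInt, i64 iVal, double rVal){
  if( isInt ){
    sqlite3_snprintf(nBuf, zBuf, "%lld", iVal);
  }else{
    sqlite3_snprintf(nBuf, zBuf, "%!.15g", rVal);  /* SQLite's 15-digit real format */
  }
}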
/*
@@ -65288,90 +59882,56 @@ SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
int rc = SQLITE_OK;
if( ALWAYS(pFunc && pFunc->xFinalize) ){
sqlite3_context ctx;
- Mem t;
assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef );
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
memset(&ctx, 0, sizeof(ctx));
- memset(&t, 0, sizeof(t));
- t.flags = MEM_Null;
- t.db = pMem->db;
- ctx.pOut = &t;
+ ctx.s.flags = MEM_Null;
+ ctx.s.db = pMem->db;
ctx.pMem = pMem;
ctx.pFunc = pFunc;
pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */
- assert( (pMem->flags & MEM_Dyn)==0 );
- if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc);
- memcpy(pMem, &t, sizeof(t));
+ assert( 0==(pMem->flags&MEM_Dyn) && !pMem->xDel );
+ sqlite3DbFree(pMem->db, pMem->zMalloc);
+ memcpy(pMem, &ctx.s, sizeof(ctx.s));
rc = ctx.isError;
}
return rc;
}
/*
-** If the memory cell contains a value that must be freed by
-** invoking the external callback in Mem.xDel, then this routine
-** will free that value. It also sets Mem.flags to MEM_Null.
-**
-** This is a helper routine for sqlite3VdbeMemSetNull() and
-** for sqlite3VdbeMemRelease(). Use those other routines as the
-** entry point for releasing Mem resources.
+** If the memory cell contains a string value that must be freed by
+** invoking an external callback, free it now. Calling this function
+** does not free any Mem.zMalloc buffer.
*/
-static SQLITE_NOINLINE void vdbeMemClearExternAndSetNull(Mem *p){
+SQLITE_PRIVATE void sqlite3VdbeMemReleaseExternal(Mem *p){
assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) );
- assert( VdbeMemDynamic(p) );
if( p->flags&MEM_Agg ){
sqlite3VdbeMemFinalize(p, p->u.pDef);
assert( (p->flags & MEM_Agg)==0 );
- testcase( p->flags & MEM_Dyn );
- }
- if( p->flags&MEM_Dyn ){
+ sqlite3VdbeMemRelease(p);
+ }else if( p->flags&MEM_Dyn && p->xDel ){
assert( (p->flags&MEM_RowSet)==0 );
- assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 );
+ assert( p->xDel!=SQLITE_DYNAMIC );
p->xDel((void *)p->z);
+ p->xDel = 0;
}else if( p->flags&MEM_RowSet ){
sqlite3RowSetClear(p->u.pRowSet);
}else if( p->flags&MEM_Frame ){
- VdbeFrame *pFrame = p->u.pFrame;
- pFrame->pParent = pFrame->v->pDelFrame;
- pFrame->v->pDelFrame = pFrame;
- }
- p->flags = MEM_Null;
-}
-
-/*
-** Release memory held by the Mem p, both external memory cleared
-** by p->xDel and memory in p->zMalloc.
-**
-** This is a helper routine invoked by sqlite3VdbeMemRelease() in
-** the unusual case where there really is memory in p that needs
-** to be freed.
-*/
-static SQLITE_NOINLINE void vdbeMemClear(Mem *p){
- if( VdbeMemDynamic(p) ){
- vdbeMemClearExternAndSetNull(p);
- }
- if( p->szMalloc ){
- sqlite3DbFree(p->db, p->zMalloc);
- p->szMalloc = 0;
+ sqlite3VdbeMemSetNull(p);
}
- p->z = 0;
}
/*
-** Release any memory resources held by the Mem. Both the memory that is
-** freed by Mem.xDel and the Mem.zMalloc allocation are freed.
-**
-** Use this routine to clean up prior to abandoning a Mem, or to
-** reset a Mem back to its minimum memory utilization.
-**
-** Use sqlite3VdbeMemSetNull() to release just the Mem.xDel space
-** prior to inserting new content into the Mem.
+** Release any memory held by the Mem. This may leave the Mem in an
+** inconsistent state, for example with (Mem.z==0) and
+** (Mem.type==SQLITE_TEXT).
*/
SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){
- assert( sqlite3VdbeCheckMemInvariants(p) );
- if( VdbeMemDynamic(p) || p->szMalloc ){
- vdbeMemClear(p);
- }
+ VdbeMemRelease(p);
+ sqlite3DbFree(p->db, p->zMalloc);
+ p->z = 0;
+ p->zMalloc = 0;
+ p->xDel = 0;
}
/*
@@ -65410,7 +59970,7 @@ static i64 doubleToInt64(double r){
** If pMem is an integer, then the value is exact. If pMem is
** a floating-point then the value returned is the integer part.
** If pMem is a string or blob, then we make an attempt to convert
-** it into an integer and return that. If pMem represents an
+** it into an integer and return that. If pMem represents an
** SQL-NULL value, return 0.
**
** If pMem represents a string value, its encoding might be changed.
@@ -65423,10 +59983,11 @@ SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){
if( flags & MEM_Int ){
return pMem->u.i;
}else if( flags & MEM_Real ){
- return doubleToInt64(pMem->u.r);
+ return doubleToInt64(pMem->r);
}else if( flags & (MEM_Str|MEM_Blob) ){
i64 value = 0;
assert( pMem->z || pMem->n==0 );
+ testcase( pMem->z==0 );
sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc);
return value;
}else{
@@ -65444,7 +60005,7 @@ SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
if( pMem->flags & MEM_Real ){
- return pMem->u.r;
+ return pMem->r;
}else if( pMem->flags & MEM_Int ){
return (double)pMem->u.i;
}else if( pMem->flags & (MEM_Str|MEM_Blob) ){
@@ -65463,13 +60024,12 @@ SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){
** MEM_Int if we can.
*/
SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
- i64 ix;
assert( pMem->flags & MEM_Real );
assert( (pMem->flags & MEM_RowSet)==0 );
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
- ix = doubleToInt64(pMem->u.r);
+ pMem->u.i = doubleToInt64(pMem->r);
/* Only mark the value as an integer if
**
@@ -65481,9 +60041,11 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
** the second condition under the assumption that addition overflow causes
** values to wrap around.
*/
- if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){
- pMem->u.i = ix;
- MemSetTypeFlag(pMem, MEM_Int);
+ if( pMem->r==(double)pMem->u.i
+ && pMem->u.i>SMALLEST_INT64
+ && pMem->u.i<LARGEST_INT64
+ ){
+ pMem->flags |= MEM_Int;
}
}
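/* Illustrative sketch (editorial, not part of the changeset): the round-trip
** test applied above. A REAL value is demoted to MEM_Int only when
** converting back to double reproduces it exactly and the result lies
** strictly inside the 64-bit range, so boundary values stay REAL.
** Hypothetical helper:
*/
static int demoFitsInInt64(double r){
  i64 ix = doubleToInt64(r);
  return r==(double)ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64;
}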
@@ -65508,7 +60070,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
- pMem->u.r = sqlite3VdbeRealValue(pMem);
+ pMem->r = sqlite3VdbeRealValue(pMem);
MemSetTypeFlag(pMem, MEM_Real);
return SQLITE_OK;
}
@@ -65528,7 +60090,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
if( 0==sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc) ){
MemSetTypeFlag(pMem, MEM_Int);
}else{
- pMem->u.r = sqlite3VdbeRealValue(pMem);
+ pMem->r = sqlite3VdbeRealValue(pMem);
MemSetTypeFlag(pMem, MEM_Real);
sqlite3VdbeIntegerAffinity(pMem);
}
@@ -65539,83 +60101,19 @@ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
}
/*
-** Cast the datatype of the value in pMem according to the affinity
-** "aff". Casting is different from applying affinity in that a cast
-** is forced. In other words, the value is converted into the desired
-** affinity even if that results in loss of data. This routine is
-** used (for example) to implement the SQL "cast()" operator.
-*/
-SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
- if( pMem->flags & MEM_Null ) return;
- switch( aff ){
- case SQLITE_AFF_BLOB: { /* Really a cast to BLOB */
- if( (pMem->flags & MEM_Blob)==0 ){
- sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
- assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
- MemSetTypeFlag(pMem, MEM_Blob);
- }else{
- pMem->flags &= ~(MEM_TypeMask&~MEM_Blob);
- }
- break;
- }
- case SQLITE_AFF_NUMERIC: {
- sqlite3VdbeMemNumerify(pMem);
- break;
- }
- case SQLITE_AFF_INTEGER: {
- sqlite3VdbeMemIntegerify(pMem);
- break;
- }
- case SQLITE_AFF_REAL: {
- sqlite3VdbeMemRealify(pMem);
- break;
- }
- default: {
- assert( aff==SQLITE_AFF_TEXT );
- assert( MEM_Str==(MEM_Blob>>3) );
- pMem->flags |= (pMem->flags&MEM_Blob)>>3;
- sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
- assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
- pMem->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero);
- break;
- }
- }
-}
-
-/*
-** Initialize bulk memory to be a consistent Mem object.
-**
-** The minimum amount of initialization feasible is performed.
-*/
-SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem *pMem, sqlite3 *db, u16 flags){
- assert( (flags & ~MEM_TypeMask)==0 );
- pMem->flags = flags;
- pMem->db = db;
- pMem->szMalloc = 0;
-}
-
-
-/*
** Delete any previous value and set the value stored in *pMem to NULL.
-**
-** This routine calls the Mem.xDel destructor to dispose of values that
-** require the destructor. But it preserves the Mem.zMalloc memory allocation.
-** To free all resources, use sqlite3VdbeMemRelease(), which both calls this
-** routine to invoke the destructor and deallocates Mem.zMalloc.
-**
-** Use this routine to reset the Mem prior to inserting a new value.
-**
-** Use sqlite3VdbeMemRelease() to completely erase the Mem prior to abandoning it.
*/
SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem *pMem){
- if( VdbeMemDynamic(pMem) ){
- vdbeMemClearExternAndSetNull(pMem);
- }else{
- pMem->flags = MEM_Null;
+ if( pMem->flags & MEM_Frame ){
+ VdbeFrame *pFrame = pMem->u.pFrame;
+ pFrame->pParent = pFrame->v->pDelFrame;
+ pFrame->v->pDelFrame = pFrame;
}
-}
-SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){
- sqlite3VdbeMemSetNull((Mem*)p);
+ if( pMem->flags & MEM_RowSet ){
+ sqlite3RowSetClear(pMem->u.pRowSet);
+ }
+ MemSetTypeFlag(pMem, MEM_Null);
+ pMem->type = SQLITE_NULL;
}
/*
@@ -65625,22 +60123,19 @@ SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){
SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){
sqlite3VdbeMemRelease(pMem);
pMem->flags = MEM_Blob|MEM_Zero;
+ pMem->type = SQLITE_BLOB;
pMem->n = 0;
if( n<0 ) n = 0;
pMem->u.nZero = n;
pMem->enc = SQLITE_UTF8;
- pMem->z = 0;
-}
-/*
-** The pMem is known to contain content that needs to be destroyed prior
-** to a value change. So invoke the destructor, then set the value to
-** a 64-bit integer.
-*/
-static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){
- sqlite3VdbeMemSetNull(pMem);
- pMem->u.i = val;
- pMem->flags = MEM_Int;
+#ifdef SQLITE_OMIT_INCRBLOB
+ sqlite3VdbeMemGrow(pMem, n, 0);
+ if( pMem->z ){
+ pMem->n = n;
+ memset(pMem->z, 0, n);
+ }
+#endif
}
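For reference, this zero-blob representation is what backs the public sqlite3_bind_zeroblob() interface: the value carries only a length, and the zero bytes are filled in later (or immediately, in the SQLITE_OMIT_INCRBLOB branch above). A short usage sketch with error handling trimmed:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(b BLOB)", 0, 0, 0);
  sqlite3_prepare_v2(db, "INSERT INTO t VALUES(?1)", -1, &stmt, 0);
  sqlite3_bind_zeroblob(stmt, 1, 1024);   /* 1024 zero bytes, no buffer needed */
  sqlite3_step(stmt);
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  printf("inserted a 1KB zeroblob\n");
  return 0;
}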
/*
@@ -65648,12 +60143,10 @@ static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){
** manifest type INTEGER.
*/
SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){
- if( VdbeMemDynamic(pMem) ){
- vdbeReleaseAndSetInt64(pMem, val);
- }else{
- pMem->u.i = val;
- pMem->flags = MEM_Int;
- }
+ sqlite3VdbeMemRelease(pMem);
+ pMem->u.i = val;
+ pMem->flags = MEM_Int;
+ pMem->type = SQLITE_INTEGER;
}
#ifndef SQLITE_OMIT_FLOATING_POINT
@@ -65662,10 +60155,13 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){
** manifest type REAL.
*/
SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){
- sqlite3VdbeMemSetNull(pMem);
- if( !sqlite3IsNaN(val) ){
- pMem->u.r = val;
+ if( sqlite3IsNaN(val) ){
+ sqlite3VdbeMemSetNull(pMem);
+ }else{
+ sqlite3VdbeMemRelease(pMem);
+ pMem->r = val;
pMem->flags = MEM_Real;
+ pMem->type = SQLITE_FLOAT;
}
}
#endif
@@ -65682,11 +60178,10 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem *pMem){
pMem->zMalloc = sqlite3DbMallocRaw(db, 64);
if( db->mallocFailed ){
pMem->flags = MEM_Null;
- pMem->szMalloc = 0;
}else{
assert( pMem->zMalloc );
- pMem->szMalloc = sqlite3DbMallocSize(db, pMem->zMalloc);
- pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, pMem->szMalloc);
+ pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc,
+ sqlite3DbMallocSize(db, pMem->zMalloc));
assert( pMem->u.pRowSet!=0 );
pMem->flags = MEM_RowSet;
}
@@ -65710,7 +60205,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){
#ifdef SQLITE_DEBUG
/*
-** This routine prepares a memory cell for modification by breaking
+** This routine prepares a memory cell for modification by breaking
** its link to a shallow copy and by marking any current shallow
** copies of this cell as invalid.
**
@@ -65722,7 +60217,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
Mem *pX;
for(i=1, pX=&pVdbe->aMem[1]; i<=pVdbe->nMem; i++, pX++){
if( pX->pScopyFrom==pMem ){
- pX->flags |= MEM_Undefined;
+ pX->flags |= MEM_Invalid;
pX->pScopyFrom = 0;
}
}
@@ -65730,6 +60225,10 @@ SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
}
#endif /* SQLITE_DEBUG */
+/*
+** Size of struct Mem not including the Mem.zMalloc member.
+*/
+#define MEMCELLSIZE (size_t)(&(((Mem *)0)->zMalloc))
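MEMCELLSIZE uses the classic null-pointer offset idiom so that a shallow copy can memcpy() every field up to, but not including, Mem.zMalloc, leaving the destination's own allocation intact. A self-contained sketch of the same idiom over a made-up Cell struct (not the real Mem layout):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Fields before zMalloc are the payload that shallow copies transfer;
** zMalloc stays owned by the destination cell. */
struct Cell {
  long long i;
  int flags;
  char *z;
  char *zMalloc;    /* everything from here on is excluded from the copy */
};

/* Same null-pointer-offset trick as MEMCELLSIZE above. */
#define CELLSIZE (size_t)(&(((struct Cell *)0)->zMalloc))

int main(void){
  struct Cell from = { 42, 0x04, 0, 0 };
  struct Cell to;
  memset(&to, 0, sizeof(to));
  to.zMalloc = (char*)"keep me";
  memcpy(&to, &from, CELLSIZE);            /* copies i, flags and z only */
  printf("%zu == %zu\n", CELLSIZE, offsetof(struct Cell, zMalloc));
  printf("to.i=%lld, zMalloc preserved: %s\n", to.i, to.zMalloc);
  return 0;
}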
/*
** Make a shallow copy of pFrom into pTo. Prior contents of
@@ -65737,16 +60236,11 @@ SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
** pFrom->z is used, then pTo->z points to the same thing as pFrom->z
** and flags gets srcType (either MEM_Ephem or MEM_Static).
*/
-static SQLITE_NOINLINE void vdbeClrCopy(Mem *pTo, const Mem *pFrom, int eType){
- vdbeMemClearExternAndSetNull(pTo);
- assert( !VdbeMemDynamic(pTo) );
- sqlite3VdbeMemShallowCopy(pTo, pFrom, eType);
-}
SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){
assert( (pFrom->flags & MEM_RowSet)==0 );
- assert( pTo->db==pFrom->db );
- if( VdbeMemDynamic(pTo) ){ vdbeClrCopy(pTo,pFrom,srcType); return; }
+ VdbeMemRelease(pTo);
memcpy(pTo, pFrom, MEMCELLSIZE);
+ pTo->xDel = 0;
if( (pFrom->flags&MEM_Static)==0 ){
pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem);
assert( srcType==MEM_Ephem || srcType==MEM_Static );
@@ -65761,14 +60255,11 @@ SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int sr
SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){
int rc = SQLITE_OK;
- /* The pFrom==0 case in the following assert() is when an sqlite3_value
- ** from sqlite3_value_dup() is used as the argument
- ** to sqlite3_result_value(). */
- assert( pTo->db==pFrom->db || pFrom->db==0 );
assert( (pFrom->flags & MEM_RowSet)==0 );
- if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo);
+ VdbeMemRelease(pTo);
memcpy(pTo, pFrom, MEMCELLSIZE);
pTo->flags &= ~MEM_Dyn;
+
if( pTo->flags&(MEM_Str|MEM_Blob) ){
if( 0==(pFrom->flags&MEM_Static) ){
pTo->flags |= MEM_Ephem;
@@ -65793,7 +60284,8 @@ SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){
sqlite3VdbeMemRelease(pTo);
memcpy(pTo, pFrom, sizeof(Mem));
pFrom->flags = MEM_Null;
- pFrom->szMalloc = 0;
+ pFrom->xDel = 0;
+ pFrom->zMalloc = 0;
}
/*
@@ -65840,8 +60332,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
if( nByte<0 ){
assert( enc!=0 );
if( enc==SQLITE_UTF8 ){
- nByte = sqlite3Strlen30(z);
- if( nByte>iLimit ) nByte = iLimit+1;
+ for(nByte=0; nByte<=iLimit && z[nByte]; nByte++){}
}else{
for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){}
}
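The replacement length scans above stop either at the terminator or as soon as the limit is exceeded, so an oversized string can be rejected without walking it to the end. A standalone sketch of both loops (bounded_len8 and bounded_len16 are illustrative names):

#include <stdio.h>

/* Stop at the terminator or once the limit has been exceeded; a result
** greater than the limit signals "too big" to the caller. */
static int bounded_len8(const char *z, int limit){
  int n;
  for(n=0; n<=limit && z[n]; n++){}
  return n;
}
static int bounded_len16(const char *z, int limit){
  int n;
  for(n=0; n<=limit && (z[n] | z[n+1]); n+=2){}
  return n;
}

int main(void){
  const char u8[] = "hello";
  const char u16le[] = { 'h',0, 'i',0, 0,0 };
  printf("%d %d\n", bounded_len8(u8, 100), bounded_len16(u16le, 100));  /* 5 4 */
  printf("%d\n", bounded_len8(u8, 3));    /* 4: exceeds the limit of 3 */
  return 0;
}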
@@ -65860,17 +60351,14 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
if( nByte>iLimit ){
return SQLITE_TOOBIG;
}
- testcase( nAlloc==0 );
- testcase( nAlloc==31 );
- testcase( nAlloc==32 );
- if( sqlite3VdbeMemClearAndResize(pMem, MAX(nAlloc,32)) ){
+ if( sqlite3VdbeMemGrow(pMem, nAlloc, 0) ){
return SQLITE_NOMEM;
}
memcpy(pMem->z, z, nAlloc);
}else if( xDel==SQLITE_DYNAMIC ){
sqlite3VdbeMemRelease(pMem);
pMem->zMalloc = pMem->z = (char *)z;
- pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
+ pMem->xDel = 0;
}else{
sqlite3VdbeMemRelease(pMem);
pMem->z = (char *)z;
@@ -65881,6 +60369,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
pMem->n = nByte;
pMem->flags = flags;
pMem->enc = (enc==0 ? SQLITE_UTF8 : enc);
+ pMem->type = (enc==0 ? SQLITE_BLOB : SQLITE_TEXT);
#ifndef SQLITE_OMIT_UTF16
if( pMem->enc!=SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){
@@ -65896,47 +60385,131 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
}
/*
-** Move data out of a btree key or data field and into a Mem structure.
-** The data or key is taken from the entry that pCur is currently pointing
-** to. offset and amt determine what portion of the data or key to retrieve.
-** key is true to get the key or false to get data. The result is written
-** into the pMem element.
-**
-** The pMem object must have been initialized. This routine will use
-** pMem->zMalloc to hold the content from the btree, if possible. New
-** pMem->zMalloc space will be allocated if necessary. The calling routine
-** is responsible for making sure that the pMem object is eventually
-** destroyed.
+** Compare the values contained by the two memory cells, returning
+** negative, zero or positive if pMem1 is less than, equal to, or greater
+** than pMem2. Sorting order is NULL's first, followed by numbers (integers
+** and reals) sorted numerically, followed by text ordered by the collating
+** sequence pColl and finally blob's ordered by memcmp().
**
-** If this routine fails for any reason (malloc returns NULL or unable
-** to read from the disk) then the pMem is left in an inconsistent state.
+** Two NULL values are considered equal by this function.
*/
-static SQLITE_NOINLINE int vdbeMemFromBtreeResize(
- BtCursor *pCur, /* Cursor pointing at record to retrieve. */
- u32 offset, /* Offset from the start of data to return bytes from. */
- u32 amt, /* Number of bytes to return. */
- int key, /* If true, retrieve from the btree key, not data. */
- Mem *pMem /* OUT: Return data in this Mem structure. */
-){
+SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
int rc;
- pMem->flags = MEM_Null;
- if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+2)) ){
- if( key ){
- rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z);
+ int f1, f2;
+ int combined_flags;
+
+ f1 = pMem1->flags;
+ f2 = pMem2->flags;
+ combined_flags = f1|f2;
+ assert( (combined_flags & MEM_RowSet)==0 );
+
+ /* If one value is NULL, it is less than the other. If both values
+ ** are NULL, return 0.
+ */
+ if( combined_flags&MEM_Null ){
+ return (f2&MEM_Null) - (f1&MEM_Null);
+ }
+
+ /* If one value is a number and the other is not, the number is less.
+ ** If both are numbers, compare as reals if one is a real, or as integers
+ ** if both values are integers.
+ */
+ if( combined_flags&(MEM_Int|MEM_Real) ){
+ double r1, r2;
+ if( (f1 & f2 & MEM_Int)!=0 ){
+ if( pMem1->u.i < pMem2->u.i ) return -1;
+ if( pMem1->u.i > pMem2->u.i ) return 1;
+ return 0;
+ }
+ if( (f1&MEM_Real)!=0 ){
+ r1 = pMem1->r;
+ }else if( (f1&MEM_Int)!=0 ){
+ r1 = (double)pMem1->u.i;
}else{
- rc = sqlite3BtreeData(pCur, offset, amt, pMem->z);
+ return 1;
}
- if( rc==SQLITE_OK ){
- pMem->z[amt] = 0;
- pMem->z[amt+1] = 0;
- pMem->flags = MEM_Blob|MEM_Term;
- pMem->n = (int)amt;
+ if( (f2&MEM_Real)!=0 ){
+ r2 = pMem2->r;
+ }else if( (f2&MEM_Int)!=0 ){
+ r2 = (double)pMem2->u.i;
}else{
- sqlite3VdbeMemRelease(pMem);
+ return -1;
}
+ if( r1<r2 ) return -1;
+ if( r1>r2 ) return 1;
+ return 0;
+ }
+
+ /* If one value is a string and the other is a blob, the string is less.
+ ** If both are strings, compare using the collating functions.
+ */
+ if( combined_flags&MEM_Str ){
+ if( (f1 & MEM_Str)==0 ){
+ return 1;
+ }
+ if( (f2 & MEM_Str)==0 ){
+ return -1;
+ }
+
+ assert( pMem1->enc==pMem2->enc );
+ assert( pMem1->enc==SQLITE_UTF8 ||
+ pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
+
+ /* The collation sequence must be defined at this point, even if
+ ** the user deletes the collation sequence after the vdbe program is
+ ** compiled (this was not always the case).
+ */
+ assert( !pColl || pColl->xCmp );
+
+ if( pColl ){
+ if( pMem1->enc==pColl->enc ){
+ /* The strings are already in the correct encoding. Call the
+ ** comparison function directly */
+ return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z);
+ }else{
+ const void *v1, *v2;
+ int n1, n2;
+ Mem c1;
+ Mem c2;
+ memset(&c1, 0, sizeof(c1));
+ memset(&c2, 0, sizeof(c2));
+ sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem);
+ sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem);
+ v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc);
+ n1 = v1==0 ? 0 : c1.n;
+ v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc);
+ n2 = v2==0 ? 0 : c2.n;
+ rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2);
+ sqlite3VdbeMemRelease(&c1);
+ sqlite3VdbeMemRelease(&c2);
+ return rc;
+ }
+ }
+ /* If a NULL pointer was passed as the collate function, fall through
+ ** to the blob case and use memcmp(). */
+ }
+
+ /* Both values must be blobs. Compare using memcmp(). */
+ rc = memcmp(pMem1->z, pMem2->z, (pMem1->n>pMem2->n)?pMem2->n:pMem1->n);
+ if( rc==0 ){
+ rc = pMem1->n - pMem2->n;
}
return rc;
}
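The comparison routine restored above orders values by class before comparing within a class: NULLs first, then numbers, then text under the collating sequence, then blobs via memcmp(). A much-reduced sketch of that ordering over a toy tagged value (Val and val_cmp are illustrative, memcmp stands in for a collation, and the int/real distinction is collapsed to double):

#include <stdio.h>
#include <string.h>

typedef enum { V_NULL, V_NUM, V_TEXT, V_BLOB } VType;
typedef struct { VType t; double num; const char *z; int n; } Val;

/* NULLs first, then numeric values, then text, then blobs. */
static int val_cmp(const Val *a, const Val *b){
  if( a->t!=b->t ) return (int)a->t - (int)b->t;    /* class decides first */
  switch( a->t ){
    case V_NULL: return 0;                          /* two NULLs compare equal */
    case V_NUM:  return (a->num<b->num) ? -1 : (a->num>b->num);
    default: {
      int n = a->n<b->n ? a->n : b->n;
      int rc = memcmp(a->z, b->z, n);
      return rc ? rc : a->n - b->n;                 /* shorter value sorts first */
    }
  }
}

int main(void){
  Val null_ = {V_NULL}, seven = {V_NUM, 7.0}, txt = {V_TEXT, 0, "abc", 3};
  printf("%d %d\n", val_cmp(&null_, &seven), val_cmp(&seven, &txt));  /* both negative */
  return 0;
}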
+
+/*
+** Move data out of a btree key or data field and into a Mem structure.
+** The data or key is taken from the entry that pCur is currently pointing
+** to. offset and amt determine what portion of the data or key to retrieve.
+** key is true to get the key or false to get data. The result is written
+** into the pMem element.
+**
+** The pMem structure is assumed to be uninitialized. Any prior content
+** is overwritten without being freed.
+**
+** If this routine fails for any reason (malloc returns NULL or unable
+** to read from the disk) then the pMem is left in an inconsistent state.
+*/
SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
BtCursor *pCur, /* Cursor pointing at record to retrieve. */
u32 offset, /* Offset from the start of data to return bytes from. */
@@ -65949,7 +60522,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
int rc = SQLITE_OK; /* Return code */
assert( sqlite3BtreeCursorIsValid(pCur) );
- assert( !VdbeMemDynamic(pMem) );
/* Note: the calls to BtreeKeyFetch() and DataFetch() below assert()
** that both the BtShared and database handle mutexes are held. */
@@ -65962,35 +60534,54 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
assert( zData!=0 );
if( offset+amt<=available ){
+ sqlite3VdbeMemRelease(pMem);
pMem->z = &zData[offset];
pMem->flags = MEM_Blob|MEM_Ephem;
- pMem->n = (int)amt;
- }else{
- rc = vdbeMemFromBtreeResize(pCur, offset, amt, key, pMem);
+ }else if( SQLITE_OK==(rc = sqlite3VdbeMemGrow(pMem, amt+2, 0)) ){
+ pMem->flags = MEM_Blob|MEM_Dyn|MEM_Term;
+ pMem->enc = 0;
+ pMem->type = SQLITE_BLOB;
+ if( key ){
+ rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z);
+ }else{
+ rc = sqlite3BtreeData(pCur, offset, amt, pMem->z);
+ }
+ pMem->z[amt] = 0;
+ pMem->z[amt+1] = 0;
+ if( rc!=SQLITE_OK ){
+ sqlite3VdbeMemRelease(pMem);
+ }
}
+ pMem->n = (int)amt;
return rc;
}
-/*
-** The pVal argument is known to be a value other than NULL.
-** Convert it into a string with encoding enc and return a pointer
-** to a zero-terminated version of that string.
+/* This function is only available internally, it is not part of the
+** external API. It works in a similar way to sqlite3_value_text(),
+** except the data returned is in the encoding specified by the second
+** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or
+** SQLITE_UTF8.
+**
+** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED.
+** If that is the case, then the result must be aligned on an even byte
+** boundary.
*/
-static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
- assert( pVal!=0 );
+SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
+ if( !pVal ) return 0;
+
assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
assert( (pVal->flags & MEM_RowSet)==0 );
- assert( (pVal->flags & (MEM_Null))==0 );
- if( pVal->flags & (MEM_Blob|MEM_Str) ){
- pVal->flags |= MEM_Str;
- if( pVal->flags & MEM_Zero ){
- sqlite3VdbeMemExpandBlob(pVal);
- }
- if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){
- sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED);
- }
+
+ if( pVal->flags&MEM_Null ){
+ return 0;
+ }
+ assert( (MEM_Blob>>3) == MEM_Str );
+ pVal->flags |= (pVal->flags & MEM_Blob)>>3;
+ ExpandBlob(pVal);
+ if( pVal->flags&MEM_Str ){
+ sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED);
if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){
assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 );
if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){
@@ -65999,7 +60590,8 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
}
sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */
}else{
- sqlite3VdbeMemStringify(pVal, enc, 0);
+ assert( (pVal->flags&MEM_Blob)==0 );
+ sqlite3VdbeMemStringify(pVal, enc);
assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) );
}
assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0
@@ -66011,30 +60603,6 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
}
}
-/* This function is only available internally, it is not part of the
-** external API. It works in a similar way to sqlite3_value_text(),
-** except the data returned is in the encoding specified by the second
-** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or
-** SQLITE_UTF8.
-**
-** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED.
-** If that is the case, then the result must be aligned on an even byte
-** boundary.
-*/
-SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
- if( !pVal ) return 0;
- assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
- assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
- assert( (pVal->flags & MEM_RowSet)==0 );
- if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){
- return pVal->z;
- }
- if( pVal->flags&MEM_Null ){
- return 0;
- }
- return valueToText(pVal, enc);
-}
-
/*
** Create a new sqlite3_value object.
*/
@@ -66042,6 +60610,7 @@ SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *db){
Mem *p = sqlite3DbMallocZero(db, sizeof(*p));
if( p ){
p->flags = MEM_Null;
+ p->type = SQLITE_NULL;
p->db = db;
}
return p;
@@ -66066,7 +60635,7 @@ struct ValueNewStat4Ctx {
** Otherwise, if the second argument is non-zero, then this function is
** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not
** already been allocated, allocate the UnpackedRecord structure that
-** that function will return to its caller here. Then return a pointer to
+** that function will return to its caller here. Then return a pointer to
** an sqlite3_value within the UnpackedRecord.a[] array.
*/
static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
@@ -66087,9 +60656,11 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
if( pRec->pKeyInfo ){
assert( pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField==nCol );
assert( pRec->pKeyInfo->enc==ENC(db) );
+ pRec->flags = UNPACKED_PREFIX_MATCH;
pRec->aMem = (Mem *)((u8*)pRec + ROUND8(sizeof(UnpackedRecord)));
for(i=0; i<nCol; i++){
pRec->aMem[i].flags = MEM_Null;
+ pRec->aMem[i].type = SQLITE_NULL;
pRec->aMem[i].db = db;
}
}else{
@@ -66111,113 +60682,6 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
}
/*
-** The expression object indicated by the second argument is guaranteed
-** to be a scalar SQL function. If
-**
-** * all function arguments are SQL literals,
-** * one of the SQLITE_FUNC_CONSTANT or _SLOCHNG function flags is set, and
-** * the SQLITE_FUNC_NEEDCOLL function flag is not set,
-**
-** then this routine attempts to invoke the SQL function. Assuming no
-** error occurs, output parameter (*ppVal) is set to point to a value
-** object containing the result before returning SQLITE_OK.
-**
-** Affinity aff is applied to the result of the function before returning.
-** If the result is a text value, the sqlite3_value object uses encoding
-** enc.
-**
-** If the conditions above are not met, this function returns SQLITE_OK
-** and sets (*ppVal) to NULL. Or, if an error occurs, (*ppVal) is set to
-** NULL and an SQLite error code returned.
-*/
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
-static int valueFromFunction(
- sqlite3 *db, /* The database connection */
- Expr *p, /* The expression to evaluate */
- u8 enc, /* Encoding to use */
- u8 aff, /* Affinity to use */
- sqlite3_value **ppVal, /* Write the new value here */
- struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */
-){
- sqlite3_context ctx; /* Context object for function invocation */
- sqlite3_value **apVal = 0; /* Function arguments */
- int nVal = 0; /* Size of apVal[] array */
- FuncDef *pFunc = 0; /* Function definition */
- sqlite3_value *pVal = 0; /* New value */
- int rc = SQLITE_OK; /* Return code */
- int nName; /* Size of function name in bytes */
- ExprList *pList = 0; /* Function arguments */
- int i; /* Iterator variable */
-
- assert( pCtx!=0 );
- assert( (p->flags & EP_TokenOnly)==0 );
- pList = p->x.pList;
- if( pList ) nVal = pList->nExpr;
- nName = sqlite3Strlen30(p->u.zToken);
- pFunc = sqlite3FindFunction(db, p->u.zToken, nName, nVal, enc, 0);
- assert( pFunc );
- if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
- || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
- ){
- return SQLITE_OK;
- }
-
- if( pList ){
- apVal = (sqlite3_value**)sqlite3DbMallocZero(db, sizeof(apVal[0]) * nVal);
- if( apVal==0 ){
- rc = SQLITE_NOMEM;
- goto value_from_function_out;
- }
- for(i=0; i<nVal; i++){
- rc = sqlite3ValueFromExpr(db, pList->a[i].pExpr, enc, aff, &apVal[i]);
- if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out;
- }
- }
-
- pVal = valueNew(db, pCtx);
- if( pVal==0 ){
- rc = SQLITE_NOMEM;
- goto value_from_function_out;
- }
-
- assert( pCtx->pParse->rc==SQLITE_OK );
- memset(&ctx, 0, sizeof(ctx));
- ctx.pOut = pVal;
- ctx.pFunc = pFunc;
- pFunc->xFunc(&ctx, nVal, apVal);
- if( ctx.isError ){
- rc = ctx.isError;
- sqlite3ErrorMsg(pCtx->pParse, "%s", sqlite3_value_text(pVal));
- }else{
- sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8);
- assert( rc==SQLITE_OK );
- rc = sqlite3VdbeChangeEncoding(pVal, enc);
- if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){
- rc = SQLITE_TOOBIG;
- pCtx->pParse->nErr++;
- }
- }
- pCtx->pParse->rc = rc;
-
- value_from_function_out:
- if( rc!=SQLITE_OK ){
- pVal = 0;
- }
- if( apVal ){
- for(i=0; i<nVal; i++){
- sqlite3ValueFree(apVal[i]);
- }
- sqlite3DbFree(db, apVal);
- }
-
- *ppVal = pVal;
- return rc;
-}
-#else
-# define valueFromFunction(a,b,c,d,e,f) SQLITE_OK
-#endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */
-
-/*
** Extract a value from the supplied expression in the manner described
** above sqlite3ValueFromExpr(). Allocate the sqlite3_value object
** using valueNew().
@@ -66246,26 +60710,9 @@ static int valueFromExpr(
*ppVal = 0;
return SQLITE_OK;
}
- while( (op = pExpr->op)==TK_UPLUS ) pExpr = pExpr->pLeft;
+ op = pExpr->op;
if( NEVER(op==TK_REGISTER) ) op = pExpr->op2;
- /* Compressed expressions only appear when parsing the DEFAULT clause
- ** on a table column definition, and hence only when pCtx==0. This
- ** check ensures that an EP_TokenOnly expression is never passed down
- ** into valueFromFunction(). */
- assert( (pExpr->flags & EP_TokenOnly)==0 || pCtx==0 );
-
- if( op==TK_CAST ){
- u8 aff = sqlite3AffinityType(pExpr->u.zToken,0);
- rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx);
- testcase( rc!=SQLITE_OK );
- if( *ppVal ){
- sqlite3VdbeMemCast(*ppVal, aff, SQLITE_UTF8);
- sqlite3ValueApplyAffinity(*ppVal, affinity, SQLITE_UTF8);
- }
- return rc;
- }
-
/* Handle negative integers in a single step. This is needed in the
** case when the value is -9223372036854775808.
*/
@@ -66286,8 +60733,9 @@ static int valueFromExpr(
zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
if( zVal==0 ) goto no_mem;
sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+ if( op==TK_FLOAT ) pVal->type = SQLITE_FLOAT;
}
- if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){
+ if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_NONE ){
sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
}else{
sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8);
@@ -66302,14 +60750,14 @@ static int valueFromExpr(
&& pVal!=0
){
sqlite3VdbeMemNumerify(pVal);
- if( pVal->flags & MEM_Real ){
- pVal->u.r = -pVal->u.r;
- }else if( pVal->u.i==SMALLEST_INT64 ){
- pVal->u.r = -(double)SMALLEST_INT64;
- MemSetTypeFlag(pVal, MEM_Real);
+ if( pVal->u.i==SMALLEST_INT64 ){
+ pVal->flags &= MEM_Int;
+ pVal->flags |= MEM_Real;
+ pVal->r = (double)LARGEST_INT64;
}else{
pVal->u.i = -pVal->u.i;
}
+ pVal->r = -pVal->r;
sqlite3ValueApplyAffinity(pVal, affinity, enc);
}
}else if( op==TK_NULL ){
@@ -66331,12 +60779,9 @@ static int valueFromExpr(
}
#endif
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- else if( op==TK_FUNCTION && pCtx!=0 ){
- rc = valueFromFunction(db, pExpr, enc, affinity, &pVal, pCtx);
+ if( pVal ){
+ sqlite3VdbeMemStoreType(pVal);
}
-#endif
-
*ppVal = pVal;
return rc;
@@ -66406,8 +60851,8 @@ static void recordFunc(
sqlite3_result_error_nomem(context);
}else{
aRet[0] = nSerial+1;
- putVarint32(&aRet[1], iSerial);
- sqlite3VdbeSerialPut(&aRet[1+nSerial], argv[0], iSerial);
+ sqlite3PutVarint(&aRet[1], iSerial);
+ sqlite3VdbeSerialPut(&aRet[1+nSerial], nVal, argv[0], file_format);
sqlite3_result_blob(context, aRet, nRet, SQLITE_TRANSIENT);
sqlite3DbFree(db, aRet);
}
@@ -66429,68 +60874,6 @@ SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void){
}
/*
-** Attempt to extract a value from pExpr and use it to construct *ppVal.
-**
-** If pAlloc is not NULL, then an UnpackedRecord object is created for
-** pAlloc if one does not exist and the new value is added to the
-** UnpackedRecord object.
-**
-** A value is extracted in the following cases:
-**
-** * (pExpr==0). In this case the value is assumed to be an SQL NULL,
-**
-** * The expression is a bound variable, and this is a reprepare, or
-**
-** * The expression is a literal value.
-**
-** On success, *ppVal is made to point to the extracted value. The caller
-** is responsible for ensuring that the value is eventually freed.
-*/
-static int stat4ValueFromExpr(
- Parse *pParse, /* Parse context */
- Expr *pExpr, /* The expression to extract a value from */
- u8 affinity, /* Affinity to use */
- struct ValueNewStat4Ctx *pAlloc,/* How to allocate space. Or NULL */
- sqlite3_value **ppVal /* OUT: New value object (or NULL) */
-){
- int rc = SQLITE_OK;
- sqlite3_value *pVal = 0;
- sqlite3 *db = pParse->db;
-
- /* Skip over any TK_COLLATE nodes */
- pExpr = sqlite3ExprSkipCollate(pExpr);
-
- if( !pExpr ){
- pVal = valueNew(db, pAlloc);
- if( pVal ){
- sqlite3VdbeMemSetNull((Mem*)pVal);
- }
- }else if( pExpr->op==TK_VARIABLE
- || NEVER(pExpr->op==TK_REGISTER && pExpr->op2==TK_VARIABLE)
- ){
- Vdbe *v;
- int iBindVar = pExpr->iColumn;
- sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar);
- if( (v = pParse->pReprepare)!=0 ){
- pVal = valueNew(db, pAlloc);
- if( pVal ){
- rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]);
- if( rc==SQLITE_OK ){
- sqlite3ValueApplyAffinity(pVal, affinity, ENC(db));
- }
- pVal->db = pParse->db;
- }
- }
- }else{
- rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, pAlloc);
- }
-
- assert( pVal==0 || pVal->db==db );
- *ppVal = pVal;
- return rc;
-}
-
-/*
** This function is used to allocate and populate UnpackedRecord
** structures intended to be compared against sample index keys stored
** in the sqlite_stat4 table.
@@ -66529,86 +60912,49 @@ SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
int iVal, /* Array element to populate */
int *pbOk /* OUT: True if value was extracted */
){
- int rc;
+ int rc = SQLITE_OK;
sqlite3_value *pVal = 0;
- struct ValueNewStat4Ctx alloc;
+ sqlite3 *db = pParse->db;
+
+ struct ValueNewStat4Ctx alloc;
alloc.pParse = pParse;
alloc.pIdx = pIdx;
alloc.ppRec = ppRec;
alloc.iVal = iVal;
- rc = stat4ValueFromExpr(pParse, pExpr, affinity, &alloc, &pVal);
- assert( pVal==0 || pVal->db==pParse->db );
- *pbOk = (pVal!=0);
- return rc;
-}
+ /* Skip over any TK_COLLATE nodes */
+ pExpr = sqlite3ExprSkipCollate(pExpr);
-/*
-** Attempt to extract a value from expression pExpr using the methods
-** as described for sqlite3Stat4ProbeSetValue() above.
-**
-** If successful, set *ppVal to point to a new value object and return
-** SQLITE_OK. If no value can be extracted, but no other error occurs
-** (e.g. OOM), return SQLITE_OK and set *ppVal to NULL. Or, if an error
-** does occur, return an SQLite error code. The final value of *ppVal
-** is undefined in this case.
-*/
-SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(
- Parse *pParse, /* Parse context */
- Expr *pExpr, /* The expression to extract a value from */
- u8 affinity, /* Affinity to use */
- sqlite3_value **ppVal /* OUT: New value object (or NULL) */
-){
- return stat4ValueFromExpr(pParse, pExpr, affinity, 0, ppVal);
-}
+ if( !pExpr ){
+ pVal = valueNew(db, &alloc);
+ if( pVal ){
+ sqlite3VdbeMemSetNull((Mem*)pVal);
+ }
+ }else if( pExpr->op==TK_VARIABLE
+ || NEVER(pExpr->op==TK_REGISTER && pExpr->op2==TK_VARIABLE)
+ ){
+ Vdbe *v;
+ int iBindVar = pExpr->iColumn;
+ sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar);
+ if( (v = pParse->pReprepare)!=0 ){
+ pVal = valueNew(db, &alloc);
+ if( pVal ){
+ rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]);
+ if( rc==SQLITE_OK ){
+ sqlite3ValueApplyAffinity(pVal, affinity, ENC(db));
+ }
+ pVal->db = pParse->db;
+ sqlite3VdbeMemStoreType((Mem*)pVal);
+ }
+ }
+ }else{
+ rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, &alloc);
+ }
+ *pbOk = (pVal!=0);
-/*
-** Extract the iCol-th column from the nRec-byte record in pRec. Write
-** the column value into *ppVal. If *ppVal is initially NULL then a new
-** sqlite3_value object is allocated.
-**
-** If *ppVal is initially NULL then the caller is responsible for
-** ensuring that the value written into *ppVal is eventually freed.
-*/
-SQLITE_PRIVATE int sqlite3Stat4Column(
- sqlite3 *db, /* Database handle */
- const void *pRec, /* Pointer to buffer containing record */
- int nRec, /* Size of buffer pRec in bytes */
- int iCol, /* Column to extract */
- sqlite3_value **ppVal /* OUT: Extracted value */
-){
- u32 t; /* a column type code */
- int nHdr; /* Size of the header in the record */
- int iHdr; /* Next unread header byte */
- int iField; /* Next unread data byte */
- int szField; /* Size of the current data field */
- int i; /* Column index */
- u8 *a = (u8*)pRec; /* Typecast byte array */
- Mem *pMem = *ppVal; /* Write result into this Mem object */
-
- assert( iCol>0 );
- iHdr = getVarint32(a, nHdr);
- if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT;
- iField = nHdr;
- for(i=0; i<=iCol; i++){
- iHdr += getVarint32(&a[iHdr], t);
- testcase( iHdr==nHdr );
- testcase( iHdr==nHdr+1 );
- if( iHdr>nHdr ) return SQLITE_CORRUPT_BKPT;
- szField = sqlite3VdbeSerialTypeLen(t);
- iField += szField;
- }
- testcase( iField==nRec );
- testcase( iField==nRec+1 );
- if( iField>nRec ) return SQLITE_CORRUPT_BKPT;
- if( pMem==0 ){
- pMem = *ppVal = sqlite3ValueNew(db);
- if( pMem==0 ) return SQLITE_NOMEM;
- }
- sqlite3VdbeSerialGet(&a[iField-szField], t, pMem);
- pMem->enc = ENC(db);
- return SQLITE_OK;
+ assert( pVal==0 || pVal->db==db );
+ return rc;
}
/*
@@ -66623,7 +60969,7 @@ SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord *pRec){
Mem *aMem = pRec->aMem;
sqlite3 *db = aMem[0].db;
for(i=0; i<nCol; i++){
- sqlite3VdbeMemRelease(&aMem[i]);
+ sqlite3DbFree(db, aMem[i].zMalloc);
}
sqlite3KeyInfoUnref(pRec->pKeyInfo);
sqlite3DbFree(db, pRec);
@@ -66654,28 +61000,19 @@ SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value *v){
}
/*
-** The sqlite3ValueBytes() routine returns the number of bytes in the
-** sqlite3_value object assuming that it uses the encoding "enc".
-** The valueBytes() routine is a helper function.
+** Return the number of bytes in the sqlite3_value object assuming
+** that it uses the encoding "enc"
*/
-static SQLITE_NOINLINE int valueBytes(sqlite3_value *pVal, u8 enc){
- return valueToText(pVal, enc)!=0 ? pVal->n : 0;
-}
SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
Mem *p = (Mem*)pVal;
- assert( (p->flags & MEM_Null)==0 || (p->flags & (MEM_Str|MEM_Blob))==0 );
- if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){
- return p->n;
- }
- if( (p->flags & MEM_Blob)!=0 ){
+ if( (p->flags & MEM_Blob)!=0 || sqlite3ValueText(pVal, enc) ){
if( p->flags & MEM_Zero ){
return p->n + p->u.nZero;
}else{
return p->n;
}
}
- if( p->flags & MEM_Null ) return 0;
- return valueBytes(pVal, enc);
+ return 0;
}
/************** End of vdbemem.c *********************************************/
@@ -66692,16 +61029,15 @@ SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
**
*************************************************************************
** This file contains code used for creating, destroying, and populating
-** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.)
+** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.) Prior
+** to version 2.8.7, all this code was combined into the vdbe.c source file.
+** But that file was getting too big so this subroutines were split out.
*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
/*
** Create a new virtual database engine.
*/
-SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){
- sqlite3 *db = pParse->db;
+SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(sqlite3 *db){
Vdbe *p;
p = sqlite3DbMallocZero(db, sizeof(Vdbe) );
if( p==0 ) return 0;
@@ -66713,25 +61049,10 @@ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){
p->pPrev = 0;
db->pVdbe = p;
p->magic = VDBE_MAGIC_INIT;
- p->pParse = pParse;
- assert( pParse->aLabel==0 );
- assert( pParse->nLabel==0 );
- assert( pParse->nOpAlloc==0 );
return p;
}
/*
-** Change the error string stored in Vdbe.zErrMsg
-*/
-SQLITE_PRIVATE void sqlite3VdbeError(Vdbe *p, const char *zFormat, ...){
- va_list ap;
- sqlite3DbFree(p->db, p->zErrMsg);
- va_start(ap, zFormat);
- p->zErrMsg = sqlite3VMPrintf(p->db, zFormat, ap);
- va_end(ap);
-}
-
-/*
** Remember the SQL string for a prepared statement.
*/
SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepareV2){
@@ -66748,9 +61069,9 @@ SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepa
/*
** Return the SQL associated with a prepared statement
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt){
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe *)pStmt;
- return p ? p->zSql : 0;
+ return (p && p->isPrepareV2) ? p->zSql : 0;
}
/*
@@ -66775,39 +61096,21 @@ SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){
}
/*
-** Resize the Vdbe.aOp array so that it is at least nOp elements larger
-** than its current size. nOp is guaranteed to be less than or equal
-** to 1024/sizeof(Op).
+** Resize the Vdbe.aOp array so that it is at least one op larger than
+** it was.
**
** If an out-of-memory error occurs while resizing the array, return
-** SQLITE_NOMEM. In this case Vdbe.aOp and Parse.nOpAlloc remain
+** SQLITE_NOMEM. In this case Vdbe.aOp and Vdbe.nOpAlloc remain
** unchanged (this is so that any opcodes already allocated can be
** correctly deallocated along with the rest of the Vdbe).
*/
-static int growOpArray(Vdbe *v, int nOp){
+static int growOpArray(Vdbe *p){
VdbeOp *pNew;
- Parse *p = v->pParse;
-
- /* The SQLITE_TEST_REALLOC_STRESS compile-time option is designed to force
- ** more frequent reallocs and hence provide more opportunities for
- ** simulated OOM faults. SQLITE_TEST_REALLOC_STRESS is generally used
- ** during testing only. With SQLITE_TEST_REALLOC_STRESS grow the op array
- ** by the minimum* amount required until the size reaches 512. Normal
- ** operation (without SQLITE_TEST_REALLOC_STRESS) is to double the current
- ** size of the op array or add 1KB of space, whichever is smaller. */
-#ifdef SQLITE_TEST_REALLOC_STRESS
- int nNew = (p->nOpAlloc>=512 ? p->nOpAlloc*2 : p->nOpAlloc+nOp);
-#else
int nNew = (p->nOpAlloc ? p->nOpAlloc*2 : (int)(1024/sizeof(Op)));
- UNUSED_PARAMETER(nOp);
-#endif
-
- assert( nOp<=(1024/sizeof(Op)) );
- assert( nNew>=(p->nOpAlloc+nOp) );
- pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op));
+ pNew = sqlite3DbRealloc(p->db, p->aOp, nNew*sizeof(Op));
if( pNew ){
p->nOpAlloc = sqlite3DbMallocSize(p->db, pNew)/sizeof(Op);
- v->aOp = pNew;
+ p->aOp = pNew;
}
return (pNew ? SQLITE_OK : SQLITE_NOMEM);
}
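growOpArray() keeps appends cheap by doubling the allocation (starting from roughly 1KB worth of opcodes) and leaving aOp/nOpAlloc untouched if the realloc fails. A generic sketch of that growth policy outside of SQLite (IntVec and intvec_push are illustrative names):

#include <stdlib.h>
#include <stdio.h>

/* Double the allocation from a fixed floor so that appending N items costs
** O(N) amortized realloc traffic; on OOM the old array is left untouched. */
typedef struct { int *a; int n; int nAlloc; } IntVec;

static int intvec_push(IntVec *v, int x){
  if( v->n>=v->nAlloc ){
    int nNew = v->nAlloc ? v->nAlloc*2 : 16;
    int *aNew = realloc(v->a, nNew*sizeof(int));
    if( aNew==0 ) return -1;
    v->a = aNew;
    v->nAlloc = nNew;
  }
  v->a[v->n++] = x;
  return 0;
}

int main(void){
  IntVec v = {0,0,0};
  for(int i=0; i<1000; i++) intvec_push(&v, i);
  printf("n=%d nAlloc=%d\n", v.n, v.nAlloc);   /* nAlloc grows 16, 32, ..., 1024 */
  free(v.a);
  return 0;
}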
@@ -66846,8 +61149,8 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
i = p->nOp;
assert( p->magic==VDBE_MAGIC_INIT );
assert( op>0 && op<0xff );
- if( p->pParse->nOpAlloc<=i ){
- if( growOpArray(p, 1) ){
+ if( p->nOpAlloc<=i ){
+ if( growOpArray(p) ){
return 1;
}
}
@@ -66865,15 +61168,6 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
#endif
#ifdef SQLITE_DEBUG
if( p->db->flags & SQLITE_VdbeAddopTrace ){
- int jj, kk;
- Parse *pParse = p->pParse;
- for(jj=kk=0; jj<SQLITE_N_COLCACHE; jj++){
- struct yColCache *x = pParse->aColCache + jj;
- if( x->iLevel>pParse->iCacheLevel || x->iReg==0 ) continue;
- printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn);
- kk++;
- }
- if( kk ) printf("\n");
sqlite3VdbePrintOp(0, i, &p->aOp[i]);
test_addop_breakpoint();
}
@@ -66882,9 +61176,6 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
pOp->cycles = 0;
pOp->cnt = 0;
#endif
-#ifdef SQLITE_VDBE_COVERAGE
- pOp->iSrcLine = 0;
-#endif
return i;
}
SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
@@ -66897,44 +61188,6 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
}
-/* Generate code for an unconditional jump to instruction iDest
-*/
-SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe *p, int iDest){
- return sqlite3VdbeAddOp3(p, OP_Goto, 0, iDest, 0);
-}
-
-/* Generate code to cause the string zStr to be loaded into
-** register iDest
-*/
-SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe *p, int iDest, const char *zStr){
- return sqlite3VdbeAddOp4(p, OP_String8, 0, iDest, 0, zStr, 0);
-}
-
-/*
-** Generate code that initializes multiple registers to string or integer
-** constants. The registers begin with iDest and increase consecutively.
-** One register is initialized for each character in zTypes[]. For each
-** "s" character in zTypes[], the register is a string if the argument is
-** not NULL, or OP_Null if the value is a null pointer. For each "i" character
-** in zTypes[], the register is initialized to an integer.
-*/
-SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe *p, int iDest, const char *zTypes, ...){
- va_list ap;
- int i;
- char c;
- va_start(ap, zTypes);
- for(i=0; (c = zTypes[i])!=0; i++){
- if( c=='s' ){
- const char *z = va_arg(ap, const char*);
- int addr = sqlite3VdbeAddOp2(p, z==0 ? OP_Null : OP_String8, 0, iDest++);
- if( z ) sqlite3VdbeChangeP4(p, addr, z, 0);
- }else{
- assert( c=='i' );
- sqlite3VdbeAddOp2(p, OP_Integer, va_arg(ap, int), iDest++);
- }
- }
- va_end(ap);
-}
/*
** Add an opcode that includes the p4 value as a pointer.
@@ -66954,24 +61207,6 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4(
}
/*
-** Add an opcode that includes the p4 value with a P4_INT64 or
-** P4_REAL type.
-*/
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(
- Vdbe *p, /* Add the opcode to this VM */
- int op, /* The new opcode */
- int p1, /* The P1 operand */
- int p2, /* The P2 operand */
- int p3, /* The P3 operand */
- const u8 *zP4, /* The P4 operand */
- int p4type /* P4 operand type */
-){
- char *p4copy = sqlite3DbMallocRaw(sqlite3VdbeDb(p), 8);
- if( p4copy ) memcpy(p4copy, zP4, 8);
- return sqlite3VdbeAddOp4(p, op, p1, p2, p3, p4copy, p4type);
-}
-
-/*
** Add an OP_ParseSchema opcode. This routine is broken out from
** sqlite3VdbeAddOp4() since it also needs to mark all btrees
** as having been used.
@@ -67016,10 +61251,9 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
**
** Zero is returned if a malloc() fails.
*/
-SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *v){
- Parse *p = v->pParse;
+SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *p){
int i = p->nLabel++;
- assert( v->magic==VDBE_MAGIC_INIT );
+ assert( p->magic==VDBE_MAGIC_INIT );
if( (i & (i-1))==0 ){
p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel,
(i*2+1)*sizeof(p->aLabel[0]));
@@ -67035,16 +61269,13 @@ SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *v){
** be inserted. The parameter "x" must have been obtained from
** a prior call to sqlite3VdbeMakeLabel().
*/
-SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
- Parse *p = v->pParse;
+SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *p, int x){
int j = -1-x;
- assert( v->magic==VDBE_MAGIC_INIT );
+ assert( p->magic==VDBE_MAGIC_INIT );
assert( j<p->nLabel );
- assert( j>=0 );
- if( p->aLabel ){
- p->aLabel[j] = v->nOp;
+ if( j>=0 && p->aLabel ){
+ p->aLabel[j] = p->nOp;
}
- p->iFixedOp = v->nOp - 1;
}
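MakeLabel/ResolveLabel hand out labels as negative numbers (-1-index) so they can never collide with real instruction addresses; jump opcodes carry the negative value in P2 until resolveP2Values() maps it back through aLabel[]. A compact standalone sketch of that encoding (fixed-size table here, purely illustrative):

#include <stdio.h>

static int aLabel[16];
static int nLabel = 0;

static int  make_label(void){ return -1 - nLabel++; }            /* -1, -2, ... */
static void resolve_label(int x, int addr){ aLabel[-1-x] = addr; }
static int  label_to_addr(int p2){ return p2<0 ? aLabel[-1-p2] : p2; }

int main(void){
  int lbl = make_label();      /* forward reference: target not known yet */
  int jump_p2 = lbl;           /* a jump opcode stores the negative value */
  resolve_label(lbl, 42);      /* later, the target address becomes known */
  printf("%d -> %d\n", jump_p2, label_to_addr(jump_p2));   /* -1 -> 42 */
  return 0;
}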
/*
@@ -67136,7 +61367,6 @@ static Op *opIterNext(VdbeOpIter *p){
** * OP_VUpdate
** * OP_VRename
** * OP_FkCounter with P2==0 (immediate foreign key constraint)
-** * OP_CreateTable and OP_InitCoroutine (for CREATE TABLE AS SELECT ...)
**
** Then check that the value of Parse.mayAbort is true if an
** ABORT may be thrown, or false otherwise. Return true if it does
@@ -67147,9 +61377,6 @@ static Op *opIterNext(VdbeOpIter *p){
*/
SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
int hasAbort = 0;
- int hasFkCounter = 0;
- int hasCreateTable = 0;
- int hasInitCoroutine = 0;
Op *pOp;
VdbeOpIter sIter;
memset(&sIter, 0, sizeof(sIter));
@@ -67158,19 +61385,15 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
while( (pOp = opIterNext(&sIter))!=0 ){
int opcode = pOp->opcode;
if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ || (opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1)
+#endif
|| ((opcode==OP_Halt || opcode==OP_HaltIfNull)
&& ((pOp->p1&0xff)==SQLITE_CONSTRAINT && pOp->p2==OE_Abort))
){
hasAbort = 1;
break;
}
- if( opcode==OP_CreateTable ) hasCreateTable = 1;
- if( opcode==OP_InitCoroutine ) hasInitCoroutine = 1;
-#ifndef SQLITE_OMIT_FOREIGN_KEY
- if( opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1 ){
- hasFkCounter = 1;
- }
-#endif
}
sqlite3DbFree(v->db, sIter.apSub);
@@ -67179,34 +61402,28 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
** through all opcodes and hasAbort may be set incorrectly. Return
** true for this case to prevent the assert() in the callers frame
** from failing. */
- return ( v->db->mallocFailed || hasAbort==mayAbort || hasFkCounter
- || (hasCreateTable && hasInitCoroutine) );
+ return ( v->db->mallocFailed || hasAbort==mayAbort );
}
#endif /* SQLITE_DEBUG - the sqlite3AssertMayAbort() function */
/*
-** This routine is called after all opcodes have been inserted. It loops
-** through all the opcodes and fixes up some details.
-**
-** (1) For each jump instruction with a negative P2 value (a label)
-** resolve the P2 value to an actual address.
+** Loop through the program looking for P2 values that are negative
+** on jump instructions. Each such value is a label. Resolve the
+** label by setting the P2 value to its correct non-zero value.
**
-** (2) Compute the maximum number of arguments used by any SQL function
-** and store that value in *pMaxFuncArgs.
+** This routine is called once after all opcodes have been inserted.
**
-** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
-** indicate what the prepared statement actually does.
+** Variable *pMaxFuncArgs is set to the maximum value of any P2 argument
+** to an OP_Function, OP_AggStep or OP_VFilter opcode. This is used by
+** sqlite3VdbeMakeReady() to size the Vdbe.apArg[] array.
**
-** (4) Initialize the p4.xAdvance pointer on opcodes that use it.
-**
-** (5) Reclaim the memory allocated for storing labels.
+** The Op.opflags field is set on all opcodes.
*/
static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
int i;
int nMaxArgs = *pMaxFuncArgs;
Op *pOp;
- Parse *pParse = p->pParse;
- int *aLabel = pParse->aLabel;
+ int *aLabel = p->aLabel;
p->readOnly = 1;
p->bIsReader = 0;
for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){
@@ -67215,6 +61432,11 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
/* NOTE: Be sure to update mkopcodeh.awk when adding or removing
** cases from this switch! */
switch( opcode ){
+ case OP_Function:
+ case OP_AggStep: {
+ if( pOp->p5>nMaxArgs ) nMaxArgs = pOp->p5;
+ break;
+ }
case OP_Transaction: {
if( pOp->p2!=0 ) p->readOnly = 0;
/* fall thru */
@@ -67264,15 +61486,14 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
pOp->opflags = sqlite3OpcodeProperty[opcode];
if( (pOp->opflags & OPFLG_JUMP)!=0 && pOp->p2<0 ){
- assert( -1-pOp->p2<pParse->nLabel );
+ assert( -1-pOp->p2<p->nLabel );
pOp->p2 = aLabel[-1-pOp->p2];
}
}
- sqlite3DbFree(p->db, pParse->aLabel);
- pParse->aLabel = 0;
- pParse->nLabel = 0;
+ sqlite3DbFree(p->db, p->aLabel);
+ p->aLabel = 0;
*pMaxFuncArgs = nMaxArgs;
- assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) );
+ assert( p->bIsReader!=0 || p->btreeMask==0 );
}
/*
@@ -67299,7 +61520,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg)
assert( aOp && !p->db->mallocFailed );
/* Check that sqlite3VdbeUsesBtree() was not called on this VM */
- assert( DbMaskAllZero(p->btreeMask) );
+ assert( p->btreeMask==0 );
resolveP2Values(p, pnMaxArg);
*pnOp = p->nOp;
@@ -67311,94 +61532,89 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg)
** Add a whole list of operations to the operation stack. Return the
** address of the first operation added.
*/
-SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp, int iLineno){
- int addr, i;
- VdbeOp *pOut;
- assert( nOp>0 );
+SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp){
+ int addr;
assert( p->magic==VDBE_MAGIC_INIT );
- if( p->nOp + nOp > p->pParse->nOpAlloc && growOpArray(p, nOp) ){
+ if( p->nOp + nOp > p->nOpAlloc && growOpArray(p) ){
return 0;
}
addr = p->nOp;
- pOut = &p->aOp[addr];
- for(i=0; i<nOp; i++, aOp++, pOut++){
- int p2 = aOp->p2;
- pOut->opcode = aOp->opcode;
- pOut->p1 = aOp->p1;
- if( p2<0 ){
- assert( sqlite3OpcodeProperty[pOut->opcode] & OPFLG_JUMP );
- pOut->p2 = addr + ADDR(p2);
- }else{
- pOut->p2 = p2;
- }
- pOut->p3 = aOp->p3;
- pOut->p4type = P4_NOTUSED;
- pOut->p4.p = 0;
- pOut->p5 = 0;
+ if( ALWAYS(nOp>0) ){
+ int i;
+ VdbeOpList const *pIn = aOp;
+ for(i=0; i<nOp; i++, pIn++){
+ int p2 = pIn->p2;
+ VdbeOp *pOut = &p->aOp[i+addr];
+ pOut->opcode = pIn->opcode;
+ pOut->p1 = pIn->p1;
+ if( p2<0 ){
+ assert( sqlite3OpcodeProperty[pOut->opcode] & OPFLG_JUMP );
+ pOut->p2 = addr + ADDR(p2);
+ }else{
+ pOut->p2 = p2;
+ }
+ pOut->p3 = pIn->p3;
+ pOut->p4type = P4_NOTUSED;
+ pOut->p4.p = 0;
+ pOut->p5 = 0;
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
- pOut->zComment = 0;
-#endif
-#ifdef SQLITE_VDBE_COVERAGE
- pOut->iSrcLine = iLineno+i;
-#else
- (void)iLineno;
+ pOut->zComment = 0;
#endif
#ifdef SQLITE_DEBUG
- if( p->db->flags & SQLITE_VdbeAddopTrace ){
- sqlite3VdbePrintOp(0, i+addr, &p->aOp[i+addr]);
- }
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ sqlite3VdbePrintOp(0, i+addr, &p->aOp[i+addr]);
+ }
#endif
+ }
+ p->nOp += nOp;
}
- p->nOp += nOp;
return addr;
}
-#if defined(SQLITE_ENABLE_STMT_SCANSTATUS)
/*
-** Add an entry to the array of counters managed by sqlite3_stmt_scanstatus().
+** Change the value of the P1 operand for a specific instruction.
+** This routine is useful when a large program is loaded from a
+** static array using sqlite3VdbeAddOpList but we want to make a
+** few minor changes to the program.
*/
-SQLITE_PRIVATE void sqlite3VdbeScanStatus(
- Vdbe *p, /* VM to add scanstatus() to */
- int addrExplain, /* Address of OP_Explain (or 0) */
- int addrLoop, /* Address of loop counter */
- int addrVisit, /* Address of rows visited counter */
- LogEst nEst, /* Estimated number of output rows */
- const char *zName /* Name of table or index being scanned */
-){
- int nByte = (p->nScan+1) * sizeof(ScanStatus);
- ScanStatus *aNew;
- aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte);
- if( aNew ){
- ScanStatus *pNew = &aNew[p->nScan++];
- pNew->addrExplain = addrExplain;
- pNew->addrLoop = addrLoop;
- pNew->addrVisit = addrVisit;
- pNew->nEst = nEst;
- pNew->zName = sqlite3DbStrDup(p->db, zName);
- p->aScan = aNew;
+SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, u32 addr, int val){
+ assert( p!=0 );
+ if( ((u32)p->nOp)>addr ){
+ p->aOp[addr].p1 = val;
}
}
-#endif
-
/*
-** Change the value of the opcode, or P1, P2, P3, or P5 operands
-** for a specific instruction.
+** Change the value of the P2 operand for a specific instruction.
+** This routine is useful for setting a jump destination.
*/
-SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe *p, u32 addr, u8 iNewOpcode){
- sqlite3VdbeGetOp(p,addr)->opcode = iNewOpcode;
-}
-SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, u32 addr, int val){
- sqlite3VdbeGetOp(p,addr)->p1 = val;
-}
SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){
- sqlite3VdbeGetOp(p,addr)->p2 = val;
+ assert( p!=0 );
+ if( ((u32)p->nOp)>addr ){
+ p->aOp[addr].p2 = val;
+ }
}
+
+/*
+** Change the value of the P3 operand for a specific instruction.
+*/
SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){
- sqlite3VdbeGetOp(p,addr)->p3 = val;
+ assert( p!=0 );
+ if( ((u32)p->nOp)>addr ){
+ p->aOp[addr].p3 = val;
+ }
}
-SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 p5){
- sqlite3VdbeGetOp(p,-1)->p5 = p5;
+
+/*
+** Change the value of the P5 operand for the most recently
+** added operation.
+*/
+SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 val){
+ assert( p!=0 );
+ if( p->aOp ){
+ assert( p->nOp>0 );
+ p->aOp[p->nOp-1].p5 = val;
+ }
}
/*
@@ -67406,8 +61622,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 p5){
** the address of the next instruction to be coded.
*/
SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){
- p->pParse->iFixedOp = p->nOp - 1;
- sqlite3VdbeChangeP2(p, addr, p->nOp);
+ if( ALWAYS(addr>=0) ) sqlite3VdbeChangeP2(p, addr, p->nOp);
}
@@ -67430,10 +61645,6 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
if( p4 ){
assert( db );
switch( p4type ){
- case P4_FUNCCTX: {
- freeEphemeralFunction(db, ((sqlite3_context*)p4)->pFunc);
- /* Fall through into the next case */
- }
case P4_REAL:
case P4_INT64:
case P4_DYNAMIC:
@@ -67458,7 +61669,7 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
sqlite3ValueFree((sqlite3_value*)p4);
}else{
Mem *p = (Mem*)p4;
- if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
+ sqlite3DbFree(db, p->zMalloc);
sqlite3DbFree(db, p);
}
break;
@@ -67503,7 +61714,7 @@ SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *pVdbe, SubProgram *p){
** Change the opcode at addr into OP_Noop
*/
SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe *p, int addr){
- if( addr<p->nOp ){
+ if( p->aOp ){
VdbeOp *pOp = &p->aOp[addr];
sqlite3 *db = p->db;
freeP4(db, pOp->p4type, pOp->p4.p);
@@ -67514,19 +61725,6 @@ SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe *p, int addr){
}
/*
-** If the last opcode is "op" and it is not a jump destination,
-** then remove it. Return true if and only if an opcode was removed.
-*/
-SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){
- if( (p->nOp-1)>(p->pParse->iFixedOp) && p->aOp[p->nOp-1].opcode==op ){
- sqlite3VdbeChangeToNoop(p, p->nOp-1);
- return 1;
- }else{
- return 0;
- }
-}
-
-/*
** Change the value of the P4 operand for a specific instruction.
** This routine is useful when a large program is loaded from a
** static array using sqlite3VdbeAddOpList but we want to make a
@@ -67561,9 +61759,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int
addr = p->nOp - 1;
}
pOp = &p->aOp[addr];
- assert( pOp->p4type==P4_NOTUSED
- || pOp->p4type==P4_INT32
- || pOp->p4type==P4_KEYINFO );
+ assert( pOp->p4type==P4_NOTUSED || pOp->p4type==P4_INT32 );
freeP4(db, pOp->p4type, pOp->p4.p);
pOp->p4.p = 0;
if( n==P4_INT32 ){
@@ -67639,15 +61835,6 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){
}
#endif /* NDEBUG */
-#ifdef SQLITE_VDBE_COVERAGE
-/*
-** Set the value of the iSrcLine field for the previously coded instruction.
-*/
-SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){
- sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine;
-}
-#endif /* SQLITE_VDBE_COVERAGE */
-
/*
** Return the opcode for a given address. If the address is -1, then
** return the most recently inserted opcode.
@@ -67656,10 +61843,18 @@ SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){
** routine, then a pointer to a dummy VdbeOp will be returned. That opcode
** is readable but not writable, though it is cast to a writable value.
** The return of a dummy opcode allows the call to continue functioning
-** after an OOM fault without having to check to see if the return from
+** after an OOM fault without having to check to see if the return from
** this routine is a valid pointer. But because the dummy.opcode is 0,
** dummy will never be written to. This is verified by code inspection and
** by running with Valgrind.
+**
+** About the #ifdef SQLITE_OMIT_TRACE: Normally, this routine is never called
+** unless p->nOp>0. This is because in the absence of SQLITE_OMIT_TRACE,
+** an OP_Trace instruction is always inserted by sqlite3VdbeGet() as soon as
+** a new VDBE is created. So we are free to set addr to p->nOp-1 without
+** having to double-check to make sure that the result is non-negative. But
+** if SQLITE_OMIT_TRACE is defined, the OP_Trace is omitted and we do need to
+** check the value of p->nOp-1 before continuing.
*/
SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
/* C89 specifies that the constant "dummy" will be initialized to all
@@ -67667,6 +61862,9 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */
assert( p->magic==VDBE_MAGIC_INIT );
if( addr<0 ){
+#ifdef SQLITE_OMIT_TRACE
+ if( p->nOp==0 ) return (VdbeOp*)&dummy;
+#endif
addr = p->nOp - 1;
}
assert( (addr>=0 && addr<p->nOp) || p->db->mallocFailed );
@@ -67691,17 +61889,7 @@ static int translateP(char c, const Op *pOp){
}
/*
-** Compute a string for the "comment" field of a VDBE opcode listing.
-**
-** The Synopsis: field in comments in the vdbe.c source file gets converted
-** to an extra string that is appended to the sqlite3OpcodeName(). In the
-** absence of other comments, this synopsis becomes the comment on the opcode.
-** Some translation occurs:
-**
-** "PX" -> "r[X]"
-** "PX@PY" -> "r[X..X+Y-1]" or "r[x]" if y is 0 or 1
-** "PX@PY+1" -> "r[X..X+Y]" or "r[x]" if y is 0
-** "PY..PY" -> "r[X..Y]" or "r[x]" if y<=x
+** Compute a string for the "comment" field of a VDBE opcode listing
*/
static int displayComment(
const Op *pOp, /* The opcode to be commented */
@@ -67735,13 +61923,7 @@ static int displayComment(
ii += 3;
jj += sqlite3Strlen30(zTemp+jj);
v2 = translateP(zSynopsis[ii], pOp);
- if( strncmp(zSynopsis+ii+1,"+1",2)==0 ){
- ii += 2;
- v2++;
- }
- if( v2>1 ){
- sqlite3_snprintf(nTemp-jj, zTemp+jj, "..%d", v1+v2-1);
- }
+ if( v2>1 ) sqlite3_snprintf(nTemp-jj, zTemp+jj, "..%d", v1+v2-1);
}else if( strncmp(zSynopsis+ii+1, "..P3", 4)==0 && pOp->p3==0 ){
ii += 4;
}
@@ -67792,9 +61974,8 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){
zColl = "B";
n = 1;
}
- if( i+n>nTemp-7 ){
+ if( i+n>nTemp-6 ){
memcpy(&zTemp[i],",...",4);
- i += 4;
break;
}
zTemp[i++] = ',';
@@ -67819,13 +62000,6 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){
sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg);
break;
}
-#ifdef SQLITE_DEBUG
- case P4_FUNCCTX: {
- FuncDef *pDef = pOp->p4.pCtx->pFunc;
- sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg);
- break;
- }
-#endif
case P4_INT64: {
sqlite3_snprintf(nTemp, zTemp, "%lld", *pOp->p4.pI64);
break;
@@ -67845,7 +62019,7 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){
}else if( pMem->flags & MEM_Int ){
sqlite3_snprintf(nTemp, zTemp, "%lld", pMem->u.i);
}else if( pMem->flags & MEM_Real ){
- sqlite3_snprintf(nTemp, zTemp, "%.16g", pMem->u.r);
+ sqlite3_snprintf(nTemp, zTemp, "%.16g", pMem->r);
}else if( pMem->flags & MEM_Null ){
sqlite3_snprintf(nTemp, zTemp, "NULL");
}else{
@@ -67857,7 +62031,7 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){
#ifndef SQLITE_OMIT_VIRTUALTABLE
case P4_VTAB: {
sqlite3_vtab *pVtab = pOp->p4.pVtab->pVtab;
- sqlite3_snprintf(nTemp, zTemp, "vtab:%p", pVtab);
+ sqlite3_snprintf(nTemp, zTemp, "vtab:%p:%p", pVtab, pVtab->pModule);
break;
}
#endif
@@ -67897,9 +62071,9 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){
SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){
assert( i>=0 && i<p->db->nDb && i<(int)sizeof(yDbMask)*8 );
assert( i<(int)sizeof(p->btreeMask)*8 );
- DbMaskSet(p->btreeMask, i);
+ p->btreeMask |= ((yDbMask)1)<<i;
if( i!=1 && sqlite3BtreeSharable(p->db->aDb[i].pBt) ){
- DbMaskSet(p->lockMask, i);
+ p->lockMask |= ((yDbMask)1)<<i;
}
}
@@ -67927,15 +62101,16 @@ SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){
*/
SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){
int i;
+ yDbMask mask;
sqlite3 *db;
Db *aDb;
int nDb;
- if( DbMaskAllZero(p->lockMask) ) return; /* The common case */
+ if( p->lockMask==0 ) return; /* The common case */
db = p->db;
aDb = db->aDb;
nDb = db->nDb;
- for(i=0; i<nDb; i++){
- if( i!=1 && DbMaskTest(p->lockMask,i) && ALWAYS(aDb[i].pBt!=0) ){
+ for(i=0, mask=1; i<nDb; i++, mask += mask){
+ if( i!=1 && (mask & p->lockMask)!=0 && ALWAYS(aDb[i].pBt!=0) ){
sqlite3BtreeEnter(aDb[i].pBt);
}
}
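
The hunks above revert the DbMaskSet()/DbMaskTest() helpers back to direct shifts on a yDbMask word. A minimal standalone sketch of the same walk-the-bits loop, using a plain unsigned int in place of yDbMask (names and output are illustrative only, not SQLite code):

#include <stdio.h>

/* Visit every database index whose bit is set in lockMask.
** mask += mask doubles the probe bit each pass (same as mask <<= 1),
** mirroring the loop in sqlite3VdbeEnter() above. */
static void visitLockedDbs(unsigned int lockMask, int nDb){
  unsigned int mask;
  int i;
  if( lockMask==0 ) return;            /* the common case: nothing locked */
  for(i=0, mask=1; i<nDb; i++, mask += mask){
    if( i!=1 && (mask & lockMask)!=0 ){
      printf("would lock btree %d\n", i);
    }
  }
}

int main(void){
  visitLockedDbs(0x05, 4);             /* bits 0 and 2 set -> btrees 0 and 2 */
  return 0;
}

Index 1 is skipped here only to mirror the i!=1 test in the hunk, which excludes the TEMP database from shared-cache locking.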
@@ -67946,24 +62121,22 @@ SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){
/*
** Unlock all of the btrees previously locked by a call to sqlite3VdbeEnter().
*/
-static SQLITE_NOINLINE void vdbeLeave(Vdbe *p){
+SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){
int i;
+ yDbMask mask;
sqlite3 *db;
Db *aDb;
int nDb;
+ if( p->lockMask==0 ) return; /* The common case */
db = p->db;
aDb = db->aDb;
nDb = db->nDb;
- for(i=0; i<nDb; i++){
- if( i!=1 && DbMaskTest(p->lockMask,i) && ALWAYS(aDb[i].pBt!=0) ){
+ for(i=0, mask=1; i<nDb; i++, mask += mask){
+ if( i!=1 && (mask & p->lockMask)!=0 && ALWAYS(aDb[i].pBt!=0) ){
sqlite3BtreeLeave(aDb[i].pBt);
}
}
}
-SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){
- if( DbMaskAllZero(p->lockMask) ) return; /* The common case */
- vdbeLeave(p);
-}
#endif
#if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG)
@@ -67980,11 +62153,8 @@ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
displayComment(pOp, zP4, zCom, sizeof(zCom));
#else
- zCom[0] = 0;
+    zCom[0] = 0;
#endif
- /* NB: The sqlite3OpcodeName() function is implemented by code created
- ** by the mkopcodeh.awk and mkopcodec.awk scripts which extract the
- ** information from the vdbe.c source text */
fprintf(pOut, zFormat1, pc,
sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3, zP4, pOp->p5,
zCom
@@ -67998,18 +62168,17 @@ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){
*/
static void releaseMemArray(Mem *p, int N){
if( p && N ){
- Mem *pEnd = &p[N];
+ Mem *pEnd;
sqlite3 *db = p->db;
u8 malloc_failed = db->mallocFailed;
if( db->pnBytesFreed ){
- do{
- if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
- }while( (++p)<pEnd );
+ for(pEnd=&p[N]; p<pEnd; p++){
+ sqlite3DbFree(db, p->zMalloc);
+ }
return;
}
- do{
+ for(pEnd=&p[N]; p<pEnd; p++){
assert( (&p[1])==pEnd || p[0].db==p[1].db );
- assert( sqlite3VdbeCheckMemInvariants(p) );
/* This block is really an inlined version of sqlite3VdbeMemRelease()
** that takes advantage of the fact that the memory cell value is
@@ -68023,19 +62192,15 @@ static void releaseMemArray(Mem *p, int N){
** with no indexes using a single prepared INSERT statement, bind()
** and reset(). Inserts are grouped into a transaction.
*/
- testcase( p->flags & MEM_Agg );
- testcase( p->flags & MEM_Dyn );
- testcase( p->flags & MEM_Frame );
- testcase( p->flags & MEM_RowSet );
if( p->flags&(MEM_Agg|MEM_Dyn|MEM_Frame|MEM_RowSet) ){
sqlite3VdbeMemRelease(p);
- }else if( p->szMalloc ){
+ }else if( p->zMalloc ){
sqlite3DbFree(db, p->zMalloc);
- p->szMalloc = 0;
+ p->zMalloc = 0;
}
- p->flags = MEM_Undefined;
- }while( (++p)<pEnd );
+ p->flags = MEM_Invalid;
+ }
db->mallocFailed = malloc_failed;
}
}
@@ -68136,7 +62301,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
}else if( db->u1.isInterrupted ){
p->rc = SQLITE_INTERRUPT;
rc = SQLITE_ERROR;
- sqlite3VdbeError(p, sqlite3ErrStr(p->rc));
+ sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(p->rc));
}else{
char *zP4;
Op *pOp;
@@ -68156,6 +62321,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
}
if( p->explain==1 ){
pMem->flags = MEM_Int;
+ pMem->type = SQLITE_INTEGER;
pMem->u.i = i; /* Program counter */
pMem++;
@@ -68163,6 +62329,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */
assert( pMem->z!=0 );
pMem->n = sqlite3Strlen30(pMem->z);
+ pMem->type = SQLITE_TEXT;
pMem->enc = SQLITE_UTF8;
pMem++;
@@ -68188,21 +62355,24 @@ SQLITE_PRIVATE int sqlite3VdbeList(
pMem->flags = MEM_Int;
pMem->u.i = pOp->p1; /* P1 */
+ pMem->type = SQLITE_INTEGER;
pMem++;
pMem->flags = MEM_Int;
pMem->u.i = pOp->p2; /* P2 */
+ pMem->type = SQLITE_INTEGER;
pMem++;
pMem->flags = MEM_Int;
pMem->u.i = pOp->p3; /* P3 */
+ pMem->type = SQLITE_INTEGER;
pMem++;
- if( sqlite3VdbeMemClearAndResize(pMem, 32) ){ /* P4 */
+ if( sqlite3VdbeMemGrow(pMem, 32, 0) ){ /* P4 */
assert( p->db->mallocFailed );
return SQLITE_ERROR;
}
- pMem->flags = MEM_Str|MEM_Term;
+ pMem->flags = MEM_Dyn|MEM_Str|MEM_Term;
zP4 = displayP4(pOp, pMem->z, 32);
if( zP4!=pMem->z ){
sqlite3VdbeMemSetStr(pMem, zP4, -1, SQLITE_UTF8, 0);
@@ -68211,29 +62381,33 @@ SQLITE_PRIVATE int sqlite3VdbeList(
pMem->n = sqlite3Strlen30(pMem->z);
pMem->enc = SQLITE_UTF8;
}
+ pMem->type = SQLITE_TEXT;
pMem++;
if( p->explain==1 ){
- if( sqlite3VdbeMemClearAndResize(pMem, 4) ){
+ if( sqlite3VdbeMemGrow(pMem, 4, 0) ){
assert( p->db->mallocFailed );
return SQLITE_ERROR;
}
- pMem->flags = MEM_Str|MEM_Term;
+ pMem->flags = MEM_Dyn|MEM_Str|MEM_Term;
pMem->n = 2;
sqlite3_snprintf(3, pMem->z, "%.2x", pOp->p5); /* P5 */
+ pMem->type = SQLITE_TEXT;
pMem->enc = SQLITE_UTF8;
pMem++;
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
- if( sqlite3VdbeMemClearAndResize(pMem, 500) ){
+ if( sqlite3VdbeMemGrow(pMem, 500, 0) ){
assert( p->db->mallocFailed );
return SQLITE_ERROR;
}
- pMem->flags = MEM_Str|MEM_Term;
+ pMem->flags = MEM_Dyn|MEM_Str|MEM_Term;
pMem->n = displayComment(pOp, zP4, pMem->z, 500);
+ pMem->type = SQLITE_TEXT;
pMem->enc = SQLITE_UTF8;
#else
pMem->flags = MEM_Null; /* Comment */
+ pMem->type = SQLITE_NULL;
#endif
}
@@ -68256,7 +62430,7 @@ SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe *p){
z = p->zSql;
}else if( p->nOp>=1 ){
const VdbeOp *pOp = &p->aOp[0];
- if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){
+ if( pOp->opcode==OP_Trace && pOp->p4.z!=0 ){
z = pOp->p4.z;
while( sqlite3Isspace(*z) ) z++;
}
@@ -68275,7 +62449,7 @@ SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe *p){
if( sqlite3IoTrace==0 ) return;
if( nOp<1 ) return;
pOp = &p->aOp[0];
- if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){
+ if( pOp->opcode==OP_Trace && pOp->p4.z!=0 ){
int i, j;
char z[1000];
sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z);
@@ -68378,13 +62552,13 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
/*
** Prepare a virtual machine for execution for the first time after
** creating the virtual machine. This involves things such
-** as allocating registers and initializing the program counter.
+** as allocating stack space and initializing the program counter.
** After the VDBE has be prepped, it can be executed by one or more
** calls to sqlite3VdbeExec().
**
-** This function may be called exactly once on each virtual machine.
+** This function may be called exactly once on each virtual machine.
** After this routine is called the VM has been "packaged" and is ready
-** to run. After this routine is called, further calls to
+** to run. After this routine is called, further calls to
** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects
** the Vdbe from the Parse object that helped generate it so that
** the Vdbe becomes an independent entity and the Parse object can be
@@ -68412,7 +62586,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
assert( p->nOp>0 );
assert( pParse!=0 );
assert( p->magic==VDBE_MAGIC_INIT );
- assert( pParse==p->pParse );
db = p->db;
assert( db->mallocFailed==0 );
nVar = pParse->nVar;
@@ -68436,8 +62609,8 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
/* Allocate space for memory registers, SQL variables, VDBE cursors and
** an array to marshal SQL function arguments in.
*/
- zCsr = (u8*)&p->aOp[p->nOp]; /* Memory avaliable for allocation */
- zEnd = (u8*)&p->aOp[pParse->nOpAlloc]; /* First byte past end of zCsr[] */
+  zCsr = (u8*)&p->aOp[p->nOp];       /* Memory available for allocation */
+ zEnd = (u8*)&p->aOp[p->nOpAlloc]; /* First byte past end of zCsr[] */
resolveP2Values(p, &nArg);
p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort);
@@ -68468,9 +62641,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->apCsr = allocSpace(p->apCsr, nCursor*sizeof(VdbeCursor*),
&zCsr, zEnd, &nByte);
p->aOnceFlag = allocSpace(p->aOnceFlag, nOnce, &zCsr, zEnd, &nByte);
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- p->anExec = allocSpace(p->anExec, p->nOp*sizeof(i64), &zCsr, zEnd, &nByte);
-#endif
if( nByte ){
p->pFree = sqlite3DbMallocZero(db, nByte);
}
@@ -68487,7 +62657,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->aVar[n].db = db;
}
}
- if( p->azVar && pParse->nzVar>0 ){
+ if( p->azVar ){
p->nzVar = pParse->nzVar;
memcpy(p->azVar, pParse->azVar, p->nzVar*sizeof(p->azVar[0]));
memset(pParse->azVar, 0, pParse->nzVar*sizeof(pParse->azVar[0]));
@@ -68496,7 +62666,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->aMem--; /* aMem[] goes from 1..nMem */
p->nMem = nMem; /* not from 0..nMem-1 */
for(n=1; n<=nMem; n++){
- p->aMem[n].flags = MEM_Undefined;
+ p->aMem[n].flags = MEM_Invalid;
p->aMem[n].db = db;
}
}
@@ -68521,43 +62691,23 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
sqlite3BtreeCloseCursor(pCx->pCursor);
}
#ifndef SQLITE_OMIT_VIRTUALTABLE
- else if( pCx->pVtabCursor ){
+ if( pCx->pVtabCursor ){
sqlite3_vtab_cursor *pVtabCursor = pCx->pVtabCursor;
const sqlite3_module *pModule = pVtabCursor->pVtab->pModule;
- assert( pVtabCursor->pVtab->nRef>0 );
- pVtabCursor->pVtab->nRef--;
+ p->inVtabMethod = 1;
pModule->xClose(pVtabCursor);
+ p->inVtabMethod = 0;
}
#endif
}
/*
-** Close all cursors in the current frame.
-*/
-static void closeCursorsInFrame(Vdbe *p){
- if( p->apCsr ){
- int i;
- for(i=0; i<p->nCursor; i++){
- VdbeCursor *pC = p->apCsr[i];
- if( pC ){
- sqlite3VdbeFreeCursor(p, pC);
- p->apCsr[i] = 0;
- }
- }
- }
-}
-
-/*
** Copy the values stored in the VdbeFrame structure to its Vdbe. This
** is used, for example, when a trigger sub-program is halted to restore
** control to the main program.
*/
SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){
Vdbe *v = pFrame->v;
- closeCursorsInFrame(v);
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- v->anExec = pFrame->anExec;
-#endif
v->aOnceFlag = pFrame->aOnceFlag;
v->nOnceFlag = pFrame->nOnceFlag;
v->aOp = pFrame->aOp;
@@ -68568,7 +62718,6 @@ SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){
v->nCursor = pFrame->nCursor;
v->db->lastRowid = pFrame->lastRowid;
v->nChange = pFrame->nChange;
- v->db->nChange = pFrame->nDbChange;
return pFrame->pc;
}
@@ -68585,11 +62734,20 @@ static void closeAllCursors(Vdbe *p){
VdbeFrame *pFrame;
for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
sqlite3VdbeFrameRestore(pFrame);
- p->pFrame = 0;
- p->nFrame = 0;
}
- assert( p->nFrame==0 );
- closeCursorsInFrame(p);
+ p->pFrame = 0;
+ p->nFrame = 0;
+
+ if( p->apCsr ){
+ int i;
+ for(i=0; i<p->nCursor; i++){
+ VdbeCursor *pC = p->apCsr[i];
+ if( pC ){
+ sqlite3VdbeFreeCursor(p, pC);
+ p->apCsr[i] = 0;
+ }
+ }
+ }
if( p->aMem ){
releaseMemArray(&p->aMem[1], p->nMem);
}
@@ -68600,12 +62758,16 @@ static void closeAllCursors(Vdbe *p){
}
/* Delete any auxdata allocations made by the VM */
- if( p->pAuxData ) sqlite3VdbeDeleteAuxData(p, -1, 0);
+ sqlite3VdbeDeleteAuxData(p, -1, 0);
assert( p->pAuxData==0 );
}
/*
-** Clean up the VM after a single run.
+** Clean up the VM after execution.
+**
+** This routine will automatically close any cursors, lists, and/or
+** sorters that were left open. It also deletes the values of
+** variables in the aVar[] array.
*/
static void Cleanup(Vdbe *p){
sqlite3 *db = p->db;
@@ -68616,7 +62778,7 @@ static void Cleanup(Vdbe *p){
int i;
if( p->apCsr ) for(i=0; i<p->nCursor; i++) assert( p->apCsr[i]==0 );
if( p->aMem ){
- for(i=1; i<=p->nMem; i++) assert( p->aMem[i].flags==MEM_Undefined );
+ for(i=1; i<=p->nMem; i++) assert( p->aMem[i].flags==MEM_Invalid );
}
#endif
@@ -68773,7 +62935,7 @@ static int vdbeCommit(sqlite3 *db, Vdbe *p){
/* The complex case - There is a multi-file write-transaction active.
** This requires a master journal file to ensure the transaction is
- ** committed atomically.
+  ** committed atomically.
*/
#ifndef SQLITE_OMIT_DISKIO
else{
@@ -68892,7 +63054,7 @@ static int vdbeCommit(sqlite3 *db, Vdbe *p){
** doing this the directory is synced again before any individual
** transaction files are deleted.
*/
- rc = sqlite3OsDelete(pVfs, zMaster, needSync);
+ rc = sqlite3OsDelete(pVfs, zMaster, 1);
sqlite3DbFree(db, zMaster);
zMaster = 0;
if( rc ){
@@ -68941,7 +63103,7 @@ static void checkActiveVdbeCnt(sqlite3 *db){
int nRead = 0;
p = db->pVdbe;
while( p ){
- if( sqlite3_stmt_busy((sqlite3_stmt*)p) ){
+ if( p->magic==VDBE_MAGIC_RUN && p->pc>=0 ){
cnt++;
if( p->readOnly==0 ) nWrite++;
if( p->bIsReader ) nRead++;
@@ -69039,7 +63201,7 @@ SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){
){
p->rc = SQLITE_CONSTRAINT_FOREIGNKEY;
p->errorAction = OE_Abort;
- sqlite3VdbeError(p, "FOREIGN KEY constraint failed");
+ sqlite3SetString(&p->zErrMsg, db, "FOREIGN KEY constraint failed");
return SQLITE_ERROR;
}
return SQLITE_OK;
@@ -69101,6 +63263,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
/* Check for one of the special errors */
mrc = p->rc & 0xff;
+ assert( p->rc!=SQLITE_IOERR_BLOCKED ); /* This error no longer exists */
isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR
|| mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL;
if( isSpecialError ){
@@ -69126,7 +63289,6 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
sqlite3CloseSavepoints(db);
db->autoCommit = 1;
- p->nChange = 0;
}
}
}
@@ -69167,7 +63329,6 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
}else if( rc!=SQLITE_OK ){
p->rc = rc;
sqlite3RollbackAll(db, SQLITE_OK);
- p->nChange = 0;
}else{
db->nDeferredCons = 0;
db->nDeferredImmCons = 0;
@@ -69176,7 +63337,6 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
}
}else{
sqlite3RollbackAll(db, SQLITE_OK);
- p->nChange = 0;
}
db->nStatement = 0;
}else if( eStatementOp==0 ){
@@ -69188,7 +63348,6 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
sqlite3CloseSavepoints(db);
db->autoCommit = 1;
- p->nChange = 0;
}
}
@@ -69209,7 +63368,6 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
sqlite3CloseSavepoints(db);
db->autoCommit = 1;
- p->nChange = 0;
}
}
@@ -69279,13 +63437,12 @@ SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){
if( p->zErrMsg ){
u8 mallocFailed = db->mallocFailed;
sqlite3BeginBenignMalloc();
- if( db->pErr==0 ) db->pErr = sqlite3ValueNew(db);
sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT);
sqlite3EndBenignMalloc();
db->mallocFailed = mallocFailed;
db->errCode = rc;
}else{
- sqlite3Error(db, rc);
+ sqlite3Error(db, rc, 0);
}
return rc;
}
@@ -69348,7 +63505,8 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
** to sqlite3_step(). For consistency (since sqlite3_step() was
** called), set the database error in this case as well.
*/
- sqlite3ErrorWithMsg(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg);
+ sqlite3Error(db, p->rc, 0);
+ sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT);
sqlite3DbFree(db, p->zErrMsg);
p->zErrMsg = 0;
}
@@ -69369,24 +63527,12 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
fprintf(out, "%02x", p->aOp[i].opcode);
}
fprintf(out, "\n");
- if( p->zSql ){
- char c, pc = 0;
- fprintf(out, "-- ");
- for(i=0; (c = p->zSql[i])!=0; i++){
- if( pc=='\n' ) fprintf(out, "-- ");
- putc(c, out);
- pc = c;
- }
- if( pc!='\n' ) fprintf(out, "\n");
- }
for(i=0; i<p->nOp; i++){
- char zHdr[100];
- sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ",
+ fprintf(out, "%6d %10lld %8lld ",
p->aOp[i].cnt,
p->aOp[i].cycles,
p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0
);
- fprintf(out, "%s", zHdr);
sqlite3VdbePrintOp(out, i, &p->aOp[i]);
}
fclose(out);
@@ -69426,16 +63572,15 @@ SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){
** from left to right), or
**
** * the corresponding bit in argument mask is clear (where the first
-** function parameter corresponds to bit 0 etc.).
+** function parameter corresponds to bit 0 etc.).
*/
SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(Vdbe *pVdbe, int iOp, int mask){
AuxData **pp = &pVdbe->pAuxData;
while( *pp ){
AuxData *pAux = *pp;
if( (iOp<0)
- || (pAux->iOp==iOp && (pAux->iArg>31 || !(mask & MASKBIT32(pAux->iArg))))
+ || (pAux->iOp==iOp && (pAux->iArg>31 || !(mask & ((u32)1<<pAux->iArg))))
){
- testcase( pAux->iArg==31 );
if( pAux->xDelete ){
pAux->xDelete(pAux->pAux);
}
@@ -69468,14 +63613,13 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
}
for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]);
vdbeFreeOpArray(db, p->aOp, p->nOp);
+ sqlite3DbFree(db, p->aLabel);
sqlite3DbFree(db, p->aColName);
sqlite3DbFree(db, p->zSql);
sqlite3DbFree(db, p->pFree);
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- for(i=0; i<p->nScan; i++){
- sqlite3DbFree(db, p->aScan[i].zName);
- }
- sqlite3DbFree(db, p->aScan);
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+ sqlite3DbFree(db, p->zExplain);
+ sqlite3DbFree(db, p->pExplain);
#endif
}
@@ -69504,57 +63648,6 @@ SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){
}
/*
-** The cursor "p" has a pending seek operation that has not yet been
-** carried out. Seek the cursor now. If an error occurs, return
-** the appropriate error code.
-*/
-static int SQLITE_NOINLINE handleDeferredMoveto(VdbeCursor *p){
- int res, rc;
-#ifdef SQLITE_TEST
- extern int sqlite3_search_count;
-#endif
- assert( p->deferredMoveto );
- assert( p->isTable );
- rc = sqlite3BtreeMovetoUnpacked(p->pCursor, 0, p->movetoTarget, 0, &res);
- if( rc ) return rc;
- if( res!=0 ) return SQLITE_CORRUPT_BKPT;
-#ifdef SQLITE_TEST
- sqlite3_search_count++;
-#endif
- p->deferredMoveto = 0;
- p->cacheStatus = CACHE_STALE;
- return SQLITE_OK;
-}
-
-/*
-** Something has moved cursor "p" out of place. Maybe the row it was
-** pointed to was deleted out from under it. Or maybe the btree was
-** rebalanced. Whatever the cause, try to restore "p" to the place it
-** is supposed to be pointing. If the row was deleted out from under the
-** cursor, set the cursor to point to a NULL row.
-*/
-static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){
- int isDifferentRow, rc;
- assert( p->pCursor!=0 );
- assert( sqlite3BtreeCursorHasMoved(p->pCursor) );
- rc = sqlite3BtreeCursorRestore(p->pCursor, &isDifferentRow);
- p->cacheStatus = CACHE_STALE;
- if( isDifferentRow ) p->nullRow = 1;
- return rc;
-}
-
-/*
-** Check to ensure that the cursor is valid. Restore the cursor
-** if need be. Return any I/O error from the restore operation.
-*/
-SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){
- if( sqlite3BtreeCursorHasMoved(p->pCursor) ){
- return handleMovedCursor(p);
- }
- return SQLITE_OK;
-}
-
-/*
** Make sure the cursor p is ready to read or write the row to which it
** was last positioned. Return an error code if an OOM fault or I/O error
** prevents us from positioning the cursor to its correct position.
@@ -69569,10 +63662,29 @@ SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){
*/
SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor *p){
if( p->deferredMoveto ){
- return handleDeferredMoveto(p);
- }
- if( p->pCursor && sqlite3BtreeCursorHasMoved(p->pCursor) ){
- return handleMovedCursor(p);
+ int res, rc;
+#ifdef SQLITE_TEST
+ extern int sqlite3_search_count;
+#endif
+ assert( p->isTable );
+ rc = sqlite3BtreeMovetoUnpacked(p->pCursor, 0, p->movetoTarget, 0, &res);
+ if( rc ) return rc;
+ p->lastRowid = p->movetoTarget;
+ if( res!=0 ) return SQLITE_CORRUPT_BKPT;
+ p->rowidIsValid = 1;
+#ifdef SQLITE_TEST
+ sqlite3_search_count++;
+#endif
+ p->deferredMoveto = 0;
+ p->cacheStatus = CACHE_STALE;
+ }else if( p->pCursor ){
+ int hasMoved;
+ int rc = sqlite3BtreeCursorHasMoved(p->pCursor, &hasMoved);
+ if( rc ) return rc;
+ if( hasMoved ){
+ p->cacheStatus = CACHE_STALE;
+ p->nullRow = 1;
+ }
}
return SQLITE_OK;
}
@@ -69624,7 +63736,7 @@ SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor *p){
*/
SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format){
int flags = pMem->flags;
- u32 n;
+ int n;
if( flags&MEM_Null ){
return 0;
@@ -69635,7 +63747,9 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format){
i64 i = pMem->u.i;
u64 u;
if( i<0 ){
- u = ~i;
+ if( i<(-MAX_6BYTE) ) return 6;
+ /* Previous test prevents: u = -(-9223372036854775808) */
+ u = -i;
}else{
u = i;
}
@@ -69652,29 +63766,23 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format){
return 7;
}
assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) );
- assert( pMem->n>=0 );
- n = (u32)pMem->n;
+ n = pMem->n;
if( flags & MEM_Zero ){
n += pMem->u.nZero;
}
+ assert( n>=0 );
return ((n*2) + 12 + ((flags&MEM_Str)!=0));
}
/*
-** The sizes for serial types less than 12
-*/
-static const u8 sqlite3SmallTypeSizes[] = {
- 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0
-};
-
-/*
** Return the length of the data corresponding to the supplied serial-type.
*/
SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32 serial_type){
if( serial_type>=12 ){
return (serial_type-12)/2;
}else{
- return sqlite3SmallTypeSizes[serial_type];
+ static const u8 aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 };
+ return aSize[serial_type];
}
}
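
The serial-type helpers above map a value to a small integer code: fixed-size codes below 12 use the aSize[] table, while strings and blobs use (n*2)+12 plus one if the value is text. A self-contained sketch of just that text/blob mapping (the integer ladder for types 1-6, 8 and 9 is chosen elsewhere and is not reproduced here):

#include <assert.h>
#include <stdio.h>

/* Sizes for serial types 0..11, as in the aSize[] table above. */
static const unsigned char aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 };

/* Serial type for a text or blob payload of n bytes. */
static unsigned int serialTypeForPayload(int n, int isText){
  return (unsigned int)(n*2) + 12 + (isText ? 1 : 0);
}

/* Content length implied by a serial type. */
static unsigned int serialTypeLen(unsigned int serial_type){
  if( serial_type>=12 ) return (serial_type-12)/2;
  return aSize[serial_type];
}

int main(void){
  unsigned int t = serialTypeForPayload(5, 1);   /* 5-byte string */
  assert( t==23 && serialTypeLen(t)==5 );        /* odd type  => text */
  t = serialTypeForPayload(4, 0);                /* 4-byte blob */
  assert( t==20 && serialTypeLen(t)==4 );        /* even type => blob */
  printf("serial type 7 (IEEE double) stores %u bytes\n", serialTypeLen(7));
  return 0;
}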
@@ -69736,15 +63844,21 @@ static u64 floatSwap(u64 in){
** buf. It is assumed that the caller has allocated sufficient space.
** Return the number of bytes written.
**
-** nBuf is the amount of space left in buf[]. The caller is responsible
-** for allocating enough space to buf[] to hold the entire field, exclusive
-** of the pMem->u.nZero bytes for a MEM_Zero value.
+** nBuf is the amount of space left in buf[]. nBuf must always be
+** large enough to hold the entire field. Except, if the field is
+** a blob with a zero-filled tail, then buf[] might be just the right
+** size to hold everything except for the zero-filled tail. If buf[]
+** is only big enough to hold the non-zero prefix, then only write that
+** prefix into buf[]. But if buf[] is large enough to hold both the
+** prefix and the tail then write the prefix and set the tail to all
+** zeros.
**
** Return the number of bytes actually written into buf[]. The number
** of bytes in the zero-filled tail is included in the return value only
** if those bytes were zeroed in buf[].
*/
-SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
+SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, int nBuf, Mem *pMem, int file_format){
+ u32 serial_type = sqlite3VdbeSerialType(pMem, file_format);
u32 len;
/* Integer and Real */
@@ -69752,18 +63866,18 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
u64 v;
u32 i;
if( serial_type==7 ){
- assert( sizeof(v)==sizeof(pMem->u.r) );
- memcpy(&v, &pMem->u.r, sizeof(v));
+ assert( sizeof(v)==sizeof(pMem->r) );
+ memcpy(&v, &pMem->r, sizeof(v));
swapMixedEndianFloat(v);
}else{
v = pMem->u.i;
}
- len = i = sqlite3SmallTypeSizes[serial_type];
- assert( i>0 );
- do{
- buf[--i] = (u8)(v&0xFF);
+ len = i = sqlite3VdbeSerialTypeLen(serial_type);
+ assert( len<=(u32)nBuf );
+ while( i-- ){
+ buf[i] = (u8)(v&0xFF);
v >>= 8;
- }while( i );
+ }
return len;
}
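
The loop above emits an integer or real value most-significant byte first. A standalone sketch of the same big-endian put/get round trip with plain C types (sign handling, which in SQLite falls out of the serial-type width, is omitted):

#include <assert.h>

/* Write the low "len" bytes of v into buf[] big-endian, most
** significant byte first, exactly like the while( i-- ) loop above. */
static void putBigEndian(unsigned char *buf, unsigned long long v, unsigned int len){
  unsigned int i = len;
  while( i-- ){
    buf[i] = (unsigned char)(v & 0xFF);
    v >>= 8;
  }
}

/* Read "len" big-endian bytes back into an unsigned value. */
static unsigned long long getBigEndian(const unsigned char *buf, unsigned int len){
  unsigned long long v = 0;
  unsigned int i;
  for(i=0; i<len; i++) v = (v<<8) | buf[i];
  return v;
}

int main(void){
  unsigned char buf[8];
  putBigEndian(buf, 0x11223344u, 4);
  assert( buf[0]==0x11 && buf[3]==0x44 );       /* MSB lands first */
  assert( getBigEndian(buf, 4)==0x11223344u );  /* round trip */
  return 0;
}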
@@ -69771,8 +63885,17 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
if( serial_type>=12 ){
assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0)
== (int)sqlite3VdbeSerialTypeLen(serial_type) );
+ assert( pMem->n<=nBuf );
len = pMem->n;
memcpy(buf, pMem->z, len);
+ if( pMem->flags & MEM_Zero ){
+ len += pMem->u.nZero;
+ assert( nBuf>=0 );
+ if( len > (u32)nBuf ){
+ len = (u32)nBuf;
+ }
+ memset(&buf[pMem->n], 0, len-pMem->n);
+ }
return len;
}
@@ -69780,60 +63903,10 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
return 0;
}
-/* Input "x" is a sequence of unsigned characters that represent a
-** big-endian integer. Return the equivalent native integer
-*/
-#define ONE_BYTE_INT(x) ((i8)(x)[0])
-#define TWO_BYTE_INT(x) (256*(i8)((x)[0])|(x)[1])
-#define THREE_BYTE_INT(x) (65536*(i8)((x)[0])|((x)[1]<<8)|(x)[2])
-#define FOUR_BYTE_UINT(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
-#define FOUR_BYTE_INT(x) (16777216*(i8)((x)[0])|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
-
/*
** Deserialize the data blob pointed to by buf as serial type serial_type
** and store the result in pMem. Return the number of bytes read.
-**
-** This function is implemented as two separate routines for performance.
-** The few cases that require local variables are broken out into a separate
-** routine so that in most cases the overhead of moving the stack pointer
-** is avoided.
*/
-static u32 SQLITE_NOINLINE serialGet(
- const unsigned char *buf, /* Buffer to deserialize from */
- u32 serial_type, /* Serial type to deserialize */
- Mem *pMem /* Memory cell to write value into */
-){
- u64 x = FOUR_BYTE_UINT(buf);
- u32 y = FOUR_BYTE_UINT(buf+4);
- x = (x<<32) + y;
- if( serial_type==6 ){
- /* EVIDENCE-OF: R-29851-52272 Value is a big-endian 64-bit
- ** twos-complement integer. */
- pMem->u.i = *(i64*)&x;
- pMem->flags = MEM_Int;
- testcase( pMem->u.i<0 );
- }else{
- /* EVIDENCE-OF: R-57343-49114 Value is a big-endian IEEE 754-2008 64-bit
- ** floating point number. */
-#if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT)
- /* Verify that integers and floating point values use the same
- ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is
- ** defined that 64-bit floating point values really are mixed
- ** endian.
- */
- static const u64 t1 = ((u64)0x3ff00000)<<32;
- static const double r1 = 1.0;
- u64 t2 = t1;
- swapMixedEndianFloat(t2);
- assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 );
-#endif
- assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 );
- swapMixedEndianFloat(x);
- memcpy(&pMem->u.r, &x, sizeof(x));
- pMem->flags = sqlite3IsNaN(pMem->u.r) ? MEM_Null : MEM_Real;
- }
- return 8;
-}
SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(
const unsigned char *buf, /* Buffer to deserialize from */
u32 serial_type, /* Serial type to deserialize */
@@ -69842,79 +63915,88 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(
switch( serial_type ){
case 10: /* Reserved for future use */
case 11: /* Reserved for future use */
- case 0: { /* Null */
- /* EVIDENCE-OF: R-24078-09375 Value is a NULL. */
+ case 0: { /* NULL */
pMem->flags = MEM_Null;
break;
}
- case 1: {
- /* EVIDENCE-OF: R-44885-25196 Value is an 8-bit twos-complement
- ** integer. */
- pMem->u.i = ONE_BYTE_INT(buf);
+ case 1: { /* 1-byte signed integer */
+ pMem->u.i = (signed char)buf[0];
pMem->flags = MEM_Int;
- testcase( pMem->u.i<0 );
return 1;
}
case 2: { /* 2-byte signed integer */
- /* EVIDENCE-OF: R-49794-35026 Value is a big-endian 16-bit
- ** twos-complement integer. */
- pMem->u.i = TWO_BYTE_INT(buf);
+ pMem->u.i = (((signed char)buf[0])<<8) | buf[1];
pMem->flags = MEM_Int;
- testcase( pMem->u.i<0 );
return 2;
}
case 3: { /* 3-byte signed integer */
- /* EVIDENCE-OF: R-37839-54301 Value is a big-endian 24-bit
- ** twos-complement integer. */
- pMem->u.i = THREE_BYTE_INT(buf);
+ pMem->u.i = (((signed char)buf[0])<<16) | (buf[1]<<8) | buf[2];
pMem->flags = MEM_Int;
- testcase( pMem->u.i<0 );
return 3;
}
case 4: { /* 4-byte signed integer */
- /* EVIDENCE-OF: R-01849-26079 Value is a big-endian 32-bit
- ** twos-complement integer. */
- pMem->u.i = FOUR_BYTE_INT(buf);
+ pMem->u.i = (buf[0]<<24) | (buf[1]<<16) | (buf[2]<<8) | buf[3];
pMem->flags = MEM_Int;
- testcase( pMem->u.i<0 );
return 4;
}
case 5: { /* 6-byte signed integer */
- /* EVIDENCE-OF: R-50385-09674 Value is a big-endian 48-bit
- ** twos-complement integer. */
- pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf);
+ u64 x = (((signed char)buf[0])<<8) | buf[1];
+ u32 y = (buf[2]<<24) | (buf[3]<<16) | (buf[4]<<8) | buf[5];
+ x = (x<<32) | y;
+ pMem->u.i = *(i64*)&x;
pMem->flags = MEM_Int;
- testcase( pMem->u.i<0 );
return 6;
}
case 6: /* 8-byte signed integer */
case 7: { /* IEEE floating point */
- /* These use local variables, so do them in a separate routine
- ** to avoid having to move the frame pointer in the common case */
- return serialGet(buf,serial_type,pMem);
+ u64 x;
+ u32 y;
+#if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT)
+ /* Verify that integers and floating point values use the same
+ ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is
+ ** defined that 64-bit floating point values really are mixed
+ ** endian.
+ */
+ static const u64 t1 = ((u64)0x3ff00000)<<32;
+ static const double r1 = 1.0;
+ u64 t2 = t1;
+ swapMixedEndianFloat(t2);
+ assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 );
+#endif
+
+ x = (buf[0]<<24) | (buf[1]<<16) | (buf[2]<<8) | buf[3];
+ y = (buf[4]<<24) | (buf[5]<<16) | (buf[6]<<8) | buf[7];
+ x = (x<<32) | y;
+ if( serial_type==6 ){
+ pMem->u.i = *(i64*)&x;
+ pMem->flags = MEM_Int;
+ }else{
+ assert( sizeof(x)==8 && sizeof(pMem->r)==8 );
+ swapMixedEndianFloat(x);
+ memcpy(&pMem->r, &x, sizeof(x));
+ pMem->flags = sqlite3IsNaN(pMem->r) ? MEM_Null : MEM_Real;
+ }
+ return 8;
}
case 8: /* Integer 0 */
case 9: { /* Integer 1 */
- /* EVIDENCE-OF: R-12976-22893 Value is the integer 0. */
- /* EVIDENCE-OF: R-18143-12121 Value is the integer 1. */
pMem->u.i = serial_type-8;
pMem->flags = MEM_Int;
return 0;
}
default: {
- /* EVIDENCE-OF: R-14606-31564 Value is a BLOB that is (N-12)/2 bytes in
- ** length.
- ** EVIDENCE-OF: R-28401-00140 Value is a string in the text encoding and
- ** (N-13)/2 bytes in length. */
static const u16 aFlag[] = { MEM_Blob|MEM_Ephem, MEM_Str|MEM_Ephem };
+ u32 len = (serial_type-12)/2;
pMem->z = (char *)buf;
- pMem->n = (serial_type-12)/2;
+ pMem->n = len;
+ pMem->xDel = 0;
pMem->flags = aFlag[serial_type&1];
- return pMem->n;
+ return len;
}
}
return 0;
}
+
/*
** This routine is used to allocate sufficient space for an UnpackedRecord
** structure large enough to be used with sqlite3VdbeRecordUnpack() if
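
sqlite3VdbeSerialGet() above decodes one field given its serial type; a full record is a header (a size-of-header varint followed by one serial-type varint per field) and then the field bodies in order, as the getVarint32() calls in the next hunk read them. An illustrative, hand-assembled record for the values (7, "hi"), assuming every varint fits in a single byte:

#include <assert.h>
#include <string.h>

int main(void){
  /* Record for the two values (7, "hi").
  ** Header: size-of-header varint, then one serial type per field.
  **   0x03  header is 3 bytes long (all varints fit in one byte here)
  **   0x01  serial type 1  -> 1-byte signed integer
  **   0x11  serial type 17 -> text of (17-12)/2 = 2 bytes
  ** Body: the field contents, in order. */
  static const unsigned char rec[] = { 0x03, 0x01, 0x11, 0x07, 'h', 'i' };

  unsigned int szHdr = rec[0];               /* single-byte varint */
  unsigned int type1 = rec[1];
  unsigned int type2 = rec[2];

  assert( szHdr==3 );
  assert( type1==1 );                        /* 1-byte signed int */
  assert( (type2-12)/2==2 && (type2&1)==1 ); /* odd => text, 2 bytes */
  assert( rec[szHdr]==7 );                   /* first field value */
  assert( memcmp(&rec[szHdr+1], "hi", 2)==0 );
  return 0;
}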
@@ -69979,43 +64061,47 @@ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(
u32 szHdr;
Mem *pMem = p->aMem;
- p->default_rc = 0;
+ p->flags = 0;
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
idx = getVarint32(aKey, szHdr);
d = szHdr;
u = 0;
- while( idx<szHdr && d<=nKey ){
+ while( idx<szHdr && u<p->nField && d<=nKey ){
u32 serial_type;
idx += getVarint32(&aKey[idx], serial_type);
pMem->enc = pKeyInfo->enc;
pMem->db = pKeyInfo->db;
/* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */
- pMem->szMalloc = 0;
+ pMem->zMalloc = 0;
d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem);
pMem++;
- if( (++u)>=p->nField ) break;
+ u++;
}
assert( u<=pKeyInfo->nField + 1 );
p->nField = u;
}
-#if SQLITE_DEBUG
/*
-** This function compares two index or table record keys in the same way
-** as the sqlite3VdbeRecordCompare() routine. Unlike VdbeRecordCompare(),
-** this function deserializes and compares values using the
-** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used
-** in assert() statements to ensure that the optimized code in
-** sqlite3VdbeRecordCompare() returns results with these two primitives.
+** This function compares the two table rows or index records
+** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero
+** or positive integer if key1 is less than, equal to or
+** greater than key2. The {nKey1, pKey1} key must be a blob
+** created by the OP_MakeRecord opcode of the VDBE. The pPKey2
+** key must be a parsed key such as obtained from
+** sqlite3VdbeParseRecord.
**
-** Return true if the result of comparison is equivalent to desiredResult.
-** Return false if there is a disagreement.
+** Key1 and Key2 do not have to contain the same number of fields.
+** The key with fewer fields usually compares less than the
+** longer key. However if the UNPACKED_INCRKEY flag in pPKey2 is set
+** and the common prefixes are equal, then key1 is less than key2.
+** Or if the UNPACKED_PREFIX_MATCH flag is set and the prefixes are
+** equal, then the keys are considered to be equal and
+** the parts beyond the common prefix are ignored.
*/
-static int vdbeRecordCompareDebug(
+SQLITE_PRIVATE int sqlite3VdbeRecordCompare(
int nKey1, const void *pKey1, /* Left key */
- const UnpackedRecord *pPKey2, /* Right key */
- int desiredResult /* Correct answer */
+ UnpackedRecord *pPKey2 /* Right key */
){
u32 d1; /* Offset into aKey[] of next data element */
u32 idx1; /* Offset into aKey[] of next header element */
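
The comment above spells out the comparison contract: compare the common fields in order, and on a tie either treat key2 as larger (UNPACKED_INCRKEY), treat the keys as equal (UNPACKED_PREFIX_MATCH), or let the shorter key sort first. A toy model of those rules using plain int fields in place of Mem cells (the flag names here only mirror the UNPACKED_* flags, they are not SQLite definitions):

#include <assert.h>

#define INCRKEY       0x01   /* break ties by treating key2 as larger  */
#define PREFIX_MATCH  0x02   /* equal common prefix counts as equal    */

/* Compare two keys field by field, then apply the tie-break rules. */
static int recordCompare(const int *a, int na, const int *b, int nb, int flags){
  int i, n = na<nb ? na : nb;
  for(i=0; i<n; i++){
    if( a[i]<b[i] ) return -1;
    if( a[i]>b[i] ) return +1;
  }
  if( flags & INCRKEY ) return -1;          /* key1 < key2 on a tie          */
  if( flags & PREFIX_MATCH ) return 0;      /* common prefix => equal        */
  return (na<nb) ? -1 : (na>nb) ? +1 : 0;   /* otherwise shorter key is less */
}

int main(void){
  int k1[] = {1, 2};
  int k2[] = {1, 2, 3};
  assert( recordCompare(k1, 2, k2, 3, 0)==-1 );            /* shorter is less */
  assert( recordCompare(k1, 2, k2, 3, PREFIX_MATCH)==0 );  /* prefixes match  */
  assert( recordCompare(k2, 3, k2, 3, INCRKEY)==-1 );      /* tie broken low  */
  return 0;
}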
@@ -70027,11 +64113,10 @@ static int vdbeRecordCompareDebug(
Mem mem1;
pKeyInfo = pPKey2->pKeyInfo;
- if( pKeyInfo->db==0 ) return 1;
mem1.enc = pKeyInfo->enc;
mem1.db = pKeyInfo->db;
/* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */
- VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
+ VVA_ONLY( mem1.zMalloc = 0; ) /* Only needed by assert() statements */
/* Compilers may complain that mem1.u.i is potentially uninitialized.
** We could initialize it, as shown here, to silence those complaints.
@@ -70043,7 +64128,6 @@ static int vdbeRecordCompareDebug(
/* mem1.u.i = 0; // not needed, here to silence compiler warning */
idx1 = getVarint32(aKey1, szHdr1);
- if( szHdr1>98307 ) return SQLITE_CORRUPT;
d1 = szHdr1;
assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB );
assert( pKeyInfo->aSortOrder!=0 );
@@ -70075,11 +64159,11 @@ static int vdbeRecordCompareDebug(
*/
rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i], pKeyInfo->aColl[i]);
if( rc!=0 ){
- assert( mem1.szMalloc==0 ); /* See comment below */
+ assert( mem1.zMalloc==0 ); /* See comment below */
if( pKeyInfo->aSortOrder[i] ){
rc = -rc; /* Invert the result for DESC sort order. */
}
- goto debugCompareEnd;
+ return rc;
}
i++;
}while( idx1<szHdr1 && i<pPKey2->nField );
@@ -70088,650 +64172,27 @@ static int vdbeRecordCompareDebug(
** the following assert(). If the assert() fails, it indicates a
** memory leak and a need to call sqlite3VdbeMemRelease(&mem1).
*/
- assert( mem1.szMalloc==0 );
+ assert( mem1.zMalloc==0 );
/* rc==0 here means that one of the keys ran out of fields and
- ** all the fields up to that point were equal. Return the default_rc
- ** value. */
- rc = pPKey2->default_rc;
-
-debugCompareEnd:
- if( desiredResult==0 && rc==0 ) return 1;
- if( desiredResult<0 && rc<0 ) return 1;
- if( desiredResult>0 && rc>0 ) return 1;
- if( CORRUPT_DB ) return 1;
- if( pKeyInfo->db->mallocFailed ) return 1;
- return 0;
-}
-#endif
-
-#if SQLITE_DEBUG
-/*
-** Count the number of fields (a.k.a. columns) in the record given by
-** pKey,nKey. The verify that this count is less than or equal to the
-** limit given by pKeyInfo->nField + pKeyInfo->nXField.
-**
-** If this constraint is not satisfied, it means that the high-speed
-** vdbeRecordCompareInt() and vdbeRecordCompareString() routines will
-** not work correctly. If this assert() ever fires, it probably means
-** that the KeyInfo.nField or KeyInfo.nXField values were computed
-** incorrectly.
-*/
-static void vdbeAssertFieldCountWithinLimits(
- int nKey, const void *pKey, /* The record to verify */
- const KeyInfo *pKeyInfo /* Compare size with this KeyInfo */
-){
- int nField = 0;
- u32 szHdr;
- u32 idx;
- u32 notUsed;
- const unsigned char *aKey = (const unsigned char*)pKey;
-
- if( CORRUPT_DB ) return;
- idx = getVarint32(aKey, szHdr);
- assert( nKey>=0 );
- assert( szHdr<=(u32)nKey );
- while( idx<szHdr ){
- idx += getVarint32(aKey+idx, notUsed);
- nField++;
- }
- assert( nField <= pKeyInfo->nField+pKeyInfo->nXField );
-}
-#else
-# define vdbeAssertFieldCountWithinLimits(A,B,C)
-#endif
-
-/*
-** Both *pMem1 and *pMem2 contain string values. Compare the two values
-** using the collation sequence pColl. As usual, return a negative , zero
-** or positive value if *pMem1 is less than, equal to or greater than
-** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);".
-*/
-static int vdbeCompareMemString(
- const Mem *pMem1,
- const Mem *pMem2,
- const CollSeq *pColl,
- u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */
-){
- if( pMem1->enc==pColl->enc ){
- /* The strings are already in the correct encoding. Call the
- ** comparison function directly */
- return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z);
- }else{
- int rc;
- const void *v1, *v2;
- int n1, n2;
- Mem c1;
- Mem c2;
- sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null);
- sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null);
- sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem);
- sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem);
- v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc);
- n1 = v1==0 ? 0 : c1.n;
- v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc);
- n2 = v2==0 ? 0 : c2.n;
- rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2);
- sqlite3VdbeMemRelease(&c1);
- sqlite3VdbeMemRelease(&c2);
- if( (v1==0 || v2==0) && prcErr ) *prcErr = SQLITE_NOMEM;
- return rc;
+ ** all the fields up to that point were equal. If the UNPACKED_INCRKEY
+ ** flag is set, then break the tie by treating key2 as larger.
+  ** If the UNPACKED_PREFIX_MATCH flag is set, then keys with common prefixes
+ ** are considered to be equal. Otherwise, the longer key is the
+ ** larger. As it happens, the pPKey2 will always be the longer
+ ** if there is a difference.
+ */
+ assert( rc==0 );
+ if( pPKey2->flags & UNPACKED_INCRKEY ){
+ rc = -1;
+ }else if( pPKey2->flags & UNPACKED_PREFIX_MATCH ){
+ /* Leave rc==0 */
+ }else if( idx1<szHdr1 ){
+ rc = 1;
}
+ return rc;
}
-
-/*
-** Compare two blobs. Return negative, zero, or positive if the first
-** is less than, equal to, or greater than the second, respectively.
-** If one blob is a prefix of the other, then the shorter is the lessor.
-*/
-static SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
- int c = memcmp(pB1->z, pB2->z, pB1->n>pB2->n ? pB2->n : pB1->n);
- if( c ) return c;
- return pB1->n - pB2->n;
-}
-
-
-/*
-** Compare the values contained by the two memory cells, returning
-** negative, zero or positive if pMem1 is less than, equal to, or greater
-** than pMem2. Sorting order is NULL's first, followed by numbers (integers
-** and reals) sorted numerically, followed by text ordered by the collating
-** sequence pColl and finally blob's ordered by memcmp().
-**
-** Two NULL values are considered equal by this function.
-*/
-SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
- int f1, f2;
- int combined_flags;
-
- f1 = pMem1->flags;
- f2 = pMem2->flags;
- combined_flags = f1|f2;
- assert( (combined_flags & MEM_RowSet)==0 );
- /* If one value is NULL, it is less than the other. If both values
- ** are NULL, return 0.
- */
- if( combined_flags&MEM_Null ){
- return (f2&MEM_Null) - (f1&MEM_Null);
- }
-
- /* If one value is a number and the other is not, the number is less.
- ** If both are numbers, compare as reals if one is a real, or as integers
- ** if both values are integers.
- */
- if( combined_flags&(MEM_Int|MEM_Real) ){
- double r1, r2;
- if( (f1 & f2 & MEM_Int)!=0 ){
- if( pMem1->u.i < pMem2->u.i ) return -1;
- if( pMem1->u.i > pMem2->u.i ) return 1;
- return 0;
- }
- if( (f1&MEM_Real)!=0 ){
- r1 = pMem1->u.r;
- }else if( (f1&MEM_Int)!=0 ){
- r1 = (double)pMem1->u.i;
- }else{
- return 1;
- }
- if( (f2&MEM_Real)!=0 ){
- r2 = pMem2->u.r;
- }else if( (f2&MEM_Int)!=0 ){
- r2 = (double)pMem2->u.i;
- }else{
- return -1;
- }
- if( r1<r2 ) return -1;
- if( r1>r2 ) return 1;
- return 0;
- }
-
- /* If one value is a string and the other is a blob, the string is less.
- ** If both are strings, compare using the collating functions.
- */
- if( combined_flags&MEM_Str ){
- if( (f1 & MEM_Str)==0 ){
- return 1;
- }
- if( (f2 & MEM_Str)==0 ){
- return -1;
- }
-
- assert( pMem1->enc==pMem2->enc );
- assert( pMem1->enc==SQLITE_UTF8 ||
- pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
-
- /* The collation sequence must be defined at this point, even if
- ** the user deletes the collation sequence after the vdbe program is
- ** compiled (this was not always the case).
- */
- assert( !pColl || pColl->xCmp );
-
- if( pColl ){
- return vdbeCompareMemString(pMem1, pMem2, pColl, 0);
- }
- /* If a NULL pointer was passed as the collate function, fall through
- ** to the blob case and use memcmp(). */
- }
-
- /* Both values must be blobs. Compare using memcmp(). */
- return sqlite3BlobCompare(pMem1, pMem2);
-}
-
-
-/*
-** The first argument passed to this function is a serial-type that
-** corresponds to an integer - all values between 1 and 9 inclusive
-** except 7. The second points to a buffer containing an integer value
-** serialized according to serial_type. This function deserializes
-** and returns the value.
-*/
-static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){
- u32 y;
- assert( CORRUPT_DB || (serial_type>=1 && serial_type<=9 && serial_type!=7) );
- switch( serial_type ){
- case 0:
- case 1:
- testcase( aKey[0]&0x80 );
- return ONE_BYTE_INT(aKey);
- case 2:
- testcase( aKey[0]&0x80 );
- return TWO_BYTE_INT(aKey);
- case 3:
- testcase( aKey[0]&0x80 );
- return THREE_BYTE_INT(aKey);
- case 4: {
- testcase( aKey[0]&0x80 );
- y = FOUR_BYTE_UINT(aKey);
- return (i64)*(int*)&y;
- }
- case 5: {
- testcase( aKey[0]&0x80 );
- return FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
- }
- case 6: {
- u64 x = FOUR_BYTE_UINT(aKey);
- testcase( aKey[0]&0x80 );
- x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
- return (i64)*(i64*)&x;
- }
- }
-
- return (serial_type - 8);
-}
-
-/*
-** This function compares the two table rows or index records
-** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero
-** or positive integer if key1 is less than, equal to or
-** greater than key2. The {nKey1, pKey1} key must be a blob
-** created by the OP_MakeRecord opcode of the VDBE. The pPKey2
-** key must be a parsed key such as obtained from
-** sqlite3VdbeParseRecord.
-**
-** If argument bSkip is non-zero, it is assumed that the caller has already
-** determined that the first fields of the keys are equal.
-**
-** Key1 and Key2 do not have to contain the same number of fields. If all
-** fields that appear in both keys are equal, then pPKey2->default_rc is
-** returned.
-**
-** If database corruption is discovered, set pPKey2->errCode to
-** SQLITE_CORRUPT and return 0. If an OOM error is encountered,
-** pPKey2->errCode is set to SQLITE_NOMEM and, if it is not NULL, the
-** malloc-failed flag set on database handle (pPKey2->pKeyInfo->db).
-*/
-SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
- int nKey1, const void *pKey1, /* Left key */
- UnpackedRecord *pPKey2, /* Right key */
- int bSkip /* If true, skip the first field */
-){
- u32 d1; /* Offset into aKey[] of next data element */
- int i; /* Index of next field to compare */
- u32 szHdr1; /* Size of record header in bytes */
- u32 idx1; /* Offset of first type in header */
- int rc = 0; /* Return value */
- Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */
- KeyInfo *pKeyInfo = pPKey2->pKeyInfo;
- const unsigned char *aKey1 = (const unsigned char *)pKey1;
- Mem mem1;
-
- /* If bSkip is true, then the caller has already determined that the first
- ** two elements in the keys are equal. Fix the various stack variables so
- ** that this routine begins comparing at the second field. */
- if( bSkip ){
- u32 s1;
- idx1 = 1 + getVarint32(&aKey1[1], s1);
- szHdr1 = aKey1[0];
- d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1);
- i = 1;
- pRhs++;
- }else{
- idx1 = getVarint32(aKey1, szHdr1);
- d1 = szHdr1;
- if( d1>(unsigned)nKey1 ){
- pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
- return 0; /* Corruption */
- }
- i = 0;
- }
-
- VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
- assert( pPKey2->pKeyInfo->nField+pPKey2->pKeyInfo->nXField>=pPKey2->nField
- || CORRUPT_DB );
- assert( pPKey2->pKeyInfo->aSortOrder!=0 );
- assert( pPKey2->pKeyInfo->nField>0 );
- assert( idx1<=szHdr1 || CORRUPT_DB );
- do{
- u32 serial_type;
-
- /* RHS is an integer */
- if( pRhs->flags & MEM_Int ){
- serial_type = aKey1[idx1];
- testcase( serial_type==12 );
- if( serial_type>=10 ){
- rc = +1;
- }else if( serial_type==0 ){
- rc = -1;
- }else if( serial_type==7 ){
- double rhs = (double)pRhs->u.i;
- sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
- if( mem1.u.r<rhs ){
- rc = -1;
- }else if( mem1.u.r>rhs ){
- rc = +1;
- }
- }else{
- i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]);
- i64 rhs = pRhs->u.i;
- if( lhs<rhs ){
- rc = -1;
- }else if( lhs>rhs ){
- rc = +1;
- }
- }
- }
-
- /* RHS is real */
- else if( pRhs->flags & MEM_Real ){
- serial_type = aKey1[idx1];
- if( serial_type>=10 ){
- /* Serial types 12 or greater are strings and blobs (greater than
- ** numbers). Types 10 and 11 are currently "reserved for future
- ** use", so it doesn't really matter what the results of comparing
- ** them to numberic values are. */
- rc = +1;
- }else if( serial_type==0 ){
- rc = -1;
- }else{
- double rhs = pRhs->u.r;
- double lhs;
- sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
- if( serial_type==7 ){
- lhs = mem1.u.r;
- }else{
- lhs = (double)mem1.u.i;
- }
- if( lhs<rhs ){
- rc = -1;
- }else if( lhs>rhs ){
- rc = +1;
- }
- }
- }
-
- /* RHS is a string */
- else if( pRhs->flags & MEM_Str ){
- getVarint32(&aKey1[idx1], serial_type);
- testcase( serial_type==12 );
- if( serial_type<12 ){
- rc = -1;
- }else if( !(serial_type & 0x01) ){
- rc = +1;
- }else{
- mem1.n = (serial_type - 12) / 2;
- testcase( (d1+mem1.n)==(unsigned)nKey1 );
- testcase( (d1+mem1.n+1)==(unsigned)nKey1 );
- if( (d1+mem1.n) > (unsigned)nKey1 ){
- pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
- return 0; /* Corruption */
- }else if( pKeyInfo->aColl[i] ){
- mem1.enc = pKeyInfo->enc;
- mem1.db = pKeyInfo->db;
- mem1.flags = MEM_Str;
- mem1.z = (char*)&aKey1[d1];
- rc = vdbeCompareMemString(
- &mem1, pRhs, pKeyInfo->aColl[i], &pPKey2->errCode
- );
- }else{
- int nCmp = MIN(mem1.n, pRhs->n);
- rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
- if( rc==0 ) rc = mem1.n - pRhs->n;
- }
- }
- }
-
- /* RHS is a blob */
- else if( pRhs->flags & MEM_Blob ){
- getVarint32(&aKey1[idx1], serial_type);
- testcase( serial_type==12 );
- if( serial_type<12 || (serial_type & 0x01) ){
- rc = -1;
- }else{
- int nStr = (serial_type - 12) / 2;
- testcase( (d1+nStr)==(unsigned)nKey1 );
- testcase( (d1+nStr+1)==(unsigned)nKey1 );
- if( (d1+nStr) > (unsigned)nKey1 ){
- pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
- return 0; /* Corruption */
- }else{
- int nCmp = MIN(nStr, pRhs->n);
- rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
- if( rc==0 ) rc = nStr - pRhs->n;
- }
- }
- }
-
- /* RHS is null */
- else{
- serial_type = aKey1[idx1];
- rc = (serial_type!=0);
- }
-
- if( rc!=0 ){
- if( pKeyInfo->aSortOrder[i] ){
- rc = -rc;
- }
- assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) );
- assert( mem1.szMalloc==0 ); /* See comment below */
- return rc;
- }
-
- i++;
- pRhs++;
- d1 += sqlite3VdbeSerialTypeLen(serial_type);
- idx1 += sqlite3VarintLen(serial_type);
- }while( idx1<(unsigned)szHdr1 && i<pPKey2->nField && d1<=(unsigned)nKey1 );
-
- /* No memory allocation is ever used on mem1. Prove this using
- ** the following assert(). If the assert() fails, it indicates a
- ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */
- assert( mem1.szMalloc==0 );
-
- /* rc==0 here means that one or both of the keys ran out of fields and
- ** all the fields up to that point were equal. Return the default_rc
- ** value. */
- assert( CORRUPT_DB
- || vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc)
- || pKeyInfo->db->mallocFailed
- );
- return pPKey2->default_rc;
-}
-SQLITE_PRIVATE int sqlite3VdbeRecordCompare(
- int nKey1, const void *pKey1, /* Left key */
- UnpackedRecord *pPKey2 /* Right key */
-){
- return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 0);
-}
-
-
-/*
-** This function is an optimized version of sqlite3VdbeRecordCompare()
-** that (a) the first field of pPKey2 is an integer, and (b) the
-** size-of-header varint at the start of (pKey1/nKey1) fits in a single
-** byte (i.e. is less than 128).
-**
-** To avoid concerns about buffer overreads, this routine is only used
-** on schemas where the maximum valid header size is 63 bytes or less.
-*/
-static int vdbeRecordCompareInt(
- int nKey1, const void *pKey1, /* Left key */
- UnpackedRecord *pPKey2 /* Right key */
-){
- const u8 *aKey = &((const u8*)pKey1)[*(const u8*)pKey1 & 0x3F];
- int serial_type = ((const u8*)pKey1)[1];
- int res;
- u32 y;
- u64 x;
- i64 v = pPKey2->aMem[0].u.i;
- i64 lhs;
-
- vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
- assert( (*(u8*)pKey1)<=0x3F || CORRUPT_DB );
- switch( serial_type ){
- case 1: { /* 1-byte signed integer */
- lhs = ONE_BYTE_INT(aKey);
- testcase( lhs<0 );
- break;
- }
- case 2: { /* 2-byte signed integer */
- lhs = TWO_BYTE_INT(aKey);
- testcase( lhs<0 );
- break;
- }
- case 3: { /* 3-byte signed integer */
- lhs = THREE_BYTE_INT(aKey);
- testcase( lhs<0 );
- break;
- }
- case 4: { /* 4-byte signed integer */
- y = FOUR_BYTE_UINT(aKey);
- lhs = (i64)*(int*)&y;
- testcase( lhs<0 );
- break;
- }
- case 5: { /* 6-byte signed integer */
- lhs = FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
- testcase( lhs<0 );
- break;
- }
- case 6: { /* 8-byte signed integer */
- x = FOUR_BYTE_UINT(aKey);
- x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
- lhs = *(i64*)&x;
- testcase( lhs<0 );
- break;
- }
- case 8:
- lhs = 0;
- break;
- case 9:
- lhs = 1;
- break;
-
- /* This case could be removed without changing the results of running
- ** this code. Including it causes gcc to generate a faster switch
- ** statement (since the range of switch targets now starts at zero and
- ** is contiguous) but does not cause any duplicate code to be generated
- ** (as gcc is clever enough to combine the two like cases). Other
- ** compilers might be similar. */
- case 0: case 7:
- return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
-
- default:
- return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
- }
-
- if( v>lhs ){
- res = pPKey2->r1;
- }else if( v<lhs ){
- res = pPKey2->r2;
- }else if( pPKey2->nField>1 ){
- /* The first fields of the two keys are equal. Compare the trailing
- ** fields. */
- res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
- }else{
- /* The first fields of the two keys are equal and there are no trailing
- ** fields. Return pPKey2->default_rc in this case. */
- res = pPKey2->default_rc;
- }
-
- assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) );
- return res;
-}
-
-/*
-** This function is an optimized version of sqlite3VdbeRecordCompare()
-** that (a) the first field of pPKey2 is a string, that (b) the first field
-** uses the collation sequence BINARY and (c) that the size-of-header varint
-** at the start of (pKey1/nKey1) fits in a single byte.
-*/
-static int vdbeRecordCompareString(
- int nKey1, const void *pKey1, /* Left key */
- UnpackedRecord *pPKey2 /* Right key */
-){
- const u8 *aKey1 = (const u8*)pKey1;
- int serial_type;
- int res;
-
- vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
- getVarint32(&aKey1[1], serial_type);
- if( serial_type<12 ){
- res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */
- }else if( !(serial_type & 0x01) ){
- res = pPKey2->r2; /* (pKey1/nKey1) is a blob */
- }else{
- int nCmp;
- int nStr;
- int szHdr = aKey1[0];
-
- nStr = (serial_type-12) / 2;
- if( (szHdr + nStr) > nKey1 ){
- pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
- return 0; /* Corruption */
- }
- nCmp = MIN( pPKey2->aMem[0].n, nStr );
- res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp);
-
- if( res==0 ){
- res = nStr - pPKey2->aMem[0].n;
- if( res==0 ){
- if( pPKey2->nField>1 ){
- res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
- }else{
- res = pPKey2->default_rc;
- }
- }else if( res>0 ){
- res = pPKey2->r2;
- }else{
- res = pPKey2->r1;
- }
- }else if( res>0 ){
- res = pPKey2->r2;
- }else{
- res = pPKey2->r1;
- }
- }
-
- assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res)
- || CORRUPT_DB
- || pPKey2->pKeyInfo->db->mallocFailed
- );
- return res;
-}
-
-/*
-** Return a pointer to an sqlite3VdbeRecordCompare() compatible function
-** suitable for comparing serialized records to the unpacked record passed
-** as the only argument.
-*/
-SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){
- /* varintRecordCompareInt() and varintRecordCompareString() both assume
- ** that the size-of-header varint that occurs at the start of each record
- ** fits in a single byte (i.e. is 127 or less). varintRecordCompareInt()
- ** also assumes that it is safe to overread a buffer by at least the
- ** maximum possible legal header size plus 8 bytes. Because there is
- ** guaranteed to be at least 74 (but not 136) bytes of padding following each
- ** buffer passed to varintRecordCompareInt() this makes it convenient to
- ** limit the size of the header to 64 bytes in cases where the first field
- ** is an integer.
- **
- ** The easiest way to enforce this limit is to consider only records with
- ** 13 fields or less. If the first field is an integer, the maximum legal
- ** header size is (12*5 + 1 + 1) bytes. */
- if( (p->pKeyInfo->nField + p->pKeyInfo->nXField)<=13 ){
- int flags = p->aMem[0].flags;
- if( p->pKeyInfo->aSortOrder[0] ){
- p->r1 = 1;
- p->r2 = -1;
- }else{
- p->r1 = -1;
- p->r2 = 1;
- }
- if( (flags & MEM_Int) ){
- return vdbeRecordCompareInt;
- }
- testcase( flags & MEM_Real );
- testcase( flags & MEM_Null );
- testcase( flags & MEM_Blob );
- if( (flags & (MEM_Real|MEM_Null|MEM_Blob))==0 && p->pKeyInfo->aColl[0]==0 ){
- assert( flags & MEM_Str );
- return vdbeRecordCompareString;
- }
- }
-
- return sqlite3VdbeRecordCompare;
-}
/*
** pCur points at an index entry created using the OP_MakeRecord opcode.
@@ -70749,6 +64210,8 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
u32 lenRowid; /* Size of the rowid */
Mem m, v;
+ UNUSED_PARAMETER(db);
+
  /* Get the size of the index entry.  Only index entries of less
  ** than 2GiB are supported - anything larger must be database corruption.
** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so
@@ -70760,7 +64223,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey );
/* Read in the complete content of the index entry */
- sqlite3VdbeMemInit(&m, db, 0);
+ memset(&m, 0, sizeof(m));
rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m);
if( rc ){
return rc;
@@ -70788,7 +64251,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){
goto idx_rowid_corruption;
}
- lenRowid = sqlite3SmallTypeSizes[typeRowid];
+ lenRowid = sqlite3VdbeSerialTypeLen(typeRowid);
testcase( (u32)m.n==szHdr+lenRowid );
if( unlikely((u32)m.n<szHdr+lenRowid) ){
goto idx_rowid_corruption;
@@ -70803,7 +64266,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
/* Jump here if database corruption is detected after m has been
** allocated. Free the m object and return SQLITE_CORRUPT. */
idx_rowid_corruption:
- testcase( m.szMalloc!=0 );
+ testcase( m.zMalloc!=0 );
sqlite3VdbeMemRelease(&m);
return SQLITE_CORRUPT_BKPT;
}
@@ -70820,10 +64283,9 @@ idx_rowid_corruption:
** of the keys prior to the final rowid, not the entire key.
*/
SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
- sqlite3 *db, /* Database connection */
- VdbeCursor *pC, /* The cursor to compare against */
- UnpackedRecord *pUnpacked, /* Unpacked version of key */
- int *res /* Write the comparison result here */
+ VdbeCursor *pC, /* The cursor to compare against */
+ UnpackedRecord *pUnpacked, /* Unpacked version of key to compare against */
+ int *res /* Write the comparison result here */
){
i64 nCellKey = 0;
int rc;
@@ -70833,17 +64295,18 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
assert( sqlite3BtreeCursorIsValid(pCur) );
VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey);
assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */
- /* nCellKey will always be between 0 and 0xffffffff because of the way
+  /* nCellKey will always be between 0 and 0xffffffff because of the way
** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */
if( nCellKey<=0 || nCellKey>0x7fffffff ){
*res = 0;
return SQLITE_CORRUPT_BKPT;
}
- sqlite3VdbeMemInit(&m, db, 0);
+ memset(&m, 0, sizeof(m));
rc = sqlite3VdbeMemFromBtree(pC->pCursor, 0, (u32)nCellKey, 1, &m);
if( rc ){
return rc;
}
+ assert( pUnpacked->flags & UNPACKED_PREFIX_MATCH );
*res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked);
sqlite3VdbeMemRelease(&m);
return SQLITE_OK;
@@ -70908,6 +64371,7 @@ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff
if( pRet ){
sqlite3VdbeMemCopy((Mem *)pRet, pMem);
sqlite3ValueApplyAffinity(pRet, aff, SQLITE_UTF8);
+ sqlite3VdbeMemStoreType((Mem *)pRet);
}
return pRet;
}
@@ -70961,8 +64425,6 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
** This file contains code use to implement APIs that are part of the
** VDBE.
*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
#ifndef SQLITE_OMIT_DEPRECATED
/*
@@ -70973,7 +64435,7 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
** collating sequences are registered or if an authorizer function is
** added or changed.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_expired(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
return p==0 || p->expired;
}
@@ -71001,31 +64463,6 @@ static int vdbeSafetyNotNull(Vdbe *p){
}
}
-#ifndef SQLITE_OMIT_TRACE
-/*
-** Invoke the profile callback. This routine is only called if we already
-** know that the profile callback is defined and needs to be invoked.
-*/
-static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){
- sqlite3_int64 iNow;
- assert( p->startTime>0 );
- assert( db->xProfile!=0 );
- assert( db->init.busy==0 );
- assert( p->zSql!=0 );
- sqlite3OsCurrentTimeInt64(db->pVfs, &iNow);
- db->xProfile(db->pProfileArg, p->zSql, (iNow - p->startTime)*1000000);
- p->startTime = 0;
-}
-/*
-** The checkProfileCallback(DB,P) macro checks to see if a profile callback
-** is needed, and it invokes the callback if it is needed.
-*/
-# define checkProfileCallback(DB,P) \
- if( ((P)->startTime)>0 ){ invokeProfileCallback(DB,P); }
-#else
-# define checkProfileCallback(DB,P) /*no-op*/
-#endif
-
/*
** The following routine destroys a virtual machine that is created by
** the sqlite3_compile() routine. The integer returned is an SQLITE_
@@ -71035,7 +64472,7 @@ static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){
** This routine sets the error code and string returned by
** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){
int rc;
if( pStmt==0 ){
/* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL
@@ -71046,7 +64483,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt){
sqlite3 *db = v->db;
if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT;
sqlite3_mutex_enter(db->mutex);
- checkProfileCallback(db, v);
rc = sqlite3VdbeFinalize(v);
rc = sqlite3ApiExit(db, rc);
sqlite3LeaveMutexAndCloseZombie(db);
@@ -71062,20 +64498,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt){
** This routine sets the error code and string returned by
** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt){
int rc;
if( pStmt==0 ){
rc = SQLITE_OK;
}else{
Vdbe *v = (Vdbe*)pStmt;
- sqlite3 *db = v->db;
- sqlite3_mutex_enter(db->mutex);
- checkProfileCallback(db, v);
+ sqlite3_mutex_enter(v->db->mutex);
rc = sqlite3VdbeReset(v);
sqlite3VdbeRewind(v);
- assert( (rc & (db->errMask))==rc );
- rc = sqlite3ApiExit(db, rc);
- sqlite3_mutex_leave(db->mutex);
+ assert( (rc & (v->db->errMask))==rc );
+ rc = sqlite3ApiExit(v->db, rc);
+ sqlite3_mutex_leave(v->db->mutex);
}
return rc;
}
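From the application side, the finalize and reset contracts in the two hunks above show up in the usual prepare/step/reset/finalize lifecycle. A minimal sketch using only the public sqlite3.h API (error handling trimmed for brevity):

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db, "SELECT 6*7", -1, &stmt, 0);
      while( sqlite3_step(stmt)==SQLITE_ROW ){
        printf("%d\n", sqlite3_column_int(stmt, 0));  /* prints 42 */
      }
      sqlite3_reset(stmt);     /* rewind to the beginning; bindings are kept */
      sqlite3_finalize(stmt);  /* destroy the virtual machine */
      sqlite3_close(db);
      return 0;
    }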
@@ -71083,7 +64517,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt){
/*
** Set all the parameters in the compiled SQL statement to NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
int i;
int rc = SQLITE_OK;
Vdbe *p = (Vdbe*)pStmt;
@@ -71107,133 +64541,57 @@ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt *pStmt){
** The following routines extract information from a Mem or sqlite3_value
** structure.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value *pVal){
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value *pVal){
Mem *p = (Mem*)pVal;
if( p->flags & (MEM_Blob|MEM_Str) ){
- if( sqlite3VdbeMemExpandBlob(p)!=SQLITE_OK ){
- assert( p->flags==MEM_Null && p->z==0 );
- return 0;
- }
+ sqlite3VdbeMemExpandBlob(p);
+ p->flags &= ~MEM_Str;
p->flags |= MEM_Blob;
return p->n ? p->z : 0;
}else{
return sqlite3_value_text(pVal);
}
}
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_bytes(sqlite3_value *pVal){
return sqlite3ValueBytes(pVal, SQLITE_UTF8);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value *pVal){
return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE);
}
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value *pVal){
+SQLITE_API double sqlite3_value_double(sqlite3_value *pVal){
return sqlite3VdbeRealValue((Mem*)pVal);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_int(sqlite3_value *pVal){
return (int)sqlite3VdbeIntValue((Mem*)pVal);
}
-SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value *pVal){
+SQLITE_API sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){
return sqlite3VdbeIntValue((Mem*)pVal);
}
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value *pVal){
- return ((Mem*)pVal)->eSubtype;
-}
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value *pVal){
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value *pVal){
return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value* pVal){
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value* pVal){
return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE);
}
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value *pVal){
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value *pVal){
return sqlite3ValueText(pVal, SQLITE_UTF16BE);
}
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value *pVal){
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value *pVal){
return sqlite3ValueText(pVal, SQLITE_UTF16LE);
}
#endif /* SQLITE_OMIT_UTF16 */
-/* EVIDENCE-OF: R-12793-43283 Every value in SQLite has one of five
-** fundamental datatypes: 64-bit signed integer 64-bit IEEE floating
-** point number string BLOB NULL
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value* pVal){
- static const u8 aType[] = {
- SQLITE_BLOB, /* 0x00 */
- SQLITE_NULL, /* 0x01 */
- SQLITE_TEXT, /* 0x02 */
- SQLITE_NULL, /* 0x03 */
- SQLITE_INTEGER, /* 0x04 */
- SQLITE_NULL, /* 0x05 */
- SQLITE_INTEGER, /* 0x06 */
- SQLITE_NULL, /* 0x07 */
- SQLITE_FLOAT, /* 0x08 */
- SQLITE_NULL, /* 0x09 */
- SQLITE_FLOAT, /* 0x0a */
- SQLITE_NULL, /* 0x0b */
- SQLITE_INTEGER, /* 0x0c */
- SQLITE_NULL, /* 0x0d */
- SQLITE_INTEGER, /* 0x0e */
- SQLITE_NULL, /* 0x0f */
- SQLITE_BLOB, /* 0x10 */
- SQLITE_NULL, /* 0x11 */
- SQLITE_TEXT, /* 0x12 */
- SQLITE_NULL, /* 0x13 */
- SQLITE_INTEGER, /* 0x14 */
- SQLITE_NULL, /* 0x15 */
- SQLITE_INTEGER, /* 0x16 */
- SQLITE_NULL, /* 0x17 */
- SQLITE_FLOAT, /* 0x18 */
- SQLITE_NULL, /* 0x19 */
- SQLITE_FLOAT, /* 0x1a */
- SQLITE_NULL, /* 0x1b */
- SQLITE_INTEGER, /* 0x1c */
- SQLITE_NULL, /* 0x1d */
- SQLITE_INTEGER, /* 0x1e */
- SQLITE_NULL, /* 0x1f */
- };
- return aType[pVal->flags&MEM_AffMask];
-}
-
-/* Make a copy of an sqlite3_value object
-*/
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value *pOrig){
- sqlite3_value *pNew;
- if( pOrig==0 ) return 0;
- pNew = sqlite3_malloc( sizeof(*pNew) );
- if( pNew==0 ) return 0;
- memset(pNew, 0, sizeof(*pNew));
- memcpy(pNew, pOrig, MEMCELLSIZE);
- pNew->flags &= ~MEM_Dyn;
- pNew->db = 0;
- if( pNew->flags&(MEM_Str|MEM_Blob) ){
- pNew->flags &= ~(MEM_Static|MEM_Dyn);
- pNew->flags |= MEM_Ephem;
- if( sqlite3VdbeMemMakeWriteable(pNew)!=SQLITE_OK ){
- sqlite3ValueFree(pNew);
- pNew = 0;
- }
- }
- return pNew;
+SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){
+ return pVal->type;
}
-/* Destroy an sqlite3_value object previously obtained from
-** sqlite3_value_dup().
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value *pOld){
- sqlite3ValueFree(pOld);
-}
-
-
/**************************** sqlite3_result_ *******************************
** The following routines are used by user-defined functions to specify
** the function result.
**
-** The setStrOrError() function calls sqlite3VdbeMemSetStr() to store the
+** The setStrOrError() function calls sqlite3VdbeMemSetStr() to store the
** result as a string or blob but if the string or blob is too large, it
** then sets the error code to SQLITE_TOOBIG
-**
-** The invokeValueDestructor(P,X) routine invokes destructor function X()
-** on value P is not going to be used and need to be destroyed.
*/
static void setResultStrOrError(
sqlite3_context *pCtx, /* Function context */
@@ -71242,183 +64600,121 @@ static void setResultStrOrError(
u8 enc, /* Encoding of z. 0 for BLOBs */
void (*xDel)(void*) /* Destructor function */
){
- if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){
+ if( sqlite3VdbeMemSetStr(&pCtx->s, z, n, enc, xDel)==SQLITE_TOOBIG ){
sqlite3_result_error_toobig(pCtx);
}
}
-static int invokeValueDestructor(
- const void *p, /* Value to destroy */
- void (*xDel)(void*), /* The destructor */
- sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if no NULL */
-){
- assert( xDel!=SQLITE_DYNAMIC );
- if( xDel==0 ){
- /* noop */
- }else if( xDel==SQLITE_TRANSIENT ){
- /* noop */
- }else{
- xDel((void*)p);
- }
- if( pCtx ) sqlite3_result_error_toobig(pCtx);
- return SQLITE_TOOBIG;
-}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(
+SQLITE_API void sqlite3_result_blob(
sqlite3_context *pCtx,
const void *z,
int n,
void (*xDel)(void *)
){
assert( n>=0 );
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
setResultStrOrError(pCtx, z, n, 0, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(
- sqlite3_context *pCtx,
- const void *z,
- sqlite3_uint64 n,
- void (*xDel)(void *)
-){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- assert( xDel!=SQLITE_DYNAMIC );
- if( n>0x7fffffff ){
- (void)invokeValueDestructor(z, xDel, pCtx);
- }else{
- setResultStrOrError(pCtx, z, (int)n, 0, xDel);
- }
-}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context *pCtx, double rVal){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
+SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemSetDouble(&pCtx->s, rVal);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
pCtx->isError = SQLITE_ERROR;
pCtx->fErrorOrAux = 1;
- sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
+ sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
pCtx->isError = SQLITE_ERROR;
pCtx->fErrorOrAux = 1;
- sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
+ sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
}
#endif
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context *pCtx, int iVal){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
-}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetInt64(pCtx->pOut, iVal);
+SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemSetInt64(&pCtx->s, (i64)iVal);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context *pCtx){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetNull(pCtx->pOut);
+SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemSetInt64(&pCtx->s, iVal);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- pCtx->pOut->eSubtype = eSubtype & 0xff;
+SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemSetNull(&pCtx->s);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(
+SQLITE_API void sqlite3_result_text(
sqlite3_context *pCtx,
const char *z,
int n,
void (*xDel)(void *)
){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(
- sqlite3_context *pCtx,
- const char *z,
- sqlite3_uint64 n,
- void (*xDel)(void *),
- unsigned char enc
-){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- assert( xDel!=SQLITE_DYNAMIC );
- if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
- if( n>0x7fffffff ){
- (void)invokeValueDestructor(z, xDel, pCtx);
- }else{
- setResultStrOrError(pCtx, z, (int)n, enc, xDel);
- }
-}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(
+SQLITE_API void sqlite3_result_text16(
sqlite3_context *pCtx,
const void *z,
int n,
void (*xDel)(void *)
){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(
+SQLITE_API void sqlite3_result_text16be(
sqlite3_context *pCtx,
const void *z,
int n,
void (*xDel)(void *)
){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(
+SQLITE_API void sqlite3_result_text16le(
sqlite3_context *pCtx,
const void *z,
int n,
void (*xDel)(void *)
){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel);
}
#endif /* SQLITE_OMIT_UTF16 */
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemCopy(pCtx->pOut, pValue);
-}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n);
-}
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
- Mem *pOut = pCtx->pOut;
- assert( sqlite3_mutex_held(pOut->db->mutex) );
- if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
- return SQLITE_TOOBIG;
- }
- sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n);
- return SQLITE_OK;
+SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemCopy(&pCtx->s, pValue);
+}
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemSetZeroBlob(&pCtx->s, n);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
+SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
pCtx->isError = errCode;
pCtx->fErrorOrAux = 1;
-#ifdef SQLITE_DEBUG
- if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;
-#endif
- if( pCtx->pOut->flags & MEM_Null ){
- sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1,
+ if( pCtx->s.flags & MEM_Null ){
+ sqlite3VdbeMemSetStr(&pCtx->s, sqlite3ErrStr(errCode), -1,
SQLITE_UTF8, SQLITE_STATIC);
}
}
/* Force an SQLITE_TOOBIG error. */
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context *pCtx){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
pCtx->isError = SQLITE_TOOBIG;
pCtx->fErrorOrAux = 1;
- sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1,
+ sqlite3VdbeMemSetStr(&pCtx->s, "string or blob too big", -1,
SQLITE_UTF8, SQLITE_STATIC);
}
/* An SQLITE_NOMEM error. */
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context *pCtx){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetNull(pCtx->pOut);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
+ sqlite3VdbeMemSetNull(&pCtx->s);
pCtx->isError = SQLITE_NOMEM;
pCtx->fErrorOrAux = 1;
- pCtx->pOut->db->mallocFailed = 1;
+ pCtx->s.db->mallocFailed = 1;
}
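The sqlite3_result_* routines above are the half of the interface that a user-defined scalar function sees. A minimal sketch (the function name "half" and its registration are illustrative only; the calls themselves are the documented public API):

    #include "sqlite3.h"

    /* SQL: SELECT half(10);  -->  5.0 */
    static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
        sqlite3_result_null(ctx);
      }else{
        sqlite3_result_double(ctx, sqlite3_value_double(argv[0]) / 2.0);
      }
    }

    /* Registration, typically right after sqlite3_open():
    **   sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);
    */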
/*
@@ -71432,10 +64728,7 @@ static int doWalCallbacks(sqlite3 *db){
for(i=0; i<db->nDb; i++){
Btree *pBt = db->aDb[i].pBt;
if( pBt ){
- int nEntry;
- sqlite3BtreeEnter(pBt);
- nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt));
- sqlite3BtreeLeave(pBt);
+ int nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt));
if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){
rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zName, nEntry);
}
@@ -71445,7 +64738,6 @@ static int doWalCallbacks(sqlite3 *db){
return rc;
}
-
/*
** Execute the statement pStmt, either until a row of data is ready, the
** statement is completely executed or an error occurs.
@@ -71478,7 +64770,7 @@ static int sqlite3Step(Vdbe *p){
** or SQLITE_BUSY error.
*/
#ifdef SQLITE_OMIT_AUTORESET
- if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+ if( p->rc==SQLITE_BUSY || p->rc==SQLITE_LOCKED ){
sqlite3_reset((sqlite3_stmt*)p);
}else{
return SQLITE_MISUSE_BKPT;
@@ -71514,10 +64806,8 @@ static int sqlite3Step(Vdbe *p){
);
#ifndef SQLITE_OMIT_TRACE
- if( db->xProfile && !db->init.busy && p->zSql ){
+ if( db->xProfile && !db->init.busy ){
sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime);
- }else{
- assert( p->startTime==0 );
}
#endif
@@ -71526,9 +64816,6 @@ static int sqlite3Step(Vdbe *p){
if( p->bIsReader ) db->nVdbeRead++;
p->pc = 0;
}
-#ifdef SQLITE_DEBUG
- p->rcApp = SQLITE_OK;
-#endif
#ifndef SQLITE_OMIT_EXPLAIN
if( p->explain ){
rc = sqlite3VdbeList(p);
@@ -71541,8 +64828,13 @@ static int sqlite3Step(Vdbe *p){
}
#ifndef SQLITE_OMIT_TRACE
- /* If the statement completed successfully, invoke the profile callback */
- if( rc!=SQLITE_ROW ) checkProfileCallback(db, p);
+ /* Invoke the profile callback if there is one
+ */
+ if( rc!=SQLITE_ROW && db->xProfile && !db->init.busy && p->zSql ){
+ sqlite3_int64 iNow;
+ sqlite3OsCurrentTimeInt64(db->pVfs, &iNow);
+ db->xProfile(db->pProfileArg, p->zSql, (iNow - p->startTime)*1000000);
+ }
#endif
if( rc==SQLITE_DONE ){
@@ -71566,9 +64858,9 @@ end_of_step:
** were called on statement p.
*/
assert( rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR
- || (rc&0xff)==SQLITE_BUSY || rc==SQLITE_MISUSE
+ || rc==SQLITE_BUSY || rc==SQLITE_MISUSE
);
- assert( (p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE) || p->rc==p->rcApp );
+ assert( p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE );
if( p->isPrepareV2 && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
/* If this statement was prepared using sqlite3_prepare_v2(), and an
** error has occurred, then return the error code in p->rc to the
@@ -71584,7 +64876,7 @@ end_of_step:
** sqlite3Step() to do most of the work. If a schema error occurs,
** call sqlite3Reprepare() and try again.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
int rc = SQLITE_OK; /* Result from sqlite3Step() */
int rc2 = SQLITE_OK; /* Result from sqlite3Reprepare() */
Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */
@@ -71598,15 +64890,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt *pStmt){
sqlite3_mutex_enter(db->mutex);
v->doingRerun = 0;
while( (rc = sqlite3Step(v))==SQLITE_SCHEMA
- && cnt++ < SQLITE_MAX_SCHEMA_RETRY ){
- int savedPc = v->pc;
- rc2 = rc = sqlite3Reprepare(v);
- if( rc!=SQLITE_OK) break;
+ && cnt++ < SQLITE_MAX_SCHEMA_RETRY
+ && (rc2 = rc = sqlite3Reprepare(v))==SQLITE_OK ){
sqlite3_reset(pStmt);
- if( savedPc>=0 ) v->doingRerun = 1;
+ v->doingRerun = 1;
assert( v->expired==0 );
}
- if( rc2!=SQLITE_OK ){
+ if( rc2!=SQLITE_OK && ALWAYS(v->isPrepareV2) && ALWAYS(db->pErr) ){
/* This case occurs after failing to recompile an sql statement.
** The error message from the SQL compiler has already been loaded
** into the database handle. This block copies the error message
@@ -71635,7 +64925,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt *pStmt){
** Extract the user data from a sqlite3_context structure and return a
** pointer to it.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context *p){
+SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
assert( p && p->pFunc );
return p->pFunc->pUserData;
}
@@ -71650,32 +64940,22 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context *p){
** sqlite3_create_function16() routines that originally registered the
** application defined function.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context *p){
- assert( p && p->pOut );
- return p->pOut->db;
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
+ assert( p && p->pFunc );
+ return p->s.db;
}
/*
-** Return the current time for a statement. If the current time
-** is requested more than once within the same run of a single prepared
-** statement, the exact same time is returned for each invocation regardless
-** of the amount of time that elapses between invocations. In other words,
-** the time returned is always the time of the first call.
+** Return the current time for a statement
*/
SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){
+ Vdbe *v = p->pVdbe;
int rc;
-#ifndef SQLITE_ENABLE_STAT3_OR_STAT4
- sqlite3_int64 *piTime = &p->pVdbe->iCurrentTime;
- assert( p->pVdbe!=0 );
-#else
- sqlite3_int64 iTime = 0;
- sqlite3_int64 *piTime = p->pVdbe!=0 ? &p->pVdbe->iCurrentTime : &iTime;
-#endif
- if( *piTime==0 ){
- rc = sqlite3OsCurrentTimeInt64(p->pOut->db->pVfs, piTime);
- if( rc ) *piTime = 0;
+ if( v->iCurrentTime==0 ){
+ rc = sqlite3OsCurrentTimeInt64(p->s.db->pVfs, &v->iCurrentTime);
+ if( rc ) v->iCurrentTime = 0;
}
- return *piTime;
+ return v->iCurrentTime;
}
/*
@@ -71701,55 +64981,41 @@ SQLITE_PRIVATE void sqlite3InvalidFunction(
}
/*
-** Create a new aggregate context for p and return a pointer to
-** its pMem->z element.
-*/
-static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){
- Mem *pMem = p->pMem;
- assert( (pMem->flags & MEM_Agg)==0 );
- if( nByte<=0 ){
- sqlite3VdbeMemSetNull(pMem);
- pMem->z = 0;
- }else{
- sqlite3VdbeMemClearAndResize(pMem, nByte);
- pMem->flags = MEM_Agg;
- pMem->u.pDef = p->pFunc;
- if( pMem->z ){
- memset(pMem->z, 0, nByte);
- }
- }
- return (void*)pMem->z;
-}
-
-/*
** Allocate or return the aggregate context for a user function. A new
** context is allocated on the first call. Subsequent calls return the
** same context that was returned on prior calls.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context *p, int nByte){
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){
+ Mem *pMem;
assert( p && p->pFunc && p->pFunc->xStep );
- assert( sqlite3_mutex_held(p->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(p->s.db->mutex) );
+ pMem = p->pMem;
testcase( nByte<0 );
- if( (p->pMem->flags & MEM_Agg)==0 ){
- return createAggContext(p, nByte);
- }else{
- return (void*)p->pMem->z;
+ if( (pMem->flags & MEM_Agg)==0 ){
+ if( nByte<=0 ){
+ sqlite3VdbeMemReleaseExternal(pMem);
+ pMem->flags = MEM_Null;
+ pMem->z = 0;
+ }else{
+ sqlite3VdbeMemGrow(pMem, nByte, 0);
+ pMem->flags = MEM_Agg;
+ pMem->u.pDef = p->pFunc;
+ if( pMem->z ){
+ memset(pMem->z, 0, nByte);
+ }
+ }
}
+ return (void*)pMem->z;
}
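The allocate-on-first-call behaviour of sqlite3_aggregate_context() above is easiest to see from a user-defined aggregate. A sketch of a hypothetical "myavg" aggregate (illustrative name; the API calls are the documented public interface):

    #include "sqlite3.h"

    typedef struct AvgCtx { double sum; int n; } AvgCtx;

    static void avgStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      /* First call allocates and zeroes sizeof(AvgCtx); later calls reuse it. */
      AvgCtx *p = (AvgCtx*)sqlite3_aggregate_context(ctx, sizeof(AvgCtx));
      (void)argc;
      if( p ){
        p->sum += sqlite3_value_double(argv[0]);
        p->n++;
      }
    }

    static void avgFinal(sqlite3_context *ctx){
      /* Passing 0 returns the existing context, or NULL if xStep never ran. */
      AvgCtx *p = (AvgCtx*)sqlite3_aggregate_context(ctx, 0);
      if( p && p->n>0 ){
        sqlite3_result_double(ctx, p->sum / p->n);
      }else{
        sqlite3_result_null(ctx);
      }
    }

    /* Registration:
    **   sqlite3_create_function(db, "myavg", 1, SQLITE_UTF8, 0, 0, avgStep, avgFinal);
    */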
/*
-** Return the auxiliary data pointer, if any, for the iArg'th argument to
+** Return the auxiliary data pointer, if any, for the iArg'th argument to
** the user-function defined by pCtx.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
AuxData *pAuxData;
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
-#if SQLITE_ENABLE_STAT3_OR_STAT4
- if( pCtx->pVdbe==0 ) return 0;
-#else
- assert( pCtx->pVdbe!=0 );
-#endif
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){
if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break;
}
@@ -71758,11 +65024,11 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context *pCtx, int i
}
/*
-** Set the auxiliary data pointer and delete function, for the iArg'th
+** Set the auxiliary data pointer and delete function, for the iArg'th
** argument to the user-function defined by pCtx. Any previous value is
** deleted by calling the delete function specified when it was set.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(
+SQLITE_API void sqlite3_set_auxdata(
sqlite3_context *pCtx,
int iArg,
void *pAux,
@@ -71771,13 +65037,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(
AuxData *pAuxData;
Vdbe *pVdbe = pCtx->pVdbe;
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+ assert( sqlite3_mutex_held(pCtx->s.db->mutex) );
if( iArg<0 ) goto failed;
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- if( pVdbe==0 ) goto failed;
-#else
- assert( pVdbe!=0 );
-#endif
for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){
if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break;
@@ -71809,7 +65070,7 @@ failed:
#ifndef SQLITE_OMIT_DEPRECATED
/*
-** Return the number of times the Step function of an aggregate has been
+** Return the number of times the Step function of an aggregate has been
** called.
**
** This function is deprecated. Do not use it for new code. It is
@@ -71817,7 +65078,7 @@ failed:
** implementations should keep their own counts within their aggregate
** context.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context *p){
+SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){
assert( p && p->pMem && p->pFunc && p->pFunc->xStep );
return p->pMem->n;
}
@@ -71826,7 +65087,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context *p){
/*
** Return the number of columns in the result set for the statement pStmt.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
return pVm ? pVm->nResColumn : 0;
}
@@ -71835,48 +65096,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt){
** Return the number of values available from the current row of the
** currently executing statement pStmt.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
if( pVm==0 || pVm->pResultSet==0 ) return 0;
return pVm->nResColumn;
}
-/*
-** Return a pointer to static memory containing an SQL NULL value.
-*/
-static const Mem *columnNullValue(void){
- /* Even though the Mem structure contains an element
- ** of type i64, on certain architectures (x86) with certain compiler
- ** switches (-Os), gcc may align this Mem object on a 4-byte boundary
- ** instead of an 8-byte one. This all works fine, except that when
- ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s
- ** that a Mem structure is located on an 8-byte boundary. To prevent
- ** these assert()s from failing, when building with SQLITE_DEBUG defined
- ** using gcc, we force nullMem to be 8-byte aligned using the magical
- ** __attribute__((aligned(8))) macro. */
- static const Mem nullMem
-#if defined(SQLITE_DEBUG) && defined(__GNUC__)
- __attribute__((aligned(8)))
-#endif
- = {
- /* .u = */ {0},
- /* .flags = */ (u16)MEM_Null,
- /* .enc = */ (u8)0,
- /* .eSubtype = */ (u8)0,
- /* .n = */ (int)0,
- /* .z = */ (char*)0,
- /* .zMalloc = */ (char*)0,
- /* .szMalloc = */ (int)0,
- /* .uTemp = */ (u32)0,
- /* .db = */ (sqlite3*)0,
- /* .xDel = */ (void(*)(void*))0,
-#ifdef SQLITE_DEBUG
- /* .pScopyFrom = */ (Mem*)0,
- /* .pFiller = */ (void*)0,
-#endif
- };
- return &nullMem;
-}
/*
** Check to see if column iCol of the given statement is valid. If
@@ -71893,11 +65118,32 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){
sqlite3_mutex_enter(pVm->db->mutex);
pOut = &pVm->pResultSet[i];
}else{
+ /* If the value passed as the second argument is out of range, return
+ ** a pointer to the following static Mem object which contains the
+ ** value SQL NULL. Even though the Mem structure contains an element
+ ** of type i64, on certain architectures (x86) with certain compiler
+ ** switches (-Os), gcc may align this Mem object on a 4-byte boundary
+ ** instead of an 8-byte one. This all works fine, except that when
+ ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s
+ ** that a Mem structure is located on an 8-byte boundary. To prevent
+ ** these assert()s from failing, when building with SQLITE_DEBUG defined
+ ** using gcc, we force nullMem to be 8-byte aligned using the magical
+ ** __attribute__((aligned(8))) macro. */
+ static const Mem nullMem
+#if defined(SQLITE_DEBUG) && defined(__GNUC__)
+ __attribute__((aligned(8)))
+#endif
+ = {0, "", (double)0, {0}, 0, MEM_Null, SQLITE_NULL, 0,
+#ifdef SQLITE_DEBUG
+ 0, 0, /* pScopyFrom, pFiller */
+#endif
+ 0, 0 };
+
if( pVm && ALWAYS(pVm->db) ){
sqlite3_mutex_enter(pVm->db->mutex);
- sqlite3Error(pVm->db, SQLITE_RANGE);
+ sqlite3Error(pVm->db, SQLITE_RANGE, 0);
}
- pOut = (Mem*)columnNullValue();
+ pOut = (Mem*)&nullMem;
}
return pOut;
}
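Seen from the public API, columnMem() above means an out-of-range column index degrades to an SQL NULL rather than failing hard, while the error code on the connection is set to SQLITE_RANGE. The sketch below assumes exactly that reading of the code:

    #include <assert.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db, "SELECT 1", -1, &stmt, 0);
      sqlite3_step(stmt);
      assert( sqlite3_column_int(stmt, 5)==0 );      /* out of range: NULL -> 0 */
      assert( sqlite3_errcode(db)==SQLITE_RANGE );   /* per columnMem() above */
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }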
@@ -71938,7 +65184,7 @@ static void columnMallocFailure(sqlite3_stmt *pStmt)
** The following routines are used to access elements of the current row
** in the result set.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
const void *val;
val = sqlite3_value_blob( columnMem(pStmt,i) );
/* Even though there is no encoding conversion, value_blob() might
@@ -71948,37 +65194,37 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt *pStmt, i
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
int val = sqlite3_value_bytes( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
int val = sqlite3_value_bytes16( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt *pStmt, int i){
+SQLITE_API double sqlite3_column_double(sqlite3_stmt *pStmt, int i){
double val = sqlite3_value_double( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_int(sqlite3_stmt *pStmt, int i){
int val = sqlite3_value_int( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
+SQLITE_API sqlite_int64 sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt *pStmt, int i){
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt *pStmt, int i){
const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt *pStmt, int i){
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){
Mem *pOut = columnMem(pStmt, i);
if( pOut->flags&MEM_Static ){
pOut->flags &= ~MEM_Static;
@@ -71988,13 +65234,13 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt *pStm
return (sqlite3_value *)pOut;
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
const void *val = sqlite3_value_text16( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
#endif /* SQLITE_OMIT_UTF16 */
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
int iType = sqlite3_value_type( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return iType;
@@ -72022,19 +65268,11 @@ static const void *columnName(
const void *(*xFunc)(Mem*),
int useType
){
- const void *ret;
- Vdbe *p;
+ const void *ret = 0;
+ Vdbe *p = (Vdbe *)pStmt;
int n;
- sqlite3 *db;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pStmt==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- ret = 0;
- p = (Vdbe *)pStmt;
- db = p->db;
+ sqlite3 *db = p->db;
+
assert( db!=0 );
n = sqlite3_column_count(pStmt);
if( N<n && N>=0 ){
@@ -72058,12 +65296,12 @@ static const void *columnName(
** Return the name of the Nth column of the result set returned by SQL
** statement pStmt.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_NAME);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_NAME);
}
@@ -72083,12 +65321,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt *pStmt,
** Return the column declaration type (if applicable) of the 'i'th column
** of the result set of SQL statement pStmt.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DECLTYPE);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE);
}
@@ -72099,14 +65337,14 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt *pS
/*
** Return the name of the database from which a result column derives.
** NULL is returned if the result column is an expression or constant or
-** anything else which is not an unambiguous reference to a database column.
+** anything else which is not an unambiguous reference to a database column.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DATABASE);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DATABASE);
}
@@ -72115,14 +65353,14 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stm
/*
** Return the name of the table from which a result column derives.
** NULL is returned if the result column is an expression or constant or
-** anything else which is not an unambiguous reference to a database column.
+** anything else which is not an unambiguous reference to a database column.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_TABLE);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_TABLE);
}
@@ -72131,14 +65369,14 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt *
/*
** Return the name of the table column from which a result column derives.
** NULL is returned if the result column is an expression or constant or
-** anything else which is not an unambiguous reference to a database column.
+** anything else which is not an unambiguous reference to a database column.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_COLUMN);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_COLUMN);
}
@@ -72168,14 +65406,14 @@ static int vdbeUnbind(Vdbe *p, int i){
}
sqlite3_mutex_enter(p->db->mutex);
if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){
- sqlite3Error(p->db, SQLITE_MISUSE);
+ sqlite3Error(p->db, SQLITE_MISUSE, 0);
sqlite3_mutex_leave(p->db->mutex);
sqlite3_log(SQLITE_MISUSE,
"bind on a busy prepared statement: [%s]", p->zSql);
return SQLITE_MISUSE_BKPT;
}
if( i<1 || i>p->nVar ){
- sqlite3Error(p->db, SQLITE_RANGE);
+ sqlite3Error(p->db, SQLITE_RANGE, 0);
sqlite3_mutex_leave(p->db->mutex);
return SQLITE_RANGE;
}
@@ -72183,7 +65421,7 @@ static int vdbeUnbind(Vdbe *p, int i){
pVar = &p->aVar[i];
sqlite3VdbeMemRelease(pVar);
pVar->flags = MEM_Null;
- sqlite3Error(p->db, SQLITE_OK);
+ sqlite3Error(p->db, SQLITE_OK, 0);
/* If the bit corresponding to this variable in Vdbe.expmask is set, then
** binding a new value to this variable invalidates the current query plan.
@@ -72225,7 +65463,7 @@ static int bindText(
if( rc==SQLITE_OK && encoding!=0 ){
rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db));
}
- sqlite3Error(p->db, rc);
+ sqlite3Error(p->db, rc, 0);
rc = sqlite3ApiExit(p->db, rc);
}
sqlite3_mutex_leave(p->db->mutex);
@@ -72239,7 +65477,7 @@ static int bindText(
/*
** Bind a blob value to an SQL statement variable.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(
+SQLITE_API int sqlite3_bind_blob(
sqlite3_stmt *pStmt,
int i,
const void *zData,
@@ -72248,21 +65486,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(
){
return bindText(pStmt, i, zData, nData, xDel, 0);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(
- sqlite3_stmt *pStmt,
- int i,
- const void *zData,
- sqlite3_uint64 nData,
- void (*xDel)(void*)
-){
- assert( xDel!=SQLITE_DYNAMIC );
- if( nData>0x7fffffff ){
- return invokeValueDestructor(zData, xDel, 0);
- }else{
- return bindText(pStmt, i, zData, (int)nData, xDel, 0);
- }
-}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
int rc;
Vdbe *p = (Vdbe *)pStmt;
rc = vdbeUnbind(p, i);
@@ -72272,10 +65496,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt *pStmt, int i, do
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
return sqlite3_bind_int64(p, i, (i64)iValue);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
int rc;
Vdbe *p = (Vdbe *)pStmt;
rc = vdbeUnbind(p, i);
@@ -72285,7 +65509,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sql
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
int rc;
Vdbe *p = (Vdbe*)pStmt;
rc = vdbeUnbind(p, i);
@@ -72294,7 +65518,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(
+SQLITE_API int sqlite3_bind_text(
sqlite3_stmt *pStmt,
int i,
const char *zData,
@@ -72303,24 +65527,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(
){
return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(
- sqlite3_stmt *pStmt,
- int i,
- const char *zData,
- sqlite3_uint64 nData,
- void (*xDel)(void*),
- unsigned char enc
-){
- assert( xDel!=SQLITE_DYNAMIC );
- if( nData>0x7fffffff ){
- return invokeValueDestructor(zData, xDel, 0);
- }else{
- if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
- return bindText(pStmt, i, zData, (int)nData, xDel, enc);
- }
-}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(
+SQLITE_API int sqlite3_bind_text16(
sqlite3_stmt *pStmt,
int i,
const void *zData,
@@ -72330,15 +65538,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(
return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE);
}
#endif /* SQLITE_OMIT_UTF16 */
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
int rc;
- switch( sqlite3_value_type((sqlite3_value*)pValue) ){
+ switch( pValue->type ){
case SQLITE_INTEGER: {
rc = sqlite3_bind_int64(pStmt, i, pValue->u.i);
break;
}
case SQLITE_FLOAT: {
- rc = sqlite3_bind_double(pStmt, i, pValue->u.r);
+ rc = sqlite3_bind_double(pStmt, i, pValue->r);
break;
}
case SQLITE_BLOB: {
@@ -72361,7 +65569,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt *pStmt, int i, con
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
int rc;
Vdbe *p = (Vdbe *)pStmt;
rc = vdbeUnbind(p, i);
@@ -72371,26 +65579,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i,
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
- int rc;
- Vdbe *p = (Vdbe *)pStmt;
- sqlite3_mutex_enter(p->db->mutex);
- if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){
- rc = SQLITE_TOOBIG;
- }else{
- assert( (n & 0x7FFFFFFF)==n );
- rc = sqlite3_bind_zeroblob(pStmt, i, n);
- }
- rc = sqlite3ApiExit(p->db, rc);
- sqlite3_mutex_leave(p->db->mutex);
- return rc;
-}
/*
** Return the number of wildcards that can be potentially bound to.
** This routine is added to support DBD::SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
return p ? p->nVar : 0;
}
@@ -72401,7 +65595,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
**
** The result is always UTF-8.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
Vdbe *p = (Vdbe*)pStmt;
if( p==0 || i<1 || i>p->nzVar ){
return 0;
@@ -72429,7 +65623,7 @@ SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nNa
}
return 0;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName));
}
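Taken together, the bind and parameter-lookup routines above support binding either by position or by name. A minimal sketch using only the public API (no table needed; the statement just echoes its parameters):

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db, "SELECT ?1 + :delta", -1, &stmt, 0);
      sqlite3_bind_int(stmt, 1, 40);                        /* by position */
      sqlite3_bind_int(stmt,
          sqlite3_bind_parameter_index(stmt, ":delta"), 2); /* by name */
      if( sqlite3_step(stmt)==SQLITE_ROW ){
        printf("%d\n", sqlite3_column_int(stmt, 0));        /* prints 42 */
      }
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }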
@@ -72455,7 +65649,7 @@ SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt
** Deprecated external interface. Internal/core SQLite code
** should call sqlite3TransferBindings.
**
-** It is misuse to call this routine with statements from different
+** It is misuse to call this routine with statements from different
** database connections. But as this is a deprecated interface, we
** will not bother to check for that condition.
**
@@ -72463,7 +65657,7 @@ SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt
** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise
** SQLITE_OK is returned.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+SQLITE_API int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
Vdbe *pFrom = (Vdbe*)pFromStmt;
Vdbe *pTo = (Vdbe*)pToStmt;
if( pFrom->nVar!=pTo->nVar ){
@@ -72485,7 +65679,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt,
** the first argument to the sqlite3_prepare() that was used to create
** the statement in the first place.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt *pStmt){
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){
return pStmt ? ((Vdbe*)pStmt)->db : 0;
}
@@ -72493,16 +65687,16 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt *pStmt){
** Return true if the prepared statement is guaranteed to not modify the
** database.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
return pStmt ? ((Vdbe*)pStmt)->readOnly : 1;
}
/*
** Return true if the prepared statement is in need of being reset.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
Vdbe *v = (Vdbe*)pStmt;
- return v!=0 && v->pc>=0 && v->magic==VDBE_MAGIC_RUN;
+ return v!=0 && v->pc>0 && v->magic==VDBE_MAGIC_RUN;
}
/*
@@ -72511,14 +65705,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt *pStmt){
** prepared statement for the database connection. Return NULL if there
** are no more.
*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
sqlite3_stmt *pNext;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(pDb) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(pDb->mutex);
if( pStmt==0 ){
pNext = (sqlite3_stmt*)pDb->pVdbe;
@@ -72532,89 +65720,13 @@ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_
/*
** Return the value of a status counter for a prepared statement
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
Vdbe *pVdbe = (Vdbe*)pStmt;
- u32 v;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !pStmt ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- v = pVdbe->aCounter[op];
+ u32 v = pVdbe->aCounter[op];
if( resetFlag ) pVdbe->aCounter[op] = 0;
return (int)v;
}
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
-/*
-** Return status data for a single loop within query pStmt.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
- sqlite3_stmt *pStmt, /* Prepared statement being queried */
- int idx, /* Index of loop to report on */
- int iScanStatusOp, /* Which metric to return */
- void *pOut /* OUT: Write the answer here */
-){
- Vdbe *p = (Vdbe*)pStmt;
- ScanStatus *pScan;
- if( idx<0 || idx>=p->nScan ) return 1;
- pScan = &p->aScan[idx];
- switch( iScanStatusOp ){
- case SQLITE_SCANSTAT_NLOOP: {
- *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop];
- break;
- }
- case SQLITE_SCANSTAT_NVISIT: {
- *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit];
- break;
- }
- case SQLITE_SCANSTAT_EST: {
- double r = 1.0;
- LogEst x = pScan->nEst;
- while( x<100 ){
- x += 10;
- r *= 0.5;
- }
- *(double*)pOut = r*sqlite3LogEstToInt(x);
- break;
- }
- case SQLITE_SCANSTAT_NAME: {
- *(const char**)pOut = pScan->zName;
- break;
- }
- case SQLITE_SCANSTAT_EXPLAIN: {
- if( pScan->addrExplain ){
- *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z;
- }else{
- *(const char**)pOut = 0;
- }
- break;
- }
- case SQLITE_SCANSTAT_SELECTID: {
- if( pScan->addrExplain ){
- *(int*)pOut = p->aOp[ pScan->addrExplain ].p1;
- }else{
- *(int*)pOut = -1;
- }
- break;
- }
- default: {
- return 1;
- }
- }
- return 0;
-}
-
-/*
-** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
- Vdbe *p = (Vdbe*)pStmt;
- memset(p->anExec, 0, p->nOp * sizeof(i64));
-}
-#endif /* SQLITE_ENABLE_STMT_SCANSTATUS */
-
/************** End of vdbeapi.c *********************************************/
/************** Begin file vdbetrace.c ***************************************/
/*
@@ -72634,8 +65746,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt
**
** The Vdbe parse-tree explainer is also found here.
*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
#ifndef SQLITE_OMIT_TRACE
@@ -72683,7 +65793,7 @@ static int findNextHostParameter(const char *zSql, int *pnToken){
** ALGORITHM: Scan the input string looking for host parameters in any of
** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within
** string literals, quoted identifier names, and comments. For text forms,
-** the host parameter index is found by scanning the prepared
+** the host parameter index is found by scanning the prepared
** statement for the corresponding OP_Variable opcode. Once the host
** parameter index is known, locate the value in p->aVar[]. Then render
** the value as a literal in place of the host parameter name.
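The expanded text produced by sqlite3VdbeExpandSql() is, as far as this code suggests, what the legacy sqlite3_trace() callback receives while tracing is enabled. A sketch of a consumer; the exact rendering of the bound values shown in the comment is an assumption based on the expansion logic above:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Expected to print something like: TRACE: SELECT 40 + 2 */
    static void traceCb(void *unused, const char *zSql){
      (void)unused;
      fprintf(stderr, "TRACE: %s\n", zSql);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      sqlite3_open(":memory:", &db);
      sqlite3_trace(db, traceCb, 0);
      sqlite3_prepare_v2(db, "SELECT ?1 + ?2", -1, &stmt, 0);
      sqlite3_bind_int(stmt, 1, 40);
      sqlite3_bind_int(stmt, 2, 2);
      sqlite3_step(stmt);
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }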
@@ -72703,18 +65813,16 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
char zBase[100]; /* Initial working space */
db = p->db;
- sqlite3StrAccumInit(&out, db, zBase, sizeof(zBase),
+ sqlite3StrAccumInit(&out, zBase, sizeof(zBase),
db->aLimit[SQLITE_LIMIT_LENGTH]);
+ out.db = db;
if( db->nVdbeExec>1 ){
while( *zRawSql ){
const char *zStart = zRawSql;
while( *(zRawSql++)!='\n' && *zRawSql );
sqlite3StrAccumAppend(&out, "-- ", 3);
- assert( (zRawSql - zStart) > 0 );
sqlite3StrAccumAppend(&out, zStart, (int)(zRawSql-zStart));
}
- }else if( p->nVar==0 ){
- sqlite3StrAccumAppend(&out, zRawSql, sqlite3Strlen30(zRawSql));
}else{
while( zRawSql[0] ){
n = findNextHostParameter(zRawSql, &nToken);
@@ -72731,12 +65839,10 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
idx = nextIndex;
}
}else{
- assert( zRawSql[0]==':' || zRawSql[0]=='$' ||
- zRawSql[0]=='@' || zRawSql[0]=='#' );
+ assert( zRawSql[0]==':' || zRawSql[0]=='$' || zRawSql[0]=='@' );
testcase( zRawSql[0]==':' );
testcase( zRawSql[0]=='$' );
testcase( zRawSql[0]=='@' );
- testcase( zRawSql[0]=='#' );
idx = sqlite3VdbeParameterIndex(p, zRawSql, nToken);
assert( idx>0 );
}
@@ -72747,9 +65853,9 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
if( pVar->flags & MEM_Null ){
sqlite3StrAccumAppend(&out, "NULL", 4);
}else if( pVar->flags & MEM_Int ){
- sqlite3XPrintf(&out, 0, "%lld", pVar->u.i);
+ sqlite3XPrintf(&out, "%lld", pVar->u.i);
}else if( pVar->flags & MEM_Real ){
- sqlite3XPrintf(&out, 0, "%!.15g", pVar->u.r);
+ sqlite3XPrintf(&out, "%!.15g", pVar->r);
}else if( pVar->flags & MEM_Str ){
int nOut; /* Number of bytes of the string text to include in output */
#ifndef SQLITE_OMIT_UTF16
@@ -72770,17 +65876,15 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
while( nOut<pVar->n && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; }
}
#endif
- sqlite3XPrintf(&out, 0, "'%.*q'", nOut, pVar->z);
+ sqlite3XPrintf(&out, "'%.*q'", nOut, pVar->z);
#ifdef SQLITE_TRACE_SIZE_LIMIT
- if( nOut<pVar->n ){
- sqlite3XPrintf(&out, 0, "/*+%d bytes*/", pVar->n-nOut);
- }
+ if( nOut<pVar->n ) sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
#endif
#ifndef SQLITE_OMIT_UTF16
if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8);
#endif
}else if( pVar->flags & MEM_Zero ){
- sqlite3XPrintf(&out, 0, "zeroblob(%d)", pVar->u.nZero);
+ sqlite3XPrintf(&out, "zeroblob(%d)", pVar->u.nZero);
}else{
int nOut; /* Number of bytes of the blob to include in output */
assert( pVar->flags & MEM_Blob );
@@ -72790,13 +65894,11 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT;
#endif
for(i=0; i<nOut; i++){
- sqlite3XPrintf(&out, 0, "%02x", pVar->z[i]&0xff);
+ sqlite3XPrintf(&out, "%02x", pVar->z[i]&0xff);
}
sqlite3StrAccumAppend(&out, "'", 1);
#ifdef SQLITE_TRACE_SIZE_LIMIT
- if( nOut<pVar->n ){
- sqlite3XPrintf(&out, 0, "/*+%d bytes*/", pVar->n-nOut);
- }
+ if( nOut<pVar->n ) sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
#endif
}
}
@@ -72806,6 +65908,121 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
#endif /* #ifndef SQLITE_OMIT_TRACE */
+/*****************************************************************************
+** The following code implements the data-structure explaining logic
+** for the Vdbe.
+*/
+
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+
+/*
+** Allocate a new Explain object
+*/
+SQLITE_PRIVATE void sqlite3ExplainBegin(Vdbe *pVdbe){
+ if( pVdbe ){
+ Explain *p;
+ sqlite3BeginBenignMalloc();
+ p = (Explain *)sqlite3MallocZero( sizeof(Explain) );
+ if( p ){
+ p->pVdbe = pVdbe;
+ sqlite3_free(pVdbe->pExplain);
+ pVdbe->pExplain = p;
+ sqlite3StrAccumInit(&p->str, p->zBase, sizeof(p->zBase),
+ SQLITE_MAX_LENGTH);
+ p->str.useMalloc = 2;
+ }else{
+ sqlite3EndBenignMalloc();
+ }
+ }
+}
+
+/*
+** Return true if the Explain ends with a new-line.
+*/
+static int endsWithNL(Explain *p){
+ return p && p->str.zText && p->str.nChar
+ && p->str.zText[p->str.nChar-1]=='\n';
+}
+
+/*
+** Append text to the indentation
+*/
+SQLITE_PRIVATE void sqlite3ExplainPrintf(Vdbe *pVdbe, const char *zFormat, ...){
+ Explain *p;
+ if( pVdbe && (p = pVdbe->pExplain)!=0 ){
+ va_list ap;
+ if( p->nIndent && endsWithNL(p) ){
+ int n = p->nIndent;
+ if( n>ArraySize(p->aIndent) ) n = ArraySize(p->aIndent);
+ sqlite3AppendSpace(&p->str, p->aIndent[n-1]);
+ }
+ va_start(ap, zFormat);
+ sqlite3VXPrintf(&p->str, 1, zFormat, ap);
+ va_end(ap);
+ }
+}
+
+/*
+** Append a '\n' if there is not already one.
+*/
+SQLITE_PRIVATE void sqlite3ExplainNL(Vdbe *pVdbe){
+ Explain *p;
+ if( pVdbe && (p = pVdbe->pExplain)!=0 && !endsWithNL(p) ){
+ sqlite3StrAccumAppend(&p->str, "\n", 1);
+ }
+}
+
+/*
+** Push a new indentation level. Subsequent lines will be indented
+** so that they begin at the current cursor position.
+*/
+SQLITE_PRIVATE void sqlite3ExplainPush(Vdbe *pVdbe){
+ Explain *p;
+ if( pVdbe && (p = pVdbe->pExplain)!=0 ){
+ if( p->str.zText && p->nIndent<ArraySize(p->aIndent) ){
+ const char *z = p->str.zText;
+ int i = p->str.nChar-1;
+ int x;
+ while( i>=0 && z[i]!='\n' ){ i--; }
+ x = (p->str.nChar - 1) - i;
+ if( p->nIndent && x<p->aIndent[p->nIndent-1] ){
+ x = p->aIndent[p->nIndent-1];
+ }
+ p->aIndent[p->nIndent] = x;
+ }
+ p->nIndent++;
+ }
+}
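The column arithmetic above ((nChar-1) minus the index of the last '\n') is easy to check in isolation. This is only an editorial sketch, not part of SQLite; the helper name currentColumn() is hypothetical.

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the scan in sqlite3ExplainPush(): the new
** indent level is the current cursor column, i.e. the number of characters
** emitted since the most recent '\n' (or since the start of the text). */
static int currentColumn(const char *z, int nChar){
  int i = nChar - 1;
  while( i>=0 && z[i]!='\n' ){ i--; }
  return (nChar - 1) - i;
}

int main(void){
  const char *z = "SELECT\n  a, ";
  printf("%d\n", currentColumn(z, (int)strlen(z)));  /* prints 5 */
  return 0;
}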
+
+/*
+** Pop the indentation stack by one level.
+*/
+SQLITE_PRIVATE void sqlite3ExplainPop(Vdbe *p){
+ if( p && p->pExplain ) p->pExplain->nIndent--;
+}
+
+/*
+** Finish the explanation text and free the Explain object
+*/
+SQLITE_PRIVATE void sqlite3ExplainFinish(Vdbe *pVdbe){
+ if( pVdbe && pVdbe->pExplain ){
+ sqlite3_free(pVdbe->zExplain);
+ sqlite3ExplainNL(pVdbe);
+ pVdbe->zExplain = sqlite3StrAccumFinish(&pVdbe->pExplain->str);
+ sqlite3_free(pVdbe->pExplain);
+ pVdbe->pExplain = 0;
+ sqlite3EndBenignMalloc();
+ }
+}
+
+/*
+** Return the explanation of a virtual machine.
+*/
+SQLITE_PRIVATE const char *sqlite3VdbeExplanation(Vdbe *pVdbe){
+ return (pVdbe && pVdbe->zExplain) ? pVdbe->zExplain : 0;
+}
+#endif /* defined(SQLITE_ENABLE_TREE_EXPLAIN) */
+
/************** End of vdbetrace.c *******************************************/
/************** Begin file vdbe.c ********************************************/
/*
@@ -72819,8 +66036,33 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
** May you share freely, never taking more than you give.
**
*************************************************************************
-** The code in this file implements the function that runs the
-** bytecode of a prepared statement.
+** The code in this file implements the execution method of the
+** Virtual Database Engine (VDBE). A separate file ("vdbeaux.c")
+** handles housekeeping details such as creating and deleting
+** VDBE instances. This file is solely interested in executing
+** the VDBE program.
+**
+** In the external interface, an "sqlite3_stmt*" is an opaque pointer
+** to a VDBE.
+**
+** The SQL parser generates a program which is then executed by
+** the VDBE to do the work of the SQL statement. VDBE programs are
+** similar in form to assembly language. The program consists of
+** a linear sequence of operations. Each operation has an opcode
+** and 5 operands. Operands P1, P2, and P3 are integers. Operand P4
+** is a null-terminated string. Operand P5 is an unsigned character.
+** Few opcodes use all 5 operands.
+**
+** Computation results are stored on a set of registers numbered beginning
+** with 1 and going up to Vdbe.nMem. Each register can store
+** either an integer, a null-terminated string, a floating point
+** number, or the SQL "NULL" value. An implicit conversion from one
+** type to the other occurs as necessary.
+**
+** Most of the code in this file is taken up by the sqlite3VdbeExec()
+** function which does the work of interpreting a VDBE program.
+** But other routines are also provided to help in building up
+** a program instruction by instruction.
**
** Various scripts scan this source file in order to generate HTML
** documentation, header files, or other derived files. The formatting
@@ -72828,17 +66070,11 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
** in this file for details. If in doubt, do not deviate from existing
** commenting and indentation practices when changing or adding code.
*/
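As a rough illustration of the opcode/operand/register model described in the comment above, here is a self-contained toy interpreter in the same spirit. It is only an editorial sketch: the MiniOp/MOP_*/miniExec names are made up, and the real VDBE additionally carries a P4 pointer operand and a P5 flags byte, typed Mem cells, cursors and error handling.

#include <stdio.h>

/* A toy register machine: each instruction is an opcode plus the integer
** operands P1..P3.  Registers are numbered from 1, as in the VDBE. */
typedef struct MiniOp {
  int opcode;
  int p1, p2, p3;
} MiniOp;

enum { MOP_Integer, MOP_Add, MOP_ResultRow, MOP_Halt };

/* Execute aOp[] against the register array aReg[]. */
static void miniExec(const MiniOp *aOp, long long *aReg){
  int pc;
  for(pc=0; ; pc++){
    const MiniOp *pOp = &aOp[pc];
    switch( pOp->opcode ){
      case MOP_Integer:   aReg[pOp->p2] = pOp->p1;                       break;
      case MOP_Add:       aReg[pOp->p3] = aReg[pOp->p1] + aReg[pOp->p2]; break;
      case MOP_ResultRow: printf("row: %lld\n", aReg[pOp->p1]);          break;
      case MOP_Halt:      return;
    }
  }
}

int main(void){
  long long aReg[4] = {0};
  static const MiniOp prog[] = {
    { MOP_Integer,   2, 1, 0 },   /* r[1] = 2          */
    { MOP_Integer,  40, 2, 0 },   /* r[2] = 40         */
    { MOP_Add,       1, 2, 3 },   /* r[3] = r[1]+r[2]  */
    { MOP_ResultRow, 3, 0, 0 },   /* emit r[3]         */
    { MOP_Halt,      0, 0, 0 }
  };
  miniExec(prog, aReg);           /* prints "row: 42"  */
  return 0;
}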
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
/*
** Invoke this macro on memory cells just prior to changing the
** value of the cell. This macro verifies that shallow copies are
-** not misused. A shallow copy of a string or blob just copies a
-** pointer to the string or blob, not the content. If the original
-** is changed while the copy is still in use, the string or blob might
-** be changed out from under the copy. This macro verifies that nothing
-** like that ever happens.
+** not misused.
*/
#ifdef SQLITE_DEBUG
# define memAboutToChange(P,M) sqlite3VdbeMemAboutToChange(P,M)
@@ -72897,7 +66133,7 @@ static void updateMaxBlobsize(Mem *p){
#endif
/*
-** The next global variable is incremented each time the OP_Found opcode
+** The next global variable is incremented each type the OP_Found opcode
** is executed. This is used to test whether or not the foreign key
** operation implemented using OP_FkIsZero is working. This variable
** has no function other than to help verify the correct operation of the
@@ -72918,45 +66154,11 @@ SQLITE_API int sqlite3_found_count = 0;
#endif
/*
-** Invoke the VDBE coverage callback, if that callback is defined. This
-** feature is used for test suite validation only and does not appear in
-** production builds.
-**
-** M is an integer, 2 or 3, that indicates how many different ways the
-** branch can go. It is usually 2. "I" is the direction the branch
-** goes. 0 means falls through. 1 means branch is taken. 2 means the
-** second alternative branch is taken.
-**
-** iSrcLine is the source code line (from the __LINE__ macro) that
-** generated the VDBE instruction. This instrumentation assumes that all
-** source code is in a single file (the amalgamation). Special values 1
-** and 2 for the iSrcLine parameter mean that this particular branch is
-** always taken or never taken, respectively.
-*/
-#if !defined(SQLITE_VDBE_COVERAGE)
-# define VdbeBranchTaken(I,M)
-#else
-# define VdbeBranchTaken(I,M) vdbeTakeBranch(pOp->iSrcLine,I,M)
- static void vdbeTakeBranch(int iSrcLine, u8 I, u8 M){
- if( iSrcLine<=2 && ALWAYS(iSrcLine>0) ){
- M = iSrcLine;
- /* Assert the truth of VdbeCoverageAlwaysTaken() and
- ** VdbeCoverageNeverTaken() */
- assert( (M & I)==I );
- }else{
- if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/
- sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg,
- iSrcLine,I,M);
- }
- }
-#endif
-
-/*
** Convert the given register into a string if it isn't one
** already. Return non-zero if a malloc() fails.
*/
#define Stringify(P, enc) \
- if(((P)->flags&(MEM_Str|MEM_Blob))==0 && sqlite3VdbeMemStringify(P,enc,0)) \
+ if(((P)->flags&(MEM_Str|MEM_Blob))==0 && sqlite3VdbeMemStringify(P,enc)) \
{ goto no_mem; }
/*
@@ -72968,14 +66170,38 @@ SQLITE_API int sqlite3_found_count = 0;
**
** This routine converts an ephemeral string into a dynamically allocated
** string that the register itself controls. In other words, it
-** converts an MEM_Ephem string into a string with P.z==P.zMalloc.
+** converts an MEM_Ephem string into an MEM_Dyn string.
*/
#define Deephemeralize(P) \
if( ((P)->flags&MEM_Ephem)!=0 \
&& sqlite3VdbeMemMakeWriteable(P) ){ goto no_mem;}
/* Return true if the cursor was opened using the OP_OpenSorter opcode. */
-#define isSorter(x) ((x)->pSorter!=0)
+# define isSorter(x) ((x)->pSorter!=0)
+
+/*
+** Argument pMem points at a register that will be passed to a
+** user-defined function or returned to the user as the result of a query.
+** This routine sets the pMem->type variable used by the sqlite3_value_*()
+** routines.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemStoreType(Mem *pMem){
+ int flags = pMem->flags;
+ if( flags & MEM_Null ){
+ pMem->type = SQLITE_NULL;
+ }
+ else if( flags & MEM_Int ){
+ pMem->type = SQLITE_INTEGER;
+ }
+ else if( flags & MEM_Real ){
+ pMem->type = SQLITE_FLOAT;
+ }
+ else if( flags & MEM_Str ){
+ pMem->type = SQLITE_TEXT;
+ }else{
+ pMem->type = SQLITE_BLOB;
+ }
+}
/*
** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL
@@ -73019,12 +66245,11 @@ static VdbeCursor *allocateCursor(
sqlite3VdbeFreeCursor(p, p->apCsr[iCur]);
p->apCsr[iCur] = 0;
}
- if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+ if( SQLITE_OK==sqlite3VdbeMemGrow(pMem, nByte, 0) ){
p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z;
memset(pCx, 0, sizeof(VdbeCursor));
pCx->iDb = iDb;
pCx->nField = nField;
- pCx->aOffset = &pCx->aType[nField];
if( isBtreeCursor ){
pCx->pCursor = (BtCursor*)
&pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField];
@@ -73039,29 +66264,21 @@ static VdbeCursor *allocateCursor(
** do so without loss of information. In other words, if the string
** looks like a number, convert it into a number. If it does not
** look like a number, leave it alone.
-**
-** If the bTryForInt flag is true, then extra effort is made to give
-** an integer representation. Strings that look like floating point
-** values but which have no fractional component (example: '48.00')
-** will have a MEM_Int representation when bTryForInt is true.
-**
-** If bTryForInt is false, then if the input string contains a decimal
-** point or exponential notation, the result is only MEM_Real, even
-** if there is an exact integer representation of the quantity.
*/
-static void applyNumericAffinity(Mem *pRec, int bTryForInt){
- double rValue;
- i64 iValue;
- u8 enc = pRec->enc;
- assert( (pRec->flags & (MEM_Str|MEM_Int|MEM_Real))==MEM_Str );
- if( sqlite3AtoF(pRec->z, &rValue, pRec->n, enc)==0 ) return;
- if( 0==sqlite3Atoi64(pRec->z, &iValue, pRec->n, enc) ){
- pRec->u.i = iValue;
- pRec->flags |= MEM_Int;
- }else{
- pRec->u.r = rValue;
- pRec->flags |= MEM_Real;
- if( bTryForInt ) sqlite3VdbeIntegerAffinity(pRec);
+static void applyNumericAffinity(Mem *pRec){
+ if( (pRec->flags & (MEM_Real|MEM_Int))==0 ){
+ double rValue;
+ i64 iValue;
+ u8 enc = pRec->enc;
+ if( (pRec->flags&MEM_Str)==0 ) return;
+ if( sqlite3AtoF(pRec->z, &rValue, pRec->n, enc)==0 ) return;
+ if( 0==sqlite3Atoi64(pRec->z, &iValue, pRec->n, enc) ){
+ pRec->u.i = iValue;
+ pRec->flags |= MEM_Int;
+ }else{
+ pRec->r = rValue;
+ pRec->flags |= MEM_Real;
+ }
}
}
@@ -73080,7 +66297,7 @@ static void applyNumericAffinity(Mem *pRec, int bTryForInt){
** SQLITE_AFF_TEXT:
** Convert pRec to a text representation.
**
-** SQLITE_AFF_BLOB:
+** SQLITE_AFF_NONE:
** No-op. pRec is unchanged.
*/
static void applyAffinity(
@@ -73088,25 +66305,22 @@ static void applyAffinity(
char affinity, /* The affinity to be applied */
u8 enc /* Use this text encoding */
){
- if( affinity>=SQLITE_AFF_NUMERIC ){
- assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL
- || affinity==SQLITE_AFF_NUMERIC );
- if( (pRec->flags & MEM_Int)==0 ){
- if( (pRec->flags & MEM_Real)==0 ){
- if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1);
- }else{
- sqlite3VdbeIntegerAffinity(pRec);
- }
- }
- }else if( affinity==SQLITE_AFF_TEXT ){
+ if( affinity==SQLITE_AFF_TEXT ){
/* Only attempt the conversion to TEXT if there is an integer or real
** representation (blob and NULL do not get converted) but no string
** representation.
*/
if( 0==(pRec->flags&MEM_Str) && (pRec->flags&(MEM_Real|MEM_Int)) ){
- sqlite3VdbeMemStringify(pRec, enc, 1);
+ sqlite3VdbeMemStringify(pRec, enc);
}
pRec->flags &= ~(MEM_Real|MEM_Int);
+ }else if( affinity!=SQLITE_AFF_NONE ){
+ assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL
+ || affinity==SQLITE_AFF_NUMERIC );
+ applyNumericAffinity(pRec);
+ if( pRec->flags & MEM_Real ){
+ sqlite3VdbeIntegerAffinity(pRec);
+ }
}
}
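The effect of applyAffinity() is also visible through the public API. Below is a minimal standalone sketch, assuming a program compiled and linked against the SQLite library; it exercises documented column-affinity behaviour rather than calling the static helper above. The text '123' stored in an INTEGER column comes back as an integer, while the same text in a typeless column stays text.

#include <stdio.h>
#include <sqlite3.h>

/* Column affinity as seen from the public API.  Error checking is omitted
** for brevity. */
int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a INTEGER, b);"
                   "INSERT INTO t VALUES('123','123');", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT a, b FROM t", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("a: %d  b: %d\n",
           sqlite3_column_type(pStmt, 0),    /* SQLITE_INTEGER (1) */
           sqlite3_column_type(pStmt, 1));   /* SQLITE_TEXT (3)    */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}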
@@ -73116,14 +66330,13 @@ static void applyAffinity(
** is appropriate. But only do the conversion if it is possible without
** loss of information and return the revised type of the argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value *pVal){
- int eType = sqlite3_value_type(pVal);
- if( eType==SQLITE_TEXT ){
- Mem *pMem = (Mem*)pVal;
- applyNumericAffinity(pMem, 0);
- eType = sqlite3_value_type(pVal);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value *pVal){
+ Mem *pMem = (Mem*)pVal;
+ if( pMem->type==SQLITE_TEXT ){
+ applyNumericAffinity(pMem);
+ sqlite3VdbeMemStoreType(pMem);
}
- return eType;
+ return pMem->type;
}
/*
@@ -73138,41 +66351,6 @@ SQLITE_PRIVATE void sqlite3ValueApplyAffinity(
applyAffinity((Mem *)pVal, affinity, enc);
}
-/*
-** pMem currently only holds a string type (or maybe a BLOB that we can
-** interpret as a string if we want to). Compute its corresponding
-** numeric type, if it has one. Set the pMem->u.r and pMem->u.i fields
-** accordingly.
-*/
-static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){
- assert( (pMem->flags & (MEM_Int|MEM_Real))==0 );
- assert( (pMem->flags & (MEM_Str|MEM_Blob))!=0 );
- if( sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc)==0 ){
- return 0;
- }
- if( sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc)==SQLITE_OK ){
- return MEM_Int;
- }
- return MEM_Real;
-}
-
-/*
-** Return the numeric type for pMem, either MEM_Int or MEM_Real or both or
-** none.
-**
-** Unlike applyNumericAffinity(), this routine does not modify pMem->flags.
-** But it does set pMem->u.r and pMem->u.i appropriately.
-*/
-static u16 numericType(Mem *pMem){
- if( pMem->flags & (MEM_Int|MEM_Real) ){
- return pMem->flags & (MEM_Int|MEM_Real);
- }
- if( pMem->flags & (MEM_Str|MEM_Blob) ){
- return computeNumericType(pMem);
- }
- return 0;
-}
-
#ifdef SQLITE_DEBUG
/*
** Write a nice string representation of the contents of cell pMem
@@ -73261,7 +66439,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf){
** Print the value of a register for tracing purposes:
*/
static void memTracePrint(Mem *p){
- if( p->flags & MEM_Undefined ){
+ if( p->flags & MEM_Invalid ){
printf(" undefined");
}else if( p->flags & MEM_Null ){
printf(" NULL");
@@ -73271,7 +66449,7 @@ static void memTracePrint(Mem *p){
printf(" i:%lld", p->u.i);
#ifndef SQLITE_OMIT_FLOATING_POINT
}else if( p->flags & MEM_Real ){
- printf(" r:%g", p->u.r);
+ printf(" r:%g", p->r);
#endif
}else if( p->flags & MEM_RowSet ){
printf(" (rowset)");
@@ -73394,6 +66572,20 @@ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
#endif
+/*
+** The CHECK_FOR_INTERRUPT macro defined here looks to see if the
+** sqlite3_interrupt() routine has been called. If it has been, then
+** processing of the VDBE program is interrupted.
+**
+** This macro is added to every instruction that does a jump in order to
+** implement a loop. This test used to be on every single instruction,
+** but that meant we did more testing than we needed. By only testing the
+** flag on jump instructions, we get a (small) speed improvement.
+*/
+#define CHECK_FOR_INTERRUPT \
+ if( db->u1.isInterrupted ) goto abort_due_to_interrupt;
+
+
#ifndef NDEBUG
/*
** This function is only called from within an assert() expression. It
@@ -73414,34 +66606,44 @@ static int checkSavepointCount(sqlite3 *db){
}
#endif
-/*
-** Return the register of pOp->p2 after first preparing it to be
-** overwritten with an integer value.
-*/
-static Mem *out2Prerelease(Vdbe *p, VdbeOp *pOp){
- Mem *pOut;
- assert( pOp->p2>0 );
- assert( pOp->p2<=(p->nMem-p->nCursor) );
- pOut = &p->aMem[pOp->p2];
- memAboutToChange(p, pOut);
- if( VdbeMemDynamic(pOut) ) sqlite3VdbeMemSetNull(pOut);
- pOut->flags = MEM_Int;
- return pOut;
-}
-
/*
-** Execute as much of a VDBE program as we can.
-** This is the core of sqlite3_step().
+** Execute as much of a VDBE program as we can then return.
+**
+** sqlite3VdbeMakeReady() must be called before this routine in order to
+** close the program with a final OP_Halt and to set up the callbacks
+** and the error message pointer.
+**
+** Whenever a row or result data is available, this routine will either
+** invoke the result callback (if there is one) or return with
+** SQLITE_ROW.
+**
+** If an attempt is made to open a locked database, then this routine
+** will either invoke the busy callback (if there is one) or it will
+** return SQLITE_BUSY.
+**
+** If an error occurs, an error message is written to memory obtained
+** from sqlite3_malloc() and p->zErrMsg is made to point to that memory.
+** The error code is stored in p->rc and this routine returns SQLITE_ERROR.
+**
+** If the callback ever returns non-zero, then the program exits
+** immediately. There will be no error message but the p->rc field is
+** set to SQLITE_ABORT and this routine will return SQLITE_ERROR.
+**
+** A memory allocation error causes p->rc to be set to SQLITE_NOMEM and this
+** routine to return SQLITE_ERROR.
+**
+** Other fatal errors return SQLITE_ERROR.
+**
+** After this routine has finished, sqlite3VdbeFinalize() should be
+** used to clean up the mess that was left behind.
*/
SQLITE_PRIVATE int sqlite3VdbeExec(
Vdbe *p /* The VDBE */
){
+ int pc=0; /* The program counter */
Op *aOp = p->aOp; /* Copy of p->aOp */
- Op *pOp = aOp; /* Current operation */
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
- Op *pOrigOp; /* Value of pOp at the top of the loop */
-#endif
+ Op *pOp; /* Current operation */
int rc = SQLITE_OK; /* Value to return */
sqlite3 *db = p->db; /* The database */
u8 resetSchemaOnFault = 0; /* Reset schema after an error if positive */
@@ -73460,8 +66662,431 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
i64 lastRowid = db->lastRowid; /* Saved value of the last insert ROWID */
#ifdef VDBE_PROFILE
u64 start; /* CPU clock count at start of opcode */
+ int origPc; /* Program counter at start of opcode */
+#endif
+ /********************************************************************
+ ** Automatically generated code
+ **
+ ** The following union is automatically generated by the
+ ** vdbe-compress.tcl script. The purpose of this union is to
+ ** reduce the amount of stack space required by this function.
+ ** See comments in the vdbe-compress.tcl script for details.
+ */
+ union vdbeExecUnion {
+ struct OP_Yield_stack_vars {
+ int pcDest;
+ } aa;
+ struct OP_Halt_stack_vars {
+ const char *zType;
+ const char *zLogFmt;
+ } ab;
+ struct OP_Null_stack_vars {
+ int cnt;
+ u16 nullFlag;
+ } ac;
+ struct OP_Variable_stack_vars {
+ Mem *pVar; /* Value being transferred */
+ } ad;
+ struct OP_Move_stack_vars {
+ char *zMalloc; /* Holding variable for allocated memory */
+ int n; /* Number of registers left to copy */
+ int p1; /* Register to copy from */
+ int p2; /* Register to copy to */
+ } ae;
+ struct OP_Copy_stack_vars {
+ int n;
+ } af;
+ struct OP_ResultRow_stack_vars {
+ Mem *pMem;
+ int i;
+ } ag;
+ struct OP_Concat_stack_vars {
+ i64 nByte;
+ } ah;
+ struct OP_Remainder_stack_vars {
+ char bIntint; /* Started out as two integer operands */
+ int flags; /* Combined MEM_* flags from both inputs */
+ i64 iA; /* Integer value of left operand */
+ i64 iB; /* Integer value of right operand */
+ double rA; /* Real value of left operand */
+ double rB; /* Real value of right operand */
+ } ai;
+ struct OP_Function_stack_vars {
+ int i;
+ Mem *pArg;
+ sqlite3_context ctx;
+ sqlite3_value **apVal;
+ int n;
+ } aj;
+ struct OP_ShiftRight_stack_vars {
+ i64 iA;
+ u64 uA;
+ i64 iB;
+ u8 op;
+ } ak;
+ struct OP_Ge_stack_vars {
+ int res; /* Result of the comparison of pIn1 against pIn3 */
+ char affinity; /* Affinity to use for comparison */
+ u16 flags1; /* Copy of initial value of pIn1->flags */
+ u16 flags3; /* Copy of initial value of pIn3->flags */
+ } al;
+ struct OP_Compare_stack_vars {
+ int n;
+ int i;
+ int p1;
+ int p2;
+ const KeyInfo *pKeyInfo;
+ int idx;
+ CollSeq *pColl; /* Collating sequence to use on this term */
+ int bRev; /* True for DESCENDING sort order */
+ } am;
+ struct OP_Or_stack_vars {
+ int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+ int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+ } an;
+ struct OP_IfNot_stack_vars {
+ int c;
+ } ao;
+ struct OP_Column_stack_vars {
+ i64 payloadSize64; /* Number of bytes in the record */
+ int p2; /* column number to retrieve */
+ VdbeCursor *pC; /* The VDBE cursor */
+ BtCursor *pCrsr; /* The BTree cursor */
+ u32 *aType; /* aType[i] holds the numeric type of the i-th column */
+ u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */
+ int len; /* The length of the serialized data for the column */
+ int i; /* Loop counter */
+ Mem *pDest; /* Where to write the extracted value */
+ Mem sMem; /* For storing the record being decoded */
+ const u8 *zData; /* Part of the record being decoded */
+ const u8 *zHdr; /* Next unparsed byte of the header */
+ const u8 *zEndHdr; /* Pointer to first byte after the header */
+ u32 offset; /* Offset into the data */
+ u32 szField; /* Number of bytes in the content of a field */
+ u32 avail; /* Number of bytes of available data */
+ u32 t; /* A type code from the record header */
+ Mem *pReg; /* PseudoTable input register */
+ } ap;
+ struct OP_Affinity_stack_vars {
+ const char *zAffinity; /* The affinity to be applied */
+ char cAff; /* A single character of affinity */
+ } aq;
+ struct OP_MakeRecord_stack_vars {
+ u8 *zNewRecord; /* A buffer to hold the data for the new record */
+ Mem *pRec; /* The new record */
+ u64 nData; /* Number of bytes of data space */
+ int nHdr; /* Number of bytes of header space */
+ i64 nByte; /* Data space required for this record */
+ int nZero; /* Number of zero bytes at the end of the record */
+ int nVarint; /* Number of bytes in a varint */
+ u32 serial_type; /* Type field */
+ Mem *pData0; /* First field to be combined into the record */
+ Mem *pLast; /* Last field of the record */
+ int nField; /* Number of fields in the record */
+ char *zAffinity; /* The affinity string for the record */
+ int file_format; /* File format to use for encoding */
+ int i; /* Space used in zNewRecord[] */
+ int len; /* Length of a field */
+ } ar;
+ struct OP_Count_stack_vars {
+ i64 nEntry;
+ BtCursor *pCrsr;
+ } as;
+ struct OP_Savepoint_stack_vars {
+ int p1; /* Value of P1 operand */
+ char *zName; /* Name of savepoint */
+ int nName;
+ Savepoint *pNew;
+ Savepoint *pSavepoint;
+ Savepoint *pTmp;
+ int iSavepoint;
+ int ii;
+ } at;
+ struct OP_AutoCommit_stack_vars {
+ int desiredAutoCommit;
+ int iRollback;
+ int turnOnAC;
+ } au;
+ struct OP_Transaction_stack_vars {
+ Btree *pBt;
+ } av;
+ struct OP_ReadCookie_stack_vars {
+ int iMeta;
+ int iDb;
+ int iCookie;
+ } aw;
+ struct OP_SetCookie_stack_vars {
+ Db *pDb;
+ } ax;
+ struct OP_VerifyCookie_stack_vars {
+ int iMeta;
+ int iGen;
+ Btree *pBt;
+ } ay;
+ struct OP_OpenWrite_stack_vars {
+ int nField;
+ KeyInfo *pKeyInfo;
+ int p2;
+ int iDb;
+ int wrFlag;
+ Btree *pX;
+ VdbeCursor *pCur;
+ Db *pDb;
+ } az;
+ struct OP_OpenEphemeral_stack_vars {
+ VdbeCursor *pCx;
+ KeyInfo *pKeyInfo;
+ } ba;
+ struct OP_SorterOpen_stack_vars {
+ VdbeCursor *pCx;
+ } bb;
+ struct OP_OpenPseudo_stack_vars {
+ VdbeCursor *pCx;
+ } bc;
+ struct OP_SeekGt_stack_vars {
+ int res;
+ int oc;
+ VdbeCursor *pC;
+ UnpackedRecord r;
+ int nField;
+ i64 iKey; /* The rowid we are to seek to */
+ } bd;
+ struct OP_Seek_stack_vars {
+ VdbeCursor *pC;
+ } be;
+ struct OP_Found_stack_vars {
+ int alreadyExists;
+ int ii;
+ VdbeCursor *pC;
+ int res;
+ char *pFree;
+ UnpackedRecord *pIdxKey;
+ UnpackedRecord r;
+ char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*4 + 7];
+ } bf;
+ struct OP_NotExists_stack_vars {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ u64 iKey;
+ } bg;
+ struct OP_NewRowid_stack_vars {
+ i64 v; /* The new rowid */
+ VdbeCursor *pC; /* Cursor of table to get the new rowid */
+ int res; /* Result of an sqlite3BtreeLast() */
+ int cnt; /* Counter to limit the number of searches */
+ Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */
+ VdbeFrame *pFrame; /* Root frame of VDBE */
+ } bh;
+ struct OP_InsertInt_stack_vars {
+ Mem *pData; /* MEM cell holding data for the record to be inserted */
+ Mem *pKey; /* MEM cell holding key for the record */
+ i64 iKey; /* The integer ROWID or key for the record to be inserted */
+ VdbeCursor *pC; /* Cursor to table into which insert is written */
+ int nZero; /* Number of zero-bytes to append */
+ int seekResult; /* Result of prior seek or 0 if no USESEEKRESULT flag */
+ const char *zDb; /* database name - used by the update hook */
+ const char *zTbl; /* Table name - used by the update hook */
+ int op; /* Opcode for update hook: SQLITE_UPDATE or SQLITE_INSERT */
+ } bi;
+ struct OP_Delete_stack_vars {
+ i64 iKey;
+ VdbeCursor *pC;
+ } bj;
+ struct OP_SorterCompare_stack_vars {
+ VdbeCursor *pC;
+ int res;
+ int nIgnore;
+ } bk;
+ struct OP_SorterData_stack_vars {
+ VdbeCursor *pC;
+ } bl;
+ struct OP_RowData_stack_vars {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ u32 n;
+ i64 n64;
+ } bm;
+ struct OP_Rowid_stack_vars {
+ VdbeCursor *pC;
+ i64 v;
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ } bn;
+ struct OP_NullRow_stack_vars {
+ VdbeCursor *pC;
+ } bo;
+ struct OP_Last_stack_vars {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ } bp;
+ struct OP_Rewind_stack_vars {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ } bq;
+ struct OP_SorterNext_stack_vars {
+ VdbeCursor *pC;
+ int res;
+ } br;
+ struct OP_IdxInsert_stack_vars {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int nKey;
+ const char *zKey;
+ } bs;
+ struct OP_IdxDelete_stack_vars {
+ VdbeCursor *pC;
+ BtCursor *pCrsr;
+ int res;
+ UnpackedRecord r;
+ } bt;
+ struct OP_IdxRowid_stack_vars {
+ BtCursor *pCrsr;
+ VdbeCursor *pC;
+ i64 rowid;
+ } bu;
+ struct OP_IdxGE_stack_vars {
+ VdbeCursor *pC;
+ int res;
+ UnpackedRecord r;
+ } bv;
+ struct OP_Destroy_stack_vars {
+ int iMoved;
+ int iCnt;
+ Vdbe *pVdbe;
+ int iDb;
+ } bw;
+ struct OP_Clear_stack_vars {
+ int nChange;
+ } bx;
+ struct OP_CreateTable_stack_vars {
+ int pgno;
+ int flags;
+ Db *pDb;
+ } by;
+ struct OP_ParseSchema_stack_vars {
+ int iDb;
+ const char *zMaster;
+ char *zSql;
+ InitData initData;
+ } bz;
+ struct OP_IntegrityCk_stack_vars {
+ int nRoot; /* Number of tables to check. (Number of root pages.) */
+ int *aRoot; /* Array of rootpage numbers for tables to be checked */
+ int j; /* Loop counter */
+ int nErr; /* Number of errors reported */
+ char *z; /* Text of the error report */
+ Mem *pnErr; /* Register keeping track of errors remaining */
+ } ca;
+ struct OP_RowSetRead_stack_vars {
+ i64 val;
+ } cb;
+ struct OP_RowSetTest_stack_vars {
+ int iSet;
+ int exists;
+ } cc;
+ struct OP_Program_stack_vars {
+ int nMem; /* Number of memory registers for sub-program */
+ int nByte; /* Bytes of runtime space required for sub-program */
+ Mem *pRt; /* Register to allocate runtime space */
+ Mem *pMem; /* Used to iterate through memory cells */
+ Mem *pEnd; /* Last memory cell in new array */
+ VdbeFrame *pFrame; /* New vdbe frame to execute in */
+ SubProgram *pProgram; /* Sub-program to execute */
+ void *t; /* Token identifying trigger */
+ } cd;
+ struct OP_Param_stack_vars {
+ VdbeFrame *pFrame;
+ Mem *pIn;
+ } ce;
+ struct OP_MemMax_stack_vars {
+ Mem *pIn1;
+ VdbeFrame *pFrame;
+ } cf;
+ struct OP_AggStep_stack_vars {
+ int n;
+ int i;
+ Mem *pMem;
+ Mem *pRec;
+ sqlite3_context ctx;
+ sqlite3_value **apVal;
+ } cg;
+ struct OP_AggFinal_stack_vars {
+ Mem *pMem;
+ } ch;
+ struct OP_Checkpoint_stack_vars {
+ int i; /* Loop counter */
+ int aRes[3]; /* Results */
+ Mem *pMem; /* Write results here */
+ } ci;
+ struct OP_JournalMode_stack_vars {
+ Btree *pBt; /* Btree to change journal mode of */
+ Pager *pPager; /* Pager associated with pBt */
+ int eNew; /* New journal mode */
+ int eOld; /* The old journal mode */
+#ifndef SQLITE_OMIT_WAL
+ const char *zFilename; /* Name of database file for pPager */
#endif
- /*** INSERT STACK UNION HERE ***/
+ } cj;
+ struct OP_IncrVacuum_stack_vars {
+ Btree *pBt;
+ } ck;
+ struct OP_VBegin_stack_vars {
+ VTable *pVTab;
+ } cl;
+ struct OP_VOpen_stack_vars {
+ VdbeCursor *pCur;
+ sqlite3_vtab_cursor *pVtabCursor;
+ sqlite3_vtab *pVtab;
+ sqlite3_module *pModule;
+ } cm;
+ struct OP_VFilter_stack_vars {
+ int nArg;
+ int iQuery;
+ const sqlite3_module *pModule;
+ Mem *pQuery;
+ Mem *pArgc;
+ sqlite3_vtab_cursor *pVtabCursor;
+ sqlite3_vtab *pVtab;
+ VdbeCursor *pCur;
+ int res;
+ int i;
+ Mem **apArg;
+ } cn;
+ struct OP_VColumn_stack_vars {
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ Mem *pDest;
+ sqlite3_context sContext;
+ } co;
+ struct OP_VNext_stack_vars {
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ int res;
+ VdbeCursor *pCur;
+ } cp;
+ struct OP_VRename_stack_vars {
+ sqlite3_vtab *pVtab;
+ Mem *pName;
+ } cq;
+ struct OP_VUpdate_stack_vars {
+ sqlite3_vtab *pVtab;
+ sqlite3_module *pModule;
+ int nArg;
+ int i;
+ sqlite_int64 rowid;
+ Mem **apArg;
+ Mem *pX;
+ } cr;
+ struct OP_Trace_stack_vars {
+ char *zTrace;
+ char *z;
+ } cs;
+ } u;
+ /* End automatically generated code
+ ********************************************************************/
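The stack-compression trick described in the comment block above is a general C pattern: because at most one case of the big switch is active at a time, folding each case's locals into one union lets them share storage instead of each enlarging the interpreter's stack frame. A generic, self-contained sketch of the idea follows; the names are illustrative and not taken from vdbe-compress.tcl.

#include <stdio.h>
#include <string.h>

/* Without the union, the locals of every case would each claim their own
** slot in the function's (single, large) stack frame.  With the union the
** frame only needs room for the largest case. */
static void bigSwitch(int op){
  union {
    struct { char buf[256]; int n; } caseA;    /* locals used when op==0 */
    struct { double r; long long i; } caseB;   /* locals used when op==1 */
  } u;

  switch( op ){
    case 0: {
      memset(u.caseA.buf, 'x', sizeof(u.caseA.buf));
      u.caseA.n = (int)sizeof(u.caseA.buf);
      printf("caseA touched %d bytes\n", u.caseA.n);
      break;
    }
    case 1: {
      u.caseB.i = 42;
      u.caseB.r = (double)u.caseB.i / 2.0;
      printf("caseB computed %g\n", u.caseB.r);
      break;
    }
  }
}

int main(void){
  bigSwitch(0);
  bigSwitch(1);
  return 0;
}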
assert( p->magic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */
sqlite3VdbeEnter(p);
@@ -73470,20 +67095,24 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
** sqlite3_column_text16() failed. */
goto no_mem;
}
- assert( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_BUSY );
+ assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY );
assert( p->bIsReader || p->readOnly!=0 );
p->rc = SQLITE_OK;
p->iCurrentTime = 0;
assert( p->explain==0 );
p->pResultSet = 0;
db->busyHandler.nBusy = 0;
- if( db->u1.isInterrupted ) goto abort_due_to_interrupt;
+ CHECK_FOR_INTERRUPT;
sqlite3VdbeIOTraceSql(p);
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
if( db->xProgress ){
- u32 iPrior = p->aCounter[SQLITE_STMTSTATUS_VM_STEP];
assert( 0 < db->nProgressOps );
- nProgressLimit = db->nProgressOps - (iPrior % db->nProgressOps);
+ nProgressLimit = (unsigned)p->aCounter[SQLITE_STMTSTATUS_VM_STEP];
+ if( nProgressLimit==0 ){
+ nProgressLimit = db->nProgressOps;
+ }else{
+ nProgressLimit %= (unsigned)db->nProgressOps;
+ }
}
#endif
#ifdef SQLITE_DEBUG
@@ -73513,22 +67142,21 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
}
sqlite3EndBenignMalloc();
#endif
- for(pOp=&aOp[p->pc]; rc==SQLITE_OK; pOp++){
- assert( pOp>=aOp && pOp<&aOp[p->nOp]);
+ for(pc=p->pc; rc==SQLITE_OK; pc++){
+ assert( pc>=0 && pc<p->nOp );
if( db->mallocFailed ) goto no_mem;
#ifdef VDBE_PROFILE
+ origPc = pc;
start = sqlite3Hwtime();
#endif
nVmStep++;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- if( p->anExec ) p->anExec[(int)(pOp-aOp)]++;
-#endif
+ pOp = &aOp[pc];
/* Only allow tracing if SQLITE_DEBUG is defined.
*/
#ifdef SQLITE_DEBUG
if( db->flags & SQLITE_VdbeTrace ){
- sqlite3VdbePrintOp(stdout, (int)(pOp - aOp), pOp);
+ sqlite3VdbePrintOp(stdout, pc, pOp);
}
#endif
@@ -73545,28 +67173,39 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
}
#endif
+ /* On any opcode with the "out2-prerelease" tag, free any
+ ** external allocations out of mem[p2] and set mem[p2] to be
+ ** an undefined integer. Opcodes will either fill in the integer
+ ** value or convert mem[p2] to a different type.
+ */
+ assert( pOp->opflags==sqlite3OpcodeProperty[pOp->opcode] );
+ if( pOp->opflags & OPFLG_OUT2_PRERELEASE ){
+ assert( pOp->p2>0 );
+ assert( pOp->p2<=(p->nMem-p->nCursor) );
+ pOut = &aMem[pOp->p2];
+ memAboutToChange(p, pOut);
+ VdbeMemRelease(pOut);
+ pOut->flags = MEM_Int;
+ }
+
/* Sanity checking on other operands */
#ifdef SQLITE_DEBUG
- assert( pOp->opflags==sqlite3OpcodeProperty[pOp->opcode] );
if( (pOp->opflags & OPFLG_IN1)!=0 ){
assert( pOp->p1>0 );
assert( pOp->p1<=(p->nMem-p->nCursor) );
assert( memIsValid(&aMem[pOp->p1]) );
- assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) );
REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]);
}
if( (pOp->opflags & OPFLG_IN2)!=0 ){
assert( pOp->p2>0 );
assert( pOp->p2<=(p->nMem-p->nCursor) );
assert( memIsValid(&aMem[pOp->p2]) );
- assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) );
REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]);
}
if( (pOp->opflags & OPFLG_IN3)!=0 ){
assert( pOp->p3>0 );
assert( pOp->p3<=(p->nMem-p->nCursor) );
assert( memIsValid(&aMem[pOp->p3]) );
- assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) );
REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]);
}
if( (pOp->opflags & OPFLG_OUT2)!=0 ){
@@ -73580,9 +67219,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
memAboutToChange(p, &aMem[pOp->p3]);
}
#endif
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
- pOrigOp = pOp;
-#endif
switch( pOp->opcode ){
@@ -73606,7 +67242,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
**
** Other keywords in the comment that follows each case are used to
** construct the OPFLG_INITIALIZER value that initializes opcodeProperty[].
-** Keywords include: in1, in2, in3, out2, out3. See
+** Keywords include: in1, in2, in3, out2_prerelease, out2, out3. See
** the mkopcodeh.awk script for additional information.
**
** Documentation about VDBE opcodes is generated by scanning this file
@@ -73627,15 +67263,9 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
** The next instruction executed will be
** the one at index P2 from the beginning of
** the program.
-**
-** The P1 parameter is not actually used by this opcode. However, it
-** is sometimes set to 1 instead of 0 as a hint to the command-line shell
-** that this Goto is the bottom of a loop and that the lines from P2 down
-** to the current line should be indented for EXPLAIN output.
*/
case OP_Goto: { /* jump */
-jump_to_p2_and_check_for_interrupt:
- pOp = &aOp[pOp->p2 - 1];
+ pc = pOp->p2 - 1;
/* Opcodes that are used as the bottom of a loop (OP_Next, OP_Prev,
** OP_VNext, OP_RowSetNext, or OP_SorterNext) all jump here upon
@@ -73648,7 +67278,7 @@ jump_to_p2_and_check_for_interrupt:
** checks on every opcode. This helps sqlite3_step() to run about 1.5%
** faster according to "valgrind --tool=cachegrind" */
check_for_interrupt:
- if( db->u1.isInterrupted ) goto abort_due_to_interrupt;
+ CHECK_FOR_INTERRUPT;
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
/* Call the progress callback if it is configured and the required number
** of VDBE ops have been executed (either since this invocation of
@@ -73677,103 +67307,46 @@ check_for_interrupt:
case OP_Gosub: { /* jump */
assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) );
pIn1 = &aMem[pOp->p1];
- assert( VdbeMemDynamic(pIn1)==0 );
+ assert( (pIn1->flags & MEM_Dyn)==0 );
memAboutToChange(p, pIn1);
pIn1->flags = MEM_Int;
- pIn1->u.i = (int)(pOp-aOp);
+ pIn1->u.i = pc;
REGISTER_TRACE(pOp->p1, pIn1);
-
- /* Most jump operations do a goto to this spot in order to update
- ** the pOp pointer. */
-jump_to_p2:
- pOp = &aOp[pOp->p2 - 1];
+ pc = pOp->p2 - 1;
break;
}
/* Opcode: Return P1 * * * *
**
-** Jump to the next instruction after the address in register P1. After
-** the jump, register P1 becomes undefined.
+** Jump to the next instruction after the address in register P1.
*/
case OP_Return: { /* in1 */
pIn1 = &aMem[pOp->p1];
- assert( pIn1->flags==MEM_Int );
- pOp = &aOp[pIn1->u.i];
- pIn1->flags = MEM_Undefined;
+ assert( pIn1->flags & MEM_Int );
+ pc = (int)pIn1->u.i;
break;
}
-/* Opcode: InitCoroutine P1 P2 P3 * *
-**
-** Set up register P1 so that it will Yield to the coroutine
-** located at address P3.
+/* Opcode: Yield P1 * * * *
**
-** If P2!=0 then the coroutine implementation immediately follows
-** this opcode. So jump over the coroutine implementation to
-** address P2.
-**
-** See also: EndCoroutine
+** Swap the program counter with the value in register P1.
*/
-case OP_InitCoroutine: { /* jump */
- assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) );
- assert( pOp->p2>=0 && pOp->p2<p->nOp );
- assert( pOp->p3>=0 && pOp->p3<p->nOp );
- pOut = &aMem[pOp->p1];
- assert( !VdbeMemDynamic(pOut) );
- pOut->u.i = pOp->p3 - 1;
- pOut->flags = MEM_Int;
- if( pOp->p2 ) goto jump_to_p2;
- break;
-}
-
-/* Opcode: EndCoroutine P1 * * * *
-**
-** The instruction at the address in register P1 is a Yield.
-** Jump to the P2 parameter of that Yield.
-** After the jump, register P1 becomes undefined.
-**
-** See also: InitCoroutine
-*/
-case OP_EndCoroutine: { /* in1 */
- VdbeOp *pCaller;
- pIn1 = &aMem[pOp->p1];
- assert( pIn1->flags==MEM_Int );
- assert( pIn1->u.i>=0 && pIn1->u.i<p->nOp );
- pCaller = &aOp[pIn1->u.i];
- assert( pCaller->opcode==OP_Yield );
- assert( pCaller->p2>=0 && pCaller->p2<p->nOp );
- pOp = &aOp[pCaller->p2 - 1];
- pIn1->flags = MEM_Undefined;
- break;
-}
-
-/* Opcode: Yield P1 P2 * * *
-**
-** Swap the program counter with the value in register P1. This
-** has the effect of yielding to a coroutine.
-**
-** If the coroutine that is launched by this instruction ends with
-** Yield or Return then continue to the next instruction. But if
-** the coroutine launched by this instruction ends with
-** EndCoroutine, then jump to P2 rather than continuing with the
-** next instruction.
-**
-** See also: InitCoroutine
-*/
-case OP_Yield: { /* in1, jump */
+case OP_Yield: { /* in1 */
+#if 0 /* local variables moved into u.aa */
int pcDest;
+#endif /* local variables moved into u.aa */
pIn1 = &aMem[pOp->p1];
- assert( VdbeMemDynamic(pIn1)==0 );
+ assert( (pIn1->flags & MEM_Dyn)==0 );
pIn1->flags = MEM_Int;
- pcDest = (int)pIn1->u.i;
- pIn1->u.i = (int)(pOp - aOp);
+ u.aa.pcDest = (int)pIn1->u.i;
+ pIn1->u.i = pc;
REGISTER_TRACE(pOp->p1, pIn1);
- pOp = &aOp[pcDest];
+ pc = u.aa.pcDest;
break;
}
/* Opcode: HaltIfNull P1 P2 P3 P4 P5
-** Synopsis: if r[P3]=null halt
+** Synopsis: if r[P3] null then halt
**
** Check the value in register P3. If it is NULL then Halt using
** parameter P1, P2, and P4 as if this were a Halt instruction. If the
@@ -73817,36 +67390,34 @@ case OP_HaltIfNull: { /* in3 */
** is the same as executing Halt.
*/
case OP_Halt: {
+#if 0 /* local variables moved into u.ab */
const char *zType;
const char *zLogFmt;
- VdbeFrame *pFrame;
- int pcx;
+#endif /* local variables moved into u.ab */
- pcx = (int)(pOp - aOp);
if( pOp->p1==SQLITE_OK && p->pFrame ){
/* Halt the sub-program. Return control to the parent frame. */
- pFrame = p->pFrame;
+ VdbeFrame *pFrame = p->pFrame;
p->pFrame = pFrame->pParent;
p->nFrame--;
sqlite3VdbeSetChanges(db, p->nChange);
- pcx = sqlite3VdbeFrameRestore(pFrame);
+ pc = sqlite3VdbeFrameRestore(pFrame);
lastRowid = db->lastRowid;
if( pOp->p2==OE_Ignore ){
- /* Instruction pcx is the OP_Program that invoked the sub-program
+ /* Instruction pc is the OP_Program that invoked the sub-program
** currently being halted. If the p2 instruction of this OP_Halt
** instruction is set to OE_Ignore, then the sub-program is throwing
** an IGNORE exception. In this case jump to the address specified
** as the p2 of the calling OP_Program. */
- pcx = p->aOp[pcx].p2-1;
+ pc = p->aOp[pc].p2-1;
}
aOp = p->aOp;
aMem = p->aMem;
- pOp = &aOp[pcx];
break;
}
p->rc = pOp->p1;
p->errorAction = (u8)pOp->p2;
- p->pc = pcx;
+ p->pc = pc;
if( p->rc ){
if( pOp->p5 ){
static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK",
@@ -73856,20 +67427,21 @@ case OP_Halt: {
testcase( pOp->p5==2 );
testcase( pOp->p5==3 );
testcase( pOp->p5==4 );
- zType = azType[pOp->p5-1];
+ u.ab.zType = azType[pOp->p5-1];
}else{
- zType = 0;
+ u.ab.zType = 0;
}
- assert( zType!=0 || pOp->p4.z!=0 );
- zLogFmt = "abort at %d in [%s]: %s";
- if( zType && pOp->p4.z ){
- sqlite3VdbeError(p, "%s constraint failed: %s", zType, pOp->p4.z);
+ assert( u.ab.zType!=0 || pOp->p4.z!=0 );
+ u.ab.zLogFmt = "abort at %d in [%s]: %s";
+ if( u.ab.zType && pOp->p4.z ){
+ sqlite3SetString(&p->zErrMsg, db, "%s constraint failed: %s",
+ u.ab.zType, pOp->p4.z);
}else if( pOp->p4.z ){
- sqlite3VdbeError(p, "%s", pOp->p4.z);
+ sqlite3SetString(&p->zErrMsg, db, "%s", pOp->p4.z);
}else{
- sqlite3VdbeError(p, "%s constraint failed", zType);
+ sqlite3SetString(&p->zErrMsg, db, "%s constraint failed", u.ab.zType);
}
- sqlite3_log(pOp->p1, zLogFmt, pcx, p->zSql, p->zErrMsg);
+ sqlite3_log(pOp->p1, u.ab.zLogFmt, pc, p->zSql, p->zErrMsg);
}
rc = sqlite3VdbeHalt(p);
assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR );
@@ -73888,8 +67460,7 @@ case OP_Halt: {
**
** The 32-bit integer value P1 is written into register P2.
*/
-case OP_Integer: { /* out2 */
- pOut = out2Prerelease(p, pOp);
+case OP_Integer: { /* out2-prerelease */
pOut->u.i = pOp->p1;
break;
}
@@ -73900,8 +67471,7 @@ case OP_Integer: { /* out2 */
** P4 is a pointer to a 64-bit integer value.
** Write that value into register P2.
*/
-case OP_Int64: { /* out2 */
- pOut = out2Prerelease(p, pOp);
+case OP_Int64: { /* out2-prerelease */
assert( pOp->p4.pI64!=0 );
pOut->u.i = *pOp->p4.pI64;
break;
@@ -73914,11 +67484,10 @@ case OP_Int64: { /* out2 */
** P4 is a pointer to a 64-bit floating point value.
** Write that value into register P2.
*/
-case OP_Real: { /* same as TK_FLOAT, out2 */
- pOut = out2Prerelease(p, pOp);
+case OP_Real: { /* same as TK_FLOAT, out2-prerelease */
pOut->flags = MEM_Real;
assert( !sqlite3IsNaN(*pOp->p4.pReal) );
- pOut->u.r = *pOp->p4.pReal;
+ pOut->r = *pOp->p4.pReal;
break;
}
#endif
@@ -73927,13 +67496,10 @@ case OP_Real: { /* same as TK_FLOAT, out2 */
** Synopsis: r[P2]='P4'
**
** P4 points to a nul terminated UTF-8 string. This opcode is transformed
-** into a String opcode before it is executed for the first time. During
-** this transformation, the length of string P4 is computed and stored
-** as the P1 parameter.
+** into an OP_String before it is executed for the first time.
*/
-case OP_String8: { /* same as TK_STRING, out2 */
+case OP_String8: { /* same as TK_STRING, out2-prerelease */
assert( pOp->p4.z!=0 );
- pOut = out2Prerelease(p, pOp);
pOp->opcode = OP_String;
pOp->p1 = sqlite3Strlen30(pOp->p4.z);
@@ -73942,10 +67508,11 @@ case OP_String8: { /* same as TK_STRING, out2 */
rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC);
if( rc==SQLITE_TOOBIG ) goto too_big;
if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem;
- assert( pOut->szMalloc>0 && pOut->zMalloc==pOut->z );
- assert( VdbeMemDynamic(pOut)==0 );
- pOut->szMalloc = 0;
+ assert( pOut->zMalloc==pOut->z );
+ assert( pOut->flags & MEM_Dyn );
+ pOut->zMalloc = 0;
pOut->flags |= MEM_Static;
+ pOut->flags &= ~MEM_Dyn;
if( pOp->p4type==P4_DYNAMIC ){
sqlite3DbFree(db, pOp->p4.z);
}
@@ -73960,31 +67527,18 @@ case OP_String8: { /* same as TK_STRING, out2 */
/* Fall through to the next case, OP_String */
}
-/* Opcode: String P1 P2 P3 P4 P5
+/* Opcode: String P1 P2 * P4 *
** Synopsis: r[P2]='P4' (len=P1)
**
** The string value P4 of length P1 (bytes) is stored in register P2.
-**
-** If P5!=0 and the content of register P3 is greater than zero, then
-** the datatype of the register P2 is converted to BLOB. The content is
-** the same sequence of bytes, it is merely interpreted as a BLOB instead
-** of a string, as if it had been CAST.
*/
-case OP_String: { /* out2 */
+case OP_String: { /* out2-prerelease */
assert( pOp->p4.z!=0 );
- pOut = out2Prerelease(p, pOp);
pOut->flags = MEM_Str|MEM_Static|MEM_Term;
pOut->z = pOp->p4.z;
pOut->n = pOp->p1;
pOut->enc = encoding;
UPDATE_MAX_BLOBSIZE(pOut);
- if( pOp->p5 ){
- assert( pOp->p3>0 );
- assert( pOp->p3<=(p->nMem-p->nCursor) );
- pIn3 = &aMem[pOp->p3];
- assert( pIn3->flags & MEM_Int );
- if( pIn3->u.i ) pOut->flags = MEM_Blob|MEM_Static|MEM_Term;
- }
break;
}
@@ -74000,47 +67554,33 @@ case OP_String: { /* out2 */
** NULL values will not compare equal even if SQLITE_NULLEQ is set on
** OP_Ne or OP_Eq.
*/
-case OP_Null: { /* out2 */
+case OP_Null: { /* out2-prerelease */
+#if 0 /* local variables moved into u.ac */
int cnt;
u16 nullFlag;
- pOut = out2Prerelease(p, pOp);
- cnt = pOp->p3-pOp->p2;
+#endif /* local variables moved into u.ac */
+ u.ac.cnt = pOp->p3-pOp->p2;
assert( pOp->p3<=(p->nMem-p->nCursor) );
- pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null;
- while( cnt>0 ){
+ pOut->flags = u.ac.nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null;
+ while( u.ac.cnt>0 ){
pOut++;
memAboutToChange(p, pOut);
- sqlite3VdbeMemSetNull(pOut);
- pOut->flags = nullFlag;
- cnt--;
+ VdbeMemRelease(pOut);
+ pOut->flags = u.ac.nullFlag;
+ u.ac.cnt--;
}
break;
}
-/* Opcode: SoftNull P1 * * * *
-** Synopsis: r[P1]=NULL
-**
-** Set register P1 to have the value NULL as seen by the OP_MakeRecord
-** instruction, but do not free any string or blob memory associated with
-** the register, so that if the value was a string or blob that was
-** previously copied using OP_SCopy, the copies will continue to be valid.
-*/
-case OP_SoftNull: {
- assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) );
- pOut = &aMem[pOp->p1];
- pOut->flags = (pOut->flags|MEM_Null)&~MEM_Undefined;
- break;
-}
-/* Opcode: Blob P1 P2 * P4 *
+/* Opcode: Blob P1 P2 * P4
** Synopsis: r[P2]=P4 (len=P1)
**
** P4 points to a blob of data P1 bytes long. Store this
** blob in register P2.
*/
-case OP_Blob: { /* out2 */
+case OP_Blob: { /* out2-prerelease */
assert( pOp->p1 <= SQLITE_MAX_LENGTH );
- pOut = out2Prerelease(p, pOp);
sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0);
pOut->enc = encoding;
UPDATE_MAX_BLOBSIZE(pOut);
@@ -74052,20 +67592,21 @@ case OP_Blob: { /* out2 */
**
** Transfer the values of bound parameter P1 into register P2
**
-** If the parameter is named, then its name appears in P4.
+** If the parameter is named, then its name appears in P4 and P3==1.
** The P4 value is used by sqlite3_bind_parameter_name().
*/
-case OP_Variable: { /* out2 */
+case OP_Variable: { /* out2-prerelease */
+#if 0 /* local variables moved into u.ad */
Mem *pVar; /* Value being transferred */
+#endif /* local variables moved into u.ad */
assert( pOp->p1>0 && pOp->p1<=p->nVar );
assert( pOp->p4.z==0 || pOp->p4.z==p->azVar[pOp->p1-1] );
- pVar = &p->aVar[pOp->p1 - 1];
- if( sqlite3VdbeMemTooBig(pVar) ){
+ u.ad.pVar = &p->aVar[pOp->p1 - 1];
+ if( sqlite3VdbeMemTooBig(u.ad.pVar) ){
goto too_big;
}
- pOut = out2Prerelease(p, pOp);
- sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static);
+ sqlite3VdbeMemShallowCopy(pOut, u.ad.pVar, MEM_Static);
UPDATE_MAX_BLOBSIZE(pOut);
break;
}
@@ -74073,46 +67614,50 @@ case OP_Variable: { /* out2 */
/* Opcode: Move P1 P2 P3 * *
** Synopsis: r[P2@P3]=r[P1@P3]
**
-** Move the P3 values in register P1..P1+P3-1 over into
-** registers P2..P2+P3-1. Registers P1..P1+P3-1 are
+** Move the values in register P1..P1+P3 over into
+** registers P2..P2+P3. Registers P1..P1+P3 are
** left holding a NULL. It is an error for register ranges
-** P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error
-** for P3 to be less than 1.
+** P1..P1+P3 and P2..P2+P3 to overlap.
*/
case OP_Move: {
+#if 0 /* local variables moved into u.ae */
+ char *zMalloc; /* Holding variable for allocated memory */
int n; /* Number of registers left to copy */
int p1; /* Register to copy from */
int p2; /* Register to copy to */
+#endif /* local variables moved into u.ae */
- n = pOp->p3;
- p1 = pOp->p1;
- p2 = pOp->p2;
- assert( n>0 && p1>0 && p2>0 );
- assert( p1+n<=p2 || p2+n<=p1 );
+ u.ae.n = pOp->p3;
+ u.ae.p1 = pOp->p1;
+ u.ae.p2 = pOp->p2;
+ assert( u.ae.n>=0 && u.ae.p1>0 && u.ae.p2>0 );
+ assert( u.ae.p1+u.ae.n<=u.ae.p2 || u.ae.p2+u.ae.n<=u.ae.p1 );
- pIn1 = &aMem[p1];
- pOut = &aMem[p2];
+ pIn1 = &aMem[u.ae.p1];
+ pOut = &aMem[u.ae.p2];
do{
assert( pOut<=&aMem[(p->nMem-p->nCursor)] );
assert( pIn1<=&aMem[(p->nMem-p->nCursor)] );
assert( memIsValid(pIn1) );
memAboutToChange(p, pOut);
+ u.ae.zMalloc = pOut->zMalloc;
+ pOut->zMalloc = 0;
sqlite3VdbeMemMove(pOut, pIn1);
#ifdef SQLITE_DEBUG
- if( pOut->pScopyFrom>=&aMem[p1] && pOut->pScopyFrom<pOut ){
- pOut->pScopyFrom += pOp->p2 - p1;
+ if( pOut->pScopyFrom>=&aMem[u.ae.p1] && pOut->pScopyFrom<&aMem[u.ae.p1+pOp->p3] ){
+ pOut->pScopyFrom += u.ae.p1 - pOp->p2;
}
#endif
- Deephemeralize(pOut);
- REGISTER_TRACE(p2++, pOut);
+ pIn1->zMalloc = u.ae.zMalloc;
+ REGISTER_TRACE(u.ae.p2++, pOut);
pIn1++;
pOut++;
- }while( --n );
+ }while( u.ae.n-- );
break;
}
/* Opcode: Copy P1 P2 P3 * *
-** Synopsis: r[P2@P3+1]=r[P1@P3+1]
+** Synopsis: r[P2@P3]=r[P1@P3]
**
** Make a copy of registers P1..P1+P3 into registers P2..P2+P3.
**
@@ -74120,9 +67665,11 @@ case OP_Move: {
** is made of any string or blob constant. See also OP_SCopy.
*/
case OP_Copy: {
+#if 0 /* local variables moved into u.af */
int n;
+#endif /* local variables moved into u.af */
- n = pOp->p3;
+ u.af.n = pOp->p3;
pIn1 = &aMem[pOp->p1];
pOut = &aMem[pOp->p2];
assert( pOut!=pIn1 );
@@ -74132,8 +67679,8 @@ case OP_Copy: {
#ifdef SQLITE_DEBUG
pOut->pScopyFrom = 0;
#endif
- REGISTER_TRACE(pOp->p2+pOp->p3-n, pOut);
- if( (n--)==0 ) break;
+ REGISTER_TRACE(pOp->p2+pOp->p3-u.af.n, pOut);
+ if( (u.af.n--)==0 ) break;
pOut++;
pIn1++;
}
@@ -74170,12 +67717,14 @@ case OP_SCopy: { /* out2 */
** The registers P1 through P1+P2-1 contain a single row of
** results. This opcode causes the sqlite3_step() call to terminate
** with an SQLITE_ROW return code and it sets up the sqlite3_stmt
-** structure to provide access to the r(P1)..r(P1+P2-1) values as
-** the result row.
+** structure to provide access to the top P1 values as the result
+** row.
*/
case OP_ResultRow: {
+#if 0 /* local variables moved into u.ag */
Mem *pMem;
int i;
+#endif /* local variables moved into u.ag */
assert( p->nResColumn==pOp->p2 );
assert( pOp->p1>0 );
assert( pOp->p1+pOp->p2<=(p->nMem-p->nCursor)+1 );
@@ -74201,8 +67750,8 @@ case OP_ResultRow: {
break;
}
- /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then
- ** DML statements invoke this opcode to return the number of rows
+ /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then
+ ** DML statements invoke this opcode to return the number of rows
** modified to the user. This is the only way that a VM that
** opens a statement transaction may invoke this opcode.
**
@@ -74229,20 +67778,21 @@ case OP_ResultRow: {
** and have an assigned type. The results are de-ephemeralized as
** a side effect.
*/
- pMem = p->pResultSet = &aMem[pOp->p1];
- for(i=0; i<pOp->p2; i++){
- assert( memIsValid(&pMem[i]) );
- Deephemeralize(&pMem[i]);
- assert( (pMem[i].flags & MEM_Ephem)==0
- || (pMem[i].flags & (MEM_Str|MEM_Blob))==0 );
- sqlite3VdbeMemNulTerminate(&pMem[i]);
- REGISTER_TRACE(pOp->p1+i, &pMem[i]);
+ u.ag.pMem = p->pResultSet = &aMem[pOp->p1];
+ for(u.ag.i=0; u.ag.i<pOp->p2; u.ag.i++){
+ assert( memIsValid(&u.ag.pMem[u.ag.i]) );
+ Deephemeralize(&u.ag.pMem[u.ag.i]);
+ assert( (u.ag.pMem[u.ag.i].flags & MEM_Ephem)==0
+ || (u.ag.pMem[u.ag.i].flags & (MEM_Str|MEM_Blob))==0 );
+ sqlite3VdbeMemNulTerminate(&u.ag.pMem[u.ag.i]);
+ sqlite3VdbeMemStoreType(&u.ag.pMem[u.ag.i]);
+ REGISTER_TRACE(pOp->p1+u.ag.i, &u.ag.pMem[u.ag.i]);
}
if( db->mallocFailed ) goto no_mem;
/* Return SQLITE_ROW
*/
- p->pc = (int)(pOp - aOp) + 1;
+ p->pc = pc + 1;
rc = SQLITE_ROW;
goto vdbe_return;
}
@@ -74261,7 +67811,9 @@ case OP_ResultRow: {
** to avoid a memcpy().
*/
case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
+#if 0 /* local variables moved into u.ah */
i64 nByte;
+#endif /* local variables moved into u.ah */
pIn1 = &aMem[pOp->p1];
pIn2 = &aMem[pOp->p2];
@@ -74274,22 +67826,22 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
if( ExpandBlob(pIn1) || ExpandBlob(pIn2) ) goto no_mem;
Stringify(pIn1, encoding);
Stringify(pIn2, encoding);
- nByte = pIn1->n + pIn2->n;
- if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ u.ah.nByte = pIn1->n + pIn2->n;
+ if( u.ah.nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
goto too_big;
}
- if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){
+ MemSetTypeFlag(pOut, MEM_Str);
+ if( sqlite3VdbeMemGrow(pOut, (int)u.ah.nByte+2, pOut==pIn2) ){
goto no_mem;
}
- MemSetTypeFlag(pOut, MEM_Str);
if( pOut!=pIn2 ){
memcpy(pOut->z, pIn2->z, pIn2->n);
}
memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n);
- pOut->z[nByte]=0;
- pOut->z[nByte+1] = 0;
+ pOut->z[u.ah.nByte]=0;
+ pOut->z[u.ah.nByte+1] = 0;
pOut->flags |= MEM_Term;
- pOut->n = (int)nByte;
+ pOut->n = (int)u.ah.nByte;
pOut->enc = encoding;
UPDATE_MAX_BLOBSIZE(pOut);
break;
@@ -74338,79 +67890,79 @@ case OP_Subtract: /* same as TK_MINUS, in1, in2, out3 */
case OP_Multiply: /* same as TK_STAR, in1, in2, out3 */
case OP_Divide: /* same as TK_SLASH, in1, in2, out3 */
case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */
+#if 0 /* local variables moved into u.ai */
char bIntint; /* Started out as two integer operands */
- u16 flags; /* Combined MEM_* flags from both inputs */
- u16 type1; /* Numeric type of left operand */
- u16 type2; /* Numeric type of right operand */
+ int flags; /* Combined MEM_* flags from both inputs */
i64 iA; /* Integer value of left operand */
i64 iB; /* Integer value of right operand */
double rA; /* Real value of left operand */
double rB; /* Real value of right operand */
+#endif /* local variables moved into u.ai */
pIn1 = &aMem[pOp->p1];
- type1 = numericType(pIn1);
+ applyNumericAffinity(pIn1);
pIn2 = &aMem[pOp->p2];
- type2 = numericType(pIn2);
+ applyNumericAffinity(pIn2);
pOut = &aMem[pOp->p3];
- flags = pIn1->flags | pIn2->flags;
- if( (flags & MEM_Null)!=0 ) goto arithmetic_result_is_null;
- if( (type1 & type2 & MEM_Int)!=0 ){
- iA = pIn1->u.i;
- iB = pIn2->u.i;
- bIntint = 1;
+ u.ai.flags = pIn1->flags | pIn2->flags;
+ if( (u.ai.flags & MEM_Null)!=0 ) goto arithmetic_result_is_null;
+ if( (pIn1->flags & pIn2->flags & MEM_Int)==MEM_Int ){
+ u.ai.iA = pIn1->u.i;
+ u.ai.iB = pIn2->u.i;
+ u.ai.bIntint = 1;
switch( pOp->opcode ){
- case OP_Add: if( sqlite3AddInt64(&iB,iA) ) goto fp_math; break;
- case OP_Subtract: if( sqlite3SubInt64(&iB,iA) ) goto fp_math; break;
- case OP_Multiply: if( sqlite3MulInt64(&iB,iA) ) goto fp_math; break;
+ case OP_Add: if( sqlite3AddInt64(&u.ai.iB,u.ai.iA) ) goto fp_math; break;
+ case OP_Subtract: if( sqlite3SubInt64(&u.ai.iB,u.ai.iA) ) goto fp_math; break;
+ case OP_Multiply: if( sqlite3MulInt64(&u.ai.iB,u.ai.iA) ) goto fp_math; break;
case OP_Divide: {
- if( iA==0 ) goto arithmetic_result_is_null;
- if( iA==-1 && iB==SMALLEST_INT64 ) goto fp_math;
- iB /= iA;
+ if( u.ai.iA==0 ) goto arithmetic_result_is_null;
+ if( u.ai.iA==-1 && u.ai.iB==SMALLEST_INT64 ) goto fp_math;
+ u.ai.iB /= u.ai.iA;
break;
}
default: {
- if( iA==0 ) goto arithmetic_result_is_null;
- if( iA==-1 ) iA = 1;
- iB %= iA;
+ if( u.ai.iA==0 ) goto arithmetic_result_is_null;
+ if( u.ai.iA==-1 ) u.ai.iA = 1;
+ u.ai.iB %= u.ai.iA;
break;
}
}
- pOut->u.i = iB;
+ pOut->u.i = u.ai.iB;
MemSetTypeFlag(pOut, MEM_Int);
}else{
- bIntint = 0;
+ u.ai.bIntint = 0;
fp_math:
- rA = sqlite3VdbeRealValue(pIn1);
- rB = sqlite3VdbeRealValue(pIn2);
+ u.ai.rA = sqlite3VdbeRealValue(pIn1);
+ u.ai.rB = sqlite3VdbeRealValue(pIn2);
switch( pOp->opcode ){
- case OP_Add: rB += rA; break;
- case OP_Subtract: rB -= rA; break;
- case OP_Multiply: rB *= rA; break;
+ case OP_Add: u.ai.rB += u.ai.rA; break;
+ case OP_Subtract: u.ai.rB -= u.ai.rA; break;
+ case OP_Multiply: u.ai.rB *= u.ai.rA; break;
case OP_Divide: {
/* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
- if( rA==(double)0 ) goto arithmetic_result_is_null;
- rB /= rA;
+ if( u.ai.rA==(double)0 ) goto arithmetic_result_is_null;
+ u.ai.rB /= u.ai.rA;
break;
}
default: {
- iA = (i64)rA;
- iB = (i64)rB;
- if( iA==0 ) goto arithmetic_result_is_null;
- if( iA==-1 ) iA = 1;
- rB = (double)(iB % iA);
+ u.ai.iA = (i64)u.ai.rA;
+ u.ai.iB = (i64)u.ai.rB;
+ if( u.ai.iA==0 ) goto arithmetic_result_is_null;
+ if( u.ai.iA==-1 ) u.ai.iA = 1;
+ u.ai.rB = (double)(u.ai.iB % u.ai.iA);
break;
}
}
#ifdef SQLITE_OMIT_FLOATING_POINT
- pOut->u.i = rB;
+ pOut->u.i = u.ai.rB;
MemSetTypeFlag(pOut, MEM_Int);
#else
- if( sqlite3IsNaN(rB) ){
+ if( sqlite3IsNaN(u.ai.rB) ){
goto arithmetic_result_is_null;
}
- pOut->u.r = rB;
+ pOut->r = u.ai.rB;
MemSetTypeFlag(pOut, MEM_Real);
- if( ((type1|type2)&MEM_Real)==0 && !bIntint ){
+ if( (u.ai.flags & MEM_Real)==0 && !u.ai.bIntint ){
sqlite3VdbeIntegerAffinity(pOut);
}
#endif
@@ -74435,7 +67987,7 @@ arithmetic_result_is_null:
**
** The interface used by the implementation of the aforementioned functions
** to retrieve the collation sequence set by this opcode is not available
-** publicly. Only built-in functions have access to this feature.
+** publicly, only to user functions defined in func.c.
*/
case OP_CollSeq: {
assert( pOp->p4type==P4_COLLSEQ );
@@ -74445,10 +67997,10 @@ case OP_CollSeq: {
break;
}
-/* Opcode: Function0 P1 P2 P3 P4 P5
+/* Opcode: Function P1 P2 P3 P4 P5
** Synopsis: r[P3]=func(r[P2@P5])
**
-** Invoke a user function (P4 is a pointer to a FuncDef object that
+** Invoke a user function (P4 is a pointer to a Function structure that
** defines the function) with P5 arguments taken from register P2 and
** successors. The result of the function is stored in register P3.
** Register P3 must not be one of the function inputs.
@@ -74460,100 +68012,98 @@ case OP_CollSeq: {
** sqlite3_set_auxdata() API may be safely retained until the next
** invocation of this opcode.
**
-** See also: Function, AggStep, AggFinal
-*/
-/* Opcode: Function P1 P2 P3 P4 P5
-** Synopsis: r[P3]=func(r[P2@P5])
-**
-** Invoke a user function (P4 is a pointer to an sqlite3_context object that
-** contains a pointer to the function to be run) with P5 arguments taken
-** from register P2 and successors. The result of the function is stored
-** in register P3. Register P3 must not be one of the function inputs.
-**
-** P1 is a 32-bit bitmask indicating whether or not each argument to the
-** function was determined to be constant at compile time. If the first
-** argument was constant then bit 0 of P1 is set. This is used to determine
-** whether meta data associated with a user function argument using the
-** sqlite3_set_auxdata() API may be safely retained until the next
-** invocation of this opcode.
-**
-** SQL functions are initially coded as OP_Function0 with P4 pointing
-** to a FuncDef object. But on first evaluation, the P4 operand is
-** automatically converted into an sqlite3_context object and the operation
-** changed to this OP_Function opcode. In this way, the initialization of
-** the sqlite3_context object occurs only once, rather than once for each
-** evaluation of the function.
-**
-** See also: Function0, AggStep, AggFinal
+** See also: AggStep and AggFinal
*/
-case OP_Function0: {
- int n;
- sqlite3_context *pCtx;
-
- assert( pOp->p4type==P4_FUNCDEF );
- n = pOp->p5;
- assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
- assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem-p->nCursor)+1) );
- assert( pOp->p3<pOp->p2 || pOp->p3>=pOp->p2+n );
- pCtx = sqlite3DbMallocRaw(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*));
- if( pCtx==0 ) goto no_mem;
- pCtx->pOut = 0;
- pCtx->pFunc = pOp->p4.pFunc;
- pCtx->iOp = (int)(pOp - aOp);
- pCtx->pVdbe = p;
- pCtx->argc = n;
- pOp->p4type = P4_FUNCCTX;
- pOp->p4.pCtx = pCtx;
- pOp->opcode = OP_Function;
- /* Fall through into OP_Function */
-}
case OP_Function: {
+#if 0 /* local variables moved into u.aj */
int i;
- sqlite3_context *pCtx;
-
- assert( pOp->p4type==P4_FUNCCTX );
- pCtx = pOp->p4.pCtx;
+ Mem *pArg;
+ sqlite3_context ctx;
+ sqlite3_value **apVal;
+ int n;
+#endif /* local variables moved into u.aj */
- /* If this function is inside of a trigger, the register array in aMem[]
- ** might change from one evaluation to the next. The next block of code
- ** checks to see if the register array has changed, and if so it
- ** reinitializes the relavant parts of the sqlite3_context object */
+ u.aj.n = pOp->p5;
+ u.aj.apVal = p->apArg;
+ assert( u.aj.apVal || u.aj.n==0 );
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
pOut = &aMem[pOp->p3];
- if( pCtx->pOut != pOut ){
- pCtx->pOut = pOut;
- for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
+ memAboutToChange(p, pOut);
+
+ assert( u.aj.n==0 || (pOp->p2>0 && pOp->p2+u.aj.n<=(p->nMem-p->nCursor)+1) );
+ assert( pOp->p3<pOp->p2 || pOp->p3>=pOp->p2+u.aj.n );
+ u.aj.pArg = &aMem[pOp->p2];
+ for(u.aj.i=0; u.aj.i<u.aj.n; u.aj.i++, u.aj.pArg++){
+ assert( memIsValid(u.aj.pArg) );
+ u.aj.apVal[u.aj.i] = u.aj.pArg;
+ Deephemeralize(u.aj.pArg);
+ sqlite3VdbeMemStoreType(u.aj.pArg);
+ REGISTER_TRACE(pOp->p2+u.aj.i, u.aj.pArg);
}
- memAboutToChange(p, pCtx->pOut);
-#ifdef SQLITE_DEBUG
- for(i=0; i<pCtx->argc; i++){
- assert( memIsValid(pCtx->argv[i]) );
- REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]);
+ assert( pOp->p4type==P4_FUNCDEF );
+ u.aj.ctx.pFunc = pOp->p4.pFunc;
+ u.aj.ctx.iOp = pc;
+ u.aj.ctx.pVdbe = p;
+
+ /* The output cell may already have a buffer allocated. Move
+ ** the pointer to u.aj.ctx.s so that the user-function can use
+ ** the already allocated buffer instead of allocating a new one.
+ */
+ memcpy(&u.aj.ctx.s, pOut, sizeof(Mem));
+ pOut->flags = MEM_Null;
+ pOut->xDel = 0;
+ pOut->zMalloc = 0;
+ MemSetTypeFlag(&u.aj.ctx.s, MEM_Null);
+
+ u.aj.ctx.fErrorOrAux = 0;
+ if( u.aj.ctx.pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ assert( pOp>aOp );
+ assert( pOp[-1].p4type==P4_COLLSEQ );
+ assert( pOp[-1].opcode==OP_CollSeq );
+ u.aj.ctx.pColl = pOp[-1].p4.pColl;
}
-#endif
- MemSetTypeFlag(pCtx->pOut, MEM_Null);
- pCtx->fErrorOrAux = 0;
db->lastRowid = lastRowid;
- (*pCtx->pFunc->xFunc)(pCtx, pCtx->argc, pCtx->argv); /* IMP: R-24505-23230 */
- lastRowid = db->lastRowid; /* Remember rowid changes made by xFunc */
+ (*u.aj.ctx.pFunc->xFunc)(&u.aj.ctx, u.aj.n, u.aj.apVal); /* IMP: R-24505-23230 */
+ lastRowid = db->lastRowid;
+
+ if( db->mallocFailed ){
+ /* Even though a malloc() has failed, the implementation of the
+ ** user function may have called an sqlite3_result_XXX() function
+ ** to return a value. The following call releases any resources
+ ** associated with such a value.
+ */
+ sqlite3VdbeMemRelease(&u.aj.ctx.s);
+ goto no_mem;
+ }
/* If the function returned an error, throw an exception */
- if( pCtx->fErrorOrAux ){
- if( pCtx->isError ){
- sqlite3VdbeError(p, "%s", sqlite3_value_text(pCtx->pOut));
- rc = pCtx->isError;
+ if( u.aj.ctx.fErrorOrAux ){
+ if( u.aj.ctx.isError ){
+ sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&u.aj.ctx.s));
+ rc = u.aj.ctx.isError;
}
- sqlite3VdbeDeleteAuxData(p, pCtx->iOp, pOp->p1);
+ sqlite3VdbeDeleteAuxData(p, pc, pOp->p1);
}
/* Copy the result of the function into register P3 */
- if( pOut->flags & (MEM_Str|MEM_Blob) ){
- sqlite3VdbeChangeEncoding(pCtx->pOut, encoding);
- if( sqlite3VdbeMemTooBig(pCtx->pOut) ) goto too_big;
+ sqlite3VdbeChangeEncoding(&u.aj.ctx.s, encoding);
+ assert( pOut->flags==MEM_Null );
+ memcpy(pOut, &u.aj.ctx.s, sizeof(Mem));
+ if( sqlite3VdbeMemTooBig(pOut) ){
+ goto too_big;
}
- REGISTER_TRACE(pOp->p3, pCtx->pOut);
- UPDATE_MAX_BLOBSIZE(pCtx->pOut);
+#if 0
+ /* The app-defined function has done something that has caused this
+ ** statement to expire. (Perhaps the function called sqlite3_exec()
+ ** with a CREATE TABLE statement.)
+ */
+ if( p->expired ) rc = SQLITE_ABORT;
+#endif
+
+ REGISTER_TRACE(pOp->p3, pOut);
+ UPDATE_MAX_BLOBSIZE(pOut);
break;
}
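
Editorial note, not part of the patch: OP_Function is the opcode that eventually invokes xFunc for an application-defined SQL function. A minimal sketch of the public-API side that feeds it, assuming an already-open handle db; the function name "half" is hypothetical.

#include <sqlite3.h>

/* Hypothetical scalar function: half(X) returns X/2 as a real value. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);                      /* NULL in, NULL out */
  }else{
    sqlite3_result_double(ctx, sqlite3_value_double(argv[0])/2.0);
  }
}

/* After registration, "SELECT half(7)" is evaluated through OP_Function. */
int register_half(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);
}
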
@@ -74591,10 +68141,12 @@ case OP_BitAnd: /* same as TK_BITAND, in1, in2, out3 */
case OP_BitOr: /* same as TK_BITOR, in1, in2, out3 */
case OP_ShiftLeft: /* same as TK_LSHIFT, in1, in2, out3 */
case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */
+#if 0 /* local variables moved into u.ak */
i64 iA;
u64 uA;
i64 iB;
u8 op;
+#endif /* local variables moved into u.ak */
pIn1 = &aMem[pOp->p1];
pIn2 = &aMem[pOp->p2];
@@ -74603,38 +68155,38 @@ case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */
sqlite3VdbeMemSetNull(pOut);
break;
}
- iA = sqlite3VdbeIntValue(pIn2);
- iB = sqlite3VdbeIntValue(pIn1);
- op = pOp->opcode;
- if( op==OP_BitAnd ){
- iA &= iB;
- }else if( op==OP_BitOr ){
- iA |= iB;
- }else if( iB!=0 ){
- assert( op==OP_ShiftRight || op==OP_ShiftLeft );
+ u.ak.iA = sqlite3VdbeIntValue(pIn2);
+ u.ak.iB = sqlite3VdbeIntValue(pIn1);
+ u.ak.op = pOp->opcode;
+ if( u.ak.op==OP_BitAnd ){
+ u.ak.iA &= u.ak.iB;
+ }else if( u.ak.op==OP_BitOr ){
+ u.ak.iA |= u.ak.iB;
+ }else if( u.ak.iB!=0 ){
+ assert( u.ak.op==OP_ShiftRight || u.ak.op==OP_ShiftLeft );
/* If shifting by a negative amount, shift in the other direction */
- if( iB<0 ){
+ if( u.ak.iB<0 ){
assert( OP_ShiftRight==OP_ShiftLeft+1 );
- op = 2*OP_ShiftLeft + 1 - op;
- iB = iB>(-64) ? -iB : 64;
+ u.ak.op = 2*OP_ShiftLeft + 1 - u.ak.op;
+ u.ak.iB = u.ak.iB>(-64) ? -u.ak.iB : 64;
}
- if( iB>=64 ){
- iA = (iA>=0 || op==OP_ShiftLeft) ? 0 : -1;
+ if( u.ak.iB>=64 ){
+ u.ak.iA = (u.ak.iA>=0 || u.ak.op==OP_ShiftLeft) ? 0 : -1;
}else{
- memcpy(&uA, &iA, sizeof(uA));
- if( op==OP_ShiftLeft ){
- uA <<= iB;
+ memcpy(&u.ak.uA, &u.ak.iA, sizeof(u.ak.uA));
+ if( u.ak.op==OP_ShiftLeft ){
+ u.ak.uA <<= u.ak.iB;
}else{
- uA >>= iB;
+ u.ak.uA >>= u.ak.iB;
/* Sign-extend on a right shift of a negative number */
- if( iA<0 ) uA |= ((((u64)0xffffffff)<<32)|0xffffffff) << (64-iB);
+ if( u.ak.iA<0 ) u.ak.uA |= ((((u64)0xffffffff)<<32)|0xffffffff) << (64-u.ak.iB);
}
- memcpy(&iA, &uA, sizeof(iA));
+ memcpy(&u.ak.iA, &u.ak.uA, sizeof(u.ak.iA));
}
}
- pOut->u.i = iA;
+ pOut->u.i = u.ak.iA;
MemSetTypeFlag(pOut, MEM_Int);
break;
}
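
Editorial note, not part of the patch: the shift opcode above reverses direction for negative counts, saturates counts of 64 or more, and sign-extends right shifts of negative values by going through an unsigned copy. A standalone sketch of the same rules, with a hypothetical helper name:

#include <stdint.h>
#include <string.h>

/* Sketch of the OP_ShiftLeft/OP_ShiftRight rules for v shifted by amt bits. */
static int64_t sql_shift(int64_t v, int64_t amt, int leftward){
  uint64_t u;
  if( amt<0 ){ leftward = !leftward; amt = amt>(-64) ? -amt : 64; }
  if( amt==0 ) return v;
  if( amt>=64 ) return (v>=0 || leftward) ? 0 : -1;
  memcpy(&u, &v, sizeof(u));                  /* avoid signed-shift pitfalls */
  if( leftward ){
    u <<= amt;
  }else{
    u >>= amt;
    if( v<0 ) u |= ~(uint64_t)0 << (64-amt);  /* sign-extend the result */
  }
  memcpy(&v, &u, sizeof(v));
  return v;
}
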
@@ -74666,13 +68218,13 @@ case OP_MustBeInt: { /* jump, in1 */
pIn1 = &aMem[pOp->p1];
if( (pIn1->flags & MEM_Int)==0 ){
applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding);
- VdbeBranchTaken((pIn1->flags&MEM_Int)==0, 2);
if( (pIn1->flags & MEM_Int)==0 ){
if( pOp->p2==0 ){
rc = SQLITE_MISMATCH;
goto abort_due_to_error;
}else{
- goto jump_to_p2;
+ pc = pOp->p2 - 1;
+ break;
}
}
}
@@ -74700,37 +68252,106 @@ case OP_RealAffinity: { /* in1 */
#endif
#ifndef SQLITE_OMIT_CAST
-/* Opcode: Cast P1 P2 * * *
-** Synopsis: affinity(r[P1])
+/* Opcode: ToText P1 * * * *
**
-** Force the value in register P1 to be the type defined by P2.
-**
-** <ul>
-** <li value="97"> TEXT
-** <li value="98"> BLOB
-** <li value="99"> NUMERIC
-** <li value="100"> INTEGER
-** <li value="101"> REAL
-** </ul>
+** Force the value in register P1 to be text.
+** If the value is numeric, convert it to a string using the
+** equivalent of printf(). Blob values are unchanged and
+** are afterwards simply interpreted as text.
**
** A NULL value is not changed by this routine. It remains NULL.
*/
-case OP_Cast: { /* in1 */
- assert( pOp->p2>=SQLITE_AFF_BLOB && pOp->p2<=SQLITE_AFF_REAL );
- testcase( pOp->p2==SQLITE_AFF_TEXT );
- testcase( pOp->p2==SQLITE_AFF_BLOB );
- testcase( pOp->p2==SQLITE_AFF_NUMERIC );
- testcase( pOp->p2==SQLITE_AFF_INTEGER );
- testcase( pOp->p2==SQLITE_AFF_REAL );
+case OP_ToText: { /* same as TK_TO_TEXT, in1 */
pIn1 = &aMem[pOp->p1];
memAboutToChange(p, pIn1);
+ if( pIn1->flags & MEM_Null ) break;
+ assert( MEM_Str==(MEM_Blob>>3) );
+ pIn1->flags |= (pIn1->flags&MEM_Blob)>>3;
+ applyAffinity(pIn1, SQLITE_AFF_TEXT, encoding);
rc = ExpandBlob(pIn1);
- sqlite3VdbeMemCast(pIn1, pOp->p2, encoding);
+ assert( pIn1->flags & MEM_Str || db->mallocFailed );
+ pIn1->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero);
+ UPDATE_MAX_BLOBSIZE(pIn1);
+ break;
+}
+
+/* Opcode: ToBlob P1 * * * *
+**
+** Force the value in register P1 to be a BLOB.
+** If the value is numeric, convert it to a string first.
+** Strings are simply reinterpreted as blobs with no change
+** to the underlying data.
+**
+** A NULL value is not changed by this routine. It remains NULL.
+*/
+case OP_ToBlob: { /* same as TK_TO_BLOB, in1 */
+ pIn1 = &aMem[pOp->p1];
+ if( pIn1->flags & MEM_Null ) break;
+ if( (pIn1->flags & MEM_Blob)==0 ){
+ applyAffinity(pIn1, SQLITE_AFF_TEXT, encoding);
+ assert( pIn1->flags & MEM_Str || db->mallocFailed );
+ MemSetTypeFlag(pIn1, MEM_Blob);
+ }else{
+ pIn1->flags &= ~(MEM_TypeMask&~MEM_Blob);
+ }
UPDATE_MAX_BLOBSIZE(pIn1);
break;
}
+
+/* Opcode: ToNumeric P1 * * * *
+**
+** Force the value in register P1 to be numeric (either an
+** integer or a floating-point number.)
+** If the value is text or blob, try to convert it to a number using the
+** equivalent of atoi() or atof() and store 0 if no such conversion
+** is possible.
+**
+** A NULL value is not changed by this routine. It remains NULL.
+*/
+case OP_ToNumeric: { /* same as TK_TO_NUMERIC, in1 */
+ pIn1 = &aMem[pOp->p1];
+ sqlite3VdbeMemNumerify(pIn1);
+ break;
+}
#endif /* SQLITE_OMIT_CAST */
+/* Opcode: ToInt P1 * * * *
+**
+** Force the value in register P1 to be an integer. If
+** the value is currently a real number, drop its fractional part.
+** If the value is text or blob, try to convert it to an integer using the
+** equivalent of atoi() and store 0 if no such conversion is possible.
+**
+** A NULL value is not changed by this routine. It remains NULL.
+*/
+case OP_ToInt: { /* same as TK_TO_INT, in1 */
+ pIn1 = &aMem[pOp->p1];
+ if( (pIn1->flags & MEM_Null)==0 ){
+ sqlite3VdbeMemIntegerify(pIn1);
+ }
+ break;
+}
+
+#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_FLOATING_POINT)
+/* Opcode: ToReal P1 * * * *
+**
+** Force the value in register P1 to be a floating point number.
+** If the value is currently an integer, convert it.
+** If the value is text or blob, try to convert it to a real number using
+** the equivalent of atof() and store 0.0 if no such conversion is possible.
+**
+** A NULL value is not changed by this routine. It remains NULL.
+*/
+case OP_ToReal: { /* same as TK_TO_REAL, in1 */
+ pIn1 = &aMem[pOp->p1];
+ memAboutToChange(p, pIn1);
+ if( (pIn1->flags & MEM_Null)==0 ){
+ sqlite3VdbeMemRealify(pIn1);
+ }
+ break;
+}
+#endif /* !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_FLOATING_POINT) */
+
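
Editorial note, not part of the patch: the cast opcodes above describe their text-to-number behaviour as "the equivalent of atoi()/atof(), storing 0 on failure". A rough standalone sketch of that fallback; SQLite's own sqlite3Atoi64()/sqlite3AtoF() are stricter about what counts as a valid number, so this is only illustrative.

#include <stdlib.h>

/* Rough sketch: convert text to an integer, or 0 if no conversion is
** possible, as the ToInt/ToNumeric comments above describe. */
static long long text_to_int_or_zero(const char *z){
  char *zEnd = 0;
  long long v = strtoll(z, &zEnd, 10);
  return zEnd==z ? 0 : v;      /* no digits consumed -> fall back to 0 */
}
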
/* Opcode: Lt P1 P2 P3 P4 P5
** Synopsis: if r[P1]<r[P3] goto P2
**
@@ -74819,16 +68440,18 @@ case OP_Lt: /* same as TK_LT, jump, in1, in3 */
case OP_Le: /* same as TK_LE, jump, in1, in3 */
case OP_Gt: /* same as TK_GT, jump, in1, in3 */
case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
+#if 0 /* local variables moved into u.al */
int res; /* Result of the comparison of pIn1 against pIn3 */
char affinity; /* Affinity to use for comparison */
u16 flags1; /* Copy of initial value of pIn1->flags */
u16 flags3; /* Copy of initial value of pIn3->flags */
+#endif /* local variables moved into u.al */
pIn1 = &aMem[pOp->p1];
pIn3 = &aMem[pOp->p3];
- flags1 = pIn1->flags;
- flags3 = pIn3->flags;
- if( (flags1 | flags3)&MEM_Null ){
+ u.al.flags1 = pIn1->flags;
+ u.al.flags3 = pIn3->flags;
+ if( (u.al.flags1 | u.al.flags3)&MEM_Null ){
/* One or both operands are NULL */
if( pOp->p5 & SQLITE_NULLEQ ){
/* If SQLITE_NULLEQ is set (which will only happen if the operator is
@@ -74836,98 +68459,65 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
** or not both operands are null.
*/
assert( pOp->opcode==OP_Eq || pOp->opcode==OP_Ne );
- assert( (flags1 & MEM_Cleared)==0 );
- assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 );
- if( (flags1&MEM_Null)!=0
- && (flags3&MEM_Null)!=0
- && (flags3&MEM_Cleared)==0
+ assert( (u.al.flags1 & MEM_Cleared)==0 );
+ if( (u.al.flags1&MEM_Null)!=0
+ && (u.al.flags3&MEM_Null)!=0
+ && (u.al.flags3&MEM_Cleared)==0
){
- res = 0; /* Results are equal */
+ u.al.res = 0; /* Results are equal */
}else{
- res = 1; /* Results are not equal */
+ u.al.res = 1; /* Results are not equal */
}
}else{
/* SQLITE_NULLEQ is clear and at least one operand is NULL,
** then the result is always NULL.
** The jump is taken if the SQLITE_JUMPIFNULL bit is set.
*/
- if( pOp->p5 & SQLITE_STOREP2 ){
+ if( pOp->p5 & SQLITE_JUMPIFNULL ){
+ pc = pOp->p2-1;
+ }else if( pOp->p5 & SQLITE_STOREP2 ){
pOut = &aMem[pOp->p2];
MemSetTypeFlag(pOut, MEM_Null);
REGISTER_TRACE(pOp->p2, pOut);
- }else{
- VdbeBranchTaken(2,3);
- if( pOp->p5 & SQLITE_JUMPIFNULL ){
- goto jump_to_p2;
- }
}
break;
}
}else{
/* Neither operand is NULL. Do a comparison. */
- affinity = pOp->p5 & SQLITE_AFF_MASK;
- if( affinity>=SQLITE_AFF_NUMERIC ){
- if( (pIn1->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
- applyNumericAffinity(pIn1,0);
- }
- if( (pIn3->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
- applyNumericAffinity(pIn3,0);
- }
- }else if( affinity==SQLITE_AFF_TEXT ){
- if( (pIn1->flags & MEM_Str)==0 && (pIn1->flags & (MEM_Int|MEM_Real))!=0 ){
- testcase( pIn1->flags & MEM_Int );
- testcase( pIn1->flags & MEM_Real );
- sqlite3VdbeMemStringify(pIn1, encoding, 1);
- testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) );
- flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask);
- }
- if( (pIn3->flags & MEM_Str)==0 && (pIn3->flags & (MEM_Int|MEM_Real))!=0 ){
- testcase( pIn3->flags & MEM_Int );
- testcase( pIn3->flags & MEM_Real );
- sqlite3VdbeMemStringify(pIn3, encoding, 1);
- testcase( (flags3&MEM_Dyn) != (pIn3->flags&MEM_Dyn) );
- flags3 = (pIn3->flags & ~MEM_TypeMask) | (flags3 & MEM_TypeMask);
- }
+ u.al.affinity = pOp->p5 & SQLITE_AFF_MASK;
+ if( u.al.affinity ){
+ applyAffinity(pIn1, u.al.affinity, encoding);
+ applyAffinity(pIn3, u.al.affinity, encoding);
+ if( db->mallocFailed ) goto no_mem;
}
+
assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 );
- if( pIn1->flags & MEM_Zero ){
- sqlite3VdbeMemExpandBlob(pIn1);
- flags1 &= ~MEM_Zero;
- }
- if( pIn3->flags & MEM_Zero ){
- sqlite3VdbeMemExpandBlob(pIn3);
- flags3 &= ~MEM_Zero;
- }
- if( db->mallocFailed ) goto no_mem;
- res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl);
+ ExpandBlob(pIn1);
+ ExpandBlob(pIn3);
+ u.al.res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl);
}
switch( pOp->opcode ){
- case OP_Eq: res = res==0; break;
- case OP_Ne: res = res!=0; break;
- case OP_Lt: res = res<0; break;
- case OP_Le: res = res<=0; break;
- case OP_Gt: res = res>0; break;
- default: res = res>=0; break;
+ case OP_Eq: u.al.res = u.al.res==0; break;
+ case OP_Ne: u.al.res = u.al.res!=0; break;
+ case OP_Lt: u.al.res = u.al.res<0; break;
+ case OP_Le: u.al.res = u.al.res<=0; break;
+ case OP_Gt: u.al.res = u.al.res>0; break;
+ default: u.al.res = u.al.res>=0; break;
}
- /* Undo any changes made by applyAffinity() to the input registers. */
- assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) );
- pIn1->flags = flags1;
- assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) );
- pIn3->flags = flags3;
-
if( pOp->p5 & SQLITE_STOREP2 ){
pOut = &aMem[pOp->p2];
memAboutToChange(p, pOut);
MemSetTypeFlag(pOut, MEM_Int);
- pOut->u.i = res;
+ pOut->u.i = u.al.res;
REGISTER_TRACE(pOp->p2, pOut);
- }else{
- VdbeBranchTaken(res!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
- if( res ){
- goto jump_to_p2;
- }
+ }else if( u.al.res ){
+ pc = pOp->p2-1;
}
+
+ /* Undo any changes made by applyAffinity() to the input registers. */
+ pIn1->flags = (pIn1->flags&~MEM_TypeMask) | (u.al.flags1&MEM_TypeMask);
+ pIn3->flags = (pIn3->flags&~MEM_TypeMask) | (u.al.flags3&MEM_TypeMask);
break;
}
@@ -74948,7 +68538,6 @@ case OP_Permutation: {
}
/* Opcode: Compare P1 P2 P3 P4 P5
-** Synopsis: r[P1@P3] <-> r[P2@P3]
**
** Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this
** vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of
@@ -74968,6 +68557,7 @@ case OP_Permutation: {
** and strings are less than blobs.
*/
case OP_Compare: {
+#if 0 /* local variables moved into u.am */
int n;
int i;
int p1;
@@ -74976,37 +68566,38 @@ case OP_Compare: {
int idx;
CollSeq *pColl; /* Collating sequence to use on this term */
int bRev; /* True for DESCENDING sort order */
+#endif /* local variables moved into u.am */
if( (pOp->p5 & OPFLAG_PERMUTE)==0 ) aPermute = 0;
- n = pOp->p3;
- pKeyInfo = pOp->p4.pKeyInfo;
- assert( n>0 );
- assert( pKeyInfo!=0 );
- p1 = pOp->p1;
- p2 = pOp->p2;
+ u.am.n = pOp->p3;
+ u.am.pKeyInfo = pOp->p4.pKeyInfo;
+ assert( u.am.n>0 );
+ assert( u.am.pKeyInfo!=0 );
+ u.am.p1 = pOp->p1;
+ u.am.p2 = pOp->p2;
#if SQLITE_DEBUG
if( aPermute ){
int k, mx = 0;
- for(k=0; k<n; k++) if( aPermute[k]>mx ) mx = aPermute[k];
- assert( p1>0 && p1+mx<=(p->nMem-p->nCursor)+1 );
- assert( p2>0 && p2+mx<=(p->nMem-p->nCursor)+1 );
+ for(k=0; k<u.am.n; k++) if( aPermute[k]>mx ) mx = aPermute[k];
+ assert( u.am.p1>0 && u.am.p1+mx<=(p->nMem-p->nCursor)+1 );
+ assert( u.am.p2>0 && u.am.p2+mx<=(p->nMem-p->nCursor)+1 );
}else{
- assert( p1>0 && p1+n<=(p->nMem-p->nCursor)+1 );
- assert( p2>0 && p2+n<=(p->nMem-p->nCursor)+1 );
+ assert( u.am.p1>0 && u.am.p1+u.am.n<=(p->nMem-p->nCursor)+1 );
+ assert( u.am.p2>0 && u.am.p2+u.am.n<=(p->nMem-p->nCursor)+1 );
}
#endif /* SQLITE_DEBUG */
- for(i=0; i<n; i++){
- idx = aPermute ? aPermute[i] : i;
- assert( memIsValid(&aMem[p1+idx]) );
- assert( memIsValid(&aMem[p2+idx]) );
- REGISTER_TRACE(p1+idx, &aMem[p1+idx]);
- REGISTER_TRACE(p2+idx, &aMem[p2+idx]);
- assert( i<pKeyInfo->nField );
- pColl = pKeyInfo->aColl[i];
- bRev = pKeyInfo->aSortOrder[i];
- iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl);
+ for(u.am.i=0; u.am.i<u.am.n; u.am.i++){
+ u.am.idx = aPermute ? aPermute[u.am.i] : u.am.i;
+ assert( memIsValid(&aMem[u.am.p1+u.am.idx]) );
+ assert( memIsValid(&aMem[u.am.p2+u.am.idx]) );
+ REGISTER_TRACE(u.am.p1+u.am.idx, &aMem[u.am.p1+u.am.idx]);
+ REGISTER_TRACE(u.am.p2+u.am.idx, &aMem[u.am.p2+u.am.idx]);
+ assert( u.am.i<u.am.pKeyInfo->nField );
+ u.am.pColl = u.am.pKeyInfo->aColl[u.am.i];
+ u.am.bRev = u.am.pKeyInfo->aSortOrder[u.am.i];
+ iCompare = sqlite3MemCompare(&aMem[u.am.p1+u.am.idx], &aMem[u.am.p2+u.am.idx], u.am.pColl);
if( iCompare ){
- if( bRev ) iCompare = -iCompare;
+ if( u.am.bRev ) iCompare = -iCompare;
break;
}
}
@@ -75022,11 +68613,11 @@ case OP_Compare: {
*/
case OP_Jump: { /* jump */
if( iCompare<0 ){
- VdbeBranchTaken(0,3); pOp = &aOp[pOp->p1 - 1];
+ pc = pOp->p1 - 1;
}else if( iCompare==0 ){
- VdbeBranchTaken(1,3); pOp = &aOp[pOp->p2 - 1];
+ pc = pOp->p2 - 1;
}else{
- VdbeBranchTaken(2,3); pOp = &aOp[pOp->p3 - 1];
+ pc = pOp->p3 - 1;
}
break;
}
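
Editorial note, not part of the patch: OP_Compare ranks two register vectors column by column (honouring per-column sort order) and OP_Jump then takes a three-way branch on the outcome. A standalone, integers-only sketch of that pairing; the names are hypothetical, and real comparisons also involve collations and cross-type ordering.

/* Compare key vectors a[0..n-1] and b[0..n-1]; desc[i] flags DESC columns.
** Returns <0, 0 or >0, the value a three-way branch would dispatch on. */
static int compare_keys(const long long *a, const long long *b,
                        const int *desc, int n){
  int i;
  for(i=0; i<n; i++){
    int c = (a[i]>b[i]) - (a[i]<b[i]);
    if( c ) return desc[i] ? -c : c;
  }
  return 0;
}
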
@@ -75053,33 +68644,35 @@ case OP_Jump: { /* jump */
*/
case OP_And: /* same as TK_AND, in1, in2, out3 */
case OP_Or: { /* same as TK_OR, in1, in2, out3 */
+#if 0 /* local variables moved into u.an */
int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+#endif /* local variables moved into u.an */
pIn1 = &aMem[pOp->p1];
if( pIn1->flags & MEM_Null ){
- v1 = 2;
+ u.an.v1 = 2;
}else{
- v1 = sqlite3VdbeIntValue(pIn1)!=0;
+ u.an.v1 = sqlite3VdbeIntValue(pIn1)!=0;
}
pIn2 = &aMem[pOp->p2];
if( pIn2->flags & MEM_Null ){
- v2 = 2;
+ u.an.v2 = 2;
}else{
- v2 = sqlite3VdbeIntValue(pIn2)!=0;
+ u.an.v2 = sqlite3VdbeIntValue(pIn2)!=0;
}
if( pOp->opcode==OP_And ){
static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 };
- v1 = and_logic[v1*3+v2];
+ u.an.v1 = and_logic[u.an.v1*3+u.an.v2];
}else{
static const unsigned char or_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 };
- v1 = or_logic[v1*3+v2];
+ u.an.v1 = or_logic[u.an.v1*3+u.an.v2];
}
pOut = &aMem[pOp->p3];
- if( v1==2 ){
+ if( u.an.v1==2 ){
MemSetTypeFlag(pOut, MEM_Null);
}else{
- pOut->u.i = v1;
+ pOut->u.i = u.an.v1;
MemSetTypeFlag(pOut, MEM_Int);
}
break;
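
Editorial note, not part of the patch: the and_logic/or_logic tables above encode SQL three-valued logic with 0=false, 1=true, 2=NULL. The same tables evaluated outside the VM, as a quick sanity check:

#include <stdio.h>

/* 0 = false, 1 = true, 2 = NULL; same layout as and_logic/or_logic above. */
static const unsigned char sql_and[] = { 0, 0, 0,  0, 1, 2,  0, 2, 2 };
static const unsigned char sql_or[]  = { 0, 1, 2,  1, 1, 1,  2, 1, 2 };

int main(void){
  printf("NULL AND false = %d\n", sql_and[2*3+0]);  /* 0: false */
  printf("NULL OR  false = %d\n", sql_or[2*3+0]);   /* 2: NULL  */
  return 0;
}
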
@@ -75095,10 +68688,10 @@ case OP_Or: { /* same as TK_OR, in1, in2, out3 */
case OP_Not: { /* same as TK_NOT, in1, out2 */
pIn1 = &aMem[pOp->p1];
pOut = &aMem[pOp->p2];
- sqlite3VdbeMemSetNull(pOut);
- if( (pIn1->flags & MEM_Null)==0 ){
- pOut->flags = MEM_Int;
- pOut->u.i = !sqlite3VdbeIntValue(pIn1);
+ if( pIn1->flags & MEM_Null ){
+ sqlite3VdbeMemSetNull(pOut);
+ }else{
+ sqlite3VdbeMemSetInt64(pOut, !sqlite3VdbeIntValue(pIn1));
}
break;
}
@@ -75113,30 +68706,23 @@ case OP_Not: { /* same as TK_NOT, in1, out2 */
case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */
pIn1 = &aMem[pOp->p1];
pOut = &aMem[pOp->p2];
- sqlite3VdbeMemSetNull(pOut);
- if( (pIn1->flags & MEM_Null)==0 ){
- pOut->flags = MEM_Int;
- pOut->u.i = ~sqlite3VdbeIntValue(pIn1);
+ if( pIn1->flags & MEM_Null ){
+ sqlite3VdbeMemSetNull(pOut);
+ }else{
+ sqlite3VdbeMemSetInt64(pOut, ~sqlite3VdbeIntValue(pIn1));
}
break;
}
/* Opcode: Once P1 P2 * * *
**
-** Check the "once" flag number P1. If it is set, jump to instruction P2.
-** Otherwise, set the flag and fall through to the next instruction.
-** In other words, this opcode causes all following opcodes up through P2
-** (but not including P2) to run just once and to be skipped on subsequent
-** times through the loop.
-**
-** All "once" flags are initially cleared whenever a prepared statement
-** first begins to run.
+** Check if OP_Once flag P1 is set. If so, jump to instruction P2. Otherwise,
+** set the flag and fall through to the next instruction.
*/
case OP_Once: { /* jump */
assert( pOp->p1<p->nOnceFlag );
- VdbeBranchTaken(p->aOnceFlag[pOp->p1]!=0, 2);
if( p->aOnceFlag[pOp->p1] ){
- goto jump_to_p2;
+ pc = pOp->p2-1;
}else{
p->aOnceFlag[pOp->p1] = 1;
}
@@ -75147,31 +68733,32 @@ case OP_Once: { /* jump */
**
** Jump to P2 if the value in register P1 is true. The value
** is considered true if it is numeric and non-zero. If the value
-** in P1 is NULL then take the jump if and only if P3 is non-zero.
+** in P1 is NULL then take the jump if P3 is non-zero.
*/
/* Opcode: IfNot P1 P2 P3 * *
**
** Jump to P2 if the value in register P1 is False. The value
** is considered false if it has a numeric value of zero. If the value
-** in P1 is NULL then take the jump if and only if P3 is non-zero.
+** in P1 is NULL then take the jump if P3 is zero.
*/
case OP_If: /* jump, in1 */
case OP_IfNot: { /* jump, in1 */
+#if 0 /* local variables moved into u.ao */
int c;
+#endif /* local variables moved into u.ao */
pIn1 = &aMem[pOp->p1];
if( pIn1->flags & MEM_Null ){
- c = pOp->p3;
+ u.ao.c = pOp->p3;
}else{
#ifdef SQLITE_OMIT_FLOATING_POINT
- c = sqlite3VdbeIntValue(pIn1)!=0;
+ u.ao.c = sqlite3VdbeIntValue(pIn1)!=0;
#else
- c = sqlite3VdbeRealValue(pIn1)!=0.0;
+ u.ao.c = sqlite3VdbeRealValue(pIn1)!=0.0;
#endif
- if( pOp->opcode==OP_IfNot ) c = !c;
+ if( pOp->opcode==OP_IfNot ) u.ao.c = !u.ao.c;
}
- VdbeBranchTaken(c!=0, 2);
- if( c ){
- goto jump_to_p2;
+ if( u.ao.c ){
+ pc = pOp->p2-1;
}
break;
}
@@ -75183,9 +68770,8 @@ case OP_IfNot: { /* jump, in1 */
*/
case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */
pIn1 = &aMem[pOp->p1];
- VdbeBranchTaken( (pIn1->flags & MEM_Null)!=0, 2);
if( (pIn1->flags & MEM_Null)!=0 ){
- goto jump_to_p2;
+ pc = pOp->p2 - 1;
}
break;
}
@@ -75197,9 +68783,8 @@ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */
*/
case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */
pIn1 = &aMem[pOp->p1];
- VdbeBranchTaken( (pIn1->flags & MEM_Null)==0, 2);
if( (pIn1->flags & MEM_Null)==0 ){
- goto jump_to_p2;
+ pc = pOp->p2 - 1;
}
break;
}
@@ -75230,10 +68815,12 @@ case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */
** skipped for length() and all content loading can be skipped for typeof().
*/
case OP_Column: {
+#if 0 /* local variables moved into u.ap */
i64 payloadSize64; /* Number of bytes in the record */
int p2; /* column number to retrieve */
VdbeCursor *pC; /* The VDBE cursor */
BtCursor *pCrsr; /* The BTree cursor */
+ u32 *aType; /* aType[i] holds the numeric type of the i-th column */
u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */
int len; /* The length of the serialized data for the column */
int i; /* Loop counter */
@@ -75246,73 +68833,87 @@ case OP_Column: {
u32 szField; /* Number of bytes in the content of a field */
u32 avail; /* Number of bytes of available data */
u32 t; /* A type code from the record header */
- u16 fx; /* pDest->flags value */
Mem *pReg; /* PseudoTable input register */
+#endif /* local variables moved into u.ap */
- p2 = pOp->p2;
+ u.ap.p2 = pOp->p2;
assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
- pDest = &aMem[pOp->p3];
- memAboutToChange(p, pDest);
+ u.ap.pDest = &aMem[pOp->p3];
+ memAboutToChange(p, u.ap.pDest);
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( p2<pC->nField );
- aOffset = pC->aOffset;
+ u.ap.pC = p->apCsr[pOp->p1];
+ assert( u.ap.pC!=0 );
+ assert( u.ap.p2<u.ap.pC->nField );
+ u.ap.aType = u.ap.pC->aType;
+ u.ap.aOffset = u.ap.aType + u.ap.pC->nField;
#ifndef SQLITE_OMIT_VIRTUALTABLE
- assert( pC->pVtabCursor==0 ); /* OP_Column never called on virtual table */
+ assert( u.ap.pC->pVtabCursor==0 ); /* OP_Column never called on virtual table */
#endif
- pCrsr = pC->pCursor;
- assert( pCrsr!=0 || pC->pseudoTableReg>0 ); /* pCrsr NULL on PseudoTables */
- assert( pCrsr!=0 || pC->nullRow ); /* pC->nullRow on PseudoTables */
+ u.ap.pCrsr = u.ap.pC->pCursor;
+ assert( u.ap.pCrsr!=0 || u.ap.pC->pseudoTableReg>0 ); /* u.ap.pCrsr NULL on PseudoTables */
+ assert( u.ap.pCrsr!=0 || u.ap.pC->nullRow ); /* u.ap.pC->nullRow on PseudoTables */
/* If the cursor cache is stale, bring it up-to-date */
- rc = sqlite3VdbeCursorMoveto(pC);
+ rc = sqlite3VdbeCursorMoveto(u.ap.pC);
if( rc ) goto abort_due_to_error;
- if( pC->cacheStatus!=p->cacheCtr ){
- if( pC->nullRow ){
- if( pCrsr==0 ){
- assert( pC->pseudoTableReg>0 );
- pReg = &aMem[pC->pseudoTableReg];
- assert( pReg->flags & MEM_Blob );
- assert( memIsValid(pReg) );
- pC->payloadSize = pC->szRow = avail = pReg->n;
- pC->aRow = (u8*)pReg->z;
+ if( u.ap.pC->cacheStatus!=p->cacheCtr || (pOp->p5&OPFLAG_CLEARCACHE)!=0 ){
+ if( u.ap.pC->nullRow ){
+ if( u.ap.pCrsr==0 ){
+ assert( u.ap.pC->pseudoTableReg>0 );
+ u.ap.pReg = &aMem[u.ap.pC->pseudoTableReg];
+ if( u.ap.pC->multiPseudo ){
+ sqlite3VdbeMemShallowCopy(u.ap.pDest, u.ap.pReg+u.ap.p2, MEM_Ephem);
+ Deephemeralize(u.ap.pDest);
+ goto op_column_out;
+ }
+ assert( u.ap.pReg->flags & MEM_Blob );
+ assert( memIsValid(u.ap.pReg) );
+ u.ap.pC->payloadSize = u.ap.pC->szRow = u.ap.avail = u.ap.pReg->n;
+ u.ap.pC->aRow = (u8*)u.ap.pReg->z;
}else{
- sqlite3VdbeMemSetNull(pDest);
+ MemSetTypeFlag(u.ap.pDest, MEM_Null);
goto op_column_out;
}
}else{
- assert( pCrsr );
- if( pC->isTable==0 ){
- assert( sqlite3BtreeCursorIsValid(pCrsr) );
- VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &payloadSize64);
+ assert( u.ap.pCrsr );
+ if( u.ap.pC->isTable==0 ){
+ assert( sqlite3BtreeCursorIsValid(u.ap.pCrsr) );
+ VVA_ONLY(rc =) sqlite3BtreeKeySize(u.ap.pCrsr, &u.ap.payloadSize64);
assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */
/* sqlite3BtreeParseCellPtr() uses getVarint32() to extract the
- ** payload size, so it is impossible for payloadSize64 to be
+ ** payload size, so it is impossible for u.ap.payloadSize64 to be
** larger than 32 bits. */
- assert( (payloadSize64 & SQLITE_MAX_U32)==(u64)payloadSize64 );
- pC->aRow = sqlite3BtreeKeyFetch(pCrsr, &avail);
- pC->payloadSize = (u32)payloadSize64;
+ assert( (u.ap.payloadSize64 & SQLITE_MAX_U32)==(u64)u.ap.payloadSize64 );
+ u.ap.pC->aRow = sqlite3BtreeKeyFetch(u.ap.pCrsr, &u.ap.avail);
+ u.ap.pC->payloadSize = (u32)u.ap.payloadSize64;
}else{
- assert( sqlite3BtreeCursorIsValid(pCrsr) );
- VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &pC->payloadSize);
+ assert( sqlite3BtreeCursorIsValid(u.ap.pCrsr) );
+ VVA_ONLY(rc =) sqlite3BtreeDataSize(u.ap.pCrsr, &u.ap.pC->payloadSize);
assert( rc==SQLITE_OK ); /* DataSize() cannot fail */
- pC->aRow = sqlite3BtreeDataFetch(pCrsr, &avail);
+ u.ap.pC->aRow = sqlite3BtreeDataFetch(u.ap.pCrsr, &u.ap.avail);
}
- assert( avail<=65536 ); /* Maximum page size is 64KiB */
- if( pC->payloadSize <= (u32)avail ){
- pC->szRow = pC->payloadSize;
+ assert( u.ap.avail<=65536 ); /* Maximum page size is 64KiB */
+ if( u.ap.pC->payloadSize <= (u32)u.ap.avail ){
+ u.ap.pC->szRow = u.ap.pC->payloadSize;
}else{
- pC->szRow = avail;
+ u.ap.pC->szRow = u.ap.avail;
}
- if( pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ if( u.ap.pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
goto too_big;
}
}
- pC->cacheStatus = p->cacheCtr;
- pC->iHdrOffset = getVarint32(pC->aRow, offset);
- pC->nHdrParsed = 0;
- aOffset[0] = offset;
+ u.ap.pC->cacheStatus = p->cacheCtr;
+ u.ap.pC->iHdrOffset = getVarint32(u.ap.pC->aRow, u.ap.offset);
+ u.ap.pC->nHdrParsed = 0;
+ u.ap.aOffset[0] = u.ap.offset;
+ if( u.ap.avail<u.ap.offset ){
+ /* u.ap.pC->aRow does not have to hold the entire row, but it does at least
+ ** need to cover the header of the record. If u.ap.pC->aRow does not contain
+ ** the complete header, then set it to zero, forcing the header to be
+ ** dynamically allocated. */
+ u.ap.pC->aRow = 0;
+ u.ap.pC->szRow = 0;
+ }
/* Make sure a corrupt database has not given us an oversize header.
** Do this now to avoid an oversize memory allocation.
@@ -75323,169 +68924,150 @@ case OP_Column: {
** 3-byte type for each of the maximum of 32768 columns plus three
** extra bytes for the header length itself. 32768*3 + 3 = 98307.
*/
- if( offset > 98307 || offset > pC->payloadSize ){
+ if( u.ap.offset > 98307 || u.ap.offset > u.ap.pC->payloadSize ){
rc = SQLITE_CORRUPT_BKPT;
goto op_column_error;
}
-
- if( avail<offset ){
- /* pC->aRow does not have to hold the entire row, but it does at least
- ** need to cover the header of the record. If pC->aRow does not contain
- ** the complete header, then set it to zero, forcing the header to be
- ** dynamically allocated. */
- pC->aRow = 0;
- pC->szRow = 0;
- }
-
- /* The following goto is an optimization. It can be omitted and
- ** everything will still work. But OP_Column is measurably faster
- ** by skipping the subsequent conditional, which is always true.
- */
- assert( pC->nHdrParsed<=p2 ); /* Conditional skipped */
- goto op_column_read_header;
}
- /* Make sure at least the first p2+1 entries of the header have been
- ** parsed and valid information is in aOffset[] and pC->aType[].
+ /* Make sure at least the first u.ap.p2+1 entries of the header have been
+ ** parsed and valid information is in u.ap.aOffset[] and u.ap.aType[].
*/
- if( pC->nHdrParsed<=p2 ){
+ if( u.ap.pC->nHdrParsed<=u.ap.p2 ){
/* If there is more header available for parsing in the record, try
- ** to extract additional fields up through the p2+1-th field
+ ** to extract additional fields up through the u.ap.p2+1-th field
*/
- op_column_read_header:
- if( pC->iHdrOffset<aOffset[0] ){
- /* Make sure zData points to enough of the record to cover the header. */
- if( pC->aRow==0 ){
- memset(&sMem, 0, sizeof(sMem));
- rc = sqlite3VdbeMemFromBtree(pCrsr, 0, aOffset[0],
- !pC->isTable, &sMem);
+ if( u.ap.pC->iHdrOffset<u.ap.aOffset[0] ){
+ /* Make sure u.ap.zData points to enough of the record to cover the header. */
+ if( u.ap.pC->aRow==0 ){
+ memset(&u.ap.sMem, 0, sizeof(u.ap.sMem));
+ rc = sqlite3VdbeMemFromBtree(u.ap.pCrsr, 0, u.ap.aOffset[0],
+ !u.ap.pC->isTable, &u.ap.sMem);
if( rc!=SQLITE_OK ){
goto op_column_error;
}
- zData = (u8*)sMem.z;
+ u.ap.zData = (u8*)u.ap.sMem.z;
}else{
- zData = pC->aRow;
+ u.ap.zData = u.ap.pC->aRow;
}
-
- /* Fill in pC->aType[i] and aOffset[i] values through the p2-th field. */
- i = pC->nHdrParsed;
- offset = aOffset[i];
- zHdr = zData + pC->iHdrOffset;
- zEndHdr = zData + aOffset[0];
- assert( i<=p2 && zHdr<zEndHdr );
+
+ /* Fill in u.ap.aType[u.ap.i] and u.ap.aOffset[u.ap.i] values through the u.ap.p2-th field. */
+ u.ap.i = u.ap.pC->nHdrParsed;
+ u.ap.offset = u.ap.aOffset[u.ap.i];
+ u.ap.zHdr = u.ap.zData + u.ap.pC->iHdrOffset;
+ u.ap.zEndHdr = u.ap.zData + u.ap.aOffset[0];
+ assert( u.ap.i<=u.ap.p2 && u.ap.zHdr<u.ap.zEndHdr );
do{
- if( zHdr[0]<0x80 ){
- t = zHdr[0];
- zHdr++;
+ if( u.ap.zHdr[0]<0x80 ){
+ u.ap.t = u.ap.zHdr[0];
+ u.ap.zHdr++;
}else{
- zHdr += sqlite3GetVarint32(zHdr, &t);
+ u.ap.zHdr += sqlite3GetVarint32(u.ap.zHdr, &u.ap.t);
}
- pC->aType[i] = t;
- szField = sqlite3VdbeSerialTypeLen(t);
- offset += szField;
- if( offset<szField ){ /* True if offset overflows */
- zHdr = &zEndHdr[1]; /* Forces SQLITE_CORRUPT return below */
+ u.ap.aType[u.ap.i] = u.ap.t;
+ u.ap.szField = sqlite3VdbeSerialTypeLen(u.ap.t);
+ u.ap.offset += u.ap.szField;
+ if( u.ap.offset<u.ap.szField ){ /* True if u.ap.offset overflows */
+ u.ap.zHdr = &u.ap.zEndHdr[1]; /* Forces SQLITE_CORRUPT return below */
break;
}
- i++;
- aOffset[i] = offset;
- }while( i<=p2 && zHdr<zEndHdr );
- pC->nHdrParsed = i;
- pC->iHdrOffset = (u32)(zHdr - zData);
- if( pC->aRow==0 ){
- sqlite3VdbeMemRelease(&sMem);
- sMem.flags = MEM_Null;
- }
-
- /* The record is corrupt if any of the following are true:
- ** (1) the bytes of the header extend past the declared header size
- ** (zHdr>zEndHdr)
- ** (2) the entire header was used but not all data was used
- ** (zHdr==zEndHdr && offset!=pC->payloadSize)
- ** (3) the end of the data extends beyond the end of the record.
- ** (offset > pC->payloadSize)
+ u.ap.i++;
+ u.ap.aOffset[u.ap.i] = u.ap.offset;
+ }while( u.ap.i<=u.ap.p2 && u.ap.zHdr<u.ap.zEndHdr );
+ u.ap.pC->nHdrParsed = u.ap.i;
+ u.ap.pC->iHdrOffset = (u32)(u.ap.zHdr - u.ap.zData);
+ if( u.ap.pC->aRow==0 ){
+ sqlite3VdbeMemRelease(&u.ap.sMem);
+ u.ap.sMem.flags = MEM_Null;
+ }
+
+ /* If we have read more header data than was contained in the header,
+ ** or if the end of the last field appears to be past the end of the
+ ** record, or if the end of the last field appears to be before the end
+ ** of the record (when all fields present), then we must be dealing
+ ** with a corrupt database.
*/
- if( (zHdr>=zEndHdr && (zHdr>zEndHdr || offset!=pC->payloadSize))
- || (offset > pC->payloadSize)
+ if( (u.ap.zHdr > u.ap.zEndHdr)
+ || (u.ap.offset > u.ap.pC->payloadSize)
+ || (u.ap.zHdr==u.ap.zEndHdr && u.ap.offset!=u.ap.pC->payloadSize)
){
rc = SQLITE_CORRUPT_BKPT;
goto op_column_error;
}
}
- /* If after trying to extract new entries from the header, nHdrParsed is
- ** still not up to p2, that means that the record has fewer than p2
+ /* If after trying to extract new entries from the header, nHdrParsed is
+ ** still not up to u.ap.p2, that means that the record has fewer than u.ap.p2
** columns. So the result will be either the default value or a NULL.
*/
- if( pC->nHdrParsed<=p2 ){
+ if( u.ap.pC->nHdrParsed<=u.ap.p2 ){
if( pOp->p4type==P4_MEM ){
- sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static);
+ sqlite3VdbeMemShallowCopy(u.ap.pDest, pOp->p4.pMem, MEM_Static);
}else{
- sqlite3VdbeMemSetNull(pDest);
+ MemSetTypeFlag(u.ap.pDest, MEM_Null);
}
goto op_column_out;
}
}
- /* Extract the content for the p2+1-th column. Control can only
- ** reach this point if aOffset[p2], aOffset[p2+1], and pC->aType[p2] are
+ /* Extract the content for the u.ap.p2+1-th column. Control can only
+ ** reach this point if u.ap.aOffset[u.ap.p2], u.ap.aOffset[u.ap.p2+1], and u.ap.aType[u.ap.p2] are
** all valid.
*/
- assert( p2<pC->nHdrParsed );
+ assert( u.ap.p2<u.ap.pC->nHdrParsed );
assert( rc==SQLITE_OK );
- assert( sqlite3VdbeCheckMemInvariants(pDest) );
- if( VdbeMemDynamic(pDest) ) sqlite3VdbeMemSetNull(pDest);
- t = pC->aType[p2];
- if( pC->szRow>=aOffset[p2+1] ){
+ if( u.ap.pC->szRow>=u.ap.aOffset[u.ap.p2+1] ){
/* This is the common case where the desired content fits on the original
** page - where the content is not on an overflow page */
- sqlite3VdbeSerialGet(pC->aRow+aOffset[p2], t, pDest);
+ VdbeMemRelease(u.ap.pDest);
+ sqlite3VdbeSerialGet(u.ap.pC->aRow+u.ap.aOffset[u.ap.p2], u.ap.aType[u.ap.p2], u.ap.pDest);
}else{
/* This branch happens only when content is on overflow pages */
+ u.ap.t = u.ap.aType[u.ap.p2];
if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0
- && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0))
- || (len = sqlite3VdbeSerialTypeLen(t))==0
+ && ((u.ap.t>=12 && (u.ap.t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0))
+ || (u.ap.len = sqlite3VdbeSerialTypeLen(u.ap.t))==0
){
- /* Content is irrelevant for
- ** 1. the typeof() function,
- ** 2. the length(X) function if X is a blob, and
- ** 3. if the content length is zero.
- ** So we might as well use bogus content rather than reading
- ** content from disk. NULL will work for the value for strings
- ** and blobs and whatever is in the payloadSize64 variable
- ** will work for everything else. */
- sqlite3VdbeSerialGet(t<=13 ? (u8*)&payloadSize64 : 0, t, pDest);
- }else{
- rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, !pC->isTable,
- pDest);
+ /* Content is irrelevant for the typeof() function and for
+ ** the length(X) function if X is a blob. So we might as well use
+ ** bogus content rather than reading content from disk. NULL works
+ ** for text and blob and whatever is in the u.ap.payloadSize64 variable
+ ** will work for everything else. Content is also irrelevant if
+ ** the content length is 0. */
+ u.ap.zData = u.ap.t<=13 ? (u8*)&u.ap.payloadSize64 : 0;
+ u.ap.sMem.zMalloc = 0;
+ }else{
+ memset(&u.ap.sMem, 0, sizeof(u.ap.sMem));
+ sqlite3VdbeMemMove(&u.ap.sMem, u.ap.pDest);
+ rc = sqlite3VdbeMemFromBtree(u.ap.pCrsr, u.ap.aOffset[u.ap.p2], u.ap.len, !u.ap.pC->isTable,
+ &u.ap.sMem);
if( rc!=SQLITE_OK ){
goto op_column_error;
}
- sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
- pDest->flags &= ~MEM_Ephem;
+ u.ap.zData = (u8*)u.ap.sMem.z;
+ }
+ sqlite3VdbeSerialGet(u.ap.zData, u.ap.t, u.ap.pDest);
+ /* If we dynamically allocated space to hold the data (in the
+ ** sqlite3VdbeMemFromBtree() call above) then transfer control of that
+ ** dynamically allocated space over to the u.ap.pDest structure.
+ ** This prevents a memory copy. */
+ if( u.ap.sMem.zMalloc ){
+ assert( u.ap.sMem.z==u.ap.sMem.zMalloc );
+ assert( !(u.ap.pDest->flags & MEM_Dyn) );
+ assert( !(u.ap.pDest->flags & (MEM_Blob|MEM_Str)) || u.ap.pDest->z==u.ap.sMem.z );
+ u.ap.pDest->flags &= ~(MEM_Ephem|MEM_Static);
+ u.ap.pDest->flags |= MEM_Term;
+ u.ap.pDest->z = u.ap.sMem.z;
+ u.ap.pDest->zMalloc = u.ap.sMem.zMalloc;
}
}
- pDest->enc = encoding;
+ u.ap.pDest->enc = encoding;
op_column_out:
- /* If the column value is an ephemeral string, go ahead and persist
- ** that string in case the cursor moves before the column value is
- ** used. The following code does the equivalent of Deephemeralize()
- ** but does it faster. */
- if( (pDest->flags & MEM_Ephem)!=0 && pDest->z ){
- fx = pDest->flags & (MEM_Str|MEM_Blob);
- assert( fx!=0 );
- zData = (const u8*)pDest->z;
- len = pDest->n;
- if( sqlite3VdbeMemClearAndResize(pDest, len+2) ) goto no_mem;
- memcpy(pDest->z, zData, len);
- pDest->z[len] = 0;
- pDest->z[len+1] = 0;
- pDest->flags = fx|MEM_Term;
- }
+ rc = sqlite3VdbeMemMakeWriteable(u.ap.pDest);
op_column_error:
- UPDATE_MAX_BLOBSIZE(pDest);
- REGISTER_TRACE(pOp->p3, pDest);
+ UPDATE_MAX_BLOBSIZE(u.ap.pDest);
+ REGISTER_TRACE(pOp->p3, u.ap.pDest);
break;
}
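
Editorial note, not part of the patch: OP_Column walks the record header, turning each serial-type varint into a content length so that column offsets can be accumulated. A standalone sketch of that mapping, following the documented record format (types 0-11 have fixed sizes, even types >= 12 are BLOBs of (t-12)/2 bytes, odd types >= 13 are text of (t-13)/2 bytes):

#include <stdint.h>

/* Content length in bytes for a record-format serial type (sketch of the
** mapping performed by sqlite3VdbeSerialTypeLen()). */
static uint32_t serial_type_len(uint32_t t){
  static const uint8_t aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 };
  if( t<12 ) return aSize[t];   /* NULL, 1..8 byte ints, float, constants */
  return (t-12)/2;              /* even: BLOB, odd: TEXT */
}
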
@@ -75499,17 +69081,20 @@ op_column_error:
** memory cell in the range.
*/
case OP_Affinity: {
+#if 0 /* local variables moved into u.aq */
const char *zAffinity; /* The affinity to be applied */
char cAff; /* A single character of affinity */
+#endif /* local variables moved into u.aq */
- zAffinity = pOp->p4.z;
- assert( zAffinity!=0 );
- assert( zAffinity[pOp->p2]==0 );
+ u.aq.zAffinity = pOp->p4.z;
+ assert( u.aq.zAffinity!=0 );
+ assert( u.aq.zAffinity[pOp->p2]==0 );
pIn1 = &aMem[pOp->p1];
- while( (cAff = *(zAffinity++))!=0 ){
+ while( (u.aq.cAff = *(u.aq.zAffinity++))!=0 ){
assert( pIn1 <= &p->aMem[(p->nMem-p->nCursor)] );
assert( memIsValid(pIn1) );
- applyAffinity(pIn1, cAff, encoding);
+ ExpandBlob(pIn1);
+ applyAffinity(pIn1, u.aq.cAff, encoding);
pIn1++;
}
break;
@@ -75529,15 +69114,16 @@ case OP_Affinity: {
** The mapping from character to affinity is given by the SQLITE_AFF_
** macros defined in sqliteInt.h.
**
-** If P4 is NULL then all index fields have the affinity BLOB.
+** If P4 is NULL then all index fields have the affinity NONE.
*/
case OP_MakeRecord: {
+#if 0 /* local variables moved into u.ar */
u8 *zNewRecord; /* A buffer to hold the data for the new record */
Mem *pRec; /* The new record */
u64 nData; /* Number of bytes of data space */
int nHdr; /* Number of bytes of header space */
i64 nByte; /* Data space required for this record */
- i64 nZero; /* Number of zero bytes at the end of the record */
+ int nZero; /* Number of zero bytes at the end of the record */
int nVarint; /* Number of bytes in a varint */
u32 serial_type; /* Type field */
Mem *pData0; /* First field to be combined into the record */
@@ -75545,126 +69131,102 @@ case OP_MakeRecord: {
int nField; /* Number of fields in the record */
char *zAffinity; /* The affinity string for the record */
int file_format; /* File format to use for encoding */
- int i; /* Space used in zNewRecord[] header */
- int j; /* Space used in zNewRecord[] content */
+ int i; /* Space used in zNewRecord[] */
int len; /* Length of a field */
+#endif /* local variables moved into u.ar */
/* Assuming the record contains N fields, the record format looks
** like this:
**
** ------------------------------------------------------------------------
- ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 |
+ ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 |
** ------------------------------------------------------------------------
**
** Data(0) is taken from register P1. Data(1) comes from register P1+1
- ** and so forth.
+ ** and so forth.
**
- ** Each type field is a varint representing the serial type of the
+ ** Each type field is a varint representing the serial type of the
** corresponding data element (see sqlite3VdbeSerialType()). The
** hdr-size field is also a varint which is the offset from the beginning
** of the record to data0.
*/
- nData = 0; /* Number of bytes of data space */
- nHdr = 0; /* Number of bytes of header space */
- nZero = 0; /* Number of zero bytes at the end of the record */
- nField = pOp->p1;
- zAffinity = pOp->p4.z;
- assert( nField>0 && pOp->p2>0 && pOp->p2+nField<=(p->nMem-p->nCursor)+1 );
- pData0 = &aMem[nField];
- nField = pOp->p2;
- pLast = &pData0[nField-1];
- file_format = p->minWriteFileFormat;
+ u.ar.nData = 0; /* Number of bytes of data space */
+ u.ar.nHdr = 0; /* Number of bytes of header space */
+ u.ar.nZero = 0; /* Number of zero bytes at the end of the record */
+ u.ar.nField = pOp->p1;
+ u.ar.zAffinity = pOp->p4.z;
+ assert( u.ar.nField>0 && pOp->p2>0 && pOp->p2+u.ar.nField<=(p->nMem-p->nCursor)+1 );
+ u.ar.pData0 = &aMem[u.ar.nField];
+ u.ar.nField = pOp->p2;
+ u.ar.pLast = &u.ar.pData0[u.ar.nField-1];
+ u.ar.file_format = p->minWriteFileFormat;
/* Identify the output register */
assert( pOp->p3<pOp->p1 || pOp->p3>=pOp->p1+pOp->p2 );
pOut = &aMem[pOp->p3];
memAboutToChange(p, pOut);
- /* Apply the requested affinity to all inputs
- */
- assert( pData0<=pLast );
- if( zAffinity ){
- pRec = pData0;
- do{
- applyAffinity(pRec++, *(zAffinity++), encoding);
- assert( zAffinity[0]==0 || pRec<=pLast );
- }while( zAffinity[0] );
- }
-
/* Loop through the elements that will make up the record to figure
** out how much space is required for the new record.
*/
- pRec = pLast;
- do{
- assert( memIsValid(pRec) );
- pRec->uTemp = serial_type = sqlite3VdbeSerialType(pRec, file_format);
- len = sqlite3VdbeSerialTypeLen(serial_type);
- if( pRec->flags & MEM_Zero ){
- if( nData ){
- if( sqlite3VdbeMemExpandBlob(pRec) ) goto no_mem;
- }else{
- nZero += pRec->u.nZero;
- len -= pRec->u.nZero;
- }
- }
- nData += len;
- testcase( serial_type==127 );
- testcase( serial_type==128 );
- nHdr += serial_type<=127 ? 1 : sqlite3VarintLen(serial_type);
- }while( (--pRec)>=pData0 );
-
- /* EVIDENCE-OF: R-22564-11647 The header begins with a single varint
- ** which determines the total number of bytes in the header. The varint
- ** value is the size of the header in bytes including the size varint
- ** itself. */
- testcase( nHdr==126 );
- testcase( nHdr==127 );
- if( nHdr<=126 ){
- /* The common case */
- nHdr += 1;
- }else{
- /* Rare case of a really large header */
- nVarint = sqlite3VarintLen(nHdr);
- nHdr += nVarint;
- if( nVarint<sqlite3VarintLen(nHdr) ) nHdr++;
- }
- nByte = nHdr+nData;
- if( nByte+nZero>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ for(u.ar.pRec=u.ar.pData0; u.ar.pRec<=u.ar.pLast; u.ar.pRec++){
+ assert( memIsValid(u.ar.pRec) );
+ if( u.ar.zAffinity ){
+ applyAffinity(u.ar.pRec, u.ar.zAffinity[u.ar.pRec-u.ar.pData0], encoding);
+ }
+ if( u.ar.pRec->flags&MEM_Zero && u.ar.pRec->n>0 ){
+ sqlite3VdbeMemExpandBlob(u.ar.pRec);
+ }
+ u.ar.serial_type = sqlite3VdbeSerialType(u.ar.pRec, u.ar.file_format);
+ u.ar.len = sqlite3VdbeSerialTypeLen(u.ar.serial_type);
+ u.ar.nData += u.ar.len;
+ u.ar.nHdr += sqlite3VarintLen(u.ar.serial_type);
+ if( u.ar.pRec->flags & MEM_Zero ){
+ /* Only pure zero-filled BLOBs can be input to this Opcode.
+ ** We do not allow blobs with a prefix and a zero-filled tail. */
+ u.ar.nZero += u.ar.pRec->u.nZero;
+ }else if( u.ar.len ){
+ u.ar.nZero = 0;
+ }
+ }
+
+ /* Add the initial header varint and total the size */
+ u.ar.nHdr += u.ar.nVarint = sqlite3VarintLen(u.ar.nHdr);
+ if( u.ar.nVarint<sqlite3VarintLen(u.ar.nHdr) ){
+ u.ar.nHdr++;
+ }
+ u.ar.nByte = u.ar.nHdr+u.ar.nData-u.ar.nZero;
+ if( u.ar.nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
goto too_big;
}
- /* Make sure the output register has a buffer large enough to store
+ /* Make sure the output register has a buffer large enough to store
** the new record. The output register (pOp->p3) is not allowed to
** be one of the input registers (because the following call to
- ** sqlite3VdbeMemClearAndResize() could clobber the value before it is used).
+ ** sqlite3VdbeMemGrow() could clobber the value before it is used).
*/
- if( sqlite3VdbeMemClearAndResize(pOut, (int)nByte) ){
+ if( sqlite3VdbeMemGrow(pOut, (int)u.ar.nByte, 0) ){
goto no_mem;
}
- zNewRecord = (u8 *)pOut->z;
+ u.ar.zNewRecord = (u8 *)pOut->z;
/* Write the record */
- i = putVarint32(zNewRecord, nHdr);
- j = nHdr;
- assert( pData0<=pLast );
- pRec = pData0;
- do{
- serial_type = pRec->uTemp;
- /* EVIDENCE-OF: R-06529-47362 Following the size varint are one or more
- ** additional varints, one per column. */
- i += putVarint32(&zNewRecord[i], serial_type); /* serial type */
- /* EVIDENCE-OF: R-64536-51728 The values for each column in the record
- ** immediately follow the header. */
- j += sqlite3VdbeSerialPut(&zNewRecord[j], pRec, serial_type); /* content */
- }while( (++pRec)<=pLast );
- assert( i==nHdr );
- assert( j==nByte );
+ u.ar.i = putVarint32(u.ar.zNewRecord, u.ar.nHdr);
+ for(u.ar.pRec=u.ar.pData0; u.ar.pRec<=u.ar.pLast; u.ar.pRec++){
+ u.ar.serial_type = sqlite3VdbeSerialType(u.ar.pRec, u.ar.file_format);
+ u.ar.i += putVarint32(&u.ar.zNewRecord[u.ar.i], u.ar.serial_type); /* serial type */
+ }
+ for(u.ar.pRec=u.ar.pData0; u.ar.pRec<=u.ar.pLast; u.ar.pRec++){ /* serial data */
+ u.ar.i += sqlite3VdbeSerialPut(&u.ar.zNewRecord[u.ar.i], (int)(u.ar.nByte-u.ar.i), u.ar.pRec,u.ar.file_format);
+ }
+ assert( u.ar.i==u.ar.nByte );
assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
- pOut->n = (int)nByte;
- pOut->flags = MEM_Blob;
- if( nZero ){
- pOut->u.nZero = nZero;
+ pOut->n = (int)u.ar.nByte;
+ pOut->flags = MEM_Blob | MEM_Dyn;
+ pOut->xDel = 0;
+ if( u.ar.nZero ){
+ pOut->u.nZero = u.ar.nZero;
pOut->flags |= MEM_Zero;
}
pOut->enc = SQLITE_UTF8; /* In case the blob is ever converted to text */
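
Editorial note, not part of the patch: the record assembled above is a header-size varint, one serial-type varint per column, then the column data. A standalone sketch of the variable-length integer encoding those header fields use for 32-bit values (most-significant 7-bit group first, high bit set on every byte except the last); the helper name is hypothetical:

#include <stdint.h>

/* Encode a 32-bit value as a record-header varint; returns bytes written. */
static int put_varint32(uint8_t *p, uint32_t v){
  uint8_t aGroup[5];
  int i, n = 0;
  do{
    aGroup[n++] = (uint8_t)(v & 0x7f);  /* collect 7-bit groups, low first */
    v >>= 7;
  }while( v );
  for(i=0; i<n; i++){                   /* emit most-significant group first */
    p[i] = aGroup[n-1-i] | (i<n-1 ? 0x80 : 0);
  }
  return n;                             /* 1..5 bytes for 32-bit input */
}
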
@@ -75680,16 +69242,16 @@ case OP_MakeRecord: {
** opened by cursor P1 in register P2
*/
#ifndef SQLITE_OMIT_BTREECOUNT
-case OP_Count: { /* out2 */
+case OP_Count: { /* out2-prerelease */
+#if 0 /* local variables moved into u.as */
i64 nEntry;
BtCursor *pCrsr;
+#endif /* local variables moved into u.as */
- pCrsr = p->apCsr[pOp->p1]->pCursor;
- assert( pCrsr );
- nEntry = 0; /* Not needed. Only used to silence a warning. */
- rc = sqlite3BtreeCount(pCrsr, &nEntry);
- pOut = out2Prerelease(p, pOp);
- pOut->u.i = nEntry;
+ u.as.pCrsr = p->apCsr[pOp->p1]->pCursor;
+ assert( u.as.pCrsr );
+ rc = sqlite3BtreeCount(u.as.pCrsr, &u.as.nEntry);
+ pOut->u.i = u.as.nEntry;
break;
}
#endif
@@ -75701,6 +69263,7 @@ case OP_Count: { /* out2 */
** existing savepoint, P1==1, or to rollback an existing savepoint P1==2.
*/
case OP_Savepoint: {
+#if 0 /* local variables moved into u.at */
int p1; /* Value of P1 operand */
char *zName; /* Name of savepoint */
int nName;
@@ -75709,28 +69272,30 @@ case OP_Savepoint: {
Savepoint *pTmp;
int iSavepoint;
int ii;
+#endif /* local variables moved into u.at */
- p1 = pOp->p1;
- zName = pOp->p4.z;
+ u.at.p1 = pOp->p1;
+ u.at.zName = pOp->p4.z;
- /* Assert that the p1 parameter is valid. Also that if there is no open
- ** transaction, then there cannot be any savepoints.
+ /* Assert that the u.at.p1 parameter is valid. Also that if there is no open
+ ** transaction, then there cannot be any savepoints.
*/
assert( db->pSavepoint==0 || db->autoCommit==0 );
- assert( p1==SAVEPOINT_BEGIN||p1==SAVEPOINT_RELEASE||p1==SAVEPOINT_ROLLBACK );
+ assert( u.at.p1==SAVEPOINT_BEGIN||u.at.p1==SAVEPOINT_RELEASE||u.at.p1==SAVEPOINT_ROLLBACK );
assert( db->pSavepoint || db->isTransactionSavepoint==0 );
assert( checkSavepointCount(db) );
assert( p->bIsReader );
- if( p1==SAVEPOINT_BEGIN ){
+ if( u.at.p1==SAVEPOINT_BEGIN ){
if( db->nVdbeWrite>0 ){
- /* A new savepoint cannot be created if there are active write
+ /* A new savepoint cannot be created if there are active write
** statements (i.e. open read/write incremental blob handles).
*/
- sqlite3VdbeError(p, "cannot open savepoint - SQL statements in progress");
+ sqlite3SetString(&p->zErrMsg, db, "cannot open savepoint - "
+ "SQL statements in progress");
rc = SQLITE_BUSY;
}else{
- nName = sqlite3Strlen30(zName);
+ u.at.nName = sqlite3Strlen30(u.at.zName);
#ifndef SQLITE_OMIT_VIRTUALTABLE
/* This call is Ok even if this savepoint is actually a transaction
@@ -75744,11 +69309,11 @@ case OP_Savepoint: {
#endif
/* Create a new savepoint structure. */
- pNew = sqlite3DbMallocRaw(db, sizeof(Savepoint)+nName+1);
- if( pNew ){
- pNew->zName = (char *)&pNew[1];
- memcpy(pNew->zName, zName, nName+1);
-
+ u.at.pNew = sqlite3DbMallocRaw(db, sizeof(Savepoint)+u.at.nName+1);
+ if( u.at.pNew ){
+ u.at.pNew->zName = (char *)&u.at.pNew[1];
+ memcpy(u.at.pNew->zName, u.at.zName, u.at.nName+1);
+
/* If there is no open transaction, then mark this as a special
** "transaction savepoint". */
if( db->autoCommit ){
@@ -75757,50 +69322,51 @@ case OP_Savepoint: {
}else{
db->nSavepoint++;
}
-
+
/* Link the new savepoint into the database handle's list. */
- pNew->pNext = db->pSavepoint;
- db->pSavepoint = pNew;
- pNew->nDeferredCons = db->nDeferredCons;
- pNew->nDeferredImmCons = db->nDeferredImmCons;
+ u.at.pNew->pNext = db->pSavepoint;
+ db->pSavepoint = u.at.pNew;
+ u.at.pNew->nDeferredCons = db->nDeferredCons;
+ u.at.pNew->nDeferredImmCons = db->nDeferredImmCons;
}
}
}else{
- iSavepoint = 0;
+ u.at.iSavepoint = 0;
    /* Find the named savepoint. If there is no such savepoint, then
    ** an error is returned to the user. */
for(
- pSavepoint = db->pSavepoint;
- pSavepoint && sqlite3StrICmp(pSavepoint->zName, zName);
- pSavepoint = pSavepoint->pNext
+ u.at.pSavepoint = db->pSavepoint;
+ u.at.pSavepoint && sqlite3StrICmp(u.at.pSavepoint->zName, u.at.zName);
+ u.at.pSavepoint = u.at.pSavepoint->pNext
){
- iSavepoint++;
+ u.at.iSavepoint++;
}
- if( !pSavepoint ){
- sqlite3VdbeError(p, "no such savepoint: %s", zName);
+ if( !u.at.pSavepoint ){
+ sqlite3SetString(&p->zErrMsg, db, "no such savepoint: %s", u.at.zName);
rc = SQLITE_ERROR;
- }else if( db->nVdbeWrite>0 && p1==SAVEPOINT_RELEASE ){
- /* It is not possible to release (commit) a savepoint if there are
+ }else if( db->nVdbeWrite>0 && u.at.p1==SAVEPOINT_RELEASE ){
+ /* It is not possible to release (commit) a savepoint if there are
** active write statements.
*/
- sqlite3VdbeError(p, "cannot release savepoint - "
- "SQL statements in progress");
+ sqlite3SetString(&p->zErrMsg, db,
+ "cannot release savepoint - SQL statements in progress"
+ );
rc = SQLITE_BUSY;
}else{
/* Determine whether or not this is a transaction savepoint. If so,
- ** and this is a RELEASE command, then the current transaction
- ** is committed.
+ ** and this is a RELEASE command, then the current transaction
+ ** is committed.
*/
- int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint;
- if( isTransaction && p1==SAVEPOINT_RELEASE ){
+ int isTransaction = u.at.pSavepoint->pNext==0 && db->isTransactionSavepoint;
+ if( isTransaction && u.at.p1==SAVEPOINT_RELEASE ){
if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){
goto vdbe_return;
}
db->autoCommit = 1;
if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
- p->pc = (int)(pOp - aOp);
+ p->pc = pc;
db->autoCommit = 0;
p->rc = rc = SQLITE_BUSY;
goto vdbe_return;
@@ -75808,59 +69374,52 @@ case OP_Savepoint: {
db->isTransactionSavepoint = 0;
rc = p->rc;
}else{
- int isSchemaChange;
- iSavepoint = db->nSavepoint - iSavepoint - 1;
- if( p1==SAVEPOINT_ROLLBACK ){
- isSchemaChange = (db->flags & SQLITE_InternChanges)!=0;
- for(ii=0; ii<db->nDb; ii++){
- rc = sqlite3BtreeTripAllCursors(db->aDb[ii].pBt,
- SQLITE_ABORT_ROLLBACK,
- isSchemaChange==0);
- if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ u.at.iSavepoint = db->nSavepoint - u.at.iSavepoint - 1;
+ if( u.at.p1==SAVEPOINT_ROLLBACK ){
+ for(u.at.ii=0; u.at.ii<db->nDb; u.at.ii++){
+ sqlite3BtreeTripAllCursors(db->aDb[u.at.ii].pBt, SQLITE_ABORT);
}
- }else{
- isSchemaChange = 0;
}
- for(ii=0; ii<db->nDb; ii++){
- rc = sqlite3BtreeSavepoint(db->aDb[ii].pBt, p1, iSavepoint);
+ for(u.at.ii=0; u.at.ii<db->nDb; u.at.ii++){
+ rc = sqlite3BtreeSavepoint(db->aDb[u.at.ii].pBt, u.at.p1, u.at.iSavepoint);
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
}
- if( isSchemaChange ){
+ if( u.at.p1==SAVEPOINT_ROLLBACK && (db->flags&SQLITE_InternChanges)!=0 ){
sqlite3ExpirePreparedStatements(db);
sqlite3ResetAllSchemasOfConnection(db);
db->flags = (db->flags | SQLITE_InternChanges);
}
}
-
- /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all
+
+ /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all
** savepoints nested inside of the savepoint being operated on. */
- while( db->pSavepoint!=pSavepoint ){
- pTmp = db->pSavepoint;
- db->pSavepoint = pTmp->pNext;
- sqlite3DbFree(db, pTmp);
+ while( db->pSavepoint!=u.at.pSavepoint ){
+ u.at.pTmp = db->pSavepoint;
+ db->pSavepoint = u.at.pTmp->pNext;
+ sqlite3DbFree(db, u.at.pTmp);
db->nSavepoint--;
}
- /* If it is a RELEASE, then destroy the savepoint being operated on
- ** too. If it is a ROLLBACK TO, then set the number of deferred
+ /* If it is a RELEASE, then destroy the savepoint being operated on
+ ** too. If it is a ROLLBACK TO, then set the number of deferred
** constraint violations present in the database to the value stored
** when the savepoint was created. */
- if( p1==SAVEPOINT_RELEASE ){
- assert( pSavepoint==db->pSavepoint );
- db->pSavepoint = pSavepoint->pNext;
- sqlite3DbFree(db, pSavepoint);
+ if( u.at.p1==SAVEPOINT_RELEASE ){
+ assert( u.at.pSavepoint==db->pSavepoint );
+ db->pSavepoint = u.at.pSavepoint->pNext;
+ sqlite3DbFree(db, u.at.pSavepoint);
if( !isTransaction ){
db->nSavepoint--;
}
}else{
- db->nDeferredCons = pSavepoint->nDeferredCons;
- db->nDeferredImmCons = pSavepoint->nDeferredImmCons;
+ db->nDeferredCons = u.at.pSavepoint->nDeferredCons;
+ db->nDeferredImmCons = u.at.pSavepoint->nDeferredImmCons;
}
- if( !isTransaction || p1==SAVEPOINT_ROLLBACK ){
- rc = sqlite3VtabSavepoint(db, p1, iSavepoint);
+ if( !isTransaction ){
+ rc = sqlite3VtabSavepoint(db, u.at.p1, u.at.iSavepoint);
if( rc!=SQLITE_OK ) goto abort_due_to_error;
}
}
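
Editorial note, not part of the patch: OP_Savepoint is the engine side of SAVEPOINT, RELEASE and ROLLBACK TO. A minimal sketch of driving it through the public API, assuming an open handle db and a hypothetical table t:

#include <sqlite3.h>

/* Keep the first insert, undo only the second one. */
static int savepoint_demo(sqlite3 *db){
  return sqlite3_exec(db,
      "SAVEPOINT outer_sp;"
      "INSERT INTO t VALUES(1);"
      "SAVEPOINT inner_sp;"
      "INSERT INTO t VALUES(2);"
      "ROLLBACK TO inner_sp;"   /* undoes the second INSERT only */
      "RELEASE outer_sp;",      /* ends the savepoint, keeping the first */
      0, 0, 0);
}
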
@@ -75879,40 +69438,53 @@ case OP_Savepoint: {
** This instruction causes the VM to halt.
*/
case OP_AutoCommit: {
+#if 0 /* local variables moved into u.au */
int desiredAutoCommit;
int iRollback;
int turnOnAC;
+#endif /* local variables moved into u.au */
- desiredAutoCommit = pOp->p1;
- iRollback = pOp->p2;
- turnOnAC = desiredAutoCommit && !db->autoCommit;
- assert( desiredAutoCommit==1 || desiredAutoCommit==0 );
- assert( desiredAutoCommit==1 || iRollback==0 );
+ u.au.desiredAutoCommit = pOp->p1;
+ u.au.iRollback = pOp->p2;
+ u.au.turnOnAC = u.au.desiredAutoCommit && !db->autoCommit;
+ assert( u.au.desiredAutoCommit==1 || u.au.desiredAutoCommit==0 );
+ assert( u.au.desiredAutoCommit==1 || u.au.iRollback==0 );
assert( db->nVdbeActive>0 ); /* At least this one VM is active */
assert( p->bIsReader );
- if( turnOnAC && !iRollback && db->nVdbeWrite>0 ){
+#if 0
+ if( u.au.turnOnAC && u.au.iRollback && db->nVdbeActive>1 ){
+ /* If this instruction implements a ROLLBACK and other VMs are
+ ** still running, and a transaction is active, return an error indicating
+ ** that the other VMs must complete first.
+ */
+ sqlite3SetString(&p->zErrMsg, db, "cannot rollback transaction - "
+ "SQL statements in progress");
+ rc = SQLITE_BUSY;
+ }else
+#endif
+ if( u.au.turnOnAC && !u.au.iRollback && db->nVdbeWrite>0 ){
/* If this instruction implements a COMMIT and other VMs are writing
- ** return an error indicating that the other VMs must complete first.
+ ** return an error indicating that the other VMs must complete first.
*/
- sqlite3VdbeError(p, "cannot commit transaction - "
- "SQL statements in progress");
+ sqlite3SetString(&p->zErrMsg, db, "cannot commit transaction - "
+ "SQL statements in progress");
rc = SQLITE_BUSY;
- }else if( desiredAutoCommit!=db->autoCommit ){
- if( iRollback ){
- assert( desiredAutoCommit==1 );
+ }else if( u.au.desiredAutoCommit!=db->autoCommit ){
+ if( u.au.iRollback ){
+ assert( u.au.desiredAutoCommit==1 );
sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
db->autoCommit = 1;
}else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){
goto vdbe_return;
}else{
- db->autoCommit = (u8)desiredAutoCommit;
- }
- if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
- p->pc = (int)(pOp - aOp);
- db->autoCommit = (u8)(1-desiredAutoCommit);
- p->rc = rc = SQLITE_BUSY;
- goto vdbe_return;
+ db->autoCommit = (u8)u.au.desiredAutoCommit;
+ if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
+ p->pc = pc;
+ db->autoCommit = (u8)(1-u.au.desiredAutoCommit);
+ p->rc = rc = SQLITE_BUSY;
+ goto vdbe_return;
+ }
}
assert( db->nStatement==0 );
sqlite3CloseSavepoints(db);
@@ -75923,29 +69495,35 @@ case OP_AutoCommit: {
}
goto vdbe_return;
}else{
- sqlite3VdbeError(p,
- (!desiredAutoCommit)?"cannot start a transaction within a transaction":(
- (iRollback)?"cannot rollback - no transaction is active":
+ sqlite3SetString(&p->zErrMsg, db,
+ (!u.au.desiredAutoCommit)?"cannot start a transaction within a transaction":(
+ (u.au.iRollback)?"cannot rollback - no transaction is active":
"cannot commit - no transaction is active"));
-
+
rc = SQLITE_ERROR;
}
break;
}
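
OP_AutoCommit is the opcode behind BEGIN, COMMIT and ROLLBACK; the db->autoCommit flag it toggles is visible to applications through sqlite3_get_autocommit(). A small illustrative sketch (not part of this patch; the helper name is invented):

  #include <assert.h>
  #include <sqlite3.h>

  /* Illustrative only: BEGIN clears the connection's autocommit flag and
  ** COMMIT restores it, mirroring the db->autoCommit updates above. */
  void autocommit_demo(sqlite3 *db){
    assert( sqlite3_get_autocommit(db) );      /* default state */
    sqlite3_exec(db, "BEGIN;", 0, 0, 0);
    assert( !sqlite3_get_autocommit(db) );     /* explicit transaction open */
    sqlite3_exec(db, "COMMIT;", 0, 0, 0);
    assert( sqlite3_get_autocommit(db) );      /* back to autocommit */
  }
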
-/* Opcode: Transaction P1 P2 P3 P4 P5
+/* Opcode: Transaction P1 P2 * * *
**
-** Begin a transaction on database P1 if a transaction is not already
-** active.
-** If P2 is non-zero, then a write-transaction is started, or if a
-** read-transaction is already active, it is upgraded to a write-transaction.
-** If P2 is zero, then a read-transaction is started.
+** Begin a transaction. The transaction ends when a Commit or Rollback
+** opcode is encountered. Depending on the ON CONFLICT setting, the
+** transaction might also be rolled back if an error is encountered.
**
** P1 is the index of the database file on which the transaction is
** started. Index 0 is the main database file and index 1 is the
** file used for temporary tables. Indices of 2 or more are used for
** attached databases.
**
+** If P2 is non-zero, then a write-transaction is started. A RESERVED lock is
+** obtained on the database file when a write-transaction is started. No
+** other process can start another write transaction while this transaction is
+** underway. Starting a write transaction also creates a rollback journal. A
+** write transaction must be started before any changes can be made to the
+** database. If P2 is greater than or equal to 2 then an EXCLUSIVE lock is
+** also obtained on the file.
+**
** If a write-transaction is started and the Vdbe.usesStmtJournal flag is
** true (this flag is set if the Vdbe may modify more than one row and may
** throw an ABORT exception), a statement transaction may also be opened.
@@ -75956,58 +69534,47 @@ case OP_AutoCommit: {
** entire transaction. If no error is encountered, the statement transaction
** will automatically commit when the VDBE halts.
**
-** If P5!=0 then this opcode also checks the schema cookie against P3
-** and the schema generation counter against P4.
-** The cookie changes its value whenever the database schema changes.
-** This operation is used to detect when that the cookie has changed
-** and that the current process needs to reread the schema. If the schema
-** cookie in P3 differs from the schema cookie in the database header or
-** if the schema generation counter in P4 differs from the current
-** generation counter, then an SQLITE_SCHEMA error is raised and execution
-** halts. The sqlite3_step() wrapper function might then reprepare the
-** statement and rerun it from the beginning.
+** If P2 is zero, then a read-lock is obtained on the database file.
*/
case OP_Transaction: {
+#if 0 /* local variables moved into u.av */
Btree *pBt;
- int iMeta;
- int iGen;
+#endif /* local variables moved into u.av */
assert( p->bIsReader );
assert( p->readOnly==0 || pOp->p2==0 );
assert( pOp->p1>=0 && pOp->p1<db->nDb );
- assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){
rc = SQLITE_READONLY;
goto abort_due_to_error;
}
- pBt = db->aDb[pOp->p1].pBt;
+ u.av.pBt = db->aDb[pOp->p1].pBt;
- if( pBt ){
- rc = sqlite3BtreeBeginTrans(pBt, pOp->p2);
- testcase( rc==SQLITE_BUSY_SNAPSHOT );
- testcase( rc==SQLITE_BUSY_RECOVERY );
- if( (rc&0xff)==SQLITE_BUSY ){
- p->pc = (int)(pOp - aOp);
- p->rc = rc;
+ if( u.av.pBt ){
+ rc = sqlite3BtreeBeginTrans(u.av.pBt, pOp->p2);
+ if( rc==SQLITE_BUSY ){
+ p->pc = pc;
+ p->rc = rc = SQLITE_BUSY;
goto vdbe_return;
}
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
- if( pOp->p2 && p->usesStmtJournal
- && (db->autoCommit==0 || db->nVdbeRead>1)
+ if( pOp->p2 && p->usesStmtJournal
+ && (db->autoCommit==0 || db->nVdbeRead>1)
){
- assert( sqlite3BtreeIsInTrans(pBt) );
+ assert( sqlite3BtreeIsInTrans(u.av.pBt) );
if( p->iStatement==0 ){
assert( db->nStatement>=0 && db->nSavepoint>=0 );
- db->nStatement++;
+ db->nStatement++;
p->iStatement = db->nSavepoint + db->nStatement;
}
rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, p->iStatement-1);
if( rc==SQLITE_OK ){
- rc = sqlite3BtreeBeginStmt(pBt, p->iStatement);
+ rc = sqlite3BtreeBeginStmt(u.av.pBt, p->iStatement);
}
/* Store the current value of the database handles deferred constraint
@@ -76016,40 +69583,6 @@ case OP_Transaction: {
p->nStmtDefCons = db->nDeferredCons;
p->nStmtDefImmCons = db->nDeferredImmCons;
}
-
- /* Gather the schema version number for checking:
- ** IMPLEMENTATION-OF: R-32195-19465 The schema version is used by SQLite
- ** each time a query is executed to ensure that the internal cache of the
- ** schema used when compiling the SQL query matches the schema of the
- ** database against which the compiled query is actually executed.
- */
- sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta);
- iGen = db->aDb[pOp->p1].pSchema->iGeneration;
- }else{
- iGen = iMeta = 0;
- }
- assert( pOp->p5==0 || pOp->p4type==P4_INT32 );
- if( pOp->p5 && (iMeta!=pOp->p3 || iGen!=pOp->p4.i) ){
- sqlite3DbFree(db, p->zErrMsg);
- p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed");
- /* If the schema-cookie from the database file matches the cookie
- ** stored with the in-memory representation of the schema, do
- ** not reload the schema from the database file.
- **
- ** If virtual-tables are in use, this is not just an optimization.
- ** Often, v-tables store their data in other SQLite tables, which
- ** are queried from within xNext() and other v-table methods using
- ** prepared queries. If such a query is out-of-date, we do not want to
- ** discard the database schema, as the user code implementing the
- ** v-table would have to be ready for the sqlite3_vtab structure itself
- ** to be invalidated whenever sqlite3_step() is called from within
- ** a v-table method.
- */
- if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){
- sqlite3ResetOneSchema(db, pOp->p1);
- }
- p->expired = 1;
- rc = SQLITE_SCHEMA;
}
break;
}
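
The SQLITE_BUSY return from sqlite3BtreeBeginTrans() above is what an application sees when another connection already holds a conflicting lock; BEGIN IMMEDIATE requests the write-transaction (non-zero P2) up front. A minimal retry loop, illustrative only (the helper name and the 50 ms back-off are arbitrary choices, not taken from this patch):

  #include <sqlite3.h>

  /* Illustrative only: start a write-transaction, retrying while another
  ** connection holds the write lock (the SQLITE_BUSY path above). */
  int begin_write_txn(sqlite3 *db){
    int rc = sqlite3_exec(db, "BEGIN IMMEDIATE;", 0, 0, 0);
    while( rc==SQLITE_BUSY ){
      sqlite3_sleep(50);          /* arbitrary back-off before retrying */
      rc = sqlite3_exec(db, "BEGIN IMMEDIATE;", 0, 0, 0);
    }
    return rc;
  }
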
@@ -76066,22 +69599,23 @@ case OP_Transaction: {
** must be started or there must be an open cursor) before
** executing this instruction.
*/
-case OP_ReadCookie: { /* out2 */
+case OP_ReadCookie: { /* out2-prerelease */
+#if 0 /* local variables moved into u.aw */
int iMeta;
int iDb;
int iCookie;
+#endif /* local variables moved into u.aw */
assert( p->bIsReader );
- iDb = pOp->p1;
- iCookie = pOp->p3;
+ u.aw.iDb = pOp->p1;
+ u.aw.iCookie = pOp->p3;
assert( pOp->p3<SQLITE_N_BTREE_META );
- assert( iDb>=0 && iDb<db->nDb );
- assert( db->aDb[iDb].pBt!=0 );
- assert( DbMaskTest(p->btreeMask, iDb) );
+ assert( u.aw.iDb>=0 && u.aw.iDb<db->nDb );
+ assert( db->aDb[u.aw.iDb].pBt!=0 );
+ assert( (p->btreeMask & (((yDbMask)1)<<u.aw.iDb))!=0 );
- sqlite3BtreeGetMeta(db->aDb[iDb].pBt, iCookie, (u32 *)&iMeta);
- pOut = out2Prerelease(p, pOp);
- pOut->u.i = iMeta;
+ sqlite3BtreeGetMeta(db->aDb[u.aw.iDb].pBt, u.aw.iCookie, (u32 *)&u.aw.iMeta);
+ pOut->u.i = u.aw.iMeta;
break;
}
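
The schema-version cookie read by this opcode (and written by OP_SetCookie below) is exposed at the SQL level as PRAGMA schema_version, which is typically compiled down to a ReadCookie instruction. A minimal sketch using the public C API (illustrative only; the helper name is invented):

  #include <stdio.h>
  #include <sqlite3.h>

  /* Illustrative only: read the schema cookie through the pragma that is
  ** normally implemented with a ReadCookie opcode. */
  int print_schema_version(sqlite3 *db){
    sqlite3_stmt *pStmt;
    int rc = sqlite3_prepare_v2(db, "PRAGMA schema_version;", -1, &pStmt, 0);
    if( rc!=SQLITE_OK ) return rc;
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("schema_version = %d\n", sqlite3_column_int(pStmt, 0));
    }
    return sqlite3_finalize(pStmt);
  }
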
@@ -76096,25 +69630,27 @@ case OP_ReadCookie: { /* out2 */
** A transaction must be started before executing this opcode.
*/
case OP_SetCookie: { /* in3 */
+#if 0 /* local variables moved into u.ax */
Db *pDb;
+#endif /* local variables moved into u.ax */
assert( pOp->p2<SQLITE_N_BTREE_META );
assert( pOp->p1>=0 && pOp->p1<db->nDb );
- assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
assert( p->readOnly==0 );
- pDb = &db->aDb[pOp->p1];
- assert( pDb->pBt!=0 );
+ u.ax.pDb = &db->aDb[pOp->p1];
+ assert( u.ax.pDb->pBt!=0 );
assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) );
pIn3 = &aMem[pOp->p3];
sqlite3VdbeMemIntegerify(pIn3);
/* See note about index shifting on OP_ReadCookie */
- rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, (int)pIn3->u.i);
+ rc = sqlite3BtreeUpdateMeta(u.ax.pDb->pBt, pOp->p2, (int)pIn3->u.i);
if( pOp->p2==BTREE_SCHEMA_VERSION ){
/* When the schema cookie changes, record the new cookie internally */
- pDb->pSchema->schema_cookie = (int)pIn3->u.i;
+ u.ax.pDb->pSchema->schema_cookie = (int)pIn3->u.i;
db->flags |= SQLITE_InternChanges;
}else if( pOp->p2==BTREE_FILE_FORMAT ){
/* Record changes in the file format */
- pDb->pSchema->file_format = (u8)pIn3->u.i;
+ u.ax.pDb->pSchema->file_format = (u8)pIn3->u.i;
}
if( pOp->p1==1 ){
/* Invalidate all prepared statements whenever the TEMP database
@@ -76125,6 +69661,68 @@ case OP_SetCookie: { /* in3 */
break;
}
+/* Opcode: VerifyCookie P1 P2 P3 * *
+**
+** Check the value of global database parameter number 0 (the
+** schema version) and make sure it is equal to P2 and that the
+** generation counter on the local schema parse equals P3.
+**
+** P1 is the database number which is 0 for the main database file
+** and 1 for the file holding temporary tables and some higher number
+** for auxiliary databases.
+**
+** The cookie changes its value whenever the database schema changes.
+** This operation is used to detect when that the cookie has changed
+** and that the current process needs to reread the schema.
+**
+** Either a transaction needs to have been started or an OP_Open needs
+** to be executed (to establish a read lock) before this opcode is
+** invoked.
+*/
+case OP_VerifyCookie: {
+#if 0 /* local variables moved into u.ay */
+ int iMeta;
+ int iGen;
+ Btree *pBt;
+#endif /* local variables moved into u.ay */
+
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
+ assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) );
+ assert( p->bIsReader );
+ u.ay.pBt = db->aDb[pOp->p1].pBt;
+ if( u.ay.pBt ){
+ sqlite3BtreeGetMeta(u.ay.pBt, BTREE_SCHEMA_VERSION, (u32 *)&u.ay.iMeta);
+ u.ay.iGen = db->aDb[pOp->p1].pSchema->iGeneration;
+ }else{
+ u.ay.iGen = u.ay.iMeta = 0;
+ }
+ if( u.ay.iMeta!=pOp->p2 || u.ay.iGen!=pOp->p3 ){
+ sqlite3DbFree(db, p->zErrMsg);
+ p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed");
+ /* If the schema-cookie from the database file matches the cookie
+ ** stored with the in-memory representation of the schema, do
+ ** not reload the schema from the database file.
+ **
+ ** If virtual-tables are in use, this is not just an optimization.
+ ** Often, v-tables store their data in other SQLite tables, which
+ ** are queried from within xNext() and other v-table methods using
+ ** prepared queries. If such a query is out-of-date, we do not want to
+ ** discard the database schema, as the user code implementing the
+ ** v-table would have to be ready for the sqlite3_vtab structure itself
+ ** to be invalidated whenever sqlite3_step() is called from within
+ ** a v-table method.
+ */
+ if( db->aDb[pOp->p1].pSchema->schema_cookie!=u.ay.iMeta ){
+ sqlite3ResetOneSchema(db, pOp->p1);
+ }
+
+ p->expired = 1;
+ rc = SQLITE_SCHEMA;
+ }
+ break;
+}
+
/* Opcode: OpenRead P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
**
@@ -76154,21 +69752,7 @@ case OP_SetCookie: { /* in3 */
** sequence of the index being opened. Otherwise, if P4 is an integer
** value, it is set to the number of columns in the table.
**
-** See also: OpenWrite, ReopenIdx
-*/
-/* Opcode: ReopenIdx P1 P2 P3 P4 P5
-** Synopsis: root=P2 iDb=P3
-**
-** The ReopenIdx opcode works exactly like ReadOpen except that it first
-** checks to see if the cursor on P1 is already open with a root page
-** number of P2 and if it is this opcode becomes a no-op. In other words,
-** if the cursor is already open, do not reopen it.
-**
-** The ReopenIdx opcode may only be used with P5==0 and with P4 being
-** a P4_KEYINFO object. Furthermore, the P3 value must be the same as
-** every other ReopenIdx or OpenRead for the same cursor number.
-**
-** See the OpenRead opcode documentation for additional information.
+** See also OpenWrite.
*/
/* Opcode: OpenWrite P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
@@ -76190,7 +69774,9 @@ case OP_SetCookie: { /* in3 */
**
** See also OpenRead.
*/
-case OP_ReopenIdx: {
+case OP_OpenRead:
+case OP_OpenWrite: {
+#if 0 /* local variables moved into u.az */
int nField;
KeyInfo *pKeyInfo;
int p2;
@@ -76199,94 +69785,82 @@ case OP_ReopenIdx: {
Btree *pX;
VdbeCursor *pCur;
Db *pDb;
+#endif /* local variables moved into u.az */
- assert( pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
- assert( pOp->p4type==P4_KEYINFO );
- pCur = p->apCsr[pOp->p1];
- if( pCur && pCur->pgnoRoot==(u32)pOp->p2 ){
- assert( pCur->iDb==pOp->p3 ); /* Guaranteed by the code generator */
- goto open_cursor_set_hints;
- }
- /* If the cursor is not currently open or is open on a different
- ** index, then fall through into OP_OpenRead to force a reopen */
-case OP_OpenRead:
-case OP_OpenWrite:
-
- assert( (pOp->p5&(OPFLAG_P2ISREG|OPFLAG_BULKCSR|OPFLAG_SEEKEQ))==pOp->p5 );
- assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
+ assert( (pOp->p5&(OPFLAG_P2ISREG|OPFLAG_BULKCSR))==pOp->p5 );
+ assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 );
assert( p->bIsReader );
- assert( pOp->opcode==OP_OpenRead || pOp->opcode==OP_ReopenIdx
- || p->readOnly==0 );
+ assert( pOp->opcode==OP_OpenRead || p->readOnly==0 );
if( p->expired ){
- rc = SQLITE_ABORT_ROLLBACK;
+ rc = SQLITE_ABORT;
break;
}
- nField = 0;
- pKeyInfo = 0;
- p2 = pOp->p2;
- iDb = pOp->p3;
- assert( iDb>=0 && iDb<db->nDb );
- assert( DbMaskTest(p->btreeMask, iDb) );
- pDb = &db->aDb[iDb];
- pX = pDb->pBt;
- assert( pX!=0 );
+ u.az.nField = 0;
+ u.az.pKeyInfo = 0;
+ u.az.p2 = pOp->p2;
+ u.az.iDb = pOp->p3;
+ assert( u.az.iDb>=0 && u.az.iDb<db->nDb );
+ assert( (p->btreeMask & (((yDbMask)1)<<u.az.iDb))!=0 );
+ u.az.pDb = &db->aDb[u.az.iDb];
+ u.az.pX = u.az.pDb->pBt;
+ assert( u.az.pX!=0 );
if( pOp->opcode==OP_OpenWrite ){
- wrFlag = 1;
- assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
- if( pDb->pSchema->file_format < p->minWriteFileFormat ){
- p->minWriteFileFormat = pDb->pSchema->file_format;
+ u.az.wrFlag = 1;
+ assert( sqlite3SchemaMutexHeld(db, u.az.iDb, 0) );
+ if( u.az.pDb->pSchema->file_format < p->minWriteFileFormat ){
+ p->minWriteFileFormat = u.az.pDb->pSchema->file_format;
}
}else{
- wrFlag = 0;
+ u.az.wrFlag = 0;
}
if( pOp->p5 & OPFLAG_P2ISREG ){
- assert( p2>0 );
- assert( p2<=(p->nMem-p->nCursor) );
- pIn2 = &aMem[p2];
+ assert( u.az.p2>0 );
+ assert( u.az.p2<=(p->nMem-p->nCursor) );
+ pIn2 = &aMem[u.az.p2];
assert( memIsValid(pIn2) );
assert( (pIn2->flags & MEM_Int)!=0 );
sqlite3VdbeMemIntegerify(pIn2);
- p2 = (int)pIn2->u.i;
- /* The p2 value always comes from a prior OP_CreateTable opcode and
- ** that opcode will always set the p2 value to 2 or more or else fail.
+ u.az.p2 = (int)pIn2->u.i;
+ /* The u.az.p2 value always comes from a prior OP_CreateTable opcode and
+ ** that opcode will always set the u.az.p2 value to 2 or more or else fail.
** If there were a failure, the prepared statement would have halted
** before reaching this instruction. */
- if( NEVER(p2<2) ) {
+ if( NEVER(u.az.p2<2) ) {
rc = SQLITE_CORRUPT_BKPT;
goto abort_due_to_error;
}
}
if( pOp->p4type==P4_KEYINFO ){
- pKeyInfo = pOp->p4.pKeyInfo;
- assert( pKeyInfo->enc==ENC(db) );
- assert( pKeyInfo->db==db );
- nField = pKeyInfo->nField+pKeyInfo->nXField;
+ u.az.pKeyInfo = pOp->p4.pKeyInfo;
+ assert( u.az.pKeyInfo->enc==ENC(db) );
+ assert( u.az.pKeyInfo->db==db );
+ u.az.nField = u.az.pKeyInfo->nField+u.az.pKeyInfo->nXField;
}else if( pOp->p4type==P4_INT32 ){
- nField = pOp->p4.i;
+ u.az.nField = pOp->p4.i;
}
assert( pOp->p1>=0 );
- assert( nField>=0 );
- testcase( nField==0 ); /* Table with INTEGER PRIMARY KEY and nothing else */
- pCur = allocateCursor(p, pOp->p1, nField, iDb, 1);
- if( pCur==0 ) goto no_mem;
- pCur->nullRow = 1;
- pCur->isOrdered = 1;
- pCur->pgnoRoot = p2;
- rc = sqlite3BtreeCursor(pX, p2, wrFlag, pKeyInfo, pCur->pCursor);
- pCur->pKeyInfo = pKeyInfo;
+ assert( u.az.nField>=0 );
+ testcase( u.az.nField==0 ); /* Table with INTEGER PRIMARY KEY and nothing else */
+ u.az.pCur = allocateCursor(p, pOp->p1, u.az.nField, u.az.iDb, 1);
+ if( u.az.pCur==0 ) goto no_mem;
+ u.az.pCur->nullRow = 1;
+ u.az.pCur->isOrdered = 1;
+ rc = sqlite3BtreeCursor(u.az.pX, u.az.p2, u.az.wrFlag, u.az.pKeyInfo, u.az.pCur->pCursor);
+ u.az.pCur->pKeyInfo = u.az.pKeyInfo;
+ assert( OPFLAG_BULKCSR==BTREE_BULKLOAD );
+ sqlite3BtreeCursorHints(u.az.pCur->pCursor, (pOp->p5 & OPFLAG_BULKCSR));
+
+ /* Since it performs no memory allocation or IO, the only value that
+ ** sqlite3BtreeCursor() may return is SQLITE_OK. */
+ assert( rc==SQLITE_OK );
+
/* Set the VdbeCursor.isTable variable. Previous versions of
** SQLite used to check if the root-page flags were sane at this point
** and report database corruption if they were not, but this check has
- ** since moved into the btree layer. */
- pCur->isTable = pOp->p4type!=P4_KEYINFO;
-
-open_cursor_set_hints:
- assert( OPFLAG_BULKCSR==BTREE_BULKLOAD );
- assert( OPFLAG_SEEKEQ==BTREE_SEEK_EQ );
- sqlite3BtreeCursorHints(pCur->pCursor,
- (pOp->p5 & (OPFLAG_BULKCSR|OPFLAG_SEEKEQ)));
+ ** since moved into the btree layer. */
+ u.az.pCur->isTable = pOp->p4type!=P4_KEYINFO;
break;
}
@@ -76318,10 +69892,12 @@ open_cursor_set_hints:
*/
case OP_OpenAutoindex:
case OP_OpenEphemeral: {
+#if 0 /* local variables moved into u.ba */
VdbeCursor *pCx;
KeyInfo *pKeyInfo;
+#endif /* local variables moved into u.ba */
- static const int vfsFlags =
+ static const int vfsFlags =
SQLITE_OPEN_READWRITE |
SQLITE_OPEN_CREATE |
SQLITE_OPEN_EXCLUSIVE |
@@ -76329,14 +69905,13 @@ case OP_OpenEphemeral: {
SQLITE_OPEN_TRANSIENT_DB;
assert( pOp->p1>=0 );
assert( pOp->p2>=0 );
- pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1);
- if( pCx==0 ) goto no_mem;
- pCx->nullRow = 1;
- pCx->isEphemeral = 1;
- rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBt,
+ u.ba.pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1);
+ if( u.ba.pCx==0 ) goto no_mem;
+ u.ba.pCx->nullRow = 1;
+ rc = sqlite3BtreeOpen(db->pVfs, 0, db, &u.ba.pCx->pBt,
BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags);
if( rc==SQLITE_OK ){
- rc = sqlite3BtreeBeginTrans(pCx->pBt, 1);
+ rc = sqlite3BtreeBeginTrans(u.ba.pCx->pBt, 1);
}
if( rc==SQLITE_OK ){
/* If a transient index is required, create it by calling
@@ -76344,76 +69919,57 @@ case OP_OpenEphemeral: {
** opening it. If a transient table is required, just use the
    ** automatically created table with root-page 1 (a BLOB_INTKEY table).
*/
- if( (pKeyInfo = pOp->p4.pKeyInfo)!=0 ){
+ if( (u.ba.pKeyInfo = pOp->p4.pKeyInfo)!=0 ){
int pgno;
assert( pOp->p4type==P4_KEYINFO );
- rc = sqlite3BtreeCreateTable(pCx->pBt, &pgno, BTREE_BLOBKEY | pOp->p5);
+ rc = sqlite3BtreeCreateTable(u.ba.pCx->pBt, &pgno, BTREE_BLOBKEY | pOp->p5);
if( rc==SQLITE_OK ){
assert( pgno==MASTER_ROOT+1 );
- assert( pKeyInfo->db==db );
- assert( pKeyInfo->enc==ENC(db) );
- pCx->pKeyInfo = pKeyInfo;
- rc = sqlite3BtreeCursor(pCx->pBt, pgno, 1, pKeyInfo, pCx->pCursor);
+ assert( u.ba.pKeyInfo->db==db );
+ assert( u.ba.pKeyInfo->enc==ENC(db) );
+ u.ba.pCx->pKeyInfo = u.ba.pKeyInfo;
+ rc = sqlite3BtreeCursor(u.ba.pCx->pBt, pgno, 1, u.ba.pKeyInfo, u.ba.pCx->pCursor);
}
- pCx->isTable = 0;
+ u.ba.pCx->isTable = 0;
}else{
- rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, pCx->pCursor);
- pCx->isTable = 1;
+ rc = sqlite3BtreeCursor(u.ba.pCx->pBt, MASTER_ROOT, 1, 0, u.ba.pCx->pCursor);
+ u.ba.pCx->isTable = 1;
}
}
- pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED);
+ u.ba.pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED);
break;
}
-/* Opcode: SorterOpen P1 P2 P3 P4 *
+/* Opcode: SorterOpen P1 * * P4 *
**
** This opcode works like OP_OpenEphemeral except that it opens
** a transient index that is specifically designed to sort large
** tables using an external merge-sort algorithm.
-**
-** If argument P3 is non-zero, then it indicates that the sorter may
-** assume that a stable sort considering the first P3 fields of each
-** key is sufficient to produce the required results.
*/
case OP_SorterOpen: {
+#if 0 /* local variables moved into u.bb */
VdbeCursor *pCx;
+#endif /* local variables moved into u.bb */
assert( pOp->p1>=0 );
assert( pOp->p2>=0 );
- pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1);
- if( pCx==0 ) goto no_mem;
- pCx->pKeyInfo = pOp->p4.pKeyInfo;
- assert( pCx->pKeyInfo->db==db );
- assert( pCx->pKeyInfo->enc==ENC(db) );
- rc = sqlite3VdbeSorterInit(db, pOp->p3, pCx);
- break;
-}
-
-/* Opcode: SequenceTest P1 P2 * * *
-** Synopsis: if( cursor[P1].ctr++ ) pc = P2
-**
-** P1 is a sorter cursor. If the sequence counter is currently zero, jump
-** to P2. Regardless of whether or not the jump is taken, increment the
-** the sequence value.
-*/
-case OP_SequenceTest: {
- VdbeCursor *pC;
- assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC->pSorter );
- if( (pC->seqCount++)==0 ){
- goto jump_to_p2;
- }
+ u.bb.pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1);
+ if( u.bb.pCx==0 ) goto no_mem;
+ u.bb.pCx->pKeyInfo = pOp->p4.pKeyInfo;
+ assert( u.bb.pCx->pKeyInfo->db==db );
+ assert( u.bb.pCx->pKeyInfo->enc==ENC(db) );
+ rc = sqlite3VdbeSorterInit(db, u.bb.pCx);
break;
}
-/* Opcode: OpenPseudo P1 P2 P3 * *
-** Synopsis: P3 columns in r[P2]
+/* Opcode: OpenPseudo P1 P2 P3 * P5
+** Synopsis: content in r[P2@P3]
**
** Open a new cursor that points to a fake table that contains a single
-** row of data. The content of that one row is the content of memory
-** register P2. In other words, cursor P1 becomes an alias for the
-** MEM_Blob content contained in register P2.
+** row of data. The content of that one row is the content of memory
+** register P2 when P5==0. In other words, cursor P1 becomes an alias for the
+** MEM_Blob content contained in register P2. When P5==1, then the
+** row is represented by P3 consecutive registers beginning with P2.
**
** A pseudo-table created by this opcode is used to hold a single
** row output from the sorter so that the row can be decomposed into
@@ -76424,16 +69980,18 @@ case OP_SequenceTest: {
** the pseudo-table.
*/
case OP_OpenPseudo: {
+#if 0 /* local variables moved into u.bc */
VdbeCursor *pCx;
+#endif /* local variables moved into u.bc */
assert( pOp->p1>=0 );
assert( pOp->p3>=0 );
- pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, 0);
- if( pCx==0 ) goto no_mem;
- pCx->nullRow = 1;
- pCx->pseudoTableReg = pOp->p2;
- pCx->isTable = 1;
- assert( pOp->p5==0 );
+ u.bc.pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, 0);
+ if( u.bc.pCx==0 ) goto no_mem;
+ u.bc.pCx->nullRow = 1;
+ u.bc.pCx->pseudoTableReg = pOp->p2;
+ u.bc.pCx->isTable = 1;
+ u.bc.pCx->multiPseudo = pOp->p5;
break;
}
@@ -76449,27 +70007,7 @@ case OP_Close: {
break;
}
-#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
-/* Opcode: ColumnsUsed P1 * * P4 *
-**
-** This opcode (which only exists if SQLite was compiled with
-** SQLITE_ENABLE_COLUMN_USED_MASK) identifies which columns of the
-** table or index for cursor P1 are used. P4 is a 64-bit integer
-** (P4_INT64) in which the first 63 bits are one for each of the
-** first 63 columns of the table or index that are actually used
-** by the cursor. The high-order bit is set if any column after
-** the 64th is used.
-*/
-case OP_ColumnsUsed: {
- VdbeCursor *pC;
- pC = p->apCsr[pOp->p1];
- assert( pC->pCursor );
- pC->maskUsed = *(u64*)pOp->p4.pI64;
- break;
-}
-#endif
-
-/* Opcode: SeekGE P1 P2 P3 P4 *
+/* Opcode: SeekGe P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
@@ -76481,13 +70019,9 @@ case OP_ColumnsUsed: {
** is greater than or equal to the key value. If there are no records
** greater than or equal to the key and P2 is not zero, then jump to P2.
**
-** This opcode leaves the cursor configured to move in forward order,
-** from the beginning toward the end. In other words, the cursor is
-** configured to use Next, not Prev.
-**
-** See also: Found, NotFound, SeekLt, SeekGt, SeekLe
+** See also: Found, NotFound, Distinct, SeekLt, SeekGt, SeekLe
*/
-/* Opcode: SeekGT P1 P2 P3 P4 *
+/* Opcode: SeekGt P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
@@ -76499,13 +70033,9 @@ case OP_ColumnsUsed: {
** is greater than the key value. If there are no records greater than
** the key and P2 is not zero, then jump to P2.
**
-** This opcode leaves the cursor configured to move in forward order,
-** from the beginning toward the end. In other words, the cursor is
-** configured to use Next, not Prev.
-**
-** See also: Found, NotFound, SeekLt, SeekGe, SeekLe
+** See also: Found, NotFound, Distinct, SeekLt, SeekGe, SeekLe
*/
-/* Opcode: SeekLT P1 P2 P3 P4 *
+/* Opcode: SeekLt P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
@@ -76517,13 +70047,9 @@ case OP_ColumnsUsed: {
** is less than the key value. If there are no records less than
** the key and P2 is not zero, then jump to P2.
**
-** This opcode leaves the cursor configured to move in reverse order,
-** from the end toward the beginning. In other words, the cursor is
-** configured to use Prev, not Next.
-**
-** See also: Found, NotFound, SeekGt, SeekGe, SeekLe
+** See also: Found, NotFound, Distinct, SeekGt, SeekGe, SeekLe
*/
-/* Opcode: SeekLE P1 P2 P3 P4 *
+/* Opcode: SeekLe P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
@@ -76535,63 +70061,41 @@ case OP_ColumnsUsed: {
** is less than or equal to the key value. If there are no records
** less than or equal to the key and P2 is not zero, then jump to P2.
**
-** This opcode leaves the cursor configured to move in reverse order,
-** from the end toward the beginning. In other words, the cursor is
-** configured to use Prev, not Next.
-**
-** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
+** See also: Found, NotFound, Distinct, SeekGt, SeekGe, SeekLt
*/
-case OP_SeekLT: /* jump, in3 */
-case OP_SeekLE: /* jump, in3 */
-case OP_SeekGE: /* jump, in3 */
-case OP_SeekGT: { /* jump, in3 */
+case OP_SeekLt: /* jump, in3 */
+case OP_SeekLe: /* jump, in3 */
+case OP_SeekGe: /* jump, in3 */
+case OP_SeekGt: { /* jump, in3 */
+#if 0 /* local variables moved into u.bd */
int res;
int oc;
VdbeCursor *pC;
UnpackedRecord r;
int nField;
i64 iKey; /* The rowid we are to seek to */
+#endif /* local variables moved into u.bd */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( pOp->p2!=0 );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->pseudoTableReg==0 );
- assert( OP_SeekLE == OP_SeekLT+1 );
- assert( OP_SeekGE == OP_SeekLT+2 );
- assert( OP_SeekGT == OP_SeekLT+3 );
- assert( pC->isOrdered );
- assert( pC->pCursor!=0 );
- oc = pOp->opcode;
- pC->nullRow = 0;
-#ifdef SQLITE_DEBUG
- pC->seekOp = pOp->opcode;
-#endif
-
- /* For a cursor with the BTREE_SEEK_EQ hint, only the OP_SeekGE and
- ** OP_SeekLE opcodes are allowed, and these must be immediately followed
- ** by an OP_IdxGT or OP_IdxLT opcode, respectively, with the same key.
- */
-#ifdef SQLITE_DEBUG
- if( sqlite3BtreeCursorHasHint(pC->pCursor, BTREE_SEEK_EQ) ){
- assert( pOp->opcode==OP_SeekGE || pOp->opcode==OP_SeekLE );
- assert( pOp[1].opcode==OP_IdxLT || pOp[1].opcode==OP_IdxGT );
- assert( pOp[1].p1==pOp[0].p1 );
- assert( pOp[1].p2==pOp[0].p2 );
- assert( pOp[1].p3==pOp[0].p3 );
- assert( pOp[1].p4.i==pOp[0].p4.i );
- }
-#endif
-
- if( pC->isTable ){
+ u.bd.pC = p->apCsr[pOp->p1];
+ assert( u.bd.pC!=0 );
+ assert( u.bd.pC->pseudoTableReg==0 );
+ assert( OP_SeekLe == OP_SeekLt+1 );
+ assert( OP_SeekGe == OP_SeekLt+2 );
+ assert( OP_SeekGt == OP_SeekLt+3 );
+ assert( u.bd.pC->isOrdered );
+ assert( u.bd.pC->pCursor!=0 );
+ u.bd.oc = pOp->opcode;
+ u.bd.pC->nullRow = 0;
+ if( u.bd.pC->isTable ){
/* The input value in P3 might be of any type: integer, real, string,
** blob, or NULL. But it needs to be an integer before we can do
- ** the seek, so convert it. */
+    ** the seek, so convert it. */
pIn3 = &aMem[pOp->p3];
- if( (pIn3->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
- applyNumericAffinity(pIn3, 0);
- }
- iKey = sqlite3VdbeIntValue(pIn3);
+ applyNumericAffinity(pIn3);
+ u.bd.iKey = sqlite3VdbeIntValue(pIn3);
+ u.bd.pC->rowidIsValid = 0;
/* If the P3 value could not be converted into an integer without
** loss of information, then special processing is required... */
@@ -76599,98 +70103,101 @@ case OP_SeekGT: { /* jump, in3 */
if( (pIn3->flags & MEM_Real)==0 ){
/* If the P3 value cannot be converted into any kind of a number,
** then the seek is not possible, so jump to P2 */
- VdbeBranchTaken(1,2); goto jump_to_p2;
+ pc = pOp->p2 - 1;
break;
}
- /* If the approximation iKey is larger than the actual real search
+ /* If the approximation u.bd.iKey is larger than the actual real search
** term, substitute >= for > and < for <=. e.g. if the search term
** is 4.9 and the integer approximation 5:
**
** (x > 4.9) -> (x >= 5)
** (x <= 4.9) -> (x < 5)
*/
- if( pIn3->u.r<(double)iKey ){
- assert( OP_SeekGE==(OP_SeekGT-1) );
- assert( OP_SeekLT==(OP_SeekLE-1) );
- assert( (OP_SeekLE & 0x0001)==(OP_SeekGT & 0x0001) );
- if( (oc & 0x0001)==(OP_SeekGT & 0x0001) ) oc--;
+ if( pIn3->r<(double)u.bd.iKey ){
+ assert( OP_SeekGe==(OP_SeekGt-1) );
+ assert( OP_SeekLt==(OP_SeekLe-1) );
+ assert( (OP_SeekLe & 0x0001)==(OP_SeekGt & 0x0001) );
+ if( (u.bd.oc & 0x0001)==(OP_SeekGt & 0x0001) ) u.bd.oc--;
}
- /* If the approximation iKey is smaller than the actual real search
+ /* If the approximation u.bd.iKey is smaller than the actual real search
** term, substitute <= for < and > for >=. */
- else if( pIn3->u.r>(double)iKey ){
- assert( OP_SeekLE==(OP_SeekLT+1) );
- assert( OP_SeekGT==(OP_SeekGE+1) );
- assert( (OP_SeekLT & 0x0001)==(OP_SeekGE & 0x0001) );
- if( (oc & 0x0001)==(OP_SeekLT & 0x0001) ) oc++;
+ else if( pIn3->r>(double)u.bd.iKey ){
+ assert( OP_SeekLe==(OP_SeekLt+1) );
+ assert( OP_SeekGt==(OP_SeekGe+1) );
+ assert( (OP_SeekLt & 0x0001)==(OP_SeekGe & 0x0001) );
+ if( (u.bd.oc & 0x0001)==(OP_SeekLt & 0x0001) ) u.bd.oc++;
}
- }
- rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)iKey, 0, &res);
- pC->movetoTarget = iKey; /* Used by OP_Delete */
+ }
+ rc = sqlite3BtreeMovetoUnpacked(u.bd.pC->pCursor, 0, (u64)u.bd.iKey, 0, &u.bd.res);
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
+ if( u.bd.res==0 ){
+ u.bd.pC->rowidIsValid = 1;
+ u.bd.pC->lastRowid = u.bd.iKey;
+ }
}else{
- nField = pOp->p4.i;
+ u.bd.nField = pOp->p4.i;
assert( pOp->p4type==P4_INT32 );
- assert( nField>0 );
- r.pKeyInfo = pC->pKeyInfo;
- r.nField = (u16)nField;
+ assert( u.bd.nField>0 );
+ u.bd.r.pKeyInfo = u.bd.pC->pKeyInfo;
+ u.bd.r.nField = (u16)u.bd.nField;
/* The next line of code computes as follows, only faster:
- ** if( oc==OP_SeekGT || oc==OP_SeekLE ){
- ** r.default_rc = -1;
+ ** if( u.bd.oc==OP_SeekGt || u.bd.oc==OP_SeekLe ){
+ ** u.bd.r.flags = UNPACKED_INCRKEY;
** }else{
- ** r.default_rc = +1;
+ ** u.bd.r.flags = 0;
** }
*/
- r.default_rc = ((1 & (oc - OP_SeekLT)) ? -1 : +1);
- assert( oc!=OP_SeekGT || r.default_rc==-1 );
- assert( oc!=OP_SeekLE || r.default_rc==-1 );
- assert( oc!=OP_SeekGE || r.default_rc==+1 );
- assert( oc!=OP_SeekLT || r.default_rc==+1 );
+ u.bd.r.flags = (u8)(UNPACKED_INCRKEY * (1 & (u.bd.oc - OP_SeekLt)));
+ assert( u.bd.oc!=OP_SeekGt || u.bd.r.flags==UNPACKED_INCRKEY );
+ assert( u.bd.oc!=OP_SeekLe || u.bd.r.flags==UNPACKED_INCRKEY );
+ assert( u.bd.oc!=OP_SeekGe || u.bd.r.flags==0 );
+ assert( u.bd.oc!=OP_SeekLt || u.bd.r.flags==0 );
- r.aMem = &aMem[pOp->p3];
+ u.bd.r.aMem = &aMem[pOp->p3];
#ifdef SQLITE_DEBUG
- { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+ { int i; for(i=0; i<u.bd.r.nField; i++) assert( memIsValid(&u.bd.r.aMem[i]) ); }
#endif
- ExpandBlob(r.aMem);
- rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, &r, 0, 0, &res);
+ ExpandBlob(u.bd.r.aMem);
+ rc = sqlite3BtreeMovetoUnpacked(u.bd.pC->pCursor, &u.bd.r, 0, 0, &u.bd.res);
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
+ u.bd.pC->rowidIsValid = 0;
}
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
+ u.bd.pC->deferredMoveto = 0;
+ u.bd.pC->cacheStatus = CACHE_STALE;
#ifdef SQLITE_TEST
sqlite3_search_count++;
#endif
- if( oc>=OP_SeekGE ){ assert( oc==OP_SeekGE || oc==OP_SeekGT );
- if( res<0 || (res==0 && oc==OP_SeekGT) ){
- res = 0;
- rc = sqlite3BtreeNext(pC->pCursor, &res);
+ if( u.bd.oc>=OP_SeekGe ){ assert( u.bd.oc==OP_SeekGe || u.bd.oc==OP_SeekGt );
+ if( u.bd.res<0 || (u.bd.res==0 && u.bd.oc==OP_SeekGt) ){
+ rc = sqlite3BtreeNext(u.bd.pC->pCursor, &u.bd.res);
if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ u.bd.pC->rowidIsValid = 0;
}else{
- res = 0;
+ u.bd.res = 0;
}
}else{
- assert( oc==OP_SeekLT || oc==OP_SeekLE );
- if( res>0 || (res==0 && oc==OP_SeekLT) ){
- res = 0;
- rc = sqlite3BtreePrevious(pC->pCursor, &res);
+ assert( u.bd.oc==OP_SeekLt || u.bd.oc==OP_SeekLe );
+ if( u.bd.res>0 || (u.bd.res==0 && u.bd.oc==OP_SeekLt) ){
+ rc = sqlite3BtreePrevious(u.bd.pC->pCursor, &u.bd.res);
if( rc!=SQLITE_OK ) goto abort_due_to_error;
+ u.bd.pC->rowidIsValid = 0;
}else{
- /* res might be negative because the table is empty. Check to
+ /* u.bd.res might be negative because the table is empty. Check to
** see if this is the case.
*/
- res = sqlite3BtreeEof(pC->pCursor);
+ u.bd.res = sqlite3BtreeEof(u.bd.pC->pCursor);
}
}
assert( pOp->p2>0 );
- VdbeBranchTaken(res!=0,2);
- if( res ){
- goto jump_to_p2;
+ if( u.bd.res ){
+ pc = pOp->p2 - 1;
}
break;
}
@@ -76706,17 +70213,20 @@ case OP_SeekGT: { /* jump, in3 */
** occur, no unnecessary I/O happens.
*/
case OP_Seek: { /* in2 */
+#if 0 /* local variables moved into u.be */
VdbeCursor *pC;
+#endif /* local variables moved into u.be */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->pCursor!=0 );
- assert( pC->isTable );
- pC->nullRow = 0;
+ u.be.pC = p->apCsr[pOp->p1];
+ assert( u.be.pC!=0 );
+ assert( u.be.pC->pCursor!=0 );
+ assert( u.be.pC->isTable );
+ u.be.pC->nullRow = 0;
pIn2 = &aMem[pOp->p2];
- pC->movetoTarget = sqlite3VdbeIntValue(pIn2);
- pC->deferredMoveto = 1;
+ u.be.pC->movetoTarget = sqlite3VdbeIntValue(pIn2);
+ u.be.pC->rowidIsValid = 0;
+ u.be.pC->deferredMoveto = 1;
break;
}
@@ -76732,10 +70242,6 @@ case OP_Seek: { /* in2 */
** is a prefix of any entry in P1 then a jump is made to P2 and
** P1 is left pointing at the matching entry.
**
-** This operation leaves the cursor in a state where it can be
-** advanced in the forward direction. The Next instruction will work,
-** but not the Prev instruction.
-**
** See also: NotFound, NoConflict, NotExists. SeekGe
*/
/* Opcode: NotFound P1 P2 P3 P4 *
@@ -76751,10 +70257,6 @@ case OP_Seek: { /* in2 */
** falls through to the next instruction and P1 is left pointing at the
** matching entry.
**
-** This operation leaves the cursor in a state where it cannot be
-** advanced in either direction. In other words, the Next and Prev
-** opcodes do not work after this operation.
-**
** See also: Found, NotExists, NoConflict
*/
/* Opcode: NoConflict P1 P2 P3 P4 *
@@ -76774,17 +70276,13 @@ case OP_Seek: { /* in2 */
** This opcode is similar to OP_NotFound with the exceptions that the
** branch is always taken if any part of the search key input is NULL.
**
-** This operation leaves the cursor in a state where it cannot be
-** advanced in either direction. In other words, the Next and Prev
-** opcodes do not work after this operation.
-**
** See also: NotFound, Found, NotExists
*/
case OP_NoConflict: /* jump, in3 */
case OP_NotFound: /* jump, in3 */
case OP_Found: { /* jump, in3 */
+#if 0 /* local variables moved into u.bf */
int alreadyExists;
- int takeJump;
int ii;
VdbeCursor *pC;
int res;
@@ -76792,72 +70290,72 @@ case OP_Found: { /* jump, in3 */
UnpackedRecord *pIdxKey;
UnpackedRecord r;
char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*4 + 7];
+#endif /* local variables moved into u.bf */
#ifdef SQLITE_TEST
if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++;
#endif
+ u.bf.alreadyExists = 0;
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( pOp->p4type==P4_INT32 );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
-#ifdef SQLITE_DEBUG
- pC->seekOp = pOp->opcode;
-#endif
+ u.bf.pC = p->apCsr[pOp->p1];
+ assert( u.bf.pC!=0 );
pIn3 = &aMem[pOp->p3];
- assert( pC->pCursor!=0 );
- assert( pC->isTable==0 );
- pFree = 0;
+ assert( u.bf.pC->pCursor!=0 );
+ assert( u.bf.pC->isTable==0 );
if( pOp->p4.i>0 ){
- r.pKeyInfo = pC->pKeyInfo;
- r.nField = (u16)pOp->p4.i;
- r.aMem = pIn3;
- for(ii=0; ii<r.nField; ii++){
- assert( memIsValid(&r.aMem[ii]) );
- ExpandBlob(&r.aMem[ii]);
+ u.bf.r.pKeyInfo = u.bf.pC->pKeyInfo;
+ u.bf.r.nField = (u16)pOp->p4.i;
+ u.bf.r.aMem = pIn3;
#ifdef SQLITE_DEBUG
- if( ii ) REGISTER_TRACE(pOp->p3+ii, &r.aMem[ii]);
-#endif
+ {
+ int i;
+ for(i=0; i<u.bf.r.nField; i++){
+ assert( memIsValid(&u.bf.r.aMem[i]) );
+ if( i ) REGISTER_TRACE(pOp->p3+i, &u.bf.r.aMem[i]);
+ }
}
- pIdxKey = &r;
+#endif
+ u.bf.r.flags = UNPACKED_PREFIX_MATCH;
+ u.bf.pIdxKey = &u.bf.r;
}else{
- pIdxKey = sqlite3VdbeAllocUnpackedRecord(
- pC->pKeyInfo, aTempRec, sizeof(aTempRec), &pFree
+ u.bf.pIdxKey = sqlite3VdbeAllocUnpackedRecord(
+ u.bf.pC->pKeyInfo, u.bf.aTempRec, sizeof(u.bf.aTempRec), &u.bf.pFree
);
- if( pIdxKey==0 ) goto no_mem;
+ if( u.bf.pIdxKey==0 ) goto no_mem;
assert( pIn3->flags & MEM_Blob );
- ExpandBlob(pIn3);
- sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey);
+ assert( (pIn3->flags & MEM_Zero)==0 ); /* zeroblobs already expanded */
+ sqlite3VdbeRecordUnpack(u.bf.pC->pKeyInfo, pIn3->n, pIn3->z, u.bf.pIdxKey);
+ u.bf.pIdxKey->flags |= UNPACKED_PREFIX_MATCH;
}
- pIdxKey->default_rc = 0;
- takeJump = 0;
if( pOp->opcode==OP_NoConflict ){
/* For the OP_NoConflict opcode, take the jump if any of the
** input fields are NULL, since any key with a NULL will not
** conflict */
- for(ii=0; ii<pIdxKey->nField; ii++){
- if( pIdxKey->aMem[ii].flags & MEM_Null ){
- takeJump = 1;
+ for(u.bf.ii=0; u.bf.ii<u.bf.r.nField; u.bf.ii++){
+ if( u.bf.r.aMem[u.bf.ii].flags & MEM_Null ){
+ pc = pOp->p2 - 1;
break;
}
}
}
- rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, pIdxKey, 0, 0, &res);
- sqlite3DbFree(db, pFree);
+ rc = sqlite3BtreeMovetoUnpacked(u.bf.pC->pCursor, u.bf.pIdxKey, 0, 0, &u.bf.res);
+ if( pOp->p4.i==0 ){
+ sqlite3DbFree(db, u.bf.pFree);
+ }
if( rc!=SQLITE_OK ){
break;
}
- pC->seekResult = res;
- alreadyExists = (res==0);
- pC->nullRow = 1-alreadyExists;
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
+ u.bf.pC->seekResult = u.bf.res;
+ u.bf.alreadyExists = (u.bf.res==0);
+ u.bf.pC->nullRow = 1-u.bf.alreadyExists;
+ u.bf.pC->deferredMoveto = 0;
+ u.bf.pC->cacheStatus = CACHE_STALE;
if( pOp->opcode==OP_Found ){
- VdbeBranchTaken(alreadyExists!=0,2);
- if( alreadyExists ) goto jump_to_p2;
+ if( u.bf.alreadyExists ) pc = pOp->p2 - 1;
}else{
- VdbeBranchTaken(takeJump||alreadyExists==0,2);
- if( takeJump || !alreadyExists ) goto jump_to_p2;
+ if( !u.bf.alreadyExists ) pc = pOp->p2 - 1;
}
break;
}
@@ -76867,71 +70365,59 @@ case OP_Found: { /* jump, in3 */
**
** P1 is the index of a cursor open on an SQL table btree (with integer
** keys). P3 is an integer rowid. If P1 does not contain a record with
-** rowid P3 then jump immediately to P2. Or, if P2 is 0, raise an
-** SQLITE_CORRUPT error. If P1 does contain a record with rowid P3 then
-** leave the cursor pointing at that record and fall through to the next
-** instruction.
+** rowid P3 then jump immediately to P2. If P1 does contain a record
+** with rowid P3 then leave the cursor pointing at that record and fall
+** through to the next instruction.
**
** The OP_NotFound opcode performs the same operation on index btrees
** (with arbitrary multi-value keys).
**
-** This opcode leaves the cursor in a state where it cannot be advanced
-** in either direction. In other words, the Next and Prev opcodes will
-** not work following this opcode.
-**
** See also: Found, NotFound, NoConflict
*/
case OP_NotExists: { /* jump, in3 */
+#if 0 /* local variables moved into u.bg */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
u64 iKey;
+#endif /* local variables moved into u.bg */
pIn3 = &aMem[pOp->p3];
assert( pIn3->flags & MEM_Int );
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
-#ifdef SQLITE_DEBUG
- pC->seekOp = 0;
-#endif
- assert( pC->isTable );
- assert( pC->pseudoTableReg==0 );
- pCrsr = pC->pCursor;
- assert( pCrsr!=0 );
- res = 0;
- iKey = pIn3->u.i;
- rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res);
- assert( rc==SQLITE_OK || res==0 );
- pC->movetoTarget = iKey; /* Used by OP_Delete */
- pC->nullRow = 0;
- pC->cacheStatus = CACHE_STALE;
- pC->deferredMoveto = 0;
- VdbeBranchTaken(res!=0,2);
- pC->seekResult = res;
- if( res!=0 ){
- assert( rc==SQLITE_OK );
- if( pOp->p2==0 ){
- rc = SQLITE_CORRUPT_BKPT;
- }else{
- goto jump_to_p2;
- }
- }
+ u.bg.pC = p->apCsr[pOp->p1];
+ assert( u.bg.pC!=0 );
+ assert( u.bg.pC->isTable );
+ assert( u.bg.pC->pseudoTableReg==0 );
+ u.bg.pCrsr = u.bg.pC->pCursor;
+ assert( u.bg.pCrsr!=0 );
+ u.bg.res = 0;
+ u.bg.iKey = pIn3->u.i;
+ rc = sqlite3BtreeMovetoUnpacked(u.bg.pCrsr, 0, u.bg.iKey, 0, &u.bg.res);
+ u.bg.pC->lastRowid = pIn3->u.i;
+ u.bg.pC->rowidIsValid = u.bg.res==0 ?1:0;
+ u.bg.pC->nullRow = 0;
+ u.bg.pC->cacheStatus = CACHE_STALE;
+ u.bg.pC->deferredMoveto = 0;
+ if( u.bg.res!=0 ){
+ pc = pOp->p2 - 1;
+ assert( u.bg.pC->rowidIsValid==0 );
+ }
+ u.bg.pC->seekResult = u.bg.res;
break;
}
/* Opcode: Sequence P1 P2 * * *
-** Synopsis: r[P2]=cursor[P1].ctr++
+** Synopsis: r[P2]=rowid
**
** Find the next available sequence number for cursor P1.
** Write the sequence number into register P2.
** The sequence number on the cursor is incremented after this
** instruction.
*/
-case OP_Sequence: { /* out2 */
+case OP_Sequence: { /* out2-prerelease */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( p->apCsr[pOp->p1]!=0 );
- pOut = out2Prerelease(p, pOp);
pOut->u.i = p->apCsr[pOp->p1]->seqCount++;
break;
}
@@ -76952,22 +70438,24 @@ case OP_Sequence: { /* out2 */
** generated record number. This P3 mechanism is used to help implement the
** AUTOINCREMENT feature.
*/
-case OP_NewRowid: { /* out2 */
+case OP_NewRowid: { /* out2-prerelease */
+#if 0 /* local variables moved into u.bh */
i64 v; /* The new rowid */
VdbeCursor *pC; /* Cursor of table to get the new rowid */
int res; /* Result of an sqlite3BtreeLast() */
int cnt; /* Counter to limit the number of searches */
Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */
VdbeFrame *pFrame; /* Root frame of VDBE */
+#endif /* local variables moved into u.bh */
- v = 0;
- res = 0;
- pOut = out2Prerelease(p, pOp);
+ u.bh.v = 0;
+ u.bh.res = 0;
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->pCursor!=0 );
- {
+ u.bh.pC = p->apCsr[pOp->p1];
+ assert( u.bh.pC!=0 );
+ if( NEVER(u.bh.pC->pCursor==0) ){
+ /* The zero initialization above is all that is needed */
+ }else{
/* The next rowid or record number (different terms for the same
** thing) is obtained in a two-step algorithm.
**
@@ -76981,7 +70469,7 @@ case OP_NewRowid: { /* out2 */
** succeeded. If the random rowid does exist, we select a new one
** and try again, up to 100 times.
*/
- assert( pC->isTable );
+ assert( u.bh.pC->isTable );
#ifdef SQLITE_32BIT_ROWID
# define MAX_ROWID 0x7fffffff
@@ -76993,80 +70481,97 @@ case OP_NewRowid: { /* out2 */
# define MAX_ROWID (i64)( (((u64)0x7fffffff)<<32) | (u64)0xffffffff )
#endif
- if( !pC->useRandomRowid ){
- rc = sqlite3BtreeLast(pC->pCursor, &res);
- if( rc!=SQLITE_OK ){
- goto abort_due_to_error;
- }
- if( res ){
- v = 1; /* IMP: R-61914-48074 */
- }else{
- assert( sqlite3BtreeCursorIsValid(pC->pCursor) );
- rc = sqlite3BtreeKeySize(pC->pCursor, &v);
- assert( rc==SQLITE_OK ); /* Cannot fail following BtreeLast() */
- if( v>=MAX_ROWID ){
- pC->useRandomRowid = 1;
+ if( !u.bh.pC->useRandomRowid ){
+ u.bh.v = sqlite3BtreeGetCachedRowid(u.bh.pC->pCursor);
+ if( u.bh.v==0 ){
+ rc = sqlite3BtreeLast(u.bh.pC->pCursor, &u.bh.res);
+ if( rc!=SQLITE_OK ){
+ goto abort_due_to_error;
+ }
+ if( u.bh.res ){
+ u.bh.v = 1; /* IMP: R-61914-48074 */
}else{
- v++; /* IMP: R-29538-34987 */
+ assert( sqlite3BtreeCursorIsValid(u.bh.pC->pCursor) );
+ rc = sqlite3BtreeKeySize(u.bh.pC->pCursor, &u.bh.v);
+ assert( rc==SQLITE_OK ); /* Cannot fail following BtreeLast() */
+ if( u.bh.v>=MAX_ROWID ){
+ u.bh.pC->useRandomRowid = 1;
+ }else{
+ u.bh.v++; /* IMP: R-29538-34987 */
+ }
}
}
- }
#ifndef SQLITE_OMIT_AUTOINCREMENT
- if( pOp->p3 ){
- /* Assert that P3 is a valid memory cell. */
- assert( pOp->p3>0 );
- if( p->pFrame ){
- for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
- /* Assert that P3 is a valid memory cell. */
- assert( pOp->p3<=pFrame->nMem );
- pMem = &pFrame->aMem[pOp->p3];
- }else{
+ if( pOp->p3 ){
/* Assert that P3 is a valid memory cell. */
- assert( pOp->p3<=(p->nMem-p->nCursor) );
- pMem = &aMem[pOp->p3];
- memAboutToChange(p, pMem);
- }
- assert( memIsValid(pMem) );
+ assert( pOp->p3>0 );
+ if( p->pFrame ){
+ for(u.bh.pFrame=p->pFrame; u.bh.pFrame->pParent; u.bh.pFrame=u.bh.pFrame->pParent);
+ /* Assert that P3 is a valid memory cell. */
+ assert( pOp->p3<=u.bh.pFrame->nMem );
+ u.bh.pMem = &u.bh.pFrame->aMem[pOp->p3];
+ }else{
+ /* Assert that P3 is a valid memory cell. */
+ assert( pOp->p3<=(p->nMem-p->nCursor) );
+ u.bh.pMem = &aMem[pOp->p3];
+ memAboutToChange(p, u.bh.pMem);
+ }
+ assert( memIsValid(u.bh.pMem) );
- REGISTER_TRACE(pOp->p3, pMem);
- sqlite3VdbeMemIntegerify(pMem);
- assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */
- if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){
- rc = SQLITE_FULL; /* IMP: R-12275-61338 */
- goto abort_due_to_error;
- }
- if( v<pMem->u.i+1 ){
- v = pMem->u.i + 1;
+ REGISTER_TRACE(pOp->p3, u.bh.pMem);
+ sqlite3VdbeMemIntegerify(u.bh.pMem);
+ assert( (u.bh.pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */
+ if( u.bh.pMem->u.i==MAX_ROWID || u.bh.pC->useRandomRowid ){
+ rc = SQLITE_FULL; /* IMP: R-12275-61338 */
+ goto abort_due_to_error;
+ }
+ if( u.bh.v<u.bh.pMem->u.i+1 ){
+ u.bh.v = u.bh.pMem->u.i + 1;
+ }
+ u.bh.pMem->u.i = u.bh.v;
}
- pMem->u.i = v;
- }
#endif
- if( pC->useRandomRowid ){
+
+ sqlite3BtreeSetCachedRowid(u.bh.pC->pCursor, u.bh.v<MAX_ROWID ? u.bh.v+1 : 0);
+ }
+ if( u.bh.pC->useRandomRowid ){
/* IMPLEMENTATION-OF: R-07677-41881 If the largest ROWID is equal to the
** largest possible integer (9223372036854775807) then the database
** engine starts picking positive candidate ROWIDs at random until
** it finds one that is not previously used. */
assert( pOp->p3==0 ); /* We cannot be in random rowid mode if this is
** an AUTOINCREMENT table. */
- cnt = 0;
- do{
- sqlite3_randomness(sizeof(v), &v);
- v &= (MAX_ROWID>>1); v++; /* Ensure that v is greater than zero */
- }while( ((rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)v,
- 0, &res))==SQLITE_OK)
- && (res==0)
- && (++cnt<100));
- if( rc==SQLITE_OK && res==0 ){
+ /* on the first attempt, simply do one more than previous */
+ u.bh.v = lastRowid;
+ u.bh.v &= (MAX_ROWID>>1); /* ensure doesn't go negative */
+ u.bh.v++; /* ensure non-zero */
+ u.bh.cnt = 0;
+ while( ((rc = sqlite3BtreeMovetoUnpacked(u.bh.pC->pCursor, 0, (u64)u.bh.v,
+ 0, &u.bh.res))==SQLITE_OK)
+ && (u.bh.res==0)
+ && (++u.bh.cnt<100)){
+ /* collision - try another random rowid */
+ sqlite3_randomness(sizeof(u.bh.v), &u.bh.v);
+ if( u.bh.cnt<5 ){
+ /* try "small" random rowids for the initial attempts */
+ u.bh.v &= 0xffffff;
+ }else{
+ u.bh.v &= (MAX_ROWID>>1); /* ensure doesn't go negative */
+ }
+ u.bh.v++; /* ensure non-zero */
+ }
+ if( rc==SQLITE_OK && u.bh.res==0 ){
rc = SQLITE_FULL; /* IMP: R-38219-53002 */
goto abort_due_to_error;
}
- assert( v>0 ); /* EV: R-40812-03570 */
+ assert( u.bh.v>0 ); /* EV: R-40812-03570 */
}
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
+ u.bh.pC->rowidIsValid = 0;
+ u.bh.pC->deferredMoveto = 0;
+ u.bh.pC->cacheStatus = CACHE_STALE;
}
- pOut->u.i = v;
+ pOut->u.i = u.bh.v;
break;
}
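
The rowid allocated here feeds OP_Insert below, which copies it into db->lastRowid when OPFLAG_LASTROWID is set; that is the value sqlite3_last_insert_rowid() reports back to the application. A short illustrative sketch (helper name invented, not code from this patch):

  #include <stdio.h>
  #include <sqlite3.h>

  /* Illustrative only: the rowid chosen by OP_NewRowid for an INSERT
  ** without an explicit rowid is visible through the C API. */
  void rowid_demo(sqlite3 *db){
    sqlite3_exec(db, "CREATE TABLE t(x);", 0, 0, 0);
    sqlite3_exec(db, "INSERT INTO t VALUES('hello');", 0, 0, 0);
    printf("new rowid = %lld\n", (long long)sqlite3_last_insert_rowid(db));
  }
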
@@ -77118,6 +70623,7 @@ case OP_NewRowid: { /* out2 */
*/
case OP_Insert:
case OP_InsertInt: {
+#if 0 /* local variables moved into u.bi */
Mem *pData; /* MEM cell holding data for the record to be inserted */
Mem *pKey; /* MEM cell holding key for the record */
i64 iKey; /* The integer ROWID or key for the record to be inserted */
@@ -77127,70 +70633,72 @@ case OP_InsertInt: {
const char *zDb; /* database name - used by the update hook */
  const char *zTbl; /* Table name - used by the update hook */
int op; /* Opcode for update hook: SQLITE_UPDATE or SQLITE_INSERT */
+#endif /* local variables moved into u.bi */
- pData = &aMem[pOp->p2];
+ u.bi.pData = &aMem[pOp->p2];
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- assert( memIsValid(pData) );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->pCursor!=0 );
- assert( pC->pseudoTableReg==0 );
- assert( pC->isTable );
- REGISTER_TRACE(pOp->p2, pData);
+ assert( memIsValid(u.bi.pData) );
+ u.bi.pC = p->apCsr[pOp->p1];
+ assert( u.bi.pC!=0 );
+ assert( u.bi.pC->pCursor!=0 );
+ assert( u.bi.pC->pseudoTableReg==0 );
+ assert( u.bi.pC->isTable );
+ REGISTER_TRACE(pOp->p2, u.bi.pData);
if( pOp->opcode==OP_Insert ){
- pKey = &aMem[pOp->p3];
- assert( pKey->flags & MEM_Int );
- assert( memIsValid(pKey) );
- REGISTER_TRACE(pOp->p3, pKey);
- iKey = pKey->u.i;
+ u.bi.pKey = &aMem[pOp->p3];
+ assert( u.bi.pKey->flags & MEM_Int );
+ assert( memIsValid(u.bi.pKey) );
+ REGISTER_TRACE(pOp->p3, u.bi.pKey);
+ u.bi.iKey = u.bi.pKey->u.i;
}else{
assert( pOp->opcode==OP_InsertInt );
- iKey = pOp->p3;
+ u.bi.iKey = pOp->p3;
}
if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
- if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = iKey;
- if( pData->flags & MEM_Null ){
- pData->z = 0;
- pData->n = 0;
+ if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = u.bi.iKey;
+ if( u.bi.pData->flags & MEM_Null ){
+ u.bi.pData->z = 0;
+ u.bi.pData->n = 0;
}else{
- assert( pData->flags & (MEM_Blob|MEM_Str) );
+ assert( u.bi.pData->flags & (MEM_Blob|MEM_Str) );
}
- seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0);
- if( pData->flags & MEM_Zero ){
- nZero = pData->u.nZero;
+ u.bi.seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? u.bi.pC->seekResult : 0);
+ if( u.bi.pData->flags & MEM_Zero ){
+ u.bi.nZero = u.bi.pData->u.nZero;
}else{
- nZero = 0;
+ u.bi.nZero = 0;
}
- rc = sqlite3BtreeInsert(pC->pCursor, 0, iKey,
- pData->z, pData->n, nZero,
- (pOp->p5 & OPFLAG_APPEND)!=0, seekResult
+ sqlite3BtreeSetCachedRowid(u.bi.pC->pCursor, 0);
+ rc = sqlite3BtreeInsert(u.bi.pC->pCursor, 0, u.bi.iKey,
+ u.bi.pData->z, u.bi.pData->n, u.bi.nZero,
+ (pOp->p5 & OPFLAG_APPEND)!=0, u.bi.seekResult
);
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
+ u.bi.pC->rowidIsValid = 0;
+ u.bi.pC->deferredMoveto = 0;
+ u.bi.pC->cacheStatus = CACHE_STALE;
/* Invoke the update-hook if required. */
if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z ){
- zDb = db->aDb[pC->iDb].zName;
- zTbl = pOp->p4.z;
- op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT);
- assert( pC->isTable );
- db->xUpdateCallback(db->pUpdateArg, op, zDb, zTbl, iKey);
- assert( pC->iDb>=0 );
+ u.bi.zDb = db->aDb[u.bi.pC->iDb].zName;
+ u.bi.zTbl = pOp->p4.z;
+ u.bi.op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT);
+ assert( u.bi.pC->isTable );
+ db->xUpdateCallback(db->pUpdateArg, u.bi.op, u.bi.zDb, u.bi.zTbl, u.bi.iKey);
+ assert( u.bi.pC->iDb>=0 );
}
break;
}
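
Both OP_Insert above and OP_Delete below invoke db->xUpdateCallback when P4 names a table; applications register that callback with sqlite3_update_hook(). A minimal illustrative sketch (function names invented, not part of this patch):

  #include <stdio.h>
  #include <sqlite3.h>

  /* Illustrative only: log every rowid-level change reported by the
  ** update hook that the insert/delete opcodes fire. */
  static void on_change(void *pArg, int op, const char *zDb,
                        const char *zTbl, sqlite3_int64 rowid){
    const char *zOp = op==SQLITE_INSERT ? "INSERT" :
                      op==SQLITE_DELETE ? "DELETE" : "UPDATE";
    printf("%s %s.%s rowid=%lld\n", zOp, zDb, zTbl, (long long)rowid);
    (void)pArg;
  }

  void install_update_hook(sqlite3 *db){
    sqlite3_update_hook(db, on_change, 0);
  }
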
-/* Opcode: Delete P1 P2 * P4 P5
+/* Opcode: Delete P1 P2 * P4 *
**
** Delete the record at which the P1 cursor is currently pointing.
**
-** If the P5 parameter is non-zero, the cursor will be left pointing at
-** either the next or the previous record in the table. If it is left
-** pointing at the next record, then the next Next instruction will be a
-** no-op. As a result, in this case it is OK to delete a record from within a
-** Next loop. If P5 is zero, then the cursor is left in an undefined state.
+** The cursor will be left pointing at either the next or the previous
+** record in the table. If it is left pointing at the next record, then
+** the next Next instruction will be a no-op. Hence it is OK to delete
+** a record from within a Next loop.
**
** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is
** incremented (otherwise not).
@@ -77204,39 +70712,37 @@ case OP_InsertInt: {
** using OP_NotFound prior to invoking this opcode.
*/
case OP_Delete: {
+#if 0 /* local variables moved into u.bj */
+ i64 iKey;
VdbeCursor *pC;
- u8 hasUpdateCallback;
+#endif /* local variables moved into u.bj */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->pCursor!=0 ); /* Only valid for real tables, no pseudotables */
- assert( pC->deferredMoveto==0 );
+ u.bj.pC = p->apCsr[pOp->p1];
+ assert( u.bj.pC!=0 );
+ assert( u.bj.pC->pCursor!=0 ); /* Only valid for real tables, no pseudotables */
+ u.bj.iKey = u.bj.pC->lastRowid; /* Only used for the update hook */
+
+ /* The OP_Delete opcode always follows an OP_NotExists or OP_Last or
+ ** OP_Column on the same table without any intervening operations that
+ ** might move or invalidate the cursor. Hence cursor u.bj.pC is always pointing
+ ** to the row to be deleted and the sqlite3VdbeCursorMoveto() operation
+ ** below is always a no-op and cannot fail. We will run it anyhow, though,
+ ** to guard against future changes to the code generator.
+ **/
+ assert( u.bj.pC->deferredMoveto==0 );
+ rc = sqlite3VdbeCursorMoveto(u.bj.pC);
+ if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error;
- hasUpdateCallback = db->xUpdateCallback && pOp->p4.z && pC->isTable;
- if( pOp->p5 && hasUpdateCallback ){
- sqlite3BtreeKeySize(pC->pCursor, &pC->movetoTarget);
- }
-
-#ifdef SQLITE_DEBUG
- /* The seek operation that positioned the cursor prior to OP_Delete will
- ** have also set the pC->movetoTarget field to the rowid of the row that
- ** is being deleted */
- if( pOp->p4.z && pC->isTable && pOp->p5==0 ){
- i64 iKey = 0;
- sqlite3BtreeKeySize(pC->pCursor, &iKey);
- assert( pC->movetoTarget==iKey );
- }
-#endif
-
- rc = sqlite3BtreeDelete(pC->pCursor, pOp->p5);
- pC->cacheStatus = CACHE_STALE;
+ sqlite3BtreeSetCachedRowid(u.bj.pC->pCursor, 0);
+ rc = sqlite3BtreeDelete(u.bj.pC->pCursor);
+ u.bj.pC->cacheStatus = CACHE_STALE;
/* Invoke the update-hook if required. */
- if( rc==SQLITE_OK && hasUpdateCallback ){
+ if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z && u.bj.pC->isTable ){
db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE,
- db->aDb[pC->iDb].zName, pOp->p4.z, pC->movetoTarget);
- assert( pC->iDb>=0 );
+ db->aDb[u.bj.pC->iDb].zName, pOp->p4.z, u.bj.iKey);
+ assert( u.bj.pC->iDb>=0 );
}
if( pOp->p2 & OPFLAG_NCHANGE ) p->nChange++;
break;
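The p->nChange counter bumped when OPFLAG_NCHANGE is set is the value the public sqlite3_changes() interface reports once the statement finishes. A small sketch, assuming a table named t (the table and WHERE clause are illustrative):

#include <stdio.h>
#include <sqlite3.h>

/* Delete some rows and report how many the statement touched. */
static int delete_and_count(sqlite3 *db){
  int rc = sqlite3_exec(db, "DELETE FROM t WHERE x>10;", 0, 0, 0);
  if( rc==SQLITE_OK ){
    printf("%d row(s) deleted\n", sqlite3_changes(db));
  }
  return rc;
}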
@@ -77255,12 +70761,12 @@ case OP_ResetCount: {
}
/* Opcode: SorterCompare P1 P2 P3 P4
-** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2
+** Synopsis: if key(P1)!=rtrim(r[P3],P4) goto P2
**
** P1 is a sorter cursor. This instruction compares a prefix of the
-** record blob in register P3 against a prefix of the entry that
-** the sorter cursor currently points to. Only the first P4 fields
-** of r[P3] and the sorter record are compared.
+** record blob in register P3 against a prefix of the entry that
+** the sorter cursor currently points to. The final P4 fields of both
+** the P3 and sorter record are ignored.
**
** If either P3 or the sorter contains a NULL in one of their significant
** fields (not counting the P4 fields at the end which are ignored) then
@@ -77270,44 +70776,38 @@ case OP_ResetCount: {
** each other. Jump to P2 if they are different.
*/
case OP_SorterCompare: {
+#if 0 /* local variables moved into u.bk */
VdbeCursor *pC;
int res;
- int nKeyCol;
+ int nIgnore;
+#endif /* local variables moved into u.bk */
- pC = p->apCsr[pOp->p1];
- assert( isSorter(pC) );
+ u.bk.pC = p->apCsr[pOp->p1];
+ assert( isSorter(u.bk.pC) );
assert( pOp->p4type==P4_INT32 );
pIn3 = &aMem[pOp->p3];
- nKeyCol = pOp->p4.i;
- res = 0;
- rc = sqlite3VdbeSorterCompare(pC, pIn3, nKeyCol, &res);
- VdbeBranchTaken(res!=0,2);
- if( res ) goto jump_to_p2;
+ u.bk.nIgnore = pOp->p4.i;
+ rc = sqlite3VdbeSorterCompare(u.bk.pC, pIn3, u.bk.nIgnore, &u.bk.res);
+ if( u.bk.res ){
+ pc = pOp->p2-1;
+ }
break;
};
-/* Opcode: SorterData P1 P2 P3 * *
+/* Opcode: SorterData P1 P2 * * *
** Synopsis: r[P2]=data
**
** Write into register P2 the current sorter data for sorter cursor P1.
-** Then clear the column header cache on cursor P3.
-**
-** This opcode is normally use to move a record out of the sorter and into
-** a register that is the source for a pseudo-table cursor created using
-** OpenPseudo. That pseudo-table cursor is the one that is identified by
-** parameter P3. Clearing the P3 column cache as part of this opcode saves
-** us from having to issue a separate NullRow instruction to clear that cache.
*/
case OP_SorterData: {
+#if 0 /* local variables moved into u.bl */
VdbeCursor *pC;
+#endif /* local variables moved into u.bl */
pOut = &aMem[pOp->p2];
- pC = p->apCsr[pOp->p1];
- assert( isSorter(pC) );
- rc = sqlite3VdbeSorterRowkey(pC, pOut);
- assert( rc!=SQLITE_OK || (pOut->flags & MEM_Blob) );
- assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- p->apCsr[pOp->p3]->cacheStatus = CACHE_STALE;
+ u.bl.pC = p->apCsr[pOp->p1];
+ assert( isSorter(u.bl.pC) );
+ rc = sqlite3VdbeSorterRowkey(u.bl.pC, pOut);
break;
}
@@ -77327,7 +70827,7 @@ case OP_SorterData: {
**
** Write into register P2 the complete row key for cursor P1.
** There is no interpretation of the data.
-** The key is copied onto the P2 register exactly as
+** The key is copied onto the P2 register exactly as
** it is found in the database file.
**
** The P1 cursor must be pointing to a valid row (not a NULL row)
@@ -77335,65 +70835,62 @@ case OP_SorterData: {
*/
case OP_RowKey:
case OP_RowData: {
+#if 0 /* local variables moved into u.bm */
VdbeCursor *pC;
BtCursor *pCrsr;
u32 n;
i64 n64;
+#endif /* local variables moved into u.bm */
pOut = &aMem[pOp->p2];
memAboutToChange(p, pOut);
/* Note that RowKey and RowData are really exactly the same instruction */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( isSorter(pC)==0 );
- assert( pC->isTable || pOp->opcode!=OP_RowData );
- assert( pC->isTable==0 || pOp->opcode==OP_RowData );
- assert( pC!=0 );
- assert( pC->nullRow==0 );
- assert( pC->pseudoTableReg==0 );
- assert( pC->pCursor!=0 );
- pCrsr = pC->pCursor;
+ u.bm.pC = p->apCsr[pOp->p1];
+ assert( isSorter(u.bm.pC)==0 );
+ assert( u.bm.pC->isTable || pOp->opcode!=OP_RowData );
+ assert( u.bm.pC->isTable==0 || pOp->opcode==OP_RowData );
+ assert( u.bm.pC!=0 );
+ assert( u.bm.pC->nullRow==0 );
+ assert( u.bm.pC->pseudoTableReg==0 );
+ assert( u.bm.pC->pCursor!=0 );
+ u.bm.pCrsr = u.bm.pC->pCursor;
+ assert( sqlite3BtreeCursorIsValid(u.bm.pCrsr) );
/* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or
** OP_Rewind/Op_Next with no intervening instructions that might invalidate
- ** the cursor. If this where not the case, on of the following assert()s
- ** would fail. Should this ever change (because of changes in the code
- ** generator) then the fix would be to insert a call to
- ** sqlite3VdbeCursorMoveto().
+ ** the cursor. Hence the following sqlite3VdbeCursorMoveto() call is always
+ ** a no-op and can never fail. But we leave it in place as a safety.
*/
- assert( pC->deferredMoveto==0 );
- assert( sqlite3BtreeCursorIsValid(pCrsr) );
-#if 0 /* Not required due to the previous to assert() statements */
- rc = sqlite3VdbeCursorMoveto(pC);
- if( rc!=SQLITE_OK ) goto abort_due_to_error;
-#endif
+ assert( u.bm.pC->deferredMoveto==0 );
+ rc = sqlite3VdbeCursorMoveto(u.bm.pC);
+ if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error;
- if( pC->isTable==0 ){
- assert( !pC->isTable );
- VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &n64);
+ if( u.bm.pC->isTable==0 ){
+ assert( !u.bm.pC->isTable );
+ VVA_ONLY(rc =) sqlite3BtreeKeySize(u.bm.pCrsr, &u.bm.n64);
assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */
- if( n64>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ if( u.bm.n64>db->aLimit[SQLITE_LIMIT_LENGTH] ){
goto too_big;
}
- n = (u32)n64;
+ u.bm.n = (u32)u.bm.n64;
}else{
- VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &n);
+ VVA_ONLY(rc =) sqlite3BtreeDataSize(u.bm.pCrsr, &u.bm.n);
assert( rc==SQLITE_OK ); /* DataSize() cannot fail */
- if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ if( u.bm.n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
goto too_big;
}
}
- testcase( n==0 );
- if( sqlite3VdbeMemClearAndResize(pOut, MAX(n,32)) ){
+ if( sqlite3VdbeMemGrow(pOut, u.bm.n, 0) ){
goto no_mem;
}
- pOut->n = n;
+ pOut->n = u.bm.n;
MemSetTypeFlag(pOut, MEM_Blob);
- if( pC->isTable==0 ){
- rc = sqlite3BtreeKey(pCrsr, 0, n, pOut->z);
+ if( u.bm.pC->isTable==0 ){
+ rc = sqlite3BtreeKey(u.bm.pCrsr, 0, u.bm.n, pOut->z);
}else{
- rc = sqlite3BtreeData(pCrsr, 0, n, pOut->z);
+ rc = sqlite3BtreeData(u.bm.pCrsr, 0, u.bm.n, pOut->z);
}
pOut->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */
UPDATE_MAX_BLOBSIZE(pOut);
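The too_big jumps above fire when a key or row exceeds db->aLimit[SQLITE_LIMIT_LENGTH]; applications can lower that bound per connection with the public sqlite3_limit() interface. A hedged sketch (the chosen cap is arbitrary):

#include <sqlite3.h>

/* Cap the largest row or key this connection will materialize at 1 MiB.
** sqlite3_limit() returns the limit's previous value. */
static int cap_row_length(sqlite3 *db){
  return sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1024*1024);
}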
@@ -77411,42 +70908,43 @@ case OP_RowData: {
** be a separate OP_VRowid opcode for use with virtual tables, but this
** one opcode now works for both table types.
*/
-case OP_Rowid: { /* out2 */
+case OP_Rowid: { /* out2-prerelease */
+#if 0 /* local variables moved into u.bn */
VdbeCursor *pC;
i64 v;
sqlite3_vtab *pVtab;
const sqlite3_module *pModule;
+#endif /* local variables moved into u.bn */
- pOut = out2Prerelease(p, pOp);
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->pseudoTableReg==0 || pC->nullRow );
- if( pC->nullRow ){
+ u.bn.pC = p->apCsr[pOp->p1];
+ assert( u.bn.pC!=0 );
+ assert( u.bn.pC->pseudoTableReg==0 || u.bn.pC->nullRow );
+ if( u.bn.pC->nullRow ){
pOut->flags = MEM_Null;
break;
- }else if( pC->deferredMoveto ){
- v = pC->movetoTarget;
+ }else if( u.bn.pC->deferredMoveto ){
+ u.bn.v = u.bn.pC->movetoTarget;
#ifndef SQLITE_OMIT_VIRTUALTABLE
- }else if( pC->pVtabCursor ){
- pVtab = pC->pVtabCursor->pVtab;
- pModule = pVtab->pModule;
- assert( pModule->xRowid );
- rc = pModule->xRowid(pC->pVtabCursor, &v);
- sqlite3VtabImportErrmsg(p, pVtab);
+ }else if( u.bn.pC->pVtabCursor ){
+ u.bn.pVtab = u.bn.pC->pVtabCursor->pVtab;
+ u.bn.pModule = u.bn.pVtab->pModule;
+ assert( u.bn.pModule->xRowid );
+ rc = u.bn.pModule->xRowid(u.bn.pC->pVtabCursor, &u.bn.v);
+ sqlite3VtabImportErrmsg(p, u.bn.pVtab);
#endif /* SQLITE_OMIT_VIRTUALTABLE */
}else{
- assert( pC->pCursor!=0 );
- rc = sqlite3VdbeCursorRestore(pC);
+ assert( u.bn.pC->pCursor!=0 );
+ rc = sqlite3VdbeCursorMoveto(u.bn.pC);
if( rc ) goto abort_due_to_error;
- if( pC->nullRow ){
- pOut->flags = MEM_Null;
- break;
+ if( u.bn.pC->rowidIsValid ){
+ u.bn.v = u.bn.pC->lastRowid;
+ }else{
+ rc = sqlite3BtreeKeySize(u.bn.pC->pCursor, &u.bn.v);
+ assert( rc==SQLITE_OK ); /* Always so because of CursorMoveto() above */
}
- rc = sqlite3BtreeKeySize(pC->pCursor, &v);
- assert( rc==SQLITE_OK ); /* Always so because of CursorRestore() above */
}
- pOut->u.i = v;
+ pOut->u.i = u.bn.v;
break;
}
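At the SQL level the value produced by OP_Rowid is simply the rowid pseudo-column of the current row. A sketch of reading it back, assuming a rowid table named t (illustrative):

#include <stdio.h>
#include <sqlite3.h>

/* Read back rowids the way OP_Rowid exposes them to SQL. */
static int list_rowids(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db, "SELECT rowid FROM t;", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("rowid=%lld\n", (long long)sqlite3_column_int64(pStmt, 0));
  }
  return sqlite3_finalize(pStmt);
}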
@@ -77457,53 +70955,51 @@ case OP_Rowid: { /* out2 */
** write a NULL.
*/
case OP_NullRow: {
+#if 0 /* local variables moved into u.bo */
VdbeCursor *pC;
+#endif /* local variables moved into u.bo */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- pC->nullRow = 1;
- pC->cacheStatus = CACHE_STALE;
- if( pC->pCursor ){
- sqlite3BtreeClearCursor(pC->pCursor);
+ u.bo.pC = p->apCsr[pOp->p1];
+ assert( u.bo.pC!=0 );
+ u.bo.pC->nullRow = 1;
+ u.bo.pC->rowidIsValid = 0;
+ u.bo.pC->cacheStatus = CACHE_STALE;
+ assert( u.bo.pC->pCursor || u.bo.pC->pVtabCursor );
+ if( u.bo.pC->pCursor ){
+ sqlite3BtreeClearCursor(u.bo.pC->pCursor);
}
break;
}
-/* Opcode: Last P1 P2 P3 * *
+/* Opcode: Last P1 P2 * * *
**
-** The next use of the Rowid or Column or Prev instruction for P1
+** The next use of the Rowid or Column or Next instruction for P1
** will refer to the last entry in the database table or index.
** If the table or index is empty and P2>0, then jump immediately to P2.
** If P2 is 0 or if the table or index is not empty, fall through
** to the following instruction.
-**
-** This opcode leaves the cursor configured to move in reverse order,
-** from the end toward the beginning. In other words, the cursor is
-** configured to use Prev, not Next.
*/
case OP_Last: { /* jump */
+#if 0 /* local variables moved into u.bp */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
+#endif /* local variables moved into u.bp */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- pCrsr = pC->pCursor;
- res = 0;
- assert( pCrsr!=0 );
- rc = sqlite3BtreeLast(pCrsr, &res);
- pC->nullRow = (u8)res;
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
- pC->seekResult = pOp->p3;
-#ifdef SQLITE_DEBUG
- pC->seekOp = OP_Last;
-#endif
- if( pOp->p2>0 ){
- VdbeBranchTaken(res!=0,2);
- if( res ) goto jump_to_p2;
+ u.bp.pC = p->apCsr[pOp->p1];
+ assert( u.bp.pC!=0 );
+ u.bp.pCrsr = u.bp.pC->pCursor;
+ u.bp.res = 0;
+ assert( u.bp.pCrsr!=0 );
+ rc = sqlite3BtreeLast(u.bp.pCrsr, &u.bp.res);
+ u.bp.pC->nullRow = (u8)u.bp.res;
+ u.bp.pC->deferredMoveto = 0;
+ u.bp.pC->rowidIsValid = 0;
+ u.bp.pC->cacheStatus = CACHE_STALE;
+ if( pOp->p2>0 && u.bp.res ){
+ pc = pOp->p2 - 1;
}
break;
}
@@ -77534,62 +71030,50 @@ case OP_Sort: { /* jump */
**
** The next use of the Rowid or Column or Next instruction for P1
** will refer to the first entry in the database table or index.
-** If the table or index is empty, jump immediately to P2.
-** If the table or index is not empty, fall through to the following
-** instruction.
-**
-** This opcode leaves the cursor configured to move in forward order,
-** from the beginning toward the end. In other words, the cursor is
-** configured to use Next, not Prev.
+** If the table or index is empty and P2>0, then jump immediately to P2.
+** If P2 is 0 or if the table or index is not empty, fall through
+** to the following instruction.
*/
case OP_Rewind: { /* jump */
+#if 0 /* local variables moved into u.bq */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
+#endif /* local variables moved into u.bq */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) );
- res = 1;
-#ifdef SQLITE_DEBUG
- pC->seekOp = OP_Rewind;
-#endif
- if( isSorter(pC) ){
- rc = sqlite3VdbeSorterRewind(pC, &res);
- }else{
- pCrsr = pC->pCursor;
- assert( pCrsr );
- rc = sqlite3BtreeFirst(pCrsr, &res);
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
- }
- pC->nullRow = (u8)res;
+ u.bq.pC = p->apCsr[pOp->p1];
+ assert( u.bq.pC!=0 );
+ assert( isSorter(u.bq.pC)==(pOp->opcode==OP_SorterSort) );
+ u.bq.res = 1;
+ if( isSorter(u.bq.pC) ){
+ rc = sqlite3VdbeSorterRewind(db, u.bq.pC, &u.bq.res);
+ }else{
+ u.bq.pCrsr = u.bq.pC->pCursor;
+ assert( u.bq.pCrsr );
+ rc = sqlite3BtreeFirst(u.bq.pCrsr, &u.bq.res);
+ u.bq.pC->deferredMoveto = 0;
+ u.bq.pC->cacheStatus = CACHE_STALE;
+ u.bq.pC->rowidIsValid = 0;
+ }
+ u.bq.pC->nullRow = (u8)u.bq.res;
assert( pOp->p2>0 && pOp->p2<p->nOp );
- VdbeBranchTaken(res!=0,2);
- if( res ) goto jump_to_p2;
+ if( u.bq.res ){
+ pc = pOp->p2 - 1;
+ }
break;
}
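OP_Rewind together with the OP_Next family below forms the loop that drives a plain full-table scan; the generated program can be inspected with EXPLAIN. A sketch, assuming a table named t (the exact opcode listing depends on the schema and SQLite version):

#include <stdio.h>
#include <sqlite3.h>

/* Dump the VDBE program for a full scan; such a program typically
** contains OpenRead, Rewind, Column, ResultRow and Next opcodes. */
static int show_scan_program(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db, "EXPLAIN SELECT * FROM t;", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%4d %s\n", sqlite3_column_int(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  return sqlite3_finalize(pStmt);
}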
-/* Opcode: Next P1 P2 P3 P4 P5
+/* Opcode: Next P1 P2 * * P5
**
** Advance cursor P1 so that it points to the next key/data pair in its
** table or index. If there are no more key/value pairs then fall through
** to the following instruction. But if the cursor advance was successful,
** jump immediately to P2.
**
-** The Next opcode is only valid following an SeekGT, SeekGE, or
-** OP_Rewind opcode used to position the cursor. Next is not allowed
-** to follow SeekLT, SeekLE, or OP_Last.
-**
** The P1 cursor must be for a real table, not a pseudo-table. P1 must have
** been opened prior to this opcode or the program will segfault.
**
-** The P3 value is a hint to the btree implementation. If P3==1, that
-** means P1 is an SQL index and that this instruction could have been
-** omitted if that index had been unique. P3 is usually 0. P3 is
-** always either 0 or 1.
-**
** P4 is always of type P4_ADVANCE. The function pointer points to
** sqlite3BtreeNext().
**
@@ -77598,50 +71082,41 @@ case OP_Rewind: { /* jump */
**
** See also: Prev, NextIfOpen
*/
-/* Opcode: NextIfOpen P1 P2 P3 P4 P5
+/* Opcode: NextIfOpen P1 P2 * * P5
**
-** This opcode works just like Next except that if cursor P1 is not
+** This opcode works just like OP_Next except that if cursor P1 is not
+** open it behaves as a no-op.
*/
-/* Opcode: Prev P1 P2 P3 P4 P5
+/* Opcode: Prev P1 P2 * * P5
**
** Back up cursor P1 so that it points to the previous key/data pair in its
** table or index. If there is no previous key/value pairs then fall through
** to the following instruction. But if the cursor backup was successful,
** jump immediately to P2.
**
-**
-** The Prev opcode is only valid following an SeekLT, SeekLE, or
-** OP_Last opcode used to position the cursor. Prev is not allowed
-** to follow SeekGT, SeekGE, or OP_Rewind.
-**
** The P1 cursor must be for a real table, not a pseudo-table. If P1 is
** not open then the behavior is undefined.
**
-** The P3 value is a hint to the btree implementation. If P3==1, that
-** means P1 is an SQL index and that this instruction could have been
-** omitted if that index had been unique. P3 is usually 0. P3 is
-** always either 0 or 1.
-**
** P4 is always of type P4_ADVANCE. The function pointer points to
** sqlite3BtreePrevious().
**
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
*/
-/* Opcode: PrevIfOpen P1 P2 P3 P4 P5
+/* Opcode: PrevIfOpen P1 P2 * * P5
**
-** This opcode works just like Prev except that if cursor P1 is not
+** This opcode works just like OP_Prev except that if cursor P1 is not
+** open it behaves as a no-op.
*/
case OP_SorterNext: { /* jump */
+#if 0 /* local variables moved into u.br */
VdbeCursor *pC;
int res;
+#endif /* local variables moved into u.br */
- pC = p->apCsr[pOp->p1];
- assert( isSorter(pC) );
- res = 0;
- rc = sqlite3VdbeSorterNext(db, pC, &res);
+ u.br.pC = p->apCsr[pOp->p1];
+ assert( isSorter(u.br.pC) );
+ rc = sqlite3VdbeSorterNext(db, u.br.pC, &u.br.res);
goto next_tail;
case OP_PrevIfOpen: /* jump */
case OP_NextIfOpen: /* jump */
@@ -77651,41 +71126,28 @@ case OP_Prev: /* jump */
case OP_Next: /* jump */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( pOp->p5<ArraySize(p->aCounter) );
- pC = p->apCsr[pOp->p1];
- res = pOp->p3;
- assert( pC!=0 );
- assert( pC->deferredMoveto==0 );
- assert( pC->pCursor );
- assert( res==0 || (res==1 && pC->isTable==0) );
- testcase( res==1 );
+ u.br.pC = p->apCsr[pOp->p1];
+ assert( u.br.pC!=0 );
+ assert( u.br.pC->deferredMoveto==0 );
+ assert( u.br.pC->pCursor );
assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext );
assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious );
assert( pOp->opcode!=OP_NextIfOpen || pOp->p4.xAdvance==sqlite3BtreeNext );
assert( pOp->opcode!=OP_PrevIfOpen || pOp->p4.xAdvance==sqlite3BtreePrevious);
-
- /* The Next opcode is only used after SeekGT, SeekGE, and Rewind.
- ** The Prev opcode is only used after SeekLT, SeekLE, and Last. */
- assert( pOp->opcode!=OP_Next || pOp->opcode!=OP_NextIfOpen
- || pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE
- || pC->seekOp==OP_Rewind || pC->seekOp==OP_Found);
- assert( pOp->opcode!=OP_Prev || pOp->opcode!=OP_PrevIfOpen
- || pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE
- || pC->seekOp==OP_Last );
-
- rc = pOp->p4.xAdvance(pC->pCursor, &res);
+ rc = pOp->p4.xAdvance(u.br.pC->pCursor, &u.br.res);
next_tail:
- pC->cacheStatus = CACHE_STALE;
- VdbeBranchTaken(res==0,2);
- if( res==0 ){
- pC->nullRow = 0;
+ u.br.pC->cacheStatus = CACHE_STALE;
+ if( u.br.res==0 ){
+ u.br.pC->nullRow = 0;
+ pc = pOp->p2 - 1;
p->aCounter[pOp->p5]++;
#ifdef SQLITE_TEST
sqlite3_search_count++;
#endif
- goto jump_to_p2_and_check_for_interrupt;
}else{
- pC->nullRow = 1;
+ u.br.pC->nullRow = 1;
}
+ u.br.pC->rowidIsValid = 0;
goto check_for_interrupt;
}
@@ -77699,44 +71161,40 @@ next_tail:
** P3 is a flag that provides a hint to the b-tree layer that this
** insert is likely to be an append.
**
-** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is
-** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear,
-** then the change counter is unchanged.
-**
-** If P5 has the OPFLAG_USESEEKRESULT bit set, then the cursor must have
-** just done a seek to the spot where the new entry is to be inserted.
-** This flag avoids doing an extra seek.
-**
** This instruction only works for indices. The equivalent instruction
** for tables is OP_Insert.
*/
case OP_SorterInsert: /* in2 */
case OP_IdxInsert: { /* in2 */
+#if 0 /* local variables moved into u.bs */
VdbeCursor *pC;
+ BtCursor *pCrsr;
int nKey;
const char *zKey;
+#endif /* local variables moved into u.bs */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( isSorter(pC)==(pOp->opcode==OP_SorterInsert) );
+ u.bs.pC = p->apCsr[pOp->p1];
+ assert( u.bs.pC!=0 );
+ assert( isSorter(u.bs.pC)==(pOp->opcode==OP_SorterInsert) );
pIn2 = &aMem[pOp->p2];
assert( pIn2->flags & MEM_Blob );
+ u.bs.pCrsr = u.bs.pC->pCursor;
if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
- assert( pC->pCursor!=0 );
- assert( pC->isTable==0 );
+ assert( u.bs.pCrsr!=0 );
+ assert( u.bs.pC->isTable==0 );
rc = ExpandBlob(pIn2);
if( rc==SQLITE_OK ){
- if( pOp->opcode==OP_SorterInsert ){
- rc = sqlite3VdbeSorterWrite(pC, pIn2);
+ if( isSorter(u.bs.pC) ){
+ rc = sqlite3VdbeSorterWrite(db, u.bs.pC, pIn2);
}else{
- nKey = pIn2->n;
- zKey = pIn2->z;
- rc = sqlite3BtreeInsert(pC->pCursor, zKey, nKey, "", 0, 0, pOp->p3,
- ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0)
+ u.bs.nKey = pIn2->n;
+ u.bs.zKey = pIn2->z;
+ rc = sqlite3BtreeInsert(u.bs.pCrsr, u.bs.zKey, u.bs.nKey, "", 0, 0, pOp->p3,
+ ((pOp->p5 & OPFLAG_USESEEKRESULT) ? u.bs.pC->seekResult : 0)
);
- assert( pC->deferredMoveto==0 );
- pC->cacheStatus = CACHE_STALE;
+ assert( u.bs.pC->deferredMoveto==0 );
+ u.bs.pC->cacheStatus = CACHE_STALE;
}
}
break;
@@ -77750,32 +71208,34 @@ case OP_IdxInsert: { /* in2 */
** index opened by cursor P1.
*/
case OP_IdxDelete: {
+#if 0 /* local variables moved into u.bt */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
UnpackedRecord r;
+#endif /* local variables moved into u.bt */
assert( pOp->p3>0 );
assert( pOp->p2>0 && pOp->p2+pOp->p3<=(p->nMem-p->nCursor)+1 );
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- pCrsr = pC->pCursor;
- assert( pCrsr!=0 );
+ u.bt.pC = p->apCsr[pOp->p1];
+ assert( u.bt.pC!=0 );
+ u.bt.pCrsr = u.bt.pC->pCursor;
+ assert( u.bt.pCrsr!=0 );
assert( pOp->p5==0 );
- r.pKeyInfo = pC->pKeyInfo;
- r.nField = (u16)pOp->p3;
- r.default_rc = 0;
- r.aMem = &aMem[pOp->p2];
+ u.bt.r.pKeyInfo = u.bt.pC->pKeyInfo;
+ u.bt.r.nField = (u16)pOp->p3;
+ u.bt.r.flags = UNPACKED_PREFIX_MATCH;
+ u.bt.r.aMem = &aMem[pOp->p2];
#ifdef SQLITE_DEBUG
- { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+ { int i; for(i=0; i<u.bt.r.nField; i++) assert( memIsValid(&u.bt.r.aMem[i]) ); }
#endif
- rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &res);
- if( rc==SQLITE_OK && res==0 ){
- rc = sqlite3BtreeDelete(pCrsr, 0);
+ rc = sqlite3BtreeMovetoUnpacked(u.bt.pCrsr, &u.bt.r, 0, 0, &u.bt.res);
+ if( rc==SQLITE_OK && u.bt.res==0 ){
+ rc = sqlite3BtreeDelete(u.bt.pCrsr);
}
- assert( pC->deferredMoveto==0 );
- pC->cacheStatus = CACHE_STALE;
+ assert( u.bt.pC->deferredMoveto==0 );
+ u.bt.pC->cacheStatus = CACHE_STALE;
break;
}
@@ -77788,35 +71248,29 @@ case OP_IdxDelete: {
**
** See also: Rowid, MakeRecord.
*/
-case OP_IdxRowid: { /* out2 */
+case OP_IdxRowid: { /* out2-prerelease */
+#if 0 /* local variables moved into u.bu */
BtCursor *pCrsr;
VdbeCursor *pC;
i64 rowid;
+#endif /* local variables moved into u.bu */
- pOut = out2Prerelease(p, pOp);
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- pCrsr = pC->pCursor;
- assert( pCrsr!=0 );
+ u.bu.pC = p->apCsr[pOp->p1];
+ assert( u.bu.pC!=0 );
+ u.bu.pCrsr = u.bu.pC->pCursor;
+ assert( u.bu.pCrsr!=0 );
pOut->flags = MEM_Null;
- assert( pC->isTable==0 );
- assert( pC->deferredMoveto==0 );
-
- /* sqlite3VbeCursorRestore() can only fail if the record has been deleted
- ** out from under the cursor. That will never happend for an IdxRowid
- ** opcode, hence the NEVER() arround the check of the return value.
- */
- rc = sqlite3VdbeCursorRestore(pC);
- if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error;
-
- if( !pC->nullRow ){
- rowid = 0; /* Not needed. Only used to silence a warning. */
- rc = sqlite3VdbeIdxRowid(db, pCrsr, &rowid);
+ rc = sqlite3VdbeCursorMoveto(u.bu.pC);
+ if( NEVER(rc) ) goto abort_due_to_error;
+ assert( u.bu.pC->deferredMoveto==0 );
+ assert( u.bu.pC->isTable==0 );
+ if( !u.bu.pC->nullRow ){
+ rc = sqlite3VdbeIdxRowid(db, u.bu.pCrsr, &u.bu.rowid);
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
- pOut->u.i = rowid;
+ pOut->u.i = u.bu.rowid;
pOut->flags = MEM_Int;
}
break;
@@ -77826,87 +71280,67 @@ case OP_IdxRowid: { /* out2 */
** Synopsis: key=r[P3@P4]
**
** The P4 register values beginning with P3 form an unpacked index
-** key that omits the PRIMARY KEY. Compare this key value against the index
-** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID
-** fields at the end.
+** key that omits the ROWID. Compare this key value against the index
+** that P1 is currently pointing to, ignoring the ROWID on the P1 index.
**
** If the P1 index entry is greater than or equal to the key value
** then jump to P2. Otherwise fall through to the next instruction.
-*/
-/* Opcode: IdxGT P1 P2 P3 P4 P5
-** Synopsis: key=r[P3@P4]
-**
-** The P4 register values beginning with P3 form an unpacked index
-** key that omits the PRIMARY KEY. Compare this key value against the index
-** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID
-** fields at the end.
**
-** If the P1 index entry is greater than the key value
-** then jump to P2. Otherwise fall through to the next instruction.
+** If P5 is non-zero then the key value is increased by an epsilon
+** prior to the comparison. This makes the opcode work like IdxGT except
+** that if the key from register P3 is a prefix of the key in the cursor,
+** the result is false whereas it would be true with IdxGT.
*/
/* Opcode: IdxLT P1 P2 P3 P4 P5
** Synopsis: key=r[P3@P4]
**
** The P4 register values beginning with P3 form an unpacked index
-** key that omits the PRIMARY KEY or ROWID. Compare this key value against
-** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or
-** ROWID on the P1 index.
+** key that omits the ROWID. Compare this key value against the index
+** that P1 is currently pointing to, ignoring the ROWID on the P1 index.
**
** If the P1 index entry is less than the key value then jump to P2.
** Otherwise fall through to the next instruction.
-*/
-/* Opcode: IdxLE P1 P2 P3 P4 P5
-** Synopsis: key=r[P3@P4]
**
-** The P4 register values beginning with P3 form an unpacked index
-** key that omits the PRIMARY KEY or ROWID. Compare this key value against
-** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or
-** ROWID on the P1 index.
-**
-** If the P1 index entry is less than or equal to the key value then jump
-** to P2. Otherwise fall through to the next instruction.
+** If P5 is non-zero then the key value is increased by an epsilon prior
+** to the comparison. This makes the opcode work like IdxLE.
*/
-case OP_IdxLE: /* jump */
-case OP_IdxGT: /* jump */
case OP_IdxLT: /* jump */
-case OP_IdxGE: { /* jump */
+case OP_IdxGE: { /* jump */
+#if 0 /* local variables moved into u.bv */
VdbeCursor *pC;
int res;
UnpackedRecord r;
+#endif /* local variables moved into u.bv */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pC->isOrdered );
- assert( pC->pCursor!=0);
- assert( pC->deferredMoveto==0 );
+ u.bv.pC = p->apCsr[pOp->p1];
+ assert( u.bv.pC!=0 );
+ assert( u.bv.pC->isOrdered );
+ assert( u.bv.pC->pCursor!=0);
+ assert( u.bv.pC->deferredMoveto==0 );
assert( pOp->p5==0 || pOp->p5==1 );
assert( pOp->p4type==P4_INT32 );
- r.pKeyInfo = pC->pKeyInfo;
- r.nField = (u16)pOp->p4.i;
- if( pOp->opcode<OP_IdxLT ){
- assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxGT );
- r.default_rc = -1;
+ u.bv.r.pKeyInfo = u.bv.pC->pKeyInfo;
+ u.bv.r.nField = (u16)pOp->p4.i;
+ if( pOp->p5 ){
+ u.bv.r.flags = UNPACKED_INCRKEY | UNPACKED_PREFIX_MATCH;
}else{
- assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxLT );
- r.default_rc = 0;
+ u.bv.r.flags = UNPACKED_PREFIX_MATCH;
}
- r.aMem = &aMem[pOp->p3];
+ u.bv.r.aMem = &aMem[pOp->p3];
#ifdef SQLITE_DEBUG
- { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+ { int i; for(i=0; i<u.bv.r.nField; i++) assert( memIsValid(&u.bv.r.aMem[i]) ); }
#endif
- res = 0; /* Not needed. Only used to silence a warning. */
- rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res);
- assert( (OP_IdxLE&1)==(OP_IdxLT&1) && (OP_IdxGE&1)==(OP_IdxGT&1) );
- if( (pOp->opcode&1)==(OP_IdxLT&1) ){
- assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxLT );
- res = -res;
+ rc = sqlite3VdbeIdxKeyCompare(u.bv.pC, &u.bv.r, &u.bv.res);
+ if( pOp->opcode==OP_IdxLT ){
+ u.bv.res = -u.bv.res;
}else{
- assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxGT );
- res++;
+ assert( pOp->opcode==OP_IdxGE );
+ u.bv.res++;
+ }
+ if( u.bv.res>0 ){
+ pc = pOp->p2 - 1 ;
}
- VdbeBranchTaken(res>0,2);
- if( res>0 ) goto jump_to_p2;
break;
}
@@ -77930,29 +71364,44 @@ case OP_IdxGE: { /* jump */
**
** See also: Clear
*/
-case OP_Destroy: { /* out2 */
+case OP_Destroy: { /* out2-prerelease */
+#if 0 /* local variables moved into u.bw */
int iMoved;
+ int iCnt;
+ Vdbe *pVdbe;
int iDb;
+#endif /* local variables moved into u.bw */
assert( p->readOnly==0 );
- pOut = out2Prerelease(p, pOp);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ u.bw.iCnt = 0;
+ for(u.bw.pVdbe=db->pVdbe; u.bw.pVdbe; u.bw.pVdbe = u.bw.pVdbe->pNext){
+ if( u.bw.pVdbe->magic==VDBE_MAGIC_RUN && u.bw.pVdbe->bIsReader
+ && u.bw.pVdbe->inVtabMethod<2 && u.bw.pVdbe->pc>=0
+ ){
+ u.bw.iCnt++;
+ }
+ }
+#else
+ u.bw.iCnt = db->nVdbeRead;
+#endif
pOut->flags = MEM_Null;
- if( db->nVdbeRead > db->nVDestroy+1 ){
+ if( u.bw.iCnt>1 ){
rc = SQLITE_LOCKED;
p->errorAction = OE_Abort;
}else{
- iDb = pOp->p3;
- assert( DbMaskTest(p->btreeMask, iDb) );
- iMoved = 0; /* Not needed. Only to silence a warning. */
- rc = sqlite3BtreeDropTable(db->aDb[iDb].pBt, pOp->p1, &iMoved);
+ u.bw.iDb = pOp->p3;
+ assert( u.bw.iCnt==1 );
+ assert( (p->btreeMask & (((yDbMask)1)<<u.bw.iDb))!=0 );
+ rc = sqlite3BtreeDropTable(db->aDb[u.bw.iDb].pBt, pOp->p1, &u.bw.iMoved);
pOut->flags = MEM_Int;
- pOut->u.i = iMoved;
+ pOut->u.i = u.bw.iMoved;
#ifndef SQLITE_OMIT_AUTOVACUUM
- if( rc==SQLITE_OK && iMoved!=0 ){
- sqlite3RootPageMoved(db, iDb, iMoved, pOp->p1);
+ if( rc==SQLITE_OK && u.bw.iMoved!=0 ){
+ sqlite3RootPageMoved(db, u.bw.iDb, u.bw.iMoved, pOp->p1);
/* All OP_Destroy operations occur on the same btree */
- assert( resetSchemaOnFault==0 || resetSchemaOnFault==iDb+1 );
- resetSchemaOnFault = iDb+1;
+ assert( resetSchemaOnFault==0 || resetSchemaOnFault==u.bw.iDb+1 );
+ resetSchemaOnFault = u.bw.iDb+1;
}
#endif
}
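OP_Destroy is the opcode a DROP TABLE statement compiles into; the SQLITE_LOCKED branch above is taken when other statements are still reading. A minimal sketch (table name illustrative; finalize or reset other statements first to avoid the locked case):

#include <sqlite3.h>

/* Drop a table; may return SQLITE_LOCKED if another statement is
** still stepping through rows. */
static int drop_table(sqlite3 *db){
  return sqlite3_exec(db, "DROP TABLE IF EXISTS t;", 0, 0, 0);
}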
@@ -77978,48 +71427,28 @@ case OP_Destroy: { /* out2 */
** See also: Destroy
*/
case OP_Clear: {
+#if 0 /* local variables moved into u.bx */
int nChange;
-
- nChange = 0;
+#endif /* local variables moved into u.bx */
+
+ u.bx.nChange = 0;
assert( p->readOnly==0 );
- assert( DbMaskTest(p->btreeMask, pOp->p2) );
+ assert( pOp->p1!=1 );
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p2))!=0 );
rc = sqlite3BtreeClearTable(
- db->aDb[pOp->p2].pBt, pOp->p1, (pOp->p3 ? &nChange : 0)
+ db->aDb[pOp->p2].pBt, pOp->p1, (pOp->p3 ? &u.bx.nChange : 0)
);
if( pOp->p3 ){
- p->nChange += nChange;
+ p->nChange += u.bx.nChange;
if( pOp->p3>0 ){
assert( memIsValid(&aMem[pOp->p3]) );
memAboutToChange(p, &aMem[pOp->p3]);
- aMem[pOp->p3].u.i += nChange;
+ aMem[pOp->p3].u.i += u.bx.nChange;
}
}
break;
}
-/* Opcode: ResetSorter P1 * * * *
-**
-** Delete all contents from the ephemeral table or sorter
-** that is open on cursor P1.
-**
-** This opcode only works for cursors used for sorting and
-** opened with OP_OpenEphemeral or OP_SorterOpen.
-*/
-case OP_ResetSorter: {
- VdbeCursor *pC;
-
- assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- if( pC->pSorter ){
- sqlite3VdbeSorterReset(db, pC->pSorter);
- }else{
- assert( pC->isEphemeral );
- rc = sqlite3BtreeClearTableOfCursor(pC->pCursor);
- }
- break;
-}
-
/* Opcode: CreateTable P1 P2 * * *
** Synopsis: r[P2]=root iDb=P1
**
@@ -78044,27 +71473,28 @@ case OP_ResetSorter: {
**
** See documentation on OP_CreateTable for additional information.
*/
-case OP_CreateIndex: /* out2 */
-case OP_CreateTable: { /* out2 */
+case OP_CreateIndex: /* out2-prerelease */
+case OP_CreateTable: { /* out2-prerelease */
+#if 0 /* local variables moved into u.by */
int pgno;
int flags;
Db *pDb;
+#endif /* local variables moved into u.by */
- pOut = out2Prerelease(p, pOp);
- pgno = 0;
+ u.by.pgno = 0;
assert( pOp->p1>=0 && pOp->p1<db->nDb );
- assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
assert( p->readOnly==0 );
- pDb = &db->aDb[pOp->p1];
- assert( pDb->pBt!=0 );
+ u.by.pDb = &db->aDb[pOp->p1];
+ assert( u.by.pDb->pBt!=0 );
if( pOp->opcode==OP_CreateTable ){
- /* flags = BTREE_INTKEY; */
- flags = BTREE_INTKEY;
+ /* u.by.flags = BTREE_INTKEY; */
+ u.by.flags = BTREE_INTKEY;
}else{
- flags = BTREE_BLOBKEY;
+ u.by.flags = BTREE_BLOBKEY;
}
- rc = sqlite3BtreeCreateTable(pDb->pBt, &pgno, flags);
- pOut->u.i = pgno;
+ rc = sqlite3BtreeCreateTable(u.by.pDb->pBt, &u.by.pgno, u.by.flags);
+ pOut->u.i = u.by.pgno;
break;
}
@@ -78077,42 +71507,44 @@ case OP_CreateTable: { /* out2 */
** then runs the new virtual machine. It is thus a re-entrant opcode.
*/
case OP_ParseSchema: {
+#if 0 /* local variables moved into u.bz */
int iDb;
const char *zMaster;
char *zSql;
InitData initData;
+#endif /* local variables moved into u.bz */
/* Any prepared statement that invokes this opcode will hold mutexes
- ** on every btree. This is a prerequisite for invoking
+ ** on every btree. This is a prerequisite for invoking
** sqlite3InitCallback().
*/
#ifdef SQLITE_DEBUG
- for(iDb=0; iDb<db->nDb; iDb++){
- assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) );
+ for(u.bz.iDb=0; u.bz.iDb<db->nDb; u.bz.iDb++){
+ assert( u.bz.iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[u.bz.iDb].pBt) );
}
#endif
- iDb = pOp->p1;
- assert( iDb>=0 && iDb<db->nDb );
- assert( DbHasProperty(db, iDb, DB_SchemaLoaded) );
+ u.bz.iDb = pOp->p1;
+ assert( u.bz.iDb>=0 && u.bz.iDb<db->nDb );
+ assert( DbHasProperty(db, u.bz.iDb, DB_SchemaLoaded) );
/* Used to be a conditional */ {
- zMaster = SCHEMA_TABLE(iDb);
- initData.db = db;
- initData.iDb = pOp->p1;
- initData.pzErrMsg = &p->zErrMsg;
- zSql = sqlite3MPrintf(db,
+ u.bz.zMaster = SCHEMA_TABLE(u.bz.iDb);
+ u.bz.initData.db = db;
+ u.bz.initData.iDb = pOp->p1;
+ u.bz.initData.pzErrMsg = &p->zErrMsg;
+ u.bz.zSql = sqlite3MPrintf(db,
"SELECT name, rootpage, sql FROM '%q'.%s WHERE %s ORDER BY rowid",
- db->aDb[iDb].zName, zMaster, pOp->p4.z);
- if( zSql==0 ){
+ db->aDb[u.bz.iDb].zName, u.bz.zMaster, pOp->p4.z);
+ if( u.bz.zSql==0 ){
rc = SQLITE_NOMEM;
}else{
assert( db->init.busy==0 );
db->init.busy = 1;
- initData.rc = SQLITE_OK;
+ u.bz.initData.rc = SQLITE_OK;
assert( !db->mallocFailed );
- rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0);
- if( rc==SQLITE_OK ) rc = initData.rc;
- sqlite3DbFree(db, zSql);
+ rc = sqlite3_exec(db, u.bz.zSql, sqlite3InitCallback, &u.bz.initData, 0);
+ if( rc==SQLITE_OK ) rc = u.bz.initData.rc;
+ sqlite3DbFree(db, u.bz.zSql);
db->init.busy = 0;
}
}
@@ -78120,7 +71552,7 @@ case OP_ParseSchema: {
if( rc==SQLITE_NOMEM ){
goto no_mem;
}
- break;
+ break;
}
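The SELECT assembled above reads the schema table (sqlite_master for the main database); the same table can be queried directly to see what the schema reader sees. A sketch printing only the name and rootpage columns:

#include <stdio.h>
#include <sqlite3.h>

/* List every object recorded in the main schema table. */
static int dump_schema(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db,
      "SELECT name, rootpage FROM sqlite_master ORDER BY rowid;",
      -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s (root=%d)\n", (const char*)sqlite3_column_text(pStmt, 0),
           sqlite3_column_int(pStmt, 1));
  }
  return sqlite3_finalize(pStmt);
}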
#if !defined(SQLITE_OMIT_ANALYZE)
@@ -78141,8 +71573,7 @@ case OP_LoadAnalysis: {
**
** Remove the internal (in-memory) data structures that describe
** the table named P4 in database P1. This is called after a table
-** is dropped from disk (using the Destroy opcode) in order to keep
-** the internal representation of the
+** is dropped in order to keep the internal representation of the
** schema consistent with what is on disk.
*/
case OP_DropTable: {
@@ -78154,8 +71585,7 @@ case OP_DropTable: {
**
** Remove the internal (in-memory) data structures that describe
** the index named P4 in database P1. This is called after an index
-** is dropped from disk (using the Destroy opcode)
-** in order to keep the internal representation of the
+** is dropped in order to keep the internal representation of the
** schema consistent with what is on disk.
*/
case OP_DropIndex: {
@@ -78167,8 +71597,7 @@ case OP_DropIndex: {
**
** Remove the internal (in-memory) data structures that describe
** the trigger named P4 in database P1. This is called after a trigger
-** is dropped from disk (using the Destroy opcode) in order to keep
-** the internal representation of the
+** is dropped in order to keep the internal representation of the
** schema consistent with what is on disk.
*/
case OP_DropTrigger: {
@@ -78199,40 +71628,42 @@ case OP_DropTrigger: {
** This opcode is used to implement the integrity_check pragma.
*/
case OP_IntegrityCk: {
+#if 0 /* local variables moved into u.ca */
int nRoot; /* Number of tables to check. (Number of root pages.) */
int *aRoot; /* Array of rootpage numbers for tables to be checked */
int j; /* Loop counter */
int nErr; /* Number of errors reported */
char *z; /* Text of the error report */
Mem *pnErr; /* Register keeping track of errors remaining */
+#endif /* local variables moved into u.ca */
assert( p->bIsReader );
- nRoot = pOp->p2;
- assert( nRoot>0 );
- aRoot = sqlite3DbMallocRaw(db, sizeof(int)*(nRoot+1) );
- if( aRoot==0 ) goto no_mem;
+ u.ca.nRoot = pOp->p2;
+ assert( u.ca.nRoot>0 );
+ u.ca.aRoot = sqlite3DbMallocRaw(db, sizeof(int)*(u.ca.nRoot+1) );
+ if( u.ca.aRoot==0 ) goto no_mem;
assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
- pnErr = &aMem[pOp->p3];
- assert( (pnErr->flags & MEM_Int)!=0 );
- assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 );
+ u.ca.pnErr = &aMem[pOp->p3];
+ assert( (u.ca.pnErr->flags & MEM_Int)!=0 );
+ assert( (u.ca.pnErr->flags & (MEM_Str|MEM_Blob))==0 );
pIn1 = &aMem[pOp->p1];
- for(j=0; j<nRoot; j++){
- aRoot[j] = (int)sqlite3VdbeIntValue(&pIn1[j]);
+ for(u.ca.j=0; u.ca.j<u.ca.nRoot; u.ca.j++){
+ u.ca.aRoot[u.ca.j] = (int)sqlite3VdbeIntValue(&pIn1[u.ca.j]);
}
- aRoot[j] = 0;
+ u.ca.aRoot[u.ca.j] = 0;
assert( pOp->p5<db->nDb );
- assert( DbMaskTest(p->btreeMask, pOp->p5) );
- z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, aRoot, nRoot,
- (int)pnErr->u.i, &nErr);
- sqlite3DbFree(db, aRoot);
- pnErr->u.i -= nErr;
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p5))!=0 );
+ u.ca.z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, u.ca.aRoot, u.ca.nRoot,
+ (int)u.ca.pnErr->u.i, &u.ca.nErr);
+ sqlite3DbFree(db, u.ca.aRoot);
+ u.ca.pnErr->u.i -= u.ca.nErr;
sqlite3VdbeMemSetNull(pIn1);
- if( nErr==0 ){
- assert( z==0 );
- }else if( z==0 ){
+ if( u.ca.nErr==0 ){
+ assert( u.ca.z==0 );
+ }else if( u.ca.z==0 ){
goto no_mem;
}else{
- sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free);
+ sqlite3VdbeMemSetStr(pIn1, u.ca.z, -1, SQLITE_UTF8, sqlite3_free);
}
UPDATE_MAX_BLOBSIZE(pIn1);
sqlite3VdbeChangeEncoding(pIn1, encoding);
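This opcode is the engine behind PRAGMA integrity_check; from application code the pragma itself is the easiest entry point, returning a single row reading "ok" when no problems are found. Sketch:

#include <stdio.h>
#include <sqlite3.h>

/* Run the integrity checker and print each reported problem ("ok" if none). */
static int check_integrity(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db, "PRAGMA integrity_check;", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  return sqlite3_finalize(pStmt);
}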
@@ -78268,20 +71699,20 @@ case OP_RowSetAdd: { /* in1, in2 */
** unchanged and jump to instruction P2.
*/
case OP_RowSetRead: { /* jump, in1, out3 */
+#if 0 /* local variables moved into u.cb */
i64 val;
+#endif /* local variables moved into u.cb */
pIn1 = &aMem[pOp->p1];
- if( (pIn1->flags & MEM_RowSet)==0
- || sqlite3RowSetNext(pIn1->u.pRowSet, &val)==0
+ if( (pIn1->flags & MEM_RowSet)==0
+ || sqlite3RowSetNext(pIn1->u.pRowSet, &u.cb.val)==0
){
/* The boolean index is empty */
sqlite3VdbeMemSetNull(pIn1);
- VdbeBranchTaken(1,2);
- goto jump_to_p2_and_check_for_interrupt;
+ pc = pOp->p2 - 1;
}else{
/* A value was pulled from the index */
- VdbeBranchTaken(0,2);
- sqlite3VdbeMemSetInt64(&aMem[pOp->p3], val);
+ sqlite3VdbeMemSetInt64(&aMem[pOp->p3], u.cb.val);
}
goto check_for_interrupt;
}
@@ -78311,12 +71742,14 @@ case OP_RowSetRead: { /* jump, in1, out3 */
** inserted as part of some other set).
*/
case OP_RowSetTest: { /* jump, in1, in3 */
+#if 0 /* local variables moved into u.cc */
int iSet;
int exists;
+#endif /* local variables moved into u.cc */
pIn1 = &aMem[pOp->p1];
pIn3 = &aMem[pOp->p3];
- iSet = pOp->p4.i;
+ u.cc.iSet = pOp->p4.i;
assert( pIn3->flags&MEM_Int );
/* If there is anything other than a rowset object in memory cell P1,
@@ -78328,13 +71761,17 @@ case OP_RowSetTest: { /* jump, in1, in3 */
}
assert( pOp->p4type==P4_INT32 );
- assert( iSet==-1 || iSet>=0 );
- if( iSet ){
- exists = sqlite3RowSetTest(pIn1->u.pRowSet, iSet, pIn3->u.i);
- VdbeBranchTaken(exists!=0,2);
- if( exists ) goto jump_to_p2;
+ assert( u.cc.iSet==-1 || u.cc.iSet>=0 );
+ if( u.cc.iSet ){
+ u.cc.exists = sqlite3RowSetTest(pIn1->u.pRowSet,
+ (u8)(u.cc.iSet>=0 ? u.cc.iSet & 0xf : 0xff),
+ pIn3->u.i);
+ if( u.cc.exists ){
+ pc = pOp->p2 - 1;
+ break;
+ }
}
- if( iSet>=0 ){
+ if( u.cc.iSet>=0 ){
sqlite3RowSetInsert(pIn1->u.pRowSet, pIn3->u.i);
}
break;
@@ -78343,7 +71780,7 @@ case OP_RowSetTest: { /* jump, in1, in3 */
#ifndef SQLITE_OMIT_TRIGGER
-/* Opcode: Program P1 P2 P3 P4 P5
+/* Opcode: Program P1 P2 P3 P4 *
**
** Execute the trigger program passed as P4 (type P4_SUBPROGRAM).
**
@@ -78355,10 +71792,9 @@ case OP_RowSetTest: { /* jump, in1, in3 */
** memory required by the sub-vdbe at runtime.
**
** P4 is a pointer to the VM containing the trigger program.
-**
-** If P5 is non-zero, then recursive program invocation is enabled.
*/
case OP_Program: { /* jump */
+#if 0 /* local variables moved into u.cd */
int nMem; /* Number of memory registers for sub-program */
int nByte; /* Bytes of runtime space required for sub-program */
Mem *pRt; /* Register to allocate runtime space */
@@ -78367,105 +71803,99 @@ case OP_Program: { /* jump */
VdbeFrame *pFrame; /* New vdbe frame to execute in */
SubProgram *pProgram; /* Sub-program to execute */
void *t; /* Token identifying trigger */
+#endif /* local variables moved into u.cd */
- pProgram = pOp->p4.pProgram;
- pRt = &aMem[pOp->p3];
- assert( pProgram->nOp>0 );
-
- /* If the p5 flag is clear, then recursive invocation of triggers is
+ u.cd.pProgram = pOp->p4.pProgram;
+ u.cd.pRt = &aMem[pOp->p3];
+ assert( u.cd.pProgram->nOp>0 );
+
+ /* If the p5 flag is clear, then recursive invocation of triggers is
** disabled for backwards compatibility (p5 is set if this sub-program
** is really a trigger, not a foreign key action, and the flag set
** and cleared by the "PRAGMA recursive_triggers" command is clear).
- **
- ** It is recursive invocation of triggers, at the SQL level, that is
- ** disabled. In some cases a single trigger may generate more than one
- ** SubProgram (if the trigger may be executed with more than one different
+ **
+ ** It is recursive invocation of triggers, at the SQL level, that is
+ ** disabled. In some cases a single trigger may generate more than one
+ ** SubProgram (if the trigger may be executed with more than one different
** ON CONFLICT algorithm). SubProgram structures associated with a
- ** single trigger all have the same value for the SubProgram.token
+ ** single trigger all have the same value for the SubProgram.token
** variable. */
if( pOp->p5 ){
- t = pProgram->token;
- for(pFrame=p->pFrame; pFrame && pFrame->token!=t; pFrame=pFrame->pParent);
- if( pFrame ) break;
+ u.cd.t = u.cd.pProgram->token;
+ for(u.cd.pFrame=p->pFrame; u.cd.pFrame && u.cd.pFrame->token!=u.cd.t; u.cd.pFrame=u.cd.pFrame->pParent);
+ if( u.cd.pFrame ) break;
}
if( p->nFrame>=db->aLimit[SQLITE_LIMIT_TRIGGER_DEPTH] ){
rc = SQLITE_ERROR;
- sqlite3VdbeError(p, "too many levels of trigger recursion");
+ sqlite3SetString(&p->zErrMsg, db, "too many levels of trigger recursion");
break;
}
- /* Register pRt is used to store the memory required to save the state
+ /* Register u.cd.pRt is used to store the memory required to save the state
** of the current program, and the memory required at runtime to execute
- ** the trigger program. If this trigger has been fired before, then pRt
+ ** the trigger program. If this trigger has been fired before, then u.cd.pRt
** is already allocated. Otherwise, it must be initialized. */
- if( (pRt->flags&MEM_Frame)==0 ){
- /* SubProgram.nMem is set to the number of memory cells used by the
+ if( (u.cd.pRt->flags&MEM_Frame)==0 ){
+ /* SubProgram.nMem is set to the number of memory cells used by the
** program stored in SubProgram.aOp. As well as these, one memory
** cell is required for each cursor used by the program. Set local
- ** variable nMem (and later, VdbeFrame.nChildMem) to this value.
+ ** variable u.cd.nMem (and later, VdbeFrame.nChildMem) to this value.
*/
- nMem = pProgram->nMem + pProgram->nCsr;
- nByte = ROUND8(sizeof(VdbeFrame))
- + nMem * sizeof(Mem)
- + pProgram->nCsr * sizeof(VdbeCursor *)
- + pProgram->nOnce * sizeof(u8);
- pFrame = sqlite3DbMallocZero(db, nByte);
- if( !pFrame ){
+ u.cd.nMem = u.cd.pProgram->nMem + u.cd.pProgram->nCsr;
+ u.cd.nByte = ROUND8(sizeof(VdbeFrame))
+ + u.cd.nMem * sizeof(Mem)
+ + u.cd.pProgram->nCsr * sizeof(VdbeCursor *)
+ + u.cd.pProgram->nOnce * sizeof(u8);
+ u.cd.pFrame = sqlite3DbMallocZero(db, u.cd.nByte);
+ if( !u.cd.pFrame ){
goto no_mem;
}
- sqlite3VdbeMemRelease(pRt);
- pRt->flags = MEM_Frame;
- pRt->u.pFrame = pFrame;
+ sqlite3VdbeMemRelease(u.cd.pRt);
+ u.cd.pRt->flags = MEM_Frame;
+ u.cd.pRt->u.pFrame = u.cd.pFrame;
- pFrame->v = p;
- pFrame->nChildMem = nMem;
- pFrame->nChildCsr = pProgram->nCsr;
- pFrame->pc = (int)(pOp - aOp);
- pFrame->aMem = p->aMem;
- pFrame->nMem = p->nMem;
- pFrame->apCsr = p->apCsr;
- pFrame->nCursor = p->nCursor;
- pFrame->aOp = p->aOp;
- pFrame->nOp = p->nOp;
- pFrame->token = pProgram->token;
- pFrame->aOnceFlag = p->aOnceFlag;
- pFrame->nOnceFlag = p->nOnceFlag;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- pFrame->anExec = p->anExec;
-#endif
+ u.cd.pFrame->v = p;
+ u.cd.pFrame->nChildMem = u.cd.nMem;
+ u.cd.pFrame->nChildCsr = u.cd.pProgram->nCsr;
+ u.cd.pFrame->pc = pc;
+ u.cd.pFrame->aMem = p->aMem;
+ u.cd.pFrame->nMem = p->nMem;
+ u.cd.pFrame->apCsr = p->apCsr;
+ u.cd.pFrame->nCursor = p->nCursor;
+ u.cd.pFrame->aOp = p->aOp;
+ u.cd.pFrame->nOp = p->nOp;
+ u.cd.pFrame->token = u.cd.pProgram->token;
+ u.cd.pFrame->aOnceFlag = p->aOnceFlag;
+ u.cd.pFrame->nOnceFlag = p->nOnceFlag;
- pEnd = &VdbeFrameMem(pFrame)[pFrame->nChildMem];
- for(pMem=VdbeFrameMem(pFrame); pMem!=pEnd; pMem++){
- pMem->flags = MEM_Undefined;
- pMem->db = db;
+ u.cd.pEnd = &VdbeFrameMem(u.cd.pFrame)[u.cd.pFrame->nChildMem];
+ for(u.cd.pMem=VdbeFrameMem(u.cd.pFrame); u.cd.pMem!=u.cd.pEnd; u.cd.pMem++){
+ u.cd.pMem->flags = MEM_Invalid;
+ u.cd.pMem->db = db;
}
}else{
- pFrame = pRt->u.pFrame;
- assert( pProgram->nMem+pProgram->nCsr==pFrame->nChildMem );
- assert( pProgram->nCsr==pFrame->nChildCsr );
- assert( (int)(pOp - aOp)==pFrame->pc );
+ u.cd.pFrame = u.cd.pRt->u.pFrame;
+ assert( u.cd.pProgram->nMem+u.cd.pProgram->nCsr==u.cd.pFrame->nChildMem );
+ assert( u.cd.pProgram->nCsr==u.cd.pFrame->nChildCsr );
+ assert( pc==u.cd.pFrame->pc );
}
p->nFrame++;
- pFrame->pParent = p->pFrame;
- pFrame->lastRowid = lastRowid;
- pFrame->nChange = p->nChange;
- pFrame->nDbChange = p->db->nChange;
+ u.cd.pFrame->pParent = p->pFrame;
+ u.cd.pFrame->lastRowid = lastRowid;
+ u.cd.pFrame->nChange = p->nChange;
p->nChange = 0;
- p->pFrame = pFrame;
- p->aMem = aMem = &VdbeFrameMem(pFrame)[-1];
- p->nMem = pFrame->nChildMem;
- p->nCursor = (u16)pFrame->nChildCsr;
+ p->pFrame = u.cd.pFrame;
+ p->aMem = aMem = &VdbeFrameMem(u.cd.pFrame)[-1];
+ p->nMem = u.cd.pFrame->nChildMem;
+ p->nCursor = (u16)u.cd.pFrame->nChildCsr;
p->apCsr = (VdbeCursor **)&aMem[p->nMem+1];
- p->aOp = aOp = pProgram->aOp;
- p->nOp = pProgram->nOp;
+ p->aOp = aOp = u.cd.pProgram->aOp;
+ p->nOp = u.cd.pProgram->nOp;
p->aOnceFlag = (u8 *)&p->apCsr[p->nCursor];
- p->nOnceFlag = pProgram->nOnce;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- p->anExec = 0;
-#endif
- pOp = &aOp[-1];
+ p->nOnceFlag = u.cd.pProgram->nOnce;
+ pc = -1;
memset(p->aOnceFlag, 0, p->nOnceFlag);
break;
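The frame machinery above runs whenever a trigger program fires; recursive invocation is only allowed when the p5 flag is set (tracking PRAGMA recursive_triggers) and nesting depth is bounded by SQLITE_LIMIT_TRIGGER_DEPTH. A hedged sketch, with trigger and table names that are illustrative and not part of this patch:

#include <sqlite3.h>

/* Allow triggers to fire other triggers, but keep the nesting shallow. */
static int setup_recursive_triggers(sqlite3 *db){
  int rc = sqlite3_exec(db, "PRAGMA recursive_triggers=ON;", 0, 0, 0);
  if( rc==SQLITE_OK ){
    sqlite3_limit(db, SQLITE_LIMIT_TRIGGER_DEPTH, 10);
    rc = sqlite3_exec(db,
        "CREATE TRIGGER IF NOT EXISTS audit AFTER INSERT ON t "
        "BEGIN INSERT INTO log(msg) VALUES('row added'); END;",
        0, 0, 0);
  }
  return rc;
}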
@@ -78483,13 +71913,14 @@ case OP_Program: { /* jump */
** the value of the P1 argument to the value of the P1 argument to the
** calling OP_Program instruction.
*/
-case OP_Param: { /* out2 */
+case OP_Param: { /* out2-prerelease */
+#if 0 /* local variables moved into u.ce */
VdbeFrame *pFrame;
Mem *pIn;
- pOut = out2Prerelease(p, pOp);
- pFrame = p->pFrame;
- pIn = &pFrame->aMem[pOp->p1 + pFrame->aOp[pFrame->pc].p1];
- sqlite3VdbeMemShallowCopy(pOut, pIn, MEM_Ephem);
+#endif /* local variables moved into u.ce */
+ u.ce.pFrame = p->pFrame;
+ u.ce.pIn = &u.ce.pFrame->aMem[pOp->p1 + u.ce.pFrame->aOp[u.ce.pFrame->pc].p1];
+ sqlite3VdbeMemShallowCopy(pOut, u.ce.pIn, MEM_Ephem);
break;
}
@@ -78529,11 +71960,9 @@ case OP_FkCounter: {
*/
case OP_FkIfZero: { /* jump */
if( pOp->p1 ){
- VdbeBranchTaken(db->nDeferredCons==0 && db->nDeferredImmCons==0, 2);
- if( db->nDeferredCons==0 && db->nDeferredImmCons==0 ) goto jump_to_p2;
+ if( db->nDeferredCons==0 && db->nDeferredImmCons==0 ) pc = pOp->p2-1;
}else{
- VdbeBranchTaken(p->nFkConstraint==0 && db->nDeferredImmCons==0, 2);
- if( p->nFkConstraint==0 && db->nDeferredImmCons==0 ) goto jump_to_p2;
+ if( p->nFkConstraint==0 && db->nDeferredImmCons==0 ) pc = pOp->p2-1;
}
break;
}
@@ -78552,207 +71981,143 @@ case OP_FkIfZero: { /* jump */
** an integer.
*/
case OP_MemMax: { /* in2 */
+#if 0 /* local variables moved into u.cf */
+ Mem *pIn1;
VdbeFrame *pFrame;
+#endif /* local variables moved into u.cf */
if( p->pFrame ){
- for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
- pIn1 = &pFrame->aMem[pOp->p1];
+ for(u.cf.pFrame=p->pFrame; u.cf.pFrame->pParent; u.cf.pFrame=u.cf.pFrame->pParent);
+ u.cf.pIn1 = &u.cf.pFrame->aMem[pOp->p1];
}else{
- pIn1 = &aMem[pOp->p1];
+ u.cf.pIn1 = &aMem[pOp->p1];
}
- assert( memIsValid(pIn1) );
- sqlite3VdbeMemIntegerify(pIn1);
+ assert( memIsValid(u.cf.pIn1) );
+ sqlite3VdbeMemIntegerify(u.cf.pIn1);
pIn2 = &aMem[pOp->p2];
sqlite3VdbeMemIntegerify(pIn2);
- if( pIn1->u.i<pIn2->u.i){
- pIn1->u.i = pIn2->u.i;
+ if( u.cf.pIn1->u.i<pIn2->u.i){
+ u.cf.pIn1->u.i = pIn2->u.i;
}
break;
}
#endif /* SQLITE_OMIT_AUTOINCREMENT */
-/* Opcode: IfPos P1 P2 P3 * *
-** Synopsis: if r[P1]>0 then r[P1]-=P3, goto P2
+/* Opcode: IfPos P1 P2 * * *
+** Synopsis: if r[P1]>0 goto P2
**
-** Register P1 must contain an integer.
-** If the value of register P1 is 1 or greater, subtract P3 from the
-** value in P1 and jump to P2.
+** If the value of register P1 is 1 or greater, jump to P2.
**
-** If the initial value of register P1 is less than 1, then the
-** value is unchanged and control passes through to the next instruction.
+** It is illegal to use this instruction on a register that does
+** not contain an integer. An assertion fault will result if you try.
*/
case OP_IfPos: { /* jump, in1 */
pIn1 = &aMem[pOp->p1];
assert( pIn1->flags&MEM_Int );
- VdbeBranchTaken( pIn1->u.i>0, 2);
if( pIn1->u.i>0 ){
- pIn1->u.i -= pOp->p3;
- goto jump_to_p2;
+ pc = pOp->p2 - 1;
}
break;
}
-/* Opcode: SetIfNotPos P1 P2 P3 * *
-** Synopsis: if r[P1]<=0 then r[P2]=P3
+/* Opcode: IfNeg P1 P2 * * *
+** Synopsis: if r[P1]<0 goto P2
**
-** Register P1 must contain an integer.
-** If the value of register P1 is not positive (if it is less than 1) then
-** set the value of register P2 to be the integer P3.
-*/
-case OP_SetIfNotPos: { /* in1, in2 */
- pIn1 = &aMem[pOp->p1];
- assert( pIn1->flags&MEM_Int );
- if( pIn1->u.i<=0 ){
- pOut = out2Prerelease(p, pOp);
- pOut->u.i = pOp->p3;
- }
- break;
-}
-
-/* Opcode: IfNotZero P1 P2 P3 * *
-** Synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2
+** If the value of register P1 is less than zero, jump to P2.
**
-** Register P1 must contain an integer. If the content of register P1 is
-** initially nonzero, then subtract P3 from the value in register P1 and
-** jump to P2. If register P1 is initially zero, leave it unchanged
-** and fall through.
+** It is illegal to use this instruction on a register that does
+** not contain an integer. An assertion fault will result if you try.
*/
-case OP_IfNotZero: { /* jump, in1 */
+case OP_IfNeg: { /* jump, in1 */
pIn1 = &aMem[pOp->p1];
assert( pIn1->flags&MEM_Int );
- VdbeBranchTaken(pIn1->u.i<0, 2);
- if( pIn1->u.i ){
- pIn1->u.i -= pOp->p3;
- goto jump_to_p2;
+ if( pIn1->u.i<0 ){
+ pc = pOp->p2 - 1;
}
break;
}
-/* Opcode: DecrJumpZero P1 P2 * * *
-** Synopsis: if (--r[P1])==0 goto P2
+/* Opcode: IfZero P1 P2 P3 * *
+** Synopsis: r[P1]+=P3, if r[P1]==0 goto P2
**
-** Register P1 must hold an integer. Decrement the value in register P1
-** then jump to P2 if the new value is exactly zero.
-*/
-case OP_DecrJumpZero: { /* jump, in1 */
- pIn1 = &aMem[pOp->p1];
- assert( pIn1->flags&MEM_Int );
- pIn1->u.i--;
- VdbeBranchTaken(pIn1->u.i==0, 2);
- if( pIn1->u.i==0 ) goto jump_to_p2;
- break;
-}
-
-
-/* Opcode: JumpZeroIncr P1 P2 * * *
-** Synopsis: if (r[P1]++)==0 ) goto P2
+** The register P1 must contain an integer. Add literal P3 to the
+** value in register P1. If the result is exactly 0, jump to P2.
**
-** The register P1 must contain an integer. If register P1 is initially
-** zero, then jump to P2. Increment register P1 regardless of whether or
-** not the jump is taken.
+** It is illegal to use this instruction on a register that does
+** not contain an integer. An assertion fault will result if you try.
*/
-case OP_JumpZeroIncr: { /* jump, in1 */
+case OP_IfZero: { /* jump, in1 */
pIn1 = &aMem[pOp->p1];
assert( pIn1->flags&MEM_Int );
- VdbeBranchTaken(pIn1->u.i==0, 2);
- if( (pIn1->u.i++)==0 ) goto jump_to_p2;
+ pIn1->u.i += pOp->p3;
+ if( pIn1->u.i==0 ){
+ pc = pOp->p2 - 1;
+ }
break;
}
-/* Opcode: AggStep0 * P2 P3 P4 P5
-** Synopsis: accum=r[P3] step(r[P2@P5])
-**
-** Execute the step function for an aggregate. The
-** function has P5 arguments. P4 is a pointer to the FuncDef
-** structure that specifies the function. Register P3 is the
-** accumulator.
-**
-** The P5 arguments are taken from register P2 and its
-** successors.
-*/
/* Opcode: AggStep * P2 P3 P4 P5
** Synopsis: accum=r[P3] step(r[P2@P5])
**
** Execute the step function for an aggregate. The
-** function has P5 arguments. P4 is a pointer to an sqlite3_context
-** object that is used to run the function. Register P3 is
-** as the accumulator.
+** function has P5 arguments. P4 is a pointer to the FuncDef
+** structure that specifies the function. Use register
+** P3 as the accumulator.
**
** The P5 arguments are taken from register P2 and its
** successors.
-**
-** This opcode is initially coded as OP_AggStep0. On first evaluation,
-** the FuncDef stored in P4 is converted into an sqlite3_context and
-** the opcode is changed. In this way, the initialization of the
-** sqlite3_context only happens once, instead of on each call to the
-** step function.
*/
-case OP_AggStep0: {
- int n;
- sqlite3_context *pCtx;
-
- assert( pOp->p4type==P4_FUNCDEF );
- n = pOp->p5;
- assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
- assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem-p->nCursor)+1) );
- assert( pOp->p3<pOp->p2 || pOp->p3>=pOp->p2+n );
- pCtx = sqlite3DbMallocRaw(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*));
- if( pCtx==0 ) goto no_mem;
- pCtx->pMem = 0;
- pCtx->pFunc = pOp->p4.pFunc;
- pCtx->iOp = (int)(pOp - aOp);
- pCtx->pVdbe = p;
- pCtx->argc = n;
- pOp->p4type = P4_FUNCCTX;
- pOp->p4.pCtx = pCtx;
- pOp->opcode = OP_AggStep;
- /* Fall through into OP_AggStep */
-}
case OP_AggStep: {
+#if 0 /* local variables moved into u.cg */
+ int n;
int i;
- sqlite3_context *pCtx;
Mem *pMem;
- Mem t;
-
- assert( pOp->p4type==P4_FUNCCTX );
- pCtx = pOp->p4.pCtx;
- pMem = &aMem[pOp->p3];
-
- /* If this function is inside of a trigger, the register array in aMem[]
- ** might change from one evaluation to the next. The next block of code
- ** checks to see if the register array has changed, and if so it
- ** reinitializes the relavant parts of the sqlite3_context object */
- if( pCtx->pMem != pMem ){
- pCtx->pMem = pMem;
- for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
- }
-
-#ifdef SQLITE_DEBUG
- for(i=0; i<pCtx->argc; i++){
- assert( memIsValid(pCtx->argv[i]) );
- REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]);
+ Mem *pRec;
+ sqlite3_context ctx;
+ sqlite3_value **apVal;
+#endif /* local variables moved into u.cg */
+
+ u.cg.n = pOp->p5;
+ assert( u.cg.n>=0 );
+ u.cg.pRec = &aMem[pOp->p2];
+ u.cg.apVal = p->apArg;
+ assert( u.cg.apVal || u.cg.n==0 );
+ for(u.cg.i=0; u.cg.i<u.cg.n; u.cg.i++, u.cg.pRec++){
+ assert( memIsValid(u.cg.pRec) );
+ u.cg.apVal[u.cg.i] = u.cg.pRec;
+ memAboutToChange(p, u.cg.pRec);
+ sqlite3VdbeMemStoreType(u.cg.pRec);
+ }
+ u.cg.ctx.pFunc = pOp->p4.pFunc;
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
+ u.cg.ctx.pMem = u.cg.pMem = &aMem[pOp->p3];
+ u.cg.pMem->n++;
+ u.cg.ctx.s.flags = MEM_Null;
+ u.cg.ctx.s.z = 0;
+ u.cg.ctx.s.zMalloc = 0;
+ u.cg.ctx.s.xDel = 0;
+ u.cg.ctx.s.db = db;
+ u.cg.ctx.isError = 0;
+ u.cg.ctx.pColl = 0;
+ u.cg.ctx.skipFlag = 0;
+ if( u.cg.ctx.pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ assert( pOp>p->aOp );
+ assert( pOp[-1].p4type==P4_COLLSEQ );
+ assert( pOp[-1].opcode==OP_CollSeq );
+ u.cg.ctx.pColl = pOp[-1].p4.pColl;
}
-#endif
-
- pMem->n++;
- sqlite3VdbeMemInit(&t, db, MEM_Null);
- pCtx->pOut = &t;
- pCtx->fErrorOrAux = 0;
- pCtx->skipFlag = 0;
- (pCtx->pFunc->xStep)(pCtx,pCtx->argc,pCtx->argv); /* IMP: R-24505-23230 */
- if( pCtx->fErrorOrAux ){
- if( pCtx->isError ){
- sqlite3VdbeError(p, "%s", sqlite3_value_text(&t));
- rc = pCtx->isError;
- }
- sqlite3VdbeMemRelease(&t);
- }else{
- assert( t.flags==MEM_Null );
+ (u.cg.ctx.pFunc->xStep)(&u.cg.ctx, u.cg.n, u.cg.apVal); /* IMP: R-24505-23230 */
+ if( u.cg.ctx.isError ){
+ sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&u.cg.ctx.s));
+ rc = u.cg.ctx.isError;
}
- if( pCtx->skipFlag ){
+ if( u.cg.ctx.skipFlag ){
assert( pOp[-1].opcode==OP_CollSeq );
- i = pOp[-1].p1;
- if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1);
+ u.cg.i = pOp[-1].p1;
+ if( u.cg.i ) sqlite3VdbeMemSetInt64(&aMem[u.cg.i], 1);
}
+
+ sqlite3VdbeMemRelease(&u.cg.ctx.s);
+
break;
}
@@ -78770,17 +72135,19 @@ case OP_AggStep: {
** the step function was not previously called.
*/
case OP_AggFinal: {
+#if 0 /* local variables moved into u.ch */
Mem *pMem;
+#endif /* local variables moved into u.ch */
assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) );
- pMem = &aMem[pOp->p1];
- assert( (pMem->flags & ~(MEM_Null|MEM_Agg))==0 );
- rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc);
+ u.ch.pMem = &aMem[pOp->p1];
+ assert( (u.ch.pMem->flags & ~(MEM_Null|MEM_Agg))==0 );
+ rc = sqlite3VdbeMemFinalize(u.ch.pMem, pOp->p4.pFunc);
if( rc ){
- sqlite3VdbeError(p, "%s", sqlite3_value_text(pMem));
+ sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(u.ch.pMem));
}
- sqlite3VdbeChangeEncoding(pMem, encoding);
- UPDATE_MAX_BLOBSIZE(pMem);
- if( sqlite3VdbeMemTooBig(pMem) ){
+ sqlite3VdbeChangeEncoding(u.ch.pMem, encoding);
+ UPDATE_MAX_BLOBSIZE(u.ch.pMem);
+ if( sqlite3VdbeMemTooBig(u.ch.pMem) ){
goto too_big;
}
break;
@@ -78790,8 +72157,8 @@ case OP_AggFinal: {
/* Opcode: Checkpoint P1 P2 P3 * *
**
** Checkpoint database P1. This is a no-op if P1 is not currently in
-** WAL mode. Parameter P2 is one of SQLITE_CHECKPOINT_PASSIVE, FULL,
-** RESTART, or TRUNCATE. Write 1 or 0 into mem[P3] if the checkpoint returns
+** WAL mode. Parameter P2 is one of SQLITE_CHECKPOINT_PASSIVE, FULL
+** or RESTART. Write 1 or 0 into mem[P3] if the checkpoint returns
** SQLITE_BUSY or not, respectively. Write the number of pages in the
** WAL after the checkpoint into mem[P3+1] and the number of pages
** in the WAL that have been checkpointed after the checkpoint
@@ -78799,32 +72166,33 @@ case OP_AggFinal: {
** mem[P3+2] are initialized to -1.
*/
case OP_Checkpoint: {
+#if 0 /* local variables moved into u.ci */
int i; /* Loop counter */
int aRes[3]; /* Results */
Mem *pMem; /* Write results here */
+#endif /* local variables moved into u.ci */
assert( p->readOnly==0 );
- aRes[0] = 0;
- aRes[1] = aRes[2] = -1;
+ u.ci.aRes[0] = 0;
+ u.ci.aRes[1] = u.ci.aRes[2] = -1;
assert( pOp->p2==SQLITE_CHECKPOINT_PASSIVE
|| pOp->p2==SQLITE_CHECKPOINT_FULL
|| pOp->p2==SQLITE_CHECKPOINT_RESTART
- || pOp->p2==SQLITE_CHECKPOINT_TRUNCATE
);
- rc = sqlite3Checkpoint(db, pOp->p1, pOp->p2, &aRes[1], &aRes[2]);
+ rc = sqlite3Checkpoint(db, pOp->p1, pOp->p2, &u.ci.aRes[1], &u.ci.aRes[2]);
if( rc==SQLITE_BUSY ){
rc = SQLITE_OK;
- aRes[0] = 1;
+ u.ci.aRes[0] = 1;
+ }
+ for(u.ci.i=0, u.ci.pMem = &aMem[pOp->p3]; u.ci.i<3; u.ci.i++, u.ci.pMem++){
+ sqlite3VdbeMemSetInt64(u.ci.pMem, (i64)u.ci.aRes[u.ci.i]);
}
- for(i=0, pMem = &aMem[pOp->p3]; i<3; i++, pMem++){
- sqlite3VdbeMemSetInt64(pMem, (i64)aRes[i]);
- }
break;
};
#endif
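/* Editor's aside, not part of the diff: the values this opcode stores in
** mem[P3..P3+2] are the same ones exposed by the public checkpoint API.  A
** minimal sketch, assuming "db" is an open connection on a WAL-mode
** database: */
static int checkpointMain(sqlite3 *db){
  int nLog = -1;    /* frames currently in the WAL */
  int nCkpt = -1;   /* frames checkpointed into the database file */
  int rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_PASSIVE,
                                     &nLog, &nCkpt);
  /* rc==SQLITE_BUSY corresponds to the 1 written into mem[P3] above */
  return rc;
}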
#ifndef SQLITE_OMIT_PRAGMA
-/* Opcode: JournalMode P1 P2 P3 * *
+/* Opcode: JournalMode P1 P2 P3 * P5
**
** Change the journal mode of database P1 to P3. P3 must be one of the
** PAGER_JOURNALMODE_XXX values. If changing between the various rollback
@@ -78835,7 +72203,8 @@ case OP_Checkpoint: {
**
** Write a string containing the final journal-mode to register P2.
*/
-case OP_JournalMode: { /* out2 */
+case OP_JournalMode: { /* out2-prerelease */
+#if 0 /* local variables moved into u.cj */
Btree *pBt; /* Btree to change journal mode of */
Pager *pPager; /* Pager associated with pBt */
int eNew; /* New journal mode */
@@ -78843,85 +72212,86 @@ case OP_JournalMode: { /* out2 */
#ifndef SQLITE_OMIT_WAL
const char *zFilename; /* Name of database file for pPager */
#endif
+#endif /* local variables moved into u.cj */
- pOut = out2Prerelease(p, pOp);
- eNew = pOp->p3;
- assert( eNew==PAGER_JOURNALMODE_DELETE
- || eNew==PAGER_JOURNALMODE_TRUNCATE
- || eNew==PAGER_JOURNALMODE_PERSIST
- || eNew==PAGER_JOURNALMODE_OFF
- || eNew==PAGER_JOURNALMODE_MEMORY
- || eNew==PAGER_JOURNALMODE_WAL
- || eNew==PAGER_JOURNALMODE_QUERY
+ u.cj.eNew = pOp->p3;
+ assert( u.cj.eNew==PAGER_JOURNALMODE_DELETE
+ || u.cj.eNew==PAGER_JOURNALMODE_TRUNCATE
+ || u.cj.eNew==PAGER_JOURNALMODE_PERSIST
+ || u.cj.eNew==PAGER_JOURNALMODE_OFF
+ || u.cj.eNew==PAGER_JOURNALMODE_MEMORY
+ || u.cj.eNew==PAGER_JOURNALMODE_WAL
+ || u.cj.eNew==PAGER_JOURNALMODE_QUERY
);
assert( pOp->p1>=0 && pOp->p1<db->nDb );
assert( p->readOnly==0 );
- pBt = db->aDb[pOp->p1].pBt;
- pPager = sqlite3BtreePager(pBt);
- eOld = sqlite3PagerGetJournalMode(pPager);
- if( eNew==PAGER_JOURNALMODE_QUERY ) eNew = eOld;
- if( !sqlite3PagerOkToChangeJournalMode(pPager) ) eNew = eOld;
+ u.cj.pBt = db->aDb[pOp->p1].pBt;
+ u.cj.pPager = sqlite3BtreePager(u.cj.pBt);
+ u.cj.eOld = sqlite3PagerGetJournalMode(u.cj.pPager);
+ if( u.cj.eNew==PAGER_JOURNALMODE_QUERY ) u.cj.eNew = u.cj.eOld;
+ if( !sqlite3PagerOkToChangeJournalMode(u.cj.pPager) ) u.cj.eNew = u.cj.eOld;
#ifndef SQLITE_OMIT_WAL
- zFilename = sqlite3PagerFilename(pPager, 1);
+ u.cj.zFilename = sqlite3PagerFilename(u.cj.pPager, 1);
/* Do not allow a transition to journal_mode=WAL for a database
- ** in temporary storage or if the VFS does not support shared memory
+ ** in temporary storage or if the VFS does not support shared memory
*/
- if( eNew==PAGER_JOURNALMODE_WAL
- && (sqlite3Strlen30(zFilename)==0 /* Temp file */
- || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */
+ if( u.cj.eNew==PAGER_JOURNALMODE_WAL
+ && (sqlite3Strlen30(u.cj.zFilename)==0 /* Temp file */
+ || !sqlite3PagerWalSupported(u.cj.pPager)) /* No shared-memory support */
){
- eNew = eOld;
+ u.cj.eNew = u.cj.eOld;
}
- if( (eNew!=eOld)
- && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL)
+ if( (u.cj.eNew!=u.cj.eOld)
+ && (u.cj.eOld==PAGER_JOURNALMODE_WAL || u.cj.eNew==PAGER_JOURNALMODE_WAL)
){
if( !db->autoCommit || db->nVdbeRead>1 ){
rc = SQLITE_ERROR;
- sqlite3VdbeError(p,
+ sqlite3SetString(&p->zErrMsg, db,
"cannot change %s wal mode from within a transaction",
- (eNew==PAGER_JOURNALMODE_WAL ? "into" : "out of")
+ (u.cj.eNew==PAGER_JOURNALMODE_WAL ? "into" : "out of")
);
break;
}else{
-
- if( eOld==PAGER_JOURNALMODE_WAL ){
+
+ if( u.cj.eOld==PAGER_JOURNALMODE_WAL ){
/* If leaving WAL mode, close the log file. If successful, the call
- ** to PagerCloseWal() checkpoints and deletes the write-ahead-log
- ** file. An EXCLUSIVE lock may still be held on the database file
- ** after a successful return.
+ ** to PagerCloseWal() checkpoints and deletes the write-ahead-log
+ ** file. An EXCLUSIVE lock may still be held on the database file
+ ** after a successful return.
*/
- rc = sqlite3PagerCloseWal(pPager);
+ rc = sqlite3PagerCloseWal(u.cj.pPager);
if( rc==SQLITE_OK ){
- sqlite3PagerSetJournalMode(pPager, eNew);
+ sqlite3PagerSetJournalMode(u.cj.pPager, u.cj.eNew);
}
- }else if( eOld==PAGER_JOURNALMODE_MEMORY ){
+ }else if( u.cj.eOld==PAGER_JOURNALMODE_MEMORY ){
/* Cannot transition directly from MEMORY to WAL. Use mode OFF
** as an intermediate */
- sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF);
+ sqlite3PagerSetJournalMode(u.cj.pPager, PAGER_JOURNALMODE_OFF);
}
-
+
/* Open a transaction on the database file. Regardless of the journal
** mode, this transaction always uses a rollback journal.
*/
- assert( sqlite3BtreeIsInTrans(pBt)==0 );
+ assert( sqlite3BtreeIsInTrans(u.cj.pBt)==0 );
if( rc==SQLITE_OK ){
- rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1));
+ rc = sqlite3BtreeSetVersion(u.cj.pBt, (u.cj.eNew==PAGER_JOURNALMODE_WAL ? 2 : 1));
}
}
}
#endif /* ifndef SQLITE_OMIT_WAL */
if( rc ){
- eNew = eOld;
+ u.cj.eNew = u.cj.eOld;
}
- eNew = sqlite3PagerSetJournalMode(pPager, eNew);
+ u.cj.eNew = sqlite3PagerSetJournalMode(u.cj.pPager, u.cj.eNew);
+ pOut = &aMem[pOp->p2];
pOut->flags = MEM_Str|MEM_Static|MEM_Term;
- pOut->z = (char *)sqlite3JournalModename(eNew);
+ pOut->z = (char *)sqlite3JournalModename(u.cj.eNew);
pOut->n = sqlite3Strlen30(pOut->z);
pOut->enc = SQLITE_UTF8;
sqlite3VdbeChangeEncoding(pOut, encoding);
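/* Editor's aside, not part of the diff: OP_JournalMode is what runs when the
** journal mode is changed through SQL, and the string it leaves in register
** P2 is what the PRAGMA returns.  A minimal sketch using the public API: */
static int journalModeCb(void *pUnused, int nCol, char **azVal, char **azCol){
  /* azVal[0] is the final journal mode, e.g. "wal" or "delete" */
  (void)pUnused; (void)nCol; (void)azVal; (void)azCol;
  return 0;
}
static int switchToWal(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA journal_mode=WAL;", journalModeCb, 0, 0);
}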
@@ -78951,17 +72321,18 @@ case OP_Vacuum: {
** P2. Otherwise, fall through to the next instruction.
*/
case OP_IncrVacuum: { /* jump */
+#if 0 /* local variables moved into u.ck */
Btree *pBt;
+#endif /* local variables moved into u.ck */
assert( pOp->p1>=0 && pOp->p1<db->nDb );
- assert( DbMaskTest(p->btreeMask, pOp->p1) );
+ assert( (p->btreeMask & (((yDbMask)1)<<pOp->p1))!=0 );
assert( p->readOnly==0 );
- pBt = db->aDb[pOp->p1].pBt;
- rc = sqlite3BtreeIncrVacuum(pBt);
- VdbeBranchTaken(rc==SQLITE_DONE,2);
+ u.ck.pBt = db->aDb[pOp->p1].pBt;
+ rc = sqlite3BtreeIncrVacuum(u.ck.pBt);
if( rc==SQLITE_DONE ){
+ pc = pOp->p2 - 1;
rc = SQLITE_OK;
- goto jump_to_p2;
}
break;
}
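/* Editor's aside, not part of the diff: OP_IncrVacuum is the per-page step
** behind "PRAGMA incremental_vacuum".  A minimal usage sketch, assuming the
** database was created with auto_vacuum=INCREMENTAL: */
static int reclaimSomePages(sqlite3 *db){
  /* frees at most 100 pages; the VDBE loops over OP_IncrVacuum until either
  ** the limit is reached or sqlite3BtreeIncrVacuum() reports SQLITE_DONE */
  return sqlite3_exec(db, "PRAGMA main.incremental_vacuum(100);", 0, 0, 0);
}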
@@ -78969,13 +72340,12 @@ case OP_IncrVacuum: { /* jump */
/* Opcode: Expire P1 * * * *
**
-** Cause precompiled statements to expire. When an expired statement
-** is executed using sqlite3_step() it will either automatically
-** reprepare itself (if it was originally created using sqlite3_prepare_v2())
-** or it will fail with SQLITE_SCHEMA.
+** Cause precompiled statements to become expired. An expired statement
+** fails with an error code of SQLITE_SCHEMA if it is ever executed
+** (via sqlite3_step()).
**
** If P1 is 0, then all SQL statements become expired. If P1 is non-zero,
-** then only the currently executing statement is expired.
+** then only the currently executing statement is affected.
*/
case OP_Expire: {
if( !pOp->p1 ){
@@ -79007,12 +72377,12 @@ case OP_TableLock: {
if( isWriteLock || 0==(db->flags&SQLITE_ReadUncommitted) ){
int p1 = pOp->p1;
assert( p1>=0 && p1<db->nDb );
- assert( DbMaskTest(p->btreeMask, p1) );
+ assert( (p->btreeMask & (((yDbMask)1)<<p1))!=0 );
assert( isWriteLock==0 || isWriteLock==1 );
rc = sqlite3BtreeLockTable(db->aDb[p1].pBt, pOp->p2, isWriteLock);
if( (rc&0xFF)==SQLITE_LOCKED ){
const char *z = pOp->p4.z;
- sqlite3VdbeError(p, "database table is locked: %s", z);
+ sqlite3SetString(&p->zErrMsg, db, "database table is locked: %s", z);
}
}
break;
@@ -79030,38 +72400,24 @@ case OP_TableLock: {
** code will be set to SQLITE_LOCKED.
*/
case OP_VBegin: {
+#if 0 /* local variables moved into u.cl */
VTable *pVTab;
- pVTab = pOp->p4.pVtab;
- rc = sqlite3VtabBegin(db, pVTab);
- if( pVTab ) sqlite3VtabImportErrmsg(p, pVTab->pVtab);
+#endif /* local variables moved into u.cl */
+ u.cl.pVTab = pOp->p4.pVtab;
+ rc = sqlite3VtabBegin(db, u.cl.pVTab);
+ if( u.cl.pVTab ) sqlite3VtabImportErrmsg(p, u.cl.pVTab->pVtab);
break;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
#ifndef SQLITE_OMIT_VIRTUALTABLE
-/* Opcode: VCreate P1 P2 * * *
+/* Opcode: VCreate P1 * * P4 *
**
-** P2 is a register that holds the name of a virtual table in database
-** P1. Call the xCreate method for that table.
+** P4 is the name of a virtual table in database P1. Call the xCreate method
+** for that table.
*/
case OP_VCreate: {
- Mem sMem; /* For storing the record being decoded */
- const char *zTab; /* Name of the virtual table */
-
- memset(&sMem, 0, sizeof(sMem));
- sMem.db = db;
- /* Because P2 is always a static string, it is impossible for
- ** sqlite3VdbeMemCopy() to fail */
- assert( (aMem[pOp->p2].flags & MEM_Str)!=0 );
- assert( (aMem[pOp->p2].flags & MEM_Static)!=0 );
- rc = sqlite3VdbeMemCopy(&sMem, &aMem[pOp->p2]);
- assert( rc==SQLITE_OK );
- zTab = (const char*)sqlite3_value_text(&sMem);
- assert( zTab || db->mallocFailed );
- if( zTab ){
- rc = sqlite3VtabCallCreate(db, pOp->p1, zTab, &p->zErrMsg);
- }
- sqlite3VdbeMemRelease(&sMem);
+ rc = sqlite3VtabCallCreate(db, pOp->p1, pOp->p4.z, &p->zErrMsg);
break;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
@@ -79073,9 +72429,9 @@ case OP_VCreate: {
** of that table.
*/
case OP_VDestroy: {
- db->nVDestroy++;
+ p->inVtabMethod = 2;
rc = sqlite3VtabCallDestroy(db, pOp->p1, pOp->p4.z);
- db->nVDestroy--;
+ p->inVtabMethod = 0;
break;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
@@ -79088,35 +72444,32 @@ case OP_VDestroy: {
** table and stores that cursor in P1.
*/
case OP_VOpen: {
+#if 0 /* local variables moved into u.cm */
VdbeCursor *pCur;
sqlite3_vtab_cursor *pVtabCursor;
sqlite3_vtab *pVtab;
- const sqlite3_module *pModule;
+ sqlite3_module *pModule;
+#endif /* local variables moved into u.cm */
assert( p->bIsReader );
- pCur = 0;
- pVtabCursor = 0;
- pVtab = pOp->p4.pVtab->pVtab;
- if( pVtab==0 || NEVER(pVtab->pModule==0) ){
- rc = SQLITE_LOCKED;
- break;
- }
- pModule = pVtab->pModule;
- rc = pModule->xOpen(pVtab, &pVtabCursor);
- sqlite3VtabImportErrmsg(p, pVtab);
+ u.cm.pCur = 0;
+ u.cm.pVtabCursor = 0;
+ u.cm.pVtab = pOp->p4.pVtab->pVtab;
+ u.cm.pModule = (sqlite3_module *)u.cm.pVtab->pModule;
+ assert(u.cm.pVtab && u.cm.pModule);
+ rc = u.cm.pModule->xOpen(u.cm.pVtab, &u.cm.pVtabCursor);
+ sqlite3VtabImportErrmsg(p, u.cm.pVtab);
if( SQLITE_OK==rc ){
/* Initialize sqlite3_vtab_cursor base class */
- pVtabCursor->pVtab = pVtab;
+ u.cm.pVtabCursor->pVtab = u.cm.pVtab;
/* Initialize vdbe cursor object */
- pCur = allocateCursor(p, pOp->p1, 0, -1, 0);
- if( pCur ){
- pCur->pVtabCursor = pVtabCursor;
- pVtab->nRef++;
+ u.cm.pCur = allocateCursor(p, pOp->p1, 0, -1, 0);
+ if( u.cm.pCur ){
+ u.cm.pCur->pVtabCursor = u.cm.pVtabCursor;
}else{
- assert( db->mallocFailed );
- pModule->xClose(pVtabCursor);
- goto no_mem;
+ db->mallocFailed = 1;
+ u.cm.pModule->xClose(u.cm.pVtabCursor);
}
}
break;
@@ -79125,7 +72478,7 @@ case OP_VOpen: {
#ifndef SQLITE_OMIT_VIRTUALTABLE
/* Opcode: VFilter P1 P2 P3 P4 *
-** Synopsis: iplan=r[P3] zplan='P4'
+** Synopsis: iPlan=r[P3] zPlan='P4'
**
** P1 is a cursor opened using VOpen. P2 is an address to jump to if
** the filtered result set is empty.
@@ -79144,6 +72497,7 @@ case OP_VOpen: {
** A jump is made to P2 if the result set after filtering would be empty.
*/
case OP_VFilter: { /* jump */
+#if 0 /* local variables moved into u.cn */
int nArg;
int iQuery;
const sqlite3_module *pModule;
@@ -79155,36 +72509,46 @@ case OP_VFilter: { /* jump */
int res;
int i;
Mem **apArg;
-
- pQuery = &aMem[pOp->p3];
- pArgc = &pQuery[1];
- pCur = p->apCsr[pOp->p1];
- assert( memIsValid(pQuery) );
- REGISTER_TRACE(pOp->p3, pQuery);
- assert( pCur->pVtabCursor );
- pVtabCursor = pCur->pVtabCursor;
- pVtab = pVtabCursor->pVtab;
- pModule = pVtab->pModule;
+#endif /* local variables moved into u.cn */
+
+ u.cn.pQuery = &aMem[pOp->p3];
+ u.cn.pArgc = &u.cn.pQuery[1];
+ u.cn.pCur = p->apCsr[pOp->p1];
+ assert( memIsValid(u.cn.pQuery) );
+ REGISTER_TRACE(pOp->p3, u.cn.pQuery);
+ assert( u.cn.pCur->pVtabCursor );
+ u.cn.pVtabCursor = u.cn.pCur->pVtabCursor;
+ u.cn.pVtab = u.cn.pVtabCursor->pVtab;
+ u.cn.pModule = u.cn.pVtab->pModule;
/* Grab the index number and argc parameters */
- assert( (pQuery->flags&MEM_Int)!=0 && pArgc->flags==MEM_Int );
- nArg = (int)pArgc->u.i;
- iQuery = (int)pQuery->u.i;
+ assert( (u.cn.pQuery->flags&MEM_Int)!=0 && u.cn.pArgc->flags==MEM_Int );
+ u.cn.nArg = (int)u.cn.pArgc->u.i;
+ u.cn.iQuery = (int)u.cn.pQuery->u.i;
/* Invoke the xFilter method */
- res = 0;
- apArg = p->apArg;
- for(i = 0; i<nArg; i++){
- apArg[i] = &pArgc[i+1];
- }
- rc = pModule->xFilter(pVtabCursor, iQuery, pOp->p4.z, nArg, apArg);
- sqlite3VtabImportErrmsg(p, pVtab);
- if( rc==SQLITE_OK ){
- res = pModule->xEof(pVtabCursor);
+ {
+ u.cn.res = 0;
+ u.cn.apArg = p->apArg;
+ for(u.cn.i = 0; u.cn.i<u.cn.nArg; u.cn.i++){
+ u.cn.apArg[u.cn.i] = &u.cn.pArgc[u.cn.i+1];
+ sqlite3VdbeMemStoreType(u.cn.apArg[u.cn.i]);
+ }
+
+ p->inVtabMethod = 1;
+ rc = u.cn.pModule->xFilter(u.cn.pVtabCursor, u.cn.iQuery, pOp->p4.z, u.cn.nArg, u.cn.apArg);
+ p->inVtabMethod = 0;
+ sqlite3VtabImportErrmsg(p, u.cn.pVtab);
+ if( rc==SQLITE_OK ){
+ u.cn.res = u.cn.pModule->xEof(u.cn.pVtabCursor);
+ }
+
+ if( u.cn.res ){
+ pc = pOp->p2 - 1;
+ }
}
- pCur->nullRow = 0;
- VdbeBranchTaken(res!=0,2);
- if( res ) goto jump_to_p2;
+ u.cn.pCur->nullRow = 0;
+
break;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
@@ -79198,36 +72562,51 @@ case OP_VFilter: { /* jump */
** P1 cursor is pointing to into register P3.
*/
case OP_VColumn: {
+#if 0 /* local variables moved into u.co */
sqlite3_vtab *pVtab;
const sqlite3_module *pModule;
Mem *pDest;
sqlite3_context sContext;
+#endif /* local variables moved into u.co */
VdbeCursor *pCur = p->apCsr[pOp->p1];
assert( pCur->pVtabCursor );
assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
- pDest = &aMem[pOp->p3];
- memAboutToChange(p, pDest);
+ u.co.pDest = &aMem[pOp->p3];
+ memAboutToChange(p, u.co.pDest);
if( pCur->nullRow ){
- sqlite3VdbeMemSetNull(pDest);
+ sqlite3VdbeMemSetNull(u.co.pDest);
break;
}
- pVtab = pCur->pVtabCursor->pVtab;
- pModule = pVtab->pModule;
- assert( pModule->xColumn );
- memset(&sContext, 0, sizeof(sContext));
- sContext.pOut = pDest;
- MemSetTypeFlag(pDest, MEM_Null);
- rc = pModule->xColumn(pCur->pVtabCursor, &sContext, pOp->p2);
- sqlite3VtabImportErrmsg(p, pVtab);
- if( sContext.isError ){
- rc = sContext.isError;
- }
- sqlite3VdbeChangeEncoding(pDest, encoding);
- REGISTER_TRACE(pOp->p3, pDest);
- UPDATE_MAX_BLOBSIZE(pDest);
-
- if( sqlite3VdbeMemTooBig(pDest) ){
+ u.co.pVtab = pCur->pVtabCursor->pVtab;
+ u.co.pModule = u.co.pVtab->pModule;
+ assert( u.co.pModule->xColumn );
+ memset(&u.co.sContext, 0, sizeof(u.co.sContext));
+
+ /* The output cell may already have a buffer allocated. Move
+ ** the current contents to u.co.sContext.s so that the user-function
+ ** can reuse the already allocated buffer instead of allocating a
+ ** new one.
+ */
+ sqlite3VdbeMemMove(&u.co.sContext.s, u.co.pDest);
+ MemSetTypeFlag(&u.co.sContext.s, MEM_Null);
+
+ rc = u.co.pModule->xColumn(pCur->pVtabCursor, &u.co.sContext, pOp->p2);
+ sqlite3VtabImportErrmsg(p, u.co.pVtab);
+ if( u.co.sContext.isError ){
+ rc = u.co.sContext.isError;
+ }
+
+ /* Copy the result of the function to the P3 register. We
+ ** do this regardless of whether or not an error occurred to ensure any
+ ** dynamic allocation in u.co.sContext.s (a Mem struct) is released.
+ */
+ sqlite3VdbeChangeEncoding(&u.co.sContext.s, encoding);
+ sqlite3VdbeMemMove(u.co.pDest, &u.co.sContext.s);
+ REGISTER_TRACE(pOp->p3, u.co.pDest);
+ UPDATE_MAX_BLOBSIZE(u.co.pDest);
+
+ if( sqlite3VdbeMemTooBig(u.co.pDest) ){
goto too_big;
}
break;
@@ -79242,36 +72621,40 @@ case OP_VColumn: {
** the end of its result set, then fall through to the next instruction.
*/
case OP_VNext: { /* jump */
+#if 0 /* local variables moved into u.cp */
sqlite3_vtab *pVtab;
const sqlite3_module *pModule;
int res;
VdbeCursor *pCur;
+#endif /* local variables moved into u.cp */
- res = 0;
- pCur = p->apCsr[pOp->p1];
- assert( pCur->pVtabCursor );
- if( pCur->nullRow ){
+ u.cp.res = 0;
+ u.cp.pCur = p->apCsr[pOp->p1];
+ assert( u.cp.pCur->pVtabCursor );
+ if( u.cp.pCur->nullRow ){
break;
}
- pVtab = pCur->pVtabCursor->pVtab;
- pModule = pVtab->pModule;
- assert( pModule->xNext );
+ u.cp.pVtab = u.cp.pCur->pVtabCursor->pVtab;
+ u.cp.pModule = u.cp.pVtab->pModule;
+ assert( u.cp.pModule->xNext );
/* Invoke the xNext() method of the module. There is no way for the
** underlying implementation to return an error if one occurs during
- ** xNext(). Instead, if an error occurs, true is returned (indicating that
+ ** xNext(). Instead, if an error occurs, true is returned (indicating that
** data is available) and the error code is returned when xColumn or
** some other method is next invoked on the same virtual table cursor.
*/
- rc = pModule->xNext(pCur->pVtabCursor);
- sqlite3VtabImportErrmsg(p, pVtab);
+ p->inVtabMethod = 1;
+ rc = u.cp.pModule->xNext(u.cp.pCur->pVtabCursor);
+ p->inVtabMethod = 0;
+ sqlite3VtabImportErrmsg(p, u.cp.pVtab);
if( rc==SQLITE_OK ){
- res = pModule->xEof(pCur->pVtabCursor);
+ u.cp.res = u.cp.pModule->xEof(u.cp.pCur->pVtabCursor);
}
- VdbeBranchTaken(!res,2);
- if( !res ){
+
+ if( !u.cp.res ){
/* If there is data, jump to P2 */
- goto jump_to_p2_and_check_for_interrupt;
+ pc = pOp->p2 - 1;
}
goto check_for_interrupt;
}
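/* Editor's aside, not part of the diff: OP_VOpen, OP_VFilter, OP_VColumn and
** OP_VNext dispatch to the xOpen/xFilter/xColumn/xNext methods of an
** sqlite3_module, with xEof consulted to decide the jumps.  A minimal
** read-only cursor sketch for a made-up table that just counts from 1 to 10;
** the remaining module methods (xCreate, xBestIndex, xClose's companions,
** etc.) and all error handling are omitted. */
typedef struct CountCursor {
  sqlite3_vtab_cursor base;   /* must be first: base class seen by the VDBE */
  sqlite3_int64 i;            /* current value */
} CountCursor;

static int countOpen(sqlite3_vtab *pVtab, sqlite3_vtab_cursor **ppCur){
  CountCursor *p = (CountCursor *)sqlite3_malloc(sizeof(*p));
  (void)pVtab;
  if( p==0 ) return SQLITE_NOMEM;
  p->i = 0;
  *ppCur = &p->base;          /* the core fills in p->base.pVtab (see OP_VOpen) */
  return SQLITE_OK;
}
static int countClose(sqlite3_vtab_cursor *cur){
  sqlite3_free(cur);
  return SQLITE_OK;
}
static int countFilter(sqlite3_vtab_cursor *cur, int idxNum, const char *idxStr,
                       int argc, sqlite3_value **argv){
  (void)idxNum; (void)idxStr; (void)argc; (void)argv;
  ((CountCursor*)cur)->i = 1;           /* OP_VFilter: position on first row */
  return SQLITE_OK;
}
static int countNext(sqlite3_vtab_cursor *cur){
  ((CountCursor*)cur)->i++;             /* OP_VNext: advance one row */
  return SQLITE_OK;
}
static int countEof(sqlite3_vtab_cursor *cur){
  return ((CountCursor*)cur)->i > 10;   /* checked after xFilter and xNext */
}
static int countColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
  (void)i;
  sqlite3_result_int64(ctx, ((CountCursor*)cur)->i);   /* OP_VColumn */
  return SQLITE_OK;
}
static int countRowid(sqlite3_vtab_cursor *cur, sqlite3_int64 *pRowid){
  *pRowid = ((CountCursor*)cur)->i;
  return SQLITE_OK;
}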
@@ -79285,23 +72668,25 @@ case OP_VNext: { /* jump */
** in register P1 is passed as the zName argument to the xRename method.
*/
case OP_VRename: {
+#if 0 /* local variables moved into u.cq */
sqlite3_vtab *pVtab;
Mem *pName;
+#endif /* local variables moved into u.cq */
- pVtab = pOp->p4.pVtab->pVtab;
- pName = &aMem[pOp->p1];
- assert( pVtab->pModule->xRename );
- assert( memIsValid(pName) );
+ u.cq.pVtab = pOp->p4.pVtab->pVtab;
+ u.cq.pName = &aMem[pOp->p1];
+ assert( u.cq.pVtab->pModule->xRename );
+ assert( memIsValid(u.cq.pName) );
assert( p->readOnly==0 );
- REGISTER_TRACE(pOp->p1, pName);
- assert( pName->flags & MEM_Str );
- testcase( pName->enc==SQLITE_UTF8 );
- testcase( pName->enc==SQLITE_UTF16BE );
- testcase( pName->enc==SQLITE_UTF16LE );
- rc = sqlite3VdbeChangeEncoding(pName, SQLITE_UTF8);
+ REGISTER_TRACE(pOp->p1, u.cq.pName);
+ assert( u.cq.pName->flags & MEM_Str );
+ testcase( u.cq.pName->enc==SQLITE_UTF8 );
+ testcase( u.cq.pName->enc==SQLITE_UTF16BE );
+ testcase( u.cq.pName->enc==SQLITE_UTF16LE );
+ rc = sqlite3VdbeChangeEncoding(u.cq.pName, SQLITE_UTF8);
if( rc==SQLITE_OK ){
- rc = pVtab->pModule->xRename(pVtab, pName->z);
- sqlite3VtabImportErrmsg(p, pVtab);
+ rc = u.cq.pVtab->pModule->xRename(u.cq.pVtab, u.cq.pName->z);
+ sqlite3VtabImportErrmsg(p, u.cq.pVtab);
p->expired = 0;
}
break;
@@ -79309,7 +72694,7 @@ case OP_VRename: {
#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
-/* Opcode: VUpdate P1 P2 P3 P4 P5
+/* Opcode: VUpdate P1 P2 P3 P4 *
** Synopsis: data=r[P3@P2]
**
** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.
@@ -79332,48 +72717,44 @@ case OP_VRename: {
** P1 is a boolean flag. If it is set to true and the xUpdate call
** is successful, then the value returned by sqlite3_last_insert_rowid()
** is set to the value of the rowid for the row just inserted.
-**
-** P5 is the error actions (OE_Replace, OE_Fail, OE_Ignore, etc) to
-** apply in the case of a constraint failure on an insert or update.
*/
case OP_VUpdate: {
+#if 0 /* local variables moved into u.cr */
sqlite3_vtab *pVtab;
- const sqlite3_module *pModule;
+ sqlite3_module *pModule;
int nArg;
int i;
sqlite_int64 rowid;
Mem **apArg;
Mem *pX;
+#endif /* local variables moved into u.cr */
- assert( pOp->p2==1 || pOp->p5==OE_Fail || pOp->p5==OE_Rollback
+ assert( pOp->p2==1 || pOp->p5==OE_Fail || pOp->p5==OE_Rollback
|| pOp->p5==OE_Abort || pOp->p5==OE_Ignore || pOp->p5==OE_Replace
);
assert( p->readOnly==0 );
- pVtab = pOp->p4.pVtab->pVtab;
- if( pVtab==0 || NEVER(pVtab->pModule==0) ){
- rc = SQLITE_LOCKED;
- break;
- }
- pModule = pVtab->pModule;
- nArg = pOp->p2;
+ u.cr.pVtab = pOp->p4.pVtab->pVtab;
+ u.cr.pModule = (sqlite3_module *)u.cr.pVtab->pModule;
+ u.cr.nArg = pOp->p2;
assert( pOp->p4type==P4_VTAB );
- if( ALWAYS(pModule->xUpdate) ){
+ if( ALWAYS(u.cr.pModule->xUpdate) ){
u8 vtabOnConflict = db->vtabOnConflict;
- apArg = p->apArg;
- pX = &aMem[pOp->p3];
- for(i=0; i<nArg; i++){
- assert( memIsValid(pX) );
- memAboutToChange(p, pX);
- apArg[i] = pX;
- pX++;
+ u.cr.apArg = p->apArg;
+ u.cr.pX = &aMem[pOp->p3];
+ for(u.cr.i=0; u.cr.i<u.cr.nArg; u.cr.i++){
+ assert( memIsValid(u.cr.pX) );
+ memAboutToChange(p, u.cr.pX);
+ sqlite3VdbeMemStoreType(u.cr.pX);
+ u.cr.apArg[u.cr.i] = u.cr.pX;
+ u.cr.pX++;
}
db->vtabOnConflict = pOp->p5;
- rc = pModule->xUpdate(pVtab, nArg, apArg, &rowid);
+ rc = u.cr.pModule->xUpdate(u.cr.pVtab, u.cr.nArg, u.cr.apArg, &u.cr.rowid);
db->vtabOnConflict = vtabOnConflict;
- sqlite3VtabImportErrmsg(p, pVtab);
+ sqlite3VtabImportErrmsg(p, u.cr.pVtab);
if( rc==SQLITE_OK && pOp->p1 ){
- assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) );
- db->lastRowid = lastRowid = rowid;
+ assert( u.cr.nArg>1 && u.cr.apArg[0] && (u.cr.apArg[0]->flags&MEM_Null) );
+ db->lastRowid = lastRowid = u.cr.rowid;
}
if( (rc&0xff)==SQLITE_CONSTRAINT && pOp->p4.pVtab->bConstraint ){
if( pOp->p5==OE_Ignore ){
@@ -79394,8 +72775,7 @@ case OP_VUpdate: {
**
** Write the current number of pages in database P1 to memory cell P2.
*/
-case OP_Pagecount: { /* out2 */
- pOut = out2Prerelease(p, pOp);
+case OP_Pagecount: { /* out2-prerelease */
pOut->u.i = sqlite3BtreeLastPage(db->aDb[pOp->p1].pBt);
break;
}
@@ -79411,11 +72791,10 @@ case OP_Pagecount: { /* out2 */
**
** Store the maximum page count after the change in register P2.
*/
-case OP_MaxPgcnt: { /* out2 */
+case OP_MaxPgcnt: { /* out2-prerelease */
unsigned int newMax;
Btree *pBt;
- pOut = out2Prerelease(p, pOp);
pBt = db->aDb[pOp->p1].pBt;
newMax = 0;
if( pOp->p3 ){
@@ -79428,52 +72807,46 @@ case OP_MaxPgcnt: { /* out2 */
#endif
-/* Opcode: Init * P2 * P4 *
-** Synopsis: Start at P2
-**
-** Programs contain a single instance of this opcode as the very first
-** opcode.
+#ifndef SQLITE_OMIT_TRACE
+/* Opcode: Trace * * * P4 *
**
** If tracing is enabled (by the sqlite3_trace()) interface, then
** the UTF-8 string contained in P4 is emitted on the trace callback.
-** Or if P4 is blank, use the string returned by sqlite3_sql().
-**
-** If P2 is not zero, jump to instruction P2.
*/
-case OP_Init: { /* jump */
+case OP_Trace: {
+#if 0 /* local variables moved into u.cs */
char *zTrace;
char *z;
+#endif /* local variables moved into u.cs */
-#ifndef SQLITE_OMIT_TRACE
if( db->xTrace
&& !p->doingRerun
- && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
+ && (u.cs.zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
){
- z = sqlite3VdbeExpandSql(p, zTrace);
- db->xTrace(db->pTraceArg, z);
- sqlite3DbFree(db, z);
+ u.cs.z = sqlite3VdbeExpandSql(p, u.cs.zTrace);
+ db->xTrace(db->pTraceArg, u.cs.z);
+ sqlite3DbFree(db, u.cs.z);
}
#ifdef SQLITE_USE_FCNTL_TRACE
- zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql);
- if( zTrace ){
+ u.cs.zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql);
+ if( u.cs.zTrace ){
int i;
for(i=0; i<db->nDb; i++){
- if( DbMaskTest(p->btreeMask, i)==0 ) continue;
- sqlite3_file_control(db, db->aDb[i].zName, SQLITE_FCNTL_TRACE, zTrace);
+ if( ((1<<i) & p->btreeMask)==0 ) continue;
+ sqlite3_file_control(db, db->aDb[i].zName, SQLITE_FCNTL_TRACE, u.cs.zTrace);
}
}
#endif /* SQLITE_USE_FCNTL_TRACE */
#ifdef SQLITE_DEBUG
if( (db->flags & SQLITE_SqlTrace)!=0
- && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
+ && (u.cs.zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
){
- sqlite3DebugPrintf("SQL-trace: %s\n", zTrace);
+ sqlite3DebugPrintf("SQL-trace: %s\n", u.cs.zTrace);
}
#endif /* SQLITE_DEBUG */
-#endif /* SQLITE_OMIT_TRACE */
- if( pOp->p2 ) goto jump_to_p2;
break;
}
+#endif
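/* Editor's aside, not part of the diff: the xTrace callback invoked above is
** registered through the public sqlite3_trace() interface.  A minimal sketch
** (fprintf assumes <stdio.h>): */
static void traceCb(void *pArg, const char *zSql){
  /* zSql is the SQL text of the statement being run, with bound parameters
  ** expanded by sqlite3VdbeExpandSql() as shown above */
  (void)pArg;
  fprintf(stderr, "SQL: %s\n", zSql);
}
/* registration: sqlite3_trace(db, traceCb, 0); */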
/* Opcode: Noop * * * * *
@@ -79502,9 +72875,13 @@ default: { /* This is really OP_Noop and OP_Explain */
#ifdef VDBE_PROFILE
{
- u64 endTime = sqlite3Hwtime();
- if( endTime>start ) pOrigOp->cycles += endTime - start;
- pOrigOp->cnt++;
+ u64 elapsed = sqlite3Hwtime() - start;
+ pOp->cycles += elapsed;
+ pOp->cnt++;
+#if 0
+ fprintf(stdout, "%10llu ", elapsed);
+ sqlite3VdbePrintOp(stdout, origPc, &aOp[origPc]);
+#endif
}
#endif
@@ -79514,16 +72891,16 @@ default: { /* This is really OP_Noop and OP_Explain */
** the evaluator loop. So we can leave it out when NDEBUG is defined.
*/
#ifndef NDEBUG
- assert( pOp>=&aOp[-1] && pOp<&aOp[p->nOp-1] );
+ assert( pc>=-1 && pc<p->nOp );
#ifdef SQLITE_DEBUG
if( db->flags & SQLITE_VdbeTrace ){
if( rc!=0 ) printf("rc=%d\n",rc);
- if( pOrigOp->opflags & (OPFLG_OUT2) ){
- registerTrace(pOrigOp->p2, &aMem[pOrigOp->p2]);
+ if( pOp->opflags & (OPFLG_OUT2_PRERELEASE|OPFLG_OUT2) ){
+ registerTrace(pOp->p2, &aMem[pOp->p2]);
}
- if( pOrigOp->opflags & OPFLG_OUT3 ){
- registerTrace(pOrigOp->p3, &aMem[pOrigOp->p3]);
+ if( pOp->opflags & OPFLG_OUT3 ){
+ registerTrace(pOp->p3, &aMem[pOp->p3]);
}
}
#endif /* SQLITE_DEBUG */
@@ -79538,7 +72915,7 @@ vdbe_error_halt:
p->rc = rc;
testcase( sqlite3GlobalConfig.xLog!=0 );
sqlite3_log(rc, "statement aborts at %d: [%s] %s",
- (int)(pOp - aOp), p->zSql, p->zErrMsg);
+ pc, p->zSql, p->zErrMsg);
sqlite3VdbeHalt(p);
if( rc==SQLITE_IOERR_NOMEM ) db->mallocFailed = 1;
rc = SQLITE_ERROR;
@@ -79560,7 +72937,7 @@ vdbe_return:
** is encountered.
*/
too_big:
- sqlite3VdbeError(p, "string or blob too big");
+ sqlite3SetString(&p->zErrMsg, db, "string or blob too big");
rc = SQLITE_TOOBIG;
goto vdbe_error_halt;
@@ -79568,7 +72945,7 @@ too_big:
*/
no_mem:
db->mallocFailed = 1;
- sqlite3VdbeError(p, "out of memory");
+ sqlite3SetString(&p->zErrMsg, db, "out of memory");
rc = SQLITE_NOMEM;
goto vdbe_error_halt;
@@ -79579,7 +72956,7 @@ abort_due_to_error:
assert( p->zErrMsg==0 );
if( db->mallocFailed ) rc = SQLITE_NOMEM;
if( rc!=SQLITE_IOERR_NOMEM ){
- sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc));
+ sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(rc));
}
goto vdbe_error_halt;
@@ -79590,11 +72967,10 @@ abort_due_to_interrupt:
assert( db->u1.isInterrupted );
rc = SQLITE_INTERRUPT;
p->rc = rc;
- sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc));
+ sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(rc));
goto vdbe_error_halt;
}
-
/************** End of vdbe.c ************************************************/
/************** Begin file vdbeblob.c ****************************************/
/*
@@ -79612,8 +72988,6 @@ abort_due_to_interrupt:
** This file contains code used to implement incremental BLOB I/O.
*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
#ifndef SQLITE_OMIT_INCRBLOB
@@ -79676,7 +73050,9 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
p->iOffset = pC->aType[p->iCol + pC->nField];
p->nByte = sqlite3VdbeSerialTypeLen(type);
p->pCsr = pC->pCursor;
- sqlite3BtreeIncrblobCursor(p->pCsr);
+ sqlite3BtreeEnterCursor(p->pCsr);
+ sqlite3BtreeCacheOverflow(p->pCsr);
+ sqlite3BtreeLeaveCursor(p->pCsr);
}
}
@@ -79703,7 +73079,7 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
/*
** Open a blob handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
+SQLITE_API int sqlite3_blob_open(
sqlite3* db, /* The database connection */
const char *zDb, /* The attached database containing the blob */
const char *zTable, /* The table containing the blob */
@@ -79730,20 +73106,22 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
** which closes the b-tree cursor and (possibly) commits the
** transaction.
*/
- static const int iLn = VDBE_OFFSET_LINENO(4);
static const VdbeOpList openBlob[] = {
- /* {OP_Transaction, 0, 0, 0}, // 0: Inserted separately */
- {OP_TableLock, 0, 0, 0}, /* 1: Acquire a read or write lock */
+ {OP_Transaction, 0, 0, 0}, /* 0: Start a transaction */
+ {OP_VerifyCookie, 0, 0, 0}, /* 1: Check the schema cookie */
+ {OP_TableLock, 0, 0, 0}, /* 2: Acquire a read or write lock */
+
/* One of the following two instructions is replaced by an OP_Noop. */
- {OP_OpenRead, 0, 0, 0}, /* 2: Open cursor 0 for reading */
- {OP_OpenWrite, 0, 0, 0}, /* 3: Open cursor 0 for read/write */
- {OP_Variable, 1, 1, 1}, /* 4: Push the rowid to the stack */
- {OP_NotExists, 0, 10, 1}, /* 5: Seek the cursor */
- {OP_Column, 0, 0, 1}, /* 6 */
- {OP_ResultRow, 1, 0, 0}, /* 7 */
- {OP_Goto, 0, 4, 0}, /* 8 */
- {OP_Close, 0, 0, 0}, /* 9 */
- {OP_Halt, 0, 0, 0}, /* 10 */
+ {OP_OpenRead, 0, 0, 0}, /* 3: Open cursor 0 for reading */
+ {OP_OpenWrite, 0, 0, 0}, /* 4: Open cursor 0 for read/write */
+
+ {OP_Variable, 1, 1, 1}, /* 5: Push the rowid to the stack */
+ {OP_NotExists, 0, 10, 1}, /* 6: Seek the cursor */
+ {OP_Column, 0, 0, 1}, /* 7 */
+ {OP_ResultRow, 1, 0, 0}, /* 8 */
+ {OP_Goto, 0, 5, 0}, /* 9 */
+ {OP_Close, 0, 0, 0}, /* 10 */
+ {OP_Halt, 0, 0, 0}, /* 11 */
};
int rc = SQLITE_OK;
@@ -79752,18 +73130,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
Parse *pParse = 0;
Incrblob *pBlob = 0;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( ppBlob==0 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
- *ppBlob = 0;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zTable==0 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
flags = !!flags; /* flags = (flags ? 1 : 0); */
+ *ppBlob = 0;
sqlite3_mutex_enter(db->mutex);
@@ -79846,8 +73214,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
int j;
for(j=0; j<pIdx->nKeyCol; j++){
- /* FIXME: Be smarter about indexes that use expressions */
- if( pIdx->aiColumn[j]==iCol || pIdx->aiColumn[j]==XN_EXPR ){
+ if( pIdx->aiColumn[j]==iCol ){
zFault = "indexed";
}
}
@@ -79861,37 +73228,42 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
}
}
- pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(pParse);
+ pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(db);
assert( pBlob->pStmt || db->mallocFailed );
if( pBlob->pStmt ){
Vdbe *v = (Vdbe *)pBlob->pStmt;
int iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ sqlite3VdbeAddOpList(v, sizeof(openBlob)/sizeof(VdbeOpList), openBlob);
+
- sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, flags,
- pTab->pSchema->schema_cookie,
- pTab->pSchema->iGeneration);
- sqlite3VdbeChangeP5(v, 1);
- sqlite3VdbeAddOpList(v, ArraySize(openBlob), openBlob, iLn);
+ /* Configure the OP_Transaction */
+ sqlite3VdbeChangeP1(v, 0, iDb);
+ sqlite3VdbeChangeP2(v, 0, flags);
+
+ /* Configure the OP_VerifyCookie */
+ sqlite3VdbeChangeP1(v, 1, iDb);
+ sqlite3VdbeChangeP2(v, 1, pTab->pSchema->schema_cookie);
+ sqlite3VdbeChangeP3(v, 1, pTab->pSchema->iGeneration);
/* Make sure a mutex is held on the table to be accessed */
sqlite3VdbeUsesBtree(v, iDb);
/* Configure the OP_TableLock instruction */
#ifdef SQLITE_OMIT_SHARED_CACHE
- sqlite3VdbeChangeToNoop(v, 1);
+ sqlite3VdbeChangeToNoop(v, 2);
#else
- sqlite3VdbeChangeP1(v, 1, iDb);
- sqlite3VdbeChangeP2(v, 1, pTab->tnum);
- sqlite3VdbeChangeP3(v, 1, flags);
- sqlite3VdbeChangeP4(v, 1, pTab->zName, P4_TRANSIENT);
+ sqlite3VdbeChangeP1(v, 2, iDb);
+ sqlite3VdbeChangeP2(v, 2, pTab->tnum);
+ sqlite3VdbeChangeP3(v, 2, flags);
+ sqlite3VdbeChangeP4(v, 2, pTab->zName, P4_TRANSIENT);
#endif
/* Remove either the OP_OpenWrite or OpenRead. Set the P2
** parameter of the other to pTab->tnum. */
- sqlite3VdbeChangeToNoop(v, 3 - flags);
- sqlite3VdbeChangeP2(v, 2 + flags, pTab->tnum);
- sqlite3VdbeChangeP3(v, 2 + flags, iDb);
+ sqlite3VdbeChangeToNoop(v, 4 - flags);
+ sqlite3VdbeChangeP2(v, 3 + flags, pTab->tnum);
+ sqlite3VdbeChangeP3(v, 3 + flags, iDb);
/* Configure the number of columns. Configure the cursor to
** think that the table has one more column than it really
@@ -79900,8 +73272,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
** we can invoke OP_Column to fill in the vdbe cursors type
** and offset cache without causing any IO.
*/
- sqlite3VdbeChangeP4(v, 2+flags, SQLITE_INT_TO_PTR(pTab->nCol+1),P4_INT32);
- sqlite3VdbeChangeP2(v, 6, pTab->nCol);
+ sqlite3VdbeChangeP4(v, 3+flags, SQLITE_INT_TO_PTR(pTab->nCol+1),P4_INT32);
+ sqlite3VdbeChangeP2(v, 7, pTab->nCol);
if( !db->mallocFailed ){
pParse->nVar = 1;
pParse->nMem = 1;
@@ -79928,7 +73300,7 @@ blob_open_out:
if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt);
sqlite3DbFree(db, pBlob);
}
- sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr);
+ sqlite3Error(db, rc, (zErr ? "%s" : 0), zErr);
sqlite3DbFree(db, zErr);
sqlite3ParserReset(pParse);
sqlite3StackFree(db, pParse);
@@ -79941,7 +73313,7 @@ blob_open_out:
** Close a blob handle that was previously created using
** sqlite3_blob_open().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *pBlob){
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *pBlob){
Incrblob *p = (Incrblob *)pBlob;
int rc;
sqlite3 *db;
@@ -79978,9 +73350,10 @@ static int blobReadWrite(
sqlite3_mutex_enter(db->mutex);
v = (Vdbe*)p->pStmt;
- if( n<0 || iOffset<0 || ((sqlite3_int64)iOffset+n)>p->nByte ){
+ if( n<0 || iOffset<0 || (iOffset+n)>p->nByte ){
/* Request is out of range. Return a transient error. */
rc = SQLITE_ERROR;
+ sqlite3Error(db, SQLITE_ERROR, 0);
}else if( v==0 ){
/* If there is no statement handle, then the blob-handle has
** already been invalidated. Return SQLITE_ABORT in this case.
@@ -79998,10 +73371,10 @@ static int blobReadWrite(
sqlite3VdbeFinalize(v);
p->pStmt = 0;
}else{
+ db->errCode = rc;
v->rc = rc;
}
}
- sqlite3Error(db, rc);
rc = sqlite3ApiExit(db, rc);
sqlite3_mutex_leave(db->mutex);
return rc;
@@ -80010,14 +73383,14 @@ static int blobReadWrite(
/*
** Read data from a blob handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){
return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreeData);
}
/*
** Write data to a blob handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){
return blobReadWrite(pBlob, (void *)z, n, iOffset, sqlite3BtreePutData);
}
@@ -80027,7 +73400,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *pBlob, const void
** The Incrblob.nByte field is fixed for the lifetime of the Incrblob
** so no mutex is required for access.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *pBlob){
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *pBlob){
Incrblob *p = (Incrblob *)pBlob;
return (p && p->pStmt) ? p->nByte : 0;
}
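/* Editor's aside, not part of the diff: a minimal end-to-end use of the
** incremental blob I/O interface implemented above.  Assumes a table
** "t1(id INTEGER PRIMARY KEY, data BLOB)" that already contains row 1. */
static int readWholeBlob(sqlite3 *db){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "t1", "data", 1, 0, &pBlob);
  if( rc==SQLITE_OK ){
    int n = sqlite3_blob_bytes(pBlob);          /* size fixed at open time */
    char *z = (char *)sqlite3_malloc(n>0 ? n : 1);
    if( z ){
      rc = sqlite3_blob_read(pBlob, z, n, 0);   /* read n bytes from offset 0 */
      sqlite3_free(z);
    }else{
      rc = SQLITE_NOMEM;
    }
    sqlite3_blob_close(pBlob);
  }
  return rc;
}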
@@ -80042,7 +73415,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *pBlob){
** subsequent calls to sqlite3_blob_xxx() functions (except blob_close())
** immediately return SQLITE_ABORT.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
int rc;
Incrblob *p = (Incrblob *)pBlob;
sqlite3 *db;
@@ -80060,7 +73433,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_i
char *zErr;
rc = blobSeekToRow(p, iRow, &zErr);
if( rc!=SQLITE_OK ){
- sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr);
+ sqlite3Error(db, rc, (zErr ? "%s" : 0), zErr);
sqlite3DbFree(db, zErr);
}
assert( rc!=SQLITE_SCHEMA );
@@ -80077,7 +73450,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_i
/************** End of vdbeblob.c ********************************************/
/************** Begin file vdbesort.c ****************************************/
/*
-** 2011-07-09
+** 2011 July 9
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
@@ -80088,205 +73461,42 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_i
**
*************************************************************************
** This file contains code for the VdbeSorter object, used in concert with
-** a VdbeCursor to sort large numbers of keys for CREATE INDEX statements
-** or by SELECT statements with ORDER BY clauses that cannot be satisfied
-** using indexes and without LIMIT clauses.
-**
-** The VdbeSorter object implements a multi-threaded external merge sort
-** algorithm that is efficient even if the number of elements being sorted
-** exceeds the available memory.
-**
-** Here is the (internal, non-API) interface between this module and the
-** rest of the SQLite system:
-**
-** sqlite3VdbeSorterInit() Create a new VdbeSorter object.
-**
-** sqlite3VdbeSorterWrite() Add a single new row to the VdbeSorter
-** object. The row is a binary blob in the
-** OP_MakeRecord format that contains both
-** the ORDER BY key columns and result columns
-** in the case of a SELECT w/ ORDER BY, or
-** the complete record for an index entry
-** in the case of a CREATE INDEX.
-**
-** sqlite3VdbeSorterRewind() Sort all content previously added.
-** Position the read cursor on the
-** first sorted element.
-**
-** sqlite3VdbeSorterNext() Advance the read cursor to the next sorted
-** element.
-**
-** sqlite3VdbeSorterRowkey() Return the complete binary blob for the
-** row currently under the read cursor.
-**
-** sqlite3VdbeSorterCompare() Compare the binary blob for the row
-** currently under the read cursor against
-** another binary blob X and report if
-** X is strictly less than the read cursor.
-** Used to enforce uniqueness in a
-** CREATE UNIQUE INDEX statement.
-**
-** sqlite3VdbeSorterClose() Close the VdbeSorter object and reclaim
-** all resources.
-**
-** sqlite3VdbeSorterReset() Refurbish the VdbeSorter for reuse. This
-** is like Close() followed by Init() only
-** much faster.
-**
-** The interfaces above must be called in a particular order. Write() can
-** only occur in between Init()/Reset() and Rewind(). Next(), Rowkey(), and
-** Compare() can only occur in between Rewind() and Close()/Reset(). i.e.
-**
-** Init()
-** for each record: Write()
-** Rewind()
-** Rowkey()/Compare()
-** Next()
-** Close()
-**
-** Algorithm:
-**
-** Records passed to the sorter via calls to Write() are initially held
-** unsorted in main memory. Assuming the amount of memory used never exceeds
-** a threshold, when Rewind() is called the set of records is sorted using
-** an in-memory merge sort. In this case, no temporary files are required
-** and subsequent calls to Rowkey(), Next() and Compare() read records
-** directly from main memory.
-**
-** If the amount of space used to store records in main memory exceeds the
-** threshold, then the set of records currently in memory are sorted and
-** written to a temporary file in "Packed Memory Array" (PMA) format.
-** A PMA created at this point is known as a "level-0 PMA". Higher levels
-** of PMAs may be created by merging existing PMAs together - for example
-** merging two or more level-0 PMAs together creates a level-1 PMA.
-**
-** The threshold for the amount of main memory to use before flushing
-** records to a PMA is roughly the same as the limit configured for the
-** page-cache of the main database. Specifically, the threshold is set to
-** the value returned by "PRAGMA main.page_size" multiplied by
-** that returned by "PRAGMA main.cache_size", in bytes.
-**
-** If the sorter is running in single-threaded mode, then all PMAs generated
-** are appended to a single temporary file. Or, if the sorter is running in
-** multi-threaded mode then up to (N+1) temporary files may be opened, where
-** N is the configured number of worker threads. In this case, instead of
-** sorting the records and writing the PMA to a temporary file itself, the
-** calling thread usually launches a worker thread to do so. Except, if
-** there are already N worker threads running, the main thread does the work
-** itself.
-**
-** The sorter is running in multi-threaded mode if (a) the library was built
-** with pre-processor symbol SQLITE_MAX_WORKER_THREADS set to a value greater
-** than zero, and (b) worker threads have been enabled at runtime by calling
-** "PRAGMA threads=N" with some value of N greater than 0.
-**
-** When Rewind() is called, any data remaining in memory is flushed to a
-** final PMA. So at this point the data is stored in some number of sorted
-** PMAs within temporary files on disk.
-**
-** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the
-** sorter is running in single-threaded mode, then these PMAs are merged
-** incrementally as keys are retrieved from the sorter by the VDBE. The
-** MergeEngine object, described in further detail below, performs this
-** merge.
-**
-** Or, if running in multi-threaded mode, then a background thread is
-** launched to merge the existing PMAs. Once the background thread has
-** merged T bytes of data into a single sorted PMA, the main thread
-** begins reading keys from that PMA while the background thread proceeds
-** with merging the next T bytes of data. And so on.
-**
-** Parameter T is set to half the value of the memory threshold used
-** by Write() above to determine when to create a new PMA.
-**
-** If there are more than SORTER_MAX_MERGE_COUNT PMAs in total when
-** Rewind() is called, then a hierarchy of incremental-merges is used.
-** First, T bytes of data from the first SORTER_MAX_MERGE_COUNT PMAs on
-** disk are merged together. Then T bytes of data from the second set, and
-** so on, such that no operation ever merges more than SORTER_MAX_MERGE_COUNT
-** PMAs at a time. This is done to improve locality.
-**
-** If running in multi-threaded mode and there are more than
-** SORTER_MAX_MERGE_COUNT PMAs on disk when Rewind() is called, then more
-** than one background thread may be created. Specifically, there may be
-** one background thread for each temporary file on disk, and one background
-** thread to merge the output of each of the others to a single PMA for
-** the main thread to read from.
-*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
-
-/*
-** If SQLITE_DEBUG_SORTER_THREADS is defined, this module outputs various
-** messages to stderr that may be helpful in understanding the performance
-** characteristics of the sorter in multi-threaded mode.
+** a VdbeCursor to sort large numbers of keys (as may be required, for
+** example, by CREATE INDEX statements on tables too large to fit in main
+** memory).
*/
-#if 0
-# define SQLITE_DEBUG_SORTER_THREADS 1
-#endif
-/*
-** Hard-coded maximum amount of data to accumulate in memory before flushing
-** to a level 0 PMA. The purpose of this limit is to prevent various integer
-** overflows. 512MiB.
-*/
-#define SQLITE_MAX_PMASZ (1<<29)
-/*
-** Private objects used by the sorter
-*/
-typedef struct MergeEngine MergeEngine; /* Merge PMAs together */
-typedef struct PmaReader PmaReader; /* Incrementally read one PMA */
-typedef struct PmaWriter PmaWriter; /* Incrementally write one PMA */
-typedef struct SorterRecord SorterRecord; /* A record being sorted */
-typedef struct SortSubtask SortSubtask; /* A sub-task in the sort process */
-typedef struct SorterFile SorterFile; /* Temporary file object wrapper */
-typedef struct SorterList SorterList; /* In-memory list of records */
-typedef struct IncrMerger IncrMerger; /* Read & merge multiple PMAs */
-/*
-** A container for a temp file handle and the current amount of data
-** stored in the file.
-*/
-struct SorterFile {
- sqlite3_file *pFd; /* File handle */
- i64 iEof; /* Bytes of data stored in pFd */
-};
+typedef struct VdbeSorterIter VdbeSorterIter;
+typedef struct SorterRecord SorterRecord;
+typedef struct FileWriter FileWriter;
/*
-** An in-memory list of objects to be sorted.
+** NOTES ON DATA STRUCTURE USED FOR N-WAY MERGES:
**
-** If aMemory==0 then each object is allocated separately and the objects
-** are connected using SorterRecord.u.pNext. If aMemory!=0 then all objects
-** are stored in the aMemory[] bulk memory, one right after the other, and
-** are connected using SorterRecord.u.iNext.
-*/
-struct SorterList {
- SorterRecord *pList; /* Linked list of records */
- u8 *aMemory; /* If non-NULL, bulk memory to hold pList */
- int szPMA; /* Size of pList as PMA in bytes */
-};
-
-/*
-** The MergeEngine object is used to combine two or more smaller PMAs into
-** one big PMA using a merge operation. Separate PMAs all need to be
-** combined into one big PMA in order to be able to step through the sorted
-** records in order.
+** As keys are added to the sorter, they are written to disk in a series
+** of sorted packed-memory-arrays (PMAs). The size of each PMA is roughly
+** the same as the cache-size allowed for temporary databases. In order
+** to allow the caller to extract keys from the sorter in sorted order,
+** all PMAs currently stored on disk must be merged together. This comment
+** describes the data structure used to do so. The structure supports
+** merging any number of arrays in a single pass with no redundant comparison
+** operations.
**
-** The aReadr[] array contains a PmaReader object for each of the PMAs being
-** merged. An aReadr[] object either points to a valid key or else is at EOF.
-** ("EOF" means "End Of File". When aReadr[] is at EOF there is no more data.)
-** For the purposes of the paragraphs below, we assume that the array is
-** actually N elements in size, where N is the smallest power of 2 greater
-** than or equal to the number of PMAs being merged. The extra aReadr[] elements
-** are treated as if they are empty (always at EOF).
+** The aIter[] array contains an iterator for each of the PMAs being merged.
+** An aIter[] iterator either points to a valid key or else is at EOF. For
+** the purposes of the paragraphs below, we assume that the array is actually
+** N elements in size, where N is the smallest power of 2 greater than or equal
+** to the number of iterators being merged. The extra aIter[] elements are
+** treated as if they are empty (always at EOF).
**
** The aTree[] array is also N elements in size. The value of N is stored in
-** the MergeEngine.nTree variable.
+** the VdbeSorter.nTree variable.
**
** The final (N/2) elements of aTree[] contain the results of comparing
-** pairs of PMA keys together. Element i contains the result of
-** comparing aReadr[2*i-N] and aReadr[2*i-N+1]. Whichever key is smaller, the
+** pairs of iterator keys together. Element i contains the result of
+** comparing aIter[2*i-N] and aIter[2*i-N+1]. Whichever key is smaller, the
** aTree element is set to the index of it.
**
** For the purposes of this comparison, EOF is considered greater than any
@@ -80294,34 +73504,34 @@ struct SorterList {
** values), it doesn't matter which index is stored.
**
** The (N/4) elements of aTree[] that precede the final (N/2) described
-** above contains the index of the smallest of each block of 4 PmaReaders
-** And so on. So that aTree[1] contains the index of the PmaReader that
+** above contains the index of the smallest of each block of 4 iterators.
+** And so on. So that aTree[1] contains the index of the iterator that
** currently points to the smallest key value. aTree[0] is unused.
**
** Example:
**
-** aReadr[0] -> Banana
-** aReadr[1] -> Feijoa
-** aReadr[2] -> Elderberry
-** aReadr[3] -> Currant
-** aReadr[4] -> Grapefruit
-** aReadr[5] -> Apple
-** aReadr[6] -> Durian
-** aReadr[7] -> EOF
+** aIter[0] -> Banana
+** aIter[1] -> Feijoa
+** aIter[2] -> Elderberry
+** aIter[3] -> Currant
+** aIter[4] -> Grapefruit
+** aIter[5] -> Apple
+** aIter[6] -> Durian
+** aIter[7] -> EOF
**
** aTree[] = { X, 5 0, 5 0, 3, 5, 6 }
**
** The current element is "Apple" (the value of the key indicated by
-** PmaReader 5). When the Next() operation is invoked, PmaReader 5 will
+** iterator 5). When the Next() operation is invoked, iterator 5 will
** be advanced to the next key in its segment. Say the next key is
** "Eggplant":
**
-** aReadr[5] -> Eggplant
+** aIter[5] -> Eggplant
**
-** The contents of aTree[] are updated first by comparing the new PmaReader
-** 5 key to the current key of PmaReader 4 (still "Grapefruit"). The PmaReader
+** The contents of aTree[] are updated first by comparing the new iterator
+** 5 key to the current key of iterator 4 (still "Grapefruit"). The iterator
** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree.
-** The value of PmaReader 6 - "Durian" - is now smaller than that of PmaReader
+** The value of iterator 6 - "Durian" - is now smaller than that of iterator
** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Banana<Durian),
** so the value written into element 1 of the array is 0. As follows:
**
@@ -80331,250 +73541,97 @@ struct SorterList {
** key comparison operations are required, where N is the number of segments
** being merged (rounded up to the next power of 2).
*/
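/* Editor's aside, not part of the diff: a self-contained, generic sketch of
** the aTree update described above (not the SQLite implementation).  It
** merges N already-sorted integer arrays; an exhausted input counts as EOF
** and compares greater than any key, exactly as the comment specifies.  All
** names are illustrative only. */
#include <stdio.h>

#define N 4                           /* number of inputs, a power of 2 */

typedef struct { const int *a; int n; int pos; } Input;

static int atEof(Input *p){ return p->pos >= p->n; }
static int key(Input *p){ return p->a[p->pos]; }

/* Return the index of the smaller of inputs i and j (EOF always loses). */
static int smaller(Input *in, int i, int j){
  if( atEof(&in[i]) ) return j;
  if( atEof(&in[j]) ) return i;
  return key(&in[i])<=key(&in[j]) ? i : j;
}

int main(void){
  static const int a0[] = {1,7}, a1[] = {2,5}, a2[] = {3,9}, a3[] = {4};
  Input in[N] = {{a0,2,0},{a1,2,0},{a2,2,0},{a3,1,0}};
  int aTree[N];                       /* aTree[1] holds the overall winner */
  int i;

  /* Initial comparisons: leaves (i>=N/2) first, then up toward the root. */
  for(i=N-1; i>0; i--){
    if( i>=N/2 ){
      aTree[i] = smaller(in, 2*i-N, 2*i-N+1);
    }else{
      aTree[i] = smaller(in, aTree[2*i], aTree[2*i+1]);
    }
  }

  /* Repeatedly emit the smallest key, advance that input, and fix up only
  ** the path from the affected leaf to the root (log2(N) comparisons). */
  while( !atEof(&in[aTree[1]]) ){
    int iWin = aTree[1];
    printf("%d ", key(&in[iWin]));
    in[iWin].pos++;
    for(i=(iWin+N)/2; i>0; i=i/2){
      if( i>=N/2 ){
        aTree[i] = smaller(in, 2*i-N, 2*i-N+1);
      }else{
        aTree[i] = smaller(in, aTree[2*i], aTree[2*i+1]);
      }
    }
  }
  printf("\n");                       /* prints: 1 2 3 4 5 7 9 */
  return 0;
}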
-struct MergeEngine {
- int nTree; /* Used size of aTree/aReadr (power of 2) */
- SortSubtask *pTask; /* Used by this thread only */
- int *aTree; /* Current state of incremental merge */
- PmaReader *aReadr; /* Array of PmaReaders to merge data from */
-};
-
-/*
-** This object represents a single thread of control in a sort operation.
-** Exactly VdbeSorter.nTask instances of this object are allocated
-** as part of each VdbeSorter object. Instances are never allocated any
-** other way. VdbeSorter.nTask is set to the number of worker threads allowed
-** (see SQLITE_CONFIG_WORKER_THREADS) plus one (the main thread). Thus for
-** single-threaded operation, there is exactly one instance of this object
-** and for multi-threaded operation there are two or more instances.
-**
-** Essentially, this structure contains all those fields of the VdbeSorter
-** structure for which each thread requires a separate instance. For example,
-** each thread requires its own UnpackedRecord object to unpack records
-** as part of comparison operations.
-**
-** Before a background thread is launched, variable bDone is set to 0. Then,
-** right before it exits, the thread itself sets bDone to 1. This is used for
-** two purposes:
-**
-** 1. When flushing the contents of memory to a level-0 PMA on disk, to
-** attempt to select a SortSubtask for which there is not already an
-** active background thread (since doing so causes the main thread
-** to block until it finishes).
-**
-** 2. If SQLITE_DEBUG_SORTER_THREADS is defined, to determine if a call
-** to sqlite3ThreadJoin() is likely to block. Cases that are likely to
-** block provoke debugging output.
-**
-** In both cases, the effects of the main thread seeing (bDone==0) even
-** after the thread has finished are not dire. So we don't worry about
-** memory barriers and such here.
-*/
-typedef int (*SorterCompare)(SortSubtask*,int*,const void*,int,const void*,int);
-struct SortSubtask {
- SQLiteThread *pThread; /* Background thread, if any */
- int bDone; /* Set if thread is finished but not joined */
- VdbeSorter *pSorter; /* Sorter that owns this sub-task */
- UnpackedRecord *pUnpacked; /* Space to unpack a record */
- SorterList list; /* List for thread to write to a PMA */
- int nPMA; /* Number of PMAs currently in file */
- SorterCompare xCompare; /* Compare function to use */
- SorterFile file; /* Temp file for level-0 PMAs */
- SorterFile file2; /* Space for other PMAs */
-};
-
-
-/*
-** Main sorter structure. A single instance of this is allocated for each
-** sorter cursor created by the VDBE.
-**
-** mxKeysize:
-** As records are added to the sorter by calls to sqlite3VdbeSorterWrite(),
-** this variable is updated so as to be set to the size on disk of the
-** largest record in the sorter.
-*/
struct VdbeSorter {
+ i64 iWriteOff; /* Current write offset within file pTemp1 */
+ i64 iReadOff; /* Current read offset within file pTemp1 */
+ int nInMemory; /* Current size of pRecord list as PMA */
+ int nTree; /* Used size of aTree/aIter (power of 2) */
+ int nPMA; /* Number of PMAs stored in pTemp1 */
int mnPmaSize; /* Minimum PMA size, in bytes */
int mxPmaSize; /* Maximum PMA size, in bytes. 0==no limit */
- int mxKeysize; /* Largest serialized key seen so far */
- int pgsz; /* Main database page size */
- PmaReader *pReader; /* Read data from here after Rewind() */
- MergeEngine *pMerger; /* Or here, if bUseThreads==0 */
- sqlite3 *db; /* Database connection */
- KeyInfo *pKeyInfo; /* How to compare records */
- UnpackedRecord *pUnpacked; /* Used by VdbeSorterCompare() */
- SorterList list; /* List of in-memory records */
- int iMemory; /* Offset of free space in list.aMemory */
- int nMemory; /* Size of list.aMemory allocation in bytes */
- u8 bUsePMA; /* True if one or more PMAs created */
- u8 bUseThreads; /* True to use background threads */
- u8 iPrev; /* Previous thread used to flush PMA */
- u8 nTask; /* Size of aTask[] array */
- u8 typeMask;
- SortSubtask aTask[1]; /* One or more subtasks */
-};
-
-#define SORTER_TYPE_INTEGER 0x01
-#define SORTER_TYPE_TEXT 0x02
-
-/*
-** An instance of the following object is used to read records out of a
-** PMA, in sorted order. The next key to be read is cached in nKey/aKey.
-** aKey might point into aMap or into aBuffer. If neither of those locations
-** contain a contiguous representation of the key, then aAlloc is allocated
-** and the key is copied into aAlloc and aKey is made to point to aAlloc.
-**
-** pFd==0 at EOF.
-*/
-struct PmaReader {
- i64 iReadOff; /* Current read offset */
- i64 iEof; /* 1 byte past EOF for this PmaReader */
- int nAlloc; /* Bytes of space at aAlloc */
- int nKey; /* Number of bytes in key */
- sqlite3_file *pFd; /* File handle we are reading from */
- u8 *aAlloc; /* Space for aKey if aBuffer and pMap won't work */
- u8 *aKey; /* Pointer to current key */
- u8 *aBuffer; /* Current read buffer */
- int nBuffer; /* Size of read buffer in bytes */
- u8 *aMap; /* Pointer to mapping of entire file */
- IncrMerger *pIncr; /* Incremental merger */
+ VdbeSorterIter *aIter; /* Array of iterators to merge */
+ int *aTree; /* Current state of incremental merge */
+ sqlite3_file *pTemp1; /* PMA file 1 */
+ SorterRecord *pRecord; /* Head of in-memory record list */
+ UnpackedRecord *pUnpacked; /* Used to unpack keys */
};
/*
-** Normally, a PmaReader object iterates through an existing PMA stored
-** within a temp file. However, if the PmaReader.pIncr variable points to
-** an object of the following type, it may be used to iterate/merge through
-** multiple PMAs simultaneously.
-**
-** There are two types of IncrMerger object - single (bUseThread==0) and
-** multi-threaded (bUseThread==1).
-**
-** A multi-threaded IncrMerger object uses two temporary files - aFile[0]
-** and aFile[1]. Neither file is allowed to grow to more than mxSz bytes in
-** size. When the IncrMerger is initialized, it reads enough data from
-** pMerger to populate aFile[0]. It then sets variables within the
-** corresponding PmaReader object to read from that file and kicks off
-** a background thread to populate aFile[1] with the next mxSz bytes of
-** sorted record data from pMerger.
-**
-** When the PmaReader reaches the end of aFile[0], it blocks until the
-** background thread has finished populating aFile[1]. It then exchanges
-** the contents of the aFile[0] and aFile[1] variables within this structure,
-** sets the PmaReader fields to read from the new aFile[0] and kicks off
-** another background thread to populate the new aFile[1]. And so on, until
-** the contents of pMerger are exhausted.
-**
-** A single-threaded IncrMerger does not open any temporary files of its
-** own. Instead, it has exclusive access to mxSz bytes of space beginning
-** at offset iStartOff of file pTask->file2. And instead of using a
-** background thread to prepare data for the PmaReader, with a single
-** threaded IncrMerger the allocate part of pTask->file2 is "refilled" with
-** keys from pMerger by the calling thread whenever the PmaReader runs out
-** of data.
-*/
-struct IncrMerger {
- SortSubtask *pTask; /* Task that owns this merger */
- MergeEngine *pMerger; /* Merge engine thread reads data from */
- i64 iStartOff; /* Offset to start writing file at */
- int mxSz; /* Maximum bytes of data to store */
- int bEof; /* Set to true when merge is finished */
- int bUseThread; /* True to use a bg thread for this object */
- SorterFile aFile[2]; /* aFile[0] for reading, [1] for writing */
+** The following type is an iterator for a PMA. It caches the current key in
+** variables nKey/aKey. If the iterator is at EOF, pFile==0.
+*/
+struct VdbeSorterIter {
+ i64 iReadOff; /* Current read offset */
+ i64 iEof; /* 1 byte past EOF for this iterator */
+ int nAlloc; /* Bytes of space at aAlloc */
+ int nKey; /* Number of bytes in key */
+ sqlite3_file *pFile; /* File iterator is reading from */
+ u8 *aAlloc; /* Allocated space */
+ u8 *aKey; /* Pointer to current key */
+ u8 *aBuffer; /* Current read buffer */
+ int nBuffer; /* Size of read buffer in bytes */
};
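/*
** As a rough standalone sketch of the iterator pattern used above - advance
** over a stream of length-prefixed records, cache a pointer to the current
** key, and signal EOF by clearing the cached state - consider the fragment
** below. The names MiniIter and mini_iter_next are invented for illustration;
** the stream lives in memory rather than in a temp file, a one-byte length
** prefix stands in for the varint used by the real code, and the stream is
** assumed to be well-formed.
*/
#include <stddef.h>

typedef struct MiniIter MiniIter;
struct MiniIter {
  const unsigned char *aData;   /* Start of the record stream */
  size_t nData;                 /* Total bytes in the stream */
  size_t iOff;                  /* Current read offset */
  const unsigned char *aKey;    /* Cached pointer to the current key */
  size_t nKey;                  /* Size of the current key in bytes */
  int bEof;                     /* Non-zero once the stream is exhausted */
};

/* Advance to the next record: a 1-byte length followed by that many bytes
** of key data. Return 0 on success, or -1 and clear the cached key at EOF. */
static int mini_iter_next(MiniIter *p){
  if( p->iOff>=p->nData ){
    p->aKey = 0;
    p->nKey = 0;
    p->bEof = 1;
    return -1;
  }
  p->nKey = p->aData[p->iOff++];
  p->aKey = &p->aData[p->iOff];
  p->iOff += p->nKey;
  return 0;
}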
/*
-** An instance of this object is used for writing a PMA.
-**
-** The PMA is written one record at a time. Each record is of an arbitrary
-** size. But I/O is more efficient if it occurs in page-sized blocks where
-** each block is aligned on a page boundary. This object caches writes to
-** the PMA so that aligned, page-size blocks are written.
+** An instance of this structure is used to organize the stream of records
+** being written to files by the merge-sort code into aligned, page-sized
+** blocks. Doing all I/O in aligned page-sized blocks helps I/O to go
+** faster on many operating systems.
*/
-struct PmaWriter {
+struct FileWriter {
int eFWErr; /* Non-zero if in an error state */
u8 *aBuffer; /* Pointer to write buffer */
int nBuffer; /* Size of write buffer in bytes */
int iBufStart; /* First byte of buffer to write */
int iBufEnd; /* Last byte of buffer to write */
i64 iWriteOff; /* Offset of start of buffer in file */
- sqlite3_file *pFd; /* File handle to write to */
+ sqlite3_file *pFile; /* File to write to */
};
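/*
** The buffering idea behind the structure above can be sketched on its own:
** accumulate bytes in a fixed-size "page" buffer and flush the buffer to the
** underlying file whenever it fills, so that most writes reach the OS in
** aligned, equal-sized chunks. The names BufWriter, buf_write and buf_flush
** are invented for this sketch, and a stdio FILE* stands in for the VFS file
** handle used by the real code.
*/
#include <stdio.h>
#include <string.h>

typedef struct BufWriter BufWriter;
struct BufWriter {
  FILE *f;                      /* Destination file */
  unsigned char aBuf[4096];     /* One "page" of pending output */
  size_t nUsed;                 /* Bytes currently buffered */
};

/* Flush any buffered bytes. Return 0 on success, -1 on a write error. */
static int buf_flush(BufWriter *p){
  if( p->nUsed && fwrite(p->aBuf, 1, p->nUsed, p->f)!=p->nUsed ) return -1;
  p->nUsed = 0;
  return 0;
}

/* Append nData bytes, flushing each time the buffer fills completely. */
static int buf_write(BufWriter *p, const void *pData, size_t nData){
  const unsigned char *a = (const unsigned char *)pData;
  while( nData>0 ){
    size_t nCopy = sizeof(p->aBuf) - p->nUsed;
    if( nCopy>nData ) nCopy = nData;
    memcpy(&p->aBuf[p->nUsed], a, nCopy);
    p->nUsed += nCopy;
    a += nCopy;
    nData -= nCopy;
    if( p->nUsed==sizeof(p->aBuf) && buf_flush(p) ) return -1;
  }
  return 0;
}
/* A caller would invoke buf_flush() once more after the final buf_write(). */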
/*
-** This object is the header on a single record while that record is being
-** held in memory and prior to being written out as part of a PMA.
-**
-** How the linked list is connected depends on how memory is being managed
-** by this module. If using a separate allocation for each in-memory record
-** (VdbeSorter.list.aMemory==0), then the list is always connected using the
-** SorterRecord.u.pNext pointers.
-**
-** Or, if using the single large allocation method (VdbeSorter.list.aMemory!=0),
-** then while records are being accumulated the list is linked using the
-** SorterRecord.u.iNext offset. This is because the aMemory[] array may
-** be sqlite3Realloc()ed while records are being accumulated. Once the VM
-** has finished passing records to the sorter, or when the in-memory buffer
-** is full, the list is sorted. As part of the sorting process, it is
-** converted to use the SorterRecord.u.pNext pointers. See function
-** vdbeSorterSort() for details.
+** A structure to store a single record. All in-memory records are connected
+** together into a linked list headed at VdbeSorter.pRecord using the
+** SorterRecord.pNext pointer.
*/
struct SorterRecord {
- int nVal; /* Size of the record in bytes */
- union {
- SorterRecord *pNext; /* Pointer to next record in list */
- int iNext; /* Offset within aMemory of next record */
- } u;
- /* The data for the record immediately follows this header */
+ void *pVal;
+ int nVal;
+ SorterRecord *pNext;
};
-/* Return a pointer to the buffer containing the record data for SorterRecord
-** object p. Should be used as if:
-**
-** void *SRVAL(SorterRecord *p) { return (void*)&p[1]; }
-*/
-#define SRVAL(p) ((void*)((SorterRecord*)(p) + 1))
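/*
** Both record layouts rely on storing each record's payload immediately after
** its header in a single allocation, which is what SRVAL() above and the
** "pNew->pVal = (void *)&pNew[1]" assignment in the replacement code express.
** A minimal standalone sketch of that pattern, with invented names Rec,
** REC_VAL and rec_new:
*/
#include <stdlib.h>
#include <string.h>

typedef struct Rec Rec;
struct Rec {
  Rec *pNext;                   /* Next record in a singly linked list */
  int nVal;                     /* Size of the payload in bytes */
  /* nVal bytes of payload follow this header in the same allocation */
};

#define REC_VAL(p) ((void*)&(p)[1])   /* Payload begins right after the header */

/* Allocate a record and copy nVal bytes of payload into it. */
static Rec *rec_new(const void *pVal, int nVal){
  Rec *p = (Rec *)malloc(sizeof(Rec) + (size_t)nVal);
  if( p ){
    p->pNext = 0;
    p->nVal = nVal;
    memcpy(REC_VAL(p), pVal, (size_t)nVal);
  }
  return p;
}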
-
+/* Minimum allowable value for the VdbeSorter.nWorking variable */
+#define SORTER_MIN_WORKING 10
-/* Maximum number of PMAs that a single MergeEngine can merge */
+/* Maximum number of segments to merge in a single pass. */
#define SORTER_MAX_MERGE_COUNT 16
-static int vdbeIncrSwap(IncrMerger*);
-static void vdbeIncrFree(IncrMerger *);
-
/*
-** Free all memory belonging to the PmaReader object passed as the
+** Free all memory belonging to the VdbeSorterIter object passed as the second
** argument. All structure fields are set to zero before returning.
*/
-static void vdbePmaReaderClear(PmaReader *pReadr){
- sqlite3_free(pReadr->aAlloc);
- sqlite3_free(pReadr->aBuffer);
- if( pReadr->aMap ) sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap);
- vdbeIncrFree(pReadr->pIncr);
- memset(pReadr, 0, sizeof(PmaReader));
+static void vdbeSorterIterZero(sqlite3 *db, VdbeSorterIter *pIter){
+ sqlite3DbFree(db, pIter->aAlloc);
+ sqlite3DbFree(db, pIter->aBuffer);
+ memset(pIter, 0, sizeof(VdbeSorterIter));
}
/*
-** Read the next nByte bytes of data from the PMA p.
+** Read nByte bytes of data from the stream of data iterated by object p.
** If successful, set *ppOut to point to a buffer containing the data
** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite
** error code.
**
-** The buffer returned in *ppOut is only valid until the
+** The buffer indicated by *ppOut may only be considered valid until the
** next call to this function.
*/
-static int vdbePmaReadBlob(
- PmaReader *p, /* PmaReader from which to take the blob */
+static int vdbeSorterIterRead(
+ sqlite3 *db, /* Database handle (for malloc) */
+ VdbeSorterIter *p, /* Iterator */
int nByte, /* Bytes of data to read */
u8 **ppOut /* OUT: Pointer to buffer containing data */
){
int iBuf; /* Offset within buffer to read from */
int nAvail; /* Bytes of data available in buffer */
-
- if( p->aMap ){
- *ppOut = &p->aMap[p->iReadOff];
- p->iReadOff += nByte;
- return SQLITE_OK;
- }
-
assert( p->aBuffer );
/* If there is no more data to be read from the buffer, read the next
@@ -80593,8 +73650,8 @@ static int vdbePmaReadBlob(
}
assert( nRead>0 );
- /* Readr data from the file. Return early if an error occurs. */
- rc = sqlite3OsRead(p->pFd, p->aBuffer, nRead, p->iReadOff);
+ /* Read data from the file. Return early if an error occurs. */
+ rc = sqlite3OsRead(p->pFile, p->aBuffer, nRead, p->iReadOff);
assert( rc!=SQLITE_IOERR_SHORT_READ );
if( rc!=SQLITE_OK ) return rc;
}
@@ -80614,13 +73671,11 @@ static int vdbePmaReadBlob(
/* Extend the p->aAlloc[] allocation if required. */
if( p->nAlloc<nByte ){
- u8 *aNew;
- int nNew = MAX(128, p->nAlloc*2);
+ int nNew = p->nAlloc*2;
while( nByte>nNew ) nNew = nNew*2;
- aNew = sqlite3Realloc(p->aAlloc, nNew);
- if( !aNew ) return SQLITE_NOMEM;
+ p->aAlloc = sqlite3DbReallocOrFree(db, p->aAlloc, nNew);
+ if( !p->aAlloc ) return SQLITE_NOMEM;
p->nAlloc = nNew;
- p->aAlloc = aNew;
}
/* Copy as much data as is available in the buffer into the start of
@@ -80632,13 +73687,13 @@ static int vdbePmaReadBlob(
/* The following loop copies up to p->nBuffer bytes per iteration into
** the p->aAlloc[] buffer. */
while( nRem>0 ){
- int rc; /* vdbePmaReadBlob() return code */
+ int rc; /* vdbeSorterIterRead() return code */
int nCopy; /* Number of bytes to copy */
u8 *aNext; /* Pointer to buffer to copy data from */
nCopy = nRem;
if( nRem>p->nBuffer ) nCopy = p->nBuffer;
- rc = vdbePmaReadBlob(p, nCopy, &aNext);
+ rc = vdbeSorterIterRead(db, p, nCopy, &aNext);
if( rc!=SQLITE_OK ) return rc;
assert( aNext!=p->aAlloc );
memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy);
@@ -80655,444 +73710,236 @@ static int vdbePmaReadBlob(
** Read a varint from the stream of data accessed by p. Set *pnOut to
** the value read.
*/
-static int vdbePmaReadVarint(PmaReader *p, u64 *pnOut){
+static int vdbeSorterIterVarint(sqlite3 *db, VdbeSorterIter *p, u64 *pnOut){
int iBuf;
- if( p->aMap ){
- p->iReadOff += sqlite3GetVarint(&p->aMap[p->iReadOff], pnOut);
+ iBuf = p->iReadOff % p->nBuffer;
+ if( iBuf && (p->nBuffer-iBuf)>=9 ){
+ p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut);
}else{
- iBuf = p->iReadOff % p->nBuffer;
- if( iBuf && (p->nBuffer-iBuf)>=9 ){
- p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut);
- }else{
- u8 aVarint[16], *a;
- int i = 0, rc;
- do{
- rc = vdbePmaReadBlob(p, 1, &a);
- if( rc ) return rc;
- aVarint[(i++)&0xf] = a[0];
- }while( (a[0]&0x80)!=0 );
- sqlite3GetVarint(aVarint, pnOut);
- }
+ u8 aVarint[16], *a;
+ int i = 0, rc;
+ do{
+ rc = vdbeSorterIterRead(db, p, 1, &a);
+ if( rc ) return rc;
+ aVarint[(i++)&0xf] = a[0];
+ }while( (a[0]&0x80)!=0 );
+ sqlite3GetVarint(aVarint, pnOut);
}
return SQLITE_OK;
}
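/*
** The varint reader above consumes one byte at a time until it sees a byte
** with the high bit clear. As a rough standalone illustration of that loop
** shape only - SQLite's actual varint encoding is big-endian and capped at
** nine bytes, so this is not the same format - a little-endian base-128
** decoder with the invented name demo_get_varint looks like this:
*/
#include <stdint.h>
#include <stddef.h>

/* Decode a little-endian base-128 varint from aIn. Return the number of
** bytes consumed, or 0 if the buffer ends before the varint does. */
static size_t demo_get_varint(const unsigned char *aIn, size_t nIn, uint64_t *pVal){
  uint64_t v = 0;
  size_t i;
  for(i=0; i<nIn && i<10; i++){
    v |= (uint64_t)(aIn[i] & 0x7f) << (7*i);
    if( (aIn[i] & 0x80)==0 ){
      *pVal = v;
      return i+1;
    }
  }
  return 0;
}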
-/*
-** Attempt to memory map file pFile. If successful, set *pp to point to the
-** new mapping and return SQLITE_OK. If the mapping is not attempted
-** (because the file is too large or the VFS layer is configured not to use
-** mmap), return SQLITE_OK and set *pp to NULL.
-**
-** Or, if an error occurs, return an SQLite error code. The final value of
-** *pp is undefined in this case.
-*/
-static int vdbeSorterMapFile(SortSubtask *pTask, SorterFile *pFile, u8 **pp){
- int rc = SQLITE_OK;
- if( pFile->iEof<=(i64)(pTask->pSorter->db->nMaxSorterMmap) ){
- sqlite3_file *pFd = pFile->pFd;
- if( pFd->pMethods->iVersion>=3 ){
- rc = sqlite3OsFetch(pFd, 0, (int)pFile->iEof, (void**)pp);
- testcase( rc!=SQLITE_OK );
- }
- }
- return rc;
-}
-
-/*
-** Attach PmaReader pReadr to file pFile (if it is not already attached to
-** that file) and seek it to offset iOff within the file. Return SQLITE_OK
-** if successful, or an SQLite error code if an error occurs.
-*/
-static int vdbePmaReaderSeek(
- SortSubtask *pTask, /* Task context */
- PmaReader *pReadr, /* Reader whose cursor is to be moved */
- SorterFile *pFile, /* Sorter file to read from */
- i64 iOff /* Offset in pFile */
-){
- int rc = SQLITE_OK;
-
- assert( pReadr->pIncr==0 || pReadr->pIncr->bEof==0 );
-
- if( sqlite3FaultSim(201) ) return SQLITE_IOERR_READ;
- if( pReadr->aMap ){
- sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap);
- pReadr->aMap = 0;
- }
- pReadr->iReadOff = iOff;
- pReadr->iEof = pFile->iEof;
- pReadr->pFd = pFile->pFd;
-
- rc = vdbeSorterMapFile(pTask, pFile, &pReadr->aMap);
- if( rc==SQLITE_OK && pReadr->aMap==0 ){
- int pgsz = pTask->pSorter->pgsz;
- int iBuf = pReadr->iReadOff % pgsz;
- if( pReadr->aBuffer==0 ){
- pReadr->aBuffer = (u8*)sqlite3Malloc(pgsz);
- if( pReadr->aBuffer==0 ) rc = SQLITE_NOMEM;
- pReadr->nBuffer = pgsz;
- }
- if( rc==SQLITE_OK && iBuf ){
- int nRead = pgsz - iBuf;
- if( (pReadr->iReadOff + nRead) > pReadr->iEof ){
- nRead = (int)(pReadr->iEof - pReadr->iReadOff);
- }
- rc = sqlite3OsRead(
- pReadr->pFd, &pReadr->aBuffer[iBuf], nRead, pReadr->iReadOff
- );
- testcase( rc!=SQLITE_OK );
- }
- }
-
- return rc;
-}
/*
-** Advance PmaReader pReadr to the next key in its PMA. Return SQLITE_OK if
+** Advance iterator pIter to the next key in its PMA. Return SQLITE_OK if
** no error occurs, or an SQLite error code if one does.
*/
-static int vdbePmaReaderNext(PmaReader *pReadr){
- int rc = SQLITE_OK; /* Return Code */
+static int vdbeSorterIterNext(
+ sqlite3 *db, /* Database handle (for sqlite3DbMalloc() ) */
+ VdbeSorterIter *pIter /* Iterator to advance */
+){
+ int rc; /* Return Code */
u64 nRec = 0; /* Size of record in bytes */
-
- if( pReadr->iReadOff>=pReadr->iEof ){
- IncrMerger *pIncr = pReadr->pIncr;
- int bEof = 1;
- if( pIncr ){
- rc = vdbeIncrSwap(pIncr);
- if( rc==SQLITE_OK && pIncr->bEof==0 ){
- rc = vdbePmaReaderSeek(
- pIncr->pTask, pReadr, &pIncr->aFile[0], pIncr->iStartOff
- );
- bEof = 0;
- }
- }
-
- if( bEof ){
- /* This is an EOF condition */
- vdbePmaReaderClear(pReadr);
- testcase( rc!=SQLITE_OK );
- return rc;
- }
+ if( pIter->iReadOff>=pIter->iEof ){
+ /* This is an EOF condition */
+ vdbeSorterIterZero(db, pIter);
+ return SQLITE_OK;
}
+ rc = vdbeSorterIterVarint(db, pIter, &nRec);
if( rc==SQLITE_OK ){
- rc = vdbePmaReadVarint(pReadr, &nRec);
- }
- if( rc==SQLITE_OK ){
- pReadr->nKey = (int)nRec;
- rc = vdbePmaReadBlob(pReadr, (int)nRec, &pReadr->aKey);
- testcase( rc!=SQLITE_OK );
+ pIter->nKey = (int)nRec;
+ rc = vdbeSorterIterRead(db, pIter, (int)nRec, &pIter->aKey);
}
return rc;
}
/*
-** Initialize PmaReader pReadr to scan through the PMA stored in file pFile
+** Initialize iterator pIter to scan through the PMA stored in file pFile
** starting at offset iStart and ending at offset iEof-1. This function
-** leaves the PmaReader pointing to the first key in the PMA (or EOF if the
+** leaves the iterator pointing to the first key in the PMA (or EOF if the
** PMA is empty).
-**
-** If the pnByte parameter is NULL, then it is assumed that the file
-** contains a single PMA, and that that PMA omits the initial length varint.
*/
-static int vdbePmaReaderInit(
- SortSubtask *pTask, /* Task context */
- SorterFile *pFile, /* Sorter file to read from */
+static int vdbeSorterIterInit(
+ sqlite3 *db, /* Database handle */
+ const VdbeSorter *pSorter, /* Sorter object */
i64 iStart, /* Start offset in pFile */
- PmaReader *pReadr, /* PmaReader to populate */
+ VdbeSorterIter *pIter, /* Iterator to populate */
i64 *pnByte /* IN/OUT: Increment this value by PMA size */
){
- int rc;
+ int rc = SQLITE_OK;
+ int nBuf;
- assert( pFile->iEof>iStart );
- assert( pReadr->aAlloc==0 && pReadr->nAlloc==0 );
- assert( pReadr->aBuffer==0 );
- assert( pReadr->aMap==0 );
+ nBuf = sqlite3BtreeGetPageSize(db->aDb[0].pBt);
- rc = vdbePmaReaderSeek(pTask, pReadr, pFile, iStart);
- if( rc==SQLITE_OK ){
- u64 nByte; /* Size of PMA in bytes */
- rc = vdbePmaReadVarint(pReadr, &nByte);
- pReadr->iEof = pReadr->iReadOff + nByte;
- *pnByte += nByte;
+ assert( pSorter->iWriteOff>iStart );
+ assert( pIter->aAlloc==0 );
+ assert( pIter->aBuffer==0 );
+ pIter->pFile = pSorter->pTemp1;
+ pIter->iReadOff = iStart;
+ pIter->nAlloc = 128;
+ pIter->aAlloc = (u8 *)sqlite3DbMallocRaw(db, pIter->nAlloc);
+ pIter->nBuffer = nBuf;
+ pIter->aBuffer = (u8 *)sqlite3DbMallocRaw(db, nBuf);
+
+ if( !pIter->aBuffer ){
+ rc = SQLITE_NOMEM;
+ }else{
+ int iBuf;
+
+ iBuf = iStart % nBuf;
+ if( iBuf ){
+ int nRead = nBuf - iBuf;
+ if( (iStart + nRead) > pSorter->iWriteOff ){
+ nRead = (int)(pSorter->iWriteOff - iStart);
+ }
+ rc = sqlite3OsRead(
+ pSorter->pTemp1, &pIter->aBuffer[iBuf], nRead, iStart
+ );
+ assert( rc!=SQLITE_IOERR_SHORT_READ );
+ }
+
+ if( rc==SQLITE_OK ){
+ u64 nByte; /* Size of PMA in bytes */
+ pIter->iEof = pSorter->iWriteOff;
+ rc = vdbeSorterIterVarint(db, pIter, &nByte);
+ pIter->iEof = pIter->iReadOff + nByte;
+ *pnByte += nByte;
+ }
}
if( rc==SQLITE_OK ){
- rc = vdbePmaReaderNext(pReadr);
+ rc = vdbeSorterIterNext(db, pIter);
}
return rc;
}
-/*
-** A version of vdbeSorterCompare() that assumes that it has already been
-** determined that the first field of key1 is equal to the first field of
-** key2.
-*/
-static int vdbeSorterCompareTail(
- SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
- int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
- const void *pKey1, int nKey1, /* Left side of comparison */
- const void *pKey2, int nKey2 /* Right side of comparison */
-){
- UnpackedRecord *r2 = pTask->pUnpacked;
- if( *pbKey2Cached==0 ){
- sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2);
- *pbKey2Cached = 1;
- }
- return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, r2, 1);
-}
/*
** Compare key1 (buffer pKey1, size nKey1 bytes) with key2 (buffer pKey2,
-** size nKey2 bytes). Use (pTask->pKeyInfo) for the collation sequences
-** used by the comparison. Return the result of the comparison.
-**
-** If IN/OUT parameter *pbKey2Cached is true when this function is called,
-** it is assumed that (pTask->pUnpacked) contains the unpacked version
-** of key2. If it is false, (pTask->pUnpacked) is populated with the unpacked
-** version of key2 and *pbKey2Cached set to true before returning.
-**
-** If an OOM error is encountered, (pTask->pUnpacked->error_rc) is set
-** to SQLITE_NOMEM.
-*/
-static int vdbeSorterCompare(
- SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
- int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
- const void *pKey1, int nKey1, /* Left side of comparison */
- const void *pKey2, int nKey2 /* Right side of comparison */
-){
- UnpackedRecord *r2 = pTask->pUnpacked;
- if( !*pbKey2Cached ){
- sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2);
- *pbKey2Cached = 1;
- }
- return sqlite3VdbeRecordCompare(nKey1, pKey1, r2);
-}
-
-/*
-** A specially optimized version of vdbeSorterCompare() that assumes that
-** the first field of each key is a TEXT value and that the collation
-** sequence to compare them with is BINARY.
-*/
-static int vdbeSorterCompareText(
- SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
- int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
+** size nKey2 bytes). Argument pKeyInfo supplies the collation functions
+** used by the comparison. If an error occurs, return an SQLite error code.
+** Otherwise, return SQLITE_OK and set *pRes to a negative, zero or positive
+** value, depending on whether key1 is smaller, equal to or larger than key2.
+**
+** If the bOmitRowid argument is non-zero, assume both keys end in a rowid
+** field. For the purposes of the comparison, ignore it. Also, if bOmitRowid
+** is true and key1 contains even a single NULL value, it is considered to
+** be less than key2. Even if key2 also contains NULL values.
+**
+** If pKey2 is passed a NULL pointer, then it is assumed that the pCsr->aSpace
+** has been allocated and contains an unpacked record that is used as key2.
+*/
+static void vdbeSorterCompare(
+ const VdbeCursor *pCsr, /* Cursor object (for pKeyInfo) */
+ int nIgnore, /* Ignore the last nIgnore fields */
const void *pKey1, int nKey1, /* Left side of comparison */
- const void *pKey2, int nKey2 /* Right side of comparison */
+ const void *pKey2, int nKey2, /* Right side of comparison */
+ int *pRes /* OUT: Result of comparison */
){
- const u8 * const p1 = (const u8 * const)pKey1;
- const u8 * const p2 = (const u8 * const)pKey2;
- const u8 * const v1 = &p1[ p1[0] ]; /* Pointer to value 1 */
- const u8 * const v2 = &p2[ p2[0] ]; /* Pointer to value 2 */
-
- int n1;
- int n2;
- int res;
+ KeyInfo *pKeyInfo = pCsr->pKeyInfo;
+ VdbeSorter *pSorter = pCsr->pSorter;
+ UnpackedRecord *r2 = pSorter->pUnpacked;
+ int i;
- getVarint32(&p1[1], n1); n1 = (n1 - 13) / 2;
- getVarint32(&p2[1], n2); n2 = (n2 - 13) / 2;
- res = memcmp(v1, v2, MIN(n1, n2));
- if( res==0 ){
- res = n1 - n2;
+ if( pKey2 ){
+ sqlite3VdbeRecordUnpack(pKeyInfo, nKey2, pKey2, r2);
}
- if( res==0 ){
- if( pTask->pSorter->pKeyInfo->nField>1 ){
- res = vdbeSorterCompareTail(
- pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2
- );
- }
- }else{
- if( pTask->pSorter->pKeyInfo->aSortOrder[0] ){
- res = res * -1;
+ if( nIgnore ){
+ r2->nField = pKeyInfo->nField - nIgnore;
+ assert( r2->nField>0 );
+ for(i=0; i<r2->nField; i++){
+ if( r2->aMem[i].flags & MEM_Null ){
+ *pRes = -1;
+ return;
+ }
}
+ r2->flags |= UNPACKED_PREFIX_MATCH;
}
- return res;
+ *pRes = sqlite3VdbeRecordCompare(nKey1, pKey1, r2);
}
/*
-** A specially optimized version of vdbeSorterCompare() that assumes that
-** the first field of each key is an INTEGER value.
+** This function is called to compare two iterator keys when merging
+** multiple b-tree segments. Parameter iOut is the index of the aTree[]
+** value to recalculate.
*/
-static int vdbeSorterCompareInt(
- SortSubtask *pTask, /* Subtask context (for pKeyInfo) */
- int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */
- const void *pKey1, int nKey1, /* Left side of comparison */
- const void *pKey2, int nKey2 /* Right side of comparison */
-){
- const u8 * const p1 = (const u8 * const)pKey1;
- const u8 * const p2 = (const u8 * const)pKey2;
- const int s1 = p1[1]; /* Left hand serial type */
- const int s2 = p2[1]; /* Right hand serial type */
- const u8 * const v1 = &p1[ p1[0] ]; /* Pointer to value 1 */
- const u8 * const v2 = &p2[ p2[0] ]; /* Pointer to value 2 */
- int res; /* Return value */
-
- assert( (s1>0 && s1<7) || s1==8 || s1==9 );
- assert( (s2>0 && s2<7) || s2==8 || s2==9 );
-
- if( s1>7 && s2>7 ){
- res = s1 - s2;
- }else{
- if( s1==s2 ){
- if( (*v1 ^ *v2) & 0x80 ){
- /* The two values have different signs */
- res = (*v1 & 0x80) ? -1 : +1;
- }else{
- /* The two values have the same sign. Compare using memcmp(). */
- static const u8 aLen[] = {0, 1, 2, 3, 4, 6, 8 };
- int i;
- res = 0;
- for(i=0; i<aLen[s1]; i++){
- if( (res = v1[i] - v2[i]) ) break;
- }
- }
- }else{
- if( s2>7 ){
- res = +1;
- }else if( s1>7 ){
- res = -1;
- }else{
- res = s1 - s2;
- }
- assert( res!=0 );
+static int vdbeSorterDoCompare(const VdbeCursor *pCsr, int iOut){
+ VdbeSorter *pSorter = pCsr->pSorter;
+ int i1;
+ int i2;
+ int iRes;
+ VdbeSorterIter *p1;
+ VdbeSorterIter *p2;
- if( res>0 ){
- if( *v1 & 0x80 ) res = -1;
- }else{
- if( *v2 & 0x80 ) res = +1;
- }
- }
+ assert( iOut<pSorter->nTree && iOut>0 );
+
+ if( iOut>=(pSorter->nTree/2) ){
+ i1 = (iOut - pSorter->nTree/2) * 2;
+ i2 = i1 + 1;
+ }else{
+ i1 = pSorter->aTree[iOut*2];
+ i2 = pSorter->aTree[iOut*2+1];
}
- if( res==0 ){
- if( pTask->pSorter->pKeyInfo->nField>1 ){
- res = vdbeSorterCompareTail(
- pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2
- );
+ p1 = &pSorter->aIter[i1];
+ p2 = &pSorter->aIter[i2];
+
+ if( p1->pFile==0 ){
+ iRes = i2;
+ }else if( p2->pFile==0 ){
+ iRes = i1;
+ }else{
+ int res;
+ assert( pCsr->pSorter->pUnpacked!=0 ); /* allocated in vdbeSorterMerge() */
+ vdbeSorterCompare(
+ pCsr, 0, p1->aKey, p1->nKey, p2->aKey, p2->nKey, &res
+ );
+ if( res<=0 ){
+ iRes = i1;
+ }else{
+ iRes = i2;
}
- }else if( pTask->pSorter->pKeyInfo->aSortOrder[0] ){
- res = res * -1;
}
- return res;
+ pSorter->aTree[iOut] = iRes;
+ return SQLITE_OK;
}
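/*
** The aTree[] bookkeeping above is a tournament ("selection") tree: each
** internal node stores the index of the input whose current key is smallest
** within its subtree, so aTree[1] always names the overall minimum. A
** standalone sketch, using plain ints in place of serialized records and the
** invented names N_WAY and rebuild_tree (the real code recomputes only one
** root-to-leaf path after an input is advanced, rather than the whole tree):
*/
#define N_WAY 4                       /* Number of inputs; must be a power of two */

/* aKey[i] is the current key of input i, or INT_MAX once input i is
** exhausted. aTree[1..N_WAY-1] are the internal nodes. */
static void rebuild_tree(const int *aKey, int *aTree){
  int j;
  for(j=N_WAY-1; j>0; j--){
    int i1, i2;
    if( j>=N_WAY/2 ){                 /* Children of j are leaves (inputs) */
      i1 = (j - N_WAY/2) * 2;
      i2 = i1 + 1;
    }else{                            /* Children of j are internal nodes */
      i1 = aTree[j*2];
      i2 = aTree[j*2+1];
    }
    aTree[j] = (aKey[i1]<=aKey[i2]) ? i1 : i2;
  }
}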
/*
** Initialize the temporary index cursor just opened as a sorter cursor.
-**
-** Usually, the sorter module uses the value of (pCsr->pKeyInfo->nField)
-** to determine the number of fields that should be compared from the
-** records being sorted. However, if the value passed as argument nField
-** is non-zero and the sorter is able to guarantee a stable sort, nField
-** is used instead. This is used when sorting records for a CREATE INDEX
-** statement. In this case, keys are always delivered to the sorter in
-** order of the primary key, which happens to make up the final part
-** of the records being sorted. So if the sort is stable, there is never
-** any reason to compare PK fields and they can be ignored for a small
-** performance boost.
-**
-** The sorter can guarantee a stable sort when running in single-threaded
-** mode, but not in multi-threaded mode.
-**
-** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
*/
-SQLITE_PRIVATE int sqlite3VdbeSorterInit(
- sqlite3 *db, /* Database connection (for malloc()) */
- int nField, /* Number of key fields in each record */
- VdbeCursor *pCsr /* Cursor that holds the new sorter */
-){
+SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *db, VdbeCursor *pCsr){
int pgsz; /* Page size of main database */
- int i; /* Used to iterate through aTask[] */
int mxCache; /* Cache size */
VdbeSorter *pSorter; /* The new sorter */
- KeyInfo *pKeyInfo; /* Copy of pCsr->pKeyInfo with db==0 */
- int szKeyInfo; /* Size of pCsr->pKeyInfo in bytes */
- int sz; /* Size of pSorter in bytes */
- int rc = SQLITE_OK;
-#if SQLITE_MAX_WORKER_THREADS==0
-# define nWorker 0
-#else
- int nWorker;
-#endif
-
- /* Initialize the upper limit on the number of worker threads */
-#if SQLITE_MAX_WORKER_THREADS>0
- if( sqlite3TempInMemory(db) || sqlite3GlobalConfig.bCoreMutex==0 ){
- nWorker = 0;
- }else{
- nWorker = db->aLimit[SQLITE_LIMIT_WORKER_THREADS];
- }
-#endif
-
- /* Do not allow the total number of threads (main thread + all workers)
- ** to exceed the maximum merge count */
-#if SQLITE_MAX_WORKER_THREADS>=SORTER_MAX_MERGE_COUNT
- if( nWorker>=SORTER_MAX_MERGE_COUNT ){
- nWorker = SORTER_MAX_MERGE_COUNT-1;
- }
-#endif
+ char *d; /* Dummy */
assert( pCsr->pKeyInfo && pCsr->pBt==0 );
- szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nField-1)*sizeof(CollSeq*);
- sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask);
-
- pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo);
- pCsr->pSorter = pSorter;
+ pCsr->pSorter = pSorter = sqlite3DbMallocZero(db, sizeof(VdbeSorter));
if( pSorter==0 ){
- rc = SQLITE_NOMEM;
- }else{
- pSorter->pKeyInfo = pKeyInfo = (KeyInfo*)((u8*)pSorter + sz);
- memcpy(pKeyInfo, pCsr->pKeyInfo, szKeyInfo);
- pKeyInfo->db = 0;
- if( nField && nWorker==0 ){
- pKeyInfo->nXField += (pKeyInfo->nField - nField);
- pKeyInfo->nField = nField;
- }
- pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt);
- pSorter->nTask = nWorker + 1;
- pSorter->iPrev = nWorker-1;
- pSorter->bUseThreads = (pSorter->nTask>1);
- pSorter->db = db;
- for(i=0; i<pSorter->nTask; i++){
- SortSubtask *pTask = &pSorter->aTask[i];
- pTask->pSorter = pSorter;
- }
-
- if( !sqlite3TempInMemory(db) ){
- u32 szPma = sqlite3GlobalConfig.szPma;
- pSorter->mnPmaSize = szPma * pgsz;
- mxCache = db->aDb[0].pSchema->cache_size;
- if( mxCache<(int)szPma ) mxCache = (int)szPma;
- pSorter->mxPmaSize = MIN((i64)mxCache*pgsz, SQLITE_MAX_PMASZ);
-
- /* EVIDENCE-OF: R-26747-61719 When the application provides any amount of
- ** scratch memory using SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary
- ** large heap allocations.
- */
- if( sqlite3GlobalConfig.pScratch==0 ){
- assert( pSorter->iMemory==0 );
- pSorter->nMemory = pgsz;
- pSorter->list.aMemory = (u8*)sqlite3Malloc(pgsz);
- if( !pSorter->list.aMemory ) rc = SQLITE_NOMEM;
- }
- }
+ return SQLITE_NOMEM;
+ }
+
+ pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pCsr->pKeyInfo, 0, 0, &d);
+ if( pSorter->pUnpacked==0 ) return SQLITE_NOMEM;
+ assert( pSorter->pUnpacked==(UnpackedRecord *)d );
- if( (pKeyInfo->nField+pKeyInfo->nXField)<13
- && (pKeyInfo->aColl[0]==0 || pKeyInfo->aColl[0]==db->pDfltColl)
- ){
- pSorter->typeMask = SORTER_TYPE_INTEGER | SORTER_TYPE_TEXT;
- }
+ if( !sqlite3TempInMemory(db) ){
+ pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt);
+ pSorter->mnPmaSize = SORTER_MIN_WORKING * pgsz;
+ mxCache = db->aDb[0].pSchema->cache_size;
+ if( mxCache<SORTER_MIN_WORKING ) mxCache = SORTER_MIN_WORKING;
+ pSorter->mxPmaSize = mxCache * pgsz;
}
- return rc;
+ return SQLITE_OK;
}
-#undef nWorker /* Defined at the top of this function */
/*
** Free the list of sorted records starting at pRecord.
@@ -81101,341 +73948,76 @@ static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){
SorterRecord *p;
SorterRecord *pNext;
for(p=pRecord; p; p=pNext){
- pNext = p->u.pNext;
+ pNext = p->pNext;
sqlite3DbFree(db, p);
}
}
/*
-** Free all resources owned by the object indicated by argument pTask. All
-** fields of *pTask are zeroed before returning.
-*/
-static void vdbeSortSubtaskCleanup(sqlite3 *db, SortSubtask *pTask){
- sqlite3DbFree(db, pTask->pUnpacked);
-#if SQLITE_MAX_WORKER_THREADS>0
- /* pTask->list.aMemory can only be non-zero if it was handed memory
-  ** from the main thread. That only occurs if SQLITE_MAX_WORKER_THREADS>0 */
- if( pTask->list.aMemory ){
- sqlite3_free(pTask->list.aMemory);
- }else
-#endif
- {
- assert( pTask->list.aMemory==0 );
- vdbeSorterRecordFree(0, pTask->list.pList);
- }
- if( pTask->file.pFd ){
- sqlite3OsCloseFree(pTask->file.pFd);
- }
- if( pTask->file2.pFd ){
- sqlite3OsCloseFree(pTask->file2.pFd);
- }
- memset(pTask, 0, sizeof(SortSubtask));
-}
-
-#ifdef SQLITE_DEBUG_SORTER_THREADS
-static void vdbeSorterWorkDebug(SortSubtask *pTask, const char *zEvent){
- i64 t;
- int iTask = (pTask - pTask->pSorter->aTask);
- sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t);
- fprintf(stderr, "%lld:%d %s\n", t, iTask, zEvent);
-}
-static void vdbeSorterRewindDebug(const char *zEvent){
- i64 t;
- sqlite3OsCurrentTimeInt64(sqlite3_vfs_find(0), &t);
- fprintf(stderr, "%lld:X %s\n", t, zEvent);
-}
-static void vdbeSorterPopulateDebug(
- SortSubtask *pTask,
- const char *zEvent
-){
- i64 t;
- int iTask = (pTask - pTask->pSorter->aTask);
- sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t);
- fprintf(stderr, "%lld:bg%d %s\n", t, iTask, zEvent);
-}
-static void vdbeSorterBlockDebug(
- SortSubtask *pTask,
- int bBlocked,
- const char *zEvent
-){
- if( bBlocked ){
- i64 t;
- sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t);
- fprintf(stderr, "%lld:main %s\n", t, zEvent);
- }
-}
-#else
-# define vdbeSorterWorkDebug(x,y)
-# define vdbeSorterRewindDebug(y)
-# define vdbeSorterPopulateDebug(x,y)
-# define vdbeSorterBlockDebug(x,y,z)
-#endif
-
-#if SQLITE_MAX_WORKER_THREADS>0
-/*
-** Join thread pTask->thread.
-*/
-static int vdbeSorterJoinThread(SortSubtask *pTask){
- int rc = SQLITE_OK;
- if( pTask->pThread ){
-#ifdef SQLITE_DEBUG_SORTER_THREADS
- int bDone = pTask->bDone;
-#endif
- void *pRet = SQLITE_INT_TO_PTR(SQLITE_ERROR);
- vdbeSorterBlockDebug(pTask, !bDone, "enter");
- (void)sqlite3ThreadJoin(pTask->pThread, &pRet);
- vdbeSorterBlockDebug(pTask, !bDone, "exit");
- rc = SQLITE_PTR_TO_INT(pRet);
- assert( pTask->bDone==1 );
- pTask->bDone = 0;
- pTask->pThread = 0;
- }
- return rc;
-}
-
-/*
-** Launch a background thread to run xTask(pIn).
-*/
-static int vdbeSorterCreateThread(
- SortSubtask *pTask, /* Thread will use this task object */
- void *(*xTask)(void*), /* Routine to run in a separate thread */
- void *pIn /* Argument passed into xTask() */
-){
- assert( pTask->pThread==0 && pTask->bDone==0 );
- return sqlite3ThreadCreate(&pTask->pThread, xTask, pIn);
-}
-
-/*
-** Join all outstanding threads launched by SorterWrite() to create
-** level-0 PMAs.
-*/
-static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){
- int rc = rcin;
- int i;
-
- /* This function is always called by the main user thread.
- **
- ** If this function is being called after SorterRewind() has been called,
- ** it is possible that thread pSorter->aTask[pSorter->nTask-1].pThread
- ** is currently attempt to join one of the other threads. To avoid a race
- ** condition where this thread also attempts to join the same object, join
- ** thread pSorter->aTask[pSorter->nTask-1].pThread first. */
- for(i=pSorter->nTask-1; i>=0; i--){
- SortSubtask *pTask = &pSorter->aTask[i];
- int rc2 = vdbeSorterJoinThread(pTask);
- if( rc==SQLITE_OK ) rc = rc2;
- }
- return rc;
-}
-#else
-# define vdbeSorterJoinAll(x,rcin) (rcin)
-# define vdbeSorterJoinThread(pTask) SQLITE_OK
-#endif
-
-/*
-** Allocate a new MergeEngine object capable of handling up to
-** nReader PmaReader inputs.
-**
-** nReader is automatically rounded up to the next power of two.
-** nReader may not exceed SORTER_MAX_MERGE_COUNT even after rounding up.
-*/
-static MergeEngine *vdbeMergeEngineNew(int nReader){
- int N = 2; /* Smallest power of two >= nReader */
- int nByte; /* Total bytes of space to allocate */
- MergeEngine *pNew; /* Pointer to allocated object to return */
-
- assert( nReader<=SORTER_MAX_MERGE_COUNT );
-
- while( N<nReader ) N += N;
- nByte = sizeof(MergeEngine) + N * (sizeof(int) + sizeof(PmaReader));
-
- pNew = sqlite3FaultSim(100) ? 0 : (MergeEngine*)sqlite3MallocZero(nByte);
- if( pNew ){
- pNew->nTree = N;
- pNew->pTask = 0;
- pNew->aReadr = (PmaReader*)&pNew[1];
- pNew->aTree = (int*)&pNew->aReadr[N];
- }
- return pNew;
-}
-
-/*
-** Free the MergeEngine object passed as the only argument.
-*/
-static void vdbeMergeEngineFree(MergeEngine *pMerger){
- int i;
- if( pMerger ){
- for(i=0; i<pMerger->nTree; i++){
- vdbePmaReaderClear(&pMerger->aReadr[i]);
- }
- }
- sqlite3_free(pMerger);
-}
-
-/*
-** Free all resources associated with the IncrMerger object indicated by
-** the first argument.
-*/
-static void vdbeIncrFree(IncrMerger *pIncr){
- if( pIncr ){
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pIncr->bUseThread ){
- vdbeSorterJoinThread(pIncr->pTask);
- if( pIncr->aFile[0].pFd ) sqlite3OsCloseFree(pIncr->aFile[0].pFd);
- if( pIncr->aFile[1].pFd ) sqlite3OsCloseFree(pIncr->aFile[1].pFd);
- }
-#endif
- vdbeMergeEngineFree(pIncr->pMerger);
- sqlite3_free(pIncr);
- }
-}
-
-/*
-** Reset a sorting cursor back to its original empty state.
-*/
-SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *db, VdbeSorter *pSorter){
- int i;
- (void)vdbeSorterJoinAll(pSorter, SQLITE_OK);
- assert( pSorter->bUseThreads || pSorter->pReader==0 );
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pSorter->pReader ){
- vdbePmaReaderClear(pSorter->pReader);
- sqlite3DbFree(db, pSorter->pReader);
- pSorter->pReader = 0;
- }
-#endif
- vdbeMergeEngineFree(pSorter->pMerger);
- pSorter->pMerger = 0;
- for(i=0; i<pSorter->nTask; i++){
- SortSubtask *pTask = &pSorter->aTask[i];
- vdbeSortSubtaskCleanup(db, pTask);
- pTask->pSorter = pSorter;
- }
- if( pSorter->list.aMemory==0 ){
- vdbeSorterRecordFree(0, pSorter->list.pList);
- }
- pSorter->list.pList = 0;
- pSorter->list.szPMA = 0;
- pSorter->bUsePMA = 0;
- pSorter->iMemory = 0;
- pSorter->mxKeysize = 0;
- sqlite3DbFree(db, pSorter->pUnpacked);
- pSorter->pUnpacked = 0;
-}
-
-/*
** Free any cursor components allocated by sqlite3VdbeSorterXXX routines.
*/
SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){
VdbeSorter *pSorter = pCsr->pSorter;
if( pSorter ){
- sqlite3VdbeSorterReset(db, pSorter);
- sqlite3_free(pSorter->list.aMemory);
+ if( pSorter->aIter ){
+ int i;
+ for(i=0; i<pSorter->nTree; i++){
+ vdbeSorterIterZero(db, &pSorter->aIter[i]);
+ }
+ sqlite3DbFree(db, pSorter->aIter);
+ }
+ if( pSorter->pTemp1 ){
+ sqlite3OsCloseFree(pSorter->pTemp1);
+ }
+ vdbeSorterRecordFree(db, pSorter->pRecord);
+ sqlite3DbFree(db, pSorter->pUnpacked);
sqlite3DbFree(db, pSorter);
pCsr->pSorter = 0;
}
}
-#if SQLITE_MAX_MMAP_SIZE>0
-/*
-** The first argument is a file-handle open on a temporary file. The file
-** is guaranteed to be nByte bytes or smaller in size. This function
-** attempts to extend the file to nByte bytes in size and to ensure that
-** the VFS has memory mapped it.
-**
-** Whether or not the file does end up memory mapped of course depends on
-** the specific VFS implementation.
-*/
-static void vdbeSorterExtendFile(sqlite3 *db, sqlite3_file *pFd, i64 nByte){
- if( nByte<=(i64)(db->nMaxSorterMmap) && pFd->pMethods->iVersion>=3 ){
- void *p = 0;
- int chunksize = 4*1024;
- sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_CHUNK_SIZE, &chunksize);
- sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_SIZE_HINT, &nByte);
- sqlite3OsFetch(pFd, 0, (int)nByte, &p);
- sqlite3OsUnfetch(pFd, 0, p);
- }
-}
-#else
-# define vdbeSorterExtendFile(x,y,z)
-#endif
-
/*
** Allocate space for a file-handle and open a temporary file. If successful,
-** set *ppFd to point to the malloc'd file-handle and return SQLITE_OK.
-** Otherwise, set *ppFd to 0 and return an SQLite error code.
+** set *ppFile to point to the malloc'd file-handle and return SQLITE_OK.
+** Otherwise, set *ppFile to 0 and return an SQLite error code.
*/
-static int vdbeSorterOpenTempFile(
- sqlite3 *db, /* Database handle doing sort */
- i64 nExtend, /* Attempt to extend file to this size */
- sqlite3_file **ppFd
-){
- int rc;
- if( sqlite3FaultSim(202) ) return SQLITE_IOERR_ACCESS;
- rc = sqlite3OsOpenMalloc(db->pVfs, 0, ppFd,
+static int vdbeSorterOpenTempFile(sqlite3 *db, sqlite3_file **ppFile){
+ int dummy;
+ return sqlite3OsOpenMalloc(db->pVfs, 0, ppFile,
SQLITE_OPEN_TEMP_JOURNAL |
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
- SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &rc
+ SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &dummy
);
- if( rc==SQLITE_OK ){
- i64 max = SQLITE_MAX_MMAP_SIZE;
- sqlite3OsFileControlHint(*ppFd, SQLITE_FCNTL_MMAP_SIZE, (void*)&max);
- if( nExtend>0 ){
- vdbeSorterExtendFile(db, *ppFd, nExtend);
- }
- }
- return rc;
-}
-
-/*
-** If it has not already been allocated, allocate the UnpackedRecord
-** structure at pTask->pUnpacked. Return SQLITE_OK if successful (or
-** if no allocation was required), or SQLITE_NOMEM otherwise.
-*/
-static int vdbeSortAllocUnpacked(SortSubtask *pTask){
- if( pTask->pUnpacked==0 ){
- char *pFree;
- pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord(
- pTask->pSorter->pKeyInfo, 0, 0, &pFree
- );
- assert( pTask->pUnpacked==(UnpackedRecord*)pFree );
- if( pFree==0 ) return SQLITE_NOMEM;
- pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nField;
- pTask->pUnpacked->errCode = 0;
- }
- return SQLITE_OK;
}
-
/*
** Merge the two sorted lists p1 and p2 into a single list.
** Set *ppOut to the head of the new list.
*/
static void vdbeSorterMerge(
- SortSubtask *pTask, /* Calling thread context */
+ const VdbeCursor *pCsr, /* For pKeyInfo */
SorterRecord *p1, /* First list to merge */
SorterRecord *p2, /* Second list to merge */
SorterRecord **ppOut /* OUT: Head of merged list */
){
SorterRecord *pFinal = 0;
SorterRecord **pp = &pFinal;
- int bCached = 0;
+ void *pVal2 = p2 ? p2->pVal : 0;
while( p1 && p2 ){
int res;
- res = pTask->xCompare(
- pTask, &bCached, SRVAL(p1), p1->nVal, SRVAL(p2), p2->nVal
- );
-
+ vdbeSorterCompare(pCsr, 0, p1->pVal, p1->nVal, pVal2, p2->nVal, &res);
if( res<=0 ){
*pp = p1;
- pp = &p1->u.pNext;
- p1 = p1->u.pNext;
+ pp = &p1->pNext;
+ p1 = p1->pNext;
+ pVal2 = 0;
}else{
*pp = p2;
- pp = &p2->u.pNext;
- p2 = p2->u.pNext;
- bCached = 0;
+ pp = &p2->pNext;
+ p2 = p2->pNext;
+ if( p2==0 ) break;
+ pVal2 = p2->pVal;
}
}
*pp = p1 ? p1 : p2;
@@ -81443,56 +74025,27 @@ static void vdbeSorterMerge(
}
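/*
** The merge step above is the textbook merge of two already-sorted singly
** linked lists, reusing the existing nodes. A standalone sketch with plain
** int keys and the invented names Node and merge_sorted (the real code calls
** vdbeSorterCompare() on serialized records instead of comparing ints):
*/
typedef struct Node Node;
struct Node {
  int key;                      /* Sort key */
  Node *pNext;                  /* Next node in the list */
};

static Node *merge_sorted(Node *p1, Node *p2){
  Node *pHead = 0;
  Node **pp = &pHead;           /* Location to store the next chosen node */
  while( p1 && p2 ){
    if( p1->key<=p2->key ){
      *pp = p1; pp = &p1->pNext; p1 = p1->pNext;
    }else{
      *pp = p2; pp = &p2->pNext; p2 = p2->pNext;
    }
  }
  *pp = p1 ? p1 : p2;           /* Append whichever list is not yet empty */
  return pHead;
}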
/*
-** Return the SorterCompare function to compare values collected by the
-** sorter object passed as the only argument.
-*/
-static SorterCompare vdbeSorterGetCompare(VdbeSorter *p){
- if( p->typeMask==SORTER_TYPE_INTEGER ){
- return vdbeSorterCompareInt;
- }else if( p->typeMask==SORTER_TYPE_TEXT ){
- return vdbeSorterCompareText;
- }
- return vdbeSorterCompare;
-}
-
-/*
-** Sort the linked list of records headed at pTask->pList. Return
-** SQLITE_OK if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if
-** an error occurs.
+** Sort the linked list of records headed at pCsr->pRecord. Return SQLITE_OK
+** if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if an error
+** occurs.
*/
-static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){
+static int vdbeSorterSort(const VdbeCursor *pCsr){
int i;
SorterRecord **aSlot;
SorterRecord *p;
- int rc;
-
- rc = vdbeSortAllocUnpacked(pTask);
- if( rc!=SQLITE_OK ) return rc;
-
- p = pList->pList;
- pTask->xCompare = vdbeSorterGetCompare(pTask->pSorter);
+ VdbeSorter *pSorter = pCsr->pSorter;
aSlot = (SorterRecord **)sqlite3MallocZero(64 * sizeof(SorterRecord *));
if( !aSlot ){
return SQLITE_NOMEM;
}
+ p = pSorter->pRecord;
while( p ){
- SorterRecord *pNext;
- if( pList->aMemory ){
- if( (u8*)p==pList->aMemory ){
- pNext = 0;
- }else{
- assert( p->u.iNext<sqlite3MallocSize(pList->aMemory) );
- pNext = (SorterRecord*)&pList->aMemory[p->u.iNext];
- }
- }else{
- pNext = p->u.pNext;
- }
-
- p->u.pNext = 0;
+ SorterRecord *pNext = p->pNext;
+ p->pNext = 0;
for(i=0; aSlot[i]; i++){
- vdbeSorterMerge(pTask, p, aSlot[i], &p);
+ vdbeSorterMerge(pCsr, p, aSlot[i], &p);
aSlot[i] = 0;
}
aSlot[i] = p;
@@ -81501,43 +74054,42 @@ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){
p = 0;
for(i=0; i<64; i++){
- vdbeSorterMerge(pTask, p, aSlot[i], &p);
+ vdbeSorterMerge(pCsr, p, aSlot[i], &p);
}
- pList->pList = p;
+ pSorter->pRecord = p;
sqlite3_free(aSlot);
- assert( pTask->pUnpacked->errCode==SQLITE_OK
- || pTask->pUnpacked->errCode==SQLITE_NOMEM
- );
- return pTask->pUnpacked->errCode;
+ return SQLITE_OK;
}
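/*
** The aSlot[] loop above is a bottom-up merge sort of a linked list: slot i
** holds a sorted sub-list of 2^i records (or is empty), and inserting a new
** record cascades merges until a free slot is found. Continuing the Node and
** merge_sorted() sketch shown after the merge step above, with the invented
** name slot_sort:
*/
static Node *slot_sort(Node *pList){
  Node *aSlot[64];
  Node *p = pList;
  int i;
  for(i=0; i<64; i++) aSlot[i] = 0;
  while( p ){
    Node *pNext = p->pNext;
    p->pNext = 0;
    for(i=0; aSlot[i]; i++){          /* Cascade: merge with each occupied slot */
      p = merge_sorted(p, aSlot[i]);
      aSlot[i] = 0;
    }
    aSlot[i] = p;
    p = pNext;
  }
  p = 0;
  for(i=0; i<64; i++){                /* Final pass: merge every remaining slot */
    p = merge_sorted(p, aSlot[i]);
  }
  return p;
}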
/*
-** Initialize a PMA-writer object.
+** Initialize a file-writer object.
*/
-static void vdbePmaWriterInit(
- sqlite3_file *pFd, /* File handle to write to */
- PmaWriter *p, /* Object to populate */
- int nBuf, /* Buffer size */
- i64 iStart /* Offset of pFd to begin writing at */
+static void fileWriterInit(
+ sqlite3 *db, /* Database (for malloc) */
+ sqlite3_file *pFile, /* File to write to */
+ FileWriter *p, /* Object to populate */
+ i64 iStart /* Offset of pFile to begin writing at */
){
- memset(p, 0, sizeof(PmaWriter));
- p->aBuffer = (u8*)sqlite3Malloc(nBuf);
+ int nBuf = sqlite3BtreeGetPageSize(db->aDb[0].pBt);
+
+ memset(p, 0, sizeof(FileWriter));
+ p->aBuffer = (u8 *)sqlite3DbMallocRaw(db, nBuf);
if( !p->aBuffer ){
p->eFWErr = SQLITE_NOMEM;
}else{
p->iBufEnd = p->iBufStart = (iStart % nBuf);
p->iWriteOff = iStart - p->iBufStart;
p->nBuffer = nBuf;
- p->pFd = pFd;
+ p->pFile = pFile;
}
}
/*
-** Write nData bytes of data to the PMA. Return SQLITE_OK
+** Write nData bytes of data to the file-writer object. Return SQLITE_OK
** if successful, or an SQLite error code if an error occurs.
*/
-static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){
+static void fileWriterWrite(FileWriter *p, u8 *pData, int nData){
int nRem = nData;
while( nRem>0 && p->eFWErr==0 ){
int nCopy = nRem;
@@ -81548,7 +74100,7 @@ static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){
memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy);
p->iBufEnd += nCopy;
if( p->iBufEnd==p->nBuffer ){
- p->eFWErr = sqlite3OsWrite(p->pFd,
+ p->eFWErr = sqlite3OsWrite(p->pFile,
&p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
p->iWriteOff + p->iBufStart
);
@@ -81562,44 +74114,43 @@ static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){
}
/*
-** Flush any buffered data to disk and clean up the PMA-writer object.
-** The results of using the PMA-writer after this call are undefined.
+** Flush any buffered data to disk and clean up the file-writer object.
+** The results of using the file-writer after this call are undefined.
** Return SQLITE_OK if flushing the buffered data succeeds or is not
** required. Otherwise, return an SQLite error code.
**
** Before returning, set *piEof to the offset immediately following the
** last byte written to the file.
*/
-static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof){
+static int fileWriterFinish(sqlite3 *db, FileWriter *p, i64 *piEof){
int rc;
if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){
- p->eFWErr = sqlite3OsWrite(p->pFd,
+ p->eFWErr = sqlite3OsWrite(p->pFile,
&p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
p->iWriteOff + p->iBufStart
);
}
*piEof = (p->iWriteOff + p->iBufEnd);
- sqlite3_free(p->aBuffer);
+ sqlite3DbFree(db, p->aBuffer);
rc = p->eFWErr;
- memset(p, 0, sizeof(PmaWriter));
+ memset(p, 0, sizeof(FileWriter));
return rc;
}
/*
-** Write value iVal encoded as a varint to the PMA. Return
+** Write value iVal encoded as a varint to the file-writer object. Return
** SQLITE_OK if successful, or an SQLite error code if an error occurs.
*/
-static void vdbePmaWriteVarint(PmaWriter *p, u64 iVal){
+static void fileWriterWriteVarint(FileWriter *p, u64 iVal){
int nByte;
u8 aByte[10];
nByte = sqlite3PutVarint(aByte, iVal);
- vdbePmaWriteBlob(p, aByte, nByte);
+ fileWriterWrite(p, aByte, nByte);
}
/*
-** Write the current contents of in-memory linked-list pList to a level-0
-** PMA in the temp file belonging to sub-task pTask. Return SQLITE_OK if
-** successful, or an SQLite error code otherwise.
+** Write the current contents of the in-memory linked-list to a PMA. Return
+** SQLITE_OK if successful, or an SQLite error code otherwise.
**
** The format of a PMA is:
**
@@ -81610,255 +74161,76 @@ static void vdbePmaWriteVarint(PmaWriter *p, u64 iVal){
** Each record consists of a varint followed by a blob of data (the
** key). The varint is the number of bytes in the blob of data.
*/
-static int vdbeSorterListToPMA(SortSubtask *pTask, SorterList *pList){
- sqlite3 *db = pTask->pSorter->db;
+static int vdbeSorterListToPMA(sqlite3 *db, const VdbeCursor *pCsr){
int rc = SQLITE_OK; /* Return code */
- PmaWriter writer; /* Object used to write to the file */
-
-#ifdef SQLITE_DEBUG
- /* Set iSz to the expected size of file pTask->file after writing the PMA.
- ** This is used by an assert() statement at the end of this function. */
- i64 iSz = pList->szPMA + sqlite3VarintLen(pList->szPMA) + pTask->file.iEof;
-#endif
+ VdbeSorter *pSorter = pCsr->pSorter;
+ FileWriter writer;
- vdbeSorterWorkDebug(pTask, "enter");
- memset(&writer, 0, sizeof(PmaWriter));
- assert( pList->szPMA>0 );
+ memset(&writer, 0, sizeof(FileWriter));
- /* If the first temporary PMA file has not been opened, open it now. */
- if( pTask->file.pFd==0 ){
- rc = vdbeSorterOpenTempFile(db, 0, &pTask->file.pFd);
- assert( rc!=SQLITE_OK || pTask->file.pFd );
- assert( pTask->file.iEof==0 );
- assert( pTask->nPMA==0 );
+ if( pSorter->nInMemory==0 ){
+ assert( pSorter->pRecord==0 );
+ return rc;
}
- /* Try to get the file to memory map */
- if( rc==SQLITE_OK ){
- vdbeSorterExtendFile(db, pTask->file.pFd, pTask->file.iEof+pList->szPMA+9);
- }
+ rc = vdbeSorterSort(pCsr);
- /* Sort the list */
- if( rc==SQLITE_OK ){
- rc = vdbeSorterSort(pTask, pList);
+ /* If the first temporary PMA file has not been opened, open it now. */
+ if( rc==SQLITE_OK && pSorter->pTemp1==0 ){
+ rc = vdbeSorterOpenTempFile(db, &pSorter->pTemp1);
+ assert( rc!=SQLITE_OK || pSorter->pTemp1 );
+ assert( pSorter->iWriteOff==0 );
+ assert( pSorter->nPMA==0 );
}
if( rc==SQLITE_OK ){
SorterRecord *p;
SorterRecord *pNext = 0;
- vdbePmaWriterInit(pTask->file.pFd, &writer, pTask->pSorter->pgsz,
- pTask->file.iEof);
- pTask->nPMA++;
- vdbePmaWriteVarint(&writer, pList->szPMA);
- for(p=pList->pList; p; p=pNext){
- pNext = p->u.pNext;
- vdbePmaWriteVarint(&writer, p->nVal);
- vdbePmaWriteBlob(&writer, SRVAL(p), p->nVal);
- if( pList->aMemory==0 ) sqlite3_free(p);
+ fileWriterInit(db, pSorter->pTemp1, &writer, pSorter->iWriteOff);
+ pSorter->nPMA++;
+ fileWriterWriteVarint(&writer, pSorter->nInMemory);
+ for(p=pSorter->pRecord; p; p=pNext){
+ pNext = p->pNext;
+ fileWriterWriteVarint(&writer, p->nVal);
+ fileWriterWrite(&writer, p->pVal, p->nVal);
+ sqlite3DbFree(db, p);
}
- pList->pList = p;
- rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof);
+ pSorter->pRecord = p;
+ rc = fileWriterFinish(db, &writer, &pSorter->iWriteOff);
}
- vdbeSorterWorkDebug(pTask, "exit");
- assert( rc!=SQLITE_OK || pList->pList==0 );
- assert( rc!=SQLITE_OK || pTask->file.iEof==iSz );
return rc;
}
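/*
** On disk a PMA is simply a size varint followed by a sequence of records,
** each of which is itself a length varint followed by the key bytes. A
** rough standalone sketch of appending one such record, using a stdio FILE*
** and the invented names demo_put_varint and demo_write_record (the varint
** here is a generic little-endian base-128 encoding, not SQLite's format):
*/
#include <stdio.h>
#include <stdint.h>

/* Encode v as a little-endian base-128 varint into aOut (at most 10 bytes)
** and return the encoded length. */
static int demo_put_varint(unsigned char *aOut, uint64_t v){
  int n = 0;
  do{
    aOut[n] = (unsigned char)(v & 0x7f);
    v >>= 7;
    if( v ) aOut[n] |= 0x80;
    n++;
  }while( v );
  return n;
}

/* Append one record - a length varint followed by nKey bytes of key data -
** to file f. Return 0 on success or -1 on a write error. */
static int demo_write_record(FILE *f, const void *pKey, uint64_t nKey){
  unsigned char aVarint[10];
  int n = demo_put_varint(aVarint, nKey);
  if( fwrite(aVarint, 1, (size_t)n, f)!=(size_t)n ) return -1;
  if( nKey && fwrite(pKey, 1, (size_t)nKey, f)!=(size_t)nKey ) return -1;
  return 0;
}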
/*
-** Advance the MergeEngine to its next entry.
-** Set *pbEof to true if there is no next entry because
-** the MergeEngine has reached the end of all its inputs.
-**
-** Return SQLITE_OK if successful or an error code if an error occurs.
-*/
-static int vdbeMergeEngineStep(
- MergeEngine *pMerger, /* The merge engine to advance to the next row */
- int *pbEof /* Set TRUE at EOF. Set false for more content */
-){
- int rc;
- int iPrev = pMerger->aTree[1];/* Index of PmaReader to advance */
- SortSubtask *pTask = pMerger->pTask;
-
- /* Advance the current PmaReader */
- rc = vdbePmaReaderNext(&pMerger->aReadr[iPrev]);
-
- /* Update contents of aTree[] */
- if( rc==SQLITE_OK ){
- int i; /* Index of aTree[] to recalculate */
- PmaReader *pReadr1; /* First PmaReader to compare */
- PmaReader *pReadr2; /* Second PmaReader to compare */
- int bCached = 0;
-
- /* Find the first two PmaReaders to compare. The one that was just
- ** advanced (iPrev) and the one next to it in the array. */
- pReadr1 = &pMerger->aReadr[(iPrev & 0xFFFE)];
- pReadr2 = &pMerger->aReadr[(iPrev | 0x0001)];
-
- for(i=(pMerger->nTree+iPrev)/2; i>0; i=i/2){
- /* Compare pReadr1 and pReadr2. Store the result in variable iRes. */
- int iRes;
- if( pReadr1->pFd==0 ){
- iRes = +1;
- }else if( pReadr2->pFd==0 ){
- iRes = -1;
- }else{
- iRes = pTask->xCompare(pTask, &bCached,
- pReadr1->aKey, pReadr1->nKey, pReadr2->aKey, pReadr2->nKey
- );
- }
-
- /* If pReadr1 contained the smaller value, set aTree[i] to its index.
- ** Then set pReadr2 to the next PmaReader to compare to pReadr1. In this
- ** case there is no cache of pReadr2 in pTask->pUnpacked, so set
- ** pKey2 to point to the record belonging to pReadr2.
- **
- ** Alternatively, if pReadr2 contains the smaller of the two values,
- ** set aTree[i] to its index and update pReadr1. If vdbeSorterCompare()
- ** was actually called above, then pTask->pUnpacked now contains
- ** a value equivalent to pReadr2. So set pKey2 to NULL to prevent
- ** vdbeSorterCompare() from decoding pReadr2 again.
- **
- ** If the two values were equal, then the value from the oldest
- ** PMA should be considered smaller. The VdbeSorter.aReadr[] array
- ** is sorted from oldest to newest, so pReadr1 contains older values
- ** than pReadr2 iff (pReadr1<pReadr2). */
- if( iRes<0 || (iRes==0 && pReadr1<pReadr2) ){
- pMerger->aTree[i] = (int)(pReadr1 - pMerger->aReadr);
- pReadr2 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ];
- bCached = 0;
- }else{
- if( pReadr1->pFd ) bCached = 0;
- pMerger->aTree[i] = (int)(pReadr2 - pMerger->aReadr);
- pReadr1 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ];
- }
- }
- *pbEof = (pMerger->aReadr[pMerger->aTree[1]].pFd==0);
- }
-
- return (rc==SQLITE_OK ? pTask->pUnpacked->errCode : rc);
-}
-
-#if SQLITE_MAX_WORKER_THREADS>0
-/*
-** The main routine for background threads that write level-0 PMAs.
-*/
-static void *vdbeSorterFlushThread(void *pCtx){
- SortSubtask *pTask = (SortSubtask*)pCtx;
- int rc; /* Return code */
- assert( pTask->bDone==0 );
- rc = vdbeSorterListToPMA(pTask, &pTask->list);
- pTask->bDone = 1;
- return SQLITE_INT_TO_PTR(rc);
-}
-#endif /* SQLITE_MAX_WORKER_THREADS>0 */
-
-/*
-** Flush the current contents of VdbeSorter.list to a new PMA, possibly
-** using a background thread.
-*/
-static int vdbeSorterFlushPMA(VdbeSorter *pSorter){
-#if SQLITE_MAX_WORKER_THREADS==0
- pSorter->bUsePMA = 1;
- return vdbeSorterListToPMA(&pSorter->aTask[0], &pSorter->list);
-#else
- int rc = SQLITE_OK;
- int i;
- SortSubtask *pTask = 0; /* Thread context used to create new PMA */
- int nWorker = (pSorter->nTask-1);
-
- /* Set the flag to indicate that at least one PMA has been written.
- ** Or will be, anyhow. */
- pSorter->bUsePMA = 1;
-
- /* Select a sub-task to sort and flush the current list of in-memory
- ** records to disk. If the sorter is running in multi-threaded mode,
- ** round-robin between the first (pSorter->nTask-1) tasks. Except, if
-  ** the background thread from a sub-task's previous turn is still running,
- ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy,
- ** fall back to using the final sub-task. The first (pSorter->nTask-1)
-  ** sub-tasks are preferred as they use background threads - the final
- ** sub-task uses the main thread. */
- for(i=0; i<nWorker; i++){
- int iTest = (pSorter->iPrev + i + 1) % nWorker;
- pTask = &pSorter->aTask[iTest];
- if( pTask->bDone ){
- rc = vdbeSorterJoinThread(pTask);
- }
- if( rc!=SQLITE_OK || pTask->pThread==0 ) break;
- }
-
- if( rc==SQLITE_OK ){
- if( i==nWorker ){
- /* Use the foreground thread for this operation */
- rc = vdbeSorterListToPMA(&pSorter->aTask[nWorker], &pSorter->list);
- }else{
- /* Launch a background thread for this operation */
- u8 *aMem = pTask->list.aMemory;
- void *pCtx = (void*)pTask;
-
- assert( pTask->pThread==0 && pTask->bDone==0 );
- assert( pTask->list.pList==0 );
- assert( pTask->list.aMemory==0 || pSorter->list.aMemory!=0 );
-
- pSorter->iPrev = (u8)(pTask - pSorter->aTask);
- pTask->list = pSorter->list;
- pSorter->list.pList = 0;
- pSorter->list.szPMA = 0;
- if( aMem ){
- pSorter->list.aMemory = aMem;
- pSorter->nMemory = sqlite3MallocSize(aMem);
- }else if( pSorter->list.aMemory ){
- pSorter->list.aMemory = sqlite3Malloc(pSorter->nMemory);
- if( !pSorter->list.aMemory ) return SQLITE_NOMEM;
- }
-
- rc = vdbeSorterCreateThread(pTask, vdbeSorterFlushThread, pCtx);
- }
- }
-
- return rc;
-#endif /* SQLITE_MAX_WORKER_THREADS!=0 */
-}
-
-/*
** Add a record to the sorter.
*/
SQLITE_PRIVATE int sqlite3VdbeSorterWrite(
- const VdbeCursor *pCsr, /* Sorter cursor */
+ sqlite3 *db, /* Database handle */
+ const VdbeCursor *pCsr, /* Sorter cursor */
Mem *pVal /* Memory cell containing record */
){
VdbeSorter *pSorter = pCsr->pSorter;
int rc = SQLITE_OK; /* Return Code */
SorterRecord *pNew; /* New list element */
- int bFlush; /* True to flush contents of memory to PMA */
- int nReq; /* Bytes of memory required */
- int nPMA; /* Bytes of PMA space required */
- int t; /* serial type of first record field */
+ assert( pSorter );
+ pSorter->nInMemory += sqlite3VarintLen(pVal->n) + pVal->n;
- getVarint32((const u8*)&pVal->z[1], t);
- if( t>0 && t<10 && t!=7 ){
- pSorter->typeMask &= SORTER_TYPE_INTEGER;
- }else if( t>10 && (t & 0x01) ){
- pSorter->typeMask &= SORTER_TYPE_TEXT;
+ pNew = (SorterRecord *)sqlite3DbMallocRaw(db, pVal->n + sizeof(SorterRecord));
+ if( pNew==0 ){
+ rc = SQLITE_NOMEM;
}else{
- pSorter->typeMask = 0;
+ pNew->pVal = (void *)&pNew[1];
+ memcpy(pNew->pVal, pVal->z, pVal->n);
+ pNew->nVal = pVal->n;
+ pNew->pNext = pSorter->pRecord;
+ pSorter->pRecord = pNew;
}
- assert( pSorter );
-
- /* Figure out whether or not the current contents of memory should be
- ** flushed to a PMA before continuing. If so, do so.
- **
- ** If using the single large allocation mode (pSorter->aMemory!=0), then
- ** flush the contents of memory to a new PMA if (a) at least one value is
- ** already in memory and (b) the new value will not fit in memory.
- **
- ** Or, if using separate allocations for each record, flush the contents
- ** of memory to a PMA if either of the following are true:
+ /* See if the contents of the sorter should now be written out. They
+ ** are written out when either of the following are true:
**
** * The total memory allocated for the in-memory list is greater
** than (page-size * cache-size), or
@@ -81866,809 +74238,161 @@ SQLITE_PRIVATE int sqlite3VdbeSorterWrite(
** * The total memory allocated for the in-memory list is greater
** than (page-size * 10) and sqlite3HeapNearlyFull() returns true.
*/
- nReq = pVal->n + sizeof(SorterRecord);
- nPMA = pVal->n + sqlite3VarintLen(pVal->n);
- if( pSorter->mxPmaSize ){
- if( pSorter->list.aMemory ){
- bFlush = pSorter->iMemory && (pSorter->iMemory+nReq) > pSorter->mxPmaSize;
- }else{
- bFlush = (
- (pSorter->list.szPMA > pSorter->mxPmaSize)
- || (pSorter->list.szPMA > pSorter->mnPmaSize && sqlite3HeapNearlyFull())
- );
- }
- if( bFlush ){
- rc = vdbeSorterFlushPMA(pSorter);
- pSorter->list.szPMA = 0;
- pSorter->iMemory = 0;
- assert( rc!=SQLITE_OK || pSorter->list.pList==0 );
- }
- }
-
- pSorter->list.szPMA += nPMA;
- if( nPMA>pSorter->mxKeysize ){
- pSorter->mxKeysize = nPMA;
- }
-
- if( pSorter->list.aMemory ){
- int nMin = pSorter->iMemory + nReq;
-
- if( nMin>pSorter->nMemory ){
- u8 *aNew;
- int nNew = pSorter->nMemory * 2;
- while( nNew < nMin ) nNew = nNew*2;
- if( nNew > pSorter->mxPmaSize ) nNew = pSorter->mxPmaSize;
- if( nNew < nMin ) nNew = nMin;
-
- aNew = sqlite3Realloc(pSorter->list.aMemory, nNew);
- if( !aNew ) return SQLITE_NOMEM;
- pSorter->list.pList = (SorterRecord*)(
- aNew + ((u8*)pSorter->list.pList - pSorter->list.aMemory)
- );
- pSorter->list.aMemory = aNew;
- pSorter->nMemory = nNew;
- }
-
- pNew = (SorterRecord*)&pSorter->list.aMemory[pSorter->iMemory];
- pSorter->iMemory += ROUND8(nReq);
- pNew->u.iNext = (int)((u8*)(pSorter->list.pList) - pSorter->list.aMemory);
- }else{
- pNew = (SorterRecord *)sqlite3Malloc(nReq);
- if( pNew==0 ){
- return SQLITE_NOMEM;
- }
- pNew->u.pNext = pSorter->list.pList;
- }
-
- memcpy(SRVAL(pNew), pVal->z, pVal->n);
- pNew->nVal = pVal->n;
- pSorter->list.pList = pNew;
-
- return rc;
-}
-
-/*
-** Read keys from pIncr->pMerger and populate pIncr->aFile[1]. The format
-** of the data stored in aFile[1] is the same as that used by regular PMAs,
-** except that the number-of-bytes varint is omitted from the start.
-*/
-static int vdbeIncrPopulate(IncrMerger *pIncr){
- int rc = SQLITE_OK;
- int rc2;
- i64 iStart = pIncr->iStartOff;
- SorterFile *pOut = &pIncr->aFile[1];
- SortSubtask *pTask = pIncr->pTask;
- MergeEngine *pMerger = pIncr->pMerger;
- PmaWriter writer;
- assert( pIncr->bEof==0 );
-
- vdbeSorterPopulateDebug(pTask, "enter");
-
- vdbePmaWriterInit(pOut->pFd, &writer, pTask->pSorter->pgsz, iStart);
- while( rc==SQLITE_OK ){
- int dummy;
- PmaReader *pReader = &pMerger->aReadr[ pMerger->aTree[1] ];
- int nKey = pReader->nKey;
- i64 iEof = writer.iWriteOff + writer.iBufEnd;
-
- /* Check if the output file is full or if the input has been exhausted.
- ** In either case exit the loop. */
- if( pReader->pFd==0 ) break;
- if( (iEof + nKey + sqlite3VarintLen(nKey))>(iStart + pIncr->mxSz) ) break;
-
- /* Write the next key to the output. */
- vdbePmaWriteVarint(&writer, nKey);
- vdbePmaWriteBlob(&writer, pReader->aKey, nKey);
- assert( pIncr->pMerger->pTask==pTask );
- rc = vdbeMergeEngineStep(pIncr->pMerger, &dummy);
- }
-
- rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof);
- if( rc==SQLITE_OK ) rc = rc2;
- vdbeSorterPopulateDebug(pTask, "exit");
- return rc;
-}
-
-#if SQLITE_MAX_WORKER_THREADS>0
-/*
-** The main routine for background threads that populate aFile[1] of
-** multi-threaded IncrMerger objects.
-*/
-static void *vdbeIncrPopulateThread(void *pCtx){
- IncrMerger *pIncr = (IncrMerger*)pCtx;
- void *pRet = SQLITE_INT_TO_PTR( vdbeIncrPopulate(pIncr) );
- pIncr->pTask->bDone = 1;
- return pRet;
-}
-
-/*
-** Launch a background thread to populate aFile[1] of pIncr.
-*/
-static int vdbeIncrBgPopulate(IncrMerger *pIncr){
- void *p = (void*)pIncr;
- assert( pIncr->bUseThread );
- return vdbeSorterCreateThread(pIncr->pTask, vdbeIncrPopulateThread, p);
-}
-#endif
-
-/*
-** This function is called when the PmaReader corresponding to pIncr has
-** finished reading the contents of aFile[0]. Its purpose is to "refill"
-** aFile[0] such that the PmaReader should start rereading it from the
-** beginning.
-**
-** For single-threaded objects, this is accomplished by literally reading
-** keys from pIncr->pMerger and repopulating aFile[0].
-**
-** For multi-threaded objects, all that is required is to wait until the
-** background thread is finished (if it is not already) and then swap
-** aFile[0] and aFile[1] in place. If the contents of pMerger have not
-** been exhausted, this function also launches a new background thread
-** to populate the new aFile[1].
-**
-** SQLITE_OK is returned on success, or an SQLite error code otherwise.
-*/
-static int vdbeIncrSwap(IncrMerger *pIncr){
- int rc = SQLITE_OK;
-
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pIncr->bUseThread ){
- rc = vdbeSorterJoinThread(pIncr->pTask);
-
- if( rc==SQLITE_OK ){
- SorterFile f0 = pIncr->aFile[0];
- pIncr->aFile[0] = pIncr->aFile[1];
- pIncr->aFile[1] = f0;
- }
-
- if( rc==SQLITE_OK ){
- if( pIncr->aFile[0].iEof==pIncr->iStartOff ){
- pIncr->bEof = 1;
- }else{
- rc = vdbeIncrBgPopulate(pIncr);
- }
- }
- }else
+ if( rc==SQLITE_OK && pSorter->mxPmaSize>0 && (
+ (pSorter->nInMemory>pSorter->mxPmaSize)
+ || (pSorter->nInMemory>pSorter->mnPmaSize && sqlite3HeapNearlyFull())
+ )){
+#ifdef SQLITE_DEBUG
+ i64 nExpect = pSorter->iWriteOff
+ + sqlite3VarintLen(pSorter->nInMemory)
+ + pSorter->nInMemory;
#endif
- {
- rc = vdbeIncrPopulate(pIncr);
- pIncr->aFile[0] = pIncr->aFile[1];
- if( pIncr->aFile[0].iEof==pIncr->iStartOff ){
- pIncr->bEof = 1;
- }
+ rc = vdbeSorterListToPMA(db, pCsr);
+ pSorter->nInMemory = 0;
+ assert( rc!=SQLITE_OK || (nExpect==pSorter->iWriteOff) );
}
return rc;
}
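In the restored (+) version of sqlite3VdbeSorterWrite() that ends above, a flush to a new PMA is triggered when the in-memory total exceeds mxPmaSize outright, or exceeds the smaller mnPmaSize while sqlite3HeapNearlyFull() reports memory pressure. A minimal sketch of just that predicate, with an integer flag standing in for sqlite3HeapNearlyFull() and threshold values chosen only for the demo:

#include <stdio.h>

/* Decide whether the in-memory sorter list should be flushed to a PMA.
** nInMemory: bytes currently buffered; mnPma/mxPma: soft and hard limits;
** heapNearlyFull: non-zero when the allocator is under pressure. Mirrors
** the two conditions listed in the comment at the top of this hunk. */
static int sorterShouldFlush(int nInMemory, int mnPma, int mxPma,
                             int heapNearlyFull){
  if( mxPma<=0 ) return 0;                           /* In-memory only */
  if( nInMemory>mxPma ) return 1;                    /* Hard limit exceeded */
  if( nInMemory>mnPma && heapNearlyFull ) return 1;  /* Soft limit + pressure */
  return 0;
}

int main(void){
  printf("%d\n", sorterShouldFlush(900, 250, 1000, 0));  /* 0: under hard limit */
  printf("%d\n", sorterShouldFlush(900, 250, 1000, 1));  /* 1: soft limit + pressure */
  printf("%d\n", sorterShouldFlush(1200, 250, 1000, 0)); /* 1: hard limit */
  return 0;
}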
/*
-** Allocate and return a new IncrMerger object to read data from pMerger.
-**
-** If an OOM condition is encountered, return NULL. In this case free the
-** pMerger argument before returning.
+** Helper function for sqlite3VdbeSorterRewind().
*/
-static int vdbeIncrMergerNew(
- SortSubtask *pTask, /* The thread that will be using the new IncrMerger */
- MergeEngine *pMerger, /* The MergeEngine that the IncrMerger will control */
- IncrMerger **ppOut /* Write the new IncrMerger here */
-){
- int rc = SQLITE_OK;
- IncrMerger *pIncr = *ppOut = (IncrMerger*)
- (sqlite3FaultSim(100) ? 0 : sqlite3MallocZero(sizeof(*pIncr)));
- if( pIncr ){
- pIncr->pMerger = pMerger;
- pIncr->pTask = pTask;
- pIncr->mxSz = MAX(pTask->pSorter->mxKeysize+9,pTask->pSorter->mxPmaSize/2);
- pTask->file2.iEof += pIncr->mxSz;
- }else{
- vdbeMergeEngineFree(pMerger);
- rc = SQLITE_NOMEM;
- }
- return rc;
-}
-
-#if SQLITE_MAX_WORKER_THREADS>0
-/*
-** Set the "use-threads" flag on object pIncr.
-*/
-static void vdbeIncrMergerSetThreads(IncrMerger *pIncr){
- pIncr->bUseThread = 1;
- pIncr->pTask->file2.iEof -= pIncr->mxSz;
-}
-#endif /* SQLITE_MAX_WORKER_THREADS>0 */
-
-
-
-/*
-** Recompute pMerger->aTree[iOut] by comparing the next keys on the
-** two PmaReaders that feed that entry. Neither of the PmaReaders
-** are advanced. This routine merely does the comparison.
-*/
-static void vdbeMergeEngineCompare(
- MergeEngine *pMerger, /* Merge engine containing PmaReaders to compare */
- int iOut /* Store the result in pMerger->aTree[iOut] */
-){
- int i1;
- int i2;
- int iRes;
- PmaReader *p1;
- PmaReader *p2;
-
- assert( iOut<pMerger->nTree && iOut>0 );
-
- if( iOut>=(pMerger->nTree/2) ){
- i1 = (iOut - pMerger->nTree/2) * 2;
- i2 = i1 + 1;
- }else{
- i1 = pMerger->aTree[iOut*2];
- i2 = pMerger->aTree[iOut*2+1];
- }
-
- p1 = &pMerger->aReadr[i1];
- p2 = &pMerger->aReadr[i2];
-
- if( p1->pFd==0 ){
- iRes = i2;
- }else if( p2->pFd==0 ){
- iRes = i1;
- }else{
- SortSubtask *pTask = pMerger->pTask;
- int bCached = 0;
- int res;
- assert( pTask->pUnpacked!=0 ); /* from vdbeSortSubtaskMain() */
- res = pTask->xCompare(
- pTask, &bCached, p1->aKey, p1->nKey, p2->aKey, p2->nKey
- );
- if( res<=0 ){
- iRes = i1;
- }else{
- iRes = i2;
- }
- }
-
- pMerger->aTree[iOut] = iRes;
-}
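vdbeMergeEngineCompare() above is one node of a tournament tree: nodes in the lower half of aTree[] compare two readers directly, interior nodes compare the winners recorded for their children, and a reader at EOF always loses. The sketch below reproduces that indexing over plain arrays; the Tourney type, the NULL-means-EOF convention and the strcmp() comparator are stand-ins, not the SQLite structures:

#include <string.h>

/* Tournament tree over key sources. aKey[] has nTree entries (readers are
** padded up to a power of two; unused slots stay NULL, i.e. at EOF).
** aTree[1..nTree-1] hold winning reader indices; aTree[1] is the overall
** winner. */
typedef struct Tourney {
  int nTree;            /* Number of reader slots, a power of two */
  const char **aKey;    /* Current key of each reader, NULL at EOF */
  int *aTree;           /* Winning reader index per node */
} Tourney;

static void tourneyCompare(Tourney *p, int iOut){
  int i1, i2, iRes;
  if( iOut>=p->nTree/2 ){            /* Leaf node: children are readers */
    i1 = (iOut - p->nTree/2) * 2;
    i2 = i1 + 1;
  }else{                             /* Interior node: children are nodes */
    i1 = p->aTree[iOut*2];
    i2 = p->aTree[iOut*2+1];
  }
  if( p->aKey[i1]==0 ){
    iRes = i2;                       /* Reader i1 at EOF: i2 wins */
  }else if( p->aKey[i2]==0 ){
    iRes = i1;                       /* Reader i2 at EOF: i1 wins */
  }else{
    iRes = (strcmp(p->aKey[i1], p->aKey[i2])<=0) ? i1 : i2;
  }
  p->aTree[iOut] = iRes;
}

Rebuilding the whole tree bottom-up, as vdbeMergeEngineInit() does further down, is then just for(i=nTree-1; i>0; i--) tourneyCompare(p, i);.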
-
-/*
-** Allowed values for the eMode parameter to vdbeMergeEngineInit()
-** and vdbePmaReaderIncrMergeInit().
-**
-** Only INCRINIT_NORMAL is valid in single-threaded builds (when
-** SQLITE_MAX_WORKER_THREADS==0). The other values are only used
-** when there exists one or more separate worker threads.
-*/
-#define INCRINIT_NORMAL 0
-#define INCRINIT_TASK 1
-#define INCRINIT_ROOT 2
-
-/*
-** Forward reference required as the vdbeIncrMergeInit() and
-** vdbePmaReaderIncrInit() routines are called mutually recursively when
-** building a merge tree.
-*/
-static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode);
-
-/*
-** Initialize the MergeEngine object passed as the second argument. Once this
-** function returns, the first key of merged data may be read from the
-** MergeEngine object in the usual fashion.
-**
-** If argument eMode is INCRINIT_ROOT, then it is assumed that any IncrMerge
-** objects attached to the PmaReader objects that the merger reads from have
-** already been populated, but that they have not yet populated aFile[0] and
-** set the PmaReader objects up to read from it. In this case all that is
-** required is to call vdbePmaReaderNext() on each PmaReader to point it at
-** its first key.
-**
-** Otherwise, if eMode is any value other than INCRINIT_ROOT, then use
-** vdbePmaReaderIncrMergeInit() to initialize each PmaReader that feeds data
-** to pMerger.
-**
-** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
-*/
-static int vdbeMergeEngineInit(
- SortSubtask *pTask, /* Thread that will run pMerger */
- MergeEngine *pMerger, /* MergeEngine to initialize */
- int eMode /* One of the INCRINIT_XXX constants */
+static int vdbeSorterInitMerge(
+ sqlite3 *db, /* Database handle */
+ const VdbeCursor *pCsr, /* Cursor handle for this sorter */
+ i64 *pnByte /* Sum of bytes in all opened PMAs */
){
+ VdbeSorter *pSorter = pCsr->pSorter;
int rc = SQLITE_OK; /* Return code */
- int i; /* For looping over PmaReader objects */
- int nTree = pMerger->nTree;
-
- /* eMode is always INCRINIT_NORMAL in single-threaded mode */
- assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );
-
- /* Verify that the MergeEngine is assigned to a single thread */
- assert( pMerger->pTask==0 );
- pMerger->pTask = pTask;
-
- for(i=0; i<nTree; i++){
- if( SQLITE_MAX_WORKER_THREADS>0 && eMode==INCRINIT_ROOT ){
- /* PmaReaders should be normally initialized in order, as if they are
- ** reading from the same temp file this makes for more linear file IO.
- ** However, in the INCRINIT_ROOT case, if PmaReader aReadr[nTask-1] is
- ** in use it will block the vdbePmaReaderNext() call while it uses
- ** the main thread to fill its buffer. So calling PmaReaderNext()
- ** on this PmaReader before any of the multi-threaded PmaReaders takes
- ** better advantage of multi-processor hardware. */
- rc = vdbePmaReaderNext(&pMerger->aReadr[nTree-i-1]);
- }else{
- rc = vdbePmaReaderIncrInit(&pMerger->aReadr[i], INCRINIT_NORMAL);
- }
- if( rc!=SQLITE_OK ) return rc;
- }
+ int i; /* Used to iterate through aIter[] */
+ i64 nByte = 0; /* Total bytes in all opened PMAs */
- for(i=pMerger->nTree-1; i>0; i--){
- vdbeMergeEngineCompare(pMerger, i);
+ /* Initialize the iterators. */
+ for(i=0; i<SORTER_MAX_MERGE_COUNT; i++){
+ VdbeSorterIter *pIter = &pSorter->aIter[i];
+ rc = vdbeSorterIterInit(db, pSorter, pSorter->iReadOff, pIter, &nByte);
+ pSorter->iReadOff = pIter->iEof;
+ assert( rc!=SQLITE_OK || pSorter->iReadOff<=pSorter->iWriteOff );
+ if( rc!=SQLITE_OK || pSorter->iReadOff>=pSorter->iWriteOff ) break;
}
- return pTask->pUnpacked->errCode;
-}
-/*
-** The PmaReader passed as the first argument is guaranteed to be an
-** incremental-reader (pReadr->pIncr!=0). This function serves to open
-** and/or initialize the temp file related fields of the IncrMerge
-** object at (pReadr->pIncr).
-**
-** If argument eMode is set to INCRINIT_NORMAL, then all PmaReaders
-** in the sub-tree headed by pReadr are also initialized. Data is then
-** loaded into the buffers belonging to pReadr and it is set to point to
-** the first key in its range.
-**
-** If argument eMode is set to INCRINIT_TASK, then pReadr is guaranteed
-** to be a multi-threaded PmaReader and this function is being called in a
-** background thread. In this case all PmaReaders in the sub-tree are
-** initialized as for INCRINIT_NORMAL and the aFile[1] buffer belonging to
-** pReadr is populated. However, pReadr itself is not set up to point
-** to its first key. A call to vdbePmaReaderNext() is still required to do
-** that.
-**
-** The reason this function does not call vdbePmaReaderNext() immediately
-** in the INCRINIT_TASK case is that vdbePmaReaderNext() assumes that it has
-** to block on thread (pTask->thread) before accessing aFile[1]. But, since
-** this entire function is being run by thread (pTask->thread), that will
-** lead to the current background thread attempting to join itself.
-**
-** Finally, if argument eMode is set to INCRINIT_ROOT, it may be assumed
-** that pReadr->pIncr is a multi-threaded IncrMerge objects, and that all
-** child-trees have already been initialized using IncrInit(INCRINIT_TASK).
-** In this case vdbePmaReaderNext() is called on all child PmaReaders and
-** the current PmaReader set to point to the first key in its range.
-**
-** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
-*/
-static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){
- int rc = SQLITE_OK;
- IncrMerger *pIncr = pReadr->pIncr;
- SortSubtask *pTask = pIncr->pTask;
- sqlite3 *db = pTask->pSorter->db;
-
- /* eMode is always INCRINIT_NORMAL in single-threaded mode */
- assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );
-
- rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode);
-
- /* Set up the required files for pIncr. A multi-theaded IncrMerge object
- ** requires two temp files to itself, whereas a single-threaded object
- ** only requires a region of pTask->file2. */
- if( rc==SQLITE_OK ){
- int mxSz = pIncr->mxSz;
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pIncr->bUseThread ){
- rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[0].pFd);
- if( rc==SQLITE_OK ){
- rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[1].pFd);
- }
- }else
-#endif
- /*if( !pIncr->bUseThread )*/{
- if( pTask->file2.pFd==0 ){
- assert( pTask->file2.iEof>0 );
- rc = vdbeSorterOpenTempFile(db, pTask->file2.iEof, &pTask->file2.pFd);
- pTask->file2.iEof = 0;
- }
- if( rc==SQLITE_OK ){
- pIncr->aFile[1].pFd = pTask->file2.pFd;
- pIncr->iStartOff = pTask->file2.iEof;
- pTask->file2.iEof += mxSz;
- }
- }
- }
-
-#if SQLITE_MAX_WORKER_THREADS>0
- if( rc==SQLITE_OK && pIncr->bUseThread ){
- /* Use the current thread to populate aFile[1], even though this
- ** PmaReader is multi-threaded. If this is an INCRINIT_TASK object,
- ** then this function is already running in background thread
- ** pIncr->pTask->thread.
- **
- ** If this is the INCRINIT_ROOT object, then it is running in the
- ** main VDBE thread. But that is Ok, as that thread cannot return
- ** control to the VDBE or proceed with anything useful until the
- ** first results are ready from this merger object anyway.
- */
- assert( eMode==INCRINIT_ROOT || eMode==INCRINIT_TASK );
- rc = vdbeIncrPopulate(pIncr);
- }
-#endif
-
- if( rc==SQLITE_OK && (SQLITE_MAX_WORKER_THREADS==0 || eMode!=INCRINIT_TASK) ){
- rc = vdbePmaReaderNext(pReadr);
+ /* Initialize the aTree[] array. */
+ for(i=pSorter->nTree-1; rc==SQLITE_OK && i>0; i--){
+ rc = vdbeSorterDoCompare(pCsr, i);
}
+ *pnByte = nByte;
return rc;
}
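The restored vdbeSorterInitMerge() above opens an iterator on up to SORTER_MAX_MERGE_COUNT consecutive PMAs, pushing the sorter's read offset past each one and stopping early once the offset reaches the end of the written region (it also sums the PMA payload bytes, which this sketch omits). A schematic version of that loop, assuming a fan-in of 16 and an aPmaEof[] array standing in for the end offsets that vdbeSorterIterInit() would report:

#define MAX_MERGE_COUNT 16       /* Assumed value of SORTER_MAX_MERGE_COUNT */

/* Open an iterator on up to MAX_MERGE_COUNT PMAs stored back to back in a
** temp file. aPmaEof[i] is the end offset of the i'th unread PMA;
** iWriteOff is the end of the written region. Advances *piReadOff past
** each PMA opened and returns how many were opened. */
static int initMergePass(const long long *aPmaEof, int nPma,
                         long long *piReadOff, long long iWriteOff){
  int i;
  for(i=0; i<MAX_MERGE_COUNT && i<nPma; i++){
    *piReadOff = aPmaEof[i];                    /* Iterator i ends here */
    if( *piReadOff>=iWriteOff ){ i++; break; }  /* Nothing left unread */
  }
  return i;
}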
-#if SQLITE_MAX_WORKER_THREADS>0
/*
-** The main routine for vdbePmaReaderIncrMergeInit() operations run in
-** background threads.
+** Once the sorter has been populated, this function is called to prepare
+** for iterating through its contents in sorted order.
*/
-static void *vdbePmaReaderBgIncrInit(void *pCtx){
- PmaReader *pReader = (PmaReader*)pCtx;
- void *pRet = SQLITE_INT_TO_PTR(
- vdbePmaReaderIncrMergeInit(pReader,INCRINIT_TASK)
- );
- pReader->pIncr->pTask->bDone = 1;
- return pRet;
-}
-#endif
-
-/*
-** If the PmaReader passed as the first argument is not an incremental-reader
-** (if pReadr->pIncr==0), then this function is a no-op. Otherwise, it invokes
-** the vdbePmaReaderIncrMergeInit() function with the parameters passed to
-** this routine to initialize the incremental merge.
-**
-** If the IncrMerger object is multi-threaded (IncrMerger.bUseThread==1),
-** then a background thread is launched to call vdbePmaReaderIncrMergeInit().
-** Or, if the IncrMerger is single threaded, the same function is called
-** using the current thread.
-*/
-static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode){
- IncrMerger *pIncr = pReadr->pIncr; /* Incremental merger */
- int rc = SQLITE_OK; /* Return code */
- if( pIncr ){
-#if SQLITE_MAX_WORKER_THREADS>0
- assert( pIncr->bUseThread==0 || eMode==INCRINIT_TASK );
- if( pIncr->bUseThread ){
- void *pCtx = (void*)pReadr;
- rc = vdbeSorterCreateThread(pIncr->pTask, vdbePmaReaderBgIncrInit, pCtx);
- }else
-#endif
- {
- rc = vdbePmaReaderIncrMergeInit(pReadr, eMode);
- }
- }
- return rc;
-}
-
-/*
-** Allocate a new MergeEngine object to merge the contents of nPMA level-0
-** PMAs from pTask->file. If no error occurs, set *ppOut to point to
-** the new object and return SQLITE_OK. Or, if an error does occur, set *ppOut
-** to NULL and return an SQLite error code.
-**
-** When this function is called, *piOffset is set to the offset of the
-** first PMA to read from pTask->file. Assuming no error occurs, it is
-** set to the offset immediately following the last byte of the last
-** PMA before returning. If an error does occur, then the final value of
-** *piOffset is undefined.
-*/
-static int vdbeMergeEngineLevel0(
- SortSubtask *pTask, /* Sorter task to read from */
- int nPMA, /* Number of PMAs to read */
- i64 *piOffset, /* IN/OUT: Readr offset in pTask->file */
- MergeEngine **ppOut /* OUT: New merge-engine */
-){
- MergeEngine *pNew; /* Merge engine to return */
- i64 iOff = *piOffset;
- int i;
- int rc = SQLITE_OK;
-
- *ppOut = pNew = vdbeMergeEngineNew(nPMA);
- if( pNew==0 ) rc = SQLITE_NOMEM;
+SQLITE_PRIVATE int sqlite3VdbeSorterRewind(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){
+ VdbeSorter *pSorter = pCsr->pSorter;
+ int rc; /* Return code */
+ sqlite3_file *pTemp2 = 0; /* Second temp file to use */
+ i64 iWrite2 = 0; /* Write offset for pTemp2 */
+ int nIter; /* Number of iterators used */
+ int nByte; /* Bytes of space required for aIter/aTree */
+ int N = 2; /* Power of 2 >= nIter */
- for(i=0; i<nPMA && rc==SQLITE_OK; i++){
- i64 nDummy;
- PmaReader *pReadr = &pNew->aReadr[i];
- rc = vdbePmaReaderInit(pTask, &pTask->file, iOff, pReadr, &nDummy);
- iOff = pReadr->iEof;
- }
+ assert( pSorter );
- if( rc!=SQLITE_OK ){
- vdbeMergeEngineFree(pNew);
- *ppOut = 0;
+ /* If no data has been written to disk, then do not do so now. Instead,
+ ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly
+ ** from the in-memory list. */
+ if( pSorter->nPMA==0 ){
+ *pbEof = !pSorter->pRecord;
+ assert( pSorter->aTree==0 );
+ return vdbeSorterSort(pCsr);
}
- *piOffset = iOff;
- return rc;
-}
-/*
-** Return the depth of a tree comprising nPMA PMAs, assuming a fanout of
-** SORTER_MAX_MERGE_COUNT. The returned value does not include leaf nodes.
-**
-** i.e.
-**
-** nPMA<=16 -> TreeDepth() == 0
-** nPMA<=256 -> TreeDepth() == 1
-** nPMA<=65536 -> TreeDepth() == 2
-*/
-static int vdbeSorterTreeDepth(int nPMA){
- int nDepth = 0;
- i64 nDiv = SORTER_MAX_MERGE_COUNT;
- while( nDiv < (i64)nPMA ){
- nDiv = nDiv * SORTER_MAX_MERGE_COUNT;
- nDepth++;
- }
- return nDepth;
-}
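vdbeSorterTreeDepth() above computes how many interior merge levels are needed for nPMA leaf PMAs at a fan-in of SORTER_MAX_MERGE_COUNT (16), as the comment's table shows. A tiny standalone trace of the same loop; the fan-in value of 16 is the only assumption carried over:

#include <stdio.h>

/* Same loop as vdbeSorterTreeDepth(), with the fan-in as a parameter. */
static int treeDepth(int nPma, int fanIn){
  int nDepth = 0;
  long long nDiv = fanIn;
  while( nDiv < nPma ){
    nDiv = nDiv * fanIn;
    nDepth++;
  }
  return nDepth;
}

int main(void){
  /* Prints 0 1 1 2: 16 PMAs fit one merge, 17..256 need one extra level,
  ** and 257..65536 need two. */
  printf("%d %d %d %d\n",
         treeDepth(16, 16), treeDepth(17, 16),
         treeDepth(256, 16), treeDepth(500, 16));
  return 0;
}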
+ /* Write the current in-memory list to a PMA. */
+ rc = vdbeSorterListToPMA(db, pCsr);
+ if( rc!=SQLITE_OK ) return rc;
-/*
-** pRoot is the root of an incremental merge-tree with depth nDepth (according
-** to vdbeSorterTreeDepth()). pLeaf is the iSeq'th leaf to be added to the
-** tree, counting from zero. This function adds pLeaf to the tree.
-**
-** If successful, SQLITE_OK is returned. If an error occurs, an SQLite error
-** code is returned and pLeaf is freed.
-*/
-static int vdbeSorterAddToTree(
- SortSubtask *pTask, /* Task context */
- int nDepth, /* Depth of tree according to TreeDepth() */
- int iSeq, /* Sequence number of leaf within tree */
- MergeEngine *pRoot, /* Root of tree */
- MergeEngine *pLeaf /* Leaf to add to tree */
-){
- int rc = SQLITE_OK;
- int nDiv = 1;
- int i;
- MergeEngine *p = pRoot;
- IncrMerger *pIncr;
+ /* Allocate space for aIter[] and aTree[]. */
+ nIter = pSorter->nPMA;
+ if( nIter>SORTER_MAX_MERGE_COUNT ) nIter = SORTER_MAX_MERGE_COUNT;
+ assert( nIter>0 );
+ while( N<nIter ) N += N;
+ nByte = N * (sizeof(int) + sizeof(VdbeSorterIter));
+ pSorter->aIter = (VdbeSorterIter *)sqlite3DbMallocZero(db, nByte);
+ if( !pSorter->aIter ) return SQLITE_NOMEM;
+ pSorter->aTree = (int *)&pSorter->aIter[N];
+ pSorter->nTree = N;
- rc = vdbeIncrMergerNew(pTask, pLeaf, &pIncr);
+ do {
+ int iNew; /* Index of new, merged, PMA */
- for(i=1; i<nDepth; i++){
- nDiv = nDiv * SORTER_MAX_MERGE_COUNT;
- }
+ for(iNew=0;
+ rc==SQLITE_OK && iNew*SORTER_MAX_MERGE_COUNT<pSorter->nPMA;
+ iNew++
+ ){
+ int rc2; /* Return code from fileWriterFinish() */
+ FileWriter writer; /* Object used to write to disk */
+ i64 nWrite; /* Number of bytes in new PMA */
- for(i=1; i<nDepth && rc==SQLITE_OK; i++){
- int iIter = (iSeq / nDiv) % SORTER_MAX_MERGE_COUNT;
- PmaReader *pReadr = &p->aReadr[iIter];
+ memset(&writer, 0, sizeof(FileWriter));
- if( pReadr->pIncr==0 ){
- MergeEngine *pNew = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT);
- if( pNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- rc = vdbeIncrMergerNew(pTask, pNew, &pReadr->pIncr);
+ /* If there are SORTER_MAX_MERGE_COUNT or fewer PMAs in file pTemp1,
+ ** initialize an iterator for each of them and break out of the loop.
+ ** These iterators will be incrementally merged as the VDBE layer calls
+ ** sqlite3VdbeSorterNext().
+ **
+ ** Otherwise, if pTemp1 contains more than SORTER_MAX_MERGE_COUNT PMAs,
+ ** initialize iterators for SORTER_MAX_MERGE_COUNT of them. These PMAs
+ ** are merged into a single PMA that is written to file pTemp2.
+ */
+ rc = vdbeSorterInitMerge(db, pCsr, &nWrite);
+ assert( rc!=SQLITE_OK || pSorter->aIter[ pSorter->aTree[1] ].pFile );
+ if( rc!=SQLITE_OK || pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){
+ break;
}
- }
- if( rc==SQLITE_OK ){
- p = pReadr->pIncr->pMerger;
- nDiv = nDiv / SORTER_MAX_MERGE_COUNT;
- }
- }
-
- if( rc==SQLITE_OK ){
- p->aReadr[iSeq % SORTER_MAX_MERGE_COUNT].pIncr = pIncr;
- }else{
- vdbeIncrFree(pIncr);
- }
- return rc;
-}
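vdbeSorterAddToTree() above locates the slot for leaf number iSeq by peeling off base-16 digits of iSeq, most significant level first: each interior level picks child (iSeq/nDiv)%SORTER_MAX_MERGE_COUNT and the final slot is iSeq%SORTER_MAX_MERGE_COUNT. A short standalone trace of that arithmetic, again assuming the fan-in of 16:

#include <stdio.h>

/* Print the path taken through a merge tree of depth nDepth for leaf
** number iSeq, using the same arithmetic as vdbeSorterAddToTree(). */
static void leafPath(int iSeq, int nDepth, int fanIn){
  int nDiv = 1, i;
  for(i=1; i<nDepth; i++) nDiv *= fanIn;          /* fanIn^(nDepth-1) */
  printf("leaf %d:", iSeq);
  for(i=1; i<nDepth; i++){
    printf(" level %d -> child %d;", i, (iSeq/nDiv)%fanIn);
    nDiv /= fanIn;
  }
  printf(" final slot %d\n", iSeq%fanIn);
}

int main(void){
  /* For a depth-2 tree: leaf 37 sits under child 2 of the root, slot 5
  ** (37 = 2*16 + 5). */
  leafPath(37, 2, 16);
  leafPath(300, 3, 16);   /* 300 = 1*256 + 2*16 + 12 */
  return 0;
}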
-/*
-** This function is called as part of a SorterRewind() operation on a sorter
-** that has already written two or more level-0 PMAs to one or more temp
-** files. It builds a tree of MergeEngine/IncrMerger/PmaReader objects that
-** can be used to incrementally merge all PMAs on disk.
-**
-** If successful, SQLITE_OK is returned and *ppOut set to point to the
-** MergeEngine object at the root of the tree before returning. Or, if an
-** error occurs, an SQLite error code is returned and the final value
-** of *ppOut is undefined.
-*/
-static int vdbeSorterMergeTreeBuild(
- VdbeSorter *pSorter, /* The VDBE cursor that implements the sort */
- MergeEngine **ppOut /* Write the MergeEngine here */
-){
- MergeEngine *pMain = 0;
- int rc = SQLITE_OK;
- int iTask;
-
-#if SQLITE_MAX_WORKER_THREADS>0
- /* If the sorter uses more than one task, then create the top-level
- ** MergeEngine here. This MergeEngine will read data from exactly
- ** one PmaReader per sub-task. */
- assert( pSorter->bUseThreads || pSorter->nTask==1 );
- if( pSorter->nTask>1 ){
- pMain = vdbeMergeEngineNew(pSorter->nTask);
- if( pMain==0 ) rc = SQLITE_NOMEM;
- }
-#endif
-
- for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){
- SortSubtask *pTask = &pSorter->aTask[iTask];
- assert( pTask->nPMA>0 || SQLITE_MAX_WORKER_THREADS>0 );
- if( SQLITE_MAX_WORKER_THREADS==0 || pTask->nPMA ){
- MergeEngine *pRoot = 0; /* Root node of tree for this task */
- int nDepth = vdbeSorterTreeDepth(pTask->nPMA);
- i64 iReadOff = 0;
-
- if( pTask->nPMA<=SORTER_MAX_MERGE_COUNT ){
- rc = vdbeMergeEngineLevel0(pTask, pTask->nPMA, &iReadOff, &pRoot);
- }else{
- int i;
- int iSeq = 0;
- pRoot = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT);
- if( pRoot==0 ) rc = SQLITE_NOMEM;
- for(i=0; i<pTask->nPMA && rc==SQLITE_OK; i += SORTER_MAX_MERGE_COUNT){
- MergeEngine *pMerger = 0; /* New level-0 PMA merger */
- int nReader; /* Number of level-0 PMAs to merge */
-
- nReader = MIN(pTask->nPMA - i, SORTER_MAX_MERGE_COUNT);
- rc = vdbeMergeEngineLevel0(pTask, nReader, &iReadOff, &pMerger);
- if( rc==SQLITE_OK ){
- rc = vdbeSorterAddToTree(pTask, nDepth, iSeq++, pRoot, pMerger);
- }
- }
+ /* Open the second temp file, if it is not already open. */
+ if( pTemp2==0 ){
+ assert( iWrite2==0 );
+ rc = vdbeSorterOpenTempFile(db, &pTemp2);
}
if( rc==SQLITE_OK ){
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pMain!=0 ){
- rc = vdbeIncrMergerNew(pTask, pRoot, &pMain->aReadr[iTask].pIncr);
- }else
-#endif
- {
- assert( pMain==0 );
- pMain = pRoot;
- }
- }else{
- vdbeMergeEngineFree(pRoot);
- }
- }
- }
+ int bEof = 0;
+ fileWriterInit(db, pTemp2, &writer, iWrite2);
+ fileWriterWriteVarint(&writer, nWrite);
+ while( rc==SQLITE_OK && bEof==0 ){
+ VdbeSorterIter *pIter = &pSorter->aIter[ pSorter->aTree[1] ];
+ assert( pIter->pFile );
- if( rc!=SQLITE_OK ){
- vdbeMergeEngineFree(pMain);
- pMain = 0;
- }
- *ppOut = pMain;
- return rc;
-}
-
-/*
-** This function is called as part of an sqlite3VdbeSorterRewind() operation
-** on a sorter that has written two or more PMAs to temporary files. It sets
-** up either VdbeSorter.pMerger (for single threaded sorters) or pReader
-** (for multi-threaded sorters) so that it can be used to iterate through
-** all records stored in the sorter.
-**
-** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
-*/
-static int vdbeSorterSetupMerge(VdbeSorter *pSorter){
- int rc; /* Return code */
- SortSubtask *pTask0 = &pSorter->aTask[0];
- MergeEngine *pMain = 0;
-#if SQLITE_MAX_WORKER_THREADS
- sqlite3 *db = pTask0->pSorter->db;
- int i;
- SorterCompare xCompare = vdbeSorterGetCompare(pSorter);
- for(i=0; i<pSorter->nTask; i++){
- pSorter->aTask[i].xCompare = xCompare;
- }
-#endif
-
- rc = vdbeSorterMergeTreeBuild(pSorter, &pMain);
- if( rc==SQLITE_OK ){
-#if SQLITE_MAX_WORKER_THREADS
- assert( pSorter->bUseThreads==0 || pSorter->nTask>1 );
- if( pSorter->bUseThreads ){
- int iTask;
- PmaReader *pReadr = 0;
- SortSubtask *pLast = &pSorter->aTask[pSorter->nTask-1];
- rc = vdbeSortAllocUnpacked(pLast);
- if( rc==SQLITE_OK ){
- pReadr = (PmaReader*)sqlite3DbMallocZero(db, sizeof(PmaReader));
- pSorter->pReader = pReadr;
- if( pReadr==0 ) rc = SQLITE_NOMEM;
- }
- if( rc==SQLITE_OK ){
- rc = vdbeIncrMergerNew(pLast, pMain, &pReadr->pIncr);
- if( rc==SQLITE_OK ){
- vdbeIncrMergerSetThreads(pReadr->pIncr);
- for(iTask=0; iTask<(pSorter->nTask-1); iTask++){
- IncrMerger *pIncr;
- if( (pIncr = pMain->aReadr[iTask].pIncr) ){
- vdbeIncrMergerSetThreads(pIncr);
- assert( pIncr->pTask!=pLast );
- }
- }
- for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){
- /* Check that:
- **
- ** a) The incremental merge object is configured to use the
- ** right task, and
- ** b) If it is using task (nTask-1), it is configured to run
- ** in single-threaded mode. This is important, as the
- ** root merge (INCRINIT_ROOT) will be using the same task
- ** object.
- */
- PmaReader *p = &pMain->aReadr[iTask];
- assert( p->pIncr==0 || (
- (p->pIncr->pTask==&pSorter->aTask[iTask]) /* a */
- && (iTask!=pSorter->nTask-1 || p->pIncr->bUseThread==0) /* b */
- ));
- rc = vdbePmaReaderIncrInit(p, INCRINIT_TASK);
- }
+ fileWriterWriteVarint(&writer, pIter->nKey);
+ fileWriterWrite(&writer, pIter->aKey, pIter->nKey);
+ rc = sqlite3VdbeSorterNext(db, pCsr, &bEof);
}
- pMain = 0;
- }
- if( rc==SQLITE_OK ){
- rc = vdbePmaReaderIncrMergeInit(pReadr, INCRINIT_ROOT);
+ rc2 = fileWriterFinish(db, &writer, &iWrite2);
+ if( rc==SQLITE_OK ) rc = rc2;
}
- }else
-#endif
- {
- rc = vdbeMergeEngineInit(pTask0, pMain, INCRINIT_NORMAL);
- pSorter->pMerger = pMain;
- pMain = 0;
}
- }
-
- if( rc!=SQLITE_OK ){
- vdbeMergeEngineFree(pMain);
- }
- return rc;
-}
-
-
-/*
-** Once the sorter has been populated by calls to sqlite3VdbeSorterWrite,
-** this function is called to prepare for iterating through the records
-** in sorted order.
-*/
-SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *pCsr, int *pbEof){
- VdbeSorter *pSorter = pCsr->pSorter;
- int rc = SQLITE_OK; /* Return code */
-
- assert( pSorter );
- /* If no data has been written to disk, then do not do so now. Instead,
- ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly
- ** from the in-memory list. */
- if( pSorter->bUsePMA==0 ){
- if( pSorter->list.pList ){
- *pbEof = 0;
- rc = vdbeSorterSort(&pSorter->aTask[0], &pSorter->list);
+ if( pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){
+ break;
}else{
- *pbEof = 1;
+ sqlite3_file *pTmp = pSorter->pTemp1;
+ pSorter->nPMA = iNew;
+ pSorter->pTemp1 = pTemp2;
+ pTemp2 = pTmp;
+ pSorter->iWriteOff = iWrite2;
+ pSorter->iReadOff = 0;
+ iWrite2 = 0;
}
- return rc;
- }
-
- /* Write the current in-memory list to a PMA. When the VdbeSorterWrite()
- ** function flushes the contents of memory to disk, it immediately always
- ** creates a new list consisting of a single key immediately afterwards.
- ** So the list is never empty at this point. */
- assert( pSorter->list.pList );
- rc = vdbeSorterFlushPMA(pSorter);
-
- /* Join all threads */
- rc = vdbeSorterJoinAll(pSorter, rc);
-
- vdbeSorterRewindDebug("rewind");
+ }while( rc==SQLITE_OK );
- /* Assuming no errors have occurred, set up a merger structure to
- ** incrementally read and merge all remaining PMAs. */
- assert( pSorter->pReader==0 );
- if( rc==SQLITE_OK ){
- rc = vdbeSorterSetupMerge(pSorter);
- *pbEof = 0;
+ if( pTemp2 ){
+ sqlite3OsCloseFree(pTemp2);
}
-
- vdbeSorterRewindDebug("rewinddone");
+ *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0);
return rc;
}
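The restored sqlite3VdbeSorterRewind() above handles more than SORTER_MAX_MERGE_COUNT PMAs with repeated passes: each pass merges groups of up to 16 PMAs from pTemp1 into larger PMAs in pTemp2, the two files then swap roles, and the loop repeats until at most 16 PMAs remain to be merged incrementally as rows are fetched. A compact sketch that only counts PMAs per pass, with the file handling and I/O abstracted away:

#include <stdio.h>

#define MAX_MERGE_COUNT 16   /* Assumed fan-in, as in the code above */

/* Return the number of whole merge passes needed before nPma PMAs can be
** merged incrementally, mirroring the do/while loop above: each pass
** rewrites every group of up to 16 PMAs as one larger PMA. */
static int mergePasses(int nPma){
  int nPass = 0;
  while( nPma > MAX_MERGE_COUNT ){
    /* One pass: ceil(nPma/16) output PMAs land in the other temp file,
    ** then the input and output files swap roles. */
    nPma = (nPma + MAX_MERGE_COUNT - 1) / MAX_MERGE_COUNT;
    nPass++;
  }
  return nPass;
}

int main(void){
  /* 10 PMAs: 0 passes; 40 PMAs: 1 pass (down to 3); 300 PMAs: 2 passes
  ** (300 -> 19 -> 2). */
  printf("%d %d %d\n", mergePasses(10), mergePasses(40), mergePasses(300));
  return 0;
}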
@@ -82679,28 +74403,22 @@ SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *db, const VdbeCursor *pCsr, in
VdbeSorter *pSorter = pCsr->pSorter;
int rc; /* Return code */
- assert( pSorter->bUsePMA || (pSorter->pReader==0 && pSorter->pMerger==0) );
- if( pSorter->bUsePMA ){
- assert( pSorter->pReader==0 || pSorter->pMerger==0 );
- assert( pSorter->bUseThreads==0 || pSorter->pReader );
- assert( pSorter->bUseThreads==1 || pSorter->pMerger );
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pSorter->bUseThreads ){
- rc = vdbePmaReaderNext(pSorter->pReader);
- *pbEof = (pSorter->pReader->pFd==0);
- }else
-#endif
- /*if( !pSorter->bUseThreads )*/ {
- assert( pSorter->pMerger!=0 );
- assert( pSorter->pMerger->pTask==(&pSorter->aTask[0]) );
- rc = vdbeMergeEngineStep(pSorter->pMerger, pbEof);
+ if( pSorter->aTree ){
+ int iPrev = pSorter->aTree[1];/* Index of iterator to advance */
+ int i; /* Index of aTree[] to recalculate */
+
+ rc = vdbeSorterIterNext(db, &pSorter->aIter[iPrev]);
+ for(i=(pSorter->nTree+iPrev)/2; rc==SQLITE_OK && i>0; i=i/2){
+ rc = vdbeSorterDoCompare(pCsr, i);
}
+
+ *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0);
}else{
- SorterRecord *pFree = pSorter->list.pList;
- pSorter->list.pList = pFree->u.pNext;
- pFree->u.pNext = 0;
- if( pSorter->list.aMemory==0 ) vdbeSorterRecordFree(db, pFree);
- *pbEof = !pSorter->list.pList;
+ SorterRecord *pFree = pSorter->pRecord;
+ pSorter->pRecord = pFree->pNext;
+ pFree->pNext = 0;
+ vdbeSorterRecordFree(db, pFree);
+ *pbEof = !pSorter->pRecord;
rc = SQLITE_OK;
}
return rc;
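The restored sqlite3VdbeSorterNext() above advances only the iterator that currently holds the smallest key (aIter[aTree[1]]) and then re-runs comparisons along that iterator's path back to the root, starting at node (nTree+iPrev)/2 and halving the index each step. A small standalone illustration of which aTree[] entries that path visits; the node arithmetic is taken from the loop above, everything else is demo scaffolding:

#include <stdio.h>

/* For a tree with nTree slots, print the aTree[] nodes that must be
** recomputed after the reader at leaf iPrev is advanced: the same
** sequence walked by i=(nTree+iPrev)/2, i=i/2, ... down to i==1. */
static void printRepairPath(int nTree, int iPrev){
  int i;
  printf("advance reader %d:", iPrev);
  for(i=(nTree+iPrev)/2; i>0; i=i/2) printf(" aTree[%d]", i);
  printf("\n");
}

int main(void){
  /* With 8 readers, advancing reader 5 touches nodes 6, 3 and 1; only
  ** about log2(nTree) comparisons are needed, never a full rebuild. */
  printRepairPath(8, 5);
  printRepairPath(8, 0);   /* nodes 4, 2, 1 */
  return 0;
}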
@@ -82715,21 +74433,14 @@ static void *vdbeSorterRowkey(
int *pnKey /* OUT: Size of current key in bytes */
){
void *pKey;
- if( pSorter->bUsePMA ){
- PmaReader *pReader;
-#if SQLITE_MAX_WORKER_THREADS>0
- if( pSorter->bUseThreads ){
- pReader = pSorter->pReader;
- }else
-#endif
- /*if( !pSorter->bUseThreads )*/{
- pReader = &pSorter->pMerger->aReadr[pSorter->pMerger->aTree[1]];
- }
- *pnKey = pReader->nKey;
- pKey = pReader->aKey;
+ if( pSorter->aTree ){
+ VdbeSorterIter *pIter;
+ pIter = &pSorter->aIter[ pSorter->aTree[1] ];
+ *pnKey = pIter->nKey;
+ pKey = pIter->aKey;
}else{
- *pnKey = pSorter->list.pList->nVal;
- pKey = SRVAL(pSorter->list.pList);
+ *pnKey = pSorter->pRecord->nVal;
+ pKey = pSorter->pRecord->pVal;
}
return pKey;
}
@@ -82742,7 +74453,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){
void *pKey; int nKey; /* Sorter key to copy into pOut */
pKey = vdbeSorterRowkey(pSorter, &nKey);
- if( sqlite3VdbeMemClearAndResize(pOut, nKey) ){
+ if( sqlite3VdbeMemGrow(pOut, nKey, 0) ){
return SQLITE_NOMEM;
}
pOut->n = nKey;
@@ -82757,48 +74468,22 @@ SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){
** passed as the first argument currently points to. For the purposes of
** the comparison, ignore the rowid field at the end of each record.
**
-** If the sorter cursor key contains any NULL values, consider it to be
-** less than pVal. Even if pVal also contains NULL values.
-**
** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM).
** Otherwise, set *pRes to a negative, zero or positive value if the
** key in pVal is smaller than, equal to or larger than the current sorter
** key.
-**
-** This routine forms the core of the OP_SorterCompare opcode, which in
-** turn is used to verify uniqueness when constructing a UNIQUE INDEX.
*/
SQLITE_PRIVATE int sqlite3VdbeSorterCompare(
const VdbeCursor *pCsr, /* Sorter cursor */
Mem *pVal, /* Value to compare to current sorter key */
- int nKeyCol, /* Compare this many columns */
+ int nIgnore, /* Ignore this many fields at the end */
int *pRes /* OUT: Result of comparison */
){
VdbeSorter *pSorter = pCsr->pSorter;
- UnpackedRecord *r2 = pSorter->pUnpacked;
- KeyInfo *pKeyInfo = pCsr->pKeyInfo;
- int i;
void *pKey; int nKey; /* Sorter key to compare pVal with */
- if( r2==0 ){
- char *p;
- r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo,0,0,&p);
- assert( pSorter->pUnpacked==(UnpackedRecord*)p );
- if( r2==0 ) return SQLITE_NOMEM;
- r2->nField = nKeyCol;
- }
- assert( r2->nField==nKeyCol );
-
pKey = vdbeSorterRowkey(pSorter, &nKey);
- sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, r2);
- for(i=0; i<nKeyCol; i++){
- if( r2->aMem[i].flags & MEM_Null ){
- *pRes = -1;
- return SQLITE_OK;
- }
- }
-
- *pRes = sqlite3VdbeRecordCompare(pVal->n, pVal->z, r2);
+ vdbeSorterCompare(pCsr, nIgnore, pVal->z, pVal->n, pKey, nKey, pRes);
return SQLITE_OK;
}
@@ -82831,7 +74516,6 @@ SQLITE_PRIVATE int sqlite3VdbeSorterCompare(
** 2) The sqlite3JournalCreate() function is called.
*/
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
-/* #include "sqliteInt.h" */
/*
@@ -83079,7 +74763,6 @@ SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){
** The in-memory rollback journal is used to journal transactions for
** ":memory:" databases and when the journal_mode=MEMORY pragma is used.
*/
-/* #include "sqliteInt.h" */
/* Forward references to internal structures */
typedef struct MemJournal MemJournal;
@@ -83091,7 +74774,7 @@ typedef struct FileChunk FileChunk;
**
** The size chosen is a little less than a power of two. That way,
** the FileChunk object will have a size that almost exactly fills
-** a power-of-two allocation. This minimizes wasted space in power-of-two
+** a power-of-two allocation. This mimimizes wasted space in power-of-two
** memory allocators.
*/
#define JOURNAL_CHUNKSIZE ((int)(1024-sizeof(FileChunk*)))
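The JOURNAL_CHUNKSIZE definition above sizes each FileChunk payload so that the payload plus the chunk's next pointer comes to 1024 bytes, which a power-of-two allocator can hand out with no slack. A sketch of the layout that comment describes; the struct shown is a plausible reconstruction for a 64-bit build, not text copied from the surrounding diff:

#include <stdio.h>

#define CHUNKSIZE ((int)(1024 - sizeof(struct FileChunk*)))

/* One link in the in-memory journal: a next pointer followed by a payload
** sized so the whole struct comes to 1024 bytes. */
struct FileChunk {
  struct FileChunk *pNext;          /* Next chunk in the journal */
  unsigned char zChunk[CHUNKSIZE];  /* Journal content */
};

int main(void){
  /* On a typical 64-bit build: payload 1016 bytes, struct 1024 bytes. */
  printf("payload=%d struct=%zu\n", CHUNKSIZE, sizeof(struct FileChunk));
  return 0;
}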
@@ -83335,14 +75018,13 @@ SQLITE_PRIVATE int sqlite3MemJournalSize(void){
** This file contains routines used for walking the parser tree for
** an SQL statement.
*/
-/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/* #include <string.h> */
/*
** Walk an expression tree. Invoke the callback once for each node
-** of the expression, while descending. (In other words, the callback
+** of the expression, while decending. (In other words, the callback
** is invoked before visiting children.)
**
** The return value from the callback should be one of the WRC_*
@@ -83428,11 +75110,6 @@ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){
if( sqlite3WalkSelect(pWalker, pItem->pSelect) ){
return WRC_Abort;
}
- if( pItem->fg.isTabFunc
- && sqlite3WalkExprList(pWalker, pItem->u1.pFuncArg)
- ){
- return WRC_Abort;
- }
}
}
return WRC_Continue;
@@ -83441,12 +75118,9 @@ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){
/*
** Call sqlite3WalkExpr() for every expression in Select statement p.
** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and
-** on the compound select chain, p->pPrior.
-**
-** If it is not NULL, the xSelectCallback() callback is invoked before
-** the walk of the expressions and FROM clause. The xSelectCallback2()
-** method, if it is not NULL, is invoked following the walk of the
-** expressions and FROM clause.
+** on the compound select chain, p->pPrior. Invoke the xSelectCallback()
+** either before or after the walk of expressions and FROM clause, depending
+** on whether pWalker->bSelectDepthFirst is false or true, respectively.
**
** Return WRC_Continue under normal conditions. Return WRC_Abort if
** there is an abort request.
@@ -83456,13 +75130,11 @@ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){
*/
SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
int rc;
- if( p==0 || (pWalker->xSelectCallback==0 && pWalker->xSelectCallback2==0) ){
- return WRC_Continue;
- }
+ if( p==0 || pWalker->xSelectCallback==0 ) return WRC_Continue;
rc = WRC_Continue;
pWalker->walkerDepth++;
while( p ){
- if( pWalker->xSelectCallback ){
+ if( !pWalker->bSelectDepthFirst ){
rc = pWalker->xSelectCallback(pWalker, p);
if( rc ) break;
}
@@ -83472,8 +75144,12 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
pWalker->walkerDepth--;
return WRC_Abort;
}
- if( pWalker->xSelectCallback2 ){
- pWalker->xSelectCallback2(pWalker, p);
+ if( pWalker->bSelectDepthFirst ){
+ rc = pWalker->xSelectCallback(pWalker, p);
+ /* Depth-first search is currently only used for
+ ** selectAddSubqueryTypeInfo() and that routine always returns
+ ** WRC_Continue (0). So the following branch is never taken. */
+ if( NEVER(rc) ) break;
}
p = p->pPrior;
}
@@ -83499,7 +75175,6 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
** resolve all identifiers by associating them with a particular
** table and column.
*/
-/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/* #include <string.h> */
@@ -83513,7 +75188,7 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
** is a helper function - a callback for the tree walker.
*/
static int incrAggDepth(Walker *pWalker, Expr *pExpr){
- if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.n;
+ if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.i;
return WRC_Continue;
}
static void incrAggFunctionDepth(Expr *pExpr, int N){
@@ -83521,7 +75196,7 @@ static void incrAggFunctionDepth(Expr *pExpr, int N){
Walker w;
memset(&w, 0, sizeof(w));
w.xExprCallback = incrAggDepth;
- w.u.n = N;
+ w.u.i = N;
sqlite3WalkExpr(&w, pExpr);
}
}
@@ -83530,6 +75205,30 @@ static void incrAggFunctionDepth(Expr *pExpr, int N){
** Turn the pExpr expression into an alias for the iCol-th column of the
** result set in pEList.
**
+** If the result set column is a simple column reference, then this routine
+** makes an exact copy. But for any other kind of expression, this
+** routine makes a copy of the result set column as the argument to the
+** TK_AS operator. The TK_AS operator causes the expression to be
+** evaluated just once and then reused for each alias.
+**
+** The reason for suppressing the TK_AS term when the expression is a simple
+** column reference is so that the column reference will be recognized as
+** usable by indices within the WHERE clause processing logic.
+**
+** The TK_AS operator is inhibited if zType[0]=='G'. This means
+** that in a GROUP BY clause, the expression is evaluated twice. Hence:
+**
+** SELECT random()%5 AS x, count(*) FROM tab GROUP BY x
+**
+** Is equivalent to:
+**
+** SELECT random()%5 AS x, count(*) FROM tab GROUP BY random()%5
+**
+** The result of random()%5 in the GROUP BY clause is probably different
+** from the result in the result-set. On the other hand Standard SQL does
+** not allow the GROUP BY clause to contain references to result-set columns.
+** So this should never come up in well-formed queries.
+**
** If the reference is followed by a COLLATE operator, then make sure
** the COLLATE operator is preserved. For example:
**
@@ -83540,7 +75239,7 @@ static void incrAggFunctionDepth(Expr *pExpr, int N){
** SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase;
**
** The nSubquery parameter specifies how many levels of subquery the
-** alias is removed from the original expression. The usual value is
+** alias is removed from the original expression. The usually value is
** zero but it might be more if the alias is contained within a subquery
** of the original expression. The Expr.op2 field of TK_AGG_FUNCTION
** structures must be increased by the nSubquery amount.
@@ -83560,14 +75259,23 @@ static void resolveAlias(
assert( iCol>=0 && iCol<pEList->nExpr );
pOrig = pEList->a[iCol].pExpr;
assert( pOrig!=0 );
+ assert( pOrig->flags & EP_Resolved );
db = pParse->db;
pDup = sqlite3ExprDup(db, pOrig, 0);
if( pDup==0 ) return;
- if( zType[0]!='G' ) incrAggFunctionDepth(pDup, nSubquery);
+ if( pOrig->op!=TK_COLUMN && zType[0]!='G' ){
+ incrAggFunctionDepth(pDup, nSubquery);
+ pDup = sqlite3PExpr(pParse, TK_AS, pDup, 0, 0);
+ if( pDup==0 ) return;
+ ExprSetProperty(pDup, EP_Skip);
+ if( pEList->a[iCol].u.x.iAlias==0 ){
+ pEList->a[iCol].u.x.iAlias = (u16)(++pParse->nAlias);
+ }
+ pDup->iTable = pEList->a[iCol].u.x.iAlias;
+ }
if( pExpr->op==TK_COLLATE ){
pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken);
}
- ExprSetProperty(pDup, EP_Alias);
/* Before calling sqlite3ExprDelete(), set the EP_Static flag. This
** prevents ExprDelete() from deleting the Expr structure itself,
@@ -83699,10 +75407,9 @@ static int lookupName(
testcase( pNC->ncFlags & NC_PartIdx );
testcase( pNC->ncFlags & NC_IsCheck );
if( (pNC->ncFlags & (NC_PartIdx|NC_IsCheck))!=0 ){
- /* Silently ignore database qualifiers inside CHECK constraints and
- ** partial indices. Do not raise errors because that might break
- ** legacy and because it does not hurt anything to just ignore the
- ** database name. */
+ /* Silently ignore database qualifiers inside CHECK constraints and partial
+ ** indices. Do not raise errors because that might break legacy and
+ ** because it does not hurt anything to just ignore the database name. */
zDb = 0;
}else{
for(i=0; i<db->nDb; i++){
@@ -83759,7 +75466,7 @@ static int lookupName(
** USING clause, then skip this match.
*/
if( cnt==1 ){
- if( pItem->fg.jointype & JT_NATURAL ) continue;
+ if( pItem->jointype & JT_NATURAL ) continue;
if( nameInUsingClause(pItem->pUsing, zCol) ) continue;
}
cnt++;
@@ -83773,11 +75480,6 @@ static int lookupName(
if( pMatch ){
pExpr->iTable = pMatch->iCursor;
pExpr->pTab = pMatch->pTab;
- /* RIGHT JOIN not (yet) supported */
- assert( (pMatch->fg.jointype & JT_RIGHT)==0 );
- if( (pMatch->fg.jointype & JT_LEFT)!=0 ){
- ExprSetProperty(pExpr, EP_CanBeNull);
- }
pSchema = pExpr->pTab->pSchema;
}
} /* if( pSrcList ) */
@@ -83795,8 +75497,6 @@ static int lookupName(
}else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){
pExpr->iTable = 0;
pTab = pParse->pTriggerTab;
- }else{
- pTab = 0;
}
if( pTab ){
@@ -83811,8 +75511,8 @@ static int lookupName(
break;
}
}
- if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){
- /* IMP: R-51414-32910 */
+ if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && HasRowid(pTab) ){
+ /* IMP: R-24309-18625 */
/* IMP: R-44911-55124 */
iCol = -1;
}
@@ -83840,13 +75540,8 @@ static int lookupName(
/*
** Perhaps the name is a reference to the ROWID
*/
- if( cnt==0
- && cntTab==1
- && pMatch
- && (pNC->ncFlags & NC_IdxExpr)==0
- && sqlite3IsRowid(zCol)
- && VisibleRowid(pMatch->pTab)
- ){
+ assert( pTab!=0 || cntTab==0 );
+ if( cnt==0 && cntTab==1 && sqlite3IsRowid(zCol) && HasRowid(pTab) ){
cnt = 1;
pExpr->iColumn = -1; /* IMP: R-44911-55124 */
pExpr->affinity = SQLITE_AFF_INTEGER;
@@ -83865,9 +75560,9 @@ static int lookupName(
** resolved by the time the WHERE clause is resolved.
**
** The ability to use an output result-set column in the WHERE, GROUP BY,
- ** or HAVING clauses, or as part of a larger expression in the ORDER BY
+ ** or HAVING clauses, or as part of a larger expression in the ORDRE BY
** clause is not standard SQL. This is a (goofy) SQLite extension, that
- ** is supported for backwards compatibility only. Hence, we issue a warning
+ ** is supported for backwards compatibility only. TO DO: Issue a warning
** on sqlite3_log() whenever the capability is used.
*/
if( (pEList = pNC->pEList)!=0
@@ -83964,7 +75659,7 @@ static int lookupName(
lookupname_end:
if( cnt==1 ){
assert( pNC!=0 );
- if( !ExprHasProperty(pExpr, EP_Alias) ){
+ if( pExpr->op!=TK_AS ){
sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList);
}
/* Increment the nRef value on all name contexts from TopNC up to
@@ -84005,25 +75700,36 @@ SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSr
}
/*
-** Report an error that an expression is not valid for some set of
-** pNC->ncFlags values determined by validMask.
+** Report an error that an expression is not valid for a partial index WHERE
+** clause.
*/
-static void notValid(
+static void notValidPartIdxWhere(
Parse *pParse, /* Leave error message here */
NameContext *pNC, /* The name context */
- const char *zMsg, /* Type of error */
- int validMask /* Set of contexts for which prohibited */
+ const char *zMsg /* Type of error */
){
- assert( (validMask&~(NC_IsCheck|NC_PartIdx|NC_IdxExpr))==0 );
- if( (pNC->ncFlags & validMask)!=0 ){
- const char *zIn = "partial index WHERE clauses";
- if( pNC->ncFlags & NC_IdxExpr ) zIn = "index expressions";
+ if( (pNC->ncFlags & NC_PartIdx)!=0 ){
+ sqlite3ErrorMsg(pParse, "%s prohibited in partial index WHERE clauses",
+ zMsg);
+ }
+}
+
#ifndef SQLITE_OMIT_CHECK
- else if( pNC->ncFlags & NC_IsCheck ) zIn = "CHECK constraints";
-#endif
- sqlite3ErrorMsg(pParse, "%s prohibited in %s", zMsg, zIn);
+/*
+** Report an error that an expression is not valid for a CHECK constraint.
+*/
+static void notValidCheckConstraint(
+ Parse *pParse, /* Leave error message here */
+ NameContext *pNC, /* The name context */
+ const char *zMsg /* Type of error */
+){
+ if( (pNC->ncFlags & NC_IsCheck)!=0 ){
+ sqlite3ErrorMsg(pParse,"%s prohibited in CHECK constraints", zMsg);
}
}
+#else
+# define notValidCheckConstraint(P,N,M)
+#endif
/*
** Expression p should encode a floating point value between 1.0 and 0.0.
@@ -84036,7 +75742,7 @@ static int exprProbability(Expr *p){
sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8);
assert( r>=0.0 );
if( r>1.0 ) return -1;
- return (int)(r*134217728.0);
+ return (int)(r*1000.0);
}
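exprProbability() stores the likelihood() argument as a scaled integer: the removed line scales by 134217728 (2^27) while the restored line scales by 1000, and the hunk further down swaps the hard-coded defaults to match (8388608 = 0.0625*2^27 and 125829120 = 0.9375*2^27 on the removed side, 62 on the restored side). A two-line check of that arithmetic:

#include <stdio.h>

int main(void){
  /* Scaled-probability constants used by likelihood()/unlikely()/likely(). */
  printf("%d %d\n", (int)(0.0625*134217728.0), (int)(0.9375*134217728.0));
  printf("%d\n",    (int)(0.0625*1000.0));   /* 1000-based scale: 62 */
  return 0;
}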
/*
@@ -84089,8 +75795,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
pExpr->affinity = SQLITE_AFF_INTEGER;
break;
}
-#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT)
- && !defined(SQLITE_OMIT_SUBQUERY) */
+#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) */
/* A lone identifier is the name of a column.
*/
@@ -84108,8 +75813,6 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
Expr *pRight;
/* if( pSrcList==0 ) break; */
- notValid(pParse, pNC, "the \".\" operator", NC_IdxExpr);
- /*notValid(pParse, pNC, "the \".\" operator", NC_PartIdx|NC_IsCheck, 1);*/
pRight = pExpr->pRight;
if( pRight->op==TK_ID ){
zDb = 0;
@@ -84139,7 +75842,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
u8 enc = ENC(pParse->db); /* The database encoding */
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
- notValid(pParse, pNC, "functions", NC_PartIdx);
+ notValidPartIdxWhere(pParse, pNC, "functions");
zId = pExpr->u.zToken;
nId = sqlite3Strlen30(zId);
pDef = sqlite3FindFunction(pParse->db, zId, nId, n, enc, 0);
@@ -84157,25 +75860,21 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
if( n==2 ){
pExpr->iTable = exprProbability(pList->a[1].pExpr);
if( pExpr->iTable<0 ){
- sqlite3ErrorMsg(pParse,
- "second argument to likelihood() must be a "
- "constant between 0.0 and 1.0");
+ sqlite3ErrorMsg(pParse, "second argument to likelihood() must be a "
+ "constant between 0.0 and 1.0");
pNC->nErr++;
}
}else{
- /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is
- ** equivalent to likelihood(X, 0.0625).
- ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is
- ** short-hand for likelihood(X,0.0625).
- ** EVIDENCE-OF: R-36850-34127 The likely(X) function is short-hand
- ** for likelihood(X,0.9375).
- ** EVIDENCE-OF: R-53436-40973 The likely(X) function is equivalent
- ** to likelihood(X,0.9375). */
- /* TUNING: unlikely() probability is 0.0625. likely() is 0.9375 */
- pExpr->iTable = pDef->zName[0]=='u' ? 8388608 : 125829120;
+ /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is equivalent to
+ ** likelihood(X, 0.0625).
+ ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is short-hand for
+ ** likelihood(X,0.0625). */
+ pExpr->iTable = 62; /* TUNING: Default 2nd arg to unlikely() is 0.0625 */
}
}
+ }
#ifndef SQLITE_OMIT_AUTHORIZATION
+ if( pDef ){
auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0);
if( auth!=SQLITE_OK ){
if( auth==SQLITE_DENY ){
@@ -84186,20 +75885,9 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
pExpr->op = TK_NULL;
return WRC_Prune;
}
-#endif
- if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){
- /* For the purposes of the EP_ConstFunc flag, date and time
- ** functions and other functions that change slowly are considered
- ** constant because they are constant for the duration of one query */
- ExprSetProperty(pExpr,EP_ConstFunc);
- }
- if( (pDef->funcFlags & SQLITE_FUNC_CONSTANT)==0 ){
- /* Date/time functions that use 'now', and other functions like
- ** sqlite_version() that might change over time cannot be used
- ** in an index. */
- notValid(pParse, pNC, "non-deterministic functions", NC_IdxExpr);
- }
+ if( pDef->funcFlags & SQLITE_FUNC_CONSTANT ) ExprSetProperty(pExpr,EP_Constant);
}
+#endif
if( is_agg && (pNC->ncFlags & NC_AllowAgg)==0 ){
sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId);
pNC->nErr++;
@@ -84222,13 +75910,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
pExpr->op2++;
pNC2 = pNC2->pNext;
}
- assert( pDef!=0 );
- if( pNC2 ){
- assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
- testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
- pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX);
-
- }
+ if( pNC2 ) pNC2->ncFlags |= NC_HasAgg;
pNC->ncFlags |= NC_AllowAgg;
}
/* FIX ME: Compute pExpr->affinity based on the expected return
@@ -84244,7 +75926,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_IN );
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
int nRef = pNC->nRef;
- notValid(pParse, pNC, "subqueries", NC_IsCheck|NC_PartIdx|NC_IdxExpr);
+ notValidCheckConstraint(pParse, pNC, "subqueries");
+ notValidPartIdxWhere(pParse, pNC, "subqueries");
sqlite3WalkSelect(pWalker, pExpr->x.pSelect);
assert( pNC->nRef>=nRef );
if( nRef!=pNC->nRef ){
@@ -84254,7 +75937,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
break;
}
case TK_VARIABLE: {
- notValid(pParse, pNC, "parameters", NC_IsCheck|NC_PartIdx|NC_IdxExpr);
+ notValidCheckConstraint(pParse, pNC, "parameters");
+ notValidPartIdxWhere(pParse, pNC, "parameters");
break;
}
}
@@ -84448,11 +76132,9 @@ static int resolveCompoundOrderBy(
if( pItem->pExpr==pE ){
pItem->pExpr = pNew;
}else{
- Expr *pParent = pItem->pExpr;
- assert( pParent->op==TK_COLLATE );
- while( pParent->pLeft->op==TK_COLLATE ) pParent = pParent->pLeft;
- assert( pParent->pLeft==pE );
- pParent->pLeft = pNew;
+ assert( pItem->pExpr->op==TK_COLLATE );
+ assert( pItem->pExpr->pLeft==pE );
+ pItem->pExpr->pLeft = pNew;
}
sqlite3ExprDelete(db, pE);
pItem->u.x.iOrderByCol = (u16)iCol;
@@ -84509,8 +76191,7 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(
resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr);
return 1;
}
- resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,
- zType,0);
+ resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr, zType,0);
}
}
return 0;
@@ -84590,7 +76271,7 @@ static int resolveOrderGroupBy(
}
/*
-** Resolve names in the SELECT statement p and all of its descendants.
+** Resolve names in the SELECT statement p and all of its descendents.
*/
static int resolveSelectStep(Walker *pWalker, Select *p){
NameContext *pOuterNC; /* Context that contains this SELECT */
@@ -84598,6 +76279,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
int isCompound; /* True if p is a compound select */
int nCompound; /* Number of compound terms processed so far */
Parse *pParse; /* Parsing context */
+ ExprList *pEList; /* Result set expression list */
int i; /* Loop counter */
ExprList *pGroupBy; /* The GROUP BY clause */
Select *pLeftmost; /* Left-most of SELECT of a compound */
@@ -84642,20 +76324,6 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
sqlite3ResolveExprNames(&sNC, p->pOffset) ){
return WRC_Abort;
}
-
- /* If the SF_Converted flags is set, then this Select object was
- ** was created by the convertCompoundSelectToSubquery() function.
- ** In this case the ORDER BY clause (p->pOrderBy) should be resolved
- ** as if it were part of the sub-query, not the parent. This block
- ** moves the pOrderBy down to the sub-query. It will be moved back
- ** after the names have been resolved. */
- if( p->selFlags & SF_Converted ){
- Select *pSub = p->pSrc->a[0].pSelect;
- assert( p->pSrc->nSrc==1 && p->pOrderBy );
- assert( pSub->pPrior && pSub->pOrderBy==0 );
- pSub->pOrderBy = p->pOrderBy;
- p->pOrderBy = 0;
- }
/* Recursively resolve names in all subqueries
*/
@@ -84670,7 +76338,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
** parent contexts. After resolving references to expressions in
** pItem->pSelect, check if this value has changed. If so, then
** SELECT statement pItem->pSelect must be correlated. Set the
- ** pItem->fg.isCorrelated flag if this is the case. */
+ ** pItem->isCorrelated flag if this is the case. */
for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef;
if( pItem->zName ) pParse->zAuthContext = pItem->zName;
@@ -84679,8 +76347,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
if( pParse->nErr || db->mallocFailed ) return WRC_Abort;
for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef;
- assert( pItem->fg.isCorrelated==0 && nRef<=0 );
- pItem->fg.isCorrelated = (nRef!=0);
+ assert( pItem->isCorrelated==0 && nRef<=0 );
+ pItem->isCorrelated = (nRef!=0);
}
}
@@ -84692,7 +76360,14 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
sNC.pNext = pOuterNC;
/* Resolve names in the result set. */
- if( sqlite3ResolveExprListNames(&sNC, p->pEList) ) return WRC_Abort;
+ pEList = p->pEList;
+ assert( pEList!=0 );
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *pX = pEList->a[i].pExpr;
+ if( sqlite3ResolveExprNames(&sNC, pX) ){
+ return WRC_Abort;
+ }
+ }
/* If there are no aggregate functions in the result-set, and no GROUP BY
** expression, do not allow aggregates in any of the other expressions.
@@ -84700,8 +76375,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
assert( (p->selFlags & SF_Aggregate)==0 );
pGroupBy = p->pGroupBy;
if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){
- assert( NC_MinMaxAgg==SF_MinMaxAgg );
- p->selFlags |= SF_Aggregate | (sNC.ncFlags&NC_MinMaxAgg);
+ p->selFlags |= SF_Aggregate;
}else{
sNC.ncFlags &= ~NC_AllowAgg;
}
@@ -84725,46 +76399,18 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort;
if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort;
- /* Resolve names in table-valued-function arguments */
- for(i=0; i<p->pSrc->nSrc; i++){
- struct SrcList_item *pItem = &p->pSrc->a[i];
- if( pItem->fg.isTabFunc
- && sqlite3ResolveExprListNames(&sNC, pItem->u1.pFuncArg)
- ){
- return WRC_Abort;
- }
- }
-
/* The ORDER BY and GROUP BY clauses may not refer to terms in
** outer queries
*/
sNC.pNext = 0;
sNC.ncFlags |= NC_AllowAgg;
- /* If this is a converted compound query, move the ORDER BY clause from
- ** the sub-query back to the parent query. At this point each term
- ** within the ORDER BY clause has been transformed to an integer value.
- ** These integers will be replaced by copies of the corresponding result
- ** set expressions by the call to resolveOrderGroupBy() below. */
- if( p->selFlags & SF_Converted ){
- Select *pSub = p->pSrc->a[0].pSelect;
- p->pOrderBy = pSub->pOrderBy;
- pSub->pOrderBy = 0;
- }
-
/* Process the ORDER BY clause for singleton SELECT statements.
** The ORDER BY clause for compounds SELECT statements is handled
** below, after all of the result-sets for all of the elements of
** the compound have been resolved.
- **
- ** If there is an ORDER BY clause on a term of a compound-select other
- ** than the right-most term, then that is a syntax error. But the error
- ** is not detected until much later, and so we need to go ahead and
- ** resolve those symbols on the incorrect ORDER BY for consistency.
*/
- if( isCompound<=nCompound /* Defer right-most ORDER BY of a compound */
- && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER")
- ){
+ if( !isCompound && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER") ){
return WRC_Abort;
}
if( db->mallocFailed ){
@@ -84789,13 +76435,6 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
}
}
- /* If this is part of a compound SELECT, check that it has the right
- ** number of expressions in the select list. */
- if( p->pNext && p->pEList->nExpr!=p->pNext->pEList->nExpr ){
- sqlite3SelectWrongNumTermsError(pParse, p->pNext);
- return WRC_Abort;
- }
-
/* Advance to the next term of the compound
*/
p = p->pPrior;
@@ -84864,7 +76503,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames(
NameContext *pNC, /* Namespace to resolve expressions in. */
Expr *pExpr /* The expression to be analyzed. */
){
- u16 savedHasAgg;
+ u8 savedHasAgg;
Walker w;
if( pExpr==0 ) return 0;
@@ -84877,8 +76516,8 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames(
pParse->nHeight += pExpr->nHeight;
}
#endif
- savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg);
- pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg);
+ savedHasAgg = pNC->ncFlags & NC_HasAgg;
+ pNC->ncFlags &= ~NC_HasAgg;
memset(&w, 0, sizeof(w));
w.xExprCallback = resolveExprStep;
w.xSelectCallback = resolveSelectStep;
@@ -84893,27 +76532,12 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames(
}
if( pNC->ncFlags & NC_HasAgg ){
ExprSetProperty(pExpr, EP_Agg);
+ }else if( savedHasAgg ){
+ pNC->ncFlags |= NC_HasAgg;
}
- pNC->ncFlags |= savedHasAgg;
return ExprHasProperty(pExpr, EP_Error);
}
-/*
-** Resolve all names for all expression in an expression list. This is
-** just like sqlite3ResolveExprNames() except that it works for an expression
-** list rather than a single expression.
-*/
-SQLITE_PRIVATE int sqlite3ResolveExprListNames(
- NameContext *pNC, /* Namespace to resolve expressions in. */
- ExprList *pList /* The expression list to be analyzed. */
-){
- int i;
- assert( pList!=0 );
- for(i=0; i<pList->nExpr; i++){
- if( sqlite3ResolveExprNames(pNC, pList->a[i].pExpr) ) return WRC_Abort;
- }
- return WRC_Continue;
-}
/*
** Resolve all names in all expressions of a SELECT and in all
@@ -84957,14 +76581,15 @@ SQLITE_PRIVATE void sqlite3ResolveSelectNames(
SQLITE_PRIVATE void sqlite3ResolveSelfReference(
Parse *pParse, /* Parsing context */
Table *pTab, /* The table being referenced */
- int type, /* NC_IsCheck or NC_PartIdx or NC_IdxExpr */
+ int type, /* NC_IsCheck or NC_PartIdx */
Expr *pExpr, /* Expression to resolve. May be NULL. */
ExprList *pList /* Expression list to resolve. May be NUL. */
){
SrcList sSrc; /* Fake SrcList for pParse->pNewTable */
NameContext sNC; /* Name context for pParse->pNewTable */
+ int i; /* Loop counter */
- assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr );
+ assert( type==NC_IsCheck || type==NC_PartIdx );
memset(&sNC, 0, sizeof(sNC));
memset(&sSrc, 0, sizeof(sSrc));
sSrc.nSrc = 1;
@@ -84975,7 +76600,13 @@ SQLITE_PRIVATE void sqlite3ResolveSelfReference(
sNC.pSrcList = &sSrc;
sNC.ncFlags = type;
if( sqlite3ResolveExprNames(&sNC, pExpr) ) return;
- if( pList ) sqlite3ResolveExprListNames(&sNC, pList);
+ if( pList ){
+ for(i=0; i<pList->nExpr; i++){
+ if( sqlite3ResolveExprNames(&sNC, pList->a[i].pExpr) ){
+ return;
+ }
+ }
+ }
}
/************** End of resolve.c *********************************************/
@@ -84994,7 +76625,6 @@ SQLITE_PRIVATE void sqlite3ResolveSelfReference(
** This file contains routines used for analyzing expressions and
** for generating VDBE code that evaluates expressions in SQLite.
*/
-/* #include "sqliteInt.h" */
/*
** Return the 'affinity' of the expression pExpr if any.
@@ -85004,7 +76634,7 @@ SQLITE_PRIVATE void sqlite3ResolveSelfReference(
** affinity of that column is returned. Otherwise, 0x00 is returned,
** indicating no affinity for the expression.
**
-** i.e. the WHERE clause expressions in the following statements all
+** i.e. the WHERE clause expresssions in the following statements all
** have an affinity:
**
** CREATE TABLE t1(a);
@@ -85015,7 +76645,6 @@ SQLITE_PRIVATE void sqlite3ResolveSelfReference(
SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr){
int op;
pExpr = sqlite3ExprSkipCollate(pExpr);
- if( pExpr->flags & EP_Generic ) return 0;
op = pExpr->op;
if( op==TK_SELECT ){
assert( pExpr->flags&EP_xIsSelect );
@@ -85048,14 +76677,9 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr){
** If a memory allocation error occurs, that fact is recorded in pParse->db
** and the pExpr parameter is returned unchanged.
*/
-SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(
- Parse *pParse, /* Parsing context */
- Expr *pExpr, /* Add the "COLLATE" clause to this expression */
- const Token *pCollName, /* Name of collating sequence */
- int dequote /* True to dequote pCollName */
-){
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr *pExpr, Token *pCollName){
if( pCollName->n>0 ){
- Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, dequote);
+ Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, 1);
if( pNew ){
pNew->pLeft = pExpr;
pNew->flags |= EP_Collate|EP_Skip;
@@ -85069,11 +76693,11 @@ SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, con
assert( zC!=0 );
s.z = zC;
s.n = sqlite3Strlen30(s.z);
- return sqlite3ExprAddCollateToken(pParse, pExpr, &s, 0);
+ return sqlite3ExprAddCollateToken(pParse, pExpr, &s);
}
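
For orientation, sqlite3ExprAddCollateToken() simply wraps the existing expression in a new TK_COLLATE node whose left child is the original tree. A minimal standalone sketch of that wrap-a-node pattern follows; the Node type, add_collate() and NODE_COLLATE are illustrative stand-ins, not the SQLite structures:

#include <stdlib.h>

/* Illustrative node type; SQLite's Expr is far richer. */
typedef struct Node Node;
struct Node {
  int op;                /* operator tag, e.g. NODE_COLLATE */
  const char *zColl;     /* collation name for NODE_COLLATE nodes */
  Node *pLeft;           /* wrapped expression */
};

enum { NODE_COLLATE = 1 };

/* Wrap pExpr in a new COLLATE node naming collation zColl.  On
** allocation failure the original expression is returned unchanged,
** mirroring the convention described above. */
static Node *add_collate(Node *pExpr, const char *zColl){
  Node *pNew = calloc(1, sizeof(*pNew));
  if( pNew==0 ) return pExpr;   /* OOM: hand back the original */
  pNew->op = NODE_COLLATE;
  pNew->zColl = zColl;
  pNew->pLeft = pExpr;          /* original tree becomes the child */
  return pNew;
}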
/*
-** Skip over any TK_COLLATE operators and any unlikely()
+** Skip over any TK_COLLATE or TK_AS operators and any unlikely()
** or likelihood() function at the root of an expression.
*/
SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){
@@ -85084,7 +76708,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){
assert( pExpr->op==TK_FUNCTION );
pExpr = pExpr->x.pList->a[0].pExpr;
}else{
- assert( pExpr->op==TK_COLLATE );
+ assert( pExpr->op==TK_COLLATE || pExpr->op==TK_AS );
pExpr = pExpr->pLeft;
}
}
@@ -85106,7 +76730,6 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){
Expr *p = pExpr;
while( p ){
int op = p->op;
- if( p->flags & EP_Generic ) break;
if( op==TK_CAST || op==TK_UPLUS ){
p = p->pLeft;
continue;
@@ -85115,9 +76738,9 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){
pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken);
break;
}
- if( (op==TK_AGG_COLUMN || op==TK_COLUMN
+ if( p->pTab!=0
+ && (op==TK_AGG_COLUMN || op==TK_COLUMN
|| op==TK_REGISTER || op==TK_TRIGGER)
- && p->pTab!=0
){
/* op==TK_REGISTER && p->pTab!=0 happens when pExpr was originally
** a TK_COLUMN but was previously evaluated and cached in a register */
@@ -85129,25 +76752,10 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){
break;
}
if( p->flags & EP_Collate ){
- if( p->pLeft && (p->pLeft->flags & EP_Collate)!=0 ){
+ if( ALWAYS(p->pLeft) && (p->pLeft->flags & EP_Collate)!=0 ){
p = p->pLeft;
}else{
- Expr *pNext = p->pRight;
- /* The Expr.x union is never used at the same time as Expr.pRight */
- assert( p->x.pList==0 || p->pRight==0 );
- /* p->flags holds EP_Collate and p->pLeft->flags does not. And
- ** p->x.pSelect cannot. So if p->x.pLeft exists, it must hold at
- ** least one EP_Collate. Thus the following two ALWAYS. */
- if( p->x.pList!=0 && ALWAYS(!ExprHasProperty(p, EP_xIsSelect)) ){
- int i;
- for(i=0; ALWAYS(i<p->x.pList->nExpr); i++){
- if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){
- pNext = p->x.pList->a[i].pExpr;
- break;
- }
- }
- }
- p = pNext;
+ p = p->pRight;
}
}else{
break;
@@ -85173,13 +76781,13 @@ SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2){
if( sqlite3IsNumericAffinity(aff1) || sqlite3IsNumericAffinity(aff2) ){
return SQLITE_AFF_NUMERIC;
}else{
- return SQLITE_AFF_BLOB;
+ return SQLITE_AFF_NONE;
}
}else if( !aff1 && !aff2 ){
/* Neither side of the comparison is a column. Compare the
** results directly.
*/
- return SQLITE_AFF_BLOB;
+ return SQLITE_AFF_NONE;
}else{
/* One side is a column, the other is not. Use the columns affinity. */
assert( aff1==0 || aff2==0 );
@@ -85203,7 +76811,7 @@ static char comparisonAffinity(Expr *pExpr){
}else if( ExprHasProperty(pExpr, EP_xIsSelect) ){
aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff);
}else if( !aff ){
- aff = SQLITE_AFF_BLOB;
+ aff = SQLITE_AFF_NONE;
}
return aff;
}
@@ -85217,7 +76825,7 @@ static char comparisonAffinity(Expr *pExpr){
SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity){
char aff = comparisonAffinity(pExpr);
switch( aff ){
- case SQLITE_AFF_BLOB:
+ case SQLITE_AFF_NONE:
return 1;
case SQLITE_AFF_TEXT:
return idx_affinity==SQLITE_AFF_TEXT;
@@ -85353,9 +76961,6 @@ static void heightOfSelect(Select *p, int *pnHeight){
** Expr.pSelect member has a height of 1. Any other expression
** has a height equal to the maximum height of any other
** referenced Expr plus one.
-**
-** Also propagate EP_Propagate flags up from Expr.x.pList to Expr.flags,
-** if appropriate.
*/
static void exprSetHeight(Expr *p){
int nHeight = 0;
@@ -85363,9 +76968,8 @@ static void exprSetHeight(Expr *p){
heightOfExpr(p->pRight, &nHeight);
if( ExprHasProperty(p, EP_xIsSelect) ){
heightOfSelect(p->x.pSelect, &nHeight);
- }else if( p->x.pList ){
+ }else{
heightOfExprList(p->x.pList, &nHeight);
- p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList);
}
p->nHeight = nHeight + 1;
}
@@ -85374,12 +76978,8 @@ static void exprSetHeight(Expr *p){
** Set the Expr.nHeight variable using the exprSetHeight() function. If
** the height is greater than the maximum allowed expression depth,
** leave an error in pParse.
-**
-** Also propagate all EP_Propagate flags from the Expr.x.pList into
-** Expr.flags.
*/
-SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
- if( pParse->nErr ) return;
+SQLITE_PRIVATE void sqlite3ExprSetHeight(Parse *pParse, Expr *p){
exprSetHeight(p);
sqlite3ExprCheckHeight(pParse, p->nHeight);
}
@@ -85393,17 +76993,8 @@ SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){
heightOfSelect(p, &nHeight);
return nHeight;
}
-#else /* ABOVE: Height enforcement enabled. BELOW: Height enforcement off */
-/*
-** Propagate all EP_Propagate flags from the Expr.x.pList into
-** Expr.flags.
-*/
-SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
- if( p && p->x.pList && !ExprHasProperty(p, EP_xIsSelect) ){
- p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList);
- }
-}
-#define exprSetHeight(y)
+#else
+ #define exprSetHeight(y)
#endif /* SQLITE_MAX_EXPR_DEPTH>0 */
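
The exprSetHeight() logic above computes a node's height as one more than the tallest of its subtrees, which is the quantity the depth limit is checked against. A tiny self-contained sketch of the same recurrence; Node and node_height() are illustrative names, not SQLite's types:

typedef struct Node Node;
struct Node {
  Node *pLeft;
  Node *pRight;
};

/* Height of a tree: an empty tree is 0, otherwise 1 + max(children). */
static int node_height(const Node *p){
  int hl, hr;
  if( p==0 ) return 0;
  hl = node_height(p->pLeft);
  hr = node_height(p->pRight);
  return 1 + (hl>hr ? hl : hr);
}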
/*
@@ -85415,7 +77006,7 @@ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
** is responsible for making sure the node eventually gets freed.
**
** If dequote is true, then the token (if it exists) is dequoted.
-** If dequote is false, no dequoting is performed. The deQuote
+** If dequote is false, no dequoting is performance. The deQuote
** parameter is ignored if pToken is NULL or if the token does not
** appear to be quoted. If the quotes were of the form "..." (double-quotes)
** then the EP_DblQuoted flag is set on the expression node.
@@ -85505,18 +77096,18 @@ SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(
}else{
if( pRight ){
pRoot->pRight = pRight;
- pRoot->flags |= EP_Propagate & pRight->flags;
+ pRoot->flags |= EP_Collate & pRight->flags;
}
if( pLeft ){
pRoot->pLeft = pLeft;
- pRoot->flags |= EP_Propagate & pLeft->flags;
+ pRoot->flags |= EP_Collate & pLeft->flags;
}
exprSetHeight(pRoot);
}
}
/*
-** Allocate an Expr node which joins as many as two subtrees.
+** Allocate a Expr node which joins as many as two subtrees.
**
** One or both of the subtrees can be NULL. Return a pointer to the new
** Expr node. Or, if an OOM error occurs, set pParse->db->mallocFailed,
@@ -85530,7 +77121,7 @@ SQLITE_PRIVATE Expr *sqlite3PExpr(
const Token *pToken /* Argument token */
){
Expr *p;
- if( op==TK_AND && pLeft && pRight && pParse->nErr==0 ){
+ if( op==TK_AND && pLeft && pRight ){
/* Take advantage of short-circuit false optimization for AND */
p = sqlite3ExprAnd(pParse->db, pLeft, pRight);
}else{
@@ -85544,25 +77135,16 @@ SQLITE_PRIVATE Expr *sqlite3PExpr(
}
/*
-** If the expression is always either TRUE or FALSE (respectively),
-** then return 1. If one cannot determine the truth value of the
-** expression at compile-time return 0.
-**
-** This is an optimization. If is OK to return 0 here even if
-** the expression really is always false or false (a false negative).
-** But it is a bug to return 1 if the expression might have different
-** boolean values in different circumstances (a false positive.)
+** Return 1 if an expression must be FALSE in all cases and 0 if the
+** expression might be true. This is an optimization. It is OK to
+** return 0 here even if the expression really is always false (a
+** false negative). But it is a bug to return 1 if the expression
+** might be true in some rare circumstances (a false positive.)
**
 ** Note that if the expression is part of the conditional for a
 ** LEFT JOIN, then we cannot determine at compile-time whether or not
 ** it is true or false, so always return 0.
*/
-static int exprAlwaysTrue(Expr *p){
- int v = 0;
- if( ExprHasProperty(p, EP_FromJoin) ) return 0;
- if( !sqlite3ExprIsInteger(p, &v) ) return 0;
- return v!=0;
-}
static int exprAlwaysFalse(Expr *p){
int v = 0;
if( ExprHasProperty(p, EP_FromJoin) ) return 0;
@@ -85609,7 +77191,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *
}
pNew->x.pList = pList;
assert( !ExprHasProperty(pNew, EP_xIsSelect) );
- sqlite3ExprSetHeightAndFlags(pParse, pNew);
+ sqlite3ExprSetHeight(pParse, pNew);
return pNew;
}
@@ -85626,7 +77208,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *
**
** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number
** as the previous instance of the same wildcard. Or if this is the first
-** instance of the wildcard, the next sequential variable number is
+** instance of the wildcard, the next sequenial variable number is
** assigned.
*/
SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){
@@ -85761,7 +77343,7 @@ static int exprStructSize(Expr *p){
** During expression analysis, extra information is computed and moved into
 ** later parts of the Expr object and that extra information might get chopped
** off if the expression is reduced. Note also that it does not work to
-** make an EXPRDUP_REDUCE copy of a reduced expression. It is only legal
+** make a EXPRDUP_REDUCE copy of a reduced expression. It is only legal
** to reduce a pristine expression tree from the parser. The implementation
** of dupedExprStructSize() contain multiple assert() statements that attempt
** to enforce this constraint.
@@ -85830,7 +77412,7 @@ static int dupedExprSize(Expr *p, int flags){
** is not NULL then *pzBuffer is assumed to point to a buffer large enough
** to store the copy of expression p, the copies of p->u.zToken
** (if applicable), and the copies of the p->pLeft and p->pRight expressions,
-** if any. Before returning, *pzBuffer is set to the first byte past the
+** if any. Before returning, *pzBuffer is set to the first byte passed the
** portion of the buffer copied into by this function.
*/
static Expr *exprDup(sqlite3 *db, Expr *p, int flags, u8 **pzBuffer){
@@ -85917,33 +77499,6 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int flags, u8 **pzBuffer){
}
/*
-** Create and return a deep copy of the object passed as the second
-** argument. If an OOM condition is encountered, NULL is returned
-** and the db->mallocFailed flag set.
-*/
-#ifndef SQLITE_OMIT_CTE
-static With *withDup(sqlite3 *db, With *p){
- With *pRet = 0;
- if( p ){
- int nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1);
- pRet = sqlite3DbMallocZero(db, nByte);
- if( pRet ){
- int i;
- pRet->nCte = p->nCte;
- for(i=0; i<p->nCte; i++){
- pRet->a[i].pSelect = sqlite3SelectDup(db, p->a[i].pSelect, 0);
- pRet->a[i].pCols = sqlite3ExprListDup(db, p->a[i].pCols, 0);
- pRet->a[i].zName = sqlite3DbStrDup(db, p->a[i].zName);
- }
- }
- }
- return pRet;
-}
-#else
-# define withDup(x,y) 0
-#endif
-
-/*
** The following group of routines make deep copies of expressions,
** expression lists, ID lists, and select statements. The copies can
** be deleted (by being passed to their respective ...Delete() routines)
@@ -85970,6 +77525,7 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags)
if( p==0 ) return 0;
pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) );
if( pNew==0 ) return 0;
+ pNew->iECursor = 0;
pNew->nExpr = i = p->nExpr;
if( (flags & EXPRDUP_REDUCE)==0 ) for(i=1; i<p->nExpr; i+=i){}
pNew->a = pItem = sqlite3DbMallocRaw(db, i*sizeof(p->a[0]) );
@@ -86016,18 +77572,15 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){
pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase);
pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName);
pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias);
- pNewItem->fg = pOldItem->fg;
+ pNewItem->jointype = pOldItem->jointype;
pNewItem->iCursor = pOldItem->iCursor;
pNewItem->addrFillSub = pOldItem->addrFillSub;
pNewItem->regReturn = pOldItem->regReturn;
- if( pNewItem->fg.isIndexedBy ){
- pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy);
- }
- pNewItem->pIBIndex = pOldItem->pIBIndex;
- if( pNewItem->fg.isTabFunc ){
- pNewItem->u1.pFuncArg =
- sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags);
- }
+ pNewItem->isCorrelated = pOldItem->isCorrelated;
+ pNewItem->viaCoroutine = pOldItem->viaCoroutine;
+ pNewItem->zIndex = sqlite3DbStrDup(db, pOldItem->zIndex);
+ pNewItem->notIndexed = pOldItem->notIndexed;
+ pNewItem->pIndex = pOldItem->pIndex;
pTab = pNewItem->pTab = pOldItem->pTab;
if( pTab ){
pTab->nRef++;
@@ -86082,11 +77635,10 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){
pNew->iLimit = 0;
pNew->iOffset = 0;
pNew->selFlags = p->selFlags & ~SF_UsesEphemeral;
+ pNew->pRightmost = 0;
pNew->addrOpenEphm[0] = -1;
pNew->addrOpenEphm[1] = -1;
- pNew->nSelectRow = p->nSelectRow;
- pNew->pWith = withDup(db, p->pWith);
- sqlite3SelectSetName(pNew, p->zSelName);
+ pNew->addrOpenEphm[2] = -1;
return pNew;
}
#else
@@ -86143,20 +77695,6 @@ no_mem:
}
/*
-** Set the sort order for the last element on the given ExprList.
-*/
-SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList *p, int iSortOrder){
- if( p==0 ) return;
- assert( SQLITE_SO_UNDEFINED<0 && SQLITE_SO_ASC>=0 && SQLITE_SO_DESC>0 );
- assert( p->nExpr>0 );
- if( iSortOrder<0 ){
- assert( p->a[p->nExpr-1].sortOrder==SQLITE_SO_ASC );
- return;
- }
- p->a[p->nExpr-1].sortOrder = (u8)iSortOrder;
-}
-
-/*
** Set the ExprList.a[].zName element of the most recently added item
** on the expression list.
**
@@ -86241,67 +77779,37 @@ SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
}
/*
-** Return the bitwise-OR of all Expr.flags fields in the given
-** ExprList.
-*/
-SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList *pList){
- int i;
- u32 m = 0;
- if( pList ){
- for(i=0; i<pList->nExpr; i++){
- Expr *pExpr = pList->a[i].pExpr;
- if( ALWAYS(pExpr) ) m |= pExpr->flags;
- }
- }
- return m;
-}
-
-/*
-** These routines are Walker callbacks used to check expressions to
-** see if they are "constant" for some definition of constant. The
-** Walker.eCode value determines the type of "constant" we are looking
-** for.
+** These routines are Walker callbacks. Walker.u.pi is a pointer
+** to an integer. These routines are checking an expression to see
+** if it is a constant. Set *Walker.u.pi to 0 if the expression is
+** not constant.
**
** These callback routines are used to implement the following:
**
-** sqlite3ExprIsConstant() pWalker->eCode==1
-** sqlite3ExprIsConstantNotJoin() pWalker->eCode==2
-** sqlite3ExprIsTableConstant() pWalker->eCode==3
-** sqlite3ExprIsConstantOrFunction() pWalker->eCode==4 or 5
+** sqlite3ExprIsConstant()
+** sqlite3ExprIsConstantNotJoin()
+** sqlite3ExprIsConstantOrFunction()
**
-** In all cases, the callbacks set Walker.eCode=0 and abort if the expression
-** is found to not be a constant.
-**
-** The sqlite3ExprIsConstantOrFunction() is used for evaluating expressions
-** in a CREATE TABLE statement. The Walker.eCode value is 5 when parsing
-** an existing schema and 4 when processing a new statement. A bound
-** parameter raises an error for new statements, but is silently converted
-** to NULL for existing schemas. This allows sqlite_master tables that
-** contain a bound parameter because they were generated by older versions
-** of SQLite to be parsed by newer versions of SQLite without raising a
-** malformed schema error.
*/
static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
- /* If pWalker->eCode is 2 then any term of the expression that comes from
- ** the ON or USING clauses of a left join disqualifies the expression
+ /* If pWalker->u.i is 3 then any term of the expression that comes from
+ ** the ON or USING clauses of a join disqualifies the expression
** from being considered constant. */
- if( pWalker->eCode==2 && ExprHasProperty(pExpr, EP_FromJoin) ){
- pWalker->eCode = 0;
+ if( pWalker->u.i==3 && ExprHasProperty(pExpr, EP_FromJoin) ){
+ pWalker->u.i = 0;
return WRC_Abort;
}
switch( pExpr->op ){
/* Consider functions to be constant if all their arguments are constant
- ** and either pWalker->eCode==4 or 5 or the function has the
- ** SQLITE_FUNC_CONST flag. */
+    ** and either pWalker->u.i==2 or the function has the SQLITE_FUNC_CONST
+ ** flag. */
case TK_FUNCTION:
- if( pWalker->eCode>=4 || ExprHasProperty(pExpr,EP_ConstFunc) ){
+ if( pWalker->u.i==2 || ExprHasProperty(pExpr,EP_Constant) ){
return WRC_Continue;
- }else{
- pWalker->eCode = 0;
- return WRC_Abort;
}
+ /* Fall through */
case TK_ID:
case TK_COLUMN:
case TK_AGG_FUNCTION:
@@ -86310,25 +77818,8 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_COLUMN );
testcase( pExpr->op==TK_AGG_FUNCTION );
testcase( pExpr->op==TK_AGG_COLUMN );
- if( pWalker->eCode==3 && pExpr->iTable==pWalker->u.iCur ){
- return WRC_Continue;
- }else{
- pWalker->eCode = 0;
- return WRC_Abort;
- }
- case TK_VARIABLE:
- if( pWalker->eCode==5 ){
- /* Silently convert bound parameters that appear inside of CREATE
- ** statements into a NULL when parsing the CREATE statement text out
- ** of the sqlite_master table */
- pExpr->op = TK_NULL;
- }else if( pWalker->eCode==4 ){
- /* A bound parameter in a CREATE statement that originates from
- ** sqlite3_prepare() causes an error */
- pWalker->eCode = 0;
- return WRC_Abort;
- }
- /* Fall through */
+ pWalker->u.i = 0;
+ return WRC_Abort;
default:
testcase( pExpr->op==TK_SELECT ); /* selectNodeIsConstant will disallow */
testcase( pExpr->op==TK_EXISTS ); /* selectNodeIsConstant will disallow */
@@ -86337,22 +77828,21 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
}
static int selectNodeIsConstant(Walker *pWalker, Select *NotUsed){
UNUSED_PARAMETER(NotUsed);
- pWalker->eCode = 0;
+ pWalker->u.i = 0;
return WRC_Abort;
}
-static int exprIsConst(Expr *p, int initFlag, int iCur){
+static int exprIsConst(Expr *p, int initFlag){
Walker w;
memset(&w, 0, sizeof(w));
- w.eCode = initFlag;
+ w.u.i = initFlag;
w.xExprCallback = exprNodeIsConstant;
w.xSelectCallback = selectNodeIsConstant;
- w.u.iCur = iCur;
sqlite3WalkExpr(&w, p);
- return w.eCode;
+ return w.u.i;
}
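
The exprIsConst() helpers use the generic walker in a common way: seed a flag, visit every node, and have the callback clear the flag and abort the walk as soon as a disqualifying node (a column reference, a variable, a non-constant function) is seen. A standalone sketch of that pattern over a toy node type; TNode, the TOK_* codes and is_constant() are illustrative only:

typedef struct TNode TNode;
struct TNode {
  int op;                  /* TOK_LITERAL, TOK_COLUMN, ... */
  TNode *pLeft, *pRight;
};

enum { TOK_LITERAL, TOK_COLUMN, TOK_VARIABLE, TOK_PLUS };

/* Depth-first walk; returns 0 as soon as a non-constant node is found. */
static int is_constant(const TNode *p){
  if( p==0 ) return 1;
  switch( p->op ){
    case TOK_COLUMN:
    case TOK_VARIABLE:
      return 0;            /* disqualifying node: stop the walk */
    default:
      return is_constant(p->pLeft) && is_constant(p->pRight);
  }
}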
/*
-** Walk an expression tree. Return non-zero if the expression is constant
+** Walk an expression tree. Return 1 if the expression is constant
** and 0 if it involves variables or function calls.
**
** For the purposes of this function, a double-quoted string (ex: "abc")
@@ -86360,31 +77850,21 @@ static int exprIsConst(Expr *p, int initFlag, int iCur){
** a constant.
*/
SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){
- return exprIsConst(p, 1, 0);
+ return exprIsConst(p, 1);
}
/*
-** Walk an expression tree. Return non-zero if the expression is constant
+** Walk an expression tree. Return 1 if the expression is constant
 ** that does not originate from the ON or USING clauses of a join.
** Return 0 if it involves variables or function calls or terms from
** an ON or USING clause.
*/
SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){
- return exprIsConst(p, 2, 0);
+ return exprIsConst(p, 3);
}
/*
-** Walk an expression tree. Return non-zero if the expression is constant
-** for any single row of the table with cursor iCur. In other words, the
-** expression must not refer to any non-deterministic function nor any
-** table other than iCur.
-*/
-SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){
- return exprIsConst(p, 3, iCur);
-}
-
-/*
-** Walk an expression tree. Return non-zero if the expression is constant
+** Walk an expression tree. Return 1 if the expression is constant
 ** or a function call with constant arguments. Return 0 if there
** are any variables.
**
@@ -86392,9 +77872,8 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){
** is considered a variable but a single-quoted string (ex: 'abc') is
** a constant.
*/
-SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){
- assert( isInit==0 || isInit==1 );
- return exprIsConst(p, 4+isInit, 0);
+SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p){
+ return exprIsConst(p, 2);
}
/*
@@ -86459,16 +77938,30 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
case TK_FLOAT:
case TK_BLOB:
return 0;
- case TK_COLUMN:
- assert( p->pTab!=0 );
- return ExprHasProperty(p, EP_CanBeNull) ||
- (p->iColumn>=0 && p->pTab->aCol[p->iColumn].notNull==0);
default:
return 1;
}
}
/*
+** Generate an OP_IsNull instruction that tests register iReg and jumps
+** to location iDest if the value in iReg is NULL. The value in iReg
+** was computed by pExpr. If we can look at pExpr at compile-time and
+** determine that it can never generate a NULL, then the OP_IsNull operation
+** can be omitted.
+*/
+SQLITE_PRIVATE void sqlite3ExprCodeIsNullJump(
+ Vdbe *v, /* The VDBE under construction */
+ const Expr *pExpr, /* Only generate OP_IsNull if this expr can be NULL */
+ int iReg, /* Test the value in this register for NULL */
+ int iDest /* Jump here if the value is null */
+){
+ if( sqlite3ExprCanBeNull(pExpr) ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iDest);
+ }
+}
+
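+
The sqlite3ExprCodeIsNullJump() routine added above leans on sqlite3ExprCanBeNull(): when the expression is a plain literal the NULL test can be skipped at compile time. A minimal sketch of that short cut; Lit, emit_is_null_jump() and the printed opcode are illustrative stand-ins, not SQLite code:

#include <stdio.h>

typedef struct Lit { int isLiteral; } Lit;     /* toy stand-in for Expr */

/* Only emit the NULL test when the value might actually be NULL. */
static void emit_is_null_jump(const Lit *p, int iReg, int iDest){
  if( p && p->isLiteral ) return;              /* literals are never NULL */
  printf("IsNull r%d -> %d\n", iReg, iDest);   /* stand-in for OP_IsNull */
}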
+/*
** Return TRUE if the given expression is a constant which would be
** unchanged by OP_Affinity with the affinity given in the second
** argument.
@@ -86480,7 +77973,7 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
*/
SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr *p, char aff){
u8 op;
- if( aff==SQLITE_AFF_BLOB ) return 1;
+ if( aff==SQLITE_AFF_NONE ) return 1;
while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; }
op = p->op;
if( op==TK_REGISTER ) op = p->op2;
@@ -86571,40 +78064,6 @@ SQLITE_PRIVATE int sqlite3CodeOnce(Parse *pParse){
}
/*
-** Generate code that checks the left-most column of index table iCur to see if
-** it contains any NULL entries. Cause the register at regHasNull to be set
-** to a non-NULL value if iCur contains no NULLs. Cause register regHasNull
-** to be set to NULL if iCur contains one or more NULL values.
-*/
-static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){
- int addr1;
- sqlite3VdbeAddOp2(v, OP_Integer, 0, regHasNull);
- addr1 = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_Column, iCur, 0, regHasNull);
- sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
- VdbeComment((v, "first_entry_in(%d)", iCur));
- sqlite3VdbeJumpHere(v, addr1);
-}
-
-
-#ifndef SQLITE_OMIT_SUBQUERY
-/*
-** The argument is an IN operator with a list (not a subquery) on the
-** right-hand side. Return TRUE if that list is constant.
-*/
-static int sqlite3InRhsIsConstant(Expr *pIn){
- Expr *pLHS;
- int res;
- assert( !ExprHasProperty(pIn, EP_xIsSelect) );
- pLHS = pIn->pLeft;
- pIn->pLeft = 0;
- res = sqlite3ExprIsConstant(pIn);
- pIn->pLeft = pLHS;
- return res;
-}
-#endif
-
-/*
** This function is used by the implementation of the IN (...) operator.
** The pX parameter is the expression on the RHS of the IN operator, which
** might be either a list of expressions or a subquery.
@@ -86613,7 +78072,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** be used either to test for membership in the RHS set or to iterate through
** all members of the RHS set, skipping duplicates.
**
-** A cursor is opened on the b-tree object that is the RHS of the IN operator
+** A cursor is opened on the b-tree object that the RHS of the IN operator
** and pX->iTable is set to the index of that cursor.
**
** The returned value of this function indicates the b-tree type, as follows:
@@ -86623,8 +78082,6 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index.
** IN_INDEX_EPH - The cursor was opened on a specially created and
 **                    populated ephemeral table.
-** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be
-** implemented as a sequence of comparisons.
**
** An existing b-tree might be used if the RHS expression pX is a simple
** subquery such as:
@@ -86633,64 +78090,59 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
**
** If the RHS of the IN operator is a list or a more complex subquery, then
** an ephemeral table might need to be generated from the RHS and then
-** pX->iTable made to point to the ephemeral table instead of an
-** existing table.
-**
-** The inFlags parameter must contain exactly one of the bits
-** IN_INDEX_MEMBERSHIP or IN_INDEX_LOOP. If inFlags contains
-** IN_INDEX_MEMBERSHIP, then the generated table will be used for a
-** fast membership test. When the IN_INDEX_LOOP bit is set, the
-** IN index will be used to loop over all values of the RHS of the
-** IN operator.
-**
-** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate
-** through the set members) then the b-tree must not contain duplicates.
-** An epheremal table must be used unless the selected <column> is guaranteed
+** pX->iTable made to point to the ephemeral table instead of an
+** existing table.
+**
+** If the prNotFound parameter is 0, then the b-tree will be used to iterate
+** through the set members, skipping any duplicates. In this case an
+** ephemeral table must be used unless the selected <column> is guaranteed
** to be unique - either because it is an INTEGER PRIMARY KEY or it
** has a UNIQUE constraint or UNIQUE index.
**
-** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used
-** for fast set membership tests) then an epheremal table must
+** If the prNotFound parameter is not 0, then the b-tree will be used
+** for fast set membership tests. In this case an ephemeral table must
** be used unless <column> is an INTEGER PRIMARY KEY or an index can
** be found with <column> as its left-most column.
**
-** If the IN_INDEX_NOOP_OK and IN_INDEX_MEMBERSHIP are both set and
-** if the RHS of the IN operator is a list (not a subquery) then this
-** routine might decide that creating an ephemeral b-tree for membership
-** testing is too expensive and return IN_INDEX_NOOP. In that case, the
-** calling routine should implement the IN operator using a sequence
-** of Eq or Ne comparison operations.
-**
** When the b-tree is being used for membership tests, the calling function
-** might need to know whether or not the RHS side of the IN operator
-** contains a NULL. If prRhsHasNull is not a NULL pointer and
-** if there is any chance that the (...) might contain a NULL value at
+** needs to know whether or not the structure contains an SQL NULL
+** value in order to correctly evaluate expressions like "X IN (Y, Z)".
+** If there is any chance that the (...) might contain a NULL value at
** runtime, then a register is allocated and the register number written
-** to *prRhsHasNull. If there is no chance that the (...) contains a
-** NULL value, then *prRhsHasNull is left unchanged.
+** to *prNotFound. If there is no chance that the (...) contains a
+** NULL value, then *prNotFound is left unchanged.
+**
+** If a register is allocated and its location stored in *prNotFound, then
+** its initial value is NULL. If the (...) does not remain constant
+** for the duration of the query (i.e. the SELECT within the (...)
+** is a correlated subquery) then the value of the allocated register is
+** reset to NULL each time the subquery is rerun. This allows the
+** caller to use vdbe code equivalent to the following:
+**
+** if( register==NULL ){
+** has_null = <test if data structure contains null>
+** register = 1
+** }
**
-** If a register is allocated and its location stored in *prRhsHasNull, then
-** the value in that register will be NULL if the b-tree contains one or more
-** NULL values, and it will be some non-NULL value if the b-tree contains no
-** NULL values.
+** in order to avoid running the <test if data structure contains null>
+** test more often than is necessary.
*/
#ifndef SQLITE_OMIT_SUBQUERY
-SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int *prRhsHasNull){
+SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, int *prNotFound){
Select *p; /* SELECT to the right of IN operator */
int eType = 0; /* Type of RHS table. IN_INDEX_* */
int iTab = pParse->nTab++; /* Cursor of the RHS table */
- int mustBeUnique; /* True if RHS must be unique */
+ int mustBeUnique = (prNotFound==0); /* True if RHS must be unique */
Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */
assert( pX->op==TK_IN );
- mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;
/* Check to see if an existing table or index can be used to
** satisfy the query. This is preferable to generating a new
** ephemeral table.
*/
p = (ExprHasProperty(pX, EP_xIsSelect) ? pX->x.pSelect : 0);
- if( pParse->nErr==0 && isCandidateForInOpt(p) ){
+ if( ALWAYS(pParse->nErr==0) && isCandidateForInOpt(p) ){
sqlite3 *db = pParse->db; /* Database connection */
Table *pTab; /* Table <table>. */
Expr *pExpr; /* Expression <column> */
@@ -86705,7 +78157,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
pExpr = p->pEList->a[0].pExpr;
iCol = (i16)pExpr->iColumn;
- /* Code an OP_Transaction and OP_TableLock for <table>. */
+ /* Code an OP_VerifyCookie and OP_TableLock for <table>. */
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
sqlite3CodeVerifySchema(pParse, iDb);
sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
@@ -86716,8 +78168,9 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
*/
assert(v);
if( iCol<0 ){
- int iAddr = sqlite3CodeOnce(pParse);
- VdbeCoverage(v);
+ int iAddr;
+
+ iAddr = sqlite3CodeOnce(pParse);
sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead);
eType = IN_INDEX_ROWID;
@@ -86740,55 +78193,41 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
for(pIdx=pTab->pIndex; pIdx && eType==0 && affinity_ok; pIdx=pIdx->pNext){
if( (pIdx->aiColumn[0]==iCol)
&& sqlite3FindCollSeq(db, ENC(db), pIdx->azColl[0], 0)==pReq
- && (!mustBeUnique || (pIdx->nKeyCol==1 && IsUniqueIndex(pIdx)))
+ && (!mustBeUnique || (pIdx->nKeyCol==1 && pIdx->onError!=OE_None))
){
- int iAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ int iAddr = sqlite3CodeOnce(pParse);
sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb);
sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
VdbeComment((v, "%s", pIdx->zName));
assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 );
eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0];
- if( prRhsHasNull && !pTab->aCol[iCol].notNull ){
- *prRhsHasNull = ++pParse->nMem;
- sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull);
- }
sqlite3VdbeJumpHere(v, iAddr);
+ if( prNotFound && !pTab->aCol[iCol].notNull ){
+ *prNotFound = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, *prNotFound);
+ }
}
}
}
}
- /* If no preexisting index is available for the IN clause
- ** and IN_INDEX_NOOP is an allowed reply
- ** and the RHS of the IN operator is a list, not a subquery
- ** and the RHS is not contant or has two or fewer terms,
- ** then it is not worth creating an ephemeral table to evaluate
- ** the IN operator so return IN_INDEX_NOOP.
- */
- if( eType==0
- && (inFlags & IN_INDEX_NOOP_OK)
- && !ExprHasProperty(pX, EP_xIsSelect)
- && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2)
- ){
- eType = IN_INDEX_NOOP;
- }
-
-
if( eType==0 ){
- /* Could not find an existing table or index to use as the RHS b-tree.
+ /* Could not found an existing table or index to use as the RHS b-tree.
** We will have to generate an ephemeral table to do the job.
*/
u32 savedNQueryLoop = pParse->nQueryLoop;
int rMayHaveNull = 0;
eType = IN_INDEX_EPH;
- if( inFlags & IN_INDEX_LOOP ){
+ if( prNotFound ){
+ *prNotFound = rMayHaveNull = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, *prNotFound);
+ }else{
+ testcase( pParse->nQueryLoop>0 );
pParse->nQueryLoop = 0;
if( pX->pLeft->iColumn<0 && !ExprHasProperty(pX, EP_xIsSelect) ){
eType = IN_INDEX_ROWID;
}
- }else if( prRhsHasNull ){
- *prRhsHasNull = rMayHaveNull = ++pParse->nMem;
}
sqlite3CodeSubselect(pParse, pX, rMayHaveNull, eType==IN_INDEX_ROWID);
pParse->nQueryLoop = savedNQueryLoop;
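
The comment block above sketches, in VDBE terms, how callers avoid re-testing the RHS for NULLs: a register starts out NULL, and the expensive scan runs only the first time it is seen to be unset. The same memoization pattern in plain C; Rhs, has_null_cached() and the field names are illustrative, not SQLite code:

typedef struct Rhs {
  const int *a;        /* values on the RHS of the IN */
  const int *aIsNull;  /* 1 if a[i] represents SQL NULL */
  int n;               /* number of RHS entries */
  int cached;          /* 0 = not yet computed, 1 = computed */
  int hasNull;         /* valid only when cached!=0 */
} Rhs;

/* Scan for a NULL at most once; later calls reuse the cached answer. */
static int has_null_cached(Rhs *p){
  if( !p->cached ){
    int i;
    p->hasNull = 0;
    for(i=0; i<p->n; i++){
      if( p->aIsNull[i] ){ p->hasNull = 1; break; }
    }
    p->cached = 1;
  }
  return p->hasNull;
}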
@@ -86819,9 +78258,15 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
**
** If rMayHaveNull is non-zero, that means that the operation is an IN
 ** (not a SELECT or EXISTS) and that the RHS might contain NULLs.
-** All this routine does is initialize the register given by rMayHaveNull
-** to NULL. Calling routines will take care of changing this register
-** value to non-NULL if the RHS is NULL-free.
+** Furthermore, the IN is in a WHERE clause and that we really want
+** to iterate over the RHS of the IN operator in order to quickly locate
+** all corresponding LHS elements. All this routine does is initialize
+** the register given by rMayHaveNull to NULL. Calling routines will take
+** care of changing this register value to non-NULL if the RHS is NULL-free.
+**
+** If rMayHaveNull is zero, that means that the subquery is being used
+** for membership testing only. There is no need to initialize any
+** registers to indicate the presence or absence of NULLs on the RHS.
**
** For a SELECT or EXISTS operator, return the register that holds the
** result. For IN operators or if an error occurs, the return value is 0.
@@ -86830,10 +78275,10 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
SQLITE_PRIVATE int sqlite3CodeSubselect(
Parse *pParse, /* Parsing context */
Expr *pExpr, /* The IN, SELECT, or EXISTS operator */
- int rHasNullFlag, /* Register that records whether NULLs exist in RHS */
+ int rMayHaveNull, /* Register that records whether NULLs exist in RHS */
int isRowid /* If true, LHS of IN operator is a rowid */
){
- int jmpIfDynamic = -1; /* One-time test address */
+ int testAddr = -1; /* One-time test address */
int rReg = 0; /* Register storing resulting */
Vdbe *v = sqlite3GetVdbe(pParse);
if( NEVER(v==0) ) return 0;
@@ -86850,13 +78295,13 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** save the results, and reuse the same result on subsequent invocations.
*/
if( !ExprHasProperty(pExpr, EP_VarSelect) ){
- jmpIfDynamic = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ testAddr = sqlite3CodeOnce(pParse);
}
#ifndef SQLITE_OMIT_EXPLAIN
if( pParse->explain==2 ){
char *zMsg = sqlite3MPrintf(
- pParse->db, "EXECUTE %s%s SUBQUERY %d", jmpIfDynamic>=0?"":"CORRELATED ",
+ pParse->db, "EXECUTE %s%s SUBQUERY %d", testAddr>=0?"":"CORRELATED ",
pExpr->op==TK_IN?"LIST":"SCALAR", pParse->iNextSelectId
);
sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC);
@@ -86870,6 +78315,10 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
Expr *pLeft = pExpr->pLeft; /* the LHS of the IN operator */
KeyInfo *pKeyInfo = 0; /* Key information */
+ if( rMayHaveNull ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, rMayHaveNull);
+ }
+
affinity = sqlite3ExprAffinity(pLeft);
/* Whether this is an 'x IN(SELECT...)' or an 'x IN(<exprlist>)'
@@ -86887,6 +78336,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
*/
pExpr->iTable = pParse->nTab++;
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, !isRowid);
+ if( rMayHaveNull==0 ) sqlite3VdbeChangeP5(v, BTREE_UNORDERED);
pKeyInfo = isRowid ? 0 : sqlite3KeyInfoAlloc(pParse->db, 1, 1);
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
@@ -86895,7 +78345,6 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** Generate code to write the results of the select into the temporary
** table allocated and opened above.
*/
- Select *pSelect = pExpr->x.pSelect;
SelectDest dest;
ExprList *pEList;
@@ -86903,14 +78352,13 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable);
dest.affSdst = (u8)affinity;
assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable );
- pSelect->iLimit = 0;
- testcase( pSelect->selFlags & SF_Distinct );
+ pExpr->x.pSelect->iLimit = 0;
testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */
- if( sqlite3Select(pParse, pSelect, &dest) ){
+ if( sqlite3Select(pParse, pExpr->x.pSelect, &dest) ){
sqlite3KeyInfoUnref(pKeyInfo);
return 0;
}
- pEList = pSelect->pEList;
+ pEList = pExpr->x.pSelect->pEList;
assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */
assert( pEList!=0 );
assert( pEList->nExpr>0 );
@@ -86931,7 +78379,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
int r1, r2, r3;
if( !affinity ){
- affinity = SQLITE_AFF_BLOB;
+ affinity = SQLITE_AFF_NONE;
}
if( pKeyInfo ){
assert( sqlite3KeyInfoIsWriteable(pKeyInfo) );
@@ -86941,7 +78389,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
/* Loop through each expression in <exprlist>. */
r1 = sqlite3GetTempReg(pParse);
r2 = sqlite3GetTempReg(pParse);
- if( isRowid ) sqlite3VdbeAddOp2(v, OP_Null, 0, r2);
+ sqlite3VdbeAddOp2(v, OP_Null, 0, r2);
for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){
Expr *pE2 = pItem->pExpr;
int iValToIns;
@@ -86951,9 +78399,9 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** this code only executes once. Because for a non-constant
** expression we need to rerun this code each time.
*/
- if( jmpIfDynamic>=0 && !sqlite3ExprIsConstant(pE2) ){
- sqlite3VdbeChangeToNoop(v, jmpIfDynamic);
- jmpIfDynamic = -1;
+ if( testAddr>=0 && !sqlite3ExprIsConstant(pE2) ){
+ sqlite3VdbeChangeToNoop(v, testAddr);
+ testAddr = -1;
}
/* Evaluate the expression and insert it into the temp table */
@@ -86964,7 +78412,6 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
if( isRowid ){
sqlite3VdbeAddOp2(v, OP_MustBeInt, r3,
sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
sqlite3VdbeAddOp3(v, OP_Insert, pExpr->iTable, r2, r3);
}else{
sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1);
@@ -87003,7 +78450,6 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
sqlite3SelectDestInit(&dest, 0, ++pParse->nMem);
if( pExpr->op==TK_SELECT ){
dest.eDest = SRT_Mem;
- dest.iSdst = dest.iSDParm;
sqlite3VdbeAddOp2(v, OP_Null, 0, dest.iSDParm);
VdbeComment((v, "Init subquery result"));
}else{
@@ -87015,7 +78461,6 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0,
&sqlite3IntTokens[1]);
pSel->iLimit = 0;
- pSel->selFlags &= ~SF_MultiValue;
if( sqlite3Select(pParse, pSel, &dest) ){
return 0;
}
@@ -87025,14 +78470,10 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
}
}
- if( rHasNullFlag ){
- sqlite3SetHasNullFlag(v, pExpr->iTable, rHasNullFlag);
+ if( testAddr>=0 ){
+ sqlite3VdbeJumpHere(v, testAddr);
}
-
- if( jmpIfDynamic>=0 ){
- sqlite3VdbeJumpHere(v, jmpIfDynamic);
- }
- sqlite3ExprCachePop(pParse);
+ sqlite3ExprCachePop(pParse, 1);
return rReg;
}
@@ -87051,7 +78492,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** if the LHS is NULL or if the LHS is not contained within the RHS and the
** RHS contains one or more NULL values.
**
-** This routine generates code that jumps to destIfFalse if the LHS is not
+** This routine generates code will jump to destIfFalse if the LHS is not
** contained within the RHS. If due to NULLs we cannot determine if the LHS
** is contained in the RHS then jump to destIfNull. If the LHS is contained
** within the RHS then fall through.
@@ -87074,9 +78515,7 @@ static void sqlite3ExprCodeIN(
v = pParse->pVdbe;
assert( v!=0 ); /* OOM detected prior to this routine */
VdbeNoopComment((v, "begin IN expr"));
- eType = sqlite3FindInIndex(pParse, pExpr,
- IN_INDEX_MEMBERSHIP | IN_INDEX_NOOP_OK,
- destIfFalse==destIfNull ? 0 : &rRhsHasNull);
+ eType = sqlite3FindInIndex(pParse, pExpr, &rRhsHasNull);
/* Figure out the affinity to use to create a key from the results
** of the expression. affinityStr stores a static string suitable for
@@ -87090,122 +78529,101 @@ static void sqlite3ExprCodeIN(
r1 = sqlite3GetTempReg(pParse);
sqlite3ExprCode(pParse, pExpr->pLeft, r1);
- /* If sqlite3FindInIndex() did not find or create an index that is
- ** suitable for evaluating the IN operator, then evaluate using a
- ** sequence of comparisons.
+ /* If the LHS is NULL, then the result is either false or NULL depending
+ ** on whether the RHS is empty or not, respectively.
*/
- if( eType==IN_INDEX_NOOP ){
- ExprList *pList = pExpr->x.pList;
- CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft);
- int labelOk = sqlite3VdbeMakeLabel(v);
- int r2, regToFree;
- int regCkNull = 0;
- int ii;
- assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
- if( destIfNull!=destIfFalse ){
- regCkNull = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp3(v, OP_BitAnd, r1, r1, regCkNull);
- }
- for(ii=0; ii<pList->nExpr; ii++){
- r2 = sqlite3ExprCodeTemp(pParse, pList->a[ii].pExpr, &regToFree);
- if( regCkNull && sqlite3ExprCanBeNull(pList->a[ii].pExpr) ){
- sqlite3VdbeAddOp3(v, OP_BitAnd, regCkNull, r2, regCkNull);
- }
- if( ii<pList->nExpr-1 || destIfNull!=destIfFalse ){
- sqlite3VdbeAddOp4(v, OP_Eq, r1, labelOk, r2,
- (void*)pColl, P4_COLLSEQ);
- VdbeCoverageIf(v, ii<pList->nExpr-1);
- VdbeCoverageIf(v, ii==pList->nExpr-1);
- sqlite3VdbeChangeP5(v, affinity);
- }else{
- assert( destIfNull==destIfFalse );
- sqlite3VdbeAddOp4(v, OP_Ne, r1, destIfFalse, r2,
- (void*)pColl, P4_COLLSEQ); VdbeCoverage(v);
- sqlite3VdbeChangeP5(v, affinity | SQLITE_JUMPIFNULL);
- }
- sqlite3ReleaseTempReg(pParse, regToFree);
- }
- if( regCkNull ){
- sqlite3VdbeAddOp2(v, OP_IsNull, regCkNull, destIfNull); VdbeCoverage(v);
- sqlite3VdbeGoto(v, destIfFalse);
- }
- sqlite3VdbeResolveLabel(v, labelOk);
- sqlite3ReleaseTempReg(pParse, regCkNull);
+ if( destIfNull==destIfFalse ){
+ /* Shortcut for the common case where the false and NULL outcomes are
+ ** the same. */
+ sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull);
}else{
-
- /* If the LHS is NULL, then the result is either false or NULL depending
- ** on whether the RHS is empty or not, respectively.
+ int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, r1);
+ sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull);
+ sqlite3VdbeJumpHere(v, addr1);
+ }
+
+ if( eType==IN_INDEX_ROWID ){
+ /* In this case, the RHS is the ROWID of table b-tree
*/
- if( sqlite3ExprCanBeNull(pExpr->pLeft) ){
- if( destIfNull==destIfFalse ){
- /* Shortcut for the common case where the false and NULL outcomes are
- ** the same. */
- sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull); VdbeCoverage(v);
- }else{
- int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, r1); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse);
- VdbeCoverage(v);
- sqlite3VdbeGoto(v, destIfNull);
- sqlite3VdbeJumpHere(v, addr1);
- }
- }
-
- if( eType==IN_INDEX_ROWID ){
- /* In this case, the RHS is the ROWID of table b-tree
+ sqlite3VdbeAddOp2(v, OP_MustBeInt, r1, destIfFalse);
+ sqlite3VdbeAddOp3(v, OP_NotExists, pExpr->iTable, destIfFalse, r1);
+ }else{
+ /* In this case, the RHS is an index b-tree.
+ */
+ sqlite3VdbeAddOp4(v, OP_Affinity, r1, 1, 0, &affinity, 1);
+
+ /* If the set membership test fails, then the result of the
+ ** "x IN (...)" expression must be either 0 or NULL. If the set
+ ** contains no NULL values, then the result is 0. If the set
+ ** contains one or more NULL values, then the result of the
+ ** expression is also NULL.
+ */
+ if( rRhsHasNull==0 || destIfFalse==destIfNull ){
+ /* This branch runs if it is known at compile time that the RHS
+ ** cannot contain NULL values. This happens as the result
+ ** of a "NOT NULL" constraint in the database schema.
+ **
+ ** Also run this branch if NULL is equivalent to FALSE
+ ** for this particular IN operator.
*/
- sqlite3VdbeAddOp2(v, OP_MustBeInt, r1, destIfFalse); VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_NotExists, pExpr->iTable, destIfFalse, r1);
- VdbeCoverage(v);
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse, r1, 1);
+
}else{
- /* In this case, the RHS is an index b-tree.
+ /* In this branch, the RHS of the IN might contain a NULL and
+ ** the presence of a NULL on the RHS makes a difference in the
+ ** outcome.
*/
- sqlite3VdbeAddOp4(v, OP_Affinity, r1, 1, 0, &affinity, 1);
-
- /* If the set membership test fails, then the result of the
- ** "x IN (...)" expression must be either 0 or NULL. If the set
- ** contains no NULL values, then the result is 0. If the set
- ** contains one or more NULL values, then the result of the
- ** expression is also NULL.
+ int j1, j2, j3;
+
+ /* First check to see if the LHS is contained in the RHS. If so,
+ ** then the presence of NULLs in the RHS does not matter, so jump
+ ** over all of the code that follows.
*/
- assert( destIfFalse!=destIfNull || rRhsHasNull==0 );
- if( rRhsHasNull==0 ){
- /* This branch runs if it is known at compile time that the RHS
- ** cannot contain NULL values. This happens as the result
- ** of a "NOT NULL" constraint in the database schema.
- **
- ** Also run this branch if NULL is equivalent to FALSE
- ** for this particular IN operator.
- */
- sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse, r1, 1);
- VdbeCoverage(v);
- }else{
- /* In this branch, the RHS of the IN might contain a NULL and
- ** the presence of a NULL on the RHS makes a difference in the
- ** outcome.
- */
- int addr1;
-
- /* First check to see if the LHS is contained in the RHS. If so,
- ** then the answer is TRUE the presence of NULLs in the RHS does
- ** not matter. If the LHS is not contained in the RHS, then the
- ** answer is NULL if the RHS contains NULLs and the answer is
- ** FALSE if the RHS is NULL-free.
- */
- addr1 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, r1, 1);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_IsNull, rRhsHasNull, destIfNull);
- VdbeCoverage(v);
- sqlite3VdbeGoto(v, destIfFalse);
- sqlite3VdbeJumpHere(v, addr1);
- }
+ j1 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, r1, 1);
+
+ /* Here we begin generating code that runs if the LHS is not
+ ** contained within the RHS. Generate additional code that
+ ** tests the RHS for NULLs. If the RHS contains a NULL then
+ ** jump to destIfNull. If there are no NULLs in the RHS then
+ ** jump to destIfFalse.
+ */
+ j2 = sqlite3VdbeAddOp1(v, OP_NotNull, rRhsHasNull);
+ j3 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, rRhsHasNull, 1);
+ sqlite3VdbeAddOp2(v, OP_Integer, -1, rRhsHasNull);
+ sqlite3VdbeJumpHere(v, j3);
+ sqlite3VdbeAddOp2(v, OP_AddImm, rRhsHasNull, 1);
+ sqlite3VdbeJumpHere(v, j2);
+
+ /* Jump to the appropriate target depending on whether or not
+ ** the RHS contains a NULL
+ */
+ sqlite3VdbeAddOp2(v, OP_If, rRhsHasNull, destIfNull);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse);
+
+ /* The OP_Found at the top of this branch jumps here when true,
+ ** causing the overall IN expression evaluation to fall through.
+ */
+ sqlite3VdbeJumpHere(v, j1);
}
}
sqlite3ReleaseTempReg(pParse, r1);
- sqlite3ExprCachePop(pParse);
+ sqlite3ExprCachePop(pParse, 1);
VdbeComment((v, "end IN expr"));
}
#endif /* SQLITE_OMIT_SUBQUERY */
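
The jump targets generated above implement SQL's three-valued result for "x IN (...)": true when a match is found, NULL when no match is found but the RHS contains a NULL (or the LHS itself is NULL and the RHS is non-empty), and false otherwise. A small self-contained model of that truth table over integers; in_result() and the IN_* codes are illustrative, not part of SQLite:

enum { IN_FALSE = 0, IN_TRUE = 1, IN_NULL = 2 };

/* lhsIsNull: the left operand is SQL NULL.
** rhs/rhsIsNull: the list values and per-value NULL markers. */
static int in_result(int lhs, int lhsIsNull,
                     const int *rhs, const int *rhsIsNull, int n){
  int i, sawNull = 0;
  if( n==0 ) return IN_FALSE;            /* empty RHS is always false */
  if( lhsIsNull ) return IN_NULL;        /* NULL lhs, non-empty RHS */
  for(i=0; i<n; i++){
    if( rhsIsNull[i] ){ sawNull = 1; continue; }
    if( rhs[i]==lhs ) return IN_TRUE;    /* definite match */
  }
  return sawNull ? IN_NULL : IN_FALSE;   /* no match: NULL only if RHS had one */
}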
+/*
+** Duplicate an 8-byte value
+*/
+static char *dup8bytes(Vdbe *v, const char *in){
+ char *out = sqlite3DbMallocRaw(sqlite3VdbeDb(v), 8);
+ if( out ){
+ memcpy(out, in, 8);
+ }
+ return out;
+}
+
#ifndef SQLITE_OMIT_FLOATING_POINT
/*
** Generate an instruction that will put the floating point
@@ -87218,10 +78636,12 @@ static void sqlite3ExprCodeIN(
static void codeReal(Vdbe *v, const char *z, int negateFlag, int iMem){
if( ALWAYS(z!=0) ){
double value;
+ char *zV;
sqlite3AtoF(z, &value, sqlite3Strlen30(z), SQLITE_UTF8);
assert( !sqlite3IsNaN(value) ); /* The new AtoF never returns NaN */
if( negateFlag ) value = -value;
- sqlite3VdbeAddOp4Dup8(v, OP_Real, 0, iMem, 0, (u8*)&value, P4_REAL);
+ zV = dup8bytes(v, (char*)&value);
+ sqlite3VdbeAddOp4(v, OP_Real, 0, iMem, 0, zV, P4_REAL);
}
}
#endif
@@ -87245,22 +78665,17 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){
i64 value;
const char *z = pExpr->u.zToken;
assert( z!=0 );
- c = sqlite3DecOrHexToI64(z, &value);
+ c = sqlite3Atoi64(z, &value, sqlite3Strlen30(z), SQLITE_UTF8);
if( c==0 || (c==2 && negFlag) ){
+ char *zV;
if( negFlag ){ value = c==2 ? SMALLEST_INT64 : -value; }
- sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64);
+ zV = dup8bytes(v, (char*)&value);
+ sqlite3VdbeAddOp4(v, OP_Int64, 0, iMem, 0, zV, P4_INT64);
}else{
#ifdef SQLITE_OMIT_FLOATING_POINT
sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z);
#else
-#ifndef SQLITE_OMIT_HEX_INTEGER
- if( sqlite3_strnicmp(z,"0x",2)==0 ){
- sqlite3ErrorMsg(pParse, "hex literal too big: %s", z);
- }else
-#endif
- {
- codeReal(v, z, negFlag, iMem);
- }
+ codeReal(v, z, negFlag, iMem);
#endif
}
}
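
codeInteger() has to special-case the most negative 64-bit value: the literal 9223372036854775808 does not fit in a signed 64-bit integer on its own, but it becomes representable once the pending minus sign is applied (the c==2, negFlag case above). A standalone sketch of that check using strtoull; parse_signed_literal() is an illustrative helper, not the SQLite routine, which has its own parser and hex handling:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Parse z as a non-negative decimal literal, applying negate afterwards.
** Returns 0 on success and stores the value, 1 if it cannot be
** represented as a signed 64-bit integer. */
static int parse_signed_literal(const char *z, int negate, int64_t *pOut){
  unsigned long long u;
  errno = 0;
  u = strtoull(z, 0, 10);
  if( errno ) return 1;                              /* out of range entirely */
  if( negate ){
    if( u > (unsigned long long)INT64_MAX + 1 ) return 1;
    *pOut = (u == (unsigned long long)INT64_MAX + 1)
              ? INT64_MIN                            /* the special 2^63 case */
              : -(int64_t)u;
  }else{
    if( u > (unsigned long long)INT64_MAX ) return 1;
    *pOut = (int64_t)u;
  }
  return 0;
}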
@@ -87289,8 +78704,7 @@ SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int
int idxLru;
struct yColCache *p;
- /* Unless an error has occurred, register numbers are always positive. */
- assert( iReg>0 || pParse->nErr || pParse->db->mallocFailed );
+ assert( iReg>0 ); /* Register numbers are always positive */
assert( iCol>=-1 && iCol<32768 ); /* Finite column numbers */
/* The SQLITE_ColumnCache flag disables the column cache. This is used
@@ -87368,28 +78782,19 @@ SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){
*/
SQLITE_PRIVATE void sqlite3ExprCachePush(Parse *pParse){
pParse->iCacheLevel++;
-#ifdef SQLITE_DEBUG
- if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
- printf("PUSH to %d\n", pParse->iCacheLevel);
- }
-#endif
}
/*
** Remove from the column cache any entries that were added since the
-** the previous sqlite3ExprCachePush operation. In other words, restore
-** the cache to the state it was in prior the most recent Push.
+** the previous N Push operations. In other words, restore the cache
+** to the state it was in N Pushes ago.
*/
-SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){
+SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse, int N){
int i;
struct yColCache *p;
- assert( pParse->iCacheLevel>=1 );
- pParse->iCacheLevel--;
-#ifdef SQLITE_DEBUG
- if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
- printf("POP to %d\n", pParse->iCacheLevel);
- }
-#endif
+ assert( N>0 );
+ assert( pParse->iCacheLevel>=N );
+ pParse->iCacheLevel -= N;
for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
if( p->iReg && p->iLevel>pParse->iCacheLevel ){
cacheEntryClear(pParse, p);
@@ -87414,28 +78819,6 @@ static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){
}
}
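
The cache-level scheme that sqlite3ExprCachePush() and sqlite3ExprCachePop() manage is simple: every cache entry remembers the level at which it was created, push bumps the level, and pop lowers it and evicts anything created above the new level. A self-contained sketch of that mechanism; Cache, cache_push() and cache_pop() are illustrative names:

#define N_ENTRY 8

typedef struct Cache {
  int level;                              /* current nesting level */
  struct { int inUse; int level; } a[N_ENTRY];
} Cache;

static void cache_push(Cache *p){ p->level++; }

/* Drop every entry created after the matching push. */
static void cache_pop(Cache *p){
  int i;
  p->level--;
  for(i=0; i<N_ENTRY; i++){
    if( p->a[i].inUse && p->a[i].level > p->level ){
      p->a[i].inUse = 0;
    }
  }
}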
-/* Generate code that will load into register regOut a value that is
-** appropriate for the iIdxCol-th column of index pIdx.
-*/
-SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(
- Parse *pParse, /* The parsing context */
- Index *pIdx, /* The index whose column is to be loaded */
- int iTabCur, /* Cursor pointing to a table row */
- int iIdxCol, /* The column of the index to be loaded */
- int regOut /* Store the index column value in this register */
-){
- i16 iTabCol = pIdx->aiColumn[iIdxCol];
- if( iTabCol==XN_EXPR ){
- assert( pIdx->aColExpr );
- assert( pIdx->aColExpr->nExpr>iIdxCol );
- pParse->iSelfTab = iTabCur;
- sqlite3ExprCode(pParse, pIdx->aColExpr->a[iIdxCol].pExpr, regOut);
- }else{
- sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pIdx->pTable, iTabCur,
- iTabCol, regOut);
- }
-}
-
/*
** Generate code to extract the value of the iCol-th column of a table.
*/
@@ -87506,11 +78889,6 @@ SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){
int i;
struct yColCache *p;
-#if SQLITE_DEBUG
- if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
- printf("CLEAR\n");
- }
-#endif
for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
if( p->iReg ){
cacheEntryClear(pParse, p);
@@ -87532,9 +78910,16 @@ SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse *pParse, int iStart, in
** over to iTo..iTo+nReg-1. Keep the column cache up-to-date.
*/
SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){
+ int i;
+ struct yColCache *p;
assert( iFrom>=iTo+nReg || iFrom+nReg<=iTo );
- sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg);
- sqlite3ExprCacheRemove(pParse, iFrom, nReg);
+ sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg-1);
+ for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
+ int x = p->iReg;
+ if( x>=iFrom && x<iFrom+nReg ){
+ p->iReg += iTo-iFrom;
+ }
+ }
}
#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
@@ -87621,9 +79006,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
inReg = pExpr->iColumn + pParse->ckBase;
break;
}else{
- /* Coding an expression that is part of an index where column names
- ** in the index refer to the table to which the index belongs */
- iTab = pParse->iSelfTab;
+ /* Deleting from a partial index */
+ iTab = pParse->iPartIdxTab;
}
}
inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab,
@@ -87644,7 +79028,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
#endif
case TK_STRING: {
assert( !ExprHasProperty(pExpr, EP_IntValue) );
- sqlite3VdbeLoadString(v, target, pExpr->u.zToken);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, target, 0, pExpr->u.zToken, 0);
break;
}
case TK_NULL: {
@@ -87683,16 +79067,33 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
inReg = pExpr->iTable;
break;
}
+ case TK_AS: {
+ inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+ break;
+ }
#ifndef SQLITE_OMIT_CAST
case TK_CAST: {
/* Expressions of the form: CAST(pLeft AS token) */
+ int aff, to_op;
inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ aff = sqlite3AffinityType(pExpr->u.zToken, 0);
+ to_op = aff - SQLITE_AFF_TEXT + OP_ToText;
+ assert( to_op==OP_ToText || aff!=SQLITE_AFF_TEXT );
+ assert( to_op==OP_ToBlob || aff!=SQLITE_AFF_NONE );
+ assert( to_op==OP_ToNumeric || aff!=SQLITE_AFF_NUMERIC );
+ assert( to_op==OP_ToInt || aff!=SQLITE_AFF_INTEGER );
+ assert( to_op==OP_ToReal || aff!=SQLITE_AFF_REAL );
+ testcase( to_op==OP_ToText );
+ testcase( to_op==OP_ToBlob );
+ testcase( to_op==OP_ToNumeric );
+ testcase( to_op==OP_ToInt );
+ testcase( to_op==OP_ToReal );
if( inReg!=target ){
sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target);
inReg = target;
}
- sqlite3VdbeAddOp2(v, OP_Cast, target,
- sqlite3AffinityType(pExpr->u.zToken, 0));
+ sqlite3VdbeAddOp1(v, to_op, inReg);
testcase( usedAsColumnCache(pParse, inReg, inReg) );
sqlite3ExprCacheAffinityChange(pParse, inReg, 1);
break;
@@ -87704,16 +79105,22 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
case TK_GE:
case TK_NE:
case TK_EQ: {
+ assert( TK_LT==OP_Lt );
+ assert( TK_LE==OP_Le );
+ assert( TK_GT==OP_Gt );
+ assert( TK_GE==OP_Ge );
+ assert( TK_EQ==OP_Eq );
+ assert( TK_NE==OP_Ne );
+ testcase( op==TK_LT );
+ testcase( op==TK_LE );
+ testcase( op==TK_GT );
+ testcase( op==TK_GE );
+ testcase( op==TK_EQ );
+ testcase( op==TK_NE );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
r1, r2, inReg, SQLITE_STOREP2);
- assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
- assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
- assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
- assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
- assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
- assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
testcase( regFree1==0 );
testcase( regFree2==0 );
break;
@@ -87727,8 +79134,6 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
op = (op==TK_IS) ? TK_EQ : TK_NE;
codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
r1, r2, inReg, SQLITE_STOREP2 | SQLITE_NULLEQ);
- VdbeCoverageIf(v, op==TK_EQ);
- VdbeCoverageIf(v, op==TK_NE);
testcase( regFree1==0 );
testcase( regFree2==0 );
break;
@@ -87745,17 +79150,28 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
case TK_LSHIFT:
case TK_RSHIFT:
case TK_CONCAT: {
- assert( TK_AND==OP_And ); testcase( op==TK_AND );
- assert( TK_OR==OP_Or ); testcase( op==TK_OR );
- assert( TK_PLUS==OP_Add ); testcase( op==TK_PLUS );
- assert( TK_MINUS==OP_Subtract ); testcase( op==TK_MINUS );
- assert( TK_REM==OP_Remainder ); testcase( op==TK_REM );
- assert( TK_BITAND==OP_BitAnd ); testcase( op==TK_BITAND );
- assert( TK_BITOR==OP_BitOr ); testcase( op==TK_BITOR );
- assert( TK_SLASH==OP_Divide ); testcase( op==TK_SLASH );
- assert( TK_LSHIFT==OP_ShiftLeft ); testcase( op==TK_LSHIFT );
- assert( TK_RSHIFT==OP_ShiftRight ); testcase( op==TK_RSHIFT );
- assert( TK_CONCAT==OP_Concat ); testcase( op==TK_CONCAT );
+ assert( TK_AND==OP_And );
+ assert( TK_OR==OP_Or );
+ assert( TK_PLUS==OP_Add );
+ assert( TK_MINUS==OP_Subtract );
+ assert( TK_REM==OP_Remainder );
+ assert( TK_BITAND==OP_BitAnd );
+ assert( TK_BITOR==OP_BitOr );
+ assert( TK_SLASH==OP_Divide );
+ assert( TK_LSHIFT==OP_ShiftLeft );
+ assert( TK_RSHIFT==OP_ShiftRight );
+ assert( TK_CONCAT==OP_Concat );
+ testcase( op==TK_AND );
+ testcase( op==TK_OR );
+ testcase( op==TK_PLUS );
+ testcase( op==TK_MINUS );
+ testcase( op==TK_REM );
+ testcase( op==TK_BITAND );
+ testcase( op==TK_BITOR );
+ testcase( op==TK_SLASH );
+ testcase( op==TK_LSHIFT );
+ testcase( op==TK_RSHIFT );
+ testcase( op==TK_CONCAT );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
sqlite3VdbeAddOp3(v, op, r2, r1, target);
@@ -87787,8 +79203,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
}
case TK_BITNOT:
case TK_NOT: {
- assert( TK_BITNOT==OP_BitNot ); testcase( op==TK_BITNOT );
- assert( TK_NOT==OP_Not ); testcase( op==TK_NOT );
+ assert( TK_BITNOT==OP_BitNot );
+ assert( TK_NOT==OP_Not );
+ testcase( op==TK_BITNOT );
+ testcase( op==TK_NOT );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
testcase( regFree1==0 );
inReg = target;
@@ -87798,15 +79216,15 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
case TK_ISNULL:
case TK_NOTNULL: {
int addr;
- assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL );
- assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
+ assert( TK_ISNULL==OP_IsNull );
+ assert( TK_NOTNULL==OP_NotNull );
+ testcase( op==TK_ISNULL );
+ testcase( op==TK_NOTNULL );
sqlite3VdbeAddOp2(v, OP_Integer, 1, target);
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
testcase( regFree1==0 );
addr = sqlite3VdbeAddOp1(v, op, r1);
- VdbeCoverageIf(v, op==TK_ISNULL);
- VdbeCoverageIf(v, op==TK_NOTNULL);
- sqlite3VdbeAddOp2(v, OP_Integer, 0, target);
+ sqlite3VdbeAddOp2(v, OP_AddImm, target, -1);
sqlite3VdbeJumpHere(v, addr);
break;
}
@@ -87826,7 +79244,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
FuncDef *pDef; /* The function definition object */
int nId; /* Length of the function name in bytes */
const char *zId; /* The function name */
- u32 constMask = 0; /* Mask of function arguments that are constant */
+ int constMask = 0; /* Mask of function arguments that are constant */
int i; /* Loop counter */
u8 enc = ENC(db); /* The text encoding used by this database */
CollSeq *pColl = 0; /* A collating sequence */
@@ -87842,13 +79260,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
zId = pExpr->u.zToken;
nId = sqlite3Strlen30(zId);
pDef = sqlite3FindFunction(db, zId, nId, nFarg, enc, 0);
- if( pDef==0 || pDef->xFunc==0 ){
+ if( pDef==0 ){
sqlite3ErrorMsg(pParse, "unknown function: %.*s()", nId, zId);
break;
}
/* Attempt a direct implementation of the built-in COALESCE() and
- ** IFNULL() functions. This avoids unnecessary evaluation of
+ ** IFNULL() functions. This avoids unnecessary evaluation of
** arguments past the first non-NULL argument.
*/
if( pDef->funcFlags & SQLITE_FUNC_COALESCE ){
@@ -87857,11 +79275,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target);
for(i=1; i<nFarg; i++){
sqlite3VdbeAddOp2(v, OP_NotNull, target, endCoalesce);
- VdbeCoverage(v);
sqlite3ExprCacheRemove(pParse, target, 1);
sqlite3ExprCachePush(pParse);
sqlite3ExprCode(pParse, pFarg->a[i].pExpr, target);
- sqlite3ExprCachePop(pParse);
+ sqlite3ExprCachePop(pParse, 1);
}
sqlite3VdbeResolveLabel(v, endCoalesce);
break;
@@ -87872,14 +79289,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
*/
if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){
assert( nFarg>=1 );
- inReg = sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target);
+ sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target);
break;
}
for(i=0; i<nFarg; i++){
if( i<32 && sqlite3ExprIsConstant(pFarg->a[i].pExpr) ){
- testcase( i==31 );
- constMask |= MASKBIT32(i);
+ constMask |= (1<<i);
}
if( (pDef->funcFlags & SQLITE_FUNC_NEEDCOLL)!=0 && !pColl ){
pColl = sqlite3ExprCollSeq(pParse, pFarg->a[i].pExpr);
@@ -87913,9 +79329,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
}
sqlite3ExprCachePush(pParse); /* Ticket 2ea2425d34be */
- sqlite3ExprCodeExprList(pParse, pFarg, r1, 0,
+ sqlite3ExprCodeExprList(pParse, pFarg, r1,
SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR);
- sqlite3ExprCachePop(pParse); /* Ticket 2ea2425d34be */
+ sqlite3ExprCachePop(pParse, 1); /* Ticket 2ea2425d34be */
}else{
r1 = 0;
}
@@ -87942,7 +79358,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
if( !pColl ) pColl = db->pDfltColl;
sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ);
}
- sqlite3VdbeAddOp4(v, OP_Function0, constMask, r1, target,
+ sqlite3VdbeAddOp4(v, OP_Function, constMask, r1, target,
(char*)pDef, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nFarg);
if( nFarg && constMask==0 ){
@@ -87995,14 +79411,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
r3 = sqlite3GetTempReg(pParse);
r4 = sqlite3GetTempReg(pParse);
codeCompare(pParse, pLeft, pRight, OP_Ge,
- r1, r2, r3, SQLITE_STOREP2); VdbeCoverage(v);
+ r1, r2, r3, SQLITE_STOREP2);
pLItem++;
pRight = pLItem->pExpr;
sqlite3ReleaseTempReg(pParse, regFree2);
r2 = sqlite3ExprCodeTemp(pParse, pRight, &regFree2);
testcase( regFree2==0 );
codeCompare(pParse, pLeft, pRight, OP_Le, r1, r2, r4, SQLITE_STOREP2);
- VdbeCoverage(v);
sqlite3VdbeAddOp3(v, OP_And, r3, r4, target);
sqlite3ReleaseTempReg(pParse, r3);
sqlite3ReleaseTempReg(pParse, r4);
@@ -88057,10 +79472,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
#ifndef SQLITE_OMIT_FLOATING_POINT
/* If the column has REAL affinity, it may currently be stored as an
- ** integer. Use OP_RealAffinity to make sure it is really real.
- **
- ** EVIDENCE-OF: R-60985-57662 SQLite will convert the value back to
- ** floating point when extracting it from the record. */
+ ** integer. Use OP_RealAffinity to make sure it is really real. */
if( pExpr->iColumn>=0
&& pTab->aCol[pExpr->iColumn].affinity==SQLITE_AFF_REAL
){
@@ -88137,14 +79549,14 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
sqlite3ExprIfFalse(pParse, pTest, nextCase, SQLITE_JUMPIFNULL);
testcase( aListelem[i+1].pExpr->op==TK_COLUMN );
sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target);
- sqlite3VdbeGoto(v, endLabel);
- sqlite3ExprCachePop(pParse);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, endLabel);
+ sqlite3ExprCachePop(pParse, 1);
sqlite3VdbeResolveLabel(v, nextCase);
}
if( (nExpr&1)!=0 ){
sqlite3ExprCachePush(pParse);
sqlite3ExprCode(pParse, pEList->a[nExpr-1].pExpr, target);
- sqlite3ExprCachePop(pParse);
+ sqlite3ExprCachePop(pParse, 1);
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, target);
}
@@ -88172,7 +79584,6 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
if( pExpr->affinity==OE_Ignore ){
sqlite3VdbeAddOp4(
v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0);
- VdbeCoverage(v);
}else{
sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_TRIGGER,
pExpr->affinity, pExpr->u.zToken, 0, 0);
@@ -88260,7 +79671,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){
** results in register target. The results are guaranteed to appear
** in register target.
*/
-SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
+SQLITE_PRIVATE int sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
int inReg;
assert( target>0 && target<=pParse->nMem );
@@ -88273,24 +79684,11 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, inReg, target);
}
}
+ return target;
}
/*
-** Generate code that will evaluate expression pExpr and store the
-** results in register target. The results are guaranteed to appear
-** in register target. If the expression is constant, then this routine
-** might choose to code the expression at initialization time.
-*/
-SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){
- if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){
- sqlite3ExprCodeAtInit(pParse, pExpr, target, 0);
- }else{
- sqlite3ExprCode(pParse, pExpr, target);
- }
-}
-
-/*
-** Generate code that evaluates the given expression and puts the result
+** Generate code that evaluates the given expression and puts the result
** in register target.
**
** Also make a copy of the expression results into another "cache" register
@@ -88301,18 +79699,299 @@ SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int ta
** times. They are evaluated once and the results of the expression
** are reused.
*/
-SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr, int target){
+SQLITE_PRIVATE int sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr, int target){
Vdbe *v = pParse->pVdbe;
- int iMem;
-
+ int inReg;
+ inReg = sqlite3ExprCode(pParse, pExpr, target);
assert( target>0 );
- assert( pExpr->op!=TK_REGISTER );
- sqlite3ExprCode(pParse, pExpr, target);
- iMem = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Copy, target, iMem);
- exprToRegister(pExpr, iMem);
+ /* The only place, other than this routine, where expressions can be
+ ** converted to TK_REGISTER is internal subexpressions in BETWEEN and
+ ** CASE operators. Neither ever calls this routine. And this routine
+ ** is never called twice on the same expression. Hence it is impossible
+ ** for the input to this routine to already be a register. Nevertheless,
+ ** it seems prudent to keep the ALWAYS() in case the conditions above
+ ** change with future modifications or enhancements. */
+ if( ALWAYS(pExpr->op!=TK_REGISTER) ){
+ int iMem;
+ iMem = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Copy, inReg, iMem);
+ exprToRegister(pExpr, iMem);
+ }
+ return inReg;
}
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+/*
+** Generate a human-readable explanation of an expression tree.
+*/
+SQLITE_PRIVATE void sqlite3ExplainExpr(Vdbe *pOut, Expr *pExpr){
+ int op; /* The opcode being coded */
+ const char *zBinOp = 0; /* Binary operator */
+ const char *zUniOp = 0; /* Unary operator */
+ if( pExpr==0 ){
+ op = TK_NULL;
+ }else{
+ op = pExpr->op;
+ }
+ switch( op ){
+ case TK_AGG_COLUMN: {
+ sqlite3ExplainPrintf(pOut, "AGG{%d:%d}",
+ pExpr->iTable, pExpr->iColumn);
+ break;
+ }
+ case TK_COLUMN: {
+ if( pExpr->iTable<0 ){
+ /* This only happens when coding check constraints */
+ sqlite3ExplainPrintf(pOut, "COLUMN(%d)", pExpr->iColumn);
+ }else{
+ sqlite3ExplainPrintf(pOut, "{%d:%d}",
+ pExpr->iTable, pExpr->iColumn);
+ }
+ break;
+ }
+ case TK_INTEGER: {
+ if( pExpr->flags & EP_IntValue ){
+ sqlite3ExplainPrintf(pOut, "%d", pExpr->u.iValue);
+ }else{
+ sqlite3ExplainPrintf(pOut, "%s", pExpr->u.zToken);
+ }
+ break;
+ }
+#ifndef SQLITE_OMIT_FLOATING_POINT
+ case TK_FLOAT: {
+ sqlite3ExplainPrintf(pOut,"%s", pExpr->u.zToken);
+ break;
+ }
+#endif
+ case TK_STRING: {
+ sqlite3ExplainPrintf(pOut,"%Q", pExpr->u.zToken);
+ break;
+ }
+ case TK_NULL: {
+ sqlite3ExplainPrintf(pOut,"NULL");
+ break;
+ }
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+ case TK_BLOB: {
+ sqlite3ExplainPrintf(pOut,"%s", pExpr->u.zToken);
+ break;
+ }
+#endif
+ case TK_VARIABLE: {
+ sqlite3ExplainPrintf(pOut,"VARIABLE(%s,%d)",
+ pExpr->u.zToken, pExpr->iColumn);
+ break;
+ }
+ case TK_REGISTER: {
+ sqlite3ExplainPrintf(pOut,"REGISTER(%d)", pExpr->iTable);
+ break;
+ }
+ case TK_AS: {
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ break;
+ }
+#ifndef SQLITE_OMIT_CAST
+ case TK_CAST: {
+ /* Expressions of the form: CAST(pLeft AS token) */
+ const char *zAff = "unk";
+ switch( sqlite3AffinityType(pExpr->u.zToken, 0) ){
+ case SQLITE_AFF_TEXT: zAff = "TEXT"; break;
+ case SQLITE_AFF_NONE: zAff = "NONE"; break;
+ case SQLITE_AFF_NUMERIC: zAff = "NUMERIC"; break;
+ case SQLITE_AFF_INTEGER: zAff = "INTEGER"; break;
+ case SQLITE_AFF_REAL: zAff = "REAL"; break;
+ }
+ sqlite3ExplainPrintf(pOut, "CAST-%s(", zAff);
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ sqlite3ExplainPrintf(pOut, ")");
+ break;
+ }
+#endif /* SQLITE_OMIT_CAST */
+ case TK_LT: zBinOp = "LT"; break;
+ case TK_LE: zBinOp = "LE"; break;
+ case TK_GT: zBinOp = "GT"; break;
+ case TK_GE: zBinOp = "GE"; break;
+ case TK_NE: zBinOp = "NE"; break;
+ case TK_EQ: zBinOp = "EQ"; break;
+ case TK_IS: zBinOp = "IS"; break;
+ case TK_ISNOT: zBinOp = "ISNOT"; break;
+ case TK_AND: zBinOp = "AND"; break;
+ case TK_OR: zBinOp = "OR"; break;
+ case TK_PLUS: zBinOp = "ADD"; break;
+ case TK_STAR: zBinOp = "MUL"; break;
+ case TK_MINUS: zBinOp = "SUB"; break;
+ case TK_REM: zBinOp = "REM"; break;
+ case TK_BITAND: zBinOp = "BITAND"; break;
+ case TK_BITOR: zBinOp = "BITOR"; break;
+ case TK_SLASH: zBinOp = "DIV"; break;
+ case TK_LSHIFT: zBinOp = "LSHIFT"; break;
+ case TK_RSHIFT: zBinOp = "RSHIFT"; break;
+ case TK_CONCAT: zBinOp = "CONCAT"; break;
+
+ case TK_UMINUS: zUniOp = "UMINUS"; break;
+ case TK_UPLUS: zUniOp = "UPLUS"; break;
+ case TK_BITNOT: zUniOp = "BITNOT"; break;
+ case TK_NOT: zUniOp = "NOT"; break;
+ case TK_ISNULL: zUniOp = "ISNULL"; break;
+ case TK_NOTNULL: zUniOp = "NOTNULL"; break;
+
+ case TK_COLLATE: {
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ sqlite3ExplainPrintf(pOut,".COLLATE(%s)",pExpr->u.zToken);
+ break;
+ }
+
+ case TK_AGG_FUNCTION:
+ case TK_FUNCTION: {
+ ExprList *pFarg; /* List of function arguments */
+ if( ExprHasProperty(pExpr, EP_TokenOnly) ){
+ pFarg = 0;
+ }else{
+ pFarg = pExpr->x.pList;
+ }
+ if( op==TK_AGG_FUNCTION ){
+ sqlite3ExplainPrintf(pOut, "AGG_FUNCTION%d:%s(",
+ pExpr->op2, pExpr->u.zToken);
+ }else{
+ sqlite3ExplainPrintf(pOut, "FUNCTION:%s(", pExpr->u.zToken);
+ }
+ if( pFarg ){
+ sqlite3ExplainExprList(pOut, pFarg);
+ }
+ sqlite3ExplainPrintf(pOut, ")");
+ break;
+ }
+#ifndef SQLITE_OMIT_SUBQUERY
+ case TK_EXISTS: {
+ sqlite3ExplainPrintf(pOut, "EXISTS(");
+ sqlite3ExplainSelect(pOut, pExpr->x.pSelect);
+ sqlite3ExplainPrintf(pOut,")");
+ break;
+ }
+ case TK_SELECT: {
+ sqlite3ExplainPrintf(pOut, "(");
+ sqlite3ExplainSelect(pOut, pExpr->x.pSelect);
+ sqlite3ExplainPrintf(pOut, ")");
+ break;
+ }
+ case TK_IN: {
+ sqlite3ExplainPrintf(pOut, "IN(");
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ sqlite3ExplainPrintf(pOut, ",");
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ sqlite3ExplainSelect(pOut, pExpr->x.pSelect);
+ }else{
+ sqlite3ExplainExprList(pOut, pExpr->x.pList);
+ }
+ sqlite3ExplainPrintf(pOut, ")");
+ break;
+ }
+#endif /* SQLITE_OMIT_SUBQUERY */
+
+ /*
+ ** x BETWEEN y AND z
+ **
+ ** This is equivalent to
+ **
+ ** x>=y AND x<=z
+ **
+ ** X is stored in pExpr->pLeft.
+ ** Y is stored in pExpr->pList->a[0].pExpr.
+ ** Z is stored in pExpr->pList->a[1].pExpr.
+ */
+ case TK_BETWEEN: {
+ Expr *pX = pExpr->pLeft;
+ Expr *pY = pExpr->x.pList->a[0].pExpr;
+ Expr *pZ = pExpr->x.pList->a[1].pExpr;
+ sqlite3ExplainPrintf(pOut, "BETWEEN(");
+ sqlite3ExplainExpr(pOut, pX);
+ sqlite3ExplainPrintf(pOut, ",");
+ sqlite3ExplainExpr(pOut, pY);
+ sqlite3ExplainPrintf(pOut, ",");
+ sqlite3ExplainExpr(pOut, pZ);
+ sqlite3ExplainPrintf(pOut, ")");
+ break;
+ }
+ case TK_TRIGGER: {
+ /* If the opcode is TK_TRIGGER, then the expression is a reference
+ ** to a column in the new.* or old.* pseudo-tables available to
+ ** trigger programs. In this case Expr.iTable is set to 1 for the
+ ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn
+ ** is set to the column of the pseudo-table to read, or to -1 to
+ ** read the rowid field.
+ */
+ sqlite3ExplainPrintf(pOut, "%s(%d)",
+ pExpr->iTable ? "NEW" : "OLD", pExpr->iColumn);
+ break;
+ }
+ case TK_CASE: {
+ sqlite3ExplainPrintf(pOut, "CASE(");
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ sqlite3ExplainPrintf(pOut, ",");
+ sqlite3ExplainExprList(pOut, pExpr->x.pList);
+ break;
+ }
+#ifndef SQLITE_OMIT_TRIGGER
+ case TK_RAISE: {
+ const char *zType = "unk";
+ switch( pExpr->affinity ){
+ case OE_Rollback: zType = "rollback"; break;
+ case OE_Abort: zType = "abort"; break;
+ case OE_Fail: zType = "fail"; break;
+ case OE_Ignore: zType = "ignore"; break;
+ }
+ sqlite3ExplainPrintf(pOut, "RAISE-%s(%s)", zType, pExpr->u.zToken);
+ break;
+ }
+#endif
+ }
+ if( zBinOp ){
+ sqlite3ExplainPrintf(pOut,"%s(", zBinOp);
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ sqlite3ExplainPrintf(pOut,",");
+ sqlite3ExplainExpr(pOut, pExpr->pRight);
+ sqlite3ExplainPrintf(pOut,")");
+ }else if( zUniOp ){
+ sqlite3ExplainPrintf(pOut,"%s(", zUniOp);
+ sqlite3ExplainExpr(pOut, pExpr->pLeft);
+ sqlite3ExplainPrintf(pOut,")");
+ }
+}
+#endif /* defined(SQLITE_ENABLE_TREE_EXPLAIN) */
+
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+/*
+** Generate a human-readable explanation of an expression list.
+*/
+SQLITE_PRIVATE void sqlite3ExplainExprList(Vdbe *pOut, ExprList *pList){
+ int i;
+ if( pList==0 || pList->nExpr==0 ){
+ sqlite3ExplainPrintf(pOut, "(empty-list)");
+ return;
+ }else if( pList->nExpr==1 ){
+ sqlite3ExplainExpr(pOut, pList->a[0].pExpr);
+ }else{
+ sqlite3ExplainPush(pOut);
+ for(i=0; i<pList->nExpr; i++){
+ sqlite3ExplainPrintf(pOut, "item[%d] = ", i);
+ sqlite3ExplainPush(pOut);
+ sqlite3ExplainExpr(pOut, pList->a[i].pExpr);
+ sqlite3ExplainPop(pOut);
+ if( pList->a[i].zName ){
+ sqlite3ExplainPrintf(pOut, " AS %s", pList->a[i].zName);
+ }
+ if( pList->a[i].bSpanIsTab ){
+ sqlite3ExplainPrintf(pOut, " (%s)", pList->a[i].zSpan);
+ }
+ if( i<pList->nExpr-1 ){
+ sqlite3ExplainNL(pOut);
+ }
+ }
+ sqlite3ExplainPop(pOut);
+ }
+}
+#endif /* SQLITE_ENABLE_TREE_EXPLAIN */
+
/*
** Generate code that pushes the value of every element of the given
** expression list into a sequence of registers beginning at target.
@@ -88329,13 +80008,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
Parse *pParse, /* Parsing context */
ExprList *pList, /* The expression list to be coded */
int target, /* Where to write results */
- int srcReg, /* Source registers if SQLITE_ECEL_REF */
u8 flags /* SQLITE_ECEL_* flags */
){
struct ExprList_item *pItem;
- int i, j, n;
+ int i, n;
u8 copyOp = (flags & SQLITE_ECEL_DUP) ? OP_Copy : OP_SCopy;
- Vdbe *v = pParse->pVdbe;
assert( pList!=0 );
assert( target>0 );
assert( pParse->pVdbe!=0 ); /* Never gets this far otherwise */
@@ -88343,23 +80020,12 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR;
for(pItem=pList->a, i=0; i<n; i++, pItem++){
Expr *pExpr = pItem->pExpr;
- if( (flags & SQLITE_ECEL_REF)!=0 && (j = pList->a[i].u.x.iOrderByCol)>0 ){
- sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i);
- }else if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){
+ if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){
sqlite3ExprCodeAtInit(pParse, pExpr, target+i, 0);
}else{
int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i);
if( inReg!=target+i ){
- VdbeOp *pOp;
- if( copyOp==OP_Copy
- && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy
- && pOp->p1+pOp->p3+1==inReg
- && pOp->p2+pOp->p3+1==target+i
- ){
- pOp->p3++;
- }else{
- sqlite3VdbeAddOp2(v, copyOp, inReg, target+i);
- }
+ sqlite3VdbeAddOp2(pParse->pVdbe, copyOp, inReg, target+i);
}
}
}
@@ -88376,7 +80042,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
** x>=y AND x<=z
**
** Code it as such, taking care to do the common subexpression
-** elimination of x.
+** elimination of x.
*/
static void exprCodeBetween(
Parse *pParse, /* Parsing and code generating context */
@@ -88450,19 +80116,17 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
case TK_AND: {
int d2 = sqlite3VdbeMakeLabel(v);
testcase( jumpIfNull==0 );
- sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL);
sqlite3ExprCachePush(pParse);
+ sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL);
sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
sqlite3VdbeResolveLabel(v, d2);
- sqlite3ExprCachePop(pParse);
+ sqlite3ExprCachePop(pParse, 1);
break;
}
case TK_OR: {
testcase( jumpIfNull==0 );
sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
- sqlite3ExprCachePush(pParse);
sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
- sqlite3ExprCachePop(pParse);
break;
}
case TK_NOT: {
@@ -88476,17 +80140,23 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
case TK_GE:
case TK_NE:
case TK_EQ: {
+ assert( TK_LT==OP_Lt );
+ assert( TK_LE==OP_Le );
+ assert( TK_GT==OP_Gt );
+ assert( TK_GE==OP_Ge );
+ assert( TK_EQ==OP_Eq );
+ assert( TK_NE==OP_Ne );
+ testcase( op==TK_LT );
+ testcase( op==TK_LE );
+ testcase( op==TK_GT );
+ testcase( op==TK_GE );
+ testcase( op==TK_EQ );
+ testcase( op==TK_NE );
testcase( jumpIfNull==0 );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
r1, r2, dest, jumpIfNull);
- assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
- assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
- assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
- assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
- assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
- assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
testcase( regFree1==0 );
testcase( regFree2==0 );
break;
@@ -88500,20 +80170,18 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
op = (op==TK_IS) ? TK_EQ : TK_NE;
codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
r1, r2, dest, SQLITE_NULLEQ);
- VdbeCoverageIf(v, op==TK_EQ);
- VdbeCoverageIf(v, op==TK_NE);
testcase( regFree1==0 );
testcase( regFree2==0 );
break;
}
case TK_ISNULL:
case TK_NOTNULL: {
- assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL );
- assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
+ assert( TK_ISNULL==OP_IsNull );
+ assert( TK_NOTNULL==OP_NotNull );
+ testcase( op==TK_ISNULL );
+ testcase( op==TK_NOTNULL );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
sqlite3VdbeAddOp2(v, op, r1, dest);
- VdbeCoverageIf(v, op==TK_ISNULL);
- VdbeCoverageIf(v, op==TK_NOTNULL);
testcase( regFree1==0 );
break;
}
@@ -88527,23 +80195,16 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
int destIfFalse = sqlite3VdbeMakeLabel(v);
int destIfNull = jumpIfNull ? dest : destIfFalse;
sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull);
- sqlite3VdbeGoto(v, dest);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, dest);
sqlite3VdbeResolveLabel(v, destIfFalse);
break;
}
#endif
default: {
- if( exprAlwaysTrue(pExpr) ){
- sqlite3VdbeGoto(v, dest);
- }else if( exprAlwaysFalse(pExpr) ){
- /* No-op */
- }else{
- r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
- sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0);
- VdbeCoverage(v);
- testcase( regFree1==0 );
- testcase( jumpIfNull==0 );
- }
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+ sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0);
+ testcase( regFree1==0 );
+ testcase( jumpIfNull==0 );
break;
}
}
@@ -88606,19 +80267,17 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
case TK_AND: {
testcase( jumpIfNull==0 );
sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
- sqlite3ExprCachePush(pParse);
sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
- sqlite3ExprCachePop(pParse);
break;
}
case TK_OR: {
int d2 = sqlite3VdbeMakeLabel(v);
testcase( jumpIfNull==0 );
- sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL);
sqlite3ExprCachePush(pParse);
+ sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL);
sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
sqlite3VdbeResolveLabel(v, d2);
- sqlite3ExprCachePop(pParse);
+ sqlite3ExprCachePop(pParse, 1);
break;
}
case TK_NOT: {
@@ -88632,17 +80291,17 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
case TK_GE:
case TK_NE:
case TK_EQ: {
+ testcase( op==TK_LT );
+ testcase( op==TK_LE );
+ testcase( op==TK_GT );
+ testcase( op==TK_GE );
+ testcase( op==TK_EQ );
+ testcase( op==TK_NE );
testcase( jumpIfNull==0 );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
r1, r2, dest, jumpIfNull);
- assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
- assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
- assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
- assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
- assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
- assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
testcase( regFree1==0 );
testcase( regFree2==0 );
break;
@@ -88656,18 +80315,16 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
op = (pExpr->op==TK_IS) ? TK_NE : TK_EQ;
codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
r1, r2, dest, SQLITE_NULLEQ);
- VdbeCoverageIf(v, op==TK_EQ);
- VdbeCoverageIf(v, op==TK_NE);
testcase( regFree1==0 );
testcase( regFree2==0 );
break;
}
case TK_ISNULL:
case TK_NOTNULL: {
+ testcase( op==TK_ISNULL );
+ testcase( op==TK_NOTNULL );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
sqlite3VdbeAddOp2(v, op, r1, dest);
- testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL);
- testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL);
testcase( regFree1==0 );
break;
}
@@ -88689,17 +80346,10 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
}
#endif
default: {
- if( exprAlwaysFalse(pExpr) ){
- sqlite3VdbeGoto(v, dest);
- }else if( exprAlwaysTrue(pExpr) ){
- /* no-op */
- }else{
- r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
- sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0);
- VdbeCoverage(v);
- testcase( regFree1==0 );
- testcase( jumpIfNull==0 );
- }
+ r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+ sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0);
+ testcase( regFree1==0 );
+ testcase( jumpIfNull==0 );
break;
}
}
@@ -88708,21 +80358,6 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
}
/*
-** Like sqlite3ExprIfFalse() except that a copy is made of pExpr before
-** code generation, and that copy is deleted after code generation. This
-** ensures that the original pExpr is unchanged.
-*/
-SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,int jumpIfNull){
- sqlite3 *db = pParse->db;
- Expr *pCopy = sqlite3ExprDup(db, pExpr, 0);
- if( db->mallocFailed==0 ){
- sqlite3ExprIfFalse(pParse, pCopy, dest, jumpIfNull);
- }
- sqlite3ExprDelete(db, pCopy);
-}
-
-
-/*
** Do a deep comparison of two expression trees. Return 0 if the two
** expressions are completely identical. Return 1 if they differ only
** by a COLLATE operator at the top level. Return 2 if there are differences
@@ -88766,9 +80401,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Expr *pA, Expr *pB, int iTab){
return 2;
}
if( pA->op!=TK_COLUMN && ALWAYS(pA->op!=TK_AGG_COLUMN) && pA->u.zToken ){
- if( pA->op==TK_FUNCTION ){
- if( sqlite3StrICmp(pA->u.zToken,pB->u.zToken)!=0 ) return 2;
- }else if( strcmp(pA->u.zToken,pB->u.zToken)!=0 ){
+ if( strcmp(pA->u.zToken,pB->u.zToken)!=0 ){
return pA->op==TK_COLLATE ? 1 : 2;
}
}
@@ -88778,7 +80411,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Expr *pA, Expr *pB, int iTab){
if( sqlite3ExprCompare(pA->pLeft, pB->pLeft, iTab) ) return 2;
if( sqlite3ExprCompare(pA->pRight, pB->pRight, iTab) ) return 2;
if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList, iTab) ) return 2;
- if( ALWAYS((combinedFlags & EP_Reduced)==0) && pA->op!=TK_STRING ){
+ if( ALWAYS((combinedFlags & EP_Reduced)==0) ){
if( pA->iColumn!=pB->iColumn ) return 2;
if( pA->iTable!=pB->iTable
&& (pA->iTable!=iTab || NEVER(pB->iTable>=0)) ) return 2;
@@ -88880,11 +80513,10 @@ static int exprSrcCount(Walker *pWalker, Expr *pExpr){
int i;
struct SrcCount *p = pWalker->u.pSrcCount;
SrcList *pSrc = p->pSrc;
- int nSrc = pSrc ? pSrc->nSrc : 0;
- for(i=0; i<nSrc; i++){
+ for(i=0; i<pSrc->nSrc; i++){
if( pExpr->iTable==pSrc->a[i].iCursor ) break;
}
- if( i<nSrc ){
+ if( i<pSrc->nSrc ){
p->nThis++;
}else{
p->nOther++;
@@ -89131,7 +80763,7 @@ SQLITE_PRIVATE int sqlite3GetTempReg(Parse *pParse){
** purpose.
**
** If a register is currently being used by the column cache, then
-** the deallocation is deferred until the column cache line that uses
+** the deallocation is deferred until the column cache line that uses
** the register becomes stale.
*/
SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){
@@ -89197,7 +80829,6 @@ SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){
** This file contains C code routines that used to generate VDBE code
** that implements the ALTER TABLE command.
*/
-/* #include "sqliteInt.h" */
/*
** The code in this file only exists if we are not omitting the
@@ -89262,8 +80893,8 @@ static void renameTableFunc(
assert( len>0 );
} while( token!=TK_LP && token!=TK_USING );
- zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql),
- zSql, zTableName, tname.z+tname.n);
+ zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", ((u8*)tname.z) - zSql, zSql,
+ zTableName, tname.z+tname.n);
sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC);
}
}
@@ -89301,7 +80932,6 @@ static void renameParentFunc(
int token; /* Type of token */
UNUSED_PARAMETER(NotUsed);
- if( zInput==0 || zOld==0 ) return;
for(z=zInput; *z; z=z+n){
n = sqlite3GetToken(z, &token);
if( token==TK_REFERENCES ){
@@ -89311,13 +80941,12 @@ static void renameParentFunc(
n = sqlite3GetToken(z, &token);
}while( token==TK_SPACE );
- if( token==TK_ILLEGAL ) break;
zParent = sqlite3DbStrNDup(db, (const char *)z, n);
if( zParent==0 ) break;
sqlite3Dequote(zParent);
if( 0==sqlite3StrICmp((const char *)zOld, zParent) ){
char *zOut = sqlite3MPrintf(db, "%s%.*s\"%w\"",
- (zOutput?zOutput:""), (int)(z-zInput), zInput, (const char *)zNew
+ (zOutput?zOutput:""), z-zInput, zInput, (const char *)zNew
);
sqlite3DbFree(db, zOutput);
zOutput = zOut;
@@ -89360,8 +80989,8 @@ static void renameTriggerFunc(
UNUSED_PARAMETER(NotUsed);
/* The principle used to locate the table name in the CREATE TRIGGER
- ** statement is that the table name is the first token that is immediately
- ** preceded by either TK_ON or TK_DOT and immediately followed by one
+ ** statement is that the table name is the first token that is immediately
+ ** preceded by either TK_ON or TK_DOT and immediately followed by one
** of TK_WHEN, TK_BEGIN or TK_FOR.
*/
if( zSql ){
@@ -89403,8 +81032,8 @@ static void renameTriggerFunc(
/* Variable tname now contains the token that is the old table-name
** in the CREATE TRIGGER statement.
*/
- zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql),
- zSql, zTableName, tname.z+tname.n);
+ zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", ((u8*)tname.z) - zSql, zSql,
+ zTableName, tname.z+tname.n);
sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC);
}
}
@@ -89656,7 +81285,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
}
#endif
- /* Begin a transaction for database iDb.
+ /* Begin a transaction and code the VerifyCookie for database iDb.
** Then modify the schema cookie (since the ALTER TABLE modifies the
** schema). Open a statement transaction if the table is a virtual
** table.
@@ -89676,7 +81305,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
#ifndef SQLITE_OMIT_VIRTUALTABLE
if( pVTab ){
int i = ++pParse->nMem;
- sqlite3VdbeLoadString(v, i, zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, i, 0, zName, 0);
sqlite3VdbeAddOp4(v, OP_VRename, i, 0, 0,(const char*)pVTab, P4_VTAB);
sqlite3MayAbort(pParse);
}
@@ -89787,14 +81416,13 @@ SQLITE_PRIVATE void sqlite3MinimumFileFormat(Parse *pParse, int iDb, int minForm
if( ALWAYS(v) ){
int r1 = sqlite3GetTempReg(pParse);
int r2 = sqlite3GetTempReg(pParse);
- int addr1;
+ int j1;
sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT);
sqlite3VdbeUsesBtree(v, iDb);
sqlite3VdbeAddOp2(v, OP_Integer, minFormat, r2);
- addr1 = sqlite3VdbeAddOp3(v, OP_Ge, r2, 0, r1);
- sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); VdbeCoverage(v);
+ j1 = sqlite3VdbeAddOp3(v, OP_Ge, r2, 0, r1);
sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, r2);
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeJumpHere(v, j1);
sqlite3ReleaseTempReg(pParse, r1);
sqlite3ReleaseTempReg(pParse, r2);
}
@@ -89876,10 +81504,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
*/
if( pDflt ){
sqlite3_value *pVal = 0;
- int rc;
- rc = sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_BLOB, &pVal);
- assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
- if( rc!=SQLITE_OK ){
+ if( sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_NONE, &pVal) ){
db->mallocFailed = 1;
return;
}
@@ -90055,7 +81680,7 @@ exit_begin_add_column:
** not possible to enable both STAT3 and STAT4 at the same time. If they
** are both enabled, then STAT4 takes precedence.
**
-** For most applications, sqlite_stat1 provides all the statistics required
+** For most applications, sqlite_stat1 provides all the statistics required
** for the query planner to make good choices.
**
** Format of sqlite_stat1:
@@ -90160,7 +81785,6 @@ exit_begin_add_column:
** integer in the equivalent columns in sqlite_stat4.
*/
#ifndef SQLITE_OMIT_ANALYZE
-/* #include "sqliteInt.h" */
#if defined(SQLITE_ENABLE_STAT4)
# define IsStat4 1
@@ -90266,7 +81890,6 @@ static void openStatTable(
assert( i<ArraySize(aTable) );
sqlite3VdbeAddOp4Int(v, OP_OpenWrite, iStatCur+i, aRoot[i], iDb, 3);
sqlite3VdbeChangeP5(v, aCreateTbl[i]);
- VdbeComment((v, aTable[i].zName));
}
}
@@ -90302,8 +81925,7 @@ struct Stat4Sample {
struct Stat4Accum {
tRowcnt nRow; /* Number of rows in the entire table */
tRowcnt nPSample; /* How often to do a periodic sample */
- int nCol; /* Number of columns in index + pk/rowid */
- int nKeyCol; /* Number of index columns w/o the pk/rowid */
+ int nCol; /* Number of columns in index + rowid */
int mxSample; /* Maximum number of samples to accumulate */
Stat4Sample current; /* Current row as a Stat4Sample */
u32 iPrn; /* Pseudo-random number used for sampling */
@@ -90389,27 +82011,13 @@ static void stat4Destructor(void *pOld){
}
/*
-** Implementation of the stat_init(N,K,C) SQL function. The three parameters
-** are:
-** N: The number of columns in the index including the rowid/pk (note 1)
-** K: The number of columns in the index excluding the rowid/pk.
-** C: The number of rows in the index (note 2)
-**
-** Note 1: In the special case of the covering index that implements a
-** WITHOUT ROWID table, N is the number of PRIMARY KEY columns, not the
-** total number of columns in the table.
-**
-** Note 2: C is only used for STAT3 and STAT4.
-**
-** For indexes on ordinary rowid tables, N==K+1. But for indexes on
-** WITHOUT ROWID tables, N=K+P where P is the number of columns in the
-** PRIMARY KEY of the table. The covering index that implements the
-** original WITHOUT ROWID table has N==K as a special case.
+** Implementation of the stat_init(N,C) SQL function. The two parameters
+** are the number of rows in the table or index (C) and the number of columns
+** in the index (N). The second argument (C) is only used for STAT3 and STAT4.
**
** This routine allocates the Stat4Accum object in heap memory. The return
-** value is a pointer to the Stat4Accum object. The datatype of the
-** return value is BLOB, but it is really just a pointer to the Stat4Accum
-** object.
+** value is a pointer to the Stat4Accum object encoded as a blob (i.e.
+** the size of the blob is sizeof(void*) bytes).
*/
static void statInit(
sqlite3_context *context,
@@ -90418,7 +82026,6 @@ static void statInit(
){
Stat4Accum *p;
int nCol; /* Number of columns in index being sampled */
- int nKeyCol; /* Number of key columns */
int nColUp; /* nCol rounded up for alignment */
int n; /* Bytes of space to allocate */
sqlite3 *db; /* Database connection */
@@ -90429,11 +82036,8 @@ static void statInit(
/* Decode the three function arguments */
UNUSED_PARAMETER(argc);
nCol = sqlite3_value_int(argv[0]);
- assert( nCol>0 );
+ assert( nCol>1 ); /* >1 because it includes the rowid column */
nColUp = sizeof(tRowcnt)<8 ? (nCol+1)&~1 : nCol;
- nKeyCol = sqlite3_value_int(argv[1]);
- assert( nKeyCol<=nCol );
- assert( nKeyCol>0 );
/* Allocate the space required for the Stat4Accum object */
n = sizeof(*p)
@@ -90455,7 +82059,6 @@ static void statInit(
p->db = db;
p->nRow = 0;
p->nCol = nCol;
- p->nKeyCol = nKeyCol;
p->current.anDLt = (tRowcnt*)&p[1];
p->current.anEq = &p->current.anDLt[nColUp];
@@ -90466,9 +82069,9 @@ static void statInit(
p->iGet = -1;
p->mxSample = mxSample;
- p->nPSample = (tRowcnt)(sqlite3_value_int64(argv[2])/(mxSample/3+1) + 1);
+ p->nPSample = (tRowcnt)(sqlite3_value_int64(argv[1])/(mxSample/3+1) + 1);
p->current.anLt = &p->current.anEq[nColUp];
- p->iPrn = 0x689e962d*(u32)nCol ^ 0xd0944565*(u32)sqlite3_value_int(argv[2]);
+ p->iPrn = nCol*0x689e962d ^ sqlite3_value_int(argv[1])*0xd0944565;
/* Set up the Stat4Accum.a[] and aBest[] arrays */
p->a = (struct Stat4Sample*)&p->current.anLt[nColUp];
@@ -90487,14 +82090,11 @@ static void statInit(
}
#endif
- /* Return a pointer to the allocated object to the caller. Note that
- ** only the pointer (the 2nd parameter) matters. The size of the object
- ** (given by the 3rd parameter) is never used and can be any positive
- ** value. */
- sqlite3_result_blob(context, p, sizeof(*p), stat4Destructor);
+ /* Return a pointer to the allocated object to the caller */
+ sqlite3_result_blob(context, p, sizeof(p), stat4Destructor);
}
static const FuncDef statInitFuncdef = {
- 2+IsStat34, /* nArg */
+ 1+IsStat34, /* nArg */
SQLITE_UTF8, /* funcFlags */
0, /* pUserData */
0, /* pNext */
@@ -90718,10 +82318,7 @@ static void samplePushPrevious(Stat4Accum *p, int iChng){
** R Rowid for the current row. Might be a key record for
** WITHOUT ROWID tables.
**
-** This SQL function always returns NULL. Its purpose is to accumulate
-** statistical data and/or samples in the Stat4Accum object about the
-** index being analyzed. The stat_get() SQL function will later be used to
-** extract relevant information for constructing the sqlite_statN tables.
+** The SQL function always returns NULL.
**
** The R parameter is only used for STAT3 and STAT4
*/
@@ -90738,7 +82335,7 @@ static void statPush(
UNUSED_PARAMETER( argc );
UNUSED_PARAMETER( context );
- assert( p->nCol>0 );
+ assert( p->nCol>1 ); /* Includes rowid field */
assert( iChng<p->nCol );
if( p->nRow==0 ){
@@ -90815,10 +82412,7 @@ static const FuncDef statPushFuncdef = {
/*
** Implementation of the stat_get(P,J) SQL function. This routine is
-** used to query statistical information that has been gathered into
-** the Stat4Accum object by prior calls to stat_push(). The P parameter
-** has type BLOB but it is really just a pointer to the Stat4Accum object.
-** The content to be returned is determined by the parameter J
+** used to query the results. Content is returned for parameter J
** which is one of the STAT_GET_xxxx values defined above.
**
** If neither STAT3 nor STAT4 are enabled, then J is always
@@ -90869,7 +82463,7 @@ static void statGet(
char *z;
int i;
- char *zRet = sqlite3MallocZero( (p->nKeyCol+1)*25 );
+ char *zRet = sqlite3MallocZero(p->nCol * 25);
if( zRet==0 ){
sqlite3_result_error_nomem(context);
return;
@@ -90877,7 +82471,7 @@ static void statGet(
sqlite3_snprintf(24, zRet, "%llu", (u64)p->nRow);
z = zRet + sqlite3Strlen30(zRet);
- for(i=0; i<p->nKeyCol; i++){
+ for(i=0; i<(p->nCol-1); i++){
u64 nDistinct = p->current.anDLt[i] + 1;
u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
sqlite3_snprintf(24, z, " %llu", iVal);
@@ -90963,7 +82557,7 @@ static void callStatGet(Vdbe *v, int regStat4, int iParam, int regOut){
#else
UNUSED_PARAMETER( iParam );
#endif
- sqlite3VdbeAddOp3(v, OP_Function0, 0, regStat4, regOut);
+ sqlite3VdbeAddOp3(v, OP_Function, 0, regStat4, regOut);
sqlite3VdbeChangeP4(v, -1, (char*)&statGetFuncdef, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, 1 + IsStat34);
}
@@ -91034,30 +82628,30 @@ static void analyzeOneTable(
iIdxCur = iTab++;
pParse->nTab = MAX(pParse->nTab, iTab);
sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead);
- sqlite3VdbeLoadString(v, regTabname, pTab->zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, regTabname, 0, pTab->zName, 0);
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- int nCol; /* Number of columns in pIdx. "N" */
+ int nCol; /* Number of columns indexed by pIdx */
+ int *aGotoChng; /* Array of jump instruction addresses */
int addrRewind; /* Address of "OP_Rewind iIdxCur" */
+ int addrGotoChng0; /* Address of "Goto addr_chng_0" */
int addrNextRow; /* Address of "next_row:" */
const char *zIdxName; /* Name of the index */
- int nColTest; /* Number of columns to test for changes */
if( pOnlyIdx && pOnlyIdx!=pIdx ) continue;
if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0;
- if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){
- nCol = pIdx->nKeyCol;
+ VdbeNoopComment((v, "Begin analysis of %s", pIdx->zName));
+ nCol = pIdx->nKeyCol;
+ aGotoChng = sqlite3DbMallocRaw(db, sizeof(int)*(nCol+1));
+ if( aGotoChng==0 ) continue;
+
+ /* Populate the register containing the index name. */
+ if( pIdx->autoIndex==2 && !HasRowid(pTab) ){
zIdxName = pTab->zName;
- nColTest = nCol - 1;
}else{
- nCol = pIdx->nColumn;
zIdxName = pIdx->zName;
- nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1;
}
-
- /* Populate the register containing the index name. */
- sqlite3VdbeLoadString(v, regIdxname, zIdxName);
- VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName));
+ sqlite3VdbeAddOp4(v, OP_String8, 0, regIdxname, 0, zIdxName, 0);
/*
** Pseudo-code for loop that calls stat_push():
@@ -91082,7 +82676,7 @@ static void analyzeOneTable(
** regPrev(1) = idx(1)
** ...
**
- ** endDistinctTest:
+ ** chng_addr_N:
** regRowid = idx(rowid)
** stat_push(P, regChng, regRowid)
** Next csr
@@ -91095,7 +82689,7 @@ static void analyzeOneTable(
** the regPrev array and a trailing rowid (the rowid slot is required
** when building a record to insert into the sample column of
** the sqlite_stat4 table. */
- pParse->nMem = MAX(pParse->nMem, regPrev+nColTest);
+ pParse->nMem = MAX(pParse->nMem, regPrev+nCol);
/* Open a read-only cursor on the index being analyzed. */
assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) );
@@ -91105,22 +82699,18 @@ static void analyzeOneTable(
/* Invoke the stat_init() function. The arguments are:
**
- ** (1) the number of columns in the index including the rowid
- ** (or for a WITHOUT ROWID table, the number of PK columns),
- ** (2) the number of columns in the key without the rowid/pk
- ** (3) the number of rows in the index,
- **
+ ** (1) the number of columns in the index including the rowid,
+ ** (2) the number of rows in the index,
**
- ** The third argument is only used for STAT3 and STAT4
+ ** The second argument is only used for STAT3 and STAT4
*/
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+3);
+ sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+2);
#endif
- sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat4+1);
- sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regStat4+2);
- sqlite3VdbeAddOp3(v, OP_Function0, 0, regStat4+1, regStat4);
+ sqlite3VdbeAddOp2(v, OP_Integer, nCol+1, regStat4+1);
+ sqlite3VdbeAddOp3(v, OP_Function, 0, regStat4+1, regStat4);
sqlite3VdbeChangeP4(v, -1, (char*)&statInitFuncdef, P4_FUNCDEF);
- sqlite3VdbeChangeP5(v, 2+IsStat34);
+ sqlite3VdbeChangeP5(v, 1+IsStat34);
/* Implementation of the following:
**
@@ -91131,64 +82721,44 @@ static void analyzeOneTable(
**
*/
addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur);
- VdbeCoverage(v);
sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng);
- addrNextRow = sqlite3VdbeCurrentAddr(v);
+ addrGotoChng0 = sqlite3VdbeAddOp0(v, OP_Goto);
- if( nColTest>0 ){
- int endDistinctTest = sqlite3VdbeMakeLabel(v);
- int *aGotoChng; /* Array of jump instruction addresses */
- aGotoChng = sqlite3DbMallocRaw(db, sizeof(int)*nColTest);
- if( aGotoChng==0 ) continue;
+ /*
+ ** next_row:
+ ** regChng = 0
+ ** if( idx(0) != regPrev(0) ) goto chng_addr_0
+ ** regChng = 1
+ ** if( idx(1) != regPrev(1) ) goto chng_addr_1
+ ** ...
+ ** regChng = N
+ ** goto chng_addr_N
+ */
+ addrNextRow = sqlite3VdbeCurrentAddr(v);
+ for(i=0; i<nCol; i++){
+ char *pColl = (char*)sqlite3LocateCollSeq(pParse, pIdx->azColl[i]);
+ sqlite3VdbeAddOp2(v, OP_Integer, i, regChng);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp);
+ aGotoChng[i] =
+ sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ);
+ sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
+ }
+ sqlite3VdbeAddOp2(v, OP_Integer, nCol, regChng);
+ aGotoChng[nCol] = sqlite3VdbeAddOp0(v, OP_Goto);
- /*
- ** next_row:
- ** regChng = 0
- ** if( idx(0) != regPrev(0) ) goto chng_addr_0
- ** regChng = 1
- ** if( idx(1) != regPrev(1) ) goto chng_addr_1
- ** ...
- ** regChng = N
- ** goto endDistinctTest
- */
- sqlite3VdbeAddOp0(v, OP_Goto);
- addrNextRow = sqlite3VdbeCurrentAddr(v);
- if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){
- /* For a single-column UNIQUE index, once we have found a non-NULL
- ** row, we know that all the rest will be distinct, so skip
- ** subsequent distinctness tests. */
- sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest);
- VdbeCoverage(v);
- }
- for(i=0; i<nColTest; i++){
- char *pColl = (char*)sqlite3LocateCollSeq(pParse, pIdx->azColl[i]);
- sqlite3VdbeAddOp2(v, OP_Integer, i, regChng);
- sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp);
- aGotoChng[i] =
- sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ);
- sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
- VdbeCoverage(v);
- }
- sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng);
- sqlite3VdbeGoto(v, endDistinctTest);
-
-
- /*
- ** chng_addr_0:
- ** regPrev(0) = idx(0)
- ** chng_addr_1:
- ** regPrev(1) = idx(1)
- ** ...
- */
- sqlite3VdbeJumpHere(v, addrNextRow-1);
- for(i=0; i<nColTest; i++){
- sqlite3VdbeJumpHere(v, aGotoChng[i]);
- sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regPrev+i);
- }
- sqlite3VdbeResolveLabel(v, endDistinctTest);
- sqlite3DbFree(db, aGotoChng);
+ /*
+ ** chng_addr_0:
+ ** regPrev(0) = idx(0)
+ ** chng_addr_1:
+ ** regPrev(1) = idx(1)
+ ** ...
+ */
+ sqlite3VdbeJumpHere(v, addrGotoChng0);
+ for(i=0; i<nCol; i++){
+ sqlite3VdbeJumpHere(v, aGotoChng[i]);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regPrev+i);
}
-
+
/*
** chng_addr_N:
** regRowid = idx(rowid) // STAT34 only
@@ -91196,6 +82766,7 @@ static void analyzeOneTable(
** Next csr
** if !eof(csr) goto next_row;
*/
+ sqlite3VdbeJumpHere(v, aGotoChng[nCol]);
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
assert( regRowid==(regStat4+2) );
if( HasRowid(pTab) ){
@@ -91206,7 +82777,6 @@ static void analyzeOneTable(
regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol);
for(j=0; j<pPk->nKeyCol; j++){
k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]);
- assert( k>=0 && k<pTab->nCol );
sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j);
VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName));
}
@@ -91215,15 +82785,14 @@ static void analyzeOneTable(
}
#endif
assert( regChng==(regStat4+1) );
- sqlite3VdbeAddOp3(v, OP_Function0, 1, regStat4, regTemp);
+ sqlite3VdbeAddOp3(v, OP_Function, 1, regStat4, regTemp);
sqlite3VdbeChangeP4(v, -1, (char*)&statPushFuncdef, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, 2+IsStat34);
- sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow);
/* Add the entry to the stat1 table. */
callStatGet(v, regStat4, STAT_GET_STAT1, regStat1);
- assert( "BBB"[0]==SQLITE_AFF_TEXT );
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0);
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "aaa", 0);
sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid);
sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
@@ -91241,38 +82810,36 @@ static void analyzeOneTable(
int addrIsNull;
u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound;
- pParse->nMem = MAX(pParse->nMem, regCol+nCol);
+ pParse->nMem = MAX(pParse->nMem, regCol+nCol+1);
addrNext = sqlite3VdbeCurrentAddr(v);
callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid);
addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid);
- VdbeCoverage(v);
callStatGet(v, regStat4, STAT_GET_NEQ, regEq);
callStatGet(v, regStat4, STAT_GET_NLT, regLt);
callStatGet(v, regStat4, STAT_GET_NDLT, regDLt);
sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0);
- /* We know that the regSampleRowid row exists because it was read by
- ** the previous loop. Thus the not-found jump of seekOp will never
- ** be taken */
- VdbeCoverageNeverTaken(v);
#ifdef SQLITE_ENABLE_STAT3
- sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, 0, regSample);
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur,
+ pIdx->aiColumn[0], regSample);
#else
for(i=0; i<nCol; i++){
- sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, i, regCol+i);
+ i16 iCol = pIdx->aiColumn[i];
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, iCol, regCol+i);
}
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol, regSample);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol+1, regSample);
#endif
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp);
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 6, regTemp, "bbbbbb", 0);
sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid);
sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid);
- sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrNext);
sqlite3VdbeJumpHere(v, addrIsNull);
}
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
/* End of analysis */
sqlite3VdbeJumpHere(v, addrRewind);
+ sqlite3DbFree(db, aGotoChng);
}
@@ -91282,10 +82849,9 @@ static void analyzeOneTable(
if( pOnlyIdx==0 && needTableCnt ){
VdbeComment((v, "%s", pTab->zName));
sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1);
- jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v);
+ jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1);
sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname);
- assert( "BBB"[0]==SQLITE_AFF_TEXT );
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0);
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "aaa", 0);
sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid);
sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
@@ -91374,7 +82940,6 @@ SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){
Table *pTab;
Index *pIdx;
Token *pTableName;
- Vdbe *v;
/* Read the database schema. If an error occurs, leave an error message
** and code in pParse and return NULL. */
@@ -91422,8 +82987,6 @@ SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){
}
}
}
- v = sqlite3GetVdbe(pParse);
- if( v ) sqlite3VdbeAddOp0(v, OP_Expire);
}
/*
@@ -91445,7 +83008,6 @@ static void decodeIntArray(
char *zIntArray, /* String containing int array to decode */
int nOut, /* Number of slots in aOut[] */
tRowcnt *aOut, /* Store integers here */
- LogEst *aLog, /* Or, if aOut==0, here */
Index *pIndex /* Handle extra flags for this index, if not NULL */
){
char *z = zIntArray;
@@ -91456,7 +83018,7 @@ static void decodeIntArray(
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
if( z==0 ) z = "";
#else
- assert( z!=0 );
+ if( NEVER(z==0) ) z = "";
#endif
for(i=0; *z && i<nOut; i++){
v = 0;
@@ -91464,39 +83026,21 @@ static void decodeIntArray(
v = v*10 + c - '0';
z++;
}
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- if( aOut ) aOut[i] = v;
- if( aLog ) aLog[i] = sqlite3LogEst(v);
-#else
- assert( aOut==0 );
- UNUSED_PARAMETER(aOut);
- assert( aLog!=0 );
- aLog[i] = sqlite3LogEst(v);
-#endif
+ aOut[i] = v;
if( *z==' ' ) z++;
}
#ifndef SQLITE_ENABLE_STAT3_OR_STAT4
- assert( pIndex!=0 ); {
+ assert( pIndex!=0 );
#else
- if( pIndex ){
-#endif
- pIndex->bUnordered = 0;
- pIndex->noSkipScan = 0;
- while( z[0] ){
- if( sqlite3_strglob("unordered*", z)==0 ){
- pIndex->bUnordered = 1;
- }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){
- pIndex->szIdxRow = sqlite3LogEst(sqlite3Atoi(z+3));
- }else if( sqlite3_strglob("noskipscan*", z)==0 ){
- pIndex->noSkipScan = 1;
- }
-#ifdef SQLITE_ENABLE_COSTMULT
- else if( sqlite3_strglob("costmult=[0-9]*",z)==0 ){
- pIndex->pTable->costMult = sqlite3LogEst(sqlite3Atoi(z+9));
- }
+ if( pIndex )
#endif
- while( z[0]!=0 && z[0]!=' ' ) z++;
- while( z[0]==' ' ) z++;
+ {
+ if( strcmp(z, "unordered")==0 ){
+ pIndex->bUnordered = 1;
+ }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){
+ int v32 = 0;
+ sqlite3GetInt32(z+3, &v32);
+ pIndex->szIdxRow = sqlite3LogEst(v32);
}
}
}
@@ -91538,28 +83082,12 @@ static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){
z = argv[2];
if( pIndex ){
- tRowcnt *aiRowEst = 0;
- int nCol = pIndex->nKeyCol+1;
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- /* Index.aiRowEst may already be set here if there are duplicate
- ** sqlite_stat1 entries for this index. In that case just clobber
- ** the old data with the new instead of allocating a new array. */
- if( pIndex->aiRowEst==0 ){
- pIndex->aiRowEst = (tRowcnt*)sqlite3MallocZero(sizeof(tRowcnt) * nCol);
- if( pIndex->aiRowEst==0 ) pInfo->db->mallocFailed = 1;
- }
- aiRowEst = pIndex->aiRowEst;
-#endif
- pIndex->bUnordered = 0;
- decodeIntArray((char*)z, nCol, aiRowEst, pIndex->aiRowLogEst, pIndex);
- if( pIndex->pPartIdxWhere==0 ) pTable->nRowLogEst = pIndex->aiRowLogEst[0];
+ decodeIntArray((char*)z, pIndex->nKeyCol+1, pIndex->aiRowEst, pIndex);
+ if( pIndex->pPartIdxWhere==0 ) pTable->nRowEst = pIndex->aiRowEst[0];
}else{
Index fakeIdx;
fakeIdx.szIdxRow = pTable->szTabRow;
-#ifdef SQLITE_ENABLE_COSTMULT
- fakeIdx.pTable = pTable;
-#endif
- decodeIntArray((char*)z, 1, 0, &pTable->nRowLogEst, &fakeIdx);
+ decodeIntArray((char*)z, 1, &pTable->nRowEst, &fakeIdx);
pTable->szTabRow = fakeIdx.szIdxRow;
}
@@ -91600,52 +83128,30 @@ static void initAvgEq(Index *pIdx){
IndexSample *aSample = pIdx->aSample;
IndexSample *pFinal = &aSample[pIdx->nSample-1];
int iCol;
- int nCol = 1;
- if( pIdx->nSampleCol>1 ){
- /* If this is stat4 data, then calculate aAvgEq[] values for all
- ** sample columns except the last. The last is always set to 1, as
- ** once the trailing PK fields are considered all index keys are
- ** unique. */
- nCol = pIdx->nSampleCol-1;
- pIdx->aAvgEq[nCol] = 1;
- }
- for(iCol=0; iCol<nCol; iCol++){
- int nSample = pIdx->nSample;
+ for(iCol=0; iCol<pIdx->nKeyCol; iCol++){
int i; /* Used to iterate through samples */
tRowcnt sumEq = 0; /* Sum of the nEq values */
+ tRowcnt nSum = 0; /* Number of terms contributing to sumEq */
tRowcnt avgEq = 0;
- tRowcnt nRow; /* Number of rows in index */
- i64 nSum100 = 0; /* Number of terms contributing to sumEq */
- i64 nDist100; /* Number of distinct values in index */
-
- if( !pIdx->aiRowEst || iCol>=pIdx->nKeyCol || pIdx->aiRowEst[iCol+1]==0 ){
- nRow = pFinal->anLt[iCol];
- nDist100 = (i64)100 * pFinal->anDLt[iCol];
- nSample--;
- }else{
- nRow = pIdx->aiRowEst[0];
- nDist100 = ((i64)100 * pIdx->aiRowEst[0]) / pIdx->aiRowEst[iCol+1];
- }
- pIdx->nRowEst0 = nRow;
+ tRowcnt nDLt = pFinal->anDLt[iCol];
/* Set nSum to the number of distinct (iCol+1) field prefixes that
- ** occur in the stat4 table for this index. Set sumEq to the sum of
- ** the nEq values for column iCol for the same set (adding the value
- ** only once where there exist duplicate prefixes). */
- for(i=0; i<nSample; i++){
- if( i==(pIdx->nSample-1)
- || aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol]
- ){
+ ** occur in the stat4 table for this index before pFinal. Set
+ ** sumEq to the sum of the nEq values for column iCol for the same
+      ** set (adding the value only once where there exist duplicate
+ ** prefixes). */
+ for(i=0; i<(pIdx->nSample-1); i++){
+ if( aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol] ){
sumEq += aSample[i].anEq[iCol];
- nSum100 += 100;
+ nSum++;
}
}
-
- if( nDist100>nSum100 ){
- avgEq = ((i64)100 * (nRow - sumEq))/(nDist100 - nSum100);
+ if( nDLt>nSum ){
+ avgEq = (pFinal->anLt[iCol] - sumEq)/(nDLt - nSum);
}
if( avgEq==0 ) avgEq = 1;
pIdx->aAvgEq[iCol] = avgEq;
+ if( pIdx->nSampleCol==1 ) break;
}
}
}
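The estimator in the reverted-to code above reduces to a single division: the average number of rows per distinct key prefix among keys not already covered by the collected samples, clamped to at least 1. A quick standalone check of that arithmetic with invented counts (not SQLite code, just the formula from the hunk):

#include <stdio.h>

int main(void){
  unsigned int nLt   = 9000;  /* pFinal->anLt[iCol]: rows sorting before the last sample  */
  unsigned int nDLt  = 450;   /* pFinal->anDLt[iCol]: distinct keys before the last sample */
  unsigned int sumEq = 1200;  /* sum of anEq over samples with distinct prefixes           */
  unsigned int nSum  = 60;    /* number of samples that contributed to sumEq               */
  unsigned int avgEq = 0;

  if( nDLt>nSum ) avgEq = (nLt - sumEq)/(nDLt - nSum);
  if( avgEq==0 ) avgEq = 1;
  printf("avgEq = %u\n", avgEq);   /* (9000-1200)/(450-60) = 20 */
  return 0;
}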
@@ -91704,6 +83210,7 @@ static int loadStatTbl(
while( sqlite3_step(pStmt)==SQLITE_ROW ){
int nIdxCol = 1; /* Number of columns in stat4 records */
+ int nAvgCol = 1; /* Number of entries in Index.aAvgEq */
char *zIndex; /* Index name */
Index *pIdx; /* Pointer to the index object */
@@ -91721,17 +83228,13 @@ static int loadStatTbl(
** loaded from the stat4 table. In this case ignore stat3 data. */
if( pIdx==0 || pIdx->nSample ) continue;
if( bStat3==0 ){
- assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 );
- if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){
- nIdxCol = pIdx->nKeyCol;
- }else{
- nIdxCol = pIdx->nColumn;
- }
+ nIdxCol = pIdx->nKeyCol+1;
+ nAvgCol = pIdx->nKeyCol;
}
pIdx->nSampleCol = nIdxCol;
nByte = sizeof(IndexSample) * nSample;
nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample;
- nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */
+ nByte += nAvgCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */
pIdx->aSample = sqlite3DbMallocZero(db, nByte);
if( pIdx->aSample==0 ){
@@ -91739,7 +83242,7 @@ static int loadStatTbl(
return SQLITE_NOMEM;
}
pSpace = (tRowcnt*)&pIdx->aSample[nSample];
- pIdx->aAvgEq = pSpace; pSpace += nIdxCol;
+ pIdx->aAvgEq = pSpace; pSpace += nAvgCol;
for(i=0; i<nSample; i++){
pIdx->aSample[i].anEq = pSpace; pSpace += nIdxCol;
pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol;
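The allocation in this hunk is one sqlite3DbMallocZero() call whose tail is carved into the aAvgEq vector and then three tRowcnt vectors per sample by bumping pSpace. A compilable sketch of the same layout using stand-in types (illustrative only, not the real SQLite structures):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int tRowcnt;
typedef struct { tRowcnt *anEq, *anLt, *anDLt; } Sample;

static Sample *allocSamples(int nSample, int nIdxCol, tRowcnt **paAvgEq){
  size_t nByte = sizeof(Sample)*nSample
               + sizeof(tRowcnt)*nIdxCol*3*nSample   /* anEq + anLt + anDLt */
               + sizeof(tRowcnt)*nIdxCol;            /* the aAvgEq vector   */
  Sample *aSample = calloc(1, nByte);
  if( aSample ){
    tRowcnt *pSpace = (tRowcnt*)&aSample[nSample];
    *paAvgEq = pSpace; pSpace += nIdxCol;
    for(int i=0; i<nSample; i++){
      aSample[i].anEq  = pSpace; pSpace += nIdxCol;
      aSample[i].anLt  = pSpace; pSpace += nIdxCol;
      aSample[i].anDLt = pSpace; pSpace += nIdxCol;
    }
  }
  return aSample;
}

int main(void){
  tRowcnt *aAvgEq = 0;
  Sample *aSample = allocSamples(24, 3, &aAvgEq);
  if( aSample ) printf("24 samples, 3 columns each, one allocation\n");
  free(aSample);
  return 0;
}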
@@ -91776,9 +83279,9 @@ static int loadStatTbl(
pPrevIdx = pIdx;
}
pSample = &pIdx->aSample[pIdx->nSample];
- decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0);
- decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0);
- decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0);
+ decodeIntArray((char*)sqlite3_column_text(pStmt,1), nCol, pSample->anEq, 0);
+ decodeIntArray((char*)sqlite3_column_text(pStmt,2), nCol, pSample->anLt, 0);
+ decodeIntArray((char*)sqlite3_column_text(pStmt,3), nCol, pSample->anDLt,0);
/* Take a copy of the sample. Add two 0x00 bytes the end of the buffer.
** This is in case the sample record is corrupted. In that case, the
@@ -91888,17 +83391,12 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){
/* Load the statistics from the sqlite_stat4 table. */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- if( rc==SQLITE_OK && OptimizationEnabled(db, SQLITE_Stat34) ){
+ if( rc==SQLITE_OK ){
int lookasideEnabled = db->lookaside.bEnabled;
db->lookaside.bEnabled = 0;
rc = loadStat4(db, sInfo.zDatabase);
db->lookaside.bEnabled = lookasideEnabled;
}
- for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){
- Index *pIdx = sqliteHashData(i);
- sqlite3_free(pIdx->aiRowEst);
- pIdx->aiRowEst = 0;
- }
#endif
if( rc==SQLITE_NOMEM ){
@@ -91925,7 +83423,6 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){
*************************************************************************
** This file contains code used to implement the ATTACH and DETACH commands.
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_OMIT_ATTACH
/*
@@ -91952,6 +83449,10 @@ static int resolveAttachExpr(NameContext *pName, Expr *pExpr)
if( pExpr ){
if( pExpr->op!=TK_ID ){
rc = sqlite3ResolveExprNames(pName, pExpr);
+ if( rc==SQLITE_OK && !sqlite3ExprIsConstant(pExpr) ){
+ sqlite3ErrorMsg(pName->pParse, "invalid name: \"%s\"", pExpr->u.zToken);
+ return SQLITE_ERROR;
+ }
}else{
pExpr->op = TK_STRING;
}
@@ -92064,7 +83565,6 @@ static void attachFunc(
"attached databases must use the same text encoding as main database");
rc = SQLITE_ERROR;
}
- sqlite3BtreeEnter(aNew->pBt);
pPager = sqlite3BtreePager(aNew->pBt);
sqlite3PagerLockingMode(pPager, db->dfltLockMode);
sqlite3BtreeSecureDelete(aNew->pBt,
@@ -92072,7 +83572,6 @@ static void attachFunc(
#ifndef SQLITE_OMIT_PAGER_PRAGMAS
sqlite3BtreeSetPagerFlags(aNew->pBt, 3 | (db->flags & PAGER_FLAGS_MASK));
#endif
- sqlite3BtreeLeave(aNew->pBt);
}
aNew->safety_level = 3;
aNew->zName = sqlite3DbStrDup(db, zName);
@@ -92105,7 +83604,7 @@ static void attachFunc(
case SQLITE_NULL:
/* No key specified. Use the key from the main database */
sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey);
- if( nKey>0 || sqlite3BtreeGetOptimalReserve(db->aDb[0].pBt)>0 ){
+ if( nKey>0 || sqlite3BtreeGetReserve(db->aDb[0].pBt)>0 ){
rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey);
}
break;
@@ -92123,15 +83622,6 @@ static void attachFunc(
rc = sqlite3Init(db, &zErrDyn);
sqlite3BtreeLeaveAll(db);
}
-#ifdef SQLITE_USER_AUTHENTICATION
- if( rc==SQLITE_OK ){
- u8 newAuth = 0;
- rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth);
- if( newAuth<db->auth.authLevel ){
- rc = SQLITE_AUTH_USER;
- }
- }
-#endif
if( rc ){
int iDb = db->nDb - 1;
assert( iDb>=2 );
@@ -92212,7 +83702,7 @@ static void detachFunc(
sqlite3BtreeClose(pDb->pBt);
pDb->pBt = 0;
pDb->pSchema = 0;
- sqlite3CollapseDatabaseArray(db);
+ sqlite3ResetAllSchemasOfConnection(db);
return;
detach_error:
@@ -92246,6 +83736,7 @@ static void codeAttach(
SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) ||
SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey))
){
+ pParse->nErr++;
goto attach_end;
}
@@ -92273,7 +83764,7 @@ static void codeAttach(
assert( v || db->mallocFailed );
if( v ){
- sqlite3VdbeAddOp3(v, OP_Function0, 0, regArgs+3-pFunc->nArg, regArgs+3);
+ sqlite3VdbeAddOp3(v, OP_Function, 0, regArgs+3-pFunc->nArg, regArgs+3);
assert( pFunc->nArg==-1 || (pFunc->nArg&0xff)==pFunc->nArg );
sqlite3VdbeChangeP5(v, (u8)(pFunc->nArg));
sqlite3VdbeChangeP4(v, -1, (char *)pFunc, P4_FUNCDEF);
@@ -92515,7 +84006,6 @@ SQLITE_PRIVATE int sqlite3FixTriggerStep(
** systems that do not need this facility may omit it by recompiling
** the library with -DSQLITE_OMIT_AUTHORIZATION=1
*/
-/* #include "sqliteInt.h" */
/*
** All of the code in this file may be omitted by defining a single
@@ -92568,16 +84058,13 @@ SQLITE_PRIVATE int sqlite3FixTriggerStep(
** Setting the auth function to NULL disables this hook. The default
** setting of the auth function is NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
+SQLITE_API int sqlite3_set_authorizer(
sqlite3 *db,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pArg
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
- db->xAuth = (sqlite3_xauth)xAuth;
+ db->xAuth = xAuth;
db->pAuthArg = pArg;
sqlite3ExpirePreparedStatements(db);
sqlite3_mutex_leave(db->mutex);
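The function being rewritten in this hunk is the public sqlite3_set_authorizer() entry point. Typical application-side use of that documented API looks like the sketch below; this is ordinary client code, not part of the diff, and the table name is made up.

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

static int denySecrets(void *pArg, int code, const char *zArg1,
                       const char *zArg2, const char *zArg3,
                       const char *zArg4){
  (void)pArg; (void)zArg2; (void)zArg3; (void)zArg4;
  if( code==SQLITE_READ && zArg1 && strcmp(zArg1, "secrets")==0 ){
    return SQLITE_IGNORE;   /* columns of table "secrets" read back as NULL */
  }
  return SQLITE_OK;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_set_authorizer(db, denySecrets, 0);
  /* ... prepare and run statements as usual; any statement compiled from
  ** here on has its reads of "secrets" nulled out by the callback ... */
  sqlite3_close(db);
  return 0;
}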
@@ -92612,11 +84099,7 @@ SQLITE_PRIVATE int sqlite3AuthReadCol(
char *zDb = db->aDb[iDb].zName; /* Name of attached database */
int rc; /* Auth callback return code */
- rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext
-#ifdef SQLITE_USER_AUTHENTICATION
- ,db->auth.zAuthUser
-#endif
- );
+ rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext);
if( rc==SQLITE_DENY ){
if( db->nDb>2 || iDb!=0 ){
sqlite3ErrorMsg(pParse, "access to %s.%s.%s is prohibited",zDb,zTab,zCol);
@@ -92716,11 +84199,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck(
if( db->xAuth==0 ){
return SQLITE_OK;
}
- rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext
-#ifdef SQLITE_USER_AUTHENTICATION
- ,db->auth.zAuthUser
-#endif
- );
+ rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext);
if( rc==SQLITE_DENY ){
sqlite3ErrorMsg(pParse, "not authorized");
pParse->rc = SQLITE_AUTH;
@@ -92786,7 +84265,6 @@ SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){
** COMMIT
** ROLLBACK
*/
-/* #include "sqliteInt.h" */
/*
** This routine is called when a new SQL statement is beginning to
@@ -92878,19 +84356,6 @@ static void codeTableLocks(Parse *pParse){
#endif
/*
-** Return TRUE if the given yDbMask object is empty - if it contains no
-** 1 bits. This routine is used by the DbMaskAllZero() and DbMaskNotZero()
-** macros when SQLITE_MAX_ATTACHED is greater than 30.
-*/
-#if SQLITE_MAX_ATTACHED>30
-SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask m){
- int i;
- for(i=0; i<sizeof(yDbMask); i++) if( m[i] ) return 0;
- return 1;
-}
-#endif
-
-/*
** This routine is called after a single SQL statement has been
** parsed and a VDBE program to execute that statement has been
** prepared. This routine puts the finishing touches on the
@@ -92906,11 +84371,9 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
assert( pParse->pToplevel==0 );
db = pParse->db;
+ if( db->mallocFailed ) return;
if( pParse->nested ) return;
- if( db->mallocFailed || pParse->nErr ){
- if( pParse->rc==SQLITE_OK ) pParse->rc = SQLITE_ERROR;
- return;
- }
+ if( pParse->nErr ) return;
/* Begin by generating some termination code at the end of the
** vdbe program
@@ -92919,45 +84382,28 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
assert( !pParse->isMultiWrite
|| sqlite3VdbeAssertMayAbort(v, pParse->mayAbort));
if( v ){
- while( sqlite3VdbeDeletePriorOpcode(v, OP_Close) ){}
sqlite3VdbeAddOp0(v, OP_Halt);
-#if SQLITE_USER_AUTHENTICATION
- if( pParse->nTableLock>0 && db->init.busy==0 ){
- sqlite3UserAuthInit(db);
- if( db->auth.authLevel<UAUTH_User ){
- pParse->rc = SQLITE_AUTH_USER;
- sqlite3ErrorMsg(pParse, "user not authenticated");
- return;
- }
- }
-#endif
-
/* The cookie mask contains one bit for each database file open.
** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are
** set for each database that is used. Generate code to start a
** transaction on each used database and to verify the schema cookie
** on each used database.
*/
- if( db->mallocFailed==0
- && (DbMaskNonZero(pParse->cookieMask) || pParse->pConstExpr)
- ){
- int iDb, i;
- assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init );
- sqlite3VdbeJumpHere(v, 0);
- for(iDb=0; iDb<db->nDb; iDb++){
- if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue;
+ if( pParse->cookieGoto>0 ){
+ yDbMask mask;
+ int iDb, i, addr;
+ sqlite3VdbeJumpHere(v, pParse->cookieGoto-1);
+ for(iDb=0, mask=1; iDb<db->nDb; mask<<=1, iDb++){
+ if( (mask & pParse->cookieMask)==0 ) continue;
sqlite3VdbeUsesBtree(v, iDb);
- sqlite3VdbeAddOp4Int(v,
- OP_Transaction, /* Opcode */
- iDb, /* P1 */
- DbMaskTest(pParse->writeMask,iDb), /* P2 */
- pParse->cookieValue[iDb], /* P3 */
- db->aDb[iDb].pSchema->iGeneration /* P4 */
- );
- if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1);
- VdbeComment((v,
- "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite));
+ sqlite3VdbeAddOp2(v,OP_Transaction, iDb, (mask & pParse->writeMask)!=0);
+ if( db->init.busy==0 ){
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ sqlite3VdbeAddOp3(v, OP_VerifyCookie,
+ iDb, pParse->cookieValue[iDb],
+ db->aDb[iDb].pSchema->iGeneration);
+ }
}
#ifndef SQLITE_OMIT_VIRTUALTABLE
for(i=0; i<pParse->nVtabLock; i++){
@@ -92978,23 +84424,24 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
sqlite3AutoincrementBegin(pParse);
/* Code constant expressions that where factored out of inner loops */
+ addr = pParse->cookieGoto;
if( pParse->pConstExpr ){
ExprList *pEL = pParse->pConstExpr;
- pParse->okConstFactor = 0;
+ pParse->cookieGoto = 0;
for(i=0; i<pEL->nExpr; i++){
sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg);
}
}
/* Finally, jump back to the beginning of the executable code. */
- sqlite3VdbeGoto(v, 1);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addr);
}
}
/* Get the VDBE program ready for execution
*/
- if( v && pParse->nErr==0 && !db->mallocFailed ){
+ if( v && ALWAYS(pParse->nErr==0) && !db->mallocFailed ){
assert( pParse->iCacheLevel==0 ); /* Disables and re-enables match */
/* A minimum of one cursor is required if autoincrement is used
* See ticket [a696379c1f08866] */
@@ -93009,7 +84456,8 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
pParse->nMem = 0;
pParse->nSet = 0;
pParse->nVar = 0;
- DbMaskZero(pParse->cookieMask);
+ pParse->cookieMask = 0;
+ pParse->cookieGoto = 0;
}
/*
@@ -93050,16 +84498,6 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
pParse->nested--;
}
-#if SQLITE_USER_AUTHENTICATION
-/*
-** Return TRUE if zTable is the name of the system table that stores the
-** list of users and their access credentials.
-*/
-SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){
- return sqlite3_stricmp(zTable, "sqlite_user")==0;
-}
-#endif
-
/*
** Locate the in-memory structure that describes a particular database
** table given the name of that table and (optionally) the name of the
@@ -93075,21 +84513,16 @@ SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){
SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){
Table *p = 0;
int i;
-
+ int nName;
+ assert( zName!=0 );
+ nName = sqlite3Strlen30(zName);
/* All mutexes are required for schema access. Make sure we hold them. */
assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) );
-#if SQLITE_USER_AUTHENTICATION
- /* Only the admin user is allowed to know that the sqlite_user table
- ** exists */
- if( db->auth.authLevel<UAUTH_Admin && sqlite3UserAuthTable(zName)!=0 ){
- return 0;
- }
-#endif
for(i=OMIT_TEMPDB; i<db->nDb; i++){
int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
if( zDatabase!=0 && sqlite3StrICmp(zDatabase, db->aDb[j].zName) ) continue;
assert( sqlite3SchemaMutexHeld(db, j, 0) );
- p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName);
+ p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName, nName);
if( p ) break;
}
return p;
@@ -93122,17 +84555,6 @@ SQLITE_PRIVATE Table *sqlite3LocateTable(
p = sqlite3FindTable(pParse->db, zName, zDbase);
if( p==0 ){
const char *zMsg = isView ? "no such view" : "no such table";
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- if( sqlite3FindDbName(pParse->db, zDbase)<1 ){
- /* If zName is the not the name of a table in the schema created using
- ** CREATE, then check to see if it is the name of an virtual table that
- ** can be an eponymous virtual table. */
- Module *pMod = (Module*)sqlite3HashFind(&pParse->db->aModule, zName);
- if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){
- return pMod->pEpoTab;
- }
- }
-#endif
if( zDbase ){
sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName);
}else{
@@ -93140,12 +84562,6 @@ SQLITE_PRIVATE Table *sqlite3LocateTable(
}
pParse->checkSchema = 1;
}
-#if SQLITE_USER_AUTHENTICATION
- else if( pParse->db->auth.authLevel<UAUTH_User ){
- sqlite3ErrorMsg(pParse, "user not authenticated");
- p = 0;
- }
-#endif
return p;
}
@@ -93189,6 +84605,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem(
SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){
Index *p = 0;
int i;
+ int nName = sqlite3Strlen30(zName);
/* All mutexes are required for schema access. Make sure we hold them. */
assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
for(i=OMIT_TEMPDB; i<db->nDb; i++){
@@ -93197,7 +84614,7 @@ SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const cha
assert( pSchema );
if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zName) ) continue;
assert( sqlite3SchemaMutexHeld(db, j, 0) );
- p = sqlite3HashFind(&pSchema->idxHash, zName);
+ p = sqlite3HashFind(&pSchema->idxHash, zName, nName);
if( p ) break;
}
return p;
@@ -93210,13 +84627,10 @@ static void freeIndex(sqlite3 *db, Index *p){
#ifndef SQLITE_OMIT_ANALYZE
sqlite3DeleteIndexSamples(db, p);
#endif
+ if( db==0 || db->pnBytesFreed==0 ) sqlite3KeyInfoUnref(p->pKeyInfo);
sqlite3ExprDelete(db, p->pPartIdxWhere);
- sqlite3ExprListDelete(db, p->aColExpr);
sqlite3DbFree(db, p->zColAff);
if( p->isResized ) sqlite3DbFree(db, p->azColl);
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- sqlite3_free(p->aiRowEst);
-#endif
sqlite3DbFree(db, p);
}
@@ -93228,11 +84642,13 @@ static void freeIndex(sqlite3 *db, Index *p){
*/
SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char *zIdxName){
Index *pIndex;
+ int len;
Hash *pHash;
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
pHash = &db->aDb[iDb].pSchema->idxHash;
- pIndex = sqlite3HashInsert(pHash, zIdxName, 0);
+ len = sqlite3Strlen30(zIdxName);
+ pIndex = sqlite3HashInsert(pHash, zIdxName, len, 0);
if( ALWAYS(pIndex) ){
if( pIndex->pTable->pIndex==pIndex ){
pIndex->pTable->pIndex = pIndex->pNext;
@@ -93338,7 +84754,7 @@ SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3 *db){
** Delete memory allocated for the column names of a table or view (the
** Table.aCol[] array).
*/
-SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){
+static void sqliteDeleteColumnNames(sqlite3 *db, Table *pTable){
int i;
Column *pCol;
assert( pTable!=0 );
@@ -93392,7 +84808,7 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
if( !db || db->pnBytesFreed==0 ){
char *zName = pIndex->zName;
TESTONLY ( Index *pOld = ) sqlite3HashInsert(
- &pIndex->pSchema->idxHash, zName, 0
+ &pIndex->pSchema->idxHash, zName, sqlite3Strlen30(zName), 0
);
assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
assert( pOld==pIndex || pOld==0 );
@@ -93405,11 +84821,13 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
/* Delete the Table structure itself.
*/
- sqlite3DeleteColumnNames(db, pTable);
+ sqliteDeleteColumnNames(db, pTable);
sqlite3DbFree(db, pTable->zName);
sqlite3DbFree(db, pTable->zColAff);
sqlite3SelectDelete(db, pTable->pSelect);
+#ifndef SQLITE_OMIT_CHECK
sqlite3ExprListDelete(db, pTable->pCheck);
+#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
sqlite3VtabClear(db, pTable);
#endif
@@ -93433,7 +84851,8 @@ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
testcase( zTabName[0]==0 ); /* Zero-length table names are allowed */
pDb = &db->aDb[iDb];
- p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, 0);
+ p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName,
+ sqlite3Strlen30(zTabName),0);
sqlite3DeleteTable(db, p);
db->flags |= SQLITE_InternChanges;
}
@@ -93539,12 +84958,14 @@ SQLITE_PRIVATE int sqlite3TwoPartName(
if( ALWAYS(pName2!=0) && pName2->n>0 ){
if( db->init.busy ) {
sqlite3ErrorMsg(pParse, "corrupt database");
+ pParse->nErr++;
return -1;
}
*pUnqual = pName2;
iDb = sqlite3FindDb(db, pName1);
if( iDb<0 ){
sqlite3ErrorMsg(pParse, "unknown database %T", pName1);
+ pParse->nErr++;
return -1;
}
}else{
@@ -93577,7 +84998,7 @@ SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *pParse, const char *zName){
*/
SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){
Index *p;
- for(p=pTab->pIndex; p && !IsPrimaryKeyIndex(p); p=p->pNext){}
+ for(p=pTab->pIndex; p && p->autoIndex!=2; p=p->pNext){}
return p;
}
@@ -93703,7 +85124,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
if( !noErr ){
sqlite3ErrorMsg(pParse, "table %T already exists", pName);
}else{
- assert( !db->init.busy || CORRUPT_DB );
+ assert( !db->init.busy );
sqlite3CodeVerifySchema(pParse, iDb);
}
goto begin_table_error;
@@ -93725,7 +85146,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
pTable->iPKey = -1;
pTable->pSchema = db->aDb[iDb].pSchema;
pTable->nRef = 1;
- pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+ pTable->nRowEst = 1048576;
assert( pParse->pNewTable==0 );
pParse->pNewTable = pTable;
@@ -93749,12 +85170,10 @@ SQLITE_PRIVATE void sqlite3StartTable(
** now.
*/
if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){
- int addr1;
+ int j1;
int fileFormat;
int reg1, reg2, reg3;
- /* nullRow[] is an OP_Record encoding of a row containing 5 NULLs */
- static const char nullRow[] = { 6, 0, 0, 0, 0, 0 };
- sqlite3BeginWriteOperation(pParse, 1, iDb);
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
#ifndef SQLITE_OMIT_VIRTUALTABLE
if( isVirtual ){
@@ -93770,14 +85189,14 @@ SQLITE_PRIVATE void sqlite3StartTable(
reg3 = ++pParse->nMem;
sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT);
sqlite3VdbeUsesBtree(v, iDb);
- addr1 = sqlite3VdbeAddOp1(v, OP_If, reg3); VdbeCoverage(v);
+ j1 = sqlite3VdbeAddOp1(v, OP_If, reg3);
fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ?
1 : SQLITE_MAX_FILE_FORMAT;
sqlite3VdbeAddOp2(v, OP_Integer, fileFormat, reg3);
sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, reg3);
sqlite3VdbeAddOp2(v, OP_Integer, ENC(db), reg3);
sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_TEXT_ENCODING, reg3);
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeJumpHere(v, j1);
/* This just creates a place-holder record in the sqlite_master table.
** The record created does not contain anything yet. It will be replaced
@@ -93798,7 +85217,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
}
sqlite3OpenMasterTable(pParse, iDb);
sqlite3VdbeAddOp2(v, OP_NewRowid, 0, reg1);
- sqlite3VdbeAddOp4(v, OP_Blob, 6, reg3, 0, nullRow, P4_STATIC);
+ sqlite3VdbeAddOp2(v, OP_Null, 0, reg3);
sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
sqlite3VdbeAddOp0(v, OP_Close);
@@ -93870,10 +85289,10 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName){
pCol->zName = z;
/* If there is no type specified, columns have the default affinity
- ** 'BLOB'. If there is a type specified, then sqlite3AddColumnType() will
+ ** 'NONE'. If there is a type specified, then sqlite3AddColumnType() will
** be called next to set pCol->affinity correctly.
*/
- pCol->affinity = SQLITE_AFF_BLOB;
+ pCol->affinity = SQLITE_AFF_NONE;
pCol->szEst = 1;
p->nCol++;
}
@@ -93908,7 +85327,7 @@ SQLITE_PRIVATE void sqlite3AddNotNull(Parse *pParse, int onError){
** 'CHAR' | SQLITE_AFF_TEXT
** 'CLOB' | SQLITE_AFF_TEXT
** 'TEXT' | SQLITE_AFF_TEXT
-** 'BLOB' | SQLITE_AFF_BLOB
+** 'BLOB' | SQLITE_AFF_NONE
** 'REAL' | SQLITE_AFF_REAL
** 'FLOA' | SQLITE_AFF_REAL
** 'DOUB' | SQLITE_AFF_REAL
@@ -93934,7 +85353,7 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, u8 *pszEst){
aff = SQLITE_AFF_TEXT;
}else if( h==(('b'<<24)+('l'<<16)+('o'<<8)+'b') /* BLOB */
&& (aff==SQLITE_AFF_NUMERIC || aff==SQLITE_AFF_REAL) ){
- aff = SQLITE_AFF_BLOB;
+ aff = SQLITE_AFF_NONE;
if( zIn[0]=='(' ) zChar = zIn;
#ifndef SQLITE_OMIT_FLOATING_POINT
}else if( h==(('r'<<24)+('e'<<16)+('a'<<8)+'l') /* REAL */
@@ -93957,7 +85376,7 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, u8 *pszEst){
** estimate is scaled so that the size of an integer is 1. */
if( pszEst ){
*pszEst = 1; /* default size is approx 4 bytes */
- if( aff<SQLITE_AFF_NUMERIC ){
+ if( aff<=SQLITE_AFF_NONE ){
if( zChar ){
while( zChar[0] ){
if( sqlite3Isdigit(zChar[0]) ){
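The mapping table in the comment above summarizes the declared-type to affinity rules. The standalone sketch below approximates them with plain substring search instead of the rolling character hash used by sqlite3AffinityType(); it is illustrative only and prints affinity names rather than the SQLITE_AFF_* codes.

#include <stdio.h>
#include <string.h>
#include <ctype.h>

static const char *affinityOf(const char *zType){
  char zUp[64];
  size_t i;
  for(i=0; zType[i] && i<sizeof(zUp)-1; i++){
    zUp[i] = (char)toupper((unsigned char)zType[i]);
  }
  zUp[i] = 0;
  if( strstr(zUp, "INT") ) return "INTEGER";
  if( strstr(zUp, "CHAR") || strstr(zUp, "CLOB") || strstr(zUp, "TEXT") ){
    return "TEXT";
  }
  if( strstr(zUp, "BLOB") || zUp[0]==0 ) return "NONE";
  if( strstr(zUp, "REAL") || strstr(zUp, "FLOA") || strstr(zUp, "DOUB") ){
    return "REAL";
  }
  return "NUMERIC";
}

int main(void){
  const char *azDecl[] = { "VARCHAR(80)", "BIGINT", "BLOB", "DOUBLE", "DECIMAL(10,5)" };
  for(int i=0; i<5; i++){
    printf("%-14s -> %s\n", azDecl[i], affinityOf(azDecl[i]));
  }
  return 0;
}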
@@ -93994,8 +85413,7 @@ SQLITE_PRIVATE void sqlite3AddColumnType(Parse *pParse, Token *pType){
p = pParse->pNewTable;
if( p==0 || NEVER(p->nCol<1) ) return;
pCol = &p->aCol[p->nCol-1];
- assert( pCol->zType==0 || CORRUPT_DB );
- sqlite3DbFree(pParse->db, pCol->zType);
+ assert( pCol->zType==0 );
pCol->zType = sqlite3NameFromToken(pParse->db, pType);
pCol->affinity = sqlite3AffinityType(pCol->zType, &pCol->szEst);
}
@@ -94017,7 +85435,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse *pParse, ExprSpan *pSpan){
p = pParse->pNewTable;
if( p!=0 ){
pCol = &(p->aCol[p->nCol-1]);
- if( !sqlite3ExprIsConstantOrFunction(pSpan->pExpr, db->init.busy) ){
+ if( !sqlite3ExprIsConstantOrFunction(pSpan->pExpr) ){
sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant",
pCol->zName);
}else{
@@ -94036,30 +85454,6 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse *pParse, ExprSpan *pSpan){
}
/*
-** Backwards Compatibility Hack:
-**
-** Historical versions of SQLite accepted strings as column names in
-** indexes and PRIMARY KEY constraints and in UNIQUE constraints. Example:
-**
-** CREATE TABLE xyz(a,b,c,d,e,PRIMARY KEY('a'),UNIQUE('b','c' COLLATE trim)
-** CREATE INDEX abc ON xyz('c','d' DESC,'e' COLLATE nocase DESC);
-**
-** This is goofy. But to preserve backwards compatibility we continue to
-** accept it. This routine does the necessary conversion. It converts
-** the expression given in its argument from a TK_STRING into a TK_ID
-** if the expression is just a TK_STRING with an optional COLLATE clause.
-** If the expression is anything other than TK_STRING, the expression is
-** unchanged.
-*/
-static void sqlite3StringToId(Expr *p){
- if( p->op==TK_STRING ){
- p->op = TK_ID;
- }else if( p->op==TK_COLLATE && p->pLeft->op==TK_STRING ){
- p->pLeft->op = TK_ID;
- }
-}
-
-/*
** Designate the PRIMARY KEY for the table. pList is a list of names
** of columns that form the primary key. If pList is NULL, then the
** most recently added column of the table is the primary key.
@@ -94103,24 +85497,18 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey(
}else{
nTerm = pList->nExpr;
for(i=0; i<nTerm; i++){
- Expr *pCExpr = sqlite3ExprSkipCollate(pList->a[i].pExpr);
- assert( pCExpr!=0 );
- sqlite3StringToId(pCExpr);
- if( pCExpr->op==TK_ID ){
- const char *zCName = pCExpr->u.zToken;
- for(iCol=0; iCol<pTab->nCol; iCol++){
- if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zName)==0 ){
- pTab->aCol[iCol].colFlags |= COLFLAG_PRIMKEY;
- zType = pTab->aCol[iCol].zType;
- break;
- }
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( sqlite3StrICmp(pList->a[i].zName, pTab->aCol[iCol].zName)==0 ){
+ pTab->aCol[iCol].colFlags |= COLFLAG_PRIMKEY;
+ zType = pTab->aCol[iCol].zType;
+ break;
}
}
}
}
if( nTerm==1
&& zType && sqlite3StrICmp(zType, "INTEGER")==0
- && sortOrder!=SQLITE_SO_DESC
+ && sortOrder==SQLITE_SO_ASC
){
pTab->iPKey = iCol;
pTab->keyConf = (u8)onError;
@@ -94133,11 +85521,14 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey(
"INTEGER PRIMARY KEY");
#endif
}else{
+ Vdbe *v = pParse->pVdbe;
Index *p;
+ if( v ) pParse->addrSkipPK = sqlite3VdbeAddOp0(v, OP_Noop);
p = sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0,
0, sortOrder, 0);
if( p ){
- p->idxType = SQLITE_IDXTYPE_PRIMARYKEY;
+ p->autoIndex = 2;
+ if( v ) sqlite3VdbeJumpHere(v, pParse->addrSkipPK);
}
pList = 0;
}
@@ -94156,10 +85547,7 @@ SQLITE_PRIVATE void sqlite3AddCheckConstraint(
){
#ifndef SQLITE_OMIT_CHECK
Table *pTab = pParse->pNewTable;
- sqlite3 *db = pParse->db;
- if( pTab && !IN_DECLARE_VTAB
- && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt)
- ){
+ if( pTab && !IN_DECLARE_VTAB ){
pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr);
if( pParse->constraintName.n ){
sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1);
@@ -94305,10 +85693,10 @@ static void identPut(char *z, int *pIdx, char *zSignedIdent){
for(j=0; zIdent[j]; j++){
if( !sqlite3Isalnum(zIdent[j]) && zIdent[j]!='_' ) break;
}
- needQuote = sqlite3Isdigit(zIdent[0])
- || sqlite3KeywordCode(zIdent, j)!=TK_ID
- || zIdent[j]!=0
- || j==0;
+ needQuote = sqlite3Isdigit(zIdent[0]) || sqlite3KeywordCode(zIdent, j)!=TK_ID;
+ if( !needQuote ){
+ needQuote = zIdent[j];
+ }
if( needQuote ) z[i++] = '"';
for(j=0; zIdent[j]; j++){
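Both versions of the quoting test above encode the same rule: an identifier can be emitted bare only if it is non-empty, does not start with a digit, contains nothing but alphanumerics and '_', and does not parse as a keyword. A small self-contained version of that check (the keyword list here is a tiny made-up stand-in for sqlite3KeywordCode()):

#include <stdio.h>
#include <ctype.h>

static int icmp(const char *a, const char *b){
  while( *a && tolower((unsigned char)*a)==tolower((unsigned char)*b) ){ a++; b++; }
  return tolower((unsigned char)*a) - tolower((unsigned char)*b);
}

static int needsQuote(const char *z){
  static const char *azKw[] = { "table", "index", "select", "order" };
  size_t j;
  if( z[0]==0 || isdigit((unsigned char)z[0]) ) return 1;
  for(j=0; z[j]; j++){
    if( !isalnum((unsigned char)z[j]) && z[j]!='_' ) return 1;
  }
  for(j=0; j<sizeof(azKw)/sizeof(azKw[0]); j++){
    if( icmp(z, azKw[j])==0 ) return 1;
  }
  return 0;
}

int main(void){
  const char *az[] = { "col1", "2nd", "Order", "a b", "x_y" };
  for(int i=0; i<5; i++){
    printf("%-5s -> %s\n", az[i], needsQuote(az[i]) ? "\"quoted\"" : "bare");
  }
  return 0;
}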
@@ -94356,8 +85744,8 @@ static char *createTableStmt(sqlite3 *db, Table *p){
zStmt[k++] = '(';
for(pCol=p->aCol, i=0; i<p->nCol; i++, pCol++){
static const char * const azType[] = {
- /* SQLITE_AFF_BLOB */ "",
/* SQLITE_AFF_TEXT */ " TEXT",
+ /* SQLITE_AFF_NONE */ "",
/* SQLITE_AFF_NUMERIC */ " NUM",
/* SQLITE_AFF_INTEGER */ " INT",
/* SQLITE_AFF_REAL */ " REAL"
@@ -94369,17 +85757,17 @@ static char *createTableStmt(sqlite3 *db, Table *p){
k += sqlite3Strlen30(&zStmt[k]);
zSep = zSep2;
identPut(zStmt, &k, pCol->zName);
- assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 );
- assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) );
- testcase( pCol->affinity==SQLITE_AFF_BLOB );
+ assert( pCol->affinity-SQLITE_AFF_TEXT >= 0 );
+ assert( pCol->affinity-SQLITE_AFF_TEXT < ArraySize(azType) );
testcase( pCol->affinity==SQLITE_AFF_TEXT );
+ testcase( pCol->affinity==SQLITE_AFF_NONE );
testcase( pCol->affinity==SQLITE_AFF_NUMERIC );
testcase( pCol->affinity==SQLITE_AFF_INTEGER );
testcase( pCol->affinity==SQLITE_AFF_REAL );
- zType = azType[pCol->affinity - SQLITE_AFF_BLOB];
+ zType = azType[pCol->affinity - SQLITE_AFF_TEXT];
len = sqlite3Strlen30(zType);
- assert( pCol->affinity==SQLITE_AFF_BLOB
+ assert( pCol->affinity==SQLITE_AFF_NONE
|| pCol->affinity==sqlite3AffinityType(zType, 0) );
memcpy(&zStmt[k], zType, len);
k += len;
@@ -94461,7 +85849,7 @@ static int hasColumn(const i16 *aiCol, int nCol, int x){
** no rowid btree for a WITHOUT ROWID. Instead, the canonical
** data storage is a covering index btree.
** (2) Bypass the creation of the sqlite_master table entry
-** for the PRIMARY KEY as the primary key index is now
+** for the PRIMARY KEY as the the primary key index is now
** identified by the sqlite_master table entry of the table itself.
** (3) Set the Index.tnum of the PRIMARY KEY Index object in the
** schema to the rootpage from the main table.
@@ -94482,12 +85870,20 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
Vdbe *v = pParse->pVdbe;
/* Convert the OP_CreateTable opcode that would normally create the
- ** root-page for the table into an OP_CreateIndex opcode. The index
+ ** root-page for the table into a OP_CreateIndex opcode. The index
** created will become the PRIMARY KEY index.
*/
if( pParse->addrCrTab ){
assert( v );
- sqlite3VdbeChangeOpcode(v, pParse->addrCrTab, OP_CreateIndex);
+ sqlite3VdbeGetOp(v, pParse->addrCrTab)->opcode = OP_CreateIndex;
+ }
+
+ /* Bypass the creation of the PRIMARY KEY btree and the sqlite_master
+ ** table entry.
+ */
+ if( pParse->addrSkipPK ){
+ assert( v );
+ sqlite3VdbeGetOp(v, pParse->addrSkipPK)->opcode = OP_Goto;
}
/* Locate the PRIMARY KEY index. Or, if this table was originally
@@ -94495,56 +85891,28 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
*/
if( pTab->iPKey>=0 ){
ExprList *pList;
- Token ipkToken;
- ipkToken.z = pTab->aCol[pTab->iPKey].zName;
- ipkToken.n = sqlite3Strlen30(ipkToken.z);
- pList = sqlite3ExprListAppend(pParse, 0,
- sqlite3ExprAlloc(db, TK_ID, &ipkToken, 0));
+ pList = sqlite3ExprListAppend(pParse, 0, 0);
if( pList==0 ) return;
+ pList->a[0].zName = sqlite3DbStrDup(pParse->db,
+ pTab->aCol[pTab->iPKey].zName);
pList->a[0].sortOrder = pParse->iPkSortOrder;
assert( pParse->pNewTable==pTab );
pPk = sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0);
if( pPk==0 ) return;
- pPk->idxType = SQLITE_IDXTYPE_PRIMARYKEY;
+ pPk->autoIndex = 2;
pTab->iPKey = -1;
}else{
pPk = sqlite3PrimaryKeyIndex(pTab);
-
- /* Bypass the creation of the PRIMARY KEY btree and the sqlite_master
- ** table entry. This is only required if currently generating VDBE
- ** code for a CREATE TABLE (not when parsing one as part of reading
- ** a database schema). */
- if( v ){
- assert( db->init.busy==0 );
- sqlite3VdbeChangeOpcode(v, pPk->tnum, OP_Goto);
- }
-
- /*
- ** Remove all redundant columns from the PRIMARY KEY. For example, change
- ** "PRIMARY KEY(a,b,a,b,c,b,c,d)" into just "PRIMARY KEY(a,b,c,d)". Later
- ** code assumes the PRIMARY KEY contains no repeated columns.
- */
- for(i=j=1; i<pPk->nKeyCol; i++){
- if( hasColumn(pPk->aiColumn, j, pPk->aiColumn[i]) ){
- pPk->nColumn--;
- }else{
- pPk->aiColumn[j++] = pPk->aiColumn[i];
- }
- }
- pPk->nKeyCol = j;
}
pPk->isCovering = 1;
assert( pPk!=0 );
nPk = pPk->nKeyCol;
- /* Make sure every column of the PRIMARY KEY is NOT NULL. (Except,
- ** do not enforce this for imposter tables.) */
- if( !db->init.imposterTable ){
- for(i=0; i<nPk; i++){
- pTab->aCol[pPk->aiColumn[i]].notNull = 1;
- }
- pPk->uniqNotNull = 1;
+ /* Make sure every column of the PRIMARY KEY is NOT NULL */
+ for(i=0; i<nPk; i++){
+ pTab->aCol[pPk->aiColumn[i]].notNull = 1;
}
+ pPk->uniqNotNull = 1;
/* The root page of the PRIMARY KEY is the table root page */
pPk->tnum = pTab->tnum;
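One of the blocks removed in this hunk is the loop that collapses repeated columns out of a PRIMARY KEY definition, turning PRIMARY KEY(a,b,a,b,c,b,c,d) into PRIMARY KEY(a,b,c,d). The standalone program below reproduces just that loop with stand-in types and made-up column numbers:

#include <stdio.h>

static int hasColumn(const short *aiCol, int nCol, int x){
  while( nCol-- > 0 ){
    if( x==*(aiCol++) ) return 1;
  }
  return 0;
}

int main(void){
  /* PRIMARY KEY(a,b,a,b,c,b,c,d) expressed as column numbers */
  short aiColumn[] = { 0, 1, 0, 1, 2, 1, 2, 3 };
  int nKeyCol = 8, i, j;
  for(i=j=1; i<nKeyCol; i++){
    if( !hasColumn(aiColumn, j, aiColumn[i]) ) aiColumn[j++] = aiColumn[i];
  }
  nKeyCol = j;
  for(i=0; i<nKeyCol; i++) printf("%d ", aiColumn[i]);
  printf("\n");   /* prints: 0 1 2 3  i.e. PRIMARY KEY(a,b,c,d) */
  return 0;
}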
@@ -94554,7 +85922,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
*/
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
int n;
- if( IsPrimaryKeyIndex(pIdx) ) continue;
+ if( pIdx->autoIndex==2 ) continue;
for(i=n=0; i<nPk; i++){
if( !hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ) n++;
}
@@ -94626,10 +85994,9 @@ SQLITE_PRIVATE void sqlite3EndTable(
int iDb; /* Database in which the table lives */
Index *pIdx; /* An implied index of the table */
- if( pEnd==0 && pSelect==0 ){
+ if( (pEnd==0 && pSelect==0) || db->mallocFailed ){
return;
}
- assert( !db->mallocFailed );
p = pParse->pNewTable;
if( p==0 ) return;
@@ -94655,7 +86022,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
if( (p->tabFlags & TF_HasPrimaryKey)==0 ){
sqlite3ErrorMsg(pParse, "PRIMARY KEY missing on table %s", p->zName);
}else{
- p->tabFlags |= TF_WithoutRowid | TF_NoVisibleRowid;
+ p->tabFlags |= TF_WithoutRowid;
convertToWithoutRowidTable(pParse, p);
}
}
@@ -94723,46 +86090,26 @@ SQLITE_PRIVATE void sqlite3EndTable(
** be redundant.
*/
if( pSelect ){
- SelectDest dest; /* Where the SELECT should store results */
- int regYield; /* Register holding co-routine entry-point */
- int addrTop; /* Top of the co-routine */
- int regRec; /* A record to be insert into the new table */
- int regRowid; /* Rowid of the next row to insert */
- int addrInsLoop; /* Top of the loop for inserting rows */
- Table *pSelTab; /* A table that describes the SELECT results */
-
- regYield = ++pParse->nMem;
- regRec = ++pParse->nMem;
- regRowid = ++pParse->nMem;
+ SelectDest dest;
+ Table *pSelTab;
+
assert(pParse->nTab==1);
- sqlite3MayAbort(pParse);
sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb);
sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG);
pParse->nTab = 2;
- addrTop = sqlite3VdbeCurrentAddr(v) + 1;
- sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
- sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield);
+ sqlite3SelectDestInit(&dest, SRT_Table, 1);
sqlite3Select(pParse, pSelect, &dest);
- sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield);
- sqlite3VdbeJumpHere(v, addrTop - 1);
- if( pParse->nErr ) return;
- pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect);
- if( pSelTab==0 ) return;
- assert( p->aCol==0 );
- p->nCol = pSelTab->nCol;
- p->aCol = pSelTab->aCol;
- pSelTab->nCol = 0;
- pSelTab->aCol = 0;
- sqlite3DeleteTable(db, pSelTab);
- addrInsLoop = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
- VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec);
- sqlite3TableAffinity(v, p, 0);
- sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid);
- sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid);
- sqlite3VdbeGoto(v, addrInsLoop);
- sqlite3VdbeJumpHere(v, addrInsLoop);
sqlite3VdbeAddOp1(v, OP_Close, 1);
+ if( pParse->nErr==0 ){
+ pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect);
+ if( pSelTab==0 ) return;
+ assert( p->aCol==0 );
+ p->nCol = pSelTab->nCol;
+ p->aCol = pSelTab->aCol;
+ pSelTab->nCol = 0;
+ pSelTab->aCol = 0;
+ sqlite3DeleteTable(db, pSelTab);
+ }
}
/* Compute the complete text of the CREATE statement */
@@ -94824,7 +86171,8 @@ SQLITE_PRIVATE void sqlite3EndTable(
Table *pOld;
Schema *pSchema = p->pSchema;
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
- pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, p);
+ pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName,
+ sqlite3Strlen30(p->zName),p);
if( pOld ){
assert( p==pOld ); /* Malloc must have failed inside HashInsert() */
db->mallocFailed = 1;
@@ -94857,7 +86205,6 @@ SQLITE_PRIVATE void sqlite3CreateView(
Token *pBegin, /* The CREATE token that begins the statement */
Token *pName1, /* The token that holds the name of the view */
Token *pName2, /* The token that holds the name of the view */
- ExprList *pCNames, /* Optional list of view column names */
Select *pSelect, /* A SELECT statement that will become the new view */
int isTemp, /* TRUE for a TEMPORARY view */
int noErr /* Suppress error messages if VIEW already exists */
@@ -94873,15 +86220,22 @@ SQLITE_PRIVATE void sqlite3CreateView(
if( pParse->nVar>0 ){
sqlite3ErrorMsg(pParse, "parameters are not allowed in views");
- goto create_view_fail;
+ sqlite3SelectDelete(db, pSelect);
+ return;
}
sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr);
p = pParse->pNewTable;
- if( p==0 || pParse->nErr ) goto create_view_fail;
+ if( p==0 || pParse->nErr ){
+ sqlite3SelectDelete(db, pSelect);
+ return;
+ }
sqlite3TwoPartName(pParse, pName1, pName2, &pName);
iDb = sqlite3SchemaToIndex(db, p->pSchema);
sqlite3FixInit(&sFix, pParse, iDb, "view", pName);
- if( sqlite3FixSelect(&sFix, pSelect) ) goto create_view_fail;
+ if( sqlite3FixSelect(&sFix, pSelect) ){
+ sqlite3SelectDelete(db, pSelect);
+ return;
+ }
/* Make a copy of the entire SELECT statement that defines the view.
** This will force all the Expr.token.z values to be dynamically
@@ -94889,31 +86243,30 @@ SQLITE_PRIVATE void sqlite3CreateView(
** they will persist after the current sqlite3_exec() call returns.
*/
p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
- p->pCheck = sqlite3ExprListDup(db, pCNames, EXPRDUP_REDUCE);
- if( db->mallocFailed ) goto create_view_fail;
+ sqlite3SelectDelete(db, pSelect);
+ if( db->mallocFailed ){
+ return;
+ }
+ if( !db->init.busy ){
+ sqlite3ViewGetColumnNames(pParse, p);
+ }
/* Locate the end of the CREATE VIEW statement. Make sEnd point to
** the end.
*/
sEnd = pParse->sLastToken;
- assert( sEnd.z[0]!=0 );
- if( sEnd.z[0]!=';' ){
+ if( ALWAYS(sEnd.z[0]!=0) && sEnd.z[0]!=';' ){
sEnd.z += sEnd.n;
}
sEnd.n = 0;
n = (int)(sEnd.z - pBegin->z);
- assert( n>0 );
z = pBegin->z;
- while( sqlite3Isspace(z[n-1]) ){ n--; }
+ while( ALWAYS(n>0) && sqlite3Isspace(z[n-1]) ){ n--; }
sEnd.z = &z[n-1];
sEnd.n = 1;
/* Use sqlite3EndTable() to add the view to the SQLITE_MASTER table */
sqlite3EndTable(pParse, 0, &sEnd, 0, 0);
-
-create_view_fail:
- sqlite3SelectDelete(db, pSelect);
- sqlite3ExprListDelete(db, pCNames);
return;
}
#endif /* SQLITE_OMIT_VIEW */
@@ -94930,8 +86283,7 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
int nErr = 0; /* Number of errors encountered */
int n; /* Temporarily holds the number of cursors assigned */
sqlite3 *db = pParse->db; /* Database connection for malloc errors */
- sqlite3_xauth xAuth; /* Saved xAuth pointer */
- u8 bEnabledLA; /* Saved db->lookaside.bEnabled state */
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*);
assert( pTable );
@@ -94977,46 +86329,40 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
** statement that defines the view.
*/
assert( pTable->pSelect );
- bEnabledLA = db->lookaside.bEnabled;
- if( pTable->pCheck ){
+ pSel = sqlite3SelectDup(db, pTable->pSelect, 0);
+ if( pSel ){
+ u8 enableLookaside = db->lookaside.bEnabled;
+ n = pParse->nTab;
+ sqlite3SrcListAssignCursors(pParse, pSel->pSrc);
+ pTable->nCol = -1;
db->lookaside.bEnabled = 0;
- sqlite3ColumnsFromExprList(pParse, pTable->pCheck,
- &pTable->nCol, &pTable->aCol);
- }else{
- pSel = sqlite3SelectDup(db, pTable->pSelect, 0);
- if( pSel ){
- n = pParse->nTab;
- sqlite3SrcListAssignCursors(pParse, pSel->pSrc);
- pTable->nCol = -1;
- db->lookaside.bEnabled = 0;
#ifndef SQLITE_OMIT_AUTHORIZATION
- xAuth = db->xAuth;
- db->xAuth = 0;
- pSelTab = sqlite3ResultSetOfSelect(pParse, pSel);
- db->xAuth = xAuth;
+ xAuth = db->xAuth;
+ db->xAuth = 0;
+ pSelTab = sqlite3ResultSetOfSelect(pParse, pSel);
+ db->xAuth = xAuth;
#else
- pSelTab = sqlite3ResultSetOfSelect(pParse, pSel);
+ pSelTab = sqlite3ResultSetOfSelect(pParse, pSel);
#endif
- pParse->nTab = n;
- if( pSelTab ){
- assert( pTable->aCol==0 );
- pTable->nCol = pSelTab->nCol;
- pTable->aCol = pSelTab->aCol;
- pSelTab->nCol = 0;
- pSelTab->aCol = 0;
- sqlite3DeleteTable(db, pSelTab);
- assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) );
- }else{
- pTable->nCol = 0;
- nErr++;
- }
- sqlite3SelectDelete(db, pSel);
- } else {
+ db->lookaside.bEnabled = enableLookaside;
+ pParse->nTab = n;
+ if( pSelTab ){
+ assert( pTable->aCol==0 );
+ pTable->nCol = pSelTab->nCol;
+ pTable->aCol = pSelTab->aCol;
+ pSelTab->nCol = 0;
+ pSelTab->aCol = 0;
+ sqlite3DeleteTable(db, pSelTab);
+ assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) );
+ pTable->pSchema->flags |= DB_UnresetViews;
+ }else{
+ pTable->nCol = 0;
nErr++;
}
+ sqlite3SelectDelete(db, pSel);
+ } else {
+ nErr++;
}
- db->lookaside.bEnabled = bEnabledLA;
- pTable->pSchema->schemaFlags |= DB_UnresetViews;
#endif /* SQLITE_OMIT_VIEW */
return nErr;
}
@@ -95033,7 +86379,7 @@ static void sqliteViewResetAll(sqlite3 *db, int idx){
for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){
Table *pTab = sqliteHashData(i);
if( pTab->pSelect ){
- sqlite3DeleteColumnNames(db, pTab);
+ sqliteDeleteColumnNames(db, pTab);
pTab->aCol = 0;
pTab->nCol = 0;
}
@@ -95283,7 +86629,6 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView,
}
assert( pParse->nErr==0 );
assert( pName->nSrc==1 );
- if( sqlite3ReadSchema(pParse) ) goto exit_drop_table;
if( noErr ) db->suppressErr++;
pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]);
if( noErr ) db->suppressErr--;
@@ -95477,7 +86822,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey(
assert( sqlite3SchemaMutexHeld(db, 0, p->pSchema) );
pNextTo = (FKey *)sqlite3HashInsert(&p->pSchema->fkeyHash,
- pFKey->zTo, (void *)pFKey
+ pFKey->zTo, sqlite3Strlen30(pFKey->zTo), (void *)pFKey
);
if( pNextTo==pFKey ){
db->mallocFailed = 1;
@@ -95540,7 +86885,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
int iPartIdxLabel; /* Jump to this label to skip a row */
Vdbe *v; /* Generate code into this virtual machine */
KeyInfo *pKey; /* KeyInfo for index */
- int regRecord; /* Register holding assembled index record */
+ int regRecord; /* Register holding assemblied index record */
sqlite3 *db = pParse->db; /* The database connection */
int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema);
@@ -95565,43 +86910,42 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
/* Open the sorter cursor if we are to use one. */
iSorter = pParse->nTab++;
- sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, pIndex->nKeyCol, (char*)
+ sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, 0, (char*)
sqlite3KeyInfoRef(pKey), P4_KEYINFO);
/* Open the table. Loop through all rows of the table, inserting index
** records into the sorter. */
sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead);
- addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); VdbeCoverage(v);
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0);
regRecord = sqlite3GetTempReg(pParse);
- sqlite3GenerateIndexKey(pParse,pIndex,iTab,regRecord,0,&iPartIdxLabel,0,0);
+ sqlite3GenerateIndexKey(pParse, pIndex, iTab, regRecord, 0, &iPartIdxLabel);
sqlite3VdbeAddOp2(v, OP_SorterInsert, iSorter, regRecord);
- sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel);
- sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); VdbeCoverage(v);
+ sqlite3VdbeResolveLabel(v, iPartIdxLabel);
+ sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1);
sqlite3VdbeJumpHere(v, addr1);
if( memRootPage<0 ) sqlite3VdbeAddOp2(v, OP_Clear, tnum, iDb);
sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb,
(char *)pKey, P4_KEYINFO);
sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR|((memRootPage>=0)?OPFLAG_P2ISREG:0));
- addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0); VdbeCoverage(v);
+ addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0);
assert( pKey!=0 || db->mallocFailed || pParse->nErr );
- if( IsUniqueIndex(pIndex) && pKey!=0 ){
+ if( pIndex->onError!=OE_None && pKey!=0 ){
int j2 = sqlite3VdbeCurrentAddr(v) + 3;
- sqlite3VdbeGoto(v, j2);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, j2);
addr2 = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp4Int(v, OP_SorterCompare, iSorter, j2, regRecord,
- pIndex->nKeyCol); VdbeCoverage(v);
+ pKey->nField - pIndex->nKeyCol);
sqlite3UniqueConstraint(pParse, OE_Abort, pIndex);
}else{
addr2 = sqlite3VdbeCurrentAddr(v);
}
- sqlite3VdbeAddOp3(v, OP_SorterData, iSorter, regRecord, iIdx);
- sqlite3VdbeAddOp3(v, OP_Last, iIdx, 0, -1);
- sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, 0);
+ sqlite3VdbeAddOp2(v, OP_SorterData, iSorter, regRecord);
+ sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, 1);
sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
sqlite3ReleaseTempReg(pParse, regRecord);
- sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2);
sqlite3VdbeJumpHere(v, addr1);
sqlite3VdbeAddOp1(v, OP_Close, iTab);
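At a high level the code above builds an index by pushing each table row's key through a sorter and then inserting the keys in sorted order, flagging adjacent duplicates when the index is declared UNIQUE. A toy rendering of that flow, with qsort() standing in for the VDBE sorter (purely illustrative, no SQLite structures involved):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmpKey(const void *a, const void *b){
  return strcmp(*(const char *const *)a, *(const char *const *)b);
}

int main(void){
  const char *azRow[] = { "pear", "apple", "plum", "apple", "fig" };
  const char *azKey[5];
  int i, n = 5;

  memcpy(azKey, azRow, sizeof(azRow));        /* scan the table, collect keys */
  qsort(azKey, n, sizeof(azKey[0]), cmpKey);  /* the "sorter" stage           */
  for(i=0; i<n; i++){
    if( i>0 && strcmp(azKey[i], azKey[i-1])==0 ){
      printf("UNIQUE constraint would fail on '%s'\n", azKey[i]);
      continue;
    }
    printf("insert key '%s' into the index b-tree\n", azKey[i]);
  }
  return 0;
}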
@@ -95627,15 +86971,15 @@ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(
nByte = ROUND8(sizeof(Index)) + /* Index structure */
ROUND8(sizeof(char*)*nCol) + /* Index.azColl */
- ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */
+ ROUND8(sizeof(tRowcnt)*(nCol+1) + /* Index.aiRowEst */
sizeof(i16)*nCol + /* Index.aiColumn */
sizeof(u8)*nCol); /* Index.aSortOrder */
p = sqlite3DbMallocZero(db, nByte + nExtra);
if( p ){
char *pExtra = ((char*)p)+ROUND8(sizeof(Index));
- p->azColl = (char**)pExtra; pExtra += ROUND8(sizeof(char*)*nCol);
- p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1);
- p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol;
+ p->azColl = (char**)pExtra; pExtra += ROUND8(sizeof(char*)*nCol);
+ p->aiRowEst = (tRowcnt*)pExtra; pExtra += sizeof(tRowcnt)*(nCol+1);
+ p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol;
p->aSortOrder = (u8*)pExtra;
p->nColumn = nCol;
p->nKeyCol = nCol - 1;
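As with the stat4 samples earlier in this diff, the Index object here is a single allocation carved into sub-arrays, with ROUND8() keeping each slice 8-byte aligned (the EIGHT_BYTE_ALIGNMENT asserts a few hunks below check exactly that). ROUND8() rounds a size up to the next multiple of 8; a standalone illustration with a locally defined equivalent:

#include <stdio.h>

#define ROUND8(x)  (((x)+7)&~7)   /* round up to the next multiple of 8 */

int main(void){
  unsigned int aN[] = { 1, 6, 8, 11, 16 };
  for(int i=0; i<5; i++){
    printf("ROUND8(%2u) = %2u\n", aN[i], (unsigned int)ROUND8(aN[i]));
  }
  return 0;   /* 8, 8, 8, 16, 16 */
}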
@@ -95658,7 +87002,7 @@ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(
**
** If the index is created successfully, return a pointer to the new Index
** structure. This is used by sqlite3AddPrimaryKey() to mark the index
-** as the tables primary key (Index.idxType==SQLITE_IDXTYPE_PRIMARYKEY)
+** as the tables primary key (Index.autoIndex==2).
*/
SQLITE_PRIVATE Index *sqlite3CreateIndex(
Parse *pParse, /* All information about this parse */
@@ -95685,12 +87029,14 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
int iDb; /* Index of the database that is being written */
Token *pName = 0; /* Unqualified name of the index to create */
struct ExprList_item *pListItem; /* For looping over pList */
+ const Column *pTabCol; /* A column in the table */
int nExtra = 0; /* Space allocated for zExtra[] */
int nExtraCol; /* Number of extra columns needed */
char *zExtra = 0; /* Extra space after the Index object */
Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */
- if( db->mallocFailed || IN_DECLARE_VTAB || pParse->nErr>0 ){
+ assert( pParse->nErr==0 ); /* Never called with prior errors */
+ if( db->mallocFailed || IN_DECLARE_VTAB ){
goto exit_create_index;
}
if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
@@ -95752,10 +87098,6 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
assert( pTab!=0 );
assert( pParse->nErr==0 );
if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0
- && db->init.busy==0
-#if SQLITE_USER_AUTHENTICATION
- && sqlite3UserAuthTable(pTab->zName)==0
-#endif
&& sqlite3StrNICmp(&pTab->zName[7],"altertab_",9)!=0 ){
sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName);
goto exit_create_index;
@@ -95839,16 +87181,11 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
** So create a fake list to simulate this.
*/
if( pList==0 ){
- Token prevCol;
- prevCol.z = pTab->aCol[pTab->nCol-1].zName;
- prevCol.n = sqlite3Strlen30(prevCol.z);
- pList = sqlite3ExprListAppend(pParse, 0,
- sqlite3ExprAlloc(db, TK_ID, &prevCol, 0));
+ pList = sqlite3ExprListAppend(pParse, 0, 0);
if( pList==0 ) goto exit_create_index;
- assert( pList->nExpr==1 );
- sqlite3ExprListSetSortOrder(pList, sortOrder);
- }else{
- sqlite3ExprListCheckLength(pParse, pList, "index");
+ pList->a[0].zName = sqlite3DbStrDup(pParse->db,
+ pTab->aCol[pTab->nCol-1].zName);
+ pList->a[0].sortOrder = (u8)sortOrder;
}
/* Figure out how many bytes of space are required to store explicitly
@@ -95856,8 +87193,8 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
*/
for(i=0; i<pList->nExpr; i++){
Expr *pExpr = pList->a[i].pExpr;
- assert( pExpr!=0 );
- if( pExpr->op==TK_COLLATE ){
+ if( pExpr ){
+ assert( pExpr->op==TK_COLLATE );
nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken));
}
}
@@ -95872,7 +87209,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
if( db->mallocFailed ){
goto exit_create_index;
}
- assert( EIGHT_BYTE_ALIGNMENT(pIndex->aiRowLogEst) );
+ assert( EIGHT_BYTE_ALIGNMENT(pIndex->aiRowEst) );
assert( EIGHT_BYTE_ALIGNMENT(pIndex->azColl) );
pIndex->zName = zExtra;
zExtra += nName + 1;
@@ -95880,7 +87217,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
pIndex->pTable = pTab;
pIndex->onError = (u8)onError;
pIndex->uniqNotNull = onError!=OE_None;
- pIndex->idxType = pName ? SQLITE_IDXTYPE_APPDEF : SQLITE_IDXTYPE_UNIQUE;
+ pIndex->autoIndex = (u8)(pName==0);
pIndex->pSchema = db->aDb[iDb].pSchema;
pIndex->nKeyCol = pList->nExpr;
if( pPIWhere ){
@@ -95898,54 +87235,35 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
sortOrderMask = 0; /* Ignore DESC */
}
- /* Analyze the list of expressions that form the terms of the index and
- ** report any errors. In the common case where the expression is exactly
- ** a table column, store that column in aiColumn[]. For general expressions,
- ** populate pIndex->aColExpr and store XN_EXPR (-2) in aiColumn[].
+ /* Scan the names of the columns of the table to be indexed and
+ ** load the column indices into the Index structure. Report an error
+ ** if any column is not found.
**
- ** TODO: Issue a warning if two or more columns of the index are identical.
- ** TODO: Issue a warning if the table primary key is used as part of the
- ** index key.
+ ** TODO: Add a test to make sure that the same column is not named
+ ** more than once within the same index. Only the first instance of
+ ** the column will ever be used by the optimizer. Note that using the
+ ** same column more than once cannot be an error because that would
+ ** break backwards compatibility - it needs to be a warning.
*/
for(i=0, pListItem=pList->a; i<pList->nExpr; i++, pListItem++){
- Expr *pCExpr; /* The i-th index expression */
- int requestedSortOrder; /* ASC or DESC on the i-th expression */
+ const char *zColName = pListItem->zName;
+ int requestedSortOrder;
char *zColl; /* Collation sequence name */
- sqlite3StringToId(pListItem->pExpr);
- sqlite3ResolveSelfReference(pParse, pTab, NC_IdxExpr, pListItem->pExpr, 0);
- if( pParse->nErr ) goto exit_create_index;
- pCExpr = sqlite3ExprSkipCollate(pListItem->pExpr);
- if( pCExpr->op!=TK_COLUMN ){
- if( pTab==pParse->pNewTable ){
- sqlite3ErrorMsg(pParse, "expressions prohibited in PRIMARY KEY and "
- "UNIQUE constraints");
- goto exit_create_index;
- }
- if( pIndex->aColExpr==0 ){
- ExprList *pCopy = sqlite3ExprListDup(db, pList, 0);
- pIndex->aColExpr = pCopy;
- if( !db->mallocFailed ){
- assert( pCopy!=0 );
- pListItem = &pCopy->a[i];
- }
- }
- j = XN_EXPR;
- pIndex->aiColumn[i] = XN_EXPR;
- pIndex->uniqNotNull = 0;
- }else{
- j = pCExpr->iColumn;
- assert( j<=0x7fff );
- if( j<0 ){
- j = pTab->iPKey;
- }else if( pTab->aCol[j].notNull==0 ){
- pIndex->uniqNotNull = 0;
- }
- pIndex->aiColumn[i] = (i16)j;
+ for(j=0, pTabCol=pTab->aCol; j<pTab->nCol; j++, pTabCol++){
+ if( sqlite3StrICmp(zColName, pTabCol->zName)==0 ) break;
+ }
+ if( j>=pTab->nCol ){
+ sqlite3ErrorMsg(pParse, "table %s has no column named %s",
+ pTab->zName, zColName);
+ pParse->checkSchema = 1;
+ goto exit_create_index;
}
- zColl = 0;
- if( pListItem->pExpr->op==TK_COLLATE ){
+ assert( pTab->nCol<=0x7fff && j<=0x7fff );
+ pIndex->aiColumn[i] = (i16)j;
+ if( pListItem->pExpr ){
int nColl;
+ assert( pListItem->pExpr->op==TK_COLLATE );
zColl = pListItem->pExpr->u.zToken;
nColl = sqlite3Strlen30(zColl) + 1;
assert( nExtra>=nColl );
@@ -95953,26 +87271,21 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
zColl = zExtra;
zExtra += nColl;
nExtra -= nColl;
- }else if( j>=0 ){
+ }else{
zColl = pTab->aCol[j].zColl;
+ if( !zColl ) zColl = "BINARY";
}
- if( !zColl ) zColl = "BINARY";
if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){
goto exit_create_index;
}
pIndex->azColl[i] = zColl;
requestedSortOrder = pListItem->sortOrder & sortOrderMask;
pIndex->aSortOrder[i] = (u8)requestedSortOrder;
+ if( pTab->aCol[j].notNull==0 ) pIndex->uniqNotNull = 0;
}
-
- /* Append the table key to the end of the index. For WITHOUT ROWID
- ** tables (when pPk!=0) this will be the declared PRIMARY KEY. For
- ** normal tables (when pPk==0) this will be the rowid.
- */
if( pPk ){
for(j=0; j<pPk->nKeyCol; j++){
int x = pPk->aiColumn[j];
- assert( x>=0 );
if( hasColumn(pIndex->aiColumn, pIndex->nKeyCol, x) ){
pIndex->nColumn--;
}else{
@@ -95984,7 +87297,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
}
assert( i==pIndex->nColumn );
}else{
- pIndex->aiColumn[i] = XN_ROWID;
+ pIndex->aiColumn[i] = -1;
pIndex->azColl[i] = "BINARY";
}
sqlite3DefaultRowEst(pIndex);
@@ -96015,15 +87328,14 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
Index *pIdx;
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
int k;
- assert( IsUniqueIndex(pIdx) );
- assert( pIdx->idxType!=SQLITE_IDXTYPE_APPDEF );
- assert( IsUniqueIndex(pIndex) );
+ assert( pIdx->onError!=OE_None );
+ assert( pIdx->autoIndex );
+ assert( pIndex->onError!=OE_None );
if( pIdx->nKeyCol!=pIndex->nKeyCol ) continue;
for(k=0; k<pIdx->nKeyCol; k++){
const char *z1;
const char *z2;
- assert( pIdx->aiColumn[k]>=0 );
if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break;
z1 = pIdx->azColl[k];
z2 = pIndex->azColl[k];
@@ -96046,7 +87358,6 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
pIdx->onError = pIndex->onError;
}
}
- pRet = pIdx;
goto exit_create_index;
}
}
@@ -96055,12 +87366,12 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
/* Link the new Index structure to its table and to the other
** in-memory database structures.
*/
- assert( pParse->nErr==0 );
if( db->init.busy ){
Index *p;
assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
p = sqlite3HashInsert(&pIndex->pSchema->idxHash,
- pIndex->zName, pIndex);
+ pIndex->zName, sqlite3Strlen30(pIndex->zName),
+ pIndex);
if( p ){
assert( p==pIndex ); /* Malloc must have failed */
db->mallocFailed = 1;
@@ -96085,7 +87396,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
** has just been created, it contains no data and the index initialization
** step can be skipped.
*/
- else if( HasRowid(pTab) || pTblName!=0 ){
+ else if( pParse->nErr==0 && (HasRowid(pTab) || pTblName!=0) ){
Vdbe *v;
char *zStmt;
int iMem = ++pParse->nMem;
@@ -96093,15 +87404,10 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
v = sqlite3GetVdbe(pParse);
if( v==0 ) goto exit_create_index;
- sqlite3BeginWriteOperation(pParse, 1, iDb);
- /* Create the rootpage for the index using CreateIndex. But before
- ** doing so, code a Noop instruction and store its address in
- ** Index.tnum. This is required in case this index is actually a
- ** PRIMARY KEY and the table is actually a WITHOUT ROWID table. In
- ** that case the convertToWithoutRowidTable() routine will replace
- ** the Noop with a Goto to jump over the VDBE code generated below. */
- pIndex->tnum = sqlite3VdbeAddOp0(v, OP_Noop);
+ /* Create the rootpage for the index
+ */
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
sqlite3VdbeAddOp2(v, OP_CreateIndex, iDb, iMem);
/* Gather the complete text of the CREATE INDEX statement into
@@ -96141,8 +87447,6 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName));
sqlite3VdbeAddOp1(v, OP_Expire, 0);
}
-
- sqlite3VdbeJumpHere(v, pIndex->tnum);
}
/* When adding an index to the list of indices for a table, make
@@ -96182,11 +87486,11 @@ exit_create_index:
** Fill the Index.aiRowEst[] array with default information - information
** to be used when we have not run the ANALYZE command.
**
-** aiRowEst[0] is supposed to contain the number of elements in the index.
+** aiRowEst[0] is suppose to contain the number of elements in the index.
** Since we do not know, guess 1 million. aiRowEst[1] is an estimate of the
** number of rows in the table that match any particular value of the
** first column of the index. aiRowEst[2] is an estimate of the number
-** of rows that match any particular combination of the first 2 columns
+** of rows that match any particular combination of the first 2 columns
** of the index. And so forth. It must always be the case that
**
** aiRowEst[N]<=aiRowEst[N-1]
@@ -96197,27 +87501,20 @@ exit_create_index:
** are based on typical values found in actual indices.
*/
SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){
- /* 10, 9, 8, 7, 6 */
- LogEst aVal[] = { 33, 32, 30, 28, 26 };
- LogEst *a = pIdx->aiRowLogEst;
- int nCopy = MIN(ArraySize(aVal), pIdx->nKeyCol);
+ tRowcnt *a = pIdx->aiRowEst;
int i;
-
- /* Set the first entry (number of rows in the index) to the estimated
- ** number of rows in the table. Or 10, if the estimated number of rows
- ** in the table is less than that. */
- a[0] = pIdx->pTable->nRowLogEst;
- if( a[0]<33 ) a[0] = 33; assert( 33==sqlite3LogEst(10) );
-
- /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is
- ** 6 and each subsequent value (if any) is 5. */
- memcpy(&a[1], aVal, nCopy*sizeof(LogEst));
- for(i=nCopy+1; i<=pIdx->nKeyCol; i++){
- a[i] = 23; assert( 23==sqlite3LogEst(5) );
+ tRowcnt n;
+ assert( a!=0 );
+ a[0] = pIdx->pTable->nRowEst;
+ if( a[0]<10 ) a[0] = 10;
+ n = 10;
+ for(i=1; i<=pIdx->nKeyCol; i++){
+ a[i] = n;
+ if( n>5 ) n--;
+ }
+ if( pIdx->onError!=OE_None ){
+ a[pIdx->nKeyCol] = 1;
}
-
- assert( 0==sqlite3LogEst(1) );
- if( IsUniqueIndex(pIdx) ) a[pIdx->nKeyCol] = 0;
}
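
The LogEst values used by the newer sqlite3DefaultRowEst() (33, 32, 30, 28, 26, then 23) are logarithmic row estimates: the asserts in the removed lines pin them to sqlite3LogEst(10)==33, sqlite3LogEst(5)==23 and sqlite3LogEst(1)==0, i.e. roughly 10*log2(x). A minimal sketch of that scale, assuming only that relationship and not the actual integer-only sqlite3LogEst() implementation:

  #include <math.h>

  /* Illustrative only: approximates SQLite's LogEst scale (about 10*log2(x)).
  ** approxLogEst(10) -> 33, approxLogEst(5) -> 23, approxLogEst(1) -> 0,
  ** matching the asserts in the removed sqlite3DefaultRowEst() lines above. */
  static int approxLogEst(double x){
    return (int)(10.0*log2(x) + 0.5);
  }

The older code restored by this hunk stores plain row counts (10, 9, 8, ...) in aiRowEst[] instead of this logarithmic form.
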
/*
@@ -96248,7 +87545,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists
pParse->checkSchema = 1;
goto exit_drop_index;
}
- if( pIndex->idxType!=SQLITE_IDXTYPE_APPDEF ){
+ if( pIndex->autoIndex ){
sqlite3ErrorMsg(pParse, "index associated with UNIQUE "
"or PRIMARY KEY constraint cannot be dropped", 0);
goto exit_drop_index;
@@ -96417,7 +87714,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
assert( iStart<=pSrc->nSrc );
/* Allocate additional space if needed */
- if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){
+ if( pSrc->nSrc+nExtra>pSrc->nAlloc ){
SrcList *pNew;
int nAlloc = pSrc->nSrc+nExtra;
int nGot;
@@ -96429,7 +87726,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
}
pSrc = pNew;
nGot = (sqlite3DbMallocSize(db, pNew) - sizeof(*pSrc))/sizeof(pSrc->a[0])+1;
- pSrc->nAlloc = nGot;
+ pSrc->nAlloc = (u8)nGot;
}
/* Move existing slots that come after the newly inserted slots
@@ -96437,7 +87734,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
for(i=pSrc->nSrc-1; i>=iStart; i--){
pSrc->a[i+nExtra] = pSrc->a[i];
}
- pSrc->nSrc += nExtra;
+ pSrc->nSrc += (i8)nExtra;
/* Zero the newly allocated slots */
memset(&pSrc->a[iStart], 0, sizeof(pSrc->a[0])*nExtra);
@@ -96545,8 +87842,7 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
sqlite3DbFree(db, pItem->zDatabase);
sqlite3DbFree(db, pItem->zName);
sqlite3DbFree(db, pItem->zAlias);
- if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy);
- if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg);
+ sqlite3DbFree(db, pItem->zIndex);
sqlite3DeleteTable(db, pItem->pTab);
sqlite3SelectDelete(db, pItem->pSelect);
sqlite3ExprDelete(db, pItem->pOn);
@@ -96562,7 +87858,7 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
** if this is the first term of the FROM clause. pTable and pDatabase
** are the name of the table and database named in the FROM clause term.
** pDatabase is NULL if the database name qualifier is missing - the
-** usual case. If the term has an alias, then pAlias points to the
+** usual case. If the term has an alias, then pAlias points to the
** alias token. If the term is a subquery, then pSubquery is the
** SELECT statement that the subquery encodes. The pTable and
** pDatabase parameters are NULL for subqueries. The pOn and pUsing
@@ -96619,38 +87915,18 @@ SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pI
assert( pIndexedBy!=0 );
if( p && ALWAYS(p->nSrc>0) ){
struct SrcList_item *pItem = &p->a[p->nSrc-1];
- assert( pItem->fg.notIndexed==0 );
- assert( pItem->fg.isIndexedBy==0 );
- assert( pItem->fg.isTabFunc==0 );
+ assert( pItem->notIndexed==0 && pItem->zIndex==0 );
if( pIndexedBy->n==1 && !pIndexedBy->z ){
/* A "NOT INDEXED" clause was supplied. See parse.y
** construct "indexed_opt" for details. */
- pItem->fg.notIndexed = 1;
+ pItem->notIndexed = 1;
}else{
- pItem->u1.zIndexedBy = sqlite3NameFromToken(pParse->db, pIndexedBy);
- pItem->fg.isIndexedBy = (pItem->u1.zIndexedBy!=0);
+ pItem->zIndex = sqlite3NameFromToken(pParse->db, pIndexedBy);
}
}
}
/*
-** Add the list of function arguments to the SrcList entry for a
-** table-valued-function.
-*/
-SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *pList){
- if( p && pList ){
- struct SrcList_item *pItem = &p->a[p->nSrc-1];
- assert( pItem->fg.notIndexed==0 );
- assert( pItem->fg.isIndexedBy==0 );
- assert( pItem->fg.isTabFunc==0 );
- pItem->u1.pFuncArg = pList;
- pItem->fg.isTabFunc = 1;
- }else{
- sqlite3ExprListDelete(pParse->db, pList);
- }
-}
-
-/*
** When building up a FROM clause in the parser, the join operator
** is initially attached to the left operand. But the code generator
** expects the join operator to be on the right operand. This routine
@@ -96668,10 +87944,11 @@ SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *
SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList *p){
if( p ){
int i;
+ assert( p->a || p->nSrc==0 );
for(i=p->nSrc-1; i>0; i--){
- p->a[i].fg.jointype = p->a[i-1].fg.jointype;
+ p->a[i].jointype = p->a[i-1].jointype;
}
- p->a[0].fg.jointype = 0;
+ p->a[0].jointype = 0;
}
}
@@ -96789,24 +88066,59 @@ SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){
}
/*
-** Record the fact that the schema cookie will need to be verified
-** for database iDb. The code to actually verify the schema cookie
-** will occur at the end of the top-level VDBE and will be generated
-** later, by sqlite3FinishCoding().
+** Generate VDBE code that will verify the schema cookie and start
+** a read-transaction for all named database files.
+**
+** It is important that all schema cookies be verified and all
+** read transactions be started before anything else happens in
+** the VDBE program. But this routine can be called after much other
+** code has been generated. So here is what we do:
+**
+** The first time this routine is called, we code an OP_Goto that
+** will jump to a subroutine at the end of the program. Then we
+** record every database that needs its schema verified in the
+** pParse->cookieMask field. Later, after all other code has been
+** generated, the subroutine that does the cookie verifications and
+** starts the transactions will be coded and the OP_Goto P2 value
+** will be made to point to that subroutine. The generation of the
+** cookie verification subroutine code happens in sqlite3FinishCoding().
+**
+** If iDb<0 then code the OP_Goto only - don't set flag to verify the
+** schema on any databases. This can be used to position the OP_Goto
+** early in the code, before we know if any database tables will be used.
*/
SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
- sqlite3 *db = pToplevel->db;
- assert( iDb>=0 && iDb<db->nDb );
- assert( db->aDb[iDb].pBt!=0 || iDb==1 );
- assert( iDb<SQLITE_MAX_ATTACHED+2 );
- assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
- if( DbMaskTest(pToplevel->cookieMask, iDb)==0 ){
- DbMaskSet(pToplevel->cookieMask, iDb);
- pToplevel->cookieValue[iDb] = db->aDb[iDb].pSchema->schema_cookie;
- if( !OMIT_TEMPDB && iDb==1 ){
- sqlite3OpenTempDatabase(pToplevel);
+#ifndef SQLITE_OMIT_TRIGGER
+ if( pToplevel!=pParse ){
+ /* This branch is taken if a trigger is currently being coded. In this
+ ** case, set cookieGoto to a non-zero value to show that this function
+ ** has been called. This is used by the sqlite3ExprCodeConstants()
+ ** function. */
+ pParse->cookieGoto = -1;
+ }
+#endif
+ if( pToplevel->cookieGoto==0 ){
+ Vdbe *v = sqlite3GetVdbe(pToplevel);
+ if( v==0 ) return; /* This only happens if there was a prior error */
+ pToplevel->cookieGoto = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0)+1;
+ }
+ if( iDb>=0 ){
+ sqlite3 *db = pToplevel->db;
+ yDbMask mask;
+
+ assert( iDb<db->nDb );
+ assert( db->aDb[iDb].pBt!=0 || iDb==1 );
+ assert( iDb<SQLITE_MAX_ATTACHED+2 );
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ mask = ((yDbMask)1)<<iDb;
+ if( (pToplevel->cookieMask & mask)==0 ){
+ pToplevel->cookieMask |= mask;
+ pToplevel->cookieValue[iDb] = db->aDb[iDb].pSchema->schema_cookie;
+ if( !OMIT_TEMPDB && iDb==1 ){
+ sqlite3OpenTempDatabase(pToplevel);
+ }
}
}
}
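
The cookieMask bookkeeping above keeps one bit per attached database so that each schema cookie is verified at most once per statement; in the restored code the bit is computed inline as ((yDbMask)1)<<iDb. A self-contained sketch of that pattern, using a uint64_t stand-in since the real yDbMask type is defined elsewhere in this file (bit 1 is the temp database, as the iDb==1 special case above shows; bit 0 is conventionally the main database):

  #include <stdint.h>
  #include <assert.h>

  typedef uint64_t DbMaskSketch;   /* stand-in for yDbMask; one bit per database */

  int main(void){
    DbMaskSketch cookieMask = 0;
    int iDb = 3;                                 /* some hypothetical attached db */
    DbMaskSketch mask = ((DbMaskSketch)1)<<iDb;
    if( (cookieMask & mask)==0 ){
      cookieMask |= mask;                        /* schedule its cookie check once */
    }
    assert( (cookieMask & mask)!=0 );
    assert( (cookieMask & ((DbMaskSketch)1<<1))==0 );  /* temp db not scheduled */
    return 0;
  }
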
@@ -96842,7 +88154,7 @@ SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb)
SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
sqlite3CodeVerifySchema(pParse, iDb);
- DbMaskSet(pToplevel->writeMask, iDb);
+ pToplevel->writeMask |= ((yDbMask)1)<<iDb;
pToplevel->isMultiWrite |= setStatement;
}
@@ -96914,22 +88226,18 @@ SQLITE_PRIVATE void sqlite3UniqueConstraint(
StrAccum errMsg;
Table *pTab = pIdx->pTable;
- sqlite3StrAccumInit(&errMsg, pParse->db, 0, 0, 200);
- if( pIdx->aColExpr ){
- sqlite3XPrintf(&errMsg, 0, "index '%q'", pIdx->zName);
- }else{
- for(j=0; j<pIdx->nKeyCol; j++){
- char *zCol;
- assert( pIdx->aiColumn[j]>=0 );
- zCol = pTab->aCol[pIdx->aiColumn[j]].zName;
- if( j ) sqlite3StrAccumAppend(&errMsg, ", ", 2);
- sqlite3XPrintf(&errMsg, 0, "%s.%s", pTab->zName, zCol);
- }
+ sqlite3StrAccumInit(&errMsg, 0, 0, 200);
+ errMsg.db = pParse->db;
+ for(j=0; j<pIdx->nKeyCol; j++){
+ char *zCol = pTab->aCol[pIdx->aiColumn[j]].zName;
+ if( j ) sqlite3StrAccumAppend(&errMsg, ", ", 2);
+ sqlite3StrAccumAppend(&errMsg, pTab->zName, -1);
+ sqlite3StrAccumAppend(&errMsg, ".", 1);
+ sqlite3StrAccumAppend(&errMsg, zCol, -1);
}
zErr = sqlite3StrAccumFinish(&errMsg);
sqlite3HaltConstraint(pParse,
- IsPrimaryKeyIndex(pIdx) ? SQLITE_CONSTRAINT_PRIMARYKEY
- : SQLITE_CONSTRAINT_UNIQUE,
+ (pIdx->autoIndex==2)?SQLITE_CONSTRAINT_PRIMARYKEY:SQLITE_CONSTRAINT_UNIQUE,
onError, zErr, P4_DYNAMIC, P5_ConstraintUnique);
}
@@ -97096,102 +88404,40 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
** when it has finished using it.
*/
SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){
- int i;
- int nCol = pIdx->nColumn;
- int nKey = pIdx->nKeyCol;
- KeyInfo *pKey;
if( pParse->nErr ) return 0;
- if( pIdx->uniqNotNull ){
- pKey = sqlite3KeyInfoAlloc(pParse->db, nKey, nCol-nKey);
- }else{
- pKey = sqlite3KeyInfoAlloc(pParse->db, nCol, 0);
- }
- if( pKey ){
- assert( sqlite3KeyInfoIsWriteable(pKey) );
- for(i=0; i<nCol; i++){
- char *zColl = pIdx->azColl[i];
- assert( zColl!=0 );
- pKey->aColl[i] = strcmp(zColl,"BINARY")==0 ? 0 :
- sqlite3LocateCollSeq(pParse, zColl);
- pKey->aSortOrder[i] = pIdx->aSortOrder[i];
- }
- if( pParse->nErr ){
- sqlite3KeyInfoUnref(pKey);
- pKey = 0;
- }
+#ifndef SQLITE_OMIT_SHARED_CACHE
+ if( pIdx->pKeyInfo && pIdx->pKeyInfo->db!=pParse->db ){
+ sqlite3KeyInfoUnref(pIdx->pKeyInfo);
+ pIdx->pKeyInfo = 0;
}
- return pKey;
-}
-
-#ifndef SQLITE_OMIT_CTE
-/*
-** This routine is invoked once per CTE by the parser while parsing a
-** WITH clause.
-*/
-SQLITE_PRIVATE With *sqlite3WithAdd(
- Parse *pParse, /* Parsing context */
- With *pWith, /* Existing WITH clause, or NULL */
- Token *pName, /* Name of the common-table */
- ExprList *pArglist, /* Optional column name list for the table */
- Select *pQuery /* Query used to initialize the table */
-){
- sqlite3 *db = pParse->db;
- With *pNew;
- char *zName;
-
- /* Check that the CTE name is unique within this WITH clause. If
- ** not, store an error in the Parse structure. */
- zName = sqlite3NameFromToken(pParse->db, pName);
- if( zName && pWith ){
+#endif
+ if( pIdx->pKeyInfo==0 ){
int i;
- for(i=0; i<pWith->nCte; i++){
- if( sqlite3StrICmp(zName, pWith->a[i].zName)==0 ){
- sqlite3ErrorMsg(pParse, "duplicate WITH table name: %s", zName);
- }
+ int nCol = pIdx->nColumn;
+ int nKey = pIdx->nKeyCol;
+ KeyInfo *pKey;
+ if( pIdx->uniqNotNull ){
+ pKey = sqlite3KeyInfoAlloc(pParse->db, nKey, nCol-nKey);
+ }else{
+ pKey = sqlite3KeyInfoAlloc(pParse->db, nCol, 0);
}
- }
-
- if( pWith ){
- int nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte);
- pNew = sqlite3DbRealloc(db, pWith, nByte);
- }else{
- pNew = sqlite3DbMallocZero(db, sizeof(*pWith));
- }
- assert( zName!=0 || pNew==0 );
- assert( db->mallocFailed==0 || pNew==0 );
-
- if( pNew==0 ){
- sqlite3ExprListDelete(db, pArglist);
- sqlite3SelectDelete(db, pQuery);
- sqlite3DbFree(db, zName);
- pNew = pWith;
- }else{
- pNew->a[pNew->nCte].pSelect = pQuery;
- pNew->a[pNew->nCte].pCols = pArglist;
- pNew->a[pNew->nCte].zName = zName;
- pNew->a[pNew->nCte].zCteErr = 0;
- pNew->nCte++;
- }
-
- return pNew;
-}
-
-/*
-** Free the contents of the With object passed as the second argument.
-*/
-SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){
- if( pWith ){
- int i;
- for(i=0; i<pWith->nCte; i++){
- struct Cte *pCte = &pWith->a[i];
- sqlite3ExprListDelete(db, pCte->pCols);
- sqlite3SelectDelete(db, pCte->pSelect);
- sqlite3DbFree(db, pCte->zName);
+ if( pKey ){
+ assert( sqlite3KeyInfoIsWriteable(pKey) );
+ for(i=0; i<nCol; i++){
+ char *zColl = pIdx->azColl[i];
+ if( NEVER(zColl==0) ) zColl = "BINARY";
+ pKey->aColl[i] = sqlite3LocateCollSeq(pParse, zColl);
+ pKey->aSortOrder[i] = pIdx->aSortOrder[i];
+ }
+ if( pParse->nErr ){
+ sqlite3KeyInfoUnref(pKey);
+ }else{
+ pIdx->pKeyInfo = pKey;
+ }
}
- sqlite3DbFree(db, pWith);
}
+ return sqlite3KeyInfoRef(pIdx->pKeyInfo);
}
-#endif /* !defined(SQLITE_OMIT_CTE) */
/************** End of build.c ***********************************************/
/************** Begin file callback.c ****************************************/
@@ -97211,7 +88457,6 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){
** of user defined functions and collation sequences.
*/
-/* #include "sqliteInt.h" */
/*
** Invoke the 'collation needed' callback to request a collation sequence
@@ -97339,7 +88584,7 @@ SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){
**
** Each pointer stored in the sqlite3.aCollSeq hash table contains an
** array of three CollSeq structures. The first is the collation sequence
-** preferred for UTF-8, the second UTF-16le, and the third UTF-16be.
+** preferred for UTF-8, the second UTF-16le, and the third UTF-16be.
**
** Stored immediately after the three collation sequences is a copy of
** the collation sequence name. A pointer to this string is stored in
@@ -97351,11 +88596,11 @@ static CollSeq *findCollSeqEntry(
int create /* Create a new entry if true */
){
CollSeq *pColl;
- pColl = sqlite3HashFind(&db->aCollSeq, zName);
+ int nName = sqlite3Strlen30(zName);
+ pColl = sqlite3HashFind(&db->aCollSeq, zName, nName);
if( 0==pColl && create ){
- int nName = sqlite3Strlen30(zName);
- pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1);
+ pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1 );
if( pColl ){
CollSeq *pDel = 0;
pColl[0].zName = (char*)&pColl[3];
@@ -97366,7 +88611,7 @@ static CollSeq *findCollSeqEntry(
pColl[2].enc = SQLITE_UTF16BE;
memcpy(pColl[0].zName, zName, nName);
pColl[0].zName[nName] = 0;
- pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, pColl);
+ pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, nName, pColl);
/* If a malloc() failure occurred in sqlite3HashInsert(), it will
** return the pColl pointer to be deleted (because it wasn't added
@@ -97554,6 +88799,7 @@ SQLITE_PRIVATE FuncDef *sqlite3FindFunction(
assert( nArg>=(-2) );
assert( nArg>=(-1) || createFlag==0 );
+ assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE );
h = (sqlite3UpperToLower[(u8)zName[0]] + nName) % ArraySize(db->aFunc.a);
/* First search for a match amongst the application-defined functions.
@@ -97644,9 +88890,9 @@ SQLITE_PRIVATE void sqlite3SchemaClear(void *p){
sqlite3HashClear(&temp1);
sqlite3HashClear(&pSchema->fkeyHash);
pSchema->pSeqTab = 0;
- if( pSchema->schemaFlags & DB_SchemaLoaded ){
+ if( pSchema->flags & DB_SchemaLoaded ){
pSchema->iGeneration++;
- pSchema->schemaFlags &= ~DB_SchemaLoaded;
+ pSchema->flags &= ~DB_SchemaLoaded;
}
}
@@ -97689,7 +88935,6 @@ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){
** This file contains C code routines that are called by the parser
** in order to generate code for DELETE FROM statements.
*/
-/* #include "sqliteInt.h" */
/*
** While a SrcList can in general represent multiple tables and subqueries
@@ -97767,15 +89012,17 @@ SQLITE_PRIVATE void sqlite3MaterializeView(
Parse *pParse, /* Parsing context */
Table *pView, /* View definition */
Expr *pWhere, /* Optional WHERE clause to be added */
- int iCur /* Cursor number for ephemeral table */
+  int iCur       /* Cursor number for ephemeral table */
){
SelectDest dest;
Select *pSel;
SrcList *pFrom;
sqlite3 *db = pParse->db;
int iDb = sqlite3SchemaToIndex(db, pView->pSchema);
+
pWhere = sqlite3ExprDup(db, pWhere, 0);
pFrom = sqlite3SrcListAppend(db, 0, 0, 0);
+
if( pFrom ){
assert( pFrom->nSrc==1 );
pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName);
@@ -97783,7 +89030,10 @@ SQLITE_PRIVATE void sqlite3MaterializeView(
assert( pFrom->a[0].pOn==0 );
assert( pFrom->a[0].pUsing==0 );
}
+
pSel = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, 0, 0, 0, 0);
+ if( pSel ) pSel->selFlags |= SF_Materialize;
+
sqlite3SelectDestInit(&dest, SRT_EphemTab, iCur);
sqlite3Select(pParse, pSel, &dest);
sqlite3SelectDelete(db, pSel);
@@ -97866,7 +89116,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
pInClause->x.pSelect = pSelect;
pInClause->flags |= EP_xIsSelect;
- sqlite3ExprSetHeightAndFlags(pParse, pInClause);
+ sqlite3ExprSetHeight(pParse, pInClause);
return pInClause;
/* something went wrong. clean up anything allocated. */
@@ -97903,8 +89153,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
WhereInfo *pWInfo; /* Information about the WHERE clause */
Index *pIdx; /* For looping over indices of the table */
int iTabCur; /* Cursor number for the table */
- int iDataCur = 0; /* VDBE cursor for the canonical data source */
- int iIdxCur = 0; /* Cursor number of the first index */
+ int iDataCur; /* VDBE cursor for the canonical data source */
+ int iIdxCur; /* Cursor number of the first index */
int nIdx; /* Number of indices */
sqlite3 *db; /* Main database structure */
AuthContext sContext; /* Authorization context */
@@ -97912,7 +89162,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
int iDb; /* Database number */
int memCnt = -1; /* Memory cell used for change counting */
int rcauth; /* Value returned by authorization callback */
- int eOnePass; /* ONEPASS_OFF or _SINGLE or _MULTI */
+ int okOnePass; /* True for one-pass algorithm without the FIFO */
int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */
u8 *aToOpen = 0; /* Open cursor iTabCur+j if aToOpen[j] is true */
Index *pPk; /* The PRIMARY KEY index on the table */
@@ -97924,12 +89174,12 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
int iRowSet = 0; /* Register for rowset of rows to delete */
int addrBypass = 0; /* Address of jump over the delete logic */
int addrLoop = 0; /* Top of the delete loop */
- int addrEphOpen = 0; /* Instruction to open the Ephemeral table */
+ int addrDelete = 0; /* Jump directly to the delete logic */
+  int addrEphOpen = 0;      /* Instruction to open the Ephemeral table */
#ifndef SQLITE_OMIT_TRIGGER
int isView; /* True if attempting to delete from a view */
Trigger *pTrigger; /* List of table triggers, if required */
- int bComplex; /* True if there are either triggers or FKs */
#endif
memset(&sContext, 0, sizeof(sContext));
@@ -97953,11 +89203,9 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
#ifndef SQLITE_OMIT_TRIGGER
pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0);
isView = pTab->pSelect!=0;
- bComplex = pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0);
#else
# define pTrigger 0
# define isView 0
-# define bComplex 0
#endif
#ifdef SQLITE_OMIT_VIEW
# undef isView
@@ -98007,7 +89255,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
sqlite3BeginWriteOperation(pParse, 1, iDb);
/* If we are trying to delete from a view, realize that view into
- ** an ephemeral table.
+  ** an ephemeral table.
*/
#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
if( isView ){
@@ -98038,10 +89286,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
** It is easier just to erase the whole table. Prior to version 3.6.5,
** this optimization caused the row change count (the value returned by
** API function sqlite3_count_changes) to be set incorrectly. */
- if( rcauth==SQLITE_OK
- && pWhere==0
- && !bComplex
- && !IsVirtual(pTab)
+ if( rcauth==SQLITE_OK && pWhere==0 && !pTrigger && !IsVirtual(pTab)
+ && 0==sqlite3FkRequired(pParse, pTab, 0, 0)
){
assert( !isView );
sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName);
@@ -98056,8 +89302,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
}else
#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */
{
- u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK;
- wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW);
if( HasRowid(pTab) ){
/* For a rowid table, initialize the RowSet to an empty set */
pPk = 0;
@@ -98065,7 +89309,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
iRowSet = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet);
}else{
- /* For a WITHOUT ROWID table, create an ephemeral table used to
+      /* For a WITHOUT ROWID table, create an ephemeral table used to
** hold all primary keys for rows to be deleted. */
pPk = sqlite3PrimaryKeyIndex(pTab);
assert( pPk!=0 );
@@ -98078,18 +89322,13 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
}
/* Construct a query to find the rowid or primary key for every row
- ** to be deleted, based on the WHERE clause. Set variable eOnePass
- ** to indicate the strategy used to implement this delete:
- **
- ** ONEPASS_OFF: Two-pass approach - use a FIFO for rowids/PK values.
- ** ONEPASS_SINGLE: One-pass approach - at most one row deleted.
- ** ONEPASS_MULTI: One-pass approach - any number of rows may be deleted.
+ ** to be deleted, based on the WHERE clause.
*/
- pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, wcf, iTabCur+1);
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0,
+ WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK,
+ iTabCur+1);
if( pWInfo==0 ) goto delete_from_cleanup;
- eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
- assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI );
- assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF );
+ okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
/* Keep track of the number of rows to be deleted */
if( db->flags & SQLITE_CountRows ){
@@ -98099,7 +89338,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
/* Extract the rowid or primary key for the current row */
if( pPk ){
for(i=0; i<nPk; i++){
- assert( pPk->aiColumn[i]>=0 );
sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur,
pPk->aiColumn[i], iPk+i);
}
@@ -98110,10 +89348,11 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( iKey>pParse->nMem ) pParse->nMem = iKey;
}
- if( eOnePass!=ONEPASS_OFF ){
- /* For ONEPASS, no need to store the rowid/primary-key. There is only
+ if( okOnePass ){
+ /* For ONEPASS, no need to store the rowid/primary-key. There is only
** one, so just keep it in its register(s) and fall through to the
- ** delete code. */
+ ** delete code.
+ */
nKey = nPk; /* OP_Found will use an unpacked key */
aToOpen = sqlite3DbMallocRaw(db, nIdx+2);
if( aToOpen==0 ){
@@ -98125,27 +89364,27 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iTabCur] = 0;
if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iTabCur] = 0;
if( addrEphOpen ) sqlite3VdbeChangeToNoop(v, addrEphOpen);
+ addrDelete = sqlite3VdbeAddOp0(v, OP_Goto); /* Jump to DELETE logic */
+ }else if( pPk ){
+ /* Construct a composite key for the row to be deleted and remember it */
+ iKey = ++pParse->nMem;
+ nKey = 0; /* Zero tells OP_Found to use a composite key */
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey,
+ sqlite3IndexAffinityStr(v, pPk), P4_TRANSIENT);
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, iEphCur, iKey);
}else{
- if( pPk ){
- /* Add the PK key for this row to the temporary table */
- iKey = ++pParse->nMem;
- nKey = 0; /* Zero tells OP_Found to use a composite key */
- sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey,
- sqlite3IndexAffinityStr(pParse->db, pPk), nPk);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iEphCur, iKey);
- }else{
- /* Add the rowid of the row to be deleted to the RowSet */
- nKey = 1; /* OP_Seek always uses a single rowid */
- sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey);
- }
+ /* Get the rowid of the row to be deleted and remember it in the RowSet */
+ nKey = 1; /* OP_Seek always uses a single rowid */
+ sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey);
}
- /* If this DELETE cannot use the ONEPASS strategy, this is the
- ** end of the WHERE loop */
- if( eOnePass!=ONEPASS_OFF ){
+ /* End of the WHERE loop */
+ sqlite3WhereEnd(pWInfo);
+ if( okOnePass ){
+ /* Bypass the delete logic below if the WHERE loop found zero rows */
addrBypass = sqlite3VdbeMakeLabel(v);
- }else{
- sqlite3WhereEnd(pWInfo);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrBypass);
+ sqlite3VdbeJumpHere(v, addrDelete);
}
/* Unless this is a view, open cursors for the table we are
@@ -98154,35 +89393,28 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
** triggers.
*/
if( !isView ){
- int iAddrOnce = 0;
- if( eOnePass==ONEPASS_MULTI ){
- iAddrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v);
- }
- testcase( IsVirtual(pTab) );
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, iTabCur, aToOpen,
&iDataCur, &iIdxCur);
- assert( pPk || IsVirtual(pTab) || iDataCur==iTabCur );
- assert( pPk || IsVirtual(pTab) || iIdxCur==iDataCur+1 );
- if( eOnePass==ONEPASS_MULTI ) sqlite3VdbeJumpHere(v, iAddrOnce);
+ assert( pPk || iDataCur==iTabCur );
+ assert( pPk || iIdxCur==iDataCur+1 );
}
/* Set up a loop over the rowids/primary-keys that were found in the
** where-clause loop above.
*/
- if( eOnePass!=ONEPASS_OFF ){
- assert( nKey==nPk ); /* OP_Found will use an unpacked key */
- if( !IsVirtual(pTab) && aToOpen[iDataCur-iTabCur] ){
- assert( pPk!=0 || pTab->pSelect!=0 );
+ if( okOnePass ){
+ /* Just one row. Hence the top-of-loop is a no-op */
+ assert( nKey==nPk ); /* OP_Found will use an unpacked key */
+ if( aToOpen[iDataCur-iTabCur] ){
+ assert( pPk!=0 );
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, addrBypass, iKey, nKey);
- VdbeCoverage(v);
}
}else if( pPk ){
- addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v);
+ addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur);
sqlite3VdbeAddOp2(v, OP_RowKey, iEphCur, iKey);
assert( nKey==0 ); /* OP_Found will use a composite key */
}else{
addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey);
- VdbeCoverage(v);
assert( nKey==1 );
}
@@ -98193,32 +89425,23 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
sqlite3VtabMakeWritable(pParse, pTab);
sqlite3VdbeAddOp4(v, OP_VUpdate, 0, 1, iKey, pVTab, P4_VTAB);
sqlite3VdbeChangeP5(v, OE_Abort);
- assert( eOnePass==ONEPASS_OFF || eOnePass==ONEPASS_SINGLE );
sqlite3MayAbort(pParse);
- if( eOnePass==ONEPASS_SINGLE && sqlite3IsToplevel(pParse) ){
- pParse->isMultiWrite = 0;
- }
}else
#endif
{
int count = (pParse->nested==0); /* True to count changes */
- int iIdxNoSeek = -1;
- if( bComplex==0 && aiCurOnePass[1]!=iDataCur ){
- iIdxNoSeek = aiCurOnePass[1];
- }
sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
- iKey, nKey, count, OE_Default, eOnePass, iIdxNoSeek);
+ iKey, nKey, count, OE_Default, okOnePass);
}
/* End of the loop over all rowids/primary-keys. */
- if( eOnePass!=ONEPASS_OFF ){
+ if( okOnePass ){
sqlite3VdbeResolveLabel(v, addrBypass);
- sqlite3WhereEnd(pWInfo);
}else if( pPk ){
- sqlite3VdbeAddOp2(v, OP_Next, iEphCur, addrLoop+1); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iEphCur, addrLoop+1);
sqlite3VdbeJumpHere(v, addrLoop);
}else{
- sqlite3VdbeGoto(v, addrLoop);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrLoop);
sqlite3VdbeJumpHere(v, addrLoop);
}
@@ -98257,7 +89480,7 @@ delete_from_cleanup:
return;
}
/* Make sure "isView" and other macros defined above are undefined. Otherwise
-** they may interfere with compilation of other functions in this file
+** they may interfere with compilation of other functions in this file
** (or in another file, if this file becomes part of the amalgamation). */
#ifdef isView
#undef isView
@@ -98285,25 +89508,6 @@ delete_from_cleanup:
** sequence of nPk memory cells starting at iPk. If nPk==0 that means
** that a search record formed from OP_MakeRecord is contained in the
** single memory location iPk.
-**
-** eMode:
-** Parameter eMode may be passed either ONEPASS_OFF (0), ONEPASS_SINGLE, or
-** ONEPASS_MULTI. If eMode is not ONEPASS_OFF, then the cursor
-** iDataCur already points to the row to delete. If eMode is ONEPASS_OFF
-** then this function must seek iDataCur to the entry identified by iPk
-** and nPk before reading from it.
-**
-** If eMode is ONEPASS_MULTI, then this call is being made as part
-** of a ONEPASS delete that affects multiple rows. In this case, if
-** iIdxNoSeek is a valid cursor number (>=0), then its position should
-** be preserved following the delete operation. Or, if iIdxNoSeek is not
-** a valid cursor number, the position of iDataCur should be preserved
-** instead.
-**
-** iIdxNoSeek:
-** If iIdxNoSeek is a valid cursor number (>=0), then it identifies an
-** index cursor (from within array of cursors starting at iIdxCur) that
-** already points to the index entry to be deleted.
*/
SQLITE_PRIVATE void sqlite3GenerateRowDelete(
Parse *pParse, /* Parsing context */
@@ -98315,8 +89519,7 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
i16 nPk, /* Number of PRIMARY KEY memory cells */
u8 count, /* If non-zero, increment the row change counter */
u8 onconf, /* Default ON CONFLICT policy for triggers */
- u8 eMode, /* ONEPASS_OFF, _SINGLE, or _MULTI. See above */
- int iIdxNoSeek /* Cursor number of cursor that does not need seeking */
+ u8 bNoSeek /* iDataCur is already pointing to the row to delete */
){
Vdbe *v = pParse->pVdbe; /* Vdbe */
int iOld = 0; /* First register in OLD.* array */
@@ -98333,11 +89536,7 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
** not attempt to delete it or fire any DELETE triggers. */
iLabel = sqlite3VdbeMakeLabel(v);
opSeek = HasRowid(pTab) ? OP_NotExists : OP_NotFound;
- if( eMode==ONEPASS_OFF ){
- sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
- VdbeCoverageIf(v, opSeek==OP_NotExists);
- VdbeCoverageIf(v, opSeek==OP_NotFound);
- }
+ if( !bNoSeek ) sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
/* If there are any triggers to fire, allocate a range of registers to
** use for the old.* references in the triggers. */
@@ -98359,9 +89558,7 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
** used by any BEFORE and AFTER triggers that exist. */
sqlite3VdbeAddOp2(v, OP_Copy, iPk, iOld);
for(iCol=0; iCol<pTab->nCol; iCol++){
- testcase( mask!=0xffffffff && iCol==31 );
- testcase( mask!=0xffffffff && iCol==32 );
- if( mask==0xffffffff || (iCol<=31 && (mask & MASKBIT32(iCol))!=0) ){
+ if( mask==0xffffffff || mask&(1<<iCol) ){
sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, iCol, iOld+iCol+1);
}
}
@@ -98379,8 +89576,6 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
*/
if( addrStart<sqlite3VdbeCurrentAddr(v) ){
sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
- VdbeCoverageIf(v, opSeek==OP_NotExists);
- VdbeCoverageIf(v, opSeek==OP_NotFound);
}
/* Do FK processing. This call checks that any FK constraints that
@@ -98393,15 +89588,11 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
** a view (in which case the only effect of the DELETE statement is to
** fire the INSTEAD OF triggers). */
if( pTab->pSelect==0 ){
- sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek);
+ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, 0);
sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0));
if( count ){
sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT);
}
- if( iIdxNoSeek>=0 ){
- sqlite3VdbeAddOp1(v, OP_Delete, iIdxNoSeek);
- }
- sqlite3VdbeChangeP5(v, eMode==ONEPASS_MULTI);
}
/* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to
@@ -98444,14 +89635,12 @@ SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(
Table *pTab, /* Table containing the row to be deleted */
int iDataCur, /* Cursor of table holding data. */
int iIdxCur, /* First index cursor */
- int *aRegIdx, /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */
- int iIdxNoSeek /* Do not delete from this cursor */
+ int *aRegIdx /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */
){
int i; /* Index loop counter */
- int r1 = -1; /* Register holding an index key */
+ int r1; /* Register holding an index key */
int iPartIdxLabel; /* Jump destination for skipping partial index entries */
Index *pIdx; /* Current index */
- Index *pPrior = 0; /* Prior index */
Vdbe *v; /* The prepared statement under construction */
Index *pPk; /* PRIMARY KEY index, or NULL for rowid tables */
@@ -98461,14 +89650,11 @@ SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(
assert( iIdxCur+i!=iDataCur || pPk==pIdx );
if( aRegIdx!=0 && aRegIdx[i]==0 ) continue;
if( pIdx==pPk ) continue;
- if( iIdxCur+i==iIdxNoSeek ) continue;
VdbeModuleComment((v, "GenRowIdxDel for %s", pIdx->zName));
- r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 1,
- &iPartIdxLabel, pPrior, r1);
+ r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 1, &iPartIdxLabel);
sqlite3VdbeAddOp3(v, OP_IdxDelete, iIdxCur+i, r1,
- pIdx->uniqNotNull ? pIdx->nKeyCol : pIdx->nColumn);
- sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel);
- pPrior = pIdx;
+ pIdx->uniqNotNull ? pIdx->nKeyCol : pIdx->nColumn);
+ sqlite3VdbeResolveLabel(v, iPartIdxLabel);
}
}
@@ -98486,22 +89672,10 @@ SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(
**
** If *piPartIdxLabel is not NULL, fill it in with a label and jump
** to that label if pIdx is a partial index that should be skipped.
-** The label should be resolved using sqlite3ResolvePartIdxLabel().
** A partial index should be skipped if its WHERE clause evaluates
** to false or null. If pIdx is not a partial index, *piPartIdxLabel
** will be set to zero which is an empty label that is ignored by
-** sqlite3ResolvePartIdxLabel().
-**
-** The pPrior and regPrior parameters are used to implement a cache to
-** avoid unnecessary register loads. If pPrior is not NULL, then it is
-** a pointer to a different index for which an index key has just been
-** computed into register regPrior. If the current pIdx index is generating
-** its key into the same sequence of registers and if pPrior and pIdx share
-** a column in common, then the register corresponding to that column already
-** holds the correct value and the loading of that register is skipped.
-** This optimization is helpful when doing a DELETE or an INTEGRITY_CHECK
-** on a table with multiple indices, and especially with the ROWID or
-** PRIMARY KEY columns of the index.
+** sqlite3VdbeResolveLabel().
*/
SQLITE_PRIVATE int sqlite3GenerateIndexKey(
Parse *pParse, /* Parsing context */
@@ -98509,65 +89683,54 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey(
int iDataCur, /* Cursor number from which to take column data */
int regOut, /* Put the new key into this register if not 0 */
int prefixOnly, /* Compute only a unique prefix of the key */
- int *piPartIdxLabel, /* OUT: Jump to this label to skip partial index */
- Index *pPrior, /* Previously generated index key */
- int regPrior /* Register holding previous generated key */
+ int *piPartIdxLabel /* OUT: Jump to this label to skip partial index */
){
Vdbe *v = pParse->pVdbe;
int j;
+ Table *pTab = pIdx->pTable;
int regBase;
int nCol;
+ Index *pPk;
if( piPartIdxLabel ){
if( pIdx->pPartIdxWhere ){
*piPartIdxLabel = sqlite3VdbeMakeLabel(v);
- pParse->iSelfTab = iDataCur;
- sqlite3ExprCachePush(pParse);
- sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel,
- SQLITE_JUMPIFNULL);
+ pParse->iPartIdxTab = iDataCur;
+ sqlite3ExprIfFalse(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel,
+ SQLITE_JUMPIFNULL);
}else{
*piPartIdxLabel = 0;
}
}
nCol = (prefixOnly && pIdx->uniqNotNull) ? pIdx->nKeyCol : pIdx->nColumn;
regBase = sqlite3GetTempRange(pParse, nCol);
- if( pPrior && (regBase!=regPrior || pPrior->pPartIdxWhere) ) pPrior = 0;
+ pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
for(j=0; j<nCol; j++){
- if( pPrior
- && pPrior->aiColumn[j]==pIdx->aiColumn[j]
- && pPrior->aiColumn[j]!=XN_EXPR
- ){
- /* This column was already computed by the previous index */
- continue;
+ i16 idx = pIdx->aiColumn[j];
+ if( pPk ) idx = sqlite3ColumnOfIndex(pPk, idx);
+ if( idx<0 || idx==pTab->iPKey ){
+ sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regBase+j);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_Column, iDataCur, idx, regBase+j);
+ sqlite3ColumnDefault(v, pTab, pIdx->aiColumn[j], -1);
}
- sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iDataCur, j, regBase+j);
- /* If the column affinity is REAL but the number is an integer, then it
- ** might be stored in the table as an integer (using a compact
- ** representation) then converted to REAL by an OP_RealAffinity opcode.
- ** But we are getting ready to store this value back into an index, where
- ** it should be converted by to INTEGER again. So omit the OP_RealAffinity
- ** opcode if it is present */
- sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity);
}
if( regOut ){
+ const char *zAff;
+ if( pTab->pSelect
+ || OptimizationDisabled(pParse->db, SQLITE_IdxRealAsInt)
+ ){
+ zAff = 0;
+ }else{
+ zAff = sqlite3IndexAffinityStr(v, pIdx);
+ }
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
+ sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT);
}
sqlite3ReleaseTempRange(pParse, regBase, nCol);
return regBase;
}
-/*
-** If a prior call to sqlite3GenerateIndexKey() generated a jump-over label
-** because it was a partial index, then this routine should be called to
-** resolve that label.
-*/
-SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){
- if( iLabel ){
- sqlite3VdbeResolveLabel(pParse->pVdbe, iLabel);
- sqlite3ExprCachePop(pParse);
- }
-}
-
/************** End of delete.c **********************************************/
/************** Begin file func.c ********************************************/
/*
@@ -98581,25 +89744,21 @@ SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file contains the C-language implementations for many of the SQL
-** functions of SQLite. (Some function, and in particular the date and
-** time functions, are implemented separately.)
+** This file contains the C functions that implement various SQL
+** functions of SQLite.
+**
+** There is only one exported symbol in this file - the function
+** sqliteRegisterBuildinFunctions() found at the bottom of the file.
+** All other code has file scope.
*/
-/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/* #include <assert.h> */
-/* #include "vdbeInt.h" */
/*
** Return the collating function associated with a function.
*/
static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){
- VdbeOp *pOp;
- assert( context->pVdbe!=0 );
- pOp = &context->pVdbe->aOp[context->iOp-1];
- assert( pOp->opcode==OP_CollSeq );
- assert( pOp->p4type==P4_COLLSEQ );
- return pOp->p4.pColl;
+ return context->pColl;
}
/*
@@ -98711,7 +89870,7 @@ static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
case SQLITE_INTEGER: {
i64 iVal = sqlite3_value_int64(argv[0]);
if( iVal<0 ){
- if( iVal==SMALLEST_INT64 ){
+ if( (iVal<<1)==0 ){
/* IMP: R-31676-45509 If X is the integer -9223372036854775808
** then abs(X) throws an integer overflow error since there is no
** equivalent positive 64-bit two complement value. */
@@ -98731,8 +89890,8 @@ static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
default: {
/* Because sqlite3_value_double() returns 0.0 if the argument is not
** something that can be converted into a number, we have:
- ** IMP: R-01992-00519 Abs(X) returns 0.0 if X is a string or blob
- ** that cannot be converted to a numeric value.
+ ** IMP: R-57326-31541 Abs(X) return 0.0 if X is a string or blob that
+ ** cannot be converted to a numeric value.
*/
double rVal = sqlite3_value_double(argv[0]);
if( rVal<0 ) rVal = -rVal;
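
Both forms of the overflow test in this hunk detect the same single value: the removed comparison against SMALLEST_INT64 and the restored (iVal<<1)==0 check are true, for a negative iVal, only when iVal is -9223372036854775808 -- the one 64-bit two's-complement integer whose low 63 bits are all zero and which has no positive counterpart. A standalone illustration (not SQLite code) of why that value cannot be negated:

  #include <stdint.h>
  #include <assert.h>

  int main(void){
    int64_t iVal = INT64_MIN;            /* -9223372036854775808 */
    /* The largest positive int64_t is one smaller in magnitude, so -iVal
    ** does not fit in 64 bits; this is the overflow absFunc() reports. */
    assert( iVal == -INT64_MAX - 1 );
    assert( INT64_MAX == 9223372036854775807LL );
    return 0;
  }
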
@@ -98793,32 +89952,6 @@ static void instrFunc(
}
/*
-** Implementation of the printf() function.
-*/
-static void printfFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- PrintfArguments x;
- StrAccum str;
- const char *zFormat;
- int n;
- sqlite3 *db = sqlite3_context_db_handle(context);
-
- if( argc>=1 && (zFormat = (const char*)sqlite3_value_text(argv[0]))!=0 ){
- x.nArg = argc-1;
- x.nUsed = 0;
- x.apArg = argv+1;
- sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]);
- sqlite3XPrintf(&str, SQLITE_PRINTF_SQLFUNC, zFormat, &x);
- n = str.nChar;
- sqlite3_result_text(context, sqlite3StrAccumFinish(&str), n,
- SQLITE_DYNAMIC);
- }
-}
-
-/*
** Implementation of the substr() function.
**
** substr(x,p1,p2) returns p2 characters of x[] beginning with p1.
@@ -98865,14 +89998,6 @@ static void substrFunc(
}
}
}
-#ifdef SQLITE_SUBSTR_COMPATIBILITY
- /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as
- ** as substr(X,1,N) - it returns the first N characters of X. This
- ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8]
- ** from 2009-02-02 for compatibility of applications that exploited the
- ** old buggy behavior. */
- if( p1==0 ) p1 = 1; /* <rdar://problem/6778339> */
-#endif
if( argc==3 ){
p2 = sqlite3_value_int(argv[2]);
if( p2<0 ){
@@ -98910,14 +90035,13 @@ static void substrFunc(
for(z2=z; *z2 && p2; p2--){
SQLITE_SKIP_UTF8(z2);
}
- sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT,
- SQLITE_UTF8);
+ sqlite3_result_text(context, (char*)z, (int)(z2-z), SQLITE_TRANSIENT);
}else{
if( p1+p2>len ){
p2 = len-p1;
if( p2<0 ) p2 = 0;
}
- sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT);
+ sqlite3_result_blob(context, (char*)&z[p1], (int)p2, SQLITE_TRANSIENT);
}
}
@@ -98960,7 +90084,7 @@ static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
#endif
/*
-** Allocate nByte bytes of space using sqlite3Malloc(). If the
+** Allocate nByte bytes of space using sqlite3_malloc(). If the
** allocation fails, call sqlite3_result_error_nomem() to notify
** the database handle that malloc() has failed and return NULL.
** If nByte is larger than the maximum string or blob length, then
@@ -98976,7 +90100,7 @@ static void *contextMalloc(sqlite3_context *context, i64 nByte){
sqlite3_result_error_toobig(context);
z = 0;
}else{
- z = sqlite3Malloc(nByte);
+ z = sqlite3Malloc((int)nByte);
if( !z ){
sqlite3_result_error_nomem(context);
}
@@ -99147,15 +90271,15 @@ struct compareInfo {
/*
** For LIKE and GLOB matching on EBCDIC machines, assume that every
-** character is exactly one byte in size. Also, provde the Utf8Read()
-** macro for fast reading of the next character in the common case where
-** the next character is ASCII.
+** character is exactly one byte in size. Also, all characters are
+** able to participate in upper-case-to-lower-case mappings in EBCDIC
+** whereas only characters less than 0x80 do in ASCII.
*/
#if defined(SQLITE_EBCDIC)
-# define sqlite3Utf8Read(A) (*((*A)++))
-# define Utf8Read(A) (*(A++))
+# define sqlite3Utf8Read(A) (*((*A)++))
+# define GlobUpperToLower(A) A = sqlite3UpperToLower[A]
#else
-# define Utf8Read(A) (A[0]<0x80?*(A++):sqlite3Utf8Read(&A))
+# define GlobUpperToLower(A) if( !((A)&~0x7f) ){ A = sqlite3UpperToLower[A]; }
#endif
static const struct compareInfo globInfo = { '*', '?', '[', 0 };
@@ -99168,7 +90292,7 @@ static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 };
/*
** Compare two UTF-8 strings for equality where the first string can
-** potentially be a "glob" or "like" expression. Return true (1) if they
+** potentially be a "glob" expression. Return true (1) if they
** are the same and false (0) if they are different.
**
** Globbing rules:
@@ -99188,18 +90312,11 @@ static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 };
** "[a-z]" matches any single lower-case letter. To match a '-', make
** it the last character in the list.
**
-** Like matching rules:
-**
-** '%' Matches any sequence of zero or more characters
-**
-*** '_' Matches any one character
-**
-** Ec Where E is the "esc" character and c is any other
-** character, including '%', '_', and esc, match exactly c.
+** This routine is usually quick, but can be N**2 in the worst case.
**
-** The comments within this routine usually assume glob matching.
+** Hints: to match '*' or '?', put them in "[]". Like this:
**
-** This routine is usually quick, but can be N**2 in the worst case.
+** abc[*]xyz Matches "abc*xyz" only
*/
static int patternCompare(
const u8 *zPattern, /* The glob pattern */
@@ -99207,123 +90324,104 @@ static int patternCompare(
const struct compareInfo *pInfo, /* Information about how to do the compare */
u32 esc /* The escape character */
){
- u32 c, c2; /* Next pattern and input string chars */
- u32 matchOne = pInfo->matchOne; /* "?" or "_" */
- u32 matchAll = pInfo->matchAll; /* "*" or "%" */
- u32 matchOther; /* "[" or the escape character */
- u8 noCase = pInfo->noCase; /* True if uppercase==lowercase */
- const u8 *zEscaped = 0; /* One past the last escaped input char */
-
- /* The GLOB operator does not have an ESCAPE clause. And LIKE does not
- ** have the matchSet operator. So we either have to look for one or
- ** the other, never both. Hence the single variable matchOther is used
- ** to store the one we have to look for.
- */
- matchOther = esc ? esc : pInfo->matchSet;
-
- while( (c = Utf8Read(zPattern))!=0 ){
- if( c==matchAll ){ /* Match "*" */
- /* Skip over multiple "*" characters in the pattern. If there
- ** are also "?" characters, skip those as well, but consume a
- ** single character of the input string for each "?" skipped */
- while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){
+ u32 c, c2;
+ int invert;
+ int seen;
+ u8 matchOne = pInfo->matchOne;
+ u8 matchAll = pInfo->matchAll;
+ u8 matchSet = pInfo->matchSet;
+ u8 noCase = pInfo->noCase;
+ int prevEscape = 0; /* True if the previous character was 'escape' */
+
+ while( (c = sqlite3Utf8Read(&zPattern))!=0 ){
+ if( c==matchAll && !prevEscape ){
+ while( (c=sqlite3Utf8Read(&zPattern)) == matchAll
+ || c == matchOne ){
if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){
return 0;
}
}
if( c==0 ){
- return 1; /* "*" at the end of the pattern matches */
- }else if( c==matchOther ){
- if( esc ){
- c = sqlite3Utf8Read(&zPattern);
- if( c==0 ) return 0;
- }else{
- /* "[...]" immediately follows the "*". We have to do a slow
- ** recursive search in this case, but it is an unusual case. */
- assert( matchOther<0x80 ); /* '[' is a single-byte character */
- while( *zString
- && patternCompare(&zPattern[-1],zString,pInfo,esc)==0 ){
- SQLITE_SKIP_UTF8(zString);
- }
- return *zString!=0;
+ return 1;
+ }else if( c==esc ){
+ c = sqlite3Utf8Read(&zPattern);
+ if( c==0 ){
+ return 0;
+ }
+ }else if( c==matchSet ){
+ assert( esc==0 ); /* This is GLOB, not LIKE */
+ assert( matchSet<0x80 ); /* '[' is a single-byte character */
+ while( *zString && patternCompare(&zPattern[-1],zString,pInfo,esc)==0 ){
+ SQLITE_SKIP_UTF8(zString);
}
+ return *zString!=0;
}
-
- /* At this point variable c contains the first character of the
- ** pattern string past the "*". Search in the input string for the
- ** first matching character and recursively contine the match from
- ** that point.
- **
- ** For a case-insensitive search, set variable cx to be the same as
- ** c but in the other case and search the input string for either
- ** c or cx.
- */
- if( c<=0x80 ){
- u32 cx;
+ while( (c2 = sqlite3Utf8Read(&zString))!=0 ){
if( noCase ){
- cx = sqlite3Toupper(c);
- c = sqlite3Tolower(c);
+ GlobUpperToLower(c2);
+ GlobUpperToLower(c);
+ while( c2 != 0 && c2 != c ){
+ c2 = sqlite3Utf8Read(&zString);
+ GlobUpperToLower(c2);
+ }
}else{
- cx = c;
- }
- while( (c2 = *(zString++))!=0 ){
- if( c2!=c && c2!=cx ) continue;
- if( patternCompare(zPattern,zString,pInfo,esc) ) return 1;
- }
- }else{
- while( (c2 = Utf8Read(zString))!=0 ){
- if( c2!=c ) continue;
- if( patternCompare(zPattern,zString,pInfo,esc) ) return 1;
+ while( c2 != 0 && c2 != c ){
+ c2 = sqlite3Utf8Read(&zString);
+ }
}
+ if( c2==0 ) return 0;
+ if( patternCompare(zPattern,zString,pInfo,esc) ) return 1;
}
return 0;
- }
- if( c==matchOther ){
- if( esc ){
- c = sqlite3Utf8Read(&zPattern);
- if( c==0 ) return 0;
- zEscaped = zPattern;
- }else{
- u32 prior_c = 0;
- int seen = 0;
- int invert = 0;
- c = sqlite3Utf8Read(&zString);
- if( c==0 ) return 0;
+ }else if( c==matchOne && !prevEscape ){
+ if( sqlite3Utf8Read(&zString)==0 ){
+ return 0;
+ }
+ }else if( c==matchSet ){
+ u32 prior_c = 0;
+ assert( esc==0 ); /* This only occurs for GLOB, not LIKE */
+ seen = 0;
+ invert = 0;
+ c = sqlite3Utf8Read(&zString);
+ if( c==0 ) return 0;
+ c2 = sqlite3Utf8Read(&zPattern);
+ if( c2=='^' ){
+ invert = 1;
c2 = sqlite3Utf8Read(&zPattern);
- if( c2=='^' ){
- invert = 1;
- c2 = sqlite3Utf8Read(&zPattern);
- }
- if( c2==']' ){
- if( c==']' ) seen = 1;
+ }
+ if( c2==']' ){
+ if( c==']' ) seen = 1;
+ c2 = sqlite3Utf8Read(&zPattern);
+ }
+ while( c2 && c2!=']' ){
+ if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){
c2 = sqlite3Utf8Read(&zPattern);
- }
- while( c2 && c2!=']' ){
- if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){
- c2 = sqlite3Utf8Read(&zPattern);
- if( c>=prior_c && c<=c2 ) seen = 1;
- prior_c = 0;
- }else{
- if( c==c2 ){
- seen = 1;
- }
- prior_c = c2;
+ if( c>=prior_c && c<=c2 ) seen = 1;
+ prior_c = 0;
+ }else{
+ if( c==c2 ){
+ seen = 1;
}
- c2 = sqlite3Utf8Read(&zPattern);
- }
- if( c2==0 || (seen ^ invert)==0 ){
- return 0;
+ prior_c = c2;
}
- continue;
+ c2 = sqlite3Utf8Read(&zPattern);
}
+ if( c2==0 || (seen ^ invert)==0 ){
+ return 0;
+ }
+ }else if( esc==c && !prevEscape ){
+ prevEscape = 1;
+ }else{
+ c2 = sqlite3Utf8Read(&zString);
+ if( noCase ){
+ GlobUpperToLower(c);
+ GlobUpperToLower(c2);
+ }
+ if( c!=c2 ){
+ return 0;
+ }
+ prevEscape = 0;
}
- c2 = Utf8Read(zString);
- if( c==c2 ) continue;
- if( noCase && c<0x80 && c2<0x80 && sqlite3Tolower(c)==sqlite3Tolower(c2) ){
- continue;
- }
- if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue;
- return 0;
}
return *zString==0;
}
@@ -99331,7 +90429,7 @@ static int patternCompare(
/*
** The sqlite3_strglob() interface.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlobPattern, const char *zString){
+SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, 0)==0;
}
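
sqlite3_strglob() is the public entry point for the GLOB matcher above; because patternCompare() returns true (1) on a match, the ==0 makes sqlite3_strglob() return zero when the string matches the pattern and non-zero otherwise. A small usage sketch of the rules documented above (the bracket trick for a literal '*' comes straight from the restored comment):

  #include <assert.h>
  #include "sqlite3.h"

  int main(void){
    assert( sqlite3_strglob("abc*xyz",   "abcdefxyz")==0 );  /* '*' = any run  */
    assert( sqlite3_strglob("a?c",       "abc")==0 );        /* '?' = one char */
    assert( sqlite3_strglob("abc[*]xyz", "abc*xyz")==0 );    /* literal '*'    */
    assert( sqlite3_strglob("a?c",       "ac")!=0 );         /* no match       */
    return 0;
  }
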
@@ -99626,7 +90724,7 @@ static void charFunc(
){
unsigned char *z, *zOut;
int i;
- zOut = z = sqlite3_malloc64( argc*4+1 );
+ zOut = z = sqlite3_malloc( argc*4 );
if( z==0 ){
sqlite3_result_error_nomem(context);
return;
@@ -99653,7 +90751,7 @@ static void charFunc(
*zOut++ = 0x80 + (u8)(c & 0x3F);
} \
}
- sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8);
+ sqlite3_result_text(context, (char*)z, (int)(zOut-z), sqlite3_free);
}
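
The fragment above is the continuation-byte step of char(): each argument is treated as a Unicode code point and re-encoded as UTF-8, with 0x80 + (c & 0x3F) producing the trailing bytes. A minimal stand-alone sketch of the three-byte case only (code points 0x800..0xFFFF), not the full charFunc() logic:

  #include <stdio.h>

  int main(void){
    unsigned int c = 0x20AC;                   /* U+20AC EURO SIGN */
    unsigned char z[3];
    z[0] = (unsigned char)(0xE0 + ((c>>12)&0x0F));
    z[1] = (unsigned char)(0x80 + ((c>>6)&0x3F));
    z[2] = (unsigned char)(0x80 + (c&0x3F));
    printf("%02X %02X %02X\n", z[0], z[1], z[2]);   /* E2 82 AC */
    return 0;
  }
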
/*
@@ -99694,14 +90792,16 @@ static void zeroblobFunc(
sqlite3_value **argv
){
i64 n;
- int rc;
+ sqlite3 *db = sqlite3_context_db_handle(context);
assert( argc==1 );
UNUSED_PARAMETER(argc);
n = sqlite3_value_int64(argv[0]);
- if( n<0 ) n = 0;
- rc = sqlite3_result_zeroblob64(context, n); /* IMP: R-00293-64994 */
- if( rc ){
- sqlite3_result_error_code(context, rc);
+ testcase( n==db->aLimit[SQLITE_LIMIT_LENGTH] );
+ testcase( n==db->aLimit[SQLITE_LIMIT_LENGTH]+1 );
+ if( n>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ sqlite3_result_error_toobig(context);
+ }else{
+ sqlite3_result_zeroblob(context, (int)n); /* IMP: R-00293-64994 */
}
}
@@ -99772,7 +90872,7 @@ static void replaceFunc(
return;
}
zOld = zOut;
- zOut = sqlite3_realloc64(zOut, (int)nOut);
+ zOut = sqlite3_realloc(zOut, (int)nOut);
if( zOut==0 ){
sqlite3_result_error_nomem(context);
sqlite3_free(zOld);
@@ -100101,7 +91201,6 @@ static void minmaxStep(
sqlite3SkipAccumulatorLoad(context);
}
}else{
- pBest->db = sqlite3_context_db_handle(context);
sqlite3VdbeMemCopy(pBest, pArg);
}
}
@@ -100134,7 +91233,8 @@ static void groupConcatStep(
if( pAccum ){
sqlite3 *db = sqlite3_context_db_handle(context);
- int firstTerm = pAccum->mxAlloc==0;
+ int firstTerm = pAccum->useMalloc==0;
+ pAccum->useMalloc = 2;
pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH];
if( !firstTerm ){
if( argc==2 ){
@@ -100144,11 +91244,11 @@ static void groupConcatStep(
zSep = ",";
nSep = 1;
}
- if( nSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep);
+ sqlite3StrAccumAppend(pAccum, zSep, nSep);
}
zVal = (char*)sqlite3_value_text(argv[0]);
nVal = sqlite3_value_bytes(argv[0]);
- if( zVal ) sqlite3StrAccumAppend(pAccum, zVal, nVal);
+ sqlite3StrAccumAppend(pAccum, zVal, nVal);
}
}
static void groupConcatFinalize(sqlite3_context *context){
@@ -100218,11 +91318,6 @@ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive)
** then set aWc[0] through aWc[2] to the wildcard characters and
** return TRUE. If the function is not a LIKE-style function then
** return FALSE.
-**
-** *pIsNocase is set to true if uppercase and lowercase are equivalent for
-** the function (default for LIKE). If the function makes the distinction
-** between uppercase and lowercase (as does GLOB) then *pIsNocase is set to
-** false.
*/
SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){
FuncDef *pDef;
@@ -100253,7 +91348,7 @@ SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocas
}
/*
-** All of the FuncDef structures in the aBuiltinFunc[] array above
+** All of the FuncDef structures in the aBuiltinFunc[] array above
** to the global function hash table. This occurs at start-time (as
** a consequence of calling sqlite3_initialize()).
**
@@ -100277,18 +91372,15 @@ SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){
FUNCTION(trim, 2, 3, 0, trimFunc ),
FUNCTION(min, -1, 0, 1, minmaxFunc ),
FUNCTION(min, 0, 0, 1, 0 ),
- AGGREGATE2(min, 1, 0, 1, minmaxStep, minMaxFinalize,
- SQLITE_FUNC_MINMAX ),
+ AGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize ),
FUNCTION(max, -1, 1, 1, minmaxFunc ),
FUNCTION(max, 0, 1, 1, 0 ),
- AGGREGATE2(max, 1, 1, 1, minmaxStep, minMaxFinalize,
- SQLITE_FUNC_MINMAX ),
+ AGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize ),
FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF),
FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH),
FUNCTION(instr, 2, 0, 0, instrFunc ),
FUNCTION(substr, 2, 0, 0, substrFunc ),
FUNCTION(substr, 3, 0, 0, substrFunc ),
- FUNCTION(printf, -1, 0, 0, printfFunc ),
FUNCTION(unicode, 1, 0, 0, unicodeFunc ),
FUNCTION(char, -1, 0, 0, charFunc ),
FUNCTION(abs, 1, 0, 0, absFunc ),
@@ -100305,19 +91397,15 @@ SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){
FUNCTION2(ifnull, 2, 0, 0, noopFunc, SQLITE_FUNC_COALESCE),
FUNCTION2(unlikely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY),
FUNCTION2(likelihood, 2, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY),
- FUNCTION2(likely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY),
VFUNCTION(random, 0, 0, 0, randomFunc ),
VFUNCTION(randomblob, 1, 0, 0, randomBlob ),
FUNCTION(nullif, 2, 0, 1, nullifFunc ),
- DFUNCTION(sqlite_version, 0, 0, 0, versionFunc ),
- DFUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ),
+ FUNCTION(sqlite_version, 0, 0, 0, versionFunc ),
+ FUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ),
FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ),
-#if SQLITE_USER_AUTHENTICATION
- FUNCTION(sqlite_crypt, 2, 0, 0, sqlite3CryptFunc ),
-#endif
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
- DFUNCTION(sqlite_compileoption_used,1, 0, 0, compileoptionusedFunc ),
- DFUNCTION(sqlite_compileoption_get, 1, 0, 0, compileoptiongetFunc ),
+ FUNCTION(sqlite_compileoption_used,1, 0, 0, compileoptionusedFunc ),
+ FUNCTION(sqlite_compileoption_get, 1, 0, 0, compileoptiongetFunc ),
#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
FUNCTION(quote, 1, 0, 0, quoteFunc ),
VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid),
@@ -100329,14 +91417,14 @@ SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){
FUNCTION(soundex, 1, 0, 0, soundexFunc ),
#endif
#ifndef SQLITE_OMIT_LOAD_EXTENSION
- VFUNCTION(load_extension, 1, 0, 0, loadExt ),
- VFUNCTION(load_extension, 2, 0, 0, loadExt ),
+ FUNCTION(load_extension, 1, 0, 0, loadExt ),
+ FUNCTION(load_extension, 2, 0, 0, loadExt ),
#endif
AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize ),
AGGREGATE(total, 1, 0, 0, sumStep, totalFinalize ),
AGGREGATE(avg, 1, 0, 0, sumStep, avgFinalize ),
- AGGREGATE2(count, 0, 0, 0, countStep, countFinalize,
- SQLITE_FUNC_COUNT ),
+ /* AGGREGATE(count, 0, 0, 0, countStep, countFinalize ), */
+ {0,SQLITE_UTF8|SQLITE_FUNC_COUNT,0,0,0,countStep,countFinalize,"count",0,0},
AGGREGATE(count, 1, 0, 0, countStep, countFinalize ),
AGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize),
AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize),
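The FUNCTION()/AGGREGATE() entries above are loaded into the global function hash table once at start-time. An application-level analogue of the same mechanism is sqlite3_create_function(); the sketch below registers a hypothetical scalar function named half, used purely for illustration:

#include <sqlite3.h>

/* Hypothetical scalar function, for illustration only. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5 * sqlite3_value_double(argv[0]));
}

/* Register it on an already-open connection, roughly what the FUNCTION()
** entries above arrange for the built-ins during sqlite3_initialize(). */
int register_half(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                 halfFunc, 0, 0);
}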
@@ -100382,7 +91470,6 @@ SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){
** This file contains code used by the compiler to add foreign key
** support to compiled SQL statements.
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_OMIT_FOREIGN_KEY
#ifndef SQLITE_OMIT_TRIGGER
@@ -100544,7 +91631,7 @@ SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){
**
** 4) No parent key columns were provided explicitly as part of the
** foreign key definition, and the PRIMARY KEY of the parent table
-** consists of a different number of columns to the child key in
+** consists of a different number of columns to the child key in
** the child table.
**
** then non-zero is returned, and a "foreign key mismatch" error loaded
@@ -100596,7 +91683,7 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex(
}
for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){
- if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) ){
+ if( pIdx->nKeyCol==nCol && pIdx->onError!=OE_None ){
/* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number
** of columns. If each indexed column corresponds to a foreign key
** column of pFKey, then this index is a winner. */
@@ -100604,8 +91691,8 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex(
if( zKey==0 ){
/* If zKey is NULL, then this foreign key is implicitly mapped to
** the PRIMARY KEY of table pParent. The PRIMARY KEY index may be
- ** identified by the test. */
- if( IsPrimaryKeyIndex(pIdx) ){
+ ** identified by the test (Index.autoIndex==2). */
+ if( pIdx->autoIndex==2 ){
if( aiCol ){
int i;
for(i=0; i<nCol; i++) aiCol[i] = pFKey->aCol[i].iFrom;
@@ -100623,8 +91710,6 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex(
char *zDfltColl; /* Def. collation for column */
char *zIdxCol; /* Name of indexed column */
- if( iCol<0 ) break; /* No foreign keys against expression indexes */
-
/* If the index uses a collation sequence that is different from
** the default collation sequence for the column, this index is
** unusable. Bail out early in this case. */
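A minimal sketch of the failure mode this routine reports, using the public API only (the exact error text varies between releases): when the parent key columns are not covered by a UNIQUE index or PRIMARY KEY, preparing the child DML fails with a "foreign key mismatch" error.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *err = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "PRAGMA foreign_keys=ON;", 0, 0, 0);
  sqlite3_exec(db,
    "CREATE TABLE parent(a);"                       /* no UNIQUE index on a */
    "CREATE TABLE child(x REFERENCES parent(a));", 0, 0, 0);
  /* The DML statement fails to prepare with a "foreign key mismatch" error. */
  if( sqlite3_exec(db, "INSERT INTO child VALUES(1);", 0, 0, &err)!=SQLITE_OK ){
    printf("%s\n", err);
    sqlite3_free(err);
  }
  sqlite3_close(db);
  return 0;
}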
@@ -100713,11 +91798,10 @@ static void fkLookupParent(
** search for a matching row in the parent table. */
if( nIncr<0 ){
sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, iOk);
- VdbeCoverage(v);
}
for(i=0; i<pFKey->nCol; i++){
int iReg = aiCol[i] + regData + 1;
- sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iOk); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iOk);
}
if( isIgnore==0 ){
@@ -100734,20 +91818,18 @@ static void fkLookupParent(
** will have INTEGER affinity applied to it, which may not be correct. */
sqlite3VdbeAddOp2(v, OP_SCopy, aiCol[0]+1+regData, regTemp);
iMustBeInt = sqlite3VdbeAddOp2(v, OP_MustBeInt, regTemp, 0);
- VdbeCoverage(v);
/* If the parent table is the same as the child table, and we are about
** to increment the constraint-counter (i.e. this is an INSERT operation),
** then check if the row being inserted matches itself. If so, do not
** increment the constraint-counter. */
if( pTab==pFKey->pFrom && nIncr==1 ){
- sqlite3VdbeAddOp3(v, OP_Eq, regData, iOk, regTemp); VdbeCoverage(v);
- sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ sqlite3VdbeAddOp3(v, OP_Eq, regData, iOk, regTemp);
}
sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead);
- sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regTemp); VdbeCoverage(v);
- sqlite3VdbeGoto(v, iOk);
+ sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regTemp);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, iOk);
sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
sqlite3VdbeJumpHere(v, iMustBeInt);
sqlite3ReleaseTempReg(pParse, regTemp);
@@ -100777,21 +91859,20 @@ static void fkLookupParent(
for(i=0; i<nCol; i++){
int iChild = aiCol[i]+1+regData;
int iParent = pIdx->aiColumn[i]+1+regData;
- assert( pIdx->aiColumn[i]>=0 );
assert( aiCol[i]!=pTab->iPKey );
if( pIdx->aiColumn[i]==pTab->iPKey ){
/* The parent key is a composite key that includes the IPK column */
iParent = regData;
}
- sqlite3VdbeAddOp3(v, OP_Ne, iChild, iJump, iParent); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Ne, iChild, iJump, iParent);
sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL);
}
- sqlite3VdbeGoto(v, iOk);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, iOk);
}
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regTemp, nCol, regRec,
- sqlite3IndexAffinityStr(pParse->db,pIdx), nCol);
- sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regTemp, nCol, regRec);
+ sqlite3VdbeChangeP4(v, -1, sqlite3IndexAffinityStr(v,pIdx), P4_TRANSIENT);
+ sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0);
sqlite3ReleaseTempReg(pParse, regRec);
sqlite3ReleaseTempRange(pParse, regTemp, nCol);
@@ -100811,7 +91892,7 @@ static void fkLookupParent(
OE_Abort, 0, P4_STATIC, P5_ConstraintFK);
}else{
if( nIncr>0 && pFKey->isDeferred==0 ){
- sqlite3MayAbort(pParse);
+ sqlite3ParseToplevel(pParse)->mayAbort = 1;
}
sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr);
}
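For reference, the observable effect of this parent-key lookup can be sketched at the SQL level with the public API: inserting a child row whose parent is missing fails immediately when the constraint is not deferred.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *err = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "PRAGMA foreign_keys=ON;"
    "CREATE TABLE parent(a INTEGER PRIMARY KEY);"
    "CREATE TABLE child(x INTEGER REFERENCES parent(a));", 0, 0, 0);
  /* No parent row with a=42 exists, so the immediate constraint fails here. */
  if( sqlite3_exec(db, "INSERT INTO child VALUES(42);", 0, 0, &err)!=SQLITE_OK ){
    printf("%s\n", err);      /* FOREIGN KEY constraint failed */
    sqlite3_free(err);
  }
  sqlite3_close(db);
  return 0;
}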
@@ -100883,10 +91964,6 @@ static Expr *exprTableColumn(
** code for an SQL UPDATE operation, this function may be called twice -
** once to "delete" the old row and once to "insert" the new row.
**
-** Parameter nIncr is passed -1 when inserting a row (as this may decrease
-** the number of FK violations in the db) or +1 when deleting one (as this
-** may increase the number of FK constraint problems).
-**
** The code generated by this function scans through the rows in the child
** table that correspond to the parent table row being deleted or inserted.
** For each child row found, one of the following actions is taken:
@@ -100933,7 +92010,6 @@ static void fkScanChildren(
if( nIncr<0 ){
iFkIfZero = sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, 0);
- VdbeCoverage(v);
}
/* Create an Expr object representing an SQL expression like:
@@ -100986,7 +92062,6 @@ static void fkScanChildren(
assert( pIdx!=0 );
for(i=0; i<pPk->nKeyCol; i++){
i16 iCol = pIdx->aiColumn[i];
- assert( iCol>=0 );
pLeft = exprTableRegister(pParse, pTab, regData, iCol);
pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, iCol);
pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0);
@@ -101004,9 +92079,13 @@ static void fkScanChildren(
sqlite3ResolveExprNames(&sNameContext, pWhere);
/* Create VDBE to loop through the entries in pSrc that match the WHERE
- ** clause. For each row found, increment either the deferred or immediate
- ** foreign key constraint counter. */
+ ** clause. If the constraint is not deferred, throw an exception for
+ ** each row found. Otherwise, for deferred constraints, increment the
+ ** deferred constraint counter by nIncr for each row selected. */
pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0);
+ if( nIncr>0 && pFKey->isDeferred==0 ){
+ sqlite3ParseToplevel(pParse)->mayAbort = 1;
+ }
sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr);
if( pWInfo ){
sqlite3WhereEnd(pWInfo);
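A small sketch of the deferred-counter behaviour described above, using the public API only: with DEFERRABLE INITIALLY DEFERRED, the violation is tolerated inside the transaction and is only reported when the transaction commits.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *err = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "PRAGMA foreign_keys=ON;"
    "CREATE TABLE parent(a INTEGER PRIMARY KEY);"
    "CREATE TABLE child(x INTEGER REFERENCES parent(a)"
    "  DEFERRABLE INITIALLY DEFERRED);", 0, 0, 0);
  sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  /* Allowed for now: only the deferred constraint counter is incremented. */
  printf("insert rc: %d\n", sqlite3_exec(db, "INSERT INTO child VALUES(7);", 0, 0, 0));
  /* The outstanding violation is reported when the transaction commits. */
  if( sqlite3_exec(db, "COMMIT;", 0, 0, &err)!=SQLITE_OK ){
    printf("commit: %s\n", err);
    sqlite3_free(err);
  }
  sqlite3_close(db);
  return 0;
}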
@@ -101034,7 +92113,8 @@ static void fkScanChildren(
** table).
*/
SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *pTab){
- return (FKey *)sqlite3HashFind(&pTab->pSchema->fkeyHash, pTab->zName);
+ int nName = sqlite3Strlen30(pTab->zName);
+ return (FKey *)sqlite3HashFind(&pTab->pSchema->fkeyHash, pTab->zName, nName);
}
/*
@@ -101092,7 +92172,7 @@ SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTa
}
if( !p ) return;
iSkip = sqlite3VdbeMakeLabel(v);
- sqlite3VdbeAddOp2(v, OP_FkIfZero, 1, iSkip); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_FkIfZero, 1, iSkip);
}
pParse->disableTriggers = 1;
@@ -101110,7 +92190,6 @@ SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTa
*/
if( (db->flags & SQLITE_DeferFKs)==0 ){
sqlite3VdbeAddOp2(v, OP_FkIfZero, 0, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY,
OE_Abort, 0, P4_STATIC, P5_ConstraintFK);
}
@@ -101186,24 +92265,6 @@ static int fkParentIsModified(
}
/*
-** Return true if the parser passed as the first argument is being
-** used to code a trigger that is really a "SET NULL" action belonging
-** to trigger pFKey.
-*/
-static int isSetNullAction(Parse *pParse, FKey *pFKey){
- Parse *pTop = sqlite3ParseToplevel(pParse);
- if( pTop->pTriggerPrg ){
- Trigger *p = pTop->pTriggerPrg->pTrigger;
- if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull)
- || (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull)
- ){
- return 1;
- }
- }
- return 0;
-}
-
-/*
** This function is called when inserting, deleting or updating a row of
** table pTab to generate VDBE code to perform foreign key constraint
** processing for the operation.
@@ -101255,7 +92316,7 @@ SQLITE_PRIVATE void sqlite3FkCheck(
int *aiCol;
int iCol;
int i;
- int bIgnore = 0;
+ int isIgnore = 0;
if( aChange
&& sqlite3_stricmp(pTab->zName, pFKey->zTo)!=0
@@ -101288,7 +92349,7 @@ SQLITE_PRIVATE void sqlite3FkCheck(
int iJump = sqlite3VdbeCurrentAddr(v) + pFKey->nCol + 1;
for(i=0; i<pFKey->nCol; i++){
int iReg = pFKey->aCol[i].iFrom + regOld + 1;
- sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump);
}
sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, -1);
}
@@ -101306,7 +92367,6 @@ SQLITE_PRIVATE void sqlite3FkCheck(
if( aiCol[i]==pTab->iPKey ){
aiCol[i] = -1;
}
- assert( pIdx==0 || pIdx->aiColumn[i]>=0 );
#ifndef SQLITE_OMIT_AUTHORIZATION
/* Request permission to read the parent key columns. If the
** authorization callback returns SQLITE_IGNORE, behave as if any
@@ -101315,7 +92375,7 @@ SQLITE_PRIVATE void sqlite3FkCheck(
int rcauth;
char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zName;
rcauth = sqlite3AuthReadCol(pParse, pTo->zName, zCol, iDb);
- bIgnore = (rcauth==SQLITE_IGNORE);
+ isIgnore = (rcauth==SQLITE_IGNORE);
}
#endif
}
@@ -101330,18 +92390,12 @@ SQLITE_PRIVATE void sqlite3FkCheck(
/* A row is being removed from the child table. Search for the parent.
** If the parent does not exist, removing the child row resolves an
** outstanding foreign key constraint violation. */
- fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regOld, -1, bIgnore);
+ fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regOld, -1,isIgnore);
}
- if( regNew!=0 && !isSetNullAction(pParse, pFKey) ){
+ if( regNew!=0 ){
/* A row is being added to the child table. If a parent row cannot
- ** be found, adding the child row has violated the FK constraint.
- **
- ** If this operation is being performed as part of a trigger program
- ** that is actually a "SET NULL" action belonging to this very
- ** foreign key, then omit this scan altogether. As all child key
- ** values are guaranteed to be NULL, it is not possible for adding
- ** this row to cause an FK violation. */
- fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regNew, +1, bIgnore);
+ ** be found, adding the child row has violated the FK constraint. */
+ fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regNew, +1,isIgnore);
}
sqlite3DbFree(db, aiFree);
@@ -101362,8 +92416,8 @@ SQLITE_PRIVATE void sqlite3FkCheck(
&& !pParse->pToplevel && !pParse->isMultiWrite
){
assert( regOld==0 && regNew!=0 );
- /* Inserting a single row into a parent table cannot cause (or fix)
- ** an immediate foreign key violation. So do nothing in this case. */
+ /* Inserting a single row into a parent table cannot cause an immediate
+ ** foreign key violation. So do nothing in this case. */
continue;
}
@@ -101387,28 +92441,13 @@ SQLITE_PRIVATE void sqlite3FkCheck(
fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regNew, -1);
}
if( regOld!=0 ){
- int eAction = pFKey->aAction[aChange!=0];
+ /* If there is a RESTRICT action configured for the current operation
+ ** on the parent table of this FK, then throw an exception
+ ** immediately if the FK constraint is violated, even if this is a
+ ** deferred trigger. That's what RESTRICT means. To defer checking
+ ** the constraint, the FK should specify NO ACTION (represented
+ ** using OE_None). NO ACTION is the default. */
fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1);
- /* If this is a deferred FK constraint, or a CASCADE or SET NULL
- ** action applies, then any foreign key violations caused by
- ** removing the parent key will be rectified by the action trigger.
- ** So do not set the "may-abort" flag in this case.
- **
- ** Note 1: If the FK is declared "ON UPDATE CASCADE", then the
- ** may-abort flag will eventually be set on this statement anyway
- ** (when this function is called as part of processing the UPDATE
- ** within the action trigger).
- **
- ** Note 2: At first glance it may seem like SQLite could simply omit
- ** all OP_FkCounter related scans when either CASCADE or SET NULL
- ** applies. The trouble starts if the CASCADE or SET NULL action
- ** trigger causes other triggers or action rules attached to the
- ** child table to fire. In these cases the fk constraint counters
- ** might be set incorrectly if any OP_FkCounter related scans are
- ** omitted. */
- if( !pFKey->isDeferred && eAction!=OE_Cascade && eAction!=OE_SetNull ){
- sqlite3MayAbort(pParse);
- }
}
pItem->zName = 0;
sqlite3SrcListDelete(db, pSrc);
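The RESTRICT behaviour described in the comment above is visible at the SQL level; the sketch below (public API only) shows the delete failing immediately even though the constraint is otherwise declared deferrable.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *err = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "PRAGMA foreign_keys=ON;"
    "CREATE TABLE parent(a INTEGER PRIMARY KEY);"
    "CREATE TABLE child(x INTEGER REFERENCES parent(a)"
    "  ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED);"
    "INSERT INTO parent VALUES(1);"
    "INSERT INTO child VALUES(1);", 0, 0, 0);
  sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  /* RESTRICT fails right away, even inside the open transaction. */
  if( sqlite3_exec(db, "DELETE FROM parent WHERE a=1;", 0, 0, &err)!=SQLITE_OK ){
    printf("%s\n", err);
    sqlite3_free(err);
  }
  sqlite3_exec(db, "ROLLBACK;", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}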
@@ -101438,10 +92477,7 @@ SQLITE_PRIVATE u32 sqlite3FkOldmask(
Index *pIdx = 0;
sqlite3FkLocateIndex(pParse, pTab, p, &pIdx, 0);
if( pIdx ){
- for(i=0; i<pIdx->nKeyCol; i++){
- assert( pIdx->aiColumn[i]>=0 );
- mask |= COLUMN_MASK(pIdx->aiColumn[i]);
- }
+ for(i=0; i<pIdx->nKeyCol; i++) mask |= COLUMN_MASK(pIdx->aiColumn[i]);
}
}
}
@@ -101563,9 +92599,7 @@ static Trigger *fkActionTrigger(
iFromCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom;
assert( iFromCol>=0 );
- assert( pIdx!=0 || (pTab->iPKey>=0 && pTab->iPKey<pTab->nCol) );
- assert( pIdx==0 || pIdx->aiColumn[i]>=0 );
- tToCol.z = pTab->aCol[pIdx ? pIdx->aiColumn[i] : pTab->iPKey].zName;
+ tToCol.z = pIdx ? pTab->aCol[pIdx->aiColumn[i]].zName : "oid";
tFromCol.z = pFKey->pFrom->aCol[iFromCol].zName;
tToCol.n = sqlite3Strlen30(tToCol.z);
@@ -101577,10 +92611,10 @@ static Trigger *fkActionTrigger(
** parent table are used for the comparison. */
pEq = sqlite3PExpr(pParse, TK_EQ,
sqlite3PExpr(pParse, TK_DOT,
- sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tOld),
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol)
, 0),
- sqlite3ExprAlloc(db, TK_ID, &tFromCol, 0)
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tFromCol)
, 0);
pWhere = sqlite3ExprAnd(db, pWhere, pEq);
@@ -101592,12 +92626,12 @@ static Trigger *fkActionTrigger(
if( pChanges ){
pEq = sqlite3PExpr(pParse, TK_IS,
sqlite3PExpr(pParse, TK_DOT,
- sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0),
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tOld),
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol),
0),
sqlite3PExpr(pParse, TK_DOT,
- sqlite3ExprAlloc(db, TK_ID, &tNew, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0),
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tNew),
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol),
0),
0);
pWhen = sqlite3ExprAnd(db, pWhen, pEq);
@@ -101607,8 +92641,8 @@ static Trigger *fkActionTrigger(
Expr *pNew;
if( action==OE_Cascade ){
pNew = sqlite3PExpr(pParse, TK_DOT,
- sqlite3ExprAlloc(db, TK_ID, &tNew, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tNew),
+ sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol)
, 0);
}else if( action==OE_SetDflt ){
Expr *pDflt = pFKey->pFrom->aCol[iFromCol].pDflt;
@@ -101655,12 +92689,13 @@ static Trigger *fkActionTrigger(
pTrigger = (Trigger *)sqlite3DbMallocZero(db,
sizeof(Trigger) + /* struct Trigger */
sizeof(TriggerStep) + /* Single step in trigger program */
- nFrom + 1 /* Space for pStep->zTarget */
+ nFrom + 1 /* Space for pStep->target.z */
);
if( pTrigger ){
pStep = pTrigger->step_list = (TriggerStep *)&pTrigger[1];
- pStep->zTarget = (char *)&pStep[1];
- memcpy((char *)pStep->zTarget, zFrom, nFrom);
+ pStep->target.z = (char *)&pStep[1];
+ pStep->target.n = nFrom;
+ memcpy((char *)pStep->target.z, zFrom, nFrom);
pStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE);
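As a rough illustration of the action programs this routine synthesizes, the sketch below (public API only) shows ON DELETE CASCADE removing the matching child row when the parent row is deleted:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "PRAGMA foreign_keys=ON;"
    "CREATE TABLE parent(a INTEGER PRIMARY KEY);"
    "CREATE TABLE child(x INTEGER REFERENCES parent(a) ON DELETE CASCADE);"
    "INSERT INTO parent VALUES(1);"
    "INSERT INTO child VALUES(1);"
    "DELETE FROM parent WHERE a=1;", 0, 0, 0);
  /* The synthesized action deleted the matching child row as well. */
  sqlite3_prepare_v2(db, "SELECT count(*) FROM child", -1, &stmt, 0);
  if( sqlite3_step(stmt)==SQLITE_ROW ){
    printf("child rows left: %d\n", sqlite3_column_int(stmt, 0));  /* 0 */
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}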
@@ -101756,7 +92791,7 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){
}else{
void *p = (void *)pFKey->pNextTo;
const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo);
- sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p);
+ sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, sqlite3Strlen30(z), p);
}
if( pFKey->pNextTo ){
pFKey->pNextTo->pPrevTo = pFKey->pPrevTo;
@@ -101796,7 +92831,6 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){
** This file contains C code routines that are called by the parser
** to handle INSERT statements in SQLite.
*/
-/* #include "sqliteInt.h" */
/*
** Generate code that will
@@ -101826,7 +92860,7 @@ SQLITE_PRIVATE void sqlite3OpenTable(
}else{
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
assert( pPk!=0 );
- assert( pPk->tnum==pTab->tnum );
+    assert( pPk->tnum==pTab->tnum );
sqlite3VdbeAddOp3(v, opcode, iCur, pPk->tnum, iDb);
sqlite3VdbeSetP4KeyInfo(pParse, pPk);
VdbeComment((v, "%s", pTab->zName));
@@ -101840,20 +92874,20 @@ SQLITE_PRIVATE void sqlite3OpenTable(
**
** Character Column affinity
** ------------------------------
-** 'A' BLOB
-** 'B' TEXT
-** 'C' NUMERIC
-** 'D' INTEGER
-** 'F' REAL
+** 'a' TEXT
+** 'b' NONE
+** 'c' NUMERIC
+** 'd' INTEGER
+** 'e' REAL
**
-** An extra 'D' is appended to the end of the string to cover the
+** An extra 'd' is appended to the end of the string to cover the
** rowid that appears as the last column in every index.
**
** Memory for the buffer containing the column index affinity string
** is managed along with the rest of the Index structure. It will be
** released when sqlite3DeleteIndex() is called.
*/
-SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(Vdbe *v, Index *pIdx){
if( !pIdx->zColAff ){
/* The first time a column affinity string for a particular index is
** required, it is allocated and populated here. It is then stored as
@@ -101865,6 +92899,7 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
*/
int n;
Table *pTab = pIdx->pTable;
+ sqlite3 *db = sqlite3VdbeDb(v);
pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1);
if( !pIdx->zColAff ){
db->mallocFailed = 1;
@@ -101872,18 +92907,7 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
}
for(n=0; n<pIdx->nColumn; n++){
i16 x = pIdx->aiColumn[n];
- if( x>=0 ){
- pIdx->zColAff[n] = pTab->aCol[x].affinity;
- }else if( x==XN_ROWID ){
- pIdx->zColAff[n] = SQLITE_AFF_INTEGER;
- }else{
- char aff;
- assert( x==XN_EXPR );
- assert( pIdx->aColExpr!=0 );
- aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr);
- if( aff==0 ) aff = SQLITE_AFF_BLOB;
- pIdx->zColAff[n] = aff;
- }
+ pIdx->zColAff[n] = x<0 ? SQLITE_AFF_INTEGER : pTab->aCol[x].affinity;
}
pIdx->zColAff[n] = 0;
}
@@ -101892,30 +92916,32 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
}
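The affinity characters above control how values are coerced when index and table records are assembled; a small public-API sketch of the observable effect:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t(a INTEGER, b BLOB);"
    "INSERT INTO t VALUES('123', '123');", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT typeof(a), typeof(b) FROM t", -1, &stmt, 0);
  if( sqlite3_step(stmt)==SQLITE_ROW ){
    /* INTEGER affinity converts the text; the no-affinity column keeps it. */
    printf("a: %s, b: %s\n",
           (const char*)sqlite3_column_text(stmt, 0),   /* integer */
           (const char*)sqlite3_column_text(stmt, 1));  /* text */
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}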
/*
-** Compute the affinity string for table pTab, if it has not already been
-** computed. As an optimization, omit trailing SQLITE_AFF_BLOB affinities.
-**
-** If the affinity exists (that is, if it is not entirely SQLITE_AFF_BLOB values) and
-** if iReg>0 then code an OP_Affinity opcode that will set the affinities
-** for register iReg and following. Or if the affinity exists and iReg==0,
-** then just set the P4 operand of the previous opcode (which should be
-** an OP_MakeRecord) to the affinity string.
-**
-** A column affinity string has one character per column:
+** Set P4 of the most recently inserted opcode to a column affinity
+** string for table pTab. A column affinity string has one character
+** for each column indexed by the index, according to the affinity of the
+** column:
**
** Character Column affinity
** ------------------------------
-** 'A' BLOB
-** 'B' TEXT
-** 'C' NUMERIC
-** 'D' INTEGER
-** 'E' REAL
-*/
-SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
- int i;
- char *zColAff = pTab->zColAff;
- if( zColAff==0 ){
+** 'a' TEXT
+** 'b' NONE
+** 'c' NUMERIC
+** 'd' INTEGER
+** 'e' REAL
+*/
+SQLITE_PRIVATE void sqlite3TableAffinityStr(Vdbe *v, Table *pTab){
+ /* The first time a column affinity string for a particular table
+ ** is required, it is allocated and populated here. It is then
+ ** stored as a member of the Table structure for subsequent use.
+ **
+ ** The column affinity string will eventually be deleted by
+ ** sqlite3DeleteTable() when the Table structure itself is cleaned up.
+ */
+ if( !pTab->zColAff ){
+ char *zColAff;
+ int i;
sqlite3 *db = sqlite3VdbeDb(v);
+
zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1);
if( !zColAff ){
db->mallocFailed = 1;
@@ -101925,28 +92951,22 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
for(i=0; i<pTab->nCol; i++){
zColAff[i] = pTab->aCol[i].affinity;
}
- do{
- zColAff[i--] = 0;
- }while( i>=0 && zColAff[i]==SQLITE_AFF_BLOB );
+ zColAff[pTab->nCol] = '\0';
+
pTab->zColAff = zColAff;
}
- i = sqlite3Strlen30(zColAff);
- if( i ){
- if( iReg ){
- sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i);
- }else{
- sqlite3VdbeChangeP4(v, -1, zColAff, i);
- }
- }
+
+ sqlite3VdbeChangeP4(v, -1, pTab->zColAff, P4_TRANSIENT);
}
/*
** Return non-zero if the table pTab in database iDb or any of its indices
-** have been opened at any point in the VDBE program. This is used to see if
+** have been opened at any point in the VDBE program beginning at location
+** iStartAddr through the end of the program. This is used to see if
** a statement of the form "INSERT INTO <iDb, pTab> SELECT ..." can
-** run without using a temporary table for the results of the SELECT.
+** run without using a temporary table for the results of the SELECT.
*/
-static int readsTable(Parse *p, int iDb, Table *pTab){
+static int readsTable(Parse *p, int iStartAddr, int iDb, Table *pTab){
Vdbe *v = sqlite3GetVdbe(p);
int i;
int iEnd = sqlite3VdbeCurrentAddr(v);
@@ -101954,7 +92974,7 @@ static int readsTable(Parse *p, int iDb, Table *pTab){
VTable *pVTab = IsVirtual(pTab) ? sqlite3GetVTable(p->db, pTab) : 0;
#endif
- for(i=1; i<iEnd; i++){
+ for(i=iStartAddr; i<iEnd; i++){
VdbeOp *pOp = sqlite3VdbeGetOp(v, i);
assert( pOp!=0 );
if( pOp->opcode==OP_OpenRead && pOp->p3==iDb ){
@@ -102044,7 +93064,7 @@ SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){
/* This routine is never called during trigger-generation. It is
** only called from the top-level */
assert( pParse->pTriggerTab==0 );
- assert( sqlite3IsToplevel(pParse) );
+ assert( pParse==sqlite3ParseToplevel(pParse) );
assert( v ); /* We failed long ago if this is not so */
for(p = pParse->pAinc; p; p = p->pNext){
@@ -102054,15 +93074,15 @@ SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){
sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenRead);
sqlite3VdbeAddOp3(v, OP_Null, 0, memId, memId+1);
addr = sqlite3VdbeCurrentAddr(v);
- sqlite3VdbeLoadString(v, memId-1, p->pTab->zName);
- sqlite3VdbeAddOp2(v, OP_Rewind, 0, addr+9); VdbeCoverage(v);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, memId-1, 0, p->pTab->zName, 0);
+ sqlite3VdbeAddOp2(v, OP_Rewind, 0, addr+9);
sqlite3VdbeAddOp3(v, OP_Column, 0, 0, memId);
- sqlite3VdbeAddOp3(v, OP_Ne, memId-1, addr+7, memId); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Ne, memId-1, addr+7, memId);
sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL);
sqlite3VdbeAddOp2(v, OP_Rowid, 0, memId+1);
sqlite3VdbeAddOp3(v, OP_Column, 0, 1, memId);
- sqlite3VdbeGoto(v, addr+9);
- sqlite3VdbeAddOp2(v, OP_Next, 0, addr+2); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addr+9);
+ sqlite3VdbeAddOp2(v, OP_Next, 0, addr+2);
sqlite3VdbeAddOp2(v, OP_Integer, 0, memId);
sqlite3VdbeAddOp0(v, OP_Close);
}
@@ -102097,16 +93117,25 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){
assert( v );
for(p = pParse->pAinc; p; p = p->pNext){
Db *pDb = &db->aDb[p->iDb];
- int addr1;
+ int j1, j2, j3, j4, j5;
int iRec;
int memId = p->regCtr;
iRec = sqlite3GetTempReg(pParse);
assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) );
sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite);
- addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, memId+1); VdbeCoverage(v);
+ j1 = sqlite3VdbeAddOp1(v, OP_NotNull, memId+1);
+ j2 = sqlite3VdbeAddOp0(v, OP_Rewind);
+ j3 = sqlite3VdbeAddOp3(v, OP_Column, 0, 0, iRec);
+ j4 = sqlite3VdbeAddOp3(v, OP_Eq, memId-1, 0, iRec);
+ sqlite3VdbeAddOp2(v, OP_Next, 0, j3);
+ sqlite3VdbeJumpHere(v, j2);
sqlite3VdbeAddOp2(v, OP_NewRowid, 0, memId+1);
- sqlite3VdbeJumpHere(v, addr1);
+ j5 = sqlite3VdbeAddOp0(v, OP_Goto);
+ sqlite3VdbeJumpHere(v, j4);
+ sqlite3VdbeAddOp2(v, OP_Rowid, 0, memId+1);
+ sqlite3VdbeJumpHere(v, j1);
+ sqlite3VdbeJumpHere(v, j5);
sqlite3VdbeAddOp3(v, OP_MakeRecord, memId-1, 2, iRec);
sqlite3VdbeAddOp3(v, OP_Insert, 0, iRec, memId+1);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
@@ -102124,6 +93153,97 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){
#endif /* SQLITE_OMIT_AUTOINCREMENT */
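For reference, the bookkeeping these routines perform is visible at the SQL level through the sqlite_sequence table; a minimal sketch using the public API:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT, v);"
    "INSERT INTO t(v) VALUES('x');"
    "INSERT INTO t(v) VALUES('y');", 0, 0, 0);
  /* sqlite_sequence records the largest rowid ever allocated for t. */
  sqlite3_prepare_v2(db, "SELECT name, seq FROM sqlite_sequence", -1, &stmt, 0);
  while( sqlite3_step(stmt)==SQLITE_ROW ){
    printf("%s -> %d\n", (const char*)sqlite3_column_text(stmt, 0),
                         sqlite3_column_int(stmt, 1));  /* t -> 2 */
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}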
+/*
+** Generate code for a co-routine that will evaluate a subquery one
+** row at a time.
+**
+** The pSelect parameter is the subquery that the co-routine will evaluate.
+** Information about the location of the co-routine and the registers it will use
+** is returned by filling in the pDest object.
+**
+** Registers are allocated as follows:
+**
+** pDest->iSDParm The register holding the next entry-point of the
+** co-routine. Run the co-routine to its next breakpoint
+** by calling "OP_Yield $X" where $X is pDest->iSDParm.
+**
+** pDest->iSDParm+1 The register holding the "completed" flag for the
+** co-routine. This register is 0 if the previous Yield
+** generated a new result row, or 1 if the subquery
+** has completed. If the Yield is called again
+** after this register becomes 1, then the VDBE will
+** halt with an SQLITE_INTERNAL error.
+**
+** pDest->iSdst First result register.
+**
+** pDest->nSdst Number of result registers.
+**
+** This routine handles all of the register allocation and fills in the
+** pDest structure appropriately.
+**
+** Here is a schematic of the generated code assuming that X is the
+** co-routine entry-point register reg[pDest->iSDParm], that EOF is the
+** completed flag reg[pDest->iSDParm+1], and R and S are the range of
+** registers that hold the result set, reg[pDest->iSdst] through
+** reg[pDest->iSdst+pDest->nSdst-1]:
+**
+** X <- A
+** EOF <- 0
+** goto B
+** A: setup for the SELECT
+** loop rows in the SELECT
+** load results into registers R..S
+** yield X
+** end loop
+** cleanup after the SELECT
+** EOF <- 1
+** yield X
+** halt-error
+** B:
+**
+** To use this subroutine, the caller generates code as follows:
+**
+** [ Co-routine generated by this subroutine, shown above ]
+** S: yield X
+** if EOF goto E
+** if skip this row, goto C
+** if terminate loop, goto E
+** deal with this row
+** C: goto S
+** E:
+*/
+SQLITE_PRIVATE int sqlite3CodeCoroutine(Parse *pParse, Select *pSelect, SelectDest *pDest){
+ int regYield; /* Register holding co-routine entry-point */
+ int regEof; /* Register holding co-routine completion flag */
+ int addrTop; /* Top of the co-routine */
+ int j1; /* Jump instruction */
+ int rc; /* Result code */
+ Vdbe *v; /* VDBE under construction */
+
+ regYield = ++pParse->nMem;
+ regEof = ++pParse->nMem;
+ v = sqlite3GetVdbe(pParse);
+ addrTop = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_Integer, addrTop+2, regYield); /* X <- A */
+ VdbeComment((v, "Co-routine entry point"));
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regEof); /* EOF <- 0 */
+ VdbeComment((v, "Co-routine completion flag"));
+ sqlite3SelectDestInit(pDest, SRT_Coroutine, regYield);
+ j1 = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0);
+ rc = sqlite3Select(pParse, pSelect, pDest);
+ assert( pParse->nErr==0 || rc );
+ if( pParse->db->mallocFailed && rc==SQLITE_OK ) rc = SQLITE_NOMEM;
+ if( rc ) return rc;
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, regEof); /* EOF <- 1 */
+ sqlite3VdbeAddOp1(v, OP_Yield, regYield); /* yield X */
+ sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_INTERNAL, OE_Abort);
+ VdbeComment((v, "End of coroutine"));
+ sqlite3VdbeJumpHere(v, j1); /* label B: */
+ return rc;
+}
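The caller-side pattern sketched in the comment above corresponds roughly to the following code-generation sequence. This is only a schematic that reuses helpers already present in this file; it assumes an internal build context and is not a standalone program:

/* Caller-side sketch only, mirroring the schematic above. */
static void useCoroutine(Parse *pParse, Select *pSelect){
  Vdbe *v = sqlite3GetVdbe(pParse);
  SelectDest dest;
  int addrYield, addrEof;
  if( sqlite3CodeCoroutine(pParse, pSelect, &dest) ) return;
  addrYield = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);   /* S: yield X     */
  addrEof   = sqlite3VdbeAddOp1(v, OP_If, dest.iSDParm+1);    /* if EOF goto E  */
  /* ... consume the row in reg[dest.iSdst .. dest.iSdst+dest.nSdst-1] ... */
  sqlite3VdbeAddOp2(v, OP_Goto, 0, addrYield);                /* C: goto S      */
  sqlite3VdbeJumpHere(v, addrEof);                            /* E:             */
}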
+
+
+
/* Forward declaration */
static int xferOptimization(
Parse *pParse, /* Parser context */
@@ -102136,23 +93256,20 @@ static int xferOptimization(
/*
** This routine is called to handle SQL of the following forms:
**
-** insert into TABLE (IDLIST) values(EXPRLIST),(EXPRLIST),...
+** insert into TABLE (IDLIST) values(EXPRLIST)
** insert into TABLE (IDLIST) select
-** insert into TABLE (IDLIST) default values
**
** The IDLIST following the table name is always optional. If omitted,
-** then a list of all (non-hidden) columns for the table is substituted.
-** The IDLIST appears in the pColumn parameter. pColumn is NULL if IDLIST
-** is omitted.
+** then a list of all columns for the table is substituted. The IDLIST
+** appears in the pColumn parameter. pColumn is NULL if IDLIST is omitted.
**
-** For the pSelect parameter holds the values to be inserted for the
-** first two forms shown above. A VALUES clause is really just short-hand
-** for a SELECT statement that omits the FROM clause and everything else
-** that follows. If the pSelect parameter is NULL, that means that the
-** DEFAULT VALUES form of the INSERT statement is intended.
+** The pList parameter holds EXPRLIST in the first form of the INSERT
+** statement above, and pSelect is NULL. For the second form, pList is
+** NULL and pSelect is a pointer to the select statement used to generate
+** data for the insert.
**
** The code generated follows one of four templates. For a simple
-** insert with data coming from a single-row VALUES clause, the code executes
+** insert with data coming from a VALUES clause, the code executes
** once straight down through. Pseudo-code follows (we call this
** the "1st template"):
**
@@ -102189,6 +93306,7 @@ static int xferOptimization(
** and the SELECT clause does not read from <table> at any time.
** The generated code follows this template:
**
+** EOF <- 0
** X <- A
** goto B
** A: setup for the SELECT
@@ -102197,9 +93315,12 @@ static int xferOptimization(
** yield X
** end loop
** cleanup after the SELECT
-** end-coroutine X
+** EOF <- 1
+** yield X
+** goto A
** B: open write cursor to <table> and its indices
-** C: yield X, at EOF goto D
+** C: yield X
+** if EOF goto D
** insert the select result into <table> from R..R+n
** goto C
** D: cleanup
@@ -102207,9 +93328,10 @@ static int xferOptimization(
** The 4th template is used if the insert statement takes its
** values from a SELECT but the data is being inserted into a table
** that is also read as part of the SELECT. In the third form,
-** we have to use an intermediate table to store the results of
+** we have to use an intermediate table to store the results of
** the select. The template is like this:
**
+** EOF <- 0
** X <- A
** goto B
** A: setup for the SELECT
@@ -102218,9 +93340,12 @@ static int xferOptimization(
** yield X
** end loop
** cleanup after the SELECT
-** end co-routine R
+** EOF <- 1
+** yield X
+** halt-error
** B: open temp table
-** L: yield X, at EOF goto M
+** L: yield X
+** if EOF goto M
** insert row from R..R+n into temp table
** goto L
** M: open write cursor to <table> and its indices
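For orientation, the statement shapes the templates above are chosen for look like this at the application level (a sketch assuming tables t and src already exist with compatible columns; which template is actually used also depends on triggers and on whether the SELECT reads the target table):

#include <sqlite3.h>

int run_insert_forms(sqlite3 *db){
  int rc;
  /* Simple VALUES insert: 1st template. */
  rc = sqlite3_exec(db, "INSERT INTO t(a,b) VALUES(1,'one');", 0, 0, 0);
  if( rc==SQLITE_OK ){
    /* SELECT from another table: typically the 3rd template. */
    rc = sqlite3_exec(db, "INSERT INTO t(a,b) SELECT a,b FROM src;", 0, 0, 0);
  }
  if( rc==SQLITE_OK ){
    /* SELECT that reads the target table: 4th template, intermediate table. */
    rc = sqlite3_exec(db, "INSERT INTO t(a,b) SELECT a+100,b FROM t;", 0, 0, 0);
  }
  return rc;
}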
@@ -102233,6 +93358,7 @@ static int xferOptimization(
SQLITE_PRIVATE void sqlite3Insert(
Parse *pParse, /* Parser context */
SrcList *pTabList, /* Name of table into which we are inserting */
+ ExprList *pList, /* List of values to be inserted */
Select *pSelect, /* A SELECT statement to use as the data source */
IdList *pColumn, /* Column names corresponding to IDLIST. */
int onError /* How to handle constraint errors */
@@ -102250,17 +93376,16 @@ SQLITE_PRIVATE void sqlite3Insert(
int iIdxCur = 0; /* First index cursor */
int ipkColumn = -1; /* Column that is the INTEGER PRIMARY KEY */
int endOfLoop; /* Label for the end of the insertion loop */
+ int useTempTable = 0; /* Store SELECT results in intermediate table */
int srcTab = 0; /* Data comes from this temporary cursor if >=0 */
int addrInsTop = 0; /* Jump to label "D" */
int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */
+ int addrSelect = 0; /* Address of coroutine that implements the SELECT */
SelectDest dest; /* Destination for SELECT on rhs of INSERT */
int iDb; /* Index of database holding TABLE */
Db *pDb; /* The database containing table being inserted into */
- u8 useTempTable = 0; /* Store SELECT results in intermediate table */
- u8 appendFlag = 0; /* True if the insert is likely to be an append */
- u8 withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */
- u8 bIdListInOrder; /* True if IDLIST is in table order */
- ExprList *pList = 0; /* List of VALUES() to be inserted */
+ int appendFlag = 0; /* True if the insert is likely to be an append */
+ int withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */
/* Register allocations */
int regFromSelect = 0;/* Base register for data coming from SELECT */
@@ -102269,6 +93394,7 @@ SQLITE_PRIVATE void sqlite3Insert(
int regIns; /* Block of regs holding rowid+data being inserted */
int regRowid; /* registers holding insert rowid */
int regData; /* register holding first column to insert */
+ int regEof = 0; /* Register recording end of SELECT data */
int *aRegIdx = 0; /* One register allocated to each index */
#ifndef SQLITE_OMIT_TRIGGER
@@ -102283,17 +93409,6 @@ SQLITE_PRIVATE void sqlite3Insert(
goto insert_cleanup;
}
- /* If the Select object is really just a simple VALUES() list with a
- ** single row (the common case) then keep that one row of values
- ** and discard the other (unused) parts of the pSelect object
- */
- if( pSelect && (pSelect->selFlags & SF_Values)!=0 && pSelect->pPrior==0 ){
- pList = pSelect->pEList;
- pSelect->pEList = 0;
- sqlite3SelectDelete(db, pSelect);
- pSelect = 0;
- }
-
/* Locate the table into which we will be inserting new information.
*/
assert( pTabList->nSrc==1 );
@@ -102371,83 +93486,21 @@ SQLITE_PRIVATE void sqlite3Insert(
*/
regAutoinc = autoIncBegin(pParse, iDb, pTab);
- /* Allocate registers for holding the rowid of the new row,
- ** the content of the new row, and the assembled row record.
- */
- regRowid = regIns = pParse->nMem+1;
- pParse->nMem += pTab->nCol + 1;
- if( IsVirtual(pTab) ){
- regRowid++;
- pParse->nMem++;
- }
- regData = regRowid+1;
-
- /* If the INSERT statement included an IDLIST term, then make sure
- ** all elements of the IDLIST really are columns of the table and
- ** remember the column indices.
- **
- ** If the table has an INTEGER PRIMARY KEY column and that column
- ** is named in the IDLIST, then record in the ipkColumn variable
- ** the index into IDLIST of the primary key column. ipkColumn is
- ** the index of the primary key as it appears in IDLIST, not as
-  ** it appears in the original table.  (The index of the INTEGER
- ** PRIMARY KEY in the original table is pTab->iPKey.)
- */
- bIdListInOrder = (pTab->tabFlags & TF_OOOHidden)==0;
- if( pColumn ){
- for(i=0; i<pColumn->nId; i++){
- pColumn->a[i].idx = -1;
- }
- for(i=0; i<pColumn->nId; i++){
- for(j=0; j<pTab->nCol; j++){
- if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){
- pColumn->a[i].idx = j;
- if( i!=j ) bIdListInOrder = 0;
- if( j==pTab->iPKey ){
- ipkColumn = i; assert( !withoutRowid );
- }
- break;
- }
- }
- if( j>=pTab->nCol ){
- if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){
- ipkColumn = i;
- bIdListInOrder = 0;
- }else{
- sqlite3ErrorMsg(pParse, "table %S has no column named %s",
- pTabList, 0, pColumn->a[i].zName);
- pParse->checkSchema = 1;
- goto insert_cleanup;
- }
- }
- }
- }
-
/* Figure out how many columns of data are supplied. If the data
** is coming from a SELECT statement, then generate a co-routine that
** produces a single row of the SELECT on each invocation. The
** co-routine is the common header to the 3rd and 4th templates.
*/
if( pSelect ){
- /* Data is coming from a SELECT or from a multi-row VALUES clause.
- ** Generate a co-routine to run the SELECT. */
- int regYield; /* Register holding co-routine entry-point */
- int addrTop; /* Top of the co-routine */
- int rc; /* Result code */
-
- regYield = ++pParse->nMem;
- addrTop = sqlite3VdbeCurrentAddr(v) + 1;
- sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
- sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield);
- dest.iSdst = bIdListInOrder ? regData : 0;
- dest.nSdst = pTab->nCol;
- rc = sqlite3Select(pParse, pSelect, &dest);
+ /* Data is coming from a SELECT. Generate a co-routine to run the SELECT */
+ int rc = sqlite3CodeCoroutine(pParse, pSelect, &dest);
+ if( rc ) goto insert_cleanup;
+
+ regEof = dest.iSDParm + 1;
regFromSelect = dest.iSdst;
- if( rc || db->mallocFailed || pParse->nErr ) goto insert_cleanup;
- sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield);
- sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */
assert( pSelect->pEList );
nColumn = pSelect->pEList->nExpr;
+ assert( dest.nSdst==nColumn );
/* Set useTempTable to TRUE if the result of the SELECT statement
** should be written into a temporary table (template 4). Set to
@@ -102458,7 +93511,7 @@ SQLITE_PRIVATE void sqlite3Insert(
** of the tables being read by the SELECT statement. Also use a
** temp table in the case of row triggers.
*/
- if( pTrigger || readsTable(pParse, iDb, pTab) ){
+ if( pTrigger || readsTable(pParse, addrSelect, iDb, pTab) ){
useTempTable = 1;
}
@@ -102468,55 +93521,48 @@ SQLITE_PRIVATE void sqlite3Insert(
** here is from the 4th template:
**
** B: open temp table
- ** L: yield X, goto M at EOF
+ ** L: yield X
+ ** if EOF goto M
** insert row from R..R+n into temp table
** goto L
** M: ...
*/
int regRec; /* Register to hold packed record */
int regTempRowid; /* Register to hold temp table ROWID */
- int addrL; /* Label "L" */
+ int addrTop; /* Label "L" */
+ int addrIf; /* Address of jump to M */
srcTab = pParse->nTab++;
regRec = sqlite3GetTempReg(pParse);
regTempRowid = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, srcTab, nColumn);
- addrL = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v);
+ addrTop = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
+ addrIf = sqlite3VdbeAddOp1(v, OP_If, regEof);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regFromSelect, nColumn, regRec);
sqlite3VdbeAddOp2(v, OP_NewRowid, srcTab, regTempRowid);
sqlite3VdbeAddOp3(v, OP_Insert, srcTab, regRec, regTempRowid);
- sqlite3VdbeGoto(v, addrL);
- sqlite3VdbeJumpHere(v, addrL);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop);
+ sqlite3VdbeJumpHere(v, addrIf);
sqlite3ReleaseTempReg(pParse, regRec);
sqlite3ReleaseTempReg(pParse, regTempRowid);
}
}else{
- /* This is the case if the data for the INSERT is coming from a
- ** single-row VALUES clause
+ /* This is the case if the data for the INSERT is coming from a VALUES
+ ** clause
*/
NameContext sNC;
memset(&sNC, 0, sizeof(sNC));
sNC.pParse = pParse;
srcTab = -1;
assert( useTempTable==0 );
- if( pList ){
- nColumn = pList->nExpr;
- if( sqlite3ResolveExprListNames(&sNC, pList) ){
+ nColumn = pList ? pList->nExpr : 0;
+ for(i=0; i<nColumn; i++){
+ if( sqlite3ResolveExprNames(&sNC, pList->a[i].pExpr) ){
goto insert_cleanup;
}
- }else{
- nColumn = 0;
}
}
- /* If there is no IDLIST term but the table has an integer primary
-  ** key, then set the ipkColumn variable to the integer primary key
- ** column index in the original table definition.
- */
- if( pColumn==0 && nColumn>0 ){
- ipkColumn = pTab->iPKey;
- }
-
/* Make sure the number of columns in the source data matches the number
** of columns to be inserted into the table.
*/
@@ -102535,6 +93581,52 @@ SQLITE_PRIVATE void sqlite3Insert(
sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId);
goto insert_cleanup;
}
+
+ /* If the INSERT statement included an IDLIST term, then make sure
+ ** all elements of the IDLIST really are columns of the table and
+ ** remember the column indices.
+ **
+ ** If the table has an INTEGER PRIMARY KEY column and that column
+ ** is named in the IDLIST, then record in the ipkColumn variable
+ ** the index into IDLIST of the primary key column. ipkColumn is
+ ** the index of the primary key as it appears in IDLIST, not as
+  ** it appears in the original table.  (The index of the INTEGER
+ ** PRIMARY KEY in the original table is pTab->iPKey.)
+ */
+ if( pColumn ){
+ for(i=0; i<pColumn->nId; i++){
+ pColumn->a[i].idx = -1;
+ }
+ for(i=0; i<pColumn->nId; i++){
+ for(j=0; j<pTab->nCol; j++){
+ if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){
+ pColumn->a[i].idx = j;
+ if( j==pTab->iPKey ){
+ ipkColumn = i; assert( !withoutRowid );
+ }
+ break;
+ }
+ }
+ if( j>=pTab->nCol ){
+ if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){
+ ipkColumn = i;
+ }else{
+ sqlite3ErrorMsg(pParse, "table %S has no column named %s",
+ pTabList, 0, pColumn->a[i].zName);
+ pParse->checkSchema = 1;
+ goto insert_cleanup;
+ }
+ }
+ }
+ }
+
+ /* If there is no IDLIST term but the table has an integer primary
+  ** key, then set the ipkColumn variable to the integer primary key
+ ** column index in the original table definition.
+ */
+ if( pColumn==0 && nColumn>0 ){
+ ipkColumn = pTab->iPKey;
+ }
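A short public-API sketch of the IDLIST and INTEGER PRIMARY KEY handling described above; the IPK may be named directly in the IDLIST or through the rowid alias:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(id INTEGER PRIMARY KEY, name TEXT);", 0, 0, 0);
  /* The IPK can be named directly in the IDLIST ... */
  sqlite3_exec(db, "INSERT INTO t(id, name) VALUES(5, 'five');", 0, 0, 0);
  /* ... or through the rowid alias, which maps to the same column. */
  sqlite3_exec(db, "INSERT INTO t(rowid, name) VALUES(6, 'six');", 0, 0, 0);
  printf("last rowid: %lld\n", (long long)sqlite3_last_insert_rowid(db));  /* 6 */
  sqlite3_close(db);
  return 0;
}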
/* Initialize the count of rows to be inserted
*/
@@ -102562,27 +93654,39 @@ SQLITE_PRIVATE void sqlite3Insert(
/* This block codes the top of loop only. The complete loop is the
** following pseudocode (template 4):
**
- ** rewind temp table, if empty goto D
+ ** rewind temp table
** C: loop over rows of intermediate table
  ** transfer values from intermediate table into <table>
** end loop
** D: ...
*/
- addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab); VdbeCoverage(v);
+ addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab);
addrCont = sqlite3VdbeCurrentAddr(v);
}else if( pSelect ){
/* This block codes the top of loop only. The complete loop is the
** following pseudocode (template 3):
**
- ** C: yield X, at EOF goto D
+ ** C: yield X
+ ** if EOF goto D
** insert the select result into <table> from R..R+n
** goto C
** D: ...
*/
- addrInsTop = addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
- VdbeCoverage(v);
+ addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
+ addrInsTop = sqlite3VdbeAddOp1(v, OP_If, regEof);
}
+ /* Allocate registers for holding the rowid of the new row,
+  ** the content of the new row, and the assembled row record.
+ */
+ regRowid = regIns = pParse->nMem+1;
+ pParse->nMem += pTab->nCol + 1;
+ if( IsVirtual(pTab) ){
+ regRowid++;
+ pParse->nMem++;
+ }
+ regData = regRowid+1;
+
/* Run the BEFORE and INSTEAD OF triggers, if there are any
*/
endOfLoop = sqlite3VdbeMakeLabel(v);
@@ -102598,7 +93702,7 @@ SQLITE_PRIVATE void sqlite3Insert(
if( ipkColumn<0 ){
sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols);
}else{
- int addr1;
+ int j1;
assert( !withoutRowid );
if( useTempTable ){
sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regCols);
@@ -102606,10 +93710,10 @@ SQLITE_PRIVATE void sqlite3Insert(
assert( pSelect==0 ); /* Otherwise useTempTable is true */
sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regCols);
}
- addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regCols); VdbeCoverage(v);
+ j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regCols);
sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols);
- sqlite3VdbeJumpHere(v, addr1);
- sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, j1);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols);
}
/* Cannot have triggers on a virtual table. If it were possible,
@@ -102643,7 +93747,8 @@ SQLITE_PRIVATE void sqlite3Insert(
** table column affinities.
*/
if( !isView ){
- sqlite3TableAffinity(v, pTab, regCols+1);
+ sqlite3VdbeAddOp2(v, OP_Affinity, regCols+1, pTab->nCol);
+ sqlite3TableAffinityStr(v, pTab);
}
/* Fire BEFORE or INSTEAD OF triggers */
@@ -102665,7 +93770,7 @@ SQLITE_PRIVATE void sqlite3Insert(
if( useTempTable ){
sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regRowid);
}else if( pSelect ){
- sqlite3VdbeAddOp2(v, OP_Copy, regFromSelect+ipkColumn, regRowid);
+ sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+ipkColumn, regRowid);
}else{
VdbeOp *pOp;
sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regRowid);
@@ -102682,16 +93787,16 @@ SQLITE_PRIVATE void sqlite3Insert(
** to generate a unique primary key value.
*/
if( !appendFlag ){
- int addr1;
+ int j1;
if( !IsVirtual(pTab) ){
- addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid); VdbeCoverage(v);
+ j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid);
sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc);
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeJumpHere(v, j1);
}else{
- addr1 = sqlite3VdbeCurrentAddr(v);
- sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, addr1+2); VdbeCoverage(v);
+ j1 = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, j1+2);
}
- sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid);
}
}else if( IsVirtual(pTab) || withoutRowid ){
sqlite3VdbeAddOp2(v, OP_Null, 0, regRowid);
@@ -102711,9 +93816,8 @@ SQLITE_PRIVATE void sqlite3Insert(
/* The value of the INTEGER PRIMARY KEY column is always a NULL.
** Whenever this column is read, the rowid will be substituted
** in its place. Hence, fill this column with a NULL to avoid
- ** taking up data space with information that will never be used.
- ** As there may be shallow copies of this value, make it a soft-NULL */
- sqlite3VdbeAddOp1(v, OP_SoftNull, iRegStore);
+ ** taking up data space with information that will never be used. */
+ sqlite3VdbeAddOp2(v, OP_Null, 0, iRegStore);
continue;
}
if( pColumn==0 ){
@@ -102730,13 +93834,11 @@ SQLITE_PRIVATE void sqlite3Insert(
}
}
if( j<0 || nColumn==0 || (pColumn && j>=pColumn->nId) ){
- sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore);
+ sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, iRegStore);
}else if( useTempTable ){
sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, iRegStore);
}else if( pSelect ){
- if( regFromSelect!=regData ){
- sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+j, iRegStore);
- }
+ sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+j, iRegStore);
}else{
sqlite3ExprCode(pParse, pList->a[j].pExpr, iRegStore);
}
@@ -102782,11 +93884,11 @@ SQLITE_PRIVATE void sqlite3Insert(
*/
sqlite3VdbeResolveLabel(v, endOfLoop);
if( useTempTable ){
- sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont);
sqlite3VdbeJumpHere(v, addrInsTop);
sqlite3VdbeAddOp1(v, OP_Close, srcTab);
}else if( pSelect ){
- sqlite3VdbeGoto(v, addrCont);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrCont);
sqlite3VdbeJumpHere(v, addrInsTop);
}
@@ -102827,7 +93929,7 @@ insert_cleanup:
}
/* Make sure "isView" and other macros defined above are undefined. Otherwise
-** they may interfere with compilation of other functions in this file
+** they may interfere with compilation of other functions in this file
** (or in another file, if this file becomes part of the amalgamation). */
#ifdef isView
#undef isView
@@ -102943,14 +94045,12 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int ix; /* Index loop counter */
int nCol; /* Number of columns */
int onError; /* Conflict resolution strategy */
- int addr1; /* Address of jump instruction */
+  int j1;                  /* Address of jump instruction */
int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */
int nPkField; /* Number of fields in PRIMARY KEY. 1 for ROWID tables */
int ipkTop = 0; /* Top of the rowid change constraint check */
int ipkBottom = 0; /* Bottom of the rowid change constraint check */
u8 isUpdate; /* True if this is an UPDATE operation */
- u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */
- int regRowid = -1; /* Register holding ROWID value */
isUpdate = regOldData!=0;
db = pParse->db;
@@ -103004,20 +94104,17 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
sqlite3VdbeAddOp4(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError,
regNewData+1+i, zMsg, P4_DYNAMIC);
sqlite3VdbeChangeP5(v, P5_ConstraintNotNull);
- VdbeCoverage(v);
break;
}
case OE_Ignore: {
sqlite3VdbeAddOp2(v, OP_IsNull, regNewData+1+i, ignoreDest);
- VdbeCoverage(v);
break;
}
default: {
assert( onError==OE_Replace );
- addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regNewData+1+i);
- VdbeCoverage(v);
+ j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regNewData+1+i);
sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regNewData+1+i);
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeJumpHere(v, j1);
break;
}
}
@@ -103034,7 +94131,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int allOk = sqlite3VdbeMakeLabel(v);
sqlite3ExprIfTrue(pParse, pCheck->a[i].pExpr, allOk, SQLITE_JUMPIFNULL);
if( onError==OE_Ignore ){
- sqlite3VdbeGoto(v, ignoreDest);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest);
}else{
char *zName = pCheck->a[i].zName;
if( zName==0 ) zName = pTab->zName;
@@ -103067,8 +94164,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
** it might have changed. Skip the conflict logic below if the rowid
** is unchanged. */
sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData);
- sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
- VdbeCoverage(v);
}
/* If the response to a rowid conflict is REPLACE but the response
@@ -103088,7 +94183,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
/* Check to see if the new rowid already exists in the table. Skip
** the following conflict logic if it does not. */
sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, addrRowidOk, regNewData);
- VdbeCoverage(v);
/* Generate code that deals with a rowid collision */
switch( onError ){
@@ -103132,20 +94226,17 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
if( pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0) ){
sqlite3MultiWrite(pParse);
sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
- regNewData, 1, 0, OE_Replace,
- ONEPASS_SINGLE, -1);
- }else{
- if( pTab->pIndex ){
- sqlite3MultiWrite(pParse);
- sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,-1);
- }
+ regNewData, 1, 0, OE_Replace, 1);
+ }else if( pTab->pIndex ){
+ sqlite3MultiWrite(pParse);
+ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, 0);
}
seenReplace = 1;
break;
}
case OE_Ignore: {
/*assert( seenReplace==0 );*/
- sqlite3VdbeGoto(v, ignoreDest);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest);
break;
}
}
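The OE_Replace and OE_Ignore branches above correspond to INSERT OR REPLACE and INSERT OR IGNORE at the SQL level; a minimal public-API sketch of the observable difference:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t(id INTEGER PRIMARY KEY, v TEXT);"
    "INSERT INTO t VALUES(1, 'old');"
    "INSERT OR IGNORE  INTO t VALUES(1, 'ignored');"    /* OE_Ignore: row kept    */
    "INSERT OR REPLACE INTO t VALUES(1, 'replaced');",  /* OE_Replace: row swapped */
    0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT v FROM t WHERE id=1", -1, &stmt, 0);
  if( sqlite3_step(stmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(stmt, 0));  /* replaced */
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}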
@@ -103170,10 +94261,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */
if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */
- if( bAffinityDone==0 ){
- sqlite3TableAffinity(v, pTab, regNewData+1);
- bAffinityDone = 1;
- }
iThisCur = iIdxCur+ix;
addrUniqueOk = sqlite3VdbeMakeLabel(v);
@@ -103181,8 +94268,8 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
if( pIdx->pPartIdxWhere ){
sqlite3VdbeAddOp2(v, OP_Null, 0, aRegIdx[ix]);
pParse->ckBase = regNewData+1;
- sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, addrUniqueOk,
- SQLITE_JUMPIFNULL);
+ sqlite3ExprIfFalse(pParse, pIdx->pPartIdxWhere, addrUniqueOk,
+ SQLITE_JUMPIFNULL);
pParse->ckBase = 0;
}
@@ -103193,24 +94280,16 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
for(i=0; i<pIdx->nColumn; i++){
int iField = pIdx->aiColumn[i];
int x;
- if( iField==XN_EXPR ){
- pParse->ckBase = regNewData+1;
- sqlite3ExprCode(pParse, pIdx->aColExpr->a[i].pExpr, regIdx+i);
- pParse->ckBase = 0;
- VdbeComment((v, "%s column %d", pIdx->zName, i));
+ if( iField<0 || iField==pTab->iPKey ){
+ x = regNewData;
}else{
- if( iField==XN_ROWID || iField==pTab->iPKey ){
- if( regRowid==regIdx+i ) continue; /* ROWID already in regIdx+i */
- x = regNewData;
- regRowid = pIdx->pPartIdxWhere ? -1 : regIdx+i;
- }else{
- x = iField + regNewData + 1;
- }
- sqlite3VdbeAddOp2(v, OP_SCopy, x, regIdx+i);
- VdbeComment((v, "%s", iField<0 ? "rowid" : pTab->aCol[iField].zName));
+ x = iField + regNewData + 1;
}
+ sqlite3VdbeAddOp2(v, OP_SCopy, x, regIdx+i);
+ VdbeComment((v, "%s", iField<0 ? "rowid" : pTab->aCol[iField].zName));
}
sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]);
+ sqlite3VdbeChangeP4(v, -1, sqlite3IndexAffinityStr(v, pIdx), P4_TRANSIENT);
VdbeComment((v, "for %s", pIdx->zName));
sqlite3ExprCacheAffinityChange(pParse, regIdx, pIdx->nColumn);
@@ -103238,60 +94317,51 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
/* Check to see if the new index entry will be unique */
sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk,
- regIdx, pIdx->nKeyCol); VdbeCoverage(v);
+ regIdx, pIdx->nKeyCol);
/* Generate code to handle collisions */
regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField);
- if( isUpdate || onError==OE_Replace ){
- if( HasRowid(pTab) ){
- sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR);
- /* Conflict only if the rowid of the existing index entry
- ** is different from old-rowid */
- if( isUpdate ){
- sqlite3VdbeAddOp3(v, OP_Eq, regR, addrUniqueOk, regOldData);
- sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
- VdbeCoverage(v);
- }
- }else{
- int x;
- /* Extract the PRIMARY KEY from the end of the index entry and
- ** store it in registers regR..regR+nPk-1 */
- if( pIdx!=pPk ){
- for(i=0; i<pPk->nKeyCol; i++){
- assert( pPk->aiColumn[i]>=0 );
- x = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[i]);
- sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i);
- VdbeComment((v, "%s.%s", pTab->zName,
- pTab->aCol[pPk->aiColumn[i]].zName));
- }
- }
- if( isUpdate ){
- /* If currently processing the PRIMARY KEY of a WITHOUT ROWID
- ** table, only conflict if the new PRIMARY KEY values are actually
- ** different from the old.
- **
- ** For a UNIQUE index, only conflict if the PRIMARY KEY values
- ** of the matched index row are different from the original PRIMARY
- ** KEY values of this row before the update. */
- int addrJump = sqlite3VdbeCurrentAddr(v)+pPk->nKeyCol;
- int op = OP_Ne;
- int regCmp = (IsPrimaryKeyIndex(pIdx) ? regIdx : regR);
-
- for(i=0; i<pPk->nKeyCol; i++){
- char *p4 = (char*)sqlite3LocateCollSeq(pParse, pPk->azColl[i]);
- x = pPk->aiColumn[i];
- assert( x>=0 );
- if( i==(pPk->nKeyCol-1) ){
- addrJump = addrUniqueOk;
- op = OP_Eq;
- }
- sqlite3VdbeAddOp4(v, op,
- regOldData+1+x, addrJump, regCmp+i, p4, P4_COLLSEQ
- );
- sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
- VdbeCoverageIf(v, op==OP_Eq);
- VdbeCoverageIf(v, op==OP_Ne);
+ if( HasRowid(pTab) ){
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR);
+ /* Conflict only if the rowid of the existing index entry
+ ** is different from old-rowid */
+ if( isUpdate ){
+ sqlite3VdbeAddOp3(v, OP_Eq, regR, addrUniqueOk, regOldData);
+ }
+ }else{
+ int x;
+ /* Extract the PRIMARY KEY from the end of the index entry and
+ ** store it in registers regR..regR+nPk-1 */
+ if( (isUpdate || onError==OE_Replace) && pIdx!=pPk ){
+ for(i=0; i<pPk->nKeyCol; i++){
+ x = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[i]);
+ sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i);
+ VdbeComment((v, "%s.%s", pTab->zName,
+ pTab->aCol[pPk->aiColumn[i]].zName));
+ }
+ }
+ if( isUpdate ){
+ /* If currently processing the PRIMARY KEY of a WITHOUT ROWID
+ ** table, only conflict if the new PRIMARY KEY values are actually
+ ** different from the old.
+ **
+ ** For a UNIQUE index, only conflict if the PRIMARY KEY values
+ ** of the matched index row are different from the original PRIMARY
+ ** KEY values of this row before the update. */
+ int addrJump = sqlite3VdbeCurrentAddr(v)+pPk->nKeyCol;
+ int op = OP_Ne;
+ int regCmp = (pIdx->autoIndex==2 ? regIdx : regR);
+
+ for(i=0; i<pPk->nKeyCol; i++){
+ char *p4 = (char*)sqlite3LocateCollSeq(pParse, pPk->azColl[i]);
+ x = pPk->aiColumn[i];
+ if( i==(pPk->nKeyCol-1) ){
+ addrJump = addrUniqueOk;
+ op = OP_Eq;
}
+ sqlite3VdbeAddOp4(v, op,
+ regOldData+1+x, addrJump, regCmp+i, p4, P4_COLLSEQ
+ );
}
}
}
@@ -103307,7 +94377,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
break;
}
case OE_Ignore: {
- sqlite3VdbeGoto(v, ignoreDest);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest);
break;
}
default: {
@@ -103318,8 +94388,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0);
}
sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
- regR, nPkField, 0, OE_Replace,
- (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), -1);
+ regR, nPkField, 0, OE_Replace, pIdx==pPk);
seenReplace = 1;
break;
}
@@ -103329,7 +94398,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField);
}
if( ipkTop ){
- sqlite3VdbeGoto(v, ipkTop+1);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, ipkTop+1);
sqlite3VdbeJumpHere(v, ipkBottom);
}
@@ -103361,24 +94430,21 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
Index *pIdx; /* An index being inserted or updated */
u8 pik_flags; /* flag values passed to the btree insert */
int regData; /* Content registers (after the rowid) */
- int regRec; /* Register holding assembled record for the table */
+ int regRec; /* Register holding assemblied record for the table */
int i; /* Loop counter */
- u8 bAffinityDone = 0; /* True if OP_Affinity has been run already */
v = sqlite3GetVdbe(pParse);
assert( v!=0 );
assert( pTab->pSelect==0 ); /* This table is not a VIEW */
for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
if( aRegIdx[i]==0 ) continue;
- bAffinityDone = 1;
if( pIdx->pPartIdxWhere ){
sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
}
sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i]);
pik_flags = 0;
if( useSeekResult ) pik_flags = OPFLAG_USESEEKRESULT;
- if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
+ if( pIdx->autoIndex==2 && !HasRowid(pTab) ){
assert( pParse->nested==0 );
pik_flags |= OPFLAG_NCHANGE;
}
@@ -103388,7 +94454,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
regData = regNewData + 1;
regRec = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec);
- if( !bAffinityDone ) sqlite3TableAffinity(v, pTab, 0);
+ sqlite3TableAffinityStr(v, pTab);
sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol);
if( pParse->nested ){
pik_flags = 0;
@@ -103426,9 +94492,6 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
** For a WITHOUT ROWID table, *piDataCur will be somewhere in the range
** of *piIdxCurs, depending on where the PRIMARY KEY index appears on the
** pTab->pIndex list.
-**
-** If pTab is a virtual table, then this routine is a no-op and the
-** *piDataCur and *piIdxCur values are left uninitialized.
*/
SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
Parse *pParse, /* Parsing context */
@@ -103447,9 +94510,9 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
assert( op==OP_OpenRead || op==OP_OpenWrite );
if( IsVirtual(pTab) ){
- /* This routine is a no-op for virtual tables. Leave the output
- ** variables *piDataCur and *piIdxCur uninitialized so that valgrind
- ** can detect if they are used by mistake in the caller. */
+ assert( aToOpen==0 );
+ *piDataCur = 0;
+ *piIdxCur = 1;
return 0;
}
iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
@@ -103467,7 +94530,7 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
int iIdxCur = iBase++;
assert( pIdx->pSchema==pTab->pSchema );
- if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) && piDataCur ){
+ if( pIdx->autoIndex==2 && !HasRowid(pTab) && piDataCur ){
*piDataCur = iIdxCur;
}
if( aToOpen==0 || aToOpen[i+1] ){
@@ -103486,7 +94549,7 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
** The following global variable is incremented whenever the
** transfer optimization is used. This is used for testing
** purposes only - to make sure the transfer optimization really
-** is happening when it is supposed to.
+** is happening when it is suppose to.
*/
SQLITE_API int sqlite3_xferopt_count;
#endif /* SQLITE_TEST */
@@ -103532,13 +94595,6 @@ static int xferCompatibleIndex(Index *pDest, Index *pSrc){
if( pSrc->aiColumn[i]!=pDest->aiColumn[i] ){
return 0; /* Different columns indexed */
}
- if( pSrc->aiColumn[i]==XN_EXPR ){
- assert( pSrc->aColExpr!=0 && pDest->aColExpr!=0 );
- if( sqlite3ExprCompare(pSrc->aColExpr->a[i].pExpr,
- pDest->aColExpr->a[i].pExpr, -1)!=0 ){
- return 0; /* Different expressions in the index */
- }
- }
if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){
return 0; /* Different sort orders */
}
@@ -103560,7 +94616,7 @@ static int xferCompatibleIndex(Index *pDest, Index *pSrc){
** INSERT INTO tab1 SELECT * FROM tab2;
**
** The xfer optimization transfers raw records from tab2 over to tab1.
-** Columns are not decoded and reassembled, which greatly improves
+** Columns are not decoded and reassemblied, which greatly improves
** performance. Raw index records are transferred in the same way.
**
** The xfer optimization is only attempted if tab1 and tab2 are compatible.
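
For orientation, the statement shape the optimization targets looks like the sketch below; given an open handle db, and assuming tab1 and tab2 are declared identically (same columns, affinities, collations and NOT NULL constraints, no triggers on tab1), the final INSERT is a candidate for the raw-record transfer. Whether the fast path is actually taken also depends on the remaining checks in xferOptimization() below.

  #include "sqlite3.h"

  /* Sketch only: identical declarations make the copy eligible for the
  ** xfer path; otherwise it falls back to the ordinary row-by-row insert. */
  static void bulk_copy(sqlite3 *db){
    sqlite3_exec(db,
      "CREATE TABLE tab2(a INTEGER PRIMARY KEY, b TEXT);"
      "CREATE TABLE tab1(a INTEGER PRIMARY KEY, b TEXT);"
      "INSERT INTO tab1 SELECT * FROM tab2;",   /* candidate for xfer */
      0, 0, 0);
  }
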
@@ -103586,7 +94642,6 @@ static int xferOptimization(
int onError, /* How to handle constraint errors */
int iDbDest /* The database of pDest */
){
- sqlite3 *db = pParse->db;
ExprList *pEList; /* The result set of the SELECT */
Table *pSrc; /* The table in the FROM clause of SELECT */
Index *pSrcIdx, *pDestIdx; /* Source and destination indices */
@@ -103605,12 +94660,6 @@ static int xferOptimization(
if( pSelect==0 ){
return 0; /* Must be of the form INSERT INTO ... SELECT ... */
}
- if( pParse->pWith || pSelect->pWith ){
- /* Do not attempt to process this query if there are an WITH clauses
- ** attached to it. Proceeding may generate a false "no such table: xxx"
- ** error if pSelect reads from a CTE named "xxx". */
- return 0;
- }
if( sqlite3TriggerList(pParse, pDest) ){
return 0; /* tab1 must not have triggers */
}
@@ -103691,27 +94740,18 @@ static int xferOptimization(
return 0; /* Both tables must have the same INTEGER PRIMARY KEY */
}
for(i=0; i<pDest->nCol; i++){
- Column *pDestCol = &pDest->aCol[i];
- Column *pSrcCol = &pSrc->aCol[i];
- if( pDestCol->affinity!=pSrcCol->affinity ){
+ if( pDest->aCol[i].affinity!=pSrc->aCol[i].affinity ){
return 0; /* Affinity must be the same on all columns */
}
- if( !xferCompatibleCollation(pDestCol->zColl, pSrcCol->zColl) ){
+ if( !xferCompatibleCollation(pDest->aCol[i].zColl, pSrc->aCol[i].zColl) ){
return 0; /* Collating sequence must be the same on all columns */
}
- if( pDestCol->notNull && !pSrcCol->notNull ){
+ if( pDest->aCol[i].notNull && !pSrc->aCol[i].notNull ){
return 0; /* tab2 must be NOT NULL if tab1 is */
}
- /* Default values for second and subsequent columns need to match. */
- if( i>0
- && ((pDestCol->zDflt==0)!=(pSrcCol->zDflt==0)
- || (pDestCol->zDflt && strcmp(pDestCol->zDflt, pSrcCol->zDflt)!=0))
- ){
- return 0; /* Default values must be the same for all columns */
- }
}
for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){
- if( IsUniqueIndex(pDestIdx) ){
+ if( pDestIdx->onError!=OE_None ){
destHasUniqueIdx = 1;
}
for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){
@@ -103734,11 +94774,11 @@ static int xferOptimization(
** the extra complication to make this rule less restrictive is probably
** not worth the effort. Ticket [6284df89debdfa61db8073e062908af0c9b6118e]
*/
- if( (db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){
+ if( (pParse->db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){
return 0;
}
#endif
- if( (db->flags & SQLITE_CountRows)!=0 ){
+ if( (pParse->db->flags & SQLITE_CountRows)!=0 ){
return 0; /* xfer opt does not play well with PRAGMA count_changes */
}
@@ -103749,7 +94789,7 @@ static int xferOptimization(
#ifdef SQLITE_TEST
sqlite3_xferopt_count++;
#endif
- iDbSrc = sqlite3SchemaToIndex(db, pSrc->pSchema);
+ iDbSrc = sqlite3SchemaToIndex(pParse->db, pSrc->pSchema);
v = sqlite3GetVdbe(pParse);
sqlite3CodeVerifySchema(pParse, iDbSrc);
iSrc = pParse->nTab++;
@@ -103759,18 +94799,14 @@ static int xferOptimization(
regRowid = sqlite3GetTempReg(pParse);
sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite);
assert( HasRowid(pDest) || destHasUniqueIdx );
- if( (db->flags & SQLITE_Vacuum)==0 && (
- (pDest->iPKey<0 && pDest->pIndex!=0) /* (1) */
+ if( (pDest->iPKey<0 && pDest->pIndex!=0) /* (1) */
|| destHasUniqueIdx /* (2) */
|| (onError!=OE_Abort && onError!=OE_Rollback) /* (3) */
- )){
+ ){
/* In some circumstances, we are able to run the xfer optimization
- ** only if the destination table is initially empty. Unless the
- ** SQLITE_Vacuum flag is set, this block generates code to make
- ** that determination. If SQLITE_Vacuum is set, then the destination
- ** table is always empty.
- **
- ** Conditions under which the destination must be empty:
+ ** only if the destination table is initially empty. This code makes
+ ** that determination. Conditions under which the destination must
+ ** be empty:
**
** (1) There is no INTEGER PRIMARY KEY but there are indices.
** (If the destination is not initially empty, the rowid fields
@@ -103781,17 +94817,16 @@ static int xferOptimization(
**
** (3) onError is something other than OE_Abort and OE_Rollback.
*/
- addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0); VdbeCoverage(v);
- emptyDestTest = sqlite3VdbeAddOp0(v, OP_Goto);
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0);
+ emptyDestTest = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0);
sqlite3VdbeJumpHere(v, addr1);
}
if( HasRowid(pSrc) ){
sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead);
- emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
+ emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0);
if( pDest->iPKey>=0 ){
addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid);
addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid);
- VdbeCoverage(v);
sqlite3RowidConstraint(pParse, onError, pDest);
sqlite3VdbeJumpHere(v, addr2);
autoIncStep(pParse, regAutoinc, regRowid);
@@ -103805,7 +94840,7 @@ static int xferOptimization(
sqlite3VdbeAddOp3(v, OP_Insert, iDest, regData, regRowid);
sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND);
sqlite3VdbeChangeP4(v, -1, pDest->zName, 0);
- sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1);
sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
}else{
@@ -103813,7 +94848,6 @@ static int xferOptimization(
sqlite3TableLock(pParse, iDbSrc, pSrc->tnum, 0, pSrc->zName);
}
for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){
- u8 idxInsFlags = 0;
for(pSrcIdx=pSrc->pIndex; ALWAYS(pSrcIdx); pSrcIdx=pSrcIdx->pNext){
if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break;
}
@@ -103825,44 +94859,15 @@ static int xferOptimization(
sqlite3VdbeSetP4KeyInfo(pParse, pDestIdx);
sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR);
VdbeComment((v, "%s", pDestIdx->zName));
- addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
+ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0);
sqlite3VdbeAddOp2(v, OP_RowKey, iSrc, regData);
- if( db->flags & SQLITE_Vacuum ){
- /* This INSERT command is part of a VACUUM operation, which guarantees
- ** that the destination table is empty. If all indexed columns use
- ** collation sequence BINARY, then it can also be assumed that the
- ** index will be populated by inserting keys in strictly sorted
- ** order. In this case, instead of seeking within the b-tree as part
- ** of every OP_IdxInsert opcode, an OP_Last is added before the
- ** OP_IdxInsert to seek to the point within the b-tree where each key
- ** should be inserted. This is faster.
- **
- ** If any of the indexed columns use a collation sequence other than
- ** BINARY, this optimization is disabled. This is because the user
- ** might change the definition of a collation sequence and then run
- ** a VACUUM command. In that case keys may not be written in strictly
- ** sorted order. */
- for(i=0; i<pSrcIdx->nColumn; i++){
- char *zColl = pSrcIdx->azColl[i];
- assert( zColl!=0 );
- if( sqlite3_stricmp("BINARY", zColl) ) break;
- }
- if( i==pSrcIdx->nColumn ){
- idxInsFlags = OPFLAG_USESEEKRESULT;
- sqlite3VdbeAddOp3(v, OP_Last, iDest, 0, -1);
- }
- }
- if( !HasRowid(pSrc) && pDestIdx->idxType==2 ){
- idxInsFlags |= OPFLAG_NCHANGE;
- }
sqlite3VdbeAddOp3(v, OP_IdxInsert, iDest, regData, 1);
- sqlite3VdbeChangeP5(v, idxInsFlags);
- sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1);
sqlite3VdbeJumpHere(v, addr1);
sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
}
- if( emptySrcTest ) sqlite3VdbeJumpHere(v, emptySrcTest);
+ sqlite3VdbeJumpHere(v, emptySrcTest);
sqlite3ReleaseTempReg(pParse, regRowid);
sqlite3ReleaseTempReg(pParse, regData);
if( emptyDestTest ){
@@ -103895,7 +94900,6 @@ static int xferOptimization(
** accessed by users of the library.
*/
-/* #include "sqliteInt.h" */
/*
** Execute SQL code. Return one of the SQLITE_ success/failure
@@ -103907,7 +94911,7 @@ static int xferOptimization(
** argument to xCallback(). If xCallback=NULL then no callback
** is invoked, even for queries.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
+SQLITE_API int sqlite3_exec(
sqlite3 *db, /* The database on which the SQL executes */
const char *zSql, /* The SQL to be executed */
sqlite3_callback xCallback, /* Invoke this callback routine */
@@ -103924,7 +94928,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
if( zSql==0 ) zSql = "";
sqlite3_mutex_enter(db->mutex);
- sqlite3Error(db, SQLITE_OK);
+ sqlite3Error(db, SQLITE_OK, 0);
while( rc==SQLITE_OK && zSql[0] ){
int nCol;
char **azVals = 0;
@@ -103976,13 +94980,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
}
}
if( xCallback(pArg, nCol, azVals, azCols) ){
- /* EVIDENCE-OF: R-38229-40159 If the callback function to
- ** sqlite3_exec() returns non-zero, then sqlite3_exec() will
- ** return SQLITE_ABORT. */
rc = SQLITE_ABORT;
sqlite3VdbeFinalize((Vdbe *)pStmt);
pStmt = 0;
- sqlite3Error(db, SQLITE_ABORT);
+ sqlite3Error(db, SQLITE_ABORT, 0);
goto exec_out;
}
}
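
As the surrounding code shows, sqlite3_exec() calls the supplied callback once per result row, hands it the column values and names as text, and stops with SQLITE_ABORT if the callback returns non-zero. A small sketch of a typical callback (printing only; the query is illustrative):

  #include <stdio.h>
  #include "sqlite3.h"

  /* Print each row as name=value pairs.  Returning non-zero from here
  ** would make sqlite3_exec() stop and return SQLITE_ABORT. */
  static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
    int i;
    (void)pArg;
    for(i=0; i<nCol; i++){
      printf("%s=%s%c", azCol[i], azVal[i] ? azVal[i] : "NULL",
             i==nCol-1 ? '\n' : ' ');
    }
    return 0;
  }

  static int dump_schema(sqlite3 *db, char **pzErr){
    return sqlite3_exec(db, "SELECT type,name FROM sqlite_master;",
                        print_row, 0, pzErr);
  }
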
@@ -104005,14 +95006,14 @@ exec_out:
sqlite3DbFree(db, azCols);
rc = sqlite3ApiExit(db, rc);
- if( rc!=SQLITE_OK && pzErrMsg ){
+ if( rc!=SQLITE_OK && ALWAYS(rc==sqlite3_errcode(db)) && pzErrMsg ){
int nErrMsg = 1 + sqlite3Strlen30(sqlite3_errmsg(db));
*pzErrMsg = sqlite3Malloc(nErrMsg);
if( *pzErrMsg ){
memcpy(*pzErrMsg, sqlite3_errmsg(db), nErrMsg);
}else{
rc = SQLITE_NOMEM;
- sqlite3Error(db, SQLITE_NOMEM);
+ sqlite3Error(db, SQLITE_NOMEM, 0);
}
}else if( pzErrMsg ){
*pzErrMsg = 0;
@@ -104064,7 +95065,6 @@ exec_out:
*/
#ifndef _SQLITE3EXT_H_
#define _SQLITE3EXT_H_
-/* #include "sqlite3.h" */
typedef struct sqlite3_api_routines sqlite3_api_routines;
@@ -104075,7 +95075,7 @@ typedef struct sqlite3_api_routines sqlite3_api_routines;
** WARNING: In order to maintain backwards compatibility, add new
** interfaces to the end of this structure only. If you insert new
** interfaces in the middle of this structure, then older different
-** versions of SQLite will not be able to load each other's shared
+** versions of SQLite will not be able to load each others' shared
** libraries!
*/
struct sqlite3_api_routines {
@@ -104297,36 +95297,11 @@ struct sqlite3_api_routines {
const char *(*uri_parameter)(const char*,const char*);
char *(*vsnprintf)(int,char*,const char*,va_list);
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
- /* Version 3.8.7 and later */
- int (*auto_extension)(void(*)(void));
- int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
- void(*)(void*));
- int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
- void(*)(void*),unsigned char);
- int (*cancel_auto_extension)(void(*)(void));
- int (*load_extension)(sqlite3*,const char*,const char*,char**);
- void *(*malloc64)(sqlite3_uint64);
- sqlite3_uint64 (*msize)(void*);
- void *(*realloc64)(void*,sqlite3_uint64);
- void (*reset_auto_extension)(void);
- void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
- void(*)(void*));
- void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
- void(*)(void*), unsigned char);
- int (*strglob)(const char*,const char*);
- /* Version 3.8.11 and later */
- sqlite3_value *(*value_dup)(const sqlite3_value*);
- void (*value_free)(sqlite3_value*);
- int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
- int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
- /* Version 3.9.0 and later */
- unsigned int (*value_subtype)(sqlite3_value*);
- void (*result_subtype)(sqlite3_context*,unsigned int);
};
/*
** The following macros redefine the API routines so that they are
-** redirected through the global sqlite3_api structure.
+** redirected throught the global sqlite3_api structure.
**
** This header file is also used by the loadext.c source file
** (part of the main SQLite library - not an extension) so that
@@ -104335,7 +95310,7 @@ struct sqlite3_api_routines {
** the API. So the redefinition macros are only valid if the
** SQLITE_CORE macros is undefined.
*/
-#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+#ifndef SQLITE_CORE
#define sqlite3_aggregate_context sqlite3_api->aggregate_context
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_aggregate_count sqlite3_api->aggregate_count
@@ -104462,7 +95437,6 @@ struct sqlite3_api_routines {
#define sqlite3_value_text16le sqlite3_api->value_text16le
#define sqlite3_value_type sqlite3_api->value_type
#define sqlite3_vmprintf sqlite3_api->vmprintf
-#define sqlite3_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_overload_function sqlite3_api->overload_function
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
@@ -104540,30 +95514,9 @@ struct sqlite3_api_routines {
#define sqlite3_uri_parameter sqlite3_api->uri_parameter
#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
-/* Version 3.8.7 and later */
-#define sqlite3_auto_extension sqlite3_api->auto_extension
-#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
-#define sqlite3_bind_text64 sqlite3_api->bind_text64
-#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
-#define sqlite3_load_extension sqlite3_api->load_extension
-#define sqlite3_malloc64 sqlite3_api->malloc64
-#define sqlite3_msize sqlite3_api->msize
-#define sqlite3_realloc64 sqlite3_api->realloc64
-#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
-#define sqlite3_result_blob64 sqlite3_api->result_blob64
-#define sqlite3_result_text64 sqlite3_api->result_text64
-#define sqlite3_strglob sqlite3_api->strglob
-/* Version 3.8.11 and later */
-#define sqlite3_value_dup sqlite3_api->value_dup
-#define sqlite3_value_free sqlite3_api->value_free
-#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
-#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
-/* Version 3.9.0 and later */
-#define sqlite3_value_subtype sqlite3_api->value_subtype
-#define sqlite3_result_subtype sqlite3_api->result_subtype
-#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
-
-#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+#endif /* SQLITE_CORE */
+
+#ifndef SQLITE_CORE
/* This case when the file really is being compiled as a loadable
** extension */
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
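
The net effect of sqlite3ext.h is that code compiled as a loadable extension reaches the library through the sqlite3_api pointer instead of direct symbol references. A skeleton of the conventional entry point is sketched below; the body is a placeholder where a real extension would register its functions.

  #include "sqlite3ext.h"
  SQLITE_EXTENSION_INIT1      /* defines the local sqlite3_api pointer */

  /* Legacy default entry point name, used when sqlite3_load_extension()
  ** is called without an explicit procedure name. */
  int sqlite3_extension_init(
    sqlite3 *db,
    char **pzErrMsg,
    const sqlite3_api_routines *pApi
  ){
    SQLITE_EXTENSION_INIT2(pApi);   /* route sqlite3_* calls through pApi */
    (void)db; (void)pzErrMsg;
    /* ...create functions, collations or virtual tables here... */
    return SQLITE_OK;
  }
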
@@ -104582,7 +95535,6 @@ struct sqlite3_api_routines {
/************** End of sqlite3ext.h ******************************************/
/************** Continuing where we left off in loadext.c ********************/
-/* #include "sqliteInt.h" */
/* #include <string.h> */
#ifndef SQLITE_OMIT_LOAD_EXTENSION
@@ -104599,6 +95551,7 @@ struct sqlite3_api_routines {
# define sqlite3_column_table_name16 0
# define sqlite3_column_origin_name 0
# define sqlite3_column_origin_name16 0
+# define sqlite3_table_column_metadata 0
#endif
#ifdef SQLITE_OMIT_AUTHORIZATION
@@ -104954,28 +95907,7 @@ static const sqlite3_api_routines sqlite3Apis = {
sqlite3_uri_int64,
sqlite3_uri_parameter,
sqlite3_vsnprintf,
- sqlite3_wal_checkpoint_v2,
- /* Version 3.8.7 and later */
- sqlite3_auto_extension,
- sqlite3_bind_blob64,
- sqlite3_bind_text64,
- sqlite3_cancel_auto_extension,
- sqlite3_load_extension,
- sqlite3_malloc64,
- sqlite3_msize,
- sqlite3_realloc64,
- sqlite3_reset_auto_extension,
- sqlite3_result_blob64,
- sqlite3_result_text64,
- sqlite3_strglob,
- /* Version 3.8.11 and later */
- (sqlite3_value*(*)(const sqlite3_value*))sqlite3_value_dup,
- sqlite3_value_free,
- sqlite3_result_zeroblob64,
- sqlite3_bind_zeroblob64,
- /* Version 3.9.0 and later */
- sqlite3_value_subtype,
- sqlite3_result_subtype
+ sqlite3_wal_checkpoint_v2
};
/*
@@ -105003,7 +95935,7 @@ static int sqlite3LoadExtension(
const char *zEntry;
char *zAltEntry = 0;
void **aHandle;
- u64 nMsg = 300 + sqlite3Strlen30(zFile);
+ int nMsg = 300 + sqlite3Strlen30(zFile);
int ii;
/* Shared library endings to try if zFile cannot be loaded as written */
@@ -105046,7 +95978,7 @@ static int sqlite3LoadExtension(
#endif
if( handle==0 ){
if( pzErrMsg ){
- *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg);
+ *pzErrMsg = zErrmsg = sqlite3_malloc(nMsg);
if( zErrmsg ){
sqlite3_snprintf(nMsg, zErrmsg,
"unable to open shared library [%s]", zFile);
@@ -105072,7 +96004,7 @@ static int sqlite3LoadExtension(
if( xInit==0 && zProc==0 ){
int iFile, iEntry, c;
int ncFile = sqlite3Strlen30(zFile);
- zAltEntry = sqlite3_malloc64(ncFile+30);
+ zAltEntry = sqlite3_malloc(ncFile+30);
if( zAltEntry==0 ){
sqlite3OsDlClose(pVfs, handle);
return SQLITE_NOMEM;
@@ -105094,7 +96026,7 @@ static int sqlite3LoadExtension(
if( xInit==0 ){
if( pzErrMsg ){
nMsg += sqlite3Strlen30(zEntry);
- *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg);
+ *pzErrMsg = zErrmsg = sqlite3_malloc(nMsg);
if( zErrmsg ){
sqlite3_snprintf(nMsg, zErrmsg,
"no entry point [%s] in shared library [%s]", zEntry, zFile);
@@ -105129,7 +96061,7 @@ static int sqlite3LoadExtension(
db->aExtension[db->nExtension++] = handle;
return SQLITE_OK;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
+SQLITE_API int sqlite3_load_extension(
sqlite3 *db, /* Load the extension into this database connection */
const char *zFile, /* Name of the shared library containing extension */
const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */
@@ -105160,7 +96092,7 @@ SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){
** Enable or disable extension loading. Extension loading is disabled by
** default so as not to open security holes in older applications.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff){
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){
sqlite3_mutex_enter(db->mutex);
if( onoff ){
db->flags |= SQLITE_LoadExtension;
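
Because loading is off by default, an application has to opt in before sqlite3_load_extension() will do anything. A short sketch of the usual pattern (the shared-library path is illustrative and the default entry point is used):

  #include <stdio.h>
  #include "sqlite3.h"

  static int load_my_extension(sqlite3 *db){
    char *zErr = 0;
    int rc;
    sqlite3_enable_load_extension(db, 1);              /* opt in */
    rc = sqlite3_load_extension(db, "./myext.so", 0, &zErr);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "load failed: %s\n", zErr ? zErr : "unknown error");
      sqlite3_free(zErr);
    }
    sqlite3_enable_load_extension(db, 0);              /* opt back out */
    return rc;
  }
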
@@ -105180,7 +96112,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
** dummy pointer.
*/
#ifdef SQLITE_OMIT_LOAD_EXTENSION
-static const sqlite3_api_routines sqlite3Apis;
+static const sqlite3_api_routines sqlite3Apis = { 0 };
#endif
@@ -105193,7 +96125,7 @@ static const sqlite3_api_routines sqlite3Apis;
*/
typedef struct sqlite3AutoExtList sqlite3AutoExtList;
static SQLITE_WSD struct sqlite3AutoExtList {
- u32 nExt; /* Number of entries in aExt[] */
+ int nExt; /* Number of entries in aExt[] */
void (**aExt)(void); /* Pointers to the extension init functions */
} sqlite3Autoext = { 0, 0 };
@@ -105217,7 +96149,7 @@ static SQLITE_WSD struct sqlite3AutoExtList {
** Register a statically linked extension that is automatically
** loaded by every new database connection.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){
+SQLITE_API int sqlite3_auto_extension(void (*xInit)(void)){
int rc = SQLITE_OK;
#ifndef SQLITE_OMIT_AUTOINIT
rc = sqlite3_initialize();
@@ -105226,7 +96158,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){
}else
#endif
{
- u32 i;
+ int i;
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
#endif
@@ -105236,9 +96168,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){
if( wsdAutoext.aExt[i]==xInit ) break;
}
if( i==wsdAutoext.nExt ){
- u64 nByte = (wsdAutoext.nExt+1)*sizeof(wsdAutoext.aExt[0]);
+ int nByte = (wsdAutoext.nExt+1)*sizeof(wsdAutoext.aExt[0]);
void (**aNew)(void);
- aNew = sqlite3_realloc64(wsdAutoext.aExt, nByte);
+ aNew = sqlite3_realloc(wsdAutoext.aExt, nByte);
if( aNew==0 ){
rc = SQLITE_NOMEM;
}else{
@@ -105262,7 +96194,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){
** Return 1 if xInit was found on the list and removed. Return 0 if xInit
** was not on the list.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xInit)(void)){
+SQLITE_API int sqlite3_cancel_auto_extension(void (*xInit)(void)){
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
#endif
@@ -105270,7 +96202,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xInit)(void))
int n = 0;
wsdAutoextInit;
sqlite3_mutex_enter(mutex);
- for(i=(int)wsdAutoext.nExt-1; i>=0; i--){
+ for(i=wsdAutoext.nExt-1; i>=0; i--){
if( wsdAutoext.aExt[i]==xInit ){
wsdAutoext.nExt--;
wsdAutoext.aExt[i] = wsdAutoext.aExt[wsdAutoext.nExt];
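
The auto-extension list holds init functions that every subsequently opened connection runs, and sqlite3_cancel_auto_extension() removes an entry again. A sketch of the registration pattern (the init function here is a do-nothing placeholder):

  #include "sqlite3.h"

  struct sqlite3_api_routines;     /* opaque here; only passed through */

  /* Invoked automatically for each new database connection. */
  static int my_auto_init(
    sqlite3 *db,
    char **pzErrMsg,
    const struct sqlite3_api_routines *pApi
  ){
    (void)db; (void)pzErrMsg; (void)pApi;
    return SQLITE_OK;
  }

  /* Typically called once during application start-up and shutdown. */
  static void install_auto_extension(void){
    sqlite3_auto_extension((void(*)(void))my_auto_init);
  }
  static void remove_auto_extension(void){
    sqlite3_cancel_auto_extension((void(*)(void))my_auto_init);
  }
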
@@ -105285,7 +96217,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xInit)(void))
/*
** Reset the automatic extension loading mechanism.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void){
+SQLITE_API void sqlite3_reset_auto_extension(void){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize()==SQLITE_OK )
#endif
@@ -105308,7 +96240,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void){
** If anything goes wrong, set an error in the database connection.
*/
SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
- u32 i;
+ int i;
int go = 1;
int rc;
int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*);
@@ -105334,7 +96266,7 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
sqlite3_mutex_leave(mutex);
zErrmsg = 0;
if( xInit && (rc = xInit(db, &zErrmsg, &sqlite3Apis))!=0 ){
- sqlite3ErrorWithMsg(db, rc,
+ sqlite3Error(db, rc,
"automatic extension loading failed: %s", zErrmsg);
go = 0;
}
@@ -105357,7 +96289,6 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
*************************************************************************
** This file contains code used to implement the PRAGMA command.
*/
-/* #include "sqliteInt.h" */
#if !defined(SQLITE_ENABLE_LOCKING_STYLE)
# if defined(__APPLE__)
@@ -105368,18 +96299,11 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
#endif
/***************************************************************************
-** The "pragma.h" include file is an automatically generated file that
-** that includes the PragType_XXXX macro definitions and the aPragmaName[]
-** object. This ensures that the aPragmaName[] table is arranged in
-** lexicographical order to facility a binary search of the pragma name.
-** Do not edit pragma.h directly. Edit and rerun the script in at
-** ../tool/mkpragmatab.tcl. */
-/************** Include pragma.h in the middle of pragma.c *******************/
-/************** Begin file pragma.h ******************************************/
-/* DO NOT EDIT!
-** This file is automatically generated by the script at
-** ../tool/mkpragmatab.tcl. To update the set of pragmas, edit
-** that script and rerun it.
+** The next block of code, including the PragTyp_XXXX macro definitions and
+** the aPragmaName[] object is composed of generated code. DO NOT EDIT.
+**
+** To add new pragmas, edit the code in ../tool/mkpragmatab.tcl and rerun
+** that script. Then copy/paste the output in place of the following:
*/
#define PragTyp_HEADER_VALUE 0
#define PragTyp_AUTO_VACUUM 1
@@ -105414,17 +96338,15 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
#define PragTyp_TABLE_INFO 30
#define PragTyp_TEMP_STORE 31
#define PragTyp_TEMP_STORE_DIRECTORY 32
-#define PragTyp_THREADS 33
-#define PragTyp_WAL_AUTOCHECKPOINT 34
-#define PragTyp_WAL_CHECKPOINT 35
-#define PragTyp_ACTIVATE_EXTENSIONS 36
-#define PragTyp_HEXKEY 37
-#define PragTyp_KEY 38
-#define PragTyp_REKEY 39
-#define PragTyp_LOCK_STATUS 40
-#define PragTyp_PARSER_TRACE 41
+#define PragTyp_WAL_AUTOCHECKPOINT 33
+#define PragTyp_WAL_CHECKPOINT 34
+#define PragTyp_ACTIVATE_EXTENSIONS 35
+#define PragTyp_HEXKEY 36
+#define PragTyp_KEY 37
+#define PragTyp_REKEY 38
+#define PragTyp_LOCK_STATUS 39
+#define PragTyp_PARSER_TRACE 40
#define PragFlag_NeedSchema 0x01
-#define PragFlag_ReadOnly 0x02
static const struct sPragmaNames {
const char *const zName; /* Name of pragma */
u8 ePragTyp; /* PragTyp_XXX value */
@@ -105441,7 +96363,7 @@ static const struct sPragmaNames {
{ /* zName: */ "application_id",
/* ePragTyp: */ PragTyp_HEADER_VALUE,
/* ePragFlag: */ 0,
- /* iArg: */ BTREE_APPLICATION_ID },
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_AUTOVACUUM)
{ /* zName: */ "auto_vacuum",
@@ -105464,7 +96386,7 @@ static const struct sPragmaNames {
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
{ /* zName: */ "cache_size",
/* ePragTyp: */ PragTyp_CACHE_SIZE,
- /* ePragFlag: */ 0,
+ /* ePragFlag: */ PragFlag_NeedSchema,
/* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
@@ -105477,10 +96399,6 @@ static const struct sPragmaNames {
/* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE,
/* ePragFlag: */ 0,
/* iArg: */ 0 },
- { /* zName: */ "cell_size_check",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_CellSizeCk },
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
{ /* zName: */ "checkpoint_fullfsync",
/* ePragTyp: */ PragTyp_FLAG,
@@ -105511,12 +96429,6 @@ static const struct sPragmaNames {
/* ePragFlag: */ 0,
/* iArg: */ 0 },
#endif
-#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
- { /* zName: */ "data_version",
- /* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ PragFlag_ReadOnly,
- /* iArg: */ BTREE_DATA_VERSION },
-#endif
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
{ /* zName: */ "database_list",
/* ePragTyp: */ PragTyp_DATABASE_LIST,
@@ -105572,8 +96484,8 @@ static const struct sPragmaNames {
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
{ /* zName: */ "freelist_count",
/* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ PragFlag_ReadOnly,
- /* iArg: */ BTREE_FREE_PAGE_COUNT },
+ /* ePragFlag: */ 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
{ /* zName: */ "full_column_names",
@@ -105618,10 +96530,6 @@ static const struct sPragmaNames {
/* ePragTyp: */ PragTyp_INDEX_LIST,
/* ePragFlag: */ PragFlag_NeedSchema,
/* iArg: */ 0 },
- { /* zName: */ "index_xinfo",
- /* ePragTyp: */ PragTyp_INDEX_INFO,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 1 },
#endif
#if !defined(SQLITE_OMIT_INTEGRITY_CHECK)
{ /* zName: */ "integrity_check",
@@ -105729,7 +96637,7 @@ static const struct sPragmaNames {
{ /* zName: */ "schema_version",
/* ePragTyp: */ PragTyp_HEADER_VALUE,
/* ePragFlag: */ 0,
- /* iArg: */ BTREE_SCHEMA_VERSION },
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
{ /* zName: */ "secure_delete",
@@ -105787,15 +96695,11 @@ static const struct sPragmaNames {
/* ePragFlag: */ 0,
/* iArg: */ 0 },
#endif
- { /* zName: */ "threads",
- /* ePragTyp: */ PragTyp_THREADS,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
{ /* zName: */ "user_version",
/* ePragTyp: */ PragTyp_HEADER_VALUE,
/* ePragFlag: */ 0,
- /* iArg: */ BTREE_USER_VERSION },
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if defined(SQLITE_DEBUG)
@@ -105838,10 +96742,9 @@ static const struct sPragmaNames {
/* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode },
#endif
};
-/* Number of pragmas: 60 on by default, 73 total. */
-
-/************** End of pragma.h **********************************************/
-/************** Continuing where we left off in pragma.c *********************/
+/* Number of pragmas: 56 on by default, 69 total. */
+/* End of the automatically generated pragma table.
+***************************************************************************/
/*
** Interpret the given string as a safety level. Return 0 for OFF,
@@ -105854,7 +96757,7 @@ static const struct sPragmaNames {
** to support legacy SQL code. The safety level used to be boolean
** and older scripts may have used numbers 0 for OFF and 1 for ON.
*/
-static u8 getSafetyLevel(const char *z, int omitFull, u8 dflt){
+static u8 getSafetyLevel(const char *z, int omitFull, int dflt){
/* 123456789 123456789 */
static const char zText[] = "onoffalseyestruefull";
static const u8 iOffset[] = {0, 1, 2, 4, 9, 12, 16};
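
getSafetyLevel() is what lets PRAGMA synchronous (and, via sqlite3GetBoolean(), the boolean pragmas) accept either keywords or the legacy numeric values, so at the SQL level the following forms are interchangeable (handle db assumed to be open):

  #include "sqlite3.h"

  static void set_sync_full(sqlite3 *db){
    sqlite3_exec(db, "PRAGMA synchronous=FULL;", 0, 0, 0);  /* keyword form */
    sqlite3_exec(db, "PRAGMA synchronous=2;",    0, 0, 0);  /* legacy numeric form */
    sqlite3_exec(db, "PRAGMA synchronous;",      0, 0, 0);  /* query the current level */
  }
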
@@ -105876,7 +96779,7 @@ static u8 getSafetyLevel(const char *z, int omitFull, u8 dflt){
/*
** Interpret the given string as a boolean value.
*/
-SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z, u8 dflt){
+SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z, int dflt){
return getSafetyLevel(z,1,dflt)!=0;
}
@@ -105973,45 +96876,19 @@ static int changeTempStorage(Parse *pParse, const char *zStorageType){
#endif /* SQLITE_PAGER_PRAGMAS */
/*
-** Set the names of the first N columns to the values in azCol[]
-*/
-static void setAllColumnNames(
- Vdbe *v, /* The query under construction */
- int N, /* Number of columns */
- const char **azCol /* Names of columns */
-){
- int i;
- sqlite3VdbeSetNumCols(v, N);
- for(i=0; i<N; i++){
- sqlite3VdbeSetColName(v, i, COLNAME_NAME, azCol[i], SQLITE_STATIC);
- }
-}
-static void setOneColumnName(Vdbe *v, const char *z){
- setAllColumnNames(v, 1, &z);
-}
-
-/*
** Generate code to return a single integer value.
*/
-static void returnSingleInt(Vdbe *v, const char *zLabel, i64 value){
- sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, 1, 0, (const u8*)&value, P4_INT64);
- setOneColumnName(v, zLabel);
- sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
-}
-
-/*
-** Generate code to return a single text value.
-*/
-static void returnSingleText(
- Vdbe *v, /* Prepared statement under construction */
- const char *zLabel, /* Name of the result column */
- const char *zValue /* Value to be returned */
-){
- if( zValue ){
- sqlite3VdbeLoadString(v, 1, (const char*)zValue);
- setOneColumnName(v, zLabel);
- sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+static void returnSingleInt(Parse *pParse, const char *zLabel, i64 value){
+ Vdbe *v = sqlite3GetVdbe(pParse);
+ int mem = ++pParse->nMem;
+ i64 *pI64 = sqlite3DbMallocRaw(pParse->db, sizeof(value));
+ if( pI64 ){
+ memcpy(pI64, &value, sizeof(value));
}
+ sqlite3VdbeAddOp4(v, OP_Int64, 0, mem, 0, (char*)pI64, P4_INT64);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLabel, SQLITE_STATIC);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, mem, 1);
}
@@ -106115,12 +96992,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
Token *pId; /* Pointer to <id> token */
char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */
int iDb; /* Database index for <database> */
- int lwr, upr, mid = 0; /* Binary search bounds */
+ int lwr, upr, mid; /* Binary search bounds */
int rc; /* return value form SQLITE_FCNTL_PRAGMA */
sqlite3 *db = pParse->db; /* The database connection */
Db *pDb; /* The specific database being pragmaed */
Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */
- const struct sPragmaNames *pPragma;
if( v==0 ) return;
sqlite3VdbeRunOnlyOnce(v);
@@ -106156,17 +97032,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
/* Send an SQLITE_FCNTL_PRAGMA file-control to the underlying VFS
** connection. If it returns SQLITE_OK, then assume that the VFS
** handled the pragma and generate a no-op prepared statement.
- **
- ** IMPLEMENTATION-OF: R-12238-55120 Whenever a PRAGMA statement is parsed,
- ** an SQLITE_FCNTL_PRAGMA file control is sent to the open sqlite3_file
- ** object corresponding to the database file to which the pragma
- ** statement refers.
- **
- ** IMPLEMENTATION-OF: R-29875-31678 The argument to the SQLITE_FCNTL_PRAGMA
- ** file control is an array of pointers to strings (char**) in which the
- ** second element of the array is the name of the pragma and the third
- ** element is the argument to the pragma or NULL if the pragma has no
- ** argument.
*/
aFcntl[0] = 0;
aFcntl[1] = zLeft;
@@ -106175,8 +97040,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
db->busyHandler.nBusy = 0;
rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl);
if( rc==SQLITE_OK ){
- returnSingleText(v, "result", aFcntl[0]);
- sqlite3_free(aFcntl[0]);
+ if( aFcntl[0] ){
+ int mem = ++pParse->nMem;
+ sqlite3VdbeAddOp4(v, OP_String8, 0, mem, 0, aFcntl[0], 0);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "result", SQLITE_STATIC);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, mem, 1);
+ sqlite3_free(aFcntl[0]);
+ }
goto pragma_out;
}
if( rc!=SQLITE_NOTFOUND ){
@@ -106203,15 +97074,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
}
if( lwr>upr ) goto pragma_out;
- pPragma = &aPragmaNames[mid];
/* Make sure the database schema is loaded if the pragma requires that */
- if( (pPragma->mPragFlag & PragFlag_NeedSchema)!=0 ){
+ if( (aPragmaNames[mid].mPragFlag & PragFlag_NeedSchema)!=0 ){
if( sqlite3ReadSchema(pParse) ) goto pragma_out;
}
/* Jump to the appropriate pragma handler */
- switch( pPragma->ePragTyp ){
+ switch( aPragmaNames[mid].ePragTyp ){
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
/*
@@ -106231,7 +97101,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
** size of historical compatibility.
*/
case PragTyp_DEFAULT_CACHE_SIZE: {
- static const int iLn = VDBE_OFFSET_LINENO(2);
static const VdbeOpList getCacheSize[] = {
{ OP_Transaction, 0, 0, 0}, /* 0 */
{ OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 1 */
@@ -106246,9 +97115,10 @@ SQLITE_PRIVATE void sqlite3Pragma(
int addr;
sqlite3VdbeUsesBtree(v, iDb);
if( !zRight ){
- setOneColumnName(v, "cache_size");
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cache_size", SQLITE_STATIC);
pParse->nMem += 2;
- addr = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize,iLn);
+ addr = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize);
sqlite3VdbeChangeP1(v, addr, iDb);
sqlite3VdbeChangeP1(v, addr+1, iDb);
sqlite3VdbeChangeP1(v, addr+6, SQLITE_DEFAULT_CACHE_SIZE);
@@ -106280,7 +97150,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( pBt!=0 );
if( !zRight ){
int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0;
- returnSingleInt(v, "page_size", size);
+ returnSingleInt(pParse, "page_size", size);
}else{
/* Malloc may fail when setting the page-size, as there is an internal
** buffer that the pager module resizes using sqlite3_realloc().
@@ -106315,7 +97185,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
}
b = sqlite3BtreeSecureDelete(pBt, b);
- returnSingleInt(v, "secure_delete", b);
+ returnSingleInt(pParse, "secure_delete", b);
break;
}
@@ -106394,7 +97264,10 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){
zRet = "exclusive";
}
- returnSingleText(v, "locking_mode", zRet);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "locking_mode", SQLITE_STATIC);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zRet, 0);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
break;
}
@@ -106407,7 +97280,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */
int ii; /* Loop counter */
- setOneColumnName(v, "journal_mode");
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "journal_mode", SQLITE_STATIC);
+
if( zRight==0 ){
/* If there is no "=MODE" part of the pragma, do a query for the
** current mode */
@@ -106449,11 +97324,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
Pager *pPager = sqlite3BtreePager(pDb->pBt);
i64 iLimit = -2;
if( zRight ){
- sqlite3DecOrHexToI64(zRight, &iLimit);
+ sqlite3Atoi64(zRight, &iLimit, sqlite3Strlen30(zRight), SQLITE_UTF8);
if( iLimit<-1 ) iLimit = -1;
}
iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit);
- returnSingleInt(v, "journal_size_limit", iLimit);
+ returnSingleInt(pParse, "journal_size_limit", iLimit);
break;
}
@@ -106471,7 +97346,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
Btree *pBt = pDb->pBt;
assert( pBt!=0 );
if( !zRight ){
- returnSingleInt(v, "auto_vacuum", sqlite3BtreeGetAutoVacuum(pBt));
+ returnSingleInt(pParse, "auto_vacuum", sqlite3BtreeGetAutoVacuum(pBt));
}else{
int eAuto = getAutoVacuum(zRight);
assert( eAuto>=0 && eAuto<=2 );
@@ -106488,7 +97363,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
** file. Before writing to meta[6], check that meta[3] indicates
** that this really is an auto-vacuum capable database.
*/
- static const int iLn = VDBE_OFFSET_LINENO(2);
static const VdbeOpList setMeta6[] = {
{ OP_Transaction, 0, 1, 0}, /* 0 */
{ OP_ReadCookie, 0, 1, BTREE_LARGEST_ROOT_PAGE},
@@ -106498,7 +97372,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
{ OP_SetCookie, 0, BTREE_INCR_VACUUM, 1}, /* 5 */
};
int iAddr;
- iAddr = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6, iLn);
+ iAddr = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6);
sqlite3VdbeChangeP1(v, iAddr, iDb);
sqlite3VdbeChangeP1(v, iAddr+1, iDb);
sqlite3VdbeChangeP2(v, iAddr+2, iAddr+4);
@@ -106524,10 +97398,10 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
sqlite3BeginWriteOperation(pParse, 0, iDb);
sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1);
- addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); VdbeCoverage(v);
+ addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb);
sqlite3VdbeAddOp1(v, OP_ResultRow, 1);
sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1);
- sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr);
sqlite3VdbeJumpHere(v, addr);
break;
}
@@ -106548,13 +97422,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
case PragTyp_CACHE_SIZE: {
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
if( !zRight ){
- if( sqlite3ReadSchema(pParse) ) goto pragma_out;
- returnSingleInt(v, "cache_size", pDb->pSchema->cache_size);
+ returnSingleInt(pParse, "cache_size", pDb->pSchema->cache_size);
}else{
int size = sqlite3Atoi(zRight);
pDb->pSchema->cache_size = size;
sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
- if( sqlite3ReadSchema(pParse) ) goto pragma_out;
}
break;
}
@@ -106579,7 +97451,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
if( zRight ){
int ii;
- sqlite3DecOrHexToI64(zRight, &sz);
+ sqlite3Atoi64(zRight, &sz, sqlite3Strlen30(zRight), SQLITE_UTF8);
if( sz<0 ) sz = sqlite3GlobalConfig.szMmap;
if( pId2->n==0 ) db->szMmap = sz;
for(ii=db->nDb-1; ii>=0; ii--){
@@ -106595,7 +97467,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
rc = SQLITE_OK;
#endif
if( rc==SQLITE_OK ){
- returnSingleInt(v, "mmap_size", sz);
+ returnSingleInt(pParse, "mmap_size", sz);
}else if( rc!=SQLITE_NOTFOUND ){
pParse->nErr++;
pParse->rc = rc;
@@ -106616,7 +97488,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_TEMP_STORE: {
if( !zRight ){
- returnSingleInt(v, "temp_store", db->temp_store);
+ returnSingleInt(pParse, "temp_store", db->temp_store);
}else{
changeTempStorage(pParse, zRight);
}
@@ -106635,7 +97507,13 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_TEMP_STORE_DIRECTORY: {
if( !zRight ){
- returnSingleText(v, "temp_store_directory", sqlite3_temp_directory);
+ if( sqlite3_temp_directory ){
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME,
+ "temp_store_directory", SQLITE_STATIC);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_temp_directory, 0);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+ }
}else{
#ifndef SQLITE_OMIT_WSD
if( zRight[0] ){
@@ -106679,7 +97557,13 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_DATA_STORE_DIRECTORY: {
if( !zRight ){
- returnSingleText(v, "data_store_directory", sqlite3_data_directory);
+ if( sqlite3_data_directory ){
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME,
+ "data_store_directory", SQLITE_STATIC);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_data_directory, 0);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+ }
}else{
#ifndef SQLITE_OMIT_WSD
if( zRight[0] ){
@@ -106718,7 +97602,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3_file *pFile = sqlite3PagerFile(pPager);
sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE,
&proxy_file_path);
- returnSingleText(v, "lock_proxy_file", proxy_file_path);
+
+ if( proxy_file_path ){
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME,
+ "lock_proxy_file", SQLITE_STATIC);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, proxy_file_path, 0);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+ }
}else{
Pager *pPager = sqlite3BtreePager(pDb->pBt);
sqlite3_file *pFile = sqlite3PagerFile(pPager);
@@ -106750,15 +97641,13 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_SYNCHRONOUS: {
if( !zRight ){
- returnSingleInt(v, "synchronous", pDb->safety_level-1);
+ returnSingleInt(pParse, "synchronous", pDb->safety_level-1);
}else{
if( !db->autoCommit ){
sqlite3ErrorMsg(pParse,
"Safety level may not be changed inside a transaction");
}else{
- int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK;
- if( iLevel==0 ) iLevel = 1;
- pDb->safety_level = iLevel;
+ pDb->safety_level = getSafetyLevel(zRight,0,1)+1;
setAllPagerFlags(db);
}
}
@@ -106769,20 +97658,15 @@ SQLITE_PRIVATE void sqlite3Pragma(
#ifndef SQLITE_OMIT_FLAG_PRAGMAS
case PragTyp_FLAG: {
if( zRight==0 ){
- returnSingleInt(v, pPragma->zName, (db->flags & pPragma->iArg)!=0 );
+ returnSingleInt(pParse, aPragmaNames[mid].zName,
+ (db->flags & aPragmaNames[mid].iArg)!=0 );
}else{
- int mask = pPragma->iArg; /* Mask of bits to set or clear. */
+ int mask = aPragmaNames[mid].iArg; /* Mask of bits to set or clear. */
if( db->autoCommit==0 ){
/* Foreign key support may not be enabled or disabled while not
** in auto-commit mode. */
mask &= ~(SQLITE_ForeignKeys);
}
-#if SQLITE_USER_AUTHENTICATION
- if( db->auth.authLevel==UAUTH_User ){
- /* Do not allow non-admin users to modify the schema arbitrarily */
- mask &= ~(SQLITE_WriteSchema);
- }
-#endif
if( sqlite3GetBoolean(zRight, 0) ){
db->flags |= mask;
@@ -106819,36 +97703,43 @@ SQLITE_PRIVATE void sqlite3Pragma(
Table *pTab;
pTab = sqlite3FindTable(db, zRight, zDb);
if( pTab ){
- static const char *azCol[] = {
- "cid", "name", "type", "notnull", "dflt_value", "pk"
- };
int i, k;
int nHidden = 0;
Column *pCol;
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ sqlite3VdbeSetNumCols(v, 6);
pParse->nMem = 6;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 6, azCol); assert( 6==ArraySize(azCol) );
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cid", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "type", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "notnull", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "dflt_value", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "pk", SQLITE_STATIC);
sqlite3ViewGetColumnNames(pParse, pTab);
for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
if( IsHiddenColumn(pCol) ){
nHidden++;
continue;
}
+ sqlite3VdbeAddOp2(v, OP_Integer, i-nHidden, 1);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pCol->zName, 0);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
+ pCol->zType ? pCol->zType : "", 0);
+ sqlite3VdbeAddOp2(v, OP_Integer, (pCol->notNull ? 1 : 0), 4);
+ if( pCol->zDflt ){
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 5, 0, (char*)pCol->zDflt, 0);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, 5);
+ }
if( (pCol->colFlags & COLFLAG_PRIMKEY)==0 ){
k = 0;
}else if( pPk==0 ){
k = 1;
}else{
- for(k=1; k<=pTab->nCol && pPk->aiColumn[k-1]!=i; k++){}
- }
- sqlite3VdbeMultiLoad(v, 1, "issisi",
- i-nHidden,
- pCol->zName,
- pCol->zType ? pCol->zType : "",
- pCol->notNull ? 1 : 0,
- pCol->zDflt,
- k);
+ for(k=1; ALWAYS(k<=pTab->nCol) && pPk->aiColumn[k-1]!=i; k++){}
+ }
+ sqlite3VdbeAddOp2(v, OP_Integer, k, 6);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 6);
}
}
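
The code above is the generator behind PRAGMA table_info, which returns one row per visible column with the columns cid, name, type, notnull, dflt_value and pk. A sketch that reads that result through the prepared-statement interface (the table name t1 is illustrative):

  #include <stdio.h>
  #include "sqlite3.h"

  static void show_columns(sqlite3 *db){
    sqlite3_stmt *pStmt = 0;
    if( sqlite3_prepare_v2(db, "PRAGMA table_info(t1);", -1, &pStmt, 0)==SQLITE_OK ){
      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("cid=%d name=%s type=%s notnull=%d pk=%d\n",
               sqlite3_column_int(pStmt, 0),
               (const char*)sqlite3_column_text(pStmt, 1),
               (const char*)sqlite3_column_text(pStmt, 2),
               sqlite3_column_int(pStmt, 3),
               sqlite3_column_int(pStmt, 5));
      }
    }
    sqlite3_finalize(pStmt);
  }
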
@@ -106856,26 +97747,29 @@ SQLITE_PRIVATE void sqlite3Pragma(
break;
case PragTyp_STATS: {
- static const char *azCol[] = { "table", "index", "width", "height" };
Index *pIdx;
HashElem *i;
v = sqlite3GetVdbe(pParse);
+ sqlite3VdbeSetNumCols(v, 4);
pParse->nMem = 4;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 4, azCol); assert( 4==ArraySize(azCol) );
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "table", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "index", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "width", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "height", SQLITE_STATIC);
for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){
Table *pTab = sqliteHashData(i);
- sqlite3VdbeMultiLoad(v, 1, "ssii",
- pTab->zName,
- 0,
- (int)sqlite3LogEstToInt(pTab->szTabRow),
- (int)sqlite3LogEstToInt(pTab->nRowLogEst));
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, pTab->zName, 0);
+ sqlite3VdbeAddOp2(v, OP_Null, 0, 2);
+ sqlite3VdbeAddOp2(v, OP_Integer,
+ (int)sqlite3LogEstToInt(pTab->szTabRow), 3);
+ sqlite3VdbeAddOp2(v, OP_Integer, (int)pTab->nRowEst, 4);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4);
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- sqlite3VdbeMultiLoad(v, 2, "sii",
- pIdx->zName,
- (int)sqlite3LogEstToInt(pIdx->szIdxRow),
- (int)sqlite3LogEstToInt(pIdx->aiRowLogEst[0]));
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0);
+ sqlite3VdbeAddOp2(v, OP_Integer,
+ (int)sqlite3LogEstToInt(pIdx->szIdxRow), 3);
+ sqlite3VdbeAddOp2(v, OP_Integer, (int)pIdx->aiRowEst[0], 4);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4);
}
}
@@ -106887,35 +97781,21 @@ SQLITE_PRIVATE void sqlite3Pragma(
Table *pTab;
pIdx = sqlite3FindIndex(db, zRight, zDb);
if( pIdx ){
- static const char *azCol[] = {
- "seqno", "cid", "name", "desc", "coll", "key"
- };
int i;
- int mx;
- if( pPragma->iArg ){
- /* PRAGMA index_xinfo (newer version with more rows and columns) */
- mx = pIdx->nColumn;
- pParse->nMem = 6;
- }else{
- /* PRAGMA index_info (legacy version) */
- mx = pIdx->nKeyCol;
- pParse->nMem = 3;
- }
pTab = pIdx->pTable;
+ sqlite3VdbeSetNumCols(v, 3);
+ pParse->nMem = 3;
sqlite3CodeVerifySchema(pParse, iDb);
- assert( pParse->nMem<=ArraySize(azCol) );
- setAllColumnNames(v, pParse->nMem, azCol);
- for(i=0; i<mx; i++){
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seqno", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "cid", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "name", SQLITE_STATIC);
+ for(i=0; i<pIdx->nKeyCol; i++){
i16 cnum = pIdx->aiColumn[i];
- sqlite3VdbeMultiLoad(v, 1, "iis", i, cnum,
- cnum<0 ? 0 : pTab->aCol[cnum].zName);
- if( pPragma->iArg ){
- sqlite3VdbeMultiLoad(v, 4, "isi",
- pIdx->aSortOrder[i],
- pIdx->azColl[i],
- i<pIdx->nKeyCol);
- }
- sqlite3VdbeAddOp2(v, OP_ResultRow, 1, pParse->nMem);
+ sqlite3VdbeAddOp2(v, OP_Integer, i, 1);
+ sqlite3VdbeAddOp2(v, OP_Integer, cnum, 2);
+ assert( pTab->nCol>cnum );
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pTab->aCol[cnum].zName, 0);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
}
}
}
@@ -106927,53 +97807,53 @@ SQLITE_PRIVATE void sqlite3Pragma(
int i;
pTab = sqlite3FindTable(db, zRight, zDb);
if( pTab ){
- static const char *azCol[] = {
- "seq", "name", "unique", "origin", "partial"
- };
v = sqlite3GetVdbe(pParse);
- pParse->nMem = 5;
+ sqlite3VdbeSetNumCols(v, 3);
+ pParse->nMem = 3;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 5, azCol); assert( 5==ArraySize(azCol) );
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "unique", SQLITE_STATIC);
for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){
- const char *azOrigin[] = { "c", "u", "pk" };
- sqlite3VdbeMultiLoad(v, 1, "isisi",
- i,
- pIdx->zName,
- IsUniqueIndex(pIdx),
- azOrigin[pIdx->idxType],
- pIdx->pPartIdxWhere!=0);
- sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 5);
+ sqlite3VdbeAddOp2(v, OP_Integer, i, 1);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0);
+ sqlite3VdbeAddOp2(v, OP_Integer, pIdx->onError!=OE_None, 3);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
}
}
}
break;
case PragTyp_DATABASE_LIST: {
- static const char *azCol[] = { "seq", "name", "file" };
int i;
+ sqlite3VdbeSetNumCols(v, 3);
pParse->nMem = 3;
- setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) );
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "file", SQLITE_STATIC);
for(i=0; i<db->nDb; i++){
if( db->aDb[i].pBt==0 ) continue;
assert( db->aDb[i].zName!=0 );
- sqlite3VdbeMultiLoad(v, 1, "iss",
- i,
- db->aDb[i].zName,
- sqlite3BtreeGetFilename(db->aDb[i].pBt));
+ sqlite3VdbeAddOp2(v, OP_Integer, i, 1);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, db->aDb[i].zName, 0);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
+ sqlite3BtreeGetFilename(db->aDb[i].pBt), 0);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
}
}
break;
case PragTyp_COLLATION_LIST: {
- static const char *azCol[] = { "seq", "name" };
int i = 0;
HashElem *p;
+ sqlite3VdbeSetNumCols(v, 2);
pParse->nMem = 2;
- setAllColumnNames(v, 2, azCol); assert( 2==ArraySize(azCol) );
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);
for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){
CollSeq *pColl = (CollSeq *)sqliteHashData(p);
- sqlite3VdbeMultiLoad(v, 1, "is", i++, pColl->zName);
+ sqlite3VdbeAddOp2(v, OP_Integer, i++, 1);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pColl->zName, 0);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2);
}
}
@@ -106989,26 +97869,33 @@ SQLITE_PRIVATE void sqlite3Pragma(
v = sqlite3GetVdbe(pParse);
pFK = pTab->pFKey;
if( pFK ){
- static const char *azCol[] = {
- "id", "seq", "table", "from", "to", "on_update", "on_delete",
- "match"
- };
int i = 0;
+ sqlite3VdbeSetNumCols(v, 8);
pParse->nMem = 8;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 8, azCol); assert( 8==ArraySize(azCol) );
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "seq", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "table", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "from", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "to", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "on_update", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 6, COLNAME_NAME, "on_delete", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 7, COLNAME_NAME, "match", SQLITE_STATIC);
while(pFK){
int j;
for(j=0; j<pFK->nCol; j++){
- sqlite3VdbeMultiLoad(v, 1, "iissssss",
- i,
- j,
- pFK->zTo,
- pTab->aCol[pFK->aCol[j].iFrom].zName,
- pFK->aCol[j].zCol,
- actionName(pFK->aAction[1]), /* ON UPDATE */
- actionName(pFK->aAction[0]), /* ON DELETE */
- "NONE");
+ char *zCol = pFK->aCol[j].zCol;
+ char *zOnDelete = (char *)actionName(pFK->aAction[0]);
+ char *zOnUpdate = (char *)actionName(pFK->aAction[1]);
+ sqlite3VdbeAddOp2(v, OP_Integer, i, 1);
+ sqlite3VdbeAddOp2(v, OP_Integer, j, 2);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pFK->zTo, 0);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0,
+ pTab->aCol[pFK->aCol[j].iFrom].zName, 0);
+ sqlite3VdbeAddOp4(v, zCol ? OP_String8 : OP_Null, 0, 5, 0, zCol, 0);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 6, 0, zOnUpdate, 0);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 7, 0, zOnDelete, 0);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 8, 0, "NONE", 0);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 8);
}
++i;
@@ -107037,14 +97924,17 @@ SQLITE_PRIVATE void sqlite3Pragma(
int addrTop; /* Top of a loop checking foreign keys */
int addrOk; /* Jump here if the key is OK */
int *aiCols; /* child to parent column mapping */
- static const char *azCol[] = { "table", "rowid", "parent", "fkid" };
regResult = pParse->nMem+1;
pParse->nMem += 4;
regKey = ++pParse->nMem;
regRow = ++pParse->nMem;
v = sqlite3GetVdbe(pParse);
- setAllColumnNames(v, 4, azCol); assert( 4==ArraySize(azCol) );
+ sqlite3VdbeSetNumCols(v, 4);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "table", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "rowid", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "parent", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "fkid", SQLITE_STATIC);
sqlite3CodeVerifySchema(pParse, iDb);
k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash);
while( k ){
@@ -107059,7 +97949,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow;
sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead);
- sqlite3VdbeLoadString(v, regResult, pTab->zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, regResult, 0, pTab->zName,
+ P4_TRANSIENT);
for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){
pParent = sqlite3FindTable(db, pFK->zTo, zDb);
if( pParent==0 ) continue;
@@ -107081,7 +97972,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( pParse->nErr>0 || pFK==0 );
if( pFK ) break;
if( pParse->nTab<i ) pParse->nTab = i;
- addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v);
+ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0);
for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){
pParent = sqlite3FindTable(db, pFK->zTo, zDb);
pIdx = 0;
@@ -107097,35 +97988,37 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( iKey!=pTab->iPKey ){
sqlite3VdbeAddOp3(v, OP_Column, 0, iKey, regRow);
sqlite3ColumnDefault(v, pTab, iKey, regRow);
- sqlite3VdbeAddOp2(v, OP_IsNull, regRow, addrOk); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_MustBeInt, regRow,
- sqlite3VdbeCurrentAddr(v)+3); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IsNull, regRow, addrOk);
+ sqlite3VdbeAddOp2(v, OP_MustBeInt, regRow,
+ sqlite3VdbeCurrentAddr(v)+3);
}else{
sqlite3VdbeAddOp2(v, OP_Rowid, 0, regRow);
}
- sqlite3VdbeAddOp3(v, OP_NotExists, i, 0, regRow); VdbeCoverage(v);
- sqlite3VdbeGoto(v, addrOk);
+ sqlite3VdbeAddOp3(v, OP_NotExists, i, 0, regRow);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrOk);
sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
}else{
for(j=0; j<pFK->nCol; j++){
sqlite3ExprCodeGetColumnOfTable(v, pTab, 0,
aiCols ? aiCols[j] : pFK->aCol[j].iFrom, regRow+j);
- sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk);
}
if( pParent ){
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey,
- sqlite3IndexAffinityStr(db,pIdx), pFK->nCol);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regRow, pFK->nCol, regKey);
+ sqlite3VdbeChangeP4(v, -1,
+ sqlite3IndexAffinityStr(v,pIdx), P4_TRANSIENT);
sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0);
- VdbeCoverage(v);
}
}
sqlite3VdbeAddOp2(v, OP_Rowid, 0, regResult+1);
- sqlite3VdbeMultiLoad(v, regResult+2, "si", pFK->zTo, i-1);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, regResult+2, 0,
+ pFK->zTo, P4_TRANSIENT);
+ sqlite3VdbeAddOp2(v, OP_Integer, i-1, regResult+3);
sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, 4);
sqlite3VdbeResolveLabel(v, addrOk);
sqlite3DbFree(db, aiCols);
}
- sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1);
sqlite3VdbeJumpHere(v, addrTop);
}
}
@@ -107172,10 +98065,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
** messages have been generated, output OK. Otherwise output the
** error message
*/
- static const int iLn = VDBE_OFFSET_LINENO(2);
static const VdbeOpList endCode[] = {
{ OP_AddImm, 1, 0, 0}, /* 0 */
- { OP_If, 1, 0, 0}, /* 1 */
+ { OP_IfNeg, 1, 0, 0}, /* 1 */
{ OP_String8, 0, 3, 0}, /* 2 */
{ OP_ResultRow, 3, 1, 0},
};
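The endCode opcode list above is what makes PRAGMA integrity_check emit the single row "ok" once the error counter shows no problems were recorded. A short usage sketch through sqlite3_exec(); the callback and handle names are placeholders:

#include <stdio.h>
#include <sqlite3.h>

/* Print every row produced by PRAGMA integrity_check ("ok" when clean). */
static int print_row(void *pUnused, int nCol, char **azVal, char **azColName){
  (void)pUnused; (void)azColName;
  if( nCol>0 ) printf("%s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}

static int run_integrity_check(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA integrity_check;", print_row, 0, 0);
}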
@@ -107197,7 +98089,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
/* Initialize the VDBE program */
pParse->nMem = 6;
- setOneColumnName(v, "integrity_check");
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "integrity_check", SQLITE_STATIC);
/* Set the maximum error count */
mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX;
@@ -107220,7 +98113,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3CodeVerifySchema(pParse, i);
addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */
- VdbeCoverage(v);
sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
sqlite3VdbeJumpHere(v, addr);
@@ -107252,11 +98144,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
/* Do the b-tree integrity checks */
sqlite3VdbeAddOp3(v, OP_IntegrityCk, 2, cnt, 1);
sqlite3VdbeChangeP5(v, (u8)i);
- addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v);
+ addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2);
sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName),
P4_DYNAMIC);
- sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1);
+ sqlite3VdbeAddOp2(v, OP_Move, 2, 4);
sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2);
sqlite3VdbeAddOp2(v, OP_ResultRow, 2, 1);
sqlite3VdbeJumpHere(v, addr);
@@ -107266,15 +98158,12 @@ SQLITE_PRIVATE void sqlite3Pragma(
for(x=sqliteHashFirst(pTbls); x && !isQuick; x=sqliteHashNext(x)){
Table *pTab = sqliteHashData(x);
Index *pIdx, *pPk;
- Index *pPrior = 0;
int loopTop;
int iDataCur, iIdxCur;
- int r1 = -1;
if( pTab->pIndex==0 ) continue;
pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Stop if out of errors */
- VdbeCoverage(v);
sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
sqlite3VdbeJumpHere(v, addr);
sqlite3ExprCacheClear(pParse);
@@ -107285,98 +98174,52 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3VdbeAddOp2(v, OP_Integer, 0, 8+j); /* index entries counter */
}
pParse->nMem = MAX(pParse->nMem, 8+j);
- sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0);
loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1);
- /* Verify that all NOT NULL columns really are NOT NULL */
- for(j=0; j<pTab->nCol; j++){
- char *zErr;
- int jmp2, jmp3;
- if( j==pTab->iPKey ) continue;
- if( pTab->aCol[j].notNull==0 ) continue;
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3);
- sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
- jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */
- zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName,
- pTab->aCol[j].zName);
- sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
- sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1);
- jmp3 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v);
- sqlite3VdbeAddOp0(v, OP_Halt);
- sqlite3VdbeJumpHere(v, jmp2);
- sqlite3VdbeJumpHere(v, jmp3);
- }
- /* Validate index entries for the current row */
for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
- int jmp2, jmp3, jmp4, jmp5;
- int ckUniq = sqlite3VdbeMakeLabel(v);
+ int jmp2, jmp3, jmp4;
+ int r1;
if( pPk==pIdx ) continue;
- r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3,
- pPrior, r1);
- pPrior = pIdx;
+ r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3);
sqlite3VdbeAddOp2(v, OP_AddImm, 8+j, 1); /* increment entry count */
- /* Verify that an index entry exists for the current table row */
- jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, ckUniq, r1,
- pIdx->nColumn); VdbeCoverage(v);
+ jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, 0, r1,
+ pIdx->nColumn);
sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */
- sqlite3VdbeLoadString(v, 3, "row ");
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, "row ", P4_STATIC);
sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3);
- sqlite3VdbeLoadString(v, 4, " missing from index ");
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, " missing from index ",
+ P4_STATIC);
sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
- jmp5 = sqlite3VdbeLoadString(v, 4, pIdx->zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, pIdx->zName, P4_TRANSIENT);
sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1);
- jmp4 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v);
+ jmp4 = sqlite3VdbeAddOp1(v, OP_IfPos, 1);
sqlite3VdbeAddOp0(v, OP_Halt);
- sqlite3VdbeJumpHere(v, jmp2);
- /* For UNIQUE indexes, verify that only one entry exists with the
- ** current key. The entry is unique if (1) any column is NULL
- ** or (2) the next entry has a different key */
- if( IsUniqueIndex(pIdx) ){
- int uniqOk = sqlite3VdbeMakeLabel(v);
- int jmp6;
- int kk;
- for(kk=0; kk<pIdx->nKeyCol; kk++){
- int iCol = pIdx->aiColumn[kk];
- assert( iCol!=XN_ROWID && iCol<pTab->nCol );
- if( iCol>=0 && pTab->aCol[iCol].notNull ) continue;
- sqlite3VdbeAddOp2(v, OP_IsNull, r1+kk, uniqOk);
- VdbeCoverage(v);
- }
- jmp6 = sqlite3VdbeAddOp1(v, OP_Next, iIdxCur+j); VdbeCoverage(v);
- sqlite3VdbeGoto(v, uniqOk);
- sqlite3VdbeJumpHere(v, jmp6);
- sqlite3VdbeAddOp4Int(v, OP_IdxGT, iIdxCur+j, uniqOk, r1,
- pIdx->nKeyCol); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */
- sqlite3VdbeLoadString(v, 3, "non-unique entry in index ");
- sqlite3VdbeGoto(v, jmp5);
- sqlite3VdbeResolveLabel(v, uniqOk);
- }
sqlite3VdbeJumpHere(v, jmp4);
- sqlite3ResolvePartIdxLabel(pParse, jmp3);
+ sqlite3VdbeJumpHere(v, jmp2);
+ sqlite3VdbeResolveLabel(v, jmp3);
}
- sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop);
sqlite3VdbeJumpHere(v, loopTop-1);
#ifndef SQLITE_OMIT_BTREECOUNT
- sqlite3VdbeLoadString(v, 2, "wrong # of entries in index ");
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0,
+ "wrong # of entries in index ", P4_STATIC);
for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
if( pPk==pIdx ) continue;
addr = sqlite3VdbeCurrentAddr(v);
- sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr+2); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr+2);
sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3);
- sqlite3VdbeAddOp3(v, OP_Eq, 8+j, addr+8, 3); VdbeCoverage(v);
- sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+ sqlite3VdbeAddOp3(v, OP_Eq, 8+j, addr+8, 3);
sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1);
- sqlite3VdbeLoadString(v, 3, pIdx->zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pIdx->zName, P4_TRANSIENT);
sqlite3VdbeAddOp3(v, OP_Concat, 3, 2, 7);
sqlite3VdbeAddOp2(v, OP_ResultRow, 7, 1);
}
#endif /* SQLITE_OMIT_BTREECOUNT */
}
}
- addr = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode, iLn);
+ addr = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode);
sqlite3VdbeChangeP2(v, addr, -mxErr);
sqlite3VdbeJumpHere(v, addr+1);
sqlite3VdbeChangeP4(v, addr+2, "ok", P4_STATIC);
@@ -107425,10 +98268,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
const struct EncName *pEnc;
if( !zRight ){ /* "PRAGMA encoding" */
if( sqlite3ReadSchema(pParse) ) goto pragma_out;
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "encoding", SQLITE_STATIC);
+ sqlite3VdbeAddOp2(v, OP_String8, 0, 1);
assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 );
assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE );
assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE );
- returnSingleText(v, "encoding", encnames[ENC(pParse->db)].zName);
+ sqlite3VdbeChangeP4(v, -1, encnames[ENC(pParse->db)].zName, P4_STATIC);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
}else{ /* "PRAGMA encoding = XXX" */
/* Only change the value of sqlite.enc if the database handle is not
** initialized. If the main database exists, the new sqlite.enc value
@@ -107441,8 +98288,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
){
for(pEnc=&encnames[0]; pEnc->zName; pEnc++){
if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){
- SCHEMA_ENC(db) = ENC(db) =
- pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE;
+ ENC(pParse->db) = pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE;
break;
}
}
@@ -107487,16 +98333,31 @@ SQLITE_PRIVATE void sqlite3Pragma(
** applications for any purpose.
*/
case PragTyp_HEADER_VALUE: {
- int iCookie = pPragma->iArg; /* Which cookie to read or write */
+ int iCookie; /* Cookie index. 1 for schema-cookie, 6 for user-cookie. */
sqlite3VdbeUsesBtree(v, iDb);
- if( zRight && (pPragma->mPragFlag & PragFlag_ReadOnly)==0 ){
+ switch( zLeft[0] ){
+ case 'a': case 'A':
+ iCookie = BTREE_APPLICATION_ID;
+ break;
+ case 'f': case 'F':
+ iCookie = BTREE_FREE_PAGE_COUNT;
+ break;
+ case 's': case 'S':
+ iCookie = BTREE_SCHEMA_VERSION;
+ break;
+ default:
+ iCookie = BTREE_USER_VERSION;
+ break;
+ }
+
+ if( zRight && iCookie!=BTREE_FREE_PAGE_COUNT ){
/* Write the specified cookie value */
static const VdbeOpList setCookie[] = {
{ OP_Transaction, 0, 1, 0}, /* 0 */
{ OP_Integer, 0, 1, 0}, /* 1 */
{ OP_SetCookie, 0, 0, 1}, /* 2 */
};
- int addr = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie, 0);
+ int addr = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie);
sqlite3VdbeChangeP1(v, addr, iDb);
sqlite3VdbeChangeP1(v, addr+1, sqlite3Atoi(zRight));
sqlite3VdbeChangeP1(v, addr+2, iDb);
@@ -107508,7 +98369,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
{ OP_ReadCookie, 0, 1, 0}, /* 1 */
{ OP_ResultRow, 1, 1, 0}
};
- int addr = sqlite3VdbeAddOpList(v, ArraySize(readCookie), readCookie, 0);
+ int addr = sqlite3VdbeAddOpList(v, ArraySize(readCookie), readCookie);
sqlite3VdbeChangeP1(v, addr, iDb);
sqlite3VdbeChangeP1(v, addr+1, iDb);
sqlite3VdbeChangeP3(v, addr+1, iCookie);
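The switch above maps the first letter of the pragma name onto a b-tree header cookie (application_id, freelist_count, schema_version, user_version), and the zRight/iCookie test keeps freelist_count read-only. A small sketch of setting and reading the user_version cookie from C, assuming an open handle db:

#include <sqlite3.h>

/* Set the user_version cookie to 7, then read it back. */
static int bump_user_version(sqlite3 *db, int *pVersion){
  int rc = sqlite3_exec(db, "PRAGMA user_version = 7;", 0, 0, 0);
  if( rc==SQLITE_OK ){
    sqlite3_stmt *pStmt = 0;
    rc = sqlite3_prepare_v2(db, "PRAGMA user_version;", -1, &pStmt, 0);
    if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
      *pVersion = sqlite3_column_int(pStmt, 0);
    }
    sqlite3_finalize(pStmt);
  }
  return rc;
}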
@@ -107529,10 +98390,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
case PragTyp_COMPILE_OPTIONS: {
int i = 0;
const char *zOpt;
+ sqlite3VdbeSetNumCols(v, 1);
pParse->nMem = 1;
- setOneColumnName(v, "compile_option");
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "compile_option", SQLITE_STATIC);
while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){
- sqlite3VdbeLoadString(v, 1, zOpt);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zOpt, 0);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
}
}
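This loop drives PRAGMA compile_options directly off sqlite3_compileoption_get(), which enumerates the compile-time options of the linked library. The same information is reachable without SQL, as in this sketch:

#include <stdio.h>
#include <sqlite3.h>

/* Enumerate compile-time options exactly as the pragma loop above does. */
static void list_compile_options(void){
  const char *zOpt;
  int i = 0;
  while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){
    printf("%s\n", zOpt);
  }
  /* Or test one option by name; the leading "SQLITE_" may be omitted. */
  if( sqlite3_compileoption_used("THREADSAFE=1") ){
    printf("built thread-safe\n");
  }
}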
@@ -107541,12 +98403,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
#ifndef SQLITE_OMIT_WAL
/*
- ** PRAGMA [database.]wal_checkpoint = passive|full|restart|truncate
+ ** PRAGMA [database.]wal_checkpoint = passive|full|restart
**
** Checkpoint the database.
*/
case PragTyp_WAL_CHECKPOINT: {
- static const char *azCol[] = { "busy", "log", "checkpointed" };
int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED);
int eMode = SQLITE_CHECKPOINT_PASSIVE;
if( zRight ){
@@ -107554,12 +98415,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
eMode = SQLITE_CHECKPOINT_FULL;
}else if( sqlite3StrICmp(zRight, "restart")==0 ){
eMode = SQLITE_CHECKPOINT_RESTART;
- }else if( sqlite3StrICmp(zRight, "truncate")==0 ){
- eMode = SQLITE_CHECKPOINT_TRUNCATE;
}
}
- setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) );
+ sqlite3VdbeSetNumCols(v, 3);
pParse->nMem = 3;
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "busy", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "log", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "checkpointed", SQLITE_STATIC);
+
sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
}
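As restored here, PRAGMA wal_checkpoint accepts the passive, full and restart modes (the later truncate mode is backed out) and returns the busy/log/checkpointed triple. The equivalent C-level call is sqlite3_wal_checkpoint_v2(); a hedged sketch, assuming an open handle db on a database in WAL mode:

#include <sqlite3.h>

/* Run a FULL checkpoint on the "main" database and report frame counts. */
static int checkpoint_main(sqlite3 *db, int *pnLog, int *pnCkpt){
  return sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_FULL, pnLog, pnCkpt);
}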
@@ -107577,7 +98440,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( zRight ){
sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight));
}
- returnSingleInt(v, "wal_autocheckpoint",
+ returnSingleInt(pParse, "wal_autocheckpoint",
db->xWalCallback==sqlite3WalDefaultHook ?
SQLITE_PTR_TO_INT(db->pWalArg) : 0);
}
@@ -107587,9 +98450,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
/*
** PRAGMA shrink_memory
**
- ** IMPLEMENTATION-OF: R-23445-46109 This pragma causes the database
- ** connection on which it is invoked to free up as much memory as it
- ** can, by calling sqlite3_db_release_memory().
+ ** This pragma attempts to free as much memory as possible from the
+ ** current database connection.
*/
case PragTyp_SHRINK_MEMORY: {
sqlite3_db_release_memory(db);
@@ -107606,11 +98468,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
** disables the timeout.
*/
/*case PragTyp_BUSY_TIMEOUT*/ default: {
- assert( pPragma->ePragTyp==PragTyp_BUSY_TIMEOUT );
+ assert( aPragmaNames[mid].ePragTyp==PragTyp_BUSY_TIMEOUT );
if( zRight ){
sqlite3_busy_timeout(db, sqlite3Atoi(zRight));
}
- returnSingleInt(v, "timeout", db->busyTimeout);
+ returnSingleInt(pParse, "timeout", db->busyTimeout);
break;
}
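PRAGMA busy_timeout is a thin wrapper over sqlite3_busy_timeout(): with an argument it installs the timeout, and either way it reports db->busyTimeout back as a single integer named "timeout". For example:

#include <sqlite3.h>

/* Wait up to five seconds for locks before a statement returns SQLITE_BUSY. */
static int set_busy_timeout(sqlite3 *db){
  return sqlite3_busy_timeout(db, 5000);  /* milliseconds; a value <= 0 disables it */
}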
@@ -107618,39 +98480,15 @@ SQLITE_PRIVATE void sqlite3Pragma(
** PRAGMA soft_heap_limit
** PRAGMA soft_heap_limit = N
**
- ** IMPLEMENTATION-OF: R-26343-45930 This pragma invokes the
- ** sqlite3_soft_heap_limit64() interface with the argument N, if N is
- ** specified and is a non-negative integer.
- ** IMPLEMENTATION-OF: R-64451-07163 The soft_heap_limit pragma always
- ** returns the same integer that would be returned by the
- ** sqlite3_soft_heap_limit64(-1) C-language function.
+ ** Call sqlite3_soft_heap_limit64(N). Return the result. If N is omitted,
+ ** use -1.
*/
case PragTyp_SOFT_HEAP_LIMIT: {
sqlite3_int64 N;
- if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){
+ if( zRight && sqlite3Atoi64(zRight, &N, 1000000, SQLITE_UTF8)==SQLITE_OK ){
sqlite3_soft_heap_limit64(N);
}
- returnSingleInt(v, "soft_heap_limit", sqlite3_soft_heap_limit64(-1));
- break;
- }
-
- /*
- ** PRAGMA threads
- ** PRAGMA threads = N
- **
- ** Configure the maximum number of worker threads. Return the new
- ** maximum, which might be less than requested.
- */
- case PragTyp_THREADS: {
- sqlite3_int64 N;
- if( zRight
- && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK
- && N>=0
- ){
- sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff));
- }
- returnSingleInt(v, "threads",
- sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1));
+ returnSingleInt(pParse, "soft_heap_limit", sqlite3_soft_heap_limit64(-1));
break;
}
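PRAGMA soft_heap_limit mirrors sqlite3_soft_heap_limit64(): a non-negative argument sets the limit, and the current limit is what sqlite3_soft_heap_limit64(-1) would report. A tiny sketch:

#include <sqlite3.h>

/* Cap SQLite's heap usage at roughly 8 MiB and return the previous limit. */
static sqlite3_int64 cap_heap(void){
  sqlite3_int64 prev = sqlite3_soft_heap_limit64(8*1024*1024);
  return prev;  /* sqlite3_soft_heap_limit64(-1) queries without changing the limit */
}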
@@ -107662,15 +98500,17 @@ SQLITE_PRIVATE void sqlite3Pragma(
static const char *const azLockName[] = {
"unlocked", "shared", "reserved", "pending", "exclusive"
};
- static const char *azCol[] = { "database", "status" };
int i;
- setAllColumnNames(v, 2, azCol); assert( 2==ArraySize(azCol) );
+ sqlite3VdbeSetNumCols(v, 2);
pParse->nMem = 2;
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "database", SQLITE_STATIC);
+ sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "status", SQLITE_STATIC);
for(i=0; i<db->nDb; i++){
Btree *pBt;
const char *zState = "unknown";
int j;
if( db->aDb[i].zName==0 ) continue;
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, db->aDb[i].zName, P4_STATIC);
pBt = db->aDb[i].pBt;
if( pBt==0 || sqlite3BtreePager(pBt)==0 ){
zState = "closed";
@@ -107678,7 +98518,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){
zState = azLockName[j];
}
- sqlite3VdbeMultiLoad(v, 1, "ss", db->aDb[i].zName, zState);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, zState, P4_STATIC);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2);
}
break;
@@ -107754,7 +98594,6 @@ pragma_out:
** interface, and routines that contribute to loading the database schema
** from disk.
*/
-/* #include "sqliteInt.h" */
/*
** Fill the InitData structure with an error message that indicates
@@ -107767,13 +98606,13 @@ static void corruptSchema(
){
sqlite3 *db = pData->db;
if( !db->mallocFailed && (db->flags & SQLITE_RecoveryMode)==0 ){
- char *z;
if( zObj==0 ) zObj = "?";
- z = sqlite3_mprintf("malformed database schema (%s)", zObj);
- if( z && zExtra ) z = sqlite3_mprintf("%z - %s", z, zExtra);
- sqlite3DbFree(db, *pData->pzErrMsg);
- *pData->pzErrMsg = z;
- if( z==0 ) db->mallocFailed = 1;
+ sqlite3SetString(pData->pzErrMsg, db,
+ "malformed database schema (%s)", zObj);
+ if( zExtra ){
+ *pData->pzErrMsg = sqlite3MAppendf(db, *pData->pzErrMsg,
+ "%s - %s", *pData->pzErrMsg, zExtra);
+ }
}
pData->rc = db->mallocFailed ? SQLITE_NOMEM : SQLITE_CORRUPT_BKPT;
}
@@ -107808,7 +98647,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */
if( argv[1]==0 ){
corruptSchema(pData, argv[0], 0);
- }else if( sqlite3_strnicmp(argv[2],"create ",7)==0 ){
+ }else if( argv[2] && argv[2][0] ){
/* Call the parser to process a CREATE TABLE, INDEX or VIEW.
** But because db->init.busy is set to 1, no VDBE code is generated
** or executed. All the parser does is build the internal data
@@ -107839,8 +98678,8 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
}
}
sqlite3_finalize(pStmt);
- }else if( argv[0]==0 || (argv[2]!=0 && argv[2][0]!=0) ){
- corruptSchema(pData, argv[0], 0);
+ }else if( argv[0]==0 ){
+ corruptSchema(pData, 0, 0);
}else{
/* If the SQL column is blank it means this is an index that
** was created to be the PRIMARY KEY or to fulfill a UNIQUE
@@ -107965,7 +98804,7 @@ static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){
if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){
rc = sqlite3BtreeBeginTrans(pDb->pBt, 0);
if( rc!=SQLITE_OK ){
- sqlite3SetString(pzErrMsg, db, sqlite3ErrStr(rc));
+ sqlite3SetString(pzErrMsg, db, "%s", sqlite3ErrStr(rc));
goto initone_error_out;
}
openedTransaction = 1;
@@ -108069,7 +98908,7 @@ static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){
db->aDb[iDb].zName, zMasterName);
#ifndef SQLITE_OMIT_AUTHORIZATION
{
- sqlite3_xauth xAuth;
+ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*);
xAuth = db->xAuth;
db->xAuth = 0;
#endif
@@ -108135,11 +98974,8 @@ SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){
int commit_internal = !(db->flags&SQLITE_InternChanges);
assert( sqlite3_mutex_held(db->mutex) );
- assert( sqlite3BtreeHoldsMutex(db->aDb[0].pBt) );
- assert( db->init.busy==0 );
rc = SQLITE_OK;
db->init.busy = 1;
- ENC(db) = SCHEMA_ENC(db);
for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue;
rc = sqlite3InitOne(db, i, pzErrMsg);
@@ -108153,8 +98989,8 @@ SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){
** schema may contain references to objects in other databases.
*/
#ifndef SQLITE_OMIT_TEMPDB
- assert( db->nDb>1 );
- if( rc==SQLITE_OK && !DbHasProperty(db, 1, DB_SchemaLoaded) ){
+ if( rc==SQLITE_OK && ALWAYS(db->nDb>1)
+ && !DbHasProperty(db, 1, DB_SchemaLoaded) ){
rc = sqlite3InitOne(db, 1, pzErrMsg);
if( rc ){
sqlite3ResetOneSchema(db, 1);
@@ -108272,11 +99108,7 @@ SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){
** Free all memory allocations in the pParse object
*/
SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){
- if( pParse ){
- sqlite3 *db = pParse->db;
- sqlite3DbFree(db, pParse->aLabel);
- sqlite3ExprListDelete(db, pParse->pConstExpr);
- }
+ if( pParse ) sqlite3ExprListDelete(pParse->db, pParse->pConstExpr);
}
/*
@@ -108337,7 +99169,7 @@ static int sqlite3Prepare(
rc = sqlite3BtreeSchemaLocked(pBt);
if( rc ){
const char *zDb = db->aDb[i].zName;
- sqlite3ErrorWithMsg(db, rc, "database schema is locked: %s", zDb);
+ sqlite3Error(db, rc, "database schema is locked: %s", zDb);
testcase( db->flags & SQLITE_ReadUncommitted );
goto end_prepare;
}
@@ -108354,7 +99186,7 @@ static int sqlite3Prepare(
testcase( nBytes==mxLen );
testcase( nBytes==mxLen+1 );
if( nBytes>mxLen ){
- sqlite3ErrorWithMsg(db, SQLITE_TOOBIG, "statement too long");
+ sqlite3Error(db, SQLITE_TOOBIG, "statement too long");
rc = sqlite3ApiExit(db, SQLITE_TOOBIG);
goto end_prepare;
}
@@ -108421,10 +99253,10 @@ static int sqlite3Prepare(
}
if( zErrMsg ){
- sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg);
+ sqlite3Error(db, rc, "%s", zErrMsg);
sqlite3DbFree(db, zErrMsg);
}else{
- sqlite3Error(db, rc);
+ sqlite3Error(db, rc, 0);
}
/* Delete any TriggerPrg structures allocated while parsing this statement. */
@@ -108452,12 +99284,9 @@ static int sqlite3LockAndPrepare(
const char **pzTail /* OUT: End of parsed string */
){
int rc;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( ppStmt==0 ) return SQLITE_MISUSE_BKPT;
-#endif
+ assert( ppStmt!=0 );
*ppStmt = 0;
- if( !sqlite3SafetyCheckOk(db)||zSql==0 ){
+ if( !sqlite3SafetyCheckOk(db) ){
return SQLITE_MISUSE_BKPT;
}
sqlite3_mutex_enter(db->mutex);
@@ -108518,7 +99347,7 @@ SQLITE_PRIVATE int sqlite3Reprepare(Vdbe *p){
** and the statement is automatically recompiled if an schema change
** occurs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
+SQLITE_API int sqlite3_prepare(
sqlite3 *db, /* Database handle. */
const char *zSql, /* UTF-8 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -108530,7 +99359,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
return rc;
}
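sqlite3_prepare_v2() differs from the legacy sqlite3_prepare() exactly as the comment above describes: the original SQL text is retained so the statement can be recompiled transparently after a schema change, with errors reported from sqlite3_step() itself. A typical prepare/step/finalize sequence, with the query text purely illustrative:

#include <sqlite3.h>

/* Prepare, step and finalize a query; v2 statements survive schema changes. */
static int count_schema_rows(sqlite3 *db, sqlite3_int64 *pnRows){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT count(*) FROM sqlite_master;", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      *pnRows = sqlite3_column_int64(pStmt, 0);
    }
    rc = sqlite3_finalize(pStmt);
  }
  return rc;
}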
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
+SQLITE_API int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle. */
const char *zSql, /* UTF-8 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -108564,11 +99393,9 @@ static int sqlite3Prepare16(
const char *zTail8 = 0;
int rc = SQLITE_OK;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( ppStmt==0 ) return SQLITE_MISUSE_BKPT;
-#endif
+ assert( ppStmt );
*ppStmt = 0;
- if( !sqlite3SafetyCheckOk(db)||zSql==0 ){
+ if( !sqlite3SafetyCheckOk(db) ){
return SQLITE_MISUSE_BKPT;
}
if( nBytes>=0 ){
@@ -108606,7 +99433,7 @@ static int sqlite3Prepare16(
** and the statement is automatically recompiled if a schema change
** occurs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
+SQLITE_API int sqlite3_prepare16(
sqlite3 *db, /* Database handle. */
const void *zSql, /* UTF-16 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -108618,7 +99445,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
+SQLITE_API int sqlite3_prepare16_v2(
sqlite3 *db, /* Database handle. */
const void *zSql, /* UTF-16 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -108649,72 +99476,22 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
** This file contains C code routines that are called by the parser
** to handle SELECT statements in SQLite.
*/
-/* #include "sqliteInt.h" */
-
-/*
-** Trace output macros
-*/
-#if SELECTTRACE_ENABLED
-/***/ int sqlite3SelectTrace = 0;
-# define SELECTTRACE(K,P,S,X) \
- if(sqlite3SelectTrace&(K)) \
- sqlite3DebugPrintf("%*s%s.%p: ",(P)->nSelectIndent*2-2,"",\
- (S)->zSelName,(S)),\
- sqlite3DebugPrintf X
-#else
-# define SELECTTRACE(K,P,S,X)
-#endif
/*
-** An instance of the following object is used to record information about
-** how to process the DISTINCT keyword, to simplify passing that information
-** into the selectInnerLoop() routine.
+** Delete all the content of a Select structure but do not deallocate
+** the select structure itself.
*/
-typedef struct DistinctCtx DistinctCtx;
-struct DistinctCtx {
- u8 isTnct; /* True if the DISTINCT keyword is present */
- u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */
- int tabTnct; /* Ephemeral table used for DISTINCT processing */
- int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */
-};
-
-/*
-** An instance of the following object is used to record information about
-** the ORDER BY (or GROUP BY) clause of query is being coded.
-*/
-typedef struct SortCtx SortCtx;
-struct SortCtx {
- ExprList *pOrderBy; /* The ORDER BY (or GROUP BY clause) */
- int nOBSat; /* Number of ORDER BY terms satisfied by indices */
- int iECursor; /* Cursor number for the sorter */
- int regReturn; /* Register holding block-output return address */
- int labelBkOut; /* Start label for the block-output subroutine */
- int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */
- u8 sortFlags; /* Zero or more SORTFLAG_* bits */
-};
-#define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */
-
-/*
-** Delete all the content of a Select structure. Deallocate the structure
-** itself only if bFree is true.
-*/
-static void clearSelect(sqlite3 *db, Select *p, int bFree){
- while( p ){
- Select *pPrior = p->pPrior;
- sqlite3ExprListDelete(db, p->pEList);
- sqlite3SrcListDelete(db, p->pSrc);
- sqlite3ExprDelete(db, p->pWhere);
- sqlite3ExprListDelete(db, p->pGroupBy);
- sqlite3ExprDelete(db, p->pHaving);
- sqlite3ExprListDelete(db, p->pOrderBy);
- sqlite3ExprDelete(db, p->pLimit);
- sqlite3ExprDelete(db, p->pOffset);
- sqlite3WithDelete(db, p->pWith);
- if( bFree ) sqlite3DbFree(db, p);
- p = pPrior;
- bFree = 1;
- }
+static void clearSelect(sqlite3 *db, Select *p){
+ sqlite3ExprListDelete(db, p->pEList);
+ sqlite3SrcListDelete(db, p->pSrc);
+ sqlite3ExprDelete(db, p->pWhere);
+ sqlite3ExprListDelete(db, p->pGroupBy);
+ sqlite3ExprDelete(db, p->pHaving);
+ sqlite3ExprListDelete(db, p->pOrderBy);
+ sqlite3SelectDelete(db, p->pPrior);
+ sqlite3ExprDelete(db, p->pLimit);
+ sqlite3ExprDelete(db, p->pOffset);
}
/*
@@ -108749,6 +99526,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
Select standin;
sqlite3 *db = pParse->db;
pNew = sqlite3DbMallocZero(db, sizeof(*pNew) );
+ assert( db->mallocFailed || !pOffset || pLimit ); /* OFFSET implies LIMIT */
if( pNew==0 ){
assert( db->mallocFailed );
pNew = &standin;
@@ -108768,11 +99546,13 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
pNew->op = TK_SELECT;
pNew->pLimit = pLimit;
pNew->pOffset = pOffset;
- assert( pOffset==0 || pLimit!=0 || pParse->nErr>0 || db->mallocFailed!=0 );
+ assert( pOffset==0 || pLimit!=0 );
pNew->addrOpenEphm[0] = -1;
pNew->addrOpenEphm[1] = -1;
+ pNew->addrOpenEphm[2] = -1;
if( db->mallocFailed ) {
- clearSelect(db, pNew, pNew!=&standin);
+ clearSelect(db, pNew);
+ if( pNew!=&standin ) sqlite3DbFree(db, pNew);
pNew = 0;
}else{
assert( pNew->pSrc!=0 || pParse->nErr>0 );
@@ -108781,31 +99561,14 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
return pNew;
}
-#if SELECTTRACE_ENABLED
-/*
-** Set the name of a Select object
-*/
-SQLITE_PRIVATE void sqlite3SelectSetName(Select *p, const char *zName){
- if( p && zName ){
- sqlite3_snprintf(sizeof(p->zSelName), p->zSelName, "%s", zName);
- }
-}
-#endif
-
-
/*
** Delete the given Select structure and all of its substructures.
*/
SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){
- clearSelect(db, p, 1);
-}
-
-/*
-** Return a pointer to the right-most SELECT statement in a compound.
-*/
-static Select *findRightmost(Select *p){
- while( p->pNext ) p = p->pNext;
- return p;
+ if( p ){
+ clearSelect(db, p);
+ sqlite3DbFree(db, p);
+ }
}
/*
@@ -109003,12 +99766,6 @@ static void setJoinExpr(Expr *p, int iTable){
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
ExprSetVVAProperty(p, EP_NoReduce);
p->iRightJoinTable = (i16)iTable;
- if( p->op==TK_FUNCTION && p->x.pList ){
- int i;
- for(i=0; i<p->x.pList->nExpr; i++){
- setJoinExpr(p->x.pList->a[i].pExpr, iTable);
- }
- }
setJoinExpr(p->pLeft, iTable);
p = p->pRight;
}
@@ -109043,12 +99800,12 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){
int isOuter;
if( NEVER(pLeftTab==0 || pRightTab==0) ) continue;
- isOuter = (pRight->fg.jointype & JT_OUTER)!=0;
+ isOuter = (pRight->jointype & JT_OUTER)!=0;
/* When the NATURAL keyword is present, add WHERE clause terms for
** every column that the two tables have in common.
*/
- if( pRight->fg.jointype & JT_NATURAL ){
+ if( pRight->jointype & JT_NATURAL ){
if( pRight->pOn || pRight->pUsing ){
sqlite3ErrorMsg(pParse, "a NATURAL join may not have "
"an ON or USING clause", 0);
@@ -109116,110 +99873,49 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){
return 0;
}
-/* Forward reference */
-static KeyInfo *keyInfoFromExprList(
- Parse *pParse, /* Parsing context */
- ExprList *pList, /* Form the KeyInfo object from this ExprList */
- int iStart, /* Begin with this column of pList */
- int nExtra /* Add this many extra columns to the end */
-);
-
/*
-** Generate code that will push the record in registers regData
-** through regData+nData-1 onto the sorter.
+** Insert code into "v" that will push the record on the top of the
+** stack into the sorter.
*/
static void pushOntoSorter(
Parse *pParse, /* Parser context */
- SortCtx *pSort, /* Information about the ORDER BY clause */
+ ExprList *pOrderBy, /* The ORDER BY clause */
Select *pSelect, /* The whole SELECT statement */
- int regData, /* First register holding data to be sorted */
- int regOrigData, /* First register holding data before packing */
- int nData, /* Number of elements in the data array */
- int nPrefixReg /* No. of reg prior to regData available for use */
-){
- Vdbe *v = pParse->pVdbe; /* Stmt under construction */
- int bSeq = ((pSort->sortFlags & SORTFLAG_UseSorter)==0);
- int nExpr = pSort->pOrderBy->nExpr; /* No. of ORDER BY terms */
- int nBase = nExpr + bSeq + nData; /* Fields in sorter record */
- int regBase; /* Regs for sorter record */
- int regRecord = ++pParse->nMem; /* Assembled sorter record */
- int nOBSat = pSort->nOBSat; /* ORDER BY terms to skip */
- int op; /* Opcode to add sorter record to sorter */
-
- assert( bSeq==0 || bSeq==1 );
- assert( nData==1 || regData==regOrigData );
- if( nPrefixReg ){
- assert( nPrefixReg==nExpr+bSeq );
- regBase = regData - nExpr - bSeq;
- }else{
- regBase = pParse->nMem + 1;
- pParse->nMem += nBase;
- }
- sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, regOrigData,
- SQLITE_ECEL_DUP|SQLITE_ECEL_REF);
- if( bSeq ){
- sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr);
- }
- if( nPrefixReg==0 ){
- sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData);
- }
-
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regRecord);
- if( nOBSat>0 ){
- int regPrevKey; /* The first nOBSat columns of the previous row */
- int addrFirst; /* Address of the OP_IfNot opcode */
- int addrJmp; /* Address of the OP_Jump opcode */
- VdbeOp *pOp; /* Opcode that opens the sorter */
- int nKey; /* Number of sorting key columns, including OP_Sequence */
- KeyInfo *pKI; /* Original KeyInfo on the sorter table */
-
- regPrevKey = pParse->nMem+1;
- pParse->nMem += pSort->nOBSat;
- nKey = nExpr - pSort->nOBSat + bSeq;
- if( bSeq ){
- addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr);
- }else{
- addrFirst = sqlite3VdbeAddOp1(v, OP_SequenceTest, pSort->iECursor);
- }
- VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat);
- pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex);
- if( pParse->db->mallocFailed ) return;
- pOp->p2 = nKey + nData;
- pKI = pOp->p4.pKeyInfo;
- memset(pKI->aSortOrder, 0, pKI->nField); /* Makes OP_Jump below testable */
- sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO);
- testcase( pKI->nXField>2 );
- pOp->p4.pKeyInfo = keyInfoFromExprList(pParse, pSort->pOrderBy, nOBSat,
- pKI->nXField-1);
- addrJmp = sqlite3VdbeCurrentAddr(v);
- sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v);
- pSort->labelBkOut = sqlite3VdbeMakeLabel(v);
- pSort->regReturn = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
- sqlite3VdbeAddOp1(v, OP_ResetSorter, pSort->iECursor);
- sqlite3VdbeJumpHere(v, addrFirst);
- sqlite3ExprCodeMove(pParse, regBase, regPrevKey, pSort->nOBSat);
- sqlite3VdbeJumpHere(v, addrJmp);
- }
- if( pSort->sortFlags & SORTFLAG_UseSorter ){
+ int regData /* Register holding data to be sorted */
+){
+ Vdbe *v = pParse->pVdbe;
+ int nExpr = pOrderBy->nExpr;
+ int regBase = sqlite3GetTempRange(pParse, nExpr+2);
+ int regRecord = sqlite3GetTempReg(pParse);
+ int op;
+ sqlite3ExprCacheClear(pParse);
+ sqlite3ExprCodeExprList(pParse, pOrderBy, regBase, 0);
+ sqlite3VdbeAddOp2(v, OP_Sequence, pOrderBy->iECursor, regBase+nExpr);
+ sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+1, 1);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nExpr + 2, regRecord);
+ if( pSelect->selFlags & SF_UseSorter ){
op = OP_SorterInsert;
}else{
op = OP_IdxInsert;
}
- sqlite3VdbeAddOp2(v, op, pSort->iECursor, regRecord);
+ sqlite3VdbeAddOp2(v, op, pOrderBy->iECursor, regRecord);
+ sqlite3ReleaseTempReg(pParse, regRecord);
+ sqlite3ReleaseTempRange(pParse, regBase, nExpr+2);
if( pSelect->iLimit ){
- int addr;
+ int addr1, addr2;
int iLimit;
if( pSelect->iOffset ){
iLimit = pSelect->iOffset+1;
}else{
iLimit = pSelect->iLimit;
}
- addr = sqlite3VdbeAddOp3(v, OP_IfNotZero, iLimit, 0, 1); VdbeCoverage(v);
- sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor);
- sqlite3VdbeAddOp1(v, OP_Delete, pSort->iECursor);
- sqlite3VdbeJumpHere(v, addr);
+ addr1 = sqlite3VdbeAddOp1(v, OP_IfZero, iLimit);
+ sqlite3VdbeAddOp2(v, OP_AddImm, iLimit, -1);
+ addr2 = sqlite3VdbeAddOp0(v, OP_Goto);
+ sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp1(v, OP_Last, pOrderBy->iECursor);
+ sqlite3VdbeAddOp1(v, OP_Delete, pOrderBy->iECursor);
+ sqlite3VdbeJumpHere(v, addr2);
}
}
@@ -109228,12 +99924,16 @@ static void pushOntoSorter(
*/
static void codeOffset(
Vdbe *v, /* Generate code into this VM */
- int iOffset, /* Register holding the offset counter */
+ Select *p, /* The SELECT statement being coded */
int iContinue /* Jump here to skip the current record */
){
- if( iOffset>0 ){
- sqlite3VdbeAddOp3(v, OP_IfPos, iOffset, iContinue, 1); VdbeCoverage(v);
- VdbeComment((v, "OFFSET"));
+ if( p->iOffset && iContinue!=0 ){
+ int addr;
+ sqlite3VdbeAddOp2(v, OP_AddImm, p->iOffset, -1);
+ addr = sqlite3VdbeAddOp1(v, OP_IfNeg, p->iOffset);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, iContinue);
+ VdbeComment((v, "skip OFFSET records"));
+ sqlite3VdbeJumpHere(v, addr);
}
}
@@ -109258,7 +99958,7 @@ static void codeDistinct(
v = pParse->pVdbe;
r1 = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v);
+ sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N);
sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1);
sqlite3VdbeAddOp2(v, OP_IdxInsert, iTab, r1);
sqlite3ReleaseTempReg(pParse, r1);
@@ -109289,20 +99989,34 @@ static int checkForMultiColumnSelectError(
#endif
/*
+** An instance of the following object is used to record information about
+** how to process the DISTINCT keyword, to simplify passing that information
+** into the selectInnerLoop() routine.
+*/
+typedef struct DistinctCtx DistinctCtx;
+struct DistinctCtx {
+ u8 isTnct; /* True if the DISTINCT keyword is present */
+ u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */
+ int tabTnct; /* Ephemeral table used for DISTINCT processing */
+ int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */
+};
+
+/*
** This routine generates the code for the inside of the inner loop
** of a SELECT.
**
-** If srcTab is negative, then the pEList expressions
-** are evaluated in order to get the data for this row. If srcTab is
-** zero or more, then data is pulled from srcTab and pEList is used only
-** to get number columns and the datatype for each column.
+** If srcTab and nColumn are both zero, then the pEList expressions
+** are evaluated in order to get the data for this row. If nColumn>0
+** then data is pulled from srcTab and pEList is used only to get the
+** datatypes for each column.
*/
static void selectInnerLoop(
Parse *pParse, /* The parser context */
Select *p, /* The complete select statement being coded */
ExprList *pEList, /* List of values being extracted */
int srcTab, /* Pull data from this table */
- SortCtx *pSort, /* If not NULL, info on how to process ORDER BY */
+ int nColumn, /* Number of columns in the source table */
+ ExprList *pOrderBy, /* If not NULL, sort results using this key */
DistinctCtx *pDistinct, /* If not NULL, info on how to process DISTINCT */
SelectDest *pDest, /* How to dispose of the results */
int iContinue, /* Jump here to continue with next row */
@@ -109315,62 +100029,51 @@ static void selectInnerLoop(
int eDest = pDest->eDest; /* How to dispose of results */
int iParm = pDest->iSDParm; /* First argument to disposal method */
int nResultCol; /* Number of result columns */
- int nPrefixReg = 0; /* Number of extra registers before regResult */
assert( v );
+ if( NEVER(v==0) ) return;
assert( pEList!=0 );
hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP;
- if( pSort && pSort->pOrderBy==0 ) pSort = 0;
- if( pSort==0 && !hasDistinct ){
- assert( iContinue!=0 );
- codeOffset(v, p->iOffset, iContinue);
+ if( pOrderBy==0 && !hasDistinct ){
+ codeOffset(v, p, iContinue);
}
/* Pull the requested columns.
*/
- nResultCol = pEList->nExpr;
-
+ if( nColumn>0 ){
+ nResultCol = nColumn;
+ }else{
+ nResultCol = pEList->nExpr;
+ }
if( pDest->iSdst==0 ){
- if( pSort ){
- nPrefixReg = pSort->pOrderBy->nExpr;
- if( !(pSort->sortFlags & SORTFLAG_UseSorter) ) nPrefixReg++;
- pParse->nMem += nPrefixReg;
- }
pDest->iSdst = pParse->nMem+1;
+ pDest->nSdst = nResultCol;
pParse->nMem += nResultCol;
- }else if( pDest->iSdst+nResultCol > pParse->nMem ){
- /* This is an error condition that can result, for example, when a SELECT
- ** on the right-hand side of an INSERT contains more result columns than
- ** there are columns in the table on the left. The error will be caught
- ** and reported later. But we need to make sure enough memory is allocated
- ** to avoid other spurious errors in the meantime. */
- pParse->nMem += nResultCol;
+ }else{
+ assert( pDest->nSdst==nResultCol );
}
- pDest->nSdst = nResultCol;
regResult = pDest->iSdst;
- if( srcTab>=0 ){
- for(i=0; i<nResultCol; i++){
+ if( nColumn>0 ){
+ for(i=0; i<nColumn; i++){
sqlite3VdbeAddOp3(v, OP_Column, srcTab, i, regResult+i);
- VdbeComment((v, "%s", pEList->a[i].zName));
}
}else if( eDest!=SRT_Exists ){
/* If the destination is an EXISTS(...) expression, the actual
** values returned by the SELECT are not required.
*/
- u8 ecelFlags;
- if( eDest==SRT_Mem || eDest==SRT_Output || eDest==SRT_Coroutine ){
- ecelFlags = SQLITE_ECEL_DUP;
- }else{
- ecelFlags = 0;
- }
- sqlite3ExprCodeExprList(pParse, pEList, regResult, 0, ecelFlags);
+ sqlite3ExprCacheClear(pParse);
+ sqlite3ExprCodeExprList(pParse, pEList, regResult,
+ (eDest==SRT_Output)?SQLITE_ECEL_DUP:0);
}
+ nColumn = nResultCol;
/* If the DISTINCT keyword was present on the SELECT statement
** and this row has been seen before, then do not make this row
** part of the result.
*/
if( hasDistinct ){
+ assert( pEList!=0 );
+ assert( pEList->nExpr==nColumn );
switch( pDistinct->eTnctType ){
case WHERE_DISTINCT_ORDERED: {
VdbeOp *pOp; /* No longer required OpenEphemeral instr. */
@@ -109379,7 +100082,7 @@ static void selectInnerLoop(
/* Allocate space for the previous row */
regPrev = pParse->nMem+1;
- pParse->nMem += nResultCol;
+ pParse->nMem += nColumn;
/* Change the OP_OpenEphemeral coded earlier to an OP_Null
** sets the MEM_Cleared bit on the first register of the
@@ -109393,21 +100096,19 @@ static void selectInnerLoop(
pOp->p1 = 1;
pOp->p2 = regPrev;
- iJump = sqlite3VdbeCurrentAddr(v) + nResultCol;
- for(i=0; i<nResultCol; i++){
+ iJump = sqlite3VdbeCurrentAddr(v) + nColumn;
+ for(i=0; i<nColumn; i++){
CollSeq *pColl = sqlite3ExprCollSeq(pParse, pEList->a[i].pExpr);
- if( i<nResultCol-1 ){
+ if( i<nColumn-1 ){
sqlite3VdbeAddOp3(v, OP_Ne, regResult+i, iJump, regPrev+i);
- VdbeCoverage(v);
}else{
sqlite3VdbeAddOp3(v, OP_Eq, regResult+i, iContinue, regPrev+i);
- VdbeCoverage(v);
- }
+ }
sqlite3VdbeChangeP4(v, -1, (const char *)pColl, P4_COLLSEQ);
sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
}
- assert( sqlite3VdbeCurrentAddr(v)==iJump || pParse->db->mallocFailed );
- sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nResultCol-1);
+ assert( sqlite3VdbeCurrentAddr(v)==iJump );
+ sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nColumn-1);
break;
}
@@ -109418,13 +100119,12 @@ static void selectInnerLoop(
default: {
assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED );
- codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol,
- regResult);
+ codeDistinct(pParse, pDistinct->tabTnct, iContinue, nColumn, regResult);
break;
}
}
- if( pSort==0 ){
- codeOffset(v, p->iOffset, iContinue);
+ if( pOrderBy==0 ){
+ codeOffset(v, p, iContinue);
}
}
@@ -109436,7 +100136,7 @@ static void selectInnerLoop(
case SRT_Union: {
int r1;
r1 = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nColumn, r1);
sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1);
sqlite3ReleaseTempReg(pParse, r1);
break;
@@ -109447,39 +100147,21 @@ static void selectInnerLoop(
** the temporary table iParm.
*/
case SRT_Except: {
- sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nResultCol);
+ sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nColumn);
break;
}
-#endif /* SQLITE_OMIT_COMPOUND_SELECT */
+#endif
/* Store the result as data using a unique key.
*/
- case SRT_Fifo:
- case SRT_DistFifo:
case SRT_Table:
case SRT_EphemTab: {
- int r1 = sqlite3GetTempRange(pParse, nPrefixReg+1);
+ int r1 = sqlite3GetTempReg(pParse);
testcase( eDest==SRT_Table );
testcase( eDest==SRT_EphemTab );
- testcase( eDest==SRT_Fifo );
- testcase( eDest==SRT_DistFifo );
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
-#ifndef SQLITE_OMIT_CTE
- if( eDest==SRT_DistFifo ){
- /* If the destination is DistFifo, then cursor (iParm+1) is open
- ** on an ephemeral index. If the current row is already present
- ** in the index, do not write it to the output. If not, add the
- ** current row to the index and proceed with writing it to the
- ** output table as well. */
- int addr = sqlite3VdbeCurrentAddr(v) + 4;
- sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r1);
- assert( pSort==0 );
- }
-#endif
- if( pSort ){
- pushOntoSorter(pParse, pSort, p, r1+nPrefixReg,regResult,1,nPrefixReg);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nColumn, r1);
+ if( pOrderBy ){
+ pushOntoSorter(pParse, pOrderBy, p, r1);
}else{
int r2 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2);
@@ -109487,7 +100169,7 @@ static void selectInnerLoop(
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
sqlite3ReleaseTempReg(pParse, r2);
}
- sqlite3ReleaseTempRange(pParse, r1, nPrefixReg+1);
+ sqlite3ReleaseTempReg(pParse, r1);
break;
}
@@ -109497,15 +100179,15 @@ static void selectInnerLoop(
** item into the set table with bogus data.
*/
case SRT_Set: {
- assert( nResultCol==1 );
+ assert( nColumn==1 );
pDest->affSdst =
sqlite3CompareAffinity(pEList->a[0].pExpr, pDest->affSdst);
- if( pSort ){
+ if( pOrderBy ){
/* At first glance you would think we could optimize out the
** ORDER BY in this case since the order of entries in the set
** does not matter. But there might be a LIMIT clause, in which
** case the order does matter */
- pushOntoSorter(pParse, pSort, p, regResult, regResult, 1, nPrefixReg);
+ pushOntoSorter(pParse, pOrderBy, p, regResult);
}else{
int r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult,1,r1, &pDest->affSdst, 1);
@@ -109529,82 +100211,39 @@ static void selectInnerLoop(
** of the scan loop.
*/
case SRT_Mem: {
- assert( nResultCol==1 );
- if( pSort ){
- pushOntoSorter(pParse, pSort, p, regResult, regResult, 1, nPrefixReg);
+ assert( nColumn==1 );
+ if( pOrderBy ){
+ pushOntoSorter(pParse, pOrderBy, p, regResult);
}else{
- assert( regResult==iParm );
+ sqlite3ExprCodeMove(pParse, regResult, iParm, 1);
/* The LIMIT clause will jump out of the loop for us */
}
break;
}
#endif /* #ifndef SQLITE_OMIT_SUBQUERY */
- case SRT_Coroutine: /* Send data to a co-routine */
- case SRT_Output: { /* Return the results */
+ /* Send the data to the callback function or to a subroutine. In the
+ ** case of a subroutine, the subroutine itself is responsible for
+ ** popping the data from the stack.
+ */
+ case SRT_Coroutine:
+ case SRT_Output: {
testcase( eDest==SRT_Coroutine );
testcase( eDest==SRT_Output );
- if( pSort ){
- pushOntoSorter(pParse, pSort, p, regResult, regResult, nResultCol,
- nPrefixReg);
+ if( pOrderBy ){
+ int r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nColumn, r1);
+ pushOntoSorter(pParse, pOrderBy, p, r1);
+ sqlite3ReleaseTempReg(pParse, r1);
}else if( eDest==SRT_Coroutine ){
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
}else{
- sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol);
- sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol);
+ sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nColumn);
+ sqlite3ExprCacheAffinityChange(pParse, regResult, nColumn);
}
break;
}
-#ifndef SQLITE_OMIT_CTE
- /* Write the results into a priority queue that is order according to
- ** pDest->pOrderBy (in pSO). pDest->iSDParm (in iParm) is the cursor for an
- ** index with pSO->nExpr+2 columns. Build a key using pSO for the first
- ** pSO->nExpr columns, then make sure all keys are unique by adding a
- ** final OP_Sequence column. The last column is the record as a blob.
- */
- case SRT_DistQueue:
- case SRT_Queue: {
- int nKey;
- int r1, r2, r3;
- int addrTest = 0;
- ExprList *pSO;
- pSO = pDest->pOrderBy;
- assert( pSO );
- nKey = pSO->nExpr;
- r1 = sqlite3GetTempReg(pParse);
- r2 = sqlite3GetTempRange(pParse, nKey+2);
- r3 = r2+nKey+1;
- if( eDest==SRT_DistQueue ){
- /* If the destination is DistQueue, then cursor (iParm+1) is open
- ** on a second ephemeral index that holds all values every previously
- ** added to the queue. */
- addrTest = sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, 0,
- regResult, nResultCol);
- VdbeCoverage(v);
- }
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r3);
- if( eDest==SRT_DistQueue ){
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r3);
- sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
- }
- for(i=0; i<nKey; i++){
- sqlite3VdbeAddOp2(v, OP_SCopy,
- regResult + pSO->a[i].u.x.iOrderByCol - 1,
- r2+i);
- }
- sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey);
- sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1);
- if( addrTest ) sqlite3VdbeJumpHere(v, addrTest);
- sqlite3ReleaseTempReg(pParse, r1);
- sqlite3ReleaseTempRange(pParse, r2, nKey+2);
- break;
- }
-#endif /* SQLITE_OMIT_CTE */
-
-
-
#if !defined(SQLITE_OMIT_TRIGGER)
/* Discard the results. This is used for SELECT statements inside
** the body of a TRIGGER. The purpose of such selects is to call
@@ -109622,8 +100261,8 @@ static void selectInnerLoop(
** there is a sorter, in which case the sorter has already limited
** the output for us.
*/
- if( pSort==0 && p->iLimit ){
- sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v);
+ if( pOrderBy==0 && p->iLimit ){
+ sqlite3VdbeAddOp3(v, OP_IfZero, p->iLimit, iBreak, -1);
}
}
@@ -109689,16 +100328,11 @@ SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; }
** then the KeyInfo structure is appropriate for initializing a virtual
** index to implement a DISTINCT test.
**
-** Space to hold the KeyInfo structure is obtained from malloc. The calling
+** Space to hold the KeyInfo structure is obtained from malloc. The calling
** function is responsible for seeing that this structure is eventually
** freed.
*/
-static KeyInfo *keyInfoFromExprList(
- Parse *pParse, /* Parsing context */
- ExprList *pList, /* Form the KeyInfo object from this ExprList */
- int iStart, /* Begin with this column of pList */
- int nExtra /* Add this many extra columns to the end */
-){
+static KeyInfo *keyInfoFromExprList(Parse *pParse, ExprList *pList){
int nExpr;
KeyInfo *pInfo;
struct ExprList_item *pItem;
@@ -109706,20 +100340,21 @@ static KeyInfo *keyInfoFromExprList(
int i;
nExpr = pList->nExpr;
- pInfo = sqlite3KeyInfoAlloc(db, nExpr-iStart, nExtra+1);
+ pInfo = sqlite3KeyInfoAlloc(db, nExpr, 1);
if( pInfo ){
assert( sqlite3KeyInfoIsWriteable(pInfo) );
- for(i=iStart, pItem=pList->a+iStart; i<nExpr; i++, pItem++){
+ for(i=0, pItem=pList->a; i<nExpr; i++, pItem++){
CollSeq *pColl;
pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
if( !pColl ) pColl = db->pDfltColl;
- pInfo->aColl[i-iStart] = pColl;
- pInfo->aSortOrder[i-iStart] = pItem->sortOrder;
+ pInfo->aColl[i] = pColl;
+ pInfo->aSortOrder[i] = pItem->sortOrder;
}
}
return pInfo;
}
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
/*
** Name of the connection operator, used for error messages.
*/
@@ -109733,6 +100368,7 @@ static const char *selectOpName(int id){
}
return z;
}
+#endif /* SQLITE_OMIT_COMPOUND_SELECT */
#ifndef SQLITE_OMIT_EXPLAIN
/*
@@ -109814,71 +100450,51 @@ static void explainComposite(
static void generateSortTail(
Parse *pParse, /* Parsing context */
Select *p, /* The SELECT statement */
- SortCtx *pSort, /* Information on the ORDER BY clause */
+ Vdbe *v, /* Generate code into this VDBE */
int nColumn, /* Number of columns of data */
SelectDest *pDest /* Write the sorted results here */
){
- Vdbe *v = pParse->pVdbe; /* The prepared statement */
int addrBreak = sqlite3VdbeMakeLabel(v); /* Jump here to exit loop */
int addrContinue = sqlite3VdbeMakeLabel(v); /* Jump here for next cycle */
int addr;
- int addrOnce = 0;
int iTab;
- ExprList *pOrderBy = pSort->pOrderBy;
+ int pseudoTab = 0;
+ ExprList *pOrderBy = p->pOrderBy;
+
int eDest = pDest->eDest;
int iParm = pDest->iSDParm;
+
int regRow;
int regRowid;
- int nKey;
- int iSortTab; /* Sorter cursor to read from */
- int nSortData; /* Trailing values to read from sorter */
- int i;
- int bSeq; /* True if sorter record includes seq. no. */
-#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
- struct ExprList_item *aOutEx = p->pEList->a;
-#endif
- if( pSort->labelBkOut ){
- sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
- sqlite3VdbeGoto(v, addrBreak);
- sqlite3VdbeResolveLabel(v, pSort->labelBkOut);
- }
- iTab = pSort->iECursor;
+ iTab = pOrderBy->iECursor;
+ regRow = sqlite3GetTempReg(pParse);
if( eDest==SRT_Output || eDest==SRT_Coroutine ){
+ pseudoTab = pParse->nTab++;
+ sqlite3VdbeAddOp3(v, OP_OpenPseudo, pseudoTab, regRow, nColumn);
regRowid = 0;
- regRow = pDest->iSdst;
- nSortData = nColumn;
}else{
regRowid = sqlite3GetTempReg(pParse);
- regRow = sqlite3GetTempReg(pParse);
- nSortData = 1;
}
- nKey = pOrderBy->nExpr - pSort->nOBSat;
- if( pSort->sortFlags & SORTFLAG_UseSorter ){
+ if( p->selFlags & SF_UseSorter ){
int regSortOut = ++pParse->nMem;
- iSortTab = pParse->nTab++;
- if( pSort->labelBkOut ){
- addrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v);
- }
- sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut, nKey+1+nSortData);
- if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
+ int ptab2 = pParse->nTab++;
+ sqlite3VdbeAddOp3(v, OP_OpenPseudo, ptab2, regSortOut, pOrderBy->nExpr+2);
addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak);
- VdbeCoverage(v);
- codeOffset(v, p->iOffset, addrContinue);
- sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab);
- bSeq = 0;
+ codeOffset(v, p, addrContinue);
+ sqlite3VdbeAddOp2(v, OP_SorterData, iTab, regSortOut);
+ sqlite3VdbeAddOp3(v, OP_Column, ptab2, pOrderBy->nExpr+1, regRow);
+ sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE);
}else{
- addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v);
- codeOffset(v, p->iOffset, addrContinue);
- iSortTab = iTab;
- bSeq = 1;
- }
- for(i=0; i<nSortData; i++){
- sqlite3VdbeAddOp3(v, OP_Column, iSortTab, nKey+bSeq+i, regRow+i);
- VdbeComment((v, "%s", aOutEx[i].zName ? aOutEx[i].zName : aOutEx[i].zSpan));
+ addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak);
+ codeOffset(v, p, addrContinue);
+ sqlite3VdbeAddOp3(v, OP_Column, iTab, pOrderBy->nExpr+1, regRow);
}
switch( eDest ){
+ case SRT_Table:
case SRT_EphemTab: {
+ testcase( eDest==SRT_Table );
+ testcase( eDest==SRT_EphemTab );
sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid);
sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid);
sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
@@ -109901,9 +100517,17 @@ static void generateSortTail(
}
#endif
default: {
+ int i;
assert( eDest==SRT_Output || eDest==SRT_Coroutine );
testcase( eDest==SRT_Output );
testcase( eDest==SRT_Coroutine );
+ for(i=0; i<nColumn; i++){
+ assert( regRow!=pDest->iSdst+i );
+ sqlite3VdbeAddOp3(v, OP_Column, pseudoTab, i, pDest->iSdst+i);
+ if( i==0 ){
+ sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE);
+ }
+ }
if( eDest==SRT_Output ){
sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn);
sqlite3ExprCacheAffinityChange(pParse, pDest->iSdst, nColumn);
@@ -109913,20 +100537,21 @@ static void generateSortTail(
break;
}
}
- if( regRowid ){
- sqlite3ReleaseTempReg(pParse, regRow);
- sqlite3ReleaseTempReg(pParse, regRowid);
- }
+ sqlite3ReleaseTempReg(pParse, regRow);
+ sqlite3ReleaseTempReg(pParse, regRowid);
+
/* The bottom of the loop
*/
sqlite3VdbeResolveLabel(v, addrContinue);
- if( pSort->sortFlags & SORTFLAG_UseSorter ){
- sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v);
+ if( p->selFlags & SF_UseSorter ){
+ sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr);
}else{
- sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iTab, addr);
}
- if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn);
sqlite3VdbeResolveLabel(v, addrBreak);
+ if( eDest==SRT_Output || eDest==SRT_Coroutine ){
+ sqlite3VdbeAddOp2(v, OP_Close, pseudoTab, 0);
+ }
}
/*
@@ -109955,27 +100580,28 @@ static void generateSortTail(
*/
#ifdef SQLITE_ENABLE_COLUMN_METADATA
# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,C,D,E,F)
-#else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */
-# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,F)
-#endif
static const char *columnTypeImpl(
NameContext *pNC,
Expr *pExpr,
-#ifdef SQLITE_ENABLE_COLUMN_METADATA
const char **pzOrigDb,
const char **pzOrigTab,
const char **pzOrigCol,
-#endif
u8 *pEstWidth
){
- char const *zType = 0;
- int j;
- u8 estWidth = 1;
-#ifdef SQLITE_ENABLE_COLUMN_METADATA
char const *zOrigDb = 0;
char const *zOrigTab = 0;
char const *zOrigCol = 0;
-#endif
+#else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */
+# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,F)
+static const char *columnTypeImpl(
+ NameContext *pNC,
+ Expr *pExpr,
+ u8 *pEstWidth
+){
+#endif /* !defined(SQLITE_ENABLE_COLUMN_METADATA) */
+ char const *zType = 0;
+ int j;
+ u8 estWidth = 1;
if( NEVER(pExpr==0) || pNC->pSrcList==0 ) return 0;
switch( pExpr->op ){
@@ -110032,9 +100658,6 @@ static const char *columnTypeImpl(
/* If iCol is less than zero, then the expression requests the
** rowid of the sub-select or view. This expression is legal (see
** test case misc2.2.2) - it always evaluates to NULL.
- **
- ** The ALWAYS() is because iCol>=pS->pEList->nExpr will have been
- ** caught already by name resolution.
*/
NameContext sNC;
Expr *p = pS->pEList->a[iCol].pExpr;
@@ -110043,7 +100666,7 @@ static const char *columnTypeImpl(
sNC.pParse = pNC->pParse;
zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol, &estWidth);
}
- }else if( pTab->pSchema ){
+ }else if( ALWAYS(pTab->pSchema) ){
/* A real table */
assert( !pS );
if( iCol<0 ) iCol = pTab->iPKey;
@@ -110204,16 +100827,15 @@ static void generateColumnNames(
sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT);
}
}else{
- const char *z = pEList->a[i].zSpan;
- z = z==0 ? sqlite3MPrintf(db, "column%d", i+1) : sqlite3DbStrDup(db, z);
- sqlite3VdbeSetColName(v, i, COLNAME_NAME, z, SQLITE_DYNAMIC);
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME,
+ sqlite3DbStrDup(db, pEList->a[i].zSpan), SQLITE_DYNAMIC);
}
}
generateColumnTypes(pParse, pTabList, pEList);
}
/*
-** Given an expression list (which is really the list of expressions
+** Given an expression list (which is really the list of expressions
** that form the result set of a SELECT statement) compute appropriate
** column names for a table that would hold the expression list.
**
@@ -110225,7 +100847,7 @@ static void generateColumnNames(
** Return SQLITE_OK on success. If a memory allocation error occurs,
** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM.
*/
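A quick way to see the naming rules described above from the public API is to build a table from a result set and list its columns. The sketch below is illustrative only (table and column names are made up) and uses only documented sqlite3_* entry points; duplicate names come back with the ":N" suffix mentioned further down in this routine.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  /* CREATE TABLE ... AS SELECT derives the new table's column names from
  ** the result-set expression list, as described in the comment above. */
  sqlite3_exec(db, "CREATE TABLE t1(a,b);"
                   "CREATE TABLE t2 AS SELECT a, a, b+1 FROM t1;", 0, 0, 0);
  sqlite3_prepare_v2(db, "PRAGMA table_info(t2)", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Column 1 of table_info() is the derived column name */
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}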
-SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
+static int selectColumnsFromExprList(
Parse *pParse, /* Parsing context */
ExprList *pEList, /* Expr list from which to derive column names */
i16 *pnCol, /* Write the number of columns here */
@@ -110286,7 +100908,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
}
/* Make sure the column name is unique. If the name is not unique,
- ** append an integer to the name so that it becomes unique.
+ ** append an integer to the name so that it becomes unique.
*/
nName = sqlite3Strlen30(zName);
for(j=cnt=0; j<i; j++){
@@ -110294,7 +100916,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
char *zNewName;
int k;
for(k=nName-1; k>1 && sqlite3Isdigit(zName[k]); k--){}
- if( k>=0 && zName[k]==':' ) nName = k;
+ if( zName[k]==':' ) nName = k;
zName[nName] = 0;
zNewName = sqlite3MPrintf(db, "%s:%d", zName, ++cnt);
sqlite3DbFree(db, zName);
@@ -110351,15 +100973,12 @@ static void selectAddColumnTypeAndCollation(
a = pSelect->pEList->a;
for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
p = a[i].pExpr;
- if( pCol->zType==0 ){
- pCol->zType = sqlite3DbStrDup(db,
- columnType(&sNC, p,0,0,0, &pCol->szEst));
- }
+ pCol->zType = sqlite3DbStrDup(db, columnType(&sNC, p,0,0,0, &pCol->szEst));
szAll += pCol->szEst;
pCol->affinity = sqlite3ExprAffinity(p);
- if( pCol->affinity==0 ) pCol->affinity = SQLITE_AFF_BLOB;
+ if( pCol->affinity==0 ) pCol->affinity = SQLITE_AFF_NONE;
pColl = sqlite3ExprCollSeq(pParse, p);
- if( pColl && pCol->zColl==0 ){
+ if( pColl ){
pCol->zColl = sqlite3DbStrDup(db, pColl->zName);
}
}
@@ -110391,8 +101010,8 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){
assert( db->lookaside.bEnabled==0 );
pTab->nRef = 1;
pTab->zName = 0;
- pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
- sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
+ pTab->nRowEst = 1048576;
+ selectColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
selectAddColumnTypeAndCollation(pParse, pTab, pSelect);
pTab->iPKey = -1;
if( db->mallocFailed ){
@@ -110409,14 +101028,12 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){
SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){
Vdbe *v = pParse->pVdbe;
if( v==0 ){
- v = pParse->pVdbe = sqlite3VdbeCreate(pParse);
- if( v ) sqlite3VdbeAddOp0(v, OP_Init);
- if( pParse->pToplevel==0
- && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst)
- ){
- pParse->okConstFactor = 1;
+ v = pParse->pVdbe = sqlite3VdbeCreate(pParse->db);
+#ifndef SQLITE_OMIT_TRACE
+ if( v ){
+ sqlite3VdbeAddOp0(v, OP_Trace);
}
-
+#endif
}
return v;
}
@@ -110433,13 +101050,8 @@ SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){
**
** This routine changes the values of iLimit and iOffset only if
** a limit or offset is defined by pLimit and pOffset. iLimit and
-** iOffset should have been preset to appropriate default values (zero)
-** prior to calling this routine.
-**
-** The iOffset register (if it exists) is initialized to the value
-** of the OFFSET. The iLimit register is initialized to LIMIT. Register
-** iOffset+1 is initialized to LIMIT+OFFSET.
-**
+** iOffset should have been preset to appropriate default values
+** (usually but not always -1) prior to calling this routine.
** Only if pLimit!=0 or pOffset!=0 do the limit registers get
** redefined. The UNION ALL operator uses this property to force
** the reuse of the same limit and offset registers across multiple
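As a concrete illustration of the register layout this comment and the code below set up, a query ending in "LIMIT 10 OFFSET 5" leaves 10 in the iLimit register, 5 in iOffset, and 15 (LIMIT+OFFSET) in iOffset+1. The tiny sketch below only mirrors that arithmetic in plain C; the variable names are illustrative, not the real register numbers.

#include <stdio.h>

int main(void){
  int limit = 10;    /* value the code below stores in register iLimit   */
  int offset = 5;    /* value stored in register iOffset                 */
  int combined;      /* value stored in register iOffset+1               */
  if( offset<0 ) offset = 0;                 /* negative OFFSET treated as zero     */
  combined = (limit>0) ? limit+offset : -1;  /* non-positive LIMIT means no bound   */
  printf("iLimit=%d iOffset=%d iOffset+1=%d\n", limit, offset, combined);
  return 0;
}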
@@ -110449,7 +101061,7 @@ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){
Vdbe *v = 0;
int iLimit = 0;
int iOffset;
- int n;
+ int addr1, n;
if( p->iLimit ) return;
/*
@@ -110463,31 +101075,35 @@ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){
if( p->pLimit ){
p->iLimit = iLimit = ++pParse->nMem;
v = sqlite3GetVdbe(pParse);
- assert( v!=0 );
+ if( NEVER(v==0) ) return; /* VDBE should have already been allocated */
if( sqlite3ExprIsInteger(p->pLimit, &n) ){
sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit);
VdbeComment((v, "LIMIT counter"));
if( n==0 ){
- sqlite3VdbeGoto(v, iBreak);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak);
}else if( n>=0 && p->nSelectRow>(u64)n ){
p->nSelectRow = n;
}
}else{
sqlite3ExprCode(pParse, p->pLimit, iLimit);
- sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit);
VdbeComment((v, "LIMIT counter"));
- sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, iBreak); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IfZero, iLimit, iBreak);
}
if( p->pOffset ){
p->iOffset = iOffset = ++pParse->nMem;
pParse->nMem++; /* Allocate an extra register for limit+offset */
sqlite3ExprCode(pParse, p->pOffset, iOffset);
- sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset);
VdbeComment((v, "OFFSET counter"));
- sqlite3VdbeAddOp3(v, OP_SetIfNotPos, iOffset, iOffset, 0);
+ addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iOffset);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, iOffset);
+ sqlite3VdbeJumpHere(v, addr1);
sqlite3VdbeAddOp3(v, OP_Add, iLimit, iOffset, iOffset+1);
VdbeComment((v, "LIMIT+OFFSET"));
- sqlite3VdbeAddOp3(v, OP_SetIfNotPos, iLimit, iOffset+1, -1);
+ addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iLimit);
+ sqlite3VdbeAddOp2(v, OP_Integer, -1, iOffset+1);
+ sqlite3VdbeJumpHere(v, addr1);
}
}
}
@@ -110509,271 +101125,22 @@ static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){
pRet = 0;
}
assert( iCol>=0 );
- /* iCol must be less than p->pEList->nExpr. Otherwise an error would
- ** have been thrown during name resolution and we would not have gotten
- ** this far */
- if( pRet==0 && ALWAYS(iCol<p->pEList->nExpr) ){
+ if( pRet==0 && iCol<p->pEList->nExpr ){
pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr);
}
return pRet;
}
+#endif /* SQLITE_OMIT_COMPOUND_SELECT */
-/*
-** The select statement passed as the second parameter is a compound SELECT
-** with an ORDER BY clause. This function allocates and returns a KeyInfo
-** structure suitable for implementing the ORDER BY.
-**
-** Space to hold the KeyInfo structure is obtained from malloc. The calling
-** function is responsible for ensuring that this structure is eventually
-** freed.
-*/
-static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){
- ExprList *pOrderBy = p->pOrderBy;
- int nOrderBy = p->pOrderBy->nExpr;
- sqlite3 *db = pParse->db;
- KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1);
- if( pRet ){
- int i;
- for(i=0; i<nOrderBy; i++){
- struct ExprList_item *pItem = &pOrderBy->a[i];
- Expr *pTerm = pItem->pExpr;
- CollSeq *pColl;
-
- if( pTerm->flags & EP_Collate ){
- pColl = sqlite3ExprCollSeq(pParse, pTerm);
- }else{
- pColl = multiSelectCollSeq(pParse, p, pItem->u.x.iOrderByCol-1);
- if( pColl==0 ) pColl = db->pDfltColl;
- pOrderBy->a[i].pExpr =
- sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName);
- }
- assert( sqlite3KeyInfoIsWriteable(pRet) );
- pRet->aColl[i] = pColl;
- pRet->aSortOrder[i] = pOrderBy->a[i].sortOrder;
- }
- }
-
- return pRet;
-}
-
-#ifndef SQLITE_OMIT_CTE
-/*
-** This routine generates VDBE code to compute the content of a WITH RECURSIVE
-** query of the form:
-**
-** <recursive-table> AS (<setup-query> UNION [ALL] <recursive-query>)
-** \___________/ \_______________/
-** p->pPrior p
-**
-**
-** There is exactly one reference to the recursive-table in the FROM clause
-** of recursive-query, marked with the SrcList->a[].fg.isRecursive flag.
-**
-** The setup-query runs once to generate an initial set of rows that go
-** into a Queue table. Rows are extracted from the Queue table one by
-** one. Each row extracted from Queue is output to pDest. Then the single
-** extracted row (now in the iCurrent table) becomes the content of the
-** recursive-table for a recursive-query run. The output of the recursive-query
-** is added back into the Queue table. Then another row is extracted from Queue
-** and the iteration continues until the Queue table is empty.
-**
-** If the compound query operator is UNION then no duplicate rows are ever
-** inserted into the Queue table. The iDistinct table keeps a copy of all rows
-** that have ever been inserted into Queue and causes duplicates to be
-** discarded. If the operator is UNION ALL, then duplicates are allowed.
-**
-** If the query has an ORDER BY, then entries in the Queue table are kept in
-** ORDER BY order and the first entry is extracted for each cycle. Without
-** an ORDER BY, the Queue table is just a FIFO.
-**
-** If a LIMIT clause is provided, then the iteration stops after LIMIT rows
-** have been output to pDest. A LIMIT of zero means to output no rows and a
-** negative LIMIT means to output all rows. If there is also an OFFSET clause
-** with a positive value, then the first OFFSET outputs are discarded rather
-** than being sent to pDest. The LIMIT count does not begin until after OFFSET
-** rows have been skipped.
-*/
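The routine being removed below implemented exactly the evaluation scheme this comment describes. A minimal example of the kind of statement it handled, run through the public C API (names are illustrative), is shown here: the first SELECT seeds the Queue, the second keeps refilling it, and the LIMIT ends the iteration.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  const char *zSql =
    "WITH RECURSIVE cnt(x) AS ("
    "  SELECT 1 "                     /* setup-query: seeds the Queue        */
    "  UNION ALL "
    "  SELECT x+1 FROM cnt "          /* recursive-query: refills the Queue  */
    ") SELECT x FROM cnt LIMIT 5";    /* LIMIT stops the otherwise endless loop */
  sqlite3_open(":memory:", &db);
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%d\n", sqlite3_column_int(pStmt, 0));  /* prints 1 through 5 */
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_close(db);
  return 0;
}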
-static void generateWithRecursiveQuery(
- Parse *pParse, /* Parsing context */
- Select *p, /* The recursive SELECT to be coded */
- SelectDest *pDest /* What to do with query results */
-){
- SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */
- int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */
- Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
- Select *pSetup = p->pPrior; /* The setup query */
- int addrTop; /* Top of the loop */
- int addrCont, addrBreak; /* CONTINUE and BREAK addresses */
- int iCurrent = 0; /* The Current table */
- int regCurrent; /* Register holding Current table */
- int iQueue; /* The Queue table */
- int iDistinct = 0; /* To ensure unique results if UNION */
- int eDest = SRT_Fifo; /* How to write to Queue */
- SelectDest destQueue; /* SelectDest targetting the Queue table */
- int i; /* Loop counter */
- int rc; /* Result code */
- ExprList *pOrderBy; /* The ORDER BY clause */
- Expr *pLimit, *pOffset; /* Saved LIMIT and OFFSET */
- int regLimit, regOffset; /* Registers used by LIMIT and OFFSET */
-
- /* Obtain authorization to do a recursive query */
- if( sqlite3AuthCheck(pParse, SQLITE_RECURSIVE, 0, 0, 0) ) return;
-
- /* Process the LIMIT and OFFSET clauses, if they exist */
- addrBreak = sqlite3VdbeMakeLabel(v);
- computeLimitRegisters(pParse, p, addrBreak);
- pLimit = p->pLimit;
- pOffset = p->pOffset;
- regLimit = p->iLimit;
- regOffset = p->iOffset;
- p->pLimit = p->pOffset = 0;
- p->iLimit = p->iOffset = 0;
- pOrderBy = p->pOrderBy;
-
- /* Locate the cursor number of the Current table */
- for(i=0; ALWAYS(i<pSrc->nSrc); i++){
- if( pSrc->a[i].fg.isRecursive ){
- iCurrent = pSrc->a[i].iCursor;
- break;
- }
- }
-
- /* Allocate cursors numbers for Queue and Distinct. The cursor number for
- ** the Distinct table must be exactly one greater than Queue in order
- ** for the SRT_DistFifo and SRT_DistQueue destinations to work. */
- iQueue = pParse->nTab++;
- if( p->op==TK_UNION ){
- eDest = pOrderBy ? SRT_DistQueue : SRT_DistFifo;
- iDistinct = pParse->nTab++;
- }else{
- eDest = pOrderBy ? SRT_Queue : SRT_Fifo;
- }
- sqlite3SelectDestInit(&destQueue, eDest, iQueue);
-
- /* Allocate cursors for Current, Queue, and Distinct. */
- regCurrent = ++pParse->nMem;
- sqlite3VdbeAddOp3(v, OP_OpenPseudo, iCurrent, regCurrent, nCol);
- if( pOrderBy ){
- KeyInfo *pKeyInfo = multiSelectOrderByKeyInfo(pParse, p, 1);
- sqlite3VdbeAddOp4(v, OP_OpenEphemeral, iQueue, pOrderBy->nExpr+2, 0,
- (char*)pKeyInfo, P4_KEYINFO);
- destQueue.pOrderBy = pOrderBy;
- }else{
- sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iQueue, nCol);
- }
- VdbeComment((v, "Queue table"));
- if( iDistinct ){
- p->addrOpenEphm[0] = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iDistinct, 0);
- p->selFlags |= SF_UsesEphemeral;
- }
-
- /* Detach the ORDER BY clause from the compound SELECT */
- p->pOrderBy = 0;
-
- /* Store the results of the setup-query in Queue. */
- pSetup->pNext = 0;
- rc = sqlite3Select(pParse, pSetup, &destQueue);
- pSetup->pNext = p;
- if( rc ) goto end_of_recursive_query;
-
- /* Find the next row in the Queue and output that row */
- addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iQueue, addrBreak); VdbeCoverage(v);
-
- /* Transfer the next row in Queue over to Current */
- sqlite3VdbeAddOp1(v, OP_NullRow, iCurrent); /* To reset column cache */
- if( pOrderBy ){
- sqlite3VdbeAddOp3(v, OP_Column, iQueue, pOrderBy->nExpr+1, regCurrent);
- }else{
- sqlite3VdbeAddOp2(v, OP_RowData, iQueue, regCurrent);
- }
- sqlite3VdbeAddOp1(v, OP_Delete, iQueue);
-
- /* Output the single row in Current */
- addrCont = sqlite3VdbeMakeLabel(v);
- codeOffset(v, regOffset, addrCont);
- selectInnerLoop(pParse, p, p->pEList, iCurrent,
- 0, 0, pDest, addrCont, addrBreak);
- if( regLimit ){
- sqlite3VdbeAddOp2(v, OP_DecrJumpZero, regLimit, addrBreak);
- VdbeCoverage(v);
- }
- sqlite3VdbeResolveLabel(v, addrCont);
-
- /* Execute the recursive SELECT taking the single row in Current as
- ** the value for the recursive-table. Store the results in the Queue.
- */
- if( p->selFlags & SF_Aggregate ){
- sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
- }else{
- p->pPrior = 0;
- sqlite3Select(pParse, p, &destQueue);
- assert( p->pPrior==0 );
- p->pPrior = pSetup;
- }
-
- /* Keep running the loop until the Queue is empty */
- sqlite3VdbeGoto(v, addrTop);
- sqlite3VdbeResolveLabel(v, addrBreak);
-
-end_of_recursive_query:
- sqlite3ExprListDelete(pParse->db, p->pOrderBy);
- p->pOrderBy = pOrderBy;
- p->pLimit = pLimit;
- p->pOffset = pOffset;
- return;
-}
-#endif /* SQLITE_OMIT_CTE */
-
-/* Forward references */
+/* Forward reference */
static int multiSelectOrderBy(
Parse *pParse, /* Parsing context */
Select *p, /* The right-most of SELECTs to be coded */
SelectDest *pDest /* What to do with query results */
);
-/*
-** Handle the special case of a compound-select that originates from a
-** VALUES clause. By handling this as a special case, we avoid deep
-** recursion, and thus do not need to enforce the SQLITE_LIMIT_COMPOUND_SELECT
-** on a VALUES clause.
-**
-** Because the Select object originates from a VALUES clause:
-** (1) It has no LIMIT or OFFSET
-** (2) All terms are UNION ALL
-** (3) There is no ORDER BY clause
-*/
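For reference, the special case removed above covers nothing more exotic than a bare multi-row VALUES statement. A short sketch of one, run through sqlite3_exec() (the callback and names are illustrative):

#include <stdio.h>
#include <sqlite3.h>

static int printRow(void *pArg, int nCol, char **azVal, char **azCol){
  printf("%s\n", azVal[0]);   /* one value per row: 1, 2, 3 */
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  /* Parsed as a chain of UNION ALL terms, one per parenthesized row */
  sqlite3_exec(db, "VALUES (1),(2),(3)", printRow, 0, 0);
  sqlite3_close(db);
  return 0;
}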
-static int multiSelectValues(
- Parse *pParse, /* Parsing context */
- Select *p, /* The right-most of SELECTs to be coded */
- SelectDest *pDest /* What to do with query results */
-){
- Select *pPrior;
- int nRow = 1;
- int rc = 0;
- assert( p->selFlags & SF_MultiValue );
- do{
- assert( p->selFlags & SF_Values );
- assert( p->op==TK_ALL || (p->op==TK_SELECT && p->pPrior==0) );
- assert( p->pLimit==0 );
- assert( p->pOffset==0 );
- assert( p->pNext==0 || p->pEList->nExpr==p->pNext->pEList->nExpr );
- if( p->pPrior==0 ) break;
- assert( p->pPrior->pNext==p );
- p = p->pPrior;
- nRow++;
- }while(1);
- while( p ){
- pPrior = p->pPrior;
- p->pPrior = 0;
- rc = sqlite3Select(pParse, p, pDest);
- p->pPrior = pPrior;
- if( rc ) break;
- p->nSelectRow = nRow;
- p = p->pNext;
- }
- return rc;
-}
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
/*
** This routine is called to process a compound query form from
** two or more separate queries using UNION, UNION ALL, EXCEPT, or
@@ -110817,17 +101184,18 @@ static int multiSelect(
Select *pDelete = 0; /* Chain of simple selects to delete */
sqlite3 *db; /* Database connection */
#ifndef SQLITE_OMIT_EXPLAIN
- int iSub1 = 0; /* EQP id of left-hand query */
- int iSub2 = 0; /* EQP id of right-hand query */
+ int iSub1; /* EQP id of left-hand query */
+ int iSub2; /* EQP id of right-hand query */
#endif
/* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only
** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT.
*/
assert( p && p->pPrior ); /* Calling function guarantees this much */
- assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION );
db = pParse->db;
pPrior = p->pPrior;
+ assert( pPrior->pRightmost!=pPrior );
+ assert( pPrior->pRightmost==p->pRightmost );
dest = *pDest;
if( pPrior->pOrderBy ){
sqlite3ErrorMsg(pParse,"ORDER BY clause should come after %s not before",
@@ -110854,30 +101222,26 @@ static int multiSelect(
dest.eDest = SRT_Table;
}
- /* Special handling for a compound-select that originates as a VALUES clause.
- */
- if( p->selFlags & SF_MultiValue ){
- rc = multiSelectValues(pParse, p, &dest);
- goto multi_select_end;
- }
-
/* Make sure all SELECTs in the statement have the same number of elements
** in their result sets.
*/
assert( p->pEList && pPrior->pEList );
- assert( p->pEList->nExpr==pPrior->pEList->nExpr );
-
-#ifndef SQLITE_OMIT_CTE
- if( p->selFlags & SF_Recursive ){
- generateWithRecursiveQuery(pParse, p, &dest);
- }else
-#endif
+ if( p->pEList->nExpr!=pPrior->pEList->nExpr ){
+ if( p->selFlags & SF_Values ){
+ sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms");
+ }else{
+ sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s"
+ " do not have the same number of result columns", selectOpName(p->op));
+ }
+ rc = 1;
+ goto multi_select_end;
+ }
/* Compound SELECTs that have an ORDER BY clause are handled separately.
*/
if( p->pOrderBy ){
return multiSelectOrderBy(pParse, p, pDest);
- }else
+ }
/* Generate code for the left and right SELECT statements.
*/
@@ -110901,13 +101265,8 @@ static int multiSelect(
p->iLimit = pPrior->iLimit;
p->iOffset = pPrior->iOffset;
if( p->iLimit ){
- addr = sqlite3VdbeAddOp1(v, OP_IfNot, p->iLimit); VdbeCoverage(v);
+ addr = sqlite3VdbeAddOp1(v, OP_IfZero, p->iLimit);
VdbeComment((v, "Jump ahead if LIMIT reached"));
- if( p->iOffset ){
- sqlite3VdbeAddOp3(v, OP_SetIfNotPos, p->iOffset, p->iOffset, 0);
- sqlite3VdbeAddOp3(v, OP_Add, p->iLimit, p->iOffset, p->iOffset+1);
- sqlite3VdbeAddOp3(v, OP_SetIfNotPos, p->iLimit, p->iOffset+1, -1);
- }
}
explainSetInteger(iSub2, pParse->iNextSelectId);
rc = sqlite3Select(pParse, p, &dest);
@@ -110938,10 +101297,12 @@ static int multiSelect(
testcase( p->op==TK_EXCEPT );
testcase( p->op==TK_UNION );
priorOp = SRT_Union;
- if( dest.eDest==priorOp ){
+ if( dest.eDest==priorOp && ALWAYS(!p->pLimit &&!p->pOffset) ){
/* We can reuse a temporary table generated by a SELECT to our
** right.
*/
+ assert( p->pRightmost!=p ); /* Can only happen for leftward elements
+ ** of a 3-way or more compound */
assert( p->pLimit==0 ); /* Not allowed on leftward elements */
assert( p->pOffset==0 ); /* Not allowed on leftward elements */
unionTab = dest.iSDParm;
@@ -110954,7 +101315,7 @@ static int multiSelect(
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0);
assert( p->addrOpenEphm[0] == -1 );
p->addrOpenEphm[0] = addr;
- findRightmost(p)->selFlags |= SF_UsesEphemeral;
+ p->pRightmost->selFlags |= SF_UsesEphemeral;
assert( p->pEList );
}
@@ -111013,12 +101374,12 @@ static int multiSelect(
iBreak = sqlite3VdbeMakeLabel(v);
iCont = sqlite3VdbeMakeLabel(v);
computeLimitRegisters(pParse, p, iBreak);
- sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak);
iStart = sqlite3VdbeCurrentAddr(v);
- selectInnerLoop(pParse, p, p->pEList, unionTab,
+ selectInnerLoop(pParse, p, p->pEList, unionTab, p->pEList->nExpr,
0, 0, &dest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
- sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart);
sqlite3VdbeResolveLabel(v, iBreak);
sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0);
}
@@ -111043,7 +101404,7 @@ static int multiSelect(
addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0);
assert( p->addrOpenEphm[0] == -1 );
p->addrOpenEphm[0] = addr;
- findRightmost(p)->selFlags |= SF_UsesEphemeral;
+ p->pRightmost->selFlags |= SF_UsesEphemeral;
assert( p->pEList );
/* Code the SELECTs to our left into temporary table "tab1".
@@ -111088,15 +101449,15 @@ static int multiSelect(
iBreak = sqlite3VdbeMakeLabel(v);
iCont = sqlite3VdbeMakeLabel(v);
computeLimitRegisters(pParse, p, iBreak);
- sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak);
r1 = sqlite3GetTempReg(pParse);
iStart = sqlite3VdbeAddOp2(v, OP_RowKey, tab1, r1);
- sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v);
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0);
sqlite3ReleaseTempReg(pParse, r1);
- selectInnerLoop(pParse, p, p->pEList, tab1,
+ selectInnerLoop(pParse, p, p->pEList, tab1, p->pEList->nExpr,
0, 0, &dest, iCont, iBreak);
sqlite3VdbeResolveLabel(v, iCont);
- sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart);
sqlite3VdbeResolveLabel(v, iBreak);
sqlite3VdbeAddOp2(v, OP_Close, tab2, 0);
sqlite3VdbeAddOp2(v, OP_Close, tab1, 0);
@@ -111122,7 +101483,7 @@ static int multiSelect(
CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */
int nCol; /* Number of columns in result set */
- assert( p->pNext==0 );
+ assert( p->pRightmost==p );
nCol = p->pEList->nExpr;
pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1);
if( !pKeyInfo ){
@@ -111163,19 +101524,6 @@ multi_select_end:
#endif /* SQLITE_OMIT_COMPOUND_SELECT */
/*
-** Error message for when two or more terms of a compound select have different
-** size result sets.
-*/
-SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){
- if( p->selFlags & SF_Values ){
- sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms");
- }else{
- sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s"
- " do not have the same number of result columns", selectOpName(p->op));
- }
-}
-
-/*
** Code an output subroutine for a coroutine implementation of a
** SELECT statement.
**
@@ -111215,12 +101563,12 @@ static int generateOutputSubroutine(
/* Suppress duplicates for UNION, EXCEPT, and INTERSECT
*/
if( regPrev ){
- int addr1, addr2;
- addr1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); VdbeCoverage(v);
- addr2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst,
+ int j1, j2;
+ j1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev);
+ j2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst,
(char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
- sqlite3VdbeAddOp3(v, OP_Jump, addr2+2, iContinue, addr2+2); VdbeCoverage(v);
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp3(v, OP_Jump, j2+2, iContinue, j2+2);
+ sqlite3VdbeJumpHere(v, j1);
sqlite3VdbeAddOp3(v, OP_Copy, pIn->iSdst, regPrev+1, pIn->nSdst-1);
sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev);
}
@@ -111228,16 +101576,17 @@ static int generateOutputSubroutine(
/* Suppress the first OFFSET entries if there is an OFFSET clause
*/
- codeOffset(v, p->iOffset, iContinue);
+ codeOffset(v, p, iContinue);
- assert( pDest->eDest!=SRT_Exists );
- assert( pDest->eDest!=SRT_Table );
switch( pDest->eDest ){
/* Store the result as data using a unique key.
*/
+ case SRT_Table:
case SRT_EphemTab: {
int r1 = sqlite3GetTempReg(pParse);
int r2 = sqlite3GetTempReg(pParse);
+ testcase( pDest->eDest==SRT_Table );
+ testcase( pDest->eDest==SRT_EphemTab );
sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1);
sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iSDParm, r2);
sqlite3VdbeAddOp3(v, OP_Insert, pDest->iSDParm, r1, r2);
@@ -111254,7 +101603,7 @@ static int generateOutputSubroutine(
*/
case SRT_Set: {
int r1;
- assert( pIn->nSdst==1 || pParse->nErr>0 );
+ assert( pIn->nSdst==1 );
pDest->affSdst =
sqlite3CompareAffinity(p->pEList->a[0].pExpr, pDest->affSdst);
r1 = sqlite3GetTempReg(pParse);
@@ -111265,12 +101614,22 @@ static int generateOutputSubroutine(
break;
}
+#if 0 /* Never occurs on an ORDER BY query */
+ /* If any row exist in the result set, record that fact and abort.
+ */
+ case SRT_Exists: {
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, pDest->iSDParm);
+ /* The LIMIT clause will terminate the loop for us */
+ break;
+ }
+#endif
+
/* If this is a scalar select that is part of an expression, then
** store the results in the appropriate memory cell and break out
** of the scan loop.
*/
case SRT_Mem: {
- assert( pIn->nSdst==1 || pParse->nErr>0 ); testcase( pIn->nSdst!=1 );
+ assert( pIn->nSdst==1 );
sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, 1);
/* The LIMIT clause will jump out of the loop for us */
break;
@@ -111285,7 +101644,7 @@ static int generateOutputSubroutine(
pDest->iSdst = sqlite3GetTempRange(pParse, pIn->nSdst);
pDest->nSdst = pIn->nSdst;
}
- sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pIn->nSdst);
+ sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pDest->nSdst);
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
break;
}
@@ -111309,7 +101668,7 @@ static int generateOutputSubroutine(
/* Jump to the end of the loop if the LIMIT is reached.
*/
if( p->iLimit ){
- sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_IfZero, p->iLimit, iBreak, -1);
}
/* Generate the subroutine return
@@ -111417,7 +101776,9 @@ static int multiSelectOrderBy(
SelectDest destA; /* Destination for coroutine A */
SelectDest destB; /* Destination for coroutine B */
int regAddrA; /* Address register for select-A coroutine */
+ int regEofA; /* Flag to indicate when select-A is complete */
int regAddrB; /* Address register for select-B coroutine */
+ int regEofB; /* Flag to indicate when select-B is complete */
int addrSelectA; /* Address of the select-A coroutine */
int addrSelectB; /* Address of the select-B coroutine */
int regOutA; /* Address register for the output-A subroutine */
@@ -111425,7 +101786,6 @@ static int multiSelectOrderBy(
int addrOutA; /* Address of the output-A subroutine */
int addrOutB = 0; /* Address of the output-B subroutine */
int addrEofA; /* Address of the select-A-exhausted subroutine */
- int addrEofA_noB; /* Alternate addrEofA if B is uninitialized */
int addrEofB; /* Address of the select-B-exhausted subroutine */
int addrAltB; /* Address of the A<B subroutine */
int addrAeqB; /* Address of the A==B subroutine */
@@ -111437,7 +101797,7 @@ static int multiSelectOrderBy(
int savedOffset; /* Saved value of p->iOffset */
int labelCmpr; /* Label for the start of the merge algorithm */
int labelEnd; /* Label for the end of the overall SELECT stmt */
- int addr1; /* Jump instructions that get retargetted */
+ int j1; /* Jump instructions that get retargetted */
int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */
KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */
KeyInfo *pKeyMerge; /* Comparison information for merging rows */
@@ -111501,11 +101861,28 @@ static int multiSelectOrderBy(
if( aPermute ){
struct ExprList_item *pItem;
for(i=0, pItem=pOrderBy->a; i<nOrderBy; i++, pItem++){
- assert( pItem->u.x.iOrderByCol>0 );
- assert( pItem->u.x.iOrderByCol<=p->pEList->nExpr );
+ assert( pItem->u.x.iOrderByCol>0
+ && pItem->u.x.iOrderByCol<=p->pEList->nExpr );
aPermute[i] = pItem->u.x.iOrderByCol - 1;
}
- pKeyMerge = multiSelectOrderByKeyInfo(pParse, p, 1);
+ pKeyMerge = sqlite3KeyInfoAlloc(db, nOrderBy, 1);
+ if( pKeyMerge ){
+ for(i=0; i<nOrderBy; i++){
+ CollSeq *pColl;
+ Expr *pTerm = pOrderBy->a[i].pExpr;
+ if( pTerm->flags & EP_Collate ){
+ pColl = sqlite3ExprCollSeq(pParse, pTerm);
+ }else{
+ pColl = multiSelectCollSeq(pParse, p, aPermute[i]);
+ if( pColl==0 ) pColl = db->pDfltColl;
+ pOrderBy->a[i].pExpr =
+ sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName);
+ }
+ assert( sqlite3KeyInfoIsWriteable(pKeyMerge) );
+ pKeyMerge->aColl[i] = pColl;
+ pKeyMerge->aSortOrder[i] = pOrderBy->a[i].sortOrder;
+ }
+ }
}else{
pKeyMerge = 0;
}
@@ -111540,7 +101917,6 @@ static int multiSelectOrderBy(
/* Separate the left and the right query from one another
*/
p->pPrior = 0;
- pPrior->pNext = 0;
sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER");
if( pPrior->pPrior==0 ){
sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER");
@@ -111563,30 +101939,37 @@ static int multiSelectOrderBy(
p->pOffset = 0;
regAddrA = ++pParse->nMem;
+ regEofA = ++pParse->nMem;
regAddrB = ++pParse->nMem;
+ regEofB = ++pParse->nMem;
regOutA = ++pParse->nMem;
regOutB = ++pParse->nMem;
sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA);
sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB);
+ /* Jump past the various subroutines and coroutines to the main
+ ** merge loop
+ */
+ j1 = sqlite3VdbeAddOp0(v, OP_Goto);
+ addrSelectA = sqlite3VdbeCurrentAddr(v);
+
+
/* Generate a coroutine to evaluate the SELECT statement to the
** left of the compound operator - the "A" select.
*/
- addrSelectA = sqlite3VdbeCurrentAddr(v) + 1;
- addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrA, 0, addrSelectA);
- VdbeComment((v, "left SELECT"));
+ VdbeNoopComment((v, "Begin coroutine for left SELECT"));
pPrior->iLimit = regLimitA;
explainSetInteger(iSub1, pParse->iNextSelectId);
sqlite3Select(pParse, pPrior, &destA);
- sqlite3VdbeAddOp1(v, OP_EndCoroutine, regAddrA);
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, regEofA);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrA);
+ VdbeNoopComment((v, "End coroutine for left SELECT"));
/* Generate a coroutine to evaluate the SELECT statement on
** the right - the "B" select
*/
- addrSelectB = sqlite3VdbeCurrentAddr(v) + 1;
- addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrB, 0, addrSelectB);
- VdbeComment((v, "right SELECT"));
+ addrSelectB = sqlite3VdbeCurrentAddr(v);
+ VdbeNoopComment((v, "Begin coroutine for right SELECT"));
savedLimit = p->iLimit;
savedOffset = p->iOffset;
p->iLimit = regLimitB;
@@ -111595,7 +101978,9 @@ static int multiSelectOrderBy(
sqlite3Select(pParse, p, &destB);
p->iLimit = savedLimit;
p->iOffset = savedOffset;
- sqlite3VdbeAddOp1(v, OP_EndCoroutine, regAddrB);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, regEofB);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrB);
+ VdbeNoopComment((v, "End coroutine for right SELECT"));
/* Generate a subroutine that outputs the current row of the A
** select as the next output row of the compound select.
@@ -111619,14 +102004,14 @@ static int multiSelectOrderBy(
/* Generate a subroutine to run when the results from select A
** are exhausted and only data in select B remains.
*/
+ VdbeNoopComment((v, "eof-A subroutine"));
if( op==TK_EXCEPT || op==TK_INTERSECT ){
- addrEofA_noB = addrEofA = labelEnd;
+ addrEofA = sqlite3VdbeAddOp2(v, OP_Goto, 0, labelEnd);
}else{
- VdbeNoopComment((v, "eof-A subroutine"));
- addrEofA = sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
- addrEofA_noB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, labelEnd);
- VdbeCoverage(v);
- sqlite3VdbeGoto(v, addrEofA);
+ addrEofA = sqlite3VdbeAddOp2(v, OP_If, regEofB, labelEnd);
+ sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrB);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofA);
p->nSelectRow += pPrior->nSelectRow;
}
@@ -111638,17 +102023,19 @@ static int multiSelectOrderBy(
if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow;
}else{
VdbeNoopComment((v, "eof-B subroutine"));
- addrEofB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
- sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, labelEnd); VdbeCoverage(v);
- sqlite3VdbeGoto(v, addrEofB);
+ addrEofB = sqlite3VdbeAddOp2(v, OP_If, regEofA, labelEnd);
+ sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrA);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofB);
}
/* Generate code to handle the case of A<B
*/
VdbeNoopComment((v, "A-lt-B subroutine"));
addrAltB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
- sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
- sqlite3VdbeGoto(v, labelCmpr);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrA);
+ sqlite3VdbeAddOp2(v, OP_If, regEofA, addrEofA);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr);
/* Generate code to handle the case of A==B
*/
@@ -111660,8 +102047,9 @@ static int multiSelectOrderBy(
}else{
VdbeNoopComment((v, "A-eq-B subroutine"));
addrAeqB =
- sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
- sqlite3VdbeGoto(v, labelCmpr);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrA);
+ sqlite3VdbeAddOp2(v, OP_If, regEofA, addrEofA);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr);
}
/* Generate code to handle the case of A>B
@@ -111671,14 +102059,19 @@ static int multiSelectOrderBy(
if( op==TK_ALL || op==TK_UNION ){
sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
}
- sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
- sqlite3VdbeGoto(v, labelCmpr);
+ sqlite3VdbeAddOp1(v, OP_Yield, regAddrB);
+ sqlite3VdbeAddOp2(v, OP_If, regEofB, addrEofB);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr);
/* This code runs once to initialize everything.
*/
- sqlite3VdbeJumpHere(v, addr1);
- sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA_noB); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, j1);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regEofA);
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regEofB);
+ sqlite3VdbeAddOp2(v, OP_Gosub, regAddrA, addrSelectA);
+ sqlite3VdbeAddOp2(v, OP_Gosub, regAddrB, addrSelectB);
+ sqlite3VdbeAddOp2(v, OP_If, regEofA, addrEofA);
+ sqlite3VdbeAddOp2(v, OP_If, regEofB, addrEofB);
/* Implement the main merge loop
*/
@@ -111687,7 +102080,7 @@ static int multiSelectOrderBy(
sqlite3VdbeAddOp4(v, OP_Compare, destA.iSdst, destB.iSdst, nOrderBy,
(char*)pKeyMerge, P4_KEYINFO);
sqlite3VdbeChangeP5(v, OPFLAG_PERMUTE);
- sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB);
/* Jump to this point in order to terminate the query.
*/
@@ -111707,19 +102100,18 @@ static int multiSelectOrderBy(
sqlite3SelectDelete(db, p->pPrior);
}
p->pPrior = pPrior;
- pPrior->pNext = p;
/*** TBD: Insert subroutine calls to close cursors on incomplete
**** subqueries ****/
explainComposite(pParse, p->op, iSub1, iSub2, 0);
- return pParse->nErr!=0;
+ return SQLITE_OK;
}
#endif
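A compound SELECT with an ORDER BY over both arms is what drives the two-coroutine merge generated above. A minimal end-to-end sketch (table names and data are illustrative) follows.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a); CREATE TABLE t2(b);"
                   "INSERT INTO t1 VALUES(3); INSERT INTO t1 VALUES(1);"
                   "INSERT INTO t2 VALUES(2); INSERT INTO t2 VALUES(1);", 0, 0, 0);
  /* Each arm runs as its own coroutine producing rows in ORDER BY order,
  ** and the merge loop interleaves them; UNION also drops the duplicate 1. */
  sqlite3_prepare_v2(db,
      "SELECT a FROM t1 UNION SELECT b FROM t2 ORDER BY 1", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d\n", sqlite3_column_int(pStmt, 0));   /* 1, 2, 3 */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}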
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/* Forward Declarations */
static void substExprList(sqlite3*, ExprList*, int, ExprList*);
-static void substSelect(sqlite3*, Select *, int, ExprList*, int);
+static void substSelect(sqlite3*, Select *, int, ExprList *);
/*
** Scan through the expression pExpr. Replace every reference to
@@ -111756,7 +102148,7 @@ static Expr *substExpr(
pExpr->pLeft = substExpr(db, pExpr->pLeft, iTable, pEList);
pExpr->pRight = substExpr(db, pExpr->pRight, iTable, pEList);
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
- substSelect(db, pExpr->x.pSelect, iTable, pEList, 1);
+ substSelect(db, pExpr->x.pSelect, iTable, pEList);
}else{
substExprList(db, pExpr->x.pList, iTable, pEList);
}
@@ -111779,28 +102171,25 @@ static void substSelect(
sqlite3 *db, /* Report malloc errors here */
Select *p, /* SELECT statement in which to make substitutions */
int iTable, /* Table to be replaced */
- ExprList *pEList, /* Substitute values */
- int doPrior /* Do substitutes on p->pPrior too */
+ ExprList *pEList /* Substitute values */
){
SrcList *pSrc;
struct SrcList_item *pItem;
int i;
if( !p ) return;
- do{
- substExprList(db, p->pEList, iTable, pEList);
- substExprList(db, p->pGroupBy, iTable, pEList);
- substExprList(db, p->pOrderBy, iTable, pEList);
- p->pHaving = substExpr(db, p->pHaving, iTable, pEList);
- p->pWhere = substExpr(db, p->pWhere, iTable, pEList);
- pSrc = p->pSrc;
- assert( pSrc!=0 );
+ substExprList(db, p->pEList, iTable, pEList);
+ substExprList(db, p->pGroupBy, iTable, pEList);
+ substExprList(db, p->pOrderBy, iTable, pEList);
+ p->pHaving = substExpr(db, p->pHaving, iTable, pEList);
+ p->pWhere = substExpr(db, p->pWhere, iTable, pEList);
+ substSelect(db, p->pPrior, iTable, pEList);
+ pSrc = p->pSrc;
+ assert( pSrc ); /* Even for (SELECT 1) we have: pSrc!=0 but pSrc->nSrc==0 */
+ if( ALWAYS(pSrc) ){
for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){
- substSelect(db, pItem->pSelect, iTable, pEList, 1);
- if( pItem->fg.isTabFunc ){
- substExprList(db, pItem->u1.pFuncArg, iTable, pEList);
- }
+ substSelect(db, pItem->pSelect, iTable, pEList);
}
- }while( doPrior && (p = p->pPrior)!=0 );
+ }
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
@@ -111826,7 +102215,7 @@ static void substSelect(
**
** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5
**
-** The code generated for this simplification gives the same result
+** The code generated for this simplification gives the same result
** but only has to scan the data once. And because indices might
** exist on the table t1, a complete scan of the data might be
** avoided.
@@ -111835,10 +102224,7 @@ static void substSelect(
**
** (1) The subquery and the outer query do not both use aggregates.
**
-** (2) The subquery is not an aggregate or (2a) the outer query is not a join
-** and (2b) the outer query does not use subqueries other than the one
-** FROM-clause subquery that is a candidate for flattening. (2b is
-** due to ticket [2f7170d73bf9abf80] from 2015-02-09.)
+** (2) The subquery is not an aggregate or the outer query is not a join.
**
** (3) The subquery is not the right operand of a left outer join
** (Originally ticket #306. Strengthened by ticket #3300)
@@ -111862,10 +102248,8 @@ static void substSelect(
** (9) The subquery does not use LIMIT or the outer query does not use
** aggregates.
**
-** (**) Restriction (10) was removed from the code on 2005-02-05 but we
-** accidently carried the comment forward until 2014-09-15. Original
-** text: "The subquery does not use aggregates or the outer query
-** does not use LIMIT."
+** (10) The subquery does not use aggregates or the outer query does not
+** use LIMIT.
**
** (11) The subquery and the outer query do not both have ORDER BY clauses.
**
@@ -111921,19 +102305,6 @@ static void substSelect(
** (21) The subquery does not use LIMIT or the outer query is not
** DISTINCT. (See ticket [752e1646fc]).
**
-** (22) The subquery is not a recursive CTE.
-**
-** (23) The parent is not a recursive CTE, or the sub-query is not a
-** compound query. This restriction is because transforming the
-** parent to a compound query confuses the code that handles
-** recursive queries in multiSelect().
-**
-** (24) The subquery is not an aggregate that uses the built-in min() or
-** or max() functions. (Without this restriction, a query like:
-** "SELECT x FROM (SELECT max(y), x FROM t1)" would not necessarily
-** return the value X for which Y was maximal.)
-**
-**
** In this routine, the "p" parameter is a pointer to the outer query.
** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query
** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates.
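Whether the flattening described above actually fired for a given statement can be checked from the outside with EXPLAIN QUERY PLAN: a flattened query plans as a single scan of t1 rather than a scan over a materialized subquery. The sketch below only prints the plan rows verbatim, since the exact wording differs between SQLite versions; the query and table names are illustrative.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  int i;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(x,y,z)", 0, 0, 0);
  sqlite3_prepare_v2(db,
      "EXPLAIN QUERY PLAN "
      "SELECT a FROM (SELECT x+y AS a, z FROM t1) WHERE z<100 AND a>5",
      -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    for(i=0; i<sqlite3_column_count(pStmt); i++){
      printf("%s ", (const char*)sqlite3_column_text(pStmt, i));
    }
    printf("\n");
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}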
@@ -111952,7 +102323,7 @@ static int flattenSubquery(
int subqueryIsAgg /* True if the subquery uses aggregate functions */
){
const char *zSavedAuthContext = pParse->zAuthContext;
- Select *pParent; /* Current UNION ALL term of the other query */
+ Select *pParent;
Select *pSub; /* The inner query or "subquery" */
Select *pSub1; /* Pointer to the rightmost select in sub-query */
SrcList *pSrc; /* The FROM clause of the outer query */
@@ -111975,27 +102346,18 @@ static int flattenSubquery(
iParent = pSubitem->iCursor;
pSub = pSubitem->pSelect;
assert( pSub!=0 );
- if( subqueryIsAgg ){
- if( isAgg ) return 0; /* Restriction (1) */
- if( pSrc->nSrc>1 ) return 0; /* Restriction (2a) */
- if( (p->pWhere && ExprHasProperty(p->pWhere,EP_Subquery))
- || (sqlite3ExprListFlags(p->pEList) & EP_Subquery)!=0
- || (sqlite3ExprListFlags(p->pOrderBy) & EP_Subquery)!=0
- ){
- return 0; /* Restriction (2b) */
- }
- }
-
+ if( isAgg && subqueryIsAgg ) return 0; /* Restriction (1) */
+ if( subqueryIsAgg && pSrc->nSrc>1 ) return 0; /* Restriction (2) */
pSubSrc = pSub->pSrc;
assert( pSubSrc );
/* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants,
- ** not arbitrary expressions, we allowed some combining of LIMIT and OFFSET
+ ** not arbitrary expressions, we allowed some combining of LIMIT and OFFSET
** because they could be computed at compile-time. But when LIMIT and OFFSET
** became arbitrary expressions, we were forced to add restrictions (13)
** and (14). */
if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */
if( pSub->pOffset ) return 0; /* Restriction (14) */
- if( (p->selFlags & SF_Compound)!=0 && pSub->pLimit ){
+ if( p->pRightmost && pSub->pLimit ){
return 0; /* Restriction (15) */
}
if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */
@@ -112014,14 +102376,6 @@ static int flattenSubquery(
if( pSub->pLimit && (p->selFlags & SF_Distinct)!=0 ){
return 0; /* Restriction (21) */
}
- testcase( pSub->selFlags & SF_Recursive );
- testcase( pSub->selFlags & SF_MinMaxAgg );
- if( pSub->selFlags & (SF_Recursive|SF_MinMaxAgg) ){
- return 0; /* Restrictions (22) and (24) */
- }
- if( (p->selFlags & SF_Recursive) && pSub->pPrior ){
- return 0; /* Restriction (23) */
- }
/* OBSOLETE COMMENT 1:
** Restriction 3: If the subquery is a join, make sure the subquery is
@@ -112055,7 +102409,7 @@ static int flattenSubquery(
** is fraught with danger. Best to avoid the whole thing. If the
** subquery is the right term of a LEFT JOIN, then do not flatten.
*/
- if( (pSubitem->fg.jointype & JT_OUTER)!=0 ){
+ if( (pSubitem->jointype & JT_OUTER)!=0 ){
return 0;
}
@@ -112075,10 +102429,10 @@ static int flattenSubquery(
testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct );
testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate );
assert( pSub->pSrc!=0 );
- assert( pSub->pEList->nExpr==pSub1->pEList->nExpr );
if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0
|| (pSub1->pPrior && pSub1->op!=TK_ALL)
|| pSub1->pSrc->nSrc<1
+ || pSub->pEList->nExpr!=pSub1->pEList->nExpr
){
return 0;
}
@@ -112095,8 +102449,6 @@ static int flattenSubquery(
}
/***** If we reach this point, flattening is permitted. *****/
- SELECTTRACE(1,pParse,p,("flatten %s.%p from term %d\n",
- pSub->zSelName, pSub, iFrom));
/* Authorize the subquery */
pParse->zAuthContext = pSubitem->zName;
@@ -112149,23 +102501,19 @@ static int flattenSubquery(
p->pLimit = 0;
p->pOffset = 0;
pNew = sqlite3SelectDup(db, p, 0);
- sqlite3SelectSetName(pNew, pSub->zSelName);
p->pOffset = pOffset;
p->pLimit = pLimit;
p->pOrderBy = pOrderBy;
p->pSrc = pSrc;
p->op = TK_ALL;
+ p->pRightmost = 0;
if( pNew==0 ){
- p->pPrior = pPrior;
+ pNew = pPrior;
}else{
pNew->pPrior = pPrior;
- if( pPrior ) pPrior->pNext = pNew;
- pNew->pNext = p;
- p->pPrior = pNew;
- SELECTTRACE(2,pParse,p,
- ("compound-subquery flattener creates %s.%p as peer\n",
- pNew->zSelName, pNew));
+ pNew->pRightmost = 0;
}
+ p->pPrior = pNew;
if( db->mallocFailed ) return 1;
}
@@ -112226,7 +102574,7 @@ static int flattenSubquery(
if( pSrc ){
assert( pParent==p ); /* First time through the loop */
- jointype = pSubitem->fg.jointype;
+ jointype = pSubitem->jointype;
}else{
assert( pParent!=p ); /* 2nd and subsequent times through the loop */
pSrc = pParent->pSrc = sqlite3SrcListAppend(db, 0, 0, 0);
@@ -112247,9 +102595,9 @@ static int flattenSubquery(
**
** The outer query has 3 slots in its FROM clause. One slot of the
** outer query (the middle slot) is used by the subquery. The next
- ** block of code will expand the outer query FROM clause to 4 slots.
- ** The middle slot is expanded to two slots in order to make space
- ** for the two elements in the FROM clause of the subquery.
+ ** block of code will expand the outer query to 4 slots. The middle
+ ** slot is expanded to two slots in order to make space for the
+ ** two elements in the FROM clause of the subquery.
*/
if( nSubSrc>1 ){
pParent->pSrc = pSrc = sqlite3SrcListEnlarge(db, pSrc, nSubSrc-1,iFrom+1);
@@ -112266,7 +102614,7 @@ static int flattenSubquery(
pSrc->a[i+iFrom] = pSubSrc->a[i];
memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i]));
}
- pSrc->a[iFrom].fg.jointype = jointype;
+ pSrc->a[iFrom].jointype = jointype;
/* Now begin substituting subquery result set expressions for
** references to the iParent in the outer query.
@@ -112288,39 +102636,36 @@ static int flattenSubquery(
pList->a[i].zName = zName;
}
}
+ substExprList(db, pParent->pEList, iParent, pSub->pEList);
+ if( isAgg ){
+ substExprList(db, pParent->pGroupBy, iParent, pSub->pEList);
+ pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList);
+ }
if( pSub->pOrderBy ){
- /* At this point, any non-zero iOrderByCol values indicate that the
- ** ORDER BY column expression is identical to the iOrderByCol'th
- ** expression returned by SELECT statement pSub. Since these values
- ** do not necessarily correspond to columns in SELECT statement pParent,
- ** zero them before transfering the ORDER BY clause.
- **
- ** Not doing this may cause an error if a subsequent call to this
- ** function attempts to flatten a compound sub-query into pParent
- ** (the only way this can happen is if the compound sub-query is
- ** currently part of pSub->pSrc). See ticket [d11a6e908f]. */
- ExprList *pOrderBy = pSub->pOrderBy;
- for(i=0; i<pOrderBy->nExpr; i++){
- pOrderBy->a[i].u.x.iOrderByCol = 0;
- }
assert( pParent->pOrderBy==0 );
- assert( pSub->pPrior==0 );
- pParent->pOrderBy = pOrderBy;
+ pParent->pOrderBy = pSub->pOrderBy;
pSub->pOrderBy = 0;
+ }else if( pParent->pOrderBy ){
+ substExprList(db, pParent->pOrderBy, iParent, pSub->pEList);
+ }
+ if( pSub->pWhere ){
+ pWhere = sqlite3ExprDup(db, pSub->pWhere, 0);
+ }else{
+ pWhere = 0;
}
- pWhere = sqlite3ExprDup(db, pSub->pWhere, 0);
if( subqueryIsAgg ){
assert( pParent->pHaving==0 );
pParent->pHaving = pParent->pWhere;
pParent->pWhere = pWhere;
+ pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList);
pParent->pHaving = sqlite3ExprAnd(db, pParent->pHaving,
sqlite3ExprDup(db, pSub->pHaving, 0));
assert( pParent->pGroupBy==0 );
pParent->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy, 0);
}else{
+ pParent->pWhere = substExpr(db, pParent->pWhere, iParent, pSub->pEList);
pParent->pWhere = sqlite3ExprAnd(db, pParent->pWhere, pWhere);
}
- substSelect(db, pParent, iParent, pSub->pEList, 0);
/* The flattened query is distinct if either the inner or the
** outer query is distinct.
@@ -112344,88 +102689,10 @@ static int flattenSubquery(
*/
sqlite3SelectDelete(db, pSub1);
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,("After flattening:\n"));
- sqlite3TreeViewSelect(0, p, 0);
- }
-#endif
-
return 1;
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
-
-
-#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
-/*
-** Make copies of relevant WHERE clause terms of the outer query into
-** the WHERE clause of subquery. Example:
-**
-** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1) WHERE x=5 AND y=10;
-**
-** Transformed into:
-**
-** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1 WHERE a=5 AND c-d=10)
-** WHERE x=5 AND y=10;
-**
-** The hope is that the terms added to the inner query will make it more
-** efficient.
-**
-** Do not attempt this optimization if:
-**
-** (1) The inner query is an aggregate. (In that case, we'd really want
-** to copy the outer WHERE-clause terms onto the HAVING clause of the
-** inner query. But they probably won't help there so do not bother.)
-**
-** (2) The inner query is the recursive part of a common table expression.
-**
-** (3) The inner query has a LIMIT clause (since the changes to the WHERE
-** close would change the meaning of the LIMIT).
-**
-** (4) The inner query is the right operand of a LEFT JOIN. (The caller
-** enforces this restriction since this routine does not have enough
-** information to know.)
-**
-** (5) The WHERE clause expression originates in the ON or USING clause
-** of a LEFT JOIN.
-**
-** Return 0 if no changes are made and non-zero if one or more WHERE clause
-** terms are duplicated into the subquery.
-*/
-static int pushDownWhereTerms(
- sqlite3 *db, /* The database connection (for malloc()) */
- Select *pSubq, /* The subquery whose WHERE clause is to be augmented */
- Expr *pWhere, /* The WHERE clause of the outer query */
- int iCursor /* Cursor number of the subquery */
-){
- Expr *pNew;
- int nChng = 0;
- if( pWhere==0 ) return 0;
- if( (pSubq->selFlags & (SF_Aggregate|SF_Recursive))!=0 ){
- return 0; /* restrictions (1) and (2) */
- }
- if( pSubq->pLimit!=0 ){
- return 0; /* restriction (3) */
- }
- while( pWhere->op==TK_AND ){
- nChng += pushDownWhereTerms(db, pSubq, pWhere->pRight, iCursor);
- pWhere = pWhere->pLeft;
- }
- if( ExprHasProperty(pWhere,EP_FromJoin) ) return 0; /* restriction 5 */
- if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){
- nChng++;
- while( pSubq ){
- pNew = sqlite3ExprDup(db, pWhere, 0);
- pNew = substExpr(db, pNew, iCursor, pSubq->pEList);
- pSubq->pWhere = sqlite3ExprAnd(db, pSubq->pWhere, pNew);
- pSubq = pSubq->pPrior;
- }
- }
- return nChng;
-}
-#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
-
/*
** Based on the contents of the AggInfo structure indicated by the first
** argument, this function checks if the following are true:
@@ -112468,7 +102735,7 @@ static u8 minMaxQuery(AggInfo *pAggInfo, ExprList **ppMinMax){
/*
** The select statement passed as the first argument is an aggregate query.
-** The second argument is the associated aggregate-info object. This
+** The second argument is the associated aggregate-info object. This
** function tests if the SELECT is of the form:
**
** SELECT count(*) FROM <tbl>
@@ -112509,20 +102776,20 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){
** pFrom->pIndex and return SQLITE_OK.
*/
SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){
- if( pFrom->pTab && pFrom->fg.isIndexedBy ){
+ if( pFrom->pTab && pFrom->zIndex ){
Table *pTab = pFrom->pTab;
- char *zIndexedBy = pFrom->u1.zIndexedBy;
+ char *zIndex = pFrom->zIndex;
Index *pIdx;
for(pIdx=pTab->pIndex;
- pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy);
+ pIdx && sqlite3StrICmp(pIdx->zName, zIndex);
pIdx=pIdx->pNext
);
if( !pIdx ){
- sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0);
+ sqlite3ErrorMsg(pParse, "no such index: %s", zIndex, 0);
pParse->checkSchema = 1;
return SQLITE_ERROR;
}
- pFrom->pIBIndex = pIdx;
+ pFrom->pIndex = pIdx;
}
return SQLITE_OK;
}
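From the application side, the lookup above is what makes the following two prepares behave differently: naming an existing index is accepted, while naming a missing one fails at prepare time with the "no such index" message generated above. Table and index names in the sketch are illustrative.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a); CREATE INDEX i1 ON t1(a);", 0, 0, 0);
  /* Existing index named with INDEXED BY: preparation succeeds */
  if( sqlite3_prepare_v2(db,
        "SELECT * FROM t1 INDEXED BY i1 WHERE a=5", -1, &pStmt, 0)==SQLITE_OK ){
    sqlite3_finalize(pStmt);
  }
  /* Unknown index: preparation fails, e.g. "no such index: nosuch" */
  if( sqlite3_prepare_v2(db,
        "SELECT * FROM t1 INDEXED BY nosuch WHERE a=5", -1, &pStmt, 0)!=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));
  }
  sqlite3_close(db);
  return 0;
}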
@@ -112585,210 +102852,11 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){
pNew->pHaving = 0;
pNew->pOrderBy = 0;
p->pPrior = 0;
- p->pNext = 0;
- p->pWith = 0;
- p->selFlags &= ~SF_Compound;
- assert( (p->selFlags & SF_Converted)==0 );
- p->selFlags |= SF_Converted;
- assert( pNew->pPrior!=0 );
- pNew->pPrior->pNext = pNew;
pNew->pLimit = 0;
pNew->pOffset = 0;
return WRC_Continue;
}
-#ifndef SQLITE_OMIT_CTE
-/*
-** Argument pWith (which may be NULL) points to a linked list of nested
-** WITH contexts, from inner to outermost. If the table identified by
-** FROM clause element pItem is really a common-table-expression (CTE)
-** then return a pointer to the CTE definition for that table. Otherwise
-** return NULL.
-**
-** If a non-NULL value is returned, set *ppContext to point to the With
-** object that the returned CTE belongs to.
-*/
-static struct Cte *searchWith(
- With *pWith, /* Current outermost WITH clause */
- struct SrcList_item *pItem, /* FROM clause element to resolve */
- With **ppContext /* OUT: WITH clause return value belongs to */
-){
- const char *zName;
- if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){
- With *p;
- for(p=pWith; p; p=p->pOuter){
- int i;
- for(i=0; i<p->nCte; i++){
- if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){
- *ppContext = p;
- return &p->a[i];
- }
- }
- }
- }
- return 0;
-}
-
-/* The code generator maintains a stack of active WITH clauses
-** with the inner-most WITH clause being at the top of the stack.
-**
-** This routine pushes the WITH clause passed as the second argument
-** onto the top of the stack. If argument bFree is true, then this
-** WITH clause will never be popped from the stack. In this case it
-** should be freed along with the Parse object. In other cases, when
-** bFree==0, the With object will be freed along with the SELECT
-** statement with which it is associated.
-*/
-SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){
- assert( bFree==0 || pParse->pWith==0 );
- if( pWith ){
- pWith->pOuter = pParse->pWith;
- pParse->pWith = pWith;
- pParse->bFreeWith = bFree;
- }
-}
-
-/*
-** This function checks if argument pFrom refers to a CTE declared by
-** a WITH clause on the stack currently maintained by the parser. And,
-** if currently processing a CTE expression, if it is a recursive
-** reference to the current CTE.
-**
-** If pFrom falls into either of the two categories above, pFrom->pTab
-** and other fields are populated accordingly. The caller should check
-** (pFrom->pTab!=0) to determine whether or not a successful match
-** was found.
-**
-** Whether or not a match is found, SQLITE_OK is returned if no error
-** occurs. If an error does occur, an error message is stored in the
-** parser and some error code other than SQLITE_OK returned.
-*/
-static int withExpand(
- Walker *pWalker,
- struct SrcList_item *pFrom
-){
- Parse *pParse = pWalker->pParse;
- sqlite3 *db = pParse->db;
- struct Cte *pCte; /* Matched CTE (or NULL if no match) */
- With *pWith; /* WITH clause that pCte belongs to */
-
- assert( pFrom->pTab==0 );
-
- pCte = searchWith(pParse->pWith, pFrom, &pWith);
- if( pCte ){
- Table *pTab;
- ExprList *pEList;
- Select *pSel;
- Select *pLeft; /* Left-most SELECT statement */
- int bMayRecursive; /* True if compound joined by UNION [ALL] */
- With *pSavedWith; /* Initial value of pParse->pWith */
-
- /* If pCte->zCteErr is non-NULL at this point, then this is an illegal
- ** recursive reference to CTE pCte. Leave an error in pParse and return
- ** early. If pCte->zCteErr is NULL, then this is not a recursive reference.
- ** In this case, proceed. */
- if( pCte->zCteErr ){
- sqlite3ErrorMsg(pParse, pCte->zCteErr, pCte->zName);
- return SQLITE_ERROR;
- }
-
- assert( pFrom->pTab==0 );
- pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
- if( pTab==0 ) return WRC_Abort;
- pTab->nRef = 1;
- pTab->zName = sqlite3DbStrDup(db, pCte->zName);
- pTab->iPKey = -1;
- pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
- pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid;
- pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0);
- if( db->mallocFailed ) return SQLITE_NOMEM;
- assert( pFrom->pSelect );
-
- /* Check if this is a recursive CTE. */
- pSel = pFrom->pSelect;
- bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION );
- if( bMayRecursive ){
- int i;
- SrcList *pSrc = pFrom->pSelect->pSrc;
- for(i=0; i<pSrc->nSrc; i++){
- struct SrcList_item *pItem = &pSrc->a[i];
- if( pItem->zDatabase==0
- && pItem->zName!=0
- && 0==sqlite3StrICmp(pItem->zName, pCte->zName)
- ){
- pItem->pTab = pTab;
- pItem->fg.isRecursive = 1;
- pTab->nRef++;
- pSel->selFlags |= SF_Recursive;
- }
- }
- }
-
- /* Only one recursive reference is permitted. */
- if( pTab->nRef>2 ){
- sqlite3ErrorMsg(
- pParse, "multiple references to recursive table: %s", pCte->zName
- );
- return SQLITE_ERROR;
- }
- assert( pTab->nRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nRef==2 ));
-
- pCte->zCteErr = "circular reference: %s";
- pSavedWith = pParse->pWith;
- pParse->pWith = pWith;
- sqlite3WalkSelect(pWalker, bMayRecursive ? pSel->pPrior : pSel);
-
- for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior);
- pEList = pLeft->pEList;
- if( pCte->pCols ){
- if( pEList && pEList->nExpr!=pCte->pCols->nExpr ){
- sqlite3ErrorMsg(pParse, "table %s has %d values for %d columns",
- pCte->zName, pEList->nExpr, pCte->pCols->nExpr
- );
- pParse->pWith = pSavedWith;
- return SQLITE_ERROR;
- }
- pEList = pCte->pCols;
- }
-
- sqlite3ColumnsFromExprList(pParse, pEList, &pTab->nCol, &pTab->aCol);
- if( bMayRecursive ){
- if( pSel->selFlags & SF_Recursive ){
- pCte->zCteErr = "multiple recursive references: %s";
- }else{
- pCte->zCteErr = "recursive reference in a subquery: %s";
- }
- sqlite3WalkSelect(pWalker, pSel);
- }
- pCte->zCteErr = 0;
- pParse->pWith = pSavedWith;
- }
-
- return SQLITE_OK;
-}
-#endif
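One part of the removed withExpand() that is easy to demonstrate from the public API is its column-list check: the optional list of names on a CTE must match the number of result columns, otherwise preparation fails with the "has %d values for %d columns" message seen above. The names in the sketch are illustrative.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  /* Two named columns for two result columns: accepted */
  if( sqlite3_prepare_v2(db, "WITH c(a,b) AS (SELECT 1, 2) SELECT a+b FROM c",
                         -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%d\n", sqlite3_column_int(pStmt, 0));   /* prints 3 */
    }
    sqlite3_finalize(pStmt);
  }
  /* Two named columns for one result column: rejected,
  ** e.g. "table c has 1 values for 2 columns" */
  if( sqlite3_prepare_v2(db, "WITH c(a,b) AS (SELECT 1) SELECT * FROM c",
                         -1, &pStmt, 0)!=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));
  }
  sqlite3_close(db);
  return 0;
}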
-
-#ifndef SQLITE_OMIT_CTE
-/*
-** If the SELECT passed as the second argument has an associated WITH
-** clause, pop it from the stack stored as part of the Parse object.
-**
-** This function is used as the xSelectCallback2() callback by
-** sqlite3SelectExpand() when walking a SELECT tree to resolve table
-** names and other FROM clause elements.
-*/
-static void selectPopWith(Walker *pWalker, Select *p){
- Parse *pParse = pWalker->pParse;
- With *pWith = findRightmost(p)->pWith;
- if( pWith!=0 ){
- assert( pParse->pWith==pWith );
- pParse->pWith = pWith->pOuter;
- }
-}
-#else
-#define selectPopWith 0
-#endif
-
/*
** This routine is a Walker callback for "expanding" a SELECT statement.
** "Expanding" means to do the following:
@@ -112801,10 +102869,10 @@ static void selectPopWith(Walker *pWalker, Select *p){
** fill pTabList->a[].pSelect with a copy of the SELECT statement
** that implements the view. A copy is made of the view's SELECT
** statement so that we can freely modify or delete that statement
-** without worrying about messing up the persistent representation
+** without worrying about messing up the presistent representation
** of the view.
**
-** (3) Add terms to the WHERE clause to accommodate the NATURAL keyword
+** (3) Add terms to the WHERE clause to accomodate the NATURAL keyword
** on joins and the ON and USING clause of joins.
**
** (4) Scan the list of columns in the result set (pEList) looking
@@ -112832,9 +102900,6 @@ static int selectExpander(Walker *pWalker, Select *p){
}
pTabList = p->pSrc;
pEList = p->pEList;
- if( pWalker->xSelectCallback2==selectPopWith ){
- sqlite3WithPush(pParse, findRightmost(p)->pWith, 0);
- }
/* Make sure cursor numbers have been assigned to all entries in
** the FROM clause of the SELECT statement.
@@ -112847,28 +102912,27 @@ static int selectExpander(Walker *pWalker, Select *p){
*/
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
Table *pTab;
- assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 );
- if( pFrom->fg.isRecursive ) continue;
- assert( pFrom->pTab==0 );
-#ifndef SQLITE_OMIT_CTE
- if( withExpand(pWalker, pFrom) ) return WRC_Abort;
- if( pFrom->pTab ) {} else
-#endif
+ if( pFrom->pTab!=0 ){
+ /* This statement has already been prepared. There is no need
+ ** to go further. */
+ assert( i==0 );
+ return WRC_Prune;
+ }
if( pFrom->zName==0 ){
#ifndef SQLITE_OMIT_SUBQUERY
Select *pSel = pFrom->pSelect;
/* A sub-query in the FROM clause of a SELECT */
assert( pSel!=0 );
assert( pFrom->pTab==0 );
- if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort;
+ sqlite3WalkSelect(pWalker, pSel);
pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
if( pTab==0 ) return WRC_Abort;
pTab->nRef = 1;
pTab->zName = sqlite3MPrintf(db, "sqlite_sq_%p", (void*)pTab);
while( pSel->pPrior ){ pSel = pSel->pPrior; }
- sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
+ selectColumnsFromExprList(pParse, pSel->pEList, &pTab->nCol, &pTab->aCol);
pTab->iPKey = -1;
- pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+ pTab->nRowEst = 1048576;
pTab->tabFlags |= TF_Ephemeral;
#endif
}else{
@@ -112885,19 +102949,11 @@ static int selectExpander(Walker *pWalker, Select *p){
pTab->nRef++;
#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE)
if( pTab->pSelect || IsVirtual(pTab) ){
- i16 nCol;
+ /* We reach here if the named table is really a view */
if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort;
assert( pFrom->pSelect==0 );
- if( pFrom->fg.isTabFunc && !IsVirtual(pTab) ){
- sqlite3ErrorMsg(pParse, "'%s' is not a function", pTab->zName);
- return WRC_Abort;
- }
pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0);
- sqlite3SelectSetName(pFrom->pSelect, pTab->zName);
- nCol = pTab->nCol;
- pTab->nCol = -1;
sqlite3WalkSelect(pWalker, pFrom->pSelect);
- pTab->nCol = nCol;
}
#endif
}
@@ -112943,6 +102999,13 @@ static int selectExpander(Walker *pWalker, Select *p){
int longNames = (flags & SQLITE_FullColNames)!=0
&& (flags & SQLITE_ShortColNames)==0;
+ /* When processing FROM-clause subqueries, it is always the case
+ ** that full_column_names=OFF and short_column_names=ON. The
+ ** sqlite3ResultSetOfSelect() routine makes it so. */
+ assert( (p->selFlags & SF_NestedFrom)==0
+ || ((flags & SQLITE_FullColNames)==0 &&
+ (flags & SQLITE_ShortColNames)!=0) );
+
for(k=0; k<pEList->nExpr; k++){
pE = a[k].pExpr;
pRight = pE->pRight;
@@ -113010,7 +103073,7 @@ static int selectExpander(Walker *pWalker, Select *p){
tableSeen = 1;
if( i>0 && zTName==0 ){
- if( (pFrom->fg.jointype & JT_NATURAL)!=0
+ if( (pFrom->jointype & JT_NATURAL)!=0
&& tableAndColumnIndex(pTabList, i, zName, 0, 0)
){
/* In a NATURAL join, omit the join columns from the
@@ -113117,9 +103180,6 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
sqlite3WalkSelect(&w, pSelect);
}
w.xSelectCallback = selectExpander;
- if( (pSelect->selFlags & SF_MultiValue)==0 ){
- w.xSelectCallback2 = selectPopWith;
- }
sqlite3WalkSelect(&w, pSelect);
}
@@ -113138,29 +103198,29 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
** at that point because identifiers had not yet been resolved. This
** routine is called after identifier resolution.
*/
-static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
+static int selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
Parse *pParse;
int i;
SrcList *pTabList;
struct SrcList_item *pFrom;
assert( p->selFlags & SF_Resolved );
- assert( (p->selFlags & SF_HasTypeInfo)==0 );
- p->selFlags |= SF_HasTypeInfo;
- pParse = pWalker->pParse;
- pTabList = p->pSrc;
- for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
- Table *pTab = pFrom->pTab;
- assert( pTab!=0 );
- if( (pTab->tabFlags & TF_Ephemeral)!=0 ){
- /* A sub-query in the FROM clause of a SELECT */
- Select *pSel = pFrom->pSelect;
- if( pSel ){
+ if( (p->selFlags & SF_HasTypeInfo)==0 ){
+ p->selFlags |= SF_HasTypeInfo;
+ pParse = pWalker->pParse;
+ pTabList = p->pSrc;
+ for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ Table *pTab = pFrom->pTab;
+ if( ALWAYS(pTab!=0) && (pTab->tabFlags & TF_Ephemeral)!=0 ){
+ /* A sub-query in the FROM clause of a SELECT */
+ Select *pSel = pFrom->pSelect;
+ assert( pSel );
while( pSel->pPrior ) pSel = pSel->pPrior;
selectAddColumnTypeAndCollation(pParse, pTab, pSel);
}
}
}
+ return WRC_Continue;
}
#endif
@@ -113176,9 +103236,10 @@ static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){
#ifndef SQLITE_OMIT_SUBQUERY
Walker w;
memset(&w, 0, sizeof(w));
- w.xSelectCallback2 = selectAddSubqueryTypeInfo;
+ w.xSelectCallback = selectAddSubqueryTypeInfo;
w.xExprCallback = exprWalkNoop;
w.pParse = pParse;
+ w.bSelectDepthFirst = 1;
sqlite3WalkSelect(&w, pSelect);
#endif
}
@@ -113225,23 +103286,14 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
Vdbe *v = pParse->pVdbe;
int i;
struct AggInfo_func *pFunc;
- int nReg = pAggInfo->nFunc + pAggInfo->nColumn;
- if( nReg==0 ) return;
-#ifdef SQLITE_DEBUG
- /* Verify that all AggInfo registers are within the range specified by
- ** AggInfo.mnReg..AggInfo.mxReg */
- assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 );
- for(i=0; i<pAggInfo->nColumn; i++){
- assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg
- && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg );
+ if( pAggInfo->nFunc+pAggInfo->nColumn==0 ){
+ return;
}
- for(i=0; i<pAggInfo->nFunc; i++){
- assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg
- && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg );
+ for(i=0; i<pAggInfo->nColumn; i++){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pAggInfo->aCol[i].iMem);
}
-#endif
- sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg);
for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pFunc->iMem);
if( pFunc->iDistinct>=0 ){
Expr *pE = pFunc->pExpr;
assert( !ExprHasProperty(pE, EP_xIsSelect) );
@@ -113250,7 +103302,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
"argument");
pFunc->iDistinct = -1;
}else{
- KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList, 0, 0);
+ KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList);
sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0,
(char*)pKeyInfo, P4_KEYINFO);
}
@@ -113287,6 +103339,7 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
struct AggInfo_col *pC;
pAggInfo->directMode = 1;
+ sqlite3ExprCacheClear(pParse);
for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
int nArg;
int addrNext = 0;
@@ -113296,15 +103349,14 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
if( pList ){
nArg = pList->nExpr;
regAgg = sqlite3GetTempRange(pParse, nArg);
- sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP);
+ sqlite3ExprCodeExprList(pParse, pList, regAgg, SQLITE_ECEL_DUP);
}else{
nArg = 0;
regAgg = 0;
}
if( pF->iDistinct>=0 ){
addrNext = sqlite3VdbeMakeLabel(v);
- testcase( nArg==0 ); /* Error condition */
- testcase( nArg>1 ); /* Also an error */
+ assert( nArg==1 );
codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg);
}
if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
@@ -113321,7 +103373,7 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
}
- sqlite3VdbeAddOp4(v, OP_AggStep0, 0, regAgg, pF->iMem,
+ sqlite3VdbeAddOp4(v, OP_AggStep, 0, regAgg, pF->iMem,
(void*)pF->pFunc, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nArg);
sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg);
@@ -113343,7 +103395,7 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
** values to an OP_Copy.
*/
if( regHit ){
- addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v);
+ addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit);
}
sqlite3ExprCacheClear(pParse);
for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){
@@ -113367,11 +103419,10 @@ static void explainSimpleCount(
Index *pIdx /* Index used to optimize scan, or NULL */
){
if( pParse->explain==2 ){
- int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx)));
char *zEqp = sqlite3MPrintf(pParse->db, "SCAN TABLE %s%s%s",
- pTab->zName,
- bCover ? " USING COVERING INDEX " : "",
- bCover ? pIdx->zName : ""
+ pTab->zName,
+ pIdx ? " USING COVERING INDEX " : "",
+ pIdx ? pIdx->zName : ""
);
sqlite3VdbeAddOp4(
pParse->pVdbe, OP_Explain, pParse->iSelectId, 0, 0, zEqp, P4_DYNAMIC
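
Editorial sketch, not part of the patch: the "SCAN TABLE ... USING COVERING INDEX ..." text assembled by explainSimpleCount() above is what EXPLAIN QUERY PLAN reports for a bare count(*). The program below uses only the public API; table t and index i1 are illustrative, and the exact wording of the detail column varies between SQLite releases.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a,b); CREATE INDEX i1 ON t(a);", 0, 0, 0);
  sqlite3_prepare_v2(db,
      "EXPLAIN QUERY PLAN SELECT count(*) FROM t;", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Column 3 is the plan detail, e.g. "SCAN TABLE t USING COVERING INDEX i1" */
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 3));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
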
@@ -113385,8 +103436,50 @@ static void explainSimpleCount(
/*
** Generate code for the SELECT statement given in the p argument.
**
-** The results are returned according to the SelectDest structure.
-** See comments in sqliteInt.h for further information.
+** The results are distributed in various ways depending on the
+** contents of the SelectDest structure pointed to by argument pDest
+** as follows:
+**
+** pDest->eDest Result
+** ------------ -------------------------------------------
+** SRT_Output Generate a row of output (using the OP_ResultRow
+** opcode) for each row in the result set.
+**
+** SRT_Mem Only valid if the result is a single column.
+** Store the first column of the first result row
+** in register pDest->iSDParm then abandon the rest
+** of the query. This destination implies "LIMIT 1".
+**
+** SRT_Set The result must be a single column. Store each
+** row of result as the key in table pDest->iSDParm.
+** Apply the affinity pDest->affSdst before storing
+** results. Used to implement "IN (SELECT ...)".
+**
+** SRT_Union Store results as a key in a temporary table
+** identified by pDest->iSDParm.
+**
+** SRT_Except Remove results from the temporary table pDest->iSDParm.
+**
+** SRT_Table Store results in temporary table pDest->iSDParm.
+** This is like SRT_EphemTab except that the table
+** is assumed to already be open.
+**
+** SRT_EphemTab Create a temporary table pDest->iSDParm and store
+** the result there. The cursor is left open after
+** returning. This is like SRT_Table except that
+** this destination uses OP_OpenEphemeral to create
+** the table first.
+**
+** SRT_Coroutine Generate a co-routine that returns a new row of
+** results each time it is invoked. The entry point
+** of the co-routine is stored in register pDest->iSDParm.
+**
+** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result
+** set is not empty.
+**
+** SRT_Discard Throw the results away. This is used by SELECT
+** statements within triggers whose only purpose is
+** the side-effects of functions.
**
** This routine returns the number of errors. If any errors are
** encountered, then an appropriate error message is left in
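
Editorial note, not part of the patch: inside the core, a caller chooses one of the SRT_* destinations above by filling in a SelectDest before invoking sqlite3Select(), exactly as the co-routine and materialization code later in this diff does. The fragment below is a sketch of that pattern only; it uses names visible in this file (SelectDest, sqlite3SelectDestInit, sqlite3Select) plus assumed locals pParse, pSub and regResult, and it does not compile outside sqlite3.c.

  /* Sketch: route the first row's single column into register regResult.
  ** Per the table above, SRT_Mem implies "LIMIT 1". */
  SelectDest dest;
  sqlite3SelectDestInit(&dest, SRT_Mem, regResult);
  sqlite3Select(pParse, pSub, &dest);
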
@@ -113404,14 +103497,15 @@ SQLITE_PRIVATE int sqlite3Select(
WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */
Vdbe *v; /* The virtual machine under construction */
int isAgg; /* True for select lists like "count(*)" */
- ExprList *pEList = 0; /* List of columns to extract. */
+ ExprList *pEList; /* List of columns to extract. */
SrcList *pTabList; /* List of tables to select from */
Expr *pWhere; /* The WHERE clause. May be NULL */
+ ExprList *pOrderBy; /* The ORDER BY clause. May be NULL */
ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */
Expr *pHaving; /* The HAVING clause. May be NULL */
int rc = 1; /* Value to return from this function */
+ int addrSortIndex; /* Address of an OP_OpenEphemeral instruction */
DistinctCtx sDistinct; /* Info on how to code the DISTINCT keyword */
- SortCtx sSort; /* Info on how to code the ORDER BY clause */
AggInfo sAggInfo; /* Information used by aggregate queries */
int iEnd; /* Address of the end of the query */
sqlite3 *db; /* The database connection */
@@ -113427,23 +103521,10 @@ SQLITE_PRIVATE int sqlite3Select(
}
if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
memset(&sAggInfo, 0, sizeof(sAggInfo));
-#if SELECTTRACE_ENABLED
- pParse->nSelectIndent++;
- SELECTTRACE(1,pParse,p, ("begin processing:\n"));
- if( sqlite3SelectTrace & 0x100 ){
- sqlite3TreeViewSelect(0, p, 0);
- }
-#endif
- assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo );
- assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo );
- assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue );
- assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue );
if( IgnorableOrderby(pDest) ){
assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union ||
- pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard ||
- pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo ||
- pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo);
+ pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard);
/* If ORDER BY makes no difference in the output then neither does
** DISTINCT so it can be removed too. */
sqlite3ExprListDelete(db, p->pOrderBy);
@@ -113451,93 +103532,38 @@ SQLITE_PRIVATE int sqlite3Select(
p->selFlags &= ~SF_Distinct;
}
sqlite3SelectPrep(pParse, p, 0);
- memset(&sSort, 0, sizeof(sSort));
- sSort.pOrderBy = p->pOrderBy;
+ pOrderBy = p->pOrderBy;
pTabList = p->pSrc;
+ pEList = p->pEList;
if( pParse->nErr || db->mallocFailed ){
goto select_end;
}
- assert( p->pEList!=0 );
isAgg = (p->selFlags & SF_Aggregate)!=0;
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p, ("after name resolution:\n"));
- sqlite3TreeViewSelect(0, p, 0);
- }
-#endif
+ assert( pEList!=0 );
+ /* Begin generating code.
+ */
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) goto select_end;
/* If writing to memory or generating a set
** only a single column may be output.
*/
#ifndef SQLITE_OMIT_SUBQUERY
- if( checkForMultiColumnSelectError(pParse, pDest, p->pEList->nExpr) ){
+ if( checkForMultiColumnSelectError(pParse, pDest, pEList->nExpr) ){
goto select_end;
}
#endif
- /* Try to flatten subqueries in the FROM clause up into the main query
+ /* Generate code for all sub-queries in the FROM clause
*/
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
for(i=0; !p->pPrior && i<pTabList->nSrc; i++){
struct SrcList_item *pItem = &pTabList->a[i];
+ SelectDest dest;
Select *pSub = pItem->pSelect;
int isAggSub;
- Table *pTab = pItem->pTab;
- if( pSub==0 ) continue;
- /* Catch mismatch in the declared columns of a view and the number of
- ** columns in the SELECT on the RHS */
- if( pTab->nCol!=pSub->pEList->nExpr ){
- sqlite3ErrorMsg(pParse, "expected %d columns for '%s' but got %d",
- pTab->nCol, pTab->zName, pSub->pEList->nExpr);
- goto select_end;
- }
-
- isAggSub = (pSub->selFlags & SF_Aggregate)!=0;
- if( flattenSubquery(pParse, p, i, isAgg, isAggSub) ){
- /* This subquery can be absorbed into its parent. */
- if( isAggSub ){
- isAgg = 1;
- p->selFlags |= SF_Aggregate;
- }
- i = -1;
- }
- pTabList = p->pSrc;
- if( db->mallocFailed ) goto select_end;
- if( !IgnorableOrderby(pDest) ){
- sSort.pOrderBy = p->pOrderBy;
- }
- }
-#endif
-
- /* Get a pointer the VDBE under construction, allocating a new VDBE if one
- ** does not already exist */
- v = sqlite3GetVdbe(pParse);
- if( v==0 ) goto select_end;
-
-#ifndef SQLITE_OMIT_COMPOUND_SELECT
- /* Handle compound SELECT statements using the separate multiSelect()
- ** procedure.
- */
- if( p->pPrior ){
- rc = multiSelect(pParse, p, pDest);
- explainSetInteger(pParse->iSelectId, iRestoreSelectId);
-#if SELECTTRACE_ENABLED
- SELECTTRACE(1,pParse,p,("end compound-select processing\n"));
- pParse->nSelectIndent--;
-#endif
- return rc;
- }
-#endif
-
- /* Generate code for all sub-queries in the FROM clause
- */
-#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
- for(i=0; i<pTabList->nSrc; i++){
- struct SrcList_item *pItem = &pTabList->a[i];
- SelectDest dest;
- Select *pSub = pItem->pSelect;
if( pSub==0 ) continue;
/* Sometimes the code for a subquery will be generated more than
@@ -113547,7 +103573,7 @@ SQLITE_PRIVATE int sqlite3Select(
** is sufficient, though the subroutine to manifest the view does need
** to be invoked again. */
if( pItem->addrFillSub ){
- if( pItem->fg.viaCoroutine==0 ){
+ if( pItem->viaCoroutine==0 ){
sqlite3VdbeAddOp2(v, OP_Gosub, pItem->regReturn, pItem->addrFillSub);
}
continue;
@@ -113562,41 +103588,50 @@ SQLITE_PRIVATE int sqlite3Select(
*/
pParse->nHeight += sqlite3SelectExprHeight(p);
- /* Make copies of constant WHERE-clause terms in the outer query down
- ** inside the subquery. This can help the subquery to run more efficiently.
- */
- if( (pItem->fg.jointype & JT_OUTER)==0
- && pushDownWhereTerms(db, pSub, p->pWhere, pItem->iCursor)
- ){
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,("After WHERE-clause push-down:\n"));
- sqlite3TreeViewSelect(0, p, 0);
+ isAggSub = (pSub->selFlags & SF_Aggregate)!=0;
+ if( flattenSubquery(pParse, p, i, isAgg, isAggSub) ){
+ /* This subquery can be absorbed into its parent. */
+ if( isAggSub ){
+ isAgg = 1;
+ p->selFlags |= SF_Aggregate;
}
-#endif
- }
-
- /* Generate code to implement the subquery
- */
- if( pTabList->nSrc==1
- && (p->selFlags & SF_All)==0
- && OptimizationEnabled(db, SQLITE_SubqCoroutine)
+ i = -1;
+ }else if( pTabList->nSrc==1 && (p->selFlags & SF_Materialize)==0
+ && OptimizationEnabled(db, SQLITE_SubqCoroutine)
){
/* Implement a co-routine that will return a single row of the result
** set on each invocation.
*/
- int addrTop = sqlite3VdbeCurrentAddr(v)+1;
+ int addrTop;
+ int addrEof;
pItem->regReturn = ++pParse->nMem;
- sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop);
- VdbeComment((v, "%s", pItem->pTab->zName));
+ addrEof = ++pParse->nMem;
+ /* Before coding the OP_Goto to jump to the start of the main routine,
+ ** ensure that the jump to the verify-schema routine has already
+ ** been coded. Otherwise, the verify-schema would likely be coded as
+ ** part of the co-routine. If the main routine then accessed the
+ ** database before invoking the co-routine for the first time (for
+ ** example to initialize a LIMIT register from a sub-select), it would
+ ** be doing so without having verified the schema version and obtained
+ ** the required db locks. See ticket d6b36be38. */
+ sqlite3CodeVerifySchema(pParse, -1);
+ sqlite3VdbeAddOp0(v, OP_Goto);
+ addrTop = sqlite3VdbeAddOp1(v, OP_OpenPseudo, pItem->iCursor);
+ sqlite3VdbeChangeP5(v, 1);
+ VdbeComment((v, "coroutine for %s", pItem->pTab->zName));
pItem->addrFillSub = addrTop;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, addrEof);
+ sqlite3VdbeChangeP5(v, 1);
sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn);
explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId);
sqlite3Select(pParse, pSub, &dest);
- pItem->pTab->nRowLogEst = sqlite3LogEst(pSub->nSelectRow);
- pItem->fg.viaCoroutine = 1;
- pItem->regResult = dest.iSdst;
- sqlite3VdbeAddOp1(v, OP_EndCoroutine, pItem->regReturn);
+ pItem->pTab->nRowEst = (unsigned)pSub->nSelectRow;
+ pItem->viaCoroutine = 1;
+ sqlite3VdbeChangeP2(v, addrTop, dest.iSdst);
+ sqlite3VdbeChangeP3(v, addrTop, dest.nSdst);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, addrEof);
+ sqlite3VdbeAddOp1(v, OP_Yield, pItem->regReturn);
+ VdbeComment((v, "end %s", pItem->pTab->zName));
sqlite3VdbeJumpHere(v, addrTop-1);
sqlite3ClearTempRegCache(pParse);
}else{
@@ -113612,45 +103647,76 @@ SQLITE_PRIVATE int sqlite3Select(
pItem->regReturn = ++pParse->nMem;
topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn);
pItem->addrFillSub = topAddr+1;
- if( pItem->fg.isCorrelated==0 ){
+ VdbeNoopComment((v, "materialize %s", pItem->pTab->zName));
+ if( pItem->isCorrelated==0 ){
/* If the subquery is not correlated and if we are not inside of
** a trigger, then we only need to compute the value of the subquery
** once. */
- onceAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v);
- VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName));
- }else{
- VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName));
+ onceAddr = sqlite3CodeOnce(pParse);
}
sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor);
explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId);
sqlite3Select(pParse, pSub, &dest);
- pItem->pTab->nRowLogEst = sqlite3LogEst(pSub->nSelectRow);
+ pItem->pTab->nRowEst = (unsigned)pSub->nSelectRow;
if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr);
retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn);
VdbeComment((v, "end %s", pItem->pTab->zName));
sqlite3VdbeChangeP1(v, topAddr, retAddr);
sqlite3ClearTempRegCache(pParse);
}
- if( db->mallocFailed ) goto select_end;
+ if( /*pParse->nErr ||*/ db->mallocFailed ){
+ goto select_end;
+ }
pParse->nHeight -= sqlite3SelectExprHeight(p);
+ pTabList = p->pSrc;
+ if( !IgnorableOrderby(pDest) ){
+ pOrderBy = p->pOrderBy;
+ }
}
-#endif
-
- /* Various elements of the SELECT copied into local variables for
- ** convenience */
pEList = p->pEList;
+#endif
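
Editorial sketch, not part of the patch: whether a FROM-clause subquery ends up as a co-routine or as a materialized ephemeral table (the two branches above) can be inspected by dumping the generated VDBE program. The program below uses only the public API; tables t1/t2 are illustrative, the OFFSET merely keeps the subquery from being flattened away, and the exact opcodes printed (OP_Yield/OP_OpenPseudo versus OP_OpenEphemeral in this revision, different names in later releases) depend on the SQLite version and optimization settings.

#include <stdio.h>
#include <sqlite3.h>

static void dump_program(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  printf("-- %s\n", zSql);
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      /* EXPLAIN columns: addr, opcode, p1, p2, p3, p4, p5, comment */
      printf("%3d %s\n", sqlite3_column_int(pStmt, 0),
             (const char*)sqlite3_column_text(pStmt, 1));
    }
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a,b); CREATE TABLE t2(c,d);", 0, 0, 0);
  /* Single FROM term: eligible for the co-routine strategy. */
  dump_program(db, "EXPLAIN SELECT x FROM (SELECT a AS x FROM t1 LIMIT 5 OFFSET 1);");
  /* Two FROM terms: the subquery is materialized instead. */
  dump_program(db, "EXPLAIN SELECT * FROM (SELECT a FROM t1 LIMIT 5 OFFSET 1), t2;");
  sqlite3_close(db);
  return 0;
}
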
pWhere = p->pWhere;
pGroupBy = p->pGroupBy;
pHaving = p->pHaving;
sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0;
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x400 ){
- SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n"));
- sqlite3TreeViewSelect(0, p, 0);
+#ifndef SQLITE_OMIT_COMPOUND_SELECT
+ /* If there is a sequence of queries, do the earlier ones first.
+ */
+ if( p->pPrior ){
+ if( p->pRightmost==0 ){
+ Select *pLoop, *pRight = 0;
+ int cnt = 0;
+ int mxSelect;
+ for(pLoop=p; pLoop; pLoop=pLoop->pPrior, cnt++){
+ pLoop->pRightmost = p;
+ pLoop->pNext = pRight;
+ pRight = pLoop;
+ }
+ mxSelect = db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT];
+ if( mxSelect && cnt>mxSelect ){
+ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT");
+ goto select_end;
+ }
+ }
+ rc = multiSelect(pParse, p, pDest);
+ explainSetInteger(pParse->iSelectId, iRestoreSelectId);
+ return rc;
}
#endif
+ /* If there is both a GROUP BY and an ORDER BY clause and they are
+ ** identical, then disable the ORDER BY clause since the GROUP BY
+ ** will cause elements to come out in the correct order. This is
+ ** an optimization - the correct answer should result regardless.
+ ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER
+ ** to disable this optimization for testing purposes.
+ */
+ if( sqlite3ExprListCompare(p->pGroupBy, pOrderBy, -1)==0
+ && OptimizationEnabled(db, SQLITE_GroupByOrder) ){
+ pOrderBy = 0;
+ }
+
/* If the query is DISTINCT with an ORDER BY but is not an aggregate, and
** if the select-list is the same as the ORDER BY list, then this query
** can be rewritten as a GROUP BY. In other words, this:
@@ -113659,7 +103725,7 @@ SQLITE_PRIVATE int sqlite3Select(
**
** is transformed to:
**
- ** SELECT xyz FROM ... GROUP BY xyz ORDER BY xyz
+ ** SELECT xyz FROM ... GROUP BY xyz
**
** The second form is preferred as a single index (or temp-table) may be
** used for both the ORDER BY and DISTINCT processing. As originally
@@ -113667,35 +103733,35 @@ SQLITE_PRIVATE int sqlite3Select(
** BY and DISTINCT, and an index or separate temp-table for the other.
*/
if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct
- && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0
+ && sqlite3ExprListCompare(pOrderBy, p->pEList, -1)==0
){
p->selFlags &= ~SF_Distinct;
- pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0);
+ p->pGroupBy = sqlite3ExprListDup(db, p->pEList, 0);
+ pGroupBy = p->pGroupBy;
+ pOrderBy = 0;
    /* Notice that even though SF_Distinct has been cleared from p->selFlags,
** the sDistinct.isTnct is still set. Hence, isTnct represents the
** original setting of the SF_Distinct flag, not the current setting */
assert( sDistinct.isTnct );
}
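
Editorial illustration, not part of the patch: the rewrite described above treats a DISTINCT-with-ORDER-BY query as its GROUP BY equivalent. The snippet below only demonstrates that the two forms return the same rows; the table t and column xyz are illustrative and the API calls are the public ones from sqlite3.h.

#include <stdio.h>
#include <sqlite3.h>

static int count_rows(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  int n = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ) n++;
  }
  sqlite3_finalize(pStmt);
  return n;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE t(xyz); INSERT INTO t VALUES(2),(1),(2),(3);", 0, 0, 0);
  printf("DISTINCT ... ORDER BY: %d rows\n",
         count_rows(db, "SELECT DISTINCT xyz FROM t ORDER BY xyz;"));
  printf("GROUP BY:              %d rows\n",
         count_rows(db, "SELECT xyz FROM t GROUP BY xyz;"));   /* both 3 */
  sqlite3_close(db);
  return 0;
}
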
- /* If there is an ORDER BY clause, then create an ephemeral index to
- ** do the sorting. But this sorting ephemeral index might end up
- ** being unused if the data can be extracted in pre-sorted order.
- ** If that is the case, then the OP_OpenEphemeral instruction will be
- ** changed to an OP_Noop once we figure out that the sorting index is
- ** not needed. The sSort.addrSortIndex variable is used to facilitate
- ** that change.
+ /* If there is an ORDER BY clause, then this sorting
+ ** index might end up being unused if the data can be
+ ** extracted in pre-sorted order. If that is the case, then the
+ ** OP_OpenEphemeral instruction will be changed to an OP_Noop once
+ ** we figure out that the sorting index is not needed. The addrSortIndex
+ ** variable is used to facilitate that change.
*/
- if( sSort.pOrderBy ){
+ if( pOrderBy ){
KeyInfo *pKeyInfo;
- pKeyInfo = keyInfoFromExprList(pParse, sSort.pOrderBy, 0, pEList->nExpr);
- sSort.iECursor = pParse->nTab++;
- sSort.addrSortIndex =
+ pKeyInfo = keyInfoFromExprList(pParse, pOrderBy);
+ pOrderBy->iECursor = pParse->nTab++;
+ p->addrOpenEphm[2] = addrSortIndex =
sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
- sSort.iECursor, sSort.pOrderBy->nExpr+1+pEList->nExpr, 0,
- (char*)pKeyInfo, P4_KEYINFO
- );
+ pOrderBy->iECursor, pOrderBy->nExpr+2, 0,
+ (char*)pKeyInfo, P4_KEYINFO);
}else{
- sSort.addrSortIndex = -1;
+ addrSortIndex = -1;
}
/* If the output is destined for a temporary table, open that table.
@@ -113709,19 +103775,19 @@ SQLITE_PRIVATE int sqlite3Select(
iEnd = sqlite3VdbeMakeLabel(v);
p->nSelectRow = LARGEST_INT64;
computeLimitRegisters(pParse, p, iEnd);
- if( p->iLimit==0 && sSort.addrSortIndex>=0 ){
- sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen);
- sSort.sortFlags |= SORTFLAG_UseSorter;
+ if( p->iLimit==0 && addrSortIndex>=0 ){
+ sqlite3VdbeGetOp(v, addrSortIndex)->opcode = OP_SorterOpen;
+ p->selFlags |= SF_UseSorter;
}
- /* Open an ephemeral index to use for the distinct set.
+ /* Open a virtual index to use for the distinct set.
*/
if( p->selFlags & SF_Distinct ){
sDistinct.tabTnct = pParse->nTab++;
sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
- sDistinct.tabTnct, 0, 0,
- (char*)keyInfoFromExprList(pParse, p->pEList,0,0),
- P4_KEYINFO);
+ sDistinct.tabTnct, 0, 0,
+ (char*)keyInfoFromExprList(pParse, p->pEList),
+ P4_KEYINFO);
sqlite3VdbeChangeP5(v, BTREE_UNORDERED);
sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED;
}else{
@@ -113733,8 +103799,8 @@ SQLITE_PRIVATE int sqlite3Select(
u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0);
/* Begin the database scan. */
- pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy,
- p->pEList, wctrlFlags, 0);
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pOrderBy, p->pEList,
+ wctrlFlags, 0);
if( pWInfo==0 ) goto select_end;
if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){
p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo);
@@ -113742,23 +103808,19 @@ SQLITE_PRIVATE int sqlite3Select(
if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){
sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo);
}
- if( sSort.pOrderBy ){
- sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo);
- if( sSort.nOBSat==sSort.pOrderBy->nExpr ){
- sSort.pOrderBy = 0;
- }
- }
+ if( pOrderBy && sqlite3WhereIsOrdered(pWInfo) ) pOrderBy = 0;
/* If sorting index that was created by a prior OP_OpenEphemeral
** instruction ended up not being needed, then change the OP_OpenEphemeral
** into an OP_Noop.
*/
- if( sSort.addrSortIndex>=0 && sSort.pOrderBy==0 ){
- sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
+ if( addrSortIndex>=0 && pOrderBy==0 ){
+ sqlite3VdbeChangeToNoop(v, addrSortIndex);
+ p->addrOpenEphm[2] = -1;
}
/* Use the standard inner loop. */
- selectInnerLoop(pParse, p, pEList, -1, &sSort, &sDistinct, pDest,
+ selectInnerLoop(pParse, p, pEList, 0, 0, pOrderBy, &sDistinct, pDest,
sqlite3WhereContinueLabel(pWInfo),
sqlite3WhereBreakLabel(pWInfo));
@@ -113779,7 +103841,6 @@ SQLITE_PRIVATE int sqlite3Select(
int addrEnd; /* End of processing for this SELECT */
int sortPTab = 0; /* Pseudotable used to decode sorting results */
int sortOut = 0; /* Output register from the sorter */
- int orderByGrp = 0; /* True if the GROUP BY and ORDER BY are the same */
/* Remove any and all aliases between the result set and the
** GROUP BY clause.
@@ -113799,17 +103860,6 @@ SQLITE_PRIVATE int sqlite3Select(
p->nSelectRow = 1;
}
- /* If there is both a GROUP BY and an ORDER BY clause and they are
- ** identical, then it may be possible to disable the ORDER BY clause
- ** on the grounds that the GROUP BY will cause elements to come out
- ** in the correct order. It also may not - the GROUP BY might use a
- ** database index that causes rows to be grouped together as required
- ** but not actually sorted. Either way, record the fact that the
- ** ORDER BY and GROUP BY clauses are the same by setting the orderByGrp
- ** variable. */
- if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){
- orderByGrp = 1;
- }
/* Create a label to jump to when we want to abort the query */
addrEnd = sqlite3VdbeMakeLabel(v);
@@ -113822,11 +103872,10 @@ SQLITE_PRIVATE int sqlite3Select(
sNC.pParse = pParse;
sNC.pSrcList = pTabList;
sNC.pAggInfo = &sAggInfo;
- sAggInfo.mnReg = pParse->nMem+1;
- sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0;
+ sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr+1 : 0;
sAggInfo.pGroupBy = pGroupBy;
sqlite3ExprAnalyzeAggList(&sNC, pEList);
- sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy);
+ sqlite3ExprAnalyzeAggList(&sNC, pOrderBy);
if( pHaving ){
sqlite3ExprAnalyzeAggregates(&sNC, pHaving);
}
@@ -113837,7 +103886,6 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3ExprAnalyzeAggList(&sNC, sAggInfo.aFunc[i].pExpr->x.pList);
sNC.ncFlags &= ~NC_InAggFunc;
}
- sAggInfo.mxReg = pParse->nMem;
if( db->mallocFailed ) goto select_end;
/* Processing for aggregates with GROUP BY is very different and
@@ -113845,7 +103893,7 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( pGroupBy ){
KeyInfo *pKeyInfo; /* Keying information for the group by clause */
- int addr1; /* A-vs-B comparison jump */
+ int j1; /* A-vs-B comparison jump */
int addrOutputRow; /* Start of subroutine that outputs a result row */
int regOutputRow; /* Return address register for output subroutine */
int addrSetAbort; /* Set the abort flag and return */
@@ -113860,7 +103908,7 @@ SQLITE_PRIVATE int sqlite3Select(
** will be converted into a Noop.
*/
sAggInfo.sortingIdx = pParse->nTab++;
- pKeyInfo = keyInfoFromExprList(pParse, pGroupBy, 0, sAggInfo.nColumn);
+ pKeyInfo = keyInfoFromExprList(pParse, pGroupBy);
addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen,
sAggInfo.sortingIdx, sAggInfo.nSortingColumn,
0, (char*)pKeyInfo, P4_KEYINFO);
@@ -113889,11 +103937,10 @@ SQLITE_PRIVATE int sqlite3Select(
** in the right order to begin with.
*/
sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
- pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0,
- WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0
- );
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0,
+ WHERE_GROUPBY, 0);
if( pWInfo==0 ) goto select_end;
- if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){
+ if( sqlite3WhereIsOrdered(pWInfo) ){
/* The optimizer is able to deliver rows in group by order so
** we do not have to sort. The OP_OpenEphemeral table will be
** cancelled later because we still need to use the pKeyInfo
@@ -113916,8 +103963,8 @@ SQLITE_PRIVATE int sqlite3Select(
groupBySort = 1;
nGroupBy = pGroupBy->nExpr;
- nCol = nGroupBy;
- j = nGroupBy;
+ nCol = nGroupBy + 1;
+ j = nGroupBy+1;
for(i=0; i<sAggInfo.nColumn; i++){
if( sAggInfo.aCol[i].iSorterColumn>=j ){
nCol++;
@@ -113926,8 +103973,9 @@ SQLITE_PRIVATE int sqlite3Select(
}
regBase = sqlite3GetTempRange(pParse, nCol);
sqlite3ExprCacheClear(pParse);
- sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0);
- j = nGroupBy;
+ sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0);
+ sqlite3VdbeAddOp2(v, OP_Sequence, sAggInfo.sortingIdx,regBase+nGroupBy);
+ j = nGroupBy+1;
for(i=0; i<sAggInfo.nColumn; i++){
struct AggInfo_col *pCol = &sAggInfo.aCol[i];
if( pCol->iSorterColumn>=j ){
@@ -113952,24 +104000,9 @@ SQLITE_PRIVATE int sqlite3Select(
sortOut = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol);
sqlite3VdbeAddOp2(v, OP_SorterSort, sAggInfo.sortingIdx, addrEnd);
- VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
+ VdbeComment((v, "GROUP BY sort"));
sAggInfo.useSortingIdx = 1;
sqlite3ExprCacheClear(pParse);
-
- }
-
- /* If the index or temporary table used by the GROUP BY sort
- ** will naturally deliver rows in the order required by the ORDER BY
- ** clause, cancel the ephemeral table open coded earlier.
- **
- ** This is an optimization - the correct answer should result regardless.
- ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER to
- ** disable this optimization for testing purposes. */
- if( orderByGrp && OptimizationEnabled(db, SQLITE_GroupByOrder)
- && (groupBySort || sqlite3WhereIsSorted(pWInfo))
- ){
- sSort.pOrderBy = 0;
- sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
}
/* Evaluate the current GROUP BY terms and store in b0, b1, b2...
@@ -113980,12 +104013,12 @@ SQLITE_PRIVATE int sqlite3Select(
addrTopOfLoop = sqlite3VdbeCurrentAddr(v);
sqlite3ExprCacheClear(pParse);
if( groupBySort ){
- sqlite3VdbeAddOp3(v, OP_SorterData, sAggInfo.sortingIdx,
- sortOut, sortPTab);
+ sqlite3VdbeAddOp2(v, OP_SorterData, sAggInfo.sortingIdx, sortOut);
}
for(j=0; j<pGroupBy->nExpr; j++){
if( groupBySort ){
sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j);
+ if( j==0 ) sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE);
}else{
sAggInfo.directMode = 1;
sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j);
@@ -113993,8 +104026,8 @@ SQLITE_PRIVATE int sqlite3Select(
}
sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr,
(char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
- addr1 = sqlite3VdbeCurrentAddr(v);
- sqlite3VdbeAddOp3(v, OP_Jump, addr1+1, 0, addr1+1); VdbeCoverage(v);
+ j1 = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp3(v, OP_Jump, j1+1, 0, j1+1);
/* Generate code that runs whenever the GROUP BY changes.
** Changes in the GROUP BY are detected by the previous code
@@ -114008,7 +104041,7 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr);
sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
VdbeComment((v, "output one row"));
- sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd);
VdbeComment((v, "check abort flag"));
sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
VdbeComment((v, "reset accumulator"));
@@ -114016,7 +104049,7 @@ SQLITE_PRIVATE int sqlite3Select(
/* Update the aggregate accumulators based on the content of
** the current row
*/
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeJumpHere(v, j1);
updateAccumulator(pParse, &sAggInfo);
sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag);
VdbeComment((v, "indicate data in accumulator"));
@@ -114025,7 +104058,6 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( groupBySort ){
sqlite3VdbeAddOp2(v, OP_SorterNext, sAggInfo.sortingIdx, addrTopOfLoop);
- VdbeCoverage(v);
}else{
sqlite3WhereEnd(pWInfo);
sqlite3VdbeChangeToNoop(v, addrSortingIdx);
@@ -114038,7 +104070,7 @@ SQLITE_PRIVATE int sqlite3Select(
/* Jump over the subroutines
*/
- sqlite3VdbeGoto(v, addrEnd);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEnd);
/* Generate a subroutine that outputs a single row of the result
** set. This subroutine first looks at the iUseFlag. If iUseFlag
@@ -114054,12 +104086,11 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3VdbeResolveLabel(v, addrOutputRow);
addrOutputRow = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2);
- VdbeCoverage(v);
VdbeComment((v, "Groupby result generator entry point"));
sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
finalizeAggFunctions(pParse, &sAggInfo);
sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL);
- selectInnerLoop(pParse, p, p->pEList, -1, &sSort,
+ selectInnerLoop(pParse, p, p->pEList, 0, 0, pOrderBy,
&sDistinct, pDest,
addrOutputRow+1, addrSetAbort);
sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
@@ -114191,8 +104222,8 @@ SQLITE_PRIVATE int sqlite3Select(
}
updateAccumulator(pParse, &sAggInfo);
assert( pMinMax==0 || pMinMax->nExpr==1 );
- if( sqlite3WhereIsOrdered(pWInfo)>0 ){
- sqlite3VdbeGoto(v, sqlite3WhereBreakLabel(pWInfo));
+ if( sqlite3WhereIsOrdered(pWInfo) ){
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3WhereBreakLabel(pWInfo));
VdbeComment((v, "%s() by index",
(flag==WHERE_ORDERBY_MIN?"min":"max")));
}
@@ -114200,9 +104231,9 @@ SQLITE_PRIVATE int sqlite3Select(
finalizeAggFunctions(pParse, &sAggInfo);
}
- sSort.pOrderBy = 0;
+ pOrderBy = 0;
sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL);
- selectInnerLoop(pParse, p, p->pEList, -1, 0, 0,
+ selectInnerLoop(pParse, p, p->pEList, 0, 0, 0, 0,
pDest, addrEnd, addrEnd);
sqlite3ExprListDelete(db, pDel);
}
@@ -114217,19 +104248,19 @@ SQLITE_PRIVATE int sqlite3Select(
/* If there is an ORDER BY clause, then we need to sort the results
** and send them to the callback one by one.
*/
- if( sSort.pOrderBy ){
- explainTempTable(pParse,
- sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY");
- generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest);
+ if( pOrderBy ){
+ explainTempTable(pParse, "ORDER BY");
+ generateSortTail(pParse, p, v, pEList->nExpr, pDest);
}
/* Jump here to skip this query
*/
sqlite3VdbeResolveLabel(v, iEnd);
- /* The SELECT has been coded. If there is an error in the Parse structure,
- ** set the return code to 1. Otherwise 0. */
- rc = (pParse->nErr>0);
+ /* The SELECT was successfully coded. Set the return code to 0
+ ** to indicate no errors.
+ */
+ rc = 0;
/* Control jumps to here if an error is encountered above, or upon
** successful coding of the SELECT.
@@ -114245,13 +104276,108 @@ select_end:
sqlite3DbFree(db, sAggInfo.aCol);
sqlite3DbFree(db, sAggInfo.aFunc);
-#if SELECTTRACE_ENABLED
- SELECTTRACE(1,pParse,p,("end processing\n"));
- pParse->nSelectIndent--;
-#endif
return rc;
}
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+/*
+** Generate a human-readable description of the Select object.
+*/
+static void explainOneSelect(Vdbe *pVdbe, Select *p){
+ sqlite3ExplainPrintf(pVdbe, "SELECT ");
+ if( p->selFlags & (SF_Distinct|SF_Aggregate) ){
+ if( p->selFlags & SF_Distinct ){
+ sqlite3ExplainPrintf(pVdbe, "DISTINCT ");
+ }
+ if( p->selFlags & SF_Aggregate ){
+ sqlite3ExplainPrintf(pVdbe, "agg_flag ");
+ }
+ sqlite3ExplainNL(pVdbe);
+ sqlite3ExplainPrintf(pVdbe, " ");
+ }
+ sqlite3ExplainExprList(pVdbe, p->pEList);
+ sqlite3ExplainNL(pVdbe);
+ if( p->pSrc && p->pSrc->nSrc ){
+ int i;
+ sqlite3ExplainPrintf(pVdbe, "FROM ");
+ sqlite3ExplainPush(pVdbe);
+ for(i=0; i<p->pSrc->nSrc; i++){
+ struct SrcList_item *pItem = &p->pSrc->a[i];
+ sqlite3ExplainPrintf(pVdbe, "{%d,*} = ", pItem->iCursor);
+ if( pItem->pSelect ){
+ sqlite3ExplainSelect(pVdbe, pItem->pSelect);
+ if( pItem->pTab ){
+ sqlite3ExplainPrintf(pVdbe, " (tabname=%s)", pItem->pTab->zName);
+ }
+ }else if( pItem->zName ){
+ sqlite3ExplainPrintf(pVdbe, "%s", pItem->zName);
+ }
+ if( pItem->zAlias ){
+ sqlite3ExplainPrintf(pVdbe, " (AS %s)", pItem->zAlias);
+ }
+ if( pItem->jointype & JT_LEFT ){
+ sqlite3ExplainPrintf(pVdbe, " LEFT-JOIN");
+ }
+ sqlite3ExplainNL(pVdbe);
+ }
+ sqlite3ExplainPop(pVdbe);
+ }
+ if( p->pWhere ){
+ sqlite3ExplainPrintf(pVdbe, "WHERE ");
+ sqlite3ExplainExpr(pVdbe, p->pWhere);
+ sqlite3ExplainNL(pVdbe);
+ }
+ if( p->pGroupBy ){
+ sqlite3ExplainPrintf(pVdbe, "GROUPBY ");
+ sqlite3ExplainExprList(pVdbe, p->pGroupBy);
+ sqlite3ExplainNL(pVdbe);
+ }
+ if( p->pHaving ){
+ sqlite3ExplainPrintf(pVdbe, "HAVING ");
+ sqlite3ExplainExpr(pVdbe, p->pHaving);
+ sqlite3ExplainNL(pVdbe);
+ }
+ if( p->pOrderBy ){
+ sqlite3ExplainPrintf(pVdbe, "ORDERBY ");
+ sqlite3ExplainExprList(pVdbe, p->pOrderBy);
+ sqlite3ExplainNL(pVdbe);
+ }
+ if( p->pLimit ){
+ sqlite3ExplainPrintf(pVdbe, "LIMIT ");
+ sqlite3ExplainExpr(pVdbe, p->pLimit);
+ sqlite3ExplainNL(pVdbe);
+ }
+ if( p->pOffset ){
+ sqlite3ExplainPrintf(pVdbe, "OFFSET ");
+ sqlite3ExplainExpr(pVdbe, p->pOffset);
+ sqlite3ExplainNL(pVdbe);
+ }
+}
+SQLITE_PRIVATE void sqlite3ExplainSelect(Vdbe *pVdbe, Select *p){
+ if( p==0 ){
+ sqlite3ExplainPrintf(pVdbe, "(null-select)");
+ return;
+ }
+ while( p->pPrior ){
+ p->pPrior->pNext = p;
+ p = p->pPrior;
+ }
+ sqlite3ExplainPush(pVdbe);
+ while( p ){
+ explainOneSelect(pVdbe, p);
+ p = p->pNext;
+ if( p==0 ) break;
+ sqlite3ExplainNL(pVdbe);
+ sqlite3ExplainPrintf(pVdbe, "%s\n", selectOpName(p->op));
+ }
+ sqlite3ExplainPrintf(pVdbe, "END");
+ sqlite3ExplainPop(pVdbe);
+}
+
+/* End of the structure debug printing code
+*****************************************************************************/
+#endif /* defined(SQLITE_ENABLE_TREE_EXPLAIN) */
+
/************** End of select.c **********************************************/
/************** Begin file table.c *******************************************/
/*
@@ -114272,7 +104398,6 @@ select_end:
 ** These routines are in a separate file so that they will not be linked
** if they are not used.
*/
-/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/* #include <string.h> */
@@ -114285,10 +104410,10 @@ select_end:
typedef struct TabResult {
char **azResult; /* Accumulated output */
char *zErrMsg; /* Error message text, if an error occurs */
- u32 nAlloc; /* Slots allocated for azResult[] */
- u32 nRow; /* Number of rows in the result */
- u32 nColumn; /* Number of columns in the result */
- u32 nData; /* Slots used in azResult[]. (nRow+1)*nColumn */
+ int nAlloc; /* Slots allocated for azResult[] */
+ int nRow; /* Number of rows in the result */
+ int nColumn; /* Number of columns in the result */
+ int nData; /* Slots used in azResult[]. (nRow+1)*nColumn */
int rc; /* Return code from sqlite3_exec() */
} TabResult;
@@ -114314,7 +104439,7 @@ static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){
if( p->nData + need > p->nAlloc ){
char **azNew;
p->nAlloc = p->nAlloc*2 + need;
- azNew = sqlite3_realloc64( p->azResult, sizeof(char*)*p->nAlloc );
+ azNew = sqlite3_realloc( p->azResult, sizeof(char*)*p->nAlloc );
if( azNew==0 ) goto malloc_failed;
p->azResult = azNew;
}
@@ -114329,7 +104454,7 @@ static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){
if( z==0 ) goto malloc_failed;
p->azResult[p->nData++] = z;
}
- }else if( (int)p->nColumn!=nCol ){
+ }else if( p->nColumn!=nCol ){
sqlite3_free(p->zErrMsg);
p->zErrMsg = sqlite3_mprintf(
"sqlite3_get_table() called with two or more incompatible queries"
@@ -114346,7 +104471,7 @@ static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){
z = 0;
}else{
int n = sqlite3Strlen30(argv[i])+1;
- z = sqlite3_malloc64( n );
+ z = sqlite3_malloc( n );
if( z==0 ) goto malloc_failed;
memcpy(z, argv[i], n);
}
@@ -114371,7 +104496,7 @@ malloc_failed:
** Instead, the entire table should be passed to sqlite3_free_table() when
** the calling procedure is finished using it.
*/
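
Editorial usage sketch, not part of the patch, for the interface documented above. It relies only on the public declarations in sqlite3.h; the table t and its contents are illustrative. Note the layout of the result array: one leading row of column names followed by nRow rows of data, all released with a single sqlite3_free_table() call.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char **azResult = 0;
  char *zErr = 0;
  int nRow = 0, nCol = 0, i;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a,b); INSERT INTO t VALUES(1,'x');", 0, 0, 0);
  if( sqlite3_get_table(db, "SELECT a, b FROM t", &azResult,
                        &nRow, &nCol, &zErr)==SQLITE_OK ){
    for(i=0; i<(nRow+1)*nCol; i++){
      printf("azResult[%d] = %s\n", i, azResult[i] ? azResult[i] : "NULL");
    }
  }else{
    printf("error: %s\n", zErr ? zErr : "?");
    sqlite3_free(zErr);
  }
  sqlite3_free_table(azResult);  /* never sqlite3_free() the individual strings */
  sqlite3_close(db);
  return 0;
}
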
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
+SQLITE_API int sqlite3_get_table(
sqlite3 *db, /* The database on which the SQL executes */
const char *zSql, /* The SQL to be executed */
char ***pazResult, /* Write the result table here */
@@ -114382,9 +104507,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
int rc;
TabResult res;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || pazResult==0 ) return SQLITE_MISUSE_BKPT;
-#endif
*pazResult = 0;
if( pnColumn ) *pnColumn = 0;
if( pnRow ) *pnRow = 0;
@@ -114395,7 +104517,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
res.nData = 1;
res.nAlloc = 20;
res.rc = SQLITE_OK;
- res.azResult = sqlite3_malloc64(sizeof(char*)*res.nAlloc );
+ res.azResult = sqlite3_malloc(sizeof(char*)*res.nAlloc );
if( res.azResult==0 ){
db->errCode = SQLITE_NOMEM;
return SQLITE_NOMEM;
@@ -114423,7 +104545,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
}
if( res.nAlloc>res.nData ){
char **azNew;
- azNew = sqlite3_realloc64( res.azResult, sizeof(char*)*res.nData );
+ azNew = sqlite3_realloc( res.azResult, sizeof(char*)*res.nData );
if( azNew==0 ){
sqlite3_free_table(&res.azResult[1]);
db->errCode = SQLITE_NOMEM;
@@ -114440,8 +104562,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
/*
** This routine frees the space the sqlite3_get_table() malloced.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(
- char **azResult /* Result returned from sqlite3_get_table() */
+SQLITE_API void sqlite3_free_table(
+ char **azResult /* Result returned from from sqlite3_get_table() */
){
if( azResult ){
int i, n;
@@ -114469,7 +104591,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(
*************************************************************************
** This file contains the implementation for TRIGGERs
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_OMIT_TRIGGER
/*
@@ -114586,7 +104707,7 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
** ^^^^^^^^
**
** To maintain backwards compatibility, ignore the database
- ** name on pTableName if we are reparsing out of SQLITE_MASTER.
+ ** name on pTableName if we are reparsing our of SQLITE_MASTER.
*/
if( db->init.busy && iDb!=1 ){
sqlite3DbFree(db, pTableName->a[0].zDatabase);
@@ -114639,7 +104760,8 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
goto trigger_cleanup;
}
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
- if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),zName) ){
+ if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),
+ zName, sqlite3Strlen30(zName)) ){
if( !noErr ){
sqlite3ErrorMsg(pParse, "trigger %T already exists", pName);
}else{
@@ -114652,6 +104774,7 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
/* Do not create a trigger on a system table */
if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){
sqlite3ErrorMsg(pParse, "cannot create trigger on system table");
+ pParse->nErr++;
goto trigger_cleanup;
}
@@ -114781,12 +104904,13 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
Trigger *pLink = pTrig;
Hash *pHash = &db->aDb[iDb].pSchema->trigHash;
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
- pTrig = sqlite3HashInsert(pHash, zName, pTrig);
+ pTrig = sqlite3HashInsert(pHash, zName, sqlite3Strlen30(zName), pTrig);
if( pTrig ){
db->mallocFailed = 1;
}else if( pLink->pSchema==pLink->pTabSchema ){
Table *pTab;
- pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table);
+ int n = sqlite3Strlen30(pLink->table);
+ pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table, n);
assert( pTab!=0 );
pLink->pNext = pTab->pTrigger;
pTab->pTrigger = pLink;
@@ -114831,12 +104955,12 @@ static TriggerStep *triggerStepAllocate(
){
TriggerStep *pTriggerStep;
- pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n + 1);
+ pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n);
if( pTriggerStep ){
char *z = (char*)&pTriggerStep[1];
memcpy(z, pName->z, pName->n);
- sqlite3Dequote(z);
- pTriggerStep->zTarget = z;
+ pTriggerStep->target.z = z;
+ pTriggerStep->target.n = pName->n;
pTriggerStep->op = op;
}
return pTriggerStep;
@@ -114853,21 +104977,25 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(
sqlite3 *db, /* The database connection */
Token *pTableName, /* Name of the table into which we insert */
IdList *pColumn, /* List of columns in pTableName to insert into */
+ ExprList *pEList, /* The VALUE clause: a list of values to be inserted */
Select *pSelect, /* A SELECT statement that supplies values */
u8 orconf /* The conflict algorithm (OE_Abort, OE_Replace, etc.) */
){
TriggerStep *pTriggerStep;
- assert(pSelect != 0 || db->mallocFailed);
+ assert(pEList == 0 || pSelect == 0);
+ assert(pEList != 0 || pSelect != 0 || db->mallocFailed);
pTriggerStep = triggerStepAllocate(db, TK_INSERT, pTableName);
if( pTriggerStep ){
pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
pTriggerStep->pIdList = pColumn;
+ pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE);
pTriggerStep->orconf = orconf;
}else{
sqlite3IdListDelete(db, pColumn);
}
+ sqlite3ExprListDelete(db, pEList);
sqlite3SelectDelete(db, pSelect);
return pTriggerStep;
@@ -114945,6 +105073,7 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr)
int i;
const char *zDb;
const char *zName;
+ int nName;
sqlite3 *db = pParse->db;
if( db->mallocFailed ) goto drop_trigger_cleanup;
@@ -114955,12 +105084,13 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr)
assert( pName->nSrc==1 );
zDb = pName->a[0].zDatabase;
zName = pName->a[0].zName;
+ nName = sqlite3Strlen30(zName);
assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
for(i=OMIT_TEMPDB; i<db->nDb; i++){
int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
if( zDb && sqlite3StrICmp(db->aDb[j].zName, zDb) ) continue;
assert( sqlite3SchemaMutexHeld(db, j, 0) );
- pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName);
+ pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName, nName);
if( pTrigger ) break;
}
if( !pTrigger ){
@@ -114983,7 +105113,8 @@ drop_trigger_cleanup:
** is set on.
*/
static Table *tableOfTrigger(Trigger *pTrigger){
- return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table);
+ int n = sqlite3Strlen30(pTrigger->table);
+ return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table, n);
}
@@ -115019,7 +105150,6 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
assert( pTable!=0 );
if( (v = sqlite3GetVdbe(pParse))!=0 ){
int base;
- static const int iLn = VDBE_OFFSET_LINENO(2);
static const VdbeOpList dropTrigger[] = {
{ OP_Rewind, 0, ADDR(9), 0},
{ OP_String8, 0, 1, 0}, /* 1 */
@@ -115034,7 +105164,7 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
sqlite3BeginWriteOperation(pParse, 0, iDb);
sqlite3OpenMasterTable(pParse, iDb);
- base = sqlite3VdbeAddOpList(v, ArraySize(dropTrigger), dropTrigger, iLn);
+ base = sqlite3VdbeAddOpList(v, ArraySize(dropTrigger), dropTrigger);
sqlite3VdbeChangeP4(v, base+1, pTrigger->zName, P4_TRANSIENT);
sqlite3VdbeChangeP4(v, base+4, "trigger", P4_STATIC);
sqlite3ChangeCookie(pParse, iDb);
@@ -115055,7 +105185,7 @@ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3 *db, int iDb, const ch
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
pHash = &(db->aDb[iDb].pSchema->trigHash);
- pTrigger = sqlite3HashInsert(pHash, zName, 0);
+ pTrigger = sqlite3HashInsert(pHash, zName, sqlite3Strlen30(zName), 0);
if( ALWAYS(pTrigger) ){
if( pTrigger->pSchema==pTrigger->pTabSchema ){
Table *pTab = tableOfTrigger(pTrigger);
@@ -115119,7 +105249,7 @@ SQLITE_PRIVATE Trigger *sqlite3TriggersExist(
}
/*
-** Convert the pStep->zTarget string into a SrcList and return a pointer
+** Convert the pStep->target token into a SrcList and return a pointer
** to that SrcList.
**
** This routine adds a specific database name, if needed, to the target when
@@ -115132,17 +105262,17 @@ static SrcList *targetSrcList(
Parse *pParse, /* The parsing context */
TriggerStep *pStep /* The trigger containing the target token */
){
- sqlite3 *db = pParse->db;
int iDb; /* Index of the database to use */
SrcList *pSrc; /* SrcList to be returned */
- pSrc = sqlite3SrcListAppend(db, 0, 0, 0);
+ pSrc = sqlite3SrcListAppend(pParse->db, 0, &pStep->target, 0);
if( pSrc ){
assert( pSrc->nSrc>0 );
- pSrc->a[pSrc->nSrc-1].zName = sqlite3DbStrDup(db, pStep->zTarget);
- iDb = sqlite3SchemaToIndex(db, pStep->pTrig->pSchema);
+ assert( pSrc->a!=0 );
+ iDb = sqlite3SchemaToIndex(pParse->db, pStep->pTrig->pSchema);
if( iDb==0 || iDb>=2 ){
- assert( iDb<db->nDb );
+ sqlite3 *db = pParse->db;
+ assert( iDb<pParse->db->nDb );
pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName);
}
}
@@ -115180,7 +105310,15 @@ static int codeTriggerProgram(
** INSERT OR IGNORE INTO t1 ... ; -- insert into t2 uses IGNORE policy
*/
pParse->eOrconf = (orconf==OE_Default)?pStep->orconf:(u8)orconf;
- assert( pParse->okConstFactor==0 );
+
+ /* Clear the cookieGoto flag. When coding triggers, the cookieGoto
+ ** variable is used as a flag to indicate to sqlite3ExprCodeConstants()
+ ** that it is not safe to refactor constants (this happens after the
+ ** start of the first loop in the SQL statement is coded - at that
+ ** point code may be conditionally executed, so it is no longer safe to
+ ** initialize constant register values). */
+ assert( pParse->cookieGoto==0 || pParse->cookieGoto==-1 );
+ pParse->cookieGoto = 0;
switch( pStep->op ){
case TK_UPDATE: {
@@ -115195,6 +105333,7 @@ static int codeTriggerProgram(
case TK_INSERT: {
sqlite3Insert(pParse,
targetSrcList(pParse, pStep),
+ sqlite3ExprListDup(db, pStep->pExprList, 0),
sqlite3SelectDup(db, pStep->pSelect, 0),
sqlite3IdListDup(db, pStep->pIdList),
pParse->eOrconf
@@ -115254,7 +105393,6 @@ static void transferParseError(Parse *pTo, Parse *pFrom){
if( pTo->nErr==0 ){
pTo->zErrMsg = pFrom->zErrMsg;
pTo->nErr = pFrom->nErr;
- pTo->rc = pFrom->rc;
}else{
sqlite3DbFree(pFrom->db, pFrom->zErrMsg);
}
@@ -115593,7 +105731,6 @@ SQLITE_PRIVATE u32 sqlite3TriggerColmask(
** This file contains C code routines that are called by the parser
** to handle UPDATE statements.
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_OMIT_VIRTUALTABLE
/* Forward declaration */
@@ -115653,7 +105790,7 @@ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){
sqlite3VdbeChangeP4(v, -1, (const char *)pValue, P4_MEM);
}
#ifndef SQLITE_OMIT_FLOATING_POINT
- if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){
+ if( iReg>=0 && pTab->aCol[i].affinity==SQLITE_AFF_REAL ){
sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
}
#endif
@@ -115715,9 +105852,9 @@ SQLITE_PRIVATE void sqlite3Update(
/* Register Allocations */
int regRowCount = 0; /* A count of rows changed */
- int regOldRowid = 0; /* The old rowid */
- int regNewRowid = 0; /* The new rowid */
- int regNew = 0; /* Content of the NEW.* table in triggers */
+ int regOldRowid; /* The old rowid */
+ int regNewRowid; /* The new rowid */
+ int regNew; /* Content of the NEW.* table in triggers */
int regOld = 0; /* Content of OLD.* table in triggers */
int regRowSet = 0; /* Rowset of rows to be updated */
int regKey = 0; /* composite PRIMARY KEY value */
@@ -115768,7 +105905,7 @@ SQLITE_PRIVATE void sqlite3Update(
iIdxCur = iDataCur+1;
pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){
- if( IsPrimaryKeyIndex(pIdx) && pPk!=0 ){
+ if( pIdx->autoIndex==2 && pPk!=0 ){
iDataCur = pParse->nTab;
pTabList->a[0].iCursor = iDataCur;
}
@@ -115853,9 +105990,7 @@ SQLITE_PRIVATE void sqlite3Update(
/* There is one entry in the aRegIdx[] array for each index on the table
** being updated. Fill in aRegIdx[] with a register number that will hold
- ** the key for accessing each index.
- **
- ** FIXME: Be smarter about omitting indexes that use expressions.
+ ** the key for accessing each index.
*/
for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
int reg;
@@ -115864,8 +105999,7 @@ SQLITE_PRIVATE void sqlite3Update(
}else{
reg = 0;
for(i=0; i<pIdx->nKeyCol; i++){
- i16 iIdxCol = pIdx->aiColumn[i];
- if( iIdxCol<0 || aXRef[iIdxCol]>=0 ){
+ if( aXRef[pIdx->aiColumn[i]]>=0 ){
reg = ++pParse->nMem;
break;
}
@@ -115881,20 +106015,29 @@ SQLITE_PRIVATE void sqlite3Update(
if( pParse->nested==0 ) sqlite3VdbeCountChanges(v);
sqlite3BeginWriteOperation(pParse, 1, iDb);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ /* Virtual tables must be handled separately */
+ if( IsVirtual(pTab) ){
+ updateVirtualTable(pParse, pTabList, pTab, pChanges, pRowidExpr, aXRef,
+ pWhere, onError);
+ pWhere = 0;
+ pTabList = 0;
+ goto update_cleanup;
+ }
+#endif
+
/* Allocate required registers. */
- if( !IsVirtual(pTab) ){
- regRowSet = ++pParse->nMem;
- regOldRowid = regNewRowid = ++pParse->nMem;
- if( chngPk || pTrigger || hasFK ){
- regOld = pParse->nMem + 1;
- pParse->nMem += pTab->nCol;
- }
- if( chngKey || pTrigger || hasFK ){
- regNewRowid = ++pParse->nMem;
- }
- regNew = pParse->nMem + 1;
+ regRowSet = ++pParse->nMem;
+ regOldRowid = regNewRowid = ++pParse->nMem;
+ if( chngPk || pTrigger || hasFK ){
+ regOld = pParse->nMem + 1;
pParse->nMem += pTab->nCol;
}
+ if( chngKey || pTrigger || hasFK ){
+ regNewRowid = ++pParse->nMem;
+ }
+ regNew = pParse->nMem + 1;
+ pParse->nMem += pTab->nCol;
/* Start the view context. */
if( isView ){
@@ -115902,7 +106045,7 @@ SQLITE_PRIVATE void sqlite3Update(
}
/* If we are trying to update a view, realize that view into
- ** an ephemeral table.
+ ** a ephemeral table.
*/
#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
if( isView ){
@@ -115917,15 +106060,6 @@ SQLITE_PRIVATE void sqlite3Update(
goto update_cleanup;
}
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- /* Virtual tables must be handled separately */
- if( IsVirtual(pTab) ){
- updateVirtualTable(pParse, pTabList, pTab, pChanges, pRowidExpr, aXRef,
- pWhere, onError);
- goto update_cleanup;
- }
-#endif
-
/* Begin the database scan
*/
if( HasRowid(pTab) ){
@@ -115965,7 +106099,6 @@ SQLITE_PRIVATE void sqlite3Update(
if( pWInfo==0 ) goto update_cleanup;
okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
for(i=0; i<nPk; i++){
- assert( pPk->aiColumn[i]>=0 );
sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, pPk->aiColumn[i],
iPk+i);
}
@@ -115975,7 +106108,7 @@ SQLITE_PRIVATE void sqlite3Update(
regKey = iPk;
}else{
sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey,
- sqlite3IndexAffinityStr(db, pPk), nPk);
+ sqlite3IndexAffinityStr(v, pPk), P4_TRANSIENT);
sqlite3VdbeAddOp2(v, OP_IdxInsert, iEph, regKey);
}
sqlite3WhereEnd(pWInfo);
@@ -116016,27 +106149,21 @@ SQLITE_PRIVATE void sqlite3Update(
/* Top of the update loop */
if( okOnePass ){
- if( aToOpen[iDataCur-iBaseCur] && !isView ){
- assert( pPk );
+ if( aToOpen[iDataCur-iBaseCur] ){
+ assert( pPk!=0 );
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey, nKey);
- VdbeCoverageNeverTaken(v);
}
labelContinue = labelBreak;
sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak);
- VdbeCoverageIf(v, pPk==0);
- VdbeCoverageIf(v, pPk!=0);
}else if( pPk ){
labelContinue = sqlite3VdbeMakeLabel(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak);
addrTop = sqlite3VdbeAddOp2(v, OP_RowKey, iEph, regKey);
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey, 0);
- VdbeCoverage(v);
}else{
labelContinue = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet, labelBreak,
regOldRowid);
- VdbeCoverage(v);
sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid);
- VdbeCoverage(v);
}
/* If the record number will change, set register regNewRowid to
@@ -116046,7 +106173,7 @@ SQLITE_PRIVATE void sqlite3Update(
assert( chngKey || pTrigger || hasFK || regOldRowid==regNewRowid );
if( chngRowid ){
sqlite3ExprCode(pParse, pRowidExpr, regNewRowid);
- sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid); VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid);
}
/* Compute the old pre-UPDATE content of the row being changed, if that
@@ -116058,10 +106185,9 @@ SQLITE_PRIVATE void sqlite3Update(
);
for(i=0; i<pTab->nCol; i++){
if( oldmask==0xffffffff
- || (i<32 && (oldmask & MASKBIT32(i))!=0)
+ || (i<32 && (oldmask & (1<<i)))
|| (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0
){
- testcase( oldmask!=0xffffffff && i==31 );
sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regOld+i);
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, regOld+i);
@@ -116073,7 +106199,7 @@ SQLITE_PRIVATE void sqlite3Update(
}
/* Populate the array of registers beginning at regNew with the new
- ** row data. This array is used to check constants, create the new
+ ** row data. This array is used to check constaints, create the new
** table and index records, and as the values for any new.* references
** made by triggers.
**
@@ -116088,14 +106214,15 @@ SQLITE_PRIVATE void sqlite3Update(
newmask = sqlite3TriggerColmask(
pParse, pTrigger, pChanges, 1, TRIGGER_BEFORE, pTab, onError
);
+ sqlite3VdbeAddOp3(v, OP_Null, 0, regNew, regNew+pTab->nCol-1);
for(i=0; i<pTab->nCol; i++){
if( i==pTab->iPKey ){
- sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i);
+ /*sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i);*/
}else{
j = aXRef[i];
if( j>=0 ){
sqlite3ExprCode(pParse, pChanges->a[j].pExpr, regNew+i);
- }else if( 0==(tmask&TRIGGER_BEFORE) || i>31 || (newmask & MASKBIT32(i)) ){
+ }else if( 0==(tmask&TRIGGER_BEFORE) || i>31 || (newmask&(1<<i)) ){
/* This branch loads the value of a column that will not be changed
** into a register. This is done if there are no BEFORE triggers, or
** if there are one or more BEFORE triggers that use this value via
@@ -116104,8 +106231,6 @@ SQLITE_PRIVATE void sqlite3Update(
testcase( i==31 );
testcase( i==32 );
sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i);
- }else{
- sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i);
}
}
}
@@ -116114,7 +106239,8 @@ SQLITE_PRIVATE void sqlite3Update(
** verified. One could argue that this is wrong.
*/
if( tmask&TRIGGER_BEFORE ){
- sqlite3TableAffinity(v, pTab, regNew);
+ sqlite3VdbeAddOp2(v, OP_Affinity, regNew, pTab->nCol);
+ sqlite3TableAffinityStr(v, pTab);
sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
TRIGGER_BEFORE, pTab, regOldRowid, onError, labelContinue);
@@ -116126,10 +106252,8 @@ SQLITE_PRIVATE void sqlite3Update(
*/
if( pPk ){
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue,regKey,nKey);
- VdbeCoverage(v);
}else{
sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid);
- VdbeCoverage(v);
}
/* If it did not delete it, the row-trigger may still have modified
@@ -116145,7 +106269,7 @@ SQLITE_PRIVATE void sqlite3Update(
}
if( !isView ){
- int addr1 = 0; /* Address of jump instruction */
+ int j1 = 0; /* Address of jump instruction */
int bReplace = 0; /* True if REPLACE conflict resolution might happen */
/* Do constraint checks. */
@@ -116161,20 +106285,19 @@ SQLITE_PRIVATE void sqlite3Update(
/* Delete the index entries associated with the current record. */
if( bReplace || chngKey ){
if( pPk ){
- addr1 = sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, 0, regKey, nKey);
+ j1 = sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, 0, regKey, nKey);
}else{
- addr1 = sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, 0, regOldRowid);
+ j1 = sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, 0, regOldRowid);
}
- VdbeCoverageNeverTaken(v);
}
- sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx, -1);
+ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx);
/* If changing the record number, delete the old record. */
if( hasFK || chngKey || pPk!=0 ){
sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0);
}
if( bReplace || chngKey ){
- sqlite3VdbeJumpHere(v, addr1);
+ sqlite3VdbeJumpHere(v, j1);
}
if( hasFK ){
@@ -116209,9 +106332,9 @@ SQLITE_PRIVATE void sqlite3Update(
/* Nothing to do at end-of-loop for a single-pass */
}else if( pPk ){
sqlite3VdbeResolveLabel(v, labelContinue);
- sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop);
}else{
- sqlite3VdbeGoto(v, labelContinue);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, labelContinue);
}
sqlite3VdbeResolveLabel(v, labelBreak);
@@ -116252,7 +106375,7 @@ update_cleanup:
return;
}
/* Make sure "isView" and other macros defined above are undefined. Otherwise
-** they may interfere with compilation of other functions in this file
+** thely may interfere with compilation of other functions in this file
** (or in another file, if this file becomes part of the amalgamation). */
#ifdef isView
#undef isView
@@ -116265,23 +106388,21 @@ update_cleanup:
/*
** Generate code for an UPDATE of a virtual table.
**
-** There are two possible strategies - the default and the special
-** "onepass" strategy. Onepass is only used if the virtual table
-** implementation indicates that pWhere may match at most one row.
-**
-** The default strategy is to create an ephemeral table that contains
+** The strategy is that we create an ephemeral table that contains
** for each row to be changed:
**
** (A) The original rowid of that row.
-** (B) The revised rowid for the row.
+** (B) The revised rowid for the row. (note1)
** (C) The content of every column in the row.
**
-** Then loop through the contents of this ephemeral table executing a
-** VUpdate for each row. When finished, drop the ephemeral table.
+** Then we loop over this ephemeral table and for each row in
+** the ephemeral table call VUpdate.
+**
+** When finished, drop the ephemeral table.
**
-** The "onepass" strategy does not use an ephemeral table. Instead, it
-** stores the same values (A, B and C above) in a register array and
-** makes a single invocation of VUpdate.
+** (note1) Actually, if we know in advance that (A) is always the same
+** as (B) we only store (A), then duplicate (A) when pulling
+** it out of the ephemeral table before calling VUpdate.
*/
static void updateVirtualTable(
Parse *pParse, /* The parsing context */
@@ -116294,96 +106415,68 @@ static void updateVirtualTable(
int onError /* ON CONFLICT strategy */
){
Vdbe *v = pParse->pVdbe; /* Virtual machine under construction */
+ ExprList *pEList = 0; /* The result set of the SELECT statement */
+ Select *pSelect = 0; /* The SELECT statement */
+ Expr *pExpr; /* Temporary expression */
int ephemTab; /* Table holding the result of the SELECT */
int i; /* Loop counter */
+ int addr; /* Address of top of loop */
+ int iReg; /* First register in set passed to OP_VUpdate */
sqlite3 *db = pParse->db; /* Database connection */
const char *pVTab = (const char*)sqlite3GetVTable(db, pTab);
- WhereInfo *pWInfo;
- int nArg = 2 + pTab->nCol; /* Number of arguments to VUpdate */
- int regArg; /* First register in VUpdate arg array */
- int regRec; /* Register in which to assemble record */
- int regRowid; /* Register for ephem table rowid */
- int iCsr = pSrc->a[0].iCursor; /* Cursor used for virtual table scan */
- int aDummy[2]; /* Unused arg for sqlite3WhereOkOnePass() */
- int bOnePass; /* True to use onepass strategy */
- int addr; /* Address of OP_OpenEphemeral */
-
- /* Allocate nArg registers to marshal the arguments to VUpdate. Then
- ** create and open the ephemeral table in which the records created from
- ** these arguments will be temporarily stored. */
- assert( v );
- ephemTab = pParse->nTab++;
- addr= sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, nArg);
- regArg = pParse->nMem + 1;
- pParse->nMem += nArg;
- regRec = ++pParse->nMem;
- regRowid = ++pParse->nMem;
-
- /* Start scanning the virtual table */
- pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0,0,WHERE_ONEPASS_DESIRED,0);
- if( pWInfo==0 ) return;
-
- /* Populate the argument registers. */
- sqlite3VdbeAddOp2(v, OP_Rowid, iCsr, regArg);
+ SelectDest dest;
+
+ /* Construct the SELECT statement that will find the new values for
+ ** all updated rows.
+ */
+ pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, "_rowid_"));
if( pRowid ){
- sqlite3ExprCode(pParse, pRowid, regArg+1);
- }else{
- sqlite3VdbeAddOp2(v, OP_Rowid, iCsr, regArg+1);
+ pEList = sqlite3ExprListAppend(pParse, pEList,
+ sqlite3ExprDup(db, pRowid, 0));
}
+ assert( pTab->iPKey<0 );
for(i=0; i<pTab->nCol; i++){
if( aXRef[i]>=0 ){
- sqlite3ExprCode(pParse, pChanges->a[aXRef[i]].pExpr, regArg+2+i);
+ pExpr = sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0);
}else{
- sqlite3VdbeAddOp3(v, OP_VColumn, iCsr, i, regArg+2+i);
+ pExpr = sqlite3Expr(db, TK_ID, pTab->aCol[i].zName);
}
+ pEList = sqlite3ExprListAppend(pParse, pEList, pExpr);
}
+ pSelect = sqlite3SelectNew(pParse, pEList, pSrc, pWhere, 0, 0, 0, 0, 0, 0);
+
+ /* Create the ephemeral table into which the update results will
+ ** be stored.
+ */
+ assert( v );
+ ephemTab = pParse->nTab++;
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, pTab->nCol+1+(pRowid!=0));
+ sqlite3VdbeChangeP5(v, BTREE_UNORDERED);
- bOnePass = sqlite3WhereOkOnePass(pWInfo, aDummy);
-
- if( bOnePass ){
- /* If using the onepass strategy, no-op out the OP_OpenEphemeral coded
- ** above. Also, if this is a top-level parse (not a trigger), clear the
- ** multi-write flag so that the VM does not open a statement journal */
- sqlite3VdbeChangeToNoop(v, addr);
- if( sqlite3IsToplevel(pParse) ){
- pParse->isMultiWrite = 0;
- }
- }else{
- /* Create a record from the argument register contents and insert it into
- ** the ephemeral table. */
- sqlite3VdbeAddOp3(v, OP_MakeRecord, regArg, nArg, regRec);
- sqlite3VdbeAddOp2(v, OP_NewRowid, ephemTab, regRowid);
- sqlite3VdbeAddOp3(v, OP_Insert, ephemTab, regRec, regRowid);
- }
-
-
- if( bOnePass==0 ){
- /* End the virtual table scan */
- sqlite3WhereEnd(pWInfo);
-
- /* Begin scanning through the ephemeral table. */
- addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v);
+ /* fill the ephemeral table
+ */
+ sqlite3SelectDestInit(&dest, SRT_Table, ephemTab);
+ sqlite3Select(pParse, pSelect, &dest);
- /* Extract arguments from the current row of the ephemeral table and
- ** invoke the VUpdate method. */
- for(i=0; i<nArg; i++){
- sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i, regArg+i);
- }
+ /* Generate code to scan the ephemeral table and call VUpdate. */
+ iReg = ++pParse->nMem;
+ pParse->nMem += pTab->nCol+1;
+ addr = sqlite3VdbeAddOp2(v, OP_Rewind, ephemTab, 0);
+ sqlite3VdbeAddOp3(v, OP_Column, ephemTab, 0, iReg);
+ sqlite3VdbeAddOp3(v, OP_Column, ephemTab, (pRowid?1:0), iReg+1);
+ for(i=0; i<pTab->nCol; i++){
+ sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i+1+(pRowid!=0), iReg+2+i);
}
sqlite3VtabMakeWritable(pParse, pTab);
- sqlite3VdbeAddOp4(v, OP_VUpdate, 0, nArg, regArg, pVTab, P4_VTAB);
+ sqlite3VdbeAddOp4(v, OP_VUpdate, 0, pTab->nCol+2, iReg, pVTab, P4_VTAB);
sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError);
sqlite3MayAbort(pParse);
+ sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1);
+ sqlite3VdbeJumpHere(v, addr);
+ sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0);
- /* End of the ephemeral table scan. Or, if using the onepass strategy,
- ** jump to here if the scan visited zero rows. */
- if( bOnePass==0 ){
- sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1); VdbeCoverage(v);
- sqlite3VdbeJumpHere(v, addr);
- sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0);
- }else{
- sqlite3WhereEnd(pWInfo);
- }
+ /* Cleanup */
+ sqlite3SelectDelete(db, pSelect);
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
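A minimal sketch, assuming only the documented xUpdate calling convention, of the method that ultimately receives the values described above through OP_VUpdate; the function name is illustrative, not part of this codebase.

#include <sqlite3.h>

/* Sketch of an xUpdate method: argv[] mirrors the (A)/(B)/(C) layout
** assembled by updateVirtualTable() above. */
static int exampleVtabUpdate(
  sqlite3_vtab *pVTab,   /* Virtual table being written */
  int argc,              /* Number of entries in argv[] */
  sqlite3_value **argv,  /* Values passed through OP_VUpdate */
  sqlite3_int64 *pRowid  /* OUT: rowid of a newly inserted row */
){
  (void)pVTab;
  if( argc==1 ){
    /* DELETE: argv[0] holds the rowid of the row to remove */
    return SQLITE_OK;
  }
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    /* INSERT: argv[1] is the requested rowid (possibly NULL) and
    ** argv[2..argc-1] are the column values */
    *pRowid = sqlite3_value_int64(argv[1]);  /* 0 when argv[1] is NULL */
    return SQLITE_OK;
  }
  /* UPDATE: argv[0] = original rowid (A), argv[1] = revised rowid (B),
  ** argv[2..argc-1] = new column values (C) */
  return SQLITE_OK;
}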
@@ -116405,8 +106498,6 @@ static void updateVirtualTable(
** Most of the code in this file may be omitted by defining the
** SQLITE_OMIT_VACUUM macro.
*/
-/* #include "sqliteInt.h" */
-/* #include "vdbeInt.h" */
#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)
/*
@@ -116478,14 +106569,14 @@ static int execExecSql(sqlite3 *db, char **pzErrMsg, const char *zSql){
** step (3) requires additional temporary disk space approximately equal
** to the size of the original database for the rollback journal.
** Hence, temporary disk space that is approximately 2x the size of the
-** original database is required. Every page of the database is written
+** orginal database is required. Every page of the database is written
** approximately 3 times: Once for step (2) and twice for step (3).
** Two writes per page are required in step (3) because the original
** database content must be written into the rollback journal prior to
** overwriting the database with the vacuumed content.
**
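** As a rough illustration of these figures: vacuuming a 1 GB database
** needs on the order of 1 GB for the transient database plus 1 GB for
** the rollback journal (about 2 GB of peak temporary space), and about
** 3 GB of total page writes: 1 GB in step (2) and 2 GB in step (3).
**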
** Only 1x temporary space and only 1x writes would be required if
-** the copy of step (3) were replaced by deleting the original database
+** the copy of step (3) were replace by deleting the original database
** and renaming the transient database as the original. But that will
** not work if other processes are attached to the original database.
** And a power loss in between deleting the original and renaming the
@@ -116575,7 +106666,7 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
** cause problems for the call to BtreeSetPageSize() below. */
sqlite3BtreeCommit(pTemp);
- nRes = sqlite3BtreeGetOptimalReserve(pMain);
+ nRes = sqlite3BtreeGetReserve(pMain);
/* A VACUUM cannot change the pagesize of an encrypted database. */
#ifdef SQLITE_HAS_CODEC
@@ -116641,8 +106732,6 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
** the contents to the temporary database.
*/
- assert( (db->flags & SQLITE_Vacuum)==0 );
- db->flags |= SQLITE_Vacuum;
rc = execExecSql(db, pzErrMsg,
"SELECT 'INSERT INTO vacuum_db.' || quote(name) "
"|| ' SELECT * FROM main.' || quote(name) || ';'"
@@ -116650,8 +106739,6 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
"WHERE type = 'table' AND name!='sqlite_sequence' "
" AND coalesce(rootpage,1)>0"
);
- assert( (db->flags & SQLITE_Vacuum)!=0 );
- db->flags &= ~SQLITE_Vacuum;
if( rc!=SQLITE_OK ) goto end_of_vacuum;
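  /* For a hypothetical user table t1, the query above produces the text
  ** "INSERT INTO vacuum_db.t1 SELECT * FROM main.t1;", and execExecSql()
  ** runs each such generated statement to copy that table's contents. */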
/* Copy over the sequence table
@@ -116779,7 +106866,6 @@ end_of_vacuum:
** This file contains code used to help implement virtual tables.
*/
#ifndef SQLITE_OMIT_VIRTUALTABLE
-/* #include "sqliteInt.h" */
/*
** Before a virtual table xCreate() or xConnect() method is invoked, the
@@ -116791,8 +106877,6 @@ end_of_vacuum:
struct VtabCtx {
VTable *pVTable; /* The virtual table being constructed */
Table *pTab; /* The Table object to which the virtual table belongs */
- VtabCtx *pPrior; /* Parent context (if any) */
- int bDeclared; /* True after sqlite3_declare_vtab() is called */
};
/*
@@ -116812,7 +106896,7 @@ static int createModule(
sqlite3_mutex_enter(db->mutex);
nName = sqlite3Strlen30(zName);
- if( sqlite3HashFind(&db->aModule, zName) ){
+ if( sqlite3HashFind(&db->aModule, zName, nName) ){
rc = SQLITE_MISUSE_BKPT;
}else{
Module *pMod;
@@ -116825,8 +106909,7 @@ static int createModule(
pMod->pModule = pModule;
pMod->pAux = pAux;
pMod->xDestroy = xDestroy;
- pMod->pEpoTab = 0;
- pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod);
+ pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,nName,(void*)pMod);
assert( pDel==0 || pDel==pMod );
if( pDel ){
db->mallocFailed = 1;
@@ -116845,31 +106928,25 @@ static int createModule(
/*
** External API function used to create a new virtual-table module.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
+SQLITE_API int sqlite3_create_module(
sqlite3 *db, /* Database in which module is registered */
const char *zName, /* Name assigned to this module */
const sqlite3_module *pModule, /* The definition of the module */
void *pAux /* Context pointer for xCreate/xConnect */
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
-#endif
return createModule(db, zName, pModule, pAux, 0);
}
/*
** External API function used to create a new virtual-table module.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
+SQLITE_API int sqlite3_create_module_v2(
sqlite3 *db, /* Database in which module is registered */
const char *zName, /* Name assigned to this module */
const sqlite3_module *pModule, /* The definition of the module */
void *pAux, /* Context pointer for xCreate/xConnect */
void (*xDestroy)(void *) /* Module destructor function */
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
-#endif
return createModule(db, zName, pModule, pAux, xDestroy);
}
@@ -117053,17 +107130,23 @@ SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){
** deleted.
*/
static void addModuleArgument(sqlite3 *db, Table *pTable, char *zArg){
- int nBytes = sizeof(char *)*(2+pTable->nModuleArg);
+ int i = pTable->nModuleArg++;
+ int nBytes = sizeof(char *)*(1+pTable->nModuleArg);
char **azModuleArg;
azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes);
if( azModuleArg==0 ){
+ int j;
+ for(j=0; j<i; j++){
+ sqlite3DbFree(db, pTable->azModuleArg[j]);
+ }
sqlite3DbFree(db, zArg);
+ sqlite3DbFree(db, pTable->azModuleArg);
+ pTable->nModuleArg = 0;
}else{
- int i = pTable->nModuleArg++;
azModuleArg[i] = zArg;
azModuleArg[i+1] = 0;
- pTable->azModuleArg = azModuleArg;
}
+ pTable->azModuleArg = azModuleArg;
}
/*
@@ -117096,12 +107179,7 @@ SQLITE_PRIVATE void sqlite3VtabBeginParse(
addModuleArgument(db, pTable, sqlite3NameFromToken(db, pModuleName));
addModuleArgument(db, pTable, 0);
addModuleArgument(db, pTable, sqlite3DbStrDup(db, pTable->zName));
- assert( (pParse->sNameToken.z==pName2->z && pName2->z!=0)
- || (pParse->sNameToken.z==pName1->z && pName2->z==0)
- );
- pParse->sNameToken.n = (int)(
- &pModuleName->z[pModuleName->n] - pParse->sNameToken.z
- );
+ pParse->sNameToken.n = (int)(&pModuleName->z[pModuleName->n] - pName1->z);
#ifndef SQLITE_OMIT_AUTHORIZATION
/* Creating a virtual table invokes the authorization callback twice.
@@ -117153,7 +107231,6 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
char *zStmt;
char *zWhere;
int iDb;
- int iReg;
Vdbe *v;
/* Compute the complete text of the CREATE VIRTUAL TABLE statement */
@@ -117188,10 +107265,8 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
sqlite3VdbeAddOp2(v, OP_Expire, 0, 0);
zWhere = sqlite3MPrintf(db, "name='%q' AND type='table'", pTab->zName);
sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere);
-
- iReg = ++pParse->nMem;
- sqlite3VdbeLoadString(v, iReg, pTab->zName);
- sqlite3VdbeAddOp2(v, OP_VCreate, iDb, iReg);
+ sqlite3VdbeAddOp4(v, OP_VCreate, iDb, 0, 0,
+ pTab->zName, sqlite3Strlen30(pTab->zName) + 1);
}
/* If we are rereading the sqlite_master table create the in-memory
@@ -117203,8 +107278,9 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
Table *pOld;
Schema *pSchema = pTab->pSchema;
const char *zName = pTab->zName;
+ int nName = sqlite3Strlen30(zName);
assert( sqlite3SchemaMutexHeld(db, 0, pSchema) );
- pOld = sqlite3HashInsert(&pSchema->tblHash, zName, pTab);
+ pOld = sqlite3HashInsert(&pSchema->tblHash, zName, nName, pTab);
if( pOld ){
db->mallocFailed = 1;
assert( pTab==pOld ); /* Malloc must have failed inside HashInsert() */
@@ -117234,7 +107310,7 @@ SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse *pParse, Token *p){
pArg->z = p->z;
pArg->n = p->n;
}else{
- assert(pArg->z <= p->z);
+ assert(pArg->z < p->z);
pArg->n = (int)(&p->z[p->n] - pArg->z);
}
}
@@ -117251,27 +107327,15 @@ static int vtabCallConstructor(
int (*xConstruct)(sqlite3*,void*,int,const char*const*,sqlite3_vtab**,char**),
char **pzErr
){
- VtabCtx sCtx;
+ VtabCtx sCtx, *pPriorCtx;
VTable *pVTable;
int rc;
const char *const*azArg = (const char *const*)pTab->azModuleArg;
int nArg = pTab->nModuleArg;
char *zErr = 0;
- char *zModuleName;
+ char *zModuleName = sqlite3MPrintf(db, "%s", pTab->zName);
int iDb;
- VtabCtx *pCtx;
-
- /* Check that the virtual-table is not already being initialized */
- for(pCtx=db->pVtabCtx; pCtx; pCtx=pCtx->pPrior){
- if( pCtx->pTab==pTab ){
- *pzErr = sqlite3MPrintf(db,
- "vtable constructor called recursively: %s", pTab->zName
- );
- return SQLITE_LOCKED;
- }
- }
- zModuleName = sqlite3MPrintf(db, "%s", pTab->zName);
if( !zModuleName ){
return SQLITE_NOMEM;
}
@@ -117292,13 +107356,11 @@ static int vtabCallConstructor(
assert( xConstruct );
sCtx.pTab = pTab;
sCtx.pVTable = pVTable;
- sCtx.pPrior = db->pVtabCtx;
- sCtx.bDeclared = 0;
+ pPriorCtx = db->pVtabCtx;
db->pVtabCtx = &sCtx;
rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr);
- db->pVtabCtx = sCtx.pPrior;
+ db->pVtabCtx = pPriorCtx;
if( rc==SQLITE_NOMEM ) db->mallocFailed = 1;
- assert( sCtx.pTab==pTab );
if( SQLITE_OK!=rc ){
if( zErr==0 ){
@@ -117311,17 +107373,15 @@ static int vtabCallConstructor(
}else if( ALWAYS(pVTable->pVtab) ){
/* Justification of ALWAYS(): A correct vtab constructor must allocate
** the sqlite3_vtab object if successful. */
- memset(pVTable->pVtab, 0, sizeof(pVTable->pVtab[0]));
pVTable->pVtab->pModule = pMod->pModule;
pVTable->nRef = 1;
- if( sCtx.bDeclared==0 ){
+ if( sCtx.pTab ){
const char *zFormat = "vtable constructor did not declare schema: %s";
*pzErr = sqlite3MPrintf(db, zFormat, pTab->zName);
sqlite3VtabUnlock(pVTable);
rc = SQLITE_ERROR;
}else{
int iCol;
- u8 oooHidden = 0;
/* If everything went according to plan, link the new VTable structure
** into the linked list headed by pTab->pVTable. Then loop through the
** columns of the table to see if any of them contain the token "hidden".
@@ -117334,10 +107394,7 @@ static int vtabCallConstructor(
char *zType = pTab->aCol[iCol].zType;
int nType;
int i = 0;
- if( !zType ){
- pTab->tabFlags |= oooHidden;
- continue;
- }
+ if( !zType ) continue;
nType = sqlite3Strlen30(zType);
if( sqlite3StrNICmp("hidden", zType, 6)||(zType[6] && zType[6]!=' ') ){
for(i=0; i<nType; i++){
@@ -117360,9 +107417,6 @@ static int vtabCallConstructor(
zType[i-1] = '\0';
}
pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN;
- oooHidden = TF_OOOHidden;
- }else{
- pTab->tabFlags |= oooHidden;
}
}
}
@@ -117392,7 +107446,7 @@ SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse *pParse, Table *pTab){
/* Locate the required virtual table module */
zMod = pTab->azModuleArg[0];
- pMod = (Module*)sqlite3HashFind(&db->aModule, zMod);
+ pMod = (Module*)sqlite3HashFind(&db->aModule, zMod, sqlite3Strlen30(zMod));
if( !pMod ){
const char *zModule = pTab->azModuleArg[0];
@@ -117460,13 +107514,13 @@ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab,
/* Locate the required virtual table module */
zMod = pTab->azModuleArg[0];
- pMod = (Module*)sqlite3HashFind(&db->aModule, zMod);
+ pMod = (Module*)sqlite3HashFind(&db->aModule, zMod, sqlite3Strlen30(zMod));
/* If the module has been registered and includes a Create method,
** invoke it now. If the module has not been registered, return an
** error. Otherwise, do nothing.
*/
- if( pMod==0 || pMod->pModule->xCreate==0 || pMod->pModule->xDestroy==0 ){
+ if( !pMod ){
*pzErr = sqlite3MPrintf(db, "no such module: %s", zMod);
rc = SQLITE_ERROR;
}else{
@@ -117490,26 +107544,19 @@ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab,
** valid to call this function from within the xCreate() or xConnect() of a
** virtual table module.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
- VtabCtx *pCtx;
+SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
Parse *pParse;
+
int rc = SQLITE_OK;
Table *pTab;
char *zErr = 0;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
- pCtx = db->pVtabCtx;
- if( !pCtx || pCtx->bDeclared ){
- sqlite3Error(db, SQLITE_MISUSE);
+ if( !db->pVtabCtx || !(pTab = db->pVtabCtx->pTab) ){
+ sqlite3Error(db, SQLITE_MISUSE, 0);
sqlite3_mutex_leave(db->mutex);
return SQLITE_MISUSE_BKPT;
}
- pTab = pCtx->pTab;
assert( (pTab->tabFlags & TF_Virtual)!=0 );
pParse = sqlite3StackAllocZero(db, sizeof(*pParse));
@@ -117532,9 +107579,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3 *db, const char *zCre
pParse->pNewTable->nCol = 0;
pParse->pNewTable->aCol = 0;
}
- pCtx->bDeclared = 1;
+ db->pVtabCtx->pTab = 0;
}else{
- sqlite3ErrorWithMsg(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr);
+ sqlite3Error(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr);
sqlite3DbFree(db, zErr);
rc = SQLITE_ERROR;
}
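A minimal sketch of the intended call site, assuming the documented xConnect signature: the schema is declared exactly once from inside the constructor, and the HIDDEN token in the declared type of column b is what the column-scanning loop earlier in this file looks for. All names and the schema are illustrative.

#include <sqlite3.h>
#include <string.h>

static int exampleVtabConnect(
  sqlite3 *db,
  void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab,
  char **pzErr
){
  int rc;
  (void)pAux; (void)argc; (void)argv; (void)pzErr;
  /* Declare the schema; "HIDDEN" in the type of column b causes the core
  ** to set COLFLAG_HIDDEN on that column. */
  rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b HIDDEN, c)");
  if( rc!=SQLITE_OK ) return rc;
  *ppVtab = sqlite3_malloc(sizeof(**ppVtab));
  if( *ppVtab==0 ) return SQLITE_NOMEM;
  memset(*ppVtab, 0, sizeof(**ppVtab));
  return SQLITE_OK;
}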
@@ -117567,18 +107614,11 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab
pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName);
if( ALWAYS(pTab!=0 && pTab->pVTable!=0) ){
- VTable *p;
- int (*xDestroy)(sqlite3_vtab *);
- for(p=pTab->pVTable; p; p=p->pNext){
- assert( p->pVtab );
- if( p->pVtab->nRef>0 ){
- return SQLITE_LOCKED;
- }
- }
- p = vtabDisconnectAll(db, pTab);
- xDestroy = p->pMod->pModule->xDestroy;
- assert( xDestroy!=0 ); /* Checked before the virtual table is created */
- rc = xDestroy(p->pVtab);
+ VTable *p = vtabDisconnectAll(db, pTab);
+
+ assert( rc==SQLITE_OK );
+ rc = p->pMod->pModule->xDestroy(p->pVtab);
+
/* Remove the sqlite3_vtab* from the aVTrans[] array, if applicable */
if( rc==SQLITE_OK ){
assert( pTab->pVTable==p && p->pNext==0 );
@@ -117602,10 +107642,8 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab
static void callFinaliser(sqlite3 *db, int offset){
int i;
if( db->aVTrans ){
- VTable **aVTrans = db->aVTrans;
- db->aVTrans = 0;
for(i=0; i<db->nVTrans; i++){
- VTable *pVTab = aVTrans[i];
+ VTable *pVTab = db->aVTrans[i];
sqlite3_vtab *p = pVTab->pVtab;
if( p ){
int (*x)(sqlite3_vtab *);
@@ -117615,8 +107653,9 @@ static void callFinaliser(sqlite3 *db, int offset){
pVTab->iSavepoint = 0;
sqlite3VtabUnlock(pVTab);
}
- sqlite3DbFree(db, aVTrans);
+ sqlite3DbFree(db, db->aVTrans);
db->nVTrans = 0;
+ db->aVTrans = 0;
}
}
@@ -117704,9 +107743,7 @@ SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *db, VTable *pVTab){
if( rc==SQLITE_OK ){
rc = pModule->xBegin(pVTab->pVtab);
if( rc==SQLITE_OK ){
- int iSvpt = db->nStatement + db->nSavepoint;
addToVTrans(db, pVTab);
- if( iSvpt ) rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, iSvpt-1);
}
}
}
@@ -117732,7 +107769,7 @@ SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){
int rc = SQLITE_OK;
assert( op==SAVEPOINT_RELEASE||op==SAVEPOINT_ROLLBACK||op==SAVEPOINT_BEGIN );
- assert( iSavepoint>=-1 );
+ assert( iSavepoint>=0 );
if( db->aVTrans ){
int i;
for(i=0; rc==SQLITE_OK && i<db->nVTrans; i++){
@@ -117850,7 +107887,7 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){
if( pTab==pToplevel->apVtabLock[i] ) return;
}
n = (pToplevel->nVtabLock+1)*sizeof(pToplevel->apVtabLock[0]);
- apVtabLock = sqlite3_realloc64(pToplevel->apVtabLock, n);
+ apVtabLock = sqlite3_realloc(pToplevel->apVtabLock, n);
if( apVtabLock ){
pToplevel->apVtabLock = apVtabLock;
pToplevel->apVtabLock[pToplevel->nVtabLock++] = pTab;
@@ -117860,80 +107897,16 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){
}
/*
-** Check to see if virtual table module pMod can have an eponymous
-** virtual table instance. If it can, create one if one does not already
-** exist. Return non-zero if the eponymous virtual table instance exists
-** when this routine returns, and return zero if it does not exist.
-**
-** An eponymous virtual table instance is one that is named after its
-** module, and more importantly, does not require a CREATE VIRTUAL TABLE
-** statement in order to come into existence. Eponymous virtual table
-** instances always exist. They cannot be DROP-ed.
-**
-** Any virtual table module for which xConnect and xCreate are the same
-** method can have an eponymous virtual table instance.
-*/
-SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){
- const sqlite3_module *pModule = pMod->pModule;
- Table *pTab;
- char *zErr = 0;
- int nName;
- int rc;
- sqlite3 *db = pParse->db;
- if( pMod->pEpoTab ) return 1;
- if( pModule->xCreate!=0 && pModule->xCreate!=pModule->xConnect ) return 0;
- nName = sqlite3Strlen30(pMod->zName) + 1;
- pTab = sqlite3DbMallocZero(db, sizeof(Table) + nName);
- if( pTab==0 ) return 0;
- pMod->pEpoTab = pTab;
- pTab->zName = (char*)&pTab[1];
- memcpy(pTab->zName, pMod->zName, nName);
- pTab->nRef = 1;
- pTab->pSchema = db->aDb[0].pSchema;
- pTab->tabFlags |= TF_Virtual;
- pTab->nModuleArg = 0;
- pTab->iPKey = -1;
- addModuleArgument(db, pTab, sqlite3DbStrDup(db, pTab->zName));
- addModuleArgument(db, pTab, 0);
- addModuleArgument(db, pTab, sqlite3DbStrDup(db, pTab->zName));
- rc = vtabCallConstructor(db, pTab, pMod, pModule->xConnect, &zErr);
- if( rc ){
- sqlite3ErrorMsg(pParse, "%s", zErr);
- sqlite3DbFree(db, zErr);
- sqlite3VtabEponymousTableClear(db, pMod);
- return 0;
- }
- return 1;
-}
-
-/*
-** Erase the eponymous virtual table instance associated with
-** virtual table module pMod, if it exists.
-*/
-SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3 *db, Module *pMod){
- Table *pTab = pMod->pEpoTab;
- if( pTab!=0 ){
- sqlite3DeleteColumnNames(db, pTab);
- sqlite3VtabClear(db, pTab);
- sqlite3DbFree(db, pTab);
- pMod->pEpoTab = 0;
- }
-}
-
-/*
** Return the ON CONFLICT resolution mode in effect for the virtual
** table update operation currently in progress.
**
** The results of this routine are undefined unless it is called from
** within an xUpdate method.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *db){
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *db){
static const unsigned char aMap[] = {
SQLITE_ROLLBACK, SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE
};
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
assert( OE_Rollback==1 && OE_Abort==2 && OE_Fail==3 );
assert( OE_Ignore==4 && OE_Replace==5 );
assert( db->vtabOnConflict>=1 && db->vtabOnConflict<=5 );
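A minimal sketch, assuming it is called from inside an xUpdate implementation (the only context in which the value above is defined); the helper name and the mapping to return codes are illustrative.

#include <sqlite3.h>

/* Map the active ON CONFLICT mode to a hypothetical xUpdate outcome. */
static int exampleConflictAction(sqlite3 *db){
  switch( sqlite3_vtab_on_conflict(db) ){
    case SQLITE_IGNORE:  return SQLITE_OK;          /* silently skip the row */
    case SQLITE_REPLACE: return SQLITE_OK;          /* caller overwrites the row */
    default:             return SQLITE_CONSTRAINT;  /* ROLLBACK, ABORT or FAIL */
  }
}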
@@ -117945,14 +107918,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *db){
** the SQLite core with additional information about the behavior
** of the virtual table being implemented.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){
+SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){
va_list ap;
int rc = SQLITE_OK;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
+
va_start(ap, op);
switch( op ){
case SQLITE_VTAB_CONSTRAINT_SUPPORT: {
@@ -117971,7 +107942,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){
}
va_end(ap);
- if( rc!=SQLITE_OK ) sqlite3Error(db, rc);
+ if( rc!=SQLITE_OK ) sqlite3Error(db, rc, 0);
sqlite3_mutex_leave(db->mutex);
return rc;
}
@@ -117979,9 +107950,9 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){
#endif /* SQLITE_OMIT_VIRTUALTABLE */
/************** End of vtab.c ************************************************/
-/************** Begin file wherecode.c ***************************************/
+/************** Begin file where.c *******************************************/
/*
-** 2015-06-06
+** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
@@ -117992,15 +107963,13 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){
**
*************************************************************************
** This module contains C code that generates VDBE code used to process
-** the WHERE clause of SQL statements.
-**
-** This file was split off from where.c on 2015-06-06 in order to reduce the
-** size of where.c and make it easier to edit. This file contains the routines
-** that actually generate the bulk of the WHERE loop code. The original where.c
-** file retains the code that does query planning and analysis.
+** the WHERE clause of SQL statements. This module is responsible for
+** generating the code that loops through a table looking for applicable
+** rows. Indices are selected and used to speed the search when doing
+** so is applicable. Because this module is responsible for selecting
+** indices, you might also think of this module as the "query optimizer".
*/
-/* #include "sqliteInt.h" */
-/************** Include whereInt.h in the middle of wherecode.c **************/
+/************** Include whereInt.h in the middle of where.c ******************/
/************** Begin file whereInt.h ****************************************/
/*
** 2013-11-12
@@ -118023,7 +107992,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){
** Trace output macros
*/
#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
-/***/ int sqlite3WhereTrace;
+/***/ int sqlite3WhereTrace = 0;
#endif
#if defined(SQLITE_DEBUG) \
&& (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE))
@@ -118073,10 +108042,8 @@ struct WhereLevel {
int addrCont; /* Jump here to continue with the next loop cycle */
int addrFirst; /* First instruction of interior of the loop */
int addrBody; /* Beginning of the body of this loop */
- int iLikeRepCntr; /* LIKE range processing counter register */
- int addrLikeRep; /* LIKE range processing address */
u8 iFrom; /* Which entry in the FROM clause */
- u8 op, p3, p5; /* Opcode, P3 & P5 of the opcode that ends the loop */
+ u8 op, p5; /* Opcode and P5 of the opcode that ends the loop */
  int p1, p2; /* Operands of the opcode used to end the loop */
union { /* Information that depends on pWLoop->wsFlags */
struct {
@@ -118091,9 +108058,6 @@ struct WhereLevel {
} u;
struct WhereLoop *pWLoop; /* The selected WhereLoop object */
Bitmask notReady; /* FROM entries not usable at this level */
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- int addrVisit; /* Address at which row is visited */
-#endif
};
/*
@@ -118124,25 +108088,25 @@ struct WhereLoop {
union {
struct { /* Information for internal btree tables */
u16 nEq; /* Number of equality constraints */
+ u16 nSkip; /* Number of initial index columns to skip */
Index *pIndex; /* Index used, or NULL */
} btree;
struct { /* Information for virtual tables */
int idxNum; /* Index number */
u8 needFree; /* True if sqlite3_free(idxStr) is needed */
- i8 isOrdered; /* True if satisfies ORDER BY */
+ u8 isOrdered; /* True if satisfies ORDER BY */
u16 omitMask; /* Terms that may be omitted */
char *idxStr; /* Index identifier string */
} vtab;
} u;
u32 wsFlags; /* WHERE_* flags describing the plan */
u16 nLTerm; /* Number of entries in aLTerm[] */
- u16 nSkip; /* Number of NULL aLTerm[] entries */
/**** whereLoopXfer() copies fields above ***********************/
# define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot)
u16 nLSlot; /* Number of slots allocated for aLTerm[] */
WhereTerm **aLTerm; /* WhereTerms used */
WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */
- WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */
+ WhereTerm *aLTermSpace[4]; /* Initial aLTerm[] space */
};
/* This object holds the prerequisites and the cost of running a
@@ -118165,6 +108129,10 @@ struct WhereOrSet {
WhereOrCost a[N_OR_COST]; /* Set of best costs */
};
+
+/* Forward declaration of methods */
+static int whereLoopResize(sqlite3*, WhereLoop*, int);
+
/*
** Each instance of this object holds a sequence of WhereLoop objects
** that implement some or all of a query plan.
@@ -118181,15 +108149,15 @@ struct WhereOrSet {
** 1. Then using those as a basis to compute the N best WherePath objects
** of length 2. And so forth until the length of WherePaths equals the
** number of nodes in the FROM clause. The best (lowest cost) WherePath
-** at the end is the chosen query plan.
+** at the end is the choosen query plan.
*/
struct WherePath {
Bitmask maskLoop; /* Bitmask of all WhereLoop objects in this path */
Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */
LogEst nRow; /* Estimated number of rows generated by this path */
LogEst rCost; /* Total cost of this path */
- LogEst rUnsorted; /* Total cost of this path ignoring sorting costs */
- i8 isOrdered; /* No. of ORDER BY terms satisfied. -1 for unknown */
+ u8 isOrdered; /* True if this path satisfies ORDER BY */
+ u8 isOrderedValid; /* True if the isOrdered field is valid */
WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */
};
@@ -118255,7 +108223,7 @@ struct WhereTerm {
} u;
LogEst truthProb; /* Probability of truth for this expression */
u16 eOperator; /* A WO_xx value describing <op> */
- u16 wtFlags; /* TERM_xxx bit flags. See below */
+ u8 wtFlags; /* TERM_xxx bit flags. See below */
u8 nChild; /* Number of children that must disable us */
WhereClause *pWC; /* The clause this term is part of */
Bitmask prereqRight; /* Bitmask of tables used by pExpr->pRight */
@@ -118277,10 +108245,6 @@ struct WhereTerm {
#else
# define TERM_VNULL 0x00 /* Disabled if not using stat3 */
#endif
-#define TERM_LIKEOPT 0x100 /* Virtual terms from the LIKE optimization */
-#define TERM_LIKECOND 0x200 /* Conditionally this LIKE operator term */
-#define TERM_LIKE 0x400 /* The original LIKE operator */
-#define TERM_IS 0x800 /* Term.pExpr is an IS operator */
/*
** An instance of the WhereScan object is used as an iterator for locating
@@ -118290,14 +108254,12 @@ struct WhereScan {
WhereClause *pOrigWC; /* Original, innermost WhereClause */
WhereClause *pWC; /* WhereClause currently being scanned */
char *zCollName; /* Required collating sequence, if not NULL */
- Expr *pIdxExpr; /* Search for this index expression */
char idxaff; /* Must match this affinity, if zCollName!=NULL */
unsigned char nEquiv; /* Number of entries in aEquiv[] */
unsigned char iEquiv; /* Next unused slot in aEquiv[] */
u32 opMask; /* Acceptable operators */
int k; /* Resume scanning at this->pWC->a[this->k] */
- int aiCur[11]; /* Cursors in the equivalence class */
- i16 aiColumn[11]; /* Corresponding column number in the eq-class */
+ int aEquiv[22]; /* Cursor,Column pairs for equivalence classes */
};
/*
@@ -118375,11 +108337,6 @@ struct WhereMaskSet {
};
/*
-** Initialize a WhereMaskSet object
-*/
-#define initMaskSet(P) (P)->n=0
-
-/*
** This object is a convenience wrapper holding all information needed
** to construct WhereLoop objects for a particular query.
*/
@@ -118414,9 +108371,8 @@ struct WhereInfo {
Bitmask revMask; /* Mask of ORDER BY terms that need reversing */
LogEst nRowOut; /* Estimated number of output rows */
u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */
- i8 nOBSat; /* Number of ORDER BY terms satisfied by indices */
- u8 sorted; /* True if really sorted (not just grouped) */
- u8 eOnePass; /* ONEPASS_OFF, or _SINGLE, or _MULTI */
+ u8 bOBSat; /* ORDER BY satisfied by indices */
+ u8 okOnePass; /* Ok to use one-pass algorithm for UPDATE/DELETE */
u8 untestedTerms; /* Not all WHERE terms resolved by outer loop */
u8 eDistinct; /* One of the WHERE_DISTINCT_* values below */
u8 nLevel; /* Number of nested loop */
@@ -118431,84 +108387,26 @@ struct WhereInfo {
};
/*
-** Private interfaces - callable only by other where.c routines.
-**
-** where.c:
-*/
-SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int);
-SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
- WhereClause *pWC, /* The WHERE clause to be searched */
- int iCur, /* Cursor number of LHS */
- int iColumn, /* Column number of LHS */
- Bitmask notReady, /* RHS must not overlap with this mask */
- u32 op, /* Mask of WO_xx values describing operator */
- Index *pIdx /* Must be compatible with this index, if not NULL */
-);
-
-/* wherecode.c: */
-#ifndef SQLITE_OMIT_EXPLAIN
-SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
- Parse *pParse, /* Parse context */
- SrcList *pTabList, /* Table list this loop refers to */
- WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */
- int iLevel, /* Value for "level" column of output */
- int iFrom, /* Value for "from" column of output */
- u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */
-);
-#else
-# define sqlite3WhereExplainOneScan(u,v,w,x,y,z) 0
-#endif /* SQLITE_OMIT_EXPLAIN */
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
-SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
- Vdbe *v, /* Vdbe to add scanstatus entry to */
- SrcList *pSrclist, /* FROM clause pLvl reads data from */
- WhereLevel *pLvl, /* Level to add scanstatus() entry for */
- int addrExplain /* Address of OP_Explain (or 0) */
-);
-#else
-# define sqlite3WhereAddScanStatus(a, b, c, d) ((void)d)
-#endif
-SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
- WhereInfo *pWInfo, /* Complete information about the WHERE clause */
- int iLevel, /* Which level of pWInfo->a[] should be coded */
- Bitmask notReady /* Which tables are currently available */
-);
-
-/* whereexpr.c: */
-SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*);
-SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause*);
-SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause*,Expr*,u8);
-SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*);
-SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*);
-SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*);
-SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereClause*);
-
-
-
-
-
-/*
** Bitmasks for the operators on WhereTerm objects. These are all
** operators that are of interest to the query planner. An
** OR-ed combination of these values can be used when searching for
** particular WhereTerms within a WhereClause.
*/
-#define WO_IN 0x0001
-#define WO_EQ 0x0002
+#define WO_IN 0x001
+#define WO_EQ 0x002
#define WO_LT (WO_EQ<<(TK_LT-TK_EQ))
#define WO_LE (WO_EQ<<(TK_LE-TK_EQ))
#define WO_GT (WO_EQ<<(TK_GT-TK_EQ))
#define WO_GE (WO_EQ<<(TK_GE-TK_EQ))
-#define WO_MATCH 0x0040
-#define WO_IS 0x0080
-#define WO_ISNULL 0x0100
-#define WO_OR 0x0200 /* Two or more OR-connected terms */
-#define WO_AND 0x0400 /* Two or more AND-connected terms */
-#define WO_EQUIV 0x0800 /* Of the form A==B, both columns */
-#define WO_NOOP 0x1000 /* This term does not restrict search space */
+#define WO_MATCH 0x040
+#define WO_ISNULL 0x080
+#define WO_OR 0x100 /* Two or more OR-connected terms */
+#define WO_AND 0x200 /* Two or more AND-connected terms */
+#define WO_EQUIV 0x400 /* Of the form A==B, both columns */
+#define WO_NOOP 0x800 /* This term does not restrict search space */
-#define WO_ALL 0x1fff /* Mask of all possible WO_* values */
-#define WO_SINGLE 0x01ff /* Mask of all non-compound WO_* values */
+#define WO_ALL 0xfff /* Mask of all possible WO_* values */
+#define WO_SINGLE 0x0ff /* Mask of all non-compound WO_* values */
/*
** These are definitions of bits in the WhereLoop.wsFlags field.
@@ -118532,1536 +108430,139 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereC
#define WHERE_MULTI_OR 0x00002000 /* OR using multiple indices */
#define WHERE_AUTO_INDEX 0x00004000 /* Uses an ephemeral index */
#define WHERE_SKIPSCAN 0x00008000 /* Uses the skip-scan algorithm */
-#define WHERE_UNQ_WANTED 0x00010000 /* WHERE_ONEROW would have been helpful*/
-#define WHERE_PARTIALIDX 0x00020000 /* The automatic index is partial */
/************** End of whereInt.h ********************************************/
-/************** Continuing where we left off in wherecode.c ******************/
+/************** Continuing where we left off in where.c **********************/
-#ifndef SQLITE_OMIT_EXPLAIN
/*
-** This routine is a helper for explainIndexRange() below
-**
-** pStr holds the text of an expression that we are building up one term
-** at a time. This routine adds a new term to the end of the expression.
-** Terms are separated by AND so add the "AND" text for second and subsequent
-** terms only.
+** Return the estimated number of output rows from a WHERE clause
*/
-static void explainAppendTerm(
- StrAccum *pStr, /* The text expression being built */
- int iTerm, /* Index of this term. First is zero */
- const char *zColumn, /* Name of the column */
- const char *zOp /* Name of the operator */
-){
- if( iTerm ) sqlite3StrAccumAppend(pStr, " AND ", 5);
- sqlite3StrAccumAppendAll(pStr, zColumn);
- sqlite3StrAccumAppend(pStr, zOp, 1);
- sqlite3StrAccumAppend(pStr, "?", 1);
+SQLITE_PRIVATE u64 sqlite3WhereOutputRowCount(WhereInfo *pWInfo){
+ return sqlite3LogEstToInt(pWInfo->nRowOut);
}
/*
-** Return the name of the i-th column of the pIdx index.
+** Return one of the WHERE_DISTINCT_xxxxx values to indicate how this
+** WHERE clause returns outputs for DISTINCT processing.
*/
-static const char *explainIndexColumnName(Index *pIdx, int i){
- i = pIdx->aiColumn[i];
- if( i==XN_EXPR ) return "<expr>";
- if( i==XN_ROWID ) return "rowid";
- return pIdx->pTable->aCol[i].zName;
+SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){
+ return pWInfo->eDistinct;
}
/*
-** Argument pLevel describes a strategy for scanning table pTab. This
-** function appends text to pStr that describes the subset of table
-** rows scanned by the strategy in the form of an SQL expression.
-**
-** For example, if the query:
-**
-** SELECT * FROM t1 WHERE a=1 AND b>2;
-**
-** is run and there is an index on (a, b), then this function returns a
-** string similar to:
-**
-** "a=? AND b>?"
+** Return TRUE if the WHERE clause returns rows in ORDER BY order.
+** Return FALSE if the output needs to be sorted.
*/
-static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){
- Index *pIndex = pLoop->u.btree.pIndex;
- u16 nEq = pLoop->u.btree.nEq;
- u16 nSkip = pLoop->nSkip;
- int i, j;
-
- if( nEq==0 && (pLoop->wsFlags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ) return;
- sqlite3StrAccumAppend(pStr, " (", 2);
- for(i=0; i<nEq; i++){
- const char *z = explainIndexColumnName(pIndex, i);
- if( i ) sqlite3StrAccumAppend(pStr, " AND ", 5);
- sqlite3XPrintf(pStr, 0, i>=nSkip ? "%s=?" : "ANY(%s)", z);
- }
-
- j = i;
- if( pLoop->wsFlags&WHERE_BTM_LIMIT ){
- const char *z = explainIndexColumnName(pIndex, i);
- explainAppendTerm(pStr, i++, z, ">");
- }
- if( pLoop->wsFlags&WHERE_TOP_LIMIT ){
- const char *z = explainIndexColumnName(pIndex, j);
- explainAppendTerm(pStr, i, z, "<");
- }
- sqlite3StrAccumAppend(pStr, ")", 1);
+SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){
+ return pWInfo->bOBSat!=0;
}
/*
-** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN
-** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was
-** defined at compile-time. If it is not a no-op, a single OP_Explain opcode
-** is added to the output to describe the table scan strategy in pLevel.
-**
-** If an OP_Explain opcode is added to the VM, its address is returned.
-** Otherwise, if no OP_Explain is coded, zero is returned.
+** Return the VDBE address or label to jump to in order to continue
+** immediately with the next row of a WHERE clause.
*/
-SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
- Parse *pParse, /* Parse context */
- SrcList *pTabList, /* Table list this loop refers to */
- WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */
- int iLevel, /* Value for "level" column of output */
- int iFrom, /* Value for "from" column of output */
- u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */
-){
- int ret = 0;
-#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS)
- if( pParse->explain==2 )
-#endif
- {
- struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom];
- Vdbe *v = pParse->pVdbe; /* VM being constructed */
- sqlite3 *db = pParse->db; /* Database handle */
- int iId = pParse->iSelectId; /* Select id (left-most output column) */
- int isSearch; /* True for a SEARCH. False for SCAN. */
- WhereLoop *pLoop; /* The controlling WhereLoop object */
- u32 flags; /* Flags that describe this loop */
- char *zMsg; /* Text to add to EQP output */
- StrAccum str; /* EQP output string */
- char zBuf[100]; /* Initial space for EQP output string */
-
- pLoop = pLevel->pWLoop;
- flags = pLoop->wsFlags;
- if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_ONETABLE_ONLY) ) return 0;
-
- isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0
- || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0))
- || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX));
-
- sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH);
- sqlite3StrAccumAppendAll(&str, isSearch ? "SEARCH" : "SCAN");
- if( pItem->pSelect ){
- sqlite3XPrintf(&str, 0, " SUBQUERY %d", pItem->iSelectId);
- }else{
- sqlite3XPrintf(&str, 0, " TABLE %s", pItem->zName);
- }
-
- if( pItem->zAlias ){
- sqlite3XPrintf(&str, 0, " AS %s", pItem->zAlias);
- }
- if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){
- const char *zFmt = 0;
- Index *pIdx;
-
- assert( pLoop->u.btree.pIndex!=0 );
- pIdx = pLoop->u.btree.pIndex;
- assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) );
- if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){
- if( isSearch ){
- zFmt = "PRIMARY KEY";
- }
- }else if( flags & WHERE_PARTIALIDX ){
- zFmt = "AUTOMATIC PARTIAL COVERING INDEX";
- }else if( flags & WHERE_AUTO_INDEX ){
- zFmt = "AUTOMATIC COVERING INDEX";
- }else if( flags & WHERE_IDX_ONLY ){
- zFmt = "COVERING INDEX %s";
- }else{
- zFmt = "INDEX %s";
- }
- if( zFmt ){
- sqlite3StrAccumAppend(&str, " USING ", 7);
- sqlite3XPrintf(&str, 0, zFmt, pIdx->zName);
- explainIndexRange(&str, pLoop);
- }
- }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){
- const char *zRangeOp;
- if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){
- zRangeOp = "=";
- }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){
- zRangeOp = ">? AND rowid<";
- }else if( flags&WHERE_BTM_LIMIT ){
- zRangeOp = ">";
- }else{
- assert( flags&WHERE_TOP_LIMIT);
- zRangeOp = "<";
- }
- sqlite3XPrintf(&str, 0, " USING INTEGER PRIMARY KEY (rowid%s?)",zRangeOp);
- }
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- else if( (flags & WHERE_VIRTUALTABLE)!=0 ){
- sqlite3XPrintf(&str, 0, " VIRTUAL TABLE INDEX %d:%s",
- pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr);
- }
-#endif
-#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS
- if( pLoop->nOut>=10 ){
- sqlite3XPrintf(&str, 0, " (~%llu rows)", sqlite3LogEstToInt(pLoop->nOut));
- }else{
- sqlite3StrAccumAppend(&str, " (~1 row)", 9);
- }
-#endif
- zMsg = sqlite3StrAccumFinish(&str);
- ret = sqlite3VdbeAddOp4(v, OP_Explain, iId, iLevel, iFrom, zMsg,P4_DYNAMIC);
- }
- return ret;
+SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo *pWInfo){
+ return pWInfo->iContinue;
}
-#endif /* SQLITE_OMIT_EXPLAIN */
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
/*
-** Configure the VM passed as the first argument with an
-** sqlite3_stmt_scanstatus() entry corresponding to the scan used to
-** implement level pLvl. Argument pSrclist is a pointer to the FROM
-** clause that the scan reads data from.
-**
-** If argument addrExplain is not 0, it must be the address of an
-** OP_Explain instruction that describes the same loop.
+** Return the VDBE address or label to jump to in order to break
+** out of a WHERE loop.
*/
-SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
- Vdbe *v, /* Vdbe to add scanstatus entry to */
- SrcList *pSrclist, /* FROM clause pLvl reads data from */
- WhereLevel *pLvl, /* Level to add scanstatus() entry for */
- int addrExplain /* Address of OP_Explain (or 0) */
-){
- const char *zObj = 0;
- WhereLoop *pLoop = pLvl->pWLoop;
- if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){
- zObj = pLoop->u.btree.pIndex->zName;
- }else{
- zObj = pSrclist->a[pLvl->iFrom].zName;
- }
- sqlite3VdbeScanStatus(
- v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj
- );
+SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo *pWInfo){
+ return pWInfo->iBreak;
}
-#endif
-
/*
-** Disable a term in the WHERE clause. Except, do not disable the term
-** if it controls a LEFT OUTER JOIN and it did not originate in the ON
-** or USING clause of that join.
-**
-** Consider the term t2.z='ok' in the following queries:
-**
-** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok'
-** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok'
-** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok'
-**
-** The t2.z='ok' is disabled in the in (2) because it originates
-** in the ON clause. The term is disabled in (3) because it is not part
-** of a LEFT OUTER JOIN. In (1), the term is not disabled.
-**
-** Disabling a term causes that term to not be tested in the inner loop
-** of the join. Disabling is an optimization. When terms are satisfied
-** by indices, we disable them to prevent redundant tests in the inner
-** loop. We would get the correct results if nothing were ever disabled,
-** but joins might run a little slower. The trick is to disable as much
-** as we can without disabling too much. If we disabled in (1), we'd get
-** the wrong answer. See ticket #813.
-**
-** If all the children of a term are disabled, then that term is also
-** automatically disabled. In this way, terms get disabled if derived
-** virtual terms are tested first. For example:
-**
-** x GLOB 'abc*' AND x>='abc' AND x<'acd'
-** \___________/ \______/ \_____/
-** parent child1 child2
+** Return TRUE if an UPDATE or DELETE statement can operate directly on
+** the rowids returned by a WHERE clause. Return FALSE if doing an
+** UPDATE or DELETE might change subsequent WHERE clause results.
**
-** Only the parent term was in the original WHERE clause. The child1
-** and child2 terms were added by the LIKE optimization. If both of
-** the virtual child terms are valid, then testing of the parent can be
-** skipped.
+** If the ONEPASS optimization is used (if this routine returns true)
+** then also write the indices of open cursors used by ONEPASS
+** into aiCur[0] and aiCur[1].  aiCur[0] gets the cursor of the data
+** table and aiCur[1] gets the cursor used by an auxiliary index.
+** Either value may be -1, indicating that the cursor is not used.
+** Any cursors returned will have been opened for writing.
**
-** Usually the parent term is marked as TERM_CODED. But if the parent
-** term was originally TERM_LIKE, then the parent gets TERM_LIKECOND instead.
-** The TERM_LIKECOND marking indicates that the term should be coded inside
-** a conditional such that it is only evaluated on the second pass of a
-** LIKE-optimization loop, when scanning BLOBs instead of strings.
+** aiCur[0] and aiCur[1] both get -1 if the where-clause logic is
+** unable to use the ONEPASS optimization.
*/
-static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
- int nLoop = 0;
- while( pTerm
- && (pTerm->wtFlags & TERM_CODED)==0
- && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin))
- && (pLevel->notReady & pTerm->prereqAll)==0
- ){
- if( nLoop && (pTerm->wtFlags & TERM_LIKE)!=0 ){
- pTerm->wtFlags |= TERM_LIKECOND;
- }else{
- pTerm->wtFlags |= TERM_CODED;
- }
- if( pTerm->iParent<0 ) break;
- pTerm = &pTerm->pWC->a[pTerm->iParent];
- pTerm->nChild--;
- if( pTerm->nChild!=0 ) break;
- nLoop++;
- }
+SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo *pWInfo, int *aiCur){
+ memcpy(aiCur, pWInfo->aiCurOnePass, sizeof(int)*2);
+ return pWInfo->okOnePass;
}
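
The parent/child bookkeeping performed by disableTerm() above can be exercised in isolation. The following stand-alone sketch is only a model: it uses a simplified Term struct (just iParent, nChild and a TERM_CODED flag) in place of SQLite's WhereTerm, omits the LEFT JOIN and TERM_LIKECOND special cases, and prints which terms get disabled when the two virtual range terms derived from x GLOB 'abc*' are satisfied by an index.

#include <stdio.h>

#define TERM_CODED 0x01

struct Term {
  const char *zText;   /* Text of the term, for printing only */
  int iParent;         /* Index of the parent term, or -1 if none */
  int nChild;          /* Number of children not yet disabled */
  int wtFlags;         /* TERM_CODED once disabled */
};

/* Disable term i; if that was the parent's last active child,
** disable the parent as well (mirrors the loop in disableTerm()). */
static void disableTerm(struct Term *a, int i){
  while( i>=0 && (a[i].wtFlags & TERM_CODED)==0 ){
    a[i].wtFlags |= TERM_CODED;
    printf("disabled: %s\n", a[i].zText);
    if( a[i].iParent<0 ) break;
    i = a[i].iParent;
    if( --a[i].nChild!=0 ) break;   /* Parent still has live children */
  }
}

int main(void){
  /* x GLOB 'abc*' is the parent; the two range terms are its children */
  struct Term a[3] = {
    { "x GLOB 'abc*'", -1, 2, 0 },
    { "x>='abc'",       0, 0, 0 },
    { "x<'abd'",        0, 0, 0 },
  };
  disableTerm(a, 1);   /* disables child1 only */
  disableTerm(a, 2);   /* disables child2, then the parent */
  return 0;
}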
/*
-** Code an OP_Affinity opcode to apply the column affinity string zAff
-** to the n registers starting at base.
-**
-** As an optimization, SQLITE_AFF_BLOB entries (which are no-ops) at the
-** beginning and end of zAff are ignored. If all entries in zAff are
-** SQLITE_AFF_BLOB, then no code gets generated.
-**
-** This routine makes its own copy of zAff so that the caller is free
-** to modify zAff after this routine returns.
+** Move the content of pSrc into pDest
*/
-static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
- Vdbe *v = pParse->pVdbe;
- if( zAff==0 ){
- assert( pParse->db->mallocFailed );
- return;
- }
- assert( v!=0 );
-
- /* Adjust base and n to skip over SQLITE_AFF_BLOB entries at the beginning
- ** and end of the affinity string.
- */
- while( n>0 && zAff[0]==SQLITE_AFF_BLOB ){
- n--;
- base++;
- zAff++;
- }
- while( n>1 && zAff[n-1]==SQLITE_AFF_BLOB ){
- n--;
- }
-
- /* Code the OP_Affinity opcode if there is anything left to do. */
- if( n>0 ){
- sqlite3VdbeAddOp2(v, OP_Affinity, base, n);
- sqlite3VdbeChangeP4(v, -1, zAff, n);
- sqlite3ExprCacheAffinityChange(pParse, base, n);
- }
+static void whereOrMove(WhereOrSet *pDest, WhereOrSet *pSrc){
+ pDest->n = pSrc->n;
+ memcpy(pDest->a, pSrc->a, pDest->n*sizeof(pDest->a[0]));
}
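
The no-op trimming described for codeApplyAffinity() above is just pointer and length arithmetic on the affinity string. The sketch below models only that arithmetic: 'A' stands in for SQLITE_AFF_BLOB, a printf replaces the OP_Affinity instruction, and the register numbers and strings are made-up values for illustration.

#include <stdio.h>

#define AFF_BLOB 'A'   /* stand-in for SQLITE_AFF_BLOB, which is a no-op */

/* Report which registers, if any, still need an OP_Affinity applied
** after no-op entries at both ends of zAff have been skipped. */
static void applyAffinity(int base, int n, const char *zAff){
  while( n>0 && zAff[0]==AFF_BLOB ){ n--; base++; zAff++; }
  while( n>1 && zAff[n-1]==AFF_BLOB ){ n--; }
  if( n>0 ){
    printf("OP_Affinity on registers %d..%d using \"%.*s\"\n",
           base, base+n-1, n, zAff);
  }else{
    printf("no OP_Affinity needed\n");
  }
}

int main(void){
  applyAffinity(5, 4, "ABCA");  /* trims to registers 6..7, "BC" */
  applyAffinity(5, 3, "AAA");   /* entirely no-ops: nothing emitted */
  return 0;
}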
-
/*
-** Generate code for a single equality term of the WHERE clause. An equality
-** term can be either X=expr or X IN (...). pTerm is the term to be
-** coded.
-**
-** The current value for the constraint is left in register iReg.
+** Try to insert a new prerequisite/cost entry into the WhereOrSet pSet.
**
-** For a constraint of the form X=expr, the expression is evaluated and its
-** result is left in a register.  For constraints of the form X IN (...)
-** this routine sets up a loop that will iterate over all values of X.
+** The new entry might overwrite an existing entry, or it might be
+** appended, or it might be discarded. Do whatever is the right thing
+** so that pSet keeps the N_OR_COST best entries seen so far.
*/
-static int codeEqualityTerm(
- Parse *pParse, /* The parsing context */
- WhereTerm *pTerm, /* The term of the WHERE clause to be coded */
- WhereLevel *pLevel, /* The level of the FROM clause we are working on */
- int iEq, /* Index of the equality term within this level */
- int bRev, /* True for reverse-order IN operations */
- int iTarget /* Attempt to leave results in this register */
+static int whereOrInsert(
+ WhereOrSet *pSet, /* The WhereOrSet to be updated */
+ Bitmask prereq, /* Prerequisites of the new entry */
+ LogEst rRun, /* Run-cost of the new entry */
+ LogEst nOut /* Number of outputs for the new entry */
){
- Expr *pX = pTerm->pExpr;
- Vdbe *v = pParse->pVdbe;
- int iReg; /* Register holding results */
-
- assert( iTarget>0 );
- if( pX->op==TK_EQ || pX->op==TK_IS ){
- iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget);
- }else if( pX->op==TK_ISNULL ){
- iReg = iTarget;
- sqlite3VdbeAddOp2(v, OP_Null, 0, iReg);
-#ifndef SQLITE_OMIT_SUBQUERY
- }else{
- int eType;
- int iTab;
- struct InLoop *pIn;
- WhereLoop *pLoop = pLevel->pWLoop;
-
- if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
- && pLoop->u.btree.pIndex!=0
- && pLoop->u.btree.pIndex->aSortOrder[iEq]
- ){
- testcase( iEq==0 );
- testcase( bRev );
- bRev = !bRev;
- }
- assert( pX->op==TK_IN );
- iReg = iTarget;
- eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0);
- if( eType==IN_INDEX_INDEX_DESC ){
- testcase( bRev );
- bRev = !bRev;
- }
- iTab = pX->iTable;
- sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0);
- VdbeCoverageIf(v, bRev);
- VdbeCoverageIf(v, !bRev);
- assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
- pLoop->wsFlags |= WHERE_IN_ABLE;
- if( pLevel->u.in.nIn==0 ){
- pLevel->addrNxt = sqlite3VdbeMakeLabel(v);
+ u16 i;
+ WhereOrCost *p;
+ for(i=pSet->n, p=pSet->a; i>0; i--, p++){
+ if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){
+ goto whereOrInsert_done;
}
- pLevel->u.in.nIn++;
- pLevel->u.in.aInLoop =
- sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
- sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
- pIn = pLevel->u.in.aInLoop;
- if( pIn ){
- pIn += pLevel->u.in.nIn - 1;
- pIn->iCur = iTab;
- if( eType==IN_INDEX_ROWID ){
- pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iReg);
- }else{
- pIn->addrInTop = sqlite3VdbeAddOp3(v, OP_Column, iTab, 0, iReg);
- }
- pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen;
- sqlite3VdbeAddOp1(v, OP_IsNull, iReg); VdbeCoverage(v);
- }else{
- pLevel->u.in.nIn = 0;
+ if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){
+ return 0;
}
-#endif
- }
- disableTerm(pLevel, pTerm);
- return iReg;
-}
-
-/*
-** Generate code that will evaluate all == and IN constraints for an
-** index scan.
-**
-** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c).
-** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10
-** The index has as many as three equality constraints, but in this
-** example, the third "c" value is an inequality. So only two
-** constraints are coded. This routine will generate code to evaluate
-** a==5 and b IN (1,2,3). The current values for a and b will be stored
-** in consecutive registers and the index of the first register is returned.
-**
-** In the example above nEq==2. But this subroutine works for any value
-** of nEq including 0. If nEq==0, this routine is nearly a no-op.
-** The only thing it does is allocate the pLevel->iMem memory cell and
-** compute the affinity string.
-**
-** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints
-** are == or IN and are covered by the nEq. nExtraReg is 1 if there is
-** an inequality constraint (such as the "c>5 AND c<10" in the example) that
-** occurs after the nEq equality constraints.
-**
-** This routine allocates a range of nEq+nExtraReg memory cells and returns
-** the index of the first memory cell in that range. The code that
-** calls this routine will use that memory range to store keys for
-** start and termination conditions of the loop.
-** If one or more IN operators appear, then
-** this routine allocates an additional nEq memory cells for internal
-** use.
-**
-** Before returning, *pzAff is set to point to a buffer containing a
-** copy of the column affinity string of the index allocated using
-** sqlite3DbMalloc(). Except, entries in the copy of the string associated
-** with equality constraints that use BLOB or NONE affinity are set to
-** SQLITE_AFF_BLOB. This is to deal with SQL such as the following:
-**
-** CREATE TABLE t1(a TEXT PRIMARY KEY, b);
-** SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b;
-**
-** In the example above, the index on t1(a) has TEXT affinity. But since
-** the right hand side of the equality constraint (t2.b) has BLOB/NONE affinity,
-** no conversion should be attempted before using a t2.b value as part of
-** a key to search the index. Hence the first byte in the returned affinity
-** string in this example would be set to SQLITE_AFF_BLOB.
-*/
-static int codeAllEqualityTerms(
- Parse *pParse, /* Parsing context */
- WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */
- int bRev, /* Reverse the order of IN operators */
- int nExtraReg, /* Number of extra registers to allocate */
- char **pzAff /* OUT: Set to point to affinity string */
-){
- u16 nEq; /* The number of == or IN constraints to code */
- u16 nSkip; /* Number of left-most columns to skip */
- Vdbe *v = pParse->pVdbe; /* The vm under construction */
- Index *pIdx; /* The index being used for this loop */
- WhereTerm *pTerm; /* A single constraint term */
- WhereLoop *pLoop; /* The WhereLoop object */
- int j; /* Loop counter */
- int regBase; /* Base register */
- int nReg; /* Number of registers to allocate */
- char *zAff; /* Affinity string to return */
-
- /* This module is only called on query plans that use an index. */
- pLoop = pLevel->pWLoop;
- assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 );
- nEq = pLoop->u.btree.nEq;
- nSkip = pLoop->nSkip;
- pIdx = pLoop->u.btree.pIndex;
- assert( pIdx!=0 );
-
- /* Figure out how many memory cells we will need then allocate them.
- */
- regBase = pParse->nMem + 1;
- nReg = pLoop->u.btree.nEq + nExtraReg;
- pParse->nMem += nReg;
-
- zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));
- if( !zAff ){
- pParse->db->mallocFailed = 1;
}
-
- if( nSkip ){
- int iIdxCur = pLevel->iIdxCur;
- sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur);
- VdbeCoverageIf(v, bRev==0);
- VdbeCoverageIf(v, bRev!=0);
- VdbeComment((v, "begin skip-scan on %s", pIdx->zName));
- j = sqlite3VdbeAddOp0(v, OP_Goto);
- pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT),
- iIdxCur, 0, regBase, nSkip);
- VdbeCoverageIf(v, bRev==0);
- VdbeCoverageIf(v, bRev!=0);
- sqlite3VdbeJumpHere(v, j);
- for(j=0; j<nSkip; j++){
- sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, j, regBase+j);
- testcase( pIdx->aiColumn[j]==XN_EXPR );
- VdbeComment((v, "%s", explainIndexColumnName(pIdx, j)));
- }
- }
-
- /* Evaluate the equality constraints
- */
- assert( zAff==0 || (int)strlen(zAff)>=nEq );
- for(j=nSkip; j<nEq; j++){
- int r1;
- pTerm = pLoop->aLTerm[j];
- assert( pTerm!=0 );
- /* The following testcase is true for indices with redundant columns.
- ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */
- testcase( (pTerm->wtFlags & TERM_CODED)!=0 );
- testcase( pTerm->wtFlags & TERM_VIRTUAL );
- r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j);
- if( r1!=regBase+j ){
- if( nReg==1 ){
- sqlite3ReleaseTempReg(pParse, regBase);
- regBase = r1;
- }else{
- sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j);
- }
- }
- testcase( pTerm->eOperator & WO_ISNULL );
- testcase( pTerm->eOperator & WO_IN );
- if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){
- Expr *pRight = pTerm->pExpr->pRight;
- if( (pTerm->wtFlags & TERM_IS)==0 && sqlite3ExprCanBeNull(pRight) ){
- sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk);
- VdbeCoverage(v);
- }
- if( zAff ){
- if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){
- zAff[j] = SQLITE_AFF_BLOB;
- }
- if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){
- zAff[j] = SQLITE_AFF_BLOB;
- }
- }
+ if( pSet->n<N_OR_COST ){
+ p = &pSet->a[pSet->n++];
+ p->nOut = nOut;
+ }else{
+ p = pSet->a;
+ for(i=1; i<pSet->n; i++){
+      if( p->rRun<pSet->a[i].rRun ) p = pSet->a + i;
}
+ if( p->rRun<=rRun ) return 0;
}
- *pzAff = zAff;
- return regBase;
-}
-
-/*
-** If the most recently coded instruction is a constant range constraint
-** that originated from the LIKE optimization, then change the P3 to be
-** pLoop->iLikeRepCntr and set P5.
-**
-** The LIKE optimization tries to evaluate "x LIKE 'abc%'" as a range
-** expression: "x>='ABC' AND x<'abd'". But this requires that the range
-** scan loop run twice, once for strings and a second time for BLOBs.
-** The OP_String opcodes on the second pass convert the upper and lower
-** bound string constants to blobs.  This routine makes the necessary changes
-** to the OP_String opcodes for that to happen.
-*/
-static void whereLikeOptimizationStringFixup(
- Vdbe *v, /* prepared statement under construction */
- WhereLevel *pLevel, /* The loop that contains the LIKE operator */
- WhereTerm *pTerm /* The upper or lower bound just coded */
-){
- if( pTerm->wtFlags & TERM_LIKEOPT ){
- VdbeOp *pOp;
- assert( pLevel->iLikeRepCntr>0 );
- pOp = sqlite3VdbeGetOp(v, -1);
- assert( pOp!=0 );
- assert( pOp->opcode==OP_String8
- || pTerm->pWC->pWInfo->pParse->db->mallocFailed );
- pOp->p3 = pLevel->iLikeRepCntr;
- pOp->p5 = 1;
- }
+whereOrInsert_done:
+ p->prereq = prereq;
+ p->rRun = rRun;
+ if( p->nOut>nOut ) p->nOut = nOut;
+ return 1;
}
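
The insertion policy documented for whereOrInsert() above (keep the N_OR_COST cheapest prerequisite/cost pairs, dropping any candidate that is dominated by a cheaper entry whose prerequisites are a subset of the candidate's) can be demonstrated with plain integers. The struct and field names below follow the SQLite code, but LogEst costs are replaced by ordinary ints and the program is only a toy model, not the planner itself.

#include <stdio.h>

#define N_OR_COST 3
typedef unsigned long long Bitmask;

typedef struct { Bitmask prereq; int rRun; int nOut; } OrCost;
typedef struct { int n; OrCost a[N_OR_COST]; } OrSet;

/* Insert a (prereq, rRun, nOut) candidate.  Return 1 if it was kept
** (appended or overwriting a worse entry), 0 if it was discarded. */
static int orInsert(OrSet *pSet, Bitmask prereq, int rRun, int nOut){
  int i;
  OrCost *p;
  for(i=pSet->n, p=pSet->a; i>0; i--, p++){
    if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){
      goto done;                 /* new entry dominates p: overwrite it */
    }
    if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){
      return 0;                  /* an existing entry dominates the new one */
    }
  }
  if( pSet->n<N_OR_COST ){
    p = &pSet->a[pSet->n++];     /* room left: append */
    p->nOut = nOut;
  }else{
    p = pSet->a;                 /* full: locate the costliest entry */
    for(i=1; i<pSet->n; i++){
      if( p->rRun<pSet->a[i].rRun ) p = pSet->a + i;
    }
    if( p->rRun<=rRun ) return 0;
  }
done:
  p->prereq = prereq;
  p->rRun = rRun;
  if( p->nOut>nOut ) p->nOut = nOut;
  return 1;
}

int main(void){
  OrSet s = {0};
  int i;
  orInsert(&s, 0x1, 40, 10);  /* appended */
  orInsert(&s, 0x2, 50, 10);  /* appended */
  orInsert(&s, 0x4, 60, 10);  /* appended; the set is now full */
  orInsert(&s, 0x6, 70, 10);  /* discarded: dominated by the (0x2, 50) entry */
  orInsert(&s, 0x8, 45, 10);  /* replaces the costliest entry (0x4, 60) */
  for(i=0; i<s.n; i++){
    printf("prereq=0x%llx rRun=%d nOut=%d\n", s.a[i].prereq, s.a[i].rRun, s.a[i].nOut);
  }
  return 0;
}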
-
/*
-** Generate code for the start of the iLevel-th loop in the WHERE clause
-** implementation described by pWInfo.
+** Initialize a preallocated WhereClause structure.
*/
-SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
- WhereInfo *pWInfo, /* Complete information about the WHERE clause */
- int iLevel, /* Which level of pWInfo->a[] should be coded */
- Bitmask notReady /* Which tables are currently available */
+static void whereClauseInit(
+ WhereClause *pWC, /* The WhereClause to be initialized */
+ WhereInfo *pWInfo /* The WHERE processing context */
){
- int j, k; /* Loop counters */
- int iCur; /* The VDBE cursor for the table */
- int addrNxt; /* Where to jump to continue with the next IN case */
- int omitTable; /* True if we use the index only */
- int bRev; /* True if we need to scan in reverse order */
- WhereLevel *pLevel; /* The where level to be coded */
- WhereLoop *pLoop; /* The WhereLoop object being coded */
- WhereClause *pWC; /* Decomposition of the entire WHERE clause */
- WhereTerm *pTerm; /* A WHERE clause term */
- Parse *pParse; /* Parsing context */
- sqlite3 *db; /* Database connection */
- Vdbe *v; /* The prepared stmt under constructions */
- struct SrcList_item *pTabItem; /* FROM clause term being coded */
- int addrBrk; /* Jump here to break out of the loop */
- int addrCont; /* Jump here to continue with next cycle */
- int iRowidReg = 0; /* Rowid is stored in this register, if not zero */
- int iReleaseReg = 0; /* Temp register to free before returning */
-
- pParse = pWInfo->pParse;
- v = pParse->pVdbe;
- pWC = &pWInfo->sWC;
- db = pParse->db;
- pLevel = &pWInfo->a[iLevel];
- pLoop = pLevel->pWLoop;
- pTabItem = &pWInfo->pTabList->a[pLevel->iFrom];
- iCur = pTabItem->iCursor;
- pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur);
- bRev = (pWInfo->revMask>>iLevel)&1;
- omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0
- && (pWInfo->wctrlFlags & WHERE_FORCE_TABLE)==0;
- VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName));
-
- /* Create labels for the "break" and "continue" instructions
- ** for the current loop. Jump to addrBrk to break out of a loop.
- ** Jump to cont to go immediately to the next iteration of the
- ** loop.
- **
- ** When there is an IN operator, we also have a "addrNxt" label that
- ** means to continue with the next IN value combination. When
- ** there are no IN operators in the constraints, the "addrNxt" label
- ** is the same as "addrBrk".
- */
- addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(v);
- addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(v);
-
- /* If this is the right table of a LEFT OUTER JOIN, allocate and
- ** initialize a memory cell that records if this table matches any
- ** row of the left table of the join.
- */
- if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){
- pLevel->iLeftJoin = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin);
- VdbeComment((v, "init LEFT JOIN no-match flag"));
- }
-
- /* Special case of a FROM clause subquery implemented as a co-routine */
- if( pTabItem->fg.viaCoroutine ){
- int regYield = pTabItem->regReturn;
- sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
- pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk);
- VdbeCoverage(v);
- VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName));
- pLevel->op = OP_Goto;
- }else
-
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){
- /* Case 1: The table is a virtual-table. Use the VFilter and VNext
- ** to access the data.
- */
- int iReg; /* P3 Value for OP_VFilter */
- int addrNotFound;
- int nConstraint = pLoop->nLTerm;
-
- sqlite3ExprCachePush(pParse);
- iReg = sqlite3GetTempRange(pParse, nConstraint+2);
- addrNotFound = pLevel->addrBrk;
- for(j=0; j<nConstraint; j++){
- int iTarget = iReg+j+2;
- pTerm = pLoop->aLTerm[j];
- if( pTerm==0 ) continue;
- if( pTerm->eOperator & WO_IN ){
- codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
- addrNotFound = pLevel->addrNxt;
- }else{
- sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget);
- }
- }
- sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg);
- sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1);
- sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg,
- pLoop->u.vtab.idxStr,
- pLoop->u.vtab.needFree ? P4_MPRINTF : P4_STATIC);
- VdbeCoverage(v);
- pLoop->u.vtab.needFree = 0;
- for(j=0; j<nConstraint && j<16; j++){
- if( (pLoop->u.vtab.omitMask>>j)&1 ){
- disableTerm(pLevel, pLoop->aLTerm[j]);
- }
- }
- pLevel->p1 = iCur;
- pLevel->op = pWInfo->eOnePass ? OP_Noop : OP_VNext;
- pLevel->p2 = sqlite3VdbeCurrentAddr(v);
- sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
- sqlite3ExprCachePop(pParse);
- }else
-#endif /* SQLITE_OMIT_VIRTUALTABLE */
-
- if( (pLoop->wsFlags & WHERE_IPK)!=0
- && (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0
- ){
- /* Case 2: We can directly reference a single row using an
- ** equality comparison against the ROWID field. Or
- ** we reference multiple rows using a "rowid IN (...)"
- ** construct.
- */
- assert( pLoop->u.btree.nEq==1 );
- pTerm = pLoop->aLTerm[0];
- assert( pTerm!=0 );
- assert( pTerm->pExpr!=0 );
- assert( omitTable==0 );
- testcase( pTerm->wtFlags & TERM_VIRTUAL );
- iReleaseReg = ++pParse->nMem;
- iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg);
- if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg);
- addrNxt = pLevel->addrNxt;
- sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addrNxt, iRowidReg);
- VdbeCoverage(v);
- sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1);
- sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
- VdbeComment((v, "pk"));
- pLevel->op = OP_Noop;
- }else if( (pLoop->wsFlags & WHERE_IPK)!=0
- && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0
- ){
- /* Case 3: We have an inequality comparison against the ROWID field.
- */
- int testOp = OP_Noop;
- int start;
- int memEndValue = 0;
- WhereTerm *pStart, *pEnd;
-
- assert( omitTable==0 );
- j = 0;
- pStart = pEnd = 0;
- if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++];
- if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++];
- assert( pStart!=0 || pEnd!=0 );
- if( bRev ){
- pTerm = pStart;
- pStart = pEnd;
- pEnd = pTerm;
- }
- if( pStart ){
- Expr *pX; /* The expression that defines the start bound */
- int r1, rTemp; /* Registers for holding the start boundary */
-
- /* The following constant maps TK_xx codes into corresponding
- ** seek opcodes. It depends on a particular ordering of TK_xx
- */
- const u8 aMoveOp[] = {
- /* TK_GT */ OP_SeekGT,
- /* TK_LE */ OP_SeekLE,
- /* TK_LT */ OP_SeekLT,
- /* TK_GE */ OP_SeekGE
- };
- assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */
- assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */
-      assert( TK_GE==TK_GT+3 );      /* ... is correct. */
-
- assert( (pStart->wtFlags & TERM_VNULL)==0 );
- testcase( pStart->wtFlags & TERM_VIRTUAL );
- pX = pStart->pExpr;
- assert( pX!=0 );
- testcase( pStart->leftCursor!=iCur ); /* transitive constraints */
- r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
- sqlite3VdbeAddOp3(v, aMoveOp[pX->op-TK_GT], iCur, addrBrk, r1);
- VdbeComment((v, "pk"));
- VdbeCoverageIf(v, pX->op==TK_GT);
- VdbeCoverageIf(v, pX->op==TK_LE);
- VdbeCoverageIf(v, pX->op==TK_LT);
- VdbeCoverageIf(v, pX->op==TK_GE);
- sqlite3ExprCacheAffinityChange(pParse, r1, 1);
- sqlite3ReleaseTempReg(pParse, rTemp);
- disableTerm(pLevel, pStart);
- }else{
- sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk);
- VdbeCoverageIf(v, bRev==0);
- VdbeCoverageIf(v, bRev!=0);
- }
- if( pEnd ){
- Expr *pX;
- pX = pEnd->pExpr;
- assert( pX!=0 );
- assert( (pEnd->wtFlags & TERM_VNULL)==0 );
- testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */
- testcase( pEnd->wtFlags & TERM_VIRTUAL );
- memEndValue = ++pParse->nMem;
- sqlite3ExprCode(pParse, pX->pRight, memEndValue);
- if( pX->op==TK_LT || pX->op==TK_GT ){
- testOp = bRev ? OP_Le : OP_Ge;
- }else{
- testOp = bRev ? OP_Lt : OP_Gt;
- }
- disableTerm(pLevel, pEnd);
- }
- start = sqlite3VdbeCurrentAddr(v);
- pLevel->op = bRev ? OP_Prev : OP_Next;
- pLevel->p1 = iCur;
- pLevel->p2 = start;
- assert( pLevel->p5==0 );
- if( testOp!=OP_Noop ){
- iRowidReg = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg);
- sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
- sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg);
- VdbeCoverageIf(v, testOp==OP_Le);
- VdbeCoverageIf(v, testOp==OP_Lt);
- VdbeCoverageIf(v, testOp==OP_Ge);
- VdbeCoverageIf(v, testOp==OP_Gt);
- sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL);
- }
- }else if( pLoop->wsFlags & WHERE_INDEXED ){
- /* Case 4: A scan using an index.
- **
- ** The WHERE clause may contain zero or more equality
- ** terms ("==" or "IN" operators) that refer to the N
- ** left-most columns of the index. It may also contain
- ** inequality constraints (>, <, >= or <=) on the indexed
- ** column that immediately follows the N equalities. Only
- ** the right-most column can be an inequality - the rest must
- ** use the "==" and "IN" operators. For example, if the
- ** index is on (x,y,z), then the following clauses are all
- ** optimized:
- **
- ** x=5
- ** x=5 AND y=10
- ** x=5 AND y<10
- ** x=5 AND y>5 AND y<10
- ** x=5 AND y=5 AND z<=10
- **
- ** The z<10 term of the following cannot be used, only
- ** the x=5 term:
- **
- ** x=5 AND z<10
- **
- ** N may be zero if there are inequality constraints.
- ** If there are no inequality constraints, then N is at
- ** least one.
- **
- ** This case is also used when there are no WHERE clause
- ** constraints but an index is selected anyway, in order
- ** to force the output order to conform to an ORDER BY.
- */
- static const u8 aStartOp[] = {
- 0,
- 0,
- OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */
- OP_Last, /* 3: (!start_constraints && startEq && bRev) */
- OP_SeekGT, /* 4: (start_constraints && !startEq && !bRev) */
- OP_SeekLT, /* 5: (start_constraints && !startEq && bRev) */
- OP_SeekGE, /* 6: (start_constraints && startEq && !bRev) */
- OP_SeekLE /* 7: (start_constraints && startEq && bRev) */
- };
- static const u8 aEndOp[] = {
- OP_IdxGE, /* 0: (end_constraints && !bRev && !endEq) */
- OP_IdxGT, /* 1: (end_constraints && !bRev && endEq) */
- OP_IdxLE, /* 2: (end_constraints && bRev && !endEq) */
- OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */
- };
- u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */
- int regBase; /* Base register holding constraint values */
- WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */
- WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */
- int startEq; /* True if range start uses ==, >= or <= */
- int endEq; /* True if range end uses ==, >= or <= */
- int start_constraints; /* Start of range is constrained */
- int nConstraint; /* Number of constraint terms */
- Index *pIdx; /* The index we will be using */
- int iIdxCur; /* The VDBE cursor for the index */
- int nExtraReg = 0; /* Number of extra registers needed */
- int op; /* Instruction opcode */
- char *zStartAff; /* Affinity for start of range constraint */
- char cEndAff = 0; /* Affinity for end of range constraint */
- u8 bSeekPastNull = 0; /* True to seek past initial nulls */
- u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
-
- pIdx = pLoop->u.btree.pIndex;
- iIdxCur = pLevel->iIdxCur;
- assert( nEq>=pLoop->nSkip );
-
- /* If this loop satisfies a sort order (pOrderBy) request that
- ** was passed to this function to implement a "SELECT min(x) ..."
- ** query, then the caller will only allow the loop to run for
- ** a single iteration. This means that the first row returned
- ** should not have a NULL value stored in 'x'. If column 'x' is
- ** the first one after the nEq equality constraints in the index,
- ** this requires some special handling.
- */
- assert( pWInfo->pOrderBy==0
- || pWInfo->pOrderBy->nExpr==1
- || (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 );
- if( (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)!=0
- && pWInfo->nOBSat>0
- && (pIdx->nKeyCol>nEq)
- ){
- assert( pLoop->nSkip==0 );
- bSeekPastNull = 1;
- nExtraReg = 1;
- }
-
- /* Find any inequality constraint terms for the start and end
- ** of the range.
- */
- j = nEq;
- if( pLoop->wsFlags & WHERE_BTM_LIMIT ){
- pRangeStart = pLoop->aLTerm[j++];
- nExtraReg = 1;
- /* Like optimization range constraints always occur in pairs */
- assert( (pRangeStart->wtFlags & TERM_LIKEOPT)==0 ||
- (pLoop->wsFlags & WHERE_TOP_LIMIT)!=0 );
- }
- if( pLoop->wsFlags & WHERE_TOP_LIMIT ){
- pRangeEnd = pLoop->aLTerm[j++];
- nExtraReg = 1;
- if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){
- assert( pRangeStart!=0 ); /* LIKE opt constraints */
- assert( pRangeStart->wtFlags & TERM_LIKEOPT ); /* occur in pairs */
- pLevel->iLikeRepCntr = ++pParse->nMem;
- testcase( bRev );
- testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC );
- sqlite3VdbeAddOp2(v, OP_Integer,
- bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC),
- pLevel->iLikeRepCntr);
- VdbeComment((v, "LIKE loop counter"));
- pLevel->addrLikeRep = sqlite3VdbeCurrentAddr(v);
- }
- if( pRangeStart==0
- && (j = pIdx->aiColumn[nEq])>=0
- && pIdx->pTable->aCol[j].notNull==0
- ){
- bSeekPastNull = 1;
- }
- }
- assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 );
-
- /* Generate code to evaluate all constraint terms using == or IN
- ** and store the values of those terms in an array of registers
- ** starting at regBase.
- */
- regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff);
- assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq );
- if( zStartAff ) cEndAff = zStartAff[nEq];
- addrNxt = pLevel->addrNxt;
-
- /* If we are doing a reverse order scan on an ascending index, or
- ** a forward order scan on a descending index, interchange the
- ** start and end terms (pRangeStart and pRangeEnd).
- */
- if( (nEq<pIdx->nKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC))
- || (bRev && pIdx->nKeyCol==nEq)
- ){
- SWAP(WhereTerm *, pRangeEnd, pRangeStart);
- SWAP(u8, bSeekPastNull, bStopAtNull);
- }
-
- testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 );
- testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 );
- testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 );
- testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 );
- startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE);
- endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE);
- start_constraints = pRangeStart || nEq>0;
-
- /* Seek the index cursor to the start of the range. */
- nConstraint = nEq;
- if( pRangeStart ){
- Expr *pRight = pRangeStart->pExpr->pRight;
- sqlite3ExprCode(pParse, pRight, regBase+nEq);
- whereLikeOptimizationStringFixup(v, pLevel, pRangeStart);
- if( (pRangeStart->wtFlags & TERM_VNULL)==0
- && sqlite3ExprCanBeNull(pRight)
- ){
- sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
- VdbeCoverage(v);
- }
- if( zStartAff ){
- if( sqlite3CompareAffinity(pRight, zStartAff[nEq])==SQLITE_AFF_BLOB){
- /* Since the comparison is to be performed with no conversions
- ** applied to the operands, set the affinity to apply to pRight to
- ** SQLITE_AFF_BLOB. */
- zStartAff[nEq] = SQLITE_AFF_BLOB;
- }
- if( sqlite3ExprNeedsNoAffinityChange(pRight, zStartAff[nEq]) ){
- zStartAff[nEq] = SQLITE_AFF_BLOB;
- }
- }
- nConstraint++;
- testcase( pRangeStart->wtFlags & TERM_VIRTUAL );
- }else if( bSeekPastNull ){
- sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
- nConstraint++;
- startEq = 0;
- start_constraints = 1;
- }
- codeApplyAffinity(pParse, regBase, nConstraint - bSeekPastNull, zStartAff);
- op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
- assert( op!=0 );
- sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
- VdbeCoverage(v);
- VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
- VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last );
- VdbeCoverageIf(v, op==OP_SeekGT); testcase( op==OP_SeekGT );
- VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE );
- VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE );
- VdbeCoverageIf(v, op==OP_SeekLT); testcase( op==OP_SeekLT );
-
- /* Load the value for the inequality constraint at the end of the
- ** range (if any).
- */
- nConstraint = nEq;
- if( pRangeEnd ){
- Expr *pRight = pRangeEnd->pExpr->pRight;
- sqlite3ExprCacheRemove(pParse, regBase+nEq, 1);
- sqlite3ExprCode(pParse, pRight, regBase+nEq);
- whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd);
- if( (pRangeEnd->wtFlags & TERM_VNULL)==0
- && sqlite3ExprCanBeNull(pRight)
- ){
- sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
- VdbeCoverage(v);
- }
- if( sqlite3CompareAffinity(pRight, cEndAff)!=SQLITE_AFF_BLOB
- && !sqlite3ExprNeedsNoAffinityChange(pRight, cEndAff)
- ){
- codeApplyAffinity(pParse, regBase+nEq, 1, &cEndAff);
- }
- nConstraint++;
- testcase( pRangeEnd->wtFlags & TERM_VIRTUAL );
- }else if( bStopAtNull ){
- sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
- endEq = 0;
- nConstraint++;
- }
- sqlite3DbFree(db, zStartAff);
-
- /* Top of the loop body */
- pLevel->p2 = sqlite3VdbeCurrentAddr(v);
-
- /* Check if the index cursor is past the end of the range. */
- if( nConstraint ){
- op = aEndOp[bRev*2 + endEq];
- sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
- testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
- testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
- testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
- testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
- }
-
- /* Seek the table cursor, if required */
- disableTerm(pLevel, pRangeStart);
- disableTerm(pLevel, pRangeEnd);
- if( omitTable ){
- /* pIdx is a covering index. No need to access the main table. */
- }else if( HasRowid(pIdx->pTable) ){
- iRowidReg = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
- sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
- if( pWInfo->eOnePass!=ONEPASS_OFF ){
- sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowidReg);
- VdbeCoverage(v);
- }else{
- sqlite3VdbeAddOp2(v, OP_Seek, iCur, iRowidReg); /* Deferred seek */
- }
- }else if( iCur!=iIdxCur ){
- Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
- iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol);
- for(j=0; j<pPk->nKeyCol; j++){
- k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]);
- sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j);
- }
- sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont,
- iRowidReg, pPk->nKeyCol); VdbeCoverage(v);
- }
-
- /* Record the instruction used to terminate the loop. Disable
- ** WHERE clause terms made redundant by the index range scan.
- */
- if( pLoop->wsFlags & WHERE_ONEROW ){
- pLevel->op = OP_Noop;
- }else if( bRev ){
- pLevel->op = OP_Prev;
- }else{
- pLevel->op = OP_Next;
- }
- pLevel->p1 = iIdxCur;
- pLevel->p3 = (pLoop->wsFlags&WHERE_UNQ_WANTED)!=0 ? 1:0;
- if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){
- pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
- }else{
- assert( pLevel->p5==0 );
- }
- }else
-
-#ifndef SQLITE_OMIT_OR_OPTIMIZATION
- if( pLoop->wsFlags & WHERE_MULTI_OR ){
- /* Case 5: Two or more separately indexed terms connected by OR
- **
- ** Example:
- **
- ** CREATE TABLE t1(a,b,c,d);
- ** CREATE INDEX i1 ON t1(a);
- ** CREATE INDEX i2 ON t1(b);
- ** CREATE INDEX i3 ON t1(c);
- **
- ** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13)
- **
- ** In the example, there are three indexed terms connected by OR.
- ** The top of the loop looks like this:
- **
- ** Null 1 # Zero the rowset in reg 1
- **
- ** Then, for each indexed term, the following. The arguments to
- ** RowSetTest are such that the rowid of the current row is inserted
- ** into the RowSet. If it is already present, control skips the
- ** Gosub opcode and jumps straight to the code generated by WhereEnd().
- **
- ** sqlite3WhereBegin(<term>)
- ** RowSetTest # Insert rowid into rowset
- ** Gosub 2 A
- ** sqlite3WhereEnd()
- **
- ** Following the above, code to terminate the loop. Label A, the target
- ** of the Gosub above, jumps to the instruction right after the Goto.
- **
- ** Null 1 # Zero the rowset in reg 1
- ** Goto B # The loop is finished.
- **
- ** A: <loop body> # Return data, whatever.
- **
- ** Return 2 # Jump back to the Gosub
- **
- ** B: <after the loop>
- **
- ** Added 2014-05-26: If the table is a WITHOUT ROWID table, then
- ** use an ephemeral index instead of a RowSet to record the primary
- ** keys of the rows we have already seen.
- **
- */
- WhereClause *pOrWc; /* The OR-clause broken out into subterms */
-    SrcList *pOrTab;      /* Shortened table list for OR-clause generation */
- Index *pCov = 0; /* Potential covering index (or NULL) */
- int iCovCur = pParse->nTab++; /* Cursor used for index scans (if any) */
-
- int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */
- int regRowset = 0; /* Register for RowSet object */
- int regRowid = 0; /* Register holding rowid */
- int iLoopBody = sqlite3VdbeMakeLabel(v); /* Start of loop body */
- int iRetInit; /* Address of regReturn init */
- int untestedTerms = 0; /* Some terms not completely tested */
- int ii; /* Loop counter */
- u16 wctrlFlags; /* Flags for sub-WHERE clause */
- Expr *pAndExpr = 0; /* An ".. AND (...)" expression */
- Table *pTab = pTabItem->pTab;
-
- pTerm = pLoop->aLTerm[0];
- assert( pTerm!=0 );
- assert( pTerm->eOperator & WO_OR );
- assert( (pTerm->wtFlags & TERM_ORINFO)!=0 );
- pOrWc = &pTerm->u.pOrInfo->wc;
- pLevel->op = OP_Return;
- pLevel->p1 = regReturn;
-
- /* Set up a new SrcList in pOrTab containing the table being scanned
- ** by this loop in the a[0] slot and all notReady tables in a[1..] slots.
- ** This becomes the SrcList in the recursive call to sqlite3WhereBegin().
- */
- if( pWInfo->nLevel>1 ){
- int nNotReady; /* The number of notReady tables */
- struct SrcList_item *origSrc; /* Original list of tables */
- nNotReady = pWInfo->nLevel - iLevel - 1;
- pOrTab = sqlite3StackAllocRaw(db,
- sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0]));
- if( pOrTab==0 ) return notReady;
- pOrTab->nAlloc = (u8)(nNotReady + 1);
- pOrTab->nSrc = pOrTab->nAlloc;
- memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem));
- origSrc = pWInfo->pTabList->a;
- for(k=1; k<=nNotReady; k++){
- memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k]));
- }
- }else{
- pOrTab = pWInfo->pTabList;
- }
-
- /* Initialize the rowset register to contain NULL. An SQL NULL is
- ** equivalent to an empty rowset. Or, create an ephemeral index
-    ** capable of holding primary keys in the case of a WITHOUT ROWID table.
- **
- ** Also initialize regReturn to contain the address of the instruction
- ** immediately following the OP_Return at the bottom of the loop. This
- ** is required in a few obscure LEFT JOIN cases where control jumps
- ** over the top of the loop into the body of it. In this case the
- ** correct response for the end-of-loop code (the OP_Return) is to
- ** fall through to the next instruction, just as an OP_Next does if
- ** called on an uninitialized cursor.
- */
- if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
- if( HasRowid(pTab) ){
- regRowset = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset);
- }else{
- Index *pPk = sqlite3PrimaryKeyIndex(pTab);
- regRowset = pParse->nTab++;
- sqlite3VdbeAddOp2(v, OP_OpenEphemeral, regRowset, pPk->nKeyCol);
- sqlite3VdbeSetP4KeyInfo(pParse, pPk);
- }
- regRowid = ++pParse->nMem;
- }
- iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn);
-
- /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y
- ** Then for every term xN, evaluate as the subexpression: xN AND z
- ** That way, terms in y that are factored into the disjunction will
- ** be picked up by the recursive calls to sqlite3WhereBegin() below.
- **
- ** Actually, each subexpression is converted to "xN AND w" where w is
- ** the "interesting" terms of z - terms that did not originate in the
- ** ON or USING clause of a LEFT JOIN, and terms that are usable as
- ** indices.
- **
- ** This optimization also only applies if the (x1 OR x2 OR ...) term
- ** is not contained in the ON clause of a LEFT JOIN.
- ** See ticket http://www.sqlite.org/src/info/f2369304e4
- */
- if( pWC->nTerm>1 ){
- int iTerm;
- for(iTerm=0; iTerm<pWC->nTerm; iTerm++){
- Expr *pExpr = pWC->a[iTerm].pExpr;
- if( &pWC->a[iTerm] == pTerm ) continue;
- if( ExprHasProperty(pExpr, EP_FromJoin) ) continue;
- if( (pWC->a[iTerm].wtFlags & TERM_VIRTUAL)!=0 ) continue;
- if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue;
- testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO );
- pExpr = sqlite3ExprDup(db, pExpr, 0);
- pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr);
- }
- if( pAndExpr ){
- pAndExpr = sqlite3PExpr(pParse, TK_AND, 0, pAndExpr, 0);
- }
- }
-
- /* Run a separate WHERE clause for each term of the OR clause. After
- ** eliminating duplicates from other WHERE clauses, the action for each
-    ** sub-WHERE clause is to invoke the main loop body as a subroutine.
- */
- wctrlFlags = WHERE_OMIT_OPEN_CLOSE
- | WHERE_FORCE_TABLE
- | WHERE_ONETABLE_ONLY
- | WHERE_NO_AUTOINDEX;
- for(ii=0; ii<pOrWc->nTerm; ii++){
- WhereTerm *pOrTerm = &pOrWc->a[ii];
- if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){
- WhereInfo *pSubWInfo; /* Info for single OR-term scan */
- Expr *pOrExpr = pOrTerm->pExpr; /* Current OR clause term */
- int jmp1 = 0; /* Address of jump operation */
- if( pAndExpr && !ExprHasProperty(pOrExpr, EP_FromJoin) ){
- pAndExpr->pLeft = pOrExpr;
- pOrExpr = pAndExpr;
- }
- /* Loop through table entries that match term pOrTerm. */
- WHERETRACE(0xffff, ("Subplan for OR-clause:\n"));
- pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0,
- wctrlFlags, iCovCur);
- assert( pSubWInfo || pParse->nErr || db->mallocFailed );
- if( pSubWInfo ){
- WhereLoop *pSubLoop;
- int addrExplain = sqlite3WhereExplainOneScan(
- pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0
- );
- sqlite3WhereAddScanStatus(v, pOrTab, &pSubWInfo->a[0], addrExplain);
-
- /* This is the sub-WHERE clause body. First skip over
- ** duplicate rows from prior sub-WHERE clauses, and record the
- ** rowid (or PRIMARY KEY) for the current row so that the same
- ** row will be skipped in subsequent sub-WHERE clauses.
- */
- if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
- int r;
- int iSet = ((ii==pOrWc->nTerm-1)?-1:ii);
- if( HasRowid(pTab) ){
- r = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, regRowid, 0);
- jmp1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0,
- r,iSet);
- VdbeCoverage(v);
- }else{
- Index *pPk = sqlite3PrimaryKeyIndex(pTab);
- int nPk = pPk->nKeyCol;
- int iPk;
-
- /* Read the PK into an array of temp registers. */
- r = sqlite3GetTempRange(pParse, nPk);
- for(iPk=0; iPk<nPk; iPk++){
- int iCol = pPk->aiColumn[iPk];
- int rx;
- rx = sqlite3ExprCodeGetColumn(pParse, pTab, iCol, iCur,r+iPk,0);
- if( rx!=r+iPk ){
- sqlite3VdbeAddOp2(v, OP_SCopy, rx, r+iPk);
- }
- }
-
- /* Check if the temp table already contains this key. If so,
- ** the row has already been included in the result set and
- ** can be ignored (by jumping past the Gosub below). Otherwise,
- ** insert the key into the temp table and proceed with processing
- ** the row.
- **
- ** Use some of the same optimizations as OP_RowSetTest: If iSet
- ** is zero, assume that the key cannot already be present in
- ** the temp table. And if iSet is -1, assume that there is no
- ** need to insert the key into the temp table, as it will never
- ** be tested for. */
- if( iSet ){
- jmp1 = sqlite3VdbeAddOp4Int(v, OP_Found, regRowset, 0, r, nPk);
- VdbeCoverage(v);
- }
- if( iSet>=0 ){
- sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid);
- sqlite3VdbeAddOp3(v, OP_IdxInsert, regRowset, regRowid, 0);
- if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
- }
-
- /* Release the array of temp registers */
- sqlite3ReleaseTempRange(pParse, r, nPk);
- }
- }
-
- /* Invoke the main loop body as a subroutine */
- sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody);
-
- /* Jump here (skipping the main loop body subroutine) if the
- ** current sub-WHERE row is a duplicate from prior sub-WHEREs. */
- if( jmp1 ) sqlite3VdbeJumpHere(v, jmp1);
-
- /* The pSubWInfo->untestedTerms flag means that this OR term
-          ** contained one or more AND terms from a notReady table.  The
- ** terms from the notReady table could not be tested and will
- ** need to be tested later.
- */
- if( pSubWInfo->untestedTerms ) untestedTerms = 1;
-
- /* If all of the OR-connected terms are optimized using the same
- ** index, and the index is opened using the same cursor number
- ** by each call to sqlite3WhereBegin() made by this loop, it may
- ** be possible to use that index as a covering index.
- **
- ** If the call to sqlite3WhereBegin() above resulted in a scan that
- ** uses an index, and this is either the first OR-connected term
- ** processed or the index is the same as that used by all previous
- ** terms, set pCov to the candidate covering index. Otherwise, set
- ** pCov to NULL to indicate that no candidate covering index will
- ** be available.
- */
- pSubLoop = pSubWInfo->a[0].pWLoop;
- assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
- if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0
- && (ii==0 || pSubLoop->u.btree.pIndex==pCov)
- && (HasRowid(pTab) || !IsPrimaryKeyIndex(pSubLoop->u.btree.pIndex))
- ){
- assert( pSubWInfo->a[0].iIdxCur==iCovCur );
- pCov = pSubLoop->u.btree.pIndex;
- wctrlFlags |= WHERE_REOPEN_IDX;
- }else{
- pCov = 0;
- }
-
- /* Finish the loop through table entries that match term pOrTerm. */
- sqlite3WhereEnd(pSubWInfo);
- }
- }
- }
- pLevel->u.pCovidx = pCov;
- if( pCov ) pLevel->iIdxCur = iCovCur;
- if( pAndExpr ){
- pAndExpr->pLeft = 0;
- sqlite3ExprDelete(db, pAndExpr);
- }
- sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v));
- sqlite3VdbeGoto(v, pLevel->addrBrk);
- sqlite3VdbeResolveLabel(v, iLoopBody);
-
- if( pWInfo->nLevel>1 ) sqlite3StackFree(db, pOrTab);
- if( !untestedTerms ) disableTerm(pLevel, pTerm);
- }else
-#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
-
- {
- /* Case 6: There is no usable index. We must do a complete
- ** scan of the entire table.
- */
- static const u8 aStep[] = { OP_Next, OP_Prev };
- static const u8 aStart[] = { OP_Rewind, OP_Last };
- assert( bRev==0 || bRev==1 );
- if( pTabItem->fg.isRecursive ){
- /* Tables marked isRecursive have only a single row that is stored in
- ** a pseudo-cursor. No need to Rewind or Next such cursors. */
- pLevel->op = OP_Noop;
- }else{
- pLevel->op = aStep[bRev];
- pLevel->p1 = iCur;
- pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk);
- VdbeCoverageIf(v, bRev==0);
- VdbeCoverageIf(v, bRev!=0);
- pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
- }
- }
-
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- pLevel->addrVisit = sqlite3VdbeCurrentAddr(v);
-#endif
-
- /* Insert code to test every subexpression that can be completely
- ** computed using the current set of tables.
- */
- for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
- Expr *pE;
- int skipLikeAddr = 0;
- testcase( pTerm->wtFlags & TERM_VIRTUAL );
- testcase( pTerm->wtFlags & TERM_CODED );
- if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
- if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
- testcase( pWInfo->untestedTerms==0
- && (pWInfo->wctrlFlags & WHERE_ONETABLE_ONLY)!=0 );
- pWInfo->untestedTerms = 1;
- continue;
- }
- pE = pTerm->pExpr;
- assert( pE!=0 );
- if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){
- continue;
- }
- if( pTerm->wtFlags & TERM_LIKECOND ){
- assert( pLevel->iLikeRepCntr>0 );
- skipLikeAddr = sqlite3VdbeAddOp1(v, OP_IfNot, pLevel->iLikeRepCntr);
- VdbeCoverage(v);
- }
- sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL);
- if( skipLikeAddr ) sqlite3VdbeJumpHere(v, skipLikeAddr);
- pTerm->wtFlags |= TERM_CODED;
- }
-
- /* Insert code to test for implied constraints based on transitivity
- ** of the "==" operator.
- **
- ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123"
- ** and we are coding the t1 loop and the t2 loop has not yet coded,
- ** then we cannot use the "t1.a=t2.b" constraint, but we can code
- ** the implied "t1.a=123" constraint.
- */
- for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
- Expr *pE, *pEAlt;
- WhereTerm *pAlt;
- if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
- if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue;
- if( (pTerm->eOperator & WO_EQUIV)==0 ) continue;
- if( pTerm->leftCursor!=iCur ) continue;
- if( pLevel->iLeftJoin ) continue;
- pE = pTerm->pExpr;
- assert( !ExprHasProperty(pE, EP_FromJoin) );
- assert( (pTerm->prereqRight & pLevel->notReady)!=0 );
- pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.leftColumn, notReady,
- WO_EQ|WO_IN|WO_IS, 0);
- if( pAlt==0 ) continue;
- if( pAlt->wtFlags & (TERM_CODED) ) continue;
- testcase( pAlt->eOperator & WO_EQ );
- testcase( pAlt->eOperator & WO_IS );
- testcase( pAlt->eOperator & WO_IN );
- VdbeModuleComment((v, "begin transitive constraint"));
- pEAlt = sqlite3StackAllocRaw(db, sizeof(*pEAlt));
- if( pEAlt ){
- *pEAlt = *pAlt->pExpr;
- pEAlt->pLeft = pE->pLeft;
- sqlite3ExprIfFalse(pParse, pEAlt, addrCont, SQLITE_JUMPIFNULL);
- sqlite3StackFree(db, pEAlt);
- }
- }
-
- /* For a LEFT OUTER JOIN, generate code that will record the fact that
- ** at least one row of the right table has matched the left table.
- */
- if( pLevel->iLeftJoin ){
- pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
- sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
- VdbeComment((v, "record LEFT JOIN hit"));
- sqlite3ExprCacheClear(pParse);
- for(pTerm=pWC->a, j=0; j<pWC->nTerm; j++, pTerm++){
- testcase( pTerm->wtFlags & TERM_VIRTUAL );
- testcase( pTerm->wtFlags & TERM_CODED );
- if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
- if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
- assert( pWInfo->untestedTerms );
- continue;
- }
- assert( pTerm->pExpr );
- sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL);
- pTerm->wtFlags |= TERM_CODED;
- }
- }
-
- return pLevel->notReady;
+ pWC->pWInfo = pWInfo;
+ pWC->pOuter = 0;
+ pWC->nTerm = 0;
+ pWC->nSlot = ArraySize(pWC->aStatic);
+ pWC->a = pWC->aStatic;
}
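
The aStartOp[] and aEndOp[] tables in the sqlite3WhereCodeOneLoopStart() code above are indexed by packing flags into a small integer: (start_constraints<<2) + (startEq<<1) + bRev selects the opcode that positions the index cursor at the start of the range, and bRev*2 + endEq selects the end-of-range test. The stand-alone sketch below only enumerates those combinations with opcode names as strings; it generates no VDBE code.

#include <stdio.h>

int main(void){
  /* Names mirror aStartOp[]; entries 0 and 1 are unused because when
  ** there are no start constraints, startEq is always true. */
  static const char *aStartOp[] = {
    0, 0, "OP_Rewind", "OP_Last",
    "OP_SeekGT", "OP_SeekLT", "OP_SeekGE", "OP_SeekLE"
  };
  static const char *aEndOp[] = {
    "OP_IdxGE", "OP_IdxGT", "OP_IdxLE", "OP_IdxLT"
  };
  int start_constraints, startEq, endEq, bRev;

  for(bRev=0; bRev<=1; bRev++){
    for(start_constraints=0; start_constraints<=1; start_constraints++){
      for(startEq=0; startEq<=1; startEq++){
        int op = (start_constraints<<2) + (startEq<<1) + bRev;
        if( aStartOp[op]==0 ) continue;
        printf("start: constraints=%d eq=%d rev=%d -> %s\n",
               start_constraints, startEq, bRev, aStartOp[op]);
      }
    }
    for(endEq=0; endEq<=1; endEq++){
      printf("end:   eq=%d rev=%d -> %s\n",
             endEq, bRev, aEndOp[bRev*2 + endEq]);
    }
  }
  return 0;
}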
-/************** End of wherecode.c *******************************************/
-/************** Begin file whereexpr.c ***************************************/
-/*
-** 2015-06-08
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This module contains C code that generates VDBE code used to process
-** the WHERE clause of SQL statements.
-**
-** This file was originally part of where.c but was split out to improve
-** readability and editability.  This file contains utility routines for
-** analyzing Expr objects in the WHERE clause.
-*/
-/* #include "sqliteInt.h" */
-/* #include "whereInt.h" */
-
-/* Forward declarations */
-static void exprAnalyze(SrcList*, WhereClause*, int);
+/* Forward reference */
+static void whereClauseClear(WhereClause*);
/*
** Deallocate all memory associated with a WhereOrInfo object.
*/
static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){
- sqlite3WhereClauseClear(&p->wc);
+ whereClauseClear(&p->wc);
sqlite3DbFree(db, p);
}
@@ -120069,11 +108570,34 @@ static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){
** Deallocate all memory associated with a WhereAndInfo object.
*/
static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){
- sqlite3WhereClauseClear(&p->wc);
+ whereClauseClear(&p->wc);
sqlite3DbFree(db, p);
}
/*
+** Deallocate a WhereClause structure. The WhereClause structure
+** itself is not freed. This routine is the inverse of whereClauseInit().
+*/
+static void whereClauseClear(WhereClause *pWC){
+ int i;
+ WhereTerm *a;
+ sqlite3 *db = pWC->pWInfo->pParse->db;
+ for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){
+ if( a->wtFlags & TERM_DYNAMIC ){
+ sqlite3ExprDelete(db, a->pExpr);
+ }
+ if( a->wtFlags & TERM_ORINFO ){
+ whereOrInfoDelete(db, a->u.pOrInfo);
+ }else if( a->wtFlags & TERM_ANDINFO ){
+ whereAndInfoDelete(db, a->u.pAndInfo);
+ }
+ }
+ if( pWC->a!=pWC->aStatic ){
+ sqlite3DbFree(db, pWC->a);
+ }
+}
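
whereClauseInit() and whereClauseClear() above rely on the embedded aStatic[] array so that small WHERE clauses never touch the heap; whereClauseClear() frees pWC->a only when the array has grown past that static buffer. A generic stand-alone version of the same pattern, assuming malloc() in place of sqlite3DbRealloc() and plain ints in place of WhereTerm entries:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A container that starts out using an embedded array and switches to a
** heap allocation only when it outgrows it (the aStatic/nSlot pattern). */
typedef struct {
  int n, nSlot;
  int *a;
  int aStatic[4];
} Clause;

static void clauseInit(Clause *p){
  p->n = 0;
  p->nSlot = (int)(sizeof(p->aStatic)/sizeof(p->aStatic[0]));
  p->a = p->aStatic;
}

static int clauseInsert(Clause *p, int v){
  if( p->n>=p->nSlot ){
    int *pOld = p->a;
    p->a = malloc(sizeof(p->a[0])*p->nSlot*2);
    if( p->a==0 ){ p->a = pOld; return -1; }
    memcpy(p->a, pOld, sizeof(p->a[0])*p->n);
    if( pOld!=p->aStatic ) free(pOld);
    p->nSlot *= 2;
  }
  p->a[p->n] = v;
  return p->n++;
}

static void clauseClear(Clause *p){
  if( p->a!=p->aStatic ) free(p->a);   /* never free the embedded array */
}

int main(void){
  Clause c;
  int i;
  clauseInit(&c);
  for(i=0; i<10; i++) clauseInsert(&c, i);
  printf("n=%d nSlot=%d heap=%d\n", c.n, c.nSlot, c.a!=c.aStatic);
  clauseClear(&c);
  return 0;
}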
+
+/*
** Add a single new WhereTerm entry to the WhereClause object pWC.
** The new WhereTerm object is constructed from Expr p and with wtFlags.
** The index in pWC->a[] of the new WhereTerm is returned on success.
@@ -120092,7 +108616,7 @@ static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){
** calling this routine. Such pointers may be reinitialized by referencing
** the pWC->a[] array.
*/
-static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
+static int whereClauseInsert(WhereClause *pWC, Expr *p, u8 wtFlags){
WhereTerm *pTerm;
int idx;
testcase( wtFlags & TERM_VIRTUAL );
@@ -120112,13 +108636,12 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
sqlite3DbFree(db, pOld);
}
pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]);
- memset(&pWC->a[pWC->nTerm], 0, sizeof(pWC->a[0])*(pWC->nSlot-pWC->nTerm));
}
pTerm = &pWC->a[idx = pWC->nTerm++];
if( p && ExprHasProperty(p, EP_Unlikely) ){
- pTerm->truthProb = sqlite3LogEst(p->iTable) - 270;
+ pTerm->truthProb = sqlite3LogEst(p->iTable) - 99;
}else{
- pTerm->truthProb = 1;
+ pTerm->truthProb = -1;
}
pTerm->pExpr = sqlite3ExprSkipCollate(p);
pTerm->wtFlags = wtFlags;
@@ -120128,6 +108651,121 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
}
/*
+** This routine identifies subexpressions in the WHERE clause where
+** each subexpression is separated by the AND operator or some other
+** operator specified in the op parameter. The WhereClause structure
+** is filled with pointers to subexpressions. For example:
+**
+** WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22)
+** \________/ \_______________/ \________________/
+** slot[0] slot[1] slot[2]
+**
+** The original WHERE clause in pExpr is unaltered. All this routine
+** does is make slot[] entries point to substructure within pExpr.
+**
+** In the previous sentence and in the diagram, "slot[]" refers to
+** the WhereClause.a[] array. The slot[] array grows as needed to contain
+** all terms of the WHERE clause.
+*/
+static void whereSplit(WhereClause *pWC, Expr *pExpr, u8 op){
+ pWC->op = op;
+ if( pExpr==0 ) return;
+ if( pExpr->op!=op ){
+ whereClauseInsert(pWC, pExpr, 0);
+ }else{
+ whereSplit(pWC, pExpr->pLeft, op);
+ whereSplit(pWC, pExpr->pRight, op);
+ }
+}
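
whereSplit() is a plain recursion over the expression tree: any subtree whose root is not the splitting operator becomes one slot. A minimal stand-alone model with a toy Expr node carrying only an opcode, two children and a text label (the table and column names are invented for illustration and match the comment's example):

#include <stdio.h>

#define TK_AND  1
#define TK_LEAF 2

typedef struct Expr Expr;
struct Expr { int op; Expr *pLeft, *pRight; const char *zText; };

/* Collect the non-AND subexpressions of p, left to right. */
static void whereSplit(Expr *p, int op){
  if( p==0 ) return;
  if( p->op!=op ){
    printf("slot: %s\n", p->zText);
  }else{
    whereSplit(p->pLeft, op);
    whereSplit(p->pRight, op);
  }
}

int main(void){
  /* WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22) */
  Expr t1   = { TK_LEAF, 0, 0, "a=='hello'" };
  Expr t2   = { TK_LEAF, 0, 0, "coalesce(b,11)<10" };
  Expr t3   = { TK_LEAF, 0, 0, "(c+12!=d OR c==22)" };
  Expr left = { TK_AND, &t1, &t2, 0 };
  Expr root = { TK_AND, &left, &t3, 0 };
  whereSplit(&root, TK_AND);
  return 0;
}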
+
+/*
+** Initialize a WhereMaskSet object
+*/
+#define initMaskSet(P) (P)->n=0
+
+/*
+** Return the bitmask for the given cursor number. Return 0 if
+** iCursor is not in the set.
+*/
+static Bitmask getMask(WhereMaskSet *pMaskSet, int iCursor){
+ int i;
+ assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 );
+ for(i=0; i<pMaskSet->n; i++){
+ if( pMaskSet->ix[i]==iCursor ){
+ return MASKBIT(i);
+ }
+ }
+ return 0;
+}
+
+/*
+** Create a new mask for cursor iCursor.
+**
+** There is one cursor per table in the FROM clause. The number of
+** tables in the FROM clause is limited by a test early in the
+** sqlite3WhereBegin() routine. So we know that the pMaskSet->ix[]
+** array will never overflow.
+*/
+static void createMask(WhereMaskSet *pMaskSet, int iCursor){
+ assert( pMaskSet->n < ArraySize(pMaskSet->ix) );
+ pMaskSet->ix[pMaskSet->n++] = iCursor;
+}
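
createMask() and getMask() above implement a tiny map from cursor numbers to single bits, so that the set of tables used by an expression can be summarized as a Bitmask and combined with bitwise OR, as exprTableUsage() does further down. A stand-alone sketch of the same idea, with arbitrary example cursor numbers:

#include <stdio.h>

typedef unsigned long long Bitmask;
#define MASKBIT(n) (((Bitmask)1)<<(n))

typedef struct {
  int n;        /* Number of cursors registered so far */
  int ix[8];    /* Cursor numbers, in registration order */
} MaskSet;

static void createMask(MaskSet *p, int iCursor){
  p->ix[p->n++] = iCursor;
}

/* Return the bit for iCursor, or 0 if it was never registered. */
static Bitmask getMask(MaskSet *p, int iCursor){
  int i;
  for(i=0; i<p->n; i++){
    if( p->ix[i]==iCursor ) return MASKBIT(i);
  }
  return 0;
}

int main(void){
  MaskSet m = {0};
  Bitmask used = 0;
  createMask(&m, 5);             /* cursor numbers need not be small */
  createMask(&m, 17);
  createMask(&m, 3);
  used |= getMask(&m, 17);       /* mark the table with cursor 17 as used */
  used |= getMask(&m, 3);
  printf("usage bitmask: 0x%llx\n", used);   /* prints 0x6 */
  return 0;
}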
+
+/*
+** These routines walk (recursively) an expression tree and generate
+** a bitmask indicating which tables are used in that expression
+** tree.
+*/
+static Bitmask exprListTableUsage(WhereMaskSet*, ExprList*);
+static Bitmask exprSelectTableUsage(WhereMaskSet*, Select*);
+static Bitmask exprTableUsage(WhereMaskSet *pMaskSet, Expr *p){
+ Bitmask mask = 0;
+ if( p==0 ) return 0;
+ if( p->op==TK_COLUMN ){
+ mask = getMask(pMaskSet, p->iTable);
+ return mask;
+ }
+ mask = exprTableUsage(pMaskSet, p->pRight);
+ mask |= exprTableUsage(pMaskSet, p->pLeft);
+ if( ExprHasProperty(p, EP_xIsSelect) ){
+ mask |= exprSelectTableUsage(pMaskSet, p->x.pSelect);
+ }else{
+ mask |= exprListTableUsage(pMaskSet, p->x.pList);
+ }
+ return mask;
+}
+static Bitmask exprListTableUsage(WhereMaskSet *pMaskSet, ExprList *pList){
+ int i;
+ Bitmask mask = 0;
+ if( pList ){
+ for(i=0; i<pList->nExpr; i++){
+ mask |= exprTableUsage(pMaskSet, pList->a[i].pExpr);
+ }
+ }
+ return mask;
+}
+static Bitmask exprSelectTableUsage(WhereMaskSet *pMaskSet, Select *pS){
+ Bitmask mask = 0;
+ while( pS ){
+ SrcList *pSrc = pS->pSrc;
+ mask |= exprListTableUsage(pMaskSet, pS->pEList);
+ mask |= exprListTableUsage(pMaskSet, pS->pGroupBy);
+ mask |= exprListTableUsage(pMaskSet, pS->pOrderBy);
+ mask |= exprTableUsage(pMaskSet, pS->pWhere);
+ mask |= exprTableUsage(pMaskSet, pS->pHaving);
+ if( ALWAYS(pSrc!=0) ){
+ int i;
+ for(i=0; i<pSrc->nSrc; i++){
+ mask |= exprSelectTableUsage(pMaskSet, pSrc->a[i].pSelect);
+ mask |= exprTableUsage(pMaskSet, pSrc->a[i].pOn);
+ }
+ }
+ pS = pS->pPrior;
+ }
+ return mask;
+}
+
+/*
** Return TRUE if the given operator is one of the operators that is
** allowed for an indexable WHERE clause term. The allowed operators are
** "=", "<", ">", "<=", ">=", "IN", and "IS NULL"
@@ -120137,10 +108775,15 @@ static int allowedOp(int op){
assert( TK_LT>TK_EQ && TK_LT<TK_GE );
assert( TK_LE>TK_EQ && TK_LE<TK_GE );
assert( TK_GE==TK_EQ+4 );
- return op==TK_IN || (op>=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS;
+ return op==TK_IN || (op>=TK_EQ && op<=TK_GE) || op==TK_ISNULL;
}
/*
+** Swap two objects of type TYPE.
+*/
+#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
+
+/*
** Commute a comparison operator. Expressions of the form "X op Y"
** are converted into "Y op X".
**
@@ -120190,8 +108833,6 @@ static u16 operatorMask(int op){
c = WO_IN;
}else if( op==TK_ISNULL ){
c = WO_ISNULL;
- }else if( op==TK_IS ){
- c = WO_IS;
}else{
assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff );
c = (u16)(WO_EQ<<(op-TK_EQ));
@@ -120203,10 +108844,199 @@ static u16 operatorMask(int op){
assert( op!=TK_LE || c==WO_LE );
assert( op!=TK_GT || c==WO_GT );
assert( op!=TK_GE || c==WO_GE );
- assert( op!=TK_IS || c==WO_IS );
return c;
}
+/*
+** Advance to the next WhereTerm that matches according to the criteria
+** established when the pScan object was initialized by whereScanInit().
+** Return NULL if there are no more matching WhereTerms.
+*/
+static WhereTerm *whereScanNext(WhereScan *pScan){
+ int iCur; /* The cursor on the LHS of the term */
+ int iColumn; /* The column on the LHS of the term. -1 for IPK */
+ Expr *pX; /* An expression being tested */
+ WhereClause *pWC; /* Shorthand for pScan->pWC */
+ WhereTerm *pTerm; /* The term being tested */
+ int k = pScan->k; /* Where to start scanning */
+
+ while( pScan->iEquiv<=pScan->nEquiv ){
+ iCur = pScan->aEquiv[pScan->iEquiv-2];
+ iColumn = pScan->aEquiv[pScan->iEquiv-1];
+ while( (pWC = pScan->pWC)!=0 ){
+ for(pTerm=pWC->a+k; k<pWC->nTerm; k++, pTerm++){
+ if( pTerm->leftCursor==iCur
+ && pTerm->u.leftColumn==iColumn
+ && (pScan->iEquiv<=2 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+ ){
+ if( (pTerm->eOperator & WO_EQUIV)!=0
+ && pScan->nEquiv<ArraySize(pScan->aEquiv)
+ ){
+ int j;
+ pX = sqlite3ExprSkipCollate(pTerm->pExpr->pRight);
+ assert( pX->op==TK_COLUMN );
+ for(j=0; j<pScan->nEquiv; j+=2){
+ if( pScan->aEquiv[j]==pX->iTable
+ && pScan->aEquiv[j+1]==pX->iColumn ){
+ break;
+ }
+ }
+ if( j==pScan->nEquiv ){
+ pScan->aEquiv[j] = pX->iTable;
+ pScan->aEquiv[j+1] = pX->iColumn;
+ pScan->nEquiv += 2;
+ }
+ }
+ if( (pTerm->eOperator & pScan->opMask)!=0 ){
+ /* Verify the affinity and collating sequence match */
+ if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){
+ CollSeq *pColl;
+ Parse *pParse = pWC->pWInfo->pParse;
+ pX = pTerm->pExpr;
+ if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){
+ continue;
+ }
+ assert(pX->pLeft);
+ pColl = sqlite3BinaryCompareCollSeq(pParse,
+ pX->pLeft, pX->pRight);
+ if( pColl==0 ) pColl = pParse->db->pDfltColl;
+ if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){
+ continue;
+ }
+ }
+ if( (pTerm->eOperator & WO_EQ)!=0
+ && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN
+ && pX->iTable==pScan->aEquiv[0]
+ && pX->iColumn==pScan->aEquiv[1]
+ ){
+ continue;
+ }
+ pScan->k = k+1;
+ return pTerm;
+ }
+ }
+ }
+ pScan->pWC = pScan->pWC->pOuter;
+ k = 0;
+ }
+ pScan->pWC = pScan->pOrigWC;
+ k = 0;
+ pScan->iEquiv += 2;
+ }
+ return 0;
+}
+
+/*
+** Initialize a WHERE clause scanner object. Return a pointer to the
+** first match. Return NULL if there are no matches.
+**
+** The scanner will be searching the WHERE clause pWC. It will look
+** for terms of the form "X <op> <expr>" where X is column iColumn of table
+** iCur. The <op> must be one of the operators described by opMask.
+**
+** If the search is for X and the WHERE clause contains terms of the
+** form X=Y then this routine might also return terms of the form
+** "Y <op> <expr>". The number of levels of transitivity is limited,
+** but is enough to handle most commonly occurring SQL statements.
+**
+** If X is not the INTEGER PRIMARY KEY then X must be compatible with
+** index pIdx.
+*/
+static WhereTerm *whereScanInit(
+ WhereScan *pScan, /* The WhereScan object being initialized */
+ WhereClause *pWC, /* The WHERE clause to be scanned */
+ int iCur, /* Cursor to scan for */
+ int iColumn, /* Column to scan for */
+ u32 opMask, /* Operator(s) to scan for */
+ Index *pIdx /* Must be compatible with this index */
+){
+ int j;
+
+ /* memset(pScan, 0, sizeof(*pScan)); */
+ pScan->pOrigWC = pWC;
+ pScan->pWC = pWC;
+ if( pIdx && iColumn>=0 ){
+ pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity;
+ for(j=0; pIdx->aiColumn[j]!=iColumn; j++){
+ if( NEVER(j>=pIdx->nKeyCol) ) return 0;
+ }
+ pScan->zCollName = pIdx->azColl[j];
+ }else{
+ pScan->idxaff = 0;
+ pScan->zCollName = 0;
+ }
+ pScan->opMask = opMask;
+ pScan->k = 0;
+ pScan->aEquiv[0] = iCur;
+ pScan->aEquiv[1] = iColumn;
+ pScan->nEquiv = 2;
+ pScan->iEquiv = 2;
+ return whereScanNext(pScan);
+}
+
+/*
+** Search for a term in the WHERE clause that is of the form "X <op> <expr>"
+** where X is a reference to the iColumn of table iCur and <op> is one of
+** the WO_xx operator codes specified by the op parameter.
+** Return a pointer to the term. Return 0 if not found.
+**
+** The term returned might be Y=<expr> if there is another constraint in
+** the WHERE clause that specifies that X=Y. Any such constraints will be
+** identified by the WO_EQUIV bit in the pTerm->eOperator field. The
+** aEquiv[] array holds X and all its equivalents, with each SQL variable
+** taking up two slots in aEquiv[]. The first slot is for the cursor number
+** and the second is for the column number. There are 22 slots in aEquiv[]
+** so that means we can look for X plus up to 10 other equivalent values.
+** Hence a search for X will return <expr> if X=A1 and A1=A2 and A2=A3
+** and ... and A9=A10 and A10=<expr>.
+**
+** If there are multiple terms in the WHERE clause of the form "X <op> <expr>"
+** then try for the one with no dependencies on <expr> - in other words where
+** <expr> is a constant expression of some kind. Only return entries of
+** the form "X <op> Y" where Y is a column in another table if no terms of
+** the form "X <op> <const-expr>" exist. If no terms with a constant RHS
+** exist, try to return a term that does not use WO_EQUIV.
+*/
+static WhereTerm *findTerm(
+ WhereClause *pWC, /* The WHERE clause to be searched */
+ int iCur, /* Cursor number of LHS */
+ int iColumn, /* Column number of LHS */
+ Bitmask notReady, /* RHS must not overlap with this mask */
+ u32 op, /* Mask of WO_xx values describing operator */
+ Index *pIdx /* Must be compatible with this index, if not NULL */
+){
+ WhereTerm *pResult = 0;
+ WhereTerm *p;
+ WhereScan scan;
+
+ p = whereScanInit(&scan, pWC, iCur, iColumn, op, pIdx);
+ while( p ){
+ if( (p->prereqRight & notReady)==0 ){
+ if( p->prereqRight==0 && (p->eOperator&WO_EQ)!=0 ){
+ return p;
+ }
+ if( pResult==0 ) pResult = p;
+ }
+ p = whereScanNext(&scan);
+ }
+ return pResult;
+}
+
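To make the transitivity concrete (hypothetical schema): for SELECT * FROM t1, t2 WHERE t1.x=t2.y AND t2.y=5, a scan for WO_EQ terms on t1.x can also visit t2.y=5, because t1.x and t2.y were recorded as equivalent in aEquiv[]. findTerm() then prefers that constant term over the join term t1.x=t2.y, since its right-hand side has prereqRight==0.
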
+/* Forward reference */
+static void exprAnalyze(SrcList*, WhereClause*, int);
+
+/*
+** Call exprAnalyze on all terms in a WHERE clause.
+*/
+static void exprAnalyzeAll(
+ SrcList *pTabList, /* the FROM clause */
+ WhereClause *pWC /* the WHERE clause to be analyzed */
+){
+ int i;
+ for(i=pWC->nTerm-1; i>=0; i--){
+ exprAnalyze(pTabList, pWC, i);
+ }
+}
#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION
/*
@@ -120215,11 +109045,7 @@ static u16 operatorMask(int op){
** so and false if not.
**
 ** In order for the operator to be optimizable, the RHS must be a string
-** literal that does not begin with a wildcard. The LHS must be a column
-** that may only be NULL, a string, or a BLOB, never a number. (This means
-** that virtual tables cannot participate in the LIKE optimization.) The
-** collating sequence for the column on the LHS must be appropriate for
-** the operator.
+** literal that does not begin with a wildcard.
*/
static int isLikeOrGlob(
Parse *pParse, /* Parsing and code generating context */
@@ -120248,7 +109074,7 @@ static int isLikeOrGlob(
pLeft = pList->a[1].pExpr;
if( pLeft->op!=TK_COLUMN
|| sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT
- || IsVirtual(pLeft->pTab) /* Value might be numeric */
+ || IsVirtual(pLeft->pTab)
){
/* IMP: R-02065-49465 The left-hand side of the LIKE or GLOB operator must
** be the name of an indexed column with TEXT affinity. */
@@ -120256,12 +109082,12 @@ static int isLikeOrGlob(
}
assert( pLeft->iColumn!=(-1) ); /* Because IPK never has AFF_TEXT */
- pRight = sqlite3ExprSkipCollate(pList->a[0].pExpr);
+ pRight = pList->a[0].pExpr;
op = pRight->op;
if( op==TK_VARIABLE ){
Vdbe *pReprepare = pParse->pReprepare;
int iCol = pRight->iColumn;
- pVal = sqlite3VdbeGetBoundValue(pReprepare, iCol, SQLITE_AFF_BLOB);
+ pVal = sqlite3VdbeGetBoundValue(pReprepare, iCol, SQLITE_AFF_NONE);
if( pVal && sqlite3_value_type(pVal)==SQLITE_TEXT ){
z = (char *)sqlite3_value_text(pVal);
}
@@ -120289,7 +109115,7 @@ static int isLikeOrGlob(
** value of the variable means there is no need to invoke the LIKE
** function, then no OP_Variable will be added to the program.
** This causes problems for the sqlite3_bind_parameter_name()
- ** API. To work around them, add a dummy OP_Variable here.
+      ** API. To work around them, add a dummy OP_Variable here.
*/
int r1 = sqlite3GetTempReg(pParse);
sqlite3ExprCodeTarget(pParse, pRight, r1);
@@ -120349,88 +109175,6 @@ static void transferJoinMarkings(Expr *pDerived, Expr *pBase){
}
}
-/*
-** Mark term iChild as being a child of term iParent
-*/
-static void markTermAsChild(WhereClause *pWC, int iChild, int iParent){
- pWC->a[iChild].iParent = iParent;
- pWC->a[iChild].truthProb = pWC->a[iParent].truthProb;
- pWC->a[iParent].nChild++;
-}
-
-/*
-** Return the N-th AND-connected subterm of pTerm. Or if pTerm is not
-** a conjunction, then return just pTerm when N==0. If N is exceeds
-** the number of available subterms, return NULL.
-*/
-static WhereTerm *whereNthSubterm(WhereTerm *pTerm, int N){
- if( pTerm->eOperator!=WO_AND ){
- return N==0 ? pTerm : 0;
- }
- if( N<pTerm->u.pAndInfo->wc.nTerm ){
- return &pTerm->u.pAndInfo->wc.a[N];
- }
- return 0;
-}
-
-/*
-** Subterms pOne and pTwo are contained within WHERE clause pWC. The
-** two subterms are in disjunction - they are OR-ed together.
-**
-** If these two terms are both of the form: "A op B" with the same
-** A and B values but different operators and if the operators are
-** compatible (if one is = and the other is <, for example) then
-** add a new virtual AND term to pWC that is the combination of the
-** two.
-**
-** Some examples:
-**
-** x<y OR x=y --> x<=y
-** x=y OR x=y --> x=y
-** x<=y OR x<y --> x<=y
-**
-** The following is NOT generated:
-**
-** x<y OR x>y --> x!=y
-*/
-static void whereCombineDisjuncts(
- SrcList *pSrc, /* the FROM clause */
- WhereClause *pWC, /* The complete WHERE clause */
- WhereTerm *pOne, /* First disjunct */
- WhereTerm *pTwo /* Second disjunct */
-){
- u16 eOp = pOne->eOperator | pTwo->eOperator;
- sqlite3 *db; /* Database connection (for malloc) */
- Expr *pNew; /* New virtual expression */
- int op; /* Operator for the combined expression */
- int idxNew; /* Index in pWC of the next virtual term */
-
- if( (pOne->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return;
- if( (pTwo->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return;
- if( (eOp & (WO_EQ|WO_LT|WO_LE))!=eOp
- && (eOp & (WO_EQ|WO_GT|WO_GE))!=eOp ) return;
- assert( pOne->pExpr->pLeft!=0 && pOne->pExpr->pRight!=0 );
- assert( pTwo->pExpr->pLeft!=0 && pTwo->pExpr->pRight!=0 );
- if( sqlite3ExprCompare(pOne->pExpr->pLeft, pTwo->pExpr->pLeft, -1) ) return;
- if( sqlite3ExprCompare(pOne->pExpr->pRight, pTwo->pExpr->pRight, -1) )return;
- /* If we reach this point, it means the two subterms can be combined */
- if( (eOp & (eOp-1))!=0 ){
- if( eOp & (WO_LT|WO_LE) ){
- eOp = WO_LE;
- }else{
- assert( eOp & (WO_GT|WO_GE) );
- eOp = WO_GE;
- }
- }
- db = pWC->pWInfo->pParse->db;
- pNew = sqlite3ExprDup(db, pOne->pExpr, 0);
- if( pNew==0 ) return;
- for(op=TK_EQ; eOp!=(WO_EQ<<(op-TK_EQ)); op++){ assert( op<TK_GE ); }
- pNew->op = op;
- idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
- exprAnalyze(pSrc, pWC, idxNew);
-}
-
#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY)
/*
** Analyze a term that consists of two or more OR-connected
@@ -120455,7 +109199,6 @@ static void whereCombineDisjuncts(
** (C) t1.x=t2.y OR (t1.x=t2.z AND t1.y=15)
** (D) x=expr1 OR (y>11 AND y<22 AND z LIKE '*hello*')
** (E) (p.a=1 AND q.b=2 AND r.c=3) OR (p.x=4 AND q.y=5 AND r.z=6)
-** (F) x>A OR (x=A AND y>=B)
**
** CASE 1:
**
@@ -120472,16 +109215,6 @@ static void whereCombineDisjuncts(
**
** CASE 2:
**
-** If there are exactly two disjuncts and one side has x>A and the other side
-** has x=A (for the same x and A) then add a new virtual conjunct term to the
-** WHERE clause of the form "x>=A". Example:
-**
-** x>A OR (x=A AND y>B) adds: x>=A
-**
-** The added conjunct can sometimes be helpful in query planning.
-**
-** CASE 3:
-**
** If all subterms are indexable by a single table T, then set
**
** WhereTerm.eOperator = WO_OR
@@ -120501,22 +109234,22 @@ static void whereCombineDisjuncts(
** is decided elsewhere. This analysis only looks at whether subterms
** appropriate for indexing exist.
**
-** All examples A through E above satisfy case 3. But if a term
-** also satisfies case 1 (such as B) we know that the optimizer will
-** always prefer case 1, so in that case we pretend that case 3 is not
+** All examples A through E above satisfy case 2. But if a term
+** also satisfies case 1 (such as B) we know that the optimizer will
+** always prefer case 1, so in that case we pretend that case 2 is not
** satisfied.
**
** It might be the case that multiple tables are indexable. For example,
** (E) above is indexable on tables P, Q, and R.
**
-** Terms that satisfy case 3 are candidates for lookup by using
+** Terms that satisfy case 2 are candidates for lookup by using
** separate indices to find rowids for each subterm and composing
** the union of all rowids using a RowSet object. This is similar
** to "bitmap indices" in other database engines.
**
** OTHERWISE:
**
-** If none of cases 1, 2, or 3 apply, then leave the eOperator set to
+** If neither case 1 nor case 2 apply, then leave the eOperator set to
** zero. This term is not useful for search.
*/
static void exprAnalyzeOrTerm(
@@ -120547,14 +109280,14 @@ static void exprAnalyzeOrTerm(
if( pOrInfo==0 ) return;
pTerm->wtFlags |= TERM_ORINFO;
pOrWc = &pOrInfo->wc;
- sqlite3WhereClauseInit(pOrWc, pWInfo);
- sqlite3WhereSplit(pOrWc, pExpr, TK_OR);
- sqlite3WhereExprAnalyze(pSrc, pOrWc);
+ whereClauseInit(pOrWc, pWInfo);
+ whereSplit(pOrWc, pExpr, TK_OR);
+ exprAnalyzeAll(pSrc, pOrWc);
if( db->mallocFailed ) return;
assert( pOrWc->nTerm>=2 );
/*
- ** Compute the set of tables that might satisfy cases 1 or 3.
+ ** Compute the set of tables that might satisfy cases 1 or 2.
*/
indexable = ~(Bitmask)0;
chngToIN = ~(Bitmask)0;
@@ -120573,16 +109306,16 @@ static void exprAnalyzeOrTerm(
pOrTerm->wtFlags |= TERM_ANDINFO;
pOrTerm->eOperator = WO_AND;
pAndWC = &pAndInfo->wc;
- sqlite3WhereClauseInit(pAndWC, pWC->pWInfo);
- sqlite3WhereSplit(pAndWC, pOrTerm->pExpr, TK_AND);
- sqlite3WhereExprAnalyze(pSrc, pAndWC);
+ whereClauseInit(pAndWC, pWC->pWInfo);
+ whereSplit(pAndWC, pOrTerm->pExpr, TK_AND);
+ exprAnalyzeAll(pSrc, pAndWC);
pAndWC->pOuter = pWC;
testcase( db->mallocFailed );
if( !db->mallocFailed ){
for(j=0, pAndTerm=pAndWC->a; j<pAndWC->nTerm; j++, pAndTerm++){
assert( pAndTerm->pExpr );
if( allowedOp(pAndTerm->pExpr->op) ){
- b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pAndTerm->leftCursor);
+ b |= getMask(&pWInfo->sMaskSet, pAndTerm->leftCursor);
}
}
}
@@ -120593,10 +109326,10 @@ static void exprAnalyzeOrTerm(
** corresponding TERM_VIRTUAL term */
}else{
Bitmask b;
- b = sqlite3WhereGetMask(&pWInfo->sMaskSet, pOrTerm->leftCursor);
+ b = getMask(&pWInfo->sMaskSet, pOrTerm->leftCursor);
if( pOrTerm->wtFlags & TERM_VIRTUAL ){
WhereTerm *pOther = &pOrWc->a[pOrTerm->iParent];
- b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pOther->leftCursor);
+ b |= getMask(&pWInfo->sMaskSet, pOther->leftCursor);
}
indexable &= b;
if( (pOrTerm->eOperator & WO_EQ)==0 ){
@@ -120608,26 +109341,12 @@ static void exprAnalyzeOrTerm(
}
/*
- ** Record the set of tables that satisfy case 3. The set might be
+ ** Record the set of tables that satisfy case 2. The set might be
** empty.
*/
pOrInfo->indexable = indexable;
pTerm->eOperator = indexable==0 ? 0 : WO_OR;
-  /* For a two-way OR, attempt to implement case 2.
- */
- if( indexable && pOrWc->nTerm==2 ){
- int iOne = 0;
- WhereTerm *pOne;
- while( (pOne = whereNthSubterm(&pOrWc->a[0],iOne++))!=0 ){
- int iTwo = 0;
- WhereTerm *pTwo;
- while( (pTwo = whereNthSubterm(&pOrWc->a[1],iTwo++))!=0 ){
- whereCombineDisjuncts(pSrc, pWC, pOne, pTwo);
- }
- }
- }
-
/*
** chngToIN holds a set of tables that *might* satisfy case 1. But
** we have to do some additional checking to see if case 1 really
@@ -120672,10 +109391,9 @@ static void exprAnalyzeOrTerm(
assert( j==1 );
continue;
}
- if( (chngToIN & sqlite3WhereGetMask(&pWInfo->sMaskSet,
- pOrTerm->leftCursor))==0 ){
+ if( (chngToIN & getMask(&pWInfo->sMaskSet, pOrTerm->leftCursor))==0 ){
/* This term must be of the form t1.a==t2.b where t2 is in the
- ** chngToIN set but t1 is not. This term will be either preceded
+      ** chngToIN set but t1 is not.  This term will be either preceded
       ** or followed by an inverted copy (t2.b==t1.a).  Skip this term
** and use its inversion. */
testcase( pOrTerm->wtFlags & TERM_COPIED );
@@ -120692,7 +109410,7 @@ static void exprAnalyzeOrTerm(
** on the second iteration */
assert( j==1 );
assert( IsPowerOfTwo(chngToIN) );
- assert( chngToIN==sqlite3WhereGetMask(&pWInfo->sMaskSet, iCursor) );
+ assert( chngToIN==getMask(&pWInfo->sMaskSet, iCursor) );
break;
}
testcase( j==1 );
@@ -120754,128 +109472,18 @@ static void exprAnalyzeOrTerm(
testcase( idxNew==0 );
exprAnalyze(pSrc, pWC, idxNew);
pTerm = &pWC->a[idxTerm];
- markTermAsChild(pWC, idxNew, idxTerm);
+ pWC->a[idxNew].iParent = idxTerm;
+ pTerm->nChild = 1;
}else{
sqlite3ExprListDelete(db, pList);
}
- pTerm->eOperator = WO_NOOP; /* case 1 trumps case 3 */
+ pTerm->eOperator = WO_NOOP; /* case 1 trumps case 2 */
}
}
}
#endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */
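
Case 1 in practice (hypothetical table t1): every disjunct of t1.x=1 OR t1.x=2 OR t1.x=3 constrains the same column of the same table, so a virtual term t1.x IN (1,2,3) is added to the WHERE clause and the original OR term is set to WO_NOOP, since case 1 trumps case 2.
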
/*
-** We already know that pExpr is a binary operator where both operands are
-** column references. This routine checks to see if pExpr is an equivalence
-** relation:
-** 1. The SQLITE_Transitive optimization must be enabled
-** 2. Must be either an == or an IS operator
-** 3. Not originating in the ON clause of an OUTER JOIN
-** 4. The affinities of A and B must be compatible
-** 5a. Both operands use the same collating sequence OR
-** 5b. The overall collating sequence is BINARY
-** If this routine returns TRUE, that means that the RHS can be substituted
-** for the LHS anyplace else in the WHERE clause where the LHS column occurs.
-** This is an optimization. No harm comes from returning 0. But if 1 is
-** returned when it should not be, then incorrect answers might result.
-*/
-static int termIsEquivalence(Parse *pParse, Expr *pExpr){
- char aff1, aff2;
- CollSeq *pColl;
- const char *zColl1, *zColl2;
- if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0;
- if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0;
- if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0;
- aff1 = sqlite3ExprAffinity(pExpr->pLeft);
- aff2 = sqlite3ExprAffinity(pExpr->pRight);
- if( aff1!=aff2
- && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2))
- ){
- return 0;
- }
- pColl = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pExpr->pRight);
- if( pColl==0 || sqlite3StrICmp(pColl->zName, "BINARY")==0 ) return 1;
- pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft);
- /* Since pLeft and pRight are both a column references, their collating
- ** sequence should always be defined. */
- zColl1 = ALWAYS(pColl) ? pColl->zName : 0;
- pColl = sqlite3ExprCollSeq(pParse, pExpr->pRight);
- zColl2 = ALWAYS(pColl) ? pColl->zName : 0;
- return sqlite3StrICmp(zColl1, zColl2)==0;
-}
-
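For instance (hypothetical tables): t1.a=t2.b with both columns declared TEXT under the default BINARY collation passes all of the tests above, so t2.b can stand in for t1.a elsewhere in the WHERE clause; the same comparison written in the ON clause of an OUTER JOIN, or between a NOCASE column and a BINARY column, fails test 3 or test 5 and is not treated as an equivalence.
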
-/*
-** Recursively walk the expressions of a SELECT statement and generate
-** a bitmask indicating which tables are used in that expression
-** tree.
-*/
-static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
- Bitmask mask = 0;
- while( pS ){
- SrcList *pSrc = pS->pSrc;
- mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pEList);
- mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pGroupBy);
- mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pOrderBy);
- mask |= sqlite3WhereExprUsage(pMaskSet, pS->pWhere);
- mask |= sqlite3WhereExprUsage(pMaskSet, pS->pHaving);
- if( ALWAYS(pSrc!=0) ){
- int i;
- for(i=0; i<pSrc->nSrc; i++){
- mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect);
- mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].pOn);
- }
- }
- pS = pS->pPrior;
- }
- return mask;
-}
-
-/*
-** Expression pExpr is one operand of a comparison operator that might
-** be useful for indexing. This routine checks to see if pExpr appears
-** in any index. Return TRUE (1) if pExpr is an indexed term and return
-** FALSE (0) if not. If TRUE is returned, also set *piCur to the cursor
-** number of the table that is indexed and *piColumn to the column number
-** of the column that is indexed, or -2 if an expression is being indexed.
-**
-** If pExpr is a TK_COLUMN column reference, then this routine always returns
-** true even if that particular column is not indexed, because the column
-** might be added to an automatic index later.
-*/
-static int exprMightBeIndexed(
- SrcList *pFrom, /* The FROM clause */
- Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */
- Expr *pExpr, /* An operand of a comparison operator */
- int *piCur, /* Write the referenced table cursor number here */
- int *piColumn /* Write the referenced table column number here */
-){
- Index *pIdx;
- int i;
- int iCur;
- if( pExpr->op==TK_COLUMN ){
- *piCur = pExpr->iTable;
- *piColumn = pExpr->iColumn;
- return 1;
- }
- if( mPrereq==0 ) return 0; /* No table references */
- if( (mPrereq&(mPrereq-1))!=0 ) return 0; /* Refs more than one table */
- for(i=0; mPrereq>1; i++, mPrereq>>=1){}
- iCur = pFrom->a[i].iCursor;
- for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- if( pIdx->aColExpr==0 ) continue;
- for(i=0; i<pIdx->nKeyCol; i++){
- if( pIdx->aiColumn[i]!=(-2) ) continue;
- if( sqlite3ExprCompare(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){
- *piCur = iCur;
- *piColumn = -2;
- return 1;
- }
- }
- }
- return 0;
-}
-
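A concrete (hypothetical) case for the expression branch above: given CREATE INDEX i1 ON t1(a+b), the WHERE term a+b=10 is not a bare column reference, but it does match the indexed expression, so exprMightBeIndexed() reports t1's cursor and a column number of -2 to mark an index on an expression.
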
-/*
** The input to this routine is an WhereTerm structure with only the
** "pExpr" field filled in. The job of this routine is to analyze the
** subexpression and populate all the other fields of the WhereTerm
@@ -120907,7 +109515,7 @@ static void exprAnalyze(
Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */
Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */
int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */
- int noCase = 0; /* uppercase equivalent to lowercase */
+  int noCase = 0;                 /* True if LIKE/GLOB is case-insensitive */
int op; /* Top-level operator. pExpr->op */
Parse *pParse = pWInfo->pParse; /* Parsing context */
sqlite3 *db = pParse->db; /* Database connection */
@@ -120919,23 +109527,23 @@ static void exprAnalyze(
pMaskSet = &pWInfo->sMaskSet;
pExpr = pTerm->pExpr;
assert( pExpr->op!=TK_AS && pExpr->op!=TK_COLLATE );
- prereqLeft = sqlite3WhereExprUsage(pMaskSet, pExpr->pLeft);
+ prereqLeft = exprTableUsage(pMaskSet, pExpr->pLeft);
op = pExpr->op;
if( op==TK_IN ){
assert( pExpr->pRight==0 );
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
- pTerm->prereqRight = exprSelectUsage(pMaskSet, pExpr->x.pSelect);
+ pTerm->prereqRight = exprSelectTableUsage(pMaskSet, pExpr->x.pSelect);
}else{
- pTerm->prereqRight = sqlite3WhereExprListUsage(pMaskSet, pExpr->x.pList);
+ pTerm->prereqRight = exprListTableUsage(pMaskSet, pExpr->x.pList);
}
}else if( op==TK_ISNULL ){
pTerm->prereqRight = 0;
}else{
- pTerm->prereqRight = sqlite3WhereExprUsage(pMaskSet, pExpr->pRight);
+ pTerm->prereqRight = exprTableUsage(pMaskSet, pExpr->pRight);
}
- prereqAll = sqlite3WhereExprUsage(pMaskSet, pExpr);
+ prereqAll = exprTableUsage(pMaskSet, pExpr);
if( ExprHasProperty(pExpr, EP_FromJoin) ){
- Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->iRightJoinTable);
+ Bitmask x = getMask(pMaskSet, pExpr->iRightJoinTable);
prereqAll |= x;
extraRight = x-1; /* ON clause terms may not be used with an index
** on left table of a LEFT JOIN. Ticket #3015 */
@@ -120945,19 +109553,15 @@ static void exprAnalyze(
pTerm->iParent = -1;
pTerm->eOperator = 0;
if( allowedOp(op) ){
- int iCur, iColumn;
Expr *pLeft = sqlite3ExprSkipCollate(pExpr->pLeft);
Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight);
u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? WO_ALL : WO_EQUIV;
- if( exprMightBeIndexed(pSrc, prereqLeft, pLeft, &iCur, &iColumn) ){
- pTerm->leftCursor = iCur;
- pTerm->u.leftColumn = iColumn;
+ if( pLeft->op==TK_COLUMN ){
+ pTerm->leftCursor = pLeft->iTable;
+ pTerm->u.leftColumn = pLeft->iColumn;
pTerm->eOperator = operatorMask(op) & opMask;
}
- if( op==TK_IS ) pTerm->wtFlags |= TERM_IS;
- if( pRight
- && exprMightBeIndexed(pSrc, pTerm->prereqRight, pRight, &iCur, &iColumn)
- ){
+ if( pRight && pRight->op==TK_COLUMN ){
WhereTerm *pNew;
Expr *pDup;
u16 eExtraOp = 0; /* Extra bits for pNew->eOperator */
@@ -120971,12 +109575,14 @@ static void exprAnalyze(
idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC);
if( idxNew==0 ) return;
pNew = &pWC->a[idxNew];
- markTermAsChild(pWC, idxNew, idxTerm);
- if( op==TK_IS ) pNew->wtFlags |= TERM_IS;
+ pNew->iParent = idxTerm;
pTerm = &pWC->a[idxTerm];
+ pTerm->nChild = 1;
pTerm->wtFlags |= TERM_COPIED;
-
- if( termIsEquivalence(pParse, pDup) ){
+ if( pExpr->op==TK_EQ
+ && !ExprHasProperty(pExpr, EP_FromJoin)
+ && OptimizationEnabled(db, SQLITE_Transitive)
+ ){
pTerm->eOperator |= WO_EQUIV;
eExtraOp = WO_EQUIV;
}
@@ -120985,8 +109591,9 @@ static void exprAnalyze(
pNew = pTerm;
}
exprCommute(pParse, pDup);
- pNew->leftCursor = iCur;
- pNew->u.leftColumn = iColumn;
+ pLeft = sqlite3ExprSkipCollate(pDup->pLeft);
+ pNew->leftCursor = pLeft->iTable;
+ pNew->u.leftColumn = pLeft->iColumn;
testcase( (prereqLeft | extraRight) != prereqLeft );
pNew->prereqRight = prereqLeft | extraRight;
pNew->prereqAll = prereqAll;
@@ -121027,8 +109634,9 @@ static void exprAnalyze(
testcase( idxNew==0 );
exprAnalyze(pSrc, pWC, idxNew);
pTerm = &pWC->a[idxTerm];
- markTermAsChild(pWC, idxNew, idxTerm);
+ pWC->a[idxNew].iParent = idxTerm;
}
+ pTerm->nChild = 2;
}
#endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */
@@ -121047,15 +109655,12 @@ static void exprAnalyze(
/* Add constraints to reduce the search space on a LIKE or GLOB
** operator.
**
- ** A like pattern of the form "x LIKE 'aBc%'" is changed into constraints
+ ** A like pattern of the form "x LIKE 'abc%'" is changed into constraints
**
- ** x>='ABC' AND x<'abd' AND x LIKE 'aBc%'
+ ** x>='abc' AND x<'abd' AND x LIKE 'abc%'
**
** The last character of the prefix "abc" is incremented to form the
- ** termination condition "abd". If case is not significant (the default
- ** for LIKE) then the lower-bound is made all uppercase and the upper-
- ** bound is made all lowercase so that the bounds also work when comparing
- ** BLOBs.
+ ** termination condition "abd".
*/
if( pWC->op==TK_AND
&& isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase)
@@ -121066,26 +109671,10 @@ static void exprAnalyze(
Expr *pNewExpr2;
int idxNew1;
int idxNew2;
- const char *zCollSeqName; /* Name of collating sequence */
- const u16 wtFlags = TERM_LIKEOPT | TERM_VIRTUAL | TERM_DYNAMIC;
+ Token sCollSeqName; /* Name of collating sequence */
pLeft = pExpr->x.pList->a[1].pExpr;
pStr2 = sqlite3ExprDup(db, pStr1, 0);
-
- /* Convert the lower bound to upper-case and the upper bound to
- ** lower-case (upper-case is less than lower-case in ASCII) so that
- ** the range constraints also work for BLOBs
- */
- if( noCase && !pParse->db->mallocFailed ){
- int i;
- char c;
- pTerm->wtFlags |= TERM_LIKE;
- for(i=0; (c = pStr1->u.zToken[i])!=0; i++){
- pStr1->u.zToken[i] = sqlite3Toupper(c);
- pStr2->u.zToken[i] = sqlite3Tolower(c);
- }
- }
-
if( !db->mallocFailed ){
u8 c, *pC; /* Last character before the first wildcard */
pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1];
@@ -121102,27 +109691,29 @@ static void exprAnalyze(
}
*pC = c + 1;
}
- zCollSeqName = noCase ? "NOCASE" : "BINARY";
+ sCollSeqName.z = noCase ? "NOCASE" : "BINARY";
+ sCollSeqName.n = 6;
pNewExpr1 = sqlite3ExprDup(db, pLeft, 0);
- pNewExpr1 = sqlite3PExpr(pParse, TK_GE,
- sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName),
+ pNewExpr1 = sqlite3PExpr(pParse, TK_GE,
+ sqlite3ExprAddCollateToken(pParse,pNewExpr1,&sCollSeqName),
pStr1, 0);
transferJoinMarkings(pNewExpr1, pExpr);
- idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags);
+ idxNew1 = whereClauseInsert(pWC, pNewExpr1, TERM_VIRTUAL|TERM_DYNAMIC);
testcase( idxNew1==0 );
exprAnalyze(pSrc, pWC, idxNew1);
pNewExpr2 = sqlite3ExprDup(db, pLeft, 0);
pNewExpr2 = sqlite3PExpr(pParse, TK_LT,
- sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName),
+ sqlite3ExprAddCollateToken(pParse,pNewExpr2,&sCollSeqName),
pStr2, 0);
transferJoinMarkings(pNewExpr2, pExpr);
- idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags);
+ idxNew2 = whereClauseInsert(pWC, pNewExpr2, TERM_VIRTUAL|TERM_DYNAMIC);
testcase( idxNew2==0 );
exprAnalyze(pSrc, pWC, idxNew2);
pTerm = &pWC->a[idxTerm];
if( isComplete ){
- markTermAsChild(pWC, idxNew1, idxTerm);
- markTermAsChild(pWC, idxNew2, idxTerm);
+ pWC->a[idxNew1].iParent = idxTerm;
+ pWC->a[idxNew2].iParent = idxTerm;
+ pTerm->nChild = 2;
}
}
#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */
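
The prefix/termination trick used above can be demonstrated in isolation. The sketch below is illustrative only: likePrefixRange is a hypothetical helper, it assumes a plain ASCII prefix, and it ignores the case-folding, affinity and wildcard-handling concerns the real code deals with.

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: derive the range implied by "x LIKE '<prefix>%'".
** zLo gets the prefix itself, zHi gets the prefix with its last byte
** incremented, so that  x>=zLo AND x<zHi  brackets all matching keys. */
static void likePrefixRange(const char *zPrefix, char *zLo, char *zHi, size_t n){
  size_t len = strlen(zPrefix);
  if( len>=n ) len = n-1;
  memcpy(zLo, zPrefix, len);  zLo[len] = 0;
  memcpy(zHi, zPrefix, len);  zHi[len] = 0;
  if( len>0 ) zHi[len-1]++;               /* "abc" -> "abd" */
}

int main(void){
  char lo[16], hi[16];
  likePrefixRange("abc", lo, hi, sizeof(lo));
  printf("x>='%s' AND x<'%s' AND x LIKE 'abc%%'\n", lo, hi);
  return 0;
}
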
@@ -121142,8 +109733,8 @@ static void exprAnalyze(
pRight = pExpr->x.pList->a[0].pExpr;
pLeft = pExpr->x.pList->a[1].pExpr;
- prereqExpr = sqlite3WhereExprUsage(pMaskSet, pRight);
- prereqColumn = sqlite3WhereExprUsage(pMaskSet, pLeft);
+ prereqExpr = exprTableUsage(pMaskSet, pRight);
+ prereqColumn = exprTableUsage(pMaskSet, pLeft);
if( (prereqExpr & prereqColumn)==0 ){
Expr *pNewExpr;
pNewExpr = sqlite3PExpr(pParse, TK_MATCH,
@@ -121155,8 +109746,9 @@ static void exprAnalyze(
pNewTerm->leftCursor = pLeft->iTable;
pNewTerm->u.leftColumn = pLeft->iColumn;
pNewTerm->eOperator = WO_MATCH;
- markTermAsChild(pWC, idxNew, idxTerm);
+ pNewTerm->iParent = idxTerm;
pTerm = &pWC->a[idxTerm];
+ pTerm->nChild = 1;
pTerm->wtFlags |= TERM_COPIED;
pNewTerm->prereqAll = pTerm->prereqAll;
}
@@ -121169,12 +109761,15 @@ static void exprAnalyze(
** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a
** virtual term of that form.
**
- ** Note that the virtual term must be tagged with TERM_VNULL.
+ ** Note that the virtual term must be tagged with TERM_VNULL. This
+ ** TERM_VNULL tag will suppress the not-null check at the beginning
+ ** of the loop. Without the TERM_VNULL flag, the not-null check at
+ ** the start of the loop will prevent any results from being returned.
*/
if( pExpr->op==TK_NOTNULL
&& pExpr->pLeft->op==TK_COLUMN
&& pExpr->pLeft->iColumn>=0
- && OptimizationEnabled(db, SQLITE_Stat34)
+ && OptimizationEnabled(db, SQLITE_Stat3)
){
Expr *pNewExpr;
Expr *pLeft = pExpr->pLeft;
@@ -121193,8 +109788,9 @@ static void exprAnalyze(
pNewTerm->leftCursor = pLeft->iTable;
pNewTerm->u.leftColumn = pLeft->iColumn;
pNewTerm->eOperator = WO_GT;
- markTermAsChild(pWC, idxNew, idxTerm);
+ pNewTerm->iParent = idxTerm;
pTerm = &pWC->a[idxTerm];
+ pTerm->nChild = 1;
pTerm->wtFlags |= TERM_COPIED;
pNewTerm->prereqAll = pTerm->prereqAll;
}
@@ -121207,532 +109803,8 @@ static void exprAnalyze(
pTerm->prereqRight |= extraRight;
}
-/***************************************************************************
-** Routines with file scope above. Interface to the rest of the where.c
-** subsystem follows.
-***************************************************************************/
-
-/*
-** This routine identifies subexpressions in the WHERE clause where
-** each subexpression is separated by the AND operator or some other
-** operator specified in the op parameter. The WhereClause structure
-** is filled with pointers to subexpressions. For example:
-**
-** WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22)
-** \________/ \_______________/ \________________/
-** slot[0] slot[1] slot[2]
-**
-** The original WHERE clause in pExpr is unaltered. All this routine
-** does is make slot[] entries point to substructure within pExpr.
-**
-** In the previous sentence and in the diagram, "slot[]" refers to
-** the WhereClause.a[] array. The slot[] array grows as needed to contain
-** all terms of the WHERE clause.
-*/
-SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause *pWC, Expr *pExpr, u8 op){
- Expr *pE2 = sqlite3ExprSkipCollate(pExpr);
- pWC->op = op;
- if( pE2==0 ) return;
- if( pE2->op!=op ){
- whereClauseInsert(pWC, pExpr, 0);
- }else{
- sqlite3WhereSplit(pWC, pE2->pLeft, op);
- sqlite3WhereSplit(pWC, pE2->pRight, op);
- }
-}
-
-/*
-** Initialize a preallocated WhereClause structure.
-*/
-SQLITE_PRIVATE void sqlite3WhereClauseInit(
- WhereClause *pWC, /* The WhereClause to be initialized */
- WhereInfo *pWInfo /* The WHERE processing context */
-){
- pWC->pWInfo = pWInfo;
- pWC->pOuter = 0;
- pWC->nTerm = 0;
- pWC->nSlot = ArraySize(pWC->aStatic);
- pWC->a = pWC->aStatic;
-}
-
-/*
-** Deallocate a WhereClause structure. The WhereClause structure
-** itself is not freed. This routine is the inverse of sqlite3WhereClauseInit().
-*/
-SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){
- int i;
- WhereTerm *a;
- sqlite3 *db = pWC->pWInfo->pParse->db;
- for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){
- if( a->wtFlags & TERM_DYNAMIC ){
- sqlite3ExprDelete(db, a->pExpr);
- }
- if( a->wtFlags & TERM_ORINFO ){
- whereOrInfoDelete(db, a->u.pOrInfo);
- }else if( a->wtFlags & TERM_ANDINFO ){
- whereAndInfoDelete(db, a->u.pAndInfo);
- }
- }
- if( pWC->a!=pWC->aStatic ){
- sqlite3DbFree(db, pWC->a);
- }
-}
-
-
-/*
-** These routines walk (recursively) an expression tree and generate
-** a bitmask indicating which tables are used in that expression
-** tree.
-*/
-SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){
- Bitmask mask = 0;
- if( p==0 ) return 0;
- if( p->op==TK_COLUMN ){
- mask = sqlite3WhereGetMask(pMaskSet, p->iTable);
- return mask;
- }
- mask = sqlite3WhereExprUsage(pMaskSet, p->pRight);
- mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft);
- if( ExprHasProperty(p, EP_xIsSelect) ){
- mask |= exprSelectUsage(pMaskSet, p->x.pSelect);
- }else{
- mask |= sqlite3WhereExprListUsage(pMaskSet, p->x.pList);
- }
- return mask;
-}
-SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet *pMaskSet, ExprList *pList){
- int i;
- Bitmask mask = 0;
- if( pList ){
- for(i=0; i<pList->nExpr; i++){
- mask |= sqlite3WhereExprUsage(pMaskSet, pList->a[i].pExpr);
- }
- }
- return mask;
-}
-
-
-/*
-** Call exprAnalyze on all terms in a WHERE clause.
-**
-** Note that exprAnalyze() might add new virtual terms onto the
-** end of the WHERE clause. We do not want to analyze these new
-** virtual terms, so start analyzing at the end and work forward
-** so that the added virtual terms are never processed.
-*/
-SQLITE_PRIVATE void sqlite3WhereExprAnalyze(
- SrcList *pTabList, /* the FROM clause */
- WhereClause *pWC /* the WHERE clause to be analyzed */
-){
- int i;
- for(i=pWC->nTerm-1; i>=0; i--){
- exprAnalyze(pTabList, pWC, i);
- }
-}
-
-/*
-** For table-valued-functions, transform the function arguments into
-** new WHERE clause terms.
-**
-** Each function argument translates into an equality constraint against
-** a HIDDEN column in the table.
-*/
-SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
- Parse *pParse, /* Parsing context */
- struct SrcList_item *pItem, /* The FROM clause term to process */
- WhereClause *pWC /* Xfer function arguments to here */
-){
- Table *pTab;
- int j, k;
- ExprList *pArgs;
- Expr *pColRef;
- Expr *pTerm;
- if( pItem->fg.isTabFunc==0 ) return;
- pTab = pItem->pTab;
- assert( pTab!=0 );
- pArgs = pItem->u1.pFuncArg;
- assert( pArgs!=0 );
- for(j=k=0; j<pArgs->nExpr; j++){
- while( k<pTab->nCol && (pTab->aCol[k].colFlags & COLFLAG_HIDDEN)==0 ){ k++; }
- if( k>=pTab->nCol ){
- sqlite3ErrorMsg(pParse, "too many arguments on %s() - max %d",
- pTab->zName, j);
- return;
- }
- pColRef = sqlite3PExpr(pParse, TK_COLUMN, 0, 0, 0);
- if( pColRef==0 ) return;
- pColRef->iTable = pItem->iCursor;
- pColRef->iColumn = k++;
- pColRef->pTab = pTab;
- pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef,
- sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0);
- whereClauseInsert(pWC, pTerm, TERM_DYNAMIC);
- }
-}
-
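As an illustration (assuming the generate_series extension, whose start/stop/step arguments map to HIDDEN columns): SELECT value FROM generate_series(1,100,5) is treated as if the WHERE clause contained start=1 AND stop=100 AND step=5, which the virtual table's xBestIndex can then pick up as ordinary equality constraints.
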
-/************** End of whereexpr.c *******************************************/
-/************** Begin file where.c *******************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This module contains C code that generates VDBE code used to process
-** the WHERE clause of SQL statements. This module is responsible for
-** generating the code that loops through a table looking for applicable
-** rows. Indices are selected and used to speed the search when doing
-** so is applicable. Because this module is responsible for selecting
-** indices, you might also think of this module as the "query optimizer".
-*/
-/* #include "sqliteInt.h" */
-/* #include "whereInt.h" */
-
-/* Forward declaration of methods */
-static int whereLoopResize(sqlite3*, WhereLoop*, int);
-
-/* Test variable that can be set to enable WHERE tracing */
-#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
-/***/ int sqlite3WhereTrace = 0;
-#endif
-
-
-/*
-** Return the estimated number of output rows from a WHERE clause
-*/
-SQLITE_PRIVATE u64 sqlite3WhereOutputRowCount(WhereInfo *pWInfo){
- return sqlite3LogEstToInt(pWInfo->nRowOut);
-}
-
-/*
-** Return one of the WHERE_DISTINCT_xxxxx values to indicate how this
-** WHERE clause returns outputs for DISTINCT processing.
-*/
-SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){
- return pWInfo->eDistinct;
-}
-
-/*
-** Return TRUE if the WHERE clause returns rows in ORDER BY order.
-** Return FALSE if the output needs to be sorted.
-*/
-SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){
- return pWInfo->nOBSat;
-}
-
-/*
-** Return the VDBE address or label to jump to in order to continue
-** immediately with the next row of a WHERE clause.
-*/
-SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo *pWInfo){
- assert( pWInfo->iContinue!=0 );
- return pWInfo->iContinue;
-}
-
-/*
-** Return the VDBE address or label to jump to in order to break
-** out of a WHERE loop.
-*/
-SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo *pWInfo){
- return pWInfo->iBreak;
-}
-
-/*
-** Return ONEPASS_OFF (0) if an UPDATE or DELETE statement is unable to
-** operate directly on the rowids returned by a WHERE clause.  Return
-** ONEPASS_SINGLE (1) if the statement can operate directly because only
-** a single row is to be changed.  Return ONEPASS_MULTI (2) if the one-pass
-** optimization can be used on multiple rows.
-**
-** If the ONEPASS optimization is used (if this routine returns true)
-** then also write the indices of open cursors used by ONEPASS
-** into aiCur[0] and aiCur[1].  aiCur[0] gets the cursor of the data
-** table and aiCur[1] gets the cursor used by an auxiliary index.
-** Either value may be -1, indicating that cursor is not used.
-** Any cursors returned will have been opened for writing.
-**
-** aiCur[0] and aiCur[1] both get -1 if the where-clause logic is
-** unable to use the ONEPASS optimization.
-*/
-SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo *pWInfo, int *aiCur){
- memcpy(aiCur, pWInfo->aiCurOnePass, sizeof(int)*2);
-#ifdef WHERETRACE_ENABLED
- if( sqlite3WhereTrace && pWInfo->eOnePass!=ONEPASS_OFF ){
- sqlite3DebugPrintf("%s cursors: %d %d\n",
- pWInfo->eOnePass==ONEPASS_SINGLE ? "ONEPASS_SINGLE" : "ONEPASS_MULTI",
- aiCur[0], aiCur[1]);
- }
-#endif
- return pWInfo->eOnePass;
-}
-
-/*
-** Move the content of pSrc into pDest
-*/
-static void whereOrMove(WhereOrSet *pDest, WhereOrSet *pSrc){
- pDest->n = pSrc->n;
- memcpy(pDest->a, pSrc->a, pDest->n*sizeof(pDest->a[0]));
-}
-
/*
-** Try to insert a new prerequisite/cost entry into the WhereOrSet pSet.
-**
-** The new entry might overwrite an existing entry, or it might be
-** appended, or it might be discarded. Do whatever is the right thing
-** so that pSet keeps the N_OR_COST best entries seen so far.
-*/
-static int whereOrInsert(
- WhereOrSet *pSet, /* The WhereOrSet to be updated */
- Bitmask prereq, /* Prerequisites of the new entry */
- LogEst rRun, /* Run-cost of the new entry */
- LogEst nOut /* Number of outputs for the new entry */
-){
- u16 i;
- WhereOrCost *p;
- for(i=pSet->n, p=pSet->a; i>0; i--, p++){
- if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){
- goto whereOrInsert_done;
- }
- if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){
- return 0;
- }
- }
- if( pSet->n<N_OR_COST ){
- p = &pSet->a[pSet->n++];
- p->nOut = nOut;
- }else{
- p = pSet->a;
- for(i=1; i<pSet->n; i++){
- if( p->rRun>pSet->a[i].rRun ) p = pSet->a + i;
- }
- if( p->rRun<=rRun ) return 0;
- }
-whereOrInsert_done:
- p->prereq = prereq;
- p->rRun = rRun;
- if( p->nOut>nOut ) p->nOut = nOut;
- return 1;
-}
-
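To make the dominance test above concrete (hypothetical numbers): an existing entry with prereq {t1} and rRun 40 causes a new candidate with prereq {t1,t2} and rRun 45 to be discarded, because the newcomer costs more and needs a superset of the prerequisites, while a new candidate with prereq {t1} and rRun 35 overwrites the existing entry instead.
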
-/*
-** Return the bitmask for the given cursor number. Return 0 if
-** iCursor is not in the set.
-*/
-SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet *pMaskSet, int iCursor){
- int i;
- assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 );
- for(i=0; i<pMaskSet->n; i++){
- if( pMaskSet->ix[i]==iCursor ){
- return MASKBIT(i);
- }
- }
- return 0;
-}
-
-/*
-** Create a new mask for cursor iCursor.
-**
-** There is one cursor per table in the FROM clause. The number of
-** tables in the FROM clause is limited by a test early in the
-** sqlite3WhereBegin() routine. So we know that the pMaskSet->ix[]
-** array will never overflow.
-*/
-static void createMask(WhereMaskSet *pMaskSet, int iCursor){
- assert( pMaskSet->n < ArraySize(pMaskSet->ix) );
- pMaskSet->ix[pMaskSet->n++] = iCursor;
-}
-
-/*
-** Advance to the next WhereTerm that matches according to the criteria
-** established when the pScan object was initialized by whereScanInit().
-** Return NULL if there are no more matching WhereTerms.
-*/
-static WhereTerm *whereScanNext(WhereScan *pScan){
- int iCur; /* The cursor on the LHS of the term */
- i16 iColumn; /* The column on the LHS of the term. -1 for IPK */
- Expr *pX; /* An expression being tested */
- WhereClause *pWC; /* Shorthand for pScan->pWC */
- WhereTerm *pTerm; /* The term being tested */
- int k = pScan->k; /* Where to start scanning */
-
- while( pScan->iEquiv<=pScan->nEquiv ){
- iCur = pScan->aiCur[pScan->iEquiv-1];
- iColumn = pScan->aiColumn[pScan->iEquiv-1];
- if( iColumn==XN_EXPR && pScan->pIdxExpr==0 ) return 0;
- while( (pWC = pScan->pWC)!=0 ){
- for(pTerm=pWC->a+k; k<pWC->nTerm; k++, pTerm++){
- if( pTerm->leftCursor==iCur
- && pTerm->u.leftColumn==iColumn
- && (iColumn!=XN_EXPR
- || sqlite3ExprCompare(pTerm->pExpr->pLeft,pScan->pIdxExpr,iCur)==0)
- && (pScan->iEquiv<=1 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin))
- ){
- if( (pTerm->eOperator & WO_EQUIV)!=0
- && pScan->nEquiv<ArraySize(pScan->aiCur)
- && (pX = sqlite3ExprSkipCollate(pTerm->pExpr->pRight))->op==TK_COLUMN
- ){
- int j;
- for(j=0; j<pScan->nEquiv; j++){
- if( pScan->aiCur[j]==pX->iTable
- && pScan->aiColumn[j]==pX->iColumn ){
- break;
- }
- }
- if( j==pScan->nEquiv ){
- pScan->aiCur[j] = pX->iTable;
- pScan->aiColumn[j] = pX->iColumn;
- pScan->nEquiv++;
- }
- }
- if( (pTerm->eOperator & pScan->opMask)!=0 ){
- /* Verify the affinity and collating sequence match */
- if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){
- CollSeq *pColl;
- Parse *pParse = pWC->pWInfo->pParse;
- pX = pTerm->pExpr;
- if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){
- continue;
- }
- assert(pX->pLeft);
- pColl = sqlite3BinaryCompareCollSeq(pParse,
- pX->pLeft, pX->pRight);
- if( pColl==0 ) pColl = pParse->db->pDfltColl;
- if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){
- continue;
- }
- }
- if( (pTerm->eOperator & (WO_EQ|WO_IS))!=0
- && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN
- && pX->iTable==pScan->aiCur[0]
- && pX->iColumn==pScan->aiColumn[0]
- ){
- testcase( pTerm->eOperator & WO_IS );
- continue;
- }
- pScan->k = k+1;
- return pTerm;
- }
- }
- }
- pScan->pWC = pScan->pWC->pOuter;
- k = 0;
- }
- pScan->pWC = pScan->pOrigWC;
- k = 0;
- pScan->iEquiv++;
- }
- return 0;
-}
-
-/*
-** Initialize a WHERE clause scanner object. Return a pointer to the
-** first match. Return NULL if there are no matches.
-**
-** The scanner will be searching the WHERE clause pWC. It will look
-** for terms of the form "X <op> <expr>" where X is column iColumn of table
-** iCur. The <op> must be one of the operators described by opMask.
-**
-** If the search is for X and the WHERE clause contains terms of the
-** form X=Y then this routine might also return terms of the form
-** "Y <op> <expr>". The number of levels of transitivity is limited,
-** but is enough to handle most commonly occurring SQL statements.
-**
-** If X is not the INTEGER PRIMARY KEY then X must be compatible with
-** index pIdx.
-*/
-static WhereTerm *whereScanInit(
- WhereScan *pScan, /* The WhereScan object being initialized */
- WhereClause *pWC, /* The WHERE clause to be scanned */
- int iCur, /* Cursor to scan for */
- int iColumn, /* Column to scan for */
- u32 opMask, /* Operator(s) to scan for */
- Index *pIdx /* Must be compatible with this index */
-){
- int j = 0;
-
- /* memset(pScan, 0, sizeof(*pScan)); */
- pScan->pOrigWC = pWC;
- pScan->pWC = pWC;
- pScan->pIdxExpr = 0;
- if( pIdx ){
- j = iColumn;
- iColumn = pIdx->aiColumn[j];
- if( iColumn==XN_EXPR ) pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr;
- }
- if( pIdx && iColumn>=0 ){
- pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity;
- pScan->zCollName = pIdx->azColl[j];
- }else{
- pScan->idxaff = 0;
- pScan->zCollName = 0;
- }
- pScan->opMask = opMask;
- pScan->k = 0;
- pScan->aiCur[0] = iCur;
- pScan->aiColumn[0] = iColumn;
- pScan->nEquiv = 1;
- pScan->iEquiv = 1;
- return whereScanNext(pScan);
-}
-
-/*
-** Search for a term in the WHERE clause that is of the form "X <op> <expr>"
-** where X is a reference to the iColumn of table iCur and <op> is one of
-** the WO_xx operator codes specified by the op parameter.
-** Return a pointer to the term. Return 0 if not found.
-**
-** If pIdx!=0 then search for terms matching the iColumn-th column of pIdx
-** rather than the iColumn-th column of table iCur.
-**
-** The term returned might by Y=<expr> if there is another constraint in
-** the WHERE clause that specifies that X=Y. Any such constraints will be
-** identified by the WO_EQUIV bit in the pTerm->eOperator field. The
-** aiCur[]/aiColumn[] arrays hold X and all its equivalents. There are 11
-** slots in aiCur[]/aiColumn[] so that means we can look for X plus up to 10
-** other equivalent values. Hence a search for X will return <expr> if X=A1
-** and A1=A2 and A2=A3 and ... and A9=A10 and A10=<expr>.
-**
-** If there are multiple terms in the WHERE clause of the form "X <op> <expr>"
-** then try for the one with no dependencies on <expr> - in other words where
-** <expr> is a constant expression of some kind. Only return entries of
-** the form "X <op> Y" where Y is a column in another table if no terms of
-** the form "X <op> <const-expr>" exist. If no terms with a constant RHS
-** exist, try to return a term that does not use WO_EQUIV.
-*/
-SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
- WhereClause *pWC, /* The WHERE clause to be searched */
- int iCur, /* Cursor number of LHS */
- int iColumn, /* Column number of LHS */
- Bitmask notReady, /* RHS must not overlap with this mask */
- u32 op, /* Mask of WO_xx values describing operator */
- Index *pIdx /* Must be compatible with this index, if not NULL */
-){
- WhereTerm *pResult = 0;
- WhereTerm *p;
- WhereScan scan;
-
- p = whereScanInit(&scan, pWC, iCur, iColumn, op, pIdx);
- op &= WO_EQ|WO_IS;
- while( p ){
- if( (p->prereqRight & notReady)==0 ){
- if( p->prereqRight==0 && (p->eOperator&op)!=0 ){
- testcase( p->eOperator & WO_IS );
- return p;
- }
- if( pResult==0 ) pResult = p;
- }
- p = whereScanNext(&scan);
- }
- return pResult;
-}
-
-/*
-** This function searches pList for an entry that matches the iCol-th column
+** This function searches pList for an entry that matches the iCol-th column
** of index pIdx.
**
** If such an expression is found, its index in pList->a[] is returned. If
@@ -121755,7 +109827,7 @@ static int findIndexCol(
&& p->iTable==iBase
){
CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[i].pExpr);
- if( pColl && 0==sqlite3StrICmp(pColl->zName, zColl) ){
+ if( ALWAYS(pColl) && 0==sqlite3StrICmp(pColl->zName, zColl) ){
return i;
}
}
@@ -121765,30 +109837,11 @@ static int findIndexCol(
}
/*
-** Return TRUE if the iCol-th column of index pIdx is NOT NULL
-*/
-static int indexColumnNotNull(Index *pIdx, int iCol){
- int j;
- assert( pIdx!=0 );
- assert( iCol>=0 && iCol<pIdx->nColumn );
- j = pIdx->aiColumn[iCol];
- if( j>=0 ){
- return pIdx->pTable->aCol[j].notNull;
- }else if( j==(-1) ){
- return 1;
- }else{
- assert( j==(-2) );
- return 0; /* Assume an indexed expression can always yield a NULL */
-
- }
-}
-
-/*
** Return true if the DISTINCT expression-list passed as the third argument
** is redundant.
**
-** A DISTINCT list is redundant if any subset of the columns in the
-** DISTINCT list are collectively unique and individually non-null.
+** A DISTINCT list is redundant if the database contains some subset of
+** columns that are unique and non-null.
*/
static int isDistinctRedundant(
Parse *pParse, /* Parsing context */
@@ -121831,11 +109884,14 @@ static int isDistinctRedundant(
** contain a "col=X" term are subject to a NOT NULL constraint.
*/
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- if( !IsUniqueIndex(pIdx) ) continue;
+ if( pIdx->onError==OE_None ) continue;
for(i=0; i<pIdx->nKeyCol; i++){
- if( 0==sqlite3WhereFindTerm(pWC, iBase, i, ~(Bitmask)0, WO_EQ, pIdx) ){
- if( findIndexCol(pParse, pDistinct, iBase, pIdx, i)<0 ) break;
- if( indexColumnNotNull(pIdx, i)==0 ) break;
+ i16 iCol = pIdx->aiColumn[i];
+ if( 0==findTerm(pWC, iBase, iCol, ~(Bitmask)0, WO_EQ, pIdx) ){
+ int iIdxCol = findIndexCol(pParse, pDistinct, iBase, pIdx, i);
+ if( iIdxCol<0 || pTab->aCol[iCol].notNull==0 ){
+ break;
+ }
}
}
if( i==pIdx->nKeyCol ){
@@ -121852,50 +109908,8 @@ static int isDistinctRedundant(
** Estimate the logarithm of the input value to base 2.
*/
static LogEst estLog(LogEst N){
- return N<=10 ? 0 : sqlite3LogEst(N) - 33;
-}
-
-/*
-** Convert OP_Column opcodes to OP_Copy in previously generated code.
-**
-** This routine runs over generated VDBE code and translates OP_Column
-** opcodes into OP_Copy when the table is being accessed via co-routine
-** instead of via table lookup.
-**
-** If the bIncrRowid parameter is 0, then any OP_Rowid instructions on
-** cursor iTabCur are transformed into OP_Null. Or, if bIncrRowid is non-zero,
-** then each OP_Rowid is transformed into an instruction to increment the
-** value stored in its output register.
-*/
-static void translateColumnToCopy(
- Vdbe *v, /* The VDBE containing code to translate */
- int iStart, /* Translate from this opcode to the end */
- int iTabCur, /* OP_Column/OP_Rowid references to this table */
- int iRegister, /* The first column is in this register */
- int bIncrRowid /* If non-zero, transform OP_rowid to OP_AddImm(1) */
-){
- VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart);
- int iEnd = sqlite3VdbeCurrentAddr(v);
- for(; iStart<iEnd; iStart++, pOp++){
- if( pOp->p1!=iTabCur ) continue;
- if( pOp->opcode==OP_Column ){
- pOp->opcode = OP_Copy;
- pOp->p1 = pOp->p2 + iRegister;
- pOp->p2 = pOp->p3;
- pOp->p3 = 0;
- }else if( pOp->opcode==OP_Rowid ){
- if( bIncrRowid ){
- /* Increment the value stored in the P2 operand of the OP_Rowid. */
- pOp->opcode = OP_AddImm;
- pOp->p1 = pOp->p2;
- pOp->p2 = 1;
- }else{
- pOp->opcode = OP_Null;
- pOp->p1 = 0;
- pOp->p3 = 0;
- }
- }
- }
+ LogEst x = sqlite3LogEst(N);
+ return x>33 ? x - 33 : 0;
}
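
A LogEst stores roughly 10 times a base-2 logarithm, so subtracting 33 (about 10*log2(10)) divides the underlying estimate by roughly ten. A quick, illustrative check of that relationship using math.h rather than sqlite3LogEst:

#include <stdio.h>
#include <math.h>

int main(void){
  double N = 1000.0;
  double logEst = 10.0*log2(N);   /* ~ sqlite3LogEst(1000), about 99.7 */
  double est    = logEst - 33.0;  /* ~ estLog(): about 66.4 = 10*log2(N/10) */
  printf("LogEst(%g) ~ %.1f;  minus 33 ~ %.1f ~ 10*log2(%g)\n",
         N, logEst, est, N/10.0);
  return 0;
}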
/*
@@ -121956,12 +109970,11 @@ static int termCanDriveIndex(
){
char aff;
if( pTerm->leftCursor!=pSrc->iCursor ) return 0;
- if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0;
+ if( (pTerm->eOperator & WO_EQ)==0 ) return 0;
if( (pTerm->prereqRight & notReady)!=0 ) return 0;
if( pTerm->u.leftColumn<0 ) return 0;
aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity;
if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0;
- testcase( pTerm->pExpr->op==TK_IS );
return 1;
}
#endif
@@ -121998,17 +110011,12 @@ static void constructAutomaticIndex(
Bitmask idxCols; /* Bitmap of columns used for indexing */
Bitmask extraCols; /* Bitmap of additional columns */
   u8 sentWarning = 0;          /* True if a warning has been issued */
- Expr *pPartial = 0; /* Partial Index Expression */
- int iContinue = 0; /* Jump here to skip excluded rows */
- struct SrcList_item *pTabItem; /* FROM clause term being indexed */
- int addrCounter; /* Address where integer counter is initialized */
- int regBase; /* Array of registers where record is assembled */
/* Generate code to skip over the creation and initialization of the
** transient index on 2nd and subsequent iterations of the loop. */
v = pParse->pVdbe;
assert( v!=0 );
- addrInit = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ addrInit = sqlite3CodeOnce(pParse);
/* Count the number of columns that will be added to the index
** and used to match WHERE clause constraints */
@@ -122018,17 +110026,6 @@ static void constructAutomaticIndex(
pLoop = pLevel->pWLoop;
idxCols = 0;
for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
- Expr *pExpr = pTerm->pExpr;
- assert( !ExprHasProperty(pExpr, EP_FromJoin) /* prereq always non-zero */
- || pExpr->iRightJoinTable!=pSrc->iCursor /* for the right-hand */
- || pLoop->prereq!=0 ); /* table of a LEFT JOIN */
- if( pLoop->prereq==0
- && (pTerm->wtFlags & TERM_VIRTUAL)==0
- && !ExprHasProperty(pExpr, EP_FromJoin)
- && sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor) ){
- pPartial = sqlite3ExprAnd(pParse->db, pPartial,
- sqlite3ExprDup(pParse->db, pExpr, 0));
- }
if( termCanDriveIndex(pTerm, pSrc, notReady) ){
int iCol = pTerm->u.leftColumn;
Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol);
@@ -122041,9 +110038,7 @@ static void constructAutomaticIndex(
sentWarning = 1;
}
if( (idxCols & cMask)==0 ){
- if( whereLoopResize(pParse->db, pLoop, nKeyCol+1) ){
- goto end_auto_index_create;
- }
+ if( whereLoopResize(pParse->db, pLoop, nKeyCol+1) ) return;
pLoop->aLTerm[nKeyCol++] = pTerm;
idxCols |= cMask;
}
@@ -122063,7 +110058,7 @@ static void constructAutomaticIndex(
** if they go out of sync.
*/
extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1));
- mxBitCol = MIN(BMS-1,pTable->nCol);
+ mxBitCol = (pTable->nCol >= BMS-1) ? BMS-1 : pTable->nCol;
testcase( pTable->nCol==BMS-1 );
testcase( pTable->nCol==BMS-2 );
for(i=0; i<mxBitCol; i++){
@@ -122072,10 +110067,11 @@ static void constructAutomaticIndex(
if( pSrc->colUsed & MASKBIT(BMS-1) ){
nKeyCol += pTable->nCol - BMS + 1;
}
+ pLoop->wsFlags |= WHERE_COLUMN_EQ | WHERE_IDX_ONLY;
/* Construct the Index object to describe this index */
pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed);
- if( pIdx==0 ) goto end_auto_index_create;
+ if( pIdx==0 ) return;
pLoop->u.btree.pIndex = pIdx;
pIdx->zName = "auto-index";
pIdx->pTable = pTable;
@@ -122092,7 +110088,7 @@ static void constructAutomaticIndex(
idxCols |= cMask;
pIdx->aiColumn[n] = pTerm->u.leftColumn;
pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight);
- pIdx->azColl[n] = pColl ? pColl->zName : "BINARY";
+ pIdx->azColl[n] = ALWAYS(pColl) ? pColl->zName : "BINARY";
n++;
}
}
@@ -122116,7 +110112,7 @@ static void constructAutomaticIndex(
}
}
assert( n==nKeyCol );
- pIdx->aiColumn[n] = XN_ROWID;
+ pIdx->aiColumn[n] = -1;
pIdx->azColl[n] = "BINARY";
/* Create the automatic index */
@@ -122127,48 +110123,18 @@ static void constructAutomaticIndex(
VdbeComment((v, "for %s", pTable->zName));
/* Fill the automatic index with content */
- sqlite3ExprCachePush(pParse);
- pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom];
- if( pTabItem->fg.viaCoroutine ){
- int regYield = pTabItem->regReturn;
- addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0);
- sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
- addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield);
- VdbeCoverage(v);
- VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName));
- }else{
- addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v);
- }
- if( pPartial ){
- iContinue = sqlite3VdbeMakeLabel(v);
- sqlite3ExprIfFalse(pParse, pPartial, iContinue, SQLITE_JUMPIFNULL);
- pLoop->wsFlags |= WHERE_PARTIALIDX;
- }
+ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur);
regRecord = sqlite3GetTempReg(pParse);
- regBase = sqlite3GenerateIndexKey(
- pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0, 0, 0
- );
+ sqlite3GenerateIndexKey(pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0);
sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord);
sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
- if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue);
- if( pTabItem->fg.viaCoroutine ){
- sqlite3VdbeChangeP2(v, addrCounter, regBase+n);
- translateColumnToCopy(v, addrTop, pLevel->iTabCur, pTabItem->regResult, 1);
- sqlite3VdbeGoto(v, addrTop);
- pTabItem->fg.viaCoroutine = 0;
- }else{
- sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v);
- }
+ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1);
sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX);
sqlite3VdbeJumpHere(v, addrTop);
sqlite3ReleaseTempReg(pParse, regRecord);
- sqlite3ExprCachePop(pParse);
/* Jump here when skipping the initialization */
sqlite3VdbeJumpHere(v, addrInit);
-
-end_auto_index_create:
- sqlite3ExprDelete(pParse->db, pPartial);
}
#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */
@@ -122181,7 +110147,6 @@ end_auto_index_create:
static sqlite3_index_info *allocateIndexInfo(
Parse *pParse,
WhereClause *pWC,
- Bitmask mUnusable, /* Ignore terms with these prereqs */
struct SrcList_item *pSrc,
ExprList *pOrderBy
){
@@ -122198,15 +110163,11 @@ static sqlite3_index_info *allocateIndexInfo(
** to this virtual table */
for(i=nTerm=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
if( pTerm->leftCursor != pSrc->iCursor ) continue;
- if( pTerm->prereqRight & mUnusable ) continue;
assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) );
testcase( pTerm->eOperator & WO_IN );
testcase( pTerm->eOperator & WO_ISNULL );
- testcase( pTerm->eOperator & WO_IS );
- testcase( pTerm->eOperator & WO_ALL );
- if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV|WO_IS))==0 ) continue;
+ if( pTerm->eOperator & (WO_ISNULL) ) continue;
if( pTerm->wtFlags & TERM_VNULL ) continue;
- assert( pTerm->u.leftColumn>=(-1) );
nTerm++;
}
@@ -122254,15 +110215,11 @@ static sqlite3_index_info *allocateIndexInfo(
for(i=j=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
u8 op;
if( pTerm->leftCursor != pSrc->iCursor ) continue;
- if( pTerm->prereqRight & mUnusable ) continue;
assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) );
testcase( pTerm->eOperator & WO_IN );
- testcase( pTerm->eOperator & WO_IS );
testcase( pTerm->eOperator & WO_ISNULL );
- testcase( pTerm->eOperator & WO_ALL );
- if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV|WO_IS))==0 ) continue;
+ if( pTerm->eOperator & (WO_ISNULL) ) continue;
if( pTerm->wtFlags & TERM_VNULL ) continue;
- assert( pTerm->u.leftColumn>=(-1) );
pIdxCons[j].iColumn = pTerm->u.leftColumn;
pIdxCons[j].iTermOffset = i;
op = (u8)pTerm->eOperator & WO_ALL;
@@ -122335,21 +110292,18 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){
}
#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */
+
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
/*
** Estimate the location of a particular key among all keys in an
** index. Store the results in aStat as follows:
**
-** aStat[0] Est. number of rows less than pRec
-** aStat[1] Est. number of rows equal to pRec
+** aStat[0] Est. number of rows less than pRec
+** aStat[1] Est. number of rows equal to pRec
**
-** Return the index of the sample that is the smallest sample that
-** is greater than or equal to pRec. Note that this index is not an index
-** into the aSample[] array - it is an index into a virtual set of samples
-** based on the contents of aSample[] and the number of fields in record
-** pRec.
*/
-static int whereKeyStats(
+static void whereKeyStats(
Parse *pParse, /* Parsing and code generating context */
Index *pIdx, /* Index to consider domain of */
UnpackedRecord *pRec, /* Vector of values to consider */
@@ -122358,158 +110312,66 @@ static int whereKeyStats(
){
IndexSample *aSample = pIdx->aSample;
int iCol; /* Index of required stats in anEq[] etc. */
- int i; /* Index of first sample >= pRec */
- int iSample; /* Smallest sample larger than or equal to pRec */
int iMin = 0; /* Smallest sample not yet tested */
+ int i = pIdx->nSample; /* Smallest sample larger than or equal to pRec */
int iTest; /* Next sample to test */
int res; /* Result of comparison operation */
- int nField; /* Number of fields in pRec */
- tRowcnt iLower = 0; /* anLt[] + anEq[] of largest sample pRec is > */
#ifndef SQLITE_DEBUG
UNUSED_PARAMETER( pParse );
#endif
assert( pRec!=0 );
+ iCol = pRec->nField - 1;
assert( pIdx->nSample>0 );
- assert( pRec->nField>0 && pRec->nField<=pIdx->nSampleCol );
-
- /* Do a binary search to find the first sample greater than or equal
- ** to pRec. If pRec contains a single field, the set of samples to search
- ** is simply the aSample[] array. If the samples in aSample[] contain more
- ** than one fields, all fields following the first are ignored.
- **
- ** If pRec contains N fields, where N is more than one, then as well as the
- ** samples in aSample[] (truncated to N fields), the search also has to
- ** consider prefixes of those samples. For example, if the set of samples
- ** in aSample is:
- **
- ** aSample[0] = (a, 5)
- ** aSample[1] = (a, 10)
- ** aSample[2] = (b, 5)
- ** aSample[3] = (c, 100)
- ** aSample[4] = (c, 105)
- **
- ** Then the search space should ideally be the samples above and the
- ** unique prefixes [a], [b] and [c]. But since that is hard to organize,
- ** the code actually searches this set:
- **
- ** 0: (a)
- ** 1: (a, 5)
- ** 2: (a, 10)
- ** 3: (a, 10)
- ** 4: (b)
- ** 5: (b, 5)
- ** 6: (c)
- ** 7: (c, 100)
- ** 8: (c, 105)
- ** 9: (c, 105)
- **
- ** For each sample in the aSample[] array, N samples are present in the
- ** effective sample array. In the above, samples 0 and 1 are based on
- ** sample aSample[0]. Samples 2 and 3 on aSample[1] etc.
- **
- ** Often, sample i of each block of N effective samples has (i+1) fields.
- ** Except, each sample may be extended to ensure that it is greater than or
- ** equal to the previous sample in the array. For example, in the above,
- ** sample 2 is the first sample of a block of N samples, so at first it
- ** appears that it should be 1 field in size. However, that would make it
- ** smaller than sample 1, so the binary search would not work. As a result,
- ** it is extended to two fields. The duplicates that this creates do not
- ** cause any problems.
- */
- nField = pRec->nField;
- iCol = 0;
- iSample = pIdx->nSample * nField;
+ assert( pRec->nField>0 && iCol<pIdx->nSampleCol );
do{
- int iSamp; /* Index in aSample[] of test sample */
- int n; /* Number of fields in test sample */
-
- iTest = (iMin+iSample)/2;
- iSamp = iTest / nField;
- if( iSamp>0 ){
- /* The proposed effective sample is a prefix of sample aSample[iSamp].
- ** Specifically, the shortest prefix of at least (1 + iTest%nField)
- ** fields that is greater than the previous effective sample. */
- for(n=(iTest % nField) + 1; n<nField; n++){
- if( aSample[iSamp-1].anLt[n-1]!=aSample[iSamp].anLt[n-1] ) break;
- }
- }else{
- n = iTest + 1;
- }
-
- pRec->nField = n;
- res = sqlite3VdbeRecordCompare(aSample[iSamp].n, aSample[iSamp].p, pRec);
+ iTest = (iMin+i)/2;
+ res = sqlite3VdbeRecordCompare(aSample[iTest].n, aSample[iTest].p, pRec);
if( res<0 ){
- iLower = aSample[iSamp].anLt[n-1] + aSample[iSamp].anEq[n-1];
iMin = iTest+1;
- }else if( res==0 && n<nField ){
- iLower = aSample[iSamp].anLt[n-1];
- iMin = iTest+1;
- res = -1;
}else{
- iSample = iTest;
- iCol = n-1;
+ i = iTest;
}
- }while( res && iMin<iSample );
- i = iSample / nField;
+ }while( res && iMin<i );
#ifdef SQLITE_DEBUG
/* The following assert statements check that the binary search code
** above found the right answer. This block serves no purpose other
** than to invoke the asserts. */
- if( pParse->db->mallocFailed==0 ){
- if( res==0 ){
- /* If (res==0) is true, then pRec must be equal to sample i. */
- assert( i<pIdx->nSample );
- assert( iCol==nField-1 );
- pRec->nField = nField;
- assert( 0==sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)
- || pParse->db->mallocFailed
- );
- }else{
- /* Unless i==pIdx->nSample, indicating that pRec is larger than
- ** all samples in the aSample[] array, pRec must be smaller than the
- ** (iCol+1) field prefix of sample i. */
- assert( i<=pIdx->nSample && i>=0 );
- pRec->nField = iCol+1;
- assert( i==pIdx->nSample
- || sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)>0
- || pParse->db->mallocFailed );
-
- /* if i==0 and iCol==0, then record pRec is smaller than all samples
- ** in the aSample[] array. Otherwise, if (iCol>0) then pRec must
- ** be greater than or equal to the (iCol) field prefix of sample i.
- ** If (i>0), then pRec must also be greater than sample (i-1). */
- if( iCol>0 ){
- pRec->nField = iCol;
- assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0
- || pParse->db->mallocFailed );
- }
- if( i>0 ){
- pRec->nField = nField;
- assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0
- || pParse->db->mallocFailed );
- }
- }
+ if( res==0 ){
+ /* If (res==0) is true, then sample $i must be equal to pRec */
+ assert( i<pIdx->nSample );
+ assert( 0==sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)
+ || pParse->db->mallocFailed );
+ }else{
+ /* Otherwise, pRec must be smaller than sample $i and larger than
+ ** sample ($i-1). */
+ assert( i==pIdx->nSample
+ || sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)>0
+ || pParse->db->mallocFailed );
+ assert( i==0
+ || sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0
+ || pParse->db->mallocFailed );
}
#endif /* ifdef SQLITE_DEBUG */
+ /* At this point, aSample[i] is the first sample that is greater than
+ ** or equal to pRec. Or if i==pIdx->nSample, then all samples are less
+ ** than pRec. If aSample[i]==pRec, then res==0.
+ */
if( res==0 ){
- /* Record pRec is equal to sample i */
- assert( iCol==nField-1 );
aStat[0] = aSample[i].anLt[iCol];
aStat[1] = aSample[i].anEq[iCol];
}else{
- /* At this point, the (iCol+1) field prefix of aSample[i] is the first
- ** sample that is greater than pRec. Or, if i==pIdx->nSample then pRec
- ** is larger than all samples in the array. */
- tRowcnt iUpper, iGap;
- if( i>=pIdx->nSample ){
- iUpper = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]);
+ tRowcnt iLower, iUpper, iGap;
+ if( i==0 ){
+ iLower = 0;
+ iUpper = aSample[0].anLt[iCol];
}else{
- iUpper = aSample[i].anLt[iCol];
+ iUpper = i>=pIdx->nSample ? pIdx->aiRowEst[0] : aSample[i].anLt[iCol];
+ iLower = aSample[i-1].anEq[iCol] + aSample[i-1].anLt[iCol];
}
-
+ aStat[1] = (pIdx->nKeyCol>iCol ? pIdx->aAvgEq[iCol] : 1);
if( iLower>=iUpper ){
iGap = 0;
}else{
@@ -122521,158 +110383,7 @@ static int whereKeyStats(
iGap = iGap/3;
}
aStat[0] = iLower + iGap;
- aStat[1] = pIdx->aAvgEq[iCol];
}
-
- /* Restore the pRec->nField value before returning. */
- pRec->nField = nField;
- return i;
-}
-#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
-
-/*
-** If it is not NULL, pTerm is a term that provides an upper or lower
-** bound on a range scan. Without considering pTerm, it is estimated
-** that the scan will visit nNew rows. This function returns the number
-** estimated to be visited after taking pTerm into account.
-**
-** If the user explicitly specified a likelihood() value for this term,
-** then the return value is the likelihood multiplied by the number of
-** input rows. Otherwise, this function assumes that an "IS NOT NULL" term
-** has a likelihood of 0.50, and any other term a likelihood of 0.25.
-*/
-static LogEst whereRangeAdjust(WhereTerm *pTerm, LogEst nNew){
- LogEst nRet = nNew;
- if( pTerm ){
- if( pTerm->truthProb<=0 ){
- nRet += pTerm->truthProb;
- }else if( (pTerm->wtFlags & TERM_VNULL)==0 ){
- nRet -= 20; assert( 20==sqlite3LogEst(4) );
- }
- }
- return nRet;
-}
-
-
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
-/*
-** Return the affinity for a single column of an index.
-*/
-static char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCol){
- assert( iCol>=0 && iCol<pIdx->nColumn );
- if( !pIdx->zColAff ){
- if( sqlite3IndexAffinityStr(db, pIdx)==0 ) return SQLITE_AFF_BLOB;
- }
- return pIdx->zColAff[iCol];
-}
-#endif
-
-
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
-/*
-** This function is called to estimate the number of rows visited by a
-** range-scan on a skip-scan index. For example:
-**
-** CREATE INDEX i1 ON t1(a, b, c);
-** SELECT * FROM t1 WHERE a=? AND c BETWEEN ? AND ?;
-**
-** Value pLoop->nOut is currently set to the estimated number of rows
-** visited for scanning (a=? AND b=?). This function reduces that estimate
-** by some factor to account for the (c BETWEEN ? AND ?) expression based
-** on the stat4 data for the index. The fact that this scan will be performed
-** multiple times (once for each (a,b) combination that matches a=?) is dealt
-** with by the caller.
-**
-** It does this by scanning through all stat4 samples, comparing values
-** extracted from pLower and pUpper with the corresponding column in each
-** sample. If L and U are the number of samples found to be less than or
-** equal to the values extracted from pLower and pUpper respectively, and
-** N is the total number of samples, the pLoop->nOut value is adjusted
-** as follows:
-**
-** nOut = nOut * ( min(U - L, 1) / N )
-**
-** If pLower is NULL, or a value cannot be extracted from the term, L is
-** set to zero. If pUpper is NULL, or a value cannot be extracted from it,
-** U is set to N.
-**
-** Normally, this function sets *pbDone to 1 before returning. However,
-** if no value can be extracted from either pLower or pUpper (and so the
-** estimate of the number of rows delivered remains unchanged), *pbDone
-** is left as is.
-**
-** If an error occurs, an SQLite error code is returned. Otherwise,
-** SQLITE_OK.
-*/
-static int whereRangeSkipScanEst(
- Parse *pParse, /* Parsing & code generating context */
- WhereTerm *pLower, /* Lower bound on the range. ex: "x>123" Might be NULL */
- WhereTerm *pUpper, /* Upper bound on the range. ex: "x<455" Might be NULL */
- WhereLoop *pLoop, /* Update the .nOut value of this loop */
- int *pbDone /* Set to true if at least one expr. value extracted */
-){
- Index *p = pLoop->u.btree.pIndex;
- int nEq = pLoop->u.btree.nEq;
- sqlite3 *db = pParse->db;
- int nLower = -1;
- int nUpper = p->nSample+1;
- int rc = SQLITE_OK;
- u8 aff = sqlite3IndexColumnAffinity(db, p, nEq);
- CollSeq *pColl;
-
- sqlite3_value *p1 = 0; /* Value extracted from pLower */
- sqlite3_value *p2 = 0; /* Value extracted from pUpper */
- sqlite3_value *pVal = 0; /* Value extracted from record */
-
- pColl = sqlite3LocateCollSeq(pParse, p->azColl[nEq]);
- if( pLower ){
- rc = sqlite3Stat4ValueFromExpr(pParse, pLower->pExpr->pRight, aff, &p1);
- nLower = 0;
- }
- if( pUpper && rc==SQLITE_OK ){
- rc = sqlite3Stat4ValueFromExpr(pParse, pUpper->pExpr->pRight, aff, &p2);
- nUpper = p2 ? 0 : p->nSample;
- }
-
- if( p1 || p2 ){
- int i;
- int nDiff;
- for(i=0; rc==SQLITE_OK && i<p->nSample; i++){
- rc = sqlite3Stat4Column(db, p->aSample[i].p, p->aSample[i].n, nEq, &pVal);
- if( rc==SQLITE_OK && p1 ){
- int res = sqlite3MemCompare(p1, pVal, pColl);
- if( res>=0 ) nLower++;
- }
- if( rc==SQLITE_OK && p2 ){
- int res = sqlite3MemCompare(p2, pVal, pColl);
- if( res>=0 ) nUpper++;
- }
- }
- nDiff = (nUpper - nLower);
- if( nDiff<=0 ) nDiff = 1;
-
- /* If there is both an upper and lower bound specified, and the
- ** comparisons indicate that they are close together, use the fallback
- ** method (assume that the scan visits 1/64 of the rows) for estimating
- ** the number of rows visited. Otherwise, estimate the number of rows
- ** using the method described in the header comment for this function. */
- if( nDiff!=1 || pUpper==0 || pLower==0 ){
- int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff));
- pLoop->nOut -= nAdjust;
- *pbDone = 1;
- WHERETRACE(0x10, ("range skip-scan regions: %u..%u adjust=%d est=%d\n",
- nLower, nUpper, nAdjust*-1, pLoop->nOut));
- }
-
- }else{
- assert( *pbDone==0 );
- }
-
- sqlite3ValueFree(p1);
- sqlite3ValueFree(p2);
- sqlite3ValueFree(pVal);
-
- return rc;
}
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
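/* Minimal standalone sketch (an assumption for illustration, not SQLite code)
** of the interpolation step at the end of whereKeyStats() above: once the
** binary search has found the first sample greater than or equal to the probe
** record, the "rows less than" estimate is the lower bound plus one third of
** the gap between the two bounding samples. The roundUp variant of the
** division falls outside the hunks shown here and is omitted.
*/
static unsigned int estimateRowsLessThan(
  unsigned int iLower,  /* anLt[]+anEq[] of the largest sample below the probe */
  unsigned int iUpper   /* anLt[] of the first sample >= the probe */
){
  unsigned int iGap = (iLower>=iUpper) ? 0 : (iUpper - iLower);
  return iLower + iGap/3;   /* e.g. estimateRowsLessThan(100,160)==120 */
}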
@@ -122691,7 +110402,7 @@ static int whereRangeSkipScanEst(
** If either of the upper or lower bound is not present, then NULL is passed in
** place of the corresponding WhereTerm.
**
-** The value in (pBuilder->pNew->u.btree.nEq) is the number of the index
+** The value in (pBuilder->pNew->u.btree.nEq) is the index of the index
** column subject to the range constraint. Or, equivalently, the number of
** equality constraints optimized by the proposed index scan. For example,
** assuming index p is on t1(a, b), and the SQL query is:
@@ -122707,14 +110418,14 @@ static int whereRangeSkipScanEst(
**
** When this function is called, *pnOut is set to the sqlite3LogEst() of the
** number of rows that the index scan is expected to visit without
-** considering the range constraints. If nEq is 0, then *pnOut is the number of
+** considering the range constraints. If nEq is 0, this is the number of
** rows in the index. Assuming no error occurs, *pnOut is adjusted (reduced)
-** to account for the range constraints pLower and pUpper.
+** to account for the range constraints pLower and pUpper.
**
** In the absence of sqlite_stat4 ANALYZE data, or if such data cannot be
-** used, a single range inequality reduces the search space by a factor of 4.
-** and a pair of constraints (x>? AND x<?) reduces the expected number of
-** rows visited by a factor of 64.
+** used, each range inequality reduces the search space by a factor of 4.
+** Hence a pair of constraints (x>? AND x<?) reduces the expected number of
+** rows visited by a factor of 16.
*/
static int whereRangeScanEst(
Parse *pParse, /* Parsing & code generating context */
@@ -122731,144 +110442,116 @@ static int whereRangeScanEst(
Index *p = pLoop->u.btree.pIndex;
int nEq = pLoop->u.btree.nEq;
- if( p->nSample>0 && nEq<p->nSampleCol ){
- if( nEq==pBuilder->nRecValid ){
- UnpackedRecord *pRec = pBuilder->pRec;
- tRowcnt a[2];
- u8 aff;
-
- /* Variable iLower will be set to the estimate of the number of rows in
- ** the index that are less than the lower bound of the range query. The
- ** lower bound being the concatenation of $P and $L, where $P is the
- ** key-prefix formed by the nEq values matched against the nEq left-most
- ** columns of the index, and $L is the value in pLower.
- **
- ** Or, if pLower is NULL or $L cannot be extracted from it (because it
- ** is not a simple variable or literal value), the lower bound of the
- ** range is $P. Due to a quirk in the way whereKeyStats() works, even
- ** if $L is available, whereKeyStats() is called for both ($P) and
- ** ($P:$L) and the larger of the two returned values is used.
- **
- ** Similarly, iUpper is to be set to the estimate of the number of rows
- ** less than the upper bound of the range query. Where the upper bound
- ** is either ($P) or ($P:$U). Again, even if $U is available, both values
- ** of iUpper are requested of whereKeyStats() and the smaller used.
- **
- ** The number of rows between the two bounds is then just iUpper-iLower.
- */
- tRowcnt iLower; /* Rows less than the lower bound */
- tRowcnt iUpper; /* Rows less than the upper bound */
- int iLwrIdx = -2; /* aSample[] for the lower bound */
- int iUprIdx = -1; /* aSample[] for the upper bound */
+ if( p->nSample>0
+ && nEq==pBuilder->nRecValid
+ && nEq<p->nSampleCol
+ && OptimizationEnabled(pParse->db, SQLITE_Stat3)
+ ){
+ UnpackedRecord *pRec = pBuilder->pRec;
+ tRowcnt a[2];
+ u8 aff;
+
+ /* Variable iLower will be set to the estimate of the number of rows in
+ ** the index that are less than the lower bound of the range query. The
+ ** lower bound being the concatenation of $P and $L, where $P is the
+ ** key-prefix formed by the nEq values matched against the nEq left-most
+ ** columns of the index, and $L is the value in pLower.
+ **
+ ** Or, if pLower is NULL or $L cannot be extracted from it (because it
+ ** is not a simple variable or literal value), the lower bound of the
+ ** range is $P. Due to a quirk in the way whereKeyStats() works, even
+ ** if $L is available, whereKeyStats() is called for both ($P) and
+ ** ($P:$L) and the larger of the two returned values used.
+ **
+ ** Similarly, iUpper is to be set to the estimate of the number of rows
+ ** less than the upper bound of the range query. Where the upper bound
+ ** is either ($P) or ($P:$U). Again, even if $U is available, both values
+ ** of iUpper are requested of whereKeyStats() and the smaller used.
+ */
+ tRowcnt iLower;
+ tRowcnt iUpper;
- if( pRec ){
- testcase( pRec->nField!=pBuilder->nRecValid );
- pRec->nField = pBuilder->nRecValid;
- }
- aff = sqlite3IndexColumnAffinity(pParse->db, p, nEq);
- assert( nEq!=p->nKeyCol || aff==SQLITE_AFF_INTEGER );
- /* Determine iLower and iUpper using ($P) only. */
- if( nEq==0 ){
- iLower = 0;
- iUpper = p->nRowEst0;
- }else{
- /* Note: this call could be optimized away - since the same values must
- ** have been requested when testing key $P in whereEqualScanEst(). */
- whereKeyStats(pParse, p, pRec, 0, a);
- iLower = a[0];
- iUpper = a[0] + a[1];
- }
+ if( nEq==p->nKeyCol ){
+ aff = SQLITE_AFF_INTEGER;
+ }else{
+ aff = p->pTable->aCol[p->aiColumn[nEq]].affinity;
+ }
+ /* Determine iLower and iUpper using ($P) only. */
+ if( nEq==0 ){
+ iLower = 0;
+ iUpper = p->aiRowEst[0];
+ }else{
+ /* Note: this call could be optimized away - since the same values must
+ ** have been requested when testing key $P in whereEqualScanEst(). */
+ whereKeyStats(pParse, p, pRec, 0, a);
+ iLower = a[0];
+ iUpper = a[0] + a[1];
+ }
- assert( pLower==0 || (pLower->eOperator & (WO_GT|WO_GE))!=0 );
- assert( pUpper==0 || (pUpper->eOperator & (WO_LT|WO_LE))!=0 );
- assert( p->aSortOrder!=0 );
- if( p->aSortOrder[nEq] ){
- /* The roles of pLower and pUpper are swapped for a DESC index */
- SWAP(WhereTerm*, pLower, pUpper);
+ /* If possible, improve on the iLower estimate using ($P:$L). */
+ if( pLower ){
+ int bOk; /* True if value is extracted from pExpr */
+ Expr *pExpr = pLower->pExpr->pRight;
+ assert( (pLower->eOperator & (WO_GT|WO_GE))!=0 );
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk);
+ if( rc==SQLITE_OK && bOk ){
+ tRowcnt iNew;
+ whereKeyStats(pParse, p, pRec, 0, a);
+ iNew = a[0] + ((pLower->eOperator & WO_GT) ? a[1] : 0);
+ if( iNew>iLower ) iLower = iNew;
+ nOut--;
}
+ }
- /* If possible, improve on the iLower estimate using ($P:$L). */
- if( pLower ){
- int bOk; /* True if value is extracted from pExpr */
- Expr *pExpr = pLower->pExpr->pRight;
- rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk);
- if( rc==SQLITE_OK && bOk ){
- tRowcnt iNew;
- iLwrIdx = whereKeyStats(pParse, p, pRec, 0, a);
- iNew = a[0] + ((pLower->eOperator & (WO_GT|WO_LE)) ? a[1] : 0);
- if( iNew>iLower ) iLower = iNew;
- nOut--;
- pLower = 0;
- }
+ /* If possible, improve on the iUpper estimate using ($P:$U). */
+ if( pUpper ){
+ int bOk; /* True if value is extracted from pExpr */
+ Expr *pExpr = pUpper->pExpr->pRight;
+ assert( (pUpper->eOperator & (WO_LT|WO_LE))!=0 );
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk);
+ if( rc==SQLITE_OK && bOk ){
+ tRowcnt iNew;
+ whereKeyStats(pParse, p, pRec, 1, a);
+ iNew = a[0] + ((pUpper->eOperator & WO_LE) ? a[1] : 0);
+ if( iNew<iUpper ) iUpper = iNew;
+ nOut--;
}
+ }
- /* If possible, improve on the iUpper estimate using ($P:$U). */
- if( pUpper ){
- int bOk; /* True if value is extracted from pExpr */
- Expr *pExpr = pUpper->pExpr->pRight;
- rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk);
- if( rc==SQLITE_OK && bOk ){
- tRowcnt iNew;
- iUprIdx = whereKeyStats(pParse, p, pRec, 1, a);
- iNew = a[0] + ((pUpper->eOperator & (WO_GT|WO_LE)) ? a[1] : 0);
- if( iNew<iUpper ) iUpper = iNew;
- nOut--;
- pUpper = 0;
- }
+ pBuilder->pRec = pRec;
+ if( rc==SQLITE_OK ){
+ if( iUpper>iLower ){
+ nNew = sqlite3LogEst(iUpper - iLower);
+ }else{
+ nNew = 10; assert( 10==sqlite3LogEst(2) );
}
-
- pBuilder->pRec = pRec;
- if( rc==SQLITE_OK ){
- if( iUpper>iLower ){
- nNew = sqlite3LogEst(iUpper - iLower);
- /* TUNING: If both iUpper and iLower are derived from the same
- ** sample, then assume they are 4x more selective. This brings
- ** the estimated selectivity more in line with what it would be
- ** if estimated without the use of STAT3/4 tables. */
- if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) );
- }else{
- nNew = 10; assert( 10==sqlite3LogEst(2) );
- }
- if( nNew<nOut ){
- nOut = nNew;
- }
- WHERETRACE(0x10, ("STAT4 range scan: %u..%u est=%d\n",
- (u32)iLower, (u32)iUpper, nOut));
+ if( nNew<nOut ){
+ nOut = nNew;
}
- }else{
- int bDone = 0;
- rc = whereRangeSkipScanEst(pParse, pLower, pUpper, pLoop, &bDone);
- if( bDone ) return rc;
+ pLoop->nOut = (LogEst)nOut;
+ WHERETRACE(0x10, ("range scan regions: %u..%u est=%d\n",
+ (u32)iLower, (u32)iUpper, nOut));
+ return SQLITE_OK;
}
}
#else
UNUSED_PARAMETER(pParse);
UNUSED_PARAMETER(pBuilder);
- assert( pLower || pUpper );
#endif
- assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 );
- nNew = whereRangeAdjust(pLower, nOut);
- nNew = whereRangeAdjust(pUpper, nNew);
-
- /* TUNING: If there is both an upper and lower limit and neither limit
- ** has an application-defined likelihood(), assume the range is
- ** reduced by an additional 75%. This means that, by default, an open-ended
- ** range query (e.g. col > ?) is assumed to match 1/4 of the rows in the
- ** index. While a closed range (e.g. col BETWEEN ? AND ?) is estimated to
- ** match 1/64 of the index. */
- if( pLower && pLower->truthProb>0 && pUpper && pUpper->truthProb>0 ){
- nNew -= 20;
+ assert( pLower || pUpper );
+ /* TUNING: Each inequality constraint reduces the search space 4-fold.
+ ** A BETWEEN operator, therefore, reduces the search space 16-fold */
+ nNew = nOut;
+ if( pLower && (pLower->wtFlags & TERM_VNULL)==0 ){
+ nNew -= 20; assert( 20==sqlite3LogEst(4) );
+ nOut--;
+ }
+ if( pUpper ){
+ nNew -= 20; assert( 20==sqlite3LogEst(4) );
+ nOut--;
}
-
- nOut -= (pLower!=0) + (pUpper!=0);
if( nNew<10 ) nNew = 10;
if( nNew<nOut ) nOut = nNew;
-#if defined(WHERETRACE_ENABLED)
- if( pLoop->nOut>nOut ){
- WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n",
- pLoop->nOut, nOut));
- }
-#endif
pLoop->nOut = (LogEst)nOut;
return rc;
}
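/* Worked example (illustrative only, compile with -lm): sqlite3LogEst() stores
** row counts as roughly 10*log2(N), so subtracting 20 from an estimate divides
** it by about 4, and subtracting 20 twice divides it by about 16. That is the
** "factor of 4" and "factor of 16" tuning described in the header comment of
** whereRangeScanEst() above. logEstApprox() is a stand-in, not the real code.
*/
#include <math.h>
#include <stdio.h>

static int logEstApprox(double x){ return (int)(10.0*log2(x) + 0.5); }
static double fromLogEst(int e){ return pow(2.0, e/10.0); }

int main(void){
  int nOut = logEstApprox(1000000.0);      /* table of ~1M rows, LogEst ~199 */
  printf("no range constraint: LogEst=%d (~%.0f rows)\n", nOut, fromLogEst(nOut));
  nOut -= 20;                              /* one inequality: about N/4 */
  printf("x>?                : LogEst=%d (~%.0f rows)\n", nOut, fromLogEst(nOut));
  nOut -= 20;                              /* both bounds: about N/16 */
  printf("x>? AND x<?        : LogEst=%d (~%.0f rows)\n", nOut, fromLogEst(nOut));
  return 0;
}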
@@ -122906,7 +110589,7 @@ static int whereEqualScanEst(
int bOk;
assert( nEq>=1 );
- assert( nEq<=p->nColumn );
+ assert( nEq<=(p->nKeyCol+1) );
assert( p->aSample!=0 );
assert( p->nSample>0 );
assert( pBuilder->nRecValid<nEq );
@@ -122919,12 +110602,12 @@ static int whereEqualScanEst(
/* This is an optimization only. The call to sqlite3Stat4ProbeSetValue()
** below would return the same value. */
- if( nEq>=p->nColumn ){
+ if( nEq>p->nKeyCol ){
*pnRow = 1;
return SQLITE_OK;
}
- aff = sqlite3IndexColumnAffinity(pParse->db, p, nEq-1);
+ aff = p->pTable->aCol[p->aiColumn[nEq-1]].affinity;
rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq-1, &bOk);
pBuilder->pRec = pRec;
if( rc!=SQLITE_OK ) return rc;
@@ -122963,7 +110646,6 @@ static int whereInScanEst(
tRowcnt *pnRow /* Write the revised row estimate here */
){
Index *p = pBuilder->pNew->u.btree.pIndex;
- i64 nRow0 = sqlite3LogEstToInt(p->aiRowLogEst[0]);
int nRecValid = pBuilder->nRecValid;
int rc = SQLITE_OK; /* Subfunction return code */
tRowcnt nEst; /* Number of rows for a single term */
@@ -122972,44 +110654,1278 @@ static int whereInScanEst(
assert( p->aSample!=0 );
for(i=0; rc==SQLITE_OK && i<pList->nExpr; i++){
- nEst = nRow0;
+ nEst = p->aiRowEst[0];
rc = whereEqualScanEst(pParse, pBuilder, pList->a[i].pExpr, &nEst);
nRowEst += nEst;
pBuilder->nRecValid = nRecValid;
}
if( rc==SQLITE_OK ){
- if( nRowEst > nRow0 ) nRowEst = nRow0;
+ if( nRowEst > p->aiRowEst[0] ) nRowEst = p->aiRowEst[0];
*pnRow = nRowEst;
- WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst));
+ WHERETRACE(0x10,("IN row estimate: est=%g\n", nRowEst));
}
assert( pBuilder->nRecValid==nRecValid );
return rc;
}
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
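/* Standalone sketch (an assumption for illustration, not SQLite code) of the
** arithmetic in whereInScanEst() above: the estimate for "x IN (v1,...,vN)"
** is the sum of the per-value equality estimates, capped at the total number
** of rows recorded for the index.
*/
static unsigned int estimateInScan(
  const unsigned int *aEst,  /* per-value row estimates, one per IN element */
  int nVal,                  /* number of elements in the IN list */
  unsigned int nRowTotal     /* total rows in the index (aiRowEst[0]) */
){
  unsigned int nRowEst = 0;
  int i;
  for(i=0; i<nVal; i++) nRowEst += aEst[i];
  if( nRowEst>nRowTotal ) nRowEst = nRowTotal;
  return nRowEst;
}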
+/*
+** Disable a term in the WHERE clause. Except, do not disable the term
+** if it controls a LEFT OUTER JOIN and it did not originate in the ON
+** or USING clause of that join.
+**
+** Consider the term t2.z='ok' in the following queries:
+**
+** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok'
+** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok'
+** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok'
+**
+** The term t2.z='ok' is disabled in (2) because it originates
+** in the ON clause. The term is disabled in (3) because it is not part
+** of a LEFT OUTER JOIN. In (1), the term is not disabled.
+**
+** Disabling a term causes that term to not be tested in the inner loop
+** of the join. Disabling is an optimization. When terms are satisfied
+** by indices, we disable them to prevent redundant tests in the inner
+** loop. We would get the correct results if nothing were ever disabled,
+** but joins might run a little slower. The trick is to disable as much
+** as we can without disabling too much. If we disabled in (1), we'd get
+** the wrong answer. See ticket #813.
+*/
+static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
+ if( pTerm
+ && (pTerm->wtFlags & TERM_CODED)==0
+ && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+ && (pLevel->notReady & pTerm->prereqAll)==0
+ ){
+ pTerm->wtFlags |= TERM_CODED;
+ if( pTerm->iParent>=0 ){
+ WhereTerm *pOther = &pTerm->pWC->a[pTerm->iParent];
+ if( (--pOther->nChild)==0 ){
+ disableTerm(pLevel, pOther);
+ }
+ }
+ }
+}
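/* Illustration (not part of this patch) of why the term in query (1) of the
** comment above must not be disabled: with a LEFT JOIN, a filter on t2.z in
** the WHERE clause removes the NULL-extended rows, while the same test in the
** ON clause does not. A minimal standalone check using only the public API.
*/
#include <stdio.h>
#include <sqlite3.h>

static int countRows(void *pCount, int nCol, char **azVal, char **azCol){
  (void)nCol; (void)azVal; (void)azCol;
  ++*(int*)pCount;
  return 0;
}

int main(void){
  sqlite3 *db;
  int n1 = 0, n2 = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
    "CREATE TABLE t1(a); CREATE TABLE t2(x, z);"
    "INSERT INTO t1 VALUES(1),(2);"
    "INSERT INTO t2 VALUES(1,'ok');", 0, 0, 0);
  /* (1) filter in WHERE: the unmatched t1 row (a=2) is eliminated */
  sqlite3_exec(db,
    "SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok';",
    countRows, &n1, 0);
  /* (2) same test in ON: the unmatched t1 row survives with NULLs for t2 */
  sqlite3_exec(db,
    "SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok';",
    countRows, &n2, 0);
  printf("(1) WHERE: %d row(s)   (2) ON: %d row(s)\n", n1, n2); /* expect 1, 2 */
  sqlite3_close(db);
  return 0;
}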
-#ifdef WHERETRACE_ENABLED
/*
-** Print the content of a WhereTerm object
+** Code an OP_Affinity opcode to apply the column affinity string zAff
+** to the n registers starting at base.
+**
+** As an optimization, SQLITE_AFF_NONE entries (which are no-ops) at the
+** beginning and end of zAff are ignored. If all entries in zAff are
+** SQLITE_AFF_NONE, then no code gets generated.
+**
+** This routine makes its own copy of zAff so that the caller is free
+** to modify zAff after this routine returns.
*/
-static void whereTermPrint(WhereTerm *pTerm, int iTerm){
- if( pTerm==0 ){
- sqlite3DebugPrintf("TERM-%-3d NULL\n", iTerm);
+static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
+ Vdbe *v = pParse->pVdbe;
+ if( zAff==0 ){
+ assert( pParse->db->mallocFailed );
+ return;
+ }
+ assert( v!=0 );
+
+ /* Adjust base and n to skip over SQLITE_AFF_NONE entries at the beginning
+ ** and end of the affinity string.
+ */
+ while( n>0 && zAff[0]==SQLITE_AFF_NONE ){
+ n--;
+ base++;
+ zAff++;
+ }
+ while( n>1 && zAff[n-1]==SQLITE_AFF_NONE ){
+ n--;
+ }
+
+ /* Code the OP_Affinity opcode if there is anything left to do. */
+ if( n>0 ){
+ sqlite3VdbeAddOp2(v, OP_Affinity, base, n);
+ sqlite3VdbeChangeP4(v, -1, zAff, n);
+ sqlite3ExprCacheAffinityChange(pParse, base, n);
+ }
+}
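/* Minimal sketch (an assumption for illustration, not SQLite code) of the
** trimming performed by codeApplyAffinity() above: no-op affinity entries are
** skipped at both ends so that the OP_Affinity opcode covers only the
** registers that need a conversion. The character 'x' below is a placeholder
** for SQLITE_AFF_NONE, whose actual value differs between SQLite versions.
*/
static void trimAffinityRange(
  const char *zAff,  /* affinity string, one character per register */
  int *pBase,        /* IN/OUT: first register to apply affinity to */
  int *pN            /* IN/OUT: number of registers covered */
){
  int base = *pBase, n = *pN, i = 0;
  while( n>0 && zAff[i]=='x' ){ n--; base++; i++; }  /* skip leading no-ops */
  while( n>1 && zAff[i+n-1]=='x' ){ n--; }           /* skip trailing no-ops */
  *pBase = base;
  *pN = n;
}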
+
+
+/*
+** Generate code for a single equality term of the WHERE clause. An equality
+** term can be either X=expr or X IN (...). pTerm is the term to be
+** coded.
+**
+** The current value for the constraint is left in register iReg.
+**
+** For a constraint of the form X=expr, the expression is evaluated and its
+** result is left in register iReg. For constraints of the form X IN (...)
+** this routine sets up a loop that will iterate over all values of X.
+*/
+static int codeEqualityTerm(
+ Parse *pParse, /* The parsing context */
+ WhereTerm *pTerm, /* The term of the WHERE clause to be coded */
+ WhereLevel *pLevel, /* The level of the FROM clause we are working on */
+ int iEq, /* Index of the equality term within this level */
+ int bRev, /* True for reverse-order IN operations */
+ int iTarget /* Attempt to leave results in this register */
+){
+ Expr *pX = pTerm->pExpr;
+ Vdbe *v = pParse->pVdbe;
+ int iReg; /* Register holding results */
+
+ assert( iTarget>0 );
+ if( pX->op==TK_EQ ){
+ iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget);
+ }else if( pX->op==TK_ISNULL ){
+ iReg = iTarget;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, iReg);
+#ifndef SQLITE_OMIT_SUBQUERY
}else{
- char zType[4];
- memcpy(zType, "...", 4);
- if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V';
- if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E';
- if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L';
- sqlite3DebugPrintf(
- "TERM-%-3d %p %s cursor=%-3d prob=%-3d op=0x%03x wtFlags=0x%04x\n",
- iTerm, pTerm, zType, pTerm->leftCursor, pTerm->truthProb,
- pTerm->eOperator, pTerm->wtFlags);
- sqlite3TreeViewExpr(0, pTerm->pExpr, 0);
+ int eType;
+ int iTab;
+ struct InLoop *pIn;
+ WhereLoop *pLoop = pLevel->pWLoop;
+
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
+ && pLoop->u.btree.pIndex!=0
+ && pLoop->u.btree.pIndex->aSortOrder[iEq]
+ ){
+ testcase( iEq==0 );
+ testcase( bRev );
+ bRev = !bRev;
+ }
+ assert( pX->op==TK_IN );
+ iReg = iTarget;
+ eType = sqlite3FindInIndex(pParse, pX, 0);
+ if( eType==IN_INDEX_INDEX_DESC ){
+ testcase( bRev );
+ bRev = !bRev;
+ }
+ iTab = pX->iTable;
+ sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0);
+ assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
+ pLoop->wsFlags |= WHERE_IN_ABLE;
+ if( pLevel->u.in.nIn==0 ){
+ pLevel->addrNxt = sqlite3VdbeMakeLabel(v);
+ }
+ pLevel->u.in.nIn++;
+ pLevel->u.in.aInLoop =
+ sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
+ sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
+ pIn = pLevel->u.in.aInLoop;
+ if( pIn ){
+ pIn += pLevel->u.in.nIn - 1;
+ pIn->iCur = iTab;
+ if( eType==IN_INDEX_ROWID ){
+ pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iReg);
+ }else{
+ pIn->addrInTop = sqlite3VdbeAddOp3(v, OP_Column, iTab, 0, iReg);
+ }
+ pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen;
+ sqlite3VdbeAddOp1(v, OP_IsNull, iReg);
+ }else{
+ pLevel->u.in.nIn = 0;
+ }
+#endif
}
+ disableTerm(pLevel, pTerm);
+ return iReg;
}
+
+/*
+** Generate code that will evaluate all == and IN constraints for an
+** index scan.
+**
+** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c).
+** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10
+** The index has as many as three equality constraints, but in this
+** example, the third "c" value is an inequality. So only two
+** constraints are coded. This routine will generate code to evaluate
+** a==5 and b IN (1,2,3). The current values for a and b will be stored
+** in consecutive registers and the index of the first register is returned.
+**
+** In the example above nEq==2. But this subroutine works for any value
+** of nEq including 0. If nEq==0, this routine is nearly a no-op.
+** The only thing it does is allocate the pLevel->iMem memory cell and
+** compute the affinity string.
+**
+** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints
+** are == or IN and are covered by the nEq terms. nExtraReg is 1 if there is
+** an inequality constraint (such as the "c>5 AND c<10" in the example) that
+** occurs after the nEq equality constraints.
+**
+** This routine allocates a range of nEq+nExtraReg memory cells and returns
+** the index of the first memory cell in that range. The code that
+** calls this routine will use that memory range to store keys for
+** start and termination conditions of the loop. If one or more IN operators
+** appear, then this routine allocates nEq additional memory cells for
+** internal use.
+**
+** Before returning, *pzAff is set to point to a buffer containing a
+** copy of the column affinity string of the index allocated using
+** sqlite3DbMalloc(). Except, entries in the copy of the string associated
+** with equality constraints that use NONE affinity are set to
+** SQLITE_AFF_NONE. This is to deal with SQL such as the following:
+**
+** CREATE TABLE t1(a TEXT PRIMARY KEY, b);
+** SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b;
+**
+** In the example above, the index on t1(a) has TEXT affinity. But since
+** the right hand side of the equality constraint (t2.b) has NONE affinity,
+** no conversion should be attempted before using a t2.b value as part of
+** a key to search the index. Hence the first byte in the returned affinity
+** string in this example would be set to SQLITE_AFF_NONE.
+*/
+static int codeAllEqualityTerms(
+ Parse *pParse, /* Parsing context */
+ WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */
+ int bRev, /* Reverse the order of IN operators */
+ int nExtraReg, /* Number of extra registers to allocate */
+ char **pzAff /* OUT: Set to point to affinity string */
+){
+ u16 nEq; /* The number of == or IN constraints to code */
+ u16 nSkip; /* Number of left-most columns to skip */
+ Vdbe *v = pParse->pVdbe; /* The vm under construction */
+ Index *pIdx; /* The index being used for this loop */
+ WhereTerm *pTerm; /* A single constraint term */
+ WhereLoop *pLoop; /* The WhereLoop object */
+ int j; /* Loop counter */
+ int regBase; /* Base register */
+ int nReg; /* Number of registers to allocate */
+ char *zAff; /* Affinity string to return */
+
+ /* This module is only called on query plans that use an index. */
+ pLoop = pLevel->pWLoop;
+ assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ nEq = pLoop->u.btree.nEq;
+ nSkip = pLoop->u.btree.nSkip;
+ pIdx = pLoop->u.btree.pIndex;
+ assert( pIdx!=0 );
+
+ /* Figure out how many memory cells we will need then allocate them.
+ */
+ regBase = pParse->nMem + 1;
+ nReg = pLoop->u.btree.nEq + nExtraReg;
+ pParse->nMem += nReg;
+
+ zAff = sqlite3DbStrDup(pParse->db, sqlite3IndexAffinityStr(v, pIdx));
+ if( !zAff ){
+ pParse->db->mallocFailed = 1;
+ }
+
+ if( nSkip ){
+ int iIdxCur = pLevel->iIdxCur;
+ sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur);
+ VdbeComment((v, "begin skip-scan on %s", pIdx->zName));
+ j = sqlite3VdbeAddOp0(v, OP_Goto);
+ pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLt:OP_SeekGt),
+ iIdxCur, 0, regBase, nSkip);
+ sqlite3VdbeJumpHere(v, j);
+ for(j=0; j<nSkip; j++){
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, j, regBase+j);
+ assert( pIdx->aiColumn[j]>=0 );
+ VdbeComment((v, "%s", pIdx->pTable->aCol[pIdx->aiColumn[j]].zName));
+ }
+ }
+
+ /* Evaluate the equality constraints
+ */
+ assert( zAff==0 || (int)strlen(zAff)>=nEq );
+ for(j=nSkip; j<nEq; j++){
+ int r1;
+ pTerm = pLoop->aLTerm[j];
+ assert( pTerm!=0 );
+ /* The following testcase is true for indices with redundant columns.
+ ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */
+ testcase( (pTerm->wtFlags & TERM_CODED)!=0 );
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j);
+ if( r1!=regBase+j ){
+ if( nReg==1 ){
+ sqlite3ReleaseTempReg(pParse, regBase);
+ regBase = r1;
+ }else{
+ sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j);
+ }
+ }
+ testcase( pTerm->eOperator & WO_ISNULL );
+ testcase( pTerm->eOperator & WO_IN );
+ if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){
+ Expr *pRight = pTerm->pExpr->pRight;
+ sqlite3ExprCodeIsNullJump(v, pRight, regBase+j, pLevel->addrBrk);
+ if( zAff ){
+ if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_NONE ){
+ zAff[j] = SQLITE_AFF_NONE;
+ }
+ if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){
+ zAff[j] = SQLITE_AFF_NONE;
+ }
+ }
+ }
+ }
+ *pzAff = zAff;
+ return regBase;
+}
+
+#ifndef SQLITE_OMIT_EXPLAIN
+/*
+** This routine is a helper for explainIndexRange() below
+**
+** pStr holds the text of an expression that we are building up one term
+** at a time. This routine adds a new term to the end of the expression.
+** Terms are separated by AND so add the "AND" text for second and subsequent
+** terms only.
+*/
+static void explainAppendTerm(
+ StrAccum *pStr, /* The text expression being built */
+ int iTerm, /* Index of this term. First is zero */
+ const char *zColumn, /* Name of the column */
+ const char *zOp /* Name of the operator */
+){
+ if( iTerm ) sqlite3StrAccumAppend(pStr, " AND ", 5);
+ sqlite3StrAccumAppend(pStr, zColumn, -1);
+ sqlite3StrAccumAppend(pStr, zOp, 1);
+ sqlite3StrAccumAppend(pStr, "?", 1);
+}
+
+/*
+** Argument pLevel describes a strategy for scanning table pTab. This
+** function returns a pointer to a string buffer containing a description
+** of the subset of table rows scanned by the strategy in the form of an
+** SQL expression. Or, if all rows are scanned, NULL is returned.
+**
+** For example, if the query:
+**
+** SELECT * FROM t1 WHERE a=1 AND b>2;
+**
+** is run and there is an index on (a, b), then this function returns a
+** string similar to:
+**
+** "a=? AND b>?"
+**
+** The returned pointer points to memory obtained from sqlite3DbMalloc().
+** It is the responsibility of the caller to free the buffer when it is
+** no longer required.
+*/
+static char *explainIndexRange(sqlite3 *db, WhereLoop *pLoop, Table *pTab){
+ Index *pIndex = pLoop->u.btree.pIndex;
+ u16 nEq = pLoop->u.btree.nEq;
+ u16 nSkip = pLoop->u.btree.nSkip;
+ int i, j;
+ Column *aCol = pTab->aCol;
+ i16 *aiColumn = pIndex->aiColumn;
+ StrAccum txt;
+
+ if( nEq==0 && (pLoop->wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ){
+ return 0;
+ }
+ sqlite3StrAccumInit(&txt, 0, 0, SQLITE_MAX_LENGTH);
+ txt.db = db;
+ sqlite3StrAccumAppend(&txt, " (", 2);
+ for(i=0; i<nEq; i++){
+ char *z = (i==pIndex->nKeyCol ) ? "rowid" : aCol[aiColumn[i]].zName;
+ if( i>=nSkip ){
+ explainAppendTerm(&txt, i, z, "=");
+ }else{
+ if( i ) sqlite3StrAccumAppend(&txt, " AND ", 5);
+ sqlite3StrAccumAppend(&txt, "ANY(", 4);
+ sqlite3StrAccumAppend(&txt, z, -1);
+ sqlite3StrAccumAppend(&txt, ")", 1);
+ }
+ }
+
+ j = i;
+ if( pLoop->wsFlags&WHERE_BTM_LIMIT ){
+ char *z = (j==pIndex->nKeyCol ) ? "rowid" : aCol[aiColumn[j]].zName;
+ explainAppendTerm(&txt, i++, z, ">");
+ }
+ if( pLoop->wsFlags&WHERE_TOP_LIMIT ){
+ char *z = (j==pIndex->nKeyCol ) ? "rowid" : aCol[aiColumn[j]].zName;
+ explainAppendTerm(&txt, i, z, "<");
+ }
+ sqlite3StrAccumAppend(&txt, ")", 1);
+ return sqlite3StrAccumFinish(&txt);
+}
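/* Illustration (not part of this patch): the string assembled by
** explainIndexRange() above becomes the parenthesised tail of an EXPLAIN QUERY
** PLAN row. A standalone sketch using only the public API; the surrounding
** wording of the plan line varies between SQLite versions, but the detail
** column should end with something like "USING INDEX i1 (a=? AND b>?)".
*/
#include <stdio.h>
#include <sqlite3.h>

static int printDetail(void *notUsed, int nCol, char **azVal, char **azCol){
  (void)notUsed; (void)azCol;
  printf("%s\n", azVal[nCol-1] ? azVal[nCol-1] : "NULL");  /* last column = detail */
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
    "CREATE TABLE t1(a, b, c);"
    "CREATE INDEX i1 ON t1(a, b);"
    "EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=1 AND b>2;",
    printDetail, 0, 0);
  sqlite3_close(db);
  return 0;
}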
+
+/*
+** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN
+** command. If the query being compiled is an EXPLAIN QUERY PLAN, a single
+** record is added to the output to describe the table scan strategy in
+** pLevel.
+*/
+static void explainOneScan(
+ Parse *pParse, /* Parse context */
+ SrcList *pTabList, /* Table list this loop refers to */
+ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */
+ int iLevel, /* Value for "level" column of output */
+ int iFrom, /* Value for "from" column of output */
+ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */
+){
+#ifndef SQLITE_DEBUG
+ if( pParse->explain==2 )
+#endif
+ {
+ struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom];
+ Vdbe *v = pParse->pVdbe; /* VM being constructed */
+ sqlite3 *db = pParse->db; /* Database handle */
+ char *zMsg; /* Text to add to EQP output */
+ int iId = pParse->iSelectId; /* Select id (left-most output column) */
+ int isSearch; /* True for a SEARCH. False for SCAN. */
+ WhereLoop *pLoop; /* The controlling WhereLoop object */
+ u32 flags; /* Flags that describe this loop */
+
+ pLoop = pLevel->pWLoop;
+ flags = pLoop->wsFlags;
+ if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_ONETABLE_ONLY) ) return;
+
+ isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0
+ || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0))
+ || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX));
+
+ zMsg = sqlite3MPrintf(db, "%s", isSearch?"SEARCH":"SCAN");
+ if( pItem->pSelect ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s SUBQUERY %d", zMsg,pItem->iSelectId);
+ }else{
+ zMsg = sqlite3MAppendf(db, zMsg, "%s TABLE %s", zMsg, pItem->zName);
+ }
+
+ if( pItem->zAlias ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s AS %s", zMsg, pItem->zAlias);
+ }
+ if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0
+ && ALWAYS(pLoop->u.btree.pIndex!=0)
+ ){
+ char *zWhere = explainIndexRange(db, pLoop, pItem->pTab);
+ zMsg = sqlite3MAppendf(db, zMsg,
+ ((flags & WHERE_AUTO_INDEX) ?
+ "%s USING AUTOMATIC %sINDEX%.0s%s" :
+ "%s USING %sINDEX %s%s"),
+ zMsg, ((flags & WHERE_IDX_ONLY) ? "COVERING " : ""),
+ pLoop->u.btree.pIndex->zName, zWhere);
+ sqlite3DbFree(db, zWhere);
+ }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s USING INTEGER PRIMARY KEY", zMsg);
+
+ if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid=?)", zMsg);
+ }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid>? AND rowid<?)", zMsg);
+ }else if( flags&WHERE_BTM_LIMIT ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid>?)", zMsg);
+ }else if( ALWAYS(flags&WHERE_TOP_LIMIT) ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid<?)", zMsg);
+ }
+ }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ else if( (flags & WHERE_VIRTUALTABLE)!=0 ){
+ zMsg = sqlite3MAppendf(db, zMsg, "%s VIRTUAL TABLE INDEX %d:%s", zMsg,
+ pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr);
+ }
#endif
+ zMsg = sqlite3MAppendf(db, zMsg, "%s", zMsg);
+ sqlite3VdbeAddOp4(v, OP_Explain, iId, iLevel, iFrom, zMsg, P4_DYNAMIC);
+ }
+}
+#else
+# define explainOneScan(u,v,w,x,y,z)
+#endif /* SQLITE_OMIT_EXPLAIN */
+
+
+/*
+** Generate code for the start of the iLevel-th loop in the WHERE clause
+** implementation described by pWInfo.
+*/
+static Bitmask codeOneLoopStart(
+ WhereInfo *pWInfo, /* Complete information about the WHERE clause */
+ int iLevel, /* Which level of pWInfo->a[] should be coded */
+ Bitmask notReady /* Which tables are currently available */
+){
+ int j, k; /* Loop counters */
+ int iCur; /* The VDBE cursor for the table */
+ int addrNxt; /* Where to jump to continue with the next IN case */
+ int omitTable; /* True if we use the index only */
+ int bRev; /* True if we need to scan in reverse order */
+ WhereLevel *pLevel; /* The where level to be coded */
+ WhereLoop *pLoop; /* The WhereLoop object being coded */
+ WhereClause *pWC; /* Decomposition of the entire WHERE clause */
+ WhereTerm *pTerm; /* A WHERE clause term */
+ Parse *pParse; /* Parsing context */
+ sqlite3 *db; /* Database connection */
+ Vdbe *v; /* The prepared stmt under constructions */
+ struct SrcList_item *pTabItem; /* FROM clause term being coded */
+ int addrBrk; /* Jump here to break out of the loop */
+ int addrCont; /* Jump here to continue with next cycle */
+ int iRowidReg = 0; /* Rowid is stored in this register, if not zero */
+ int iReleaseReg = 0; /* Temp register to free before returning */
+
+ pParse = pWInfo->pParse;
+ v = pParse->pVdbe;
+ pWC = &pWInfo->sWC;
+ db = pParse->db;
+ pLevel = &pWInfo->a[iLevel];
+ pLoop = pLevel->pWLoop;
+ pTabItem = &pWInfo->pTabList->a[pLevel->iFrom];
+ iCur = pTabItem->iCursor;
+ pLevel->notReady = notReady & ~getMask(&pWInfo->sMaskSet, iCur);
+ bRev = (pWInfo->revMask>>iLevel)&1;
+ omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0
+ && (pWInfo->wctrlFlags & WHERE_FORCE_TABLE)==0;
+ VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName));
+
+ /* Create labels for the "break" and "continue" instructions
+ ** for the current loop. Jump to addrBrk to break out of a loop.
+ ** Jump to cont to go immediately to the next iteration of the
+ ** loop.
+ **
+ ** When there is an IN operator, we also have a "addrNxt" label that
+ ** means to continue with the next IN value combination. When
+ ** there are no IN operators in the constraints, the "addrNxt" label
+ ** is the same as "addrBrk".
+ */
+ addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(v);
+ addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(v);
+
+ /* If this is the right table of a LEFT OUTER JOIN, allocate and
+ ** initialize a memory cell that records if this table matches any
+ ** row of the left table of the join.
+ */
+ if( pLevel->iFrom>0 && (pTabItem[0].jointype & JT_LEFT)!=0 ){
+ pLevel->iLeftJoin = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin);
+ VdbeComment((v, "init LEFT JOIN no-match flag"));
+ }
+
+ /* Special case of a FROM clause subquery implemented as a co-routine */
+ if( pTabItem->viaCoroutine ){
+ int regYield = pTabItem->regReturn;
+ sqlite3VdbeAddOp2(v, OP_Integer, pTabItem->addrFillSub-1, regYield);
+ pLevel->p2 = sqlite3VdbeAddOp1(v, OP_Yield, regYield);
+ VdbeComment((v, "next row of co-routine %s", pTabItem->pTab->zName));
+ sqlite3VdbeAddOp2(v, OP_If, regYield+1, addrBrk);
+ pLevel->op = OP_Goto;
+ }else
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){
+ /* Case 1: The table is a virtual-table. Use the VFilter and VNext
+ ** to access the data.
+ */
+ int iReg; /* P3 Value for OP_VFilter */
+ int addrNotFound;
+ int nConstraint = pLoop->nLTerm;
+
+ sqlite3ExprCachePush(pParse);
+ iReg = sqlite3GetTempRange(pParse, nConstraint+2);
+ addrNotFound = pLevel->addrBrk;
+ for(j=0; j<nConstraint; j++){
+ int iTarget = iReg+j+2;
+ pTerm = pLoop->aLTerm[j];
+ if( pTerm==0 ) continue;
+ if( pTerm->eOperator & WO_IN ){
+ codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
+ addrNotFound = pLevel->addrNxt;
+ }else{
+ sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget);
+ }
+ }
+ sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg);
+ sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1);
+ sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg,
+ pLoop->u.vtab.idxStr,
+ pLoop->u.vtab.needFree ? P4_MPRINTF : P4_STATIC);
+ pLoop->u.vtab.needFree = 0;
+ for(j=0; j<nConstraint && j<16; j++){
+ if( (pLoop->u.vtab.omitMask>>j)&1 ){
+ disableTerm(pLevel, pLoop->aLTerm[j]);
+ }
+ }
+ pLevel->op = OP_VNext;
+ pLevel->p1 = iCur;
+ pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+ sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
+ sqlite3ExprCachePop(pParse, 1);
+ }else
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+ if( (pLoop->wsFlags & WHERE_IPK)!=0
+ && (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0
+ ){
+ /* Case 2: We can directly reference a single row using an
+ ** equality comparison against the ROWID field. Or
+ ** we reference multiple rows using a "rowid IN (...)"
+ ** construct.
+ */
+ assert( pLoop->u.btree.nEq==1 );
+ iReleaseReg = sqlite3GetTempReg(pParse);
+ pTerm = pLoop->aLTerm[0];
+ assert( pTerm!=0 );
+ assert( pTerm->pExpr!=0 );
+ assert( omitTable==0 );
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg);
+ addrNxt = pLevel->addrNxt;
+ sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt);
+ sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addrNxt, iRowidReg);
+ sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1);
+ sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
+ VdbeComment((v, "pk"));
+ pLevel->op = OP_Noop;
+ }else if( (pLoop->wsFlags & WHERE_IPK)!=0
+ && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0
+ ){
+ /* Case 3: We have an inequality comparison against the ROWID field.
+ */
+ int testOp = OP_Noop;
+ int start;
+ int memEndValue = 0;
+ WhereTerm *pStart, *pEnd;
+
+ assert( omitTable==0 );
+ j = 0;
+ pStart = pEnd = 0;
+ if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++];
+ if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++];
+ assert( pStart!=0 || pEnd!=0 );
+ if( bRev ){
+ pTerm = pStart;
+ pStart = pEnd;
+ pEnd = pTerm;
+ }
+ if( pStart ){
+ Expr *pX; /* The expression that defines the start bound */
+ int r1, rTemp; /* Registers for holding the start boundary */
+
+ /* The following constant maps TK_xx codes into corresponding
+ ** seek opcodes. It depends on a particular ordering of TK_xx
+ */
+ const u8 aMoveOp[] = {
+ /* TK_GT */ OP_SeekGt,
+ /* TK_LE */ OP_SeekLe,
+ /* TK_LT */ OP_SeekLt,
+ /* TK_GE */ OP_SeekGe
+ };
+ assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */
+ assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */
+ assert( TK_GE==TK_GT+3 ); /* ... is correct. */
+
+ assert( (pStart->wtFlags & TERM_VNULL)==0 );
+ testcase( pStart->wtFlags & TERM_VIRTUAL );
+ pX = pStart->pExpr;
+ assert( pX!=0 );
+ testcase( pStart->leftCursor!=iCur ); /* transitive constraints */
+ r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
+ sqlite3VdbeAddOp3(v, aMoveOp[pX->op-TK_GT], iCur, addrBrk, r1);
+ VdbeComment((v, "pk"));
+ sqlite3ExprCacheAffinityChange(pParse, r1, 1);
+ sqlite3ReleaseTempReg(pParse, rTemp);
+ disableTerm(pLevel, pStart);
+ }else{
+ sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk);
+ }
+ if( pEnd ){
+ Expr *pX;
+ pX = pEnd->pExpr;
+ assert( pX!=0 );
+ assert( (pEnd->wtFlags & TERM_VNULL)==0 );
+ testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */
+ testcase( pEnd->wtFlags & TERM_VIRTUAL );
+ memEndValue = ++pParse->nMem;
+ sqlite3ExprCode(pParse, pX->pRight, memEndValue);
+ if( pX->op==TK_LT || pX->op==TK_GT ){
+ testOp = bRev ? OP_Le : OP_Ge;
+ }else{
+ testOp = bRev ? OP_Lt : OP_Gt;
+ }
+ disableTerm(pLevel, pEnd);
+ }
+ start = sqlite3VdbeCurrentAddr(v);
+ pLevel->op = bRev ? OP_Prev : OP_Next;
+ pLevel->p1 = iCur;
+ pLevel->p2 = start;
+ assert( pLevel->p5==0 );
+ if( testOp!=OP_Noop ){
+ iRowidReg = iReleaseReg = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg);
+ sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
+ sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg);
+ sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL);
+ }
+ }else if( pLoop->wsFlags & WHERE_INDEXED ){
+ /* Case 4: A scan using an index.
+ **
+ ** The WHERE clause may contain zero or more equality
+ ** terms ("==" or "IN" operators) that refer to the N
+ ** left-most columns of the index. It may also contain
+ ** inequality constraints (>, <, >= or <=) on the indexed
+ ** column that immediately follows the N equalities. Only
+ ** the right-most column can be an inequality - the rest must
+ ** use the "==" and "IN" operators. For example, if the
+ ** index is on (x,y,z), then the following clauses are all
+ ** optimized:
+ **
+ ** x=5
+ ** x=5 AND y=10
+ ** x=5 AND y<10
+ ** x=5 AND y>5 AND y<10
+ ** x=5 AND y=5 AND z<=10
+ **
+ ** The z<10 term of the following cannot be used, only
+ ** the x=5 term:
+ **
+ ** x=5 AND z<10
+ **
+ ** N may be zero if there are inequality constraints.
+ ** If there are no inequality constraints, then N is at
+ ** least one.
+ **
+ ** This case is also used when there are no WHERE clause
+ ** constraints but an index is selected anyway, in order
+ ** to force the output order to conform to an ORDER BY.
+ */
+ static const u8 aStartOp[] = {
+ 0,
+ 0,
+ OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */
+ OP_Last, /* 3: (!start_constraints && startEq && bRev) */
+ OP_SeekGt, /* 4: (start_constraints && !startEq && !bRev) */
+ OP_SeekLt, /* 5: (start_constraints && !startEq && bRev) */
+ OP_SeekGe, /* 6: (start_constraints && startEq && !bRev) */
+ OP_SeekLe /* 7: (start_constraints && startEq && bRev) */
+ };
+ static const u8 aEndOp[] = {
+ OP_Noop, /* 0: (!end_constraints) */
+ OP_IdxGE, /* 1: (end_constraints && !bRev) */
+ OP_IdxLT /* 2: (end_constraints && bRev) */
+ };
+ u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */
+ int isMinQuery = 0; /* If this is an optimized SELECT min(x).. */
+ int regBase; /* Base register holding constraint values */
+ int r1; /* Temp register */
+ WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */
+ WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */
+ int startEq; /* True if range start uses ==, >= or <= */
+ int endEq; /* True if range end uses ==, >= or <= */
+ int start_constraints; /* Start of range is constrained */
+ int nConstraint; /* Number of constraint terms */
+ Index *pIdx; /* The index we will be using */
+ int iIdxCur; /* The VDBE cursor for the index */
+ int nExtraReg = 0; /* Number of extra registers needed */
+ int op; /* Instruction opcode */
+ char *zStartAff; /* Affinity for start of range constraint */
+ char cEndAff = 0; /* Affinity for end of range constraint */
+
+ pIdx = pLoop->u.btree.pIndex;
+ iIdxCur = pLevel->iIdxCur;
+ assert( nEq>=pLoop->u.btree.nSkip );
+
+ /* If this loop satisfies a sort order (pOrderBy) request that
+ ** was passed to this function to implement a "SELECT min(x) ..."
+ ** query, then the caller will only allow the loop to run for
+ ** a single iteration. This means that the first row returned
+ ** should not have a NULL value stored in 'x'. If column 'x' is
+ ** the first one after the nEq equality constraints in the index,
+ ** this requires some special handling.
+ */
+ if( (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)!=0
+ && (pWInfo->bOBSat!=0)
+ && (pIdx->nKeyCol>nEq)
+ ){
+ assert( pLoop->u.btree.nSkip==0 );
+ isMinQuery = 1;
+ nExtraReg = 1;
+ }
+
+ /* Find any inequality constraint terms for the start and end
+ ** of the range.
+ */
+ j = nEq;
+ if( pLoop->wsFlags & WHERE_BTM_LIMIT ){
+ pRangeStart = pLoop->aLTerm[j++];
+ nExtraReg = 1;
+ }
+ if( pLoop->wsFlags & WHERE_TOP_LIMIT ){
+ pRangeEnd = pLoop->aLTerm[j++];
+ nExtraReg = 1;
+ }
+
+ /* Generate code to evaluate all constraint terms using == or IN
+ ** and store the values of those terms in an array of registers
+ ** starting at regBase.
+ */
+ regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff);
+ assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq );
+ if( zStartAff ) cEndAff = zStartAff[nEq];
+ addrNxt = pLevel->addrNxt;
+
+ /* If we are doing a reverse order scan on an ascending index, or
+ ** a forward order scan on a descending index, interchange the
+ ** start and end terms (pRangeStart and pRangeEnd).
+ */
+ if( (nEq<pIdx->nKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC))
+ || (bRev && pIdx->nKeyCol==nEq)
+ ){
+ SWAP(WhereTerm *, pRangeEnd, pRangeStart);
+ }
+
+ testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 );
+ testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 );
+ testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 );
+ testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 );
+ startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE);
+ endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE);
+ start_constraints = pRangeStart || nEq>0;
+
+ /* Seek the index cursor to the start of the range. */
+ nConstraint = nEq;
+ if( pRangeStart ){
+ Expr *pRight = pRangeStart->pExpr->pRight;
+ sqlite3ExprCode(pParse, pRight, regBase+nEq);
+ if( (pRangeStart->wtFlags & TERM_VNULL)==0 ){
+ sqlite3ExprCodeIsNullJump(v, pRight, regBase+nEq, addrNxt);
+ }
+ if( zStartAff ){
+ if( sqlite3CompareAffinity(pRight, zStartAff[nEq])==SQLITE_AFF_NONE){
+ /* Since the comparison is to be performed with no conversions
+ ** applied to the operands, set the affinity to apply to pRight to
+ ** SQLITE_AFF_NONE. */
+ zStartAff[nEq] = SQLITE_AFF_NONE;
+ }
+ if( sqlite3ExprNeedsNoAffinityChange(pRight, zStartAff[nEq]) ){
+ zStartAff[nEq] = SQLITE_AFF_NONE;
+ }
+ }
+ nConstraint++;
+ testcase( pRangeStart->wtFlags & TERM_VIRTUAL );
+ }else if( isMinQuery ){
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+ nConstraint++;
+ startEq = 0;
+ start_constraints = 1;
+ }
+ codeApplyAffinity(pParse, regBase, nConstraint, zStartAff);
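+  /* The aStartOp[] lookup below packs three flags into a 3-bit index:
+  ** bit 0 is bRev (scan direction), bit 1 is startEq (whether the start
+  ** bound is inclusive) and bit 2 is start_constraints (whether any start
+  ** bound exists at all).  Per the assert() and testcase() macros that
+  ** follow, the result is one of OP_Rewind, OP_Last, or one of the four
+  ** OP_Seek opcodes. */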
+ op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
+ assert( op!=0 );
+ testcase( op==OP_Rewind );
+ testcase( op==OP_Last );
+ testcase( op==OP_SeekGt );
+ testcase( op==OP_SeekGe );
+ testcase( op==OP_SeekLe );
+ testcase( op==OP_SeekLt );
+ sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
+
+ /* Load the value for the inequality constraint at the end of the
+ ** range (if any).
+ */
+ nConstraint = nEq;
+ if( pRangeEnd ){
+ Expr *pRight = pRangeEnd->pExpr->pRight;
+ sqlite3ExprCacheRemove(pParse, regBase+nEq, 1);
+ sqlite3ExprCode(pParse, pRight, regBase+nEq);
+ if( (pRangeEnd->wtFlags & TERM_VNULL)==0 ){
+ sqlite3ExprCodeIsNullJump(v, pRight, regBase+nEq, addrNxt);
+ }
+ if( sqlite3CompareAffinity(pRight, cEndAff)!=SQLITE_AFF_NONE
+ && !sqlite3ExprNeedsNoAffinityChange(pRight, cEndAff)
+ ){
+ codeApplyAffinity(pParse, regBase+nEq, 1, &cEndAff);
+ }
+ nConstraint++;
+ testcase( pRangeEnd->wtFlags & TERM_VIRTUAL );
+ }
+ sqlite3DbFree(db, zStartAff);
+
+ /* Top of the loop body */
+ pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+
+ /* Check if the index cursor is past the end of the range. */
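+  /* The aEndOp[] index below is 0 only when there is neither an end bound
+  ** nor an equality prefix, in which case op is OP_Noop and no test is
+  ** emitted.  Otherwise it is 1 for a forward scan or 2 for a reverse
+  ** scan, selecting OP_IdxGE or OP_IdxLT as suggested by the testcase()
+  ** macros below. */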
+ op = aEndOp[(pRangeEnd || nEq) * (1 + bRev)];
+ testcase( op==OP_Noop );
+ testcase( op==OP_IdxGE );
+ testcase( op==OP_IdxLT );
+ if( op!=OP_Noop ){
+ sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
+ sqlite3VdbeChangeP5(v, endEq!=bRev ?1:0);
+ }
+
+ /* If there are inequality constraints, check that the value
+  ** of the table column that the inequality constrains is not NULL.
+ ** If it is, jump to the next iteration of the loop.
+ */
+ r1 = sqlite3GetTempReg(pParse);
+ testcase( pLoop->wsFlags & WHERE_BTM_LIMIT );
+ testcase( pLoop->wsFlags & WHERE_TOP_LIMIT );
+ if( (pLoop->wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0
+ && (j = pIdx->aiColumn[nEq])>=0
+ && pIdx->pTable->aCol[j].notNull==0
+ && (nEq || (pLoop->wsFlags & WHERE_BTM_LIMIT)==0)
+ ){
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, nEq, r1);
+ VdbeComment((v, "%s", pIdx->pTable->aCol[j].zName));
+ sqlite3VdbeAddOp2(v, OP_IsNull, r1, addrCont);
+ }
+ sqlite3ReleaseTempReg(pParse, r1);
+
+ /* Seek the table cursor, if required */
+ disableTerm(pLevel, pRangeStart);
+ disableTerm(pLevel, pRangeEnd);
+ if( omitTable ){
+ /* pIdx is a covering index. No need to access the main table. */
+ }else if( HasRowid(pIdx->pTable) ){
+ iRowidReg = iReleaseReg = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
+ sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
+ sqlite3VdbeAddOp2(v, OP_Seek, iCur, iRowidReg); /* Deferred seek */
+ }else{
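+    /* The table is a WITHOUT ROWID table, so there is no rowid to seek
+    ** on.  Instead, read the PRIMARY KEY columns out of the index and
+    ** locate the corresponding table row with OP_NotFound, jumping to
+    ** the loop continuation if no such row exists. */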
+ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
+ iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol);
+ for(j=0; j<pPk->nKeyCol; j++){
+ k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j);
+ }
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont,
+ iRowidReg, pPk->nKeyCol);
+ }
+
+ /* Record the instruction used to terminate the loop. Disable
+ ** WHERE clause terms made redundant by the index range scan.
+ */
+ if( pLoop->wsFlags & WHERE_ONEROW ){
+ pLevel->op = OP_Noop;
+ }else if( bRev ){
+ pLevel->op = OP_Prev;
+ }else{
+ pLevel->op = OP_Next;
+ }
+ pLevel->p1 = iIdxCur;
+ if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){
+ pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
+ }else{
+ assert( pLevel->p5==0 );
+ }
+ }else
+
+#ifndef SQLITE_OMIT_OR_OPTIMIZATION
+ if( pLoop->wsFlags & WHERE_MULTI_OR ){
+ /* Case 5: Two or more separately indexed terms connected by OR
+ **
+ ** Example:
+ **
+ ** CREATE TABLE t1(a,b,c,d);
+ ** CREATE INDEX i1 ON t1(a);
+ ** CREATE INDEX i2 ON t1(b);
+ ** CREATE INDEX i3 ON t1(c);
+ **
+ ** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13)
+ **
+ ** In the example, there are three indexed terms connected by OR.
+ ** The top of the loop looks like this:
+ **
+ ** Null 1 # Zero the rowset in reg 1
+ **
+ ** Then, for each indexed term, the following. The arguments to
+ ** RowSetTest are such that the rowid of the current row is inserted
+ ** into the RowSet. If it is already present, control skips the
+ ** Gosub opcode and jumps straight to the code generated by WhereEnd().
+ **
+ ** sqlite3WhereBegin(<term>)
+ ** RowSetTest # Insert rowid into rowset
+ ** Gosub 2 A
+ ** sqlite3WhereEnd()
+ **
+ ** Following the above, code to terminate the loop. Label A, the target
+ ** of the Gosub above, jumps to the instruction right after the Goto.
+ **
+ ** Null 1 # Zero the rowset in reg 1
+ ** Goto B # The loop is finished.
+ **
+ ** A: <loop body> # Return data, whatever.
+ **
+ ** Return 2 # Jump back to the Gosub
+ **
+ ** B: <after the loop>
+ **
+ */
+ WhereClause *pOrWc; /* The OR-clause broken out into subterms */
+    SrcList *pOrTab;     /* Shortened table list for OR-clause generation */
+ Index *pCov = 0; /* Potential covering index (or NULL) */
+ int iCovCur = pParse->nTab++; /* Cursor used for index scans (if any) */
+
+ int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */
+ int regRowset = 0; /* Register for RowSet object */
+ int regRowid = 0; /* Register holding rowid */
+ int iLoopBody = sqlite3VdbeMakeLabel(v); /* Start of loop body */
+ int iRetInit; /* Address of regReturn init */
+ int untestedTerms = 0; /* Some terms not completely tested */
+ int ii; /* Loop counter */
+ Expr *pAndExpr = 0; /* An ".. AND (...)" expression */
+
+ pTerm = pLoop->aLTerm[0];
+ assert( pTerm!=0 );
+ assert( pTerm->eOperator & WO_OR );
+ assert( (pTerm->wtFlags & TERM_ORINFO)!=0 );
+ pOrWc = &pTerm->u.pOrInfo->wc;
+ pLevel->op = OP_Return;
+ pLevel->p1 = regReturn;
+
+ /* Set up a new SrcList in pOrTab containing the table being scanned
+ ** by this loop in the a[0] slot and all notReady tables in a[1..] slots.
+ ** This becomes the SrcList in the recursive call to sqlite3WhereBegin().
+ */
+ if( pWInfo->nLevel>1 ){
+ int nNotReady; /* The number of notReady tables */
+ struct SrcList_item *origSrc; /* Original list of tables */
+ nNotReady = pWInfo->nLevel - iLevel - 1;
+ pOrTab = sqlite3StackAllocRaw(db,
+ sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0]));
+ if( pOrTab==0 ) return notReady;
+ pOrTab->nAlloc = (u8)(nNotReady + 1);
+ pOrTab->nSrc = pOrTab->nAlloc;
+ memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem));
+ origSrc = pWInfo->pTabList->a;
+ for(k=1; k<=nNotReady; k++){
+ memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k]));
+ }
+ }else{
+ pOrTab = pWInfo->pTabList;
+ }
+
+ /* Initialize the rowset register to contain NULL. An SQL NULL is
+ ** equivalent to an empty rowset.
+ **
+ ** Also initialize regReturn to contain the address of the instruction
+ ** immediately following the OP_Return at the bottom of the loop. This
+ ** is required in a few obscure LEFT JOIN cases where control jumps
+ ** over the top of the loop into the body of it. In this case the
+ ** correct response for the end-of-loop code (the OP_Return) is to
+ ** fall through to the next instruction, just as an OP_Next does if
+ ** called on an uninitialized cursor.
+ */
+ if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
+ regRowset = ++pParse->nMem;
+ regRowid = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset);
+ }
+ iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn);
+
+ /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y
+ ** Then for every term xN, evaluate as the subexpression: xN AND z
+ ** That way, terms in y that are factored into the disjunction will
+ ** be picked up by the recursive calls to sqlite3WhereBegin() below.
+ **
+ ** Actually, each subexpression is converted to "xN AND w" where w is
+ ** the "interesting" terms of z - terms that did not originate in the
+ ** ON or USING clause of a LEFT JOIN, and terms that are usable as
+ ** indices.
+ **
+ ** This optimization also only applies if the (x1 OR x2 OR ...) term
+ ** is not contained in the ON clause of a LEFT JOIN.
+ ** See ticket http://www.sqlite.org/src/info/f2369304e4
+ */
+ if( pWC->nTerm>1 ){
+ int iTerm;
+ for(iTerm=0; iTerm<pWC->nTerm; iTerm++){
+ Expr *pExpr = pWC->a[iTerm].pExpr;
+ if( &pWC->a[iTerm] == pTerm ) continue;
+ if( ExprHasProperty(pExpr, EP_FromJoin) ) continue;
+ if( pWC->a[iTerm].wtFlags & (TERM_ORINFO) ) continue;
+ if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue;
+ pExpr = sqlite3ExprDup(db, pExpr, 0);
+ pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr);
+ }
+ if( pAndExpr ){
+ pAndExpr = sqlite3PExpr(pParse, TK_AND, 0, pAndExpr, 0);
+ }
+ }
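+    /* Hypothetical example (not from the original comment): for
+    ** WHERE (a=1 OR b=2) AND c>3, the loop above collects "c>3" so that
+    ** pAndExpr becomes "AND (c>3)" with a NULL left operand; each OR-term
+    ** below is then evaluated as, in effect, "(a=1) AND (c>3)" by
+    ** temporarily grafting the OR-term onto pAndExpr->pLeft. */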
+
+ for(ii=0; ii<pOrWc->nTerm; ii++){
+ WhereTerm *pOrTerm = &pOrWc->a[ii];
+ if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){
+ WhereInfo *pSubWInfo; /* Info for single OR-term scan */
+ Expr *pOrExpr = pOrTerm->pExpr;
+ if( pAndExpr && !ExprHasProperty(pOrExpr, EP_FromJoin) ){
+ pAndExpr->pLeft = pOrExpr;
+ pOrExpr = pAndExpr;
+ }
+ /* Loop through table entries that match term pOrTerm. */
+ pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0,
+ WHERE_OMIT_OPEN_CLOSE | WHERE_AND_ONLY |
+ WHERE_FORCE_TABLE | WHERE_ONETABLE_ONLY, iCovCur);
+ assert( pSubWInfo || pParse->nErr || db->mallocFailed );
+ if( pSubWInfo ){
+ WhereLoop *pSubLoop;
+ explainOneScan(
+ pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0
+ );
+ if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
+ int iSet = ((ii==pOrWc->nTerm-1)?-1:ii);
+ int r;
+ r = sqlite3ExprCodeGetColumn(pParse, pTabItem->pTab, -1, iCur,
+ regRowid, 0);
+ sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset,
+ sqlite3VdbeCurrentAddr(v)+2, r, iSet);
+ }
+ sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody);
+
+ /* The pSubWInfo->untestedTerms flag means that this OR term
+        ** contained one or more AND terms from a notReady table. The
+ ** terms from the notReady table could not be tested and will
+ ** need to be tested later.
+ */
+ if( pSubWInfo->untestedTerms ) untestedTerms = 1;
+
+ /* If all of the OR-connected terms are optimized using the same
+ ** index, and the index is opened using the same cursor number
+ ** by each call to sqlite3WhereBegin() made by this loop, it may
+ ** be possible to use that index as a covering index.
+ **
+ ** If the call to sqlite3WhereBegin() above resulted in a scan that
+ ** uses an index, and this is either the first OR-connected term
+ ** processed or the index is the same as that used by all previous
+ ** terms, set pCov to the candidate covering index. Otherwise, set
+ ** pCov to NULL to indicate that no candidate covering index will
+ ** be available.
+ */
+ pSubLoop = pSubWInfo->a[0].pWLoop;
+ assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
+ if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0
+ && (ii==0 || pSubLoop->u.btree.pIndex==pCov)
+ ){
+ assert( pSubWInfo->a[0].iIdxCur==iCovCur );
+ pCov = pSubLoop->u.btree.pIndex;
+ }else{
+ pCov = 0;
+ }
+
+ /* Finish the loop through table entries that match term pOrTerm. */
+ sqlite3WhereEnd(pSubWInfo);
+ }
+ }
+ }
+ pLevel->u.pCovidx = pCov;
+ if( pCov ) pLevel->iIdxCur = iCovCur;
+ if( pAndExpr ){
+ pAndExpr->pLeft = 0;
+ sqlite3ExprDelete(db, pAndExpr);
+ }
+ sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v));
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrBrk);
+ sqlite3VdbeResolveLabel(v, iLoopBody);
+
+ if( pWInfo->nLevel>1 ) sqlite3StackFree(db, pOrTab);
+ if( !untestedTerms ) disableTerm(pLevel, pTerm);
+ }else
+#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
+
+ {
+ /* Case 6: There is no usable index. We must do a complete
+ ** scan of the entire table.
+ */
+ static const u8 aStep[] = { OP_Next, OP_Prev };
+ static const u8 aStart[] = { OP_Rewind, OP_Last };
+ assert( bRev==0 || bRev==1 );
+ pLevel->op = aStep[bRev];
+ pLevel->p1 = iCur;
+ pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk);
+ pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
+ }
+
+ /* Insert code to test every subexpression that can be completely
+ ** computed using the current set of tables.
+ */
+ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
+ Expr *pE;
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ testcase( pTerm->wtFlags & TERM_CODED );
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
+ testcase( pWInfo->untestedTerms==0
+ && (pWInfo->wctrlFlags & WHERE_ONETABLE_ONLY)!=0 );
+ pWInfo->untestedTerms = 1;
+ continue;
+ }
+ pE = pTerm->pExpr;
+ assert( pE!=0 );
+ if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){
+ continue;
+ }
+ sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL);
+ pTerm->wtFlags |= TERM_CODED;
+ }
+
+ /* Insert code to test for implied constraints based on transitivity
+ ** of the "==" operator.
+ **
+ ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123"
+ ** and we are coding the t1 loop and the t2 loop has not yet coded,
+ ** then we cannot use the "t1.a=t2.b" constraint, but we can code
+ ** the implied "t1.a=123" constraint.
+ */
+ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
+ Expr *pE, *pEAlt;
+ WhereTerm *pAlt;
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( pTerm->eOperator!=(WO_EQUIV|WO_EQ) ) continue;
+ if( pTerm->leftCursor!=iCur ) continue;
+ if( pLevel->iLeftJoin ) continue;
+ pE = pTerm->pExpr;
+ assert( !ExprHasProperty(pE, EP_FromJoin) );
+ assert( (pTerm->prereqRight & pLevel->notReady)!=0 );
+ pAlt = findTerm(pWC, iCur, pTerm->u.leftColumn, notReady, WO_EQ|WO_IN, 0);
+ if( pAlt==0 ) continue;
+ if( pAlt->wtFlags & (TERM_CODED) ) continue;
+ testcase( pAlt->eOperator & WO_EQ );
+ testcase( pAlt->eOperator & WO_IN );
+ VdbeModuleComment((v, "begin transitive constraint"));
+ pEAlt = sqlite3StackAllocRaw(db, sizeof(*pEAlt));
+ if( pEAlt ){
+ *pEAlt = *pAlt->pExpr;
+ pEAlt->pLeft = pE->pLeft;
+ sqlite3ExprIfFalse(pParse, pEAlt, addrCont, SQLITE_JUMPIFNULL);
+ sqlite3StackFree(db, pEAlt);
+ }
+ }
+
+ /* For a LEFT OUTER JOIN, generate code that will record the fact that
+ ** at least one row of the right table has matched the left table.
+ */
+ if( pLevel->iLeftJoin ){
+ pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
+ VdbeComment((v, "record LEFT JOIN hit"));
+ sqlite3ExprCacheClear(pParse);
+ for(pTerm=pWC->a, j=0; j<pWC->nTerm; j++, pTerm++){
+ testcase( pTerm->wtFlags & TERM_VIRTUAL );
+ testcase( pTerm->wtFlags & TERM_CODED );
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
+ assert( pWInfo->untestedTerms );
+ continue;
+ }
+ assert( pTerm->pExpr );
+ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL);
+ pTerm->wtFlags |= TERM_CODED;
+ }
+ }
+ sqlite3ReleaseTempReg(pParse, iReleaseReg);
+
+ return pLevel->notReady;
+}
+
+#if defined(WHERETRACE_ENABLED) && defined(SQLITE_ENABLE_TREE_EXPLAIN)
+/*
+** Generate "Explanation" text for a WhereTerm.
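+**
+** The three-character prefix written ahead of each expression encodes the
+** term's attributes: 'V' if the term is TERM_VIRTUAL, 'E' if it belongs to
+** an equivalence class (WO_EQUIV), and 'L' if it carries the EP_FromJoin
+** property (it originated in the ON or USING clause of a join).  Unused
+** positions remain '.'.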
+*/
+static void whereExplainTerm(Vdbe *v, WhereTerm *pTerm){
+ char zType[4];
+ memcpy(zType, "...", 4);
+ if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V';
+ if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E';
+ if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L';
+ sqlite3ExplainPrintf(v, "%s ", zType);
+ sqlite3ExplainExpr(v, pTerm->pExpr);
+}
+#endif /* WHERETRACE_ENABLED && SQLITE_ENABLE_TREE_EXPLAIN */
+
#ifdef WHERETRACE_ENABLED
/*
@@ -123025,8 +111941,8 @@ static void whereLoopPrint(WhereLoop *p, WhereClause *pWC){
sqlite3DebugPrintf(" %12s",
pItem->zAlias ? pItem->zAlias : pTab->zName);
if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
- const char *zName;
- if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
+ const char *zName;
+ if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
if( strncmp(zName, "sqlite_autoindex_", 17)==0 ){
int i = sqlite3Strlen30(zName) - 1;
while( zName[i]!='_' ) i--;
@@ -123047,18 +111963,29 @@ static void whereLoopPrint(WhereLoop *p, WhereClause *pWC){
sqlite3DebugPrintf(" %-19s", z);
sqlite3_free(z);
}
- if( p->wsFlags & WHERE_SKIPSCAN ){
- sqlite3DebugPrintf(" f %05x %d-%d", p->wsFlags, p->nLTerm,p->nSkip);
- }else{
- sqlite3DebugPrintf(" f %05x N %d", p->wsFlags, p->nLTerm);
- }
+ sqlite3DebugPrintf(" f %04x N %d", p->wsFlags, p->nLTerm);
sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut);
- if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){
+#ifdef SQLITE_ENABLE_TREE_EXPLAIN
+ /* If the 0x100 bit of wheretracing is set, then show all of the constraint
+ ** expressions in the WhereLoop.aLTerm[] array.
+ */
+ if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){ /* WHERETRACE 0x100 */
int i;
+ Vdbe *v = pWInfo->pParse->pVdbe;
+ sqlite3ExplainBegin(v);
for(i=0; i<p->nLTerm; i++){
- whereTermPrint(p->aLTerm[i], i);
+ WhereTerm *pTerm = p->aLTerm[i];
+ if( pTerm==0 ) continue;
+ sqlite3ExplainPrintf(v, " (%d) #%-2d ", i+1, (int)(pTerm-pWC->a));
+ sqlite3ExplainPush(v);
+ whereExplainTerm(v, pTerm);
+ sqlite3ExplainPop(v);
+ sqlite3ExplainNL(v);
}
+ sqlite3ExplainFinish(v);
+ sqlite3DebugPrintf("%s", sqlite3VdbeExplanation(v));
}
+#endif
}
#endif
@@ -123084,6 +112011,7 @@ static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){
p->u.vtab.idxStr = 0;
}else if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 && p->u.btree.pIndex!=0 ){
sqlite3DbFree(db, p->u.btree.pIndex->zColAff);
+ sqlite3KeyInfoUnref(p->u.btree.pIndex->pKeyInfo);
sqlite3DbFree(db, p->u.btree.pIndex);
p->u.btree.pIndex = 0;
}
@@ -123147,14 +112075,7 @@ static void whereLoopDelete(sqlite3 *db, WhereLoop *p){
*/
static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
if( ALWAYS(pWInfo) ){
- int i;
- for(i=0; i<pWInfo->nLevel; i++){
- WhereLevel *pLevel = &pWInfo->a[i];
- if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE) ){
- sqlite3DbFree(db, pLevel->u.in.aInLoop);
- }
- }
- sqlite3WhereClauseClear(&pWInfo->sWC);
+ whereClauseClear(&pWInfo->sWC);
while( pWInfo->pLoops ){
WhereLoop *p = pWInfo->pLoops;
pWInfo->pLoops = p->pNextLoop;
@@ -123165,160 +112086,6 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
}
/*
-** Return TRUE if all of the following are true:
-**
-** (1) X has the same or lower cost than Y
-** (2) X is a proper subset of Y
-** (3) X skips at least as many columns as Y
-**
-** By "proper subset" we mean that X uses fewer WHERE clause terms
-** than Y and that every WHERE clause term used by X is also used
-** by Y.
-**
-** If X is a proper subset of Y then Y is a better choice and ought
-** to have a lower cost. This routine returns TRUE when that cost
-** relationship is inverted and needs to be adjusted. The third rule
-** was added because if X uses skip-scan less than Y it still might
-** deserve a lower cost even if it is a proper subset of Y.
-*/
-static int whereLoopCheaperProperSubset(
- const WhereLoop *pX, /* First WhereLoop to compare */
- const WhereLoop *pY /* Compare against this WhereLoop */
-){
- int i, j;
- if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){
- return 0; /* X is not a subset of Y */
- }
- if( pY->nSkip > pX->nSkip ) return 0;
- if( pX->rRun >= pY->rRun ){
- if( pX->rRun > pY->rRun ) return 0; /* X costs more than Y */
- if( pX->nOut > pY->nOut ) return 0; /* X costs more than Y */
- }
- for(i=pX->nLTerm-1; i>=0; i--){
- if( pX->aLTerm[i]==0 ) continue;
- for(j=pY->nLTerm-1; j>=0; j--){
- if( pY->aLTerm[j]==pX->aLTerm[i] ) break;
- }
- if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */
- }
- return 1; /* All conditions meet */
-}
-
-/*
-** Try to adjust the cost of WhereLoop pTemplate upwards or downwards so
-** that:
-**
-** (1) pTemplate costs less than any other WhereLoops that are a proper
-** subset of pTemplate
-**
-** (2) pTemplate costs more than any other WhereLoops for which pTemplate
-** is a proper subset.
-**
-** To say "WhereLoop X is a proper subset of Y" means that X uses fewer
-** WHERE clause terms than Y and that every WHERE clause term used by X is
-** also used by Y.
-*/
-static void whereLoopAdjustCost(const WhereLoop *p, WhereLoop *pTemplate){
- if( (pTemplate->wsFlags & WHERE_INDEXED)==0 ) return;
- for(; p; p=p->pNextLoop){
- if( p->iTab!=pTemplate->iTab ) continue;
- if( (p->wsFlags & WHERE_INDEXED)==0 ) continue;
- if( whereLoopCheaperProperSubset(p, pTemplate) ){
- /* Adjust pTemplate cost downward so that it is cheaper than its
- ** subset p. */
- WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n",
- pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut-1));
- pTemplate->rRun = p->rRun;
- pTemplate->nOut = p->nOut - 1;
- }else if( whereLoopCheaperProperSubset(pTemplate, p) ){
- /* Adjust pTemplate cost upward so that it is costlier than p since
- ** pTemplate is a proper subset of p */
- WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n",
- pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut+1));
- pTemplate->rRun = p->rRun;
- pTemplate->nOut = p->nOut + 1;
- }
- }
-}
-
-/*
-** Search the list of WhereLoops in *ppPrev looking for one that can be
-** supplanted by pTemplate.
-**
-** Return NULL if the WhereLoop list contains an entry that can supplant
-** pTemplate, in other words if pTemplate does not belong on the list.
-**
-** If pX is a WhereLoop that pTemplate can supplant, then return the
-** link that points to pX.
-**
-** If pTemplate cannot supplant any existing element of the list but needs
-** to be added to the list, then return a pointer to the tail of the list.
-*/
-static WhereLoop **whereLoopFindLesser(
- WhereLoop **ppPrev,
- const WhereLoop *pTemplate
-){
- WhereLoop *p;
- for(p=(*ppPrev); p; ppPrev=&p->pNextLoop, p=*ppPrev){
- if( p->iTab!=pTemplate->iTab || p->iSortIdx!=pTemplate->iSortIdx ){
- /* If either the iTab or iSortIdx values for two WhereLoop are different
- ** then those WhereLoops need to be considered separately. Neither is
- ** a candidate to replace the other. */
- continue;
- }
- /* In the current implementation, the rSetup value is either zero
- ** or the cost of building an automatic index (NlogN) and the NlogN
- ** is the same for compatible WhereLoops. */
- assert( p->rSetup==0 || pTemplate->rSetup==0
- || p->rSetup==pTemplate->rSetup );
-
- /* whereLoopAddBtree() always generates and inserts the automatic index
- ** case first. Hence compatible candidate WhereLoops never have a larger
- ** rSetup. Call this SETUP-INVARIANT */
- assert( p->rSetup>=pTemplate->rSetup );
-
-    /* Any loop using an application-defined index (or PRIMARY KEY or
- ** UNIQUE constraint) with one or more == constraints is better
- ** than an automatic index. Unless it is a skip-scan. */
- if( (p->wsFlags & WHERE_AUTO_INDEX)!=0
- && (pTemplate->nSkip)==0
- && (pTemplate->wsFlags & WHERE_INDEXED)!=0
- && (pTemplate->wsFlags & WHERE_COLUMN_EQ)!=0
- && (p->prereq & pTemplate->prereq)==pTemplate->prereq
- ){
- break;
- }
-
- /* If existing WhereLoop p is better than pTemplate, pTemplate can be
- ** discarded. WhereLoop p is better if:
- ** (1) p has no more dependencies than pTemplate, and
- ** (2) p has an equal or lower cost than pTemplate
- */
- if( (p->prereq & pTemplate->prereq)==p->prereq /* (1) */
- && p->rSetup<=pTemplate->rSetup /* (2a) */
- && p->rRun<=pTemplate->rRun /* (2b) */
- && p->nOut<=pTemplate->nOut /* (2c) */
- ){
- return 0; /* Discard pTemplate */
- }
-
- /* If pTemplate is always better than p, then cause p to be overwritten
- ** with pTemplate. pTemplate is better than p if:
-    ** (1) pTemplate has no more dependencies than p, and
- ** (2) pTemplate has an equal or lower cost than p.
- */
- if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */
- && p->rRun>=pTemplate->rRun /* (2a) */
- && p->nOut>=pTemplate->nOut /* (2b) */
- ){
- assert( p->rSetup>=pTemplate->rSetup ); /* SETUP-INVARIANT above */
- break; /* Cause p to be overwritten by pTemplate */
- }
- }
- return ppPrev;
-}
-
-/*
** Insert or replace a WhereLoop entry using the template supplied.
**
** An existing WhereLoop entry might be overwritten if the new template
@@ -123327,23 +112094,25 @@ static WhereLoop **whereLoopFindLesser(
** fewer dependencies than the template. Otherwise a new WhereLoop is
** added based on the template.
**
-** If pBuilder->pOrSet is not NULL then we care about only the
+** If pBuilder->pOrSet is not NULL then we only care about the
** prerequisites and rRun and nOut costs of the N best loops. That
** information is gathered in the pBuilder->pOrSet object. This special
** processing mode is used only for OR clause processing.
**
** When accumulating multiple loops (when pBuilder->pOrSet is NULL) we
** still might overwrite similar loops with the new template if the
-** new template is better. Loops may be overwritten if the following
+** template is better. Loops may be overwritten if the following
** conditions are met:
**
** (1) They have the same iTab.
** (2) They have the same iSortIdx.
** (3) The template has same or fewer dependencies than the current loop
** (4) The template has the same or lower cost than the current loop
+** (5) The template uses more terms of the same index but has no additional
+** dependencies
*/
static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
- WhereLoop **ppPrev, *p;
+ WhereLoop **ppPrev, *p, *pNext = 0;
WhereInfo *pWInfo = pBuilder->pWInfo;
sqlite3 *db = pWInfo->pParse->db;
@@ -123351,40 +112120,79 @@ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
** and prereqs.
*/
if( pBuilder->pOrSet!=0 ){
- if( pTemplate->nLTerm ){
#if WHERETRACE_ENABLED
- u16 n = pBuilder->pOrSet->n;
- int x =
+ u16 n = pBuilder->pOrSet->n;
+ int x =
#endif
- whereOrInsert(pBuilder->pOrSet, pTemplate->prereq, pTemplate->rRun,
+ whereOrInsert(pBuilder->pOrSet, pTemplate->prereq, pTemplate->rRun,
pTemplate->nOut);
#if WHERETRACE_ENABLED /* 0x8 */
- if( sqlite3WhereTrace & 0x8 ){
- sqlite3DebugPrintf(x?" or-%d: ":" or-X: ", n);
- whereLoopPrint(pTemplate, pBuilder->pWC);
- }
-#endif
+ if( sqlite3WhereTrace & 0x8 ){
+ sqlite3DebugPrintf(x?" or-%d: ":" or-X: ", n);
+ whereLoopPrint(pTemplate, pBuilder->pWC);
}
+#endif
return SQLITE_OK;
}
- /* Look for an existing WhereLoop to replace with pTemplate
+ /* Search for an existing WhereLoop to overwrite, or which takes
+ ** priority over pTemplate.
*/
- whereLoopAdjustCost(pWInfo->pLoops, pTemplate);
- ppPrev = whereLoopFindLesser(&pWInfo->pLoops, pTemplate);
+ for(ppPrev=&pWInfo->pLoops, p=*ppPrev; p; ppPrev=&p->pNextLoop, p=*ppPrev){
+ if( p->iTab!=pTemplate->iTab || p->iSortIdx!=pTemplate->iSortIdx ){
+ /* If either the iTab or iSortIdx values for two WhereLoop are different
+ ** then those WhereLoops need to be considered separately. Neither is
+ ** a candidate to replace the other. */
+ continue;
+ }
+ /* In the current implementation, the rSetup value is either zero
+ ** or the cost of building an automatic index (NlogN) and the NlogN
+ ** is the same for compatible WhereLoops. */
+ assert( p->rSetup==0 || pTemplate->rSetup==0
+ || p->rSetup==pTemplate->rSetup );
- if( ppPrev==0 ){
- /* There already exists a WhereLoop on the list that is better
- ** than pTemplate, so just ignore pTemplate */
-#if WHERETRACE_ENABLED /* 0x8 */
- if( sqlite3WhereTrace & 0x8 ){
- sqlite3DebugPrintf(" skip: ");
- whereLoopPrint(pTemplate, pBuilder->pWC);
+ /* whereLoopAddBtree() always generates and inserts the automatic index
+ ** case first. Hence compatible candidate WhereLoops never have a larger
+ ** rSetup. Call this SETUP-INVARIANT */
+ assert( p->rSetup>=pTemplate->rSetup );
+
+ if( (p->prereq & pTemplate->prereq)==p->prereq
+ && p->rSetup<=pTemplate->rSetup
+ && p->rRun<=pTemplate->rRun
+ && p->nOut<=pTemplate->nOut
+ ){
+ /* This branch taken when p is equal or better than pTemplate in
+ ** all of (1) dependencies (2) setup-cost, (3) run-cost, and
+ ** (4) number of output rows. */
+ assert( p->rSetup==pTemplate->rSetup );
+ if( p->prereq==pTemplate->prereq
+ && p->nLTerm<pTemplate->nLTerm
+ && (p->wsFlags & pTemplate->wsFlags & WHERE_INDEXED)!=0
+ && (p->u.btree.pIndex==pTemplate->u.btree.pIndex
+ || pTemplate->rRun+p->nLTerm<=p->rRun+pTemplate->nLTerm)
+ ){
+        /* Overwrite an existing WhereLoop with a similar one that uses
+ ** more terms of the index */
+ pNext = p->pNextLoop;
+ break;
+ }else{
+ /* pTemplate is not helpful.
+ ** Return without changing or adding anything */
+ goto whereLoopInsert_noop;
+ }
+ }
+ if( (p->prereq & pTemplate->prereq)==pTemplate->prereq
+ && p->rRun>=pTemplate->rRun
+ && p->nOut>=pTemplate->nOut
+ ){
+ /* Overwrite an existing WhereLoop with a better one: one that is
+ ** better at one of (1) dependencies, (2) setup-cost, (3) run-cost
+ ** or (4) number of output rows, and is no worse in any of those
+ ** categories. */
+ assert( p->rSetup>=pTemplate->rSetup ); /* SETUP-INVARIANT above */
+ pNext = p->pNextLoop;
+ break;
}
-#endif
- return SQLITE_OK;
- }else{
- p = *ppPrev;
}
/* If we reach this point it means that either p[] should be overwritten
@@ -123394,41 +112202,21 @@ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
#if WHERETRACE_ENABLED /* 0x8 */
if( sqlite3WhereTrace & 0x8 ){
if( p!=0 ){
- sqlite3DebugPrintf("replace: ");
+ sqlite3DebugPrintf("ins-del: ");
whereLoopPrint(p, pBuilder->pWC);
}
- sqlite3DebugPrintf(" add: ");
+ sqlite3DebugPrintf("ins-new: ");
whereLoopPrint(pTemplate, pBuilder->pWC);
}
#endif
if( p==0 ){
- /* Allocate a new WhereLoop to add to the end of the list */
- *ppPrev = p = sqlite3DbMallocRaw(db, sizeof(WhereLoop));
+ p = sqlite3DbMallocRaw(db, sizeof(WhereLoop));
if( p==0 ) return SQLITE_NOMEM;
whereLoopInit(p);
- p->pNextLoop = 0;
- }else{
- /* We will be overwriting WhereLoop p[]. But before we do, first
- ** go through the rest of the list and delete any other entries besides
- ** p[] that are also supplated by pTemplate */
- WhereLoop **ppTail = &p->pNextLoop;
- WhereLoop *pToDel;
- while( *ppTail ){
- ppTail = whereLoopFindLesser(ppTail, pTemplate);
- if( ppTail==0 ) break;
- pToDel = *ppTail;
- if( pToDel==0 ) break;
- *ppTail = pToDel->pNextLoop;
-#if WHERETRACE_ENABLED /* 0x8 */
- if( sqlite3WhereTrace & 0x8 ){
- sqlite3DebugPrintf(" delete: ");
- whereLoopPrint(pToDel, pBuilder->pWC);
- }
-#endif
- whereLoopDelete(db, pToDel);
- }
}
whereLoopXfer(db, p, pTemplate);
+ p->pNextLoop = pNext;
+ *ppPrev = p;
if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
Index *pIndex = p->u.btree.pIndex;
if( pIndex && pIndex->tnum==0 ){
@@ -123436,48 +112224,35 @@ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
}
}
return SQLITE_OK;
+
+ /* Jump here if the insert is a no-op */
+whereLoopInsert_noop:
+#if WHERETRACE_ENABLED /* 0x8 */
+ if( sqlite3WhereTrace & 0x8 ){
+ sqlite3DebugPrintf("ins-noop: ");
+ whereLoopPrint(pTemplate, pBuilder->pWC);
+ }
+#endif
+ return SQLITE_OK;
}
/*
** Adjust the WhereLoop.nOut value downward to account for terms of the
** WHERE clause that reference the loop but which are not used by an
** index.
-*
-** For every WHERE clause term that is not used by the index
-** and which has a truth probability assigned by one of the likelihood(),
-** likely(), or unlikely() SQL functions, reduce the estimated number
-** of output rows by the probability specified.
-**
-** TUNING: For every WHERE clause term that is not used by the index
-** and which does not have an assigned truth probability, heuristics
-** described below are used to try to estimate the truth probability.
-** TODO --> Perhaps this is something that could be improved by better
-** table statistics.
-**
-** Heuristic 1: Estimate the truth probability as 93.75%. The 93.75%
-** value corresponds to -1 in LogEst notation, so this means decrement
-** the WhereLoop.nOut field for every such WHERE clause term.
-**
-** Heuristic 2: If there exists one or more WHERE clause terms of the
-** form "x==EXPR" and EXPR is not a constant 0 or 1, then make sure the
-** final output row estimate is no greater than 1/4 of the total number
-** of rows in the table. In other words, assume that x==EXPR will filter
-** out at least 3 out of 4 rows. If EXPR is -1 or 0 or 1, then maybe the
-** "x" column is boolean or else -1 or 0 or 1 is a common default value
-** on the "x" column and so in that case only cap the output row estimate
-** at 1/2 instead of 1/4.
-*/
-static void whereLoopOutputAdjust(
- WhereClause *pWC, /* The WHERE clause */
- WhereLoop *pLoop, /* The loop to adjust downward */
- LogEst nRow /* Number of rows in the entire table */
-){
+**
+** In the current implementation, the first extra WHERE clause term reduces
+** the number of output rows by a factor of 10 and each additional term
+** reduces the number of output rows by sqrt(2).
+*/
+static void whereLoopOutputAdjust(WhereClause *pWC, WhereLoop *pLoop){
WhereTerm *pTerm, *pX;
Bitmask notAllowed = ~(pLoop->prereq|pLoop->maskSelf);
- int i, j, k;
- LogEst iReduce = 0; /* pLoop->nOut should not exceed nRow-iReduce */
+ int i, j;
- assert( (pLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
+ if( !OptimizationEnabled(pWC->pWInfo->pParse->db, SQLITE_AdjustOutEst) ){
+ return;
+ }
for(i=pWC->nTerm, pTerm=pWC->a; i>0; i--, pTerm++){
if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) break;
if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue;
@@ -123488,49 +112263,13 @@ static void whereLoopOutputAdjust(
if( pX==pTerm ) break;
if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break;
}
- if( j<0 ){
- if( pTerm->truthProb<=0 ){
- /* If a truth probability is specified using the likelihood() hints,
- ** then use the probability provided by the application. */
- pLoop->nOut += pTerm->truthProb;
- }else{
- /* In the absence of explicit truth probabilities, use heuristics to
- ** guess a reasonable truth probability. */
- pLoop->nOut--;
- if( pTerm->eOperator&(WO_EQ|WO_IS) ){
- Expr *pRight = pTerm->pExpr->pRight;
- testcase( pTerm->pExpr->op==TK_IS );
- if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){
- k = 10;
- }else{
- k = 20;
- }
- if( iReduce<k ) iReduce = k;
- }
- }
- }
+ if( j<0 ) pLoop->nOut += pTerm->truthProb;
}
- if( pLoop->nOut > nRow-iReduce ) pLoop->nOut = nRow - iReduce;
}
/*
-** Adjust the cost C by the costMult factor T. This only occurs if
-** compiled with -DSQLITE_ENABLE_COSTMULT
-*/
-#ifdef SQLITE_ENABLE_COSTMULT
-# define ApplyCostMultiplier(C,T) C += T
-#else
-# define ApplyCostMultiplier(C,T)
-#endif
-
-/*
-** We have so far matched pBuilder->pNew->u.btree.nEq terms of the
-** index pIndex. Try to match one more.
-**
-** When this function is called, pBuilder->pNew->nOut contains the
-** number of rows expected to be visited by filtering using the nEq
-** terms only. If it is modified, this value is restored before this
-** function returns.
+** We have so far matched pBuilder->pNew->u.btree.nEq terms of the index pIndex.
+** Try to match one more.
**
** If pProbe->tnum==0, that means pIndex is a fake index used for the
** INTEGER PRIMARY KEY.
@@ -123551,11 +112290,12 @@ static int whereLoopAddBtreeIndex(
Bitmask saved_prereq; /* Original value of pNew->prereq */
u16 saved_nLTerm; /* Original value of pNew->nLTerm */
u16 saved_nEq; /* Original value of pNew->u.btree.nEq */
- u16 saved_nSkip; /* Original value of pNew->nSkip */
+ u16 saved_nSkip; /* Original value of pNew->u.btree.nSkip */
u32 saved_wsFlags; /* Original value of pNew->wsFlags */
LogEst saved_nOut; /* Original value of pNew->nOut */
+ int iCol; /* Index of the column in the table */
int rc = SQLITE_OK; /* Return code */
- LogEst rSize; /* Number of rows in the table */
+ LogEst nRowEst; /* Estimated index selectivity */
LogEst rLogSize; /* Logarithm of table size */
WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */
@@ -123566,44 +112306,65 @@ static int whereLoopAddBtreeIndex(
assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 );
if( pNew->wsFlags & WHERE_BTM_LIMIT ){
opMask = WO_LT|WO_LE;
- }else if( /*pProbe->tnum<=0 ||*/ (pSrc->fg.jointype & JT_LEFT)!=0 ){
+ }else if( pProbe->tnum<=0 || (pSrc->jointype & JT_LEFT)!=0 ){
opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE;
}else{
- opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
+ opMask = WO_EQ|WO_IN|WO_ISNULL|WO_GT|WO_GE|WO_LT|WO_LE;
}
if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
- assert( pNew->u.btree.nEq<pProbe->nColumn );
-
+ assert( pNew->u.btree.nEq<=pProbe->nKeyCol );
+ if( pNew->u.btree.nEq < pProbe->nKeyCol ){
+ iCol = pProbe->aiColumn[pNew->u.btree.nEq];
+ nRowEst = sqlite3LogEst(pProbe->aiRowEst[pNew->u.btree.nEq+1]);
+ if( nRowEst==0 && pProbe->onError==OE_None ) nRowEst = 1;
+ }else{
+ iCol = -1;
+ nRowEst = 0;
+ }
+ pTerm = whereScanInit(&scan, pBuilder->pWC, pSrc->iCursor, iCol,
+ opMask, pProbe);
saved_nEq = pNew->u.btree.nEq;
- saved_nSkip = pNew->nSkip;
+ saved_nSkip = pNew->u.btree.nSkip;
saved_nLTerm = pNew->nLTerm;
saved_wsFlags = pNew->wsFlags;
saved_prereq = pNew->prereq;
saved_nOut = pNew->nOut;
- pTerm = whereScanInit(&scan, pBuilder->pWC, pSrc->iCursor, saved_nEq,
- opMask, pProbe);
pNew->rSetup = 0;
- rSize = pProbe->aiRowLogEst[0];
- rLogSize = estLog(rSize);
+ rLogSize = estLog(sqlite3LogEst(pProbe->aiRowEst[0]));
+
+ /* Consider using a skip-scan if there are no WHERE clause constraints
+ ** available for the left-most terms of the index, and if the average
+ ** number of repeats in the left-most terms is at least 18. The magic
+ ** number 18 was found by experimentation to be the payoff point where
+  ** skip-scan becomes faster than a full-scan.
+ */
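+  /* Illustrative note (hypothetical schema): with an index on t(a,b) and a
+  ** WHERE clause that constrains only b, the block below adds a skip-scan
+  ** WhereLoop that treats "a" as an extra equality column with no
+  ** constraint (an aLTerm[] slot of 0).  Conceptually the b-tree is then
+  ** searched once for each distinct value of "a"; nIter estimates that
+  ** repeat count as total rows divided by rows-per-distinct-prefix and is
+  ** passed down as the nInMul multiplier. */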
+ if( pTerm==0
+ && saved_nEq==saved_nSkip
+ && saved_nEq+1<pProbe->nKeyCol
+ && pProbe->aiRowEst[saved_nEq+1]>=18 /* TUNING: Minimum for skip-scan */
+ ){
+ LogEst nIter;
+ pNew->u.btree.nEq++;
+ pNew->u.btree.nSkip++;
+ pNew->aLTerm[pNew->nLTerm++] = 0;
+ pNew->wsFlags |= WHERE_SKIPSCAN;
+ nIter = sqlite3LogEst(pProbe->aiRowEst[0]/pProbe->aiRowEst[saved_nEq+1]);
+ whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nIter);
+ }
for(; rc==SQLITE_OK && pTerm!=0; pTerm = whereScanNext(&scan)){
- u16 eOp = pTerm->eOperator; /* Shorthand for pTerm->eOperator */
- LogEst rCostIdx;
- LogEst nOutUnadjusted; /* nOut before IN() and WHERE adjustments */
int nIn = 0;
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
int nRecValid = pBuilder->nRecValid;
#endif
- if( (eOp==WO_ISNULL || (pTerm->wtFlags&TERM_VNULL)!=0)
- && indexColumnNotNull(pProbe, saved_nEq)
+ if( (pTerm->eOperator==WO_ISNULL || (pTerm->wtFlags&TERM_VNULL)!=0)
+ && (iCol<0 || pSrc->pTab->aCol[iCol].notNull)
){
continue; /* ignore IS [NOT] NULL constraints on NOT NULL columns */
}
if( pTerm->prereqRight & pNew->maskSelf ) continue;
- /* Do not allow the upper bound of a LIKE optimization range constraint
- ** to mix with a lower range bound from some other source */
- if( pTerm->wtFlags & TERM_LIKEOPT && pTerm->eOperator==WO_LT ) continue;
+ assert( pNew->nOut==saved_nOut );
pNew->wsFlags = saved_wsFlags;
pNew->u.btree.nEq = saved_nEq;
@@ -123611,14 +112372,8 @@ static int whereLoopAddBtreeIndex(
if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
pNew->aLTerm[pNew->nLTerm++] = pTerm;
pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf;
-
- assert( nInMul==0
- || (pNew->wsFlags & WHERE_COLUMN_NULL)!=0
- || (pNew->wsFlags & WHERE_COLUMN_IN)!=0
- || (pNew->wsFlags & WHERE_SKIPSCAN)!=0
- );
-
- if( eOp & WO_IN ){
+ pNew->rRun = rLogSize; /* Baseline cost is log2(N). Adjustments below */
+ if( pTerm->eOperator & WO_IN ){
Expr *pExpr = pTerm->pExpr;
pNew->wsFlags |= WHERE_COLUMN_IN;
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
@@ -123628,135 +112383,84 @@ static int whereLoopAddBtreeIndex(
/* "x IN (value, value, ...)" */
nIn = sqlite3LogEst(pExpr->x.pList->nExpr);
}
- assert( nIn>0 ); /* RHS always has 2 or more terms... The parser
- ** changes "x IN (?)" into "x=?". */
-
- }else if( eOp & (WO_EQ|WO_IS) ){
- int iCol = pProbe->aiColumn[saved_nEq];
+ pNew->rRun += nIn;
+ pNew->u.btree.nEq++;
+ pNew->nOut = nRowEst + nInMul + nIn;
+ }else if( pTerm->eOperator & (WO_EQ) ){
+ assert(
+ (pNew->wsFlags & (WHERE_COLUMN_NULL|WHERE_COLUMN_IN|WHERE_SKIPSCAN))!=0
+ || nInMul==0
+ );
pNew->wsFlags |= WHERE_COLUMN_EQ;
- assert( saved_nEq==pNew->u.btree.nEq );
- if( iCol==XN_ROWID
- || (iCol>0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1)
+ if( iCol<0
+ || (pProbe->onError!=OE_None && nInMul==0
+ && pNew->u.btree.nEq==pProbe->nKeyCol-1)
){
- if( iCol>=0 && pProbe->uniqNotNull==0 ){
- pNew->wsFlags |= WHERE_UNQ_WANTED;
- }else{
- pNew->wsFlags |= WHERE_ONEROW;
- }
+ assert( (pNew->wsFlags & WHERE_COLUMN_IN)==0 || iCol<0 );
+ pNew->wsFlags |= WHERE_ONEROW;
}
- }else if( eOp & WO_ISNULL ){
+ pNew->u.btree.nEq++;
+ pNew->nOut = nRowEst + nInMul;
+ }else if( pTerm->eOperator & (WO_ISNULL) ){
pNew->wsFlags |= WHERE_COLUMN_NULL;
- }else if( eOp & (WO_GT|WO_GE) ){
- testcase( eOp & WO_GT );
- testcase( eOp & WO_GE );
+ pNew->u.btree.nEq++;
+ /* TUNING: IS NULL selects 2 rows */
+ nIn = 10; assert( 10==sqlite3LogEst(2) );
+ pNew->nOut = nRowEst + nInMul + nIn;
+ }else if( pTerm->eOperator & (WO_GT|WO_GE) ){
+ testcase( pTerm->eOperator & WO_GT );
+ testcase( pTerm->eOperator & WO_GE );
pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT;
pBtm = pTerm;
pTop = 0;
- if( pTerm->wtFlags & TERM_LIKEOPT ){
-        /* Range constraints that come from the LIKE optimization are
- ** always used in pairs. */
- pTop = &pTerm[1];
- assert( (pTop-(pTerm->pWC->a))<pTerm->pWC->nTerm );
- assert( pTop->wtFlags & TERM_LIKEOPT );
- assert( pTop->eOperator==WO_LT );
- if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
- pNew->aLTerm[pNew->nLTerm++] = pTop;
- pNew->wsFlags |= WHERE_TOP_LIMIT;
- }
}else{
- assert( eOp & (WO_LT|WO_LE) );
- testcase( eOp & WO_LT );
- testcase( eOp & WO_LE );
+ assert( pTerm->eOperator & (WO_LT|WO_LE) );
+ testcase( pTerm->eOperator & WO_LT );
+ testcase( pTerm->eOperator & WO_LE );
pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT;
pTop = pTerm;
pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ?
pNew->aLTerm[pNew->nLTerm-2] : 0;
}
-
- /* At this point pNew->nOut is set to the number of rows expected to
- ** be visited by the index scan before considering term pTerm, or the
- ** values of nIn and nInMul. In other words, assuming that all
- ** "x IN(...)" terms are replaced with "x = ?". This block updates
- ** the value of pNew->nOut to account for pTerm (but not nIn/nInMul). */
- assert( pNew->nOut==saved_nOut );
if( pNew->wsFlags & WHERE_COLUMN_RANGE ){
- /* Adjust nOut using stat3/stat4 data. Or, if there is no stat3/stat4
- ** data, using some other estimate. */
- whereRangeScanEst(pParse, pBuilder, pBtm, pTop, pNew);
- }else{
- int nEq = ++pNew->u.btree.nEq;
- assert( eOp & (WO_ISNULL|WO_EQ|WO_IN|WO_IS) );
-
+ /* Adjust nOut and rRun for STAT3 range values */
assert( pNew->nOut==saved_nOut );
- if( pTerm->truthProb<=0 && pProbe->aiColumn[saved_nEq]>=0 ){
- assert( (eOp & WO_IN) || nIn==0 );
- testcase( eOp & WO_IN );
- pNew->nOut += pTerm->truthProb;
- pNew->nOut -= nIn;
- }else{
+ whereRangeScanEst(pParse, pBuilder, pBtm, pTop, pNew);
+ }
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- tRowcnt nOut = 0;
- if( nInMul==0
- && pProbe->nSample
- && pNew->u.btree.nEq<=pProbe->nSampleCol
- && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect))
- ){
- Expr *pExpr = pTerm->pExpr;
- if( (eOp & (WO_EQ|WO_ISNULL|WO_IS))!=0 ){
- testcase( eOp & WO_EQ );
- testcase( eOp & WO_IS );
- testcase( eOp & WO_ISNULL );
- rc = whereEqualScanEst(pParse, pBuilder, pExpr->pRight, &nOut);
- }else{
- rc = whereInScanEst(pParse, pBuilder, pExpr->x.pList, &nOut);
- }
- if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
- if( rc!=SQLITE_OK ) break; /* Jump out of the pTerm loop */
- if( nOut ){
- pNew->nOut = sqlite3LogEst(nOut);
- if( pNew->nOut>saved_nOut ) pNew->nOut = saved_nOut;
- pNew->nOut -= nIn;
- }
- }
- if( nOut==0 )
-#endif
- {
- pNew->nOut += (pProbe->aiRowLogEst[nEq] - pProbe->aiRowLogEst[nEq-1]);
- if( eOp & WO_ISNULL ){
- /* TUNING: If there is no likelihood() value, assume that a
- ** "col IS NULL" expression matches twice as many rows
- ** as (col=?). */
- pNew->nOut += 10;
- }
- }
+ if( nInMul==0
+ && pProbe->nSample
+ && pNew->u.btree.nEq<=pProbe->nSampleCol
+ && OptimizationEnabled(db, SQLITE_Stat3)
+ ){
+ Expr *pExpr = pTerm->pExpr;
+ tRowcnt nOut = 0;
+ if( (pTerm->eOperator & (WO_EQ|WO_ISNULL))!=0 ){
+ testcase( pTerm->eOperator & WO_EQ );
+ testcase( pTerm->eOperator & WO_ISNULL );
+ rc = whereEqualScanEst(pParse, pBuilder, pExpr->pRight, &nOut);
+ }else if( (pTerm->eOperator & WO_IN)
+ && !ExprHasProperty(pExpr, EP_xIsSelect) ){
+ rc = whereInScanEst(pParse, pBuilder, pExpr->x.pList, &nOut);
+ }
+ assert( nOut==0 || rc==SQLITE_OK );
+ if( nOut ){
+ pNew->nOut = sqlite3LogEst(nOut);
+ if( pNew->nOut>saved_nOut ) pNew->nOut = saved_nOut;
}
}
-
- /* Set rCostIdx to the cost of visiting selected rows in index. Add
- ** it to pNew->rRun, which is currently set to the cost of the index
- ** seek only. Then, if this is a non-covering index, add the cost of
- ** visiting the rows in the main table. */
- rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow;
- pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx);
+#endif
if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){
- pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16);
+ /* Each row involves a step of the index, then a binary search of
+ ** the main table */
+ pNew->rRun = sqlite3LogEstAdd(pNew->rRun,rLogSize>27 ? rLogSize-17 : 10);
}
- ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult);
-
- nOutUnadjusted = pNew->nOut;
- pNew->rRun += nInMul + nIn;
- pNew->nOut += nInMul + nIn;
- whereLoopOutputAdjust(pBuilder->pWC, pNew, rSize);
+ /* Step cost for each output row */
+ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut);
+ whereLoopOutputAdjust(pBuilder->pWC, pNew);
rc = whereLoopInsert(pBuilder, pNew);
-
- if( pNew->wsFlags & WHERE_COLUMN_RANGE ){
- pNew->nOut = saved_nOut;
- }else{
- pNew->nOut = nOutUnadjusted;
- }
-
if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0
- && pNew->u.btree.nEq<pProbe->nColumn
+ && pNew->u.btree.nEq<(pProbe->nKeyCol + (pProbe->zName!=0))
){
whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn);
}
@@ -123767,45 +112471,10 @@ static int whereLoopAddBtreeIndex(
}
pNew->prereq = saved_prereq;
pNew->u.btree.nEq = saved_nEq;
- pNew->nSkip = saved_nSkip;
+ pNew->u.btree.nSkip = saved_nSkip;
pNew->wsFlags = saved_wsFlags;
pNew->nOut = saved_nOut;
pNew->nLTerm = saved_nLTerm;
-
- /* Consider using a skip-scan if there are no WHERE clause constraints
- ** available for the left-most terms of the index, and if the average
- ** number of repeats in the left-most terms is at least 18.
- **
- ** The magic number 18 is selected on the basis that scanning 17 rows
- ** is almost always quicker than an index seek (even though if the index
- ** contains fewer than 2^17 rows we assume otherwise in other parts of
- ** the code). And, even if it is not, it should not be too much slower.
- ** On the other hand, the extra seeks could end up being significantly
- ** more expensive. */
- assert( 42==sqlite3LogEst(18) );
- if( saved_nEq==saved_nSkip
- && saved_nEq+1<pProbe->nKeyCol
- && pProbe->noSkipScan==0
- && pProbe->aiRowLogEst[saved_nEq+1]>=42 /* TUNING: Minimum for skip-scan */
- && (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK
- ){
- LogEst nIter;
- pNew->u.btree.nEq++;
- pNew->nSkip++;
- pNew->aLTerm[pNew->nLTerm++] = 0;
- pNew->wsFlags |= WHERE_SKIPSCAN;
- nIter = pProbe->aiRowLogEst[saved_nEq] - pProbe->aiRowLogEst[saved_nEq+1];
- pNew->nOut -= nIter;
- /* TUNING: Because uncertainties in the estimates for skip-scan queries,
- ** add a 1.375 fudge factor to make skip-scan slightly less likely. */
- nIter += 5;
- whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nIter + nInMul);
- pNew->nOut = saved_nOut;
- pNew->u.btree.nEq = saved_nEq;
- pNew->nSkip = saved_nSkip;
- pNew->wsFlags = saved_wsFlags;
- }
-
return rc;
}
@@ -123823,25 +112492,17 @@ static int indexMightHelpWithOrderBy(
int iCursor
){
ExprList *pOB;
- ExprList *aColExpr;
int ii, jj;
if( pIndex->bUnordered ) return 0;
if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0;
for(ii=0; ii<pOB->nExpr; ii++){
Expr *pExpr = sqlite3ExprSkipCollate(pOB->a[ii].pExpr);
- if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){
- if( pExpr->iColumn<0 ) return 1;
+ if( pExpr->op!=TK_COLUMN ) return 0;
+ if( pExpr->iTable==iCursor ){
for(jj=0; jj<pIndex->nKeyCol; jj++){
if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1;
}
- }else if( (aColExpr = pIndex->aColExpr)!=0 ){
- for(jj=0; jj<pIndex->nKeyCol; jj++){
- if( pIndex->aiColumn[jj]!=XN_EXPR ) continue;
- if( sqlite3ExprCompare(pExpr,aColExpr->a[jj].pExpr,iCursor)==0 ){
- return 1;
- }
- }
}
}
return 0;
@@ -123871,17 +112532,8 @@ static Bitmask columnsInIndex(Index *pIdx){
static int whereUsablePartialIndex(int iTab, WhereClause *pWC, Expr *pWhere){
int i;
WhereTerm *pTerm;
- while( pWhere->op==TK_AND ){
- if( !whereUsablePartialIndex(iTab,pWC,pWhere->pLeft) ) return 0;
- pWhere = pWhere->pRight;
- }
for(i=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
- Expr *pExpr = pTerm->pExpr;
- if( sqlite3ExprImpliesExpr(pExpr, pWhere, iTab)
- && (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->iRightJoinTable==iTab)
- ){
- return 1;
- }
+ if( sqlite3ExprImpliesExpr(pTerm->pExpr, pWhere, iTab) ) return 1;
}
return 0;
}
@@ -123890,37 +112542,6 @@ static int whereUsablePartialIndex(int iTab, WhereClause *pWC, Expr *pWhere){
** Add all WhereLoop objects for a single table of the join where the table
** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
** a b-tree table, not a virtual table.
-**
-** The costs (WhereLoop.rRun) of the b-tree loops added by this function
-** are calculated as follows:
-**
-** For a full scan, assuming the table (or index) contains nRow rows:
-**
-** cost = nRow * 3.0 // full-table scan
-** cost = nRow * K // scan of covering index
-** cost = nRow * (K+3.0) // scan of non-covering index
-**
-** where K is a value between 1.1 and 3.0 set based on the relative
-** estimated average size of the index and table records.
-**
-** For an index scan, where nVisit is the number of index rows visited
-** by the scan, and nSeek is the number of seek operations required on
-** the index b-tree:
-**
-** cost = nSeek * (log(nRow) + K * nVisit) // covering index
-** cost = nSeek * (log(nRow) + (K+3.0) * nVisit) // non-covering index
-**
-** Normally, nSeek is 1. nSeek values greater than 1 come about if the
-** WHERE clause includes "x IN (....)" terms used in place of "x=?". Or when
-** implicit "x IN (SELECT x FROM tbl)" terms are added for skip-scans.
-**
-** The estimated values (nRow, nVisit, nSeek) often contain a large amount
-** of uncertainty. For this reason, scoring is designed to pick plans that
-** "do the least harm" if the estimates are inaccurate. For example, a
-** log(nRow) factor is omitted from a non-covering index scan in order to
-** bias the scoring in favor of using an index, since the worst-case
-** performance of using an index is far better than the worst-case performance
-** of a full table scan.
*/
static int whereLoopAddBtree(
WhereLoopBuilder *pBuilder, /* WHERE clause information */
@@ -123929,7 +112550,7 @@ static int whereLoopAddBtree(
WhereInfo *pWInfo; /* WHERE analysis context */
Index *pProbe; /* An index we are evaluating */
Index sPk; /* A fake index object for the primary key */
- LogEst aiRowEstPk[2]; /* The aiRowLogEst[] value for the sPk index */
+ tRowcnt aiRowEstPk[2]; /* The aiRowEst[] value for the sPk index */
i16 aiColumnPk = -1; /* The aColumn[] value for the sPk index */
SrcList *pTabList; /* The FROM clause */
struct SrcList_item *pSrc; /* The FROM clause btree term to add */
@@ -123950,9 +112571,9 @@ static int whereLoopAddBtree(
pWC = pBuilder->pWC;
assert( !IsVirtual(pSrc->pTab) );
- if( pSrc->pIBIndex ){
+ if( pSrc->pIndex ){
/* An INDEXED BY clause specifies a particular index to use */
- pProbe = pSrc->pIBIndex;
+ pProbe = pSrc->pIndex;
}else if( !HasRowid(pTab) ){
pProbe = pTab->pIndex;
}else{
@@ -123963,35 +112584,32 @@ static int whereLoopAddBtree(
Index *pFirst; /* First of real indices on the table */
memset(&sPk, 0, sizeof(Index));
sPk.nKeyCol = 1;
- sPk.nColumn = 1;
sPk.aiColumn = &aiColumnPk;
- sPk.aiRowLogEst = aiRowEstPk;
+ sPk.aiRowEst = aiRowEstPk;
sPk.onError = OE_Replace;
sPk.pTable = pTab;
- sPk.szIdxRow = pTab->szTabRow;
- aiRowEstPk[0] = pTab->nRowLogEst;
- aiRowEstPk[1] = 0;
+ aiRowEstPk[0] = pTab->nRowEst;
+ aiRowEstPk[1] = 1;
pFirst = pSrc->pTab->pIndex;
- if( pSrc->fg.notIndexed==0 ){
+ if( pSrc->notIndexed==0 ){
/* The real indices of the table are only considered if the
** NOT INDEXED qualifier is omitted from the FROM clause */
sPk.pNext = pFirst;
}
pProbe = &sPk;
}
- rSize = pTab->nRowLogEst;
+ rSize = sqlite3LogEst(pTab->nRowEst);
rLogSize = estLog(rSize);
#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
/* Automatic indexes */
- if( !pBuilder->pOrSet /* Not part of an OR optimization */
- && (pWInfo->wctrlFlags & WHERE_NO_AUTOINDEX)==0
+ if( !pBuilder->pOrSet
&& (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0
- && pSrc->pIBIndex==0 /* Has no INDEXED BY clause */
- && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */
- && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */
- && !pSrc->fg.isCorrelated /* Not a correlated subquery */
- && !pSrc->fg.isRecursive /* Not a recursive common table expression. */
+ && pSrc->pIndex==0
+ && !pSrc->viaCoroutine
+ && !pSrc->notIndexed
+ && HasRowid(pTab)
+ && !pSrc->isCorrelated
){
/* Generate auto-index WhereLoops */
WhereTerm *pTerm;
@@ -124000,26 +112618,17 @@ static int whereLoopAddBtree(
if( pTerm->prereqRight & pNew->maskSelf ) continue;
if( termCanDriveIndex(pTerm, pSrc, 0) ){
pNew->u.btree.nEq = 1;
- pNew->nSkip = 0;
+ pNew->u.btree.nSkip = 0;
pNew->u.btree.pIndex = 0;
pNew->nLTerm = 1;
pNew->aLTerm[0] = pTerm;
/* TUNING: One-time cost for computing the automatic index is
- ** estimated to be X*N*log2(N) where N is the number of rows in
- ** the table being indexed and where X is 7 (LogEst=28) for normal
- ** tables or 1.375 (LogEst=4) for views and subqueries. The value
- ** of X is smaller for views and subqueries so that the query planner
- ** will be more aggressive about generating automatic indexes for
- ** those objects, since there is no opportunity to add schema
- ** indexes on subqueries and views. */
- pNew->rSetup = rLogSize + rSize + 4;
- if( pTab->pSelect==0 && (pTab->tabFlags & TF_Ephemeral)==0 ){
- pNew->rSetup += 24;
- }
- ApplyCostMultiplier(pNew->rSetup, pTab->costMult);
+ ** approximately 7*N*log2(N) where N is the number of rows in
+ ** the table being indexed. */
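+      /* LogEst values store approximately 10*log2(x), so multiplication
+      ** turns into addition: 7*N*log2(N) is coded below as
+      ** rLogSize + rSize + 28, with 28==sqlite3LogEst(7) as the adjacent
+      ** assert() confirms. */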
+ pNew->rSetup = rLogSize + rSize + 28; assert( 28==sqlite3LogEst(7) );
/* TUNING: Each index lookup yields 20 rows in the table. This
** is more than the usual guess of 10 rows, since we have no way
- ** of knowing how selective the index will ultimately be. It would
+      ** of knowing how selective the index will ultimately be. It would
** not be unreasonable to make this value much larger. */
pNew->nOut = 43; assert( 43==sqlite3LogEst(20) );
pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut);
@@ -124035,13 +112644,11 @@ static int whereLoopAddBtree(
*/
for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){
if( pProbe->pPartIdxWhere!=0
- && !whereUsablePartialIndex(pSrc->iCursor, pWC, pProbe->pPartIdxWhere) ){
- testcase( pNew->iTab!=pSrc->iCursor ); /* See ticket [98d973b8f5] */
+ && !whereUsablePartialIndex(pNew->iTab, pWC, pProbe->pPartIdxWhere) ){
continue; /* Partial index inappropriate for this query */
}
- rSize = pProbe->aiRowLogEst[0];
pNew->u.btree.nEq = 0;
- pNew->nSkip = 0;
+ pNew->u.btree.nSkip = 0;
pNew->nLTerm = 0;
pNew->iSortIdx = 0;
pNew->rSetup = 0;
@@ -124057,10 +112664,11 @@ static int whereLoopAddBtree(
/* Full table scan */
pNew->iSortIdx = b ? iSortIdx : 0;
- /* TUNING: Cost of full table scan is (N*3.0). */
- pNew->rRun = rSize + 16;
- ApplyCostMultiplier(pNew->rRun, pTab->costMult);
- whereLoopOutputAdjust(pWC, pNew, rSize);
+ /* TUNING: Cost of full table scan is 3*(N + log2(N)).
+ ** + The extra 3 factor is to encourage the use of indexed lookups
+ ** over full scans. FIXME */
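+      /* In LogEst terms the factor of 3 is the "+ 16" below, since 16 is
+      ** approximately sqlite3LogEst(3), while sqlite3LogEstAdd(rSize,rLogSize)
+      ** supplies the (N + log2(N)) part. */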
+ pNew->rRun = sqlite3LogEstAdd(rSize,rLogSize) + 16;
+ whereLoopOutputAdjust(pWC, pNew);
rc = whereLoopInsert(pBuilder, pNew);
pNew->nOut = rSize;
if( rc ) break;
@@ -124086,17 +112694,20 @@ static int whereLoopAddBtree(
)
){
pNew->iSortIdx = b ? iSortIdx : 0;
-
- /* The cost of visiting the index rows is N*K, where K is
- ** between 1.1 and 3.0, depending on the relative sizes of the
- ** index and table rows. If this is a non-covering index scan,
- ** also add the cost of visiting table rows (N*3.0). */
- pNew->rRun = rSize + 1 + (15*pProbe->szIdxRow)/pTab->szTabRow;
- if( m!=0 ){
- pNew->rRun = sqlite3LogEstAdd(pNew->rRun, rSize+16);
+ if( m==0 ){
+ /* TUNING: Cost of a covering index scan is K*(N + log2(N)).
+ ** + The extra factor K of between 1.1 and 3.0 that depends
+ ** on the relative sizes of the table and the index. K
+ ** is smaller for smaller indices, thus favoring them.
+ */
+ pNew->rRun = sqlite3LogEstAdd(rSize,rLogSize) + 1 +
+ (15*pProbe->szIdxRow)/pTab->szTabRow;
+ }else{
+ /* TUNING: Cost of scanning a non-covering index is (N+1)*log2(N)
+ ** which we will simplify to just N*log2(N) */
+ pNew->rRun = rSize + rLogSize;
}
- ApplyCostMultiplier(pNew->rRun, pTab->costMult);
- whereLoopOutputAdjust(pWC, pNew, rSize);
+ whereLoopOutputAdjust(pWC, pNew);
rc = whereLoopInsert(pBuilder, pNew);
pNew->nOut = rSize;
if( rc ) break;
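
For the covering-index branch just above, the factor K mentioned in the comment comes from the integer term 1 + (15*pProbe->szIdxRow)/pTab->szTabRow: an index whose rows are much narrower than the table's adds about 1 (a multiplier of roughly 1.07 on the LogEst scale), while an index as wide as the table adds 16 (roughly a factor of 3), which is where the quoted range of about 1.1 to 3.0 comes from. A small sketch of that mapping, using invented row sizes:

/* Illustration only: how 1 + (15*szIdxRow)/szTabRow becomes the factor K.
** Adding d on the LogEst scale multiplies the estimate by about 2^(d/10).
** The row sizes below are hypothetical, not SQLite defaults. */
#include <math.h>
#include <stdio.h>

int main(void){
  int szTabRow = 100;
  int idxSizes[] = { 5, 50, 100 };
  for(int i=0; i<3; i++){
    int d = 1 + (15*idxSizes[i])/szTabRow;
    printf("szIdxRow=%3d  ->  +%2d on the LogEst scale  ->  K ~ %.2f\n",
           idxSizes[i], d, pow(2.0, d/10.0));
  }
  return 0;
}
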
@@ -124112,7 +112723,7 @@ static int whereLoopAddBtree(
/* If there was an INDEXED BY clause, then only that one index is
** considered. */
- if( pSrc->pIBIndex ) break;
+ if( pSrc->pIndex ) break;
}
return rc;
}
@@ -124121,32 +112732,10 @@ static int whereLoopAddBtree(
/*
** Add all WhereLoop objects for a table of the join identified by
** pBuilder->pNew->iTab. That table is guaranteed to be a virtual table.
-**
-** If there are no LEFT or CROSS JOIN joins in the query, both mExtra and
-** mUnusable are set to 0. Otherwise, mExtra is a mask of all FROM clause
-** entries that occur before the virtual table in the FROM clause and are
-** separated from it by at least one LEFT or CROSS JOIN. Similarly, the
-** mUnusable mask contains all FROM clause entries that occur after the
-** virtual table and are separated from it by at least one LEFT or
-** CROSS JOIN.
-**
-** For example, if the query were:
-**
-** ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6;
-**
-** then mExtra corresponds to (t1, t2) and mUnusable to (t5, t6).
-**
-** All the tables in mExtra must be scanned before the current virtual
-** table. So any terms for which all prerequisites are satisfied by
-** mExtra may be specified as "usable" in all calls to xBestIndex.
-** Conversely, all tables in mUnusable must be scanned after the current
-** virtual table, so any terms for which the prerequisites overlap with
-** mUnusable should always be configured as "not-usable" for xBestIndex.
*/
static int whereLoopAddVirtual(
WhereLoopBuilder *pBuilder, /* WHERE clause information */
- Bitmask mExtra, /* Tables that must be scanned before this one */
- Bitmask mUnusable /* Tables that must be scanned after this one */
+ Bitmask mExtra
){
WhereInfo *pWInfo; /* WHERE analysis context */
Parse *pParse; /* The parsing context */
@@ -124167,7 +112756,6 @@ static int whereLoopAddVirtual(
WhereLoop *pNew;
int rc = SQLITE_OK;
- assert( (mExtra & mUnusable)==0 );
pWInfo = pBuilder->pWInfo;
pParse = pWInfo->pParse;
db = pParse->db;
@@ -124176,7 +112764,7 @@ static int whereLoopAddVirtual(
pSrc = &pWInfo->pTabList->a[pNew->iTab];
pTab = pSrc->pTab;
assert( IsVirtual(pTab) );
- pIdxInfo = allocateIndexInfo(pParse, pWC, mUnusable, pSrc,pBuilder->pOrderBy);
+ pIdxInfo = allocateIndexInfo(pParse, pWC, pSrc, pBuilder->pOrderBy);
if( pIdxInfo==0 ) return SQLITE_NOMEM;
pNew->prereq = 0;
pNew->rSetup = 0;
@@ -124206,7 +112794,7 @@ static int whereLoopAddVirtual(
if( (pTerm->eOperator & WO_IN)!=0 ){
seenIn = 1;
}
- if( (pTerm->prereqRight & ~mExtra)!=0 ){
+ if( pTerm->prereqRight!=0 ){
seenVar = 1;
}else if( (pTerm->eOperator & WO_IN)==0 ){
pIdxCons->usable = 1;
@@ -124214,7 +112802,7 @@ static int whereLoopAddVirtual(
break;
case 1: /* Constants with IN operators */
assert( seenIn );
- pIdxCons->usable = (pTerm->prereqRight & ~mExtra)==0;
+ pIdxCons->usable = (pTerm->prereqRight==0);
break;
case 2: /* Variables without IN */
assert( seenVar );
@@ -124234,7 +112822,6 @@ static int whereLoopAddVirtual(
pIdxInfo->orderByConsumed = 0;
pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2;
pIdxInfo->estimatedRows = 25;
- pIdxInfo->idxFlags = 0;
rc = vtabBestIndex(pParse, pTab, pIdxInfo);
if( rc ) goto whereLoopAddVtab_exit;
pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
@@ -124280,7 +112867,6 @@ static int whereLoopAddVirtual(
** (2) Multiple outputs from a single IN value will not merge
** together. */
pIdxInfo->orderByConsumed = 0;
- pIdxInfo->idxFlags &= ~SQLITE_INDEX_SCAN_UNIQUE;
}
}
}
@@ -124291,19 +112877,11 @@ static int whereLoopAddVirtual(
pNew->u.vtab.needFree = pIdxInfo->needToFreeIdxStr;
pIdxInfo->needToFreeIdxStr = 0;
pNew->u.vtab.idxStr = pIdxInfo->idxStr;
- pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ?
- pIdxInfo->nOrderBy : 0);
+ pNew->u.vtab.isOrdered = (u8)((pIdxInfo->nOrderBy!=0)
+ && pIdxInfo->orderByConsumed);
pNew->rSetup = 0;
pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost);
pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows);
-
- /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated
- ** that the scan will visit at most one row. Clear it otherwise. */
- if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){
- pNew->wsFlags |= WHERE_ONEROW;
- }else{
- pNew->wsFlags &= ~WHERE_ONEROW;
- }
whereLoopInsert(pBuilder, pNew);
if( pNew->u.vtab.needFree ){
sqlite3_free(pNew->u.vtab.idxStr);
@@ -124323,11 +112901,7 @@ whereLoopAddVtab_exit:
** Add WhereLoop entries to handle OR terms. This works for either
** btrees or virtual tables.
*/
-static int whereLoopAddOr(
- WhereLoopBuilder *pBuilder,
- Bitmask mExtra,
- Bitmask mUnusable
-){
+static int whereLoopAddOr(WhereLoopBuilder *pBuilder, Bitmask mExtra){
WhereInfo *pWInfo = pBuilder->pWInfo;
WhereClause *pWC;
WhereLoop *pNew;
@@ -124336,14 +112910,16 @@ static int whereLoopAddOr(
int iCur;
WhereClause tempWC;
WhereLoopBuilder sSubBuild;
- WhereOrSet sSum, sCur;
+ WhereOrSet sSum, sCur, sPrev;
struct SrcList_item *pItem;
pWC = pBuilder->pWC;
+ if( pWInfo->wctrlFlags & WHERE_AND_ONLY ) return SQLITE_OK;
pWCEnd = pWC->a + pWC->nTerm;
pNew = pBuilder->pNew;
memset(&sSum, 0, sizeof(sSum));
pItem = pWInfo->pTabList->a + pNew->iTab;
+ if( !HasRowid(pItem->pTab) ) return SQLITE_OK;
iCur = pItem->iCursor;
for(pTerm=pWC->a; pTerm<pWCEnd && rc==SQLITE_OK; pTerm++){
@@ -124360,7 +112936,6 @@ static int whereLoopAddOr(
sSubBuild.pOrderBy = 0;
sSubBuild.pOrSet = &sCur;
- WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm));
for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd; pOrTerm++){
if( (pOrTerm->eOperator & WO_AND)!=0 ){
sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc;
@@ -124375,26 +112950,14 @@ static int whereLoopAddOr(
continue;
}
sCur.n = 0;
-#ifdef WHERETRACE_ENABLED
- WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n",
- (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm));
- if( sqlite3WhereTrace & 0x400 ){
- for(i=0; i<sSubBuild.pWC->nTerm; i++){
- whereTermPrint(&sSubBuild.pWC->a[i], i);
- }
- }
-#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
if( IsVirtual(pItem->pTab) ){
- rc = whereLoopAddVirtual(&sSubBuild, mExtra, mUnusable);
+ rc = whereLoopAddVirtual(&sSubBuild, mExtra);
}else
#endif
{
rc = whereLoopAddBtree(&sSubBuild, mExtra);
}
- if( rc==SQLITE_OK ){
- rc = whereLoopAddOr(&sSubBuild, mExtra, mUnusable);
- }
assert( rc==SQLITE_OK || sCur.n==0 );
if( sCur.n==0 ){
sSum.n = 0;
@@ -124403,7 +112966,6 @@ static int whereLoopAddOr(
whereOrMove(&sSum, &sCur);
once = 0;
}else{
- WhereOrSet sPrev;
whereOrMove(&sPrev, &sSum);
sSum.n = 0;
for(i=0; i<sPrev.n; i++){
@@ -124422,24 +112984,12 @@ static int whereLoopAddOr(
pNew->iSortIdx = 0;
memset(&pNew->u, 0, sizeof(pNew->u));
for(i=0; rc==SQLITE_OK && i<sSum.n; i++){
- /* TUNING: Currently sSum.a[i].rRun is set to the sum of the costs
- ** of all sub-scans required by the OR-scan. However, due to rounding
- ** errors, it may be that the cost of the OR-scan is equal to its
- ** most expensive sub-scan. Add the smallest possible penalty
- ** (equivalent to multiplying the cost by 1.07) to ensure that
- ** this does not happen. Otherwise, for WHERE clauses such as the
- ** following where there is an index on "y":
- **
- ** WHERE likelihood(x=?, 0.99) OR y=?
- **
- ** the planner may elect to "OR" together a full-table scan and an
- ** index lookup. And other similarly odd results. */
- pNew->rRun = sSum.a[i].rRun + 1;
+        /* TUNING: Multiply by 3.5 for the secondary table lookup */
+ pNew->rRun = sSum.a[i].rRun + 18;
pNew->nOut = sSum.a[i].nOut;
pNew->prereq = sSum.a[i].prereq;
rc = whereLoopInsert(pBuilder, pNew);
}
- WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm));
}
}
return rc;
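
Worked out, the two OR-scan penalties in the hunk above differ a great deal on the LogEst scale: the restored +18 scales each sub-scan cost by about 2^(18/10) = 2^1.8, or roughly 3.5, matching the restored "multiply by 3.5 for the secondary table lookup" comment, whereas the removed +1 corresponds to only about 2^0.1, the 1.07 factor described in the removed comment.
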
@@ -124455,43 +113005,33 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
int iTab;
SrcList *pTabList = pWInfo->pTabList;
struct SrcList_item *pItem;
- struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel];
sqlite3 *db = pWInfo->pParse->db;
+ int nTabList = pWInfo->nLevel;
int rc = SQLITE_OK;
+ u8 priorJoinType = 0;
WhereLoop *pNew;
- u8 priorJointype = 0;
/* Loop over the tables in the join, from left to right */
pNew = pBuilder->pNew;
whereLoopInit(pNew);
- for(iTab=0, pItem=pTabList->a; pItem<pEnd; iTab++, pItem++){
- Bitmask mUnusable = 0;
+ for(iTab=0, pItem=pTabList->a; iTab<nTabList; iTab++, pItem++){
pNew->iTab = iTab;
- pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor);
- if( ((pItem->fg.jointype|priorJointype) & (JT_LEFT|JT_CROSS))!=0 ){
- /* This condition is true when pItem is the FROM clause term on the
- ** right-hand-side of a LEFT or CROSS JOIN. */
+ pNew->maskSelf = getMask(&pWInfo->sMaskSet, pItem->iCursor);
+ if( ((pItem->jointype|priorJoinType) & (JT_LEFT|JT_CROSS))!=0 ){
mExtra = mPrior;
}
- priorJointype = pItem->fg.jointype;
+ priorJoinType = pItem->jointype;
if( IsVirtual(pItem->pTab) ){
- struct SrcList_item *p;
- for(p=&pItem[1]; p<pEnd; p++){
- if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){
- mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor);
- }
- }
- rc = whereLoopAddVirtual(pBuilder, mExtra, mUnusable);
+ rc = whereLoopAddVirtual(pBuilder, mExtra);
}else{
rc = whereLoopAddBtree(pBuilder, mExtra);
}
if( rc==SQLITE_OK ){
- rc = whereLoopAddOr(pBuilder, mExtra, mUnusable);
+ rc = whereLoopAddOr(pBuilder, mExtra);
}
mPrior |= pNew->maskSelf;
if( rc || db->mallocFailed ) break;
}
-
whereLoopClear(db, pNew);
return rc;
}
@@ -124499,21 +113039,21 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
/*
** Examine a WherePath (with the addition of the extra WhereLoop of the 5th
** parameters) to see if it outputs rows in the requested ORDER BY
-** (or GROUP BY) without requiring a separate sort operation. Return N:
+** (or GROUP BY) without requiring a separate sort operation. Return:
**
-** N>0: N terms of the ORDER BY clause are satisfied
-** N==0: No terms of the ORDER BY clause are satisfied
-** N<0: Unknown yet how many terms of ORDER BY might be satisfied.
+** 0: ORDER BY is not satisfied. Sorting required
+** 1: ORDER BY is satisfied. Omit sorting
+** -1: Unknown at this time
**
** Note that processing for WHERE_GROUPBY and WHERE_DISTINCTBY is not as
** strict. With GROUP BY and DISTINCT the only requirement is that
** equivalent rows appear immediately adjacent to one another. GROUP BY
-** and DISTINCT do not require rows to appear in any particular order as long
-** as equivalent rows are grouped together. Thus for GROUP BY and DISTINCT
+** and DISTINCT do not require rows to appear in any particular order as long
+** as equivalent rows are grouped together.  Thus for GROUP BY and DISTINCT
** the pOrderBy terms can be matched in any order. With ORDER BY, the
** pOrderBy terms must be matched in strict left-to-right order.
*/
-static i8 wherePathSatisfiesOrderBy(
+static int wherePathSatisfiesOrderBy(
WhereInfo *pWInfo, /* The WHERE clause */
ExprList *pOrderBy, /* ORDER BY or GROUP BY or DISTINCT clause to check */
WherePath *pPath, /* The WherePath to check */
@@ -124569,6 +113109,14 @@ static i8 wherePathSatisfiesOrderBy(
*/
assert( pOrderBy!=0 );
+
+ /* Sortability of virtual tables is determined by the xBestIndex method
+ ** of the virtual table itself */
+ if( pLast->wsFlags & WHERE_VIRTUALTABLE ){
+ testcase( nLoop>0 ); /* True when outer loops are one-row and match
+ ** no ORDER BY terms */
+ return pLast->u.vtab.isOrdered;
+ }
if( nLoop && OptimizationDisabled(db, SQLITE_OrderByIdxJoin) ) return 0;
nOrderBy = pOrderBy->nExpr;
@@ -124581,10 +113129,7 @@ static i8 wherePathSatisfiesOrderBy(
for(iLoop=0; isOrderDistinct && obSat<obDone && iLoop<=nLoop; iLoop++){
if( iLoop>0 ) ready |= pLoop->maskSelf;
pLoop = iLoop<nLoop ? pPath->aLoop[iLoop] : pLast;
- if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){
- if( pLoop->u.vtab.isOrdered ) obSat = obDone;
- break;
- }
+ assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 );
iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor;
/* Mark off any ORDER BY term X that is a column in the table of
@@ -124597,10 +113142,10 @@ static i8 wherePathSatisfiesOrderBy(
pOBExpr = sqlite3ExprSkipCollate(pOrderBy->a[i].pExpr);
if( pOBExpr->op!=TK_COLUMN ) continue;
if( pOBExpr->iTable!=iCur ) continue;
- pTerm = sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn,
- ~ready, WO_EQ|WO_ISNULL|WO_IS, 0);
+ pTerm = findTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn,
+ ~ready, WO_EQ|WO_ISNULL, 0);
if( pTerm==0 ) continue;
- if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0 && pOBExpr->iColumn>=0 ){
+ if( (pTerm->eOperator&WO_EQ)!=0 && pOBExpr->iColumn>=0 ){
const char *z1, *z2;
pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
if( !pColl ) pColl = db->pDfltColl;
@@ -124609,7 +113154,6 @@ static i8 wherePathSatisfiesOrderBy(
if( !pColl ) pColl = db->pDfltColl;
z2 = pColl->zName;
if( sqlite3StrICmp(z1, z2)!=0 ) continue;
- testcase( pTerm->pExpr->op==TK_IS );
}
obSat |= MASKBIT(i);
}
@@ -124625,9 +113169,8 @@ static i8 wherePathSatisfiesOrderBy(
nKeyCol = pIndex->nKeyCol;
nColumn = pIndex->nColumn;
assert( nColumn==nKeyCol+1 || !HasRowid(pIndex->pTable) );
- assert( pIndex->aiColumn[nColumn-1]==XN_ROWID
- || !HasRowid(pIndex->pTable));
- isOrderDistinct = IsUniqueIndex(pIndex);
+ assert( pIndex->aiColumn[nColumn-1]==(-1) || !HasRowid(pIndex->pTable));
+ isOrderDistinct = pIndex->onError!=OE_None;
}
/* Loop through all columns of the index and deal with the ones
@@ -124640,8 +113183,8 @@ static i8 wherePathSatisfiesOrderBy(
/* Skip over == and IS NULL terms */
if( j<pLoop->u.btree.nEq
- && pLoop->nSkip==0
- && ((i = pLoop->aLTerm[j]->eOperator) & (WO_EQ|WO_ISNULL|WO_IS))!=0
+ && pLoop->u.btree.nSkip==0
+ && ((i = pLoop->aLTerm[j]->eOperator) & (WO_EQ|WO_ISNULL))!=0
){
if( i & WO_ISNULL ){
testcase( isOrderDistinct );
@@ -124658,7 +113201,7 @@ static i8 wherePathSatisfiesOrderBy(
revIdx = pIndex->aSortOrder[j];
if( iColumn==pIndex->pTable->iPKey ) iColumn = -1;
}else{
- iColumn = XN_ROWID;
+ iColumn = -1;
revIdx = 0;
}
@@ -124674,7 +113217,7 @@ static i8 wherePathSatisfiesOrderBy(
}
/* Find the ORDER BY term that corresponds to the j-th column
- ** of the index and mark that ORDER BY term off
+      ** of the index and mark that ORDER BY term off
*/
bOnce = 1;
isMatch = 0;
@@ -124684,15 +113227,9 @@ static i8 wherePathSatisfiesOrderBy(
testcase( wctrlFlags & WHERE_GROUPBY );
testcase( wctrlFlags & WHERE_DISTINCTBY );
if( (wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY))==0 ) bOnce = 0;
- if( iColumn>=(-1) ){
- if( pOBExpr->op!=TK_COLUMN ) continue;
- if( pOBExpr->iTable!=iCur ) continue;
- if( pOBExpr->iColumn!=iColumn ) continue;
- }else{
- if( sqlite3ExprCompare(pOBExpr,pIndex->aColExpr->a[j].pExpr,iCur) ){
- continue;
- }
- }
+ if( pOBExpr->op!=TK_COLUMN ) continue;
+ if( pOBExpr->iTable!=iCur ) continue;
+ if( pOBExpr->iColumn!=iColumn ) continue;
if( iColumn>=0 ){
pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
if( !pColl ) pColl = db->pDfltColl;
@@ -124701,23 +113238,23 @@ static i8 wherePathSatisfiesOrderBy(
isMatch = 1;
break;
}
- if( isMatch && (wctrlFlags & WHERE_GROUPBY)==0 ){
- /* Make sure the sort order is compatible in an ORDER BY clause.
- ** Sort order is irrelevant for a GROUP BY clause. */
- if( revSet ){
- if( (rev ^ revIdx)!=pOrderBy->a[i].sortOrder ) isMatch = 0;
- }else{
- rev = revIdx ^ pOrderBy->a[i].sortOrder;
- if( rev ) *pRevMask |= MASKBIT(iLoop);
- revSet = 1;
- }
- }
if( isMatch ){
if( iColumn<0 ){
testcase( distinctColumns==0 );
distinctColumns = 1;
}
obSat |= MASKBIT(i);
+ if( (pWInfo->wctrlFlags & WHERE_GROUPBY)==0 ){
+ /* Make sure the sort order is compatible in an ORDER BY clause.
+ ** Sort order is irrelevant for a GROUP BY clause. */
+ if( revSet ){
+ if( (rev ^ revIdx)!=pOrderBy->a[i].sortOrder ) return 0;
+ }else{
+ rev = revIdx ^ pOrderBy->a[i].sortOrder;
+ if( rev ) *pRevMask |= MASKBIT(iLoop);
+ revSet = 1;
+ }
+ }
}else{
/* No match found */
if( j==0 || j<nKeyCol ){
@@ -124738,58 +113275,19 @@ static i8 wherePathSatisfiesOrderBy(
orderDistinctMask |= pLoop->maskSelf;
for(i=0; i<nOrderBy; i++){
Expr *p;
- Bitmask mTerm;
if( MASKBIT(i) & obSat ) continue;
p = pOrderBy->a[i].pExpr;
- mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p);
- if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue;
- if( (mTerm&~orderDistinctMask)==0 ){
+ if( (exprTableUsage(&pWInfo->sMaskSet, p)&~orderDistinctMask)==0 ){
obSat |= MASKBIT(i);
}
}
}
} /* End the loop over all WhereLoops from outer-most down to inner-most */
- if( obSat==obDone ) return (i8)nOrderBy;
- if( !isOrderDistinct ){
- for(i=nOrderBy-1; i>0; i--){
- Bitmask m = MASKBIT(i) - 1;
- if( (obSat&m)==m ) return i;
- }
- return 0;
- }
+ if( obSat==obDone ) return 1;
+ if( !isOrderDistinct ) return 0;
return -1;
}
-
-/*
-** If the WHERE_GROUPBY flag is set in the mask passed to sqlite3WhereBegin(),
-** the planner assumes that the specified pOrderBy list is actually a GROUP
-** BY clause - and so any order that groups rows as required satisfies the
-** request.
-**
-** Normally, in this case it is not possible for the caller to determine
-** whether or not the rows are really being delivered in sorted order, or
-** just in some other order that provides the required grouping. However,
-** if the WHERE_SORTBYGROUP flag is also passed to sqlite3WhereBegin(), then
-** this function may be called on the returned WhereInfo object. It returns
-** true if the rows really will be sorted in the specified order, or false
-** otherwise.
-**
-** For example, assuming:
-**
-** CREATE INDEX i1 ON t1(x, Y);
-**
-** then
-**
-** SELECT * FROM t1 GROUP BY x,y ORDER BY x,y; -- IsSorted()==1
-** SELECT * FROM t1 GROUP BY y,x ORDER BY y,x; -- IsSorted()==0
-*/
-SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo *pWInfo){
- assert( pWInfo->wctrlFlags & WHERE_GROUPBY );
- assert( pWInfo->wctrlFlags & WHERE_SORTBYGROUP );
- return pWInfo->sorted;
-}
-
#ifdef WHERETRACE_ENABLED
/* For debugging use only: */
static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){
@@ -124802,44 +113300,6 @@ static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){
}
#endif
-/*
-** Return the cost of sorting nRow rows, assuming that the keys have
-** nOrderby columns and that the first nSorted columns are already in
-** order.
-*/
-static LogEst whereSortingCost(
- WhereInfo *pWInfo,
- LogEst nRow,
- int nOrderBy,
- int nSorted
-){
- /* TUNING: Estimated cost of a full external sort, where N is
- ** the number of rows to sort is:
- **
- ** cost = (3.0 * N * log(N)).
- **
- ** Or, if the order-by clause has X terms but only the last Y
- ** terms are out of order, then block-sorting will reduce the
- ** sorting cost to:
- **
- ** cost = (3.0 * N * log(N)) * (Y/X)
- **
- ** The (Y/X) term is implemented using stack variable rScale
- ** below. */
- LogEst rScale, rSortCost;
- assert( nOrderBy>0 && 66==sqlite3LogEst(100) );
- rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66;
- rSortCost = nRow + estLog(nRow) + rScale + 16;
-
- /* TUNING: The cost of implementing DISTINCT using a B-TREE is
- ** similar but with a larger constant of proportionality.
- ** Multiply by an additional factor of 3.0. */
- if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){
- rSortCost += 16;
- }
-
- return rSortCost;
-}
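
One LogEst identity in the removed whereSortingCost() above is worth spelling out: because 66 is asserted to be sqlite3LogEst(100), the expression sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66 is the LogEst of the fraction (nOrderBy-nSorted)/nOrderBy, which is exactly the (Y/X) block-sorting discount described in the comment. A hedged numeric sketch (the ORDER BY shape is invented for illustration):

/* Illustration only: the (Y/X) scaling used by the removed whereSortingCost().
** approxLogEst() mimics the 10*log2 scale; it is not SQLite's implementation. */
#include <math.h>
#include <stdio.h>

static int approxLogEst(double n){ return (int)(10.0*log2(n) + 0.5); }

int main(void){
  int nOrderBy = 4, nSorted = 3;   /* hypothetical: only the last of 4 terms is unsorted */
  int rScale = approxLogEst((nOrderBy - nSorted)*100.0/nOrderBy) - 66;
  printf("rScale = %d  (about LogEst(1/4): the sort cost is scaled by ~0.25)\n", rScale);
  return 0;
}
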
/*
** Given the list of WhereLoop objects at pWInfo->pLoops, this routine
@@ -124861,9 +113321,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
int iLoop; /* Loop counter over the terms of the join */
int ii, jj; /* Loop counters */
int mxI = 0; /* Index of next entry to replace */
- int nOrderBy; /* Number of ORDER BY clause terms */
+ LogEst rCost; /* Cost of a path */
+ LogEst nOut; /* Number of outputs */
LogEst mxCost = 0; /* Maximum cost of a set of paths */
- LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of path */
+ LogEst mxOut = 0; /* Maximum nOut value on the set of paths */
+ LogEst rSortCost; /* Cost to do a sort */
int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */
WherePath *aFrom; /* All nFrom paths at the previous level */
WherePath *aTo; /* The nTo best paths at the current level */
@@ -124871,9 +113333,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
WherePath *pTo; /* An element of aTo[] that we are working on */
WhereLoop *pWLoop; /* One of the WhereLoop objects */
WhereLoop **pX; /* Used to divy up the pSpace memory */
- LogEst *aSortCost = 0; /* Sorting and partial sorting costs */
char *pSpace; /* Temporary memory used by this routine */
- int nSpace; /* Bytes of space allocated at pSpace */
pParse = pWInfo->pParse;
db = pParse->db;
@@ -124881,25 +113341,13 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
/* TUNING: For simple queries, only the best path is tracked.
** For 2-way joins, the 5 best paths are followed.
** For joins of 3 or more tables, track the 10 best paths */
- mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10);
+ mxChoice = (nLoop==1) ? 1 : (nLoop==2 ? 5 : 10);
assert( nLoop<=pWInfo->pTabList->nSrc );
- WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst));
-
- /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this
- ** case the purpose of this call is to estimate the number of rows returned
- ** by the overall query. Once this estimate has been obtained, the caller
- ** will invoke this function a second time, passing the estimate as the
- ** nRowEst parameter. */
- if( pWInfo->pOrderBy==0 || nRowEst==0 ){
- nOrderBy = 0;
- }else{
- nOrderBy = pWInfo->pOrderBy->nExpr;
- }
+ WHERETRACE(0x002, ("---- begin solver\n"));
- /* Allocate and initialize space for aTo, aFrom and aSortCost[] */
- nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2;
- nSpace += sizeof(LogEst) * nOrderBy;
- pSpace = sqlite3DbMallocRaw(db, nSpace);
+ /* Allocate and initialize space for aTo and aFrom */
+ ii = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2;
+ pSpace = sqlite3DbMallocRaw(db, ii);
if( pSpace==0 ) return SQLITE_NOMEM;
aTo = (WherePath*)pSpace;
aFrom = aTo+mxChoice;
@@ -124908,35 +113356,27 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
for(ii=mxChoice*2, pFrom=aTo; ii>0; ii--, pFrom++, pX += nLoop){
pFrom->aLoop = pX;
}
- if( nOrderBy ){
- /* If there is an ORDER BY clause and it is not being ignored, set up
- ** space for the aSortCost[] array. Each element of the aSortCost array
- ** is either zero - meaning it has not yet been initialized - or the
- ** cost of sorting nRowEst rows of data where the first X terms of
- ** the ORDER BY clause are already in order, where X is the array
- ** index. */
- aSortCost = (LogEst*)pX;
- memset(aSortCost, 0, sizeof(LogEst) * nOrderBy);
- }
- assert( aSortCost==0 || &pSpace[nSpace]==(char*)&aSortCost[nOrderBy] );
- assert( aSortCost!=0 || &pSpace[nSpace]==(char*)pX );
/* Seed the search with a single WherePath containing zero WhereLoops.
**
- ** TUNING: Do not let the number of iterations go above 28. If the cost
- ** of computing an automatic index is not paid back within the first 28
+ ** TUNING: Do not let the number of iterations go above 25. If the cost
+ ** of computing an automatic index is not paid back within the first 25
** rows, then do not use the automatic index. */
- aFrom[0].nRow = MIN(pParse->nQueryLoop, 48); assert( 48==sqlite3LogEst(28) );
+ aFrom[0].nRow = MIN(pParse->nQueryLoop, 46); assert( 46==sqlite3LogEst(25) );
nFrom = 1;
- assert( aFrom[0].isOrdered==0 );
- if( nOrderBy ){
- /* If nLoop is zero, then there are no FROM terms in the query. Since
- ** in this case the query may return a maximum of one row, the results
- ** are already in the requested order. Set isOrdered to nOrderBy to
- ** indicate this. Or, if nLoop is greater than zero, set isOrdered to
- ** -1, indicating that the result set may or may not be ordered,
- ** depending on the loops added to the current plan. */
- aFrom[0].isOrdered = nLoop>0 ? -1 : nOrderBy;
+
+ /* Precompute the cost of sorting the final result set, if the caller
+ ** to sqlite3WhereBegin() was concerned about sorting */
+ rSortCost = 0;
+ if( pWInfo->pOrderBy==0 || nRowEst==0 ){
+ aFrom[0].isOrderedValid = 1;
+ }else{
+ /* TUNING: Estimated cost of sorting is 48*N*log2(N) where N is the
+ ** number of output rows. The 48 is the expected size of a row to sort.
+ ** FIXME: compute a better estimate of the 48 multiplier based on the
+ ** result set expressions. */
+ rSortCost = nRowEst + estLog(nRowEst);
+ WHERETRACE(0x002,("---- sort cost=%-3d\n", rSortCost));
}
/* Compute successively longer WherePaths using the previous generation
@@ -124946,82 +113386,60 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
nTo = 0;
for(ii=0, pFrom=aFrom; ii<nFrom; ii++, pFrom++){
for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){
- LogEst nOut; /* Rows visited by (pFrom+pWLoop) */
- LogEst rCost; /* Cost of path (pFrom+pWLoop) */
- LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */
- i8 isOrdered = pFrom->isOrdered; /* isOrdered for (pFrom+pWLoop) */
- Bitmask maskNew; /* Mask of src visited by (..) */
- Bitmask revMask = 0; /* Mask of rev-order loops for (..) */
-
+ Bitmask maskNew;
+ Bitmask revMask = 0;
+ u8 isOrderedValid = pFrom->isOrderedValid;
+ u8 isOrdered = pFrom->isOrdered;
if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue;
if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue;
/* At this point, pWLoop is a candidate to be the next loop.
** Compute its cost */
- rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow);
- rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted);
+ rCost = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow);
+ rCost = sqlite3LogEstAdd(rCost, pFrom->rCost);
nOut = pFrom->nRow + pWLoop->nOut;
maskNew = pFrom->maskLoop | pWLoop->maskSelf;
- if( isOrdered<0 ){
- isOrdered = wherePathSatisfiesOrderBy(pWInfo,
+ if( !isOrderedValid ){
+ switch( wherePathSatisfiesOrderBy(pWInfo,
pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags,
- iLoop, pWLoop, &revMask);
- }else{
- revMask = pFrom->revLoop;
- }
- if( isOrdered>=0 && isOrdered<nOrderBy ){
- if( aSortCost[isOrdered]==0 ){
- aSortCost[isOrdered] = whereSortingCost(
- pWInfo, nRowEst, nOrderBy, isOrdered
- );
+ iLoop, pWLoop, &revMask) ){
+ case 1: /* Yes. pFrom+pWLoop does satisfy the ORDER BY clause */
+ isOrdered = 1;
+ isOrderedValid = 1;
+ break;
+ case 0: /* No. pFrom+pWLoop will require a separate sort */
+ isOrdered = 0;
+ isOrderedValid = 1;
+ rCost = sqlite3LogEstAdd(rCost, rSortCost);
+ break;
+ default: /* Cannot tell yet. Try again on the next iteration */
+ break;
}
- rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]);
-
- WHERETRACE(0x002,
- ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n",
- aSortCost[isOrdered], (nOrderBy-isOrdered), nOrderBy,
- rUnsorted, rCost));
}else{
- rCost = rUnsorted;
+ revMask = pFrom->revLoop;
}
-
- /* Check to see if pWLoop should be added to the set of
- ** mxChoice best-so-far paths.
- **
- ** First look for an existing path among best-so-far paths
- ** that covers the same set of loops and has the same isOrdered
- ** setting as the current path candidate.
- **
- ** The term "((pTo->isOrdered^isOrdered)&0x80)==0" is equivalent
- ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range
- ** of legal values for isOrdered, -1..64.
- */
+ /* Check to see if pWLoop should be added to the mxChoice best so far */
for(jj=0, pTo=aTo; jj<nTo; jj++, pTo++){
if( pTo->maskLoop==maskNew
- && ((pTo->isOrdered^isOrdered)&0x80)==0
+ && pTo->isOrderedValid==isOrderedValid
+ && ((pTo->rCost<=rCost && pTo->nRow<=nOut) ||
+ (pTo->rCost>=rCost && pTo->nRow>=nOut))
){
testcase( jj==nTo-1 );
break;
}
}
if( jj>=nTo ){
- /* None of the existing best-so-far paths match the candidate. */
- if( nTo>=mxChoice
- && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted))
- ){
- /* The current candidate is no better than any of the mxChoice
- ** paths currently in the best-so-far buffer. So discard
- ** this candidate as not viable. */
+ if( nTo>=mxChoice && rCost>=mxCost ){
#ifdef WHERETRACE_ENABLED /* 0x4 */
if( sqlite3WhereTrace&0x4 ){
sqlite3DebugPrintf("Skip %s cost=%-3d,%3d order=%c\n",
wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
- isOrdered>=0 ? isOrdered+'0' : '?');
+ isOrderedValid ? (isOrdered ? 'Y' : 'N') : '?');
}
#endif
continue;
}
-          /* If we reach this point it means that the new candidate path
- ** needs to be added to the set of best-so-far paths. */
+ /* Add a new Path to the aTo[] set */
if( nTo<mxChoice ){
/* Increase the size of the aTo set by one */
jj = nTo++;
@@ -125034,42 +113452,36 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
if( sqlite3WhereTrace&0x4 ){
sqlite3DebugPrintf("New %s cost=%-3d,%3d order=%c\n",
wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
- isOrdered>=0 ? isOrdered+'0' : '?');
+ isOrderedValid ? (isOrdered ? 'Y' : 'N') : '?');
}
#endif
}else{
- /* Control reaches here if best-so-far path pTo=aTo[jj] covers the
-          ** same set of loops and has the same isOrdered setting as the
- ** candidate path. Check to see if the candidate should replace
- ** pTo or if the candidate should be skipped */
- if( pTo->rCost<rCost || (pTo->rCost==rCost && pTo->nRow<=nOut) ){
+ if( pTo->rCost<=rCost && pTo->nRow<=nOut ){
#ifdef WHERETRACE_ENABLED /* 0x4 */
if( sqlite3WhereTrace&0x4 ){
sqlite3DebugPrintf(
"Skip %s cost=%-3d,%3d order=%c",
wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
- isOrdered>=0 ? isOrdered+'0' : '?');
+ isOrderedValid ? (isOrdered ? 'Y' : 'N') : '?');
sqlite3DebugPrintf(" vs %s cost=%-3d,%d order=%c\n",
wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
- pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?');
+ pTo->isOrderedValid ? (pTo->isOrdered ? 'Y' : 'N') : '?');
}
#endif
- /* Discard the candidate path from further consideration */
testcase( pTo->rCost==rCost );
continue;
}
testcase( pTo->rCost==rCost+1 );
- /* Control reaches here if the candidate path is better than the
- ** pTo path. Replace pTo with the candidate. */
+ /* A new and better score for a previously created equivalent path */
#ifdef WHERETRACE_ENABLED /* 0x4 */
if( sqlite3WhereTrace&0x4 ){
sqlite3DebugPrintf(
"Update %s cost=%-3d,%3d order=%c",
wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
- isOrdered>=0 ? isOrdered+'0' : '?');
+ isOrderedValid ? (isOrdered ? 'Y' : 'N') : '?');
sqlite3DebugPrintf(" was %s cost=%-3d,%3d order=%c\n",
wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
- pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?');
+ pTo->isOrderedValid ? (pTo->isOrdered ? 'Y' : 'N') : '?');
}
#endif
}
@@ -125078,20 +113490,18 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
pTo->revLoop = revMask;
pTo->nRow = nOut;
pTo->rCost = rCost;
- pTo->rUnsorted = rUnsorted;
+ pTo->isOrderedValid = isOrderedValid;
pTo->isOrdered = isOrdered;
memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop);
pTo->aLoop[iLoop] = pWLoop;
if( nTo>=mxChoice ){
mxI = 0;
mxCost = aTo[0].rCost;
- mxUnsorted = aTo[0].nRow;
+ mxOut = aTo[0].nRow;
for(jj=1, pTo=&aTo[1]; jj<mxChoice; jj++, pTo++){
- if( pTo->rCost>mxCost
- || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted)
- ){
+ if( pTo->rCost>mxCost || (pTo->rCost==mxCost && pTo->nRow>mxOut) ){
mxCost = pTo->rCost;
- mxUnsorted = pTo->rUnsorted;
+ mxOut = pTo->nRow;
mxI = jj;
}
}
@@ -125100,13 +113510,13 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
}
#ifdef WHERETRACE_ENABLED /* >=2 */
- if( sqlite3WhereTrace & 0x02 ){
+ if( sqlite3WhereTrace>=2 ){
sqlite3DebugPrintf("---- after round %d ----\n", iLoop);
for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){
sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c",
wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
- pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?');
- if( pTo->isOrdered>0 ){
+ pTo->isOrderedValid ? (pTo->isOrdered ? 'Y' : 'N') : '?');
+ if( pTo->isOrderedValid && pTo->isOrdered ){
sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop);
}else{
sqlite3DebugPrintf("\n");
@@ -125149,36 +113559,16 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
Bitmask notUsed;
int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pResultSet, pFrom,
WHERE_DISTINCTBY, nLoop-1, pFrom->aLoop[nLoop-1], &notUsed);
- if( rc==pWInfo->pResultSet->nExpr ){
- pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
- }
+ if( rc==1 ) pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
}
- if( pWInfo->pOrderBy ){
+ if( pFrom->isOrdered ){
if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){
- if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){
- pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
- }
+ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
}else{
- pWInfo->nOBSat = pFrom->isOrdered;
- if( pWInfo->nOBSat<0 ) pWInfo->nOBSat = 0;
+ pWInfo->bOBSat = 1;
pWInfo->revMask = pFrom->revLoop;
}
- if( (pWInfo->wctrlFlags & WHERE_SORTBYGROUP)
- && pWInfo->nOBSat==pWInfo->pOrderBy->nExpr && nLoop>0
- ){
- Bitmask revMask = 0;
- int nOrder = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy,
- pFrom, 0, nLoop-1, pFrom->aLoop[nLoop-1], &revMask
- );
- assert( pWInfo->sorted==0 );
- if( nOrder==pWInfo->pOrderBy->nExpr ){
- pWInfo->sorted = 1;
- pWInfo->revMask = revMask;
- }
- }
}
-
-
pWInfo->nRowOut = pFrom->nRow;
/* Free temporary memory and return success */
@@ -125214,15 +113604,14 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
pItem = pWInfo->pTabList->a;
pTab = pItem->pTab;
if( IsVirtual(pTab) ) return 0;
- if( pItem->fg.isIndexedBy ) return 0;
+ if( pItem->zIndex ) return 0;
iCur = pItem->iCursor;
pWC = &pWInfo->sWC;
pLoop = pBuilder->pNew;
pLoop->wsFlags = 0;
- pLoop->nSkip = 0;
- pTerm = sqlite3WhereFindTerm(pWC, iCur, -1, 0, WO_EQ|WO_IS, 0);
+ pLoop->u.btree.nSkip = 0;
+ pTerm = findTerm(pWC, iCur, -1, 0, WO_EQ, 0);
if( pTerm ){
- testcase( pTerm->eOperator & WO_IS );
pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_IPK|WHERE_ONEROW;
pLoop->aLTerm[0] = pTerm;
pLoop->nLTerm = 1;
@@ -125231,17 +113620,15 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
pLoop->rRun = 33; /* 33==sqlite3LogEst(10) */
}else{
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- int opMask;
assert( pLoop->aLTermSpace==pLoop->aLTerm );
- if( !IsUniqueIndex(pIdx)
+ assert( ArraySize(pLoop->aLTermSpace)==4 );
+ if( pIdx->onError==OE_None
|| pIdx->pPartIdxWhere!=0
|| pIdx->nKeyCol>ArraySize(pLoop->aLTermSpace)
) continue;
- opMask = pIdx->uniqNotNull ? (WO_EQ|WO_IS) : WO_EQ;
for(j=0; j<pIdx->nKeyCol; j++){
- pTerm = sqlite3WhereFindTerm(pWC, iCur, j, 0, opMask, pIdx);
+ pTerm = findTerm(pWC, iCur, pIdx->aiColumn[j], 0, WO_EQ, pIdx);
if( pTerm==0 ) break;
- testcase( pTerm->eOperator & WO_IS );
pLoop->aLTerm[j] = pTerm;
}
if( j!=pIdx->nKeyCol ) continue;
@@ -125260,10 +113647,10 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
if( pLoop->wsFlags ){
pLoop->nOut = (LogEst)1;
pWInfo->a[0].pWLoop = pLoop;
- pLoop->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur);
+ pLoop->maskSelf = getMask(&pWInfo->sMaskSet, iCur);
pWInfo->a[0].iTabCur = iCur;
pWInfo->nRowOut = 1;
- if( pWInfo->pOrderBy ) pWInfo->nOBSat = pWInfo->pOrderBy->nExpr;
+ if( pWInfo->pOrderBy ) pWInfo->bOBSat = 1;
if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){
pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE;
}
@@ -125367,7 +113754,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
Parse *pParse, /* The parser context */
SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */
Expr *pWhere, /* The WHERE clause */
- ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */
+ ExprList *pOrderBy, /* An ORDER BY clause, or NULL */
ExprList *pResultSet, /* Result set of the query */
u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */
int iIdxCur /* If WHERE_ONETABLE_ONLY is set, index cursor number */
@@ -125385,18 +113772,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
sqlite3 *db; /* Database connection */
int rc; /* Return code */
- assert( (wctrlFlags & WHERE_ONEPASS_MULTIROW)==0 || (
- (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0
- && (wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0
- ));
/* Variable initialization */
db = pParse->db;
memset(&sWLB, 0, sizeof(sWLB));
-
- /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */
- testcase( pOrderBy && pOrderBy->nExpr==BMS-1 );
- if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0;
sWLB.pOrderBy = pOrderBy;
/* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via
@@ -125441,10 +113820,9 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
pWInfo->pTabList = pTabList;
pWInfo->pOrderBy = pOrderBy;
pWInfo->pResultSet = pResultSet;
- pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(v);
+ pWInfo->iBreak = sqlite3VdbeMakeLabel(v);
pWInfo->wctrlFlags = wctrlFlags;
pWInfo->savedNQueryLoop = pParse->nQueryLoop;
- assert( pWInfo->eOnePass==ONEPASS_OFF ); /* ONEPASS defaults to OFF */
pMaskSet = &pWInfo->sMaskSet;
sWLB.pWInfo = pWInfo;
sWLB.pWC = &pWInfo->sWC;
@@ -125459,24 +113837,22 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
** subexpression is separated by an AND operator.
*/
initMaskSet(pMaskSet);
- sqlite3WhereClauseInit(&pWInfo->sWC, pWInfo);
- sqlite3WhereSplit(&pWInfo->sWC, pWhere, TK_AND);
+ whereClauseInit(&pWInfo->sWC, pWInfo);
+ whereSplit(&pWInfo->sWC, pWhere, TK_AND);
+ sqlite3CodeVerifySchema(pParse, -1); /* Insert the cookie verifier Goto */
/* Special case: a WHERE clause that is constant. Evaluate the
** expression and either jump over all of the code or fall thru.
*/
- for(ii=0; ii<sWLB.pWC->nTerm; ii++){
- if( nTabList==0 || sqlite3ExprIsConstantNotJoin(sWLB.pWC->a[ii].pExpr) ){
- sqlite3ExprIfFalse(pParse, sWLB.pWC->a[ii].pExpr, pWInfo->iBreak,
- SQLITE_JUMPIFNULL);
- sWLB.pWC->a[ii].wtFlags |= TERM_CODED;
- }
+ if( pWhere && (nTabList==0 || sqlite3ExprIsConstantNotJoin(pWhere)) ){
+ sqlite3ExprIfFalse(pParse, pWhere, pWInfo->iBreak, SQLITE_JUMPIFNULL);
+ pWhere = 0;
}
/* Special case: No FROM clause
*/
if( nTabList==0 ){
- if( pOrderBy ) pWInfo->nOBSat = pOrderBy->nExpr;
+ if( pOrderBy ) pWInfo->bOBSat = 1;
if( wctrlFlags & WHERE_WANT_DISTINCT ){
pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE;
}
@@ -125484,12 +113860,14 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* Assign a bit from the bitmask to every term in the FROM clause.
**
- ** The N-th term of the FROM clause is assigned a bitmask of 1<<N.
- **
-  ** The rule of the previous sentence ensures that if X is the bitmask for
- ** a table T, then X-1 is the bitmask for all other tables to the left of T.
- ** Knowing the bitmask for all tables to the left of a left join is
- ** important. Ticket #3015.
+ ** When assigning bitmask values to FROM clause cursors, it must be
+ ** the case that if X is the bitmask for the N-th FROM clause term then
+ ** the bitmask for all FROM clause terms to the left of the N-th term
+ ** is (X-1). An expression from the ON clause of a LEFT JOIN can use
+ ** its Expr.iRightJoinTable value to find the bitmask of the right table
+ ** of the join. Subtracting one from the right table bitmask gives a
+ ** bitmask for all tables to the left of the join. Knowing the bitmask
+ ** for all tables to the left of a left join is important. Ticket #3015.
**
** Note that bitmasks are created for all pTabList->nSrc tables in
** pTabList, not just the first nTabList tables. nTabList is normally
@@ -125498,18 +113876,43 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
*/
for(ii=0; ii<pTabList->nSrc; ii++){
createMask(pMaskSet, pTabList->a[ii].iCursor);
- sqlite3WhereTabFuncArgs(pParse, &pTabList->a[ii], &pWInfo->sWC);
}
-#ifdef SQLITE_DEBUG
- for(ii=0; ii<pTabList->nSrc; ii++){
- Bitmask m = sqlite3WhereGetMask(pMaskSet, pTabList->a[ii].iCursor);
- assert( m==MASKBIT(ii) );
+#ifndef NDEBUG
+ {
+ Bitmask toTheLeft = 0;
+ for(ii=0; ii<pTabList->nSrc; ii++){
+ Bitmask m = getMask(pMaskSet, pTabList->a[ii].iCursor);
+ assert( (m-1)==toTheLeft );
+ toTheLeft |= m;
+ }
}
#endif
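
The #ifndef NDEBUG block restored above checks exactly the invariant described in the comments before it: per those comments, masks are handed out left to right as 1<<N, so for a term with mask X the value X-1 is the union of the masks of every term to its left. A tiny standalone demonstration of that property (plain C, independent of the SQLite types):

/* Illustration only: per-term bitmask assignment and the (X-1) rule. */
#include <assert.h>
#include <stdio.h>

typedef unsigned long long Bitmask;   /* stand-in for SQLite's Bitmask */

int main(void){
  int nSrc = 5;                       /* hypothetical FROM clause with 5 terms */
  Bitmask toTheLeft = 0;
  for(int i=0; i<nSrc; i++){
    Bitmask m = ((Bitmask)1)<<i;      /* the i-th term gets bit 1<<i */
    assert( (m-1)==toTheLeft );       /* the same check as the block above */
    printf("term %d: mask=0x%02llx  everything to its left=0x%02llx\n",
           i, m, m-1);
    toTheLeft |= m;
  }
  return 0;
}
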
- /* Analyze all of the subexpressions. */
- sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC);
- if( db->mallocFailed ) goto whereBeginError;
+ /* Analyze all of the subexpressions. Note that exprAnalyze() might
+ ** add new virtual terms onto the end of the WHERE clause. We do not
+ ** want to analyze these virtual terms, so start analyzing at the end
+ ** and work forward so that the added virtual terms are never processed.
+ */
+ exprAnalyzeAll(pTabList, &pWInfo->sWC);
+ if( db->mallocFailed ){
+ goto whereBeginError;
+ }
+
+ /* If the ORDER BY (or GROUP BY) clause contains references to general
+ ** expressions, then we won't be able to satisfy it using indices, so
+ ** go ahead and disable it now.
+ */
+ if( pOrderBy && (wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){
+ for(ii=0; ii<pOrderBy->nExpr; ii++){
+ Expr *pExpr = sqlite3ExprSkipCollate(pOrderBy->a[ii].pExpr);
+ if( pExpr->op!=TK_COLUMN ){
+ pWInfo->pOrderBy = pOrderBy = 0;
+ break;
+ }else if( pExpr->iColumn<0 ){
+ break;
+ }
+ }
+ }
if( wctrlFlags & WHERE_WANT_DISTINCT ){
if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){
@@ -125523,27 +113926,35 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
}
/* Construct the WhereLoop objects */
- WHERETRACE(0xffff,("*** Optimizer Start *** (wctrlFlags: 0x%x)\n",
- wctrlFlags));
-#if defined(WHERETRACE_ENABLED)
- if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */
+ WHERETRACE(0xffff,("*** Optimizer Start ***\n"));
+ /* Display all terms of the WHERE clause */
+#if defined(WHERETRACE_ENABLED) && defined(SQLITE_ENABLE_TREE_EXPLAIN)
+ if( sqlite3WhereTrace & 0x100 ){
int i;
+ Vdbe *v = pParse->pVdbe;
+ sqlite3ExplainBegin(v);
for(i=0; i<sWLB.pWC->nTerm; i++){
- whereTermPrint(&sWLB.pWC->a[i], i);
+ sqlite3ExplainPrintf(v, "#%-2d ", i);
+ sqlite3ExplainPush(v);
+ whereExplainTerm(v, &sWLB.pWC->a[i]);
+ sqlite3ExplainPop(v);
+ sqlite3ExplainNL(v);
}
+ sqlite3ExplainFinish(v);
+ sqlite3DebugPrintf("%s", sqlite3VdbeExplanation(v));
}
#endif
-
if( nTabList!=1 || whereShortCut(&sWLB)==0 ){
rc = whereLoopAddAll(&sWLB);
if( rc ) goto whereBeginError;
-#ifdef WHERETRACE_ENABLED
- if( sqlite3WhereTrace ){ /* Display all of the WhereLoop objects */
+ /* Display all of the WhereLoop objects if wheretrace is enabled */
+#ifdef WHERETRACE_ENABLED /* !=0 */
+ if( sqlite3WhereTrace ){
WhereLoop *p;
int i;
- static const char zLabel[] = "0123456789abcdefghijklmnopqrstuvwyxz"
- "ABCDEFGHIJKLMNOPQRSTUVWYXZ";
+ static char zLabel[] = "0123456789abcdefghijklmnopqrstuvwyxz"
+ "ABCDEFGHIJKLMNOPQRSTUVWYXZ";
for(p=pWInfo->pLoops, i=0; p; p=p->pNextLoop, i++){
p->cId = zLabel[i%sizeof(zLabel)];
whereLoopPrint(p, sWLB.pWC);
@@ -125564,11 +113975,12 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
if( pParse->nErr || NEVER(db->mallocFailed) ){
goto whereBeginError;
}
-#ifdef WHERETRACE_ENABLED
+#ifdef WHERETRACE_ENABLED /* !=0 */
if( sqlite3WhereTrace ){
+ int ii;
sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut);
- if( pWInfo->nOBSat>0 ){
- sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask);
+ if( pWInfo->bOBSat ){
+ sqlite3DebugPrintf(" ORDERBY=0x%llx", pWInfo->revMask);
}
switch( pWInfo->eDistinct ){
case WHERE_DISTINCT_UNIQUE: {
@@ -125595,14 +114007,12 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
&& pResultSet!=0
&& OptimizationEnabled(db, SQLITE_OmitNoopJoin)
){
- Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pResultSet);
- if( sWLB.pOrderBy ){
- tabUsed |= sqlite3WhereExprListUsage(pMaskSet, sWLB.pOrderBy);
- }
+ Bitmask tabUsed = exprListTableUsage(pMaskSet, pResultSet);
+ if( sWLB.pOrderBy ) tabUsed |= exprListTableUsage(pMaskSet, sWLB.pOrderBy);
while( pWInfo->nLevel>=2 ){
WhereTerm *pTerm, *pEnd;
pLoop = pWInfo->a[pWInfo->nLevel-1].pWLoop;
- if( (pWInfo->pTabList->a[pLoop->iTab].fg.jointype & JT_LEFT)==0 ) break;
+ if( (pWInfo->pTabList->a[pLoop->iTab].jointype & JT_LEFT)==0 ) break;
if( (wctrlFlags & WHERE_WANT_DISTINCT)==0
&& (pLoop->wsFlags & WHERE_ONEROW)==0
){
@@ -125629,25 +114039,21 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* If the caller is an UPDATE or DELETE statement that is requesting
** to use a one-pass algorithm, determine if this is appropriate.
** The one-pass algorithm only works if the WHERE clause constrains
- ** the statement to update or delete a single row.
+ ** the statement to update a single row.
*/
assert( (wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || pWInfo->nLevel==1 );
- if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 ){
- int wsFlags = pWInfo->a[0].pWLoop->wsFlags;
- int bOnerow = (wsFlags & WHERE_ONEROW)!=0;
- if( bOnerow || ( (wctrlFlags & WHERE_ONEPASS_MULTIROW)
- && 0==(wsFlags & WHERE_VIRTUALTABLE)
- )){
- pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI;
- if( HasRowid(pTabList->a[0].pTab) ){
- pWInfo->a[0].pWLoop->wsFlags &= ~WHERE_IDX_ONLY;
- }
+ if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0
+ && (pWInfo->a[0].pWLoop->wsFlags & WHERE_ONEROW)!=0 ){
+ pWInfo->okOnePass = 1;
+ if( HasRowid(pTabList->a[0].pTab) ){
+ pWInfo->a[0].pWLoop->wsFlags &= ~WHERE_IDX_ONLY;
}
}
/* Open all tables in the pTabList and any indices selected for
** searching those tables.
*/
+ notReady = ~(Bitmask)0;
for(ii=0, pLevel=pWInfo->a; ii<nTabList; ii++, pLevel++){
Table *pTab; /* Table to open */
int iDb; /* Index of database containing table/index */
@@ -125672,15 +114078,15 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
&& (wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 ){
int op = OP_OpenRead;
- if( pWInfo->eOnePass!=ONEPASS_OFF ){
+ if( pWInfo->okOnePass ){
op = OP_OpenWrite;
pWInfo->aiCurOnePass[0] = pTabItem->iCursor;
};
sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op);
assert( pTabItem->iCursor==pLevel->iTabCur );
- testcase( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol==BMS-1 );
- testcase( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol==BMS );
- if( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol<BMS && HasRowid(pTab) ){
+ testcase( !pWInfo->okOnePass && pTab->nCol==BMS-1 );
+ testcase( !pWInfo->okOnePass && pTab->nCol==BMS );
+ if( !pWInfo->okOnePass && pTab->nCol<BMS && HasRowid(pTab) ){
Bitmask b = pTabItem->colUsed;
int n = 0;
for(; b; b=b>>1, n++){}
@@ -125688,10 +114094,6 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
SQLITE_INT_TO_PTR(n), P4_INT32);
assert( n<=pTab->nCol );
}
-#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
- sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, pTabItem->iCursor, 0, 0,
- (const u8*)&pTabItem->colUsed, P4_INT64);
-#endif
}else{
sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
}
@@ -125701,14 +114103,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
int op = OP_OpenRead;
      /* iIdxCur is always set to a positive value if ONEPASS is possible */
assert( iIdxCur!=0 || (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 );
- if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIx)
- && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0
- ){
- /* This is one term of an OR-optimization using the PRIMARY KEY of a
- ** WITHOUT ROWID table. No need for a separate index */
- iIndexCur = pLevel->iTabCur;
- op = 0;
- }else if( pWInfo->eOnePass!=ONEPASS_OFF ){
+ if( pWInfo->okOnePass ){
Index *pJ = pTabItem->pTab->pIndex;
iIndexCur = iIdxCur;
assert( wctrlFlags & WHERE_ONEPASS_DESIRED );
@@ -125720,41 +114115,18 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
pWInfo->aiCurOnePass[1] = iIndexCur;
}else if( iIdxCur && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ){
iIndexCur = iIdxCur;
- if( wctrlFlags & WHERE_REOPEN_IDX ) op = OP_ReopenIdx;
}else{
iIndexCur = pParse->nTab++;
}
pLevel->iIdxCur = iIndexCur;
assert( pIx->pSchema==pTab->pSchema );
assert( iIndexCur>=0 );
- if( op ){
- sqlite3VdbeAddOp3(v, op, iIndexCur, pIx->tnum, iDb);
- sqlite3VdbeSetP4KeyInfo(pParse, pIx);
- if( (pLoop->wsFlags & WHERE_CONSTRAINT)!=0
- && (pLoop->wsFlags & (WHERE_COLUMN_RANGE|WHERE_SKIPSCAN))==0
- && (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0
- ){
- sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ); /* Hint to COMDB2 */
- }
- VdbeComment((v, "%s", pIx->zName));
-#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
- {
- u64 colUsed = 0;
- int ii, jj;
- for(ii=0; ii<pIx->nColumn; ii++){
- jj = pIx->aiColumn[ii];
- if( jj<0 ) continue;
- if( jj>63 ) jj = 63;
- if( (pTabItem->colUsed & MASKBIT(jj))==0 ) continue;
- colUsed |= ((u64)1)<<(ii<63 ? ii : 63);
- }
- sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, iIndexCur, 0, 0,
- (u8*)&colUsed, P4_INT64);
- }
-#endif /* SQLITE_ENABLE_COLUMN_USED_MASK */
- }
+ sqlite3VdbeAddOp3(v, op, iIndexCur, pIx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIx);
+ VdbeComment((v, "%s", pIx->zName));
}
- if( iDb>=0 ) sqlite3CodeVerifySchema(pParse, iDb);
+ sqlite3CodeVerifySchema(pParse, iDb);
+ notReady &= ~getMask(&pWInfo->sMaskSet, pTabItem->iCursor);
}
pWInfo->iTop = sqlite3VdbeCurrentAddr(v);
if( db->mallocFailed ) goto whereBeginError;
@@ -125765,10 +114137,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
*/
notReady = ~(Bitmask)0;
for(ii=0; ii<nTabList; ii++){
- int addrExplain;
- int wsFlags;
pLevel = &pWInfo->a[ii];
- wsFlags = pLevel->pWLoop->wsFlags;
#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
if( (pLevel->pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 ){
constructAutomaticIndex(pParse, &pWInfo->sWC,
@@ -125776,15 +114145,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
if( db->mallocFailed ) goto whereBeginError;
}
#endif
- addrExplain = sqlite3WhereExplainOneScan(
- pParse, pTabList, pLevel, ii, pLevel->iFrom, wctrlFlags
- );
+ explainOneScan(pParse, pTabList, pLevel, ii, pLevel->iFrom, wctrlFlags);
pLevel->addrBody = sqlite3VdbeCurrentAddr(v);
- notReady = sqlite3WhereCodeOneLoopStart(pWInfo, ii, notReady);
+ notReady = codeOneLoopStart(pWInfo, ii, notReady);
pWInfo->iContinue = pLevel->addrCont;
- if( (wsFlags&WHERE_MULTI_OR)==0 && (wctrlFlags&WHERE_ONETABLE_ONLY)==0 ){
- sqlite3WhereAddScanStatus(v, pTabList, pLevel, addrExplain);
- }
}
/* Done. */
@@ -125823,12 +114187,8 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
pLoop = pLevel->pWLoop;
sqlite3VdbeResolveLabel(v, pLevel->addrCont);
if( pLevel->op!=OP_Noop ){
- sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3);
+ sqlite3VdbeAddOp2(v, pLevel->op, pLevel->p1, pLevel->p2);
sqlite3VdbeChangeP5(v, pLevel->p5);
- VdbeCoverage(v);
- VdbeCoverageIf(v, pLevel->op==OP_Next);
- VdbeCoverageIf(v, pLevel->op==OP_Prev);
- VdbeCoverageIf(v, pLevel->op==OP_VNext);
}
if( pLoop->wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){
struct InLoop *pIn;
@@ -125837,31 +114197,19 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){
sqlite3VdbeJumpHere(v, pIn->addrInTop+1);
sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop);
- VdbeCoverage(v);
- VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen);
- VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen);
sqlite3VdbeJumpHere(v, pIn->addrInTop-1);
}
+ sqlite3DbFree(db, pLevel->u.in.aInLoop);
}
sqlite3VdbeResolveLabel(v, pLevel->addrBrk);
if( pLevel->addrSkip ){
- sqlite3VdbeGoto(v, pLevel->addrSkip);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrSkip);
VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName));
sqlite3VdbeJumpHere(v, pLevel->addrSkip);
sqlite3VdbeJumpHere(v, pLevel->addrSkip-2);
}
- if( pLevel->addrLikeRep ){
- int op;
- if( sqlite3VdbeGetOp(v, pLevel->addrLikeRep-1)->p1 ){
- op = OP_DecrJumpZero;
- }else{
- op = OP_JumpZeroIncr;
- }
- sqlite3VdbeAddOp2(v, op, pLevel->iLikeRepCntr, pLevel->addrLikeRep);
- VdbeCoverage(v);
- }
if( pLevel->iLeftJoin ){
- addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v);
+ addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin);
assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
|| (pLoop->wsFlags & WHERE_INDEXED)!=0 );
if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 ){
@@ -125873,7 +114221,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
if( pLevel->op==OP_Return ){
sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst);
}else{
- sqlite3VdbeGoto(v, pLevel->addrFirst);
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrFirst);
}
sqlite3VdbeJumpHere(v, addr);
}
@@ -125888,24 +114236,12 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
assert( pWInfo->nLevel<=pTabList->nSrc );
for(i=0, pLevel=pWInfo->a; i<pWInfo->nLevel; i++, pLevel++){
- int k, last;
- VdbeOp *pOp;
Index *pIdx = 0;
struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom];
Table *pTab = pTabItem->pTab;
assert( pTab!=0 );
pLoop = pLevel->pWLoop;
- /* For a co-routine, change all OP_Column references to the table of
- ** the co-routine into OP_Copy of result contained in a register.
- ** OP_Rowid becomes OP_Null.
- */
- if( pTabItem->fg.viaCoroutine && !db->mallocFailed ){
- translateColumnToCopy(v, pLevel->addrBody, pLevel->iTabCur,
- pTabItem->regResult, 0);
- continue;
- }
-
/* Close all of the cursors that were opened by sqlite3WhereBegin.
** Except, do not close cursors that will be reused by the OR optimization
** (WHERE_OMIT_OPEN_CLOSE). And do not close the OP_OpenWrite cursors
@@ -125916,7 +114252,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
&& (pWInfo->wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0
){
int ws = pLoop->wsFlags;
- if( pWInfo->eOnePass==ONEPASS_OFF && (ws & WHERE_IDX_ONLY)==0 ){
+ if( !pWInfo->okOnePass && (ws & WHERE_IDX_ONLY)==0 ){
sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor);
}
if( (ws & WHERE_INDEXED)!=0
@@ -125943,10 +114279,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}else if( pLoop->wsFlags & WHERE_MULTI_OR ){
pIdx = pLevel->u.pCovidx;
}
- if( pIdx
- && (pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable))
- && !db->mallocFailed
- ){
+ if( pIdx && !db->mallocFailed ){
+ int k, last;
+ VdbeOp *pOp;
+
last = sqlite3VdbeCurrentAddr(v);
k = pLevel->addrBody;
pOp = sqlite3VdbeGetOp(v, k);
@@ -125958,7 +114294,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
if( !HasRowid(pTab) ){
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
x = pPk->aiColumn[x];
- assert( x>=0 );
}
x = sqlite3ColumnOfIndex(pIdx, x);
if( x>=0 ){
@@ -125996,7 +114331,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
** in the input grammar file. */
/* #include <stdio.h> */
-/* #include "sqliteInt.h" */
/*
** Disable all error recovery processing in the parser push-down
@@ -126043,28 +114377,14 @@ struct TrigEvent { int a; IdList * b; };
*/
struct AttachKey { int type; Token key; };
+/*
+** One or more VALUES clauses
+*/
+struct ValueList {
+ ExprList *pList;
+ Select *pSelect;
+};
- /*
- ** For a compound SELECT statement, make sure p->pPrior->pNext==p for
- ** all elements in the list. And make sure list length does not exceed
- ** SQLITE_LIMIT_COMPOUND_SELECT.
- */
- static void parserDoubleLinkSelect(Parse *pParse, Select *p){
- if( p->pPrior ){
- Select *pNext = 0, *pLoop;
- int mxSelect, cnt = 0;
- for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){
- pLoop->pNext = pNext;
- pLoop->selFlags |= SF_Compound;
- }
- if( (p->selFlags & SF_MultiValue)==0 &&
- (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 &&
- cnt>mxSelect
- ){
- sqlite3ErrorMsg(pParse, "too many terms in compound SELECT");
- }
- }
- }
/* This is a utility routine used to set the ExprSpan.zStart and
** ExprSpan.zEnd values of pOut so that the span covers the complete
@@ -126118,7 +114438,7 @@ struct AttachKey { int type; Token key; };
** unary TK_ISNULL or TK_NOTNULL expression. */
static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){
sqlite3 *db = pParse->db;
- if( pY && pA && pY->op==TK_NULL ){
+ if( db->mallocFailed==0 && pY->op==TK_NULL ){
pA->op = (u8)op;
sqlite3ExprDelete(db, pA->pRight);
pA->pRight = 0;
@@ -126138,29 +114458,6 @@ struct AttachKey { int type; Token key; };
pOut->zStart = pPreOp->z;
pOut->zEnd = pOperand->zEnd;
}
-
- /* Add a single new term to an ExprList that is used to store a
- ** list of identifiers. Report an error if the ID list contains
- ** a COLLATE clause or an ASC or DESC keyword, except ignore the
- ** error while parsing a legacy schema.
- */
- static ExprList *parserAddExprIdListTerm(
- Parse *pParse,
- ExprList *pPrior,
- Token *pIdToken,
- int hasCollate,
- int sortOrder
- ){
- ExprList *p = sqlite3ExprListAppend(pParse, pPrior, 0);
- if( (hasCollate || sortOrder!=SQLITE_SO_UNDEFINED)
- && pParse->db->init.busy==0
- ){
- sqlite3ErrorMsg(pParse, "syntax error after column name \"%.*s\"",
- pIdToken->n, pIdToken->z);
- }
- sqlite3ExprListSetName(pParse, p, pIdToken, 1);
- return p;
- }
/* Next is all token values, in a form suitable for use by makeheaders.
** This section will be null unless lemon is run with the -m switch.
*/
@@ -126205,41 +114502,34 @@ struct AttachKey { int type; Token key; };
** sqlite3ParserARG_PDECL A parameter declaration for the %extra_argument
** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser
** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser
-** YYERRORSYMBOL is the code number of the error symbol. If not
-** defined, then do no error processing.
** YYNSTATE the combined number of states.
** YYNRULE the number of rules in the grammar
-** YY_MAX_SHIFT Maximum value for shift actions
-** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
-** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
-** YY_MIN_REDUCE Maximum value for reduce actions
-** YY_ERROR_ACTION The yy_action[] code for syntax error
-** YY_ACCEPT_ACTION The yy_action[] code for accept
-** YY_NO_ACTION The yy_action[] code for no-op
+** YYERRORSYMBOL is the code number of the error symbol. If not
+** defined, then do no error processing.
*/
#define YYCODETYPE unsigned char
-#define YYNOCODE 254
+#define YYNOCODE 253
#define YYACTIONTYPE unsigned short int
-#define YYWILDCARD 70
+#define YYWILDCARD 68
#define sqlite3ParserTOKENTYPE Token
typedef union {
int yyinit;
sqlite3ParserTOKENTYPE yy0;
- Select* yy3;
- ExprList* yy14;
- With* yy59;
- SrcList* yy65;
- struct LikeOp yy96;
- Expr* yy132;
- u8 yy186;
- int yy328;
- ExprSpan yy346;
- struct TrigEvent yy378;
- u16 yy381;
- IdList* yy408;
- struct {int value; int mask;} yy429;
- TriggerStep* yy473;
- struct LimitVal yy476;
+ int yy4;
+ struct TrigEvent yy90;
+ ExprSpan yy118;
+ u16 yy177;
+ TriggerStep* yy203;
+ u8 yy210;
+ struct {int value; int mask;} yy215;
+ SrcList* yy259;
+ struct ValueList yy260;
+ struct LimitVal yy292;
+ Expr* yy314;
+ ExprList* yy322;
+ struct LikeOp yy342;
+ IdList* yy384;
+ Select* yy387;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -126248,17 +114538,12 @@ typedef union {
#define sqlite3ParserARG_PDECL ,Parse *pParse
#define sqlite3ParserARG_FETCH Parse *pParse = yypParser->pParse
#define sqlite3ParserARG_STORE yypParser->pParse = pParse
+#define YYNSTATE 631
+#define YYNRULE 329
#define YYFALLBACK 1
-#define YYNSTATE 436
-#define YYNRULE 328
-#define YY_MAX_SHIFT 435
-#define YY_MIN_SHIFTREDUCE 649
-#define YY_MAX_SHIFTREDUCE 976
-#define YY_MIN_REDUCE 977
-#define YY_MAX_REDUCE 1304
-#define YY_ERROR_ACTION 1305
-#define YY_ACCEPT_ACTION 1306
-#define YY_NO_ACTION 1307
+#define YY_NO_ACTION (YYNSTATE+YYNRULE+2)
+#define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1)
+#define YY_ERROR_ACTION (YYNSTATE+YYNRULE)
/* The yyzerominor constant is used to initialize instances of
** YYMINORTYPE objects to zero. */
@@ -126285,20 +114570,16 @@ static const YYMINORTYPE yyzerominor = { 0 };
** Suppose the action integer is N. Then the action is determined as
** follows
**
-** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead
+** 0 <= N < YYNSTATE Shift N. That is, push the lookahead
** token onto the stack and goto state N.
**
-** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
-** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE.
+** YYNSTATE <= N < YYNSTATE+YYNRULE Reduce by rule N-YYNSTATE.
**
-** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE
-** and YY_MAX_REDUCE
-
-** N == YY_ERROR_ACTION A syntax error has occurred.
+** N == YYNSTATE+YYNRULE A syntax error has occurred.
**
-** N == YY_ACCEPT_ACTION The parser accepts its input.
+** N == YYNSTATE+YYNRULE+1 The parser accepts its input.
**
-** N == YY_NO_ACTION No such action. Denotes unused
+** N == YYNSTATE+YYNRULE+2 No such action. Denotes unused
** slots in the yy_action[] table.
**
** The action table is constructed as a single large table named yy_action[].
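[Editorial aside, not part of the diff.] The comment above spells out how a raw yy_action[] value N is decoded once the SHIFTREDUCE encoding is backed out. Below is a minimal, self-contained C sketch of that classification, using the YYNSTATE and YYNRULE values from the restored #defines (631 and 329); the helper name actionKind is hypothetical and exists only for illustration.

#include <stdio.h>

#define YYNSTATE 631    /* value restored by this patch */
#define YYNRULE  329    /* value restored by this patch */

/* Classify one yy_action[] entry per the scheme described above. */
static const char *actionKind(int N){
  if( N < YYNSTATE )            return "shift";        /* push state N              */
  if( N < YYNSTATE+YYNRULE )    return "reduce";       /* reduce by rule N-YYNSTATE */
  if( N == YYNSTATE+YYNRULE )   return "syntax error"; /* YY_ERROR_ACTION           */
  if( N == YYNSTATE+YYNRULE+1 ) return "accept";       /* YY_ACCEPT_ACTION          */
  return "no-op";                                      /* YY_NO_ACTION / padding    */
}

int main(void){
  printf("%d -> %s\n", 312, actionKind(312));  /* a shift entry from the table */
  printf("%d -> %s\n", 961, actionKind(961));  /* YY_ACCEPT_ACTION             */
  printf("%d -> %s\n", 962, actionKind(962));  /* YY_NO_ACTION padding         */
  return 0;
}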
@@ -126328,446 +114609,480 @@ static const YYMINORTYPE yyzerominor = { 0 };
** shifting non-terminals after a reduce.
** yy_default[] Default action for each state.
*/
-#define YY_ACTTAB_COUNT (1501)
+#define YY_ACTTAB_COUNT (1582)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 311, 1306, 145, 651, 2, 192, 652, 338, 780, 92,
- /* 10 */ 92, 92, 92, 85, 90, 90, 90, 90, 89, 89,
- /* 20 */ 88, 88, 88, 87, 335, 88, 88, 88, 87, 335,
- /* 30 */ 327, 856, 856, 92, 92, 92, 92, 776, 90, 90,
- /* 40 */ 90, 90, 89, 89, 88, 88, 88, 87, 335, 86,
- /* 50 */ 83, 166, 93, 94, 84, 868, 871, 860, 860, 91,
- /* 60 */ 91, 92, 92, 92, 92, 335, 90, 90, 90, 90,
- /* 70 */ 89, 89, 88, 88, 88, 87, 335, 311, 780, 90,
- /* 80 */ 90, 90, 90, 89, 89, 88, 88, 88, 87, 335,
- /* 90 */ 123, 808, 689, 689, 689, 689, 112, 230, 430, 257,
- /* 100 */ 809, 698, 430, 86, 83, 166, 324, 55, 856, 856,
- /* 110 */ 201, 158, 276, 387, 271, 386, 188, 689, 689, 828,
- /* 120 */ 833, 49, 944, 269, 833, 49, 123, 87, 335, 93,
- /* 130 */ 94, 84, 868, 871, 860, 860, 91, 91, 92, 92,
- /* 140 */ 92, 92, 342, 90, 90, 90, 90, 89, 89, 88,
- /* 150 */ 88, 88, 87, 335, 311, 328, 333, 332, 701, 408,
- /* 160 */ 394, 69, 690, 691, 690, 691, 715, 910, 251, 354,
- /* 170 */ 250, 698, 704, 430, 908, 430, 909, 89, 89, 88,
- /* 180 */ 88, 88, 87, 335, 391, 856, 856, 690, 691, 183,
- /* 190 */ 95, 340, 384, 381, 380, 833, 31, 833, 49, 912,
- /* 200 */ 912, 333, 332, 379, 123, 311, 93, 94, 84, 868,
- /* 210 */ 871, 860, 860, 91, 91, 92, 92, 92, 92, 114,
- /* 220 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87,
- /* 230 */ 335, 430, 408, 399, 435, 657, 856, 856, 346, 57,
- /* 240 */ 232, 828, 109, 20, 912, 912, 231, 393, 937, 760,
- /* 250 */ 97, 751, 752, 833, 49, 708, 708, 93, 94, 84,
- /* 260 */ 868, 871, 860, 860, 91, 91, 92, 92, 92, 92,
- /* 270 */ 707, 90, 90, 90, 90, 89, 89, 88, 88, 88,
- /* 280 */ 87, 335, 311, 114, 22, 706, 688, 58, 408, 390,
- /* 290 */ 251, 349, 240, 749, 752, 689, 689, 847, 685, 115,
- /* 300 */ 21, 231, 393, 689, 689, 697, 183, 355, 430, 384,
- /* 310 */ 381, 380, 192, 856, 856, 780, 123, 160, 159, 223,
- /* 320 */ 379, 738, 25, 315, 362, 841, 143, 689, 689, 835,
- /* 330 */ 833, 48, 339, 937, 93, 94, 84, 868, 871, 860,
- /* 340 */ 860, 91, 91, 92, 92, 92, 92, 914, 90, 90,
- /* 350 */ 90, 90, 89, 89, 88, 88, 88, 87, 335, 311,
- /* 360 */ 840, 840, 840, 266, 430, 690, 691, 778, 114, 1300,
- /* 370 */ 1300, 430, 1, 690, 691, 697, 688, 689, 689, 689,
- /* 380 */ 689, 689, 689, 287, 298, 780, 833, 10, 686, 115,
- /* 390 */ 856, 856, 355, 833, 10, 828, 366, 690, 691, 363,
- /* 400 */ 321, 76, 123, 74, 23, 737, 807, 323, 356, 353,
- /* 410 */ 847, 93, 94, 84, 868, 871, 860, 860, 91, 91,
- /* 420 */ 92, 92, 92, 92, 940, 90, 90, 90, 90, 89,
- /* 430 */ 89, 88, 88, 88, 87, 335, 311, 806, 841, 429,
- /* 440 */ 713, 941, 835, 430, 251, 354, 250, 690, 691, 690,
- /* 450 */ 691, 690, 691, 86, 83, 166, 24, 942, 151, 753,
- /* 460 */ 285, 907, 403, 907, 164, 833, 10, 856, 856, 965,
- /* 470 */ 306, 754, 679, 840, 840, 840, 795, 216, 794, 222,
- /* 480 */ 906, 344, 906, 904, 86, 83, 166, 286, 93, 94,
- /* 490 */ 84, 868, 871, 860, 860, 91, 91, 92, 92, 92,
- /* 500 */ 92, 430, 90, 90, 90, 90, 89, 89, 88, 88,
- /* 510 */ 88, 87, 335, 311, 430, 724, 352, 705, 427, 699,
- /* 520 */ 700, 376, 210, 833, 49, 793, 397, 857, 857, 940,
- /* 530 */ 213, 762, 727, 334, 699, 700, 833, 10, 86, 83,
- /* 540 */ 166, 345, 396, 902, 856, 856, 941, 385, 833, 9,
- /* 550 */ 406, 869, 872, 187, 890, 728, 347, 398, 404, 977,
- /* 560 */ 652, 338, 942, 954, 413, 93, 94, 84, 868, 871,
- /* 570 */ 860, 860, 91, 91, 92, 92, 92, 92, 861, 90,
- /* 580 */ 90, 90, 90, 89, 89, 88, 88, 88, 87, 335,
- /* 590 */ 311, 1219, 114, 430, 834, 430, 5, 165, 192, 688,
- /* 600 */ 832, 780, 430, 723, 430, 234, 325, 189, 163, 316,
- /* 610 */ 356, 955, 115, 235, 269, 833, 35, 833, 36, 747,
- /* 620 */ 720, 856, 856, 793, 833, 12, 833, 27, 745, 174,
- /* 630 */ 968, 1290, 968, 1291, 1290, 310, 1291, 693, 317, 245,
- /* 640 */ 264, 311, 93, 94, 84, 868, 871, 860, 860, 91,
- /* 650 */ 91, 92, 92, 92, 92, 832, 90, 90, 90, 90,
- /* 660 */ 89, 89, 88, 88, 88, 87, 335, 430, 320, 213,
- /* 670 */ 762, 780, 856, 856, 920, 920, 369, 257, 966, 220,
- /* 680 */ 966, 396, 663, 664, 665, 242, 259, 244, 262, 833,
- /* 690 */ 37, 650, 2, 93, 94, 84, 868, 871, 860, 860,
- /* 700 */ 91, 91, 92, 92, 92, 92, 430, 90, 90, 90,
- /* 710 */ 90, 89, 89, 88, 88, 88, 87, 335, 311, 430,
- /* 720 */ 239, 430, 917, 368, 430, 238, 916, 793, 833, 38,
- /* 730 */ 430, 825, 430, 66, 430, 392, 430, 766, 766, 430,
- /* 740 */ 367, 833, 39, 833, 28, 430, 833, 29, 68, 856,
- /* 750 */ 856, 900, 833, 40, 833, 41, 833, 42, 833, 11,
- /* 760 */ 72, 833, 43, 243, 305, 970, 114, 833, 99, 961,
- /* 770 */ 93, 94, 84, 868, 871, 860, 860, 91, 91, 92,
- /* 780 */ 92, 92, 92, 430, 90, 90, 90, 90, 89, 89,
- /* 790 */ 88, 88, 88, 87, 335, 311, 430, 361, 430, 165,
- /* 800 */ 147, 430, 186, 185, 184, 833, 44, 430, 289, 430,
- /* 810 */ 246, 430, 971, 430, 212, 163, 430, 357, 833, 45,
- /* 820 */ 833, 32, 932, 833, 46, 793, 856, 856, 718, 833,
- /* 830 */ 47, 833, 33, 833, 117, 833, 118, 75, 833, 119,
- /* 840 */ 288, 305, 967, 214, 935, 322, 311, 93, 94, 84,
- /* 850 */ 868, 871, 860, 860, 91, 91, 92, 92, 92, 92,
- /* 860 */ 430, 90, 90, 90, 90, 89, 89, 88, 88, 88,
- /* 870 */ 87, 335, 430, 832, 426, 317, 288, 856, 856, 114,
- /* 880 */ 763, 257, 833, 53, 930, 219, 364, 257, 257, 971,
- /* 890 */ 361, 396, 257, 257, 833, 34, 257, 311, 93, 94,
- /* 900 */ 84, 868, 871, 860, 860, 91, 91, 92, 92, 92,
- /* 910 */ 92, 430, 90, 90, 90, 90, 89, 89, 88, 88,
- /* 920 */ 88, 87, 335, 430, 217, 318, 124, 253, 856, 856,
- /* 930 */ 218, 943, 257, 833, 100, 898, 759, 774, 361, 755,
- /* 940 */ 423, 329, 758, 1017, 289, 833, 50, 682, 311, 93,
- /* 950 */ 82, 84, 868, 871, 860, 860, 91, 91, 92, 92,
- /* 960 */ 92, 92, 430, 90, 90, 90, 90, 89, 89, 88,
- /* 970 */ 88, 88, 87, 335, 430, 256, 419, 114, 249, 856,
- /* 980 */ 856, 331, 114, 400, 833, 101, 359, 187, 1064, 726,
- /* 990 */ 725, 739, 401, 416, 420, 360, 833, 102, 424, 311,
- /* 1000 */ 258, 94, 84, 868, 871, 860, 860, 91, 91, 92,
- /* 1010 */ 92, 92, 92, 430, 90, 90, 90, 90, 89, 89,
- /* 1020 */ 88, 88, 88, 87, 335, 430, 221, 261, 114, 114,
- /* 1030 */ 856, 856, 808, 114, 156, 833, 98, 772, 733, 734,
- /* 1040 */ 275, 809, 771, 316, 263, 265, 960, 833, 116, 307,
- /* 1050 */ 741, 274, 722, 84, 868, 871, 860, 860, 91, 91,
- /* 1060 */ 92, 92, 92, 92, 430, 90, 90, 90, 90, 89,
- /* 1070 */ 89, 88, 88, 88, 87, 335, 80, 425, 830, 3,
- /* 1080 */ 1214, 191, 430, 721, 336, 336, 833, 113, 252, 80,
- /* 1090 */ 425, 68, 3, 913, 913, 428, 270, 336, 336, 430,
- /* 1100 */ 377, 784, 430, 197, 833, 106, 430, 716, 428, 430,
- /* 1110 */ 267, 430, 897, 68, 414, 430, 769, 409, 430, 71,
- /* 1120 */ 430, 833, 105, 123, 833, 103, 847, 414, 833, 49,
- /* 1130 */ 843, 833, 104, 833, 52, 800, 123, 833, 54, 847,
- /* 1140 */ 833, 51, 833, 26, 831, 802, 77, 78, 191, 389,
- /* 1150 */ 430, 372, 114, 79, 432, 431, 911, 911, 835, 77,
- /* 1160 */ 78, 779, 893, 408, 410, 197, 79, 432, 431, 791,
- /* 1170 */ 226, 835, 833, 30, 772, 80, 425, 716, 3, 771,
- /* 1180 */ 411, 412, 897, 336, 336, 290, 291, 839, 703, 840,
- /* 1190 */ 840, 840, 842, 19, 428, 695, 684, 672, 111, 671,
- /* 1200 */ 843, 673, 840, 840, 840, 842, 19, 207, 661, 278,
- /* 1210 */ 148, 304, 280, 414, 282, 6, 822, 348, 248, 241,
- /* 1220 */ 358, 934, 720, 80, 425, 847, 3, 161, 382, 273,
- /* 1230 */ 284, 336, 336, 415, 296, 958, 895, 894, 157, 674,
- /* 1240 */ 107, 194, 428, 948, 135, 77, 78, 777, 953, 951,
- /* 1250 */ 56, 319, 79, 432, 431, 121, 66, 835, 59, 128,
- /* 1260 */ 146, 414, 350, 130, 351, 819, 131, 132, 133, 375,
- /* 1270 */ 173, 149, 138, 847, 936, 365, 178, 70, 425, 827,
- /* 1280 */ 3, 889, 62, 371, 915, 336, 336, 792, 840, 840,
- /* 1290 */ 840, 842, 19, 77, 78, 208, 428, 144, 179, 373,
- /* 1300 */ 79, 432, 431, 255, 180, 835, 260, 675, 181, 308,
- /* 1310 */ 388, 744, 326, 743, 742, 414, 731, 718, 712, 402,
- /* 1320 */ 309, 711, 788, 65, 277, 272, 789, 847, 730, 710,
- /* 1330 */ 709, 279, 193, 787, 281, 876, 840, 840, 840, 842,
- /* 1340 */ 19, 786, 283, 73, 418, 330, 422, 77, 78, 227,
- /* 1350 */ 96, 407, 67, 405, 79, 432, 431, 292, 228, 835,
- /* 1360 */ 215, 202, 229, 293, 767, 303, 302, 301, 204, 299,
- /* 1370 */ 294, 295, 676, 7, 681, 433, 669, 206, 110, 224,
- /* 1380 */ 203, 205, 434, 667, 666, 658, 120, 168, 656, 237,
- /* 1390 */ 840, 840, 840, 842, 19, 337, 155, 233, 236, 341,
- /* 1400 */ 167, 905, 108, 313, 903, 826, 314, 125, 126, 127,
- /* 1410 */ 129, 170, 247, 756, 172, 928, 134, 136, 171, 60,
- /* 1420 */ 61, 123, 169, 137, 175, 933, 176, 927, 8, 13,
- /* 1430 */ 177, 254, 191, 918, 139, 370, 924, 140, 678, 150,
- /* 1440 */ 374, 274, 182, 378, 141, 122, 63, 14, 383, 729,
- /* 1450 */ 268, 15, 64, 225, 846, 845, 874, 16, 765, 770,
- /* 1460 */ 4, 162, 209, 395, 211, 142, 878, 796, 801, 312,
- /* 1470 */ 190, 71, 68, 875, 873, 939, 199, 938, 17, 195,
- /* 1480 */ 18, 196, 417, 975, 152, 653, 976, 198, 153, 421,
- /* 1490 */ 877, 154, 200, 844, 696, 81, 343, 297, 1019, 1018,
- /* 1500 */ 300,
+ /* 0 */ 312, 961, 185, 420, 2, 171, 516, 515, 597, 56,
+ /* 10 */ 56, 56, 56, 49, 54, 54, 54, 54, 53, 53,
+ /* 20 */ 52, 52, 52, 51, 234, 197, 196, 195, 624, 623,
+ /* 30 */ 301, 590, 584, 56, 56, 56, 56, 156, 54, 54,
+ /* 40 */ 54, 54, 53, 53, 52, 52, 52, 51, 234, 628,
+ /* 50 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 60 */ 56, 56, 56, 466, 54, 54, 54, 54, 53, 53,
+ /* 70 */ 52, 52, 52, 51, 234, 312, 597, 52, 52, 52,
+ /* 80 */ 51, 234, 33, 54, 54, 54, 54, 53, 53, 52,
+ /* 90 */ 52, 52, 51, 234, 624, 623, 621, 620, 165, 624,
+ /* 100 */ 623, 383, 380, 379, 214, 328, 590, 584, 624, 623,
+ /* 110 */ 467, 59, 378, 619, 618, 617, 53, 53, 52, 52,
+ /* 120 */ 52, 51, 234, 506, 507, 57, 58, 48, 582, 581,
+ /* 130 */ 583, 583, 55, 55, 56, 56, 56, 56, 30, 54,
+ /* 140 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 150 */ 312, 50, 47, 146, 233, 232, 207, 474, 256, 349,
+ /* 160 */ 255, 475, 621, 620, 554, 438, 298, 621, 620, 236,
+ /* 170 */ 674, 435, 440, 553, 439, 366, 621, 620, 540, 224,
+ /* 180 */ 551, 590, 584, 176, 138, 282, 386, 277, 385, 168,
+ /* 190 */ 600, 422, 951, 548, 622, 951, 273, 572, 572, 566,
+ /* 200 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 210 */ 56, 56, 56, 354, 54, 54, 54, 54, 53, 53,
+ /* 220 */ 52, 52, 52, 51, 234, 312, 561, 526, 62, 675,
+ /* 230 */ 132, 595, 410, 348, 579, 579, 492, 426, 577, 419,
+ /* 240 */ 627, 65, 329, 560, 441, 237, 676, 123, 607, 67,
+ /* 250 */ 542, 532, 622, 170, 205, 500, 590, 584, 166, 559,
+ /* 260 */ 622, 403, 593, 593, 593, 442, 443, 271, 422, 950,
+ /* 270 */ 166, 223, 950, 483, 190, 57, 58, 48, 582, 581,
+ /* 280 */ 583, 583, 55, 55, 56, 56, 56, 56, 600, 54,
+ /* 290 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 300 */ 312, 441, 412, 376, 175, 165, 166, 391, 383, 380,
+ /* 310 */ 379, 342, 412, 203, 426, 66, 392, 622, 415, 378,
+ /* 320 */ 597, 166, 442, 338, 444, 571, 601, 74, 415, 624,
+ /* 330 */ 623, 590, 584, 624, 623, 174, 601, 92, 333, 171,
+ /* 340 */ 1, 410, 597, 579, 579, 624, 623, 600, 306, 425,
+ /* 350 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 360 */ 56, 56, 56, 580, 54, 54, 54, 54, 53, 53,
+ /* 370 */ 52, 52, 52, 51, 234, 312, 472, 262, 399, 68,
+ /* 380 */ 412, 339, 571, 389, 624, 623, 578, 602, 597, 589,
+ /* 390 */ 588, 603, 412, 622, 423, 533, 415, 621, 620, 513,
+ /* 400 */ 257, 621, 620, 166, 601, 91, 590, 584, 415, 45,
+ /* 410 */ 597, 586, 585, 621, 620, 250, 601, 92, 39, 347,
+ /* 420 */ 576, 336, 597, 547, 567, 57, 58, 48, 582, 581,
+ /* 430 */ 583, 583, 55, 55, 56, 56, 56, 56, 587, 54,
+ /* 440 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 450 */ 312, 561, 621, 620, 531, 291, 470, 188, 399, 375,
+ /* 460 */ 247, 492, 249, 350, 412, 476, 476, 368, 560, 299,
+ /* 470 */ 334, 412, 281, 482, 67, 565, 410, 622, 579, 579,
+ /* 480 */ 415, 590, 584, 280, 559, 467, 520, 415, 601, 92,
+ /* 490 */ 597, 167, 544, 36, 877, 601, 16, 519, 564, 6,
+ /* 500 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 510 */ 56, 56, 56, 200, 54, 54, 54, 54, 53, 53,
+ /* 520 */ 52, 52, 52, 51, 234, 312, 183, 412, 236, 528,
+ /* 530 */ 395, 535, 358, 256, 349, 255, 397, 412, 248, 182,
+ /* 540 */ 353, 359, 549, 415, 236, 317, 563, 50, 47, 146,
+ /* 550 */ 273, 601, 73, 415, 7, 311, 590, 584, 568, 493,
+ /* 560 */ 213, 601, 92, 233, 232, 410, 173, 579, 579, 330,
+ /* 570 */ 575, 574, 631, 629, 332, 57, 58, 48, 582, 581,
+ /* 580 */ 583, 583, 55, 55, 56, 56, 56, 56, 199, 54,
+ /* 590 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 600 */ 312, 492, 340, 320, 511, 505, 572, 572, 460, 562,
+ /* 610 */ 549, 170, 145, 430, 67, 558, 410, 622, 579, 579,
+ /* 620 */ 384, 236, 600, 412, 408, 575, 574, 504, 572, 572,
+ /* 630 */ 571, 590, 584, 353, 198, 143, 268, 549, 316, 415,
+ /* 640 */ 306, 424, 207, 50, 47, 146, 167, 601, 69, 546,
+ /* 650 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 660 */ 56, 56, 56, 555, 54, 54, 54, 54, 53, 53,
+ /* 670 */ 52, 52, 52, 51, 234, 312, 600, 326, 412, 270,
+ /* 680 */ 145, 264, 274, 266, 459, 571, 423, 35, 412, 568,
+ /* 690 */ 407, 213, 428, 388, 415, 308, 212, 143, 622, 354,
+ /* 700 */ 317, 12, 601, 94, 415, 549, 590, 584, 50, 47,
+ /* 710 */ 146, 365, 601, 97, 552, 362, 318, 147, 602, 361,
+ /* 720 */ 325, 15, 603, 187, 206, 57, 58, 48, 582, 581,
+ /* 730 */ 583, 583, 55, 55, 56, 56, 56, 56, 412, 54,
+ /* 740 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 750 */ 312, 412, 35, 412, 415, 22, 630, 2, 600, 50,
+ /* 760 */ 47, 146, 601, 95, 412, 485, 510, 415, 412, 415,
+ /* 770 */ 412, 11, 235, 486, 412, 601, 104, 601, 103, 19,
+ /* 780 */ 415, 590, 584, 352, 415, 40, 415, 38, 601, 105,
+ /* 790 */ 415, 32, 601, 106, 601, 133, 544, 169, 601, 134,
+ /* 800 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 810 */ 56, 56, 56, 412, 54, 54, 54, 54, 53, 53,
+ /* 820 */ 52, 52, 52, 51, 234, 312, 412, 274, 412, 415,
+ /* 830 */ 412, 274, 274, 274, 201, 230, 721, 601, 98, 484,
+ /* 840 */ 427, 307, 415, 622, 415, 540, 415, 622, 622, 622,
+ /* 850 */ 601, 102, 601, 101, 601, 93, 590, 584, 262, 21,
+ /* 860 */ 129, 622, 522, 521, 554, 222, 469, 521, 600, 324,
+ /* 870 */ 323, 322, 211, 553, 622, 57, 58, 48, 582, 581,
+ /* 880 */ 583, 583, 55, 55, 56, 56, 56, 56, 412, 54,
+ /* 890 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 900 */ 312, 412, 261, 412, 415, 412, 600, 210, 625, 367,
+ /* 910 */ 51, 234, 601, 100, 538, 606, 142, 415, 355, 415,
+ /* 920 */ 412, 415, 412, 496, 622, 601, 77, 601, 96, 601,
+ /* 930 */ 137, 590, 584, 530, 622, 529, 415, 141, 415, 28,
+ /* 940 */ 524, 600, 229, 544, 601, 136, 601, 135, 604, 204,
+ /* 950 */ 57, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 960 */ 56, 56, 56, 412, 54, 54, 54, 54, 53, 53,
+ /* 970 */ 52, 52, 52, 51, 234, 312, 412, 360, 412, 415,
+ /* 980 */ 412, 360, 286, 600, 503, 220, 127, 601, 76, 629,
+ /* 990 */ 332, 382, 415, 622, 415, 540, 415, 622, 412, 613,
+ /* 1000 */ 601, 90, 601, 89, 601, 75, 590, 584, 341, 272,
+ /* 1010 */ 377, 622, 126, 27, 415, 622, 164, 544, 125, 280,
+ /* 1020 */ 373, 122, 601, 88, 480, 57, 46, 48, 582, 581,
+ /* 1030 */ 583, 583, 55, 55, 56, 56, 56, 56, 412, 54,
+ /* 1040 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 1050 */ 312, 412, 360, 412, 415, 412, 284, 186, 369, 321,
+ /* 1060 */ 477, 170, 601, 87, 121, 473, 221, 415, 622, 415,
+ /* 1070 */ 254, 415, 412, 355, 412, 601, 99, 601, 86, 601,
+ /* 1080 */ 17, 590, 584, 259, 612, 120, 159, 158, 415, 622,
+ /* 1090 */ 415, 14, 465, 157, 462, 25, 601, 85, 601, 84,
+ /* 1100 */ 622, 58, 48, 582, 581, 583, 583, 55, 55, 56,
+ /* 1110 */ 56, 56, 56, 412, 54, 54, 54, 54, 53, 53,
+ /* 1120 */ 52, 52, 52, 51, 234, 312, 412, 262, 412, 415,
+ /* 1130 */ 412, 262, 118, 611, 117, 24, 10, 601, 83, 351,
+ /* 1140 */ 216, 219, 415, 622, 415, 608, 415, 622, 412, 622,
+ /* 1150 */ 601, 72, 601, 71, 601, 82, 590, 584, 262, 4,
+ /* 1160 */ 605, 622, 458, 115, 415, 456, 252, 154, 452, 110,
+ /* 1170 */ 108, 453, 601, 81, 622, 451, 622, 48, 582, 581,
+ /* 1180 */ 583, 583, 55, 55, 56, 56, 56, 56, 412, 54,
+ /* 1190 */ 54, 54, 54, 53, 53, 52, 52, 52, 51, 234,
+ /* 1200 */ 44, 406, 450, 3, 415, 412, 262, 107, 416, 623,
+ /* 1210 */ 446, 437, 601, 80, 436, 335, 238, 189, 411, 409,
+ /* 1220 */ 594, 415, 622, 44, 406, 401, 3, 412, 557, 601,
+ /* 1230 */ 70, 416, 623, 412, 622, 149, 622, 421, 404, 64,
+ /* 1240 */ 412, 622, 409, 415, 622, 331, 139, 148, 566, 415,
+ /* 1250 */ 449, 601, 18, 228, 124, 626, 415, 601, 79, 315,
+ /* 1260 */ 181, 404, 412, 545, 601, 78, 262, 541, 41, 42,
+ /* 1270 */ 534, 566, 390, 202, 262, 43, 414, 413, 415, 622,
+ /* 1280 */ 595, 314, 622, 622, 180, 539, 601, 92, 415, 276,
+ /* 1290 */ 622, 41, 42, 509, 616, 615, 601, 9, 43, 414,
+ /* 1300 */ 413, 622, 418, 595, 262, 622, 275, 600, 614, 622,
+ /* 1310 */ 218, 593, 593, 593, 592, 591, 13, 178, 217, 417,
+ /* 1320 */ 622, 236, 622, 44, 406, 490, 3, 269, 399, 267,
+ /* 1330 */ 609, 416, 623, 400, 593, 593, 593, 592, 591, 13,
+ /* 1340 */ 265, 622, 409, 622, 263, 622, 34, 406, 244, 3,
+ /* 1350 */ 258, 363, 464, 463, 416, 623, 622, 356, 251, 8,
+ /* 1360 */ 622, 404, 177, 599, 455, 409, 622, 622, 622, 622,
+ /* 1370 */ 445, 566, 243, 622, 622, 236, 295, 240, 31, 239,
+ /* 1380 */ 622, 431, 30, 396, 404, 290, 622, 294, 622, 293,
+ /* 1390 */ 144, 41, 42, 622, 566, 622, 394, 622, 43, 414,
+ /* 1400 */ 413, 622, 289, 595, 398, 60, 622, 292, 37, 231,
+ /* 1410 */ 598, 172, 622, 29, 41, 42, 393, 523, 622, 556,
+ /* 1420 */ 184, 43, 414, 413, 287, 387, 595, 543, 285, 518,
+ /* 1430 */ 537, 536, 517, 327, 593, 593, 593, 592, 591, 13,
+ /* 1440 */ 215, 283, 278, 514, 513, 304, 303, 302, 179, 300,
+ /* 1450 */ 512, 310, 454, 128, 227, 226, 309, 593, 593, 593,
+ /* 1460 */ 592, 591, 13, 494, 489, 225, 488, 150, 487, 242,
+ /* 1470 */ 163, 61, 374, 481, 162, 161, 624, 623, 241, 372,
+ /* 1480 */ 209, 479, 370, 260, 26, 160, 478, 364, 468, 471,
+ /* 1490 */ 140, 152, 119, 467, 131, 116, 155, 153, 345, 457,
+ /* 1500 */ 151, 346, 130, 114, 113, 112, 111, 448, 319, 23,
+ /* 1510 */ 109, 434, 20, 433, 432, 429, 566, 610, 573, 596,
+ /* 1520 */ 63, 405, 191, 279, 510, 296, 498, 288, 570, 495,
+ /* 1530 */ 499, 497, 461, 194, 5, 305, 193, 192, 381, 569,
+ /* 1540 */ 357, 256, 344, 245, 526, 246, 253, 313, 595, 343,
+ /* 1550 */ 447, 297, 236, 402, 550, 491, 508, 502, 501, 527,
+ /* 1560 */ 234, 208, 525, 962, 962, 962, 371, 962, 962, 962,
+ /* 1570 */ 962, 962, 962, 962, 962, 337, 962, 962, 962, 593,
+ /* 1580 */ 593, 593,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 19, 144, 145, 146, 147, 24, 1, 2, 27, 80,
- /* 10 */ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
- /* 20 */ 91, 92, 93, 94, 95, 91, 92, 93, 94, 95,
- /* 30 */ 19, 50, 51, 80, 81, 82, 83, 212, 85, 86,
- /* 40 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 224,
- /* 50 */ 225, 226, 71, 72, 73, 74, 75, 76, 77, 78,
- /* 60 */ 79, 80, 81, 82, 83, 95, 85, 86, 87, 88,
- /* 70 */ 89, 90, 91, 92, 93, 94, 95, 19, 97, 85,
- /* 80 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
- /* 90 */ 66, 33, 27, 28, 27, 28, 22, 201, 152, 152,
- /* 100 */ 42, 27, 152, 224, 225, 226, 95, 211, 50, 51,
- /* 110 */ 99, 100, 101, 102, 103, 104, 105, 27, 28, 59,
- /* 120 */ 174, 175, 243, 112, 174, 175, 66, 94, 95, 71,
- /* 130 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
- /* 140 */ 82, 83, 195, 85, 86, 87, 88, 89, 90, 91,
- /* 150 */ 92, 93, 94, 95, 19, 209, 89, 90, 173, 209,
- /* 160 */ 210, 26, 97, 98, 97, 98, 181, 100, 108, 109,
- /* 170 */ 110, 97, 174, 152, 107, 152, 109, 89, 90, 91,
- /* 180 */ 92, 93, 94, 95, 163, 50, 51, 97, 98, 99,
- /* 190 */ 55, 244, 102, 103, 104, 174, 175, 174, 175, 132,
- /* 200 */ 133, 89, 90, 113, 66, 19, 71, 72, 73, 74,
- /* 210 */ 75, 76, 77, 78, 79, 80, 81, 82, 83, 198,
- /* 220 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
- /* 230 */ 95, 152, 209, 210, 148, 149, 50, 51, 100, 53,
- /* 240 */ 154, 59, 156, 22, 132, 133, 119, 120, 163, 163,
- /* 250 */ 22, 192, 193, 174, 175, 27, 28, 71, 72, 73,
- /* 260 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
- /* 270 */ 174, 85, 86, 87, 88, 89, 90, 91, 92, 93,
- /* 280 */ 94, 95, 19, 198, 198, 174, 152, 24, 209, 210,
- /* 290 */ 108, 109, 110, 192, 193, 27, 28, 69, 164, 165,
- /* 300 */ 79, 119, 120, 27, 28, 27, 99, 222, 152, 102,
- /* 310 */ 103, 104, 24, 50, 51, 27, 66, 89, 90, 185,
- /* 320 */ 113, 187, 22, 157, 239, 97, 58, 27, 28, 101,
- /* 330 */ 174, 175, 246, 163, 71, 72, 73, 74, 75, 76,
- /* 340 */ 77, 78, 79, 80, 81, 82, 83, 11, 85, 86,
- /* 350 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 19,
- /* 360 */ 132, 133, 134, 23, 152, 97, 98, 91, 198, 119,
- /* 370 */ 120, 152, 22, 97, 98, 97, 152, 27, 28, 27,
- /* 380 */ 28, 27, 28, 227, 160, 97, 174, 175, 164, 165,
- /* 390 */ 50, 51, 222, 174, 175, 59, 230, 97, 98, 233,
- /* 400 */ 188, 137, 66, 139, 234, 187, 177, 188, 152, 239,
- /* 410 */ 69, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- /* 420 */ 80, 81, 82, 83, 12, 85, 86, 87, 88, 89,
- /* 430 */ 90, 91, 92, 93, 94, 95, 19, 177, 97, 152,
- /* 440 */ 23, 29, 101, 152, 108, 109, 110, 97, 98, 97,
- /* 450 */ 98, 97, 98, 224, 225, 226, 22, 45, 24, 47,
- /* 460 */ 152, 152, 152, 152, 152, 174, 175, 50, 51, 249,
- /* 470 */ 250, 59, 21, 132, 133, 134, 124, 221, 124, 188,
- /* 480 */ 171, 172, 171, 172, 224, 225, 226, 152, 71, 72,
- /* 490 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
- /* 500 */ 83, 152, 85, 86, 87, 88, 89, 90, 91, 92,
- /* 510 */ 93, 94, 95, 19, 152, 183, 65, 23, 170, 171,
- /* 520 */ 172, 19, 23, 174, 175, 26, 152, 50, 51, 12,
- /* 530 */ 196, 197, 37, 170, 171, 172, 174, 175, 224, 225,
- /* 540 */ 226, 232, 208, 232, 50, 51, 29, 52, 174, 175,
- /* 550 */ 188, 74, 75, 51, 103, 60, 222, 163, 209, 0,
- /* 560 */ 1, 2, 45, 152, 47, 71, 72, 73, 74, 75,
- /* 570 */ 76, 77, 78, 79, 80, 81, 82, 83, 101, 85,
- /* 580 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
- /* 590 */ 19, 140, 198, 152, 23, 152, 22, 98, 24, 152,
- /* 600 */ 152, 27, 152, 183, 152, 152, 111, 213, 214, 107,
- /* 610 */ 152, 164, 165, 152, 112, 174, 175, 174, 175, 181,
- /* 620 */ 182, 50, 51, 124, 174, 175, 174, 175, 190, 26,
- /* 630 */ 22, 23, 22, 23, 26, 166, 26, 168, 169, 16,
- /* 640 */ 16, 19, 71, 72, 73, 74, 75, 76, 77, 78,
- /* 650 */ 79, 80, 81, 82, 83, 152, 85, 86, 87, 88,
- /* 660 */ 89, 90, 91, 92, 93, 94, 95, 152, 220, 196,
- /* 670 */ 197, 97, 50, 51, 108, 109, 110, 152, 70, 221,
- /* 680 */ 70, 208, 7, 8, 9, 62, 62, 64, 64, 174,
- /* 690 */ 175, 146, 147, 71, 72, 73, 74, 75, 76, 77,
- /* 700 */ 78, 79, 80, 81, 82, 83, 152, 85, 86, 87,
- /* 710 */ 88, 89, 90, 91, 92, 93, 94, 95, 19, 152,
- /* 720 */ 195, 152, 31, 220, 152, 152, 35, 26, 174, 175,
- /* 730 */ 152, 163, 152, 130, 152, 115, 152, 117, 118, 152,
- /* 740 */ 49, 174, 175, 174, 175, 152, 174, 175, 26, 50,
- /* 750 */ 51, 152, 174, 175, 174, 175, 174, 175, 174, 175,
- /* 760 */ 138, 174, 175, 140, 22, 23, 198, 174, 175, 152,
- /* 770 */ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
- /* 780 */ 81, 82, 83, 152, 85, 86, 87, 88, 89, 90,
- /* 790 */ 91, 92, 93, 94, 95, 19, 152, 152, 152, 98,
- /* 800 */ 24, 152, 108, 109, 110, 174, 175, 152, 152, 152,
- /* 810 */ 152, 152, 70, 152, 213, 214, 152, 152, 174, 175,
- /* 820 */ 174, 175, 152, 174, 175, 124, 50, 51, 106, 174,
- /* 830 */ 175, 174, 175, 174, 175, 174, 175, 138, 174, 175,
- /* 840 */ 152, 22, 23, 22, 163, 189, 19, 71, 72, 73,
- /* 850 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
- /* 860 */ 152, 85, 86, 87, 88, 89, 90, 91, 92, 93,
- /* 870 */ 94, 95, 152, 152, 168, 169, 152, 50, 51, 198,
- /* 880 */ 197, 152, 174, 175, 152, 240, 152, 152, 152, 70,
- /* 890 */ 152, 208, 152, 152, 174, 175, 152, 19, 71, 72,
- /* 900 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
- /* 910 */ 83, 152, 85, 86, 87, 88, 89, 90, 91, 92,
- /* 920 */ 93, 94, 95, 152, 195, 247, 248, 152, 50, 51,
- /* 930 */ 195, 195, 152, 174, 175, 195, 195, 26, 152, 195,
- /* 940 */ 252, 220, 163, 122, 152, 174, 175, 163, 19, 71,
- /* 950 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
- /* 960 */ 82, 83, 152, 85, 86, 87, 88, 89, 90, 91,
- /* 970 */ 92, 93, 94, 95, 152, 195, 252, 198, 240, 50,
- /* 980 */ 51, 189, 198, 19, 174, 175, 19, 51, 23, 100,
- /* 990 */ 101, 26, 28, 163, 163, 28, 174, 175, 163, 19,
- /* 1000 */ 152, 72, 73, 74, 75, 76, 77, 78, 79, 80,
- /* 1010 */ 81, 82, 83, 152, 85, 86, 87, 88, 89, 90,
- /* 1020 */ 91, 92, 93, 94, 95, 152, 240, 152, 198, 198,
- /* 1030 */ 50, 51, 33, 198, 123, 174, 175, 116, 7, 8,
- /* 1040 */ 101, 42, 121, 107, 152, 152, 23, 174, 175, 26,
- /* 1050 */ 152, 112, 183, 73, 74, 75, 76, 77, 78, 79,
- /* 1060 */ 80, 81, 82, 83, 152, 85, 86, 87, 88, 89,
- /* 1070 */ 90, 91, 92, 93, 94, 95, 19, 20, 23, 22,
- /* 1080 */ 23, 26, 152, 152, 27, 28, 174, 175, 23, 19,
- /* 1090 */ 20, 26, 22, 132, 133, 38, 152, 27, 28, 152,
- /* 1100 */ 23, 215, 152, 26, 174, 175, 152, 27, 38, 152,
- /* 1110 */ 23, 152, 27, 26, 57, 152, 23, 163, 152, 26,
- /* 1120 */ 152, 174, 175, 66, 174, 175, 69, 57, 174, 175,
- /* 1130 */ 27, 174, 175, 174, 175, 152, 66, 174, 175, 69,
- /* 1140 */ 174, 175, 174, 175, 152, 23, 89, 90, 26, 91,
- /* 1150 */ 152, 236, 198, 96, 97, 98, 132, 133, 101, 89,
- /* 1160 */ 90, 152, 23, 209, 210, 26, 96, 97, 98, 152,
- /* 1170 */ 212, 101, 174, 175, 116, 19, 20, 97, 22, 121,
- /* 1180 */ 152, 193, 97, 27, 28, 152, 152, 152, 152, 132,
- /* 1190 */ 133, 134, 135, 136, 38, 23, 152, 152, 26, 152,
- /* 1200 */ 97, 152, 132, 133, 134, 135, 136, 235, 152, 212,
- /* 1210 */ 199, 150, 212, 57, 212, 200, 203, 216, 241, 216,
- /* 1220 */ 241, 203, 182, 19, 20, 69, 22, 186, 178, 177,
- /* 1230 */ 216, 27, 28, 229, 202, 39, 177, 177, 200, 155,
- /* 1240 */ 245, 122, 38, 41, 22, 89, 90, 91, 159, 159,
- /* 1250 */ 242, 159, 96, 97, 98, 71, 130, 101, 242, 191,
- /* 1260 */ 223, 57, 18, 194, 159, 203, 194, 194, 194, 18,
- /* 1270 */ 158, 223, 191, 69, 203, 159, 158, 19, 20, 191,
- /* 1280 */ 22, 203, 137, 46, 238, 27, 28, 159, 132, 133,
- /* 1290 */ 134, 135, 136, 89, 90, 159, 38, 22, 158, 179,
- /* 1300 */ 96, 97, 98, 237, 158, 101, 159, 159, 158, 179,
- /* 1310 */ 107, 176, 48, 176, 176, 57, 184, 106, 176, 125,
- /* 1320 */ 179, 178, 218, 107, 217, 176, 218, 69, 184, 176,
- /* 1330 */ 176, 217, 159, 218, 217, 159, 132, 133, 134, 135,
- /* 1340 */ 136, 218, 217, 137, 179, 95, 179, 89, 90, 228,
- /* 1350 */ 129, 126, 128, 127, 96, 97, 98, 206, 231, 101,
- /* 1360 */ 5, 25, 231, 205, 207, 10, 11, 12, 13, 14,
- /* 1370 */ 204, 203, 17, 26, 162, 161, 13, 6, 180, 180,
- /* 1380 */ 153, 153, 151, 151, 151, 151, 167, 32, 4, 34,
- /* 1390 */ 132, 133, 134, 135, 136, 3, 22, 142, 43, 68,
- /* 1400 */ 15, 23, 16, 251, 23, 120, 251, 248, 131, 111,
- /* 1410 */ 123, 56, 16, 20, 125, 1, 123, 131, 63, 79,
- /* 1420 */ 79, 66, 67, 111, 36, 28, 122, 1, 5, 22,
- /* 1430 */ 107, 140, 26, 54, 54, 44, 61, 107, 20, 24,
- /* 1440 */ 19, 112, 105, 53, 22, 40, 22, 22, 53, 30,
- /* 1450 */ 23, 22, 22, 53, 23, 23, 23, 22, 116, 23,
- /* 1460 */ 22, 122, 23, 26, 23, 22, 11, 124, 28, 114,
- /* 1470 */ 36, 26, 26, 23, 23, 23, 122, 23, 36, 26,
- /* 1480 */ 36, 22, 24, 23, 22, 1, 23, 26, 22, 24,
- /* 1490 */ 23, 22, 122, 23, 23, 22, 141, 23, 122, 122,
- /* 1500 */ 15,
+ /* 0 */ 19, 143, 144, 145, 146, 24, 7, 8, 27, 78,
+ /* 10 */ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ /* 20 */ 89, 90, 91, 92, 93, 106, 107, 108, 27, 28,
+ /* 30 */ 15, 50, 51, 78, 79, 80, 81, 26, 83, 84,
+ /* 40 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 1,
+ /* 50 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 60 */ 79, 80, 81, 11, 83, 84, 85, 86, 87, 88,
+ /* 70 */ 89, 90, 91, 92, 93, 19, 95, 89, 90, 91,
+ /* 80 */ 92, 93, 26, 83, 84, 85, 86, 87, 88, 89,
+ /* 90 */ 90, 91, 92, 93, 27, 28, 95, 96, 97, 27,
+ /* 100 */ 28, 100, 101, 102, 22, 19, 50, 51, 27, 28,
+ /* 110 */ 58, 55, 111, 7, 8, 9, 87, 88, 89, 90,
+ /* 120 */ 91, 92, 93, 98, 99, 69, 70, 71, 72, 73,
+ /* 130 */ 74, 75, 76, 77, 78, 79, 80, 81, 127, 83,
+ /* 140 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 150 */ 19, 223, 224, 225, 87, 88, 162, 31, 106, 107,
+ /* 160 */ 108, 35, 95, 96, 33, 98, 23, 95, 96, 117,
+ /* 170 */ 119, 243, 105, 42, 107, 49, 95, 96, 151, 93,
+ /* 180 */ 26, 50, 51, 97, 98, 99, 100, 101, 102, 103,
+ /* 190 */ 196, 22, 23, 121, 167, 26, 110, 130, 131, 67,
+ /* 200 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 210 */ 79, 80, 81, 219, 83, 84, 85, 86, 87, 88,
+ /* 220 */ 89, 90, 91, 92, 93, 19, 12, 95, 234, 119,
+ /* 230 */ 24, 99, 113, 239, 115, 116, 151, 68, 23, 147,
+ /* 240 */ 148, 26, 215, 29, 151, 153, 119, 155, 163, 164,
+ /* 250 */ 23, 23, 167, 26, 162, 23, 50, 51, 26, 45,
+ /* 260 */ 167, 47, 130, 131, 132, 172, 173, 23, 22, 23,
+ /* 270 */ 26, 186, 26, 188, 120, 69, 70, 71, 72, 73,
+ /* 280 */ 74, 75, 76, 77, 78, 79, 80, 81, 196, 83,
+ /* 290 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 300 */ 19, 151, 151, 23, 119, 97, 26, 19, 100, 101,
+ /* 310 */ 102, 219, 151, 162, 68, 22, 28, 167, 167, 111,
+ /* 320 */ 27, 26, 172, 173, 231, 232, 175, 176, 167, 27,
+ /* 330 */ 28, 50, 51, 27, 28, 119, 175, 176, 246, 24,
+ /* 340 */ 22, 113, 27, 115, 116, 27, 28, 196, 22, 23,
+ /* 350 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 360 */ 79, 80, 81, 114, 83, 84, 85, 86, 87, 88,
+ /* 370 */ 89, 90, 91, 92, 93, 19, 21, 151, 217, 22,
+ /* 380 */ 151, 231, 232, 222, 27, 28, 23, 114, 95, 50,
+ /* 390 */ 51, 118, 151, 167, 68, 89, 167, 95, 96, 104,
+ /* 400 */ 23, 95, 96, 26, 175, 176, 50, 51, 167, 22,
+ /* 410 */ 95, 72, 73, 95, 96, 16, 175, 176, 137, 64,
+ /* 420 */ 23, 195, 27, 121, 23, 69, 70, 71, 72, 73,
+ /* 430 */ 74, 75, 76, 77, 78, 79, 80, 81, 99, 83,
+ /* 440 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 450 */ 19, 12, 95, 96, 23, 226, 101, 22, 217, 19,
+ /* 460 */ 61, 151, 63, 222, 151, 106, 107, 108, 29, 159,
+ /* 470 */ 244, 151, 99, 163, 164, 23, 113, 167, 115, 116,
+ /* 480 */ 167, 50, 51, 110, 45, 58, 47, 167, 175, 176,
+ /* 490 */ 95, 51, 168, 137, 139, 175, 176, 58, 11, 22,
+ /* 500 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 510 */ 79, 80, 81, 22, 83, 84, 85, 86, 87, 88,
+ /* 520 */ 89, 90, 91, 92, 93, 19, 23, 151, 117, 23,
+ /* 530 */ 217, 207, 19, 106, 107, 108, 216, 151, 139, 23,
+ /* 540 */ 129, 28, 26, 167, 117, 105, 23, 223, 224, 225,
+ /* 550 */ 110, 175, 176, 167, 77, 165, 50, 51, 168, 169,
+ /* 560 */ 170, 175, 176, 87, 88, 113, 26, 115, 116, 171,
+ /* 570 */ 172, 173, 0, 1, 2, 69, 70, 71, 72, 73,
+ /* 580 */ 74, 75, 76, 77, 78, 79, 80, 81, 162, 83,
+ /* 590 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 600 */ 19, 151, 98, 217, 23, 37, 130, 131, 23, 23,
+ /* 610 */ 26, 26, 96, 163, 164, 23, 113, 167, 115, 116,
+ /* 620 */ 52, 117, 196, 151, 171, 172, 173, 59, 130, 131,
+ /* 630 */ 232, 50, 51, 129, 208, 209, 16, 121, 156, 167,
+ /* 640 */ 22, 23, 162, 223, 224, 225, 51, 175, 176, 121,
+ /* 650 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 660 */ 79, 80, 81, 178, 83, 84, 85, 86, 87, 88,
+ /* 670 */ 89, 90, 91, 92, 93, 19, 196, 109, 151, 23,
+ /* 680 */ 96, 61, 151, 63, 23, 232, 68, 26, 151, 168,
+ /* 690 */ 169, 170, 23, 89, 167, 26, 208, 209, 167, 219,
+ /* 700 */ 105, 36, 175, 176, 167, 121, 50, 51, 223, 224,
+ /* 710 */ 225, 229, 175, 176, 178, 233, 247, 248, 114, 239,
+ /* 720 */ 189, 22, 118, 24, 162, 69, 70, 71, 72, 73,
+ /* 730 */ 74, 75, 76, 77, 78, 79, 80, 81, 151, 83,
+ /* 740 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 750 */ 19, 151, 26, 151, 167, 24, 145, 146, 196, 223,
+ /* 760 */ 224, 225, 175, 176, 151, 182, 183, 167, 151, 167,
+ /* 770 */ 151, 36, 199, 190, 151, 175, 176, 175, 176, 206,
+ /* 780 */ 167, 50, 51, 221, 167, 136, 167, 138, 175, 176,
+ /* 790 */ 167, 26, 175, 176, 175, 176, 168, 36, 175, 176,
+ /* 800 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 810 */ 79, 80, 81, 151, 83, 84, 85, 86, 87, 88,
+ /* 820 */ 89, 90, 91, 92, 93, 19, 151, 151, 151, 167,
+ /* 830 */ 151, 151, 151, 151, 162, 207, 23, 175, 176, 26,
+ /* 840 */ 249, 250, 167, 167, 167, 151, 167, 167, 167, 167,
+ /* 850 */ 175, 176, 175, 176, 175, 176, 50, 51, 151, 53,
+ /* 860 */ 22, 167, 192, 193, 33, 189, 192, 193, 196, 189,
+ /* 870 */ 189, 189, 162, 42, 167, 69, 70, 71, 72, 73,
+ /* 880 */ 74, 75, 76, 77, 78, 79, 80, 81, 151, 83,
+ /* 890 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 900 */ 19, 151, 195, 151, 167, 151, 196, 162, 151, 215,
+ /* 910 */ 92, 93, 175, 176, 28, 174, 119, 167, 151, 167,
+ /* 920 */ 151, 167, 151, 182, 167, 175, 176, 175, 176, 175,
+ /* 930 */ 176, 50, 51, 23, 167, 23, 167, 40, 167, 22,
+ /* 940 */ 167, 196, 53, 168, 175, 176, 175, 176, 175, 162,
+ /* 950 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 960 */ 79, 80, 81, 151, 83, 84, 85, 86, 87, 88,
+ /* 970 */ 89, 90, 91, 92, 93, 19, 151, 151, 151, 167,
+ /* 980 */ 151, 151, 207, 196, 30, 218, 22, 175, 176, 1,
+ /* 990 */ 2, 53, 167, 167, 167, 151, 167, 167, 151, 151,
+ /* 1000 */ 175, 176, 175, 176, 175, 176, 50, 51, 221, 23,
+ /* 1010 */ 53, 167, 22, 22, 167, 167, 103, 168, 22, 110,
+ /* 1020 */ 19, 105, 175, 176, 20, 69, 70, 71, 72, 73,
+ /* 1030 */ 74, 75, 76, 77, 78, 79, 80, 81, 151, 83,
+ /* 1040 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 1050 */ 19, 151, 151, 151, 167, 151, 207, 24, 44, 215,
+ /* 1060 */ 60, 26, 175, 176, 54, 54, 240, 167, 167, 167,
+ /* 1070 */ 240, 167, 151, 151, 151, 175, 176, 175, 176, 175,
+ /* 1080 */ 176, 50, 51, 139, 151, 22, 105, 119, 167, 167,
+ /* 1090 */ 167, 5, 1, 36, 28, 77, 175, 176, 175, 176,
+ /* 1100 */ 167, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ /* 1110 */ 79, 80, 81, 151, 83, 84, 85, 86, 87, 88,
+ /* 1120 */ 89, 90, 91, 92, 93, 19, 151, 151, 151, 167,
+ /* 1130 */ 151, 151, 109, 151, 128, 77, 22, 175, 176, 26,
+ /* 1140 */ 218, 240, 167, 167, 167, 151, 167, 167, 151, 167,
+ /* 1150 */ 175, 176, 175, 176, 175, 176, 50, 51, 151, 22,
+ /* 1160 */ 151, 167, 23, 120, 167, 1, 16, 122, 20, 120,
+ /* 1170 */ 109, 195, 175, 176, 167, 195, 167, 71, 72, 73,
+ /* 1180 */ 74, 75, 76, 77, 78, 79, 80, 81, 151, 83,
+ /* 1190 */ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 1200 */ 19, 20, 195, 22, 167, 151, 151, 128, 27, 28,
+ /* 1210 */ 129, 23, 175, 176, 23, 66, 141, 22, 151, 38,
+ /* 1220 */ 151, 167, 167, 19, 20, 151, 22, 151, 151, 175,
+ /* 1230 */ 176, 27, 28, 151, 167, 15, 167, 4, 57, 16,
+ /* 1240 */ 151, 167, 38, 167, 167, 3, 166, 248, 67, 167,
+ /* 1250 */ 195, 175, 176, 181, 181, 150, 167, 175, 176, 251,
+ /* 1260 */ 6, 57, 151, 151, 175, 176, 151, 151, 87, 88,
+ /* 1270 */ 89, 67, 151, 162, 151, 94, 95, 96, 167, 167,
+ /* 1280 */ 99, 251, 167, 167, 152, 151, 175, 176, 167, 151,
+ /* 1290 */ 167, 87, 88, 151, 150, 150, 175, 176, 94, 95,
+ /* 1300 */ 96, 167, 150, 99, 151, 167, 151, 196, 13, 167,
+ /* 1310 */ 195, 130, 131, 132, 133, 134, 135, 152, 195, 160,
+ /* 1320 */ 167, 117, 167, 19, 20, 151, 22, 151, 217, 151,
+ /* 1330 */ 161, 27, 28, 222, 130, 131, 132, 133, 134, 135,
+ /* 1340 */ 151, 167, 38, 167, 151, 167, 19, 20, 195, 22,
+ /* 1350 */ 151, 151, 151, 151, 27, 28, 167, 151, 151, 26,
+ /* 1360 */ 167, 57, 25, 196, 151, 38, 167, 167, 167, 167,
+ /* 1370 */ 151, 67, 151, 167, 167, 117, 201, 151, 125, 151,
+ /* 1380 */ 167, 151, 127, 124, 57, 151, 167, 202, 167, 203,
+ /* 1390 */ 151, 87, 88, 167, 67, 167, 151, 167, 94, 95,
+ /* 1400 */ 96, 167, 151, 99, 123, 126, 167, 204, 136, 227,
+ /* 1410 */ 205, 119, 167, 105, 87, 88, 122, 177, 167, 158,
+ /* 1420 */ 158, 94, 95, 96, 212, 105, 99, 213, 212, 177,
+ /* 1430 */ 213, 213, 185, 48, 130, 131, 132, 133, 134, 135,
+ /* 1440 */ 5, 212, 177, 179, 104, 10, 11, 12, 13, 14,
+ /* 1450 */ 177, 180, 17, 22, 230, 93, 180, 130, 131, 132,
+ /* 1460 */ 133, 134, 135, 185, 177, 230, 177, 32, 177, 34,
+ /* 1470 */ 157, 22, 18, 158, 157, 157, 27, 28, 43, 158,
+ /* 1480 */ 158, 158, 46, 237, 136, 157, 238, 158, 191, 201,
+ /* 1490 */ 69, 56, 191, 58, 220, 22, 157, 62, 18, 201,
+ /* 1500 */ 65, 158, 220, 194, 194, 194, 194, 201, 158, 242,
+ /* 1510 */ 191, 41, 242, 158, 158, 39, 67, 154, 232, 168,
+ /* 1520 */ 245, 228, 198, 178, 183, 200, 168, 211, 232, 168,
+ /* 1530 */ 178, 178, 201, 187, 198, 149, 87, 88, 179, 168,
+ /* 1540 */ 241, 106, 107, 108, 95, 211, 241, 112, 99, 211,
+ /* 1550 */ 201, 197, 117, 193, 210, 188, 184, 184, 184, 175,
+ /* 1560 */ 93, 235, 175, 252, 252, 252, 236, 252, 252, 252,
+ /* 1570 */ 252, 252, 252, 252, 252, 140, 252, 252, 252, 130,
+ /* 1580 */ 131, 132,
};
-#define YY_SHIFT_USE_DFLT (-72)
-#define YY_SHIFT_COUNT (435)
-#define YY_SHIFT_MIN (-71)
-#define YY_SHIFT_MAX (1485)
+#define YY_SHIFT_USE_DFLT (-82)
+#define YY_SHIFT_COUNT (419)
+#define YY_SHIFT_MIN (-81)
+#define YY_SHIFT_MAX (1480)
static const short yy_shift_ofst[] = {
- /* 0 */ 5, 1057, 1355, 1070, 1204, 1204, 1204, 90, 60, -19,
- /* 10 */ 58, 58, 186, 1204, 1204, 1204, 1204, 1204, 1204, 1204,
- /* 20 */ 67, 67, 182, 336, 65, 250, 135, 263, 340, 417,
- /* 30 */ 494, 571, 622, 699, 776, 827, 827, 827, 827, 827,
- /* 40 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 827,
- /* 50 */ 878, 827, 929, 980, 980, 1156, 1204, 1204, 1204, 1204,
- /* 60 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204,
- /* 70 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204,
- /* 80 */ 1204, 1204, 1204, 1204, 1258, 1204, 1204, 1204, 1204, 1204,
- /* 90 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, -71, -47,
- /* 100 */ -47, -47, -47, -47, -6, 88, -66, 65, 65, 451,
- /* 110 */ 502, 112, 112, 33, 127, 278, -30, -72, -72, -72,
- /* 120 */ 11, 412, 412, 268, 608, 610, 65, 65, 65, 65,
- /* 130 */ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- /* 140 */ 65, 65, 65, 65, 65, 559, 138, 278, 127, 24,
- /* 150 */ 24, 24, 24, 24, 24, -72, -72, -72, 228, 341,
- /* 160 */ 341, 207, 276, 300, 352, 354, 350, 65, 65, 65,
- /* 170 */ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- /* 180 */ 65, 65, 65, 65, 495, 495, 495, 65, 65, 499,
- /* 190 */ 65, 65, 65, 574, 65, 65, 517, 65, 65, 65,
- /* 200 */ 65, 65, 65, 65, 65, 65, 65, 566, 691, 288,
- /* 210 */ 288, 288, 701, 620, 1058, 675, 603, 964, 964, 967,
- /* 220 */ 603, 967, 722, 965, 936, 999, 964, 264, 999, 999,
- /* 230 */ 911, 921, 434, 1196, 1119, 1119, 1202, 1202, 1119, 1222,
- /* 240 */ 1184, 1126, 1244, 1244, 1244, 1244, 1119, 1251, 1126, 1222,
- /* 250 */ 1184, 1184, 1126, 1119, 1251, 1145, 1237, 1119, 1119, 1251,
- /* 260 */ 1275, 1119, 1251, 1119, 1251, 1275, 1203, 1203, 1203, 1264,
- /* 270 */ 1275, 1203, 1211, 1203, 1264, 1203, 1203, 1194, 1216, 1194,
- /* 280 */ 1216, 1194, 1216, 1194, 1216, 1119, 1119, 1206, 1275, 1250,
- /* 290 */ 1250, 1275, 1221, 1225, 1224, 1226, 1126, 1336, 1347, 1363,
- /* 300 */ 1363, 1371, 1371, 1371, 1371, -72, -72, -72, -72, -72,
- /* 310 */ -72, 477, 623, 742, 819, 624, 694, 74, 1023, 221,
- /* 320 */ 1055, 1065, 1077, 1087, 1080, 889, 1031, 939, 1093, 1122,
- /* 330 */ 1085, 1139, 961, 1024, 1172, 1103, 821, 1384, 1392, 1374,
- /* 340 */ 1255, 1385, 1331, 1386, 1378, 1381, 1285, 1277, 1298, 1287,
- /* 350 */ 1393, 1289, 1396, 1414, 1293, 1286, 1340, 1341, 1312, 1397,
- /* 360 */ 1388, 1304, 1426, 1423, 1407, 1323, 1291, 1379, 1406, 1380,
- /* 370 */ 1375, 1391, 1330, 1415, 1418, 1421, 1329, 1337, 1422, 1390,
- /* 380 */ 1424, 1425, 1427, 1429, 1395, 1419, 1430, 1400, 1405, 1431,
- /* 390 */ 1432, 1433, 1342, 1435, 1436, 1438, 1437, 1339, 1439, 1441,
- /* 400 */ 1440, 1434, 1443, 1343, 1445, 1442, 1446, 1444, 1445, 1450,
- /* 410 */ 1451, 1452, 1453, 1454, 1459, 1455, 1460, 1462, 1458, 1461,
- /* 420 */ 1463, 1466, 1465, 1461, 1467, 1469, 1470, 1471, 1473, 1354,
- /* 430 */ 1370, 1376, 1377, 1474, 1485, 1484,
+ /* 0 */ 988, 1204, 1435, 1204, 1304, 1304, 67, 67, 1, -19,
+ /* 10 */ 1304, 1304, 1304, 1304, 427, 81, 131, 131, 806, 1181,
+ /* 20 */ 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304,
+ /* 30 */ 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304,
+ /* 40 */ 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1327, 1304,
+ /* 50 */ 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304, 1304,
+ /* 60 */ 1304, 1304, 52, 81, 81, 476, 476, 395, 1258, 56,
+ /* 70 */ 731, 656, 581, 506, 431, 356, 281, 206, 881, 881,
+ /* 80 */ 881, 881, 881, 881, 881, 881, 881, 881, 881, 881,
+ /* 90 */ 881, 881, 881, 956, 881, 1031, 1106, 1106, -69, -45,
+ /* 100 */ -45, -45, -45, -45, 0, 29, -12, 81, 81, 81,
+ /* 110 */ 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
+ /* 120 */ 81, 81, 81, 355, 440, 81, 81, 81, 81, 81,
+ /* 130 */ 504, 411, 395, 818, 1467, -82, -82, -82, 1449, 86,
+ /* 140 */ 439, 439, 306, 357, 302, 72, 318, 246, 169, 81,
+ /* 150 */ 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
+ /* 160 */ 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
+ /* 170 */ 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
+ /* 180 */ 81, 81, 315, 315, 315, 572, 1258, 1258, 1258, -82,
+ /* 190 */ -82, -82, 132, 132, 208, 568, 568, 568, 516, 503,
+ /* 200 */ 214, 452, 363, 228, 119, 119, 119, 119, 359, 126,
+ /* 210 */ 119, 119, 584, 293, 604, 106, 11, 288, 288, 513,
+ /* 220 */ 11, 513, 295, 813, 395, 831, 395, 831, 595, 831,
+ /* 230 */ 288, 649, 498, 498, 395, 154, 273, 699, 1476, 1292,
+ /* 240 */ 1292, 1470, 1470, 1292, 1473, 1421, 1255, 1480, 1480, 1480,
+ /* 250 */ 1480, 1292, 1454, 1255, 1473, 1421, 1421, 1255, 1292, 1454,
+ /* 260 */ 1348, 1436, 1292, 1292, 1454, 1292, 1454, 1292, 1454, 1431,
+ /* 270 */ 1320, 1320, 1320, 1385, 1362, 1362, 1431, 1320, 1340, 1320,
+ /* 280 */ 1385, 1320, 1320, 1294, 1308, 1294, 1308, 1294, 1308, 1292,
+ /* 290 */ 1292, 1272, 1279, 1281, 1253, 1259, 1255, 1258, 1337, 1333,
+ /* 300 */ 1295, 1295, 1254, 1254, 1254, 1254, -82, -82, -82, -82,
+ /* 310 */ -82, -82, 339, 399, 618, 326, 620, -81, 669, 477,
+ /* 320 */ 661, 585, 377, 280, 244, 232, 25, -1, 373, 227,
+ /* 330 */ 215, 1233, 1242, 1195, 1075, 1220, 1149, 1223, 1191, 1188,
+ /* 340 */ 1081, 1113, 1079, 1061, 1049, 1148, 1045, 1150, 1164, 1043,
+ /* 350 */ 1139, 1137, 1113, 1114, 1006, 1058, 1018, 1023, 1066, 1057,
+ /* 360 */ 968, 1091, 1086, 1063, 981, 944, 1011, 1035, 1010, 1000,
+ /* 370 */ 1014, 916, 1033, 1004, 1001, 909, 913, 996, 957, 991,
+ /* 380 */ 990, 986, 964, 938, 954, 917, 889, 897, 912, 910,
+ /* 390 */ 797, 886, 761, 838, 528, 726, 735, 765, 665, 726,
+ /* 400 */ 592, 586, 540, 523, 491, 487, 435, 401, 397, 387,
+ /* 410 */ 249, 216, 185, 127, 110, 51, 82, 143, 15, 48,
};
-#define YY_REDUCE_USE_DFLT (-176)
-#define YY_REDUCE_COUNT (310)
-#define YY_REDUCE_MIN (-175)
-#define YY_REDUCE_MAX (1234)
+#define YY_REDUCE_USE_DFLT (-143)
+#define YY_REDUCE_COUNT (311)
+#define YY_REDUCE_MIN (-142)
+#define YY_REDUCE_MAX (1387)
static const short yy_reduce_ofst[] = {
- /* 0 */ -143, 954, 86, 21, -50, 23, 79, 134, 170, -175,
- /* 10 */ 229, 260, -121, 212, 219, 291, -54, 349, 362, 156,
- /* 20 */ 309, 311, 334, 85, 224, 394, 314, 314, 314, 314,
- /* 30 */ 314, 314, 314, 314, 314, 314, 314, 314, 314, 314,
- /* 40 */ 314, 314, 314, 314, 314, 314, 314, 314, 314, 314,
- /* 50 */ 314, 314, 314, 314, 314, 374, 441, 443, 450, 452,
- /* 60 */ 515, 554, 567, 569, 572, 578, 580, 582, 584, 587,
- /* 70 */ 593, 631, 644, 646, 649, 655, 657, 659, 661, 664,
- /* 80 */ 708, 720, 759, 771, 810, 822, 861, 873, 912, 930,
- /* 90 */ 947, 950, 957, 959, 963, 966, 968, 998, 314, 314,
- /* 100 */ 314, 314, 314, 314, 314, 314, 314, 447, -53, 166,
- /* 110 */ 438, 348, 363, 314, 473, 469, 314, 314, 314, 314,
- /* 120 */ -15, 59, 101, 688, 220, 220, 525, 256, 729, 735,
- /* 130 */ 736, 740, 741, 744, 645, 448, 738, 458, 786, 503,
- /* 140 */ 780, 656, 721, 724, 792, 545, 568, 706, 683, 681,
- /* 150 */ 779, 784, 830, 831, 835, 678, 601, -104, -2, 96,
- /* 160 */ 111, 218, 287, 308, 310, 312, 335, 411, 453, 461,
- /* 170 */ 573, 599, 617, 658, 665, 670, 732, 734, 775, 848,
- /* 180 */ 875, 892, 893, 898, 332, 420, 869, 931, 944, 886,
- /* 190 */ 983, 992, 1009, 958, 1017, 1028, 988, 1033, 1034, 1035,
- /* 200 */ 287, 1036, 1044, 1045, 1047, 1049, 1056, 915, 972, 997,
- /* 210 */ 1000, 1002, 886, 1011, 1015, 1061, 1013, 1001, 1003, 977,
- /* 220 */ 1018, 979, 1050, 1041, 1040, 1052, 1014, 1004, 1059, 1060,
- /* 230 */ 1032, 1038, 1084, 995, 1089, 1090, 1008, 1016, 1092, 1037,
- /* 240 */ 1068, 1062, 1069, 1072, 1073, 1074, 1105, 1112, 1071, 1048,
- /* 250 */ 1081, 1088, 1078, 1116, 1118, 1046, 1066, 1128, 1136, 1140,
- /* 260 */ 1120, 1147, 1146, 1148, 1150, 1130, 1135, 1137, 1138, 1132,
- /* 270 */ 1141, 1142, 1143, 1149, 1144, 1153, 1154, 1104, 1107, 1108,
- /* 280 */ 1114, 1115, 1117, 1123, 1125, 1173, 1176, 1121, 1165, 1127,
- /* 290 */ 1131, 1167, 1157, 1151, 1158, 1166, 1168, 1212, 1214, 1227,
- /* 300 */ 1228, 1231, 1232, 1233, 1234, 1152, 1155, 1159, 1198, 1199,
- /* 310 */ 1219,
+ /* 0 */ -142, 1111, 92, 151, 241, 161, 150, 93, 85, 324,
+ /* 10 */ 386, 313, 320, 229, -6, 310, 536, 485, -72, 1121,
+ /* 20 */ 1089, 1082, 1076, 1054, 1037, 997, 979, 977, 975, 962,
+ /* 30 */ 923, 921, 904, 902, 900, 887, 847, 829, 827, 825,
+ /* 40 */ 812, 771, 769, 754, 752, 750, 737, 679, 677, 675,
+ /* 50 */ 662, 623, 619, 617, 613, 602, 600, 587, 537, 527,
+ /* 60 */ 472, 376, 480, 450, 226, 453, 398, 390, 426, 420,
+ /* 70 */ 420, 420, 420, 420, 420, 420, 420, 420, 420, 420,
+ /* 80 */ 420, 420, 420, 420, 420, 420, 420, 420, 420, 420,
+ /* 90 */ 420, 420, 420, 420, 420, 420, 420, 420, 420, 420,
+ /* 100 */ 420, 420, 420, 420, 420, 420, 420, 1153, 922, 1123,
+ /* 110 */ 1115, 1055, 1007, 980, 976, 901, 844, 830, 767, 826,
+ /* 120 */ 682, 694, 707, 482, 583, 681, 680, 676, 531, 27,
+ /* 130 */ 787, 562, 521, 420, 420, 420, 420, 420, 773, 741,
+ /* 140 */ 674, 670, 1067, 1251, 1245, 1239, 1234, 591, 591, 1230,
+ /* 150 */ 1228, 1226, 1221, 1219, 1213, 1207, 1206, 1202, 1201, 1200,
+ /* 160 */ 1199, 1193, 1189, 1178, 1176, 1174, 1155, 1142, 1138, 1134,
+ /* 170 */ 1116, 1112, 1077, 1074, 1069, 1067, 1009, 994, 982, 933,
+ /* 180 */ 848, 757, 849, 775, 628, 611, 745, 710, 672, 469,
+ /* 190 */ 488, 573, 1387, 1384, 1367, 1374, 1373, 1372, 1344, 1354,
+ /* 200 */ 1360, 1354, 1354, 1354, 1354, 1354, 1354, 1354, 1330, 1326,
+ /* 210 */ 1354, 1354, 1344, 1371, 1336, 1386, 1349, 1338, 1334, 1305,
+ /* 220 */ 1331, 1299, 1359, 1346, 1361, 1353, 1358, 1352, 1341, 1345,
+ /* 230 */ 1316, 1293, 1296, 1286, 1351, 1325, 1324, 1363, 1275, 1356,
+ /* 240 */ 1355, 1270, 1267, 1350, 1282, 1319, 1306, 1312, 1311, 1310,
+ /* 250 */ 1309, 1343, 1339, 1298, 1274, 1301, 1297, 1288, 1329, 1328,
+ /* 260 */ 1248, 1246, 1323, 1322, 1318, 1321, 1317, 1315, 1313, 1276,
+ /* 270 */ 1291, 1289, 1287, 1278, 1235, 1224, 1271, 1273, 1264, 1265,
+ /* 280 */ 1247, 1252, 1240, 1218, 1229, 1217, 1216, 1214, 1212, 1262,
+ /* 290 */ 1261, 1182, 1205, 1203, 1186, 1185, 1175, 1167, 1169, 1159,
+ /* 300 */ 1165, 1132, 1152, 1145, 1144, 1105, 1030, 1008, 999, 1073,
+ /* 310 */ 1072, 1080,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 982, 1300, 1300, 1300, 1214, 1214, 1214, 1305, 1300, 1109,
- /* 10 */ 1138, 1138, 1274, 1305, 1305, 1305, 1305, 1305, 1305, 1212,
- /* 20 */ 1305, 1305, 1305, 1300, 1305, 1113, 1144, 1305, 1305, 1305,
- /* 30 */ 1305, 1305, 1305, 1305, 1305, 1273, 1275, 1152, 1151, 1254,
- /* 40 */ 1125, 1149, 1142, 1146, 1215, 1208, 1209, 1207, 1211, 1216,
- /* 50 */ 1305, 1145, 1177, 1192, 1176, 1305, 1305, 1305, 1305, 1305,
- /* 60 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 70 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 80 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 90 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1186, 1191,
- /* 100 */ 1198, 1190, 1187, 1179, 1178, 1180, 1181, 1305, 1305, 1008,
- /* 110 */ 1074, 1305, 1305, 1182, 1305, 1020, 1183, 1195, 1194, 1193,
- /* 120 */ 1015, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 130 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 140 */ 1305, 1305, 1305, 1305, 1305, 982, 1300, 1305, 1305, 1300,
- /* 150 */ 1300, 1300, 1300, 1300, 1300, 1292, 1113, 1103, 1305, 1305,
- /* 160 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1280, 1278,
- /* 170 */ 1305, 1227, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 180 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 190 */ 1305, 1305, 1305, 1109, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 200 */ 1305, 1305, 1305, 1305, 1305, 1305, 988, 1305, 1247, 1109,
- /* 210 */ 1109, 1109, 1111, 1089, 1101, 990, 1148, 1127, 1127, 1259,
- /* 220 */ 1148, 1259, 1045, 1068, 1042, 1138, 1127, 1210, 1138, 1138,
- /* 230 */ 1110, 1101, 1305, 1285, 1118, 1118, 1277, 1277, 1118, 1157,
- /* 240 */ 1078, 1148, 1085, 1085, 1085, 1085, 1118, 1005, 1148, 1157,
- /* 250 */ 1078, 1078, 1148, 1118, 1005, 1253, 1251, 1118, 1118, 1005,
- /* 260 */ 1220, 1118, 1005, 1118, 1005, 1220, 1076, 1076, 1076, 1060,
- /* 270 */ 1220, 1076, 1045, 1076, 1060, 1076, 1076, 1131, 1126, 1131,
- /* 280 */ 1126, 1131, 1126, 1131, 1126, 1118, 1118, 1305, 1220, 1224,
- /* 290 */ 1224, 1220, 1143, 1132, 1141, 1139, 1148, 1011, 1063, 998,
- /* 300 */ 998, 987, 987, 987, 987, 1297, 1297, 1292, 1047, 1047,
- /* 310 */ 1030, 1305, 1305, 1305, 1305, 1305, 1305, 1022, 1305, 1229,
- /* 320 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 330 */ 1305, 1305, 1305, 1305, 1305, 1305, 1164, 1305, 983, 1287,
- /* 340 */ 1305, 1305, 1284, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 350 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 360 */ 1305, 1257, 1305, 1305, 1305, 1305, 1305, 1305, 1250, 1249,
- /* 370 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 380 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305,
- /* 390 */ 1305, 1305, 1092, 1305, 1305, 1305, 1096, 1305, 1305, 1305,
- /* 400 */ 1305, 1305, 1305, 1305, 1140, 1305, 1133, 1305, 1213, 1305,
- /* 410 */ 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1305, 1302,
- /* 420 */ 1305, 1305, 1305, 1301, 1305, 1305, 1305, 1305, 1305, 1166,
- /* 430 */ 1305, 1165, 1169, 1305, 996, 1305,
+ /* 0 */ 636, 872, 960, 960, 872, 872, 960, 960, 960, 762,
+ /* 10 */ 960, 960, 960, 870, 960, 960, 790, 790, 934, 960,
+ /* 20 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 30 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 40 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 50 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 60 */ 960, 960, 960, 960, 960, 960, 960, 677, 766, 796,
+ /* 70 */ 960, 960, 960, 960, 960, 960, 960, 960, 933, 935,
+ /* 80 */ 804, 803, 913, 777, 801, 794, 798, 873, 866, 867,
+ /* 90 */ 865, 869, 874, 960, 797, 833, 850, 832, 844, 849,
+ /* 100 */ 856, 848, 845, 835, 834, 836, 837, 960, 960, 960,
+ /* 110 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 120 */ 960, 960, 960, 662, 731, 960, 960, 960, 960, 960,
+ /* 130 */ 960, 960, 960, 838, 839, 853, 852, 851, 960, 669,
+ /* 140 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 150 */ 940, 938, 960, 885, 960, 960, 960, 960, 960, 960,
+ /* 160 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 170 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 180 */ 960, 642, 762, 762, 762, 636, 960, 960, 960, 952,
+ /* 190 */ 766, 756, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 200 */ 960, 960, 960, 960, 806, 745, 923, 925, 960, 906,
+ /* 210 */ 743, 664, 764, 679, 754, 644, 800, 779, 779, 918,
+ /* 220 */ 800, 918, 702, 725, 960, 790, 960, 790, 699, 790,
+ /* 230 */ 779, 868, 960, 960, 960, 763, 754, 960, 945, 770,
+ /* 240 */ 770, 937, 937, 770, 812, 735, 800, 742, 742, 742,
+ /* 250 */ 742, 770, 659, 800, 812, 735, 735, 800, 770, 659,
+ /* 260 */ 912, 910, 770, 770, 659, 770, 659, 770, 659, 878,
+ /* 270 */ 733, 733, 733, 717, 882, 882, 878, 733, 702, 733,
+ /* 280 */ 717, 733, 733, 783, 778, 783, 778, 783, 778, 770,
+ /* 290 */ 770, 960, 795, 784, 793, 791, 800, 960, 665, 720,
+ /* 300 */ 652, 652, 641, 641, 641, 641, 957, 957, 952, 704,
+ /* 310 */ 704, 687, 960, 960, 960, 960, 960, 960, 960, 887,
+ /* 320 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 330 */ 960, 960, 637, 947, 960, 960, 944, 960, 960, 960,
+ /* 340 */ 960, 805, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 350 */ 960, 960, 922, 960, 960, 960, 960, 960, 960, 960,
+ /* 360 */ 916, 960, 960, 960, 960, 960, 960, 909, 908, 960,
+ /* 370 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 380 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 390 */ 960, 960, 960, 960, 960, 792, 960, 785, 960, 871,
+ /* 400 */ 960, 960, 960, 960, 960, 960, 960, 960, 960, 960,
+ /* 410 */ 748, 821, 960, 820, 824, 819, 671, 960, 650, 960,
+ /* 420 */ 633, 638, 956, 959, 958, 955, 954, 953, 948, 946,
+ /* 430 */ 943, 942, 941, 939, 936, 932, 891, 889, 896, 895,
+ /* 440 */ 894, 893, 892, 890, 888, 886, 807, 802, 799, 931,
+ /* 450 */ 884, 744, 741, 740, 658, 949, 915, 924, 811, 810,
+ /* 460 */ 813, 921, 920, 919, 917, 914, 901, 809, 808, 736,
+ /* 470 */ 876, 875, 661, 905, 904, 903, 907, 911, 902, 772,
+ /* 480 */ 660, 657, 668, 723, 724, 732, 730, 729, 728, 727,
+ /* 490 */ 726, 722, 670, 678, 716, 701, 700, 881, 883, 880,
+ /* 500 */ 879, 709, 708, 714, 713, 712, 711, 710, 707, 706,
+ /* 510 */ 705, 698, 697, 703, 696, 719, 718, 715, 695, 739,
+ /* 520 */ 738, 737, 734, 694, 693, 692, 824, 691, 690, 830,
+ /* 530 */ 829, 817, 860, 759, 758, 757, 769, 768, 781, 780,
+ /* 540 */ 815, 814, 782, 767, 761, 760, 776, 775, 774, 773,
+ /* 550 */ 765, 755, 787, 789, 788, 786, 862, 771, 859, 930,
+ /* 560 */ 929, 928, 927, 926, 864, 863, 831, 828, 682, 683,
+ /* 570 */ 899, 898, 900, 897, 685, 684, 681, 680, 861, 750,
+ /* 580 */ 749, 857, 854, 846, 842, 858, 855, 847, 843, 841,
+ /* 590 */ 840, 826, 825, 823, 822, 818, 827, 673, 751, 747,
+ /* 600 */ 746, 816, 753, 752, 689, 688, 686, 667, 666, 663,
+ /* 610 */ 656, 654, 653, 655, 651, 649, 648, 647, 646, 645,
+ /* 620 */ 676, 675, 674, 672, 671, 643, 640, 639, 635, 634,
+ /* 630 */ 632,
};
/* The next table maps tokens into fallback tokens. If a construct
@@ -126840,7 +115155,6 @@ static const YYCODETYPE yyFallback[] = {
27, /* OFFSET => ID */
27, /* PRAGMA => ID */
27, /* RAISE => ID */
- 27, /* RECURSIVE => ID */
27, /* REPLACE => ID */
27, /* RESTRICT => ID */
27, /* ROW => ID */
@@ -126848,7 +115162,6 @@ static const YYCODETYPE yyFallback[] = {
27, /* VACUUM => ID */
27, /* VIEW => ID */
27, /* VIRTUAL => ID */
- 27, /* WITH => ID */
27, /* REINDEX => ID */
27, /* RENAME => ID */
27, /* CTIME_KW => ID */
@@ -126866,13 +115179,9 @@ static const YYCODETYPE yyFallback[] = {
** + The semantic value stored at this level of the stack. This is
** the information used by the action routines in the grammar.
** It is sometimes called the "minor" token.
-**
-** After the "shift" half of a SHIFTREDUCE action, the stateno field
-** actually contains the reduce action for the second half of the
-** SHIFTREDUCE.
*/
struct yyStackEntry {
- YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */
+ YYACTIONTYPE stateno; /* The state-number */
YYCODETYPE major; /* The major token value. This is the code
** number for the token at this stack level */
YYMINORTYPE minor; /* The user-supplied minor token value. This
@@ -126948,56 +115257,55 @@ static const char *const yyTokenName[] = {
"EACH", "FAIL", "FOR", "IGNORE",
"INITIALLY", "INSTEAD", "LIKE_KW", "MATCH",
"NO", "KEY", "OF", "OFFSET",
- "PRAGMA", "RAISE", "RECURSIVE", "REPLACE",
- "RESTRICT", "ROW", "TRIGGER", "VACUUM",
- "VIEW", "VIRTUAL", "WITH", "REINDEX",
- "RENAME", "CTIME_KW", "ANY", "OR",
- "AND", "IS", "BETWEEN", "IN",
- "ISNULL", "NOTNULL", "NE", "EQ",
- "GT", "LE", "LT", "GE",
- "ESCAPE", "BITAND", "BITOR", "LSHIFT",
- "RSHIFT", "PLUS", "MINUS", "STAR",
- "SLASH", "REM", "CONCAT", "COLLATE",
- "BITNOT", "STRING", "JOIN_KW", "CONSTRAINT",
- "DEFAULT", "NULL", "PRIMARY", "UNIQUE",
- "CHECK", "REFERENCES", "AUTOINCR", "ON",
- "INSERT", "DELETE", "UPDATE", "SET",
- "DEFERRABLE", "FOREIGN", "DROP", "UNION",
- "ALL", "EXCEPT", "INTERSECT", "SELECT",
- "VALUES", "DISTINCT", "DOT", "FROM",
- "JOIN", "USING", "ORDER", "GROUP",
- "HAVING", "LIMIT", "WHERE", "INTO",
- "INTEGER", "FLOAT", "BLOB", "VARIABLE",
- "CASE", "WHEN", "THEN", "ELSE",
- "INDEX", "ALTER", "ADD", "error",
- "input", "cmdlist", "ecmd", "explain",
- "cmdx", "cmd", "transtype", "trans_opt",
- "nm", "savepoint_opt", "create_table", "create_table_args",
- "createkw", "temp", "ifnotexists", "dbnm",
- "columnlist", "conslist_opt", "table_options", "select",
- "column", "columnid", "type", "carglist",
- "typetoken", "typename", "signed", "plus_num",
- "minus_num", "ccons", "term", "expr",
- "onconf", "sortorder", "autoinc", "eidlist_opt",
- "refargs", "defer_subclause", "refarg", "refact",
- "init_deferred_pred_opt", "conslist", "tconscomma", "tcons",
- "sortlist", "eidlist", "defer_subclause_opt", "orconf",
+ "PRAGMA", "RAISE", "REPLACE", "RESTRICT",
+ "ROW", "TRIGGER", "VACUUM", "VIEW",
+ "VIRTUAL", "REINDEX", "RENAME", "CTIME_KW",
+ "ANY", "OR", "AND", "IS",
+ "BETWEEN", "IN", "ISNULL", "NOTNULL",
+ "NE", "EQ", "GT", "LE",
+ "LT", "GE", "ESCAPE", "BITAND",
+ "BITOR", "LSHIFT", "RSHIFT", "PLUS",
+ "MINUS", "STAR", "SLASH", "REM",
+ "CONCAT", "COLLATE", "BITNOT", "STRING",
+ "JOIN_KW", "CONSTRAINT", "DEFAULT", "NULL",
+ "PRIMARY", "UNIQUE", "CHECK", "REFERENCES",
+ "AUTOINCR", "ON", "INSERT", "DELETE",
+ "UPDATE", "SET", "DEFERRABLE", "FOREIGN",
+ "DROP", "UNION", "ALL", "EXCEPT",
+ "INTERSECT", "SELECT", "DISTINCT", "DOT",
+ "FROM", "JOIN", "USING", "ORDER",
+ "GROUP", "HAVING", "LIMIT", "WHERE",
+ "INTO", "VALUES", "INTEGER", "FLOAT",
+ "BLOB", "REGISTER", "VARIABLE", "CASE",
+ "WHEN", "THEN", "ELSE", "INDEX",
+ "ALTER", "ADD", "error", "input",
+ "cmdlist", "ecmd", "explain", "cmdx",
+ "cmd", "transtype", "trans_opt", "nm",
+ "savepoint_opt", "create_table", "create_table_args", "createkw",
+ "temp", "ifnotexists", "dbnm", "columnlist",
+ "conslist_opt", "table_options", "select", "column",
+ "columnid", "type", "carglist", "id",
+ "ids", "typetoken", "typename", "signed",
+ "plus_num", "minus_num", "ccons", "term",
+ "expr", "onconf", "sortorder", "autoinc",
+ "idxlist_opt", "refargs", "defer_subclause", "refarg",
+ "refact", "init_deferred_pred_opt", "conslist", "tconscomma",
+ "tcons", "idxlist", "defer_subclause_opt", "orconf",
"resolvetype", "raisetype", "ifexists", "fullname",
- "selectnowith", "oneselect", "with", "multiselect_op",
- "distinct", "selcollist", "from", "where_opt",
- "groupby_opt", "having_opt", "orderby_opt", "limit_opt",
- "values", "nexprlist", "exprlist", "sclp",
- "as", "seltablist", "stl_prefix", "joinop",
- "indexed_opt", "on_opt", "using_opt", "joinop2",
- "idlist", "setlist", "insert_cmd", "idlist_opt",
- "likeop", "between_op", "in_op", "case_operand",
- "case_exprlist", "case_else", "uniqueflag", "collate",
- "nmnum", "trigger_decl", "trigger_cmd_list", "trigger_time",
+ "oneselect", "multiselect_op", "distinct", "selcollist",
+ "from", "where_opt", "groupby_opt", "having_opt",
+ "orderby_opt", "limit_opt", "sclp", "as",
+ "seltablist", "stl_prefix", "joinop", "indexed_opt",
+ "on_opt", "using_opt", "joinop2", "idlist",
+ "sortlist", "nexprlist", "setlist", "insert_cmd",
+ "inscollist_opt", "valuelist", "exprlist", "likeop",
+ "between_op", "in_op", "case_operand", "case_exprlist",
+ "case_else", "uniqueflag", "collate", "nmnum",
+ "number", "trigger_decl", "trigger_cmd_list", "trigger_time",
"trigger_event", "foreach_clause", "when_clause", "trigger_cmd",
"trnm", "tridxby", "database_kw_opt", "key_opt",
"add_column_fullname", "kwcolumn_opt", "create_vtab", "vtabarglist",
"vtabarg", "vtabargtoken", "lp", "anylist",
- "wqlist",
};
#endif /* NDEBUG */
@@ -127045,294 +115353,295 @@ static const char *const yyRuleName[] = {
/* 37 */ "columnlist ::= column",
/* 38 */ "column ::= columnid type carglist",
/* 39 */ "columnid ::= nm",
- /* 40 */ "nm ::= ID|INDEXED",
- /* 41 */ "nm ::= STRING",
- /* 42 */ "nm ::= JOIN_KW",
- /* 43 */ "type ::=",
- /* 44 */ "type ::= typetoken",
- /* 45 */ "typetoken ::= typename",
- /* 46 */ "typetoken ::= typename LP signed RP",
- /* 47 */ "typetoken ::= typename LP signed COMMA signed RP",
- /* 48 */ "typename ::= ID|STRING",
- /* 49 */ "typename ::= typename ID|STRING",
- /* 50 */ "signed ::= plus_num",
- /* 51 */ "signed ::= minus_num",
- /* 52 */ "carglist ::= carglist ccons",
- /* 53 */ "carglist ::=",
- /* 54 */ "ccons ::= CONSTRAINT nm",
- /* 55 */ "ccons ::= DEFAULT term",
- /* 56 */ "ccons ::= DEFAULT LP expr RP",
- /* 57 */ "ccons ::= DEFAULT PLUS term",
- /* 58 */ "ccons ::= DEFAULT MINUS term",
- /* 59 */ "ccons ::= DEFAULT ID|INDEXED",
- /* 60 */ "ccons ::= NULL onconf",
- /* 61 */ "ccons ::= NOT NULL onconf",
- /* 62 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc",
- /* 63 */ "ccons ::= UNIQUE onconf",
- /* 64 */ "ccons ::= CHECK LP expr RP",
- /* 65 */ "ccons ::= REFERENCES nm eidlist_opt refargs",
- /* 66 */ "ccons ::= defer_subclause",
- /* 67 */ "ccons ::= COLLATE ID|STRING",
- /* 68 */ "autoinc ::=",
- /* 69 */ "autoinc ::= AUTOINCR",
- /* 70 */ "refargs ::=",
- /* 71 */ "refargs ::= refargs refarg",
- /* 72 */ "refarg ::= MATCH nm",
- /* 73 */ "refarg ::= ON INSERT refact",
- /* 74 */ "refarg ::= ON DELETE refact",
- /* 75 */ "refarg ::= ON UPDATE refact",
- /* 76 */ "refact ::= SET NULL",
- /* 77 */ "refact ::= SET DEFAULT",
- /* 78 */ "refact ::= CASCADE",
- /* 79 */ "refact ::= RESTRICT",
- /* 80 */ "refact ::= NO ACTION",
- /* 81 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt",
- /* 82 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt",
- /* 83 */ "init_deferred_pred_opt ::=",
- /* 84 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED",
- /* 85 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE",
- /* 86 */ "conslist_opt ::=",
- /* 87 */ "conslist_opt ::= COMMA conslist",
- /* 88 */ "conslist ::= conslist tconscomma tcons",
- /* 89 */ "conslist ::= tcons",
- /* 90 */ "tconscomma ::= COMMA",
- /* 91 */ "tconscomma ::=",
- /* 92 */ "tcons ::= CONSTRAINT nm",
- /* 93 */ "tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf",
- /* 94 */ "tcons ::= UNIQUE LP sortlist RP onconf",
- /* 95 */ "tcons ::= CHECK LP expr RP onconf",
- /* 96 */ "tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt",
- /* 97 */ "defer_subclause_opt ::=",
- /* 98 */ "defer_subclause_opt ::= defer_subclause",
- /* 99 */ "onconf ::=",
- /* 100 */ "onconf ::= ON CONFLICT resolvetype",
- /* 101 */ "orconf ::=",
- /* 102 */ "orconf ::= OR resolvetype",
- /* 103 */ "resolvetype ::= raisetype",
- /* 104 */ "resolvetype ::= IGNORE",
- /* 105 */ "resolvetype ::= REPLACE",
- /* 106 */ "cmd ::= DROP TABLE ifexists fullname",
- /* 107 */ "ifexists ::= IF EXISTS",
- /* 108 */ "ifexists ::=",
- /* 109 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select",
- /* 110 */ "cmd ::= DROP VIEW ifexists fullname",
- /* 111 */ "cmd ::= select",
- /* 112 */ "select ::= with selectnowith",
- /* 113 */ "selectnowith ::= oneselect",
- /* 114 */ "selectnowith ::= selectnowith multiselect_op oneselect",
- /* 115 */ "multiselect_op ::= UNION",
- /* 116 */ "multiselect_op ::= UNION ALL",
- /* 117 */ "multiselect_op ::= EXCEPT|INTERSECT",
- /* 118 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt",
- /* 119 */ "oneselect ::= values",
- /* 120 */ "values ::= VALUES LP nexprlist RP",
- /* 121 */ "values ::= values COMMA LP exprlist RP",
- /* 122 */ "distinct ::= DISTINCT",
- /* 123 */ "distinct ::= ALL",
- /* 124 */ "distinct ::=",
- /* 125 */ "sclp ::= selcollist COMMA",
- /* 126 */ "sclp ::=",
- /* 127 */ "selcollist ::= sclp expr as",
- /* 128 */ "selcollist ::= sclp STAR",
- /* 129 */ "selcollist ::= sclp nm DOT STAR",
- /* 130 */ "as ::= AS nm",
- /* 131 */ "as ::= ID|STRING",
- /* 132 */ "as ::=",
- /* 133 */ "from ::=",
- /* 134 */ "from ::= FROM seltablist",
- /* 135 */ "stl_prefix ::= seltablist joinop",
- /* 136 */ "stl_prefix ::=",
- /* 137 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt",
- /* 138 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt",
- /* 139 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt",
- /* 140 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt",
- /* 141 */ "dbnm ::=",
- /* 142 */ "dbnm ::= DOT nm",
- /* 143 */ "fullname ::= nm dbnm",
- /* 144 */ "joinop ::= COMMA|JOIN",
- /* 145 */ "joinop ::= JOIN_KW JOIN",
- /* 146 */ "joinop ::= JOIN_KW nm JOIN",
- /* 147 */ "joinop ::= JOIN_KW nm nm JOIN",
- /* 148 */ "on_opt ::= ON expr",
- /* 149 */ "on_opt ::=",
- /* 150 */ "indexed_opt ::=",
- /* 151 */ "indexed_opt ::= INDEXED BY nm",
- /* 152 */ "indexed_opt ::= NOT INDEXED",
- /* 153 */ "using_opt ::= USING LP idlist RP",
- /* 154 */ "using_opt ::=",
- /* 155 */ "orderby_opt ::=",
- /* 156 */ "orderby_opt ::= ORDER BY sortlist",
- /* 157 */ "sortlist ::= sortlist COMMA expr sortorder",
- /* 158 */ "sortlist ::= expr sortorder",
- /* 159 */ "sortorder ::= ASC",
- /* 160 */ "sortorder ::= DESC",
- /* 161 */ "sortorder ::=",
- /* 162 */ "groupby_opt ::=",
- /* 163 */ "groupby_opt ::= GROUP BY nexprlist",
- /* 164 */ "having_opt ::=",
- /* 165 */ "having_opt ::= HAVING expr",
- /* 166 */ "limit_opt ::=",
- /* 167 */ "limit_opt ::= LIMIT expr",
- /* 168 */ "limit_opt ::= LIMIT expr OFFSET expr",
- /* 169 */ "limit_opt ::= LIMIT expr COMMA expr",
- /* 170 */ "cmd ::= with DELETE FROM fullname indexed_opt where_opt",
- /* 171 */ "where_opt ::=",
- /* 172 */ "where_opt ::= WHERE expr",
- /* 173 */ "cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt",
- /* 174 */ "setlist ::= setlist COMMA nm EQ expr",
- /* 175 */ "setlist ::= nm EQ expr",
- /* 176 */ "cmd ::= with insert_cmd INTO fullname idlist_opt select",
- /* 177 */ "cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES",
- /* 178 */ "insert_cmd ::= INSERT orconf",
- /* 179 */ "insert_cmd ::= REPLACE",
- /* 180 */ "idlist_opt ::=",
- /* 181 */ "idlist_opt ::= LP idlist RP",
- /* 182 */ "idlist ::= idlist COMMA nm",
- /* 183 */ "idlist ::= nm",
- /* 184 */ "expr ::= term",
- /* 185 */ "expr ::= LP expr RP",
- /* 186 */ "term ::= NULL",
- /* 187 */ "expr ::= ID|INDEXED",
- /* 188 */ "expr ::= JOIN_KW",
- /* 189 */ "expr ::= nm DOT nm",
- /* 190 */ "expr ::= nm DOT nm DOT nm",
- /* 191 */ "term ::= INTEGER|FLOAT|BLOB",
- /* 192 */ "term ::= STRING",
- /* 193 */ "expr ::= VARIABLE",
- /* 194 */ "expr ::= expr COLLATE ID|STRING",
- /* 195 */ "expr ::= CAST LP expr AS typetoken RP",
- /* 196 */ "expr ::= ID|INDEXED LP distinct exprlist RP",
- /* 197 */ "expr ::= ID|INDEXED LP STAR RP",
- /* 198 */ "term ::= CTIME_KW",
- /* 199 */ "expr ::= expr AND expr",
- /* 200 */ "expr ::= expr OR expr",
- /* 201 */ "expr ::= expr LT|GT|GE|LE expr",
- /* 202 */ "expr ::= expr EQ|NE expr",
- /* 203 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
- /* 204 */ "expr ::= expr PLUS|MINUS expr",
- /* 205 */ "expr ::= expr STAR|SLASH|REM expr",
- /* 206 */ "expr ::= expr CONCAT expr",
- /* 207 */ "likeop ::= LIKE_KW|MATCH",
- /* 208 */ "likeop ::= NOT LIKE_KW|MATCH",
- /* 209 */ "expr ::= expr likeop expr",
- /* 210 */ "expr ::= expr likeop expr ESCAPE expr",
- /* 211 */ "expr ::= expr ISNULL|NOTNULL",
- /* 212 */ "expr ::= expr NOT NULL",
- /* 213 */ "expr ::= expr IS expr",
- /* 214 */ "expr ::= expr IS NOT expr",
- /* 215 */ "expr ::= NOT expr",
- /* 216 */ "expr ::= BITNOT expr",
- /* 217 */ "expr ::= MINUS expr",
- /* 218 */ "expr ::= PLUS expr",
- /* 219 */ "between_op ::= BETWEEN",
- /* 220 */ "between_op ::= NOT BETWEEN",
- /* 221 */ "expr ::= expr between_op expr AND expr",
- /* 222 */ "in_op ::= IN",
- /* 223 */ "in_op ::= NOT IN",
- /* 224 */ "expr ::= expr in_op LP exprlist RP",
- /* 225 */ "expr ::= LP select RP",
- /* 226 */ "expr ::= expr in_op LP select RP",
- /* 227 */ "expr ::= expr in_op nm dbnm",
- /* 228 */ "expr ::= EXISTS LP select RP",
- /* 229 */ "expr ::= CASE case_operand case_exprlist case_else END",
- /* 230 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
- /* 231 */ "case_exprlist ::= WHEN expr THEN expr",
- /* 232 */ "case_else ::= ELSE expr",
- /* 233 */ "case_else ::=",
- /* 234 */ "case_operand ::= expr",
- /* 235 */ "case_operand ::=",
- /* 236 */ "exprlist ::= nexprlist",
- /* 237 */ "exprlist ::=",
- /* 238 */ "nexprlist ::= nexprlist COMMA expr",
- /* 239 */ "nexprlist ::= expr",
- /* 240 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
- /* 241 */ "uniqueflag ::= UNIQUE",
- /* 242 */ "uniqueflag ::=",
- /* 243 */ "eidlist_opt ::=",
- /* 244 */ "eidlist_opt ::= LP eidlist RP",
- /* 245 */ "eidlist ::= eidlist COMMA nm collate sortorder",
- /* 246 */ "eidlist ::= nm collate sortorder",
- /* 247 */ "collate ::=",
- /* 248 */ "collate ::= COLLATE ID|STRING",
- /* 249 */ "cmd ::= DROP INDEX ifexists fullname",
- /* 250 */ "cmd ::= VACUUM",
- /* 251 */ "cmd ::= VACUUM nm",
- /* 252 */ "cmd ::= PRAGMA nm dbnm",
- /* 253 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
- /* 254 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
- /* 255 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
- /* 256 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
- /* 257 */ "nmnum ::= plus_num",
- /* 258 */ "nmnum ::= nm",
- /* 259 */ "nmnum ::= ON",
- /* 260 */ "nmnum ::= DELETE",
- /* 261 */ "nmnum ::= DEFAULT",
- /* 262 */ "plus_num ::= PLUS INTEGER|FLOAT",
- /* 263 */ "plus_num ::= INTEGER|FLOAT",
- /* 264 */ "minus_num ::= MINUS INTEGER|FLOAT",
- /* 265 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
- /* 266 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
- /* 267 */ "trigger_time ::= BEFORE",
- /* 268 */ "trigger_time ::= AFTER",
- /* 269 */ "trigger_time ::= INSTEAD OF",
- /* 270 */ "trigger_time ::=",
- /* 271 */ "trigger_event ::= DELETE|INSERT",
- /* 272 */ "trigger_event ::= UPDATE",
- /* 273 */ "trigger_event ::= UPDATE OF idlist",
- /* 274 */ "foreach_clause ::=",
- /* 275 */ "foreach_clause ::= FOR EACH ROW",
- /* 276 */ "when_clause ::=",
- /* 277 */ "when_clause ::= WHEN expr",
- /* 278 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
- /* 279 */ "trigger_cmd_list ::= trigger_cmd SEMI",
- /* 280 */ "trnm ::= nm",
- /* 281 */ "trnm ::= nm DOT nm",
- /* 282 */ "tridxby ::=",
- /* 283 */ "tridxby ::= INDEXED BY nm",
- /* 284 */ "tridxby ::= NOT INDEXED",
- /* 285 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt",
- /* 286 */ "trigger_cmd ::= insert_cmd INTO trnm idlist_opt select",
- /* 287 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt",
- /* 288 */ "trigger_cmd ::= select",
- /* 289 */ "expr ::= RAISE LP IGNORE RP",
- /* 290 */ "expr ::= RAISE LP raisetype COMMA nm RP",
- /* 291 */ "raisetype ::= ROLLBACK",
- /* 292 */ "raisetype ::= ABORT",
- /* 293 */ "raisetype ::= FAIL",
- /* 294 */ "cmd ::= DROP TRIGGER ifexists fullname",
- /* 295 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
- /* 296 */ "cmd ::= DETACH database_kw_opt expr",
- /* 297 */ "key_opt ::=",
- /* 298 */ "key_opt ::= KEY expr",
- /* 299 */ "database_kw_opt ::= DATABASE",
- /* 300 */ "database_kw_opt ::=",
- /* 301 */ "cmd ::= REINDEX",
- /* 302 */ "cmd ::= REINDEX nm dbnm",
- /* 303 */ "cmd ::= ANALYZE",
- /* 304 */ "cmd ::= ANALYZE nm dbnm",
- /* 305 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
- /* 306 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column",
- /* 307 */ "add_column_fullname ::= fullname",
- /* 308 */ "kwcolumn_opt ::=",
- /* 309 */ "kwcolumn_opt ::= COLUMNKW",
- /* 310 */ "cmd ::= create_vtab",
- /* 311 */ "cmd ::= create_vtab LP vtabarglist RP",
- /* 312 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
- /* 313 */ "vtabarglist ::= vtabarg",
- /* 314 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
- /* 315 */ "vtabarg ::=",
- /* 316 */ "vtabarg ::= vtabarg vtabargtoken",
- /* 317 */ "vtabargtoken ::= ANY",
- /* 318 */ "vtabargtoken ::= lp anylist RP",
- /* 319 */ "lp ::= LP",
- /* 320 */ "anylist ::=",
- /* 321 */ "anylist ::= anylist LP anylist RP",
- /* 322 */ "anylist ::= anylist ANY",
- /* 323 */ "with ::=",
- /* 324 */ "with ::= WITH wqlist",
- /* 325 */ "with ::= WITH RECURSIVE wqlist",
- /* 326 */ "wqlist ::= nm eidlist_opt AS LP select RP",
- /* 327 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP",
+ /* 40 */ "id ::= ID",
+ /* 41 */ "id ::= INDEXED",
+ /* 42 */ "ids ::= ID|STRING",
+ /* 43 */ "nm ::= id",
+ /* 44 */ "nm ::= STRING",
+ /* 45 */ "nm ::= JOIN_KW",
+ /* 46 */ "type ::=",
+ /* 47 */ "type ::= typetoken",
+ /* 48 */ "typetoken ::= typename",
+ /* 49 */ "typetoken ::= typename LP signed RP",
+ /* 50 */ "typetoken ::= typename LP signed COMMA signed RP",
+ /* 51 */ "typename ::= ids",
+ /* 52 */ "typename ::= typename ids",
+ /* 53 */ "signed ::= plus_num",
+ /* 54 */ "signed ::= minus_num",
+ /* 55 */ "carglist ::= carglist ccons",
+ /* 56 */ "carglist ::=",
+ /* 57 */ "ccons ::= CONSTRAINT nm",
+ /* 58 */ "ccons ::= DEFAULT term",
+ /* 59 */ "ccons ::= DEFAULT LP expr RP",
+ /* 60 */ "ccons ::= DEFAULT PLUS term",
+ /* 61 */ "ccons ::= DEFAULT MINUS term",
+ /* 62 */ "ccons ::= DEFAULT id",
+ /* 63 */ "ccons ::= NULL onconf",
+ /* 64 */ "ccons ::= NOT NULL onconf",
+ /* 65 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc",
+ /* 66 */ "ccons ::= UNIQUE onconf",
+ /* 67 */ "ccons ::= CHECK LP expr RP",
+ /* 68 */ "ccons ::= REFERENCES nm idxlist_opt refargs",
+ /* 69 */ "ccons ::= defer_subclause",
+ /* 70 */ "ccons ::= COLLATE ids",
+ /* 71 */ "autoinc ::=",
+ /* 72 */ "autoinc ::= AUTOINCR",
+ /* 73 */ "refargs ::=",
+ /* 74 */ "refargs ::= refargs refarg",
+ /* 75 */ "refarg ::= MATCH nm",
+ /* 76 */ "refarg ::= ON INSERT refact",
+ /* 77 */ "refarg ::= ON DELETE refact",
+ /* 78 */ "refarg ::= ON UPDATE refact",
+ /* 79 */ "refact ::= SET NULL",
+ /* 80 */ "refact ::= SET DEFAULT",
+ /* 81 */ "refact ::= CASCADE",
+ /* 82 */ "refact ::= RESTRICT",
+ /* 83 */ "refact ::= NO ACTION",
+ /* 84 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt",
+ /* 85 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt",
+ /* 86 */ "init_deferred_pred_opt ::=",
+ /* 87 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED",
+ /* 88 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE",
+ /* 89 */ "conslist_opt ::=",
+ /* 90 */ "conslist_opt ::= COMMA conslist",
+ /* 91 */ "conslist ::= conslist tconscomma tcons",
+ /* 92 */ "conslist ::= tcons",
+ /* 93 */ "tconscomma ::= COMMA",
+ /* 94 */ "tconscomma ::=",
+ /* 95 */ "tcons ::= CONSTRAINT nm",
+ /* 96 */ "tcons ::= PRIMARY KEY LP idxlist autoinc RP onconf",
+ /* 97 */ "tcons ::= UNIQUE LP idxlist RP onconf",
+ /* 98 */ "tcons ::= CHECK LP expr RP onconf",
+ /* 99 */ "tcons ::= FOREIGN KEY LP idxlist RP REFERENCES nm idxlist_opt refargs defer_subclause_opt",
+ /* 100 */ "defer_subclause_opt ::=",
+ /* 101 */ "defer_subclause_opt ::= defer_subclause",
+ /* 102 */ "onconf ::=",
+ /* 103 */ "onconf ::= ON CONFLICT resolvetype",
+ /* 104 */ "orconf ::=",
+ /* 105 */ "orconf ::= OR resolvetype",
+ /* 106 */ "resolvetype ::= raisetype",
+ /* 107 */ "resolvetype ::= IGNORE",
+ /* 108 */ "resolvetype ::= REPLACE",
+ /* 109 */ "cmd ::= DROP TABLE ifexists fullname",
+ /* 110 */ "ifexists ::= IF EXISTS",
+ /* 111 */ "ifexists ::=",
+ /* 112 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm AS select",
+ /* 113 */ "cmd ::= DROP VIEW ifexists fullname",
+ /* 114 */ "cmd ::= select",
+ /* 115 */ "select ::= oneselect",
+ /* 116 */ "select ::= select multiselect_op oneselect",
+ /* 117 */ "multiselect_op ::= UNION",
+ /* 118 */ "multiselect_op ::= UNION ALL",
+ /* 119 */ "multiselect_op ::= EXCEPT|INTERSECT",
+ /* 120 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt",
+ /* 121 */ "distinct ::= DISTINCT",
+ /* 122 */ "distinct ::= ALL",
+ /* 123 */ "distinct ::=",
+ /* 124 */ "sclp ::= selcollist COMMA",
+ /* 125 */ "sclp ::=",
+ /* 126 */ "selcollist ::= sclp expr as",
+ /* 127 */ "selcollist ::= sclp STAR",
+ /* 128 */ "selcollist ::= sclp nm DOT STAR",
+ /* 129 */ "as ::= AS nm",
+ /* 130 */ "as ::= ids",
+ /* 131 */ "as ::=",
+ /* 132 */ "from ::=",
+ /* 133 */ "from ::= FROM seltablist",
+ /* 134 */ "stl_prefix ::= seltablist joinop",
+ /* 135 */ "stl_prefix ::=",
+ /* 136 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt",
+ /* 137 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt",
+ /* 138 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt",
+ /* 139 */ "dbnm ::=",
+ /* 140 */ "dbnm ::= DOT nm",
+ /* 141 */ "fullname ::= nm dbnm",
+ /* 142 */ "joinop ::= COMMA|JOIN",
+ /* 143 */ "joinop ::= JOIN_KW JOIN",
+ /* 144 */ "joinop ::= JOIN_KW nm JOIN",
+ /* 145 */ "joinop ::= JOIN_KW nm nm JOIN",
+ /* 146 */ "on_opt ::= ON expr",
+ /* 147 */ "on_opt ::=",
+ /* 148 */ "indexed_opt ::=",
+ /* 149 */ "indexed_opt ::= INDEXED BY nm",
+ /* 150 */ "indexed_opt ::= NOT INDEXED",
+ /* 151 */ "using_opt ::= USING LP idlist RP",
+ /* 152 */ "using_opt ::=",
+ /* 153 */ "orderby_opt ::=",
+ /* 154 */ "orderby_opt ::= ORDER BY sortlist",
+ /* 155 */ "sortlist ::= sortlist COMMA expr sortorder",
+ /* 156 */ "sortlist ::= expr sortorder",
+ /* 157 */ "sortorder ::= ASC",
+ /* 158 */ "sortorder ::= DESC",
+ /* 159 */ "sortorder ::=",
+ /* 160 */ "groupby_opt ::=",
+ /* 161 */ "groupby_opt ::= GROUP BY nexprlist",
+ /* 162 */ "having_opt ::=",
+ /* 163 */ "having_opt ::= HAVING expr",
+ /* 164 */ "limit_opt ::=",
+ /* 165 */ "limit_opt ::= LIMIT expr",
+ /* 166 */ "limit_opt ::= LIMIT expr OFFSET expr",
+ /* 167 */ "limit_opt ::= LIMIT expr COMMA expr",
+ /* 168 */ "cmd ::= DELETE FROM fullname indexed_opt where_opt",
+ /* 169 */ "where_opt ::=",
+ /* 170 */ "where_opt ::= WHERE expr",
+ /* 171 */ "cmd ::= UPDATE orconf fullname indexed_opt SET setlist where_opt",
+ /* 172 */ "setlist ::= setlist COMMA nm EQ expr",
+ /* 173 */ "setlist ::= nm EQ expr",
+ /* 174 */ "cmd ::= insert_cmd INTO fullname inscollist_opt valuelist",
+ /* 175 */ "cmd ::= insert_cmd INTO fullname inscollist_opt select",
+ /* 176 */ "cmd ::= insert_cmd INTO fullname inscollist_opt DEFAULT VALUES",
+ /* 177 */ "insert_cmd ::= INSERT orconf",
+ /* 178 */ "insert_cmd ::= REPLACE",
+ /* 179 */ "valuelist ::= VALUES LP nexprlist RP",
+ /* 180 */ "valuelist ::= valuelist COMMA LP exprlist RP",
+ /* 181 */ "inscollist_opt ::=",
+ /* 182 */ "inscollist_opt ::= LP idlist RP",
+ /* 183 */ "idlist ::= idlist COMMA nm",
+ /* 184 */ "idlist ::= nm",
+ /* 185 */ "expr ::= term",
+ /* 186 */ "expr ::= LP expr RP",
+ /* 187 */ "term ::= NULL",
+ /* 188 */ "expr ::= id",
+ /* 189 */ "expr ::= JOIN_KW",
+ /* 190 */ "expr ::= nm DOT nm",
+ /* 191 */ "expr ::= nm DOT nm DOT nm",
+ /* 192 */ "term ::= INTEGER|FLOAT|BLOB",
+ /* 193 */ "term ::= STRING",
+ /* 194 */ "expr ::= REGISTER",
+ /* 195 */ "expr ::= VARIABLE",
+ /* 196 */ "expr ::= expr COLLATE ids",
+ /* 197 */ "expr ::= CAST LP expr AS typetoken RP",
+ /* 198 */ "expr ::= ID LP distinct exprlist RP",
+ /* 199 */ "expr ::= ID LP STAR RP",
+ /* 200 */ "term ::= CTIME_KW",
+ /* 201 */ "expr ::= expr AND expr",
+ /* 202 */ "expr ::= expr OR expr",
+ /* 203 */ "expr ::= expr LT|GT|GE|LE expr",
+ /* 204 */ "expr ::= expr EQ|NE expr",
+ /* 205 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
+ /* 206 */ "expr ::= expr PLUS|MINUS expr",
+ /* 207 */ "expr ::= expr STAR|SLASH|REM expr",
+ /* 208 */ "expr ::= expr CONCAT expr",
+ /* 209 */ "likeop ::= LIKE_KW",
+ /* 210 */ "likeop ::= NOT LIKE_KW",
+ /* 211 */ "likeop ::= MATCH",
+ /* 212 */ "likeop ::= NOT MATCH",
+ /* 213 */ "expr ::= expr likeop expr",
+ /* 214 */ "expr ::= expr likeop expr ESCAPE expr",
+ /* 215 */ "expr ::= expr ISNULL|NOTNULL",
+ /* 216 */ "expr ::= expr NOT NULL",
+ /* 217 */ "expr ::= expr IS expr",
+ /* 218 */ "expr ::= expr IS NOT expr",
+ /* 219 */ "expr ::= NOT expr",
+ /* 220 */ "expr ::= BITNOT expr",
+ /* 221 */ "expr ::= MINUS expr",
+ /* 222 */ "expr ::= PLUS expr",
+ /* 223 */ "between_op ::= BETWEEN",
+ /* 224 */ "between_op ::= NOT BETWEEN",
+ /* 225 */ "expr ::= expr between_op expr AND expr",
+ /* 226 */ "in_op ::= IN",
+ /* 227 */ "in_op ::= NOT IN",
+ /* 228 */ "expr ::= expr in_op LP exprlist RP",
+ /* 229 */ "expr ::= LP select RP",
+ /* 230 */ "expr ::= expr in_op LP select RP",
+ /* 231 */ "expr ::= expr in_op nm dbnm",
+ /* 232 */ "expr ::= EXISTS LP select RP",
+ /* 233 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 234 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 235 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 236 */ "case_else ::= ELSE expr",
+ /* 237 */ "case_else ::=",
+ /* 238 */ "case_operand ::= expr",
+ /* 239 */ "case_operand ::=",
+ /* 240 */ "exprlist ::= nexprlist",
+ /* 241 */ "exprlist ::=",
+ /* 242 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 243 */ "nexprlist ::= expr",
+ /* 244 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP idxlist RP where_opt",
+ /* 245 */ "uniqueflag ::= UNIQUE",
+ /* 246 */ "uniqueflag ::=",
+ /* 247 */ "idxlist_opt ::=",
+ /* 248 */ "idxlist_opt ::= LP idxlist RP",
+ /* 249 */ "idxlist ::= idxlist COMMA nm collate sortorder",
+ /* 250 */ "idxlist ::= nm collate sortorder",
+ /* 251 */ "collate ::=",
+ /* 252 */ "collate ::= COLLATE ids",
+ /* 253 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 254 */ "cmd ::= VACUUM",
+ /* 255 */ "cmd ::= VACUUM nm",
+ /* 256 */ "cmd ::= PRAGMA nm dbnm",
+ /* 257 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 258 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 259 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 260 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 261 */ "nmnum ::= plus_num",
+ /* 262 */ "nmnum ::= nm",
+ /* 263 */ "nmnum ::= ON",
+ /* 264 */ "nmnum ::= DELETE",
+ /* 265 */ "nmnum ::= DEFAULT",
+ /* 266 */ "plus_num ::= PLUS number",
+ /* 267 */ "plus_num ::= number",
+ /* 268 */ "minus_num ::= MINUS number",
+ /* 269 */ "number ::= INTEGER|FLOAT",
+ /* 270 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 271 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 272 */ "trigger_time ::= BEFORE",
+ /* 273 */ "trigger_time ::= AFTER",
+ /* 274 */ "trigger_time ::= INSTEAD OF",
+ /* 275 */ "trigger_time ::=",
+ /* 276 */ "trigger_event ::= DELETE|INSERT",
+ /* 277 */ "trigger_event ::= UPDATE",
+ /* 278 */ "trigger_event ::= UPDATE OF idlist",
+ /* 279 */ "foreach_clause ::=",
+ /* 280 */ "foreach_clause ::= FOR EACH ROW",
+ /* 281 */ "when_clause ::=",
+ /* 282 */ "when_clause ::= WHEN expr",
+ /* 283 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 284 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 285 */ "trnm ::= nm",
+ /* 286 */ "trnm ::= nm DOT nm",
+ /* 287 */ "tridxby ::=",
+ /* 288 */ "tridxby ::= INDEXED BY nm",
+ /* 289 */ "tridxby ::= NOT INDEXED",
+ /* 290 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt",
+ /* 291 */ "trigger_cmd ::= insert_cmd INTO trnm inscollist_opt valuelist",
+ /* 292 */ "trigger_cmd ::= insert_cmd INTO trnm inscollist_opt select",
+ /* 293 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt",
+ /* 294 */ "trigger_cmd ::= select",
+ /* 295 */ "expr ::= RAISE LP IGNORE RP",
+ /* 296 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 297 */ "raisetype ::= ROLLBACK",
+ /* 298 */ "raisetype ::= ABORT",
+ /* 299 */ "raisetype ::= FAIL",
+ /* 300 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 301 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 302 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 303 */ "key_opt ::=",
+ /* 304 */ "key_opt ::= KEY expr",
+ /* 305 */ "database_kw_opt ::= DATABASE",
+ /* 306 */ "database_kw_opt ::=",
+ /* 307 */ "cmd ::= REINDEX",
+ /* 308 */ "cmd ::= REINDEX nm dbnm",
+ /* 309 */ "cmd ::= ANALYZE",
+ /* 310 */ "cmd ::= ANALYZE nm dbnm",
+ /* 311 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 312 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column",
+ /* 313 */ "add_column_fullname ::= fullname",
+ /* 314 */ "kwcolumn_opt ::=",
+ /* 315 */ "kwcolumn_opt ::= COLUMNKW",
+ /* 316 */ "cmd ::= create_vtab",
+ /* 317 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 318 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 319 */ "vtabarglist ::= vtabarg",
+ /* 320 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
+ /* 321 */ "vtabarg ::=",
+ /* 322 */ "vtabarg ::= vtabarg vtabargtoken",
+ /* 323 */ "vtabargtoken ::= ANY",
+ /* 324 */ "vtabargtoken ::= lp anylist RP",
+ /* 325 */ "lp ::= LP",
+ /* 326 */ "anylist ::=",
+ /* 327 */ "anylist ::= anylist LP anylist RP",
+ /* 328 */ "anylist ::= anylist ANY",
};
#endif /* NDEBUG */
@@ -127372,9 +115681,9 @@ static void yyGrowStack(yyParser *p){
** A pointer to a parser. This pointer is used in subsequent calls
** to sqlite3Parser and sqlite3ParserFree.
*/
-SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(u64)){
+SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(size_t)){
yyParser *pParser;
- pParser = (yyParser*)(*mallocProc)( (u64)sizeof(yyParser) );
+ pParser = (yyParser*)(*mallocProc)( (size_t)sizeof(yyParser) );
if( pParser ){
pParser->yyidx = -1;
#ifdef YYTRACKMAXSTACKDEPTH
@@ -127411,76 +115720,76 @@ static void yy_destructor(
** which appear on the RHS of the rule, but which are not used
** inside the C code.
*/
- case 163: /* select */
- case 196: /* selectnowith */
- case 197: /* oneselect */
- case 208: /* values */
+ case 162: /* select */
+ case 196: /* oneselect */
{
-sqlite3SelectDelete(pParse->db, (yypminor->yy3));
+sqlite3SelectDelete(pParse->db, (yypminor->yy387));
}
break;
- case 174: /* term */
- case 175: /* expr */
+ case 175: /* term */
+ case 176: /* expr */
{
-sqlite3ExprDelete(pParse->db, (yypminor->yy346).pExpr);
+sqlite3ExprDelete(pParse->db, (yypminor->yy118).pExpr);
}
break;
- case 179: /* eidlist_opt */
- case 188: /* sortlist */
- case 189: /* eidlist */
- case 201: /* selcollist */
- case 204: /* groupby_opt */
- case 206: /* orderby_opt */
- case 209: /* nexprlist */
- case 210: /* exprlist */
- case 211: /* sclp */
- case 221: /* setlist */
- case 228: /* case_exprlist */
+ case 180: /* idxlist_opt */
+ case 189: /* idxlist */
+ case 199: /* selcollist */
+ case 202: /* groupby_opt */
+ case 204: /* orderby_opt */
+ case 206: /* sclp */
+ case 216: /* sortlist */
+ case 217: /* nexprlist */
+ case 218: /* setlist */
+ case 222: /* exprlist */
+ case 227: /* case_exprlist */
{
-sqlite3ExprListDelete(pParse->db, (yypminor->yy14));
+sqlite3ExprListDelete(pParse->db, (yypminor->yy322));
}
break;
case 195: /* fullname */
- case 202: /* from */
- case 213: /* seltablist */
- case 214: /* stl_prefix */
+ case 200: /* from */
+ case 208: /* seltablist */
+ case 209: /* stl_prefix */
{
-sqlite3SrcListDelete(pParse->db, (yypminor->yy65));
+sqlite3SrcListDelete(pParse->db, (yypminor->yy259));
}
break;
- case 198: /* with */
- case 252: /* wqlist */
+ case 201: /* where_opt */
+ case 203: /* having_opt */
+ case 212: /* on_opt */
+ case 226: /* case_operand */
+ case 228: /* case_else */
+ case 238: /* when_clause */
+ case 243: /* key_opt */
{
-sqlite3WithDelete(pParse->db, (yypminor->yy59));
+sqlite3ExprDelete(pParse->db, (yypminor->yy314));
}
break;
- case 203: /* where_opt */
- case 205: /* having_opt */
- case 217: /* on_opt */
- case 227: /* case_operand */
- case 229: /* case_else */
- case 238: /* when_clause */
- case 243: /* key_opt */
+ case 213: /* using_opt */
+ case 215: /* idlist */
+ case 220: /* inscollist_opt */
{
-sqlite3ExprDelete(pParse->db, (yypminor->yy132));
+sqlite3IdListDelete(pParse->db, (yypminor->yy384));
}
break;
- case 218: /* using_opt */
- case 220: /* idlist */
- case 223: /* idlist_opt */
+ case 221: /* valuelist */
{
-sqlite3IdListDelete(pParse->db, (yypminor->yy408));
+
+ sqlite3ExprListDelete(pParse->db, (yypminor->yy260).pList);
+ sqlite3SelectDelete(pParse->db, (yypminor->yy260).pSelect);
+
}
break;
case 234: /* trigger_cmd_list */
case 239: /* trigger_cmd */
{
-sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy473));
+sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy203));
}
break;
case 236: /* trigger_event */
{
-sqlite3IdListDelete(pParse->db, (yypminor->yy378).b);
+sqlite3IdListDelete(pParse->db, (yypminor->yy90).b);
}
break;
default: break; /* If no destructor action specified: do nothing */
@@ -127501,7 +115810,7 @@ static int yy_pop_parser_stack(yyParser *pParser){
/* There is no mechanism by which the parser stack can be popped below
** empty in SQLite. */
- assert( pParser->yyidx>=0 );
+ if( NEVER(pParser->yyidx<0) ) return 0;
#ifndef NDEBUG
if( yyTraceFILE && pParser->yyidx>=0 ){
fprintf(yyTraceFILE,"%sPopping %s\n",
@@ -127567,10 +115876,10 @@ static int yy_find_shift_action(
int i;
int stateno = pParser->yystack[pParser->yyidx].stateno;
- if( stateno>=YY_MIN_REDUCE ) return stateno;
- assert( stateno <= YY_SHIFT_COUNT );
- i = yy_shift_ofst[stateno];
- if( i==YY_SHIFT_USE_DFLT ) return yy_default[stateno];
+ if( stateno>YY_SHIFT_COUNT
+ || (i = yy_shift_ofst[stateno])==YY_SHIFT_USE_DFLT ){
+ return yy_default[stateno];
+ }
assert( iLookAhead!=YYNOCODE );
i += iLookAhead;
if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
@@ -127673,29 +115982,7 @@ static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
}
/*
-** Print tracing information for a SHIFT action
-*/
-#ifndef NDEBUG
-static void yyTraceShift(yyParser *yypParser, int yyNewState){
- if( yyTraceFILE ){
- int i;
- if( yyNewState<YYNSTATE ){
- fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState);
- fprintf(yyTraceFILE,"%sStack:",yyTracePrompt);
- for(i=1; i<=yypParser->yyidx; i++)
- fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]);
- fprintf(yyTraceFILE,"\n");
- }else{
- fprintf(yyTraceFILE,"%sShift *\n",yyTracePrompt);
- }
- }
-}
-#else
-# define yyTraceShift(X,Y)
-#endif
-
-/*
-** Perform a shift action. Return the number of errors.
+** Perform a shift action.
*/
static void yy_shift(
yyParser *yypParser, /* The parser to be shifted */
@@ -127728,7 +116015,16 @@ static void yy_shift(
yytos->stateno = (YYACTIONTYPE)yyNewState;
yytos->major = (YYCODETYPE)yyMajor;
yytos->minor = *yypMinor;
- yyTraceShift(yypParser, yyNewState);
+#ifndef NDEBUG
+ if( yyTraceFILE && yypParser->yyidx>0 ){
+ int i;
+ fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState);
+ fprintf(yyTraceFILE,"%sStack:",yyTracePrompt);
+ for(i=1; i<=yypParser->yyidx; i++)
+ fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]);
+ fprintf(yyTraceFILE,"\n");
+ }
+#endif
}
/* The following table contains information about every rule that
@@ -127738,272 +116034,277 @@ static const struct {
YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
unsigned char nrhs; /* Number of right-hand side symbols in the rule */
} yyRuleInfo[] = {
+ { 143, 1 },
+ { 144, 2 },
{ 144, 1 },
- { 145, 2 },
{ 145, 1 },
+ { 145, 3 },
+ { 146, 0 },
{ 146, 1 },
{ 146, 3 },
- { 147, 0 },
{ 147, 1 },
- { 147, 3 },
- { 148, 1 },
- { 149, 3 },
- { 151, 0 },
- { 151, 1 },
- { 151, 2 },
+ { 148, 3 },
{ 150, 0 },
{ 150, 1 },
- { 150, 1 },
- { 150, 1 },
- { 149, 2 },
- { 149, 2 },
- { 149, 2 },
- { 153, 1 },
- { 153, 0 },
- { 149, 2 },
- { 149, 3 },
- { 149, 5 },
- { 149, 2 },
- { 154, 6 },
- { 156, 1 },
- { 158, 0 },
- { 158, 3 },
- { 157, 1 },
- { 157, 0 },
- { 155, 5 },
- { 155, 2 },
- { 162, 0 },
- { 162, 2 },
- { 160, 3 },
- { 160, 1 },
- { 164, 3 },
- { 165, 1 },
- { 152, 1 },
- { 152, 1 },
+ { 150, 2 },
+ { 149, 0 },
+ { 149, 1 },
+ { 149, 1 },
+ { 149, 1 },
+ { 148, 2 },
+ { 148, 2 },
+ { 148, 2 },
{ 152, 1 },
- { 166, 0 },
- { 166, 1 },
+ { 152, 0 },
+ { 148, 2 },
+ { 148, 3 },
+ { 148, 5 },
+ { 148, 2 },
+ { 153, 6 },
+ { 155, 1 },
+ { 157, 0 },
+ { 157, 3 },
+ { 156, 1 },
+ { 156, 0 },
+ { 154, 5 },
+ { 154, 2 },
+ { 161, 0 },
+ { 161, 2 },
+ { 159, 3 },
+ { 159, 1 },
+ { 163, 3 },
+ { 164, 1 },
+ { 167, 1 },
+ { 167, 1 },
{ 168, 1 },
- { 168, 4 },
- { 168, 6 },
+ { 151, 1 },
+ { 151, 1 },
+ { 151, 1 },
+ { 165, 0 },
+ { 165, 1 },
{ 169, 1 },
- { 169, 2 },
- { 170, 1 },
+ { 169, 4 },
+ { 169, 6 },
{ 170, 1 },
- { 167, 2 },
- { 167, 0 },
- { 173, 2 },
- { 173, 2 },
- { 173, 4 },
- { 173, 3 },
- { 173, 3 },
- { 173, 2 },
- { 173, 2 },
- { 173, 3 },
- { 173, 5 },
- { 173, 2 },
- { 173, 4 },
- { 173, 4 },
- { 173, 1 },
- { 173, 2 },
- { 178, 0 },
- { 178, 1 },
- { 180, 0 },
- { 180, 2 },
- { 182, 2 },
- { 182, 3 },
- { 182, 3 },
- { 182, 3 },
- { 183, 2 },
- { 183, 2 },
- { 183, 1 },
- { 183, 1 },
- { 183, 2 },
- { 181, 3 },
+ { 170, 2 },
+ { 171, 1 },
+ { 171, 1 },
+ { 166, 2 },
+ { 166, 0 },
+ { 174, 2 },
+ { 174, 2 },
+ { 174, 4 },
+ { 174, 3 },
+ { 174, 3 },
+ { 174, 2 },
+ { 174, 2 },
+ { 174, 3 },
+ { 174, 5 },
+ { 174, 2 },
+ { 174, 4 },
+ { 174, 4 },
+ { 174, 1 },
+ { 174, 2 },
+ { 179, 0 },
+ { 179, 1 },
+ { 181, 0 },
{ 181, 2 },
- { 184, 0 },
+ { 183, 2 },
+ { 183, 3 },
+ { 183, 3 },
+ { 183, 3 },
{ 184, 2 },
{ 184, 2 },
- { 161, 0 },
- { 161, 2 },
- { 185, 3 },
- { 185, 1 },
+ { 184, 1 },
+ { 184, 1 },
+ { 184, 2 },
+ { 182, 3 },
+ { 182, 2 },
+ { 185, 0 },
+ { 185, 2 },
+ { 185, 2 },
+ { 160, 0 },
+ { 160, 2 },
+ { 186, 3 },
{ 186, 1 },
- { 186, 0 },
- { 187, 2 },
- { 187, 7 },
- { 187, 5 },
- { 187, 5 },
- { 187, 10 },
+ { 187, 1 },
+ { 187, 0 },
+ { 188, 2 },
+ { 188, 7 },
+ { 188, 5 },
+ { 188, 5 },
+ { 188, 10 },
{ 190, 0 },
{ 190, 1 },
- { 176, 0 },
- { 176, 3 },
+ { 177, 0 },
+ { 177, 3 },
{ 191, 0 },
{ 191, 2 },
{ 192, 1 },
{ 192, 1 },
{ 192, 1 },
- { 149, 4 },
+ { 148, 4 },
{ 194, 2 },
{ 194, 0 },
- { 149, 9 },
- { 149, 4 },
- { 149, 1 },
- { 163, 2 },
- { 196, 1 },
- { 196, 3 },
- { 199, 1 },
- { 199, 2 },
- { 199, 1 },
- { 197, 9 },
+ { 148, 8 },
+ { 148, 4 },
+ { 148, 1 },
+ { 162, 1 },
+ { 162, 3 },
{ 197, 1 },
- { 208, 4 },
- { 208, 5 },
- { 200, 1 },
- { 200, 1 },
+ { 197, 2 },
+ { 197, 1 },
+ { 196, 9 },
+ { 198, 1 },
+ { 198, 1 },
+ { 198, 0 },
+ { 206, 2 },
+ { 206, 0 },
+ { 199, 3 },
+ { 199, 2 },
+ { 199, 4 },
+ { 207, 2 },
+ { 207, 1 },
+ { 207, 0 },
{ 200, 0 },
- { 211, 2 },
- { 211, 0 },
- { 201, 3 },
- { 201, 2 },
- { 201, 4 },
+ { 200, 2 },
+ { 209, 2 },
+ { 209, 0 },
+ { 208, 7 },
+ { 208, 7 },
+ { 208, 7 },
+ { 158, 0 },
+ { 158, 2 },
+ { 195, 2 },
+ { 210, 1 },
+ { 210, 2 },
+ { 210, 3 },
+ { 210, 4 },
{ 212, 2 },
- { 212, 1 },
{ 212, 0 },
- { 202, 0 },
- { 202, 2 },
- { 214, 2 },
- { 214, 0 },
- { 213, 7 },
- { 213, 9 },
- { 213, 7 },
- { 213, 7 },
- { 159, 0 },
- { 159, 2 },
- { 195, 2 },
- { 215, 1 },
- { 215, 2 },
- { 215, 3 },
- { 215, 4 },
- { 217, 2 },
- { 217, 0 },
- { 216, 0 },
- { 216, 3 },
- { 216, 2 },
- { 218, 4 },
- { 218, 0 },
- { 206, 0 },
- { 206, 3 },
- { 188, 4 },
- { 188, 2 },
- { 177, 1 },
- { 177, 1 },
- { 177, 0 },
+ { 211, 0 },
+ { 211, 3 },
+ { 211, 2 },
+ { 213, 4 },
+ { 213, 0 },
{ 204, 0 },
{ 204, 3 },
- { 205, 0 },
- { 205, 2 },
- { 207, 0 },
- { 207, 2 },
- { 207, 4 },
- { 207, 4 },
- { 149, 6 },
+ { 216, 4 },
+ { 216, 2 },
+ { 178, 1 },
+ { 178, 1 },
+ { 178, 0 },
+ { 202, 0 },
+ { 202, 3 },
{ 203, 0 },
{ 203, 2 },
- { 149, 8 },
+ { 205, 0 },
+ { 205, 2 },
+ { 205, 4 },
+ { 205, 4 },
+ { 148, 5 },
+ { 201, 0 },
+ { 201, 2 },
+ { 148, 7 },
+ { 218, 5 },
+ { 218, 3 },
+ { 148, 5 },
+ { 148, 5 },
+ { 148, 6 },
+ { 219, 2 },
+ { 219, 1 },
+ { 221, 4 },
{ 221, 5 },
- { 221, 3 },
- { 149, 6 },
- { 149, 7 },
- { 222, 2 },
- { 222, 1 },
- { 223, 0 },
- { 223, 3 },
+ { 220, 0 },
{ 220, 3 },
- { 220, 1 },
+ { 215, 3 },
+ { 215, 1 },
+ { 176, 1 },
+ { 176, 3 },
{ 175, 1 },
- { 175, 3 },
- { 174, 1 },
+ { 176, 1 },
+ { 176, 1 },
+ { 176, 3 },
+ { 176, 5 },
{ 175, 1 },
{ 175, 1 },
- { 175, 3 },
- { 175, 5 },
- { 174, 1 },
- { 174, 1 },
+ { 176, 1 },
+ { 176, 1 },
+ { 176, 3 },
+ { 176, 6 },
+ { 176, 5 },
+ { 176, 4 },
{ 175, 1 },
- { 175, 3 },
- { 175, 6 },
- { 175, 5 },
- { 175, 4 },
- { 174, 1 },
- { 175, 3 },
- { 175, 3 },
- { 175, 3 },
- { 175, 3 },
- { 175, 3 },
- { 175, 3 },
- { 175, 3 },
- { 175, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 3 },
+ { 223, 1 },
+ { 223, 2 },
+ { 223, 1 },
+ { 223, 2 },
+ { 176, 3 },
+ { 176, 5 },
+ { 176, 2 },
+ { 176, 3 },
+ { 176, 3 },
+ { 176, 4 },
+ { 176, 2 },
+ { 176, 2 },
+ { 176, 2 },
+ { 176, 2 },
{ 224, 1 },
{ 224, 2 },
- { 175, 3 },
- { 175, 5 },
- { 175, 2 },
- { 175, 3 },
- { 175, 3 },
- { 175, 4 },
- { 175, 2 },
- { 175, 2 },
- { 175, 2 },
- { 175, 2 },
+ { 176, 5 },
{ 225, 1 },
{ 225, 2 },
- { 175, 5 },
+ { 176, 5 },
+ { 176, 3 },
+ { 176, 5 },
+ { 176, 4 },
+ { 176, 4 },
+ { 176, 5 },
+ { 227, 5 },
+ { 227, 4 },
+ { 228, 2 },
+ { 228, 0 },
{ 226, 1 },
- { 226, 2 },
- { 175, 5 },
- { 175, 3 },
- { 175, 5 },
- { 175, 4 },
- { 175, 4 },
- { 175, 5 },
- { 228, 5 },
- { 228, 4 },
- { 229, 2 },
+ { 226, 0 },
+ { 222, 1 },
+ { 222, 0 },
+ { 217, 3 },
+ { 217, 1 },
+ { 148, 12 },
+ { 229, 1 },
{ 229, 0 },
- { 227, 1 },
- { 227, 0 },
- { 210, 1 },
- { 210, 0 },
- { 209, 3 },
- { 209, 1 },
- { 149, 12 },
- { 230, 1 },
- { 230, 0 },
- { 179, 0 },
- { 179, 3 },
+ { 180, 0 },
+ { 180, 3 },
{ 189, 5 },
{ 189, 3 },
- { 231, 0 },
- { 231, 2 },
- { 149, 4 },
- { 149, 1 },
- { 149, 2 },
- { 149, 3 },
- { 149, 5 },
- { 149, 6 },
- { 149, 5 },
- { 149, 6 },
- { 232, 1 },
- { 232, 1 },
- { 232, 1 },
- { 232, 1 },
- { 232, 1 },
- { 171, 2 },
- { 171, 1 },
+ { 230, 0 },
+ { 230, 2 },
+ { 148, 4 },
+ { 148, 1 },
+ { 148, 2 },
+ { 148, 3 },
+ { 148, 5 },
+ { 148, 6 },
+ { 148, 5 },
+ { 148, 6 },
+ { 231, 1 },
+ { 231, 1 },
+ { 231, 1 },
+ { 231, 1 },
+ { 231, 1 },
{ 172, 2 },
- { 149, 5 },
+ { 172, 1 },
+ { 173, 2 },
+ { 232, 1 },
+ { 148, 5 },
{ 233, 11 },
{ 235, 1 },
{ 235, 1 },
@@ -128026,30 +116327,31 @@ static const struct {
{ 239, 7 },
{ 239, 5 },
{ 239, 5 },
+ { 239, 5 },
{ 239, 1 },
- { 175, 4 },
- { 175, 6 },
+ { 176, 4 },
+ { 176, 6 },
{ 193, 1 },
{ 193, 1 },
{ 193, 1 },
- { 149, 4 },
- { 149, 6 },
- { 149, 3 },
+ { 148, 4 },
+ { 148, 6 },
+ { 148, 3 },
{ 243, 0 },
{ 243, 2 },
{ 242, 1 },
{ 242, 0 },
- { 149, 1 },
- { 149, 3 },
- { 149, 1 },
- { 149, 3 },
- { 149, 6 },
- { 149, 6 },
+ { 148, 1 },
+ { 148, 3 },
+ { 148, 1 },
+ { 148, 3 },
+ { 148, 6 },
+ { 148, 6 },
{ 244, 1 },
{ 245, 0 },
{ 245, 1 },
- { 149, 1 },
- { 149, 4 },
+ { 148, 1 },
+ { 148, 4 },
{ 246, 8 },
{ 247, 1 },
{ 247, 3 },
@@ -128061,11 +116363,6 @@ static const struct {
{ 251, 0 },
{ 251, 4 },
{ 251, 2 },
- { 198, 0 },
- { 198, 2 },
- { 198, 3 },
- { 252, 6 },
- { 252, 8 },
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -128088,9 +116385,8 @@ static void yy_reduce(
#ifndef NDEBUG
if( yyTraceFILE && yyruleno>=0
&& yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
- yysize = yyRuleInfo[yyruleno].nrhs;
- fprintf(yyTraceFILE, "%sReduce [%s] -> state %d.\n", yyTracePrompt,
- yyRuleName[yyruleno], yymsp[-yysize].stateno);
+ fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt,
+ yyRuleName[yyruleno]);
}
#endif /* NDEBUG */
@@ -128134,17 +116430,17 @@ static void yy_reduce(
{ sqlite3FinishCoding(pParse); }
break;
case 9: /* cmd ::= BEGIN transtype trans_opt */
-{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy328);}
+{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy4);}
break;
case 13: /* transtype ::= */
-{yygotominor.yy328 = TK_DEFERRED;}
+{yygotominor.yy4 = TK_DEFERRED;}
break;
case 14: /* transtype ::= DEFERRED */
case 15: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==15);
case 16: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==16);
- case 115: /* multiselect_op ::= UNION */ yytestcase(yyruleno==115);
- case 117: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==117);
-{yygotominor.yy328 = yymsp[0].major;}
+ case 117: /* multiselect_op ::= UNION */ yytestcase(yyruleno==117);
+ case 119: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==119);
+{yygotominor.yy4 = yymsp[0].major;}
break;
case 17: /* cmd ::= COMMIT trans_opt */
case 18: /* cmd ::= END trans_opt */ yytestcase(yyruleno==18);
@@ -128170,7 +116466,7 @@ static void yy_reduce(
break;
case 26: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */
{
- sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy328,0,0,yymsp[-2].minor.yy328);
+ sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy4,0,0,yymsp[-2].minor.yy4);
}
break;
case 27: /* createkw ::= CREATE */
@@ -128181,47 +116477,45 @@ static void yy_reduce(
break;
case 28: /* ifnotexists ::= */
case 31: /* temp ::= */ yytestcase(yyruleno==31);
- case 68: /* autoinc ::= */ yytestcase(yyruleno==68);
- case 81: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ yytestcase(yyruleno==81);
- case 83: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==83);
- case 85: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ yytestcase(yyruleno==85);
- case 97: /* defer_subclause_opt ::= */ yytestcase(yyruleno==97);
- case 108: /* ifexists ::= */ yytestcase(yyruleno==108);
- case 219: /* between_op ::= BETWEEN */ yytestcase(yyruleno==219);
- case 222: /* in_op ::= IN */ yytestcase(yyruleno==222);
- case 247: /* collate ::= */ yytestcase(yyruleno==247);
-{yygotominor.yy328 = 0;}
+ case 71: /* autoinc ::= */ yytestcase(yyruleno==71);
+ case 84: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ yytestcase(yyruleno==84);
+ case 86: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==86);
+ case 88: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ yytestcase(yyruleno==88);
+ case 100: /* defer_subclause_opt ::= */ yytestcase(yyruleno==100);
+ case 111: /* ifexists ::= */ yytestcase(yyruleno==111);
+ case 223: /* between_op ::= BETWEEN */ yytestcase(yyruleno==223);
+ case 226: /* in_op ::= IN */ yytestcase(yyruleno==226);
+{yygotominor.yy4 = 0;}
break;
case 29: /* ifnotexists ::= IF NOT EXISTS */
case 30: /* temp ::= TEMP */ yytestcase(yyruleno==30);
- case 69: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==69);
- case 84: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ yytestcase(yyruleno==84);
- case 107: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==107);
- case 220: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==220);
- case 223: /* in_op ::= NOT IN */ yytestcase(yyruleno==223);
- case 248: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==248);
-{yygotominor.yy328 = 1;}
+ case 72: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==72);
+ case 87: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ yytestcase(yyruleno==87);
+ case 110: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==110);
+ case 224: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==224);
+ case 227: /* in_op ::= NOT IN */ yytestcase(yyruleno==227);
+{yygotominor.yy4 = 1;}
break;
case 32: /* create_table_args ::= LP columnlist conslist_opt RP table_options */
{
- sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy186,0);
+ sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy210,0);
}
break;
case 33: /* create_table_args ::= AS select */
{
- sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy3);
- sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy3);
+ sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy387);
+ sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy387);
}
break;
case 34: /* table_options ::= */
-{yygotominor.yy186 = 0;}
+{yygotominor.yy210 = 0;}
break;
case 35: /* table_options ::= WITHOUT nm */
{
if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){
- yygotominor.yy186 = TF_WithoutRowid | TF_NoVisibleRowid;
+ yygotominor.yy210 = TF_WithoutRowid;
}else{
- yygotominor.yy186 = 0;
+ yygotominor.yy210 = 0;
sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z);
}
}
@@ -128239,695 +116533,650 @@ static void yy_reduce(
pParse->constraintName.n = 0;
}
break;
- case 40: /* nm ::= ID|INDEXED */
- case 41: /* nm ::= STRING */ yytestcase(yyruleno==41);
- case 42: /* nm ::= JOIN_KW */ yytestcase(yyruleno==42);
- case 45: /* typetoken ::= typename */ yytestcase(yyruleno==45);
- case 48: /* typename ::= ID|STRING */ yytestcase(yyruleno==48);
- case 130: /* as ::= AS nm */ yytestcase(yyruleno==130);
- case 131: /* as ::= ID|STRING */ yytestcase(yyruleno==131);
- case 142: /* dbnm ::= DOT nm */ yytestcase(yyruleno==142);
- case 151: /* indexed_opt ::= INDEXED BY nm */ yytestcase(yyruleno==151);
- case 257: /* nmnum ::= plus_num */ yytestcase(yyruleno==257);
- case 258: /* nmnum ::= nm */ yytestcase(yyruleno==258);
- case 259: /* nmnum ::= ON */ yytestcase(yyruleno==259);
- case 260: /* nmnum ::= DELETE */ yytestcase(yyruleno==260);
- case 261: /* nmnum ::= DEFAULT */ yytestcase(yyruleno==261);
- case 262: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==262);
- case 263: /* plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==263);
- case 264: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==264);
- case 280: /* trnm ::= nm */ yytestcase(yyruleno==280);
+ case 40: /* id ::= ID */
+ case 41: /* id ::= INDEXED */ yytestcase(yyruleno==41);
+ case 42: /* ids ::= ID|STRING */ yytestcase(yyruleno==42);
+ case 43: /* nm ::= id */ yytestcase(yyruleno==43);
+ case 44: /* nm ::= STRING */ yytestcase(yyruleno==44);
+ case 45: /* nm ::= JOIN_KW */ yytestcase(yyruleno==45);
+ case 48: /* typetoken ::= typename */ yytestcase(yyruleno==48);
+ case 51: /* typename ::= ids */ yytestcase(yyruleno==51);
+ case 129: /* as ::= AS nm */ yytestcase(yyruleno==129);
+ case 130: /* as ::= ids */ yytestcase(yyruleno==130);
+ case 140: /* dbnm ::= DOT nm */ yytestcase(yyruleno==140);
+ case 149: /* indexed_opt ::= INDEXED BY nm */ yytestcase(yyruleno==149);
+ case 252: /* collate ::= COLLATE ids */ yytestcase(yyruleno==252);
+ case 261: /* nmnum ::= plus_num */ yytestcase(yyruleno==261);
+ case 262: /* nmnum ::= nm */ yytestcase(yyruleno==262);
+ case 263: /* nmnum ::= ON */ yytestcase(yyruleno==263);
+ case 264: /* nmnum ::= DELETE */ yytestcase(yyruleno==264);
+ case 265: /* nmnum ::= DEFAULT */ yytestcase(yyruleno==265);
+ case 266: /* plus_num ::= PLUS number */ yytestcase(yyruleno==266);
+ case 267: /* plus_num ::= number */ yytestcase(yyruleno==267);
+ case 268: /* minus_num ::= MINUS number */ yytestcase(yyruleno==268);
+ case 269: /* number ::= INTEGER|FLOAT */ yytestcase(yyruleno==269);
+ case 285: /* trnm ::= nm */ yytestcase(yyruleno==285);
{yygotominor.yy0 = yymsp[0].minor.yy0;}
break;
- case 44: /* type ::= typetoken */
+ case 47: /* type ::= typetoken */
{sqlite3AddColumnType(pParse,&yymsp[0].minor.yy0);}
break;
- case 46: /* typetoken ::= typename LP signed RP */
+ case 49: /* typetoken ::= typename LP signed RP */
{
yygotominor.yy0.z = yymsp[-3].minor.yy0.z;
yygotominor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-3].minor.yy0.z);
}
break;
- case 47: /* typetoken ::= typename LP signed COMMA signed RP */
+ case 50: /* typetoken ::= typename LP signed COMMA signed RP */
{
yygotominor.yy0.z = yymsp[-5].minor.yy0.z;
yygotominor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-5].minor.yy0.z);
}
break;
- case 49: /* typename ::= typename ID|STRING */
+ case 52: /* typename ::= typename ids */
{yygotominor.yy0.z=yymsp[-1].minor.yy0.z; yygotominor.yy0.n=yymsp[0].minor.yy0.n+(int)(yymsp[0].minor.yy0.z-yymsp[-1].minor.yy0.z);}
break;
- case 54: /* ccons ::= CONSTRAINT nm */
- case 92: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==92);
+ case 57: /* ccons ::= CONSTRAINT nm */
+ case 95: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==95);
{pParse->constraintName = yymsp[0].minor.yy0;}
break;
- case 55: /* ccons ::= DEFAULT term */
- case 57: /* ccons ::= DEFAULT PLUS term */ yytestcase(yyruleno==57);
-{sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy346);}
+ case 58: /* ccons ::= DEFAULT term */
+ case 60: /* ccons ::= DEFAULT PLUS term */ yytestcase(yyruleno==60);
+{sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy118);}
break;
- case 56: /* ccons ::= DEFAULT LP expr RP */
-{sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy346);}
+ case 59: /* ccons ::= DEFAULT LP expr RP */
+{sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy118);}
break;
- case 58: /* ccons ::= DEFAULT MINUS term */
+ case 61: /* ccons ::= DEFAULT MINUS term */
{
ExprSpan v;
- v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy346.pExpr, 0, 0);
+ v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy118.pExpr, 0, 0);
v.zStart = yymsp[-1].minor.yy0.z;
- v.zEnd = yymsp[0].minor.yy346.zEnd;
+ v.zEnd = yymsp[0].minor.yy118.zEnd;
sqlite3AddDefaultValue(pParse,&v);
}
break;
- case 59: /* ccons ::= DEFAULT ID|INDEXED */
+ case 62: /* ccons ::= DEFAULT id */
{
ExprSpan v;
spanExpr(&v, pParse, TK_STRING, &yymsp[0].minor.yy0);
sqlite3AddDefaultValue(pParse,&v);
}
break;
- case 61: /* ccons ::= NOT NULL onconf */
-{sqlite3AddNotNull(pParse, yymsp[0].minor.yy328);}
+ case 64: /* ccons ::= NOT NULL onconf */
+{sqlite3AddNotNull(pParse, yymsp[0].minor.yy4);}
break;
- case 62: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */
-{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy328,yymsp[0].minor.yy328,yymsp[-2].minor.yy328);}
+ case 65: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */
+{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy4,yymsp[0].minor.yy4,yymsp[-2].minor.yy4);}
break;
- case 63: /* ccons ::= UNIQUE onconf */
-{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy328,0,0,0,0);}
+ case 66: /* ccons ::= UNIQUE onconf */
+{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy4,0,0,0,0);}
break;
- case 64: /* ccons ::= CHECK LP expr RP */
-{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy346.pExpr);}
+ case 67: /* ccons ::= CHECK LP expr RP */
+{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy118.pExpr);}
break;
- case 65: /* ccons ::= REFERENCES nm eidlist_opt refargs */
-{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy328);}
+ case 68: /* ccons ::= REFERENCES nm idxlist_opt refargs */
+{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy322,yymsp[0].minor.yy4);}
break;
- case 66: /* ccons ::= defer_subclause */
-{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy328);}
+ case 69: /* ccons ::= defer_subclause */
+{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy4);}
break;
- case 67: /* ccons ::= COLLATE ID|STRING */
+ case 70: /* ccons ::= COLLATE ids */
{sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);}
break;
- case 70: /* refargs ::= */
-{ yygotominor.yy328 = OE_None*0x0101; /* EV: R-19803-45884 */}
+ case 73: /* refargs ::= */
+{ yygotominor.yy4 = OE_None*0x0101; /* EV: R-19803-45884 */}
break;
- case 71: /* refargs ::= refargs refarg */
-{ yygotominor.yy328 = (yymsp[-1].minor.yy328 & ~yymsp[0].minor.yy429.mask) | yymsp[0].minor.yy429.value; }
+ case 74: /* refargs ::= refargs refarg */
+{ yygotominor.yy4 = (yymsp[-1].minor.yy4 & ~yymsp[0].minor.yy215.mask) | yymsp[0].minor.yy215.value; }
break;
- case 72: /* refarg ::= MATCH nm */
- case 73: /* refarg ::= ON INSERT refact */ yytestcase(yyruleno==73);
-{ yygotominor.yy429.value = 0; yygotominor.yy429.mask = 0x000000; }
+ case 75: /* refarg ::= MATCH nm */
+ case 76: /* refarg ::= ON INSERT refact */ yytestcase(yyruleno==76);
+{ yygotominor.yy215.value = 0; yygotominor.yy215.mask = 0x000000; }
break;
- case 74: /* refarg ::= ON DELETE refact */
-{ yygotominor.yy429.value = yymsp[0].minor.yy328; yygotominor.yy429.mask = 0x0000ff; }
+ case 77: /* refarg ::= ON DELETE refact */
+{ yygotominor.yy215.value = yymsp[0].minor.yy4; yygotominor.yy215.mask = 0x0000ff; }
break;
- case 75: /* refarg ::= ON UPDATE refact */
-{ yygotominor.yy429.value = yymsp[0].minor.yy328<<8; yygotominor.yy429.mask = 0x00ff00; }
+ case 78: /* refarg ::= ON UPDATE refact */
+{ yygotominor.yy215.value = yymsp[0].minor.yy4<<8; yygotominor.yy215.mask = 0x00ff00; }
break;
- case 76: /* refact ::= SET NULL */
-{ yygotominor.yy328 = OE_SetNull; /* EV: R-33326-45252 */}
+ case 79: /* refact ::= SET NULL */
+{ yygotominor.yy4 = OE_SetNull; /* EV: R-33326-45252 */}
break;
- case 77: /* refact ::= SET DEFAULT */
-{ yygotominor.yy328 = OE_SetDflt; /* EV: R-33326-45252 */}
+ case 80: /* refact ::= SET DEFAULT */
+{ yygotominor.yy4 = OE_SetDflt; /* EV: R-33326-45252 */}
break;
- case 78: /* refact ::= CASCADE */
-{ yygotominor.yy328 = OE_Cascade; /* EV: R-33326-45252 */}
+ case 81: /* refact ::= CASCADE */
+{ yygotominor.yy4 = OE_Cascade; /* EV: R-33326-45252 */}
break;
- case 79: /* refact ::= RESTRICT */
-{ yygotominor.yy328 = OE_Restrict; /* EV: R-33326-45252 */}
+ case 82: /* refact ::= RESTRICT */
+{ yygotominor.yy4 = OE_Restrict; /* EV: R-33326-45252 */}
break;
- case 80: /* refact ::= NO ACTION */
-{ yygotominor.yy328 = OE_None; /* EV: R-33326-45252 */}
+ case 83: /* refact ::= NO ACTION */
+{ yygotominor.yy4 = OE_None; /* EV: R-33326-45252 */}
break;
- case 82: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
- case 98: /* defer_subclause_opt ::= defer_subclause */ yytestcase(yyruleno==98);
- case 100: /* onconf ::= ON CONFLICT resolvetype */ yytestcase(yyruleno==100);
- case 103: /* resolvetype ::= raisetype */ yytestcase(yyruleno==103);
-{yygotominor.yy328 = yymsp[0].minor.yy328;}
+ case 85: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
+ case 101: /* defer_subclause_opt ::= defer_subclause */ yytestcase(yyruleno==101);
+ case 103: /* onconf ::= ON CONFLICT resolvetype */ yytestcase(yyruleno==103);
+ case 106: /* resolvetype ::= raisetype */ yytestcase(yyruleno==106);
+{yygotominor.yy4 = yymsp[0].minor.yy4;}
break;
- case 86: /* conslist_opt ::= */
+ case 89: /* conslist_opt ::= */
{yygotominor.yy0.n = 0; yygotominor.yy0.z = 0;}
break;
- case 87: /* conslist_opt ::= COMMA conslist */
+ case 90: /* conslist_opt ::= COMMA conslist */
{yygotominor.yy0 = yymsp[-1].minor.yy0;}
break;
- case 90: /* tconscomma ::= COMMA */
+ case 93: /* tconscomma ::= COMMA */
{pParse->constraintName.n = 0;}
break;
- case 93: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
-{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy328,yymsp[-2].minor.yy328,0);}
+ case 96: /* tcons ::= PRIMARY KEY LP idxlist autoinc RP onconf */
+{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy322,yymsp[0].minor.yy4,yymsp[-2].minor.yy4,0);}
break;
- case 94: /* tcons ::= UNIQUE LP sortlist RP onconf */
-{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy328,0,0,0,0);}
+ case 97: /* tcons ::= UNIQUE LP idxlist RP onconf */
+{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy322,yymsp[0].minor.yy4,0,0,0,0);}
break;
- case 95: /* tcons ::= CHECK LP expr RP onconf */
-{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy346.pExpr);}
+ case 98: /* tcons ::= CHECK LP expr RP onconf */
+{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy118.pExpr);}
break;
- case 96: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
+ case 99: /* tcons ::= FOREIGN KEY LP idxlist RP REFERENCES nm idxlist_opt refargs defer_subclause_opt */
{
- sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy328);
- sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy328);
+ sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy322, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[-1].minor.yy4);
+ sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy4);
}
break;
- case 99: /* onconf ::= */
-{yygotominor.yy328 = OE_Default;}
+ case 102: /* onconf ::= */
+{yygotominor.yy4 = OE_Default;}
break;
- case 101: /* orconf ::= */
-{yygotominor.yy186 = OE_Default;}
+ case 104: /* orconf ::= */
+{yygotominor.yy210 = OE_Default;}
break;
- case 102: /* orconf ::= OR resolvetype */
-{yygotominor.yy186 = (u8)yymsp[0].minor.yy328;}
+ case 105: /* orconf ::= OR resolvetype */
+{yygotominor.yy210 = (u8)yymsp[0].minor.yy4;}
break;
- case 104: /* resolvetype ::= IGNORE */
-{yygotominor.yy328 = OE_Ignore;}
+ case 107: /* resolvetype ::= IGNORE */
+{yygotominor.yy4 = OE_Ignore;}
break;
- case 105: /* resolvetype ::= REPLACE */
-{yygotominor.yy328 = OE_Replace;}
- break;
- case 106: /* cmd ::= DROP TABLE ifexists fullname */
-{
- sqlite3DropTable(pParse, yymsp[0].minor.yy65, 0, yymsp[-1].minor.yy328);
-}
- break;
- case 109: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
-{
- sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy3, yymsp[-7].minor.yy328, yymsp[-5].minor.yy328);
-}
+ case 108: /* resolvetype ::= REPLACE */
+{yygotominor.yy4 = OE_Replace;}
break;
- case 110: /* cmd ::= DROP VIEW ifexists fullname */
+ case 109: /* cmd ::= DROP TABLE ifexists fullname */
{
- sqlite3DropTable(pParse, yymsp[0].minor.yy65, 1, yymsp[-1].minor.yy328);
+ sqlite3DropTable(pParse, yymsp[0].minor.yy259, 0, yymsp[-1].minor.yy4);
}
break;
- case 111: /* cmd ::= select */
+ case 112: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm AS select */
{
- SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0};
- sqlite3Select(pParse, yymsp[0].minor.yy3, &dest);
- sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy3);
+ sqlite3CreateView(pParse, &yymsp[-7].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, yymsp[0].minor.yy387, yymsp[-6].minor.yy4, yymsp[-4].minor.yy4);
}
break;
- case 112: /* select ::= with selectnowith */
+ case 113: /* cmd ::= DROP VIEW ifexists fullname */
{
- Select *p = yymsp[0].minor.yy3;
- if( p ){
- p->pWith = yymsp[-1].minor.yy59;
- parserDoubleLinkSelect(pParse, p);
- }else{
- sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy59);
- }
- yygotominor.yy3 = p;
+ sqlite3DropTable(pParse, yymsp[0].minor.yy259, 1, yymsp[-1].minor.yy4);
}
break;
- case 113: /* selectnowith ::= oneselect */
- case 119: /* oneselect ::= values */ yytestcase(yyruleno==119);
-{yygotominor.yy3 = yymsp[0].minor.yy3;}
- break;
- case 114: /* selectnowith ::= selectnowith multiselect_op oneselect */
+ case 114: /* cmd ::= select */
{
- Select *pRhs = yymsp[0].minor.yy3;
- Select *pLhs = yymsp[-2].minor.yy3;
- if( pRhs && pRhs->pPrior ){
- SrcList *pFrom;
- Token x;
- x.n = 0;
- parserDoubleLinkSelect(pParse, pRhs);
- pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0,0);
- pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0,0);
- }
- if( pRhs ){
- pRhs->op = (u8)yymsp[-1].minor.yy328;
- pRhs->pPrior = pLhs;
- if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue;
- pRhs->selFlags &= ~SF_MultiValue;
- if( yymsp[-1].minor.yy328!=TK_ALL ) pParse->hasCompound = 1;
- }else{
- sqlite3SelectDelete(pParse->db, pLhs);
- }
- yygotominor.yy3 = pRhs;
+ SelectDest dest = {SRT_Output, 0, 0, 0, 0};
+ sqlite3Select(pParse, yymsp[0].minor.yy387, &dest);
+ sqlite3ExplainBegin(pParse->pVdbe);
+ sqlite3ExplainSelect(pParse->pVdbe, yymsp[0].minor.yy387);
+ sqlite3ExplainFinish(pParse->pVdbe);
+ sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy387);
}
break;
- case 116: /* multiselect_op ::= UNION ALL */
-{yygotominor.yy328 = TK_ALL;}
+ case 115: /* select ::= oneselect */
+{yygotominor.yy387 = yymsp[0].minor.yy387;}
break;
- case 118: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
+ case 116: /* select ::= select multiselect_op oneselect */
{
- yygotominor.yy3 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy65,yymsp[-4].minor.yy132,yymsp[-3].minor.yy14,yymsp[-2].minor.yy132,yymsp[-1].minor.yy14,yymsp[-7].minor.yy381,yymsp[0].minor.yy476.pLimit,yymsp[0].minor.yy476.pOffset);
-#if SELECTTRACE_ENABLED
- /* Populate the Select.zSelName[] string that is used to help with
- ** query planner debugging, to differentiate between multiple Select
- ** objects in a complex query.
- **
- ** If the SELECT keyword is immediately followed by a C-style comment
- ** then extract the first few alphanumeric characters from within that
- ** comment to be the zSelName value. Otherwise, the label is #N where
- ** is an integer that is incremented with each SELECT statement seen.
- */
- if( yygotominor.yy3!=0 ){
- const char *z = yymsp[-8].minor.yy0.z+6;
- int i;
- sqlite3_snprintf(sizeof(yygotominor.yy3->zSelName), yygotominor.yy3->zSelName, "#%d",
- ++pParse->nSelect);
- while( z[0]==' ' ) z++;
- if( z[0]=='/' && z[1]=='*' ){
- z += 2;
- while( z[0]==' ' ) z++;
- for(i=0; sqlite3Isalnum(z[i]); i++){}
- sqlite3_snprintf(sizeof(yygotominor.yy3->zSelName), yygotominor.yy3->zSelName, "%.*s", i, z);
- }
+ if( yymsp[0].minor.yy387 ){
+ yymsp[0].minor.yy387->op = (u8)yymsp[-1].minor.yy4;
+ yymsp[0].minor.yy387->pPrior = yymsp[-2].minor.yy387;
+ if( yymsp[-1].minor.yy4!=TK_ALL ) pParse->hasCompound = 1;
+ }else{
+ sqlite3SelectDelete(pParse->db, yymsp[-2].minor.yy387);
}
-#endif /* SELECTRACE_ENABLED */
+ yygotominor.yy387 = yymsp[0].minor.yy387;
}
break;
- case 120: /* values ::= VALUES LP nexprlist RP */
-{
- yygotominor.yy3 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0,0);
-}
+ case 118: /* multiselect_op ::= UNION ALL */
+{yygotominor.yy4 = TK_ALL;}
break;
- case 121: /* values ::= values COMMA LP exprlist RP */
+ case 120: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
{
- Select *pRight, *pLeft = yymsp[-4].minor.yy3;
- pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values|SF_MultiValue,0,0);
- if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue;
- if( pRight ){
- pRight->op = TK_ALL;
- pLeft = yymsp[-4].minor.yy3;
- pRight->pPrior = pLeft;
- yygotominor.yy3 = pRight;
- }else{
- yygotominor.yy3 = pLeft;
- }
+ yygotominor.yy387 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy322,yymsp[-5].minor.yy259,yymsp[-4].minor.yy314,yymsp[-3].minor.yy322,yymsp[-2].minor.yy314,yymsp[-1].minor.yy322,yymsp[-7].minor.yy177,yymsp[0].minor.yy292.pLimit,yymsp[0].minor.yy292.pOffset);
}
break;
- case 122: /* distinct ::= DISTINCT */
-{yygotominor.yy381 = SF_Distinct;}
+ case 121: /* distinct ::= DISTINCT */
+{yygotominor.yy177 = SF_Distinct;}
break;
- case 123: /* distinct ::= ALL */
-{yygotominor.yy381 = SF_All;}
+ case 122: /* distinct ::= ALL */
+ case 123: /* distinct ::= */ yytestcase(yyruleno==123);
+{yygotominor.yy177 = 0;}
break;
- case 124: /* distinct ::= */
-{yygotominor.yy381 = 0;}
+ case 124: /* sclp ::= selcollist COMMA */
+ case 248: /* idxlist_opt ::= LP idxlist RP */ yytestcase(yyruleno==248);
+{yygotominor.yy322 = yymsp[-1].minor.yy322;}
break;
- case 125: /* sclp ::= selcollist COMMA */
- case 244: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==244);
-{yygotominor.yy14 = yymsp[-1].minor.yy14;}
+ case 125: /* sclp ::= */
+ case 153: /* orderby_opt ::= */ yytestcase(yyruleno==153);
+ case 160: /* groupby_opt ::= */ yytestcase(yyruleno==160);
+ case 241: /* exprlist ::= */ yytestcase(yyruleno==241);
+ case 247: /* idxlist_opt ::= */ yytestcase(yyruleno==247);
+{yygotominor.yy322 = 0;}
break;
- case 126: /* sclp ::= */
- case 155: /* orderby_opt ::= */ yytestcase(yyruleno==155);
- case 162: /* groupby_opt ::= */ yytestcase(yyruleno==162);
- case 237: /* exprlist ::= */ yytestcase(yyruleno==237);
- case 243: /* eidlist_opt ::= */ yytestcase(yyruleno==243);
-{yygotominor.yy14 = 0;}
- break;
- case 127: /* selcollist ::= sclp expr as */
+ case 126: /* selcollist ::= sclp expr as */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, yymsp[-1].minor.yy346.pExpr);
- if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[0].minor.yy0, 1);
- sqlite3ExprListSetSpan(pParse,yygotominor.yy14,&yymsp[-1].minor.yy346);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, yymsp[-1].minor.yy118.pExpr);
+ if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yygotominor.yy322, &yymsp[0].minor.yy0, 1);
+ sqlite3ExprListSetSpan(pParse,yygotominor.yy322,&yymsp[-1].minor.yy118);
}
break;
- case 128: /* selcollist ::= sclp STAR */
+ case 127: /* selcollist ::= sclp STAR */
{
Expr *p = sqlite3Expr(pParse->db, TK_ALL, 0);
- yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy14, p);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy322, p);
}
break;
- case 129: /* selcollist ::= sclp nm DOT STAR */
+ case 128: /* selcollist ::= sclp nm DOT STAR */
{
Expr *pRight = sqlite3PExpr(pParse, TK_ALL, 0, 0, &yymsp[0].minor.yy0);
Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0);
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, pDot);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, pDot);
}
break;
- case 132: /* as ::= */
+ case 131: /* as ::= */
{yygotominor.yy0.n = 0;}
break;
- case 133: /* from ::= */
-{yygotominor.yy65 = sqlite3DbMallocZero(pParse->db, sizeof(*yygotominor.yy65));}
+ case 132: /* from ::= */
+{yygotominor.yy259 = sqlite3DbMallocZero(pParse->db, sizeof(*yygotominor.yy259));}
break;
- case 134: /* from ::= FROM seltablist */
+ case 133: /* from ::= FROM seltablist */
{
- yygotominor.yy65 = yymsp[0].minor.yy65;
- sqlite3SrcListShiftJoinType(yygotominor.yy65);
+ yygotominor.yy259 = yymsp[0].minor.yy259;
+ sqlite3SrcListShiftJoinType(yygotominor.yy259);
}
break;
- case 135: /* stl_prefix ::= seltablist joinop */
+ case 134: /* stl_prefix ::= seltablist joinop */
{
- yygotominor.yy65 = yymsp[-1].minor.yy65;
- if( ALWAYS(yygotominor.yy65 && yygotominor.yy65->nSrc>0) ) yygotominor.yy65->a[yygotominor.yy65->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy328;
+ yygotominor.yy259 = yymsp[-1].minor.yy259;
+ if( ALWAYS(yygotominor.yy259 && yygotominor.yy259->nSrc>0) ) yygotominor.yy259->a[yygotominor.yy259->nSrc-1].jointype = (u8)yymsp[0].minor.yy4;
}
break;
- case 136: /* stl_prefix ::= */
-{yygotominor.yy65 = 0;}
- break;
- case 137: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
-{
- yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408);
- sqlite3SrcListIndexedBy(pParse, yygotominor.yy65, &yymsp[-2].minor.yy0);
-}
+ case 135: /* stl_prefix ::= */
+{yygotominor.yy259 = 0;}
break;
- case 138: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+ case 136: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
{
- yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy65,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408);
- sqlite3SrcListFuncArgs(pParse, yygotominor.yy65, yymsp[-4].minor.yy14);
+ yygotominor.yy259 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy259,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy314,yymsp[0].minor.yy384);
+ sqlite3SrcListIndexedBy(pParse, yygotominor.yy259, &yymsp[-2].minor.yy0);
}
break;
- case 139: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+ case 137: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */
{
- yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy3,yymsp[-1].minor.yy132,yymsp[0].minor.yy408);
+ yygotominor.yy259 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy259,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy387,yymsp[-1].minor.yy314,yymsp[0].minor.yy384);
}
break;
- case 140: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+ case 138: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
{
- if( yymsp[-6].minor.yy65==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy132==0 && yymsp[0].minor.yy408==0 ){
- yygotominor.yy65 = yymsp[-4].minor.yy65;
- }else if( yymsp[-4].minor.yy65->nSrc==1 ){
- yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408);
- if( yygotominor.yy65 ){
- struct SrcList_item *pNew = &yygotominor.yy65->a[yygotominor.yy65->nSrc-1];
- struct SrcList_item *pOld = yymsp[-4].minor.yy65->a;
+ if( yymsp[-6].minor.yy259==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy314==0 && yymsp[0].minor.yy384==0 ){
+ yygotominor.yy259 = yymsp[-4].minor.yy259;
+ }else if( yymsp[-4].minor.yy259->nSrc==1 ){
+ yygotominor.yy259 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy259,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy314,yymsp[0].minor.yy384);
+ if( yygotominor.yy259 ){
+ struct SrcList_item *pNew = &yygotominor.yy259->a[yygotominor.yy259->nSrc-1];
+ struct SrcList_item *pOld = yymsp[-4].minor.yy259->a;
pNew->zName = pOld->zName;
pNew->zDatabase = pOld->zDatabase;
pNew->pSelect = pOld->pSelect;
pOld->zName = pOld->zDatabase = 0;
pOld->pSelect = 0;
}
- sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy65);
+ sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy259);
}else{
Select *pSubquery;
- sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy65);
- pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy65,0,0,0,0,SF_NestedFrom,0,0);
- yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy132,yymsp[0].minor.yy408);
+ sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy259);
+ pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy259,0,0,0,0,SF_NestedFrom,0,0);
+ yygotominor.yy259 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy259,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy314,yymsp[0].minor.yy384);
}
}
break;
- case 141: /* dbnm ::= */
- case 150: /* indexed_opt ::= */ yytestcase(yyruleno==150);
+ case 139: /* dbnm ::= */
+ case 148: /* indexed_opt ::= */ yytestcase(yyruleno==148);
{yygotominor.yy0.z=0; yygotominor.yy0.n=0;}
break;
- case 143: /* fullname ::= nm dbnm */
-{yygotominor.yy65 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);}
+ case 141: /* fullname ::= nm dbnm */
+{yygotominor.yy259 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);}
break;
- case 144: /* joinop ::= COMMA|JOIN */
-{ yygotominor.yy328 = JT_INNER; }
+ case 142: /* joinop ::= COMMA|JOIN */
+{ yygotominor.yy4 = JT_INNER; }
break;
- case 145: /* joinop ::= JOIN_KW JOIN */
-{ yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); }
+ case 143: /* joinop ::= JOIN_KW JOIN */
+{ yygotominor.yy4 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); }
break;
- case 146: /* joinop ::= JOIN_KW nm JOIN */
-{ yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); }
+ case 144: /* joinop ::= JOIN_KW nm JOIN */
+{ yygotominor.yy4 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); }
break;
- case 147: /* joinop ::= JOIN_KW nm nm JOIN */
-{ yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); }
+ case 145: /* joinop ::= JOIN_KW nm nm JOIN */
+{ yygotominor.yy4 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); }
break;
- case 148: /* on_opt ::= ON expr */
- case 165: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==165);
- case 172: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==172);
- case 232: /* case_else ::= ELSE expr */ yytestcase(yyruleno==232);
- case 234: /* case_operand ::= expr */ yytestcase(yyruleno==234);
-{yygotominor.yy132 = yymsp[0].minor.yy346.pExpr;}
+ case 146: /* on_opt ::= ON expr */
+ case 163: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==163);
+ case 170: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==170);
+ case 236: /* case_else ::= ELSE expr */ yytestcase(yyruleno==236);
+ case 238: /* case_operand ::= expr */ yytestcase(yyruleno==238);
+{yygotominor.yy314 = yymsp[0].minor.yy118.pExpr;}
break;
- case 149: /* on_opt ::= */
- case 164: /* having_opt ::= */ yytestcase(yyruleno==164);
- case 171: /* where_opt ::= */ yytestcase(yyruleno==171);
- case 233: /* case_else ::= */ yytestcase(yyruleno==233);
- case 235: /* case_operand ::= */ yytestcase(yyruleno==235);
-{yygotominor.yy132 = 0;}
+ case 147: /* on_opt ::= */
+ case 162: /* having_opt ::= */ yytestcase(yyruleno==162);
+ case 169: /* where_opt ::= */ yytestcase(yyruleno==169);
+ case 237: /* case_else ::= */ yytestcase(yyruleno==237);
+ case 239: /* case_operand ::= */ yytestcase(yyruleno==239);
+{yygotominor.yy314 = 0;}
break;
- case 152: /* indexed_opt ::= NOT INDEXED */
+ case 150: /* indexed_opt ::= NOT INDEXED */
{yygotominor.yy0.z=0; yygotominor.yy0.n=1;}
break;
- case 153: /* using_opt ::= USING LP idlist RP */
- case 181: /* idlist_opt ::= LP idlist RP */ yytestcase(yyruleno==181);
-{yygotominor.yy408 = yymsp[-1].minor.yy408;}
+ case 151: /* using_opt ::= USING LP idlist RP */
+ case 182: /* inscollist_opt ::= LP idlist RP */ yytestcase(yyruleno==182);
+{yygotominor.yy384 = yymsp[-1].minor.yy384;}
break;
- case 154: /* using_opt ::= */
- case 180: /* idlist_opt ::= */ yytestcase(yyruleno==180);
-{yygotominor.yy408 = 0;}
+ case 152: /* using_opt ::= */
+ case 181: /* inscollist_opt ::= */ yytestcase(yyruleno==181);
+{yygotominor.yy384 = 0;}
break;
- case 156: /* orderby_opt ::= ORDER BY sortlist */
- case 163: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==163);
- case 236: /* exprlist ::= nexprlist */ yytestcase(yyruleno==236);
-{yygotominor.yy14 = yymsp[0].minor.yy14;}
+ case 154: /* orderby_opt ::= ORDER BY sortlist */
+ case 161: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==161);
+ case 240: /* exprlist ::= nexprlist */ yytestcase(yyruleno==240);
+{yygotominor.yy322 = yymsp[0].minor.yy322;}
break;
- case 157: /* sortlist ::= sortlist COMMA expr sortorder */
+ case 155: /* sortlist ::= sortlist COMMA expr sortorder */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14,yymsp[-1].minor.yy346.pExpr);
- sqlite3ExprListSetSortOrder(yygotominor.yy14,yymsp[0].minor.yy328);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322,yymsp[-1].minor.yy118.pExpr);
+ if( yygotominor.yy322 ) yygotominor.yy322->a[yygotominor.yy322->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy4;
}
break;
- case 158: /* sortlist ::= expr sortorder */
+ case 156: /* sortlist ::= expr sortorder */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy346.pExpr);
- sqlite3ExprListSetSortOrder(yygotominor.yy14,yymsp[0].minor.yy328);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy118.pExpr);
+ if( yygotominor.yy322 && ALWAYS(yygotominor.yy322->a) ) yygotominor.yy322->a[0].sortOrder = (u8)yymsp[0].minor.yy4;
}
break;
- case 159: /* sortorder ::= ASC */
-{yygotominor.yy328 = SQLITE_SO_ASC;}
- break;
- case 160: /* sortorder ::= DESC */
-{yygotominor.yy328 = SQLITE_SO_DESC;}
+ case 157: /* sortorder ::= ASC */
+ case 159: /* sortorder ::= */ yytestcase(yyruleno==159);
+{yygotominor.yy4 = SQLITE_SO_ASC;}
break;
- case 161: /* sortorder ::= */
-{yygotominor.yy328 = SQLITE_SO_UNDEFINED;}
+ case 158: /* sortorder ::= DESC */
+{yygotominor.yy4 = SQLITE_SO_DESC;}
break;
- case 166: /* limit_opt ::= */
-{yygotominor.yy476.pLimit = 0; yygotominor.yy476.pOffset = 0;}
+ case 164: /* limit_opt ::= */
+{yygotominor.yy292.pLimit = 0; yygotominor.yy292.pOffset = 0;}
break;
- case 167: /* limit_opt ::= LIMIT expr */
-{yygotominor.yy476.pLimit = yymsp[0].minor.yy346.pExpr; yygotominor.yy476.pOffset = 0;}
+ case 165: /* limit_opt ::= LIMIT expr */
+{yygotominor.yy292.pLimit = yymsp[0].minor.yy118.pExpr; yygotominor.yy292.pOffset = 0;}
break;
- case 168: /* limit_opt ::= LIMIT expr OFFSET expr */
-{yygotominor.yy476.pLimit = yymsp[-2].minor.yy346.pExpr; yygotominor.yy476.pOffset = yymsp[0].minor.yy346.pExpr;}
+ case 166: /* limit_opt ::= LIMIT expr OFFSET expr */
+{yygotominor.yy292.pLimit = yymsp[-2].minor.yy118.pExpr; yygotominor.yy292.pOffset = yymsp[0].minor.yy118.pExpr;}
break;
- case 169: /* limit_opt ::= LIMIT expr COMMA expr */
-{yygotominor.yy476.pOffset = yymsp[-2].minor.yy346.pExpr; yygotominor.yy476.pLimit = yymsp[0].minor.yy346.pExpr;}
+ case 167: /* limit_opt ::= LIMIT expr COMMA expr */
+{yygotominor.yy292.pOffset = yymsp[-2].minor.yy118.pExpr; yygotominor.yy292.pLimit = yymsp[0].minor.yy118.pExpr;}
break;
- case 170: /* cmd ::= with DELETE FROM fullname indexed_opt where_opt */
+ case 168: /* cmd ::= DELETE FROM fullname indexed_opt where_opt */
{
- sqlite3WithPush(pParse, yymsp[-5].minor.yy59, 1);
- sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy65, &yymsp[-1].minor.yy0);
- sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy65,yymsp[0].minor.yy132);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy259, &yymsp[-1].minor.yy0);
+ sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy259,yymsp[0].minor.yy314);
}
break;
- case 173: /* cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt */
+ case 171: /* cmd ::= UPDATE orconf fullname indexed_opt SET setlist where_opt */
{
- sqlite3WithPush(pParse, yymsp[-7].minor.yy59, 1);
- sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy65, &yymsp[-3].minor.yy0);
- sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy14,"set list");
- sqlite3Update(pParse,yymsp[-4].minor.yy65,yymsp[-1].minor.yy14,yymsp[0].minor.yy132,yymsp[-5].minor.yy186);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy259, &yymsp[-3].minor.yy0);
+ sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy322,"set list");
+ sqlite3Update(pParse,yymsp[-4].minor.yy259,yymsp[-1].minor.yy322,yymsp[0].minor.yy314,yymsp[-5].minor.yy210);
}
break;
- case 174: /* setlist ::= setlist COMMA nm EQ expr */
+ case 172: /* setlist ::= setlist COMMA nm EQ expr */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy346.pExpr);
- sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[0].minor.yy118.pExpr);
+ sqlite3ExprListSetName(pParse, yygotominor.yy322, &yymsp[-2].minor.yy0, 1);
}
break;
- case 175: /* setlist ::= nm EQ expr */
+ case 173: /* setlist ::= nm EQ expr */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy346.pExpr);
- sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy118.pExpr);
+ sqlite3ExprListSetName(pParse, yygotominor.yy322, &yymsp[-2].minor.yy0, 1);
}
break;
- case 176: /* cmd ::= with insert_cmd INTO fullname idlist_opt select */
+ case 174: /* cmd ::= insert_cmd INTO fullname inscollist_opt valuelist */
+{sqlite3Insert(pParse, yymsp[-2].minor.yy259, yymsp[0].minor.yy260.pList, yymsp[0].minor.yy260.pSelect, yymsp[-1].minor.yy384, yymsp[-4].minor.yy210);}
+ break;
+ case 175: /* cmd ::= insert_cmd INTO fullname inscollist_opt select */
+{sqlite3Insert(pParse, yymsp[-2].minor.yy259, 0, yymsp[0].minor.yy387, yymsp[-1].minor.yy384, yymsp[-4].minor.yy210);}
+ break;
+ case 176: /* cmd ::= insert_cmd INTO fullname inscollist_opt DEFAULT VALUES */
+{sqlite3Insert(pParse, yymsp[-3].minor.yy259, 0, 0, yymsp[-2].minor.yy384, yymsp[-5].minor.yy210);}
+ break;
+ case 177: /* insert_cmd ::= INSERT orconf */
+{yygotominor.yy210 = yymsp[0].minor.yy210;}
+ break;
+ case 178: /* insert_cmd ::= REPLACE */
+{yygotominor.yy210 = OE_Replace;}
+ break;
+ case 179: /* valuelist ::= VALUES LP nexprlist RP */
{
- sqlite3WithPush(pParse, yymsp[-5].minor.yy59, 1);
- sqlite3Insert(pParse, yymsp[-2].minor.yy65, yymsp[0].minor.yy3, yymsp[-1].minor.yy408, yymsp[-4].minor.yy186);
+ yygotominor.yy260.pList = yymsp[-1].minor.yy322;
+ yygotominor.yy260.pSelect = 0;
}
break;
- case 177: /* cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES */
+ case 180: /* valuelist ::= valuelist COMMA LP exprlist RP */
{
- sqlite3WithPush(pParse, yymsp[-6].minor.yy59, 1);
- sqlite3Insert(pParse, yymsp[-3].minor.yy65, 0, yymsp[-2].minor.yy408, yymsp[-5].minor.yy186);
+ Select *pRight = sqlite3SelectNew(pParse, yymsp[-1].minor.yy322, 0, 0, 0, 0, 0, 0, 0, 0);
+ if( yymsp[-4].minor.yy260.pList ){
+ yymsp[-4].minor.yy260.pSelect = sqlite3SelectNew(pParse, yymsp[-4].minor.yy260.pList, 0, 0, 0, 0, 0, 0, 0, 0);
+ yymsp[-4].minor.yy260.pList = 0;
+ }
+ yygotominor.yy260.pList = 0;
+ if( yymsp[-4].minor.yy260.pSelect==0 || pRight==0 ){
+ sqlite3SelectDelete(pParse->db, pRight);
+ sqlite3SelectDelete(pParse->db, yymsp[-4].minor.yy260.pSelect);
+ yygotominor.yy260.pSelect = 0;
+ }else{
+ pRight->op = TK_ALL;
+ pRight->pPrior = yymsp[-4].minor.yy260.pSelect;
+ pRight->selFlags |= SF_Values;
+ pRight->pPrior->selFlags |= SF_Values;
+ yygotominor.yy260.pSelect = pRight;
+ }
}
break;
- case 178: /* insert_cmd ::= INSERT orconf */
-{yygotominor.yy186 = yymsp[0].minor.yy186;}
- break;
- case 179: /* insert_cmd ::= REPLACE */
-{yygotominor.yy186 = OE_Replace;}
+ case 183: /* idlist ::= idlist COMMA nm */
+{yygotominor.yy384 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy384,&yymsp[0].minor.yy0);}
break;
- case 182: /* idlist ::= idlist COMMA nm */
-{yygotominor.yy408 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy408,&yymsp[0].minor.yy0);}
+ case 184: /* idlist ::= nm */
+{yygotominor.yy384 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0);}
break;
- case 183: /* idlist ::= nm */
-{yygotominor.yy408 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0);}
+ case 185: /* expr ::= term */
+{yygotominor.yy118 = yymsp[0].minor.yy118;}
break;
- case 184: /* expr ::= term */
-{yygotominor.yy346 = yymsp[0].minor.yy346;}
+ case 186: /* expr ::= LP expr RP */
+{yygotominor.yy118.pExpr = yymsp[-1].minor.yy118.pExpr; spanSet(&yygotominor.yy118,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);}
break;
- case 185: /* expr ::= LP expr RP */
-{yygotominor.yy346.pExpr = yymsp[-1].minor.yy346.pExpr; spanSet(&yygotominor.yy346,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);}
+ case 187: /* term ::= NULL */
+ case 192: /* term ::= INTEGER|FLOAT|BLOB */ yytestcase(yyruleno==192);
+ case 193: /* term ::= STRING */ yytestcase(yyruleno==193);
+{spanExpr(&yygotominor.yy118, pParse, yymsp[0].major, &yymsp[0].minor.yy0);}
break;
- case 186: /* term ::= NULL */
- case 191: /* term ::= INTEGER|FLOAT|BLOB */ yytestcase(yyruleno==191);
- case 192: /* term ::= STRING */ yytestcase(yyruleno==192);
-{spanExpr(&yygotominor.yy346, pParse, yymsp[0].major, &yymsp[0].minor.yy0);}
+ case 188: /* expr ::= id */
+ case 189: /* expr ::= JOIN_KW */ yytestcase(yyruleno==189);
+{spanExpr(&yygotominor.yy118, pParse, TK_ID, &yymsp[0].minor.yy0);}
break;
- case 187: /* expr ::= ID|INDEXED */
- case 188: /* expr ::= JOIN_KW */ yytestcase(yyruleno==188);
-{spanExpr(&yygotominor.yy346, pParse, TK_ID, &yymsp[0].minor.yy0);}
- break;
- case 189: /* expr ::= nm DOT nm */
+ case 190: /* expr ::= nm DOT nm */
{
Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0);
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0);
- spanSet(&yygotominor.yy346,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0);
+ spanSet(&yygotominor.yy118,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
}
break;
- case 190: /* expr ::= nm DOT nm DOT nm */
+ case 191: /* expr ::= nm DOT nm DOT nm */
{
Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-4].minor.yy0);
Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0);
Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0);
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0);
- spanSet(&yygotominor.yy346,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0);
+ spanSet(&yygotominor.yy118,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0);
}
break;
- case 193: /* expr ::= VARIABLE */
+ case 194: /* expr ::= REGISTER */
{
- if( yymsp[0].minor.yy0.n>=2 && yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1]) ){
- /* When doing a nested parse, one can include terms in an expression
- ** that look like this: #1 #2 ... These terms refer to registers
- ** in the virtual machine. #N is the N-th register. */
- if( pParse->nested==0 ){
- sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &yymsp[0].minor.yy0);
- yygotominor.yy346.pExpr = 0;
- }else{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &yymsp[0].minor.yy0);
- if( yygotominor.yy346.pExpr ) sqlite3GetInt32(&yymsp[0].minor.yy0.z[1], &yygotominor.yy346.pExpr->iTable);
- }
+ /* When doing a nested parse, one can include terms in an expression
+ ** that look like this: #1 #2 ... These terms refer to registers
+ ** in the virtual machine. #N is the N-th register. */
+ if( pParse->nested==0 ){
+ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &yymsp[0].minor.yy0);
+ yygotominor.yy118.pExpr = 0;
}else{
- spanExpr(&yygotominor.yy346, pParse, TK_VARIABLE, &yymsp[0].minor.yy0);
- sqlite3ExprAssignVarNumber(pParse, yygotominor.yy346.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &yymsp[0].minor.yy0);
+ if( yygotominor.yy118.pExpr ) sqlite3GetInt32(&yymsp[0].minor.yy0.z[1], &yygotominor.yy118.pExpr->iTable);
}
- spanSet(&yygotominor.yy346, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0);
+ spanSet(&yygotominor.yy118, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 194: /* expr ::= expr COLLATE ID|STRING */
+ case 195: /* expr ::= VARIABLE */
{
- yygotominor.yy346.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy346.pExpr, &yymsp[0].minor.yy0, 1);
- yygotominor.yy346.zStart = yymsp[-2].minor.yy346.zStart;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ spanExpr(&yygotominor.yy118, pParse, TK_VARIABLE, &yymsp[0].minor.yy0);
+ sqlite3ExprAssignVarNumber(pParse, yygotominor.yy118.pExpr);
+ spanSet(&yygotominor.yy118, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 195: /* expr ::= CAST LP expr AS typetoken RP */
+ case 196: /* expr ::= expr COLLATE ids */
{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy346.pExpr, 0, &yymsp[-1].minor.yy0);
- spanSet(&yygotominor.yy346,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0);
+ yygotominor.yy118.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy118.pExpr, &yymsp[0].minor.yy0);
+ yygotominor.yy118.zStart = yymsp[-2].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 196: /* expr ::= ID|INDEXED LP distinct exprlist RP */
+ case 197: /* expr ::= CAST LP expr AS typetoken RP */
{
- if( yymsp[-1].minor.yy14 && yymsp[-1].minor.yy14->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy118.pExpr, 0, &yymsp[-1].minor.yy0);
+ spanSet(&yygotominor.yy118,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0);
+}
+ break;
+ case 198: /* expr ::= ID LP distinct exprlist RP */
+{
+ if( yymsp[-1].minor.yy322 && yymsp[-1].minor.yy322->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){
sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0);
}
- yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0);
- spanSet(&yygotominor.yy346,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0);
- if( yymsp[-2].minor.yy381==SF_Distinct && yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->flags |= EP_Distinct;
+ yygotominor.yy118.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0);
+ spanSet(&yygotominor.yy118,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0);
+ if( yymsp[-2].minor.yy177 && yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->flags |= EP_Distinct;
}
}
break;
- case 197: /* expr ::= ID|INDEXED LP STAR RP */
+ case 199: /* expr ::= ID LP STAR RP */
{
- yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0);
- spanSet(&yygotominor.yy346,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0);
+ yygotominor.yy118.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0);
+ spanSet(&yygotominor.yy118,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0);
}
break;
- case 198: /* term ::= CTIME_KW */
+ case 200: /* term ::= CTIME_KW */
{
- yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0);
- spanSet(&yygotominor.yy346, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0);
+ yygotominor.yy118.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0);
+ spanSet(&yygotominor.yy118, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 199: /* expr ::= expr AND expr */
- case 200: /* expr ::= expr OR expr */ yytestcase(yyruleno==200);
- case 201: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==201);
- case 202: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==202);
- case 203: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==203);
- case 204: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==204);
- case 205: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==205);
- case 206: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==206);
-{spanBinaryExpr(&yygotominor.yy346,pParse,yymsp[-1].major,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy346);}
+ case 201: /* expr ::= expr AND expr */
+ case 202: /* expr ::= expr OR expr */ yytestcase(yyruleno==202);
+ case 203: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==203);
+ case 204: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==204);
+ case 205: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==205);
+ case 206: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==206);
+ case 207: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==207);
+ case 208: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==208);
+{spanBinaryExpr(&yygotominor.yy118,pParse,yymsp[-1].major,&yymsp[-2].minor.yy118,&yymsp[0].minor.yy118);}
break;
- case 207: /* likeop ::= LIKE_KW|MATCH */
-{yygotominor.yy96.eOperator = yymsp[0].minor.yy0; yygotominor.yy96.bNot = 0;}
+ case 209: /* likeop ::= LIKE_KW */
+ case 211: /* likeop ::= MATCH */ yytestcase(yyruleno==211);
+{yygotominor.yy342.eOperator = yymsp[0].minor.yy0; yygotominor.yy342.bNot = 0;}
break;
- case 208: /* likeop ::= NOT LIKE_KW|MATCH */
-{yygotominor.yy96.eOperator = yymsp[0].minor.yy0; yygotominor.yy96.bNot = 1;}
+ case 210: /* likeop ::= NOT LIKE_KW */
+ case 212: /* likeop ::= NOT MATCH */ yytestcase(yyruleno==212);
+{yygotominor.yy342.eOperator = yymsp[0].minor.yy0; yygotominor.yy342.bNot = 1;}
break;
- case 209: /* expr ::= expr likeop expr */
+ case 213: /* expr ::= expr likeop expr */
{
ExprList *pList;
- pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy346.pExpr);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy346.pExpr);
- yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy96.eOperator);
- if( yymsp[-1].minor.yy96.bNot ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0);
- yygotominor.yy346.zStart = yymsp[-2].minor.yy346.zStart;
- yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd;
- if( yygotominor.yy346.pExpr ) yygotominor.yy346.pExpr->flags |= EP_InfixFunc;
+ pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy118.pExpr);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy118.pExpr);
+ yygotominor.yy118.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy342.eOperator);
+ if( yymsp[-1].minor.yy342.bNot ) yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy118.pExpr, 0, 0);
+ yygotominor.yy118.zStart = yymsp[-2].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = yymsp[0].minor.yy118.zEnd;
+ if( yygotominor.yy118.pExpr ) yygotominor.yy118.pExpr->flags |= EP_InfixFunc;
}
break;
- case 210: /* expr ::= expr likeop expr ESCAPE expr */
+ case 214: /* expr ::= expr likeop expr ESCAPE expr */
{
ExprList *pList;
- pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy346.pExpr);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy346.pExpr);
- yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy96.eOperator);
- if( yymsp[-3].minor.yy96.bNot ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0);
- yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart;
- yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd;
- if( yygotominor.yy346.pExpr ) yygotominor.yy346.pExpr->flags |= EP_InfixFunc;
+ pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy118.pExpr);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy118.pExpr);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy118.pExpr);
+ yygotominor.yy118.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy342.eOperator);
+ if( yymsp[-3].minor.yy342.bNot ) yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy118.pExpr, 0, 0);
+ yygotominor.yy118.zStart = yymsp[-4].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = yymsp[0].minor.yy118.zEnd;
+ if( yygotominor.yy118.pExpr ) yygotominor.yy118.pExpr->flags |= EP_InfixFunc;
}
break;
- case 211: /* expr ::= expr ISNULL|NOTNULL */
-{spanUnaryPostfix(&yygotominor.yy346,pParse,yymsp[0].major,&yymsp[-1].minor.yy346,&yymsp[0].minor.yy0);}
+ case 215: /* expr ::= expr ISNULL|NOTNULL */
+{spanUnaryPostfix(&yygotominor.yy118,pParse,yymsp[0].major,&yymsp[-1].minor.yy118,&yymsp[0].minor.yy0);}
break;
- case 212: /* expr ::= expr NOT NULL */
-{spanUnaryPostfix(&yygotominor.yy346,pParse,TK_NOTNULL,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy0);}
+ case 216: /* expr ::= expr NOT NULL */
+{spanUnaryPostfix(&yygotominor.yy118,pParse,TK_NOTNULL,&yymsp[-2].minor.yy118,&yymsp[0].minor.yy0);}
break;
- case 213: /* expr ::= expr IS expr */
+ case 217: /* expr ::= expr IS expr */
{
- spanBinaryExpr(&yygotominor.yy346,pParse,TK_IS,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy346);
- binaryToUnaryIfNull(pParse, yymsp[0].minor.yy346.pExpr, yygotominor.yy346.pExpr, TK_ISNULL);
+ spanBinaryExpr(&yygotominor.yy118,pParse,TK_IS,&yymsp[-2].minor.yy118,&yymsp[0].minor.yy118);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy118.pExpr, yygotominor.yy118.pExpr, TK_ISNULL);
}
break;
- case 214: /* expr ::= expr IS NOT expr */
+ case 218: /* expr ::= expr IS NOT expr */
{
- spanBinaryExpr(&yygotominor.yy346,pParse,TK_ISNOT,&yymsp[-3].minor.yy346,&yymsp[0].minor.yy346);
- binaryToUnaryIfNull(pParse, yymsp[0].minor.yy346.pExpr, yygotominor.yy346.pExpr, TK_NOTNULL);
+ spanBinaryExpr(&yygotominor.yy118,pParse,TK_ISNOT,&yymsp[-3].minor.yy118,&yymsp[0].minor.yy118);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy118.pExpr, yygotominor.yy118.pExpr, TK_NOTNULL);
}
break;
- case 215: /* expr ::= NOT expr */
- case 216: /* expr ::= BITNOT expr */ yytestcase(yyruleno==216);
-{spanUnaryPrefix(&yygotominor.yy346,pParse,yymsp[-1].major,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);}
+ case 219: /* expr ::= NOT expr */
+ case 220: /* expr ::= BITNOT expr */ yytestcase(yyruleno==220);
+{spanUnaryPrefix(&yygotominor.yy118,pParse,yymsp[-1].major,&yymsp[0].minor.yy118,&yymsp[-1].minor.yy0);}
break;
- case 217: /* expr ::= MINUS expr */
-{spanUnaryPrefix(&yygotominor.yy346,pParse,TK_UMINUS,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);}
+ case 221: /* expr ::= MINUS expr */
+{spanUnaryPrefix(&yygotominor.yy118,pParse,TK_UMINUS,&yymsp[0].minor.yy118,&yymsp[-1].minor.yy0);}
break;
- case 218: /* expr ::= PLUS expr */
-{spanUnaryPrefix(&yygotominor.yy346,pParse,TK_UPLUS,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);}
+ case 222: /* expr ::= PLUS expr */
+{spanUnaryPrefix(&yygotominor.yy118,pParse,TK_UPLUS,&yymsp[0].minor.yy118,&yymsp[-1].minor.yy0);}
break;
- case 221: /* expr ::= expr between_op expr AND expr */
+ case 225: /* expr ::= expr between_op expr AND expr */
{
- ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy346.pExpr);
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy346.pExpr, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->x.pList = pList;
+ ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy118.pExpr);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy118.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy118.pExpr, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->x.pList = pList;
}else{
sqlite3ExprListDelete(pParse->db, pList);
}
- if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0);
- yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart;
- yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd;
+ if( yymsp[-3].minor.yy4 ) yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy118.pExpr, 0, 0);
+ yygotominor.yy118.zStart = yymsp[-4].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = yymsp[0].minor.yy118.zEnd;
}
break;
- case 224: /* expr ::= expr in_op LP exprlist RP */
+ case 228: /* expr ::= expr in_op LP exprlist RP */
{
- if( yymsp[-1].minor.yy14==0 ){
+ if( yymsp[-1].minor.yy322==0 ){
/* Expressions of the form
**
** expr1 IN ()
@@ -128936,241 +117185,225 @@ static void yy_reduce(
** simplify to constants 0 (false) and 1 (true), respectively,
** regardless of the value of expr1.
*/
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy328]);
- sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy346.pExpr);
- }else if( yymsp[-1].minor.yy14->nExpr==1 ){
- /* Expressions of the form:
- **
- ** expr1 IN (?1)
- ** expr1 NOT IN (?2)
- **
- ** with exactly one value on the RHS can be simplified to something
- ** like this:
- **
- ** expr1 == ?1
- ** expr1 <> ?2
- **
- ** But, the RHS of the == or <> is marked with the EP_Generic flag
- ** so that it may not contribute to the computation of comparison
- ** affinity or the collating sequence to use for comparison. Otherwise,
- ** the semantics would be subtly different from IN or NOT IN.
- */
- Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr;
- yymsp[-1].minor.yy14->a[0].pExpr = 0;
- sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14);
- /* pRHS cannot be NULL because a malloc error would have been detected
- ** before now and control would have never reached this point */
- if( ALWAYS(pRHS) ){
- pRHS->flags &= ~EP_Collate;
- pRHS->flags |= EP_Generic;
- }
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy328 ? TK_NE : TK_EQ, yymsp[-4].minor.yy346.pExpr, pRHS, 0);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy4]);
+ sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy118.pExpr);
}else{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy346.pExpr, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->x.pList = yymsp[-1].minor.yy14;
- sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy118.pExpr, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->x.pList = yymsp[-1].minor.yy322;
+ sqlite3ExprSetHeight(pParse, yygotominor.yy118.pExpr);
}else{
- sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14);
+ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322);
}
- if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0);
+ if( yymsp[-3].minor.yy4 ) yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy118.pExpr, 0, 0);
}
- yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ yygotominor.yy118.zStart = yymsp[-4].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 225: /* expr ::= LP select RP */
+ case 229: /* expr ::= LP select RP */
{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->x.pSelect = yymsp[-1].minor.yy3;
- ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect|EP_Subquery);
- sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->x.pSelect = yymsp[-1].minor.yy387;
+ ExprSetProperty(yygotominor.yy118.pExpr, EP_xIsSelect);
+ sqlite3ExprSetHeight(pParse, yygotominor.yy118.pExpr);
}else{
- sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3);
+ sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy387);
}
- yygotominor.yy346.zStart = yymsp[-2].minor.yy0.z;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ yygotominor.yy118.zStart = yymsp[-2].minor.yy0.z;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 226: /* expr ::= expr in_op LP select RP */
+ case 230: /* expr ::= expr in_op LP select RP */
{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy346.pExpr, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->x.pSelect = yymsp[-1].minor.yy3;
- ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect|EP_Subquery);
- sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy118.pExpr, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->x.pSelect = yymsp[-1].minor.yy387;
+ ExprSetProperty(yygotominor.yy118.pExpr, EP_xIsSelect);
+ sqlite3ExprSetHeight(pParse, yygotominor.yy118.pExpr);
}else{
- sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3);
+ sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy387);
}
- if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0);
- yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ if( yymsp[-3].minor.yy4 ) yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy118.pExpr, 0, 0);
+ yygotominor.yy118.zStart = yymsp[-4].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 227: /* expr ::= expr in_op nm dbnm */
+ case 231: /* expr ::= expr in_op nm dbnm */
{
SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-3].minor.yy346.pExpr, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->x.pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0);
- ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect|EP_Subquery);
- sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-3].minor.yy118.pExpr, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->x.pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0);
+ ExprSetProperty(yygotominor.yy118.pExpr, EP_xIsSelect);
+ sqlite3ExprSetHeight(pParse, yygotominor.yy118.pExpr);
}else{
sqlite3SrcListDelete(pParse->db, pSrc);
}
- if( yymsp[-2].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0);
- yygotominor.yy346.zStart = yymsp[-3].minor.yy346.zStart;
- yygotominor.yy346.zEnd = yymsp[0].minor.yy0.z ? &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] : &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n];
+ if( yymsp[-2].minor.yy4 ) yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy118.pExpr, 0, 0);
+ yygotominor.yy118.zStart = yymsp[-3].minor.yy118.zStart;
+ yygotominor.yy118.zEnd = yymsp[0].minor.yy0.z ? &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] : &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n];
}
break;
- case 228: /* expr ::= EXISTS LP select RP */
+ case 232: /* expr ::= EXISTS LP select RP */
{
- Expr *p = yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0);
+ Expr *p = yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0);
if( p ){
- p->x.pSelect = yymsp[-1].minor.yy3;
- ExprSetProperty(p, EP_xIsSelect|EP_Subquery);
- sqlite3ExprSetHeightAndFlags(pParse, p);
+ p->x.pSelect = yymsp[-1].minor.yy387;
+ ExprSetProperty(p, EP_xIsSelect);
+ sqlite3ExprSetHeight(pParse, p);
}else{
- sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3);
+ sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy387);
}
- yygotominor.yy346.zStart = yymsp[-3].minor.yy0.z;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ yygotominor.yy118.zStart = yymsp[-3].minor.yy0.z;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 229: /* expr ::= CASE case_operand case_exprlist case_else END */
+ case 233: /* expr ::= CASE case_operand case_exprlist case_else END */
{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy132, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->x.pList = yymsp[-1].minor.yy132 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy132) : yymsp[-2].minor.yy14;
- sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr);
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy314, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->x.pList = yymsp[-1].minor.yy314 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[-1].minor.yy314) : yymsp[-2].minor.yy322;
+ sqlite3ExprSetHeight(pParse, yygotominor.yy118.pExpr);
}else{
- sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14);
- sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy132);
+ sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy322);
+ sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy314);
}
- yygotominor.yy346.zStart = yymsp[-4].minor.yy0.z;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ yygotominor.yy118.zStart = yymsp[-4].minor.yy0.z;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 230: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ case 234: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy346.pExpr);
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,yygotominor.yy14, yymsp[0].minor.yy346.pExpr);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy118.pExpr);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,yygotominor.yy322, yymsp[0].minor.yy118.pExpr);
}
break;
- case 231: /* case_exprlist ::= WHEN expr THEN expr */
+ case 235: /* case_exprlist ::= WHEN expr THEN expr */
{
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr);
- yygotominor.yy14 = sqlite3ExprListAppend(pParse,yygotominor.yy14, yymsp[0].minor.yy346.pExpr);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy118.pExpr);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,yygotominor.yy322, yymsp[0].minor.yy118.pExpr);
}
break;
- case 238: /* nexprlist ::= nexprlist COMMA expr */
-{yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy346.pExpr);}
+ case 242: /* nexprlist ::= nexprlist COMMA expr */
+{yygotominor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy118.pExpr);}
break;
- case 239: /* nexprlist ::= expr */
-{yygotominor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy346.pExpr);}
+ case 243: /* nexprlist ::= expr */
+{yygotominor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy118.pExpr);}
break;
- case 240: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ case 244: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP idxlist RP where_opt */
{
sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
- sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy328,
- &yymsp[-11].minor.yy0, yymsp[0].minor.yy132, SQLITE_SO_ASC, yymsp[-8].minor.yy328);
+ sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy4,
+ &yymsp[-11].minor.yy0, yymsp[0].minor.yy314, SQLITE_SO_ASC, yymsp[-8].minor.yy4);
}
break;
- case 241: /* uniqueflag ::= UNIQUE */
- case 292: /* raisetype ::= ABORT */ yytestcase(yyruleno==292);
-{yygotominor.yy328 = OE_Abort;}
+ case 245: /* uniqueflag ::= UNIQUE */
+ case 298: /* raisetype ::= ABORT */ yytestcase(yyruleno==298);
+{yygotominor.yy4 = OE_Abort;}
break;
- case 242: /* uniqueflag ::= */
-{yygotominor.yy328 = OE_None;}
+ case 246: /* uniqueflag ::= */
+{yygotominor.yy4 = OE_None;}
break;
- case 245: /* eidlist ::= eidlist COMMA nm collate sortorder */
+ case 249: /* idxlist ::= idxlist COMMA nm collate sortorder */
{
- yygotominor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy328, yymsp[0].minor.yy328);
+ Expr *p = sqlite3ExprAddCollateToken(pParse, 0, &yymsp[-1].minor.yy0);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, p);
+ sqlite3ExprListSetName(pParse,yygotominor.yy322,&yymsp[-2].minor.yy0,1);
+ sqlite3ExprListCheckLength(pParse, yygotominor.yy322, "index");
+ if( yygotominor.yy322 ) yygotominor.yy322->a[yygotominor.yy322->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy4;
}
break;
- case 246: /* eidlist ::= nm collate sortorder */
+ case 250: /* idxlist ::= nm collate sortorder */
{
- yygotominor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy328, yymsp[0].minor.yy328);
+ Expr *p = sqlite3ExprAddCollateToken(pParse, 0, &yymsp[-1].minor.yy0);
+ yygotominor.yy322 = sqlite3ExprListAppend(pParse,0, p);
+ sqlite3ExprListSetName(pParse, yygotominor.yy322, &yymsp[-2].minor.yy0, 1);
+ sqlite3ExprListCheckLength(pParse, yygotominor.yy322, "index");
+ if( yygotominor.yy322 ) yygotominor.yy322->a[yygotominor.yy322->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy4;
}
break;
- case 249: /* cmd ::= DROP INDEX ifexists fullname */
-{sqlite3DropIndex(pParse, yymsp[0].minor.yy65, yymsp[-1].minor.yy328);}
+ case 251: /* collate ::= */
+{yygotominor.yy0.z = 0; yygotominor.yy0.n = 0;}
break;
- case 250: /* cmd ::= VACUUM */
- case 251: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==251);
+ case 253: /* cmd ::= DROP INDEX ifexists fullname */
+{sqlite3DropIndex(pParse, yymsp[0].minor.yy259, yymsp[-1].minor.yy4);}
+ break;
+ case 254: /* cmd ::= VACUUM */
+ case 255: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==255);
{sqlite3Vacuum(pParse);}
break;
- case 252: /* cmd ::= PRAGMA nm dbnm */
+ case 256: /* cmd ::= PRAGMA nm dbnm */
{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
break;
- case 253: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+ case 257: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
break;
- case 254: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ case 258: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
break;
- case 255: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+ case 259: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
break;
- case 256: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ case 260: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
break;
- case 265: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ case 270: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
{
Token all;
all.z = yymsp[-3].minor.yy0.z;
all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n;
- sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy473, &all);
+ sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy203, &all);
}
break;
- case 266: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ case 271: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
{
- sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy328, yymsp[-4].minor.yy378.a, yymsp[-4].minor.yy378.b, yymsp[-2].minor.yy65, yymsp[0].minor.yy132, yymsp[-10].minor.yy328, yymsp[-8].minor.yy328);
+ sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy4, yymsp[-4].minor.yy90.a, yymsp[-4].minor.yy90.b, yymsp[-2].minor.yy259, yymsp[0].minor.yy314, yymsp[-10].minor.yy4, yymsp[-8].minor.yy4);
yygotominor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0);
}
break;
- case 267: /* trigger_time ::= BEFORE */
- case 270: /* trigger_time ::= */ yytestcase(yyruleno==270);
-{ yygotominor.yy328 = TK_BEFORE; }
+ case 272: /* trigger_time ::= BEFORE */
+ case 275: /* trigger_time ::= */ yytestcase(yyruleno==275);
+{ yygotominor.yy4 = TK_BEFORE; }
break;
- case 268: /* trigger_time ::= AFTER */
-{ yygotominor.yy328 = TK_AFTER; }
+ case 273: /* trigger_time ::= AFTER */
+{ yygotominor.yy4 = TK_AFTER; }
break;
- case 269: /* trigger_time ::= INSTEAD OF */
-{ yygotominor.yy328 = TK_INSTEAD;}
+ case 274: /* trigger_time ::= INSTEAD OF */
+{ yygotominor.yy4 = TK_INSTEAD;}
break;
- case 271: /* trigger_event ::= DELETE|INSERT */
- case 272: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==272);
-{yygotominor.yy378.a = yymsp[0].major; yygotominor.yy378.b = 0;}
+ case 276: /* trigger_event ::= DELETE|INSERT */
+ case 277: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==277);
+{yygotominor.yy90.a = yymsp[0].major; yygotominor.yy90.b = 0;}
break;
- case 273: /* trigger_event ::= UPDATE OF idlist */
-{yygotominor.yy378.a = TK_UPDATE; yygotominor.yy378.b = yymsp[0].minor.yy408;}
+ case 278: /* trigger_event ::= UPDATE OF idlist */
+{yygotominor.yy90.a = TK_UPDATE; yygotominor.yy90.b = yymsp[0].minor.yy384;}
break;
- case 276: /* when_clause ::= */
- case 297: /* key_opt ::= */ yytestcase(yyruleno==297);
-{ yygotominor.yy132 = 0; }
+ case 281: /* when_clause ::= */
+ case 303: /* key_opt ::= */ yytestcase(yyruleno==303);
+{ yygotominor.yy314 = 0; }
break;
- case 277: /* when_clause ::= WHEN expr */
- case 298: /* key_opt ::= KEY expr */ yytestcase(yyruleno==298);
-{ yygotominor.yy132 = yymsp[0].minor.yy346.pExpr; }
+ case 282: /* when_clause ::= WHEN expr */
+ case 304: /* key_opt ::= KEY expr */ yytestcase(yyruleno==304);
+{ yygotominor.yy314 = yymsp[0].minor.yy118.pExpr; }
break;
- case 278: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ case 283: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
{
- assert( yymsp[-2].minor.yy473!=0 );
- yymsp[-2].minor.yy473->pLast->pNext = yymsp[-1].minor.yy473;
- yymsp[-2].minor.yy473->pLast = yymsp[-1].minor.yy473;
- yygotominor.yy473 = yymsp[-2].minor.yy473;
+ assert( yymsp[-2].minor.yy203!=0 );
+ yymsp[-2].minor.yy203->pLast->pNext = yymsp[-1].minor.yy203;
+ yymsp[-2].minor.yy203->pLast = yymsp[-1].minor.yy203;
+ yygotominor.yy203 = yymsp[-2].minor.yy203;
}
break;
- case 279: /* trigger_cmd_list ::= trigger_cmd SEMI */
+ case 284: /* trigger_cmd_list ::= trigger_cmd SEMI */
{
- assert( yymsp[-1].minor.yy473!=0 );
- yymsp[-1].minor.yy473->pLast = yymsp[-1].minor.yy473;
- yygotominor.yy473 = yymsp[-1].minor.yy473;
+ assert( yymsp[-1].minor.yy203!=0 );
+ yymsp[-1].minor.yy203->pLast = yymsp[-1].minor.yy203;
+ yygotominor.yy203 = yymsp[-1].minor.yy203;
}
break;
- case 281: /* trnm ::= nm DOT nm */
+ case 286: /* trnm ::= nm DOT nm */
{
yygotominor.yy0 = yymsp[0].minor.yy0;
sqlite3ErrorMsg(pParse,
@@ -129178,137 +117411,123 @@ static void yy_reduce(
"statements within triggers");
}
break;
- case 283: /* tridxby ::= INDEXED BY nm */
+ case 288: /* tridxby ::= INDEXED BY nm */
{
sqlite3ErrorMsg(pParse,
"the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 284: /* tridxby ::= NOT INDEXED */
+ case 289: /* tridxby ::= NOT INDEXED */
{
sqlite3ErrorMsg(pParse,
"the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 285: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */
-{ yygotominor.yy473 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy14, yymsp[0].minor.yy132, yymsp[-5].minor.yy186); }
+ case 290: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */
+{ yygotominor.yy203 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy322, yymsp[0].minor.yy314, yymsp[-5].minor.yy210); }
+ break;
+ case 291: /* trigger_cmd ::= insert_cmd INTO trnm inscollist_opt valuelist */
+{yygotominor.yy203 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy384, yymsp[0].minor.yy260.pList, yymsp[0].minor.yy260.pSelect, yymsp[-4].minor.yy210);}
break;
- case 286: /* trigger_cmd ::= insert_cmd INTO trnm idlist_opt select */
-{yygotominor.yy473 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy408, yymsp[0].minor.yy3, yymsp[-4].minor.yy186);}
+ case 292: /* trigger_cmd ::= insert_cmd INTO trnm inscollist_opt select */
+{yygotominor.yy203 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy384, 0, yymsp[0].minor.yy387, yymsp[-4].minor.yy210);}
break;
- case 287: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */
-{yygotominor.yy473 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy132);}
+ case 293: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */
+{yygotominor.yy203 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy314);}
break;
- case 288: /* trigger_cmd ::= select */
-{yygotominor.yy473 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy3); }
+ case 294: /* trigger_cmd ::= select */
+{yygotominor.yy203 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy387); }
break;
- case 289: /* expr ::= RAISE LP IGNORE RP */
+ case 295: /* expr ::= RAISE LP IGNORE RP */
{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0);
- if( yygotominor.yy346.pExpr ){
- yygotominor.yy346.pExpr->affinity = OE_Ignore;
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0);
+ if( yygotominor.yy118.pExpr ){
+ yygotominor.yy118.pExpr->affinity = OE_Ignore;
}
- yygotominor.yy346.zStart = yymsp[-3].minor.yy0.z;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ yygotominor.yy118.zStart = yymsp[-3].minor.yy0.z;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 290: /* expr ::= RAISE LP raisetype COMMA nm RP */
+ case 296: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
- yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0);
- if( yygotominor.yy346.pExpr ) {
- yygotominor.yy346.pExpr->affinity = (char)yymsp[-3].minor.yy328;
+ yygotominor.yy118.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0);
+ if( yygotominor.yy118.pExpr ) {
+ yygotominor.yy118.pExpr->affinity = (char)yymsp[-3].minor.yy4;
}
- yygotominor.yy346.zStart = yymsp[-5].minor.yy0.z;
- yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
+ yygotominor.yy118.zStart = yymsp[-5].minor.yy0.z;
+ yygotominor.yy118.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 291: /* raisetype ::= ROLLBACK */
-{yygotominor.yy328 = OE_Rollback;}
+ case 297: /* raisetype ::= ROLLBACK */
+{yygotominor.yy4 = OE_Rollback;}
break;
- case 293: /* raisetype ::= FAIL */
-{yygotominor.yy328 = OE_Fail;}
+ case 299: /* raisetype ::= FAIL */
+{yygotominor.yy4 = OE_Fail;}
break;
- case 294: /* cmd ::= DROP TRIGGER ifexists fullname */
+ case 300: /* cmd ::= DROP TRIGGER ifexists fullname */
{
- sqlite3DropTrigger(pParse,yymsp[0].minor.yy65,yymsp[-1].minor.yy328);
+ sqlite3DropTrigger(pParse,yymsp[0].minor.yy259,yymsp[-1].minor.yy4);
}
break;
- case 295: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ case 301: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
{
- sqlite3Attach(pParse, yymsp[-3].minor.yy346.pExpr, yymsp[-1].minor.yy346.pExpr, yymsp[0].minor.yy132);
+ sqlite3Attach(pParse, yymsp[-3].minor.yy118.pExpr, yymsp[-1].minor.yy118.pExpr, yymsp[0].minor.yy314);
}
break;
- case 296: /* cmd ::= DETACH database_kw_opt expr */
+ case 302: /* cmd ::= DETACH database_kw_opt expr */
{
- sqlite3Detach(pParse, yymsp[0].minor.yy346.pExpr);
+ sqlite3Detach(pParse, yymsp[0].minor.yy118.pExpr);
}
break;
- case 301: /* cmd ::= REINDEX */
+ case 307: /* cmd ::= REINDEX */
{sqlite3Reindex(pParse, 0, 0);}
break;
- case 302: /* cmd ::= REINDEX nm dbnm */
+ case 308: /* cmd ::= REINDEX nm dbnm */
{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 303: /* cmd ::= ANALYZE */
+ case 309: /* cmd ::= ANALYZE */
{sqlite3Analyze(pParse, 0, 0);}
break;
- case 304: /* cmd ::= ANALYZE nm dbnm */
+ case 310: /* cmd ::= ANALYZE nm dbnm */
{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 305: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+ case 311: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
{
- sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy65,&yymsp[0].minor.yy0);
+ sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy259,&yymsp[0].minor.yy0);
}
break;
- case 306: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column */
+ case 312: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column */
{
sqlite3AlterFinishAddColumn(pParse, &yymsp[0].minor.yy0);
}
break;
- case 307: /* add_column_fullname ::= fullname */
+ case 313: /* add_column_fullname ::= fullname */
{
pParse->db->lookaside.bEnabled = 0;
- sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy65);
+ sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy259);
}
break;
- case 310: /* cmd ::= create_vtab */
+ case 316: /* cmd ::= create_vtab */
{sqlite3VtabFinishParse(pParse,0);}
break;
- case 311: /* cmd ::= create_vtab LP vtabarglist RP */
+ case 317: /* cmd ::= create_vtab LP vtabarglist RP */
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
break;
- case 312: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ case 318: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
{
- sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy328);
+ sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy4);
}
break;
- case 315: /* vtabarg ::= */
+ case 321: /* vtabarg ::= */
{sqlite3VtabArgInit(pParse);}
break;
- case 317: /* vtabargtoken ::= ANY */
- case 318: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==318);
- case 319: /* lp ::= LP */ yytestcase(yyruleno==319);
+ case 323: /* vtabargtoken ::= ANY */
+ case 324: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==324);
+ case 325: /* lp ::= LP */ yytestcase(yyruleno==325);
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
break;
- case 323: /* with ::= */
-{yygotominor.yy59 = 0;}
- break;
- case 324: /* with ::= WITH wqlist */
- case 325: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==325);
-{ yygotominor.yy59 = yymsp[0].minor.yy59; }
- break;
- case 326: /* wqlist ::= nm eidlist_opt AS LP select RP */
-{
- yygotominor.yy59 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy3);
-}
- break;
- case 327: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
-{
- yygotominor.yy59 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy59, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy3);
-}
- break;
default:
/* (0) input ::= cmdlist */ yytestcase(yyruleno==0);
/* (1) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==1);
@@ -129323,28 +117542,28 @@ static void yy_reduce(
/* (25) cmd ::= create_table create_table_args */ yytestcase(yyruleno==25);
/* (36) columnlist ::= columnlist COMMA column */ yytestcase(yyruleno==36);
/* (37) columnlist ::= column */ yytestcase(yyruleno==37);
- /* (43) type ::= */ yytestcase(yyruleno==43);
- /* (50) signed ::= plus_num */ yytestcase(yyruleno==50);
- /* (51) signed ::= minus_num */ yytestcase(yyruleno==51);
- /* (52) carglist ::= carglist ccons */ yytestcase(yyruleno==52);
- /* (53) carglist ::= */ yytestcase(yyruleno==53);
- /* (60) ccons ::= NULL onconf */ yytestcase(yyruleno==60);
- /* (88) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==88);
- /* (89) conslist ::= tcons */ yytestcase(yyruleno==89);
- /* (91) tconscomma ::= */ yytestcase(yyruleno==91);
- /* (274) foreach_clause ::= */ yytestcase(yyruleno==274);
- /* (275) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==275);
- /* (282) tridxby ::= */ yytestcase(yyruleno==282);
- /* (299) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==299);
- /* (300) database_kw_opt ::= */ yytestcase(yyruleno==300);
- /* (308) kwcolumn_opt ::= */ yytestcase(yyruleno==308);
- /* (309) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==309);
- /* (313) vtabarglist ::= vtabarg */ yytestcase(yyruleno==313);
- /* (314) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==314);
- /* (316) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==316);
- /* (320) anylist ::= */ yytestcase(yyruleno==320);
- /* (321) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==321);
- /* (322) anylist ::= anylist ANY */ yytestcase(yyruleno==322);
+ /* (46) type ::= */ yytestcase(yyruleno==46);
+ /* (53) signed ::= plus_num */ yytestcase(yyruleno==53);
+ /* (54) signed ::= minus_num */ yytestcase(yyruleno==54);
+ /* (55) carglist ::= carglist ccons */ yytestcase(yyruleno==55);
+ /* (56) carglist ::= */ yytestcase(yyruleno==56);
+ /* (63) ccons ::= NULL onconf */ yytestcase(yyruleno==63);
+ /* (91) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==91);
+ /* (92) conslist ::= tcons */ yytestcase(yyruleno==92);
+ /* (94) tconscomma ::= */ yytestcase(yyruleno==94);
+ /* (279) foreach_clause ::= */ yytestcase(yyruleno==279);
+ /* (280) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==280);
+ /* (287) tridxby ::= */ yytestcase(yyruleno==287);
+ /* (305) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==305);
+ /* (306) database_kw_opt ::= */ yytestcase(yyruleno==306);
+ /* (314) kwcolumn_opt ::= */ yytestcase(yyruleno==314);
+ /* (315) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==315);
+ /* (319) vtabarglist ::= vtabarg */ yytestcase(yyruleno==319);
+ /* (320) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==320);
+ /* (322) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==322);
+ /* (326) anylist ::= */ yytestcase(yyruleno==326);
+ /* (327) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==327);
+ /* (328) anylist ::= anylist ANY */ yytestcase(yyruleno==328);
break;
};
assert( yyruleno>=0 && yyruleno<sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0]) );
@@ -129352,9 +117571,9 @@ static void yy_reduce(
yysize = yyRuleInfo[yyruleno].nrhs;
yypParser->yyidx -= yysize;
yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto);
- if( yyact <= YY_MAX_SHIFTREDUCE ){
- if( yyact>YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
- /* If the reduce action popped at least
+ if( yyact < YYNSTATE ){
+#ifdef NDEBUG
+ /* If we are not debugging and the reduce action popped at least
** one element off the stack, then we can push the new element back
** onto the stack here, and skip the stack overflow test in yy_shift().
** That gives a significant speed improvement. */
@@ -129364,12 +117583,13 @@ static void yy_reduce(
yymsp->stateno = (YYACTIONTYPE)yyact;
yymsp->major = (YYCODETYPE)yygoto;
yymsp->minor = yygotominor;
- yyTraceShift(yypParser, yyact);
- }else{
+ }else
+#endif
+ {
yy_shift(yypParser,yyact,yygoto,&yygotominor);
}
}else{
- assert( yyact == YY_ACCEPT_ACTION );
+ assert( yyact == YYNSTATE + YYNRULE + 1 );
yy_accept(yypParser);
}
}
@@ -129494,13 +117714,12 @@ SQLITE_PRIVATE void sqlite3Parser(
do{
yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor);
- if( yyact <= YY_MAX_SHIFTREDUCE ){
- if( yyact > YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+ if( yyact<YYNSTATE ){
yy_shift(yypParser,yyact,yymajor,&yyminorunion);
yypParser->yyerrcnt--;
yymajor = YYNOCODE;
- }else if( yyact <= YY_MAX_REDUCE ){
- yy_reduce(yypParser,yyact-YY_MIN_REDUCE);
+ }else if( yyact < YYNSTATE + YYNRULE ){
+ yy_reduce(yypParser,yyact-YYNSTATE);
}else{
assert( yyact == YY_ERROR_ACTION );
#ifdef YYERRORSYMBOL
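Illustration only, not part of the backed-out patch: the hunks above return the parser driver to the older action encoding, in which a single action number is read by numeric range — values below YYNSTATE are shifts, values below YYNSTATE+YYNRULE are reduces, and the two values just above that range are the error and accept actions. A minimal, self-contained sketch of that range-based dispatch; NSTATE, NRULE and the demo array are made-up stand-ins for the generated YYNSTATE, YYNRULE and yy_action[] values.

/* Sketch of the range-based action dispatch used by the restored parser. */
#include <stdio.h>

#define NSTATE 5                            /* 0..NSTATE-1            -> shift to that state */
#define NRULE  3                            /* NSTATE..NSTATE+NRULE-1 -> reduce by that rule */
#define ERROR_ACTION  (NSTATE + NRULE)      /* single error action                           */
#define ACCEPT_ACTION (NSTATE + NRULE + 1)  /* single accept action                          */

static void dispatch(int act){
  if( act < NSTATE ){
    printf("shift to state %d\n", act);
  }else if( act < NSTATE + NRULE ){
    printf("reduce by rule %d\n", act - NSTATE);
  }else if( act == ERROR_ACTION ){
    printf("syntax error\n");
  }else{
    printf("accept\n");                     /* act == NSTATE + NRULE + 1 */
  }
}

int main(void){
  int demo[] = { 2, NSTATE+1, ERROR_ACTION, ACCEPT_ACTION };
  for( unsigned i=0; i<sizeof(demo)/sizeof(demo[0]); i++ ) dispatch(demo[i]);
  return 0;
}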
@@ -129550,7 +117769,7 @@ SQLITE_PRIVATE void sqlite3Parser(
yymx != YYERRORSYMBOL &&
(yyact = yy_find_reduce_action(
yypParser->yystack[yypParser->yyidx].stateno,
- YYERRORSYMBOL)) >= YY_MIN_REDUCE
+ YYERRORSYMBOL)) >= YYNSTATE
){
yy_pop_parser_stack(yypParser);
}
@@ -129600,11 +117819,6 @@ SQLITE_PRIVATE void sqlite3Parser(
#endif
}
}while( yymajor!=YYNOCODE && yypParser->yyidx>=0 );
-#ifndef NDEBUG
- if( yyTraceFILE ){
- fprintf(yyTraceFILE,"%sReturn\n",yyTracePrompt);
- }
-#endif
return;
}
@@ -129627,7 +117841,6 @@ SQLITE_PRIVATE void sqlite3Parser(
** individual tokens and sends those tokens one-by-one over to the
** parser for analysis.
*/
-/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/*
@@ -129689,20 +117902,20 @@ const unsigned char ebcdicToAscii[] = {
** is substantially reduced. This is important for embedded applications
** on platforms with limited memory.
*/
-/* Hash score: 182 */
+/* Hash score: 177 */
static int keywordCode(const char *z, int n){
- /* zText[] encodes 834 bytes of keywords in 554 bytes */
+ /* zText[] encodes 819 bytes of keywords in 545 bytes */
/* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */
/* ABLEFTHENDEFERRABLELSEXCEPTRANSACTIONATURALTERAISEXCLUSIVE */
/* XISTSAVEPOINTERSECTRIGGEREFERENCESCONSTRAINTOFFSETEMPORARY */
- /* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERECURSIVE */
- /* BETWEENOTNULLIKECASCADELETECASECOLLATECREATECURRENT_DATEDETACH */
+ /* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERENAMEBETWEEN */
+ /* OTNULLIKECASCADELETECASECOLLATECREATECURRENT_DATEDETACH */
/* IMMEDIATEJOINSERTMATCHPLANALYZEPRAGMABORTVALUESVIRTUALIMITWHEN */
- /* WHERENAMEAFTEREPLACEANDEFAULTAUTOINCREMENTCASTCOLUMNCOMMIT */
+ /* WHEREPLACEAFTERESTRICTANDEFAULTAUTOINCREMENTCASTCOLUMNCOMMIT */
/* CONFLICTCROSSCURRENT_TIMESTAMPRIMARYDEFERREDISTINCTDROPFAIL */
- /* FROMFULLGLOBYIFISNULLORDERESTRICTRIGHTROLLBACKROWUNIONUSING */
- /* VACUUMVIEWINITIALLY */
- static const char zText[553] = {
+ /* FROMFULLGLOBYIFISNULLORDERIGHTROLLBACKROWUNIONUSINGVACUUMVIEW */
+ /* INITIALLY */
+ static const char zText[544] = {
'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H',
'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G',
'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A',
@@ -129715,75 +117928,75 @@ static int keywordCode(const char *z, int n){
'O','F','F','S','E','T','E','M','P','O','R','A','R','Y','U','N','I','Q',
'U','E','R','Y','W','I','T','H','O','U','T','E','R','E','L','E','A','S',
'E','A','T','T','A','C','H','A','V','I','N','G','R','O','U','P','D','A',
- 'T','E','B','E','G','I','N','N','E','R','E','C','U','R','S','I','V','E',
- 'B','E','T','W','E','E','N','O','T','N','U','L','L','I','K','E','C','A',
- 'S','C','A','D','E','L','E','T','E','C','A','S','E','C','O','L','L','A',
- 'T','E','C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A',
- 'T','E','D','E','T','A','C','H','I','M','M','E','D','I','A','T','E','J',
- 'O','I','N','S','E','R','T','M','A','T','C','H','P','L','A','N','A','L',
- 'Y','Z','E','P','R','A','G','M','A','B','O','R','T','V','A','L','U','E',
- 'S','V','I','R','T','U','A','L','I','M','I','T','W','H','E','N','W','H',
- 'E','R','E','N','A','M','E','A','F','T','E','R','E','P','L','A','C','E',
- 'A','N','D','E','F','A','U','L','T','A','U','T','O','I','N','C','R','E',
- 'M','E','N','T','C','A','S','T','C','O','L','U','M','N','C','O','M','M',
- 'I','T','C','O','N','F','L','I','C','T','C','R','O','S','S','C','U','R',
- 'R','E','N','T','_','T','I','M','E','S','T','A','M','P','R','I','M','A',
- 'R','Y','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','D',
- 'R','O','P','F','A','I','L','F','R','O','M','F','U','L','L','G','L','O',
- 'B','Y','I','F','I','S','N','U','L','L','O','R','D','E','R','E','S','T',
- 'R','I','C','T','R','I','G','H','T','R','O','L','L','B','A','C','K','R',
- 'O','W','U','N','I','O','N','U','S','I','N','G','V','A','C','U','U','M',
- 'V','I','E','W','I','N','I','T','I','A','L','L','Y',
+ 'T','E','B','E','G','I','N','N','E','R','E','N','A','M','E','B','E','T',
+ 'W','E','E','N','O','T','N','U','L','L','I','K','E','C','A','S','C','A',
+ 'D','E','L','E','T','E','C','A','S','E','C','O','L','L','A','T','E','C',
+ 'R','E','A','T','E','C','U','R','R','E','N','T','_','D','A','T','E','D',
+ 'E','T','A','C','H','I','M','M','E','D','I','A','T','E','J','O','I','N',
+ 'S','E','R','T','M','A','T','C','H','P','L','A','N','A','L','Y','Z','E',
+ 'P','R','A','G','M','A','B','O','R','T','V','A','L','U','E','S','V','I',
+ 'R','T','U','A','L','I','M','I','T','W','H','E','N','W','H','E','R','E',
+ 'P','L','A','C','E','A','F','T','E','R','E','S','T','R','I','C','T','A',
+ 'N','D','E','F','A','U','L','T','A','U','T','O','I','N','C','R','E','M',
+ 'E','N','T','C','A','S','T','C','O','L','U','M','N','C','O','M','M','I',
+ 'T','C','O','N','F','L','I','C','T','C','R','O','S','S','C','U','R','R',
+ 'E','N','T','_','T','I','M','E','S','T','A','M','P','R','I','M','A','R',
+ 'Y','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','D','R',
+ 'O','P','F','A','I','L','F','R','O','M','F','U','L','L','G','L','O','B',
+ 'Y','I','F','I','S','N','U','L','L','O','R','D','E','R','I','G','H','T',
+ 'R','O','L','L','B','A','C','K','R','O','W','U','N','I','O','N','U','S',
+ 'I','N','G','V','A','C','U','U','M','V','I','E','W','I','N','I','T','I',
+ 'A','L','L','Y',
};
static const unsigned char aHash[127] = {
- 76, 105, 117, 74, 0, 45, 0, 0, 82, 0, 77, 0, 0,
- 42, 12, 78, 15, 0, 116, 85, 54, 112, 0, 19, 0, 0,
- 121, 0, 119, 115, 0, 22, 93, 0, 9, 0, 0, 70, 71,
- 0, 69, 6, 0, 48, 90, 102, 0, 118, 101, 0, 0, 44,
- 0, 103, 24, 0, 17, 0, 122, 53, 23, 0, 5, 110, 25,
- 96, 0, 0, 124, 106, 60, 123, 57, 28, 55, 0, 91, 0,
- 100, 26, 0, 99, 0, 0, 0, 95, 92, 97, 88, 109, 14,
- 39, 108, 0, 81, 0, 18, 89, 111, 32, 0, 120, 80, 113,
- 62, 46, 84, 0, 0, 94, 40, 59, 114, 0, 36, 0, 0,
- 29, 0, 86, 63, 64, 0, 20, 61, 0, 56,
+ 75, 104, 115, 73, 0, 45, 0, 0, 81, 0, 76, 0, 0,
+ 42, 12, 77, 15, 0, 114, 84, 53, 111, 0, 19, 0, 0,
+ 119, 0, 117, 88, 0, 22, 92, 0, 9, 0, 0, 69, 70,
+ 0, 68, 6, 0, 48, 89, 101, 0, 116, 100, 0, 0, 44,
+ 0, 102, 24, 0, 17, 0, 120, 52, 23, 0, 5, 109, 25,
+ 95, 0, 0, 122, 105, 59, 121, 56, 28, 54, 0, 90, 0,
+ 99, 26, 0, 98, 0, 0, 0, 94, 91, 96, 87, 108, 14,
+ 39, 107, 0, 80, 0, 18, 86, 110, 32, 0, 118, 79, 112,
+ 61, 46, 83, 0, 0, 93, 40, 0, 113, 0, 36, 0, 0,
+ 29, 0, 85, 62, 63, 0, 20, 60, 0, 55,
};
- static const unsigned char aNext[124] = {
+ static const unsigned char aNext[122] = {
0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0,
0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 33, 0, 21, 0, 0, 0, 0, 0, 50,
- 0, 43, 3, 47, 0, 0, 0, 0, 30, 0, 58, 0, 38,
- 0, 0, 0, 1, 66, 0, 0, 67, 0, 41, 0, 0, 0,
- 0, 0, 0, 49, 65, 0, 0, 0, 0, 31, 52, 16, 34,
- 10, 0, 0, 0, 0, 0, 0, 0, 11, 72, 79, 0, 8,
- 0, 104, 98, 0, 107, 0, 87, 0, 75, 51, 0, 27, 37,
- 73, 83, 0, 35, 68, 0, 0,
+ 0, 0, 0, 0, 33, 0, 21, 0, 0, 0, 0, 0, 0,
+ 43, 3, 47, 0, 0, 0, 0, 30, 0, 57, 0, 38, 0,
+ 0, 0, 1, 65, 0, 0, 66, 0, 41, 0, 0, 0, 0,
+ 0, 0, 49, 64, 0, 0, 0, 51, 31, 0, 16, 34, 10,
+ 0, 0, 0, 0, 0, 0, 0, 11, 71, 78, 0, 8, 0,
+ 103, 97, 0, 106, 0, 58, 0, 74, 50, 27, 37, 72, 82,
+ 0, 35, 67, 0, 0,
};
- static const unsigned char aLen[124] = {
+ static const unsigned char aLen[122] = {
7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6,
7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 6,
11, 6, 2, 7, 5, 5, 9, 6, 9, 9, 7, 10, 10,
- 4, 6, 2, 3, 9, 4, 2, 6, 5, 7, 4, 5, 7,
- 6, 6, 5, 6, 5, 5, 9, 7, 7, 3, 2, 4, 4,
- 7, 3, 6, 4, 7, 6, 12, 6, 9, 4, 6, 5, 4,
- 7, 6, 5, 6, 7, 5, 4, 5, 6, 5, 7, 3, 7,
- 13, 2, 2, 4, 6, 6, 8, 5, 17, 12, 7, 8, 8,
- 2, 4, 4, 4, 4, 4, 2, 2, 6, 5, 8, 5, 8,
- 3, 5, 5, 6, 4, 9, 3,
+ 4, 6, 2, 3, 9, 4, 2, 6, 5, 7, 5, 7, 6,
+ 6, 5, 6, 5, 5, 6, 7, 7, 3, 2, 4, 4, 7,
+ 3, 6, 4, 7, 6, 12, 6, 9, 4, 6, 5, 4, 7,
+ 6, 5, 6, 7, 5, 4, 5, 7, 5, 8, 3, 7, 13,
+ 2, 2, 4, 6, 6, 8, 5, 17, 12, 7, 8, 8, 2,
+ 4, 4, 4, 4, 4, 2, 2, 6, 5, 5, 8, 3, 5,
+ 5, 6, 4, 9, 3,
};
- static const unsigned short int aOffset[124] = {
+ static const unsigned short int aOffset[122] = {
0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33,
36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81,
86, 91, 95, 96, 101, 105, 109, 117, 122, 128, 136, 142, 152,
- 159, 162, 162, 165, 167, 167, 171, 176, 179, 184, 184, 188, 192,
- 199, 204, 209, 212, 218, 221, 225, 234, 240, 240, 240, 243, 246,
- 250, 251, 255, 261, 265, 272, 278, 290, 296, 305, 307, 313, 318,
- 320, 327, 332, 337, 343, 349, 354, 358, 361, 367, 371, 378, 380,
- 387, 389, 391, 400, 404, 410, 416, 424, 429, 429, 445, 452, 459,
- 460, 467, 471, 475, 479, 483, 486, 488, 490, 496, 500, 508, 513,
- 521, 524, 529, 534, 540, 544, 549,
+ 159, 162, 162, 165, 167, 167, 171, 176, 179, 184, 188, 192, 199,
+ 204, 209, 212, 218, 221, 225, 231, 237, 237, 237, 240, 243, 247,
+ 248, 252, 258, 262, 269, 275, 287, 293, 302, 304, 310, 315, 317,
+ 324, 329, 334, 340, 346, 351, 355, 358, 365, 369, 377, 379, 386,
+ 388, 390, 399, 403, 409, 415, 423, 428, 428, 444, 451, 458, 459,
+ 466, 470, 474, 478, 482, 485, 487, 489, 495, 499, 504, 512, 515,
+ 520, 525, 531, 535, 540,
};
- static const unsigned char aCode[124] = {
+ static const unsigned char aCode[122] = {
TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE,
TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN,
TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD,
@@ -129793,22 +118006,22 @@ static int keywordCode(const char *z, int n){
TK_ALTER, TK_RAISE, TK_EXCLUSIVE, TK_EXISTS, TK_SAVEPOINT,
TK_INTERSECT, TK_TRIGGER, TK_REFERENCES, TK_CONSTRAINT, TK_INTO,
TK_OFFSET, TK_OF, TK_SET, TK_TEMP, TK_TEMP,
- TK_OR, TK_UNIQUE, TK_QUERY, TK_WITHOUT, TK_WITH,
- TK_JOIN_KW, TK_RELEASE, TK_ATTACH, TK_HAVING, TK_GROUP,
- TK_UPDATE, TK_BEGIN, TK_JOIN_KW, TK_RECURSIVE, TK_BETWEEN,
- TK_NOTNULL, TK_NOT, TK_NO, TK_NULL, TK_LIKE_KW,
- TK_CASCADE, TK_ASC, TK_DELETE, TK_CASE, TK_COLLATE,
- TK_CREATE, TK_CTIME_KW, TK_DETACH, TK_IMMEDIATE, TK_JOIN,
- TK_INSERT, TK_MATCH, TK_PLAN, TK_ANALYZE, TK_PRAGMA,
- TK_ABORT, TK_VALUES, TK_VIRTUAL, TK_LIMIT, TK_WHEN,
- TK_WHERE, TK_RENAME, TK_AFTER, TK_REPLACE, TK_AND,
- TK_DEFAULT, TK_AUTOINCR, TK_TO, TK_IN, TK_CAST,
- TK_COLUMNKW, TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW,
- TK_CTIME_KW, TK_PRIMARY, TK_DEFERRED, TK_DISTINCT, TK_IS,
- TK_DROP, TK_FAIL, TK_FROM, TK_JOIN_KW, TK_LIKE_KW,
- TK_BY, TK_IF, TK_ISNULL, TK_ORDER, TK_RESTRICT,
- TK_JOIN_KW, TK_ROLLBACK, TK_ROW, TK_UNION, TK_USING,
- TK_VACUUM, TK_VIEW, TK_INITIALLY, TK_ALL,
+ TK_OR, TK_UNIQUE, TK_QUERY, TK_WITHOUT, TK_JOIN_KW,
+ TK_RELEASE, TK_ATTACH, TK_HAVING, TK_GROUP, TK_UPDATE,
+ TK_BEGIN, TK_JOIN_KW, TK_RENAME, TK_BETWEEN, TK_NOTNULL,
+ TK_NOT, TK_NO, TK_NULL, TK_LIKE_KW, TK_CASCADE,
+ TK_ASC, TK_DELETE, TK_CASE, TK_COLLATE, TK_CREATE,
+ TK_CTIME_KW, TK_DETACH, TK_IMMEDIATE, TK_JOIN, TK_INSERT,
+ TK_MATCH, TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_ABORT,
+ TK_VALUES, TK_VIRTUAL, TK_LIMIT, TK_WHEN, TK_WHERE,
+ TK_REPLACE, TK_AFTER, TK_RESTRICT, TK_AND, TK_DEFAULT,
+ TK_AUTOINCR, TK_TO, TK_IN, TK_CAST, TK_COLUMNKW,
+ TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, TK_CTIME_KW,
+ TK_PRIMARY, TK_DEFERRED, TK_DISTINCT, TK_IS, TK_DROP,
+ TK_FAIL, TK_FROM, TK_JOIN_KW, TK_LIKE_KW, TK_BY,
+ TK_IF, TK_ISNULL, TK_ORDER, TK_JOIN_KW, TK_ROLLBACK,
+ TK_ROW, TK_UNION, TK_USING, TK_VACUUM, TK_VIEW,
+ TK_INITIALLY, TK_ALL,
};
int h, i;
if( n<2 ) return TK_ID;
@@ -129866,81 +118079,79 @@ static int keywordCode(const char *z, int n){
testcase( i==46 ); /* UNIQUE */
testcase( i==47 ); /* QUERY */
testcase( i==48 ); /* WITHOUT */
- testcase( i==49 ); /* WITH */
- testcase( i==50 ); /* OUTER */
- testcase( i==51 ); /* RELEASE */
- testcase( i==52 ); /* ATTACH */
- testcase( i==53 ); /* HAVING */
- testcase( i==54 ); /* GROUP */
- testcase( i==55 ); /* UPDATE */
- testcase( i==56 ); /* BEGIN */
- testcase( i==57 ); /* INNER */
- testcase( i==58 ); /* RECURSIVE */
- testcase( i==59 ); /* BETWEEN */
- testcase( i==60 ); /* NOTNULL */
- testcase( i==61 ); /* NOT */
- testcase( i==62 ); /* NO */
- testcase( i==63 ); /* NULL */
- testcase( i==64 ); /* LIKE */
- testcase( i==65 ); /* CASCADE */
- testcase( i==66 ); /* ASC */
- testcase( i==67 ); /* DELETE */
- testcase( i==68 ); /* CASE */
- testcase( i==69 ); /* COLLATE */
- testcase( i==70 ); /* CREATE */
- testcase( i==71 ); /* CURRENT_DATE */
- testcase( i==72 ); /* DETACH */
- testcase( i==73 ); /* IMMEDIATE */
- testcase( i==74 ); /* JOIN */
- testcase( i==75 ); /* INSERT */
- testcase( i==76 ); /* MATCH */
- testcase( i==77 ); /* PLAN */
- testcase( i==78 ); /* ANALYZE */
- testcase( i==79 ); /* PRAGMA */
- testcase( i==80 ); /* ABORT */
- testcase( i==81 ); /* VALUES */
- testcase( i==82 ); /* VIRTUAL */
- testcase( i==83 ); /* LIMIT */
- testcase( i==84 ); /* WHEN */
- testcase( i==85 ); /* WHERE */
- testcase( i==86 ); /* RENAME */
- testcase( i==87 ); /* AFTER */
- testcase( i==88 ); /* REPLACE */
- testcase( i==89 ); /* AND */
- testcase( i==90 ); /* DEFAULT */
- testcase( i==91 ); /* AUTOINCREMENT */
- testcase( i==92 ); /* TO */
- testcase( i==93 ); /* IN */
- testcase( i==94 ); /* CAST */
- testcase( i==95 ); /* COLUMN */
- testcase( i==96 ); /* COMMIT */
- testcase( i==97 ); /* CONFLICT */
- testcase( i==98 ); /* CROSS */
- testcase( i==99 ); /* CURRENT_TIMESTAMP */
- testcase( i==100 ); /* CURRENT_TIME */
- testcase( i==101 ); /* PRIMARY */
- testcase( i==102 ); /* DEFERRED */
- testcase( i==103 ); /* DISTINCT */
- testcase( i==104 ); /* IS */
- testcase( i==105 ); /* DROP */
- testcase( i==106 ); /* FAIL */
- testcase( i==107 ); /* FROM */
- testcase( i==108 ); /* FULL */
- testcase( i==109 ); /* GLOB */
- testcase( i==110 ); /* BY */
- testcase( i==111 ); /* IF */
- testcase( i==112 ); /* ISNULL */
- testcase( i==113 ); /* ORDER */
- testcase( i==114 ); /* RESTRICT */
- testcase( i==115 ); /* RIGHT */
- testcase( i==116 ); /* ROLLBACK */
- testcase( i==117 ); /* ROW */
- testcase( i==118 ); /* UNION */
- testcase( i==119 ); /* USING */
- testcase( i==120 ); /* VACUUM */
- testcase( i==121 ); /* VIEW */
- testcase( i==122 ); /* INITIALLY */
- testcase( i==123 ); /* ALL */
+ testcase( i==49 ); /* OUTER */
+ testcase( i==50 ); /* RELEASE */
+ testcase( i==51 ); /* ATTACH */
+ testcase( i==52 ); /* HAVING */
+ testcase( i==53 ); /* GROUP */
+ testcase( i==54 ); /* UPDATE */
+ testcase( i==55 ); /* BEGIN */
+ testcase( i==56 ); /* INNER */
+ testcase( i==57 ); /* RENAME */
+ testcase( i==58 ); /* BETWEEN */
+ testcase( i==59 ); /* NOTNULL */
+ testcase( i==60 ); /* NOT */
+ testcase( i==61 ); /* NO */
+ testcase( i==62 ); /* NULL */
+ testcase( i==63 ); /* LIKE */
+ testcase( i==64 ); /* CASCADE */
+ testcase( i==65 ); /* ASC */
+ testcase( i==66 ); /* DELETE */
+ testcase( i==67 ); /* CASE */
+ testcase( i==68 ); /* COLLATE */
+ testcase( i==69 ); /* CREATE */
+ testcase( i==70 ); /* CURRENT_DATE */
+ testcase( i==71 ); /* DETACH */
+ testcase( i==72 ); /* IMMEDIATE */
+ testcase( i==73 ); /* JOIN */
+ testcase( i==74 ); /* INSERT */
+ testcase( i==75 ); /* MATCH */
+ testcase( i==76 ); /* PLAN */
+ testcase( i==77 ); /* ANALYZE */
+ testcase( i==78 ); /* PRAGMA */
+ testcase( i==79 ); /* ABORT */
+ testcase( i==80 ); /* VALUES */
+ testcase( i==81 ); /* VIRTUAL */
+ testcase( i==82 ); /* LIMIT */
+ testcase( i==83 ); /* WHEN */
+ testcase( i==84 ); /* WHERE */
+ testcase( i==85 ); /* REPLACE */
+ testcase( i==86 ); /* AFTER */
+ testcase( i==87 ); /* RESTRICT */
+ testcase( i==88 ); /* AND */
+ testcase( i==89 ); /* DEFAULT */
+ testcase( i==90 ); /* AUTOINCREMENT */
+ testcase( i==91 ); /* TO */
+ testcase( i==92 ); /* IN */
+ testcase( i==93 ); /* CAST */
+ testcase( i==94 ); /* COLUMN */
+ testcase( i==95 ); /* COMMIT */
+ testcase( i==96 ); /* CONFLICT */
+ testcase( i==97 ); /* CROSS */
+ testcase( i==98 ); /* CURRENT_TIMESTAMP */
+ testcase( i==99 ); /* CURRENT_TIME */
+ testcase( i==100 ); /* PRIMARY */
+ testcase( i==101 ); /* DEFERRED */
+ testcase( i==102 ); /* DISTINCT */
+ testcase( i==103 ); /* IS */
+ testcase( i==104 ); /* DROP */
+ testcase( i==105 ); /* FAIL */
+ testcase( i==106 ); /* FROM */
+ testcase( i==107 ); /* FULL */
+ testcase( i==108 ); /* GLOB */
+ testcase( i==109 ); /* BY */
+ testcase( i==110 ); /* IF */
+ testcase( i==111 ); /* ISNULL */
+ testcase( i==112 ); /* ORDER */
+ testcase( i==113 ); /* RIGHT */
+ testcase( i==114 ); /* ROLLBACK */
+ testcase( i==115 ); /* ROW */
+ testcase( i==116 ); /* UNION */
+ testcase( i==117 ); /* USING */
+ testcase( i==118 ); /* VACUUM */
+ testcase( i==119 ); /* VIEW */
+ testcase( i==120 ); /* INITIALLY */
+ testcase( i==121 ); /* ALL */
return aCode[i];
}
}
@@ -129949,7 +118160,7 @@ static int keywordCode(const char *z, int n){
SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
return keywordCode((char*)z, n);
}
-#define SQLITE_N_KEYWORD 124
+#define SQLITE_N_KEYWORD 122
/************** End of keywordhash.h *****************************************/
/************** Continuing where we left off in tokenize.c *******************/
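Illustration only, outside the patch: keywordCode() above resolves keywords by packing them into one overlapping text blob (zText), hashing the first character, last character and length into a small bucket table (aHash), and walking a per-bucket chain (aNext) while comparing lengths and offsets. A compact sketch of the same idea; the three-keyword tables and the numeric codes below are hypothetical, whereas the generated tables above cover all 122 keywords.

/* Sketch of the packed-text + hash-bucket keyword lookup scheme. */
#include <ctype.h>
#include <stdio.h>

#define NKW   3
#define NHASH 16

static const char zText[] = "SELECTABLEFT";            /* "SELECT", "TABLE", "LEFT" overlap   */
static const unsigned char aLen[NKW]    = { 6, 5, 4 }; /* keyword lengths                     */
static const unsigned char aOffset[NKW] = { 0, 5, 8 }; /* offsets into zText                  */
static const int aCode[NKW] = { 100, 101, 102 };       /* stand-in token codes                */
static int aHash[NHASH];                               /* bucket -> 1-based keyword index     */
static int aNext[NKW];                                 /* next keyword in the same bucket     */

static int kwHash(const char *z, int n){
  return (toupper((unsigned char)z[0])*4 ^
          toupper((unsigned char)z[n-1])*3 ^ n) % NHASH;
}

static int sameWord(const char *a, const char *b, int n){
  while( n-- ){
    if( toupper((unsigned char)*a++)!=toupper((unsigned char)*b++) ) return 0;
  }
  return 1;
}

static void kwInit(void){
  for(int i=0; i<NKW; i++){
    int h = kwHash(&zText[aOffset[i]], aLen[i]);
    aNext[i] = aHash[h];                               /* push onto the bucket's chain */
    aHash[h] = i+1;
  }
}

/* Return the stand-in token code for z[0..n-1], or 0 if it is not a keyword. */
static int kwCode(const char *z, int n){
  for(int i=aHash[kwHash(z, n)]; i; i=aNext[i-1]){
    if( aLen[i-1]==n && sameWord(z, &zText[aOffset[i-1]], n) ) return aCode[i-1];
  }
  return 0;
}

int main(void){
  kwInit();
  printf("%d %d %d\n", kwCode("select", 6), kwCode("LEFT", 4), kwCode("name", 4));
  return 0;
}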
@@ -129967,7 +118178,7 @@ SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
** end result.
**
** Ticket #1066. the SQL standard does not allow '$' in the
-** middle of identifiers. But many SQL implementations do.
+** middle of identifiers. But many SQL implementations do.
** SQLite will allow '$' in identifiers for compatibility.
** But the feature is undocumented.
*/
@@ -129993,11 +118204,6 @@ SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[] = {
#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40]))
#endif
-/* Make the IdChar function accessible from ctime.c */
-#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_PRIVATE int sqlite3IsIdChar(u8 c){ return IdChar(c); }
-#endif
-
/*
** Return the length of the token that begins at z[0].
@@ -130165,12 +118371,6 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' );
testcase( z[0]=='9' );
*tokenType = TK_INTEGER;
-#ifndef SQLITE_OMIT_HEX_INTEGER
- if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){
- for(i=3; sqlite3Isxdigit(z[i]); i++){}
- return i;
- }
-#endif
for(i=0; sqlite3Isdigit(z[i]); i++){}
#ifndef SQLITE_OMIT_FLOATING_POINT
if( z[i]=='.' ){
@@ -130204,15 +118404,24 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
for(i=1; sqlite3Isdigit(z[i]); i++){}
return i;
}
+ case '#': {
+ for(i=1; sqlite3Isdigit(z[i]); i++){}
+ if( i>1 ){
+ /* Parameters of the form #NNN (where NNN is a number) are used
+ ** internally by sqlite3NestedParse. */
+ *tokenType = TK_REGISTER;
+ return i;
+ }
+ /* Fall through into the next case if the '#' is not followed by
+ ** a digit. Try to match #AAAA where AAAA is a parameter name. */
+ }
#ifndef SQLITE_OMIT_TCL_VARIABLE
case '$':
#endif
case '@': /* For compatibility with MS SQL Server */
- case '#':
case ':': {
int n = 0;
- testcase( z[0]=='$' ); testcase( z[0]=='@' );
- testcase( z[0]==':' ); testcase( z[0]=='#' );
+ testcase( z[0]=='$' ); testcase( z[0]=='@' ); testcase( z[0]==':' );
*tokenType = TK_VARIABLE;
for(i=1; (c=z[i])!=0; i++){
if( IdChar(c) ){
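Illustration only, outside the patch: with the hunk above restored, a '#' followed by one or more digits is classified as an internal register reference (TK_REGISTER, used by sqlite3NestedParse), while any other '#' falls through to the named-parameter path. A simplified sketch of that classification; TOK_* and IdChar() here are local stand-ins for the real token codes and character classifier, and the real tokenizer additionally handles TCL-style '(...)' suffixes.

/* Sketch of the '#NNN' register-token vs. '#name' parameter-token split. */
#include <ctype.h>
#include <stdio.h>

enum { TOK_REGISTER = 1, TOK_VARIABLE = 2, TOK_ILLEGAL = 3 };

#define IdChar(c) (isalnum((unsigned char)(c)) || (c)=='_' || (c)=='$')

/* z points at a '#'; return the token type and store its length in *pn. */
static int hashToken(const char *z, int *pn){
  int i;
  for(i=1; isdigit((unsigned char)z[i]); i++){}
  if( i>1 ){                       /* #NNN: internal register reference */
    *pn = i;
    return TOK_REGISTER;
  }
  for(i=1; IdChar(z[i]); i++){}    /* fall through: #name parameter */
  *pn = i;
  return i>1 ? TOK_VARIABLE : TOK_ILLEGAL;
}

int main(void){
  int n;
  printf("%d\n", hashToken("#12 ", &n));    /* -> TOK_REGISTER, n==3  */
  printf("%d\n", hashToken("#abc)", &n));   /* -> TOK_VARIABLE, n==4  */
  printf("%d\n", hashToken("# ", &n));      /* -> TOK_ILLEGAL,  n==1  */
  return 0;
}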
@@ -130284,7 +118493,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
sqlite3 *db = pParse->db; /* The database connection */
int mxSqlLen; /* Max length of an SQL string */
- assert( zSql!=0 );
+
mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH];
if( db->nVdbeActive==0 ){
db->u1.isInterrupted = 0;
@@ -130293,8 +118502,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
pParse->zTail = zSql;
i = 0;
assert( pzErrMsg!=0 );
- /* sqlite3ParserTrace(stdout, "parser: "); */
- pEngine = sqlite3ParserAlloc(sqlite3Malloc);
+ pEngine = sqlite3ParserAlloc((void*(*)(size_t))sqlite3Malloc);
if( pEngine==0 ){
db->mallocFailed = 1;
return SQLITE_NOMEM;
@@ -130325,8 +118533,10 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
break;
}
case TK_ILLEGAL: {
- sqlite3ErrorMsg(pParse, "unrecognized token: \"%T\"",
+ sqlite3DbFree(db, *pzErrMsg);
+ *pzErrMsg = sqlite3MPrintf(db, "unrecognized token: \"%T\"",
&pParse->sLastToken);
+ nErr++;
goto abort_parse;
}
case TK_SEMI: {
@@ -130344,23 +118554,17 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
}
}
abort_parse:
- assert( nErr==0 );
- if( pParse->rc==SQLITE_OK && db->mallocFailed==0 ){
- assert( zSql[i]==0 );
+ if( zSql[i]==0 && nErr==0 && pParse->rc==SQLITE_OK ){
if( lastTokenParsed!=TK_SEMI ){
sqlite3Parser(pEngine, TK_SEMI, pParse->sLastToken, pParse);
pParse->zTail = &zSql[i];
}
- if( pParse->rc==SQLITE_OK && db->mallocFailed==0 ){
- sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse);
- }
+ sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse);
}
#ifdef YYTRACKMAXSTACKDEPTH
- sqlite3_mutex_enter(sqlite3MallocMutex());
sqlite3StatusSet(SQLITE_STATUS_PARSER_STACK,
sqlite3ParserStackPeak(pEngine)
);
- sqlite3_mutex_leave(sqlite3MallocMutex());
#endif /* YYDEBUG */
sqlite3ParserFree(pEngine, sqlite3_free);
db->lookaside.bEnabled = enableLookaside;
@@ -130368,7 +118572,7 @@ abort_parse:
pParse->rc = SQLITE_NOMEM;
}
if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){
- pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc));
+ sqlite3SetString(&pParse->zErrMsg, db, "%s", sqlite3ErrStr(pParse->rc));
}
assert( pzErrMsg!=0 );
if( pParse->zErrMsg ){
@@ -130400,7 +118604,6 @@ abort_parse:
sqlite3DeleteTable(db, pParse->pNewTable);
}
- if( pParse->bFreeWith ) sqlite3WithDelete(db, pParse->pWith);
sqlite3DeleteTrigger(db, pParse->pNewTrigger);
for(i=pParse->nzVar-1; i>=0; i--) sqlite3DbFree(db, pParse->azVar[i]);
sqlite3DbFree(db, pParse->azVar);
@@ -130414,7 +118617,9 @@ abort_parse:
pParse->pZombieTab = p->pNextZombie;
sqlite3DeleteTable(db, p);
}
- assert( nErr==0 || pParse->rc!=SQLITE_OK );
+ if( nErr>0 && pParse->rc==SQLITE_OK ){
+ pParse->rc = SQLITE_ERROR;
+ }
return nErr;
}
@@ -130438,7 +118643,6 @@ abort_parse:
** separating it out, the code will be automatically omitted from
** static links that do not use it.
*/
-/* #include "sqliteInt.h" */
#ifndef SQLITE_OMIT_COMPLETE
/*
@@ -130492,7 +118696,7 @@ SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
** a statement.
**
** (4) CREATE The keyword CREATE has been seen at the beginning of a
-** statement, possibly preceded by EXPLAIN and/or followed by
+** statement, possibly preceded by EXPLAIN and/or followed by
** TEMP or TEMPORARY
**
** (5) TRIGGER We are in the middle of a trigger definition that must be
@@ -130502,7 +118706,7 @@ SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
** the end of a trigger definition.
**
** (7) END We've seen the ";END" of the ";END;" that occurs at the end
-** of a trigger definition.
+** of a trigger definition.
**
** Transitions between states above are determined by tokens extracted
** from the input. The following tokens are significant:
@@ -130523,7 +118727,7 @@ SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
** to recognize the end of a trigger can be omitted. All we have to do
 ** is look for a semicolon that is not part of a string or comment.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){
+SQLITE_API int sqlite3_complete(const char *zSql){
u8 state = 0; /* Current state, using numbers defined in header comment */
u8 token; /* Value of the next token */
@@ -130545,7 +118749,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){
};
#else
/* If triggers are not supported by this compile then the statement machine
- ** used to detect the end of a statement is much simpler
+ ** used to detect the end of a statement is much simpler
*/
static const u8 trans[3][3] = {
/* Token: */
@@ -130556,13 +118760,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){
};
#endif /* SQLITE_OMIT_TRIGGER */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( zSql==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
-
while( *zSql ){
switch( *zSql ){
case ';': { /* A semicolon */
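Illustration only, outside the patch: the three-state table restored above (the SQLITE_OMIT_TRIGGER variant) drives sqlite3_complete() by mapping each input character to a token class and stepping through trans[][]; the statement is complete when the scan ends in the START state. A self-contained sketch of that table-driven scan; it deliberately ignores string literals and comments, which the real function also tracks.

/* Sketch of the simplified statement-completeness state machine. */
#include <ctype.h>
#include <stdio.h>

enum { tkSEMI = 0, tkWS = 1, tkOTHER = 2 };

/* state 0 = INVALID, 1 = START, 2 = NORMAL; complete iff final state is 1 */
static const unsigned char trans[3][3] = {
  /*           tkSEMI  tkWS  tkOTHER */
  /* INVALID */ {   1,    0,      2 },
  /* START   */ {   1,    1,      2 },
  /* NORMAL  */ {   1,    2,      2 },
};

static int miniComplete(const char *zSql){
  unsigned char state = 0;
  for(; *zSql; zSql++){
    int token = (*zSql==';') ? tkSEMI
              : isspace((unsigned char)*zSql) ? tkWS
              : tkOTHER;
    state = trans[state][token];
  }
  return state==1;
}

int main(void){
  printf("%d\n", miniComplete("SELECT 1;"));     /* 1: complete   */
  printf("%d\n", miniComplete("SELECT 1;  "));   /* 1: complete   */
  printf("%d\n", miniComplete("SELECT 1"));      /* 0: incomplete */
  return 0;
}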
@@ -130688,10 +118885,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){
** above, except that the parameter is required to be UTF-16 encoded, not
** UTF-8.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *zSql){
+SQLITE_API int sqlite3_complete16(const void *zSql){
sqlite3_value *pVal;
char const *zSql8;
- int rc;
+ int rc = SQLITE_NOMEM;
#ifndef SQLITE_OMIT_AUTOINIT
rc = sqlite3_initialize();
@@ -130706,7 +118903,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *zSql){
rc = SQLITE_NOMEM;
}
sqlite3ValueFree(pVal);
- return rc & 0xff;
+ return sqlite3ApiExit(0, rc);
}
#endif /* SQLITE_OMIT_UTF16 */
#endif /* SQLITE_OMIT_COMPLETE */
@@ -130729,7 +118926,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *zSql){
** other files are for internal use by SQLite and should not be
** accessed by users of the library.
*/
-/* #include "sqliteInt.h" */
#ifdef SQLITE_ENABLE_FTS3
/************** Include fts3.h in the middle of main.c ***********************/
@@ -130749,7 +118945,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *zSql){
** This header file is used by programs that want to link against the
** FTS3 library. All it does is declare the sqlite3Fts3Init() interface.
*/
-/* #include "sqlite3.h" */
#if 0
extern "C" {
@@ -130782,7 +118977,6 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db);
** This header file is used by programs that want to link against the
** RTREE library. All it does is declare the sqlite3RtreeInit() interface.
*/
-/* #include "sqlite3.h" */
#if 0
extern "C" {
@@ -130815,7 +119009,6 @@ SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db);
** This header file is used by programs that want to link against the
** ICU extension. All it does is declare the sqlite3IcuInit() interface.
*/
-/* #include "sqlite3.h" */
#if 0
extern "C" {
@@ -130831,12 +119024,6 @@ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db);
/************** End of sqliteicu.h *******************************************/
/************** Continuing where we left off in main.c ***********************/
#endif
-#ifdef SQLITE_ENABLE_JSON1
-SQLITE_PRIVATE int sqlite3Json1Init(sqlite3*);
-#endif
-#ifdef SQLITE_ENABLE_FTS5
-SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*);
-#endif
#ifndef SQLITE_AMALGAMATION
/* IMPLEMENTATION-OF: R-46656-45156 The sqlite3_version[] string constant
@@ -130848,36 +119035,24 @@ SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
/* IMPLEMENTATION-OF: R-53536-42575 The sqlite3_libversion() function returns
 ** a pointer to the sqlite3_version[] string constant.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void){ return sqlite3_version; }
+SQLITE_API const char *sqlite3_libversion(void){ return sqlite3_version; }
/* IMPLEMENTATION-OF: R-63124-39300 The sqlite3_sourceid() function returns a
** pointer to a string constant whose value is the same as the
** SQLITE_SOURCE_ID C preprocessor macro.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
+SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
/* IMPLEMENTATION-OF: R-35210-63508 The sqlite3_libversion_number() function
** returns an integer equal to SQLITE_VERSION_NUMBER.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; }
+SQLITE_API int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; }
/* IMPLEMENTATION-OF: R-20790-14025 The sqlite3_threadsafe() function returns
** zero if and only if SQLite was compiled with mutexing code omitted due to
** the SQLITE_THREADSAFE compile-time option being set to 0.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; }
-
-/*
-** When compiling the test fixture or with debugging enabled (on Win32),
-** this variable being set to non-zero will cause OSTRACE macros to emit
-** extra diagnostic information.
-*/
-#ifdef SQLITE_HAVE_OS_TRACE
-# ifndef SQLITE_DEBUG_OS_TRACE
-# define SQLITE_DEBUG_OS_TRACE 0
-# endif
- int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE;
-#endif
+SQLITE_API int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; }
#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE)
/*
@@ -130886,7 +119061,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void){ return SQLITE_THREADSAFE
** I/O active are written using this function. These messages
** are intended for debugging activity only.
*/
-SQLITE_API void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...) = 0;
+SQLITE_PRIVATE void (*sqlite3IoTrace)(const char*, ...) = 0;
#endif
/*
@@ -130938,7 +119113,7 @@ SQLITE_API char *sqlite3_data_directory = 0;
** * Recursive calls to this routine from thread X return immediately
** without blocking.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){
+SQLITE_API int sqlite3_initialize(void){
MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */
int rc; /* Result code */
#ifdef SQLITE_EXTRA_INIT
@@ -130952,11 +119127,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){
}
#endif
- /* If the following assert() fails on some obscure processor/compiler
- ** combination, the work-around is to set the correct pointer
- ** size at compile-time using -DSQLITE_PTRSIZE=n compile-time option */
- assert( SQLITE_PTRSIZE==sizeof(char*) );
-
/* If SQLite is already completely initialized, then this call
** to sqlite3_initialize() should be a no-op. But the initialization
** must be complete. So isInit must not be set until the very end
@@ -130964,6 +119134,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){
*/
if( sqlite3GlobalConfig.isInit ) return SQLITE_OK;
+#ifdef SQLITE_ENABLE_SQLLOG
+ {
+ extern void sqlite3_init_sqllog(void);
+ sqlite3_init_sqllog();
+ }
+#endif
+
/* Make sure the mutex subsystem is initialized. If unable to
** initialize the mutex subsystem, return early with the error.
** If the system is so sick that we are unable to allocate a mutex,
@@ -131099,14 +119276,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){
** on when SQLite is already shut down. If SQLite is already shut down
** when this routine is invoked, then this routine is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void){
-#ifdef SQLITE_OMIT_WSD
- int rc = sqlite3_wsd_init(4096, 24);
- if( rc!=SQLITE_OK ){
- return rc;
- }
-#endif
-
+SQLITE_API int sqlite3_shutdown(void){
if( sqlite3GlobalConfig.isInit ){
#ifdef SQLITE_EXTRA_SHUTDOWN
void SQLITE_EXTRA_SHUTDOWN(void);
@@ -131153,7 +119323,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void){
** threadsafe. Failure to heed these warnings can lead to unpredictable
** behavior.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
+SQLITE_API int sqlite3_config(int op, ...){
va_list ap;
int rc = SQLITE_OK;
@@ -131165,43 +119335,33 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
switch( op ){
/* Mutex configuration options are only available in a threadsafe
- ** compile.
+ ** compile.
*/
-#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-54466-46756 */
+#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0
case SQLITE_CONFIG_SINGLETHREAD: {
- /* EVIDENCE-OF: R-02748-19096 This option sets the threading mode to
- ** Single-thread. */
- sqlite3GlobalConfig.bCoreMutex = 0; /* Disable mutex on core */
- sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */
+ /* Disable all mutexing */
+ sqlite3GlobalConfig.bCoreMutex = 0;
+ sqlite3GlobalConfig.bFullMutex = 0;
break;
}
-#endif
-#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-20520-54086 */
case SQLITE_CONFIG_MULTITHREAD: {
- /* EVIDENCE-OF: R-14374-42468 This option sets the threading mode to
- ** Multi-thread. */
- sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */
- sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */
+ /* Disable mutexing of database connections */
+ /* Enable mutexing of core data structures */
+ sqlite3GlobalConfig.bCoreMutex = 1;
+ sqlite3GlobalConfig.bFullMutex = 0;
break;
}
-#endif
-#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-59593-21810 */
case SQLITE_CONFIG_SERIALIZED: {
- /* EVIDENCE-OF: R-41220-51800 This option sets the threading mode to
- ** Serialized. */
- sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */
- sqlite3GlobalConfig.bFullMutex = 1; /* Enable mutex on connections */
+ /* Enable all mutexing */
+ sqlite3GlobalConfig.bCoreMutex = 1;
+ sqlite3GlobalConfig.bFullMutex = 1;
break;
}
-#endif
-#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-63666-48755 */
case SQLITE_CONFIG_MUTEX: {
/* Specify an alternative mutex implementation */
sqlite3GlobalConfig.mutex = *va_arg(ap, sqlite3_mutex_methods*);
break;
}
-#endif
-#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-14450-37597 */
case SQLITE_CONFIG_GETMUTEX: {
/* Retrieve the current mutex implementation */
*va_arg(ap, sqlite3_mutex_methods*) = sqlite3GlobalConfig.mutex;
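Illustration only, outside the patch: a short usage sketch for the threading-mode cases handled above. sqlite3_config() may only be called before sqlite3_initialize() (or after sqlite3_shutdown()); whether the call succeeds depends on how the library was built — with SQLITE_THREADSAFE=0 the option is compiled out and an error comes back.

/* Ask for fully serialized access before the library starts up. */
#include <sqlite3.h>
#include <stdio.h>

int main(void){
  /* Serialized mode: both the core and the per-connection mutexes are
  ** enabled (bCoreMutex = bFullMutex = 1 in the case above). */
  int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "cannot change threading mode: %d\n", rc);
  }
  return sqlite3_initialize();
}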
@@ -131209,61 +119369,37 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
}
#endif
+
case SQLITE_CONFIG_MALLOC: {
- /* EVIDENCE-OF: R-55594-21030 The SQLITE_CONFIG_MALLOC option takes a
- ** single argument which is a pointer to an instance of the
- ** sqlite3_mem_methods structure. The argument specifies alternative
- ** low-level memory allocation routines to be used in place of the memory
- ** allocation routines built into SQLite. */
+ /* Specify an alternative malloc implementation */
sqlite3GlobalConfig.m = *va_arg(ap, sqlite3_mem_methods*);
break;
}
case SQLITE_CONFIG_GETMALLOC: {
- /* EVIDENCE-OF: R-51213-46414 The SQLITE_CONFIG_GETMALLOC option takes a
- ** single argument which is a pointer to an instance of the
- ** sqlite3_mem_methods structure. The sqlite3_mem_methods structure is
- ** filled with the currently defined memory allocation routines. */
+ /* Retrieve the current malloc() implementation */
if( sqlite3GlobalConfig.m.xMalloc==0 ) sqlite3MemSetDefault();
*va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m;
break;
}
case SQLITE_CONFIG_MEMSTATUS: {
- /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes
- ** single argument of type int, interpreted as a boolean, which enables
- ** or disables the collection of memory allocation statistics. */
+ /* Enable or disable the malloc status collection */
sqlite3GlobalConfig.bMemstat = va_arg(ap, int);
break;
}
case SQLITE_CONFIG_SCRATCH: {
- /* EVIDENCE-OF: R-08404-60887 There are three arguments to
- ** SQLITE_CONFIG_SCRATCH: A pointer an 8-byte aligned memory buffer from
- ** which the scratch allocations will be drawn, the size of each scratch
- ** allocation (sz), and the maximum number of scratch allocations (N). */
+ /* Designate a buffer for scratch memory space */
sqlite3GlobalConfig.pScratch = va_arg(ap, void*);
sqlite3GlobalConfig.szScratch = va_arg(ap, int);
sqlite3GlobalConfig.nScratch = va_arg(ap, int);
break;
}
case SQLITE_CONFIG_PAGECACHE: {
- /* EVIDENCE-OF: R-31408-40510 There are three arguments to
- ** SQLITE_CONFIG_PAGECACHE: A pointer to 8-byte aligned memory, the size
- ** of each page buffer (sz), and the number of pages (N). */
+ /* Designate a buffer for page cache memory space */
sqlite3GlobalConfig.pPage = va_arg(ap, void*);
sqlite3GlobalConfig.szPage = va_arg(ap, int);
sqlite3GlobalConfig.nPage = va_arg(ap, int);
break;
}
- case SQLITE_CONFIG_PCACHE_HDRSZ: {
- /* EVIDENCE-OF: R-39100-27317 The SQLITE_CONFIG_PCACHE_HDRSZ option takes
- ** a single parameter which is a pointer to an integer and writes into
- ** that integer the number of extra bytes per page required for each page
- ** in SQLITE_CONFIG_PAGECACHE. */
- *va_arg(ap, int*) =
- sqlite3HeaderSizeBtree() +
- sqlite3HeaderSizePcache() +
- sqlite3HeaderSizePcache1();
- break;
- }
case SQLITE_CONFIG_PCACHE: {
/* no-op */
@@ -131276,18 +119412,11 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
}
case SQLITE_CONFIG_PCACHE2: {
- /* EVIDENCE-OF: R-63325-48378 The SQLITE_CONFIG_PCACHE2 option takes a
- ** single argument which is a pointer to an sqlite3_pcache_methods2
- ** object. This object specifies the interface to a custom page cache
- ** implementation. */
+ /* Specify an alternative page cache implementation */
sqlite3GlobalConfig.pcache2 = *va_arg(ap, sqlite3_pcache_methods2*);
break;
}
case SQLITE_CONFIG_GETPCACHE2: {
- /* EVIDENCE-OF: R-22035-46182 The SQLITE_CONFIG_GETPCACHE2 option takes a
- ** single argument which is a pointer to an sqlite3_pcache_methods2
- ** object. SQLite copies of the current page cache implementation into
- ** that object. */
if( sqlite3GlobalConfig.pcache2.xInit==0 ){
sqlite3PCacheSetDefault();
}
@@ -131295,15 +119424,9 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
break;
}
-/* EVIDENCE-OF: R-06626-12911 The SQLITE_CONFIG_HEAP option is only
-** available if SQLite is compiled with either SQLITE_ENABLE_MEMSYS3 or
-** SQLITE_ENABLE_MEMSYS5 and returns SQLITE_ERROR if invoked otherwise. */
#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5)
case SQLITE_CONFIG_HEAP: {
- /* EVIDENCE-OF: R-19854-42126 There are three arguments to
- ** SQLITE_CONFIG_HEAP: An 8-byte aligned pointer to the memory, the
- ** number of bytes in the memory buffer, and the minimum allocation size.
- */
+ /* Designate a buffer for heap memory space */
sqlite3GlobalConfig.pHeap = va_arg(ap, void*);
sqlite3GlobalConfig.nHeap = va_arg(ap, int);
sqlite3GlobalConfig.mnReq = va_arg(ap, int);
@@ -131316,19 +119439,17 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
}
if( sqlite3GlobalConfig.pHeap==0 ){
- /* EVIDENCE-OF: R-49920-60189 If the first pointer (the memory pointer)
- ** is NULL, then SQLite reverts to using its default memory allocator
- ** (the system malloc() implementation), undoing any prior invocation of
- ** SQLITE_CONFIG_MALLOC.
- **
- ** Setting sqlite3GlobalConfig.m to all zeros will cause malloc to
- ** revert to its default implementation when sqlite3_initialize() is run
+ /* If the heap pointer is NULL, then restore the malloc implementation
+ ** back to NULL pointers too. This will cause the malloc to go
+ ** back to its default implementation when sqlite3_initialize() is
+ ** run.
*/
memset(&sqlite3GlobalConfig.m, 0, sizeof(sqlite3GlobalConfig.m));
}else{
- /* EVIDENCE-OF: R-61006-08918 If the memory pointer is not NULL then the
- ** alternative memory allocator is engaged to handle all of SQLites
- ** memory allocation needs. */
+ /* If the heap pointer is not NULL, then install one of the
+ ** mem5.c/mem3.c methods. The enclosing #if guarantees at
+ ** least one of these methods is currently enabled.
+ */
#ifdef SQLITE_ENABLE_MEMSYS3
sqlite3GlobalConfig.m = *sqlite3MemGetMemsys3();
#endif
@@ -131361,25 +119482,12 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
break;
}
- /* EVIDENCE-OF: R-55548-33817 The compile-time setting for URI filenames
- ** can be changed at start-time using the
- ** sqlite3_config(SQLITE_CONFIG_URI,1) or
- ** sqlite3_config(SQLITE_CONFIG_URI,0) configuration calls.
- */
case SQLITE_CONFIG_URI: {
- /* EVIDENCE-OF: R-25451-61125 The SQLITE_CONFIG_URI option takes a single
- ** argument of type int. If non-zero, then URI handling is globally
- ** enabled. If the parameter is zero, then URI handling is globally
- ** disabled. */
sqlite3GlobalConfig.bOpenUri = va_arg(ap, int);
break;
}
case SQLITE_CONFIG_COVERING_INDEX_SCAN: {
- /* EVIDENCE-OF: R-36592-02772 The SQLITE_CONFIG_COVERING_INDEX_SCAN
- ** option takes a single integer argument which is interpreted as a
- ** boolean in order to enable or disable the use of covering indices for
- ** full table scans in the query optimizer. */
sqlite3GlobalConfig.bUseCis = va_arg(ap, int);
break;
}
@@ -131394,45 +119502,25 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
#endif
case SQLITE_CONFIG_MMAP_SIZE: {
- /* EVIDENCE-OF: R-58063-38258 SQLITE_CONFIG_MMAP_SIZE takes two 64-bit
- ** integer (sqlite3_int64) values that are the default mmap size limit
- ** (the default setting for PRAGMA mmap_size) and the maximum allowed
- ** mmap size limit. */
sqlite3_int64 szMmap = va_arg(ap, sqlite3_int64);
sqlite3_int64 mxMmap = va_arg(ap, sqlite3_int64);
- /* EVIDENCE-OF: R-53367-43190 If either argument to this option is
- ** negative, then that argument is changed to its compile-time default.
- **
- ** EVIDENCE-OF: R-34993-45031 The maximum allowed mmap size will be
- ** silently truncated if necessary so that it does not exceed the
- ** compile-time maximum mmap size set by the SQLITE_MAX_MMAP_SIZE
- ** compile-time option.
- */
if( mxMmap<0 || mxMmap>SQLITE_MAX_MMAP_SIZE ){
mxMmap = SQLITE_MAX_MMAP_SIZE;
}
+ sqlite3GlobalConfig.mxMmap = mxMmap;
if( szMmap<0 ) szMmap = SQLITE_DEFAULT_MMAP_SIZE;
if( szMmap>mxMmap) szMmap = mxMmap;
- sqlite3GlobalConfig.mxMmap = mxMmap;
sqlite3GlobalConfig.szMmap = szMmap;
break;
}
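
/* Editor's sketch, not part of this diff: setting the start-time mmap limits
** handled by the case above. Both arguments are sqlite3_int64; the 64 MiB /
** 256 MiB values are illustrative assumptions.
*/
static int sketchConfigureMmap(void){
  sqlite3_int64 szDefault = (sqlite3_int64)64*1024*1024;   /* default mmap_size */
  sqlite3_int64 szMax     = (sqlite3_int64)256*1024*1024;  /* upper bound */
  return sqlite3_config(SQLITE_CONFIG_MMAP_SIZE, szDefault, szMax);
}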
-#if SQLITE_OS_WIN && defined(SQLITE_WIN32_MALLOC) /* IMP: R-04780-55815 */
+#if SQLITE_OS_WIN && defined(SQLITE_WIN32_MALLOC)
case SQLITE_CONFIG_WIN32_HEAPSIZE: {
- /* EVIDENCE-OF: R-34926-03360 SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit
- ** unsigned integer value that specifies the maximum size of the created
- ** heap. */
sqlite3GlobalConfig.nHeap = va_arg(ap, int);
break;
}
#endif
- case SQLITE_CONFIG_PMASZ: {
- sqlite3GlobalConfig.szPma = va_arg(ap, unsigned int);
- break;
- }
-
default: {
rc = SQLITE_ERROR;
break;
@@ -131454,7 +119542,6 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
** the lookaside memory.
*/
static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){
-#ifndef SQLITE_OMIT_LOOKASIDE
void *pStart;
if( db->lookaside.nOut ){
return SQLITE_BUSY;
@@ -131500,25 +119587,17 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){
db->lookaside.bEnabled = 1;
db->lookaside.bMalloced = pBuf==0 ?1:0;
}else{
- db->lookaside.pStart = db;
- db->lookaside.pEnd = db;
+ db->lookaside.pEnd = 0;
db->lookaside.bEnabled = 0;
db->lookaside.bMalloced = 0;
}
-#endif /* SQLITE_OMIT_LOOKASIDE */
return SQLITE_OK;
}
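
/* Editor's sketch, not part of this diff: setupLookaside() above is reached via
** sqlite3_db_config(); the slot size (512) and count (128) are illustrative
** assumptions.
*/
static int sketchEnableLookaside(sqlite3 *db){
  static char aLookaside[512*128];   /* hypothetical 128 slots of 512 bytes each */
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, aLookaside, 512, 128);
}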
/*
** Return the mutex associated with a database connection.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3 *db){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3 *db){
return db->mutex;
}
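
/* Editor's sketch, not part of this diff: the connection mutex returned above
** can serialize direct use of a shared handle. sqlite3_mutex_enter(0) is a
** documented no-op, so this also works when no mutex is allocated.
*/
static void sketchWithDbLock(sqlite3 *db, void (*xWork)(sqlite3*)){
  sqlite3_mutex_enter(sqlite3_db_mutex(db));
  xWork(db);
  sqlite3_mutex_leave(sqlite3_db_mutex(db));
}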
@@ -131526,12 +119605,8 @@ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3 *db){
** Free up as much memory as we can from the given database
** connection.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3 *db){
+SQLITE_API int sqlite3_db_release_memory(sqlite3 *db){
int i;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
sqlite3BtreeEnterAll(db);
for(i=0; i<db->nDb; i++){
@@ -131549,7 +119624,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3 *db){
/*
** Configuration settings for an individual database connection
*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3 *db, int op, ...){
+SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
va_list ap;
int rc;
va_start(ap, op);
@@ -131621,20 +119696,13 @@ static int binCollFunc(
){
int rc, n;
n = nKey1<nKey2 ? nKey1 : nKey2;
- /* EVIDENCE-OF: R-65033-28449 The built-in BINARY collation compares
- ** strings byte by byte using the memcmp() function from the standard C
- ** library. */
rc = memcmp(pKey1, pKey2, n);
if( rc==0 ){
if( padFlag
&& allSpaces(((char*)pKey1)+n, nKey1-n)
&& allSpaces(((char*)pKey2)+n, nKey2-n)
){
- /* EVIDENCE-OF: R-31624-24737 RTRIM is like BINARY except that extra
- ** spaces at the end of either string do not change the result. In other
- ** words, strings will compare equal to one another as long as they
- ** differ only in the number of spaces at the end.
- */
+ /* Leave rc unchanged at 0 */
}else{
rc = nKey1 - nKey2;
}
@@ -131668,39 +119736,21 @@ static int nocaseCollatingFunc(
/*
** Return the ROWID of the most recent insert
*/
-SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3 *db){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API sqlite_int64 sqlite3_last_insert_rowid(sqlite3 *db){
return db->lastRowid;
}
/*
** Return the number of changes in the most recent call to sqlite3_exec().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3 *db){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API int sqlite3_changes(sqlite3 *db){
return db->nChange;
}
/*
** Return the number of changes since the database handle was opened.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3 *db){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API int sqlite3_total_changes(sqlite3 *db){
return db->nTotalChange;
}
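
/* Editor's sketch, not part of this diff: typical use of the three accessors
** above after a write; the table name and SQL text are illustrative
** assumptions.
*/
static void sketchReportWrite(sqlite3 *db){
  int nRows, nTotal;
  sqlite3_int64 lastRowid;
  sqlite3_exec(db, "UPDATE t SET x=x+1", 0, 0, 0);  /* hypothetical statement */
  nRows     = sqlite3_changes(db);                  /* rows touched by that UPDATE */
  nTotal    = sqlite3_total_changes(db);            /* rows touched since the handle opened */
  lastRowid = sqlite3_last_insert_rowid(db);        /* rowid of the most recent INSERT */
  (void)nRows; (void)nTotal; (void)lastRowid;
}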
@@ -131744,24 +119794,17 @@ static void functionDestroy(sqlite3 *db, FuncDef *p){
static void disconnectAllVtab(sqlite3 *db){
#ifndef SQLITE_OMIT_VIRTUALTABLE
int i;
- HashElem *p;
sqlite3BtreeEnterAll(db);
for(i=0; i<db->nDb; i++){
Schema *pSchema = db->aDb[i].pSchema;
if( db->aDb[i].pSchema ){
+ HashElem *p;
for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
Table *pTab = (Table *)sqliteHashData(p);
if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab);
}
}
}
- for(p=sqliteHashFirst(&db->aModule); p; p=sqliteHashNext(p)){
- Module *pMod = (Module *)sqliteHashData(p);
- if( pMod->pEpoTab ){
- sqlite3VtabDisconnect(db, pMod->pEpoTab);
- }
- }
- sqlite3VtabUnlockList(db);
sqlite3BtreeLeaveAll(db);
#else
UNUSED_PARAMETER(db);
@@ -131788,8 +119831,6 @@ static int connectionIsBusy(sqlite3 *db){
*/
static int sqlite3Close(sqlite3 *db, int forceZombie){
if( !db ){
- /* EVIDENCE-OF: R-63257-11740 Calling sqlite3_close() or
- ** sqlite3_close_v2() with a NULL pointer argument is a harmless no-op. */
return SQLITE_OK;
}
if( !sqlite3SafetyCheckSickOrOk(db) ){
@@ -131813,7 +119854,7 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){
** SQLITE_BUSY if the connection can not be closed immediately.
*/
if( !forceZombie && connectionIsBusy(db) ){
- sqlite3ErrorWithMsg(db, SQLITE_BUSY, "unable to close due to unfinalized "
+ sqlite3Error(db, SQLITE_BUSY, "unable to close due to unfinalized "
"statements or unfinished backups");
sqlite3_mutex_leave(db->mutex);
return SQLITE_BUSY;
@@ -131842,8 +119883,8 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){
** unclosed resources, and arranges for deallocation when the last
** prepare statement or sqlite3_backup closes.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); }
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); }
+SQLITE_API int sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); }
+SQLITE_API int sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); }
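
/* Editor's sketch, not part of this diff: sqlite3_close() reports SQLITE_BUSY
** while statements or backups are still open, whereas sqlite3_close_v2() marks
** the handle as a zombie and frees it once the last such object is closed.
*/
static void sketchShutdown(sqlite3 *db){
  if( sqlite3_close(db)==SQLITE_BUSY ){
    sqlite3_close_v2(db);   /* defer deallocation until dependent objects close */
  }
}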
/*
@@ -131938,19 +119979,16 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){
if( pMod->xDestroy ){
pMod->xDestroy(pMod->pAux);
}
- sqlite3VtabEponymousTableClear(db, pMod);
sqlite3DbFree(db, pMod);
}
sqlite3HashClear(&db->aModule);
#endif
- sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */
- sqlite3ValueFree(db->pErr);
+ sqlite3Error(db, SQLITE_OK, 0); /* Deallocates any cached error strings. */
+ if( db->pErr ){
+ sqlite3ValueFree(db->pErr);
+ }
sqlite3CloseExtensions(db);
-#if SQLITE_USER_AUTHENTICATION
- sqlite3_free(db->auth.zAuthUser);
- sqlite3_free(db->auth.zAuthPW);
-#endif
db->magic = SQLITE_MAGIC_ERROR;
@@ -131973,15 +120011,13 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){
/*
** Rollback all database files. If tripCode is not SQLITE_OK, then
-** any write cursors are invalidated ("tripped" - as in "tripping a circuit
+** any open cursors are invalidated ("tripped" - as in "tripping a circuit
** breaker") and made to return tripCode if there are any further
-** attempts to use that cursor. Read cursors remain open and valid
-** but are "saved" in case the table pages are moved around.
+** attempts to use that cursor.
*/
SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
int i;
int inTrans = 0;
- int schemaChange;
assert( sqlite3_mutex_held(db->mutex) );
sqlite3BeginBenignMalloc();
@@ -131992,7 +120028,6 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
** the database rollback and schema reset, which can cause false
** corruption reports in some cases. */
sqlite3BtreeEnterAll(db);
- schemaChange = (db->flags & SQLITE_InternChanges)!=0 && db->init.busy==0;
for(i=0; i<db->nDb; i++){
Btree *p = db->aDb[i].pBt;
@@ -132000,7 +120035,7 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
if( sqlite3BtreeIsInTrans(p) ){
inTrans = 1;
}
- sqlite3BtreeRollback(p, tripCode, !schemaChange);
+ sqlite3BtreeRollback(p, tripCode);
}
}
sqlite3VtabRollback(db);
@@ -132027,7 +120062,8 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
** Return a static string containing the name corresponding to the error code
** specified in the argument.
*/
-#if defined(SQLITE_NEED_ERR_NAME)
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) || \
+ defined(SQLITE_DEBUG_OS_TRACE)
SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
const char *zName = 0;
int i, origRc = rc;
@@ -132049,7 +120085,6 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
case SQLITE_READONLY_RECOVERY: zName = "SQLITE_READONLY_RECOVERY"; break;
case SQLITE_READONLY_CANTLOCK: zName = "SQLITE_READONLY_CANTLOCK"; break;
case SQLITE_READONLY_ROLLBACK: zName = "SQLITE_READONLY_ROLLBACK"; break;
- case SQLITE_READONLY_DBMOVED: zName = "SQLITE_READONLY_DBMOVED"; break;
case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break;
case SQLITE_IOERR: zName = "SQLITE_IOERR"; break;
case SQLITE_IOERR_READ: zName = "SQLITE_IOERR_READ"; break;
@@ -132062,6 +120097,7 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
case SQLITE_IOERR_UNLOCK: zName = "SQLITE_IOERR_UNLOCK"; break;
case SQLITE_IOERR_RDLOCK: zName = "SQLITE_IOERR_RDLOCK"; break;
case SQLITE_IOERR_DELETE: zName = "SQLITE_IOERR_DELETE"; break;
+ case SQLITE_IOERR_BLOCKED: zName = "SQLITE_IOERR_BLOCKED"; break;
case SQLITE_IOERR_NOMEM: zName = "SQLITE_IOERR_NOMEM"; break;
case SQLITE_IOERR_ACCESS: zName = "SQLITE_IOERR_ACCESS"; break;
case SQLITE_IOERR_CHECKRESERVEDLOCK:
@@ -132193,7 +120229,7 @@ static int sqliteDefaultBusyCallback(
void *ptr, /* Database connection */
int count /* Number of times table has been busy */
){
-#if SQLITE_OS_WIN || HAVE_USLEEP
+#if SQLITE_OS_WIN || (defined(HAVE_USLEEP) && HAVE_USLEEP)
static const u8 delays[] =
{ 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 };
static const u8 totals[] =
@@ -132251,14 +120287,11 @@ SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){
** This routine sets the busy callback for an Sqlite database to the
** given callback function with the given argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(
+SQLITE_API int sqlite3_busy_handler(
sqlite3 *db,
int (*xBusy)(void*,int),
void *pArg
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
db->busyHandler.xFunc = xBusy;
db->busyHandler.pArg = pArg;
@@ -132274,18 +120307,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(
** given callback function with the given argument. The progress callback will
** be invoked every nOps opcodes.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(
+SQLITE_API void sqlite3_progress_handler(
sqlite3 *db,
int nOps,
int (*xProgress)(void*),
void *pArg
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
if( nOps>0 ){
db->xProgress = xProgress;
@@ -132305,10 +120332,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(
** This routine installs a default busy handler that waits for the
** specified number of milliseconds before returning 0.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3 *db, int ms){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
+SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){
if( ms>0 ){
sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db);
db->busyTimeout = ms;
@@ -132321,13 +120345,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3 *db, int ms){
/*
** Cause any pending operation to stop at its earliest opportunity.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3 *db){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return;
- }
-#endif
+SQLITE_API void sqlite3_interrupt(sqlite3 *db){
db->u1.isInterrupted = 1;
}
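
/* Editor's sketch, not part of this diff: a caller-supplied busy handler as an
** alternative to sqlite3_busy_timeout(); the retry limit of 10 is an
** illustrative assumption.
*/
static int sketchBusyHandler(void *pArg, int nPrior){
  (void)pArg;
  return nPrior<10;   /* non-zero means "retry"; zero gives up with SQLITE_BUSY */
}
static void sketchInstallBusyHandler(sqlite3 *db){
  sqlite3_busy_handler(db, sketchBusyHandler, 0);
  /* Alternatively: sqlite3_busy_timeout(db, 2000); the two replace each other. */
}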
@@ -132351,7 +120369,6 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
){
FuncDef *p;
int nName;
- int extraFlags;
assert( sqlite3_mutex_held(db->mutex) );
if( zFunctionName==0 ||
@@ -132362,10 +120379,6 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
(255<(nName = sqlite3Strlen30( zFunctionName))) ){
return SQLITE_MISUSE_BKPT;
}
-
- assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC );
- extraFlags = enc & SQLITE_DETERMINISTIC;
- enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY);
#ifndef SQLITE_OMIT_UTF16
/* If SQLITE_UTF16 is specified as the encoding type, transform this
@@ -132379,10 +120392,10 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
enc = SQLITE_UTF16NATIVE;
}else if( enc==SQLITE_ANY ){
int rc;
- rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8|extraFlags,
+ rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8,
pUserData, xFunc, xStep, xFinal, pDestructor);
if( rc==SQLITE_OK ){
- rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE|extraFlags,
+ rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE,
pUserData, xFunc, xStep, xFinal, pDestructor);
}
if( rc!=SQLITE_OK ){
@@ -132402,7 +120415,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 0);
if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==enc && p->nArg==nArg ){
if( db->nVdbeActive ){
- sqlite3ErrorWithMsg(db, SQLITE_BUSY,
+ sqlite3Error(db, SQLITE_BUSY,
"unable to delete/modify user-function due to active statements");
assert( !db->mallocFailed );
return SQLITE_BUSY;
@@ -132425,8 +120438,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
pDestructor->nRef++;
}
p->pDestructor = pDestructor;
- p->funcFlags = (p->funcFlags & SQLITE_FUNC_ENCMASK) | extraFlags;
- testcase( p->funcFlags & SQLITE_DETERMINISTIC );
+ p->funcFlags &= SQLITE_FUNC_ENCMASK;
p->xFunc = xFunc;
p->xStep = xStep;
p->xFinalize = xFinal;
@@ -132438,7 +120450,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
/*
** Create new user functions.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunc,
int nArg,
@@ -132452,7 +120464,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
xFinal, 0);
}
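
/* Editor's sketch, not part of this diff: registering a trivial scalar function
** through the wrapper above; the name "plus_one" is an illustrative assumption.
*/
static void sketchPlusOne(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_int64(ctx, sqlite3_value_int64(argv[0]) + 1);
}
static int sketchRegisterPlusOne(sqlite3 *db){
  return sqlite3_create_function(db, "plus_one", 1, SQLITE_UTF8, 0,
                                 sketchPlusOne, 0, 0);
}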
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
+SQLITE_API int sqlite3_create_function_v2(
sqlite3 *db,
const char *zFunc,
int nArg,
@@ -132465,12 +120477,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
){
int rc = SQLITE_ERROR;
FuncDestructor *pArg = 0;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
if( xDestroy ){
pArg = (FuncDestructor *)sqlite3DbMallocZero(db, sizeof(FuncDestructor));
@@ -132495,7 +120501,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
+SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
const void *zFunctionName,
int nArg,
@@ -132507,10 +120513,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
){
int rc;
char *zFunc8;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zFunctionName==0 ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
assert( !db->mallocFailed );
zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1, SQLITE_UTF16NATIVE);
@@ -132535,19 +120537,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
** A global function must exist in order for name resolution to work
** properly.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(
+SQLITE_API int sqlite3_overload_function(
sqlite3 *db,
const char *zName,
int nArg
){
int nName = sqlite3Strlen30(zName);
int rc = SQLITE_OK;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zName==0 || nArg<-2 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
if( sqlite3FindFunction(db, zName, nName, nArg, SQLITE_UTF8, 0)==0 ){
rc = sqlite3CreateFunc(db, zName, nArg, SQLITE_UTF8,
@@ -132567,15 +120563,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(
** trace is a pointer to a function that is invoked at the start of each
** SQL statement.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){
+SQLITE_API void *sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){
void *pOld;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
pOld = db->pTraceArg;
db->xTrace = xTrace;
@@ -132591,19 +120580,12 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,
** profile is a pointer to a function that is invoked at the conclusion of
** each SQL statement that is run.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_profile(
+SQLITE_API void *sqlite3_profile(
sqlite3 *db,
void (*xProfile)(void*,const char*,sqlite_uint64),
void *pArg
){
void *pOld;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
pOld = db->pProfileArg;
db->xProfile = xProfile;
@@ -132618,19 +120600,12 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_profile(
** If the invoked function returns non-zero, then the commit becomes a
** rollback.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(
+SQLITE_API void *sqlite3_commit_hook(
sqlite3 *db, /* Attach the hook to this database */
int (*xCallback)(void*), /* Function to invoke on each commit */
void *pArg /* Argument to the function */
){
void *pOld;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
pOld = db->pCommitArg;
db->xCommitCallback = xCallback;
@@ -132643,19 +120618,12 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(
** Register a callback to be invoked each time a row is updated,
** inserted or deleted using this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
+SQLITE_API void *sqlite3_update_hook(
sqlite3 *db, /* Attach the hook to this database */
void (*xCallback)(void*,int,char const *,char const *,sqlite_int64),
void *pArg /* Argument to the function */
){
void *pRet;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
pRet = db->pUpdateArg;
db->xUpdateCallback = xCallback;
@@ -132668,19 +120636,12 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
** Register a callback to be invoked each time a transaction is rolled
** back by this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(
+SQLITE_API void *sqlite3_rollback_hook(
sqlite3 *db, /* Attach the hook to this database */
void (*xCallback)(void*), /* Callback function */
void *pArg /* Argument to the function */
){
void *pRet;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
pRet = db->pRollbackArg;
db->xRollbackCallback = xCallback;
@@ -132722,14 +120683,11 @@ SQLITE_PRIVATE int sqlite3WalDefaultHook(
** using sqlite3_wal_hook() disables the automatic checkpoint mechanism
** configured by this function.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){
#ifdef SQLITE_OMIT_WAL
UNUSED_PARAMETER(db);
UNUSED_PARAMETER(nFrame);
#else
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
if( nFrame>0 ){
sqlite3_wal_hook(db, sqlite3WalDefaultHook, SQLITE_INT_TO_PTR(nFrame));
}else{
@@ -132743,19 +120701,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame
** Register a callback to be invoked each time a transaction is written
** into the write-ahead-log by this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
+SQLITE_API void *sqlite3_wal_hook(
sqlite3 *db, /* Attach the hook to this db handle */
int(*xCallback)(void *, sqlite3*, const char*, int),
void *pArg /* First argument passed to xCallback() */
){
#ifndef SQLITE_OMIT_WAL
void *pRet;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
sqlite3_mutex_enter(db->mutex);
pRet = db->pWalArg;
db->xWalCallback = xCallback;
@@ -132770,7 +120722,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
/*
** Checkpoint database zDb.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
+SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of attached database (or NULL) */
int eMode, /* SQLITE_CHECKPOINT_* value */
@@ -132783,21 +120735,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
int rc; /* Return code */
int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
-
/* Initialize the output variables to -1 in case an error occurs. */
if( pnLog ) *pnLog = -1;
if( pnCkpt ) *pnCkpt = -1;
- assert( SQLITE_CHECKPOINT_PASSIVE==0 );
- assert( SQLITE_CHECKPOINT_FULL==1 );
- assert( SQLITE_CHECKPOINT_RESTART==2 );
- assert( SQLITE_CHECKPOINT_TRUNCATE==3 );
- if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){
- /* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint
- ** mode: */
+ assert( SQLITE_CHECKPOINT_FULL>SQLITE_CHECKPOINT_PASSIVE );
+ assert( SQLITE_CHECKPOINT_FULL<SQLITE_CHECKPOINT_RESTART );
+ assert( SQLITE_CHECKPOINT_PASSIVE+2==SQLITE_CHECKPOINT_RESTART );
+ if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_RESTART ){
return SQLITE_MISUSE;
}
@@ -132807,11 +120752,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
}
if( iDb<0 ){
rc = SQLITE_ERROR;
- sqlite3ErrorWithMsg(db, SQLITE_ERROR, "unknown database: %s", zDb);
+ sqlite3Error(db, SQLITE_ERROR, "unknown database: %s", zDb);
}else{
- db->busyHandler.nBusy = 0;
rc = sqlite3Checkpoint(db, iDb, eMode, pnLog, pnCkpt);
- sqlite3Error(db, rc);
+ sqlite3Error(db, rc, 0);
}
rc = sqlite3ApiExit(db, rc);
sqlite3_mutex_leave(db->mutex);
@@ -132825,10 +120769,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
** to contains a zero-length string, all attached databases are
** checkpointed.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
- /* EVIDENCE-OF: R-41613-20553 The sqlite3_wal_checkpoint(D,X) is equivalent to
- ** sqlite3_wal_checkpoint_v2(D,X,SQLITE_CHECKPOINT_PASSIVE,0,0). */
- return sqlite3_wal_checkpoint_v2(db,zDb,SQLITE_CHECKPOINT_PASSIVE,0,0);
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
+ return sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0);
}
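
/* Editor's sketch, not part of this diff: requesting a FULL checkpoint of the
** "main" database through the v2 interface above and reading back the WAL
** frame counts.
*/
static int sketchCheckpointMain(sqlite3 *db){
  int nLog = 0, nCkpt = 0;
  int rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_FULL,
                                     &nLog, &nCkpt);
  (void)nLog; (void)nCkpt;   /* frames in the log / frames checkpointed */
  return rc;
}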
#ifndef SQLITE_OMIT_WAL
@@ -132903,11 +120845,9 @@ SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){
return ( db->temp_store!=1 );
#endif
#if SQLITE_TEMP_STORE==3
- UNUSED_PARAMETER(db);
return 1;
#endif
#if SQLITE_TEMP_STORE<1 || SQLITE_TEMP_STORE>3
- UNUSED_PARAMETER(db);
return 0;
#endif
}
@@ -132916,7 +120856,7 @@ SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){
** Return UTF-8 encoded English language explanation of the most recent
** error.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3 *db){
+SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){
const char *z;
if( !db ){
return sqlite3ErrStr(SQLITE_NOMEM);
@@ -132928,7 +120868,6 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3 *db){
if( db->mallocFailed ){
z = sqlite3ErrStr(SQLITE_NOMEM);
}else{
- testcase( db->pErr==0 );
z = (char*)sqlite3_value_text(db->pErr);
assert( !db->mallocFailed );
if( z==0 ){
@@ -132944,7 +120883,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3 *db){
** Return UTF-16 encoded English language explanation of the most recent
** error.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3 *db){
+SQLITE_API const void *sqlite3_errmsg16(sqlite3 *db){
static const u16 outOfMem[] = {
'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0
};
@@ -132970,7 +120909,8 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3 *db){
}else{
z = sqlite3_value_text16(db->pErr);
if( z==0 ){
- sqlite3ErrorWithMsg(db, db->errCode, sqlite3ErrStr(db->errCode));
+ sqlite3ValueSetStr(db->pErr, -1, sqlite3ErrStr(db->errCode),
+ SQLITE_UTF8, SQLITE_STATIC);
z = sqlite3_value_text16(db->pErr);
}
/* A malloc() may have failed within the call to sqlite3_value_text16()
@@ -132989,7 +120929,7 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3 *db){
** Return the most recent error code generated by an SQLite routine. If NULL is
** passed to this function, we assume a malloc() failed during sqlite3_open().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db){
+SQLITE_API int sqlite3_errcode(sqlite3 *db){
if( db && !sqlite3SafetyCheckSickOrOk(db) ){
return SQLITE_MISUSE_BKPT;
}
@@ -132998,7 +120938,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db){
}
return db->errCode & db->errMask;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db){
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db){
if( db && !sqlite3SafetyCheckSickOrOk(db) ){
return SQLITE_MISUSE_BKPT;
}
@@ -133013,11 +120953,37 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db){
** argument. For now, this simply calls the internal sqlite3ErrStr()
** function.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int rc){
+SQLITE_API const char *sqlite3_errstr(int rc){
return sqlite3ErrStr(rc);
}
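
/* Editor's sketch, not part of this diff: combining the per-connection and
** per-code accessors above when a call fails.
*/
static void sketchInspectError(sqlite3 *db, int rc){
  int xerr          = sqlite3_extended_errcode(db); /* extended code of the last failure */
  const char *zCode = sqlite3_errstr(rc);           /* static text for any result code */
  const char *zMsg  = sqlite3_errmsg(db);           /* UTF-8 message for the last failure */
  (void)xerr; (void)zCode; (void)zMsg;
}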
/*
+** Invalidate all cached KeyInfo objects for database connection "db"
+*/
+static void invalidateCachedKeyInfo(sqlite3 *db){
+ Db *pDb; /* A single database */
+ int iDb; /* The database index number */
+ HashElem *k; /* For looping over tables in pDb */
+ Table *pTab; /* A table in the database */
+ Index *pIdx; /* Each index */
+
+ for(iDb=0, pDb=db->aDb; iDb<db->nDb; iDb++, pDb++){
+ if( pDb->pBt==0 ) continue;
+ sqlite3BtreeEnter(pDb->pBt);
+ for(k=sqliteHashFirst(&pDb->pSchema->tblHash); k; k=sqliteHashNext(k)){
+ pTab = (Table*)sqliteHashData(k);
+ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->pKeyInfo && pIdx->pKeyInfo->db==db ){
+ sqlite3KeyInfoUnref(pIdx->pKeyInfo);
+ pIdx->pKeyInfo = 0;
+ }
+ }
+ }
+ sqlite3BtreeLeave(pDb->pBt);
+ }
+}
+
+/*
** Create a new collating function for database "db". The name is zName
** and the encoding is enc.
*/
@@ -133031,6 +120997,7 @@ static int createCollation(
){
CollSeq *pColl;
int enc2;
+ int nName = sqlite3Strlen30(zName);
assert( sqlite3_mutex_held(db->mutex) );
@@ -133055,11 +121022,12 @@ static int createCollation(
pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 0);
if( pColl && pColl->xCmp ){
if( db->nVdbeActive ){
- sqlite3ErrorWithMsg(db, SQLITE_BUSY,
+ sqlite3Error(db, SQLITE_BUSY,
"unable to delete/modify collation sequence due to active statements");
return SQLITE_BUSY;
}
sqlite3ExpirePreparedStatements(db);
+ invalidateCachedKeyInfo(db);
/* If collation sequence pColl was created directly by a call to
** sqlite3_create_collation, and not generated by synthCollSeq(),
@@ -133068,7 +121036,7 @@ static int createCollation(
** to be called.
*/
if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){
- CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName);
+ CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName, nName);
int j;
for(j=0; j<3; j++){
CollSeq *p = &aColl[j];
@@ -133088,7 +121056,7 @@ static int createCollation(
pColl->pUser = pCtx;
pColl->xDel = xDel;
pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED));
- sqlite3Error(db, SQLITE_OK);
+ sqlite3Error(db, SQLITE_OK, 0);
return SQLITE_OK;
}
@@ -133108,9 +121076,8 @@ static const int aHardLimit[] = {
SQLITE_MAX_FUNCTION_ARG,
SQLITE_MAX_ATTACHED,
SQLITE_MAX_LIKE_PATTERN_LENGTH,
- SQLITE_MAX_VARIABLE_NUMBER, /* IMP: R-38091-32352 */
+ SQLITE_MAX_VARIABLE_NUMBER,
SQLITE_MAX_TRIGGER_DEPTH,
- SQLITE_MAX_WORKER_THREADS,
};
/*
@@ -133134,8 +121101,8 @@ static const int aHardLimit[] = {
#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>1000
# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 1000
#endif
-#if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125
-# error SQLITE_MAX_ATTACHED must be between 0 and 125
+#if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>62
+# error SQLITE_MAX_ATTACHED must be between 0 and 62
#endif
#if SQLITE_MAX_LIKE_PATTERN_LENGTH<1
# error SQLITE_MAX_LIKE_PATTERN_LENGTH must be at least 1
@@ -133146,9 +121113,6 @@ static const int aHardLimit[] = {
#if SQLITE_MAX_TRIGGER_DEPTH<1
# error SQLITE_MAX_TRIGGER_DEPTH must be at least 1
#endif
-#if SQLITE_MAX_WORKER_THREADS<0 || SQLITE_MAX_WORKER_THREADS>50
-# error SQLITE_MAX_WORKER_THREADS must be between 0 and 50
-#endif
/*
@@ -133161,15 +121125,9 @@ static const int aHardLimit[] = {
** It merely prevents new constructs that exceed the limit
** from forming.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3 *db, int limitId, int newLimit){
+SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){
int oldLimit;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return -1;
- }
-#endif
/* EVIDENCE-OF: R-30189-54097 For each limit category SQLITE_LIMIT_NAME
** there is a hard upper bound set at compile-time by a C preprocessor
@@ -133188,8 +121146,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3 *db, int limitId, int newLim
SQLITE_MAX_LIKE_PATTERN_LENGTH );
assert( aHardLimit[SQLITE_LIMIT_VARIABLE_NUMBER]==SQLITE_MAX_VARIABLE_NUMBER);
assert( aHardLimit[SQLITE_LIMIT_TRIGGER_DEPTH]==SQLITE_MAX_TRIGGER_DEPTH );
- assert( aHardLimit[SQLITE_LIMIT_WORKER_THREADS]==SQLITE_MAX_WORKER_THREADS );
- assert( SQLITE_LIMIT_WORKER_THREADS==(SQLITE_N_LIMIT-1) );
+ assert( SQLITE_LIMIT_TRIGGER_DEPTH==(SQLITE_N_LIMIT-1) );
if( limitId<0 || limitId>=SQLITE_N_LIMIT ){
@@ -133246,38 +121203,25 @@ SQLITE_PRIVATE int sqlite3ParseUri(
assert( *pzErrMsg==0 );
- if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */
- || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */
- && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */
+ if( ((flags & SQLITE_OPEN_URI) || sqlite3GlobalConfig.bOpenUri)
+ && nUri>=5 && memcmp(zUri, "file:", 5)==0
){
char *zOpt;
int eState; /* Parser state when parsing URI */
int iIn; /* Input character index */
int iOut = 0; /* Output character index */
- u64 nByte = nUri+2; /* Bytes of space to allocate */
+ int nByte = nUri+2; /* Bytes of space to allocate */
/* Make sure the SQLITE_OPEN_URI flag is set to indicate to the VFS xOpen
** method that there may be extra parameters following the file-name. */
flags |= SQLITE_OPEN_URI;
for(iIn=0; iIn<nUri; iIn++) nByte += (zUri[iIn]=='&');
- zFile = sqlite3_malloc64(nByte);
+ zFile = sqlite3_malloc(nByte);
if( !zFile ) return SQLITE_NOMEM;
iIn = 5;
-#ifdef SQLITE_ALLOW_URI_AUTHORITY
- if( strncmp(zUri+5, "///", 3)==0 ){
- iIn = 7;
- /* The following condition causes URIs with five leading / characters
- ** like file://///host/path to be converted into UNCs like //host/path.
- ** The correct URI for that UNC has only two or four leading / characters
- ** file://host/path or file:////host/path. But 5 leading slashes is a
- ** common error, we are told, so we handle it as a special case. */
- if( strncmp(zUri+7, "///", 3)==0 ){ iIn++; }
- }else if( strncmp(zUri+5, "//localhost/", 12)==0 ){
- iIn = 16;
- }
-#else
+#ifndef SQLITE_ALLOW_URI_AUTHORITY
/* Discard the scheme and authority segments of the URI. */
if( zUri[5]=='/' && zUri[6]=='/' ){
iIn = 7;
@@ -133427,7 +121371,7 @@ SQLITE_PRIVATE int sqlite3ParseUri(
}
}else{
- zFile = sqlite3_malloc64(nUri+2);
+ zFile = sqlite3_malloc(nUri+2);
if( !zFile ) return SQLITE_NOMEM;
memcpy(zFile, zUri, nUri);
zFile[nUri] = '\0';
@@ -133468,9 +121412,6 @@ static int openDatabase(
char *zOpen = 0; /* Filename argument to pass to BtreeOpen() */
char *zErrMsg = 0; /* Error message from sqlite3ParseUri() */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( ppDb==0 ) return SQLITE_MISUSE_BKPT;
-#endif
*ppDb = 0;
#ifndef SQLITE_OMIT_AUTOINIT
rc = sqlite3_initialize();
@@ -133493,9 +121434,7 @@ static int openDatabase(
testcase( (1<<(flags&7))==0x02 ); /* READONLY */
testcase( (1<<(flags&7))==0x04 ); /* READWRITE */
testcase( (1<<(flags&7))==0x40 ); /* READWRITE | CREATE */
- if( ((1<<(flags&7)) & 0x46)==0 ){
- return SQLITE_MISUSE_BKPT; /* IMP: R-65497-44594 */
- }
+ if( ((1<<(flags&7)) & 0x46)==0 ) return SQLITE_MISUSE_BKPT;
if( sqlite3GlobalConfig.bCoreMutex==0 ){
isThreadsafe = 0;
@@ -133554,19 +121493,14 @@ static int openDatabase(
assert( sizeof(db->aLimit)==sizeof(aHardLimit) );
memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit));
- db->aLimit[SQLITE_LIMIT_WORKER_THREADS] = SQLITE_DEFAULT_WORKER_THREADS;
db->autoCommit = 1;
db->nextAutovac = -1;
db->szMmap = sqlite3GlobalConfig.szMmap;
db->nextPagesize = 0;
- db->nMaxSorterMmap = 0x7FFFFFFF;
db->flags |= SQLITE_ShortColNames | SQLITE_EnableTrigger | SQLITE_CacheSpill
#if !defined(SQLITE_DEFAULT_AUTOMATIC_INDEX) || SQLITE_DEFAULT_AUTOMATIC_INDEX
| SQLITE_AutoIndex
#endif
-#if SQLITE_DEFAULT_CKPTFULLFSYNC
- | SQLITE_CkptFullFSync
-#endif
#if SQLITE_DEFAULT_FILE_FORMAT<4
| SQLITE_LegacyFileFmt
#endif
@@ -133579,12 +121513,6 @@ static int openDatabase(
#if defined(SQLITE_DEFAULT_FOREIGN_KEYS) && SQLITE_DEFAULT_FOREIGN_KEYS
| SQLITE_ForeignKeys
#endif
-#if defined(SQLITE_REVERSE_UNORDERED_SELECTS)
- | SQLITE_ReverseOrder
-#endif
-#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK)
- | SQLITE_CellSizeCk
-#endif
;
sqlite3HashInit(&db->aCollSeq);
#ifndef SQLITE_OMIT_VIRTUALTABLE
@@ -133594,30 +121522,26 @@ static int openDatabase(
/* Add the default collation sequence BINARY. BINARY works for both UTF-8
** and UTF-16, so add a version for each to avoid any unnecessary
** conversions. The only error that can occur here is a malloc() failure.
- **
- ** EVIDENCE-OF: R-52786-44878 SQLite defines three built-in collating
- ** functions:
*/
createCollation(db, "BINARY", SQLITE_UTF8, 0, binCollFunc, 0);
createCollation(db, "BINARY", SQLITE_UTF16BE, 0, binCollFunc, 0);
createCollation(db, "BINARY", SQLITE_UTF16LE, 0, binCollFunc, 0);
- createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0);
createCollation(db, "RTRIM", SQLITE_UTF8, (void*)1, binCollFunc, 0);
if( db->mallocFailed ){
goto opendb_out;
}
- /* EVIDENCE-OF: R-08308-17224 The default collating function for all
- ** strings is BINARY.
- */
db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 0);
assert( db->pDfltColl!=0 );
+ /* Also add a UTF-8 case-insensitive collation sequence. */
+ createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0);
+
/* Parse the filename/URI argument. */
db->openFlags = flags;
rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg);
if( rc!=SQLITE_OK ){
if( rc==SQLITE_NOMEM ) db->mallocFailed = 1;
- sqlite3ErrorWithMsg(db, rc, zErrMsg ? "%s" : 0, zErrMsg);
+ sqlite3Error(db, rc, zErrMsg ? "%s" : 0, zErrMsg);
sqlite3_free(zErrMsg);
goto opendb_out;
}
@@ -133629,15 +121553,13 @@ static int openDatabase(
if( rc==SQLITE_IOERR_NOMEM ){
rc = SQLITE_NOMEM;
}
- sqlite3Error(db, rc);
+ sqlite3Error(db, rc, 0);
goto opendb_out;
}
- sqlite3BtreeEnter(db->aDb[0].pBt);
db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt);
- if( !db->mallocFailed ) ENC(db) = SCHEMA_ENC(db);
- sqlite3BtreeLeave(db->aDb[0].pBt);
db->aDb[1].pSchema = sqlite3SchemaGet(db, 0);
+
/* The default safety_level for the main database is 'full'; for the temp
** database it is 'NONE'. This matches the pager layer defaults.
*/
@@ -133655,7 +121577,7 @@ static int openDatabase(
** database schema yet. This is delayed until the first time the database
** is accessed.
*/
- sqlite3Error(db, SQLITE_OK);
+ sqlite3Error(db, SQLITE_OK, 0);
sqlite3RegisterBuiltinFunctions(db);
/* Load automatic extensions - extensions that have been registered
@@ -133684,18 +121606,12 @@ static int openDatabase(
}
#endif
-#ifdef SQLITE_ENABLE_FTS3 /* automatically defined by SQLITE_ENABLE_FTS4 */
+#ifdef SQLITE_ENABLE_FTS3
if( !db->mallocFailed && rc==SQLITE_OK ){
rc = sqlite3Fts3Init(db);
}
#endif
-#ifdef SQLITE_ENABLE_FTS5
- if( !db->mallocFailed && rc==SQLITE_OK ){
- rc = sqlite3Fts5Init(db);
- }
-#endif
-
#ifdef SQLITE_ENABLE_ICU
if( !db->mallocFailed && rc==SQLITE_OK ){
rc = sqlite3IcuInit(db);
@@ -133708,17 +121624,7 @@ static int openDatabase(
}
#endif
-#ifdef SQLITE_ENABLE_DBSTAT_VTAB
- if( !db->mallocFailed && rc==SQLITE_OK){
- rc = sqlite3DbstatRegister(db);
- }
-#endif
-
-#ifdef SQLITE_ENABLE_JSON1
- if( !db->mallocFailed && rc==SQLITE_OK){
- rc = sqlite3Json1Init(db);
- }
-#endif
+ sqlite3Error(db, rc, 0);
/* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking
** mode. -DSQLITE_DEFAULT_LOCKING_MODE=0 make NORMAL the default locking
@@ -133730,8 +121636,6 @@ static int openDatabase(
SQLITE_DEFAULT_LOCKING_MODE);
#endif
- if( rc ) sqlite3Error(db, rc);
-
/* Enable the lookaside-malloc subsystem */
setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside,
sqlite3GlobalConfig.nLookaside);
@@ -133741,8 +121645,7 @@ static int openDatabase(
opendb_out:
sqlite3_free(zOpen);
if( db ){
- assert( db->mutex!=0 || isThreadsafe==0
- || sqlite3GlobalConfig.bFullMutex==0 );
+ assert( db->mutex!=0 || isThreadsafe==0 || sqlite3GlobalConfig.bFullMutex==0 );
sqlite3_mutex_leave(db->mutex);
}
rc = sqlite3_errcode(db);
@@ -133761,20 +121664,20 @@ opendb_out:
sqlite3GlobalConfig.xSqllog(pArg, db, zFilename, 0);
}
#endif
- return rc & 0xff;
+ return sqlite3ApiExit(0, rc);
}
/*
** Open a new database handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
+SQLITE_API int sqlite3_open(
const char *zFilename,
sqlite3 **ppDb
){
return openDatabase(zFilename, ppDb,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
}
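
/* Editor's sketch, not part of this diff: opening a database read-only through
** the v2 interface declared below; the filename is an illustrative assumption.
*/
static int sketchOpenReadOnly(sqlite3 **ppDb){
  return sqlite3_open_v2("example.db", ppDb, SQLITE_OPEN_READONLY, 0);
}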
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
+SQLITE_API int sqlite3_open_v2(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb, /* OUT: SQLite db handle */
int flags, /* Flags */
@@ -133787,7 +121690,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
/*
** Open a new database handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
+SQLITE_API int sqlite3_open16(
const void *zFilename,
sqlite3 **ppDb
){
@@ -133795,15 +121698,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open16(
sqlite3_value *pVal;
int rc;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( ppDb==0 ) return SQLITE_MISUSE_BKPT;
-#endif
+ assert( zFilename );
+ assert( ppDb );
*ppDb = 0;
#ifndef SQLITE_OMIT_AUTOINIT
rc = sqlite3_initialize();
if( rc ) return rc;
#endif
- if( zFilename==0 ) zFilename = "\000\000";
pVal = sqlite3ValueNew(0);
sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC);
zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8);
@@ -133812,34 +121713,40 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open16(
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
assert( *ppDb || rc==SQLITE_NOMEM );
if( rc==SQLITE_OK && !DbHasProperty(*ppDb, 0, DB_SchemaLoaded) ){
- SCHEMA_ENC(*ppDb) = ENC(*ppDb) = SQLITE_UTF16NATIVE;
+ ENC(*ppDb) = SQLITE_UTF16NATIVE;
}
}else{
rc = SQLITE_NOMEM;
}
sqlite3ValueFree(pVal);
- return rc & 0xff;
+ return sqlite3ApiExit(0, rc);
}
#endif /* SQLITE_OMIT_UTF16 */
/*
** Register a new collation sequence with the database handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
+SQLITE_API int sqlite3_create_collation(
sqlite3* db,
const char *zName,
int enc,
void* pCtx,
int(*xCompare)(void*,int,const void*,int,const void*)
){
- return sqlite3_create_collation_v2(db, zName, enc, pCtx, xCompare, 0);
+ int rc;
+ sqlite3_mutex_enter(db->mutex);
+ assert( !db->mallocFailed );
+ rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, 0);
+ rc = sqlite3ApiExit(db, rc);
+ sqlite3_mutex_leave(db->mutex);
+ return rc;
}
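
/* Editor's sketch, not part of this diff: registering an ASCII case-insensitive
** collation through the wrapper above; the name "nocase_ascii" and the
** comparison are illustrative assumptions.
*/
static int sketchCaseCmp(void *pCtx, int n1, const void *p1, int n2, const void *p2){
  int n = n1<n2 ? n1 : n2;
  int rc = sqlite3_strnicmp((const char*)p1, (const char*)p2, n);
  (void)pCtx;
  return rc ? rc : n1 - n2;
}
static int sketchRegisterCollation(sqlite3 *db){
  return sqlite3_create_collation(db, "nocase_ascii", SQLITE_UTF8, 0, sketchCaseCmp);
}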
/*
** Register a new collation sequence with the database handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
+SQLITE_API int sqlite3_create_collation_v2(
sqlite3* db,
const char *zName,
int enc,
@@ -133848,10 +121755,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
void(*xDel)(void*)
){
int rc;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
assert( !db->mallocFailed );
rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, xDel);
@@ -133864,7 +121767,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
/*
** Register a new collation sequence with the database handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
+SQLITE_API int sqlite3_create_collation16(
sqlite3* db,
const void *zName,
int enc,
@@ -133873,10 +121776,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
){
int rc = SQLITE_OK;
char *zName8;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
assert( !db->mallocFailed );
zName8 = sqlite3Utf16to8(db, zName, -1, SQLITE_UTF16NATIVE);
@@ -133894,14 +121793,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
** Register a collation sequence factory callback with the database handle
** db. Replace any previously installed collation sequence factory.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
+SQLITE_API int sqlite3_collation_needed(
sqlite3 *db,
void *pCollNeededArg,
void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*)
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
db->xCollNeeded = xCollNeeded;
db->xCollNeeded16 = 0;
@@ -133915,14 +121811,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
** Register a collation sequence factory callback with the database handle
** db. Replace any previously installed collation sequence factory.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
+SQLITE_API int sqlite3_collation_needed16(
sqlite3 *db,
void *pCollNeededArg,
void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*)
){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
db->xCollNeeded = 0;
db->xCollNeeded16 = xCollNeeded16;
@@ -133937,7 +121830,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
** This function is now an anachronism. It used to be used to recover from a
** malloc() failure, but SQLite now does this automatically.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_global_recover(void){
+SQLITE_API int sqlite3_global_recover(void){
return SQLITE_OK;
}
#endif
@@ -133948,20 +121841,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_global_recover(void){
** by default. Autocommit is disabled by a BEGIN statement and reenabled
** by the next COMMIT or ROLLBACK.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3 *db){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
+SQLITE_API int sqlite3_get_autocommit(sqlite3 *db){
return db->autoCommit;
}
/*
-** The following routines are substitutes for constants SQLITE_CORRUPT,
+** The following routines are subtitutes for constants SQLITE_CORRUPT,
** SQLITE_MISUSE, SQLITE_CANTOPEN, SQLITE_IOERR and possibly other error
-** constants. They serve two purposes:
+** constants. They server two purposes:
**
** 1. Serve as a convenient place to set a breakpoint in a debugger
** to detect when version error conditions occurs.
@@ -134000,7 +121887,7 @@ SQLITE_PRIVATE int sqlite3CantopenError(int lineno){
** SQLite no longer uses thread-specific data so this routine is now a
** no-op. It is retained for historical compatibility.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_thread_cleanup(void){
+SQLITE_API void sqlite3_thread_cleanup(void){
}
#endif
@@ -134008,7 +121895,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_thread_cleanup(void){
** Return meta information about a specific column of a database table.
** See comment in sqlite3.h (sqlite.h.in) for details.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+SQLITE_API int sqlite3_table_column_metadata(
sqlite3 *db, /* Connection handle */
const char *zDbName, /* Database name or NULL */
const char *zTableName, /* Table name */
@@ -134023,20 +121911,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
char *zErrMsg = 0;
Table *pTab = 0;
Column *pCol = 0;
- int iCol = 0;
+ int iCol;
+
char const *zDataType = 0;
char const *zCollSeq = 0;
int notnull = 0;
int primarykey = 0;
int autoinc = 0;
-
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zTableName==0 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
-
/* Ensure the database schema has been loaded */
sqlite3_mutex_enter(db->mutex);
sqlite3BtreeEnterAll(db);
@@ -134053,8 +121935,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
}
/* Find the column for which info is requested */
- if( zColumnName==0 ){
- /* Query for existance of table only */
+ if( sqlite3IsRowid(zColumnName) ){
+ iCol = pTab->iPKey;
+ if( iCol>=0 ){
+ pCol = &pTab->aCol[iCol];
+ }
}else{
for(iCol=0; iCol<pTab->nCol; iCol++){
pCol = &pTab->aCol[iCol];
@@ -134063,13 +121948,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
}
}
if( iCol==pTab->nCol ){
- if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){
- iCol = pTab->iPKey;
- pCol = iCol>=0 ? &pTab->aCol[iCol] : 0;
- }else{
- pTab = 0;
- goto error_out;
- }
+ pTab = 0;
+ goto error_out;
}
}
@@ -134116,17 +121996,18 @@ error_out:
zColumnName);
rc = SQLITE_ERROR;
}
- sqlite3ErrorWithMsg(db, rc, (zErrMsg?"%s":0), zErrMsg);
+ sqlite3Error(db, rc, (zErrMsg?"%s":0), zErrMsg);
sqlite3DbFree(db, zErrMsg);
rc = sqlite3ApiExit(db, rc);
sqlite3_mutex_leave(db->mutex);
return rc;
}
+#endif
/*
** Sleep for a little while. Return the amount of time slept.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int ms){
+SQLITE_API int sqlite3_sleep(int ms){
sqlite3_vfs *pVfs;
int rc;
pVfs = sqlite3_vfs_find(0);
@@ -134142,10 +122023,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int ms){
/*
** Enable or disable the extended result codes.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3 *db, int onoff){
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
+SQLITE_API int sqlite3_extended_result_codes(sqlite3 *db, int onoff){
sqlite3_mutex_enter(db->mutex);
db->errMask = onoff ? 0xffffffff : 0xff;
sqlite3_mutex_leave(db->mutex);
@@ -134155,13 +122033,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3 *db, int ono
/*
** Invoke the xFileControl method on a particular database.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){
+SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){
int rc = SQLITE_ERROR;
Btree *pBtree;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
-#endif
sqlite3_mutex_enter(db->mutex);
pBtree = sqlite3DbNameToBtree(db, zDbName);
if( pBtree ){
@@ -134183,17 +122058,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3 *db, const char *zDbN
sqlite3BtreeLeave(pBtree);
}
sqlite3_mutex_leave(db->mutex);
- return rc;
+ return rc;
}
/*
** Interface to the testing logic.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
+SQLITE_API int sqlite3_test_control(int op, ...){
int rc = 0;
-#ifdef SQLITE_OMIT_BUILTIN_TEST
- UNUSED_PARAMETER(op);
-#else
+#ifndef SQLITE_OMIT_BUILTIN_TEST
va_list ap;
va_start(ap, op);
switch( op ){
@@ -134222,7 +122095,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
** to the xRandomness method of the default VFS.
*/
case SQLITE_TESTCTRL_PRNG_RESET: {
- sqlite3_randomness(0,0);
+ sqlite3PrngResetState();
break;
}
@@ -134242,28 +122115,6 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
}
/*
- ** sqlite3_test_control(FAULT_INSTALL, xCallback)
- **
- ** Arrange to invoke xCallback() whenever sqlite3FaultSim() is called,
- ** if xCallback is not NULL.
- **
- ** As a test of the fault simulator mechanism itself, sqlite3FaultSim(0)
- ** is called immediately after installing the new callback and the return
- ** value from sqlite3FaultSim(0) becomes the return from
- ** sqlite3_test_control().
- */
- case SQLITE_TESTCTRL_FAULT_INSTALL: {
- /* MSVC is picky about pulling func ptrs from va lists.
- ** http://support.microsoft.com/kb/47961
- ** sqlite3GlobalConfig.xTestCallback = va_arg(ap, int(*)(int));
- */
- typedef int(*TESTCALLBACKFUNC_t)(int);
- sqlite3GlobalConfig.xTestCallback = va_arg(ap, TESTCALLBACKFUNC_t);
- rc = sqlite3FaultSim(0);
- break;
- }
-
- /*
** sqlite3_test_control(BENIGN_MALLOC_HOOKS, xBegin, xEnd)
**
** Register hooks to call to indicate which malloc() failures
@@ -134289,7 +122140,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
** IMPORTANT: Changing the PENDING byte from 0x40000000 results in
** an incompatible database file format. Changing the PENDING byte
** while any database connection is open results in undefined and
- ** deleterious behavior.
+ ** dileterious behavior.
*/
case SQLITE_TESTCTRL_PENDING_BYTE: {
rc = PENDING_BYTE;
@@ -134354,22 +122205,6 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
break;
}
- /*
- ** sqlite3_test_control(SQLITE_TESTCTRL_BYTEORDER);
- **
- ** The integer returned reveals the byte-order of the computer on which
- ** SQLite is running:
- **
- ** 1 big-endian, determined at run-time
- ** 10 little-endian, determined at run-time
- ** 432101 big-endian, determined at compile-time
- ** 123410 little-endian, determined at compile-time
- */
- case SQLITE_TESTCTRL_BYTEORDER: {
- rc = SQLITE_BYTEORDER*100 + SQLITE_LITTLEENDIAN*10 + SQLITE_BIGENDIAN;
- break;
- }
-
/* sqlite3_test_control(SQLITE_TESTCTRL_RESERVE, sqlite3 *db, int N)
**
** Set the nReserve size to N for the main database on the database
@@ -134444,6 +122279,22 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
break;
}
+#if defined(SQLITE_ENABLE_TREE_EXPLAIN)
+ /* sqlite3_test_control(SQLITE_TESTCTRL_EXPLAIN_STMT,
+ ** sqlite3_stmt*,const char**);
+ **
+ ** If compiled with SQLITE_ENABLE_TREE_EXPLAIN, each sqlite3_stmt holds
+ ** a string that describes the optimized parse tree. This test-control
+ ** returns a pointer to that string.
+ */
+ case SQLITE_TESTCTRL_EXPLAIN_STMT: {
+ sqlite3_stmt *pStmt = va_arg(ap, sqlite3_stmt*);
+ const char **pzRet = va_arg(ap, const char**);
+ *pzRet = sqlite3VdbeExplanation((Vdbe*)pStmt);
+ break;
+ }
+#endif
+
/* sqlite3_test_control(SQLITE_TESTCTRL_NEVER_CORRUPT, int);
**
** Set or clear a flag that indicates that the database file is always well-
@@ -134453,70 +122304,10 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
** that demonstrate invariants on well-formed database files.
*/
case SQLITE_TESTCTRL_NEVER_CORRUPT: {
- sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int);
+ sqlite3Config.neverCorrupt = va_arg(ap, int);
break;
}
-
- /* sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE, xCallback, ptr);
- **
- ** Set the VDBE coverage callback function to xCallback with context
- ** pointer ptr.
- */
- case SQLITE_TESTCTRL_VDBE_COVERAGE: {
-#ifdef SQLITE_VDBE_COVERAGE
- typedef void (*branch_callback)(void*,int,u8,u8);
- sqlite3GlobalConfig.xVdbeBranch = va_arg(ap,branch_callback);
- sqlite3GlobalConfig.pVdbeBranchArg = va_arg(ap,void*);
-#endif
- break;
- }
-
- /* sqlite3_test_control(SQLITE_TESTCTRL_SORTER_MMAP, db, nMax); */
- case SQLITE_TESTCTRL_SORTER_MMAP: {
- sqlite3 *db = va_arg(ap, sqlite3*);
- db->nMaxSorterMmap = va_arg(ap, int);
- break;
- }
-
- /* sqlite3_test_control(SQLITE_TESTCTRL_ISINIT);
- **
- ** Return SQLITE_OK if SQLite has been initialized and SQLITE_ERROR if
- ** not.
- */
- case SQLITE_TESTCTRL_ISINIT: {
- if( sqlite3GlobalConfig.isInit==0 ) rc = SQLITE_ERROR;
- break;
- }
-
- /* sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, dbName, onOff, tnum);
- **
- ** This test control is used to create imposter tables. "db" is a pointer
- ** to the database connection. dbName is the database name (ex: "main" or
- ** "temp") which will receive the imposter. "onOff" turns imposter mode on
- ** or off. "tnum" is the root page of the b-tree to which the imposter
- ** table should connect.
- **
- ** Enable imposter mode only when the schema has already been parsed. Then
- ** run a single CREATE TABLE statement to construct the imposter table in
- ** the parsed schema. Then turn imposter mode back off again.
- **
- ** If onOff==0 and tnum>0 then reset the schema for all databases, causing
- ** the schema to be reparsed the next time it is needed. This has the
- ** effect of erasing all imposter tables.
- */
- case SQLITE_TESTCTRL_IMPOSTER: {
- sqlite3 *db = va_arg(ap, sqlite3*);
- sqlite3_mutex_enter(db->mutex);
- db->init.iDb = sqlite3FindDbName(db, va_arg(ap,const char*));
- db->init.busy = db->init.imposterTable = va_arg(ap,int);
- db->init.newTnum = va_arg(ap,int);
- if( db->init.busy==0 && db->init.newTnum>0 ){
- sqlite3ResetAllSchemasOfConnection(db);
- }
- sqlite3_mutex_leave(db->mutex);
- break;
- }
}
va_end(ap);
#endif /* SQLITE_OMIT_BUILTIN_TEST */
@@ -134534,8 +122325,8 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
** parameter if it exists. If the parameter does not exist, this routine
** returns a NULL pointer.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam){
- if( zFilename==0 || zParam==0 ) return 0;
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam){
+ if( zFilename==0 ) return 0;
zFilename += sqlite3Strlen30(zFilename) + 1;
while( zFilename[0] ){
int x = strcmp(zFilename, zParam);
@@ -134549,7 +122340,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilenam
/*
** Return a boolean value for a query parameter.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){
+SQLITE_API int sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){
const char *z = sqlite3_uri_parameter(zFilename, zParam);
bDflt = bDflt!=0;
return z ? sqlite3GetBoolean(z, bDflt) : bDflt;
@@ -134558,14 +122349,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFilename, const c
/*
** Return a 64-bit integer value for a query parameter.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(
const char *zFilename, /* Filename as passed to xOpen */
const char *zParam, /* URI parameter sought */
sqlite3_int64 bDflt /* return if parameter is missing */
){
const char *z = sqlite3_uri_parameter(zFilename, zParam);
sqlite3_int64 v;
- if( z && sqlite3DecOrHexToI64(z, &v)==SQLITE_OK ){
+ if( z && sqlite3Atoi64(z, &v, sqlite3Strlen30(z), SQLITE_UTF8)==SQLITE_OK ){
bDflt = v;
}
return bDflt;
@@ -134590,15 +122381,8 @@ SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
** Return the filename of the database associated with a database
** connection.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName){
- Btree *pBt;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- pBt = sqlite3DbNameToBtree(db, zDbName);
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){
+ Btree *pBt = sqlite3DbNameToBtree(db, zDbName);
return pBt ? sqlite3BtreeGetFilename(pBt) : 0;
}
@@ -134606,16 +122390,9 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const cha
** Return 1 if database is read-only or 0 if read/write. Return -1 if
** no such database exists.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName){
- Btree *pBt;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
- (void)SQLITE_MISUSE_BKPT;
- return -1;
- }
-#endif
- pBt = sqlite3DbNameToBtree(db, zDbName);
- return pBt ? sqlite3BtreeIsReadonly(pBt) : -1;
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName){
+ Btree *pBt = sqlite3DbNameToBtree(db, zDbName);
+ return pBt ? sqlite3PagerIsreadonly(sqlite3BtreePager(pBt)) : -1;
}
/************** End of main.c ************************************************/
@@ -134635,8 +122412,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbNa
** This file contains the implementation of the sqlite3_unlock_notify()
** API method and its associated functionality.
*/
-/* #include "sqliteInt.h" */
-/* #include "btreeInt.h" */
/* Omit this entire file if SQLITE_ENABLE_UNLOCK_NOTIFY is not defined. */
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
@@ -134767,7 +122542,7 @@ static void leaveMutex(void){
** on the same "db". If xNotify==0 then any prior callbacks are immediately
** cancelled.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
+SQLITE_API int sqlite3_unlock_notify(
sqlite3 *db,
void (*xNotify)(void **, int),
void *pArg
@@ -134806,7 +122581,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
leaveMutex();
assert( !db->mallocFailed );
- sqlite3ErrorWithMsg(db, rc, (rc?"database is deadlocked":0));
+ sqlite3Error(db, rc, (rc?"database is deadlocked":0));
sqlite3_mutex_leave(db->mutex);
return rc;
}
@@ -135280,11 +123055,9 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){
/* If not building as part of the core, include sqlite3ext.h. */
#ifndef SQLITE_CORE
-/* # include "sqlite3ext.h" */
SQLITE_EXTENSION_INIT3
#endif
-/* #include "sqlite3.h" */
/************** Include fts3_tokenizer.h in the middle of fts3Int.h **********/
/************** Begin file fts3_tokenizer.h **********************************/
/*
@@ -135313,7 +123086,6 @@ SQLITE_EXTENSION_INIT3
** If tokenizers are to be allowed to call sqlite3_*() functions, then
** we will need a way to register the API consistently.
*/
-/* #include "sqlite3.h" */
/*
** Structures used by the tokenizer interface. When a new tokenizer
@@ -135664,11 +123436,6 @@ SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(const Fts3Hash *, const voi
#ifdef SQLITE_COVERAGE_TEST
# define ALWAYS(x) (1)
# define NEVER(X) (0)
-#elif defined(SQLITE_DEBUG)
-# define ALWAYS(x) sqlite3Fts3Always((x)!=0)
-# define NEVER(x) sqlite3Fts3Never((x)!=0)
-SQLITE_PRIVATE int sqlite3Fts3Always(int b);
-SQLITE_PRIVATE int sqlite3Fts3Never(int b);
#else
# define ALWAYS(x) (x)
# define NEVER(x) (x)
@@ -135727,8 +123494,6 @@ typedef struct Fts3DeferredToken Fts3DeferredToken;
typedef struct Fts3SegReader Fts3SegReader;
typedef struct Fts3MultiSegReader Fts3MultiSegReader;
-typedef struct MatchinfoBuffer MatchinfoBuffer;
-
/*
** A connection to a fulltext index is an instance of the following
** structure. The xCreate and xConnect methods create an instance
@@ -135747,20 +123512,20 @@ struct Fts3Table {
sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */
char *zContentTbl; /* content=xxx option, or NULL */
char *zLanguageid; /* languageid=xxx option, or NULL */
- int nAutoincrmerge; /* Value configured by 'automerge' */
+ u8 bAutoincrmerge; /* True if automerge=1 */
u32 nLeafAdd; /* Number of leaf blocks added this trans */
/* Precompiled statements used by the implementation. Each of these
** statements is run and reset within a single virtual table API call.
*/
- sqlite3_stmt *aStmt[40];
+ sqlite3_stmt *aStmt[37];
char *zReadExprlist;
char *zWriteExprlist;
int nNodeSize; /* Soft limit for node size */
u8 bFts4; /* True for FTS4, false for FTS3 */
- u8 bHasStat; /* True if %_stat table exists (2==unknown) */
+ u8 bHasStat; /* True if %_stat table exists */
u8 bHasDocsize; /* True if %_docsize table exists */
u8 bDescIdx; /* True if doclists are in reverse order */
u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */
@@ -135794,7 +123559,6 @@ struct Fts3Table {
int nPendingData; /* Current bytes of pending data */
sqlite_int64 iPrevDocid; /* Docid of most recently inserted document */
int iPrevLangid; /* Langid of recently inserted document */
- int bPrevDelete; /* True if last operation was a delete */
#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
/* State variables used for validating that the transaction control
@@ -135839,7 +123603,9 @@ struct Fts3Cursor {
i64 iMinDocid; /* Minimum docid to return */
i64 iMaxDocid; /* Maximum docid to return */
int isMatchinfoNeeded; /* True when aMatchinfo[] needs filling in */
- MatchinfoBuffer *pMIBuffer; /* Buffer for matchinfo data */
+ u32 *aMatchinfo; /* Information about most recent match */
+ int nMatchinfo; /* Number of elements in aMatchinfo[] */
+ char *zMatchinfo; /* Matchinfo specification */
};
#define FTS3_EVAL_FILTER 0
@@ -135911,11 +123677,6 @@ struct Fts3Phrase {
int bIncr; /* True if doclist is loaded incrementally */
int iDoclistToken;
- /* Used by sqlite3Fts3EvalPhrasePoslist() if this is a descendent of an
- ** OR condition. */
- char *pOrPoslist;
- i64 iOrDocid;
-
/* Variables below this point are populated by fts3_expr.c when parsing
** a MATCH expression. Everything above is part of the evaluation phase.
*/
@@ -135959,9 +123720,7 @@ struct Fts3Expr {
u8 bStart; /* True if iDocid is valid */
u8 bDeferred; /* True if this expression is entirely deferred */
- /* The following are used by the fts3_snippet.c module. */
- int iPhrase; /* Index of this phrase in matchinfo() results */
- u32 *aMI; /* See above */
+ u32 *aMI;
};
/*
@@ -136072,7 +123831,6 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table*,int,int);
)
/* fts3.c */
-SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char**,const char*,...);
SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *, sqlite3_int64);
SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *, sqlite_int64 *);
SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *, int *);
@@ -136082,7 +123840,6 @@ SQLITE_PRIVATE void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,i
SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *);
SQLITE_PRIVATE int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *);
SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int*, Fts3Table*);
-SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc);
/* fts3_tokenizer.c */
SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *, int *);
@@ -136098,7 +123855,6 @@ SQLITE_PRIVATE void sqlite3Fts3Snippet(sqlite3_context *, Fts3Cursor *, const ch
const char *, const char *, int, int
);
SQLITE_PRIVATE void sqlite3Fts3Matchinfo(sqlite3_context *, Fts3Cursor *, const char *);
-SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p);
/* fts3_expr.c */
SQLITE_PRIVATE int sqlite3Fts3ExprParse(sqlite3_tokenizer *, int,
@@ -136131,7 +123887,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr);
SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *);
/* fts3_unicode2.c (functions generated by parsing unicode text files) */
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#ifdef SQLITE_ENABLE_FTS4_UNICODE61
SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int, int);
SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int);
SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int);
@@ -136155,9 +123911,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int);
/* #include <string.h> */
/* #include <stdarg.h> */
-/* #include "fts3.h" */
#ifndef SQLITE_CORE
-/* # include "sqlite3ext.h" */
SQLITE_EXTENSION_INIT1
#endif
@@ -136166,13 +123920,6 @@ static int fts3EvalStart(Fts3Cursor *pCsr);
static int fts3TermSegReaderCursor(
Fts3Cursor *, const char *, int, int, Fts3MultiSegReader **);
-#ifndef SQLITE_AMALGAMATION
-# if defined(SQLITE_DEBUG)
-SQLITE_PRIVATE int sqlite3Fts3Always(int b) { assert( b ); return b; }
-SQLITE_PRIVATE int sqlite3Fts3Never(int b) { assert( !b ); return b; }
-# endif
-#endif
-
/*
** Write a 64-bit variable-length integer to memory starting at p[0].
** The length of data written will be between 1 and FTS3_VARINT_MAX bytes.
@@ -136282,7 +124029,7 @@ SQLITE_PRIVATE void sqlite3Fts3Dequote(char *z){
/* If the first byte was a '[', then the close-quote character is a ']' */
if( quote=='[' ) quote = ']';
- while( z[iIn] ){
+ while( ALWAYS(z[iIn]) ){
if( z[iIn]==quote ){
if( z[iIn+1]!=quote ) break;
z[iOut++] = quote;
@@ -136362,17 +124109,6 @@ static int fts3DisconnectMethod(sqlite3_vtab *pVtab){
}
/*
-** Write an error message into *pzErr
-*/
-SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char **pzErr, const char *zFormat, ...){
- va_list ap;
- sqlite3_free(*pzErr);
- va_start(ap, zFormat);
- *pzErr = sqlite3_vmprintf(zFormat, ap);
- va_end(ap);
-}
-
-/*
** Construct one or more SQL statements from the format string given
** and then evaluate those statements. The success code is written
** into *pRc.
@@ -136781,16 +124517,11 @@ static char *fts3WriteExprList(Fts3Table *p, const char *zFunc, int *pRc){
** This function is used when parsing the "prefix=" FTS4 parameter.
*/
static int fts3GobbleInt(const char **pp, int *pnOut){
- const int MAX_NPREFIX = 10000000;
const char *p; /* Iterator pointer */
int nInt = 0; /* Output value */
for(p=*pp; p[0]>='0' && p[0]<='9'; p++){
nInt = nInt * 10 + (p[0] - '0');
- if( nInt>MAX_NPREFIX ){
- nInt = 0;
- break;
- }
}
if( p==*pp ) return SQLITE_ERROR;
*pnOut = nInt;
@@ -136833,6 +124564,7 @@ static int fts3PrefixParameter(
aIndex = sqlite3_malloc(sizeof(struct Fts3Index) * nIndex);
*apIndex = aIndex;
+ *pnIndex = nIndex;
if( !aIndex ){
return SQLITE_NOMEM;
}
@@ -136842,20 +124574,13 @@ static int fts3PrefixParameter(
const char *p = zParam;
int i;
for(i=1; i<nIndex; i++){
- int nPrefix = 0;
+ int nPrefix;
if( fts3GobbleInt(&p, &nPrefix) ) return SQLITE_ERROR;
- assert( nPrefix>=0 );
- if( nPrefix==0 ){
- nIndex--;
- i--;
- }else{
- aIndex[i].nPrefix = nPrefix;
- }
+ aIndex[i].nPrefix = nPrefix;
p++;
}
}
- *pnIndex = nIndex;
return SQLITE_OK;
}
@@ -136890,8 +124615,7 @@ static int fts3ContentColumns(
const char *zTbl, /* Name of content table */
const char ***pazCol, /* OUT: Malloc'd array of column names */
int *pnCol, /* OUT: Size of array *pazCol */
- int *pnStr, /* OUT: Bytes of string content */
- char **pzErr /* OUT: error message */
+ int *pnStr /* OUT: Bytes of string content */
){
int rc = SQLITE_OK; /* Return code */
char *zSql; /* "SELECT *" statement on zTbl */
@@ -136902,9 +124626,6 @@ static int fts3ContentColumns(
rc = SQLITE_NOMEM;
}else{
rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
- if( rc!=SQLITE_OK ){
- sqlite3Fts3ErrMsg(pzErr, "%s", sqlite3_errmsg(db));
- }
}
sqlite3_free(zSql);
@@ -136983,7 +124704,7 @@ static int fts3InitVtab(
const char **aCol; /* Array of column names */
sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */
- int nIndex = 0; /* Size of aIndex[] array */
+ int nIndex; /* Size of aIndex[] array */
struct Fts3Index *aIndex = 0; /* Array of indexes for this table */
/* The results of parsing supported FTS4 key=value options: */
@@ -137071,13 +124792,13 @@ static int fts3InitVtab(
}
}
if( iOpt==SizeofArray(aFts4Opt) ){
- sqlite3Fts3ErrMsg(pzErr, "unrecognized parameter: %s", z);
+ *pzErr = sqlite3_mprintf("unrecognized parameter: %s", z);
rc = SQLITE_ERROR;
}else{
switch( iOpt ){
case 0: /* MATCHINFO */
if( strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "fts3", 4) ){
- sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo: %s", zVal);
+ *pzErr = sqlite3_mprintf("unrecognized matchinfo: %s", zVal);
rc = SQLITE_ERROR;
}
bNoDocsize = 1;
@@ -137105,7 +124826,7 @@ static int fts3InitVtab(
if( (strlen(zVal)!=3 || sqlite3_strnicmp(zVal, "asc", 3))
&& (strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "desc", 4))
){
- sqlite3Fts3ErrMsg(pzErr, "unrecognized order: %s", zVal);
+ *pzErr = sqlite3_mprintf("unrecognized order: %s", zVal);
rc = SQLITE_ERROR;
}
bDescIdx = (zVal[0]=='d' || zVal[0]=='D');
@@ -137156,7 +124877,7 @@ static int fts3InitVtab(
if( nCol==0 ){
sqlite3_free((void*)aCol);
aCol = 0;
- rc = fts3ContentColumns(db, argv[1], zContent,&aCol,&nCol,&nString,pzErr);
+ rc = fts3ContentColumns(db, argv[1], zContent, &aCol, &nCol, &nString);
/* If a languageid= option was specified, remove the language id
** column from the aCol[] array. */
@@ -137191,7 +124912,7 @@ static int fts3InitVtab(
rc = fts3PrefixParameter(zPrefix, &nIndex, &aIndex);
if( rc==SQLITE_ERROR ){
assert( zPrefix );
- sqlite3Fts3ErrMsg(pzErr, "error parsing prefix parameter: %s", zPrefix);
+ *pzErr = sqlite3_mprintf("error parsing prefix parameter: %s", zPrefix);
}
if( rc!=SQLITE_OK ) goto fts3_init_out;
@@ -137219,7 +124940,7 @@ static int fts3InitVtab(
p->bHasStat = isFts4;
p->bFts4 = isFts4;
p->bDescIdx = bDescIdx;
- p->nAutoincrmerge = 0xff; /* 0xff means setting unknown */
+ p->bAutoincrmerge = 0xff; /* 0xff means setting unknown */
p->zContentTbl = zContent;
p->zLanguageid = zLanguageid;
zContent = 0;
@@ -137262,9 +124983,7 @@ static int fts3InitVtab(
int n = (int)strlen(p->azColumn[iCol]);
for(i=0; i<nNotindexed; i++){
char *zNot = azNotindexed[i];
- if( zNot && n==(int)strlen(zNot)
- && 0==sqlite3_strnicmp(p->azColumn[iCol], zNot, n)
- ){
+ if( zNot && 0==sqlite3_strnicmp(p->azColumn[iCol], zNot, n) ){
p->abNotindexed[iCol] = 1;
sqlite3_free(zNot);
azNotindexed[i] = 0;
@@ -137273,7 +124992,7 @@ static int fts3InitVtab(
}
for(i=0; i<nNotindexed; i++){
if( azNotindexed[i] ){
- sqlite3Fts3ErrMsg(pzErr, "no such column: %s", azNotindexed[i]);
+ *pzErr = sqlite3_mprintf("no such column: %s", azNotindexed[i]);
rc = SQLITE_ERROR;
}
}
@@ -137281,7 +125000,7 @@ static int fts3InitVtab(
if( rc==SQLITE_OK && (zCompress==0)!=(zUncompress==0) ){
char const *zMiss = (zCompress==0 ? "compress" : "uncompress");
rc = SQLITE_ERROR;
- sqlite3Fts3ErrMsg(pzErr, "missing %s parameter in fts4 constructor", zMiss);
+ *pzErr = sqlite3_mprintf("missing %s parameter in fts4 constructor", zMiss);
}
p->zReadExprlist = fts3ReadExprList(p, zUncompress, &rc);
p->zWriteExprlist = fts3WriteExprList(p, zCompress, &rc);
@@ -137298,7 +125017,10 @@ static int fts3InitVtab(
** addition of a %_stat table so that it can use incremental merge.
*/
if( !isFts4 && !isCreate ){
- p->bHasStat = 2;
+ int rc2 = SQLITE_OK;
+ fts3DbExec(&rc2, db, "SELECT 1 FROM %Q.'%q_stat' WHERE id=2",
+ p->zDb, p->zName);
+ if( rc2==SQLITE_OK ) p->bHasStat = 1;
}
/* Figure out the page-size for the database. This is required in order to
@@ -137357,32 +125079,6 @@ static int fts3CreateMethod(
return fts3InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr);
}
-/*
-** Set the pIdxInfo->estimatedRows variable to nRow. Unless this
-** extension is currently being used by a version of SQLite too old to
-** support estimatedRows. In that case this function is a no-op.
-*/
-static void fts3SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){
-#if SQLITE_VERSION_NUMBER>=3008002
- if( sqlite3_libversion_number()>=3008002 ){
- pIdxInfo->estimatedRows = nRow;
- }
-#endif
-}
-
-/*
-** Set the SQLITE_INDEX_SCAN_UNIQUE flag in pIdxInfo->flags. Unless this
-** extension is currently being used by a version of SQLite too old to
-** support index-info flags. In that case this function is a no-op.
-*/
-static void fts3SetUniqueFlag(sqlite3_index_info *pIdxInfo){
-#if SQLITE_VERSION_NUMBER>=3008012
- if( sqlite3_libversion_number()>=3008012 ){
- pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
- }
-#endif
-}
-
/*
** Implementation of the xBestIndex method for FTS3 tables. There
** are three possible strategies, in order of preference:
@@ -137410,20 +125106,7 @@ static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
for(i=0; i<pInfo->nConstraint; i++){
int bDocid; /* True if this constraint is on docid */
struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i];
- if( pCons->usable==0 ){
- if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){
- /* There exists an unusable MATCH constraint. This means that if
- ** the planner does elect to use the results of this call as part
- ** of the overall query plan the user will see an "unable to use
- ** function MATCH in the requested context" error. To discourage
- ** this, return a very high cost here. */
- pInfo->idxNum = FTS3_FULLSCAN_SEARCH;
- pInfo->estimatedCost = 1e50;
- fts3SetEstimatedRows(pInfo, ((sqlite3_int64)1) << 50);
- return SQLITE_OK;
- }
- continue;
- }
+ if( pCons->usable==0 ) continue;
bDocid = (pCons->iColumn<0 || pCons->iColumn==p->nColumn+1);
@@ -137473,9 +125156,6 @@ static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
}
}
- /* If using a docid=? or rowid=? strategy, set the UNIQUE flag. */
- if( pInfo->idxNum==FTS3_DOCID_SEARCH ) fts3SetUniqueFlag(pInfo);
-
iIdx = 1;
if( iCons>=0 ){
pInfo->aConstraintUsage[iCons].argvIndex = iIdx++;
@@ -137544,7 +125224,7 @@ static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){
sqlite3Fts3ExprFree(pCsr->pExpr);
sqlite3Fts3FreeDeferredTokens(pCsr);
sqlite3_free(pCsr->aDoclist);
- sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
+ sqlite3_free(pCsr->aMatchinfo);
assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
sqlite3_free(pCsr);
return SQLITE_OK;
@@ -137755,7 +125435,7 @@ static int fts3SelectLeaf(
sqlite3_int64 *piLeaf, /* Selected leaf node */
sqlite3_int64 *piLeaf2 /* Selected leaf node */
){
- int rc = SQLITE_OK; /* Return code */
+ int rc; /* Return code */
int iHeight; /* Height of this node in tree */
assert( piLeaf || piLeaf2 );
@@ -137766,7 +125446,7 @@ static int fts3SelectLeaf(
if( rc==SQLITE_OK && iHeight>1 ){
char *zBlob = 0; /* Blob read from %_segments table */
- int nBlob = 0; /* Size of zBlob in bytes */
+ int nBlob; /* Size of zBlob in bytes */
if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){
rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob, 0);
@@ -138393,33 +126073,26 @@ static int fts3DoclistOrMerge(
**
** The right-hand input doclist is overwritten by this function.
*/
-static int fts3DoclistPhraseMerge(
+static void fts3DoclistPhraseMerge(
int bDescDoclist, /* True if arguments are desc */
int nDist, /* Distance from left to right (1=adjacent) */
char *aLeft, int nLeft, /* Left doclist */
- char **paRight, int *pnRight /* IN/OUT: Right/output doclist */
+ char *aRight, int *pnRight /* IN/OUT: Right/output doclist */
){
sqlite3_int64 i1 = 0;
sqlite3_int64 i2 = 0;
sqlite3_int64 iPrev = 0;
- char *aRight = *paRight;
char *pEnd1 = &aLeft[nLeft];
char *pEnd2 = &aRight[*pnRight];
char *p1 = aLeft;
char *p2 = aRight;
char *p;
int bFirstOut = 0;
- char *aOut;
+ char *aOut = aRight;
assert( nDist>0 );
- if( bDescDoclist ){
- aOut = sqlite3_malloc(*pnRight + FTS3_VARINT_MAX);
- if( aOut==0 ) return SQLITE_NOMEM;
- }else{
- aOut = aRight;
- }
- p = aOut;
+ p = aOut;
fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1);
fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2);
@@ -138448,12 +126121,6 @@ static int fts3DoclistPhraseMerge(
}
*pnRight = (int)(p - aOut);
- if( bDescDoclist ){
- sqlite3_free(aRight);
- *paRight = aOut;
- }
-
- return SQLITE_OK;
}
/*
@@ -138578,22 +126245,8 @@ static int fts3TermSelectMerge(
){
if( pTS->aaOutput[0]==0 ){
/* If this is the first term selected, copy the doclist to the output
- ** buffer using memcpy().
- **
- ** Add FTS3_VARINT_MAX bytes of unused space to the end of the
- ** allocation. This is so as to ensure that the buffer is big enough
- ** to hold the current doclist AND'd with any other doclist. If the
- ** doclists are stored in order=ASC order, this padding would not be
- ** required (since the size of [doclistA AND doclistB] is always less
- ** than or equal to the size of [doclistA] in that case). But this is
- ** not true for order=DESC. For example, a doclist containing (1, -1)
- ** may be smaller than (-1), as in the first example the -1 may be stored
- ** as a single-byte delta, whereas in the second it must be stored as a
- ** FTS3_VARINT_MAX byte varint.
- **
- ** Similar padding is added in the fts3DoclistOrMerge() function.
- */
- pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1);
+ ** buffer using memcpy(). */
+ pTS->aaOutput[0] = sqlite3_malloc(nDoclist);
pTS->anOutput[0] = nDoclist;
if( pTS->aaOutput[0] ){
memcpy(pTS->aaOutput[0], aDoclist, nDoclist);
@@ -138690,7 +126343,7 @@ static int fts3SegReaderCursor(
** calls out here. */
if( iLevel<0 && p->aIndex ){
Fts3SegReader *pSeg = 0;
- rc = sqlite3Fts3SegReaderPending(p, iIndex, zTerm, nTerm, isPrefix||isScan, &pSeg);
+ rc = sqlite3Fts3SegReaderPending(p, iIndex, zTerm, nTerm, isPrefix, &pSeg);
if( rc==SQLITE_OK && pSeg ){
rc = fts3SegReaderCursorAppend(pCsr, pSeg);
}
@@ -139015,7 +126668,7 @@ static int fts3FilterMethod(
int nVal, /* Number of elements in apVal */
sqlite3_value **apVal /* Arguments for the indexing scheme */
){
- int rc = SQLITE_OK;
+ int rc;
char *zSql; /* SQL statement used to access %_content */
int eSearch;
Fts3Table *p = (Fts3Table *)pCursor->pVtab;
@@ -139045,7 +126698,6 @@ static int fts3FilterMethod(
/* In case the cursor has been used before, clear it now. */
sqlite3_finalize(pCsr->pStmt);
sqlite3_free(pCsr->aDoclist);
- sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
sqlite3Fts3ExprFree(pCsr->pExpr);
memset(&pCursor[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor));
@@ -139093,17 +126745,10 @@ static int fts3FilterMethod(
** row by docid.
*/
if( eSearch==FTS3_FULLSCAN_SEARCH ){
- if( pDocidGe || pDocidLe ){
- zSql = sqlite3_mprintf(
- "SELECT %s WHERE rowid BETWEEN %lld AND %lld ORDER BY rowid %s",
- p->zReadExprlist, pCsr->iMinDocid, pCsr->iMaxDocid,
- (pCsr->bDesc ? "DESC" : "ASC")
- );
- }else{
- zSql = sqlite3_mprintf("SELECT %s ORDER BY rowid %s",
- p->zReadExprlist, (pCsr->bDesc ? "DESC" : "ASC")
- );
- }
+ zSql = sqlite3_mprintf(
+ "SELECT %s ORDER BY rowid %s",
+ p->zReadExprlist, (pCsr->bDesc ? "DESC" : "ASC")
+ );
if( zSql ){
rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0);
sqlite3_free(zSql);
@@ -139241,10 +126886,7 @@ static int fts3SyncMethod(sqlite3_vtab *pVtab){
Fts3Table *p = (Fts3Table*)pVtab;
int rc = sqlite3Fts3PendingTermsFlush(p);
- if( rc==SQLITE_OK
- && p->nLeafAdd>(nMinMerge/16)
- && p->nAutoincrmerge && p->nAutoincrmerge!=0xff
- ){
+ if( rc==SQLITE_OK && p->bAutoincrmerge==1 && p->nLeafAdd>(nMinMerge/16) ){
int mxLevel = 0; /* Maximum relative level value in db */
int A; /* Incr-merge parameter A */
@@ -139252,41 +126894,14 @@ static int fts3SyncMethod(sqlite3_vtab *pVtab){
assert( rc==SQLITE_OK || mxLevel==0 );
A = p->nLeafAdd * mxLevel;
A += (A/2);
- if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, p->nAutoincrmerge);
+ if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, 8);
}
sqlite3Fts3SegmentsClose(p);
return rc;
}
/*
-** If it is currently unknown whether or not the FTS table has an %_stat
-** table (if p->bHasStat==2), attempt to determine this (set p->bHasStat
-** to 0 or 1). Return SQLITE_OK if successful, or an SQLite error code
-** if an error occurs.
-*/
-static int fts3SetHasStat(Fts3Table *p){
- int rc = SQLITE_OK;
- if( p->bHasStat==2 ){
- const char *zFmt ="SELECT 1 FROM %Q.sqlite_master WHERE tbl_name='%q_stat'";
- char *zSql = sqlite3_mprintf(zFmt, p->zDb, p->zName);
- if( zSql ){
- sqlite3_stmt *pStmt = 0;
- rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
- if( rc==SQLITE_OK ){
- int bHasStat = (sqlite3_step(pStmt)==SQLITE_ROW);
- rc = sqlite3_finalize(pStmt);
- if( rc==SQLITE_OK ) p->bHasStat = bHasStat;
- }
- sqlite3_free(zSql);
- }else{
- rc = SQLITE_NOMEM;
- }
- }
- return rc;
-}
-
-/*
-** Implementation of xBegin() method.
+** Implementation of xBegin() method. This is a no-op.
*/
static int fts3BeginMethod(sqlite3_vtab *pVtab){
Fts3Table *p = (Fts3Table*)pVtab;
@@ -139297,7 +126912,7 @@ static int fts3BeginMethod(sqlite3_vtab *pVtab){
TESTONLY( p->inTransaction = 1 );
TESTONLY( p->mxSavepoint = -1; );
p->nLeafAdd = 0;
- return fts3SetHasStat(p);
+ return SQLITE_OK;
}
/*
@@ -139339,31 +126954,11 @@ static void fts3ReversePoslist(char *pStart, char **ppPoslist){
char *p = &(*ppPoslist)[-2];
char c = 0;
- /* Skip backwards past any trailing 0x00 bytes added by NearTrim() */
while( p>pStart && (c=*p--)==0 );
-
- /* Search backwards for a varint with value zero (the end of the previous
- ** poslist). This is an 0x00 byte preceded by some byte that does not
- ** have the 0x80 bit set. */
while( p>pStart && (*p & 0x80) | c ){
c = *p--;
}
- assert( p==pStart || c==0 );
-
- /* At this point p points to that preceding byte without the 0x80 bit
- ** set. So to find the start of the poslist, skip forward 2 bytes then
- ** over a varint.
- **
- ** Normally. The other case is that p==pStart and the poslist to return
- ** is the first in the doclist. In this case do not skip forward 2 bytes.
- ** The second part of the if condition (c==0 && *ppPoslist>&p[2])
- ** is required for cases where the first byte of a doclist and the
- ** doclist is empty. For example, if the first docid is 10, a doclist
- ** that begins with:
- **
- ** 0x0A 0x00 <next docid delta varint>
- */
- if( p>pStart || (c==0 && *ppPoslist>&p[2]) ){ p = &p[2]; }
+ if( p>pStart ){ p = &p[2]; }
while( *p++&0x80 );
*ppPoslist = p;
}
@@ -139434,8 +127029,6 @@ static void fts3SnippetFunc(
}
if( !zEllipsis || !zEnd || !zStart ){
sqlite3_result_error_nomem(pContext);
- }else if( nToken==0 ){
- sqlite3_result_text(pContext, "", -1, SQLITE_STATIC);
}else if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){
sqlite3Fts3Snippet(pContext, pCsr, zStart, zEnd, zEllipsis, iCol, nToken);
}
@@ -139568,10 +127161,6 @@ static int fts3RenameMethod(
sqlite3 *db = p->db; /* Database connection */
int rc; /* Return Code */
- /* At this point it must be known if the %_stat table exists or not.
- ** So bHasStat may not be 2. */
- rc = fts3SetHasStat(p);
-
/* As it happens, the pending terms table is always empty here. This is
** because an "ALTER TABLE RENAME TABLE" statement inside a transaction
** always opens a savepoint transaction. And the xSavepoint() method
@@ -139579,9 +127168,7 @@ static int fts3RenameMethod(
** PendingTermsFlush() in in case that changes.
*/
assert( p->nPendingData==0 );
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts3PendingTermsFlush(p);
- }
+ rc = sqlite3Fts3PendingTermsFlush(p);
if( p->zContentTbl==0 ){
fts3DbExec(&rc, db,
@@ -139709,7 +127296,7 @@ static void hashDestroy(void *p){
*/
SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule);
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#ifdef SQLITE_ENABLE_FTS4_UNICODE61
SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const**ppModule);
#endif
#ifdef SQLITE_ENABLE_ICU
@@ -139727,7 +127314,7 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){
Fts3Hash *pHash = 0;
const sqlite3_tokenizer_module *pSimple = 0;
const sqlite3_tokenizer_module *pPorter = 0;
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#ifdef SQLITE_ENABLE_FTS4_UNICODE61
const sqlite3_tokenizer_module *pUnicode = 0;
#endif
@@ -139736,7 +127323,7 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){
sqlite3Fts3IcuTokenizerModule(&pIcu);
#endif
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#ifdef SQLITE_ENABLE_FTS4_UNICODE61
sqlite3Fts3UnicodeTokenizer(&pUnicode);
#endif
@@ -139764,7 +127351,7 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){
if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple)
|| sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter)
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#ifdef SQLITE_ENABLE_FTS4_UNICODE61
|| sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode)
#endif
#ifdef SQLITE_ENABLE_ICU
@@ -139871,17 +127458,14 @@ static void fts3EvalAllocateReaders(
** This function assumes that pList points to a buffer allocated using
** sqlite3_malloc(). This function takes responsibility for eventually
** freeing the buffer.
-**
-** SQLITE_OK is returned if successful, or SQLITE_NOMEM if an error occurs.
*/
-static int fts3EvalPhraseMergeToken(
+static void fts3EvalPhraseMergeToken(
Fts3Table *pTab, /* FTS Table pointer */
Fts3Phrase *p, /* Phrase to merge pList/nList into */
int iToken, /* Token pList/nList corresponds to */
char *pList, /* Pointer to doclist */
int nList /* Number of bytes in pList */
){
- int rc = SQLITE_OK;
assert( iToken!=p->iDoclistToken );
if( pList==0 ){
@@ -139920,16 +127504,13 @@ static int fts3EvalPhraseMergeToken(
nDiff = p->iDoclistToken - iToken;
}
- rc = fts3DoclistPhraseMerge(
- pTab->bDescIdx, nDiff, pLeft, nLeft, &pRight, &nRight
- );
+ fts3DoclistPhraseMerge(pTab->bDescIdx, nDiff, pLeft, nLeft, pRight,&nRight);
sqlite3_free(pLeft);
p->doclist.aAll = pRight;
p->doclist.nAll = nRight;
}
if( iToken>p->iDoclistToken ) p->iDoclistToken = iToken;
- return rc;
}
/*
@@ -139955,7 +127536,7 @@ static int fts3EvalPhraseLoad(
char *pThis = 0;
rc = fts3TermSelect(pTab, pToken, p->iColumn, &nThis, &pThis);
if( rc==SQLITE_OK ){
- rc = fts3EvalPhraseMergeToken(pTab, p, iToken, pThis, nThis);
+ fts3EvalPhraseMergeToken(pTab, p, iToken, pThis, nThis);
}
}
assert( pToken->pSegcsr==0 );
@@ -140100,6 +127681,7 @@ static int fts3EvalPhraseStart(Fts3Cursor *pCsr, int bOptOk, Fts3Phrase *p){
int bIncrOk = (bOptOk
&& pCsr->bDesc==pTab->bDescIdx
&& p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0
+ && p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0
#ifdef SQLITE_TEST
&& pTab->bNoIncrDoclist==0
#endif
@@ -140219,7 +127801,6 @@ SQLITE_PRIVATE void sqlite3Fts3DoclistNext(
p += sqlite3Fts3GetVarint(p, piDocid);
}else{
fts3PoslistCopy(0, &p);
- while( p<&aDoclist[nDoclist] && *p==0 ) p++;
if( p>=&aDoclist[nDoclist] ){
*pbEof = 1;
}else{
@@ -140391,7 +127972,7 @@ static int fts3EvalIncrPhraseNext(
bMaxSet = 1;
}
}
- assert( rc!=SQLITE_OK || (p->nToken>=1 && a[p->nToken-1].bIgnore==0) );
+ assert( rc!=SQLITE_OK || a[p->nToken-1].bIgnore==0 );
assert( rc!=SQLITE_OK || bMaxSet );
/* Keep advancing iterators until they all point to the same document */
@@ -140497,14 +128078,12 @@ static void fts3EvalStartReaders(
){
if( pExpr && SQLITE_OK==*pRc ){
if( pExpr->eType==FTSQUERY_PHRASE ){
+ int i;
int nToken = pExpr->pPhrase->nToken;
- if( nToken ){
- int i;
- for(i=0; i<nToken; i++){
- if( pExpr->pPhrase->aToken[i].pDeferred==0 ) break;
- }
- pExpr->bDeferred = (i==nToken);
+ for(i=0; i<nToken; i++){
+ if( pExpr->pPhrase->aToken[i].pDeferred==0 ) break;
}
+ pExpr->bDeferred = (i==nToken);
*pRc = fts3EvalPhraseStart(pCsr, 1, pExpr->pPhrase);
}else{
fts3EvalStartReaders(pCsr, pExpr->pLeft, pRc);
@@ -140760,12 +128339,8 @@ static int fts3EvalSelectDeferred(
rc = fts3TermSelect(pTab, pToken, pTC->iCol, &nList, &pList);
assert( rc==SQLITE_OK || pList==0 );
if( rc==SQLITE_OK ){
- rc = fts3EvalPhraseMergeToken(
- pTab, pTC->pPhrase, pTC->iToken,pList,nList
- );
- }
- if( rc==SQLITE_OK ){
int nCount;
+ fts3EvalPhraseMergeToken(pTab, pTC->pPhrase, pTC->iToken,pList,nList);
nCount = fts3DoclistCountDocids(
pTC->pPhrase->doclist.aAll, pTC->pPhrase->doclist.nAll
);
@@ -140943,7 +128518,7 @@ static int fts3EvalNearTrim(
** 2. NEAR is treated as AND. If the expression is "x NEAR y", it is
** advanced to point to the next row that matches "x AND y".
**
-** See sqlite3Fts3EvalTestDeferred() for details on testing if a row is
+** See fts3EvalTestDeferredAndNear() for details on testing if a row is
** really a match, taking into account deferred tokens and NEAR operators.
*/
static void fts3EvalNextRow(
@@ -140990,22 +128565,6 @@ static void fts3EvalNextRow(
}
pExpr->iDocid = pLeft->iDocid;
pExpr->bEof = (pLeft->bEof || pRight->bEof);
- if( pExpr->eType==FTSQUERY_NEAR && pExpr->bEof ){
- if( pRight->pPhrase && pRight->pPhrase->doclist.aAll ){
- Fts3Doclist *pDl = &pRight->pPhrase->doclist;
- while( *pRc==SQLITE_OK && pRight->bEof==0 ){
- memset(pDl->pList, 0, pDl->nList);
- fts3EvalNextRow(pCsr, pRight, pRc);
- }
- }
- if( pLeft->pPhrase && pLeft->pPhrase->doclist.aAll ){
- Fts3Doclist *pDl = &pLeft->pPhrase->doclist;
- while( *pRc==SQLITE_OK && pLeft->bEof==0 ){
- memset(pDl->pList, 0, pDl->nList);
- fts3EvalNextRow(pCsr, pLeft, pRc);
- }
- }
- }
}
break;
}
@@ -141163,7 +128722,7 @@ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){
}
/*
-** This function is a helper function for sqlite3Fts3EvalTestDeferred().
+** This function is a helper function for fts3EvalTestDeferredAndNear().
** Assuming no error occurs or has occurred, it returns non-zero if the
** expression passed as the second argument matches the row that pCsr
** currently points to, or zero if it does not.
@@ -141284,7 +128843,7 @@ static int fts3EvalTestExpr(
** Or, if no error occurs and it seems the current row does match the FTS
** query, return 0.
*/
-SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc){
+static int fts3EvalTestDeferredAndNear(Fts3Cursor *pCsr, int *pRc){
int rc = *pRc;
int bMiss = 0;
if( rc==SQLITE_OK ){
@@ -141331,7 +128890,7 @@ static int fts3EvalNext(Fts3Cursor *pCsr){
pCsr->isRequireSeek = 1;
pCsr->isMatchinfoNeeded = 1;
pCsr->iPrevId = pExpr->iDocid;
- }while( pCsr->isEof==0 && sqlite3Fts3EvalTestDeferred(pCsr, &rc) );
+ }while( pCsr->isEof==0 && fts3EvalTestDeferredAndNear(pCsr, &rc) );
}
/* Check if the cursor is past the end of the docid range specified
@@ -141378,7 +128937,6 @@ static void fts3EvalRestart(
}
pPhrase->doclist.pNextDocid = 0;
pPhrase->doclist.iDocid = 0;
- pPhrase->pOrPoslist = 0;
}
pExpr->iDocid = 0;
@@ -141492,7 +129050,7 @@ static int fts3EvalGatherStats(
pCsr->iPrevId = pRoot->iDocid;
}while( pCsr->isEof==0
&& pRoot->eType==FTSQUERY_NEAR
- && sqlite3Fts3EvalTestDeferred(pCsr, &rc)
+ && fts3EvalTestDeferredAndNear(pCsr, &rc)
);
if( rc==SQLITE_OK && pCsr->isEof==0 ){
@@ -141517,6 +129075,7 @@ static int fts3EvalGatherStats(
fts3EvalNextRow(pCsr, pRoot, &rc);
assert( pRoot->bEof==0 );
}while( pRoot->iDocid!=iDocid && rc==SQLITE_OK );
+ fts3EvalTestDeferredAndNear(pCsr, &rc);
}
}
return rc;
@@ -141623,13 +129182,13 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
iDocid = pExpr->iDocid;
pIter = pPhrase->doclist.pList;
if( iDocid!=pCsr->iPrevId || pExpr->bEof ){
- int rc = SQLITE_OK;
int bDescDoclist = pTab->bDescIdx; /* For DOCID_CMP macro */
+ int iMul; /* +1 if csr dir matches index dir, else -1 */
int bOr = 0;
+ u8 bEof = 0;
u8 bTreeEof = 0;
Fts3Expr *p; /* Used to iterate from pExpr to root */
Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */
- int bMatch;
/* Check if this phrase descends from an OR expression node. If not,
** return NULL. Otherwise, the entry that corresponds to docid
@@ -141648,62 +129207,74 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
** an incremental phrase. Load the entire doclist for the phrase
** into memory in this case. */
if( pPhrase->bIncr ){
- int bEofSave = pNear->bEof;
- fts3EvalRestart(pCsr, pNear, &rc);
- while( rc==SQLITE_OK && !pNear->bEof ){
- fts3EvalNextRow(pCsr, pNear, &rc);
- if( bEofSave==0 && pNear->iDocid==iDocid ) break;
+ int rc = SQLITE_OK;
+ int bEofSave = pExpr->bEof;
+ fts3EvalRestart(pCsr, pExpr, &rc);
+ while( rc==SQLITE_OK && !pExpr->bEof ){
+ fts3EvalNextRow(pCsr, pExpr, &rc);
+ if( bEofSave==0 && pExpr->iDocid==iDocid ) break;
}
+ pIter = pPhrase->doclist.pList;
assert( rc!=SQLITE_OK || pPhrase->bIncr==0 );
+ if( rc!=SQLITE_OK ) return rc;
}
- if( bTreeEof ){
- while( rc==SQLITE_OK && !pNear->bEof ){
- fts3EvalNextRow(pCsr, pNear, &rc);
- }
+
+ iMul = ((pCsr->bDesc==bDescDoclist) ? 1 : -1);
+ while( bTreeEof==1
+ && pNear->bEof==0
+ && (DOCID_CMP(pNear->iDocid, pCsr->iPrevId) * iMul)<0
+ ){
+ int rc = SQLITE_OK;
+ fts3EvalNextRow(pCsr, pExpr, &rc);
+ if( rc!=SQLITE_OK ) return rc;
+ iDocid = pExpr->iDocid;
+ pIter = pPhrase->doclist.pList;
}
- if( rc!=SQLITE_OK ) return rc;
- bMatch = 1;
- for(p=pNear; p; p=p->pLeft){
- u8 bEof = 0;
- Fts3Expr *pTest = p;
- Fts3Phrase *pPh;
- assert( pTest->eType==FTSQUERY_NEAR || pTest->eType==FTSQUERY_PHRASE );
- if( pTest->eType==FTSQUERY_NEAR ) pTest = pTest->pRight;
- assert( pTest->eType==FTSQUERY_PHRASE );
- pPh = pTest->pPhrase;
-
- pIter = pPh->pOrPoslist;
- iDocid = pPh->iOrDocid;
+ bEof = (pPhrase->doclist.nAll==0);
+ assert( bDescDoclist==0 || bDescDoclist==1 );
+ assert( pCsr->bDesc==0 || pCsr->bDesc==1 );
+
+ if( bEof==0 ){
if( pCsr->bDesc==bDescDoclist ){
- bEof = !pPh->doclist.nAll ||
- (pIter >= (pPh->doclist.aAll + pPh->doclist.nAll));
- while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)<0 ) && bEof==0 ){
- sqlite3Fts3DoclistNext(
- bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
- &pIter, &iDocid, &bEof
- );
+ int dummy;
+ if( pNear->bEof ){
+ /* This expression is already at EOF. So position it to point to the
+ ** last entry in the doclist at pPhrase->doclist.aAll[]. Variable
+ ** iDocid is already set for this entry, so all that is required is
+ ** to set pIter to point to the first byte of the last position-list
+ ** in the doclist.
+ **
+ ** It would also be correct to set pIter and iDocid to zero. In
+ ** this case, the first call to sqlite3Fts3DoclistPrev() below
+ ** would also move the iterator to point to the last entry in the
+ ** doclist. However, this is expensive, as to do so it has to
+ ** iterate through the entire doclist from start to finish (since
+ ** it does not know the docid for the last entry). */
+ pIter = &pPhrase->doclist.aAll[pPhrase->doclist.nAll-1];
+ fts3ReversePoslist(pPhrase->doclist.aAll, &pIter);
}
- }else{
- bEof = !pPh->doclist.nAll || (pIter && pIter<=pPh->doclist.aAll);
while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)>0 ) && bEof==0 ){
- int dummy;
sqlite3Fts3DoclistPrev(
- bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
+ bDescDoclist, pPhrase->doclist.aAll, pPhrase->doclist.nAll,
&pIter, &iDocid, &dummy, &bEof
- );
+ );
+ }
+ }else{
+ if( pNear->bEof ){
+ pIter = 0;
+ iDocid = 0;
+ }
+ while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)<0 ) && bEof==0 ){
+ sqlite3Fts3DoclistNext(
+ bDescDoclist, pPhrase->doclist.aAll, pPhrase->doclist.nAll,
+ &pIter, &iDocid, &bEof
+ );
}
}
- pPh->pOrPoslist = pIter;
- pPh->iOrDocid = iDocid;
- if( bEof || iDocid!=pCsr->iPrevId ) bMatch = 0;
}
- if( bMatch ){
- pIter = pPhrase->pOrPoslist;
- }else{
- pIter = 0;
- }
+ if( bEof || iDocid!=pCsr->iPrevId ) pIter = 0;
}
if( pIter==0 ) return SQLITE_OK;
@@ -141715,13 +129286,10 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
}
while( iThis<iCol ){
fts3ColumnlistCopy(0, &pIter);
- if( *pIter==0x00 ) return SQLITE_OK;
+ if( *pIter==0x00 ) return 0;
pIter++;
pIter += fts3GetVarint32(pIter, &iThis);
}
- if( *pIter==0x00 ){
- pIter = 0;
- }
*ppOut = ((iCol==iThis)?pIter:0);
return SQLITE_OK;
@@ -141764,7 +129332,7 @@ SQLITE_PRIVATE int sqlite3Fts3Corrupt(){
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_fts3_init(
+SQLITE_API int sqlite3_fts3_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -141791,7 +129359,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_fts3_init(
******************************************************************************
**
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <string.h> */
@@ -141896,7 +129463,7 @@ static int fts3auxConnectMethod(
return SQLITE_OK;
bad_args:
- sqlite3Fts3ErrMsg(pzErr, "invalid arguments to fts4aux constructor");
+ *pzErr = sqlite3_mprintf("invalid arguments to fts4aux constructor");
return SQLITE_ERROR;
}
@@ -142348,7 +129915,6 @@ SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){
** syntax is relatively simple, the whole tokenizer/parser system is
** hand-coded.
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/*
@@ -142518,23 +130084,40 @@ static int getNextToken(
int rc;
sqlite3_tokenizer_cursor *pCursor;
Fts3Expr *pRet = 0;
- int i = 0;
-
- /* Set variable i to the maximum number of bytes of input to tokenize. */
- for(i=0; i<n; i++){
- if( sqlite3_fts3_enable_parentheses && (z[i]=='(' || z[i]==')') ) break;
- if( z[i]=='"' ) break;
- }
+ int nConsumed = 0;
- *pnConsumed = i;
- rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, i, &pCursor);
+ rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, n, &pCursor);
if( rc==SQLITE_OK ){
const char *zToken;
int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0;
int nByte; /* total space to allocate */
rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition);
- if( rc==SQLITE_OK ){
+
+ if( (rc==SQLITE_OK || rc==SQLITE_DONE) && sqlite3_fts3_enable_parentheses ){
+ int i;
+ if( rc==SQLITE_DONE ) iStart = n;
+ for(i=0; i<iStart; i++){
+ if( z[i]=='(' ){
+ pParse->nNest++;
+ rc = fts3ExprParse(pParse, &z[i+1], n-i-1, &pRet, &nConsumed);
+ if( rc==SQLITE_OK && !pRet ){
+ rc = SQLITE_DONE;
+ }
+ nConsumed = (int)(i + 1 + nConsumed);
+ break;
+ }
+
+ if( z[i]==')' ){
+ rc = SQLITE_DONE;
+ pParse->nNest--;
+ nConsumed = i+1;
+ break;
+ }
+ }
+ }
+
+ if( nConsumed==0 && rc==SQLITE_OK ){
nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken;
pRet = (Fts3Expr *)fts3MallocZero(nByte);
if( !pRet ){
@@ -142568,14 +130151,13 @@ static int getNextToken(
}
}
- *pnConsumed = iEnd;
- }else if( i && rc==SQLITE_DONE ){
- rc = SQLITE_OK;
+ nConsumed = iEnd;
}
pModule->xClose(pCursor);
}
+ *pnConsumed = nConsumed;
*ppExpr = pRet;
return rc;
}
@@ -142825,21 +130407,6 @@ static int getNextNode(
return getNextString(pParse, &zInput[1], ii-1, ppExpr);
}
- if( sqlite3_fts3_enable_parentheses ){
- if( *zInput=='(' ){
- int nConsumed = 0;
- pParse->nNest++;
- rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed);
- if( rc==SQLITE_OK && !*ppExpr ){ rc = SQLITE_DONE; }
- *pnConsumed = (int)(zInput - z) + 1 + nConsumed;
- return rc;
- }else if( *zInput==')' ){
- pParse->nNest--;
- *pnConsumed = (int)((zInput - z) + 1);
- *ppExpr = 0;
- return SQLITE_DONE;
- }
- }
/* If control flows to this point, this must be a regular token, or
** the end of the input. Read a regular token using the sqlite3_tokenizer
@@ -142958,100 +130525,96 @@ static int fts3ExprParse(
while( rc==SQLITE_OK ){
Fts3Expr *p = 0;
int nByte = 0;
-
rc = getNextNode(pParse, zIn, nIn, &p, &nByte);
- assert( nByte>0 || (rc!=SQLITE_OK && p==0) );
if( rc==SQLITE_OK ){
- if( p ){
- int isPhrase;
+ int isPhrase;
- if( !sqlite3_fts3_enable_parentheses
- && p->eType==FTSQUERY_PHRASE && pParse->isNot
- ){
- /* Create an implicit NOT operator. */
- Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr));
- if( !pNot ){
- sqlite3Fts3ExprFree(p);
- rc = SQLITE_NOMEM;
- goto exprparse_out;
- }
- pNot->eType = FTSQUERY_NOT;
- pNot->pRight = p;
- p->pParent = pNot;
- if( pNotBranch ){
- pNot->pLeft = pNotBranch;
- pNotBranch->pParent = pNot;
- }
- pNotBranch = pNot;
- p = pPrev;
- }else{
- int eType = p->eType;
- isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft);
+ if( !sqlite3_fts3_enable_parentheses
+ && p->eType==FTSQUERY_PHRASE && pParse->isNot
+ ){
+ /* Create an implicit NOT operator. */
+ Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr));
+ if( !pNot ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_NOMEM;
+ goto exprparse_out;
+ }
+ pNot->eType = FTSQUERY_NOT;
+ pNot->pRight = p;
+ p->pParent = pNot;
+ if( pNotBranch ){
+ pNot->pLeft = pNotBranch;
+ pNotBranch->pParent = pNot;
+ }
+ pNotBranch = pNot;
+ p = pPrev;
+ }else{
+ int eType = p->eType;
+ isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft);
- /* The isRequirePhrase variable is set to true if a phrase or
- ** an expression contained in parentheses is required. If a
- ** binary operator (AND, OR, NOT or NEAR) is encountered when
- ** isRequirePhrase is set, this is a syntax error.
- */
- if( !isPhrase && isRequirePhrase ){
+ /* The isRequirePhrase variable is set to true if a phrase or
+ ** an expression contained in parentheses is required. If a
+ ** binary operator (AND, OR, NOT or NEAR) is encountered when
+ ** isRequirePhrase is set, this is a syntax error.
+ */
+ if( !isPhrase && isRequirePhrase ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_ERROR;
+ goto exprparse_out;
+ }
+
+ if( isPhrase && !isRequirePhrase ){
+ /* Insert an implicit AND operator. */
+ Fts3Expr *pAnd;
+ assert( pRet && pPrev );
+ pAnd = fts3MallocZero(sizeof(Fts3Expr));
+ if( !pAnd ){
sqlite3Fts3ExprFree(p);
- rc = SQLITE_ERROR;
+ rc = SQLITE_NOMEM;
goto exprparse_out;
}
+ pAnd->eType = FTSQUERY_AND;
+ insertBinaryOperator(&pRet, pPrev, pAnd);
+ pPrev = pAnd;
+ }
- if( isPhrase && !isRequirePhrase ){
- /* Insert an implicit AND operator. */
- Fts3Expr *pAnd;
- assert( pRet && pPrev );
- pAnd = fts3MallocZero(sizeof(Fts3Expr));
- if( !pAnd ){
- sqlite3Fts3ExprFree(p);
- rc = SQLITE_NOMEM;
- goto exprparse_out;
- }
- pAnd->eType = FTSQUERY_AND;
- insertBinaryOperator(&pRet, pPrev, pAnd);
- pPrev = pAnd;
- }
-
- /* This test catches attempts to make either operand of a NEAR
- ** operator something other than a phrase. For example, either of
- ** the following:
- **
- ** (bracketed expression) NEAR phrase
- ** phrase NEAR (bracketed expression)
- **
- ** Return an error in either case.
- */
- if( pPrev && (
+ /* This test catches attempts to make either operand of a NEAR
+ ** operator something other than a phrase. For example, either of
+ ** the following:
+ **
+ ** (bracketed expression) NEAR phrase
+ ** phrase NEAR (bracketed expression)
+ **
+ ** Return an error in either case.
+ */
+ if( pPrev && (
(eType==FTSQUERY_NEAR && !isPhrase && pPrev->eType!=FTSQUERY_PHRASE)
|| (eType!=FTSQUERY_PHRASE && isPhrase && pPrev->eType==FTSQUERY_NEAR)
- )){
- sqlite3Fts3ExprFree(p);
- rc = SQLITE_ERROR;
- goto exprparse_out;
- }
-
- if( isPhrase ){
- if( pRet ){
- assert( pPrev && pPrev->pLeft && pPrev->pRight==0 );
- pPrev->pRight = p;
- p->pParent = pPrev;
- }else{
- pRet = p;
- }
+ )){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_ERROR;
+ goto exprparse_out;
+ }
+
+ if( isPhrase ){
+ if( pRet ){
+ assert( pPrev && pPrev->pLeft && pPrev->pRight==0 );
+ pPrev->pRight = p;
+ p->pParent = pPrev;
}else{
- insertBinaryOperator(&pRet, pPrev, p);
+ pRet = p;
}
- isRequirePhrase = !isPhrase;
+ }else{
+ insertBinaryOperator(&pRet, pPrev, p);
}
- pPrev = p;
+ isRequirePhrase = !isPhrase;
}
assert( nByte>0 );
}
assert( rc!=SQLITE_OK || (nByte>0 && nByte<=nIn) );
nIn -= nByte;
zIn += nByte;
+ pPrev = p;
}
if( rc==SQLITE_DONE && pRet && isRequirePhrase ){
@@ -143126,151 +130689,125 @@ static int fts3ExprBalance(Fts3Expr **pp, int nMaxDepth){
rc = SQLITE_ERROR;
}
- if( rc==SQLITE_OK ){
- if( (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){
- Fts3Expr **apLeaf;
- apLeaf = (Fts3Expr **)sqlite3_malloc(sizeof(Fts3Expr *) * nMaxDepth);
- if( 0==apLeaf ){
- rc = SQLITE_NOMEM;
- }else{
- memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth);
+ if( rc==SQLITE_OK && (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){
+ Fts3Expr **apLeaf;
+ apLeaf = (Fts3Expr **)sqlite3_malloc(sizeof(Fts3Expr *) * nMaxDepth);
+ if( 0==apLeaf ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth);
+ }
+
+ if( rc==SQLITE_OK ){
+ int i;
+ Fts3Expr *p;
+
+ /* Set $p to point to the left-most leaf in the tree of eType nodes. */
+ for(p=pRoot; p->eType==eType; p=p->pLeft){
+ assert( p->pParent==0 || p->pParent->pLeft==p );
+ assert( p->pLeft && p->pRight );
}
- if( rc==SQLITE_OK ){
- int i;
- Fts3Expr *p;
+ /* This loop runs once for each leaf in the tree of eType nodes. */
+ while( 1 ){
+ int iLvl;
+ Fts3Expr *pParent = p->pParent; /* Current parent of p */
- /* Set $p to point to the left-most leaf in the tree of eType nodes. */
- for(p=pRoot; p->eType==eType; p=p->pLeft){
- assert( p->pParent==0 || p->pParent->pLeft==p );
- assert( p->pLeft && p->pRight );
+ assert( pParent==0 || pParent->pLeft==p );
+ p->pParent = 0;
+ if( pParent ){
+ pParent->pLeft = 0;
+ }else{
+ pRoot = 0;
}
+ rc = fts3ExprBalance(&p, nMaxDepth-1);
+ if( rc!=SQLITE_OK ) break;
- /* This loop runs once for each leaf in the tree of eType nodes. */
- while( 1 ){
- int iLvl;
- Fts3Expr *pParent = p->pParent; /* Current parent of p */
-
- assert( pParent==0 || pParent->pLeft==p );
- p->pParent = 0;
- if( pParent ){
- pParent->pLeft = 0;
+ for(iLvl=0; p && iLvl<nMaxDepth; iLvl++){
+ if( apLeaf[iLvl]==0 ){
+ apLeaf[iLvl] = p;
+ p = 0;
}else{
- pRoot = 0;
+ assert( pFree );
+ pFree->pLeft = apLeaf[iLvl];
+ pFree->pRight = p;
+ pFree->pLeft->pParent = pFree;
+ pFree->pRight->pParent = pFree;
+
+ p = pFree;
+ pFree = pFree->pParent;
+ p->pParent = 0;
+ apLeaf[iLvl] = 0;
}
- rc = fts3ExprBalance(&p, nMaxDepth-1);
- if( rc!=SQLITE_OK ) break;
+ }
+ if( p ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_TOOBIG;
+ break;
+ }
+
+ /* If that was the last leaf node, break out of the loop */
+ if( pParent==0 ) break;
+
+ /* Set $p to point to the next leaf in the tree of eType nodes */
+ for(p=pParent->pRight; p->eType==eType; p=p->pLeft);
- for(iLvl=0; p && iLvl<nMaxDepth; iLvl++){
- if( apLeaf[iLvl]==0 ){
- apLeaf[iLvl] = p;
- p = 0;
+ /* Remove pParent from the original tree. */
+ assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent );
+ pParent->pRight->pParent = pParent->pParent;
+ if( pParent->pParent ){
+ pParent->pParent->pLeft = pParent->pRight;
+ }else{
+ assert( pParent==pRoot );
+ pRoot = pParent->pRight;
+ }
+
+ /* Link pParent into the free node list. It will be used as an
+ ** internal node of the new tree. */
+ pParent->pParent = pFree;
+ pFree = pParent;
+ }
+
+ if( rc==SQLITE_OK ){
+ p = 0;
+ for(i=0; i<nMaxDepth; i++){
+ if( apLeaf[i] ){
+ if( p==0 ){
+ p = apLeaf[i];
+ p->pParent = 0;
}else{
- assert( pFree );
- pFree->pLeft = apLeaf[iLvl];
+ assert( pFree!=0 );
pFree->pRight = p;
+ pFree->pLeft = apLeaf[i];
pFree->pLeft->pParent = pFree;
pFree->pRight->pParent = pFree;
p = pFree;
pFree = pFree->pParent;
p->pParent = 0;
- apLeaf[iLvl] = 0;
}
}
- if( p ){
- sqlite3Fts3ExprFree(p);
- rc = SQLITE_TOOBIG;
- break;
- }
-
- /* If that was the last leaf node, break out of the loop */
- if( pParent==0 ) break;
-
- /* Set $p to point to the next leaf in the tree of eType nodes */
- for(p=pParent->pRight; p->eType==eType; p=p->pLeft);
-
- /* Remove pParent from the original tree. */
- assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent );
- pParent->pRight->pParent = pParent->pParent;
- if( pParent->pParent ){
- pParent->pParent->pLeft = pParent->pRight;
- }else{
- assert( pParent==pRoot );
- pRoot = pParent->pRight;
- }
-
- /* Link pParent into the free node list. It will be used as an
- ** internal node of the new tree. */
- pParent->pParent = pFree;
- pFree = pParent;
}
-
- if( rc==SQLITE_OK ){
- p = 0;
- for(i=0; i<nMaxDepth; i++){
- if( apLeaf[i] ){
- if( p==0 ){
- p = apLeaf[i];
- p->pParent = 0;
- }else{
- assert( pFree!=0 );
- pFree->pRight = p;
- pFree->pLeft = apLeaf[i];
- pFree->pLeft->pParent = pFree;
- pFree->pRight->pParent = pFree;
-
- p = pFree;
- pFree = pFree->pParent;
- p->pParent = 0;
- }
- }
- }
- pRoot = p;
- }else{
- /* An error occurred. Delete the contents of the apLeaf[] array
- ** and pFree list. Everything else is cleaned up by the call to
- ** sqlite3Fts3ExprFree(pRoot) below. */
- Fts3Expr *pDel;
- for(i=0; i<nMaxDepth; i++){
- sqlite3Fts3ExprFree(apLeaf[i]);
- }
- while( (pDel=pFree)!=0 ){
- pFree = pDel->pParent;
- sqlite3_free(pDel);
- }
+ pRoot = p;
+ }else{
+ /* An error occurred. Delete the contents of the apLeaf[] array
+ ** and pFree list. Everything else is cleaned up by the call to
+ ** sqlite3Fts3ExprFree(pRoot) below. */
+ Fts3Expr *pDel;
+ for(i=0; i<nMaxDepth; i++){
+ sqlite3Fts3ExprFree(apLeaf[i]);
+ }
+ while( (pDel=pFree)!=0 ){
+ pFree = pDel->pParent;
+ sqlite3_free(pDel);
}
-
- assert( pFree==0 );
- sqlite3_free( apLeaf );
- }
- }else if( eType==FTSQUERY_NOT ){
- Fts3Expr *pLeft = pRoot->pLeft;
- Fts3Expr *pRight = pRoot->pRight;
-
- pRoot->pLeft = 0;
- pRoot->pRight = 0;
- pLeft->pParent = 0;
- pRight->pParent = 0;
-
- rc = fts3ExprBalance(&pLeft, nMaxDepth-1);
- if( rc==SQLITE_OK ){
- rc = fts3ExprBalance(&pRight, nMaxDepth-1);
}
- if( rc!=SQLITE_OK ){
- sqlite3Fts3ExprFree(pRight);
- sqlite3Fts3ExprFree(pLeft);
- }else{
- assert( pLeft && pRight );
- pRoot->pLeft = pLeft;
- pLeft->pParent = pRoot;
- pRoot->pRight = pRight;
- pRight->pParent = pRoot;
- }
+ assert( pFree==0 );
+ sqlite3_free( apLeaf );
}
}
-
+
if( rc!=SQLITE_OK ){
sqlite3Fts3ExprFree(pRoot);
pRoot = 0;
@@ -143381,13 +130918,13 @@ SQLITE_PRIVATE int sqlite3Fts3ExprParse(
sqlite3Fts3ExprFree(*ppExpr);
*ppExpr = 0;
if( rc==SQLITE_TOOBIG ){
- sqlite3Fts3ErrMsg(pzErr,
+ *pzErr = sqlite3_mprintf(
"FTS expression tree is too large (maximum depth %d)",
SQLITE_FTS3_MAX_EXPR_DEPTH
);
rc = SQLITE_ERROR;
}else if( rc==SQLITE_ERROR ){
- sqlite3Fts3ErrMsg(pzErr, "malformed MATCH expression: [%s]", z);
+ *pzErr = sqlite3_mprintf("malformed MATCH expression: [%s]", z);
}
}
@@ -143668,14 +131205,12 @@ SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3* db){
** * The FTS3 module is being built into the core of
** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <assert.h> */
/* #include <stdlib.h> */
/* #include <string.h> */
-/* #include "fts3_hash.h" */
/*
** Malloc and Free functions
@@ -143741,13 +131276,13 @@ SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash *pH){
*/
static int fts3StrHash(const void *pKey, int nKey){
const char *z = (const char *)pKey;
- unsigned h = 0;
+ int h = 0;
if( nKey<=0 ) nKey = (int) strlen(z);
while( nKey > 0 ){
h = (h<<3) ^ h ^ *z++;
nKey--;
}
- return (int)(h & 0x7fffffff);
+ return h & 0x7fffffff;
}
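The hash just above is a plain shift-and-xor over the key bytes, truncated to a non-negative int. A minimal standalone sketch of the same scheme, not part of the SQLite sources and using illustrative names, would be:

#include <stdio.h>
#include <string.h>

/* Illustrative re-implementation of the shift-xor string hash above. */
static int demo_str_hash(const char *z, int nKey){
  unsigned h = 0;
  if( nKey<=0 ) nKey = (int)strlen(z);
  while( nKey>0 ){
    h = (h<<3) ^ h ^ (unsigned char)*z++;
    nKey--;
  }
  return (int)(h & 0x7fffffff);    /* mask keeps the result non-negative */
}

int main(void){
  printf("%d\n", demo_str_hash("sqlite", -1));
  return 0;
}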
static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){
if( n1!=n2 ) return 1;
@@ -144053,7 +131588,6 @@ SQLITE_PRIVATE void *sqlite3Fts3HashInsert(
** * The FTS3 module is being built into the core of
** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <assert.h> */
@@ -144061,7 +131595,6 @@ SQLITE_PRIVATE void *sqlite3Fts3HashInsert(
/* #include <stdio.h> */
/* #include <string.h> */
-/* #include "fts3_tokenizer.h" */
/*
** Class derived from sqlite3_tokenizer
@@ -144214,7 +131747,7 @@ static int isVowel(const char *z){
** by a consonant.
**
** In this routine z[] is in reverse order. So we are really looking
-** for an instance of a consonant followed by a vowel.
+** for an instance of of a consonant followed by a vowel.
*/
static int m_gt_0(const char *z){
while( isVowel(z) ){ z++; }
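For readers unfamiliar with the Porter notation behind m_gt_0() and friends: a word is viewed as [C](VC)^m[V], and m_gt_0() succeeds only when the measure m of the (reversed) stem is at least one. As a worked illustration of mine, not taken from the source: TREE is [C][V] with m = 0, so an (m>0) rule such as "-ational" -> "-ate" leaves it alone; TROUBLE is [C]VC V with m = 1; and RELATIONAL reduces to RELATE because the remaining stem RELAT has m = 2.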
@@ -144434,14 +131967,12 @@ static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
/* Step 2 */
switch( z[1] ){
case 'a':
- if( !stem(&z, "lanoita", "ate", m_gt_0) ){
- stem(&z, "lanoit", "tion", m_gt_0);
- }
+ stem(&z, "lanoita", "ate", m_gt_0) ||
+ stem(&z, "lanoit", "tion", m_gt_0);
break;
case 'c':
- if( !stem(&z, "icne", "ence", m_gt_0) ){
- stem(&z, "icna", "ance", m_gt_0);
- }
+ stem(&z, "icne", "ence", m_gt_0) ||
+ stem(&z, "icna", "ance", m_gt_0);
break;
case 'e':
stem(&z, "rezi", "ize", m_gt_0);
@@ -144450,54 +131981,43 @@ static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
stem(&z, "igol", "log", m_gt_0);
break;
case 'l':
- if( !stem(&z, "ilb", "ble", m_gt_0)
- && !stem(&z, "illa", "al", m_gt_0)
- && !stem(&z, "iltne", "ent", m_gt_0)
- && !stem(&z, "ile", "e", m_gt_0)
- ){
- stem(&z, "ilsuo", "ous", m_gt_0);
- }
+ stem(&z, "ilb", "ble", m_gt_0) ||
+ stem(&z, "illa", "al", m_gt_0) ||
+ stem(&z, "iltne", "ent", m_gt_0) ||
+ stem(&z, "ile", "e", m_gt_0) ||
+ stem(&z, "ilsuo", "ous", m_gt_0);
break;
case 'o':
- if( !stem(&z, "noitazi", "ize", m_gt_0)
- && !stem(&z, "noita", "ate", m_gt_0)
- ){
- stem(&z, "rota", "ate", m_gt_0);
- }
+ stem(&z, "noitazi", "ize", m_gt_0) ||
+ stem(&z, "noita", "ate", m_gt_0) ||
+ stem(&z, "rota", "ate", m_gt_0);
break;
case 's':
- if( !stem(&z, "msila", "al", m_gt_0)
- && !stem(&z, "ssenevi", "ive", m_gt_0)
- && !stem(&z, "ssenluf", "ful", m_gt_0)
- ){
- stem(&z, "ssensuo", "ous", m_gt_0);
- }
+ stem(&z, "msila", "al", m_gt_0) ||
+ stem(&z, "ssenevi", "ive", m_gt_0) ||
+ stem(&z, "ssenluf", "ful", m_gt_0) ||
+ stem(&z, "ssensuo", "ous", m_gt_0);
break;
case 't':
- if( !stem(&z, "itila", "al", m_gt_0)
- && !stem(&z, "itivi", "ive", m_gt_0)
- ){
- stem(&z, "itilib", "ble", m_gt_0);
- }
+ stem(&z, "itila", "al", m_gt_0) ||
+ stem(&z, "itivi", "ive", m_gt_0) ||
+ stem(&z, "itilib", "ble", m_gt_0);
break;
}
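The restored stem(...) || stem(...) statements above are equivalent to the if/else chains being removed because stem() reports success with a non-zero return and || short-circuits, so later alternatives are only tried after earlier ones fail. A trivial sketch of the idiom, with made-up names:

#include <stdio.h>

static int try_rule(const char *zName){
  printf("trying %s\n", zName);
  return zName[0]=='b';            /* pretend only rule "b" applies */
}

int main(void){
  /* Stops after "b"; rule "c" is never attempted. */
  (void)(try_rule("a") || try_rule("b") || try_rule("c"));
  return 0;
}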
/* Step 3 */
switch( z[0] ){
case 'e':
- if( !stem(&z, "etaci", "ic", m_gt_0)
- && !stem(&z, "evita", "", m_gt_0)
- ){
- stem(&z, "ezila", "al", m_gt_0);
- }
+ stem(&z, "etaci", "ic", m_gt_0) ||
+ stem(&z, "evita", "", m_gt_0) ||
+ stem(&z, "ezila", "al", m_gt_0);
break;
case 'i':
stem(&z, "itici", "ic", m_gt_0);
break;
case 'l':
- if( !stem(&z, "laci", "ic", m_gt_0) ){
- stem(&z, "luf", "", m_gt_0);
- }
+ stem(&z, "laci", "ic", m_gt_0) ||
+ stem(&z, "luf", "", m_gt_0);
break;
case 's':
stem(&z, "ssen", "", m_gt_0);
@@ -144538,11 +132058,9 @@ static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
z += 3;
}
}else if( z[2]=='e' ){
- if( !stem(&z, "tneme", "", m_gt_1)
- && !stem(&z, "tnem", "", m_gt_1)
- ){
- stem(&z, "tne", "", m_gt_1);
- }
+ stem(&z, "tneme", "", m_gt_1) ||
+ stem(&z, "tnem", "", m_gt_1) ||
+ stem(&z, "tne", "", m_gt_1);
}
}
break;
@@ -144561,9 +132079,8 @@ static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
}
break;
case 't':
- if( !stem(&z, "eta", "", m_gt_1) ){
- stem(&z, "iti", "", m_gt_1);
- }
+ stem(&z, "eta", "", m_gt_1) ||
+ stem(&z, "iti", "", m_gt_1);
break;
case 'u':
if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){
@@ -144719,7 +132236,6 @@ SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(
** * The FTS3 module is being built into the core of
** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <assert.h> */
@@ -144765,7 +132281,7 @@ static void scalarFunc(
if( argc==2 ){
void *pOld;
int n = sqlite3_value_bytes(argv[1]);
- if( zName==0 || n!=sizeof(pPtr) ){
+ if( n!=sizeof(pPtr) ){
sqlite3_result_error(context, "argument type mismatch", -1);
return;
}
@@ -144776,9 +132292,7 @@ static void scalarFunc(
return;
}
}else{
- if( zName ){
- pPtr = sqlite3Fts3HashFind(pHash, zName, nName);
- }
+ pPtr = sqlite3Fts3HashFind(pHash, zName, nName);
if( !pPtr ){
char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
sqlite3_result_error(context, zErr, -1);
@@ -144859,16 +132373,12 @@ SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(
zEnd = &zCopy[strlen(zCopy)];
z = (char *)sqlite3Fts3NextToken(zCopy, &n);
- if( z==0 ){
- assert( n==0 );
- z = zCopy;
- }
z[n] = '\0';
sqlite3Fts3Dequote(z);
m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash,z,(int)strlen(z)+1);
if( !m ){
- sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", z);
+ *pzErr = sqlite3_mprintf("unknown tokenizer: %s", z);
rc = SQLITE_ERROR;
}else{
char const **aArg = 0;
@@ -144891,7 +132401,7 @@ SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(
rc = m->xCreate(iArg, aArg, ppTok);
assert( rc!=SQLITE_OK || *ppTok );
if( rc!=SQLITE_OK ){
- sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer");
+ *pzErr = sqlite3_mprintf("unknown tokenizer");
}else{
(*ppTok)->pModule = m;
}
@@ -144975,9 +132485,9 @@ static void testFunc(
p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1);
if( !p ){
- char *zErr2 = sqlite3_mprintf("unknown tokenizer: %s", zName);
- sqlite3_result_error(context, zErr2, -1);
- sqlite3_free(zErr2);
+ char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
+ sqlite3_result_error(context, zErr, -1);
+ sqlite3_free(zErr);
return;
}
@@ -145215,7 +132725,6 @@ SQLITE_PRIVATE int sqlite3Fts3InitHashTable(
** * The FTS3 module is being built into the core of
** SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <assert.h> */
@@ -145223,7 +132732,6 @@ SQLITE_PRIVATE int sqlite3Fts3InitHashTable(
/* #include <stdio.h> */
/* #include <string.h> */
-/* #include "fts3_tokenizer.h" */
typedef struct simple_tokenizer {
sqlite3_tokenizer base;
@@ -145468,7 +132976,6 @@ SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(
** pos: Token offset of token within input.
**
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <string.h> */
@@ -145515,7 +133022,7 @@ static int fts3tokQueryTokenizer(
p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1);
if( !p ){
- sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", zName);
+ *pzErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
return SQLITE_ERROR;
}
@@ -145593,7 +133100,7 @@ static int fts3tokConnectMethod(
sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
char **pzErr /* OUT: sqlite3_malloc'd error message */
){
- Fts3tokTable *pTab = 0;
+ Fts3tokTable *pTab;
const sqlite3_tokenizer_module *pMod = 0;
sqlite3_tokenizer *pTok = 0;
int rc;
@@ -145904,7 +133411,6 @@ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){
** code in fts3.c.
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <string.h> */
@@ -146080,7 +133586,6 @@ struct SegmentWriter {
int nSize; /* Size of allocation at aData */
int nData; /* Bytes of data in aData */
char *aData; /* Pointer to block from malloc() */
- i64 nLeafData; /* Number of bytes of leaf data written */
};
/*
@@ -146156,10 +133661,6 @@ struct SegmentNode {
#define SQL_SELECT_INDEXES 35
#define SQL_SELECT_MXLEVEL 36
-#define SQL_SELECT_LEVEL_RANGE2 37
-#define SQL_UPDATE_LEVEL_IDX 38
-#define SQL_UPDATE_LEVEL 39
-
/*
** This function is used to obtain an SQLite prepared statement handle
** for the statement identified by the second argument. If successful,
@@ -146213,7 +133714,7 @@ static int fts3SqlStmt(
/* 25 */ "",
/* 26 */ "DELETE FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?",
-/* 27 */ "SELECT ? UNION SELECT level / (1024 * ?) FROM %Q.'%q_segdir'",
+/* 27 */ "SELECT DISTINCT level / (1024 * ?) FROM %Q.'%q_segdir'",
/* This statement is used to determine which level to read the input from
** when performing an incremental merge. It returns the absolute level number
@@ -146261,18 +133762,7 @@ static int fts3SqlStmt(
/* SQL_SELECT_MXLEVEL
** Return the largest relative level in the FTS index or indexes. */
-/* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'",
-
- /* Return segments in order from oldest to newest.*/
-/* 37 */ "SELECT level, idx, end_block "
- "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? "
- "ORDER BY level DESC, idx ASC",
-
- /* Update statements used while promoting segments */
-/* 38 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=-1,idx=? "
- "WHERE level=? AND idx=?",
-/* 39 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=? WHERE level=-1"
-
+/* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'"
};
int rc = SQLITE_OK;
sqlite3_stmt *pStmt;
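If I read getAbsoluteLevel() and FTS3_SEGDIR_MAXLEVEL (1024) elsewhere in this amalgamation correctly, segdir levels pack (language id, index, relative level) as iAbs = (iLangid*nIndex + iIndex)*1024 + iRel, which is why statement 27 above divides by (1024 * ?) with nIndex bound to the parameter to recover the distinct language ids, and why the removed statements 37-39 select level ranges. For example, with nIndex==2, language id 1, index 0 and relative level 3: iAbs = (1*2 + 0)*1024 + 3 = 2051, and 2051/(1024*2) = 1.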
@@ -146747,12 +134237,10 @@ static int fts3PendingTermsAdd(
*/
static int fts3PendingTermsDocid(
Fts3Table *p, /* Full-text table handle */
- int bDelete, /* True if this op is a delete */
int iLangid, /* Language id of row being written */
sqlite_int64 iDocid /* Docid of row being written */
){
assert( iLangid>=0 );
- assert( bDelete==1 || bDelete==0 );
/* TODO(shess) Explore whether partially flushing the buffer on
** forced-flush would provide better performance. I suspect that if
@@ -146760,8 +134248,7 @@ static int fts3PendingTermsDocid(
** buffer was half empty, that would let the less frequent terms
** generate longer doclists.
*/
- if( iDocid<p->iPrevDocid
- || (iDocid==p->iPrevDocid && p->bPrevDelete==0)
+ if( iDocid<=p->iPrevDocid
|| p->iPrevLangid!=iLangid
|| p->nPendingData>p->nMaxPendingData
){
@@ -146770,7 +134257,6 @@ static int fts3PendingTermsDocid(
}
p->iPrevDocid = iDocid;
p->iPrevLangid = iLangid;
- p->bPrevDelete = bDelete;
return SQLITE_OK;
}
@@ -146960,8 +134446,7 @@ static void fts3DeleteTerms(
if( SQLITE_ROW==sqlite3_step(pSelect) ){
int i;
int iLangid = langidFromSelect(p, pSelect);
- i64 iDocid = sqlite3_column_int64(pSelect, 0);
- rc = fts3PendingTermsDocid(p, 1, iLangid, iDocid);
+ rc = fts3PendingTermsDocid(p, iLangid, sqlite3_column_int64(pSelect, 0));
for(i=1; rc==SQLITE_OK && i<=p->nColumn; i++){
int iCol = i-1;
if( p->abNotindexed[iCol]==0 ){
@@ -147209,19 +134694,14 @@ static int fts3SegReaderNext(
if( fts3SegReaderIsPending(pReader) ){
Fts3HashElem *pElem = *(pReader->ppNextElem);
- sqlite3_free(pReader->aNode);
- pReader->aNode = 0;
- if( pElem ){
- char *aCopy;
+ if( pElem==0 ){
+ pReader->aNode = 0;
+ }else{
PendingList *pList = (PendingList *)fts3HashData(pElem);
- int nCopy = pList->nData+1;
pReader->zTerm = (char *)fts3HashKey(pElem);
pReader->nTerm = fts3HashKeysize(pElem);
- aCopy = (char*)sqlite3_malloc(nCopy);
- if( !aCopy ) return SQLITE_NOMEM;
- memcpy(aCopy, pList->aData, nCopy);
- pReader->nNode = pReader->nDoclist = nCopy;
- pReader->aNode = pReader->aDoclist = aCopy;
+ pReader->nNode = pReader->nDoclist = pList->nData + 1;
+ pReader->aNode = pReader->aDoclist = pList->aData;
pReader->ppNextElem++;
assert( pReader->aNode );
}
@@ -147461,14 +134941,12 @@ SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(
** second argument.
*/
SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *pReader){
- if( pReader ){
- if( !fts3SegReaderIsPending(pReader) ){
- sqlite3_free(pReader->zTerm);
- }
+ if( pReader && !fts3SegReaderIsPending(pReader) ){
+ sqlite3_free(pReader->zTerm);
if( !fts3SegReaderIsRootOnly(pReader) ){
sqlite3_free(pReader->aNode);
+ sqlite3_blob_close(pReader->pBlob);
}
- sqlite3_blob_close(pReader->pBlob);
}
sqlite3_free(pReader);
}
@@ -147524,10 +135002,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(
** an array of pending terms by term. This occurs as part of flushing
** the contents of the pending-terms hash table to the database.
*/
-static int SQLITE_CDECL fts3CompareElemByTerm(
- const void *lhs,
- const void *rhs
-){
+static int fts3CompareElemByTerm(const void *lhs, const void *rhs){
char *z1 = fts3HashKey(*(Fts3HashElem **)lhs);
char *z2 = fts3HashKey(*(Fts3HashElem **)rhs);
int n1 = fts3HashKeysize(*(Fts3HashElem **)lhs);
@@ -147828,7 +135303,6 @@ static int fts3WriteSegdir(
sqlite3_int64 iStartBlock, /* Value for "start_block" field */
sqlite3_int64 iLeafEndBlock, /* Value for "leaves_end_block" field */
sqlite3_int64 iEndBlock, /* Value for "end_block" field */
- sqlite3_int64 nLeafData, /* Bytes of leaf data in segment */
char *zRoot, /* Blob value for "root" field */
int nRoot /* Number of bytes in buffer zRoot */
){
@@ -147839,13 +135313,7 @@ static int fts3WriteSegdir(
sqlite3_bind_int(pStmt, 2, iIdx);
sqlite3_bind_int64(pStmt, 3, iStartBlock);
sqlite3_bind_int64(pStmt, 4, iLeafEndBlock);
- if( nLeafData==0 ){
- sqlite3_bind_int64(pStmt, 5, iEndBlock);
- }else{
- char *zEnd = sqlite3_mprintf("%lld %lld", iEndBlock, nLeafData);
- if( !zEnd ) return SQLITE_NOMEM;
- sqlite3_bind_text(pStmt, 5, zEnd, -1, sqlite3_free);
- }
+ sqlite3_bind_int64(pStmt, 5, iEndBlock);
sqlite3_bind_blob(pStmt, 6, zRoot, nRoot, SQLITE_STATIC);
sqlite3_step(pStmt);
rc = sqlite3_reset(pStmt);
@@ -148171,9 +135639,6 @@ static int fts3SegWriterAdd(
nDoclist; /* Doclist data */
}
- /* Increase the total number of bytes written to account for the new entry. */
- pWriter->nLeafData += nReq;
-
/* If the buffer currently allocated is too small for this entry, realloc
** the buffer to make it large enough.
*/
@@ -148245,13 +135710,13 @@ static int fts3SegWriterFlush(
pWriter->iFirst, pWriter->iFree, &iLast, &zRoot, &nRoot);
}
if( rc==SQLITE_OK ){
- rc = fts3WriteSegdir(p, iLevel, iIdx,
- pWriter->iFirst, iLastLeaf, iLast, pWriter->nLeafData, zRoot, nRoot);
+ rc = fts3WriteSegdir(
+ p, iLevel, iIdx, pWriter->iFirst, iLastLeaf, iLast, zRoot, nRoot);
}
}else{
/* The entire tree fits on the root node. Write it to the segdir table. */
- rc = fts3WriteSegdir(p, iLevel, iIdx,
- 0, 0, 0, pWriter->nLeafData, pWriter->aData, pWriter->nData);
+ rc = fts3WriteSegdir(
+ p, iLevel, iIdx, 0, 0, 0, pWriter->aData, pWriter->nData);
}
p->nLeafAdd++;
return rc;
@@ -148336,37 +135801,6 @@ static int fts3SegmentMaxLevel(
}
/*
-** iAbsLevel is an absolute level that may be assumed to exist within
-** the database. This function checks if it is the largest level number
-** within its index. Assuming no error occurs, *pbMax is set to 1 if
-** iAbsLevel is indeed the largest level, or 0 otherwise, and SQLITE_OK
-** is returned. If an error occurs, an error code is returned and the
-** final value of *pbMax is undefined.
-*/
-static int fts3SegmentIsMaxLevel(Fts3Table *p, i64 iAbsLevel, int *pbMax){
-
- /* Set pStmt to the compiled version of:
- **
- ** SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?
- **
- ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR).
- */
- sqlite3_stmt *pStmt;
- int rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0);
- if( rc!=SQLITE_OK ) return rc;
- sqlite3_bind_int64(pStmt, 1, iAbsLevel+1);
- sqlite3_bind_int64(pStmt, 2,
- ((iAbsLevel/FTS3_SEGDIR_MAXLEVEL)+1) * FTS3_SEGDIR_MAXLEVEL
- );
-
- *pbMax = 0;
- if( SQLITE_ROW==sqlite3_step(pStmt) ){
- *pbMax = sqlite3_column_type(pStmt, 0)==SQLITE_NULL;
- }
- return sqlite3_reset(pStmt);
-}
-
-/*
** Delete all entries in the %_segments table associated with the segment
** opened with seg-reader pSeg. This function does not affect the contents
** of the %_segdir table.
@@ -148902,140 +136336,6 @@ SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(
}
/*
-** Decode the "end_block" field, selected by column iCol of the SELECT
-** statement passed as the first argument.
-**
-** The "end_block" field may contain either an integer, or a text field
-** containing the text representation of two non-negative integers separated
-** by one or more space (0x20) characters. In the first case, set *piEndBlock
-** to the integer value and *pnByte to zero before returning. In the second,
-** set *piEndBlock to the first value and *pnByte to the second.
-*/
-static void fts3ReadEndBlockField(
- sqlite3_stmt *pStmt,
- int iCol,
- i64 *piEndBlock,
- i64 *pnByte
-){
- const unsigned char *zText = sqlite3_column_text(pStmt, iCol);
- if( zText ){
- int i;
- int iMul = 1;
- i64 iVal = 0;
- for(i=0; zText[i]>='0' && zText[i]<='9'; i++){
- iVal = iVal*10 + (zText[i] - '0');
- }
- *piEndBlock = iVal;
- while( zText[i]==' ' ) i++;
- iVal = 0;
- if( zText[i]=='-' ){
- i++;
- iMul = -1;
- }
- for(/* no-op */; zText[i]>='0' && zText[i]<='9'; i++){
- iVal = iVal*10 + (zText[i] - '0');
- }
- *pnByte = (iVal * (i64)iMul);
- }
-}
-
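To make the decoder being removed above concrete: end_block holds either a single block number or the text "<end block> <leaf bytes>", with the second value stored negated while an incremental merge is still appending to the segment, judging by the code further down. A rough standalone sketch of the same parse, illustrative only and not the SQLite routine:

#include <stdio.h>

typedef long long i64;

/* Parse end_block text of the form "<iEndBlock>" or "<iEndBlock> <nByte>". */
static void demo_read_end_block(const char *zText, i64 *piEndBlock, i64 *pnByte){
  int i = 0, iMul = 1;
  i64 iVal = 0;
  for(; zText[i]>='0' && zText[i]<='9'; i++) iVal = iVal*10 + (zText[i]-'0');
  *piEndBlock = iVal;
  while( zText[i]==' ' ) i++;
  if( zText[i]=='-' ){ i++; iMul = -1; }
  for(iVal=0; zText[i]>='0' && zText[i]<='9'; i++) iVal = iVal*10 + (zText[i]-'0');
  *pnByte = iVal * iMul;
}

int main(void){
  i64 iEnd = 0, nByte = 0;
  demo_read_end_block("12345 -678", &iEnd, &nByte);
  printf("end_block=%lld leaf-bytes=%lld\n", iEnd, nByte);   /* 12345 and -678 */
  return 0;
}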
-
-/*
-** A segment of size nByte bytes has just been written to absolute level
-** iAbsLevel. Promote any segments that should be promoted as a result.
-*/
-static int fts3PromoteSegments(
- Fts3Table *p, /* FTS table handle */
- sqlite3_int64 iAbsLevel, /* Absolute level just updated */
- sqlite3_int64 nByte /* Size of new segment at iAbsLevel */
-){
- int rc = SQLITE_OK;
- sqlite3_stmt *pRange;
-
- rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE2, &pRange, 0);
-
- if( rc==SQLITE_OK ){
- int bOk = 0;
- i64 iLast = (iAbsLevel/FTS3_SEGDIR_MAXLEVEL + 1) * FTS3_SEGDIR_MAXLEVEL - 1;
- i64 nLimit = (nByte*3)/2;
-
- /* Loop through all entries in the %_segdir table corresponding to
- ** segments in this index on levels greater than iAbsLevel. If there is
- ** at least one such segment, and it is possible to determine that all
- ** such segments are smaller than nLimit bytes in size, they will be
- ** promoted to level iAbsLevel. */
- sqlite3_bind_int64(pRange, 1, iAbsLevel+1);
- sqlite3_bind_int64(pRange, 2, iLast);
- while( SQLITE_ROW==sqlite3_step(pRange) ){
- i64 nSize = 0, dummy;
- fts3ReadEndBlockField(pRange, 2, &dummy, &nSize);
- if( nSize<=0 || nSize>nLimit ){
- /* If nSize==0, then the %_segdir.end_block field does not not
- ** contain a size value. This happens if it was written by an
- ** old version of FTS. In this case it is not possible to determine
- ** the size of the segment, and so segment promotion does not
- ** take place. */
- bOk = 0;
- break;
- }
- bOk = 1;
- }
- rc = sqlite3_reset(pRange);
-
- if( bOk ){
- int iIdx = 0;
- sqlite3_stmt *pUpdate1 = 0;
- sqlite3_stmt *pUpdate2 = 0;
-
- if( rc==SQLITE_OK ){
- rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL_IDX, &pUpdate1, 0);
- }
- if( rc==SQLITE_OK ){
- rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL, &pUpdate2, 0);
- }
-
- if( rc==SQLITE_OK ){
-
- /* Loop through all %_segdir entries for segments in this index with
- ** levels equal to or greater than iAbsLevel. As each entry is visited,
- ** updated it to set (level = -1) and (idx = N), where N is 0 for the
- ** oldest segment in the range, 1 for the next oldest, and so on.
- **
- ** In other words, move all segments being promoted to level -1,
- ** setting the "idx" fields as appropriate to keep them in the same
- ** order. The contents of level -1 (which is never used, except
- ** transiently here), will be moved back to level iAbsLevel below. */
- sqlite3_bind_int64(pRange, 1, iAbsLevel);
- while( SQLITE_ROW==sqlite3_step(pRange) ){
- sqlite3_bind_int(pUpdate1, 1, iIdx++);
- sqlite3_bind_int(pUpdate1, 2, sqlite3_column_int(pRange, 0));
- sqlite3_bind_int(pUpdate1, 3, sqlite3_column_int(pRange, 1));
- sqlite3_step(pUpdate1);
- rc = sqlite3_reset(pUpdate1);
- if( rc!=SQLITE_OK ){
- sqlite3_reset(pRange);
- break;
- }
- }
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3_reset(pRange);
- }
-
- /* Move level -1 to level iAbsLevel */
- if( rc==SQLITE_OK ){
- sqlite3_bind_int64(pUpdate2, 1, iAbsLevel);
- sqlite3_step(pUpdate2);
- rc = sqlite3_reset(pUpdate2);
- }
- }
- }
-
-
- return rc;
-}
-
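A quick worked example of the promotion rule in the function being removed: suppose a merge has just written a 600,000 byte segment to absolute level L, so nLimit = (600000*3)/2 = 900,000. If every segment on the higher levels of the same index records a size in its end_block field and none exceeds 900,000 bytes, they are all moved down to level L via the transient level -1 shuffle, presumably so the new, larger segment is not stranded below older, smaller ones. If any higher segment is bigger than nLimit, or predates size tracking, nothing is promoted.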
-/*
** Merge all level iLevel segments in the database into a single
** iLevel+1 segment. Or, if iLevel<0, merge all segments into a
** single segment with a level equal to the numerically largest level
@@ -149059,7 +136359,6 @@ static int fts3SegmentMerge(
Fts3SegFilter filter; /* Segment term filter condition */
Fts3MultiSegReader csr; /* Cursor to iterate through level(s) */
int bIgnoreEmpty = 0; /* True to ignore empty segments */
- i64 iMaxLevel = 0; /* Max level number for this index/langid */
assert( iLevel==FTS3_SEGCURSOR_ALL
|| iLevel==FTS3_SEGCURSOR_PENDING
@@ -149071,11 +136370,6 @@ static int fts3SegmentMerge(
rc = sqlite3Fts3SegReaderCursor(p, iLangid, iIndex, iLevel, 0, 0, 1, 0, &csr);
if( rc!=SQLITE_OK || csr.nSegment==0 ) goto finished;
- if( iLevel!=FTS3_SEGCURSOR_PENDING ){
- rc = fts3SegmentMaxLevel(p, iLangid, iIndex, &iMaxLevel);
- if( rc!=SQLITE_OK ) goto finished;
- }
-
if( iLevel==FTS3_SEGCURSOR_ALL ){
/* This call is to merge all segments in the database to a single
** segment. The level of the new segment is equal to the numerically
@@ -149085,21 +136379,21 @@ static int fts3SegmentMerge(
rc = SQLITE_DONE;
goto finished;
}
- iNewLevel = iMaxLevel;
+ rc = fts3SegmentMaxLevel(p, iLangid, iIndex, &iNewLevel);
bIgnoreEmpty = 1;
+ }else if( iLevel==FTS3_SEGCURSOR_PENDING ){
+ iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, 0);
+ rc = fts3AllocateSegdirIdx(p, iLangid, iIndex, 0, &iIdx);
}else{
/* This call is to merge all segments at level iLevel. find the next
** available segment index at level iLevel+1. The call to
** fts3AllocateSegdirIdx() will merge the segments at level iLevel+1 to
** a single iLevel+2 segment if necessary. */
- assert( FTS3_SEGCURSOR_PENDING==-1 );
- iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, iLevel+1);
rc = fts3AllocateSegdirIdx(p, iLangid, iIndex, iLevel+1, &iIdx);
- bIgnoreEmpty = (iLevel!=FTS3_SEGCURSOR_PENDING) && (iNewLevel>iMaxLevel);
+ iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, iLevel+1);
}
if( rc!=SQLITE_OK ) goto finished;
-
assert( csr.nSegment>0 );
assert( iNewLevel>=getAbsoluteLevel(p, iLangid, iIndex, 0) );
assert( iNewLevel<getAbsoluteLevel(p, iLangid, iIndex,FTS3_SEGDIR_MAXLEVEL) );
@@ -149116,7 +136410,7 @@ static int fts3SegmentMerge(
csr.zTerm, csr.nTerm, csr.aDoclist, csr.nDoclist);
}
if( rc!=SQLITE_OK ) goto finished;
- assert( pWriter || bIgnoreEmpty );
+ assert( pWriter );
if( iLevel!=FTS3_SEGCURSOR_PENDING ){
rc = fts3DeleteSegdir(
@@ -149124,14 +136418,7 @@ static int fts3SegmentMerge(
);
if( rc!=SQLITE_OK ) goto finished;
}
- if( pWriter ){
- rc = fts3SegWriterFlush(p, pWriter, iNewLevel, iIdx);
- if( rc==SQLITE_OK ){
- if( iLevel==FTS3_SEGCURSOR_PENDING || iNewLevel<iMaxLevel ){
- rc = fts3PromoteSegments(p, iNewLevel, pWriter->nLeafData);
- }
- }
- }
+ rc = fts3SegWriterFlush(p, pWriter, iNewLevel, iIdx);
finished:
fts3SegWriterFree(pWriter);
@@ -149141,7 +136428,7 @@ static int fts3SegmentMerge(
/*
-** Flush the contents of pendingTerms to level 0 segments.
+** Flush the contents of pendingTerms to level 0 segments.
*/
SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
int rc = SQLITE_OK;
@@ -149157,19 +136444,14 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
** estimate the number of leaf blocks of content to be written
*/
if( rc==SQLITE_OK && p->bHasStat
- && p->nAutoincrmerge==0xff && p->nLeafAdd>0
+ && p->bAutoincrmerge==0xff && p->nLeafAdd>0
){
sqlite3_stmt *pStmt = 0;
rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0);
if( rc==SQLITE_OK ){
sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
rc = sqlite3_step(pStmt);
- if( rc==SQLITE_ROW ){
- p->nAutoincrmerge = sqlite3_column_int(pStmt, 0);
- if( p->nAutoincrmerge==1 ) p->nAutoincrmerge = 8;
- }else if( rc==SQLITE_DONE ){
- p->nAutoincrmerge = 0;
- }
+ p->bAutoincrmerge = (rc==SQLITE_ROW && sqlite3_column_int(pStmt, 0));
rc = sqlite3_reset(pStmt);
}
}
@@ -149343,8 +136625,7 @@ static int fts3DoOptimize(Fts3Table *p, int bReturnDone){
rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
if( rc==SQLITE_OK ){
int rc2;
- sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid);
- sqlite3_bind_int(pAllLangid, 2, p->nIndex);
+ sqlite3_bind_int(pAllLangid, 1, p->nIndex);
while( sqlite3_step(pAllLangid)==SQLITE_ROW ){
int i;
int iLangid = sqlite3_column_int(pAllLangid, 0);
@@ -149411,7 +136692,7 @@ static int fts3DoRebuild(Fts3Table *p){
while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
int iCol;
int iLangid = langidFromSelect(p, pStmt);
- rc = fts3PendingTermsDocid(p, 0, iLangid, sqlite3_column_int64(pStmt, 0));
+ rc = fts3PendingTermsDocid(p, iLangid, sqlite3_column_int64(pStmt, 0));
memset(aSz, 0, sizeof(aSz[0]) * (p->nColumn+1));
for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){
if( p->abNotindexed[iCol]==0 ){
@@ -149538,8 +136819,6 @@ struct IncrmergeWriter {
int iIdx; /* Index of *output* segment in iAbsLevel+1 */
sqlite3_int64 iStart; /* Block number of first allocated block */
sqlite3_int64 iEnd; /* Block number of last allocated block */
- sqlite3_int64 nLeafData; /* Bytes of leaf page data so far */
- u8 bNoLeafData; /* If true, store 0 for segment size */
NodeWriter aNodeWriter[FTS_MAX_APPENDABLE_HEIGHT];
};
@@ -149878,8 +137157,8 @@ static int fts3IncrmergeAppend(
nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist;
}
- pWriter->nLeafData += nSpace;
blobGrowBuffer(&pLeaf->block, pLeaf->block.n + nSpace, &rc);
+
if( rc==SQLITE_OK ){
if( pLeaf->block.n==0 ){
pLeaf->block.n = 1;
@@ -149978,7 +137257,6 @@ static void fts3IncrmergeRelease(
pWriter->iStart, /* start_block */
pWriter->aNodeWriter[0].iBlock, /* leaves_end_block */
pWriter->iEnd, /* end_block */
- (pWriter->bNoLeafData==0 ? pWriter->nLeafData : 0), /* end_block */
pRoot->block.a, pRoot->block.n /* root */
);
}
@@ -150080,11 +137358,7 @@ static int fts3IncrmergeLoad(
if( sqlite3_step(pSelect)==SQLITE_ROW ){
iStart = sqlite3_column_int64(pSelect, 1);
iLeafEnd = sqlite3_column_int64(pSelect, 2);
- fts3ReadEndBlockField(pSelect, 3, &iEnd, &pWriter->nLeafData);
- if( pWriter->nLeafData<0 ){
- pWriter->nLeafData = pWriter->nLeafData * -1;
- }
- pWriter->bNoLeafData = (pWriter->nLeafData==0);
+ iEnd = sqlite3_column_int64(pSelect, 3);
nRoot = sqlite3_column_bytes(pSelect, 4);
aRoot = sqlite3_column_blob(pSelect, 4);
}else{
@@ -150676,7 +137950,7 @@ static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){
pHint->n = i;
i += sqlite3Fts3GetVarint(&pHint->a[i], piAbsLevel);
i += fts3GetVarint32(&pHint->a[i], pnInput);
- if( i!=nHint ) return FTS_CORRUPT_VTAB;
+ if( i!=nHint ) return SQLITE_CORRUPT_VTAB;
return SQLITE_OK;
}
@@ -150685,11 +137959,11 @@ static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){
/*
** Attempt an incremental merge that writes nMerge leaf blocks.
**
-** Incremental merges happen nMin segments at a time. The segments
-** to be merged are the nMin oldest segments (the ones with the smallest
-** values for the _segdir.idx field) in the highest level that contains
-** at least nMin segments. Multiple merges might occur in an attempt to
-** write the quota of nMerge leaf blocks.
+** Incremental merges happen nMin segments at a time. The two
+** segments to be merged are the nMin oldest segments (the ones with
+** the smallest indexes) in the highest level that contains at least
+** nMin segments. Multiple merges might occur in an attempt to write the
+** quota of nMerge leaf blocks.
*/
SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
int rc; /* Return code */
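For context that is not visible in this hunk: in the documented FTS4 interface this routine is reached through the special 'merge=blocks,segments' command written to the table's hidden column, so an application nudge looks roughly like the sketch below, where the table name "pages" is hypothetical:

#include <sqlite3.h>
#include <stdio.h>

/* Ask FTS4 for an incremental merge: write up to ~500 leaf blocks,
** merging at least 8 segments at a time (table "pages" is made up). */
int demo_incrmerge(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db,
      "INSERT INTO pages(pages) VALUES('merge=500,8');", 0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "merge failed: %s\n", zErr ? zErr : "out of memory");
    sqlite3_free(zErr);
  }
  return rc;
}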
@@ -150714,7 +137988,6 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
const i64 nMod = FTS3_SEGDIR_MAXLEVEL * p->nIndex;
sqlite3_stmt *pFindLevel = 0; /* SQL used to determine iAbsLevel */
int bUseHint = 0; /* True if attempting to append */
- int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */
/* Search the %_segdir table for the absolute level with the smallest
** relative level number that contains at least nMin segments, if any.
@@ -150768,19 +138041,6 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
** to start work on some other level. */
memset(pWriter, 0, nAlloc);
pFilter->flags = FTS3_SEGMENT_REQUIRE_POS;
-
- if( rc==SQLITE_OK ){
- rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx);
- assert( bUseHint==1 || bUseHint==0 );
- if( iIdx==0 || (bUseHint && iIdx==1) ){
- int bIgnore = 0;
- rc = fts3SegmentIsMaxLevel(p, iAbsLevel+1, &bIgnore);
- if( bIgnore ){
- pFilter->flags |= FTS3_SEGMENT_IGNORE_EMPTY;
- }
- }
- }
-
if( rc==SQLITE_OK ){
rc = fts3IncrmergeCsr(p, iAbsLevel, nSeg, pCsr);
}
@@ -150788,12 +138048,16 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
&& SQLITE_OK==(rc = sqlite3Fts3SegReaderStart(p, pCsr, pFilter))
&& SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pCsr))
){
- if( bUseHint && iIdx>0 ){
- const char *zKey = pCsr->zTerm;
- int nKey = pCsr->nTerm;
- rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter);
- }else{
- rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter);
+ int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */
+ rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx);
+ if( rc==SQLITE_OK ){
+ if( bUseHint && iIdx>0 ){
+ const char *zKey = pCsr->zTerm;
+ int nKey = pCsr->nTerm;
+ rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter);
+ }else{
+ rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter);
+ }
}
if( rc==SQLITE_OK && pWriter->nLeafEst ){
@@ -150815,13 +138079,7 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
}
}
- if( nSeg!=0 ){
- pWriter->nLeafData = pWriter->nLeafData * -1;
- }
fts3IncrmergeRelease(p, pWriter, &rc);
- if( nSeg==0 && pWriter->bNoLeafData==0 ){
- fts3PromoteSegments(p, iAbsLevel+1, pWriter->nLeafData);
- }
}
sqlite3Fts3SegReaderFinish(pCsr);
@@ -150908,10 +138166,7 @@ static int fts3DoAutoincrmerge(
){
int rc = SQLITE_OK;
sqlite3_stmt *pStmt = 0;
- p->nAutoincrmerge = fts3Getint(&zParam);
- if( p->nAutoincrmerge==1 || p->nAutoincrmerge>FTS3_MERGE_COUNT ){
- p->nAutoincrmerge = 8;
- }
+ p->bAutoincrmerge = fts3Getint(&zParam)!=0;
if( !p->bHasStat ){
assert( p->bFts4==0 );
sqlite3Fts3CreateStatTable(&rc, p);
@@ -150920,7 +138175,7 @@ static int fts3DoAutoincrmerge(
rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0);
if( rc ) return rc;
sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
- sqlite3_bind_int(pStmt, 2, p->nAutoincrmerge);
+ sqlite3_bind_int(pStmt, 2, p->bAutoincrmerge);
sqlite3_step(pStmt);
rc = sqlite3_reset(pStmt);
return rc;
@@ -151044,8 +138299,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
if( rc==SQLITE_OK ){
int rc2;
- sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid);
- sqlite3_bind_int(pAllLangid, 2, p->nIndex);
+ sqlite3_bind_int(pAllLangid, 1, p->nIndex);
while( rc==SQLITE_OK && sqlite3_step(pAllLangid)==SQLITE_ROW ){
int iLangid = sqlite3_column_int(pAllLangid, 0);
int i;
@@ -151058,6 +138312,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
}
/* This block calculates the checksum according to the %_content table */
+ rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
if( rc==SQLITE_OK ){
sqlite3_tokenizer_module const *pModule = p->pTokenizer->pModule;
sqlite3_stmt *pStmt = 0;
@@ -151077,36 +138332,34 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
int iCol;
for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){
- if( p->abNotindexed[iCol]==0 ){
- const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1);
- int nText = sqlite3_column_bytes(pStmt, iCol+1);
- sqlite3_tokenizer_cursor *pT = 0;
-
- rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, nText,&pT);
- while( rc==SQLITE_OK ){
- char const *zToken; /* Buffer containing token */
- int nToken = 0; /* Number of bytes in token */
- int iDum1 = 0, iDum2 = 0; /* Dummy variables */
- int iPos = 0; /* Position of token in zText */
-
- rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos);
- if( rc==SQLITE_OK ){
- int i;
- cksum2 = cksum2 ^ fts3ChecksumEntry(
- zToken, nToken, iLang, 0, iDocid, iCol, iPos
- );
- for(i=1; i<p->nIndex; i++){
- if( p->aIndex[i].nPrefix<=nToken ){
- cksum2 = cksum2 ^ fts3ChecksumEntry(
- zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos
- );
- }
+ const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1);
+ int nText = sqlite3_column_bytes(pStmt, iCol+1);
+ sqlite3_tokenizer_cursor *pT = 0;
+
+ rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, nText, &pT);
+ while( rc==SQLITE_OK ){
+ char const *zToken; /* Buffer containing token */
+ int nToken = 0; /* Number of bytes in token */
+ int iDum1 = 0, iDum2 = 0; /* Dummy variables */
+ int iPos = 0; /* Position of token in zText */
+
+ rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos);
+ if( rc==SQLITE_OK ){
+ int i;
+ cksum2 = cksum2 ^ fts3ChecksumEntry(
+ zToken, nToken, iLang, 0, iDocid, iCol, iPos
+ );
+ for(i=1; i<p->nIndex; i++){
+ if( p->aIndex[i].nPrefix<=nToken ){
+ cksum2 = cksum2 ^ fts3ChecksumEntry(
+ zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos
+ );
}
}
}
- if( pT ) pModule->xClose(pT);
- if( rc==SQLITE_DONE ) rc = SQLITE_OK;
}
+ if( pT ) pModule->xClose(pT);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
}
}
@@ -151154,7 +138407,7 @@ static int fts3DoIntegrityCheck(
int rc;
int bOk = 0;
rc = fts3IntegrityCheck(p, &bOk);
- if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB;
+ if( rc==SQLITE_OK && bOk==0 ) rc = SQLITE_CORRUPT_VTAB;
return rc;
}
@@ -151411,10 +138664,6 @@ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(
int nChng = 0; /* Net change in number of documents */
int bInsertDone = 0;
- /* At this point it must be known if the %_stat table exists or not.
- ** So bHasStat may not be 2. */
- assert( p->bHasStat==0 || p->bHasStat==1 );
-
assert( p->pSegments==0 );
assert(
nArg==1 /* DELETE operations */
@@ -151516,7 +138765,7 @@ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(
}
}
if( rc==SQLITE_OK && (!isRemove || *pRowid!=p->iPrevDocid ) ){
- rc = fts3PendingTermsDocid(p, 0, iLangid, *pRowid);
+ rc = fts3PendingTermsDocid(p, iLangid, *pRowid);
}
if( rc==SQLITE_OK ){
assert( p->iPrevDocid==*pRowid );
@@ -151577,7 +138826,6 @@ SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){
******************************************************************************
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <string.h> */
@@ -151593,8 +138841,6 @@ SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){
#define FTS3_MATCHINFO_LENGTH 'l' /* nCol values */
#define FTS3_MATCHINFO_LCS 's' /* nCol values */
#define FTS3_MATCHINFO_HITS 'x' /* 3*nCol*nPhrase values */
-#define FTS3_MATCHINFO_LHITS 'y' /* nCol*nPhrase values */
-#define FTS3_MATCHINFO_LHITS_BM 'b' /* nCol*nPhrase values */
/*
** The default value for the second argument to matchinfo().
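As a usage illustration, with a hypothetical FTS4 table named "docs": matchinfo() returns a blob of 32-bit unsigned integers whose layout follows the format string, so the 'x' directive defined above contributes 3*nCol*nPhrase counters per row. A sketch of reading them through the ordinary C API:

#include <sqlite3.h>
#include <stdio.h>

/* Print the raw matchinfo 'x' counters for every row matching the query.
** Table and query text are illustrative only. */
int demo_matchinfo(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT matchinfo(docs, 'x') FROM docs WHERE docs MATCH 'incremental merge'",
      -1, &pStmt, 0);
  while( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    const unsigned int *a = (const unsigned int *)sqlite3_column_blob(pStmt, 0);
    int n = sqlite3_column_bytes(pStmt, 0) / (int)sizeof(unsigned int);
    int i;
    for(i=0; i<n; i++) printf("%u ", a[i]);
    printf("\n");
  }
  sqlite3_finalize(pStmt);
  return rc;
}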
@@ -151656,22 +138902,9 @@ struct MatchInfo {
int nCol; /* Number of columns in table */
int nPhrase; /* Number of matchable phrases in query */
sqlite3_int64 nDoc; /* Number of docs in database */
- char flag;
u32 *aMatchinfo; /* Pre-allocated buffer */
};
-/*
-** An instance of this structure is used to manage a pair of buffers, each
-** (nElem * sizeof(u32)) bytes in size. See the MatchinfoBuffer code below
-** for details.
-*/
-struct MatchinfoBuffer {
- u8 aRef[3];
- int nElem;
- int bGlobal; /* Set if global data is loaded */
- char *zMatchinfo;
- u32 aMatchinfo[1];
-};
/*
@@ -151687,97 +138920,6 @@ struct StrBuffer {
};
-/*************************************************************************
-** Start of MatchinfoBuffer code.
-*/
-
-/*
-** Allocate a two-slot MatchinfoBuffer object.
-*/
-static MatchinfoBuffer *fts3MIBufferNew(int nElem, const char *zMatchinfo){
- MatchinfoBuffer *pRet;
- int nByte = sizeof(u32) * (2*nElem + 1) + sizeof(MatchinfoBuffer);
- int nStr = (int)strlen(zMatchinfo);
-
- pRet = sqlite3_malloc(nByte + nStr+1);
- if( pRet ){
- memset(pRet, 0, nByte);
- pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet;
- pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + sizeof(u32)*(nElem+1);
- pRet->nElem = nElem;
- pRet->zMatchinfo = ((char*)pRet) + nByte;
- memcpy(pRet->zMatchinfo, zMatchinfo, nStr+1);
- pRet->aRef[0] = 1;
- }
-
- return pRet;
-}
-
-static void fts3MIBufferFree(void *p){
- MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]);
-
- assert( (u32*)p==&pBuf->aMatchinfo[1]
- || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2]
- );
- if( (u32*)p==&pBuf->aMatchinfo[1] ){
- pBuf->aRef[1] = 0;
- }else{
- pBuf->aRef[2] = 0;
- }
-
- if( pBuf->aRef[0]==0 && pBuf->aRef[1]==0 && pBuf->aRef[2]==0 ){
- sqlite3_free(pBuf);
- }
-}
-
-static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){
- void (*xRet)(void*) = 0;
- u32 *aOut = 0;
-
- if( p->aRef[1]==0 ){
- p->aRef[1] = 1;
- aOut = &p->aMatchinfo[1];
- xRet = fts3MIBufferFree;
- }
- else if( p->aRef[2]==0 ){
- p->aRef[2] = 1;
- aOut = &p->aMatchinfo[p->nElem+2];
- xRet = fts3MIBufferFree;
- }else{
- aOut = (u32*)sqlite3_malloc(p->nElem * sizeof(u32));
- if( aOut ){
- xRet = sqlite3_free;
- if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32));
- }
- }
-
- *paOut = aOut;
- return xRet;
-}
-
-static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){
- p->bGlobal = 1;
- memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32));
-}
-
-/*
-** Free a MatchinfoBuffer object allocated using fts3MIBufferNew()
-*/
-SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p){
- if( p ){
- assert( p->aRef[0]==1 );
- p->aRef[0] = 0;
- if( p->aRef[0]==0 && p->aRef[1]==0 && p->aRef[2]==0 ){
- sqlite3_free(p);
- }
- }
-}
-
-/*
-** End of MatchinfoBuffer code.
-*************************************************************************/
-
-
/*
** This function is used to help iterate through a position-list. A position
** list is a list of unique integers, sorted from smallest to largest. Each
@@ -151814,7 +138956,7 @@ static int fts3ExprIterate2(
void *pCtx /* Second argument to pass to callback */
){
int rc; /* Return code */
- int eType = pExpr->eType; /* Type of expression node pExpr */
+ int eType = pExpr->eType; /* Type of expression node pExpr */
if( eType!=FTSQUERY_PHRASE ){
assert( pExpr->pLeft && pExpr->pRight );
@@ -151848,7 +138990,6 @@ static int fts3ExprIterate(
return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx);
}
-
/*
** This is an fts3ExprIterate() callback used while loading the doclists
** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also
@@ -151893,7 +139034,8 @@ static int fts3ExprLoadDoclists(
static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
(*(int *)ctx)++;
- pExpr->iPhrase = iPhrase;
+ UNUSED_PARAMETER(pExpr);
+ UNUSED_PARAMETER(iPhrase);
return SQLITE_OK;
}
static int fts3ExprPhraseCount(Fts3Expr *pExpr){
@@ -152114,39 +139256,37 @@ static int fts3BestSnippet(
sIter.nSnippet = nSnippet;
sIter.nPhrase = nList;
sIter.iCurrent = -1;
- rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter);
- if( rc==SQLITE_OK ){
+ (void)fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void *)&sIter);
- /* Set the *pmSeen output variable. */
- for(i=0; i<nList; i++){
- if( sIter.aPhrase[i].pHead ){
- *pmSeen |= (u64)1 << i;
- }
+ /* Set the *pmSeen output variable. */
+ for(i=0; i<nList; i++){
+ if( sIter.aPhrase[i].pHead ){
+ *pmSeen |= (u64)1 << i;
}
+ }
- /* Loop through all candidate snippets. Store the best snippet in
- ** *pFragment. Store its associated 'score' in iBestScore.
- */
- pFragment->iCol = iCol;
- while( !fts3SnippetNextCandidate(&sIter) ){
- int iPos;
- int iScore;
- u64 mCover;
- u64 mHighlite;
- fts3SnippetDetails(&sIter, mCovered, &iPos, &iScore, &mCover,&mHighlite);
- assert( iScore>=0 );
- if( iScore>iBestScore ){
- pFragment->iPos = iPos;
- pFragment->hlmask = mHighlite;
- pFragment->covered = mCover;
- iBestScore = iScore;
- }
+ /* Loop through all candidate snippets. Store the best snippet in
+ ** *pFragment. Store its associated 'score' in iBestScore.
+ */
+ pFragment->iCol = iCol;
+ while( !fts3SnippetNextCandidate(&sIter) ){
+ int iPos;
+ int iScore;
+ u64 mCover;
+ u64 mHighlight;
+ fts3SnippetDetails(&sIter, mCovered, &iPos, &iScore, &mCover, &mHighlight);
+ assert( iScore>=0 );
+ if( iScore>iBestScore ){
+ pFragment->iPos = iPos;
+ pFragment->hlmask = mHighlight;
+ pFragment->covered = mCover;
+ iBestScore = iScore;
}
-
- *piScore = iBestScore;
}
+
sqlite3_free(sIter.aPhrase);
- return rc;
+ *piScore = iBestScore;
+ return SQLITE_OK;
}
@@ -152354,12 +139494,8 @@ static int fts3SnippetText(
** required. They are required if (a) this is not the first fragment,
** or (b) this fragment does not begin at position 0 of its column.
*/
- if( rc==SQLITE_OK ){
- if( iPos>0 || iFragment>0 ){
- rc = fts3StringAppend(pOut, zEllipsis, -1);
- }else if( iBegin ){
- rc = fts3StringAppend(pOut, zDoc, iBegin);
- }
+ if( rc==SQLITE_OK && (iPos>0 || iFragment>0) ){
+ rc = fts3StringAppend(pOut, zEllipsis, -1);
}
if( rc!=SQLITE_OK || iCurrent<iPos ) continue;
}
@@ -152416,60 +139552,6 @@ static int fts3ColumnlistCount(char **ppCollist){
}
/*
-** This function gathers 'y' or 'b' data for a single phrase.
-*/
-static void fts3ExprLHits(
- Fts3Expr *pExpr, /* Phrase expression node */
- MatchInfo *p /* Matchinfo context */
-){
- Fts3Table *pTab = (Fts3Table *)p->pCursor->base.pVtab;
- int iStart;
- Fts3Phrase *pPhrase = pExpr->pPhrase;
- char *pIter = pPhrase->doclist.pList;
- int iCol = 0;
-
- assert( p->flag==FTS3_MATCHINFO_LHITS_BM || p->flag==FTS3_MATCHINFO_LHITS );
- if( p->flag==FTS3_MATCHINFO_LHITS ){
- iStart = pExpr->iPhrase * p->nCol;
- }else{
- iStart = pExpr->iPhrase * ((p->nCol + 31) / 32);
- }
-
- while( 1 ){
- int nHit = fts3ColumnlistCount(&pIter);
- if( (pPhrase->iColumn>=pTab->nColumn || pPhrase->iColumn==iCol) ){
- if( p->flag==FTS3_MATCHINFO_LHITS ){
- p->aMatchinfo[iStart + iCol] = (u32)nHit;
- }else if( nHit ){
- p->aMatchinfo[iStart + (iCol+1)/32] |= (1 << (iCol&0x1F));
- }
- }
- assert( *pIter==0x00 || *pIter==0x01 );
- if( *pIter!=0x01 ) break;
- pIter++;
- pIter += fts3GetVarint32(pIter, &iCol);
- }
-}
-
-/*
-** Gather the results for matchinfo directives 'y' and 'b'.
-*/
-static void fts3ExprLHitGather(
- Fts3Expr *pExpr,
- MatchInfo *p
-){
- assert( (pExpr->pLeft==0)==(pExpr->pRight==0) );
- if( pExpr->bEof==0 && pExpr->iDocid==p->pCursor->iPrevId ){
- if( pExpr->pLeft ){
- fts3ExprLHitGather(pExpr->pLeft, p);
- fts3ExprLHitGather(pExpr->pRight, p);
- }else{
- fts3ExprLHits(pExpr, p);
- }
- }
-}
-
-/*
** fts3ExprIterate() callback used to collect the "global" matchinfo stats
** for a single query.
**
@@ -152547,12 +139629,10 @@ static int fts3MatchinfoCheck(
|| (cArg==FTS3_MATCHINFO_LENGTH && pTab->bHasDocsize)
|| (cArg==FTS3_MATCHINFO_LCS)
|| (cArg==FTS3_MATCHINFO_HITS)
- || (cArg==FTS3_MATCHINFO_LHITS)
- || (cArg==FTS3_MATCHINFO_LHITS_BM)
){
return SQLITE_OK;
}
- sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo request: %c", cArg);
+ *pzErr = sqlite3_mprintf("unrecognized matchinfo request: %c", cArg);
return SQLITE_ERROR;
}
@@ -152572,14 +139652,6 @@ static int fts3MatchinfoSize(MatchInfo *pInfo, char cArg){
nVal = pInfo->nCol;
break;
- case FTS3_MATCHINFO_LHITS:
- nVal = pInfo->nCol * pInfo->nPhrase;
- break;
-
- case FTS3_MATCHINFO_LHITS_BM:
- nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32);
- break;
-
default:
assert( cArg==FTS3_MATCHINFO_HITS );
nVal = pInfo->nCol * pInfo->nPhrase * 3;
@@ -152774,7 +139846,7 @@ static int fts3MatchinfoValues(
sqlite3_stmt *pSelect = 0;
for(i=0; rc==SQLITE_OK && zArg[i]; i++){
- pInfo->flag = zArg[i];
+
switch( zArg[i] ){
case FTS3_MATCHINFO_NPHRASE:
if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nPhrase;
@@ -152834,14 +139906,6 @@ static int fts3MatchinfoValues(
}
break;
- case FTS3_MATCHINFO_LHITS_BM:
- case FTS3_MATCHINFO_LHITS: {
- int nZero = fts3MatchinfoSize(pInfo, zArg[i]) * sizeof(u32);
- memset(pInfo->aMatchinfo, 0, nZero);
- fts3ExprLHitGather(pCsr->pExpr, pInfo);
- break;
- }
-
default: {
Fts3Expr *pExpr;
assert( zArg[i]==FTS3_MATCHINFO_HITS );
@@ -152854,7 +139918,6 @@ static int fts3MatchinfoValues(
if( rc!=SQLITE_OK ) break;
}
rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo);
- sqlite3Fts3EvalTestDeferred(pCsr, &rc);
if( rc!=SQLITE_OK ) break;
}
(void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo);
@@ -152874,8 +139937,7 @@ static int fts3MatchinfoValues(
** Populate pCsr->aMatchinfo[] with data for the current row. The
** 'matchinfo' data is an array of 32-bit unsigned integers (C type u32).
*/
-static void fts3GetMatchinfo(
- sqlite3_context *pCtx, /* Return results here */
+static int fts3GetMatchinfo(
Fts3Cursor *pCsr, /* FTS3 Cursor object */
const char *zArg /* Second argument to matchinfo() function */
){
@@ -152884,9 +139946,6 @@ static void fts3GetMatchinfo(
int rc = SQLITE_OK;
int bGlobal = 0; /* Collect 'global' stats as well as local */
- u32 *aOut = 0;
- void (*xDestroyOut)(void*) = 0;
-
memset(&sInfo, 0, sizeof(MatchInfo));
sInfo.pCursor = pCsr;
sInfo.nCol = pTab->nColumn;
@@ -152894,18 +139953,21 @@ static void fts3GetMatchinfo(
/* If there is cached matchinfo() data, but the format string for the
** cache does not match the format string for this request, discard
** the cached data. */
- if( pCsr->pMIBuffer && strcmp(pCsr->pMIBuffer->zMatchinfo, zArg) ){
- sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
- pCsr->pMIBuffer = 0;
+ if( pCsr->zMatchinfo && strcmp(pCsr->zMatchinfo, zArg) ){
+ assert( pCsr->aMatchinfo );
+ sqlite3_free(pCsr->aMatchinfo);
+ pCsr->zMatchinfo = 0;
+ pCsr->aMatchinfo = 0;
}
- /* If Fts3Cursor.pMIBuffer is NULL, then this is the first time the
+ /* If Fts3Cursor.aMatchinfo[] is NULL, then this is the first time the
** matchinfo function has been called for this query. In this case
** allocate the array used to accumulate the matchinfo data and
** initialize those elements that are constant for every row.
*/
- if( pCsr->pMIBuffer==0 ){
+ if( pCsr->aMatchinfo==0 ){
int nMatchinfo = 0; /* Number of u32 elements in match-info */
+ int nArg; /* Bytes in zArg */
int i; /* Used to iterate through zArg */
/* Determine the number of phrases in the query */
@@ -152914,46 +139976,30 @@ static void fts3GetMatchinfo(
/* Determine the number of integers in the buffer returned by this call. */
for(i=0; zArg[i]; i++){
- char *zErr = 0;
- if( fts3MatchinfoCheck(pTab, zArg[i], &zErr) ){
- sqlite3_result_error(pCtx, zErr, -1);
- sqlite3_free(zErr);
- return;
- }
nMatchinfo += fts3MatchinfoSize(&sInfo, zArg[i]);
}
/* Allocate space for Fts3Cursor.aMatchinfo[] and Fts3Cursor.zMatchinfo. */
- pCsr->pMIBuffer = fts3MIBufferNew(nMatchinfo, zArg);
- if( !pCsr->pMIBuffer ) rc = SQLITE_NOMEM;
-
+ nArg = (int)strlen(zArg);
+ pCsr->aMatchinfo = (u32 *)sqlite3_malloc(sizeof(u32)*nMatchinfo + nArg + 1);
+ if( !pCsr->aMatchinfo ) return SQLITE_NOMEM;
+
+ pCsr->zMatchinfo = (char *)&pCsr->aMatchinfo[nMatchinfo];
+ pCsr->nMatchinfo = nMatchinfo;
+ memcpy(pCsr->zMatchinfo, zArg, nArg+1);
+ memset(pCsr->aMatchinfo, 0, sizeof(u32)*nMatchinfo);
pCsr->isMatchinfoNeeded = 1;
bGlobal = 1;
}
- if( rc==SQLITE_OK ){
- xDestroyOut = fts3MIBufferAlloc(pCsr->pMIBuffer, &aOut);
- if( xDestroyOut==0 ){
- rc = SQLITE_NOMEM;
- }
- }
-
- if( rc==SQLITE_OK ){
- sInfo.aMatchinfo = aOut;
- sInfo.nPhrase = pCsr->nPhrase;
+ sInfo.aMatchinfo = pCsr->aMatchinfo;
+ sInfo.nPhrase = pCsr->nPhrase;
+ if( pCsr->isMatchinfoNeeded ){
rc = fts3MatchinfoValues(pCsr, bGlobal, &sInfo, zArg);
- if( bGlobal ){
- fts3MIBufferSetGlobal(pCsr->pMIBuffer);
- }
+ pCsr->isMatchinfoNeeded = 0;
}
- if( rc!=SQLITE_OK ){
- sqlite3_result_error_code(pCtx, rc);
- if( xDestroyOut ) xDestroyOut(aOut);
- }else{
- int n = pCsr->pMIBuffer->nElem * sizeof(u32);
- sqlite3_result_blob(pCtx, aOut, n, xDestroyOut);
- }
+ return rc;
}
/*
@@ -153015,7 +140061,7 @@ SQLITE_PRIVATE void sqlite3Fts3Snippet(
*/
for(iRead=0; iRead<pTab->nColumn; iRead++){
SnippetFragment sF = {0, 0, 0, 0};
- int iS = 0;
+ int iS;
if( iCol>=0 && iRead!=iCol ) continue;
/* Find the best snippet of nFToken tokens in column iRead. */
@@ -153159,7 +140205,7 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets(
*/
sCtx.iCol = iCol;
sCtx.iTerm = 0;
- (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx);
+ (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void *)&sCtx);
/* Retreive the text stored in column iCol. If an SQL NULL is stored
** in column iCol, jump immediately to the next iteration of the loop.
@@ -153251,9 +140297,19 @@ SQLITE_PRIVATE void sqlite3Fts3Matchinfo(
const char *zArg /* Second arg to matchinfo() function */
){
Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+ int rc;
+ int i;
const char *zFormat;
if( zArg ){
+ for(i=0; zArg[i]; i++){
+ char *zErr = 0;
+ if( fts3MatchinfoCheck(pTab, zArg[i], &zErr) ){
+ sqlite3_result_error(pContext, zErr, -1);
+ sqlite3_free(zErr);
+ return;
+ }
+ }
zFormat = zArg;
}else{
zFormat = FTS3_MATCHINFO_DEFAULT;
@@ -153262,10 +140318,17 @@ SQLITE_PRIVATE void sqlite3Fts3Matchinfo(
if( !pCsr->pExpr ){
sqlite3_result_blob(pContext, "", 0, SQLITE_STATIC);
return;
+ }
+
+ /* Retrieve matchinfo() data. */
+ rc = fts3GetMatchinfo(pCsr, zFormat);
+ sqlite3Fts3SegmentsClose(pTab);
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_result_error_code(pContext, rc);
}else{
- /* Retrieve matchinfo() data. */
- fts3GetMatchinfo(pContext, pCsr, zFormat);
- sqlite3Fts3SegmentsClose(pTab);
+ int n = pCsr->nMatchinfo * sizeof(u32);
+ sqlite3_result_blob(pContext, pCsr->aMatchinfo, n, SQLITE_TRANSIENT);
}
}
@@ -153288,9 +140351,8 @@ SQLITE_PRIVATE void sqlite3Fts3Matchinfo(
** Implementation of the "unicode" full-text-search tokenizer.
*/
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#ifdef SQLITE_ENABLE_FTS4_UNICODE61
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
/* #include <assert.h> */
@@ -153298,7 +140360,6 @@ SQLITE_PRIVATE void sqlite3Fts3Matchinfo(
/* #include <stdio.h> */
/* #include <string.h> */
-/* #include "fts3_tokenizer.h" */
/*
** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied
@@ -153506,7 +140567,7 @@ static int unicodeCreate(
for(i=0; rc==SQLITE_OK && i<nArg; i++){
const char *z = azArg[i];
- int n = (int)strlen(z);
+ int n = strlen(z);
if( n==19 && memcmp("remove_diacritics=1", z, 19)==0 ){
pNew->bRemoveDiacritic = 1;
@@ -153593,7 +140654,7 @@ static int unicodeNext(
){
unicode_cursor *pCsr = (unicode_cursor *)pC;
unicode_tokenizer *p = ((unicode_tokenizer *)pCsr->base.pTokenizer);
- int iCode = 0;
+ int iCode;
char *zOut;
const unsigned char *z = &pCsr->aInput[pCsr->iOff];
const unsigned char *zStart = z;
@@ -153638,11 +140699,11 @@ static int unicodeNext(
);
/* Set the output variables and return. */
- pCsr->iOff = (int)(z - pCsr->aInput);
+ pCsr->iOff = (z - pCsr->aInput);
*paToken = pCsr->zToken;
- *pnToken = (int)(zOut - pCsr->zToken);
- *piStart = (int)(zStart - pCsr->aInput);
- *piEnd = (int)(zEnd - pCsr->aInput);
+ *pnToken = zOut - pCsr->zToken;
+ *piStart = (zStart - pCsr->aInput);
+ *piEnd = (zEnd - pCsr->aInput);
*piPos = pCsr->iToken++;
return SQLITE_OK;
}
@@ -153665,7 +140726,7 @@ SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const *
}
#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
-#endif /* ifndef SQLITE_DISABLE_FTS3_UNICODE */
+#endif /* ifndef SQLITE_ENABLE_FTS4_UNICODE61 */
/************** End of fts3_unicode.c ****************************************/
/************** Begin file fts3_unicode2.c ***********************************/
@@ -153686,7 +140747,7 @@ SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const *
** DO NOT EDIT THIS MACHINE GENERATED FILE.
*/
-#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#if defined(SQLITE_ENABLE_FTS4_UNICODE61)
#if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4)
/* #include <assert.h> */
@@ -153710,7 +140771,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int c){
** C. It is not possible to represent a range larger than 1023 codepoints
** using this format.
*/
- static const unsigned int aEntry[] = {
+ const static unsigned int aEntry[] = {
0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07,
0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01,
0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401,
@@ -153802,7 +140863,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int c){
return ( (aAscii[c >> 5] & (1 << (c & 0x001F)))==0 );
}else if( c<(1<<22) ){
unsigned int key = (((unsigned int)c)<<10) | 0x000003FF;
- int iRes = 0;
+ int iRes;
int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
int iLo = 0;
while( iHi>=iLo ){
@@ -153873,7 +140934,7 @@ static int remove_diacritic(int c){
}
assert( key>=aDia[iRes] );
return ((c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : (int)aChar[iRes]);
-}
+};
/*
@@ -154033,7 +141094,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){
return ret;
}
#endif /* defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) */
-#endif /* !defined(SQLITE_DISABLE_FTS3_UNICODE) */
+#endif /* !defined(SQLITE_ENABLE_FTS4_UNICODE61) */
/************** End of fts3_unicode2.c ***************************************/
/************** Begin file rtree.c *******************************************/
@@ -154093,22 +141154,60 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RTREE)
+/*
+** This file contains an implementation of a couple of different variants
+** of the r-tree algorithm. See the README file for further details. The
+** same data-structure is used for all, but the algorithms for insert and
+** delete operations vary. The variants used are selected at compile time
+** by defining the following symbols:
+*/
+
+/* Either, both or none of the following may be set to activate
+** r*tree variant algorithms.
+*/
+#define VARIANT_RSTARTREE_CHOOSESUBTREE 0
+#define VARIANT_RSTARTREE_REINSERT 1
+
+/*
+** Exactly one of the following must be set to 1.
+*/
+#define VARIANT_GUTTMAN_QUADRATIC_SPLIT 0
+#define VARIANT_GUTTMAN_LINEAR_SPLIT 0
+#define VARIANT_RSTARTREE_SPLIT 1
+
+#define VARIANT_GUTTMAN_SPLIT \
+ (VARIANT_GUTTMAN_LINEAR_SPLIT||VARIANT_GUTTMAN_QUADRATIC_SPLIT)
+
+#if VARIANT_GUTTMAN_QUADRATIC_SPLIT
+ #define PickNext QuadraticPickNext
+ #define PickSeeds QuadraticPickSeeds
+ #define AssignCells splitNodeGuttman
+#endif
+#if VARIANT_GUTTMAN_LINEAR_SPLIT
+ #define PickNext LinearPickNext
+ #define PickSeeds LinearPickSeeds
+ #define AssignCells splitNodeGuttman
+#endif
+#if VARIANT_RSTARTREE_SPLIT
+ #define AssignCells splitNodeStartree
+#endif
+
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+
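Per the restored comment block above, the r-tree variant is chosen purely at compile time; switching a build to Guttman's quadratic split would simply mean editing those defines, for example:

/* Hypothetical edit: use the Guttman quadratic split instead of the R*-tree split. */
#define VARIANT_GUTTMAN_QUADRATIC_SPLIT 1
#define VARIANT_GUTTMAN_LINEAR_SPLIT    0
#define VARIANT_RSTARTREE_SPLIT         0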
#ifndef SQLITE_CORE
-/* #include "sqlite3ext.h" */
SQLITE_EXTENSION_INIT1
#else
-/* #include "sqlite3.h" */
#endif
/* #include <string.h> */
/* #include <assert.h> */
-/* #include <stdio.h> */
#ifndef SQLITE_AMALGAMATION
#include "sqlite3rtree.h"
typedef sqlite3_int64 i64;
typedef unsigned char u8;
-typedef unsigned short u16;
typedef unsigned int u32;
#endif
@@ -154126,7 +141225,6 @@ typedef struct RtreeConstraint RtreeConstraint;
typedef struct RtreeMatchArg RtreeMatchArg;
typedef struct RtreeGeomCallback RtreeGeomCallback;
typedef union RtreeCoord RtreeCoord;
-typedef struct RtreeSearchPoint RtreeSearchPoint;
/* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */
#define RTREE_MAX_DIMENSIONS 5
@@ -154135,7 +141233,7 @@ typedef struct RtreeSearchPoint RtreeSearchPoint;
** ever contain very many entries, so a fixed number of buckets is
** used.
*/
-#define HASHSIZE 97
+#define HASHSIZE 128
/* The xBestIndex method of this virtual table requires an estimate of
** the number of rows in the virtual table to calculate the costs of
@@ -154151,15 +141249,15 @@ typedef struct RtreeSearchPoint RtreeSearchPoint;
** An rtree virtual-table object.
*/
struct Rtree {
- sqlite3_vtab base; /* Base class. Must be first */
+ sqlite3_vtab base;
sqlite3 *db; /* Host database connection */
int iNodeSize; /* Size in bytes of each node in the node table */
- u8 nDim; /* Number of dimensions */
- u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */
- u8 nBytesPerCell; /* Bytes consumed per cell */
+ int nDim; /* Number of dimensions */
+ int nBytesPerCell; /* Bytes consumed per cell */
int iDepth; /* Current depth of the r-tree structure */
char *zDb; /* Name of database containing r-tree table */
char *zName; /* Name of r-tree table */
+ RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */
int nBusy; /* Current number of users of this structure */
i64 nRowEst; /* Estimated number of rows in this table */
@@ -154186,10 +141284,10 @@ struct Rtree {
sqlite3_stmt *pWriteParent;
sqlite3_stmt *pDeleteParent;
- RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */
+ int eCoordType;
};
-/* Possible values for Rtree.eCoordType: */
+/* Possible values for eCoordType: */
#define RTREE_COORD_REAL32 0
#define RTREE_COORD_INT32 1
@@ -154201,31 +141299,12 @@ struct Rtree {
#ifdef SQLITE_RTREE_INT_ONLY
typedef sqlite3_int64 RtreeDValue; /* High accuracy coordinate */
typedef int RtreeValue; /* Low accuracy coordinate */
-# define RTREE_ZERO 0
#else
typedef double RtreeDValue; /* High accuracy coordinate */
typedef float RtreeValue; /* Low accuracy coordinate */
-# define RTREE_ZERO 0.0
#endif
/*
-** When doing a search of an r-tree, instances of the following structure
-** record intermediate results from the tree walk.
-**
-** The id is always a node-id. For iLevel>=1 the id is the node-id of
-** the node that the RtreeSearchPoint represents. When iLevel==0, however,
-** the id is of the parent node and the cell that RtreeSearchPoint
-** represents is the iCell-th entry in the parent node.
-*/
-struct RtreeSearchPoint {
- RtreeDValue rScore; /* The score for this node. Smallest goes first. */
- sqlite3_int64 id; /* Node ID */
- u8 iLevel; /* 0=entries. 1=leaf node. 2+ for higher */
- u8 eWithin; /* PARTLY_WITHIN or FULLY_WITHIN */
- u8 iCell; /* Cell index within the node */
-};
-
-/*
** The minimum number of cells allowed for a node is a third of the
** maximum. In Gutman's notation:
**
@@ -154247,44 +141326,21 @@ struct RtreeSearchPoint {
*/
#define RTREE_MAX_DEPTH 40
-
-/*
-** Number of entries in the cursor RtreeNode cache. The first entry is
-** used to cache the RtreeNode for RtreeCursor.sPoint. The remaining
-** entries cache the RtreeNode for the first elements of the priority queue.
-*/
-#define RTREE_CACHE_SZ 5
-
/*
** An rtree cursor object.
*/
struct RtreeCursor {
- sqlite3_vtab_cursor base; /* Base class. Must be first */
- u8 atEOF; /* True if at end of search */
- u8 bPoint; /* True if sPoint is valid */
+ sqlite3_vtab_cursor base;
+ RtreeNode *pNode; /* Node cursor is currently pointing at */
+ int iCell; /* Index of current cell in pNode */
int iStrategy; /* Copy of idxNum search parameter */
int nConstraint; /* Number of entries in aConstraint */
RtreeConstraint *aConstraint; /* Search constraints. */
- int nPointAlloc; /* Number of slots allocated for aPoint[] */
- int nPoint; /* Number of slots used in aPoint[] */
- int mxLevel; /* iLevel value for root of the tree */
- RtreeSearchPoint *aPoint; /* Priority queue for search points */
- RtreeSearchPoint sPoint; /* Cached next search point */
- RtreeNode *aNode[RTREE_CACHE_SZ]; /* Rtree node cache */
- u32 anQueue[RTREE_MAX_DEPTH+1]; /* Number of queued entries by iLevel */
};
-/* Return the Rtree of a RtreeCursor */
-#define RTREE_OF_CURSOR(X) ((Rtree*)((X)->base.pVtab))
-
-/*
-** A coordinate can be either a floating point number or a integer. All
-** coordinates within a single R-Tree are always of the same time.
-*/
union RtreeCoord {
- RtreeValue f; /* Floating point value */
- int i; /* Integer value */
- u32 u; /* Unsigned for byte-order conversions */
+ RtreeValue f;
+ int i;
};
/*
@@ -154309,67 +141365,38 @@ union RtreeCoord {
struct RtreeConstraint {
int iCoord; /* Index of constrained coordinate */
int op; /* Constraining operation */
- union {
- RtreeDValue rValue; /* Constraint value. */
- int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*);
- int (*xQueryFunc)(sqlite3_rtree_query_info*);
- } u;
- sqlite3_rtree_query_info *pInfo; /* xGeom and xQueryFunc argument */
+ RtreeDValue rValue; /* Constraint value. */
+ int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*);
+ sqlite3_rtree_geometry *pGeom; /* Constraint callback argument for a MATCH */
};
/* Possible values for RtreeConstraint.op */
-#define RTREE_EQ 0x41 /* A */
-#define RTREE_LE 0x42 /* B */
-#define RTREE_LT 0x43 /* C */
-#define RTREE_GE 0x44 /* D */
-#define RTREE_GT 0x45 /* E */
-#define RTREE_MATCH 0x46 /* F: Old-style sqlite3_rtree_geometry_callback() */
-#define RTREE_QUERY 0x47 /* G: New-style sqlite3_rtree_query_callback() */
-
+#define RTREE_EQ 0x41
+#define RTREE_LE 0x42
+#define RTREE_LT 0x43
+#define RTREE_GE 0x44
+#define RTREE_GT 0x45
+#define RTREE_MATCH 0x46
/*
** An rtree structure node.
*/
struct RtreeNode {
- RtreeNode *pParent; /* Parent node */
- i64 iNode; /* The node number */
- int nRef; /* Number of references to this node */
- int isDirty; /* True if the node needs to be written to disk */
- u8 *zData; /* Content of the node, as should be on disk */
- RtreeNode *pNext; /* Next node in this hash collision chain */
+ RtreeNode *pParent; /* Parent node */
+ i64 iNode;
+ int nRef;
+ int isDirty;
+ u8 *zData;
+ RtreeNode *pNext; /* Next node in this hash chain */
};
-
-/* Return the number of cells in a node */
#define NCELL(pNode) readInt16(&(pNode)->zData[2])
/*
-** A single cell from a node, deserialized
+** Structure to store a deserialized rtree record.
*/
struct RtreeCell {
- i64 iRowid; /* Node or entry ID */
- RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; /* Bounding box coordinates */
-};
-
-
-/*
-** This object becomes the sqlite3_user_data() for the SQL functions
-** that are created by sqlite3_rtree_geometry_callback() and
-** sqlite3_rtree_query_callback() and which appear on the right of MATCH
-** operators in order to constrain a search.
-**
-** xGeom and xQueryFunc are the callback functions. Exactly one of
-** xGeom and xQueryFunc fields is non-NULL, depending on whether the
-** SQL function was created using sqlite3_rtree_geometry_callback() or
-** sqlite3_rtree_query_callback().
-**
-** This object is deleted automatically by the destructor mechanism in
-** sqlite3_create_function_v2().
-*/
-struct RtreeGeomCallback {
- int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*);
- int (*xQueryFunc)(sqlite3_rtree_query_info*);
- void (*xDestructor)(void*);
- void *pContext;
+ i64 iRowid;
+ RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2];
};
@@ -154381,17 +141408,29 @@ struct RtreeGeomCallback {
#define RTREE_GEOMETRY_MAGIC 0x891245AB
/*
-** An instance of this structure (in the form of a BLOB) is returned by
-** the SQL functions that sqlite3_rtree_geometry_callback() and
-** sqlite3_rtree_query_callback() create, and is read as the right-hand
-** operand to the MATCH operator of an R-Tree.
+** An instance of this structure must be supplied as a blob argument to
+** the right-hand-side of an SQL MATCH operator used to constrain an
+** r-tree query.
*/
struct RtreeMatchArg {
- u32 magic; /* Always RTREE_GEOMETRY_MAGIC */
- RtreeGeomCallback cb; /* Info about the callback functions */
- int nParam; /* Number of parameters to the SQL function */
- sqlite3_value **apSqlParam; /* Original SQL parameter values */
- RtreeDValue aParam[1]; /* Values for parameters to the SQL function */
+ u32 magic; /* Always RTREE_GEOMETRY_MAGIC */
+ int (*xGeom)(sqlite3_rtree_geometry *, int, RtreeDValue*, int *);
+ void *pContext;
+ int nParam;
+ RtreeDValue aParam[1];
+};
+
+/*
+** When a geometry callback is created (see sqlite3_rtree_geometry_callback),
+** a single instance of the following structure is allocated. It is used
+** as the context for the user-function created by s_r_g_c(). The object
+** is eventually deleted by the destructor mechanism provided by
+** sqlite3_create_function_v2() (which is called by s_r_g_c() to create
+** the geometry callback function).
+*/
+struct RtreeGeomCallback {
+ int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*);
+ void *pContext;
};
#ifndef MAX
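For orientation, these structures back the public sqlite3_rtree_geometry_callback() interface. Below is a minimal caller-side sketch, assuming a two-dimensional r-tree, the default build (where the callback coordinates are doubles rather than the SQLITE_RTREE_INT_ONLY integers), and a hypothetical SQL function named "circle"; the crude bounding-box test stands in for a real point-in-circle check:

    #include "sqlite3.h"  /* the callback interface may live in sqlite3rtree.h in split builds */

    /* xGeom callback: p->aParam[] holds the SQL arguments (x, y, radius) and
    ** aCoord[] holds the cell's bounding box as min/max pairs per dimension.
    ** Set *pRes non-zero to keep the cell, zero to filter it out. */
    static int circleGeom(sqlite3_rtree_geometry *p, int nCoord,
                          double *aCoord, int *pRes){
      double x, y, r;
      if( p->nParam!=3 || nCoord!=4 ) return SQLITE_ERROR;
      x = p->aParam[0]; y = p->aParam[1]; r = p->aParam[2];
      *pRes = aCoord[0]<=x+r && aCoord[1]>=x-r      /* x ranges overlap */
           && aCoord[2]<=y+r && aCoord[3]>=y-r;     /* y ranges overlap */
      return SQLITE_OK;
    }

    /* Registration and use (error handling omitted):
    **   sqlite3_rtree_geometry_callback(db, "circle", circleGeom, 0);
    **   SELECT id FROM demo_index WHERE id MATCH circle(45.3, 22.9, 5.0);
    */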
@@ -154409,12 +141448,13 @@ static int readInt16(u8 *p){
return (p[0]<<8) + p[1];
}
static void readCoord(u8 *p, RtreeCoord *pCoord){
- pCoord->u = (
+ u32 i = (
(((u32)p[0]) << 24) +
(((u32)p[1]) << 16) +
(((u32)p[2]) << 8) +
(((u32)p[3]) << 0)
);
+ *(u32 *)pCoord = i;
}
static i64 readInt64(u8 *p){
return (
@@ -154443,7 +141483,7 @@ static int writeCoord(u8 *p, RtreeCoord *pCoord){
u32 i;
assert( sizeof(RtreeCoord)==4 );
assert( sizeof(u32)==4 );
- i = pCoord->u;
+ i = *(u32 *)pCoord;
p[0] = (i>>24)&0xFF;
p[1] = (i>>16)&0xFF;
p[2] = (i>> 8)&0xFF;
@@ -154484,7 +141524,10 @@ static void nodeZero(Rtree *pRtree, RtreeNode *p){
** in the Rtree.aHash table.
*/
static int nodeHash(i64 iNode){
- return iNode % HASHSIZE;
+ return (
+ (iNode>>56) ^ (iNode>>48) ^ (iNode>>40) ^ (iNode>>32) ^
+ (iNode>>24) ^ (iNode>>16) ^ (iNode>> 8) ^ (iNode>> 0)
+ ) % HASHSIZE;
}
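A standalone sketch of the reverted hash (illustrative only; the i64 typedef and HASHSIZE value are copied from above) shows how folding all eight bytes keeps node numbers that differ only in their high bytes out of the same bucket:

    #include <stdio.h>
    typedef long long i64;
    #define HASHSIZE 128

    static int nodeHash(i64 iNode){
      return (
        (iNode>>56) ^ (iNode>>48) ^ (iNode>>40) ^ (iNode>>32) ^
        (iNode>>24) ^ (iNode>>16) ^ (iNode>> 8) ^ (iNode>> 0)
      ) % HASHSIZE;
    }

    int main(void){
      /* Two ids with the same low byte: a plain iNode%HASHSIZE would collide. */
      printf("%d %d\n", nodeHash(1), nodeHash(((i64)1<<32)+1));
      return 0;
    }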
/*
@@ -154544,7 +141587,8 @@ static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){
/*
** Obtain a reference to an r-tree node.
*/
-static int nodeAcquire(
+static int
+nodeAcquire(
Rtree *pRtree, /* R-tree structure */
i64 iNode, /* Node number to load */
RtreeNode *pParent, /* Either the parent node or NULL */
@@ -154633,10 +141677,10 @@ static int nodeAcquire(
** Overwrite cell iCell of node pNode with the contents of pCell.
*/
static void nodeOverwriteCell(
- Rtree *pRtree, /* The overall R-Tree */
- RtreeNode *pNode, /* The node into which the cell is to be written */
- RtreeCell *pCell, /* The cell to write */
- int iCell /* Index into pNode into which pCell is written */
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ RtreeCell *pCell,
+ int iCell
){
int ii;
u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
@@ -154648,7 +141692,7 @@ static void nodeOverwriteCell(
}
/*
-** Remove the cell with index iCell from node pNode.
+** Remove the cell with index iCell from node pNode.
*/
static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){
u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
@@ -154665,10 +141709,11 @@ static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){
**
** If there is not enough free space in pNode, return SQLITE_FULL.
*/
-static int nodeInsertCell(
- Rtree *pRtree, /* The overall R-Tree */
- RtreeNode *pNode, /* Write new cell into this node */
- RtreeCell *pCell /* The cell to be inserted */
+static int
+nodeInsertCell(
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ RtreeCell *pCell
){
int nCell; /* Current number of cells in pNode */
int nMaxCell; /* Maximum number of cells for pNode */
@@ -154689,7 +141734,8 @@ static int nodeInsertCell(
/*
** If the node is dirty, write it out to the database.
*/
-static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){
+static int
+nodeWrite(Rtree *pRtree, RtreeNode *pNode){
int rc = SQLITE_OK;
if( pNode->isDirty ){
sqlite3_stmt *p = pRtree->pWriteNode;
@@ -154714,7 +141760,8 @@ static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){
** Release a reference to a node. If the node is dirty and the reference
** count drops to zero, the node data is written to the database.
*/
-static int nodeRelease(Rtree *pRtree, RtreeNode *pNode){
+static int
+nodeRelease(Rtree *pRtree, RtreeNode *pNode){
int rc = SQLITE_OK;
if( pNode ){
assert( pNode->nRef>0 );
@@ -154742,9 +141789,9 @@ static int nodeRelease(Rtree *pRtree, RtreeNode *pNode){
** an internal node, then the 64-bit integer is a child page number.
*/
static i64 nodeGetRowid(
- Rtree *pRtree, /* The overall R-Tree */
- RtreeNode *pNode, /* The node from which to extract the ID */
- int iCell /* The cell index from which to extract the ID */
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ int iCell
){
assert( iCell<NCELL(pNode) );
return readInt64(&pNode->zData[4 + pRtree->nBytesPerCell*iCell]);
@@ -154754,11 +141801,11 @@ static i64 nodeGetRowid(
** Return coordinate iCoord from cell iCell in node pNode.
*/
static void nodeGetCoord(
- Rtree *pRtree, /* The overall R-Tree */
- RtreeNode *pNode, /* The node from which to extract a coordinate */
- int iCell, /* The index of the cell within the node */
- int iCoord, /* Which coordinate to extract */
- RtreeCoord *pCoord /* OUT: Space to write result to */
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ int iCell,
+ int iCoord,
+ RtreeCoord *pCoord /* Space to write result to */
){
readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord);
}
@@ -154768,19 +141815,15 @@ static void nodeGetCoord(
** to by pCell with the results.
*/
static void nodeGetCell(
- Rtree *pRtree, /* The overall R-Tree */
- RtreeNode *pNode, /* The node containing the cell to be read */
- int iCell, /* Index of the cell within the node */
- RtreeCell *pCell /* OUT: Write the cell contents here */
+ Rtree *pRtree,
+ RtreeNode *pNode,
+ int iCell,
+ RtreeCell *pCell
){
- u8 *pData;
- RtreeCoord *pCoord;
int ii;
pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell);
- pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell);
- pCoord = pCell->aCoord;
for(ii=0; ii<pRtree->nDim*2; ii++){
- readCoord(&pData[ii*4], &pCoord[ii]);
+ nodeGetCoord(pRtree, pNode, iCell, ii, &pCell->aCoord[ii]);
}
}
@@ -154906,10 +141949,10 @@ static void freeCursorConstraints(RtreeCursor *pCsr){
if( pCsr->aConstraint ){
int i; /* Used to iterate through constraint array */
for(i=0; i<pCsr->nConstraint; i++){
- sqlite3_rtree_query_info *pInfo = pCsr->aConstraint[i].pInfo;
- if( pInfo ){
- if( pInfo->xDelUser ) pInfo->xDelUser(pInfo->pUser);
- sqlite3_free(pInfo);
+ sqlite3_rtree_geometry *pGeom = pCsr->aConstraint[i].pGeom;
+ if( pGeom ){
+ if( pGeom->xDelUser ) pGeom->xDelUser(pGeom->pUser);
+ sqlite3_free(pGeom);
}
}
sqlite3_free(pCsr->aConstraint);
@@ -154922,13 +141965,12 @@ static void freeCursorConstraints(RtreeCursor *pCsr){
*/
static int rtreeClose(sqlite3_vtab_cursor *cur){
Rtree *pRtree = (Rtree *)(cur->pVtab);
- int ii;
+ int rc;
RtreeCursor *pCsr = (RtreeCursor *)cur;
freeCursorConstraints(pCsr);
- sqlite3_free(pCsr->aPoint);
- for(ii=0; ii<RTREE_CACHE_SZ; ii++) nodeRelease(pRtree, pCsr->aNode[ii]);
+ rc = nodeRelease(pRtree, pCsr->pNode);
sqlite3_free(pCsr);
- return SQLITE_OK;
+ return rc;
}
/*
@@ -154939,164 +141981,194 @@ static int rtreeClose(sqlite3_vtab_cursor *cur){
*/
static int rtreeEof(sqlite3_vtab_cursor *cur){
RtreeCursor *pCsr = (RtreeCursor *)cur;
- return pCsr->atEOF;
-}
-
-/*
-** Convert raw bits from the on-disk RTree record into a coordinate value.
-** The on-disk format is big-endian and needs to be converted for little-
-** endian platforms. The on-disk record stores integer coordinates if
-** eInt is true and it stores 32-bit floating point records if eInt is
-** false. a[] is the four bytes of the on-disk record to be decoded.
-** Store the results in "r".
-**
-** There are three versions of this macro, one each for little-endian and
-** big-endian processors and a third generic implementation. The endian-
-** specific implementations are much faster and are preferred if the
-** processor endianness is known at compile-time. The SQLITE_BYTEORDER
-** macro is part of sqliteInt.h and hence the endian-specific
-** implementation will only be used if this module is compiled as part
-** of the amalgamation.
-*/
-#if defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==1234
-#define RTREE_DECODE_COORD(eInt, a, r) { \
- RtreeCoord c; /* Coordinate decoded */ \
- memcpy(&c.u,a,4); \
- c.u = ((c.u>>24)&0xff)|((c.u>>8)&0xff00)| \
- ((c.u&0xff)<<24)|((c.u&0xff00)<<8); \
- r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
-}
-#elif defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==4321
-#define RTREE_DECODE_COORD(eInt, a, r) { \
- RtreeCoord c; /* Coordinate decoded */ \
- memcpy(&c.u,a,4); \
- r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+ return (pCsr->pNode==0);
}
-#else
-#define RTREE_DECODE_COORD(eInt, a, r) { \
- RtreeCoord c; /* Coordinate decoded */ \
- c.u = ((u32)a[0]<<24) + ((u32)a[1]<<16) \
- +((u32)a[2]<<8) + a[3]; \
- r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
-}
-#endif
/*
-** Check the RTree node or entry given by pCellData and p against the MATCH
-** constraint pConstraint.
+** The r-tree constraint passed as the second argument to this function is
+** guaranteed to be a MATCH constraint.
*/
-static int rtreeCallbackConstraint(
- RtreeConstraint *pConstraint, /* The constraint to test */
- int eInt, /* True if RTree holding integer coordinates */
- u8 *pCellData, /* Raw cell content */
- RtreeSearchPoint *pSearch, /* Container of this cell */
- sqlite3_rtree_dbl *prScore, /* OUT: score for the cell */
- int *peWithin /* OUT: visibility of the cell */
+static int testRtreeGeom(
+ Rtree *pRtree, /* R-Tree object */
+ RtreeConstraint *pConstraint, /* MATCH constraint to test */
+ RtreeCell *pCell, /* Cell to test */
+ int *pbRes /* OUT: Test result */
){
- int i; /* Loop counter */
- sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */
- int nCoord = pInfo->nCoord; /* No. of coordinates */
- int rc; /* Callback return code */
- sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2]; /* Decoded coordinates */
+ int i;
+ RtreeDValue aCoord[RTREE_MAX_DIMENSIONS*2];
+ int nCoord = pRtree->nDim*2;
- assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY );
- assert( nCoord==2 || nCoord==4 || nCoord==6 || nCoord==8 || nCoord==10 );
+ assert( pConstraint->op==RTREE_MATCH );
+ assert( pConstraint->pGeom );
- if( pConstraint->op==RTREE_QUERY && pSearch->iLevel==1 ){
- pInfo->iRowid = readInt64(pCellData);
- }
- pCellData += 8;
- for(i=0; i<nCoord; i++, pCellData += 4){
- RTREE_DECODE_COORD(eInt, pCellData, aCoord[i]);
+ for(i=0; i<nCoord; i++){
+ aCoord[i] = DCOORD(pCell->aCoord[i]);
}
- if( pConstraint->op==RTREE_MATCH ){
- rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo,
- nCoord, aCoord, &i);
- if( i==0 ) *peWithin = NOT_WITHIN;
- *prScore = RTREE_ZERO;
- }else{
- pInfo->aCoord = aCoord;
- pInfo->iLevel = pSearch->iLevel - 1;
- pInfo->rScore = pInfo->rParentScore = pSearch->rScore;
- pInfo->eWithin = pInfo->eParentWithin = pSearch->eWithin;
- rc = pConstraint->u.xQueryFunc(pInfo);
- if( pInfo->eWithin<*peWithin ) *peWithin = pInfo->eWithin;
- if( pInfo->rScore<*prScore || *prScore<RTREE_ZERO ){
- *prScore = pInfo->rScore;
+ return pConstraint->xGeom(pConstraint->pGeom, nCoord, aCoord, pbRes);
+}
+
+/*
+** Cursor pCursor currently points to a cell in a non-leaf page.
+** Set *pbEof to true if the sub-tree headed by the cell is filtered
+** (excluded) by the constraints in the pCursor->aConstraint[]
+** array, or false otherwise.
+**
+** Return SQLITE_OK if successful or an SQLite error code if an error
+** occurs within a geometry callback.
+*/
+static int testRtreeCell(Rtree *pRtree, RtreeCursor *pCursor, int *pbEof){
+ RtreeCell cell;
+ int ii;
+ int bRes = 0;
+ int rc = SQLITE_OK;
+
+ nodeGetCell(pRtree, pCursor->pNode, pCursor->iCell, &cell);
+ for(ii=0; bRes==0 && ii<pCursor->nConstraint; ii++){
+ RtreeConstraint *p = &pCursor->aConstraint[ii];
+ RtreeDValue cell_min = DCOORD(cell.aCoord[(p->iCoord>>1)*2]);
+ RtreeDValue cell_max = DCOORD(cell.aCoord[(p->iCoord>>1)*2+1]);
+
+ assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
+ || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_MATCH
+ );
+
+ switch( p->op ){
+ case RTREE_LE: case RTREE_LT:
+ bRes = p->rValue<cell_min;
+ break;
+
+ case RTREE_GE: case RTREE_GT:
+ bRes = p->rValue>cell_max;
+ break;
+
+ case RTREE_EQ:
+ bRes = (p->rValue>cell_max || p->rValue<cell_min);
+ break;
+
+ default: {
+ assert( p->op==RTREE_MATCH );
+ rc = testRtreeGeom(pRtree, p, &cell, &bRes);
+ bRes = !bRes;
+ break;
+ }
}
}
+
+ *pbEof = bRes;
return rc;
}
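As a concrete (made-up) illustration of the test above: with a single constraint of the form minX<=10 on a two-dimensional tree, a sub-tree whose cells all fall in x = [12, 20] has cell_min==12, so rValue<cell_min holds and the whole sub-tree is skipped; a sub-tree covering x = [5, 20] is kept, since some of its leaf entries may still satisfy the constraint.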
/*
-** Check the internal RTree node given by pCellData against constraint p.
-** If this constraint cannot be satisfied by any child within the node,
-** set *peWithin to NOT_WITHIN.
+** Test if the cell that cursor pCursor currently points to
+** would be filtered (excluded) by the constraints in the
+** pCursor->aConstraint[] array. If so, set *pbEof to true before
+** returning. If the cell is not filtered (excluded) by the constraints,
+** set *pbEof to zero.
+**
+** Return SQLITE_OK if successful or an SQLite error code if an error
+** occurs within a geometry callback.
+**
+** This function assumes that the cell is part of a leaf node.
*/
-static void rtreeNonleafConstraint(
- RtreeConstraint *p, /* The constraint to test */
- int eInt, /* True if RTree holds integer coordinates */
- u8 *pCellData, /* Raw cell content as appears on disk */
- int *peWithin /* Adjust downward, as appropriate */
-){
- sqlite3_rtree_dbl val; /* Coordinate value convert to a double */
-
- /* p->iCoord might point to either a lower or upper bound coordinate
- ** in a coordinate pair. But make pCellData point to the lower bound.
- */
- pCellData += 8 + 4*(p->iCoord&0xfe);
+static int testRtreeEntry(Rtree *pRtree, RtreeCursor *pCursor, int *pbEof){
+ RtreeCell cell;
+ int ii;
+ *pbEof = 0;
- assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
- || p->op==RTREE_GT || p->op==RTREE_EQ );
- switch( p->op ){
- case RTREE_LE:
- case RTREE_LT:
- case RTREE_EQ:
- RTREE_DECODE_COORD(eInt, pCellData, val);
- /* val now holds the lower bound of the coordinate pair */
- if( p->u.rValue>=val ) return;
- if( p->op!=RTREE_EQ ) break; /* RTREE_LE and RTREE_LT end here */
- /* Fall through for the RTREE_EQ case */
+ nodeGetCell(pRtree, pCursor->pNode, pCursor->iCell, &cell);
+ for(ii=0; ii<pCursor->nConstraint; ii++){
+ RtreeConstraint *p = &pCursor->aConstraint[ii];
+ RtreeDValue coord = DCOORD(cell.aCoord[p->iCoord]);
+ int res;
+ assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
+ || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_MATCH
+ );
+ switch( p->op ){
+ case RTREE_LE: res = (coord<=p->rValue); break;
+ case RTREE_LT: res = (coord<p->rValue); break;
+ case RTREE_GE: res = (coord>=p->rValue); break;
+ case RTREE_GT: res = (coord>p->rValue); break;
+ case RTREE_EQ: res = (coord==p->rValue); break;
+ default: {
+ int rc;
+ assert( p->op==RTREE_MATCH );
+ rc = testRtreeGeom(pRtree, p, &cell, &res);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ break;
+ }
+ }
- default: /* RTREE_GT or RTREE_GE, or fallthrough of RTREE_EQ */
- pCellData += 4;
- RTREE_DECODE_COORD(eInt, pCellData, val);
- /* val now holds the upper bound of the coordinate pair */
- if( p->u.rValue<=val ) return;
+ if( !res ){
+ *pbEof = 1;
+ return SQLITE_OK;
+ }
}
- *peWithin = NOT_WITHIN;
+
+ return SQLITE_OK;
}
/*
-** Check the leaf RTree cell given by pCellData against constraint p.
-** If this constraint is not satisfied, set *peWithin to NOT_WITHIN.
-** If the constraint is satisfied, leave *peWithin unchanged.
-**
-** The constraint is of the form: xN op $val
-**
-** The op is given by p->op. The xN is p->iCoord-th coordinate in
-** pCellData. $val is given by p->u.rValue.
+** Cursor pCursor currently points at a node that heads a sub-tree of
+** height iHeight (if iHeight==0, then the node is a leaf). Descend
+** to point to the left-most cell of the sub-tree that matches the
+** configured constraints.
*/
-static void rtreeLeafConstraint(
- RtreeConstraint *p, /* The constraint to test */
- int eInt, /* True if RTree holds integer coordinates */
- u8 *pCellData, /* Raw cell content as appears on disk */
- int *peWithin /* Adjust downward, as appropriate */
+static int descendToCell(
+ Rtree *pRtree,
+ RtreeCursor *pCursor,
+ int iHeight,
+ int *pEof /* OUT: Set to true if cannot descend */
){
- RtreeDValue xN; /* Coordinate value converted to a double */
+ int isEof;
+ int rc;
+ int ii;
+ RtreeNode *pChild;
+ sqlite3_int64 iRowid;
- assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
- || p->op==RTREE_GT || p->op==RTREE_EQ );
- pCellData += 8 + p->iCoord*4;
- RTREE_DECODE_COORD(eInt, pCellData, xN);
- switch( p->op ){
- case RTREE_LE: if( xN <= p->u.rValue ) return; break;
- case RTREE_LT: if( xN < p->u.rValue ) return; break;
- case RTREE_GE: if( xN >= p->u.rValue ) return; break;
- case RTREE_GT: if( xN > p->u.rValue ) return; break;
- default: if( xN == p->u.rValue ) return; break;
+ RtreeNode *pSavedNode = pCursor->pNode;
+ int iSavedCell = pCursor->iCell;
+
+ assert( iHeight>=0 );
+
+ if( iHeight==0 ){
+ rc = testRtreeEntry(pRtree, pCursor, &isEof);
+ }else{
+ rc = testRtreeCell(pRtree, pCursor, &isEof);
+ }
+ if( rc!=SQLITE_OK || isEof || iHeight==0 ){
+ goto descend_to_cell_out;
+ }
+
+ iRowid = nodeGetRowid(pRtree, pCursor->pNode, pCursor->iCell);
+ rc = nodeAcquire(pRtree, iRowid, pCursor->pNode, &pChild);
+ if( rc!=SQLITE_OK ){
+ goto descend_to_cell_out;
+ }
+
+ nodeRelease(pRtree, pCursor->pNode);
+ pCursor->pNode = pChild;
+ isEof = 1;
+ for(ii=0; isEof && ii<NCELL(pChild); ii++){
+ pCursor->iCell = ii;
+ rc = descendToCell(pRtree, pCursor, iHeight-1, &isEof);
+ if( rc!=SQLITE_OK ){
+ goto descend_to_cell_out;
+ }
+ }
+
+ if( isEof ){
+ assert( pCursor->pNode==pChild );
+ nodeReference(pSavedNode);
+ nodeRelease(pRtree, pChild);
+ pCursor->pNode = pSavedNode;
+ pCursor->iCell = iSavedCell;
}
- *peWithin = NOT_WITHIN;
+
+descend_to_cell_out:
+ *pEof = isEof;
+ return rc;
}
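Putting the helpers together: the xFilter method further down seats the cursor on a root cell and calls descendToCell() with iHeight==pRtree->iDepth; each recursion level swaps pCursor->pNode for the child node and probes the child's cells left to right, restoring the saved node and cell index whenever every cell of the child is filtered out. A hypothetical trace for a depth-1 tree with two leaves:

    root (node 1), cell 0 -> leaf (node 2): all cells filtered -> backtrack to root
    root (node 1), cell 1 -> leaf (node 3): cell 0 passes      -> cursor parked at (node 3, cell 0)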
/*
@@ -155111,7 +142183,6 @@ static int nodeRowidIndex(
){
int ii;
int nCell = NCELL(pNode);
- assert( nCell<200 );
for(ii=0; ii<nCell; ii++){
if( nodeGetRowid(pRtree, pNode, ii)==iRowid ){
*piIndex = ii;
@@ -155134,302 +142205,48 @@ static int nodeParentIndex(Rtree *pRtree, RtreeNode *pNode, int *piIndex){
return SQLITE_OK;
}
-/*
-** Compare two search points. Return negative, zero, or positive if the first
-** is less than, equal to, or greater than the second.
-**
-** The rScore is the primary key. Smaller rScore values come first.
-** If the rScore is a tie, then use iLevel as the tie breaker with smaller
-** iLevel values coming first. In this way, if rScore is the same for all
-** SearchPoints, then iLevel becomes the deciding factor and the result
-** is a depth-first search, which is the desired default behavior.
-*/
-static int rtreeSearchPointCompare(
- const RtreeSearchPoint *pA,
- const RtreeSearchPoint *pB
-){
- if( pA->rScore<pB->rScore ) return -1;
- if( pA->rScore>pB->rScore ) return +1;
- if( pA->iLevel<pB->iLevel ) return -1;
- if( pA->iLevel>pB->iLevel ) return +1;
- return 0;
-}
-
-/*
-** Interchange to search points in a cursor.
-*/
-static void rtreeSearchPointSwap(RtreeCursor *p, int i, int j){
- RtreeSearchPoint t = p->aPoint[i];
- assert( i<j );
- p->aPoint[i] = p->aPoint[j];
- p->aPoint[j] = t;
- i++; j++;
- if( i<RTREE_CACHE_SZ ){
- if( j>=RTREE_CACHE_SZ ){
- nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
- p->aNode[i] = 0;
- }else{
- RtreeNode *pTemp = p->aNode[i];
- p->aNode[i] = p->aNode[j];
- p->aNode[j] = pTemp;
- }
- }
-}
-
-/*
-** Return the search point with the lowest current score.
-*/
-static RtreeSearchPoint *rtreeSearchPointFirst(RtreeCursor *pCur){
- return pCur->bPoint ? &pCur->sPoint : pCur->nPoint ? pCur->aPoint : 0;
-}
-
-/*
-** Get the RtreeNode for the search point with the lowest score.
-*/
-static RtreeNode *rtreeNodeOfFirstSearchPoint(RtreeCursor *pCur, int *pRC){
- sqlite3_int64 id;
- int ii = 1 - pCur->bPoint;
- assert( ii==0 || ii==1 );
- assert( pCur->bPoint || pCur->nPoint );
- if( pCur->aNode[ii]==0 ){
- assert( pRC!=0 );
- id = ii ? pCur->aPoint[0].id : pCur->sPoint.id;
- *pRC = nodeAcquire(RTREE_OF_CURSOR(pCur), id, 0, &pCur->aNode[ii]);
- }
- return pCur->aNode[ii];
-}
-
-/*
-** Push a new element onto the priority queue
-*/
-static RtreeSearchPoint *rtreeEnqueue(
- RtreeCursor *pCur, /* The cursor */
- RtreeDValue rScore, /* Score for the new search point */
- u8 iLevel /* Level for the new search point */
-){
- int i, j;
- RtreeSearchPoint *pNew;
- if( pCur->nPoint>=pCur->nPointAlloc ){
- int nNew = pCur->nPointAlloc*2 + 8;
- pNew = sqlite3_realloc(pCur->aPoint, nNew*sizeof(pCur->aPoint[0]));
- if( pNew==0 ) return 0;
- pCur->aPoint = pNew;
- pCur->nPointAlloc = nNew;
- }
- i = pCur->nPoint++;
- pNew = pCur->aPoint + i;
- pNew->rScore = rScore;
- pNew->iLevel = iLevel;
- assert( iLevel<=RTREE_MAX_DEPTH );
- while( i>0 ){
- RtreeSearchPoint *pParent;
- j = (i-1)/2;
- pParent = pCur->aPoint + j;
- if( rtreeSearchPointCompare(pNew, pParent)>=0 ) break;
- rtreeSearchPointSwap(pCur, j, i);
- i = j;
- pNew = pParent;
- }
- return pNew;
-}
-
-/*
-** Allocate a new RtreeSearchPoint and return a pointer to it. Return
-** NULL if malloc fails.
-*/
-static RtreeSearchPoint *rtreeSearchPointNew(
- RtreeCursor *pCur, /* The cursor */
- RtreeDValue rScore, /* Score for the new search point */
- u8 iLevel /* Level for the new search point */
-){
- RtreeSearchPoint *pNew, *pFirst;
- pFirst = rtreeSearchPointFirst(pCur);
- pCur->anQueue[iLevel]++;
- if( pFirst==0
- || pFirst->rScore>rScore
- || (pFirst->rScore==rScore && pFirst->iLevel>iLevel)
- ){
- if( pCur->bPoint ){
- int ii;
- pNew = rtreeEnqueue(pCur, rScore, iLevel);
- if( pNew==0 ) return 0;
- ii = (int)(pNew - pCur->aPoint) + 1;
- if( ii<RTREE_CACHE_SZ ){
- assert( pCur->aNode[ii]==0 );
- pCur->aNode[ii] = pCur->aNode[0];
- }else{
- nodeRelease(RTREE_OF_CURSOR(pCur), pCur->aNode[0]);
- }
- pCur->aNode[0] = 0;
- *pNew = pCur->sPoint;
- }
- pCur->sPoint.rScore = rScore;
- pCur->sPoint.iLevel = iLevel;
- pCur->bPoint = 1;
- return &pCur->sPoint;
- }else{
- return rtreeEnqueue(pCur, rScore, iLevel);
- }
-}
-
-#if 0
-/* Tracing routines for the RtreeSearchPoint queue */
-static void tracePoint(RtreeSearchPoint *p, int idx, RtreeCursor *pCur){
- if( idx<0 ){ printf(" s"); }else{ printf("%2d", idx); }
- printf(" %d.%05lld.%02d %g %d",
- p->iLevel, p->id, p->iCell, p->rScore, p->eWithin
- );
- idx++;
- if( idx<RTREE_CACHE_SZ ){
- printf(" %p\n", pCur->aNode[idx]);
- }else{
- printf("\n");
- }
-}
-static void traceQueue(RtreeCursor *pCur, const char *zPrefix){
- int ii;
- printf("=== %9s ", zPrefix);
- if( pCur->bPoint ){
- tracePoint(&pCur->sPoint, -1, pCur);
- }
- for(ii=0; ii<pCur->nPoint; ii++){
- if( ii>0 || pCur->bPoint ) printf(" ");
- tracePoint(&pCur->aPoint[ii], ii, pCur);
- }
-}
-# define RTREE_QUEUE_TRACE(A,B) traceQueue(A,B)
-#else
-# define RTREE_QUEUE_TRACE(A,B) /* no-op */
-#endif
-
-/* Remove the search point with the lowest current score.
-*/
-static void rtreeSearchPointPop(RtreeCursor *p){
- int i, j, k, n;
- i = 1 - p->bPoint;
- assert( i==0 || i==1 );
- if( p->aNode[i] ){
- nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
- p->aNode[i] = 0;
- }
- if( p->bPoint ){
- p->anQueue[p->sPoint.iLevel]--;
- p->bPoint = 0;
- }else if( p->nPoint ){
- p->anQueue[p->aPoint[0].iLevel]--;
- n = --p->nPoint;
- p->aPoint[0] = p->aPoint[n];
- if( n<RTREE_CACHE_SZ-1 ){
- p->aNode[1] = p->aNode[n+1];
- p->aNode[n+1] = 0;
- }
- i = 0;
- while( (j = i*2+1)<n ){
- k = j+1;
- if( k<n && rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[j])<0 ){
- if( rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[i])<0 ){
- rtreeSearchPointSwap(p, i, k);
- i = k;
- }else{
- break;
- }
- }else{
- if( rtreeSearchPointCompare(&p->aPoint[j], &p->aPoint[i])<0 ){
- rtreeSearchPointSwap(p, i, j);
- i = j;
- }else{
- break;
- }
- }
- }
- }
-}
-
-
-/*
-** Continue the search on cursor pCur until the front of the queue
-** contains an entry suitable for returning as a result-set row,
-** or until the RtreeSearchPoint queue is empty, indicating that the
-** query has completed.
+/*
+** Rtree virtual table module xNext method.
*/
-static int rtreeStepToLeaf(RtreeCursor *pCur){
- RtreeSearchPoint *p;
- Rtree *pRtree = RTREE_OF_CURSOR(pCur);
- RtreeNode *pNode;
- int eWithin;
+static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){
+ Rtree *pRtree = (Rtree *)(pVtabCursor->pVtab);
+ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
int rc = SQLITE_OK;
- int nCell;
- int nConstraint = pCur->nConstraint;
- int ii;
- int eInt;
- RtreeSearchPoint x;
- eInt = pRtree->eCoordType==RTREE_COORD_INT32;
- while( (p = rtreeSearchPointFirst(pCur))!=0 && p->iLevel>0 ){
- pNode = rtreeNodeOfFirstSearchPoint(pCur, &rc);
- if( rc ) return rc;
- nCell = NCELL(pNode);
- assert( nCell<200 );
- while( p->iCell<nCell ){
- sqlite3_rtree_dbl rScore = (sqlite3_rtree_dbl)-1;
- u8 *pCellData = pNode->zData + (4+pRtree->nBytesPerCell*p->iCell);
- eWithin = FULLY_WITHIN;
- for(ii=0; ii<nConstraint; ii++){
- RtreeConstraint *pConstraint = pCur->aConstraint + ii;
- if( pConstraint->op>=RTREE_MATCH ){
- rc = rtreeCallbackConstraint(pConstraint, eInt, pCellData, p,
- &rScore, &eWithin);
- if( rc ) return rc;
- }else if( p->iLevel==1 ){
- rtreeLeafConstraint(pConstraint, eInt, pCellData, &eWithin);
- }else{
- rtreeNonleafConstraint(pConstraint, eInt, pCellData, &eWithin);
+ /* RtreeCursor.pNode must not be NULL. If it is NULL, then this cursor is
+ ** already at EOF. It is against the rules to call the xNext() method of
+ ** a cursor that has already reached EOF.
+ */
+ assert( pCsr->pNode );
+
+ if( pCsr->iStrategy==1 ){
+ /* This "scan" is a direct lookup by rowid. There is no next entry. */
+ nodeRelease(pRtree, pCsr->pNode);
+ pCsr->pNode = 0;
+ }else{
+ /* Move to the next entry that matches the configured constraints. */
+ int iHeight = 0;
+ while( pCsr->pNode ){
+ RtreeNode *pNode = pCsr->pNode;
+ int nCell = NCELL(pNode);
+ for(pCsr->iCell++; pCsr->iCell<nCell; pCsr->iCell++){
+ int isEof;
+ rc = descendToCell(pRtree, pCsr, iHeight, &isEof);
+ if( rc!=SQLITE_OK || !isEof ){
+ return rc;
}
- if( eWithin==NOT_WITHIN ) break;
}
- p->iCell++;
- if( eWithin==NOT_WITHIN ) continue;
- x.iLevel = p->iLevel - 1;
- if( x.iLevel ){
- x.id = readInt64(pCellData);
- x.iCell = 0;
- }else{
- x.id = p->id;
- x.iCell = p->iCell - 1;
- }
- if( p->iCell>=nCell ){
- RTREE_QUEUE_TRACE(pCur, "POP-S:");
- rtreeSearchPointPop(pCur);
- }
- if( rScore<RTREE_ZERO ) rScore = RTREE_ZERO;
- p = rtreeSearchPointNew(pCur, rScore, x.iLevel);
- if( p==0 ) return SQLITE_NOMEM;
- p->eWithin = eWithin;
- p->id = x.id;
- p->iCell = x.iCell;
- RTREE_QUEUE_TRACE(pCur, "PUSH-S:");
- break;
- }
- if( p->iCell>=nCell ){
- RTREE_QUEUE_TRACE(pCur, "POP-Se:");
- rtreeSearchPointPop(pCur);
+ pCsr->pNode = pNode->pParent;
+ rc = nodeParentIndex(pRtree, pNode, &pCsr->iCell);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
+ nodeReference(pCsr->pNode);
+ nodeRelease(pRtree, pNode);
+ iHeight++;
}
}
- pCur->atEOF = p==0;
- return SQLITE_OK;
-}
-
-/*
-** Rtree virtual table module xNext method.
-*/
-static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){
- RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
- int rc = SQLITE_OK;
- /* Move to the next entry that matches the configured constraints. */
- RTREE_QUEUE_TRACE(pCsr, "POP-Nx:");
- rtreeSearchPointPop(pCsr);
- rc = rtreeStepToLeaf(pCsr);
return rc;
}
@@ -155437,14 +142254,13 @@ static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){
** Rtree virtual table module xRowid method.
*/
static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){
+ Rtree *pRtree = (Rtree *)pVtabCursor->pVtab;
RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
- RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
- int rc = SQLITE_OK;
- RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
- if( rc==SQLITE_OK && p ){
- *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell);
- }
- return rc;
+
+ assert(pCsr->pNode);
+ *pRowid = nodeGetRowid(pRtree, pCsr->pNode, pCsr->iCell);
+
+ return SQLITE_OK;
}
/*
@@ -155453,18 +142269,13 @@ static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){
static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
Rtree *pRtree = (Rtree *)cur->pVtab;
RtreeCursor *pCsr = (RtreeCursor *)cur;
- RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
- RtreeCoord c;
- int rc = SQLITE_OK;
- RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
- if( rc ) return rc;
- if( p==0 ) return SQLITE_OK;
if( i==0 ){
- sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell));
+ i64 iRowid = nodeGetRowid(pRtree, pCsr->pNode, pCsr->iCell);
+ sqlite3_result_int64(ctx, iRowid);
}else{
- if( rc ) return rc;
- nodeGetCoord(pRtree, pNode, p->iCell, i-1, &c);
+ RtreeCoord c;
+ nodeGetCoord(pRtree, pCsr->pNode, pCsr->iCell, i-1, &c);
#ifndef SQLITE_RTREE_INT_ONLY
if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
sqlite3_result_double(ctx, c.f);
@@ -155475,6 +142286,7 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
sqlite3_result_int(ctx, c.i);
}
}
+
return SQLITE_OK;
}
@@ -155485,18 +142297,12 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
** *ppLeaf to 0 and return SQLITE_OK. If an error occurs, set *ppLeaf
** to zero and return an SQLite error code.
*/
-static int findLeafNode(
- Rtree *pRtree, /* RTree to search */
- i64 iRowid, /* The rowid searching for */
- RtreeNode **ppLeaf, /* Write the node here */
- sqlite3_int64 *piNode /* Write the node-id here */
-){
+static int findLeafNode(Rtree *pRtree, i64 iRowid, RtreeNode **ppLeaf){
int rc;
*ppLeaf = 0;
sqlite3_bind_int64(pRtree->pReadRowid, 1, iRowid);
if( sqlite3_step(pRtree->pReadRowid)==SQLITE_ROW ){
i64 iNode = sqlite3_column_int64(pRtree->pReadRowid, 0);
- if( piNode ) *piNode = iNode;
rc = nodeAcquire(pRtree, iNode, 0, ppLeaf);
sqlite3_reset(pRtree->pReadRowid);
}else{
@@ -155512,45 +142318,42 @@ static int findLeafNode(
** operator.
*/
static int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){
- RtreeMatchArg *pBlob; /* BLOB returned by geometry function */
- sqlite3_rtree_query_info *pInfo; /* Callback information */
- int nBlob; /* Size of the geometry function blob */
- int nExpected; /* Expected size of the BLOB */
+ RtreeMatchArg *p;
+ sqlite3_rtree_geometry *pGeom;
+ int nBlob;
/* Check that value is actually a blob. */
if( sqlite3_value_type(pValue)!=SQLITE_BLOB ) return SQLITE_ERROR;
/* Check that the blob is roughly the right size. */
nBlob = sqlite3_value_bytes(pValue);
- if( nBlob<(int)sizeof(RtreeMatchArg) ){
+ if( nBlob<(int)sizeof(RtreeMatchArg)
+ || ((nBlob-sizeof(RtreeMatchArg))%sizeof(RtreeDValue))!=0
+ ){
return SQLITE_ERROR;
}
- pInfo = (sqlite3_rtree_query_info*)sqlite3_malloc( sizeof(*pInfo)+nBlob );
- if( !pInfo ) return SQLITE_NOMEM;
- memset(pInfo, 0, sizeof(*pInfo));
- pBlob = (RtreeMatchArg*)&pInfo[1];
+ pGeom = (sqlite3_rtree_geometry *)sqlite3_malloc(
+ sizeof(sqlite3_rtree_geometry) + nBlob
+ );
+ if( !pGeom ) return SQLITE_NOMEM;
+ memset(pGeom, 0, sizeof(sqlite3_rtree_geometry));
+ p = (RtreeMatchArg *)&pGeom[1];
- memcpy(pBlob, sqlite3_value_blob(pValue), nBlob);
- nExpected = (int)(sizeof(RtreeMatchArg) +
- pBlob->nParam*sizeof(sqlite3_value*) +
- (pBlob->nParam-1)*sizeof(RtreeDValue));
- if( pBlob->magic!=RTREE_GEOMETRY_MAGIC || nBlob!=nExpected ){
- sqlite3_free(pInfo);
+ memcpy(p, sqlite3_value_blob(pValue), nBlob);
+ if( p->magic!=RTREE_GEOMETRY_MAGIC
+ || nBlob!=(int)(sizeof(RtreeMatchArg) + (p->nParam-1)*sizeof(RtreeDValue))
+ ){
+ sqlite3_free(pGeom);
return SQLITE_ERROR;
}
- pInfo->pContext = pBlob->cb.pContext;
- pInfo->nParam = pBlob->nParam;
- pInfo->aParam = pBlob->aParam;
- pInfo->apSqlParam = pBlob->apSqlParam;
- if( pBlob->cb.xGeom ){
- pCons->u.xGeom = pBlob->cb.xGeom;
- }else{
- pCons->op = RTREE_QUERY;
- pCons->u.xQueryFunc = pBlob->cb.xQueryFunc;
- }
- pCons->pInfo = pInfo;
+ pGeom->pContext = p->pContext;
+ pGeom->nParam = p->nParam;
+ pGeom->aParam = p->aParam;
+
+ pCons->xGeom = p->xGeom;
+ pCons->pGeom = pGeom;
return SQLITE_OK;
}
@@ -155564,59 +142367,44 @@ static int rtreeFilter(
){
Rtree *pRtree = (Rtree *)pVtabCursor->pVtab;
RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+
RtreeNode *pRoot = 0;
int ii;
int rc = SQLITE_OK;
- int iCell = 0;
rtreeReference(pRtree);
- /* Reset the cursor to the same state as rtreeOpen() leaves it in. */
freeCursorConstraints(pCsr);
- sqlite3_free(pCsr->aPoint);
- memset(pCsr, 0, sizeof(RtreeCursor));
- pCsr->base.pVtab = (sqlite3_vtab*)pRtree;
-
pCsr->iStrategy = idxNum;
+
if( idxNum==1 ){
/* Special case - lookup by rowid. */
RtreeNode *pLeaf; /* Leaf on which the required cell resides */
- RtreeSearchPoint *p; /* Search point for the the leaf */
i64 iRowid = sqlite3_value_int64(argv[0]);
- i64 iNode = 0;
- rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode);
- if( rc==SQLITE_OK && pLeaf!=0 ){
- p = rtreeSearchPointNew(pCsr, RTREE_ZERO, 0);
- assert( p!=0 ); /* Always returns pCsr->sPoint */
- pCsr->aNode[0] = pLeaf;
- p->id = iNode;
- p->eWithin = PARTLY_WITHIN;
- rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell);
- p->iCell = iCell;
- RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:");
- }else{
- pCsr->atEOF = 1;
+ rc = findLeafNode(pRtree, iRowid, &pLeaf);
+ pCsr->pNode = pLeaf;
+ if( pLeaf ){
+ assert( rc==SQLITE_OK );
+ rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &pCsr->iCell);
}
}else{
/* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array
** with the configured constraints.
*/
- rc = nodeAcquire(pRtree, 1, 0, &pRoot);
- if( rc==SQLITE_OK && argc>0 ){
+ if( argc>0 ){
pCsr->aConstraint = sqlite3_malloc(sizeof(RtreeConstraint)*argc);
pCsr->nConstraint = argc;
if( !pCsr->aConstraint ){
rc = SQLITE_NOMEM;
}else{
memset(pCsr->aConstraint, 0, sizeof(RtreeConstraint)*argc);
- memset(pCsr->anQueue, 0, sizeof(u32)*(pRtree->iDepth + 1));
assert( (idxStr==0 && argc==0)
|| (idxStr && (int)strlen(idxStr)==argc*2) );
for(ii=0; ii<argc; ii++){
RtreeConstraint *p = &pCsr->aConstraint[ii];
p->op = idxStr[ii*2];
- p->iCoord = idxStr[ii*2+1]-'0';
- if( p->op>=RTREE_MATCH ){
+ p->iCoord = idxStr[ii*2+1]-'a';
+ if( p->op==RTREE_MATCH ){
/* A MATCH operator. The right-hand-side must be a blob that
** can be cast into an RtreeMatchArg object. One created using
** an sqlite3_rtree_geometry_callback() SQL user function.
@@ -155625,35 +142413,41 @@ static int rtreeFilter(
if( rc!=SQLITE_OK ){
break;
}
- p->pInfo->nCoord = pRtree->nDim*2;
- p->pInfo->anQueue = pCsr->anQueue;
- p->pInfo->mxLevel = pRtree->iDepth + 1;
}else{
#ifdef SQLITE_RTREE_INT_ONLY
- p->u.rValue = sqlite3_value_int64(argv[ii]);
+ p->rValue = sqlite3_value_int64(argv[ii]);
#else
- p->u.rValue = sqlite3_value_double(argv[ii]);
+ p->rValue = sqlite3_value_double(argv[ii]);
#endif
}
}
}
}
+
if( rc==SQLITE_OK ){
- RtreeSearchPoint *pNew;
- pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, pRtree->iDepth+1);
- if( pNew==0 ) return SQLITE_NOMEM;
- pNew->id = 1;
- pNew->iCell = 0;
- pNew->eWithin = PARTLY_WITHIN;
- assert( pCsr->bPoint==1 );
- pCsr->aNode[0] = pRoot;
- pRoot = 0;
- RTREE_QUEUE_TRACE(pCsr, "PUSH-Fm:");
- rc = rtreeStepToLeaf(pCsr);
+ pCsr->pNode = 0;
+ rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+ }
+ if( rc==SQLITE_OK ){
+ int isEof = 1;
+ int nCell = NCELL(pRoot);
+ pCsr->pNode = pRoot;
+ for(pCsr->iCell=0; rc==SQLITE_OK && pCsr->iCell<nCell; pCsr->iCell++){
+ assert( pCsr->pNode==pRoot );
+ rc = descendToCell(pRtree, pCsr, pRtree->iDepth, &isEof);
+ if( !isEof ){
+ break;
+ }
+ }
+ if( rc==SQLITE_OK && isEof ){
+ assert( pCsr->pNode==pRoot );
+ nodeRelease(pRtree, pRoot);
+ pCsr->pNode = 0;
+ }
+ assert( rc!=SQLITE_OK || !pCsr->pNode || pCsr->iCell<NCELL(pCsr->pNode) );
}
}
- nodeRelease(pRtree, pRoot);
rtreeRelease(pRtree);
return rc;
}
@@ -155709,30 +142503,17 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
Rtree *pRtree = (Rtree*)tab;
int rc = SQLITE_OK;
int ii;
- int bMatch = 0; /* True if there exists a MATCH constraint */
i64 nRow; /* Estimated rows returned by this scan */
int iIdx = 0;
char zIdxStr[RTREE_MAX_DIMENSIONS*8+1];
memset(zIdxStr, 0, sizeof(zIdxStr));
- /* Check if there exists a MATCH constraint - even an unusable one. If there
- ** is, do not consider the lookup-by-rowid plan as using such a plan would
- ** require the VDBE to evaluate the MATCH constraint, which is not currently
- ** possible. */
- for(ii=0; ii<pIdxInfo->nConstraint; ii++){
- if( pIdxInfo->aConstraint[ii].op==SQLITE_INDEX_CONSTRAINT_MATCH ){
- bMatch = 1;
- }
- }
-
assert( pIdxInfo->idxStr==0 );
for(ii=0; ii<pIdxInfo->nConstraint && iIdx<(int)(sizeof(zIdxStr)-1); ii++){
struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];
- if( bMatch==0 && p->usable
- && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ
- ){
+ if( p->usable && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){
/* We have an equality constraint on the rowid. Use strategy 1. */
int jj;
for(jj=0; jj<ii; jj++){
@@ -155768,7 +142549,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
break;
}
zIdxStr[iIdx++] = op;
- zIdxStr[iIdx++] = p->iColumn - 1 + '0';
+ zIdxStr[iIdx++] = p->iColumn - 1 + 'a';
pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
pIdxInfo->aConstraintUsage[ii].omit = 1;
}
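To make the hand-off to rtreeFilter() concrete (the table and values are hypothetical, and the op-to-letter mapping assumes the usual SQLITE_INDEX_CONSTRAINT_GE/LE translation elided from this excerpt): for a table created as

    CREATE VIRTUAL TABLE demo_index USING rtree(id, minX, maxX, minY, maxY);

a query such as SELECT id FROM demo_index WHERE minX>=5 AND maxX<=10 would be planned with idxNum==2 and idxStr "DaBb" ('D' is RTREE_GE applied to column letter 'a', i.e. minX, and 'B' is RTREE_LE applied to 'b', i.e. maxX), which rtreeFilter() decodes back into aConstraint[] entries via p->op = idxStr[ii*2] and p->iCoord = idxStr[ii*2+1]-'a'. A query of the form SELECT * FROM demo_index WHERE id=42 instead selects strategy 1, the direct lookup by rowid.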
@@ -155861,32 +142642,62 @@ static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){
return (cellArea(pRtree, &cell)-area);
}
+#if VARIANT_RSTARTREE_CHOOSESUBTREE || VARIANT_RSTARTREE_SPLIT
static RtreeDValue cellOverlap(
Rtree *pRtree,
RtreeCell *p,
RtreeCell *aCell,
- int nCell
+ int nCell,
+ int iExclude
){
int ii;
- RtreeDValue overlap = RTREE_ZERO;
+ RtreeDValue overlap = 0.0;
for(ii=0; ii<nCell; ii++){
- int jj;
- RtreeDValue o = (RtreeDValue)1;
- for(jj=0; jj<(pRtree->nDim*2); jj+=2){
- RtreeDValue x1, x2;
- x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj]));
- x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1]));
- if( x2<x1 ){
- o = (RtreeDValue)0;
- break;
- }else{
- o = o * (x2-x1);
+#if VARIANT_RSTARTREE_CHOOSESUBTREE
+ if( ii!=iExclude )
+#else
+ assert( iExclude==-1 );
+ UNUSED_PARAMETER(iExclude);
+#endif
+ {
+ int jj;
+ RtreeDValue o = (RtreeDValue)1;
+ for(jj=0; jj<(pRtree->nDim*2); jj+=2){
+ RtreeDValue x1, x2;
+
+ x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj]));
+ x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1]));
+
+ if( x2<x1 ){
+ o = 0.0;
+ break;
+ }else{
+ o = o * (x2-x1);
+ }
}
+ overlap += o;
}
- overlap += o;
}
return overlap;
}
+#endif
+
+#if VARIANT_RSTARTREE_CHOOSESUBTREE
+static RtreeDValue cellOverlapEnlargement(
+ Rtree *pRtree,
+ RtreeCell *p,
+ RtreeCell *pInsert,
+ RtreeCell *aCell,
+ int nCell,
+ int iExclude
+){
+ RtreeDValue before, after;
+ before = cellOverlap(pRtree, p, aCell, nCell, iExclude);
+ cellUnion(pRtree, p, pInsert);
+ after = cellOverlap(pRtree, p, aCell, nCell, iExclude);
+ return (after-before);
+}
+#endif
/*
@@ -155908,8 +142719,12 @@ static int ChooseLeaf(
int iCell;
sqlite3_int64 iBest = 0;
- RtreeDValue fMinGrowth = RTREE_ZERO;
- RtreeDValue fMinArea = RTREE_ZERO;
+ RtreeDValue fMinGrowth = 0.0;
+ RtreeDValue fMinArea = 0.0;
+#if VARIANT_RSTARTREE_CHOOSESUBTREE
+ RtreeDValue fMinOverlap = 0.0;
+ RtreeDValue overlap;
+#endif
int nCell = NCELL(pNode);
RtreeCell cell;
@@ -155917,6 +142732,22 @@ static int ChooseLeaf(
RtreeCell *aCell = 0;
+#if VARIANT_RSTARTREE_CHOOSESUBTREE
+ if( ii==(pRtree->iDepth-1) ){
+ int jj;
+ aCell = sqlite3_malloc(sizeof(RtreeCell)*nCell);
+ if( !aCell ){
+ rc = SQLITE_NOMEM;
+ nodeRelease(pRtree, pNode);
+ pNode = 0;
+ continue;
+ }
+ for(jj=0; jj<nCell; jj++){
+ nodeGetCell(pRtree, pNode, jj, &aCell[jj]);
+ }
+ }
+#endif
+
/* Select the child node which will be enlarged the least if pCell
** is inserted into it. Resolve ties by choosing the entry with
** the smallest area.
@@ -155928,9 +142759,26 @@ static int ChooseLeaf(
nodeGetCell(pRtree, pNode, iCell, &cell);
growth = cellGrowth(pRtree, &cell, pCell);
area = cellArea(pRtree, &cell);
+
+#if VARIANT_RSTARTREE_CHOOSESUBTREE
+ if( ii==(pRtree->iDepth-1) ){
+ overlap = cellOverlapEnlargement(pRtree,&cell,pCell,aCell,nCell,iCell);
+ }else{
+ overlap = 0.0;
+ }
+ if( (iCell==0)
+ || (overlap<fMinOverlap)
+ || (overlap==fMinOverlap && growth<fMinGrowth)
+ || (overlap==fMinOverlap && growth==fMinGrowth && area<fMinArea)
+ ){
+ bBest = 1;
+ fMinOverlap = overlap;
+ }
+#else
if( iCell==0||growth<fMinGrowth||(growth==fMinGrowth && area<fMinArea) ){
bBest = 1;
}
+#endif
if( bBest ){
fMinGrowth = growth;
fMinArea = area;
@@ -156001,6 +142849,155 @@ static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){
static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int);
+#if VARIANT_GUTTMAN_LINEAR_SPLIT
+/*
+** Implementation of the linear variant of the PickNext() function from
+** Guttman[84].
+*/
+static RtreeCell *LinearPickNext(
+ Rtree *pRtree,
+ RtreeCell *aCell,
+ int nCell,
+ RtreeCell *pLeftBox,
+ RtreeCell *pRightBox,
+ int *aiUsed
+){
+ int ii;
+ for(ii=0; aiUsed[ii]; ii++);
+ aiUsed[ii] = 1;
+ return &aCell[ii];
+}
+
+/*
+** Implementation of the linear variant of the PickSeeds() function from
+** Guttman[84].
+*/
+static void LinearPickSeeds(
+ Rtree *pRtree,
+ RtreeCell *aCell,
+ int nCell,
+ int *piLeftSeed,
+ int *piRightSeed
+){
+ int i;
+ int iLeftSeed = 0;
+ int iRightSeed = 1;
+ RtreeDValue maxNormalInnerWidth = (RtreeDValue)0;
+
+ /* Pick two "seed" cells from the array of cells. The algorithm used
+ ** here is the LinearPickSeeds algorithm from Guttman[1984]. The
+ ** indices of the two seed cells in the array are stored in local
+ ** variables iLeftSeed and iRightSeed.
+ */
+ for(i=0; i<pRtree->nDim; i++){
+ RtreeDValue x1 = DCOORD(aCell[0].aCoord[i*2]);
+ RtreeDValue x2 = DCOORD(aCell[0].aCoord[i*2+1]);
+ RtreeDValue x3 = x1;
+ RtreeDValue x4 = x2;
+ int jj;
+
+ int iCellLeft = 0;
+ int iCellRight = 0;
+
+ for(jj=1; jj<nCell; jj++){
+ RtreeDValue left = DCOORD(aCell[jj].aCoord[i*2]);
+ RtreeDValue right = DCOORD(aCell[jj].aCoord[i*2+1]);
+
+ if( left<x1 ) x1 = left;
+ if( right>x4 ) x4 = right;
+ if( left>x3 ){
+ x3 = left;
+ iCellRight = jj;
+ }
+ if( right<x2 ){
+ x2 = right;
+ iCellLeft = jj;
+ }
+ }
+
+ if( x4!=x1 ){
+ RtreeDValue normalwidth = (x3 - x2) / (x4 - x1);
+ if( normalwidth>maxNormalInnerWidth ){
+ iLeftSeed = iCellLeft;
+ iRightSeed = iCellRight;
+ }
+ }
+ }
+
+ *piLeftSeed = iLeftSeed;
+ *piRightSeed = iRightSeed;
+}
+#endif /* VARIANT_GUTTMAN_LINEAR_SPLIT */
+
+#if VARIANT_GUTTMAN_QUADRATIC_SPLIT
+/*
+** Implementation of the quadratic variant of the PickNext() function from
+** Guttman[84].
+*/
+static RtreeCell *QuadraticPickNext(
+ Rtree *pRtree,
+ RtreeCell *aCell,
+ int nCell,
+ RtreeCell *pLeftBox,
+ RtreeCell *pRightBox,
+ int *aiUsed
+){
+ #define FABS(a) ((a)<0.0?-1.0*(a):(a))
+
+ int iSelect = -1;
+ RtreeDValue fDiff;
+ int ii;
+ for(ii=0; ii<nCell; ii++){
+ if( aiUsed[ii]==0 ){
+ RtreeDValue left = cellGrowth(pRtree, pLeftBox, &aCell[ii]);
+ RtreeDValue right = cellGrowth(pRtree, pRightBox, &aCell[ii]);
+ RtreeDValue diff = FABS(right-left);
+ if( iSelect<0 || diff>fDiff ){
+ fDiff = diff;
+ iSelect = ii;
+ }
+ }
+ }
+ aiUsed[iSelect] = 1;
+ return &aCell[iSelect];
+}
+
+/*
+** Implementation of the quadratic variant of the PickSeeds() function from
+** Guttman[84].
+*/
+static void QuadraticPickSeeds(
+ Rtree *pRtree,
+ RtreeCell *aCell,
+ int nCell,
+ int *piLeftSeed,
+ int *piRightSeed
+){
+ int ii;
+ int jj;
+
+ int iLeftSeed = 0;
+ int iRightSeed = 1;
+ RtreeDValue fWaste = 0.0;
+
+ for(ii=0; ii<nCell; ii++){
+ for(jj=ii+1; jj<nCell; jj++){
+ RtreeDValue right = cellArea(pRtree, &aCell[jj]);
+ RtreeDValue growth = cellGrowth(pRtree, &aCell[ii], &aCell[jj]);
+ RtreeDValue waste = growth - right;
+
+ if( waste>fWaste ){
+ iLeftSeed = ii;
+ iRightSeed = jj;
+ fWaste = waste;
+ }
+ }
+ }
+
+ *piLeftSeed = iLeftSeed;
+ *piRightSeed = iRightSeed;
+}
+#endif /* VARIANT_GUTTMAN_QUADRATIC_SPLIT */
/*
** Arguments aIdx, aDistance and aSpare all point to arrays of size
@@ -156141,6 +143138,7 @@ static void SortByDimension(
}
}
+#if VARIANT_RSTARTREE_SPLIT
/*
** Implementation of the R*-tree variant of SplitNode from Beckman[1990].
*/
@@ -156159,7 +143157,7 @@ static int splitNodeStartree(
int iBestDim = 0;
int iBestSplit = 0;
- RtreeDValue fBestMargin = RTREE_ZERO;
+ RtreeDValue fBestMargin = 0.0;
int nByte = (pRtree->nDim+1)*(sizeof(int*)+nCell*sizeof(int));
@@ -156180,9 +143178,9 @@ static int splitNodeStartree(
}
for(ii=0; ii<pRtree->nDim; ii++){
- RtreeDValue margin = RTREE_ZERO;
- RtreeDValue fBestOverlap = RTREE_ZERO;
- RtreeDValue fBestArea = RTREE_ZERO;
+ RtreeDValue margin = 0.0;
+ RtreeDValue fBestOverlap = 0.0;
+ RtreeDValue fBestArea = 0.0;
int iBestLeft = 0;
int nLeft;
@@ -156208,7 +143206,7 @@ static int splitNodeStartree(
}
margin += cellMargin(pRtree, &left);
margin += cellMargin(pRtree, &right);
- overlap = cellOverlap(pRtree, &left, &right, 1);
+ overlap = cellOverlap(pRtree, &left, &right, 1, -1);
area = cellArea(pRtree, &left) + cellArea(pRtree, &right);
if( (nLeft==RTREE_MINCELLS(pRtree))
|| (overlap<fBestOverlap)
@@ -156240,7 +143238,63 @@ static int splitNodeStartree(
sqlite3_free(aaSorted);
return SQLITE_OK;
}
+#endif
+
+#if VARIANT_GUTTMAN_SPLIT
+/*
+** Implementation of the regular R-tree SplitNode from Guttman[1984].
+*/
+static int splitNodeGuttman(
+ Rtree *pRtree,
+ RtreeCell *aCell,
+ int nCell,
+ RtreeNode *pLeft,
+ RtreeNode *pRight,
+ RtreeCell *pBboxLeft,
+ RtreeCell *pBboxRight
+){
+ int iLeftSeed = 0;
+ int iRightSeed = 1;
+ int *aiUsed;
+ int i;
+
+ aiUsed = sqlite3_malloc(sizeof(int)*nCell);
+ if( !aiUsed ){
+ return SQLITE_NOMEM;
+ }
+ memset(aiUsed, 0, sizeof(int)*nCell);
+
+ PickSeeds(pRtree, aCell, nCell, &iLeftSeed, &iRightSeed);
+
+ memcpy(pBboxLeft, &aCell[iLeftSeed], sizeof(RtreeCell));
+ memcpy(pBboxRight, &aCell[iRightSeed], sizeof(RtreeCell));
+ nodeInsertCell(pRtree, pLeft, &aCell[iLeftSeed]);
+ nodeInsertCell(pRtree, pRight, &aCell[iRightSeed]);
+ aiUsed[iLeftSeed] = 1;
+ aiUsed[iRightSeed] = 1;
+
+ for(i=nCell-2; i>0; i--){
+ RtreeCell *pNext;
+ pNext = PickNext(pRtree, aCell, nCell, pBboxLeft, pBboxRight, aiUsed);
+ RtreeDValue diff =
+ cellGrowth(pRtree, pBboxLeft, pNext) -
+ cellGrowth(pRtree, pBboxRight, pNext)
+ ;
+ if( (RTREE_MINCELLS(pRtree)-NCELL(pRight)==i)
+ || (diff>0.0 && (RTREE_MINCELLS(pRtree)-NCELL(pLeft)!=i))
+ ){
+ nodeInsertCell(pRtree, pRight, pNext);
+ cellUnion(pRtree, pBboxRight, pNext);
+ }else{
+ nodeInsertCell(pRtree, pLeft, pNext);
+ cellUnion(pRtree, pBboxLeft, pNext);
+ }
+ }
+ sqlite3_free(aiUsed);
+ return SQLITE_OK;
+}
+#endif
static int updateMapping(
Rtree *pRtree,
@@ -156318,8 +143372,7 @@ static int SplitNode(
memset(pLeft->zData, 0, pRtree->iNodeSize);
memset(pRight->zData, 0, pRtree->iNodeSize);
- rc = splitNodeStartree(pRtree, aCell, nCell, pLeft, pRight,
- &leftbbox, &rightbbox);
+ rc = AssignCells(pRtree, aCell, nCell, pLeft, pRight, &leftbbox, &rightbbox);
if( rc!=SQLITE_OK ){
goto splitnode_out;
}
@@ -156602,7 +143655,7 @@ static int Reinsert(
}
for(ii=0; ii<nCell; ii++){
- aDistance[ii] = RTREE_ZERO;
+ aDistance[ii] = 0.0;
for(iDim=0; iDim<pRtree->nDim; iDim++){
RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) -
DCOORD(aCell[ii].aCoord[iDim*2]));
@@ -156668,12 +143721,16 @@ static int rtreeInsertCell(
}
}
if( nodeInsertCell(pRtree, pNode, pCell) ){
+#if VARIANT_RSTARTREE_REINSERT
if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){
rc = SplitNode(pRtree, pNode, pCell, iHeight);
}else{
pRtree->iReinsertHeight = iHeight;
rc = Reinsert(pRtree, pNode, pCell, iHeight);
}
+#else
+ rc = SplitNode(pRtree, pNode, pCell, iHeight);
+#endif
}else{
rc = AdjustTree(pRtree, pNode, pCell);
if( rc==SQLITE_OK ){
@@ -156743,7 +143800,7 @@ static int rtreeDeleteRowid(Rtree *pRtree, sqlite3_int64 iDelete){
** about to be deleted.
*/
if( rc==SQLITE_OK ){
- rc = findLeafNode(pRtree, iDelete, &pLeaf, 0);
+ rc = findLeafNode(pRtree, iDelete, &pLeaf);
}
/* Delete the cell in question from the leaf node. */
@@ -156857,8 +143914,6 @@ static int rtreeUpdate(
rtreeReference(pRtree);
assert(nData>=1);
- cell.iRowid = 0; /* Used only to suppress a compiler warning */
-
/* Constraint handling. A write operation on an r-tree table may return
** SQLITE_CONSTRAINT for two reasons:
**
@@ -156873,19 +143928,11 @@ static int rtreeUpdate(
if( nData>1 ){
int ii;
- /* Populate the cell.aCoord[] array. The first coordinate is azData[3].
- **
- ** NB: nData can only be less than nDim*2+3 if the rtree is mis-declared
- ** with "column" that are interpreted as table constraints.
- ** Example: CREATE VIRTUAL TABLE bad USING rtree(x,y,CHECK(y>5));
- ** This problem was discovered after years of use, so we silently ignore
- ** these kinds of misdeclared tables to avoid breaking any legacy.
- */
- assert( nData<=(pRtree->nDim*2 + 3) );
-
+ /* Populate the cell.aCoord[] array. The first coordinate is azData[3]. */
+ assert( nData==(pRtree->nDim*2 + 3) );
#ifndef SQLITE_RTREE_INT_ONLY
if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
- for(ii=0; ii<nData-4; ii+=2){
+ for(ii=0; ii<(pRtree->nDim*2); ii+=2){
cell.aCoord[ii].f = rtreeValueDown(azData[ii+3]);
cell.aCoord[ii+1].f = rtreeValueUp(azData[ii+4]);
if( cell.aCoord[ii].f>cell.aCoord[ii+1].f ){
@@ -156896,7 +143943,7 @@ static int rtreeUpdate(
}else
#endif
{
- for(ii=0; ii<nData-4; ii+=2){
+ for(ii=0; ii<(pRtree->nDim*2); ii+=2){
cell.aCoord[ii].i = sqlite3_value_int(azData[ii+3]);
cell.aCoord[ii+1].i = sqlite3_value_int(azData[ii+4]);
if( cell.aCoord[ii].i>cell.aCoord[ii+1].i ){
@@ -156998,32 +144045,26 @@ static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){
** on sqlite_stat1 data. Otherwise, use RTREE_DEFAULT_ROWEST.
*/
static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){
- const char *zFmt = "SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'";
- char *zSql;
+ const char *zSql = "SELECT stat FROM sqlite_stat1 WHERE tbl= ? || '_rowid'";
sqlite3_stmt *p;
int rc;
i64 nRow = 0;
- zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName);
- if( zSql==0 ){
- rc = SQLITE_NOMEM;
- }else{
- rc = sqlite3_prepare_v2(db, zSql, -1, &p, 0);
- if( rc==SQLITE_OK ){
- if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0);
- rc = sqlite3_finalize(p);
- }else if( rc!=SQLITE_NOMEM ){
- rc = SQLITE_OK;
- }
+ rc = sqlite3_prepare_v2(db, zSql, -1, &p, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_text(p, 1, pRtree->zName, -1, SQLITE_STATIC);
+ if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0);
+ rc = sqlite3_finalize(p);
+ }else if( rc!=SQLITE_NOMEM ){
+ rc = SQLITE_OK;
+ }
- if( rc==SQLITE_OK ){
- if( nRow==0 ){
- pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
- }else{
- pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST);
- }
+ if( rc==SQLITE_OK ){
+ if( nRow==0 ){
+ pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
+ }else{
+ pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST);
}
- sqlite3_free(zSql);
}
return rc;
@@ -157090,8 +144131,7 @@ static int rtreeSqlInit(
char *zCreate = sqlite3_mprintf(
"CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY, data BLOB);"
"CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY, nodeno INTEGER);"
-"CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,"
- " parentnode INTEGER);"
+"CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY, parentnode INTEGER);"
"INSERT INTO '%q'.'%q_node' VALUES(1, zeroblob(%d))",
zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, pRtree->iNodeSize
);
@@ -157293,8 +144333,6 @@ static int rtreeInit(
if( rc==SQLITE_OK ){
*ppVtab = (sqlite3_vtab *)pRtree;
}else{
- assert( *ppVtab==0 );
- assert( pRtree->nBusy==1 );
rtreeRelease(pRtree);
}
return rc;
@@ -157305,10 +144343,10 @@ static int rtreeInit(
** Implementation of a scalar function that decodes r-tree nodes to
** human readable strings. This can be used for debugging and analysis.
**
-** The scalar function takes two arguments: (1) the number of dimensions
-** to the rtree (between 1 and 5, inclusive) and (2) a blob of data containing
-** an r-tree node. For a two-dimensional r-tree structure called "rt", to
-** deserialize all nodes, a statement like:
+** The scalar function takes two arguments: the number of dimensions the
+** r-tree indexes, and a blob of data containing an r-tree node.
+** For a two-dimensional r-tree structure called "rt", to deserialize
+** all nodes, a statement like:
**
** SELECT rtreenode(2, data) FROM rt_node;
**
@@ -157341,7 +144379,7 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
nCell = (int)strlen(zCell);
for(jj=0; jj<tree.nDim*2; jj++){
#ifndef SQLITE_RTREE_INT_ONLY
- sqlite3_snprintf(512-nCell,&zCell[nCell], " %g",
+ sqlite3_snprintf(512-nCell,&zCell[nCell], " %f",
(double)cell.aCoord[jj].f);
#else
sqlite3_snprintf(512-nCell,&zCell[nCell], " %d",
@@ -157362,15 +144400,6 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
sqlite3_result_text(ctx, zText, -1, sqlite3_free);
}
-/* This routine implements an SQL function that returns the "depth" parameter
-** from the front of a blob that is an r-tree node. For example:
-**
-** SELECT rtreedepth(data) FROM rt_node WHERE nodeno=1;
-**
-** The depth value is 0 for all nodes other than the root node, and the root
-** node always has nodeno=1, so the example above is the primary use for this
-** routine. This routine is intended for testing and analysis only.
-*/
static void rtreedepth(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
UNUSED_PARAMETER(nArg);
if( sqlite3_value_type(apArg[0])!=SQLITE_BLOB
@@ -157413,87 +144442,57 @@ SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db){
}
/*
-** This routine deletes the RtreeGeomCallback object that was attached to
-** one of the SQL functions created by sqlite3_rtree_geometry_callback()
-** or sqlite3_rtree_query_callback(). In other words, this routine is the
-** destructor for an RtreeGeomCallback object. This routine is called when
-** the corresponding SQL function is deleted.
-*/
-static void rtreeFreeCallback(void *p){
- RtreeGeomCallback *pInfo = (RtreeGeomCallback*)p;
- if( pInfo->xDestructor ) pInfo->xDestructor(pInfo->pContext);
- sqlite3_free(p);
-}
-
-/*
-** This routine frees the BLOB that is returned by geomCallback().
+** A version of sqlite3_free() that can be used as a callback. This is used
+** in two places - as the destructor for the blob value returned by the
+** invocation of a geometry function, and as the destructor for the geometry
+** functions themselves.
*/
-static void rtreeMatchArgFree(void *pArg){
- int i;
- RtreeMatchArg *p = (RtreeMatchArg*)pArg;
- for(i=0; i<p->nParam; i++){
- sqlite3_value_free(p->apSqlParam[i]);
- }
+static void doSqlite3Free(void *p){
sqlite3_free(p);
}
/*
-** Each call to sqlite3_rtree_geometry_callback() or
-** sqlite3_rtree_query_callback() creates an ordinary SQLite
-** scalar function that is implemented by this routine.
-**
-** All this function does is construct an RtreeMatchArg object that
-** contains the geometry-checking callback routines and a list of
-** parameters to this function, then return that RtreeMatchArg object
-** as a BLOB.
+** Each call to sqlite3_rtree_geometry_callback() creates an ordinary SQLite
+** scalar user function. This C function is the callback used for all such
+** registered SQL functions.
**
-** The R-Tree MATCH operator will read the returned BLOB, deserialize
-** the RtreeMatchArg object, and use the RtreeMatchArg object to figure
-** out which elements of the R-Tree should be returned by the query.
+** The scalar user functions return a blob that is interpreted by r-tree
+** table MATCH operators.
*/
static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){
RtreeGeomCallback *pGeomCtx = (RtreeGeomCallback *)sqlite3_user_data(ctx);
RtreeMatchArg *pBlob;
int nBlob;
- int memErr = 0;
- nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue)
- + nArg*sizeof(sqlite3_value*);
+ nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue);
pBlob = (RtreeMatchArg *)sqlite3_malloc(nBlob);
if( !pBlob ){
sqlite3_result_error_nomem(ctx);
}else{
int i;
pBlob->magic = RTREE_GEOMETRY_MAGIC;
- pBlob->cb = pGeomCtx[0];
- pBlob->apSqlParam = (sqlite3_value**)&pBlob->aParam[nArg];
+ pBlob->xGeom = pGeomCtx->xGeom;
+ pBlob->pContext = pGeomCtx->pContext;
pBlob->nParam = nArg;
for(i=0; i<nArg; i++){
- pBlob->apSqlParam[i] = sqlite3_value_dup(aArg[i]);
- if( pBlob->apSqlParam[i]==0 ) memErr = 1;
#ifdef SQLITE_RTREE_INT_ONLY
pBlob->aParam[i] = sqlite3_value_int64(aArg[i]);
#else
pBlob->aParam[i] = sqlite3_value_double(aArg[i]);
#endif
}
- if( memErr ){
- sqlite3_result_error_nomem(ctx);
- rtreeMatchArgFree(pBlob);
- }else{
- sqlite3_result_blob(ctx, pBlob, nBlob, rtreeMatchArgFree);
- }
+ sqlite3_result_blob(ctx, pBlob, nBlob, doSqlite3Free);
}
}
/*
** Register a new geometry function for use with the r-tree MATCH operator.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
- sqlite3 *db, /* Register SQL function on this connection */
- const char *zGeom, /* Name of the new SQL function */
- int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*), /* Callback */
- void *pContext /* Extra data associated with the callback */
+SQLITE_API int sqlite3_rtree_geometry_callback(
+ sqlite3 *db,
+ const char *zGeom,
+ int (*xGeom)(sqlite3_rtree_geometry *, int, RtreeDValue *, int *),
+ void *pContext
){
RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */
@@ -157501,36 +144500,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback));
if( !pGeomCtx ) return SQLITE_NOMEM;
pGeomCtx->xGeom = xGeom;
- pGeomCtx->xQueryFunc = 0;
- pGeomCtx->xDestructor = 0;
pGeomCtx->pContext = pContext;
- return sqlite3_create_function_v2(db, zGeom, -1, SQLITE_ANY,
- (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback
- );
-}
-
-/*
-** Register a new 2nd-generation geometry function for use with the
-** r-tree MATCH operator.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
- sqlite3 *db, /* Register SQL function on this connection */
- const char *zQueryFunc, /* Name of new SQL function */
- int (*xQueryFunc)(sqlite3_rtree_query_info*), /* Callback */
- void *pContext, /* Extra data passed into the callback */
- void (*xDestructor)(void*) /* Destructor for the extra data */
-){
- RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */
- /* Allocate and populate the context object. */
- pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback));
- if( !pGeomCtx ) return SQLITE_NOMEM;
- pGeomCtx->xGeom = 0;
- pGeomCtx->xQueryFunc = xQueryFunc;
- pGeomCtx->xDestructor = xDestructor;
- pGeomCtx->pContext = pContext;
- return sqlite3_create_function_v2(db, zQueryFunc, -1, SQLITE_ANY,
- (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback
+ /* Create the new user-function. Register a destructor function to delete
+ ** the context object when it is no longer required. */
+ return sqlite3_create_function_v2(db, zGeom, -1, SQLITE_ANY,
+ (void *)pGeomCtx, geomCallback, 0, 0, doSqlite3Free
);
}
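/*
** For illustration only: a minimal sketch of how an application might use
** the registration interface above. The callback name circleGeom, the
** table name demo_rt and the parameter layout (cx, cy, radius) are
** assumptions made for this example, not part of this file. The callback
** reports a hit whenever an entry's 2-D bounding box intersects the
** axis-aligned square enclosing the requested circle - a deliberately
** conservative test (error checking kept to a minimum):
**
**   // Called for each node/entry visited while evaluating MATCH.
**   // aCoord[] holds xmin, xmax, ymin, ymax; aParam[] holds cx, cy, r.
**   static int circleGeom(sqlite3_rtree_geometry *p, int nCoord,
**                         sqlite3_rtree_dbl *aCoord, int *pRes){
**     sqlite3_rtree_dbl cx, cy, r;
**     if( nCoord!=4 || p->nParam!=3 ) return SQLITE_ERROR;
**     cx = p->aParam[0]; cy = p->aParam[1]; r = p->aParam[2];
**     *pRes = aCoord[0]<=cx+r && aCoord[1]>=cx-r
**          && aCoord[2]<=cy+r && aCoord[3]>=cy-r;
**     return SQLITE_OK;
**   }
**
**   // Register the callback, then query with the MATCH operator:
**   //   sqlite3_rtree_geometry_callback(db, "circle", circleGeom, 0);
**   //   SELECT id FROM demo_rt WHERE id MATCH circle(45.3, 22.9, 5.0);
*/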
@@ -157538,7 +144513,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_init(
+SQLITE_API int sqlite3_rtree_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -157593,10 +144568,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_init(
/* #include <assert.h> */
#ifndef SQLITE_CORE
-/* #include "sqlite3ext.h" */
SQLITE_EXTENSION_INIT1
#else
-/* #include "sqlite3.h" */
#endif
/*
@@ -157637,6 +144610,7 @@ static int icuLikeCompare(
/* Read (and consume) the next character from the input pattern. */
UChar32 uPattern;
U8_NEXT_UNSAFE(zPattern, iPattern, uPattern);
+ assert(uPattern!=0);
/* There are now 4 possibilities:
**
@@ -157975,7 +144949,6 @@ static void icuLoadCollation(
int rc; /* Return code from sqlite3_create_collation_x() */
assert(nArg==2);
- (void)nArg; /* Unused parameter */
zLocale = (const char *)sqlite3_value_text(apArg[0]);
zName = (const char *)sqlite3_value_text(apArg[1]);
@@ -158045,7 +145018,7 @@ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_icu_init(
+SQLITE_API int sqlite3_icu_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -158072,13 +145045,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_icu_init(
*************************************************************************
** This file implements a tokenizer for fts3 based on the ICU library.
*/
-/* #include "fts3Int.h" */
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
#ifdef SQLITE_ENABLE_ICU
/* #include <assert.h> */
/* #include <string.h> */
-/* #include "fts3_tokenizer.h" */
#include <unicode/ubrk.h>
/* #include <unicode/ucol.h> */
@@ -158301,13 +145272,12 @@ static int icuNext(
** The set of routines that implement the simple tokenizer
*/
static const sqlite3_tokenizer_module icuTokenizerModule = {
- 0, /* iVersion */
- icuCreate, /* xCreate */
- icuDestroy, /* xDestroy */
- icuOpen, /* xOpen */
- icuClose, /* xClose */
- icuNext, /* xNext */
- 0, /* xLanguageid */
+ 0, /* iVersion */
+ icuCreate, /* xCreate */
+ icuDestroy, /* xDestroy */
+ icuOpen, /* xOpen */
+ icuClose, /* xClose */
+ icuNext, /* xNext */
};
/*
@@ -158323,25929 +145293,3 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(
#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
/************** End of fts3_icu.c ********************************************/
-/************** Begin file sqlite3rbu.c **************************************/
-/*
-** 2014 August 30
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-**
-** OVERVIEW
-**
-** The RBU extension requires that the RBU update be packaged as an
-** SQLite database. The tables it expects to find are described in
-** sqlite3rbu.h. Essentially, for each table xyz in the target database
-** that the user wishes to write to, a corresponding data_xyz table is
-** created in the RBU database and populated with one row for each row to
-** update, insert or delete from the target table.
-**
-** The update proceeds in three stages:
-**
-** 1) The database is updated. The modified database pages are written
-** to a *-oal file. A *-oal file is just like a *-wal file, except
-** that it is named "<database>-oal" instead of "<database>-wal".
-** Because regular SQLite clients do not look for a file named
-** "<database>-oal", they go on using the original database in
-** rollback mode while the *-oal file is being generated.
-**
-** During this stage RBU does not update the database by writing
-** directly to the target tables. Instead it creates "imposter"
-** tables using the SQLITE_TESTCTRL_IMPOSTER interface that it uses
-** to update each b-tree individually. All updates required by each
-** b-tree are completed before moving on to the next, and all
-** updates are done in sorted key order.
-**
-** 2) The "<database>-oal" file is moved to the equivalent "<database>-wal"
-** location using a call to rename(2). Before doing this the RBU
-** module takes an EXCLUSIVE lock on the database file, ensuring
-** that there are no other active readers.
-**
-** Once the EXCLUSIVE lock is released, any other database readers
-** detect the new *-wal file and read the database in wal mode. At
-** this point they see the new version of the database - including
-** the updates made as part of the RBU update.
-**
-** 3) The new *-wal file is checkpointed. This proceeds in the same way
-** as a regular database checkpoint, except that a single frame is
-** checkpointed each time sqlite3rbu_step() is called. If the RBU
-** handle is closed before the entire *-wal file is checkpointed,
-** the checkpoint progress is saved in the RBU database and the
-** checkpoint can be resumed by another RBU client at some point in
-** the future.
-**
-** POTENTIAL PROBLEMS
-**
-** The rename() call might not be portable. And RBU is not currently
-** syncing the directory after renaming the file.
-**
-** When state is saved, any commit to the *-oal file and the commit to
-** the RBU update database are not atomic. So if the power fails at the
-** wrong moment they might get out of sync. As the main database will be
-** committed before the RBU update database this will likely either just
-** pass unnoticed, or result in SQLITE_CONSTRAINT errors (due to UNIQUE
-** constraint violations).
-**
-** If some client does modify the target database mid RBU update, or some
-** other error occurs, the RBU extension will keep throwing errors. It's
-** not really clear how to get out of this state. The system could just
-** by delete the RBU update database and *-oal file and have the device
-** download the update again and start over.
-**
-** At present, for an UPDATE, both the new.* and old.* records are
-** collected in the rbu_xyz table. And for both UPDATEs and DELETEs all
-** fields are collected. This means we're probably writing a lot more
-** data to disk when saving the state of an ongoing update to the RBU
-** update database than is strictly necessary.
-**
-*/
-
-/* #include <assert.h> */
-/* #include <string.h> */
-/* #include <stdio.h> */
-
-/* #include "sqlite3.h" */
-
-#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RBU)
-/************** Include sqlite3rbu.h in the middle of sqlite3rbu.c ***********/
-/************** Begin file sqlite3rbu.h **************************************/
-/*
-** 2014 August 30
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains the public interface for the RBU extension.
-*/
-
-/*
-** SUMMARY
-**
-** Writing a transaction containing a large number of operations on
-** b-tree indexes that are collectively larger than the available cache
-** memory can be very inefficient.
-**
-** The problem is that in order to update a b-tree, the leaf page (at least)
-** containing the entry being inserted or deleted must be modified. If the
-** working set of leaves is larger than the available cache memory, then a
-** single leaf that is modified more than once as part of the transaction
-** may be loaded from or written to the persistent media multiple times.
-** Additionally, because the index updates are likely to be applied in
-** random order, access to pages within the database is also likely to be in
-** random order, which is itself quite inefficient.
-**
-** One way to improve the situation is to sort the operations on each index
-** by index key before applying them to the b-tree. This leads to an IO
-** pattern that resembles a single linear scan through the index b-tree,
-** and all but guarantees each modified leaf page is loaded and stored
-** exactly once. SQLite uses this trick to improve the performance of
-** CREATE INDEX commands. This extension allows it to be used to improve
-** the performance of large transactions on existing databases.
-**
-** Additionally, this extension allows the work involved in writing the
-** large transaction to be broken down into sub-transactions performed
-** sequentially by separate processes. This is useful if the system cannot
-** guarantee that a single update process will run for long enough to apply
-** the entire update, for example because the update is being applied on a
-** mobile device that is frequently rebooted. Even after the writer process
-** has committed one or more sub-transactions, other database clients continue
-** to read from the original database snapshot. In other words, partially
-** applied transactions are not visible to other clients.
-**
-** "RBU" stands for "Resumable Bulk Update". As in a large database update
-** transmitted via a wireless network to a mobile device. A transaction
-** applied using this extension is hence refered to as an "RBU update".
-**
-**
-** LIMITATIONS
-**
-** An "RBU update" transaction is subject to the following limitations:
-**
-** * The transaction must consist of INSERT, UPDATE and DELETE operations
-** only.
-**
-** * INSERT statements may not use any default values.
-**
-** * UPDATE and DELETE statements must identify their target rows by
-** non-NULL PRIMARY KEY values. Rows with NULL values stored in PRIMARY
-** KEY fields may not be updated or deleted. If the table being written
-** has no PRIMARY KEY, affected rows must be identified by rowid.
-**
-** * UPDATE statements may not modify PRIMARY KEY columns.
-**
-** * No triggers will be fired.
-**
-** * No foreign key violations are detected or reported.
-**
-** * CHECK constraints are not enforced.
-**
-** * No constraint handling mode except for "OR ROLLBACK" is supported.
-**
-**
-** PREPARATION
-**
-** An "RBU update" is stored as a separate SQLite database. A database
-** containing an RBU update is an "RBU database". For each table in the
-** target database to be updated, the RBU database should contain a table
-** named "data_<target name>" containing the same set of columns as the
-** target table, and one more - "rbu_control". The data_% table should
-** have no PRIMARY KEY or UNIQUE constraints, but each column should have
-** the same type as the corresponding column in the target database.
-** The "rbu_control" column should have no type at all. For example, if
-** the target database contains:
-**
-** CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT, c UNIQUE);
-**
-** Then the RBU database should contain:
-**
-** CREATE TABLE data_t1(a INTEGER, b TEXT, c, rbu_control);
-**
-** The order of the columns in the data_% table does not matter.
-**
-** Instead of a regular table, the RBU database may also contain virtual
-** tables or views named using the data_<target> naming scheme.
-**
-** Instead of the plain data_<target> naming scheme, RBU database tables
-** may also be named data<integer>_<target>, where <integer> is any sequence
-** of zero or more numeric characters (0-9). This can be significant because
-** tables within the RBU database are always processed in order sorted by
-** name. By judicious selection of the <integer> portion of the names
-** of the RBU tables the user can therefore control the order in which they
-** are processed. This can be useful, for example, to ensure that "external
-** content" FTS4 tables are updated before their underlying content tables.
-**
-** If the target database table is a virtual table or a table that has no
-** PRIMARY KEY declaration, the data_% table must also contain a column
-** named "rbu_rowid". This column is mapped to the tables implicit primary
-** key column - "rowid". Virtual tables for which the "rowid" column does
-** not function like a primary key value cannot be updated using RBU. For
-** example, if the target db contains either of the following:
-**
-** CREATE VIRTUAL TABLE x1 USING fts3(a, b);
-** CREATE TABLE x1(a, b)
-**
-** then the RBU database should contain:
-**
-** CREATE TABLE data_x1(a, b, rbu_rowid, rbu_control);
-**
-** All non-hidden columns (i.e. all columns matched by "SELECT *") of the
-** target table must be present in the input table. For virtual tables,
-** hidden columns are optional - they are updated by RBU if present in
-** the input table, or not otherwise. For example, to write to an fts4
-** table with a hidden languageid column such as:
-**
-** CREATE VIRTUAL TABLE ft1 USING fts4(a, b, languageid='langid');
-**
-** Either of the following input table schemas may be used:
-**
-** CREATE TABLE data_ft1(a, b, langid, rbu_rowid, rbu_control);
-** CREATE TABLE data_ft1(a, b, rbu_rowid, rbu_control);
-**
-** For each row to INSERT into the target database as part of the RBU
-** update, the corresponding data_% table should contain a single record
-** with the "rbu_control" column set to contain integer value 0. The
-** other columns should be set to the values that make up the new record
-** to insert.
-**
-** If the target database table has an INTEGER PRIMARY KEY, it is not
-** possible to insert a NULL value into the IPK column. Attempting to
-** do so results in an SQLITE_MISMATCH error.
-**
-** For each row to DELETE from the target database as part of the RBU
-** update, the corresponding data_% table should contain a single record
-** with the "rbu_control" column set to contain integer value 1. The
-** real primary key values of the row to delete should be stored in the
-** corresponding columns of the data_% table. The values stored in the
-** other columns are not used.
-**
-** For each row to UPDATE from the target database as part of the RBU
-** update, the corresponding data_% table should contain a single record
-** with the "rbu_control" column set to contain a value of type text.
-** The real primary key values identifying the row to update should be
-** stored in the corresponding columns of the data_% table row, as should
-** the new values of all columns being updated. The text value in the
-** "rbu_control" column must contain the same number of characters as
-** there are columns in the target database table, and must consist entirely
-** of 'x' and '.' characters (or in some special cases 'd' - see below). For
-** each column that is being updated, the corresponding character is set to
-** 'x'. For those that remain as they are, the corresponding character of the
-** rbu_control value should be set to '.'. For example, given the tables
-** above, the update statement:
-**
-** UPDATE t1 SET c = 'usa' WHERE a = 4;
-**
-** is represented by the data_t1 row created by:
-**
-** INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..x');
-**
-** Instead of an 'x' character, characters of the rbu_control value specified
-** for UPDATEs may also be set to 'd'. In this case, instead of updating the
-** target table with the value stored in the corresponding data_% column, the
-** user-defined SQL function "rbu_delta()" is invoked and the result stored in
-** the target table column. rbu_delta() is invoked with two arguments - the
-** original value currently stored in the target table column and the
-** value specified in the data_xxx table.
-**
-** For example, this row:
-**
-** INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..d');
-**
-** is similar to an UPDATE statement such as:
-**
-** UPDATE t1 SET c = rbu_delta(c, 'usa') WHERE a = 4;
-**
-** Finally, if an 'f' character appears in place of a 'd' or 's' in an
-** rbu_control string, the content of the data_xxx table column is assumed
-** to be a "fossil delta" - a patch to be applied to a blob value in the
-** format used by the fossil source-code management system. In this case
-** the existing value within the target database table must be of type BLOB.
-** It is replaced by the result of applying the specified fossil delta to
-** itself.
-**
-** If the target database table is a virtual table or a table with no PRIMARY
-** KEY, the rbu_control value should not include a character corresponding
-** to the rbu_rowid value. For example, this:
-**
-** INSERT INTO data_ft1(a, b, rbu_rowid, rbu_control)
-** VALUES(NULL, 'usa', 12, '.x');
-**
-** causes a result similar to:
-**
-** UPDATE ft1 SET b = 'usa' WHERE rowid = 12;
-**
-** The data_xxx tables themselves should have no PRIMARY KEY declarations.
-** However, RBU is more efficient if reading the rows in from each data_xxx
-** table in "rowid" order is roughly the same as reading them sorted by
-** the PRIMARY KEY of the corresponding target database table. In other
-** words, rows should be sorted using the destination table PRIMARY KEY
-** fields before they are inserted into the data_xxx tables.
-**
-** USAGE
-**
-** The API declared below allows an application to apply an RBU update
-** stored on disk to an existing target database. Essentially, the
-** application:
-**
-** 1) Opens an RBU handle using the sqlite3rbu_open() function.
-**
-** 2) Registers any required virtual table modules with the database
-** handle returned by sqlite3rbu_db(). Also, if required, register
-** the rbu_delta() implementation.
-**
-** 3) Calls the sqlite3rbu_step() function one or more times on
-** the new handle. Each call to sqlite3rbu_step() performs a single
-** b-tree operation, so thousands of calls may be required to apply
-** a complete update.
-**
-** 4) Calls sqlite3rbu_close() to close the RBU update handle. If
-** sqlite3rbu_step() has been called enough times to completely
-** apply the update to the target database, then the RBU database
-** is marked as fully applied. Otherwise, the state of the RBU
-** update application is saved in the RBU database for later
-** resumption.
-**
-** See comments below for more detail on APIs.
-**
-** If an update is only partially applied to the target database by the
-** time sqlite3rbu_close() is called, various state information is saved
-** within the RBU database. This allows subsequent processes to automatically
-** resume the RBU update from where it left off.
-**
-** To remove all RBU extension state information, returning an RBU database
-** to its original contents, it is sufficient to drop all tables that begin
-** with the prefix "rbu_"
-**
-** DATABASE LOCKING
-**
-** An RBU update may not be applied to a database in WAL mode. Attempting
-** to do so is an error (SQLITE_ERROR).
-**
-** While an RBU handle is open, a SHARED lock may be held on the target
-** database file. This means it is possible for other clients to read the
-** database, but not to write it.
-**
-** If an RBU update is started and then suspended before it is completed,
-** then an external client writes to the database, then attempting to resume
-** the suspended RBU update is also an error (SQLITE_BUSY).
-*/
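/*
** For illustration only: a minimal sketch, assuming the amalgamation or
** "sqlite3rbu.h" is available to the application, of a driver loop that
** follows the USAGE steps described above. The file names, the function
** apply_rbu_update() and the pass-through rbu_delta() implementation are
** assumptions made for this example:
**
**   // Trivial stand-in for rbu_delta(): return the new value unchanged.
**   static void xRbuDelta(sqlite3_context *ctx, int nArg, sqlite3_value **ap){
**     (void)nArg;
**     sqlite3_result_value(ctx, ap[1]);
**   }
**
**   static int apply_rbu_update(const char *zTarget, const char *zRbu){
**     char *zErrmsg = 0;
**     int rc;
**     sqlite3 *dbTarget;
**     sqlite3rbu *pRbu = sqlite3rbu_open(zTarget, zRbu, 0);   // step 1
**     if( pRbu==0 ) return SQLITE_NOMEM;
**
**     // Step 2: register rbu_delta() (and any vtab modules) on the
**     // target database handle, if the update makes use of them.
**     dbTarget = sqlite3rbu_db(pRbu, 0);
**     if( dbTarget ){
**       sqlite3_create_function(dbTarget, "rbu_delta", 2, SQLITE_UTF8,
**                               0, xRbuDelta, 0, 0);
**     }
**
**     // Step 3: each call does a single b-tree operation; SQLITE_OK
**     // means there is more work to do.
**     while( sqlite3rbu_step(pRbu)==SQLITE_OK ){ }
**
**     // Step 4: SQLITE_DONE means the update was completely applied;
**     // SQLITE_OK means its state was saved for later resumption.
**     rc = sqlite3rbu_close(pRbu, &zErrmsg);
**     sqlite3_free(zErrmsg);
**     return rc;
**   }
*/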
-
-#ifndef _SQLITE3RBU_H
-#define _SQLITE3RBU_H
-
-/* #include "sqlite3.h" ** Required for error code definitions ** */
-
-#if 0
-extern "C" {
-#endif
-
-typedef struct sqlite3rbu sqlite3rbu;
-
-/*
-** Open an RBU handle.
-**
-** Argument zTarget is the path to the target database. Argument zRbu is
-** the path to the RBU database. Each call to this function must be matched
-** by a call to sqlite3rbu_close(). When opening the databases, RBU passes
-** the SQLITE_OPEN_URI flag to sqlite3_open_v2(). So if either zTarget
-** or zRbu begin with "file:", it will be interpreted as an SQLite
-** database URI, not a regular file name.
-**
-** If the zState argument is passed a NULL value, the RBU extension stores
-** the current state of the update (how many rows have been updated, which
-** indexes are yet to be updated etc.) within the RBU database itself. This
-** can be convenient, as it means that the RBU application does not need to
-** organize removing a separate state file after the update is concluded.
-** Or, if zState is non-NULL, it must be a path to a database file in which
-** the RBU extension can store the state of the update.
-**
-** When resuming an RBU update, the zState argument must be passed the same
-** value as when the RBU update was started.
-**
-** Once the RBU update is finished, the RBU extension does not
-** automatically remove any zState database file, even if it created it.
-**
-** By default, RBU uses the default VFS to access the files on disk. To
-** use a VFS other than the default, an SQLite "file:" URI containing a
-** "vfs=..." option may be passed as the zTarget option.
-**
-** IMPORTANT NOTE FOR ZIPVFS USERS: The RBU extension works with all of
-** SQLite's built-in VFSs, including the multiplexor VFS. However it does
-** not work out of the box with zipvfs. Refer to the comment describing
-** the zipvfs_create_vfs() API below for details on using RBU with zipvfs.
-*/
-SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
- const char *zTarget,
- const char *zRbu,
- const char *zState
-);
-
-/*
-** Internally, each RBU connection uses a separate SQLite database
-** connection to access the target and rbu update databases. This
-** API allows the application direct access to these database handles.
-**
-** The first argument passed to this function must be a valid, open, RBU
-** handle. The second argument should be passed zero to access the target
-** database handle, or non-zero to access the rbu update database handle.
-** Accessing the underlying database handles may be useful in the
-** following scenarios:
-**
-** * If any target tables are virtual tables, it may be necessary to
-** call sqlite3_create_module() on the target database handle to
-** register the required virtual table implementations.
-**
-** * If the data_xxx tables in the RBU source database are virtual
-** tables, the application may need to call sqlite3_create_module() on
-** the rbu update db handle to register any required virtual table
-** implementations.
-**
-** * If the application uses the "rbu_delta()" feature described above,
-** it must use sqlite3_create_function() or similar to register the
-** rbu_delta() implementation with the target database handle.
-**
-** If an error has occurred, either while opening or stepping the RBU object,
-** this function may return NULL. The error code and message may be collected
-** when sqlite3rbu_close() is called.
-*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu*, int bRbu);
-
-/*
-** Do some work towards applying the RBU update to the target db.
-**
-** Return SQLITE_DONE if the update has been completely applied, or
-** SQLITE_OK if no error occurs but there remains work to do to apply
-** the RBU update. If an error does occur, some other error code is
-** returned.
-**
-** Once a call to sqlite3rbu_step() has returned a value other than
-** SQLITE_OK, all subsequent calls on the same RBU handle are no-ops
-** that immediately return the same value.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *pRbu);
-
-/*
-** Force RBU to save its state to disk.
-**
-** If a power failure or application crash occurs during an update, following
-** system recovery RBU may resume the update from the point at which the state
-** was last saved. In other words, from the most recent successful call to
-** sqlite3rbu_close() or this function.
-**
-** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_savestate(sqlite3rbu *pRbu);
-
-/*
-** Close an RBU handle.
-**
-** If the RBU update has been completely applied, mark the RBU database
-** as fully applied. Otherwise, assuming no error has occurred, save the
-** current state of the RBU update application to the RBU database.
-**
-** If an error has already occurred as part of an sqlite3rbu_step()
-** or sqlite3rbu_open() call, or if one occurs within this function, an
-** SQLite error code is returned. Additionally, *pzErrmsg may be set to
-** point to a buffer containing a utf-8 formatted English language error
-** message. It is the responsibility of the caller to eventually free any
-** such buffer using sqlite3_free().
-**
-** Otherwise, if no error occurs, this function returns SQLITE_OK if the
-** update has been partially applied, or SQLITE_DONE if it has been
-** completely applied.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *pRbu, char **pzErrmsg);
-
-/*
-** Return the total number of key-value operations (inserts, deletes or
-** updates) that have been performed on the target database since the
-** current RBU update was started.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu);
-
-/*
-** Create an RBU VFS named zName that accesses the underlying file-system
-** via existing VFS zParent. Or, if the zParent parameter is passed NULL,
-** then the new RBU VFS uses the default system VFS to access the file-system.
-** The new object is registered as a non-default VFS with SQLite before
-** returning.
-**
-** Part of the RBU implementation uses a custom VFS object. Usually, this
-** object is created and deleted automatically by RBU.
-**
-** The exception is for applications that also use zipvfs. In this case,
-** the custom VFS must be explicitly created by the user before the RBU
-** handle is opened. The RBU VFS should be installed so that the zipvfs
-** VFS uses the RBU VFS, which in turn uses any other VFS layers in use
-** (for example multiplexor) to access the file-system. For example,
-** to assemble an RBU enabled VFS stack that uses both zipvfs and
-** multiplexor (error checking omitted):
-**
-** // Create a VFS named "multiplex" (not the default).
-** sqlite3_multiplex_initialize(0, 0);
-**
-** // Create an rbu VFS named "rbu" that uses multiplexor. If the
-** // second argument were replaced with NULL, the "rbu" VFS would
-** // access the file-system via the system default VFS, bypassing the
-** // multiplexor.
-** sqlite3rbu_create_vfs("rbu", "multiplex");
-**
-** // Create a zipvfs VFS named "zipvfs" that uses rbu.
-** zipvfs_create_vfs_v3("zipvfs", "rbu", 0, xCompressorAlgorithmDetector);
-**
-** // Make zipvfs the default VFS.
-** sqlite3_vfs_register(sqlite3_vfs_find("zipvfs"), 1);
-**
-** Because the default VFS created above includes RBU functionality, it
-** may be used by RBU clients. Attempting to use RBU with a zipvfs VFS stack
-** that does not include the RBU layer results in an error.
-**
-** The overhead of adding the "rbu" VFS to the system is negligible for
-** non-RBU users. There is no harm in an application accessing the
-** file-system via "rbu" all the time, even if it only uses RBU functionality
-** occasionally.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const char *zParent);
-
-/*
-** Deregister and destroy an RBU vfs created by an earlier call to
-** sqlite3rbu_create_vfs().
-**
-** VFS objects are not reference counted. If a VFS object is destroyed
-** before all database handles that use it have been closed, the results
-** are undefined.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3rbu_destroy_vfs(const char *zName);
-
-#if 0
-} /* end of the 'extern "C"' block */
-#endif
-
-#endif /* _SQLITE3RBU_H */
-
-/************** End of sqlite3rbu.h ******************************************/
-/************** Continuing where we left off in sqlite3rbu.c *****************/
-
-#if defined(_WIN32_WCE)
-/* #include "windows.h" */
-#endif
-
-/* Maximum number of prepared UPDATE statements held by this module */
-#define SQLITE_RBU_UPDATE_CACHESIZE 16
-
-/*
-** Swap two objects of type TYPE.
-*/
-#if !defined(SQLITE_AMALGAMATION)
-# define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
-#endif
-
-/*
-** The rbu_state table is used to save the state of a partially applied
-** update so that it can be resumed later. The table consists of integer
-** keys mapped to values as follows:
-**
-** RBU_STATE_STAGE:
-** May be set to integer values 1, 2, 4 or 5. As follows:
-** 1: the *-rbu file is currently under construction.
-** 2: the *-rbu file has been constructed, but not yet moved
-** to the *-wal path.
-** 4: the checkpoint is underway.
-** 5: the rbu update has been checkpointed.
-**
-** RBU_STATE_TBL:
-** Only valid if STAGE==1. The target database name of the table
-** currently being written.
-**
-** RBU_STATE_IDX:
-** Only valid if STAGE==1. The target database name of the index
-** currently being written, or NULL if the main table is currently being
-** updated.
-**
-** RBU_STATE_ROW:
-** Only valid if STAGE==1. Number of rows already processed for the current
-** table/index.
-**
-** RBU_STATE_PROGRESS:
-** Total number of sqlite3rbu_step() calls made so far as part of this
-** rbu update.
-**
-** RBU_STATE_CKPT:
-** Valid if STAGE==4. The 64-bit checksum associated with the wal-index
-** header created by recovering the *-wal file. This is used to detect
-** cases when another client appends frames to the *-wal file in the
-** middle of an incremental checkpoint (an incremental checkpoint cannot
-** be continued if this happens).
-**
-** RBU_STATE_COOKIE:
-** Valid if STAGE==1. The current change-counter cookie value in the
-** target db file.
-**
-** RBU_STATE_OALSZ:
-** Valid if STAGE==1. The size in bytes of the *-oal file.
-*/
-#define RBU_STATE_STAGE 1
-#define RBU_STATE_TBL 2
-#define RBU_STATE_IDX 3
-#define RBU_STATE_ROW 4
-#define RBU_STATE_PROGRESS 5
-#define RBU_STATE_CKPT 6
-#define RBU_STATE_COOKIE 7
-#define RBU_STATE_OALSZ 8
-
-#define RBU_STAGE_OAL 1
-#define RBU_STAGE_MOVE 2
-#define RBU_STAGE_CAPTURE 3
-#define RBU_STAGE_CKPT 4
-#define RBU_STAGE_DONE 5
-
-
-#define RBU_CREATE_STATE \
- "CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)"
-
-typedef struct RbuFrame RbuFrame;
-typedef struct RbuObjIter RbuObjIter;
-typedef struct RbuState RbuState;
-typedef struct rbu_vfs rbu_vfs;
-typedef struct rbu_file rbu_file;
-typedef struct RbuUpdateStmt RbuUpdateStmt;
-
-#if !defined(SQLITE_AMALGAMATION)
-typedef unsigned int u32;
-typedef unsigned char u8;
-typedef sqlite3_int64 i64;
-#endif
-
-/*
-** These values must match the values defined in wal.c for the equivalent
-** locks. These are not magic numbers as they are part of the SQLite file
-** format.
-*/
-#define WAL_LOCK_WRITE 0
-#define WAL_LOCK_CKPT 1
-#define WAL_LOCK_READ0 3
-
-/*
-** A structure to store values read from the rbu_state table in memory.
-*/
-struct RbuState {
- int eStage;
- char *zTbl;
- char *zIdx;
- i64 iWalCksum;
- int nRow;
- i64 nProgress;
- u32 iCookie;
- i64 iOalSz;
-};
-
-struct RbuUpdateStmt {
- char *zMask; /* Copy of update mask used with pUpdate */
- sqlite3_stmt *pUpdate; /* Last update statement (or NULL) */
- RbuUpdateStmt *pNext;
-};
-
-/*
-** An iterator of this type is used to iterate through all objects in
-** the target database that require updating. For each such table, the
-** iterator visits, in order:
-**
-** * the table itself,
-** * each index of the table (zero or more points to visit), and
-** * a special "cleanup table" state.
-**
-** abIndexed:
-** If the table has no indexes on it, abIndexed is set to NULL. Otherwise,
-** it points to an array of flags nTblCol elements in size. The flag is
-** set for each column that is either a part of the PK or a part of an
-** index. Or clear otherwise.
-**
-*/
-struct RbuObjIter {
- sqlite3_stmt *pTblIter; /* Iterate through tables */
- sqlite3_stmt *pIdxIter; /* Index iterator */
- int nTblCol; /* Size of azTblCol[] array */
- char **azTblCol; /* Array of unquoted target column names */
- char **azTblType; /* Array of target column types */
- int *aiSrcOrder; /* src table col -> target table col */
- u8 *abTblPk; /* Array of flags, set on target PK columns */
- u8 *abNotNull; /* Array of flags, set on NOT NULL columns */
- u8 *abIndexed; /* Array of flags, set on indexed & PK cols */
- int eType; /* Table type - an RBU_PK_XXX value */
-
- /* Output variables. zTbl==0 implies EOF. */
- int bCleanup; /* True in "cleanup" state */
- const char *zTbl; /* Name of target db table */
- const char *zDataTbl; /* Name of rbu db table (or null) */
- const char *zIdx; /* Name of target db index (or null) */
- int iTnum; /* Root page of current object */
- int iPkTnum; /* If eType==EXTERNAL, root of PK index */
- int bUnique; /* Current index is unique */
-
- /* Statements created by rbuObjIterPrepareAll() */
- int nCol; /* Number of columns in current object */
- sqlite3_stmt *pSelect; /* Source data */
- sqlite3_stmt *pInsert; /* Statement for INSERT operations */
- sqlite3_stmt *pDelete; /* Statement for DELETE ops */
- sqlite3_stmt *pTmpInsert; /* Insert into rbu_tmp_$zDataTbl */
-
- /* Last UPDATE used (for PK b-tree updates only), or NULL. */
- RbuUpdateStmt *pRbuUpdate;
-};
-
-/*
-** Values for RbuObjIter.eType
-**
-** 0: Table does not exist (error)
-** 1: Table has an implicit rowid.
-** 2: Table has an explicit IPK column.
-** 3: Table has an external PK index.
-** 4: Table is WITHOUT ROWID.
-** 5: Table is a virtual table.
-*/
-#define RBU_PK_NOTABLE 0
-#define RBU_PK_NONE 1
-#define RBU_PK_IPK 2
-#define RBU_PK_EXTERNAL 3
-#define RBU_PK_WITHOUT_ROWID 4
-#define RBU_PK_VTAB 5
-
-
-/*
-** Within the RBU_STAGE_OAL stage, each call to sqlite3rbu_step() performs
-** one of the following operations.
-*/
-#define RBU_INSERT 1 /* Insert on a main table b-tree */
-#define RBU_DELETE 2 /* Delete a row from a main table b-tree */
-#define RBU_IDX_DELETE 3 /* Delete a row from an aux. index b-tree */
-#define RBU_IDX_INSERT 4 /* Insert on an aux. index b-tree */
-#define RBU_UPDATE 5 /* Update a row in a main table b-tree */
-
-
-/*
-** A single step of an incremental checkpoint - frame iWalFrame of the wal
-** file should be copied to page iDbPage of the database file.
-*/
-struct RbuFrame {
- u32 iDbPage;
- u32 iWalFrame;
-};
-
-/*
-** RBU handle.
-*/
-struct sqlite3rbu {
- int eStage; /* Value of RBU_STATE_STAGE field */
- sqlite3 *dbMain; /* target database handle */
- sqlite3 *dbRbu; /* rbu database handle */
- char *zTarget; /* Path to target db */
- char *zRbu; /* Path to rbu db */
- char *zState; /* Path to state db (or NULL if zRbu) */
- char zStateDb[5]; /* Db name for state ("stat" or "main") */
- int rc; /* Value returned by last rbu_step() call */
- char *zErrmsg; /* Error message if rc!=SQLITE_OK */
- int nStep; /* Rows processed for current object */
- int nProgress; /* Rows processed for all objects */
- RbuObjIter objiter; /* Iterator for skipping through tbl/idx */
- const char *zVfsName; /* Name of automatically created rbu vfs */
- rbu_file *pTargetFd; /* File handle open on target db */
- i64 iOalSz;
-
- /* The following state variables are used as part of the incremental
- ** checkpoint stage (eStage==RBU_STAGE_CKPT). See comments surrounding
- ** function rbuSetupCheckpoint() for details. */
- u32 iMaxFrame; /* Largest iWalFrame value in aFrame[] */
- u32 mLock;
- int nFrame; /* Entries in aFrame[] array */
- int nFrameAlloc; /* Allocated size of aFrame[] array */
- RbuFrame *aFrame;
- int pgsz;
- u8 *aBuf;
- i64 iWalCksum;
-};
-
-/*
-** An rbu VFS is implemented using an instance of this structure.
-*/
-struct rbu_vfs {
- sqlite3_vfs base; /* rbu VFS shim methods */
- sqlite3_vfs *pRealVfs; /* Underlying VFS */
- sqlite3_mutex *mutex; /* Mutex to protect pMain */
- rbu_file *pMain; /* Linked list of main db files */
-};
-
-/*
-** Each file opened by an rbu VFS is represented by an instance of
-** the following structure.
-*/
-struct rbu_file {
- sqlite3_file base; /* sqlite3_file methods */
- sqlite3_file *pReal; /* Underlying file handle */
- rbu_vfs *pRbuVfs; /* Pointer to the rbu_vfs object */
- sqlite3rbu *pRbu; /* Pointer to rbu object (rbu target only) */
-
- int openFlags; /* Flags this file was opened with */
- u32 iCookie; /* Cookie value for main db files */
- u8 iWriteVer; /* "write-version" value for main db files */
-
- int nShm; /* Number of entries in apShm[] array */
- char **apShm; /* Array of mmap'd *-shm regions */
- char *zDel; /* Delete this when closing file */
-
- const char *zWal; /* Wal filename for this main db file */
- rbu_file *pWalFd; /* Wal file descriptor for this main db */
- rbu_file *pMainNext; /* Next MAIN_DB file */
-};
-
-
-/*************************************************************************
-** The following three functions, found below:
-**
-** rbuDeltaGetInt()
-** rbuDeltaChecksum()
-** rbuDeltaApply()
-**
-** are lifted from the fossil source code (http://fossil-scm.org). They
-** are used to implement the scalar SQL function rbu_fossil_delta().
-*/
-
-/*
-** Read bytes from *pz and convert them into a positive integer. When
-** finished, leave *pz pointing to the first character past the end of
-** the integer. The *pLen parameter holds the length of the string
-** in *pz and is decremented once for each character in the integer.
-*/
-static unsigned int rbuDeltaGetInt(const char **pz, int *pLen){
- static const signed char zValue[] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, 36,
- -1, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, -1, -1, -1, 63, -1,
- };
- unsigned int v = 0;
- int c;
- unsigned char *z = (unsigned char*)*pz;
- unsigned char *zStart = z;
- while( (c = zValue[0x7f&*(z++)])>=0 ){
- v = (v<<6) + c;
- }
- z--;
- *pLen -= z - zStart;
- *pz = (char*)z;
- return v;
-}
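/*
** Worked example (illustrative only): in the encoding handled above each
** character contributes 6 bits, using the zValue[] mapping ('0'-'9' are
** 0-9, 'A'-'Z' are 10-35, '_' is 36, 'a'-'z' are 37-62, '~' is 63).
** Decoding the two-character varint "3F" therefore gives 3*64 + 15 == 207:
**
**   const char *z = "3F\n";
**   int nLeft = 3;
**   unsigned int v = rbuDeltaGetInt(&z, &nLeft);
**   // Now v==207, *z=='\n' (the terminator is not consumed) and nLeft==1.
*/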
-
-/*
-** Compute a 32-bit checksum on the N-byte buffer. Return the result.
-*/
-static unsigned int rbuDeltaChecksum(const char *zIn, size_t N){
- const unsigned char *z = (const unsigned char *)zIn;
- unsigned sum0 = 0;
- unsigned sum1 = 0;
- unsigned sum2 = 0;
- unsigned sum3 = 0;
- while(N >= 16){
- sum0 += ((unsigned)z[0] + z[4] + z[8] + z[12]);
- sum1 += ((unsigned)z[1] + z[5] + z[9] + z[13]);
- sum2 += ((unsigned)z[2] + z[6] + z[10]+ z[14]);
- sum3 += ((unsigned)z[3] + z[7] + z[11]+ z[15]);
- z += 16;
- N -= 16;
- }
- while(N >= 4){
- sum0 += z[0];
- sum1 += z[1];
- sum2 += z[2];
- sum3 += z[3];
- z += 4;
- N -= 4;
- }
- sum3 += (sum2 << 8) + (sum1 << 16) + (sum0 << 24);
- switch(N){
- case 3: sum3 += (z[2] << 8);
- case 2: sum3 += (z[1] << 16);
- case 1: sum3 += (z[0] << 24);
- default: ;
- }
- return sum3;
-}
-
-/*
-** Apply a delta.
-**
-** The output buffer should be big enough to hold the whole output
-** file and a NUL terminator at the end. The delta_output_size()
-** routine will determine this size for you.
-**
-** The delta string should be null-terminated. But the delta string
-** may contain embedded NUL characters (if the input and output are
-** binary files) so we also have to pass in the length of the delta in
-** the lenDelta parameter.
-**
-** This function returns the size of the output file in bytes (excluding
-** the final NUL terminator character). Except, if the delta string is
-** malformed or intended for use with a source file other than zSrc,
-** then this routine returns -1.
-**
-** Refer to the delta_create() documentation above for a description
-** of the delta file format.
-*/
-static int rbuDeltaApply(
- const char *zSrc, /* The source or pattern file */
- int lenSrc, /* Length of the source file */
- const char *zDelta, /* Delta to apply to the pattern */
- int lenDelta, /* Length of the delta */
- char *zOut /* Write the output into this preallocated buffer */
-){
- unsigned int limit;
- unsigned int total = 0;
-#ifndef FOSSIL_OMIT_DELTA_CKSUM_TEST
- char *zOrigOut = zOut;
-#endif
-
- limit = rbuDeltaGetInt(&zDelta, &lenDelta);
- if( *zDelta!='\n' ){
- /* ERROR: size integer not terminated by "\n" */
- return -1;
- }
- zDelta++; lenDelta--;
- while( *zDelta && lenDelta>0 ){
- unsigned int cnt, ofst;
- cnt = rbuDeltaGetInt(&zDelta, &lenDelta);
- switch( zDelta[0] ){
- case '@': {
- zDelta++; lenDelta--;
- ofst = rbuDeltaGetInt(&zDelta, &lenDelta);
- if( lenDelta>0 && zDelta[0]!=',' ){
- /* ERROR: copy command not terminated by ',' */
- return -1;
- }
- zDelta++; lenDelta--;
- total += cnt;
- if( total>limit ){
- /* ERROR: copy exceeds output file size */
- return -1;
- }
- if( (int)(ofst+cnt) > lenSrc ){
- /* ERROR: copy extends past end of input */
- return -1;
- }
- memcpy(zOut, &zSrc[ofst], cnt);
- zOut += cnt;
- break;
- }
- case ':': {
- zDelta++; lenDelta--;
- total += cnt;
- if( total>limit ){
- /* ERROR: insert command gives an output larger than predicted */
- return -1;
- }
- if( (int)cnt>lenDelta ){
- /* ERROR: insert count exceeds size of delta */
- return -1;
- }
- memcpy(zOut, zDelta, cnt);
- zOut += cnt;
- zDelta += cnt;
- lenDelta -= cnt;
- break;
- }
- case ';': {
- zDelta++; lenDelta--;
- zOut[0] = 0;
-#ifndef FOSSIL_OMIT_DELTA_CKSUM_TEST
- if( cnt!=rbuDeltaChecksum(zOrigOut, total) ){
- /* ERROR: bad checksum */
- return -1;
- }
-#endif
- if( total!=limit ){
- /* ERROR: generated size does not match predicted size */
- return -1;
- }
- return total;
- }
- default: {
- /* ERROR: unknown delta operator */
- return -1;
- }
- }
- }
- /* ERROR: unterminated delta */
- return -1;
-}
-
-static int rbuDeltaOutputSize(const char *zDelta, int lenDelta){
- int size;
- size = rbuDeltaGetInt(&zDelta, &lenDelta);
- if( *zDelta!='\n' ){
- /* ERROR: size integer not terminated by "\n" */
- return -1;
- }
- return size;
-}
-
-/*
-** End of code taken from fossil.
-*************************************************************************/
-
-/*
-** Implementation of SQL scalar function rbu_fossil_delta().
-**
-** This function applies a fossil delta patch to a blob. Exactly two
-** arguments must be passed to this function. The first is the blob to
-** patch and the second the patch to apply. If no error occurs, this
-** function returns the patched blob.
-*/
-static void rbuFossilDeltaFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- const char *aDelta;
- int nDelta;
- const char *aOrig;
- int nOrig;
-
- int nOut;
- int nOut2;
- char *aOut;
-
- assert( argc==2 );
-
- nOrig = sqlite3_value_bytes(argv[0]);
- aOrig = (const char*)sqlite3_value_blob(argv[0]);
- nDelta = sqlite3_value_bytes(argv[1]);
- aDelta = (const char*)sqlite3_value_blob(argv[1]);
-
- /* Figure out the size of the output */
- nOut = rbuDeltaOutputSize(aDelta, nDelta);
- if( nOut<0 ){
- sqlite3_result_error(context, "corrupt fossil delta", -1);
- return;
- }
-
- aOut = sqlite3_malloc(nOut+1);
- if( aOut==0 ){
- sqlite3_result_error_nomem(context);
- }else{
- nOut2 = rbuDeltaApply(aOrig, nOrig, aDelta, nDelta, aOut);
- if( nOut2!=nOut ){
- sqlite3_result_error(context, "corrupt fossil delta", -1);
- }else{
- sqlite3_result_blob(context, aOut, nOut, sqlite3_free);
- }
- }
-}
-
-
-/*
-** Prepare the SQL statement in buffer zSql against database handle db.
-** If successful, set *ppStmt to point to the new statement and return
-** SQLITE_OK.
-**
-** Otherwise, if an error does occur, set *ppStmt to NULL and return
-** an SQLite error code. Additionally, set output variable *pzErrmsg to
-** point to a buffer containing an error message. It is the responsibility
-** of the caller to (eventually) free this buffer using sqlite3_free().
-*/
-static int prepareAndCollectError(
- sqlite3 *db,
- sqlite3_stmt **ppStmt,
- char **pzErrmsg,
- const char *zSql
-){
- int rc = sqlite3_prepare_v2(db, zSql, -1, ppStmt, 0);
- if( rc!=SQLITE_OK ){
- *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db));
- *ppStmt = 0;
- }
- return rc;
-}
-
-/*
-** Reset the SQL statement passed as the first argument. Return a copy
-** of the value returned by sqlite3_reset().
-**
-** If an error has occurred, then set *pzErrmsg to point to a buffer
-** containing an error message. It is the responsibility of the caller
-** to eventually free this buffer using sqlite3_free().
-*/
-static int resetAndCollectError(sqlite3_stmt *pStmt, char **pzErrmsg){
- int rc = sqlite3_reset(pStmt);
- if( rc!=SQLITE_OK ){
- *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(sqlite3_db_handle(pStmt)));
- }
- return rc;
-}
-
-/*
-** Unless it is NULL, argument zSql points to a buffer allocated using
-** sqlite3_malloc containing an SQL statement. This function prepares the SQL
-** statement against database db and frees the buffer. If statement
-** compilation is successful, *ppStmt is set to point to the new statement
-** handle and SQLITE_OK is returned.
-**
-** Otherwise, if an error occurs, *ppStmt is set to NULL and an error code
-** returned. In this case, *pzErrmsg may also be set to point to an error
-** message. It is the responsibility of the caller to free this error message
-** buffer using sqlite3_free().
-**
-** If argument zSql is NULL, this function assumes that an OOM has occurred.
-** In this case SQLITE_NOMEM is returned and *ppStmt set to NULL.
-*/
-static int prepareFreeAndCollectError(
- sqlite3 *db,
- sqlite3_stmt **ppStmt,
- char **pzErrmsg,
- char *zSql
-){
- int rc;
- assert( *pzErrmsg==0 );
- if( zSql==0 ){
- rc = SQLITE_NOMEM;
- *ppStmt = 0;
- }else{
- rc = prepareAndCollectError(db, ppStmt, pzErrmsg, zSql);
- sqlite3_free(zSql);
- }
- return rc;
-}
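/*
** For context, a rough sketch of the calling pattern described above; the
** database handle db, the statement pointer and the table name are
** assumptions made for this example:
**
**   sqlite3_stmt *pSelect = 0;
**   char *zErrmsg = 0;
**   int rc = prepareFreeAndCollectError(db, &pSelect, &zErrmsg,
**       sqlite3_mprintf("SELECT * FROM \"%w\"", "data_t1")
**   );
**   // If sqlite3_mprintf() returned NULL (OOM), rc is SQLITE_NOMEM and
**   // pSelect is NULL; otherwise the formatted SQL buffer has already
**   // been freed. On error, zErrmsg must eventually be sqlite3_free()d.
*/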
-
-/*
-** Free the RbuObjIter.azTblCol[] and RbuObjIter.abTblPk[] arrays allocated
-** by an earlier call to rbuObjIterCacheTableInfo().
-*/
-static void rbuObjIterFreeCols(RbuObjIter *pIter){
- int i;
- for(i=0; i<pIter->nTblCol; i++){
- sqlite3_free(pIter->azTblCol[i]);
- sqlite3_free(pIter->azTblType[i]);
- }
- sqlite3_free(pIter->azTblCol);
- pIter->azTblCol = 0;
- pIter->azTblType = 0;
- pIter->aiSrcOrder = 0;
- pIter->abTblPk = 0;
- pIter->abNotNull = 0;
- pIter->nTblCol = 0;
- pIter->eType = 0; /* Invalid value */
-}
-
-/*
-** Finalize all statements and free all allocations that are specific to
-** the current object (table/index pair).
-*/
-static void rbuObjIterClearStatements(RbuObjIter *pIter){
- RbuUpdateStmt *pUp;
-
- sqlite3_finalize(pIter->pSelect);
- sqlite3_finalize(pIter->pInsert);
- sqlite3_finalize(pIter->pDelete);
- sqlite3_finalize(pIter->pTmpInsert);
- pUp = pIter->pRbuUpdate;
- while( pUp ){
- RbuUpdateStmt *pTmp = pUp->pNext;
- sqlite3_finalize(pUp->pUpdate);
- sqlite3_free(pUp);
- pUp = pTmp;
- }
-
- pIter->pSelect = 0;
- pIter->pInsert = 0;
- pIter->pDelete = 0;
- pIter->pRbuUpdate = 0;
- pIter->pTmpInsert = 0;
- pIter->nCol = 0;
-}
-
-/*
-** Clean up any resources allocated as part of the iterator object passed
-** as the only argument.
-*/
-static void rbuObjIterFinalize(RbuObjIter *pIter){
- rbuObjIterClearStatements(pIter);
- sqlite3_finalize(pIter->pTblIter);
- sqlite3_finalize(pIter->pIdxIter);
- rbuObjIterFreeCols(pIter);
- memset(pIter, 0, sizeof(RbuObjIter));
-}
-
-/*
-** Advance the iterator to the next position.
-**
-** If no error occurs, SQLITE_OK is returned and the iterator is left
-** pointing to the next entry. Otherwise, an error code and message is
-** left in the RBU handle passed as the first argument. A copy of the
-** error code is returned.
-*/
-static int rbuObjIterNext(sqlite3rbu *p, RbuObjIter *pIter){
- int rc = p->rc;
- if( rc==SQLITE_OK ){
-
- /* Free any SQLite statements used while processing the previous object */
- rbuObjIterClearStatements(pIter);
- if( pIter->zIdx==0 ){
- rc = sqlite3_exec(p->dbMain,
- "DROP TRIGGER IF EXISTS temp.rbu_insert_tr;"
- "DROP TRIGGER IF EXISTS temp.rbu_update1_tr;"
- "DROP TRIGGER IF EXISTS temp.rbu_update2_tr;"
- "DROP TRIGGER IF EXISTS temp.rbu_delete_tr;"
- , 0, 0, &p->zErrmsg
- );
- }
-
- if( rc==SQLITE_OK ){
- if( pIter->bCleanup ){
- rbuObjIterFreeCols(pIter);
- pIter->bCleanup = 0;
- rc = sqlite3_step(pIter->pTblIter);
- if( rc!=SQLITE_ROW ){
- rc = resetAndCollectError(pIter->pTblIter, &p->zErrmsg);
- pIter->zTbl = 0;
- }else{
- pIter->zTbl = (const char*)sqlite3_column_text(pIter->pTblIter, 0);
- pIter->zDataTbl = (const char*)sqlite3_column_text(pIter->pTblIter,1);
- rc = (pIter->zDataTbl && pIter->zTbl) ? SQLITE_OK : SQLITE_NOMEM;
- }
- }else{
- if( pIter->zIdx==0 ){
- sqlite3_stmt *pIdx = pIter->pIdxIter;
- rc = sqlite3_bind_text(pIdx, 1, pIter->zTbl, -1, SQLITE_STATIC);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3_step(pIter->pIdxIter);
- if( rc!=SQLITE_ROW ){
- rc = resetAndCollectError(pIter->pIdxIter, &p->zErrmsg);
- pIter->bCleanup = 1;
- pIter->zIdx = 0;
- }else{
- pIter->zIdx = (const char*)sqlite3_column_text(pIter->pIdxIter, 0);
- pIter->iTnum = sqlite3_column_int(pIter->pIdxIter, 1);
- pIter->bUnique = sqlite3_column_int(pIter->pIdxIter, 2);
- rc = pIter->zIdx ? SQLITE_OK : SQLITE_NOMEM;
- }
- }
- }
- }
- }
-
- if( rc!=SQLITE_OK ){
- rbuObjIterFinalize(pIter);
- p->rc = rc;
- }
- return rc;
-}
-
-
-/*
-** The implementation of the rbu_target_name() SQL function. This function
-** accepts one argument - the name of a table in the RBU database. If the
-** table name matches the pattern:
-**
-** data[0-9]*_<name>
-**
-** where <name> is any sequence of 1 or more characters, <name> is returned.
-** Otherwise, if the only argument does not match the above pattern, an SQL
-** NULL is returned.
-**
-** "data_t1" -> "t1"
-** "data0123_t2" -> "t2"
-** "dataAB_t3" -> NULL
-*/
-static void rbuTargetNameFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- const char *zIn;
- assert( argc==1 );
-
- zIn = (const char*)sqlite3_value_text(argv[0]);
- if( zIn && strlen(zIn)>4 && memcmp("data", zIn, 4)==0 ){
- int i;
- for(i=4; zIn[i]>='0' && zIn[i]<='9'; i++);
- if( zIn[i]=='_' && zIn[i+1] ){
- sqlite3_result_text(context, &zIn[i+1], -1, SQLITE_STATIC);
- }
- }
-}
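-
-/* A hypothetical example of the naming convention handled above: for a
-** target table "t1" with columns "a" and "b" (names assumed here purely
-** for illustration), the RBU database would contain a table such as:
-**
-**   CREATE TABLE data_t1(a, b, rbu_control);
-**
-** rbu_target_name('data_t1') then returns 't1', and the rbu_control
-** column describes the operation to perform for each row (see
-** rbuStepType() below).
-*/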
-
-/*
-** Initialize the iterator structure passed as the second argument.
-**
-** If no error occurs, SQLITE_OK is returned and the iterator is left
-** pointing to the first entry. Otherwise, an error code and message is
-** left in the RBU handle passed as the first argument. A copy of the
-** error code is returned.
-*/
-static int rbuObjIterFirst(sqlite3rbu *p, RbuObjIter *pIter){
- int rc;
- memset(pIter, 0, sizeof(RbuObjIter));
-
- rc = prepareAndCollectError(p->dbRbu, &pIter->pTblIter, &p->zErrmsg,
- "SELECT rbu_target_name(name) AS target, name FROM sqlite_master "
- "WHERE type IN ('table', 'view') AND target IS NOT NULL "
- "ORDER BY name"
- );
-
- if( rc==SQLITE_OK ){
- rc = prepareAndCollectError(p->dbMain, &pIter->pIdxIter, &p->zErrmsg,
- "SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' "
- " FROM main.sqlite_master "
- " WHERE type='index' AND tbl_name = ?"
- );
- }
-
- pIter->bCleanup = 1;
- p->rc = rc;
- return rbuObjIterNext(p, pIter);
-}
-
-/*
-** This is a wrapper around "sqlite3_mprintf(zFmt, ...)". If an OOM occurs,
-** an error code is stored in the RBU handle passed as the first argument.
-**
-** If an error has already occurred (p->rc is already set to something other
-** than SQLITE_OK), then this function returns NULL without modifying the
-** stored error code. In this case it still calls sqlite3_free() on any
-** printf() parameters associated with %z conversions.
-*/
-static char *rbuMPrintf(sqlite3rbu *p, const char *zFmt, ...){
- char *zSql = 0;
- va_list ap;
- va_start(ap, zFmt);
- zSql = sqlite3_vmprintf(zFmt, ap);
- if( p->rc==SQLITE_OK ){
- if( zSql==0 ) p->rc = SQLITE_NOMEM;
- }else{
- sqlite3_free(zSql);
- zSql = 0;
- }
- va_end(ap);
- return zSql;
-}
-
-/*
-** Argument zFmt is a sqlite3_mprintf() style format string. The trailing
-** arguments are the usual substitution values. This function performs
-** the printf() style substitutions and executes the result as an SQL
-** statement against the database handle passed as the second argument.
-**
-** If an error occurs, an error code and error message is stored in the
-** RBU handle. If an error has already occurred when this function is
-** called, it is a no-op.
-*/
-static int rbuMPrintfExec(sqlite3rbu *p, sqlite3 *db, const char *zFmt, ...){
- va_list ap;
- char *zSql;
- va_start(ap, zFmt);
- zSql = sqlite3_vmprintf(zFmt, ap);
- if( p->rc==SQLITE_OK ){
- if( zSql==0 ){
- p->rc = SQLITE_NOMEM;
- }else{
- p->rc = sqlite3_exec(db, zSql, 0, 0, &p->zErrmsg);
- }
- }
- sqlite3_free(zSql);
- va_end(ap);
- return p->rc;
-}
-
-/*
-** Attempt to allocate and return a pointer to a zeroed block of nByte
-** bytes.
-**
-** If an error (i.e. an OOM condition) occurs, return NULL and leave an
-** error code in the rbu handle passed as the first argument. Or, if an
-** error has already occurred when this function is called, return NULL
-** immediately without attempting the allocation or modifying the stored
-** error code.
-*/
-static void *rbuMalloc(sqlite3rbu *p, int nByte){
- void *pRet = 0;
- if( p->rc==SQLITE_OK ){
- assert( nByte>0 );
- pRet = sqlite3_malloc(nByte);
- if( pRet==0 ){
- p->rc = SQLITE_NOMEM;
- }else{
- memset(pRet, 0, nByte);
- }
- }
- return pRet;
-}
-
-
-/*
-** Allocate and zero the pIter->azTblCol[] and abTblPk[] arrays so that
-** there is room for at least nCol elements. If an OOM occurs, store an
-** error code in the RBU handle passed as the first argument.
-*/
-static void rbuAllocateIterArrays(sqlite3rbu *p, RbuObjIter *pIter, int nCol){
- int nByte = (2*sizeof(char*) + sizeof(int) + 3*sizeof(u8)) * nCol;
- char **azNew;
-
- azNew = (char**)rbuMalloc(p, nByte);
- if( azNew ){
- pIter->azTblCol = azNew;
- pIter->azTblType = &azNew[nCol];
- pIter->aiSrcOrder = (int*)&pIter->azTblType[nCol];
- pIter->abTblPk = (u8*)&pIter->aiSrcOrder[nCol];
- pIter->abNotNull = (u8*)&pIter->abTblPk[nCol];
- pIter->abIndexed = (u8*)&pIter->abNotNull[nCol];
- }
-}
-
-/*
-** The first argument must be a nul-terminated string. This function
-** returns a copy of the string in memory obtained from sqlite3_malloc().
-** It is the responsibility of the caller to eventually free this memory
-** using sqlite3_free().
-**
-** If an OOM condition is encountered when attempting to allocate memory,
-** output variable (*pRc) is set to SQLITE_NOMEM before returning. Otherwise,
-** if the allocation succeeds, (*pRc) is left unchanged.
-*/
-static char *rbuStrndup(const char *zStr, int *pRc){
- char *zRet = 0;
-
- assert( *pRc==SQLITE_OK );
- if( zStr ){
- int nCopy = strlen(zStr) + 1;
- zRet = (char*)sqlite3_malloc(nCopy);
- if( zRet ){
- memcpy(zRet, zStr, nCopy);
- }else{
- *pRc = SQLITE_NOMEM;
- }
- }
-
- return zRet;
-}
-
-/*
-** Finalize the statement passed as the second argument.
-**
-** If the sqlite3_finalize() call indicates that an error occurs, and the
-** rbu handle error code is not already set, set the error code and error
-** message accordingly.
-*/
-static void rbuFinalize(sqlite3rbu *p, sqlite3_stmt *pStmt){
- sqlite3 *db = sqlite3_db_handle(pStmt);
- int rc = sqlite3_finalize(pStmt);
- if( p->rc==SQLITE_OK && rc!=SQLITE_OK ){
- p->rc = rc;
- p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db));
- }
-}
-
-/* Determine the type of a table.
-**
-** peType is of type (int*), a pointer to an output parameter of type
-** (int). This call sets the output parameter as follows, depending
-** on the type of the table specified by parameters dbName and zTbl.
-**
-** RBU_PK_NOTABLE: No such table.
-** RBU_PK_NONE: Table has an implicit rowid.
-** RBU_PK_IPK: Table has an explicit IPK column.
-** RBU_PK_EXTERNAL: Table has an external PK index.
-** RBU_PK_WITHOUT_ROWID: Table is WITHOUT ROWID.
-** RBU_PK_VTAB: Table is a virtual table.
-**
-** Argument *piPk is also of type (int*), and also points to an output
-** parameter. Unless the table has an external primary key index
-** (i.e. unless *peType is set to 3), then *piPk is set to zero. Or,
-** if the table does have an external primary key index, then *piPk
-** is set to the root page number of the primary key index before
-** returning.
-**
-** ALGORITHM:
-**
-** if( no entry exists in sqlite_master ){
-** return RBU_PK_NOTABLE
-** }else if( sql for the entry starts with "CREATE VIRTUAL" ){
-** return RBU_PK_VTAB
-** }else if( "PRAGMA index_list()" for the table contains a "pk" index ){
-** if( the index that is the pk exists in sqlite_master ){
-** *piPK = rootpage of that index.
-** return RBU_PK_EXTERNAL
-** }else{
-** return RBU_PK_WITHOUT_ROWID
-** }
-** }else if( "PRAGMA table_info()" lists one or more "pk" columns ){
-** return RBU_PK_IPK
-** }else{
-** return RBU_PK_NONE
-** }
-*/
-static void rbuTableType(
- sqlite3rbu *p,
- const char *zTab,
- int *peType,
- int *piTnum,
- int *piPk
-){
- /*
- ** 0) SELECT (sql LIKE 'create virtual%'), rootpage
- **      FROM sqlite_master WHERE name=%Q
- ** 1) PRAGMA index_list = %Q
- ** 2) SELECT rootpage FROM sqlite_master WHERE name = %Q
- ** 3) PRAGMA table_info = %Q
- */
- sqlite3_stmt *aStmt[4] = {0, 0, 0, 0};
-
- *peType = RBU_PK_NOTABLE;
- *piPk = 0;
-
- assert( p->rc==SQLITE_OK );
- p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[0], &p->zErrmsg,
- sqlite3_mprintf(
- "SELECT (sql LIKE 'create virtual%%'), rootpage"
- " FROM sqlite_master"
- " WHERE name=%Q", zTab
- ));
- if( p->rc!=SQLITE_OK || sqlite3_step(aStmt[0])!=SQLITE_ROW ){
- /* Either an error, or no such table. */
- goto rbuTableType_end;
- }
- if( sqlite3_column_int(aStmt[0], 0) ){
- *peType = RBU_PK_VTAB; /* virtual table */
- goto rbuTableType_end;
- }
- *piTnum = sqlite3_column_int(aStmt[0], 1);
-
- p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[1], &p->zErrmsg,
- sqlite3_mprintf("PRAGMA index_list=%Q",zTab)
- );
- if( p->rc ) goto rbuTableType_end;
- while( sqlite3_step(aStmt[1])==SQLITE_ROW ){
- const u8 *zOrig = sqlite3_column_text(aStmt[1], 3);
- const u8 *zIdx = sqlite3_column_text(aStmt[1], 1);
- if( zOrig && zIdx && zOrig[0]=='p' ){
- p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[2], &p->zErrmsg,
- sqlite3_mprintf(
- "SELECT rootpage FROM sqlite_master WHERE name = %Q", zIdx
- ));
- if( p->rc==SQLITE_OK ){
- if( sqlite3_step(aStmt[2])==SQLITE_ROW ){
- *piPk = sqlite3_column_int(aStmt[2], 0);
- *peType = RBU_PK_EXTERNAL;
- }else{
- *peType = RBU_PK_WITHOUT_ROWID;
- }
- }
- goto rbuTableType_end;
- }
- }
-
- p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[3], &p->zErrmsg,
- sqlite3_mprintf("PRAGMA table_info=%Q",zTab)
- );
- if( p->rc==SQLITE_OK ){
- while( sqlite3_step(aStmt[3])==SQLITE_ROW ){
- if( sqlite3_column_int(aStmt[3],5)>0 ){
- *peType = RBU_PK_IPK; /* explicit IPK column */
- goto rbuTableType_end;
- }
- }
- *peType = RBU_PK_NONE;
- }
-
-rbuTableType_end: {
- unsigned int i;
- for(i=0; i<sizeof(aStmt)/sizeof(aStmt[0]); i++){
- rbuFinalize(p, aStmt[i]);
- }
- }
-}
-
-/*
-** This is a helper function for rbuObjIterCacheTableInfo(). It populates
-** the pIter->abIndexed[] array.
-*/
-static void rbuObjIterCacheIndexedCols(sqlite3rbu *p, RbuObjIter *pIter){
- sqlite3_stmt *pList = 0;
- int bIndex = 0;
-
- if( p->rc==SQLITE_OK ){
- memcpy(pIter->abIndexed, pIter->abTblPk, sizeof(u8)*pIter->nTblCol);
- p->rc = prepareFreeAndCollectError(p->dbMain, &pList, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl)
- );
- }
-
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pList) ){
- const char *zIdx = (const char*)sqlite3_column_text(pList, 1);
- sqlite3_stmt *pXInfo = 0;
- if( zIdx==0 ) break;
- p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
- );
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
- int iCid = sqlite3_column_int(pXInfo, 1);
- if( iCid>=0 ) pIter->abIndexed[iCid] = 1;
- }
- rbuFinalize(p, pXInfo);
- bIndex = 1;
- }
-
- rbuFinalize(p, pList);
- if( bIndex==0 ) pIter->abIndexed = 0;
-}
-
-
-/*
-** If they are not already populated, populate the pIter->azTblCol[],
-** pIter->abTblPk[], pIter->nTblCol and pIter->bRowid variables according to
-** the table (not index) that the iterator currently points to.
-**
-** Return SQLITE_OK if successful, or an SQLite error code otherwise. If
-** an error does occur, an error code and error message are also left in
-** the RBU handle.
-*/
-static int rbuObjIterCacheTableInfo(sqlite3rbu *p, RbuObjIter *pIter){
- if( pIter->azTblCol==0 ){
- sqlite3_stmt *pStmt = 0;
- int nCol = 0;
- int i; /* for() loop iterator variable */
- int bRbuRowid = 0; /* If input table has column "rbu_rowid" */
- int iOrder = 0;
- int iTnum = 0;
-
- /* Figure out the type of table this step will deal with. */
- assert( pIter->eType==0 );
- rbuTableType(p, pIter->zTbl, &pIter->eType, &iTnum, &pIter->iPkTnum);
- if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_NOTABLE ){
- p->rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf("no such table: %s", pIter->zTbl);
- }
- if( p->rc ) return p->rc;
- if( pIter->zIdx==0 ) pIter->iTnum = iTnum;
-
- assert( pIter->eType==RBU_PK_NONE || pIter->eType==RBU_PK_IPK
- || pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_WITHOUT_ROWID
- || pIter->eType==RBU_PK_VTAB
- );
-
- /* Populate the azTblCol[] and nTblCol variables based on the columns
- ** of the input table. Ignore any input table columns that begin with
- ** "rbu_". */
- p->rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
- sqlite3_mprintf("SELECT * FROM '%q'", pIter->zDataTbl)
- );
- if( p->rc==SQLITE_OK ){
- nCol = sqlite3_column_count(pStmt);
- rbuAllocateIterArrays(p, pIter, nCol);
- }
- for(i=0; p->rc==SQLITE_OK && i<nCol; i++){
- const char *zName = (const char*)sqlite3_column_name(pStmt, i);
- if( sqlite3_strnicmp("rbu_", zName, 4) ){
- char *zCopy = rbuStrndup(zName, &p->rc);
- pIter->aiSrcOrder[pIter->nTblCol] = pIter->nTblCol;
- pIter->azTblCol[pIter->nTblCol++] = zCopy;
- }
- else if( 0==sqlite3_stricmp("rbu_rowid", zName) ){
- bRbuRowid = 1;
- }
- }
- sqlite3_finalize(pStmt);
- pStmt = 0;
-
- if( p->rc==SQLITE_OK
- && bRbuRowid!=(pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE)
- ){
- p->rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf(
- "table %q %s rbu_rowid column", pIter->zDataTbl,
- (bRbuRowid ? "may not have" : "requires")
- );
- }
-
- /* Check that all non-HIDDEN columns in the destination table are also
- ** present in the input table. Populate the abTblPk[], azTblType[] and
- ** aiSrcOrder[] arrays at the same time. */
- if( p->rc==SQLITE_OK ){
- p->rc = prepareFreeAndCollectError(p->dbMain, &pStmt, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA table_info(%Q)", pIter->zTbl)
- );
- }
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
- const char *zName = (const char*)sqlite3_column_text(pStmt, 1);
- if( zName==0 ) break; /* An OOM - finalize() below returns S_NOMEM */
- for(i=iOrder; i<pIter->nTblCol; i++){
- if( 0==strcmp(zName, pIter->azTblCol[i]) ) break;
- }
- if( i==pIter->nTblCol ){
- p->rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf("column missing from %q: %s",
- pIter->zDataTbl, zName
- );
- }else{
- int iPk = sqlite3_column_int(pStmt, 5);
- int bNotNull = sqlite3_column_int(pStmt, 3);
- const char *zType = (const char*)sqlite3_column_text(pStmt, 2);
-
- if( i!=iOrder ){
- SWAP(int, pIter->aiSrcOrder[i], pIter->aiSrcOrder[iOrder]);
- SWAP(char*, pIter->azTblCol[i], pIter->azTblCol[iOrder]);
- }
-
- pIter->azTblType[iOrder] = rbuStrndup(zType, &p->rc);
- pIter->abTblPk[iOrder] = (iPk!=0);
- pIter->abNotNull[iOrder] = (u8)bNotNull || (iPk!=0);
- iOrder++;
- }
- }
-
- rbuFinalize(p, pStmt);
- rbuObjIterCacheIndexedCols(p, pIter);
- assert( pIter->eType!=RBU_PK_VTAB || pIter->abIndexed==0 );
- }
-
- return p->rc;
-}
-
-/*
-** This function constructs and returns a pointer to a nul-terminated
-** string containing some SQL clause or list based on one or more of the
-** column names currently stored in the pIter->azTblCol[] array.
-*/
-static char *rbuObjIterGetCollist(
- sqlite3rbu *p, /* RBU object */
- RbuObjIter *pIter /* Object iterator for column names */
-){
- char *zList = 0;
- const char *zSep = "";
- int i;
- for(i=0; i<pIter->nTblCol; i++){
- const char *z = pIter->azTblCol[i];
- zList = rbuMPrintf(p, "%z%s\"%w\"", zList, zSep, z);
- zSep = ", ";
- }
- return zList;
-}
-
-/*
-** This function is used to create a SELECT list (the list of SQL
-** expressions that follows a SELECT keyword) for a SELECT statement
-** used to read from a data_xxx or rbu_tmp_xxx table while updating the
-** index object currently indicated by the iterator object passed as the
-** second argument. A "PRAGMA index_xinfo = <idxname>" statement is used
-** to obtain the required information.
-**
-** If the index is of the following form:
-**
-** CREATE INDEX i1 ON t1(c, b COLLATE nocase);
-**
-** and "t1" is a table with an explicit INTEGER PRIMARY KEY column
-** "ipk", the returned string is:
-**
-** "`c` COLLATE 'BINARY', `b` COLLATE 'NOCASE', `ipk` COLLATE 'BINARY'"
-**
-** As well as the returned string, three other malloc'd strings are
-** returned via output parameters. As follows:
-**
-** pzImposterCols: ...
-** pzImposterPk: ...
-** pzWhere: ...
-*/
-static char *rbuObjIterGetIndexCols(
- sqlite3rbu *p, /* RBU object */
- RbuObjIter *pIter, /* Object iterator for column names */
- char **pzImposterCols, /* OUT: Columns for imposter table */
- char **pzImposterPk, /* OUT: Imposter PK clause */
- char **pzWhere, /* OUT: WHERE clause */
- int *pnBind /* OUT: Total number of columns */
-){
- int rc = p->rc; /* Error code */
- int rc2; /* sqlite3_finalize() return code */
- char *zRet = 0; /* String to return */
- char *zImpCols = 0; /* String to return via *pzImposterCols */
- char *zImpPK = 0; /* String to return via *pzImposterPK */
- char *zWhere = 0; /* String to return via *pzWhere */
- int nBind = 0; /* Value to return via *pnBind */
- const char *zCom = ""; /* Set to ", " later on */
- const char *zAnd = ""; /* Set to " AND " later on */
- sqlite3_stmt *pXInfo = 0; /* PRAGMA index_xinfo = ? */
-
- if( rc==SQLITE_OK ){
- assert( p->zErrmsg==0 );
- rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", pIter->zIdx)
- );
- }
-
- while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
- int iCid = sqlite3_column_int(pXInfo, 1);
- int bDesc = sqlite3_column_int(pXInfo, 3);
- const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4);
- const char *zCol;
- const char *zType;
-
- if( iCid<0 ){
- /* An integer primary key. If the table has an explicit IPK, use
- ** its name. Otherwise, use "rbu_rowid". */
- if( pIter->eType==RBU_PK_IPK ){
- int i;
- for(i=0; pIter->abTblPk[i]==0; i++);
- assert( i<pIter->nTblCol );
- zCol = pIter->azTblCol[i];
- }else{
- zCol = "rbu_rowid";
- }
- zType = "INTEGER";
- }else{
- zCol = pIter->azTblCol[iCid];
- zType = pIter->azTblType[iCid];
- }
-
- zRet = sqlite3_mprintf("%z%s\"%w\" COLLATE %Q", zRet, zCom, zCol, zCollate);
- if( pIter->bUnique==0 || sqlite3_column_int(pXInfo, 5) ){
- const char *zOrder = (bDesc ? " DESC" : "");
- zImpPK = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\"%s",
- zImpPK, zCom, nBind, zCol, zOrder
- );
- }
- zImpCols = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\" %s COLLATE %Q",
- zImpCols, zCom, nBind, zCol, zType, zCollate
- );
- zWhere = sqlite3_mprintf(
- "%z%s\"rbu_imp_%d%w\" IS ?", zWhere, zAnd, nBind, zCol
- );
- if( zRet==0 || zImpPK==0 || zImpCols==0 || zWhere==0 ) rc = SQLITE_NOMEM;
- zCom = ", ";
- zAnd = " AND ";
- nBind++;
- }
-
- rc2 = sqlite3_finalize(pXInfo);
- if( rc==SQLITE_OK ) rc = rc2;
-
- if( rc!=SQLITE_OK ){
- sqlite3_free(zRet);
- sqlite3_free(zImpCols);
- sqlite3_free(zImpPK);
- sqlite3_free(zWhere);
- zRet = 0;
- zImpCols = 0;
- zImpPK = 0;
- zWhere = 0;
- p->rc = rc;
- }
-
- *pzImposterCols = zImpCols;
- *pzImposterPk = zImpPK;
- *pzWhere = zWhere;
- *pnBind = nBind;
- return zRet;
-}
-
-/*
-** Assuming the current table columns are "a", "b" and "c", and the zObj
-** parameter is passed "old", return a string of the form:
-**
-** "old.a, old.b, old.c"
-**
-** With the column names escaped.
-**
-** For tables with implicit rowids - RBU_PK_EXTERNAL and RBU_PK_NONE, append
-** the text ", old._rowid_" to the returned value.
-*/
-static char *rbuObjIterGetOldlist(
- sqlite3rbu *p,
- RbuObjIter *pIter,
- const char *zObj
-){
- char *zList = 0;
- if( p->rc==SQLITE_OK && pIter->abIndexed ){
- const char *zS = "";
- int i;
- for(i=0; i<pIter->nTblCol; i++){
- if( pIter->abIndexed[i] ){
- const char *zCol = pIter->azTblCol[i];
- zList = sqlite3_mprintf("%z%s%s.\"%w\"", zList, zS, zObj, zCol);
- }else{
- zList = sqlite3_mprintf("%z%sNULL", zList, zS);
- }
- zS = ", ";
- if( zList==0 ){
- p->rc = SQLITE_NOMEM;
- break;
- }
- }
-
- /* For a table with implicit rowids, append "old._rowid_" to the list. */
- if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
- zList = rbuMPrintf(p, "%z, %s._rowid_", zList, zObj);
- }
- }
- return zList;
-}
-
-/*
-** Return an expression that can be used in a WHERE clause to match the
-** primary key of the current table. For example, if the table is:
-**
-** CREATE TABLE t1(a, b, c, PRIMARY KEY(b, c));
-**
-** Return the string:
-**
-** "b = ?1 AND c = ?2"
-*/
-static char *rbuObjIterGetWhere(
- sqlite3rbu *p,
- RbuObjIter *pIter
-){
- char *zList = 0;
- if( pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE ){
- zList = rbuMPrintf(p, "_rowid_ = ?%d", pIter->nTblCol+1);
- }else if( pIter->eType==RBU_PK_EXTERNAL ){
- const char *zSep = "";
- int i;
- for(i=0; i<pIter->nTblCol; i++){
- if( pIter->abTblPk[i] ){
- zList = rbuMPrintf(p, "%z%sc%d=?%d", zList, zSep, i, i+1);
- zSep = " AND ";
- }
- }
- zList = rbuMPrintf(p,
- "_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)", zList
- );
-
- }else{
- const char *zSep = "";
- int i;
- for(i=0; i<pIter->nTblCol; i++){
- if( pIter->abTblPk[i] ){
- const char *zCol = pIter->azTblCol[i];
- zList = rbuMPrintf(p, "%z%s\"%w\"=?%d", zList, zSep, zCol, i+1);
- zSep = " AND ";
- }
- }
- }
- return zList;
-}
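-
-/* A sketch of the RBU_PK_EXTERNAL case above, assuming a hypothetical
-** table whose second and third columns (indexes 1 and 2) make up the
-** external PRIMARY KEY. The returned expression is then:
-**
-**   "_rowid_ = (SELECT id FROM rbu_imposter2 WHERE c1=?2 AND c2=?3)"
-*/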
-
-/*
-** The SELECT statement iterating through the keys for the current object
-** (p->objiter.pSelect) currently points to a valid row. However, there
-** is something wrong with the rbu_control value stored in the
-** (p->nCol+1)'th column. Set the error code and error message
-** of the RBU handle to something reflecting this.
-*/
-static void rbuBadControlError(sqlite3rbu *p){
- p->rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf("invalid rbu_control value");
-}
-
-
-/*
-** Return a nul-terminated string containing the comma separated list of
-** assignments that should be included following the "SET" keyword of
-** an UPDATE statement used to update the table object that the iterator
-** passed as the second argument currently points to if the rbu_control
-** column of the data_xxx table entry is set to zMask.
-**
-** The memory for the returned string is obtained from sqlite3_malloc().
-** It is the responsibility of the caller to eventually free it using
-** sqlite3_free().
-**
-** If an OOM error is encountered when allocating space for the new
-** string, an error code is left in the rbu handle passed as the first
-** argument and NULL is returned. Or, if an error has already occurred
-** when this function is called, NULL is returned immediately, without
-** attempting the allocation or modifying the stored error code.
-*/
-static char *rbuObjIterGetSetlist(
- sqlite3rbu *p,
- RbuObjIter *pIter,
- const char *zMask
-){
- char *zList = 0;
- if( p->rc==SQLITE_OK ){
- int i;
-
- if( (int)strlen(zMask)!=pIter->nTblCol ){
- rbuBadControlError(p);
- }else{
- const char *zSep = "";
- for(i=0; i<pIter->nTblCol; i++){
- char c = zMask[pIter->aiSrcOrder[i]];
- if( c=='x' ){
- zList = rbuMPrintf(p, "%z%s\"%w\"=?%d",
- zList, zSep, pIter->azTblCol[i], i+1
- );
- zSep = ", ";
- }
- else if( c=='d' ){
- zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_delta(\"%w\", ?%d)",
- zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1
- );
- zSep = ", ";
- }
- else if( c=='f' ){
- zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)",
- zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1
- );
- zSep = ", ";
- }
- }
- }
- }
- return zList;
-}
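-
-/* A hypothetical example of the mask handling above: for a table with
-** columns "a", "b" and "c" (in data_xxx order) and an rbu_control mask
-** of "x.d", the returned SET list is:
-**
-**   "a"=?1, "c"=rbu_delta("c", ?3)
-**
-** Column "b" is omitted because its mask character is '.', meaning it
-** is left unchanged by the UPDATE.
-*/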
-
-/*
-** Return a nul-terminated string consisting of nBind comma separated
-** "?" expressions. For example, if nBind is 3, return a pointer to
-** a buffer containing the string "?,?,?".
-**
-** The memory for the returned string is obtained from sqlite3_malloc().
-** It is the responsibility of the caller to eventually free it using
-** sqlite3_free().
-**
-** If an OOM error is encountered when allocating space for the new
-** string, an error code is left in the rbu handle passed as the first
-** argument and NULL is returned. Or, if an error has already occurred
-** when this function is called, NULL is returned immediately, without
-** attempting the allocation or modifying the stored error code.
-*/
-static char *rbuObjIterGetBindlist(sqlite3rbu *p, int nBind){
- char *zRet = 0;
- int nByte = nBind*2 + 1;
-
- zRet = (char*)rbuMalloc(p, nByte);
- if( zRet ){
- int i;
- for(i=0; i<nBind; i++){
- zRet[i*2] = '?';
- zRet[i*2+1] = (i+1==nBind) ? '\0' : ',';
- }
- }
- return zRet;
-}
-
-/*
-** The iterator currently points to a table (not index) of type
-** RBU_PK_WITHOUT_ROWID. This function creates the PRIMARY KEY
-** declaration for the corresponding imposter table. For example,
-** if the iterator points to a table created as:
-**
-** CREATE TABLE t1(a, b, c, PRIMARY KEY(b, a DESC)) WITHOUT ROWID
-**
-** this function returns:
-**
-** PRIMARY KEY("b", "a" DESC)
-*/
-static char *rbuWithoutRowidPK(sqlite3rbu *p, RbuObjIter *pIter){
- char *z = 0;
- assert( pIter->zIdx==0 );
- if( p->rc==SQLITE_OK ){
- const char *zSep = "PRIMARY KEY(";
- sqlite3_stmt *pXList = 0; /* PRAGMA index_list = (pIter->zTbl) */
- sqlite3_stmt *pXInfo = 0; /* PRAGMA index_xinfo = <pk-index> */
-
- p->rc = prepareFreeAndCollectError(p->dbMain, &pXList, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl)
- );
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXList) ){
- const char *zOrig = (const char*)sqlite3_column_text(pXList,3);
- if( zOrig && strcmp(zOrig, "pk")==0 ){
- const char *zIdx = (const char*)sqlite3_column_text(pXList,1);
- if( zIdx ){
- p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
- );
- }
- break;
- }
- }
- rbuFinalize(p, pXList);
-
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
- if( sqlite3_column_int(pXInfo, 5) ){
- /* int iCid = sqlite3_column_int(pXInfo, 0); */
- const char *zCol = (const char*)sqlite3_column_text(pXInfo, 2);
- const char *zDesc = sqlite3_column_int(pXInfo, 3) ? " DESC" : "";
- z = rbuMPrintf(p, "%z%s\"%w\"%s", z, zSep, zCol, zDesc);
- zSep = ", ";
- }
- }
- z = rbuMPrintf(p, "%z)", z);
- rbuFinalize(p, pXInfo);
- }
- return z;
-}
-
-/*
-** This function creates the second imposter table used when writing to
-** a table b-tree where the table has an external primary key. If the
-** iterator passed as the second argument does not currently point to
-** a table (not index) with an external primary key, this function is a
-** no-op.
-**
-** Assuming the iterator does point to a table with an external PK, this
-** function creates a WITHOUT ROWID imposter table named "rbu_imposter2"
-** used to access that PK index. For example, if the target table is
-** declared as follows:
-**
-** CREATE TABLE t1(a, b TEXT, c REAL, PRIMARY KEY(b, c));
-**
-** then the imposter table schema is:
-**
-** CREATE TABLE rbu_imposter2(c1 TEXT, c2 REAL, id INTEGER, PRIMARY KEY(c1, c2)) WITHOUT ROWID;
-**
-*/
-static void rbuCreateImposterTable2(sqlite3rbu *p, RbuObjIter *pIter){
- if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_EXTERNAL ){
- int tnum = pIter->iPkTnum; /* Root page of PK index */
- sqlite3_stmt *pQuery = 0; /* SELECT name ... WHERE rootpage = $tnum */
- const char *zIdx = 0; /* Name of PK index */
- sqlite3_stmt *pXInfo = 0; /* PRAGMA main.index_xinfo = $zIdx */
- const char *zComma = "";
- char *zCols = 0; /* Used to build up list of table cols */
- char *zPk = 0; /* Used to build up table PK declaration */
-
- /* Figure out the name of the primary key index for the current table.
- ** This is needed for the argument to "PRAGMA index_xinfo". Set
- ** zIdx to point to a nul-terminated string containing this name. */
- p->rc = prepareAndCollectError(p->dbMain, &pQuery, &p->zErrmsg,
- "SELECT name FROM sqlite_master WHERE rootpage = ?"
- );
- if( p->rc==SQLITE_OK ){
- sqlite3_bind_int(pQuery, 1, tnum);
- if( SQLITE_ROW==sqlite3_step(pQuery) ){
- zIdx = (const char*)sqlite3_column_text(pQuery, 0);
- }
- }
- if( zIdx ){
- p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
- sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
- );
- }
- rbuFinalize(p, pQuery);
-
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
- int bKey = sqlite3_column_int(pXInfo, 5);
- if( bKey ){
- int iCid = sqlite3_column_int(pXInfo, 1);
- int bDesc = sqlite3_column_int(pXInfo, 3);
- const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4);
- zCols = rbuMPrintf(p, "%z%sc%d %s COLLATE %s", zCols, zComma,
- iCid, pIter->azTblType[iCid], zCollate
- );
- zPk = rbuMPrintf(p, "%z%sc%d%s", zPk, zComma, iCid, bDesc?" DESC":"");
- zComma = ", ";
- }
- }
- zCols = rbuMPrintf(p, "%z, id INTEGER", zCols);
- rbuFinalize(p, pXInfo);
-
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum);
- rbuMPrintfExec(p, p->dbMain,
- "CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID",
- zCols, zPk
- );
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
- }
-}
-
-/*
-** If an error has already occurred when this function is called, it is
-** a no-op. Or, if an error occurs during the execution of this function,
-** the error code is set in the sqlite3rbu object indicated by the first
-** argument.
-**
-** The iterator passed as the second argument is guaranteed to point to
-** a table (not an index) when this function is called. This function
-** attempts to create any imposter table required to write to the main
-** table b-tree of the table before returning.
-**
-** An imposter table is required in all cases except RBU_PK_VTAB. Only
-** virtual tables are written to directly. The imposter table has the
-** same schema as the actual target table (less any UNIQUE constraints).
-** More precisely, the "same schema" means the same columns, types,
-** collation sequences. For tables that do not have an external PRIMARY
-** KEY, it also means the same PRIMARY KEY declaration.
-*/
-static void rbuCreateImposterTable(sqlite3rbu *p, RbuObjIter *pIter){
- if( p->rc==SQLITE_OK && pIter->eType!=RBU_PK_VTAB ){
- int tnum = pIter->iTnum;
- const char *zComma = "";
- char *zSql = 0;
- int iCol;
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1);
-
- for(iCol=0; p->rc==SQLITE_OK && iCol<pIter->nTblCol; iCol++){
- const char *zPk = "";
- const char *zCol = pIter->azTblCol[iCol];
- const char *zColl = 0;
-
- p->rc = sqlite3_table_column_metadata(
- p->dbMain, "main", pIter->zTbl, zCol, 0, &zColl, 0, 0, 0
- );
-
- if( pIter->eType==RBU_PK_IPK && pIter->abTblPk[iCol] ){
- /* If the target table column is an "INTEGER PRIMARY KEY", add
- ** "PRIMARY KEY" to the imposter table column declaration. */
- zPk = "PRIMARY KEY ";
- }
- zSql = rbuMPrintf(p, "%z%s\"%w\" %s %sCOLLATE %s%s",
- zSql, zComma, zCol, pIter->azTblType[iCol], zPk, zColl,
- (pIter->abNotNull[iCol] ? " NOT NULL" : "")
- );
- zComma = ", ";
- }
-
- if( pIter->eType==RBU_PK_WITHOUT_ROWID ){
- char *zPk = rbuWithoutRowidPK(p, pIter);
- if( zPk ){
- zSql = rbuMPrintf(p, "%z, %z", zSql, zPk);
- }
- }
-
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum);
- rbuMPrintfExec(p, p->dbMain, "CREATE TABLE \"rbu_imp_%w\"(%z)%s",
- pIter->zTbl, zSql,
- (pIter->eType==RBU_PK_WITHOUT_ROWID ? " WITHOUT ROWID" : "")
- );
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
- }
-}
-
-/*
-** Prepare a statement used to insert rows into the "rbu_tmp_xxx" table.
-** Specifically a statement of the form:
-**
-** INSERT INTO rbu_tmp_xxx VALUES(?, ?, ? ...);
-**
-** The number of bound variables is equal to the number of columns in
-** the target table, plus one (for the rbu_control column), plus one more
-** (for the rbu_rowid column) if the target table is an implicit IPK or
-** virtual table.
-*/
-static void rbuObjIterPrepareTmpInsert(
- sqlite3rbu *p,
- RbuObjIter *pIter,
- const char *zCollist,
- const char *zRbuRowid
-){
- int bRbuRowid = (pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE);
- char *zBind = rbuObjIterGetBindlist(p, pIter->nTblCol + 1 + bRbuRowid);
- if( zBind ){
- assert( pIter->pTmpInsert==0 );
- p->rc = prepareFreeAndCollectError(
- p->dbRbu, &pIter->pTmpInsert, &p->zErrmsg, sqlite3_mprintf(
- "INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)",
- p->zStateDb, pIter->zDataTbl, zCollist, zRbuRowid, zBind
- ));
- }
-}
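-
-/* For a hypothetical input table "data_t1" whose target has columns
-** "a" and "b" and an explicit IPK (so no rbu_rowid column is needed),
-** the statement prepared above is roughly:
-**
-**   INSERT INTO main.'rbu_tmp_data_t1'(rbu_control, "a", "b") VALUES(?,?,?)
-**
-** with "main" replaced by "stat" if a separate state database is in use.
-*/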
-
-static void rbuTmpInsertFunc(
- sqlite3_context *pCtx,
- int nVal,
- sqlite3_value **apVal
-){
- sqlite3rbu *p = sqlite3_user_data(pCtx);
- int rc = SQLITE_OK;
- int i;
-
- for(i=0; rc==SQLITE_OK && i<nVal; i++){
- rc = sqlite3_bind_value(p->objiter.pTmpInsert, i+1, apVal[i]);
- }
- if( rc==SQLITE_OK ){
- sqlite3_step(p->objiter.pTmpInsert);
- rc = sqlite3_reset(p->objiter.pTmpInsert);
- }
-
- if( rc!=SQLITE_OK ){
- sqlite3_result_error_code(pCtx, rc);
- }
-}
-
-/*
-** Ensure that the SQLite statement handles required to update the
-** target database object currently indicated by the iterator passed
-** as the second argument are available.
-*/
-static int rbuObjIterPrepareAll(
- sqlite3rbu *p,
- RbuObjIter *pIter,
- int nOffset /* Add "LIMIT -1 OFFSET $nOffset" to SELECT */
-){
- assert( pIter->bCleanup==0 );
- if( pIter->pSelect==0 && rbuObjIterCacheTableInfo(p, pIter)==SQLITE_OK ){
- const int tnum = pIter->iTnum;
- char *zCollist = 0; /* List of indexed columns */
- char **pz = &p->zErrmsg;
- const char *zIdx = pIter->zIdx;
- char *zLimit = 0;
-
- if( nOffset ){
- zLimit = sqlite3_mprintf(" LIMIT -1 OFFSET %d", nOffset);
- if( !zLimit ) p->rc = SQLITE_NOMEM;
- }
-
- if( zIdx ){
- const char *zTbl = pIter->zTbl;
- char *zImposterCols = 0; /* Columns for imposter table */
- char *zImposterPK = 0; /* Primary key declaration for imposter */
- char *zWhere = 0; /* WHERE clause on PK columns */
- char *zBind = 0;
- int nBind = 0;
-
- assert( pIter->eType!=RBU_PK_VTAB );
- zCollist = rbuObjIterGetIndexCols(
- p, pIter, &zImposterCols, &zImposterPK, &zWhere, &nBind
- );
- zBind = rbuObjIterGetBindlist(p, nBind);
-
- /* Create the imposter table used to write to this index. */
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1);
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1,tnum);
- rbuMPrintfExec(p, p->dbMain,
- "CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID",
- zTbl, zImposterCols, zImposterPK
- );
- sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
-
- /* Create the statement to insert index entries */
- pIter->nCol = nBind;
- if( p->rc==SQLITE_OK ){
- p->rc = prepareFreeAndCollectError(
- p->dbMain, &pIter->pInsert, &p->zErrmsg,
- sqlite3_mprintf("INSERT INTO \"rbu_imp_%w\" VALUES(%s)", zTbl, zBind)
- );
- }
-
- /* And to delete index entries */
- if( p->rc==SQLITE_OK ){
- p->rc = prepareFreeAndCollectError(
- p->dbMain, &pIter->pDelete, &p->zErrmsg,
- sqlite3_mprintf("DELETE FROM \"rbu_imp_%w\" WHERE %s", zTbl, zWhere)
- );
- }
-
- /* Create the SELECT statement to read keys in sorted order */
- if( p->rc==SQLITE_OK ){
- char *zSql;
- if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
- zSql = sqlite3_mprintf(
- "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' ORDER BY %s%s",
- zCollist, p->zStateDb, pIter->zDataTbl,
- zCollist, zLimit
- );
- }else{
- zSql = sqlite3_mprintf(
- "SELECT %s, rbu_control FROM '%q' "
- "WHERE typeof(rbu_control)='integer' AND rbu_control!=1 "
- "UNION ALL "
- "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' "
- "ORDER BY %s%s",
- zCollist, pIter->zDataTbl,
- zCollist, p->zStateDb, pIter->zDataTbl,
- zCollist, zLimit
- );
- }
- p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, zSql);
- }
-
- sqlite3_free(zImposterCols);
- sqlite3_free(zImposterPK);
- sqlite3_free(zWhere);
- sqlite3_free(zBind);
- }else{
- int bRbuRowid = (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE);
- const char *zTbl = pIter->zTbl; /* Table this step applies to */
- const char *zWrite; /* Imposter table name */
-
- char *zBindings = rbuObjIterGetBindlist(p, pIter->nTblCol + bRbuRowid);
- char *zWhere = rbuObjIterGetWhere(p, pIter);
- char *zOldlist = rbuObjIterGetOldlist(p, pIter, "old");
- char *zNewlist = rbuObjIterGetOldlist(p, pIter, "new");
-
- zCollist = rbuObjIterGetCollist(p, pIter);
- pIter->nCol = pIter->nTblCol;
-
- /* Create the imposter table or tables (if required). */
- rbuCreateImposterTable(p, pIter);
- rbuCreateImposterTable2(p, pIter);
- zWrite = (pIter->eType==RBU_PK_VTAB ? "" : "rbu_imp_");
-
- /* Create the INSERT statement to write to the target PK b-tree */
- if( p->rc==SQLITE_OK ){
- p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pInsert, pz,
- sqlite3_mprintf(
- "INSERT INTO \"%s%w\"(%s%s) VALUES(%s)",
- zWrite, zTbl, zCollist, (bRbuRowid ? ", _rowid_" : ""), zBindings
- )
- );
- }
-
- /* Create the DELETE statement to write to the target PK b-tree */
- if( p->rc==SQLITE_OK ){
- p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pDelete, pz,
- sqlite3_mprintf(
- "DELETE FROM \"%s%w\" WHERE %s", zWrite, zTbl, zWhere
- )
- );
- }
-
- if( pIter->abIndexed ){
- const char *zRbuRowid = "";
- if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
- zRbuRowid = ", rbu_rowid";
- }
-
- /* Create the rbu_tmp_xxx table and the triggers to populate it. */
- rbuMPrintfExec(p, p->dbRbu,
- "CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS "
- "SELECT *%s FROM '%q' WHERE 0;"
- , p->zStateDb, pIter->zDataTbl
- , (pIter->eType==RBU_PK_EXTERNAL ? ", 0 AS rbu_rowid" : "")
- , pIter->zDataTbl
- );
-
- rbuMPrintfExec(p, p->dbMain,
- "CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" "
- "BEGIN "
- " SELECT rbu_tmp_insert(2, %s);"
- "END;"
-
- "CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" "
- "BEGIN "
- " SELECT rbu_tmp_insert(2, %s);"
- "END;"
-
- "CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" "
- "BEGIN "
- " SELECT rbu_tmp_insert(3, %s);"
- "END;",
- zWrite, zTbl, zOldlist,
- zWrite, zTbl, zOldlist,
- zWrite, zTbl, zNewlist
- );
-
- if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
- rbuMPrintfExec(p, p->dbMain,
- "CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" "
- "BEGIN "
- " SELECT rbu_tmp_insert(0, %s);"
- "END;",
- zWrite, zTbl, zNewlist
- );
- }
-
- rbuObjIterPrepareTmpInsert(p, pIter, zCollist, zRbuRowid);
- }
-
- /* Create the SELECT statement to read keys from data_xxx */
- if( p->rc==SQLITE_OK ){
- p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz,
- sqlite3_mprintf(
- "SELECT %s, rbu_control%s FROM '%q'%s",
- zCollist, (bRbuRowid ? ", rbu_rowid" : ""),
- pIter->zDataTbl, zLimit
- )
- );
- }
-
- sqlite3_free(zWhere);
- sqlite3_free(zOldlist);
- sqlite3_free(zNewlist);
- sqlite3_free(zBindings);
- }
- sqlite3_free(zCollist);
- sqlite3_free(zLimit);
- }
-
- return p->rc;
-}
-
-/*
-** Set output variable *ppStmt to point to an UPDATE statement that may
-** be used to update the imposter table for the main table b-tree of the
-** table object that pIter currently points to, assuming that the
-** rbu_control column of the data_xyz table contains zMask.
-**
-** If the zMask string does not specify any columns to update, then this
-** is not an error. Output variable *ppStmt is set to NULL in this case.
-*/
-static int rbuGetUpdateStmt(
- sqlite3rbu *p, /* RBU handle */
- RbuObjIter *pIter, /* Object iterator */
- const char *zMask, /* rbu_control value ('x.x.') */
- sqlite3_stmt **ppStmt /* OUT: UPDATE statement handle */
-){
- RbuUpdateStmt **pp;
- RbuUpdateStmt *pUp = 0;
- int nUp = 0;
-
- /* In case an error occurs */
- *ppStmt = 0;
-
- /* Search for an existing statement. If one is found, shift it to the front
- ** of the LRU queue and return immediately. Otherwise, leave nUp pointing
- ** to the number of statements currently in the cache and pUp to the
- ** last object in the list. */
- for(pp=&pIter->pRbuUpdate; *pp; pp=&((*pp)->pNext)){
- pUp = *pp;
- if( strcmp(pUp->zMask, zMask)==0 ){
- *pp = pUp->pNext;
- pUp->pNext = pIter->pRbuUpdate;
- pIter->pRbuUpdate = pUp;
- *ppStmt = pUp->pUpdate;
- return SQLITE_OK;
- }
- nUp++;
- }
- assert( pUp==0 || pUp->pNext==0 );
-
- if( nUp>=SQLITE_RBU_UPDATE_CACHESIZE ){
- for(pp=&pIter->pRbuUpdate; *pp!=pUp; pp=&((*pp)->pNext));
- *pp = 0;
- sqlite3_finalize(pUp->pUpdate);
- pUp->pUpdate = 0;
- }else{
- pUp = (RbuUpdateStmt*)rbuMalloc(p, sizeof(RbuUpdateStmt)+pIter->nTblCol+1);
- }
-
- if( pUp ){
- char *zWhere = rbuObjIterGetWhere(p, pIter);
- char *zSet = rbuObjIterGetSetlist(p, pIter, zMask);
- char *zUpdate = 0;
-
- pUp->zMask = (char*)&pUp[1];
- memcpy(pUp->zMask, zMask, pIter->nTblCol);
- pUp->pNext = pIter->pRbuUpdate;
- pIter->pRbuUpdate = pUp;
-
- if( zSet ){
- const char *zPrefix = "";
-
- if( pIter->eType!=RBU_PK_VTAB ) zPrefix = "rbu_imp_";
- zUpdate = sqlite3_mprintf("UPDATE \"%s%w\" SET %s WHERE %s",
- zPrefix, pIter->zTbl, zSet, zWhere
- );
- p->rc = prepareFreeAndCollectError(
- p->dbMain, &pUp->pUpdate, &p->zErrmsg, zUpdate
- );
- *ppStmt = pUp->pUpdate;
- }
- sqlite3_free(zWhere);
- sqlite3_free(zSet);
- }
-
- return p->rc;
-}
-
-static sqlite3 *rbuOpenDbhandle(sqlite3rbu *p, const char *zName){
- sqlite3 *db = 0;
- if( p->rc==SQLITE_OK ){
- const int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI;
- p->rc = sqlite3_open_v2(zName, &db, flags, p->zVfsName);
- if( p->rc ){
- p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db));
- sqlite3_close(db);
- db = 0;
- }
- }
- return db;
-}
-
-/*
-** Open the database handle and attach the RBU database as "rbu". If an
-** error occurs, leave an error code and message in the RBU handle.
-*/
-static void rbuOpenDatabase(sqlite3rbu *p){
- assert( p->rc==SQLITE_OK );
- assert( p->dbMain==0 && p->dbRbu==0 );
-
- p->eStage = 0;
- p->dbMain = rbuOpenDbhandle(p, p->zTarget);
- p->dbRbu = rbuOpenDbhandle(p, p->zRbu);
-
- /* If using separate RBU and state databases, attach the state database to
- ** the RBU db handle now. */
- if( p->zState ){
- rbuMPrintfExec(p, p->dbRbu, "ATTACH %Q AS stat", p->zState);
- memcpy(p->zStateDb, "stat", 4);
- }else{
- memcpy(p->zStateDb, "main", 4);
- }
-
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_create_function(p->dbMain,
- "rbu_tmp_insert", -1, SQLITE_UTF8, (void*)p, rbuTmpInsertFunc, 0, 0
- );
- }
-
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_create_function(p->dbMain,
- "rbu_fossil_delta", 2, SQLITE_UTF8, 0, rbuFossilDeltaFunc, 0, 0
- );
- }
-
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_create_function(p->dbRbu,
- "rbu_target_name", 1, SQLITE_UTF8, (void*)p, rbuTargetNameFunc, 0, 0
- );
- }
-
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p);
- }
- rbuMPrintfExec(p, p->dbMain, "SELECT * FROM sqlite_master");
-
- /* Mark the database file just opened as an RBU target database. If
- ** this call returns SQLITE_NOTFOUND, then the RBU vfs is not in use.
- ** This is an error. */
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p);
- }
-
- if( p->rc==SQLITE_NOTFOUND ){
- p->rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf("rbu vfs not found");
- }
-}
-
-/*
-** This routine is a copy of the sqlite3FileSuffix3() routine from the core.
-** It is a no-op unless SQLITE_ENABLE_8_3_NAMES is defined.
-**
-** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database
-** filename in zBase is a URI with the "8_3_names=1" parameter and
-** if the filename in z[] has a suffix (a.k.a. "extension") that is longer than
-** three characters, then shorten the suffix on z[] to be the last three
-** characters of the original suffix.
-**
-** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always
-** do the suffix shortening regardless of URI parameter.
-**
-** Examples:
-**
-** test.db-journal => test.nal
-** test.db-wal => test.wal
-** test.db-shm => test.shm
-** test.db-mj7f3319fa => test.9fa
-*/
-static void rbuFileSuffix3(const char *zBase, char *z){
-#ifdef SQLITE_ENABLE_8_3_NAMES
-#if SQLITE_ENABLE_8_3_NAMES<2
- if( sqlite3_uri_boolean(zBase, "8_3_names", 0) )
-#endif
- {
- int i, sz;
- sz = sqlite3Strlen30(z);
- for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){}
- if( z[i]=='.' && ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4);
- }
-#endif
-}
-
-/*
-** Return the current wal-index header checksum for the target database
-** as a 64-bit integer.
-**
-** The checksum is stored in the first page of xShmMap memory as an 8-byte
-** blob starting at byte offset 40.
-*/
-static i64 rbuShmChecksum(sqlite3rbu *p){
- i64 iRet = 0;
- if( p->rc==SQLITE_OK ){
- sqlite3_file *pDb = p->pTargetFd->pReal;
- u32 volatile *ptr;
- p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr);
- if( p->rc==SQLITE_OK ){
- iRet = ((i64)ptr[10] << 32) + ptr[11];
- }
- }
- return iRet;
-}
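-
-/* Note on the arithmetic above: the wal-index header is mapped here as
-** an array of 32-bit integers, so the 8-byte checksum at byte offset 40
-** occupies elements 40/4 = 10 and 11 of ptr[], which are combined into
-** a single 64-bit value.
-*/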
-
-/*
-** This function is called as part of initializing or reinitializing an
-** incremental checkpoint.
-**
-** It populates the sqlite3rbu.aFrame[] array with the set of
-** (wal frame -> db page) copy operations required to checkpoint the
-** current wal file, and obtains the set of shm locks required to safely
-** perform the copy operations directly on the file-system.
-**
-** If argument pState is not NULL, then the incremental checkpoint is
-** being resumed. In this case, if the checksum of the wal-index-header
-** following recovery is not the same as the checksum saved in the RbuState
-** object, then the rbu handle is set to DONE state. This occurs if some
-** other client appends a transaction to the wal file in the middle of
-** an incremental checkpoint.
-*/
-static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){
-
- /* If pState is NULL, then the wal file may not have been opened and
- ** recovered yet. Run a read statement here so that recovery happens now,
- ** and does not interfere with the "capture" process below. */
- if( pState==0 ){
- p->eStage = 0;
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_exec(p->dbMain, "SELECT * FROM sqlite_master", 0, 0, 0);
- }
- }
-
- /* Assuming no error has occurred, run a "restart" checkpoint with the
- ** sqlite3rbu.eStage variable set to CAPTURE. This turns on the following
- ** special behaviour in the rbu VFS:
- **
- ** * If the exclusive shm WRITER or READ0 lock cannot be obtained,
- ** the checkpoint fails with SQLITE_BUSY (normally SQLite would
- ** proceed with running a passive checkpoint instead of failing).
- **
- ** * Attempts to read from the *-wal file or write to the database file
- ** do not perform any IO. Instead, the frame/page combinations that
- ** would be read/written are recorded in the sqlite3rbu.aFrame[]
- ** array.
- **
- ** * Calls to xShmLock(UNLOCK) to release the exclusive shm WRITER,
- ** READ0 and CHECKPOINT locks taken as part of the checkpoint are
- ** no-ops. These locks will not be released until the connection
- ** is closed.
- **
- ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL
- ** error.
- **
- ** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the
- ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[]
- ** array populated with a set of (frame -> page) mappings. Because the
- ** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy
- ** data from the wal file into the database file according to the
- ** contents of aFrame[].
- */
- if( p->rc==SQLITE_OK ){
- int rc2;
- p->eStage = RBU_STAGE_CAPTURE;
- rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0,0);
- if( rc2!=SQLITE_INTERNAL ) p->rc = rc2;
- }
-
- if( p->rc==SQLITE_OK ){
- p->eStage = RBU_STAGE_CKPT;
- p->nStep = (pState ? pState->nRow : 0);
- p->aBuf = rbuMalloc(p, p->pgsz);
- p->iWalCksum = rbuShmChecksum(p);
- }
-
- if( p->rc==SQLITE_OK && pState && pState->iWalCksum!=p->iWalCksum ){
- p->rc = SQLITE_DONE;
- p->eStage = RBU_STAGE_DONE;
- }
-}
-
-/*
-** Called when iAmt bytes are read from offset iOff of the wal file while
-** the rbu object is in capture mode. Record the frame number of the frame
-** being read in the aFrame[] array.
-*/
-static int rbuCaptureWalRead(sqlite3rbu *pRbu, i64 iOff, int iAmt){
- const u32 mReq = (1<<WAL_LOCK_WRITE)|(1<<WAL_LOCK_CKPT)|(1<<WAL_LOCK_READ0);
- u32 iFrame;
-
- if( pRbu->mLock!=mReq ){
- pRbu->rc = SQLITE_BUSY;
- return SQLITE_INTERNAL;
- }
-
- pRbu->pgsz = iAmt;
- if( pRbu->nFrame==pRbu->nFrameAlloc ){
- int nNew = (pRbu->nFrameAlloc ? pRbu->nFrameAlloc : 64) * 2;
- RbuFrame *aNew;
- aNew = (RbuFrame*)sqlite3_realloc(pRbu->aFrame, nNew * sizeof(RbuFrame));
- if( aNew==0 ) return SQLITE_NOMEM;
- pRbu->aFrame = aNew;
- pRbu->nFrameAlloc = nNew;
- }
-
- iFrame = (u32)((iOff-32) / (i64)(iAmt+24)) + 1;
- if( pRbu->iMaxFrame<iFrame ) pRbu->iMaxFrame = iFrame;
- pRbu->aFrame[pRbu->nFrame].iWalFrame = iFrame;
- pRbu->aFrame[pRbu->nFrame].iDbPage = 0;
- pRbu->nFrame++;
- return SQLITE_OK;
-}
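-
-/* Worked example of the frame calculation above, assuming the standard
-** wal layout (a 32 byte wal header, then frames consisting of a 24 byte
-** frame-header followed by one page of data) and a 4096 byte page size:
-**
-**   frame 1 page data starts at offset 32+24           = 56
-**   frame 2 page data starts at offset 32+(24+4096)+24 = 4176
-**
-** so a read at iOff=4176 with iAmt=4096 gives (4176-32)/4120 + 1 = 2.
-*/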
-
-/*
-** Called when a page of data is written to offset iOff of the database
-** file while the rbu handle is in capture mode. Record the page number
-** of the page being written in the aFrame[] array.
-*/
-static int rbuCaptureDbWrite(sqlite3rbu *pRbu, i64 iOff){
- pRbu->aFrame[pRbu->nFrame-1].iDbPage = (u32)(iOff / pRbu->pgsz) + 1;
- return SQLITE_OK;
-}
-
-/*
-** This is called as part of an incremental checkpoint operation. Copy
-** a single frame of data from the wal file into the database file, as
-** indicated by the RbuFrame object.
-*/
-static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){
- sqlite3_file *pWal = p->pTargetFd->pWalFd->pReal;
- sqlite3_file *pDb = p->pTargetFd->pReal;
- i64 iOff;
-
- assert( p->rc==SQLITE_OK );
- iOff = (i64)(pFrame->iWalFrame-1) * (p->pgsz + 24) + 32 + 24;
- p->rc = pWal->pMethods->xRead(pWal, p->aBuf, p->pgsz, iOff);
- if( p->rc ) return;
-
- iOff = (i64)(pFrame->iDbPage-1) * p->pgsz;
- p->rc = pDb->pMethods->xWrite(pDb, p->aBuf, p->pgsz, iOff);
-}
-
-
-/*
-** Take an EXCLUSIVE lock on the database file.
-*/
-static void rbuLockDatabase(sqlite3rbu *p){
- sqlite3_file *pReal = p->pTargetFd->pReal;
- assert( p->rc==SQLITE_OK );
- p->rc = pReal->pMethods->xLock(pReal, SQLITE_LOCK_SHARED);
- if( p->rc==SQLITE_OK ){
- p->rc = pReal->pMethods->xLock(pReal, SQLITE_LOCK_EXCLUSIVE);
- }
-}
-
-#if defined(_WIN32_WCE)
-static LPWSTR rbuWinUtf8ToUnicode(const char *zFilename){
- int nChar;
- LPWSTR zWideFilename;
-
- nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0);
- if( nChar==0 ){
- return 0;
- }
- zWideFilename = sqlite3_malloc( nChar*sizeof(zWideFilename[0]) );
- if( zWideFilename==0 ){
- return 0;
- }
- memset(zWideFilename, 0, nChar*sizeof(zWideFilename[0]));
- nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename,
- nChar);
- if( nChar==0 ){
- sqlite3_free(zWideFilename);
- zWideFilename = 0;
- }
- return zWideFilename;
-}
-#endif
-
-/*
-** The RBU handle is currently in RBU_STAGE_OAL state, with a SHARED lock
-** on the database file. This proc moves the *-oal file to the *-wal path,
-** then reopens the database file (this time in vanilla, non-oal, WAL mode).
-** If an error occurs, leave an error code and error message in the rbu
-** handle.
-*/
-static void rbuMoveOalFile(sqlite3rbu *p){
- const char *zBase = sqlite3_db_filename(p->dbMain, "main");
-
- char *zWal = sqlite3_mprintf("%s-wal", zBase);
- char *zOal = sqlite3_mprintf("%s-oal", zBase);
-
- assert( p->eStage==RBU_STAGE_MOVE );
- assert( p->rc==SQLITE_OK && p->zErrmsg==0 );
- if( zWal==0 || zOal==0 ){
- p->rc = SQLITE_NOMEM;
- }else{
- /* Move the *-oal file to *-wal. At this point connection p->dbMain is
- ** holding a SHARED lock on the target database file (because it is
- ** in WAL mode). So no other connection may be writing the db.
- **
- ** In order to ensure that there are no database readers, an EXCLUSIVE
- ** lock is obtained here before the *-oal is moved to *-wal.
- */
- rbuLockDatabase(p);
- if( p->rc==SQLITE_OK ){
- rbuFileSuffix3(zBase, zWal);
- rbuFileSuffix3(zBase, zOal);
-
- /* Re-open the databases. */
- rbuObjIterFinalize(&p->objiter);
- sqlite3_close(p->dbMain);
- sqlite3_close(p->dbRbu);
- p->dbMain = 0;
- p->dbRbu = 0;
-
-#if defined(_WIN32_WCE)
- {
- LPWSTR zWideOal;
- LPWSTR zWideWal;
-
- zWideOal = rbuWinUtf8ToUnicode(zOal);
- if( zWideOal ){
- zWideWal = rbuWinUtf8ToUnicode(zWal);
- if( zWideWal ){
- if( MoveFileW(zWideOal, zWideWal) ){
- p->rc = SQLITE_OK;
- }else{
- p->rc = SQLITE_IOERR;
- }
- sqlite3_free(zWideWal);
- }else{
- p->rc = SQLITE_IOERR_NOMEM;
- }
- sqlite3_free(zWideOal);
- }else{
- p->rc = SQLITE_IOERR_NOMEM;
- }
- }
-#else
- p->rc = rename(zOal, zWal) ? SQLITE_IOERR : SQLITE_OK;
-#endif
-
- if( p->rc==SQLITE_OK ){
- rbuOpenDatabase(p);
- rbuSetupCheckpoint(p, 0);
- }
- }
- }
-
- sqlite3_free(zWal);
- sqlite3_free(zOal);
-}
-
-/*
-** The SELECT statement iterating through the keys for the current object
-** (p->objiter.pSelect) currently points to a valid row. This function
-** determines the type of operation requested by this row and returns
-** one of the following values to indicate the result:
-**
-** * RBU_INSERT
-** * RBU_DELETE
-** * RBU_IDX_DELETE
-** * RBU_UPDATE
-**
-** If RBU_UPDATE is returned, then output variable *pzMask is set to
-** point to the text value indicating the columns to update.
-**
-** If the rbu_control field contains an invalid value, an error code and
-** message are left in the RBU handle and zero returned.
-*/
-static int rbuStepType(sqlite3rbu *p, const char **pzMask){
- int iCol = p->objiter.nCol; /* Index of rbu_control column */
- int res = 0; /* Return value */
-
- switch( sqlite3_column_type(p->objiter.pSelect, iCol) ){
- case SQLITE_INTEGER: {
- int iVal = sqlite3_column_int(p->objiter.pSelect, iCol);
- if( iVal==0 ){
- res = RBU_INSERT;
- }else if( iVal==1 ){
- res = RBU_DELETE;
- }else if( iVal==2 ){
- res = RBU_IDX_DELETE;
- }else if( iVal==3 ){
- res = RBU_IDX_INSERT;
- }
- break;
- }
-
- case SQLITE_TEXT: {
- const unsigned char *z = sqlite3_column_text(p->objiter.pSelect, iCol);
- if( z==0 ){
- p->rc = SQLITE_NOMEM;
- }else{
- *pzMask = (const char*)z;
- }
- res = RBU_UPDATE;
-
- break;
- }
-
- default:
- break;
- }
-
- if( res==0 ){
- rbuBadControlError(p);
- }
- return res;
-}
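-
-/* For reference, the rbu_control mapping implemented by the switch
-** statement above is:
-**
-**   0    -> RBU_INSERT      (insert a row)
-**   1    -> RBU_DELETE      (delete a row)
-**   2    -> RBU_IDX_DELETE  (delete an index entry)
-**   3    -> RBU_IDX_INSERT  (insert an index entry)
-**   text -> RBU_UPDATE      (the text is the per-column update mask)
-**
-** Any other value is reported via rbuBadControlError().
-*/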
-
-#ifdef SQLITE_DEBUG
-/*
-** Assert that column iCol of statement pStmt is named zName.
-*/
-static void assertColumnName(sqlite3_stmt *pStmt, int iCol, const char *zName){
- const char *zCol = sqlite3_column_name(pStmt, iCol);
- assert( 0==sqlite3_stricmp(zName, zCol) );
-}
-#else
-# define assertColumnName(x,y,z)
-#endif
-
-/*
-** This function does the work for an sqlite3rbu_step() call.
-**
-** The object-iterator (p->objiter) currently points to a valid object,
-** and the input cursor (p->objiter.pSelect) currently points to a valid
-** input row. Perform whatever processing is required and return.
-**
-** If no error occurs, SQLITE_OK is returned. Otherwise, an error code
-** and message is left in the RBU handle and a copy of the error code
-** returned.
-*/
-static int rbuStep(sqlite3rbu *p){
- RbuObjIter *pIter = &p->objiter;
- const char *zMask = 0;
- int i;
- int eType = rbuStepType(p, &zMask);
-
- if( eType ){
- assert( eType!=RBU_UPDATE || pIter->zIdx==0 );
-
- if( pIter->zIdx==0 && eType==RBU_IDX_DELETE ){
- rbuBadControlError(p);
- }
- else if(
- eType==RBU_INSERT
- || eType==RBU_DELETE
- || eType==RBU_IDX_DELETE
- || eType==RBU_IDX_INSERT
- ){
- sqlite3_value *pVal;
- sqlite3_stmt *pWriter;
-
- assert( eType!=RBU_UPDATE );
- assert( eType!=RBU_DELETE || pIter->zIdx==0 );
-
- if( eType==RBU_IDX_DELETE || eType==RBU_DELETE ){
- pWriter = pIter->pDelete;
- }else{
- pWriter = pIter->pInsert;
- }
-
- for(i=0; i<pIter->nCol; i++){
- /* If this is an INSERT into a table b-tree and the table has an
- ** explicit INTEGER PRIMARY KEY, check that this is not an attempt
- ** to write a NULL into the IPK column. That is not permitted. */
- if( eType==RBU_INSERT
- && pIter->zIdx==0 && pIter->eType==RBU_PK_IPK && pIter->abTblPk[i]
- && sqlite3_column_type(pIter->pSelect, i)==SQLITE_NULL
- ){
- p->rc = SQLITE_MISMATCH;
- p->zErrmsg = sqlite3_mprintf("datatype mismatch");
- goto step_out;
- }
-
- if( eType==RBU_DELETE && pIter->abTblPk[i]==0 ){
- continue;
- }
-
- pVal = sqlite3_column_value(pIter->pSelect, i);
- p->rc = sqlite3_bind_value(pWriter, i+1, pVal);
- if( p->rc ) goto step_out;
- }
- if( pIter->zIdx==0
- && (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE)
- ){
- /* For a virtual table, or a table with no primary key, the
- ** SELECT statement is:
- **
- ** SELECT <cols>, rbu_control, rbu_rowid FROM ....
- **
- ** Hence column_value(pIter->nCol+1).
- */
- assertColumnName(pIter->pSelect, pIter->nCol+1, "rbu_rowid");
- pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1);
- p->rc = sqlite3_bind_value(pWriter, pIter->nCol+1, pVal);
- }
- if( p->rc==SQLITE_OK ){
- sqlite3_step(pWriter);
- p->rc = resetAndCollectError(pWriter, &p->zErrmsg);
- }
- }else{
- sqlite3_value *pVal;
- sqlite3_stmt *pUpdate = 0;
- assert( eType==RBU_UPDATE );
- rbuGetUpdateStmt(p, pIter, zMask, &pUpdate);
- if( pUpdate ){
- for(i=0; p->rc==SQLITE_OK && i<pIter->nCol; i++){
- char c = zMask[pIter->aiSrcOrder[i]];
- pVal = sqlite3_column_value(pIter->pSelect, i);
- if( pIter->abTblPk[i] || c!='.' ){
- p->rc = sqlite3_bind_value(pUpdate, i+1, pVal);
- }
- }
- if( p->rc==SQLITE_OK
- && (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE)
- ){
- /* Bind the rbu_rowid value to column _rowid_ */
- assertColumnName(pIter->pSelect, pIter->nCol+1, "rbu_rowid");
- pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1);
- p->rc = sqlite3_bind_value(pUpdate, pIter->nCol+1, pVal);
- }
- if( p->rc==SQLITE_OK ){
- sqlite3_step(pUpdate);
- p->rc = resetAndCollectError(pUpdate, &p->zErrmsg);
- }
- }
- }
- }
-
- step_out:
- return p->rc;
-}
-
-/*
-** Increment the schema cookie of the main database opened by p->dbMain.
-*/
-static void rbuIncrSchemaCookie(sqlite3rbu *p){
- if( p->rc==SQLITE_OK ){
- int iCookie = 1000000;
- sqlite3_stmt *pStmt;
-
- p->rc = prepareAndCollectError(p->dbMain, &pStmt, &p->zErrmsg,
- "PRAGMA schema_version"
- );
- if( p->rc==SQLITE_OK ){
- /* Coverage: it may be that this sqlite3_step() cannot fail. There
- ** is already a transaction open, so the prepared statement cannot
- ** throw an SQLITE_SCHEMA exception. The only database page the
- ** statement reads is page 1, which is guaranteed to be in the cache.
- ** And no memory allocations are required. */
- if( SQLITE_ROW==sqlite3_step(pStmt) ){
- iCookie = sqlite3_column_int(pStmt, 0);
- }
- rbuFinalize(p, pStmt);
- }
- if( p->rc==SQLITE_OK ){
- rbuMPrintfExec(p, p->dbMain, "PRAGMA schema_version = %d", iCookie+1);
- }
- }
-}
-
-/*
-** Update the contents of the rbu_state table within the rbu database. The
-** value stored in the RBU_STATE_STAGE column is eStage. All other values
-** are determined by inspecting the rbu handle passed as the first argument.
-*/
-static void rbuSaveState(sqlite3rbu *p, int eStage){
- if( p->rc==SQLITE_OK || p->rc==SQLITE_DONE ){
- sqlite3_stmt *pInsert = 0;
- int rc;
-
- assert( p->zErrmsg==0 );
- rc = prepareFreeAndCollectError(p->dbRbu, &pInsert, &p->zErrmsg,
- sqlite3_mprintf(
- "INSERT OR REPLACE INTO %s.rbu_state(k, v) VALUES "
- "(%d, %d), "
- "(%d, %Q), "
- "(%d, %Q), "
- "(%d, %d), "
- "(%d, %d), "
- "(%d, %lld), "
- "(%d, %lld), "
- "(%d, %lld) ",
- p->zStateDb,
- RBU_STATE_STAGE, eStage,
- RBU_STATE_TBL, p->objiter.zTbl,
- RBU_STATE_IDX, p->objiter.zIdx,
- RBU_STATE_ROW, p->nStep,
- RBU_STATE_PROGRESS, p->nProgress,
- RBU_STATE_CKPT, p->iWalCksum,
- RBU_STATE_COOKIE, (i64)p->pTargetFd->iCookie,
- RBU_STATE_OALSZ, p->iOalSz
- )
- );
- assert( pInsert==0 || rc==SQLITE_OK );
-
- if( rc==SQLITE_OK ){
- sqlite3_step(pInsert);
- rc = sqlite3_finalize(pInsert);
- }
- if( rc!=SQLITE_OK ) p->rc = rc;
- }
-}
-
-
-/*
-** Step the RBU object.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *p){
- if( p ){
- switch( p->eStage ){
- case RBU_STAGE_OAL: {
- RbuObjIter *pIter = &p->objiter;
- while( p->rc==SQLITE_OK && pIter->zTbl ){
-
- if( pIter->bCleanup ){
- /* Clean up the rbu_tmp_xxx table for the previous table. It
- ** cannot be dropped as there are currently active SQL statements.
- ** But the contents can be deleted. */
- if( pIter->abIndexed ){
- rbuMPrintfExec(p, p->dbRbu,
- "DELETE FROM %s.'rbu_tmp_%q'", p->zStateDb, pIter->zDataTbl
- );
- }
- }else{
- rbuObjIterPrepareAll(p, pIter, 0);
-
- /* Advance to the next row to process. */
- if( p->rc==SQLITE_OK ){
- int rc = sqlite3_step(pIter->pSelect);
- if( rc==SQLITE_ROW ){
- p->nProgress++;
- p->nStep++;
- return rbuStep(p);
- }
- p->rc = sqlite3_reset(pIter->pSelect);
- p->nStep = 0;
- }
- }
-
- rbuObjIterNext(p, pIter);
- }
-
- if( p->rc==SQLITE_OK ){
- assert( pIter->zTbl==0 );
- rbuSaveState(p, RBU_STAGE_MOVE);
- rbuIncrSchemaCookie(p);
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_exec(p->dbMain, "COMMIT", 0, 0, &p->zErrmsg);
- }
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, &p->zErrmsg);
- }
- p->eStage = RBU_STAGE_MOVE;
- }
- break;
- }
-
- case RBU_STAGE_MOVE: {
- if( p->rc==SQLITE_OK ){
- rbuMoveOalFile(p);
- p->nProgress++;
- }
- break;
- }
-
- case RBU_STAGE_CKPT: {
- if( p->rc==SQLITE_OK ){
- if( p->nStep>=p->nFrame ){
- sqlite3_file *pDb = p->pTargetFd->pReal;
-
- /* Sync the db file */
- p->rc = pDb->pMethods->xSync(pDb, SQLITE_SYNC_NORMAL);
-
- /* Update nBackfill */
- if( p->rc==SQLITE_OK ){
- void volatile *ptr;
- p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, &ptr);
- if( p->rc==SQLITE_OK ){
- ((u32 volatile*)ptr)[24] = p->iMaxFrame;
- }
- }
-
- if( p->rc==SQLITE_OK ){
- p->eStage = RBU_STAGE_DONE;
- p->rc = SQLITE_DONE;
- }
- }else{
- RbuFrame *pFrame = &p->aFrame[p->nStep];
- rbuCheckpointFrame(p, pFrame);
- p->nStep++;
- }
- p->nProgress++;
- }
- break;
- }
-
- default:
- break;
- }
- return p->rc;
- }else{
- return SQLITE_NOMEM;
- }
-}
-
-/*
-** Free an RbuState object allocated by rbuLoadState().
-*/
-static void rbuFreeState(RbuState *p){
- if( p ){
- sqlite3_free(p->zTbl);
- sqlite3_free(p->zIdx);
- sqlite3_free(p);
- }
-}
-
-/*
-** Allocate an RbuState object and load the contents of the rbu_state
-** table into it. Return a pointer to the new object. It is the
-** responsibility of the caller to eventually free the object using
-** rbuFreeState().
-**
-** If an error occurs, leave an error code and message in the rbu handle
-** and return NULL.
-*/
-static RbuState *rbuLoadState(sqlite3rbu *p){
- RbuState *pRet = 0;
- sqlite3_stmt *pStmt = 0;
- int rc;
- int rc2;
-
- pRet = (RbuState*)rbuMalloc(p, sizeof(RbuState));
- if( pRet==0 ) return 0;
-
- rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
- sqlite3_mprintf("SELECT k, v FROM %s.rbu_state", p->zStateDb)
- );
- while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
- switch( sqlite3_column_int(pStmt, 0) ){
- case RBU_STATE_STAGE:
- pRet->eStage = sqlite3_column_int(pStmt, 1);
- if( pRet->eStage!=RBU_STAGE_OAL
- && pRet->eStage!=RBU_STAGE_MOVE
- && pRet->eStage!=RBU_STAGE_CKPT
- ){
- p->rc = SQLITE_CORRUPT;
- }
- break;
-
- case RBU_STATE_TBL:
- pRet->zTbl = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc);
- break;
-
- case RBU_STATE_IDX:
- pRet->zIdx = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc);
- break;
-
- case RBU_STATE_ROW:
- pRet->nRow = sqlite3_column_int(pStmt, 1);
- break;
-
- case RBU_STATE_PROGRESS:
- pRet->nProgress = sqlite3_column_int64(pStmt, 1);
- break;
-
- case RBU_STATE_CKPT:
- pRet->iWalCksum = sqlite3_column_int64(pStmt, 1);
- break;
-
- case RBU_STATE_COOKIE:
- pRet->iCookie = (u32)sqlite3_column_int64(pStmt, 1);
- break;
-
- case RBU_STATE_OALSZ:
- pRet->iOalSz = (u32)sqlite3_column_int64(pStmt, 1);
- break;
-
- default:
- rc = SQLITE_CORRUPT;
- break;
- }
- }
- rc2 = sqlite3_finalize(pStmt);
- if( rc==SQLITE_OK ) rc = rc2;
-
- p->rc = rc;
- return pRet;
-}
-
-/*
-** Compare strings z1 and z2, returning 0 if they are identical, or non-zero
-** otherwise. Either or both arguments may be NULL. Two NULL values are
-** considered equal, and NULL is considered distinct from all other values.
-*/
-static int rbuStrCompare(const char *z1, const char *z2){
- if( z1==0 && z2==0 ) return 0;
- if( z1==0 || z2==0 ) return 1;
- return (sqlite3_stricmp(z1, z2)!=0);
-}
-
-/*
-** This function is called as part of sqlite3rbu_open() when initializing
-** an rbu handle in OAL stage. If the rbu update has not started (i.e.
-** the rbu_state table was empty) it is a no-op. Otherwise, it arranges
-** things so that the next call to sqlite3rbu_step() continues on from
-** where the previous rbu handle left off.
-**
-** If an error occurs, an error code and error message are left in the
-** rbu handle passed as the first argument.
-*/
-static void rbuSetupOal(sqlite3rbu *p, RbuState *pState){
- assert( p->rc==SQLITE_OK );
- if( pState->zTbl ){
- RbuObjIter *pIter = &p->objiter;
- int rc = SQLITE_OK;
-
- while( rc==SQLITE_OK && pIter->zTbl && (pIter->bCleanup
- || rbuStrCompare(pIter->zIdx, pState->zIdx)
- || rbuStrCompare(pIter->zTbl, pState->zTbl)
- )){
- rc = rbuObjIterNext(p, pIter);
- }
-
- if( rc==SQLITE_OK && !pIter->zTbl ){
- rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf("rbu_state mismatch error");
- }
-
- if( rc==SQLITE_OK ){
- p->nStep = pState->nRow;
- rc = rbuObjIterPrepareAll(p, &p->objiter, p->nStep);
- }
-
- p->rc = rc;
- }
-}
-
-/*
-** If there is a "*-oal" file in the file-system corresponding to the
-** target database in the file-system, delete it. If an error occurs,
-** leave an error code and error message in the rbu handle.
-*/
-static void rbuDeleteOalFile(sqlite3rbu *p){
- char *zOal = rbuMPrintf(p, "%s-oal", p->zTarget);
- if( zOal ){
- sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
- assert( pVfs && p->rc==SQLITE_OK && p->zErrmsg==0 );
- pVfs->xDelete(pVfs, zOal, 0);
- sqlite3_free(zOal);
- }
-}
-
-/*
-** Allocate a private rbu VFS for the rbu handle passed as the only
-** argument. This VFS will be used unless the call to sqlite3rbu_open()
-** specified a URI with a vfs=? option in place of a target database
-** file name.
-*/
-static void rbuCreateVfs(sqlite3rbu *p){
- int rnd;
- char zRnd[64];
-
- assert( p->rc==SQLITE_OK );
- sqlite3_randomness(sizeof(int), (void*)&rnd);
- sqlite3_snprintf(sizeof(zRnd), zRnd, "rbu_vfs_%d", rnd);
- p->rc = sqlite3rbu_create_vfs(zRnd, 0);
- if( p->rc==SQLITE_OK ){
- sqlite3_vfs *pVfs = sqlite3_vfs_find(zRnd);
- assert( pVfs );
- p->zVfsName = pVfs->zName;
- }
-}
-
-/*
-** Destroy the private VFS created for the rbu handle passed as the only
-** argument by an earlier call to rbuCreateVfs().
-*/
-static void rbuDeleteVfs(sqlite3rbu *p){
- if( p->zVfsName ){
- sqlite3rbu_destroy_vfs(p->zVfsName);
- p->zVfsName = 0;
- }
-}
-
-/*
-** Open and return a new RBU handle.
-*/
-SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
- const char *zTarget,
- const char *zRbu,
- const char *zState
-){
- sqlite3rbu *p;
- int nTarget = strlen(zTarget);
- int nRbu = strlen(zRbu);
- int nState = zState ? strlen(zState) : 0;
-
- p = (sqlite3rbu*)sqlite3_malloc(sizeof(sqlite3rbu)+nTarget+1+nRbu+1+nState+1);
- if( p ){
- RbuState *pState = 0;
-
- /* Create the custom VFS. */
- memset(p, 0, sizeof(sqlite3rbu));
- rbuCreateVfs(p);
-
- /* Open the target database */
- if( p->rc==SQLITE_OK ){
- p->zTarget = (char*)&p[1];
- memcpy(p->zTarget, zTarget, nTarget+1);
- p->zRbu = &p->zTarget[nTarget+1];
- memcpy(p->zRbu, zRbu, nRbu+1);
- if( zState ){
- p->zState = &p->zRbu[nRbu+1];
- memcpy(p->zState, zState, nState+1);
- }
- rbuOpenDatabase(p);
- }
-
- /* If it has not already been created, create the rbu_state table */
- rbuMPrintfExec(p, p->dbRbu, RBU_CREATE_STATE, p->zStateDb);
-
- if( p->rc==SQLITE_OK ){
- pState = rbuLoadState(p);
- assert( pState || p->rc!=SQLITE_OK );
- if( p->rc==SQLITE_OK ){
-
- if( pState->eStage==0 ){
- rbuDeleteOalFile(p);
- p->eStage = RBU_STAGE_OAL;
- }else{
- p->eStage = pState->eStage;
- }
- p->nProgress = pState->nProgress;
- p->iOalSz = pState->iOalSz;
- }
- }
- assert( p->rc!=SQLITE_OK || p->eStage!=0 );
-
- if( p->rc==SQLITE_OK && p->pTargetFd->pWalFd ){
- if( p->eStage==RBU_STAGE_OAL ){
- p->rc = SQLITE_ERROR;
- p->zErrmsg = sqlite3_mprintf("cannot update wal mode database");
- }else if( p->eStage==RBU_STAGE_MOVE ){
- p->eStage = RBU_STAGE_CKPT;
- p->nStep = 0;
- }
- }
-
- if( p->rc==SQLITE_OK
- && (p->eStage==RBU_STAGE_OAL || p->eStage==RBU_STAGE_MOVE)
- && pState->eStage!=0 && p->pTargetFd->iCookie!=pState->iCookie
- ){
- /* At this point (pTargetFd->iCookie) contains the value of the
- ** change-counter cookie (the thing that gets incremented when a
- ** transaction is committed in rollback mode) currently stored on
- ** page 1 of the database file. */
- p->rc = SQLITE_BUSY;
- p->zErrmsg = sqlite3_mprintf("database modified during rbu update");
- }
-
- if( p->rc==SQLITE_OK ){
- if( p->eStage==RBU_STAGE_OAL ){
- sqlite3 *db = p->dbMain;
-
-      /* Open transactions on both databases. The *-oal file is opened or
- ** created at this point. */
- p->rc = sqlite3_exec(db, "BEGIN IMMEDIATE", 0, 0, &p->zErrmsg);
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3_exec(p->dbRbu, "BEGIN IMMEDIATE", 0, 0, &p->zErrmsg);
- }
-
- /* Check if the main database is a zipvfs db. If it is, set the upper
- ** level pager to use "journal_mode=off". This prevents it from
- ** generating a large journal using a temp file. */
- if( p->rc==SQLITE_OK ){
- int frc = sqlite3_file_control(db, "main", SQLITE_FCNTL_ZIPVFS, 0);
- if( frc==SQLITE_OK ){
- p->rc = sqlite3_exec(db, "PRAGMA journal_mode=off",0,0,&p->zErrmsg);
- }
- }
-
- /* Point the object iterator at the first object */
- if( p->rc==SQLITE_OK ){
- p->rc = rbuObjIterFirst(p, &p->objiter);
- }
-
- /* If the RBU database contains no data_xxx tables, declare the RBU
- ** update finished. */
- if( p->rc==SQLITE_OK && p->objiter.zTbl==0 ){
- p->rc = SQLITE_DONE;
- }
-
- if( p->rc==SQLITE_OK ){
- rbuSetupOal(p, pState);
- }
-
- }else if( p->eStage==RBU_STAGE_MOVE ){
- /* no-op */
- }else if( p->eStage==RBU_STAGE_CKPT ){
- rbuSetupCheckpoint(p, pState);
- }else if( p->eStage==RBU_STAGE_DONE ){
- p->rc = SQLITE_DONE;
- }else{
- p->rc = SQLITE_CORRUPT;
- }
- }
-
- rbuFreeState(pState);
- }
-
- return p;
-}
-
-
-/*
-** Return the database handle used by pRbu.
-*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu *pRbu, int bRbu){
- sqlite3 *db = 0;
- if( pRbu ){
- db = (bRbu ? pRbu->dbRbu : pRbu->dbMain);
- }
- return db;
-}
-
-
-/*
-** If the error code currently stored in the RBU handle is SQLITE_CONSTRAINT,
-** then edit any error message string so as to remove all occurrences of
-** the pattern "rbu_imp_[0-9]*".
-*/
-static void rbuEditErrmsg(sqlite3rbu *p){
- if( p->rc==SQLITE_CONSTRAINT && p->zErrmsg ){
- int i;
- int nErrmsg = strlen(p->zErrmsg);
- for(i=0; i<(nErrmsg-8); i++){
- if( memcmp(&p->zErrmsg[i], "rbu_imp_", 8)==0 ){
- int nDel = 8;
- while( p->zErrmsg[i+nDel]>='0' && p->zErrmsg[i+nDel]<='9' ) nDel++;
- memmove(&p->zErrmsg[i], &p->zErrmsg[i+nDel], nErrmsg + 1 - i - nDel);
- nErrmsg -= nDel;
- }
- }
- }
-}
-
-/*
-** Close the RBU handle.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){
- int rc;
- if( p ){
-
- /* Commit the transaction to the *-oal file. */
- if( p->rc==SQLITE_OK && p->eStage==RBU_STAGE_OAL ){
- p->rc = sqlite3_exec(p->dbMain, "COMMIT", 0, 0, &p->zErrmsg);
- }
-
- rbuSaveState(p, p->eStage);
-
- if( p->rc==SQLITE_OK && p->eStage==RBU_STAGE_OAL ){
- p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, &p->zErrmsg);
- }
-
- /* Close any open statement handles. */
- rbuObjIterFinalize(&p->objiter);
-
- /* Close the open database handle and VFS object. */
- sqlite3_close(p->dbMain);
- sqlite3_close(p->dbRbu);
- rbuDeleteVfs(p);
- sqlite3_free(p->aBuf);
- sqlite3_free(p->aFrame);
-
- rbuEditErrmsg(p);
- rc = p->rc;
- *pzErrmsg = p->zErrmsg;
- sqlite3_free(p);
- }else{
- rc = SQLITE_NOMEM;
- *pzErrmsg = 0;
- }
- return rc;
-}
-
-/*
-** Return the total number of key-value operations (inserts, deletes or
-** updates) that have been performed on the target database since the
-** current RBU update was started.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu){
- return pRbu->nProgress;
-}
-
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_savestate(sqlite3rbu *p){
- int rc = p->rc;
-
- if( rc==SQLITE_DONE ) return SQLITE_OK;
-
- assert( p->eStage>=RBU_STAGE_OAL && p->eStage<=RBU_STAGE_DONE );
- if( p->eStage==RBU_STAGE_OAL ){
- assert( rc!=SQLITE_DONE );
- if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbMain, "COMMIT", 0, 0, 0);
- }
-
- p->rc = rc;
- rbuSaveState(p, p->eStage);
- rc = p->rc;
-
- if( p->eStage==RBU_STAGE_OAL ){
- assert( rc!=SQLITE_DONE );
- if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, 0);
- if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbRbu, "BEGIN IMMEDIATE", 0, 0, 0);
- if( rc==SQLITE_OK ) rc = sqlite3_exec(p->dbMain, "BEGIN IMMEDIATE", 0, 0,0);
- }
-
- p->rc = rc;
- return rc;
-}
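-
-/* Illustrative note, not part of the original sources: a minimal sketch of
-** how a client might drive the public API defined above. The file names are
-** hypothetical and error handling is reduced to the essentials.
-**
-**   sqlite3rbu *pRbu = sqlite3rbu_open("test.db", "update.rbu", 0);
-**   while( sqlite3rbu_step(pRbu)==SQLITE_OK ){
-**     ;  // one key-value operation applied; may break here and resume later
-**   }
-**   char *zErr = 0;
-**   int rc = sqlite3rbu_close(pRbu, &zErr);  // SQLITE_DONE if update finished
-**   if( rc!=SQLITE_DONE && rc!=SQLITE_OK ){
-**     fprintf(stderr, "rbu error: %s\n", zErr ? zErr : "out of memory");
-**   }
-**   sqlite3_free(zErr);
-*/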
-
-/**************************************************************************
-** Beginning of RBU VFS shim methods. The VFS shim modifies the behaviour
-** of a standard VFS in the following ways:
-**
-** 1. Whenever the first page of a main database file is read or
-** written, the value of the change-counter cookie is stored in
-** rbu_file.iCookie. Similarly, the value of the "write-version"
-** database header field is stored in rbu_file.iWriteVer. This ensures
-** that the values are always trustworthy within an open transaction.
-**
-** 2. Whenever an SQLITE_OPEN_WAL file is opened, the (rbu_file.pWalFd)
-** member variable of the associated database file descriptor is set
-** to point to the new file. A mutex protected linked list of all main
-** db fds opened using a particular RBU VFS is maintained at
-** rbu_vfs.pMain to facilitate this.
-**
-** 3. Using a new file-control "SQLITE_FCNTL_RBU", a main db rbu_file
-** object can be marked as the target database of an RBU update. This
-** turns on the following extra special behaviour:
-**
-** 3a. If xAccess() is called to check if there exists a *-wal file
-** associated with an RBU target database currently in RBU_STAGE_OAL
-** stage (preparing the *-oal file), the following special handling
-** applies:
-**
-** * if the *-wal file does exist, return SQLITE_CANTOPEN. An RBU
-** target database may not be in wal mode already.
-**
-** * if the *-wal file does not exist, set the output parameter to
-** non-zero (to tell SQLite that it does exist) anyway.
-**
-** Then, when xOpen() is called to open the *-wal file associated with
-** the RBU target in RBU_STAGE_OAL stage, the rbu vfs opens the
-** corresponding *-oal file instead of the *-wal file.
-**
-** 3b. The *-shm pages returned by xShmMap() for a target db file in
-** RBU_STAGE_OAL mode are actually stored in heap memory. This is to
-** avoid creating a *-shm file on disk. Additionally, xShmLock() calls
-** are no-ops on target database files in RBU_STAGE_OAL mode. This is
-** because assert() statements in some VFS implementations fail if
-** xShmLock() is called before xShmMap().
-**
-** 3c. If an EXCLUSIVE lock is attempted on a target database file in any
-** mode except RBU_STAGE_DONE (all work completed and checkpointed), it
-** fails with an SQLITE_BUSY error. This is to stop RBU connections
-** from automatically checkpointing a *-wal (or *-oal) file from within
-** sqlite3_close().
-**
-** 3d. In RBU_STAGE_CAPTURE mode, all xRead() calls on the wal file, and
-** all xWrite() calls on the target database file perform no IO.
-** Instead the frame and page numbers that would be read and written
-** are recorded. Additionally, successful attempts to obtain exclusive
-** xShmLock() WRITER, CHECKPOINTER and READ0 locks on the target
-** database file are recorded. xShmLock() calls to unlock the same
-** locks are no-ops (so that once obtained, these locks are never
-** relinquished). Finally, calls to xSync() on the target database
-** file fail with SQLITE_INTERNAL errors.
-*/
-
-static void rbuUnlockShm(rbu_file *p){
- if( p->pRbu ){
- int (*xShmLock)(sqlite3_file*,int,int,int) = p->pReal->pMethods->xShmLock;
- int i;
- for(i=0; i<SQLITE_SHM_NLOCK;i++){
- if( (1<<i) & p->pRbu->mLock ){
- xShmLock(p->pReal, i, 1, SQLITE_SHM_UNLOCK|SQLITE_SHM_EXCLUSIVE);
- }
- }
- p->pRbu->mLock = 0;
- }
-}
-
-/*
-** Close an rbu file.
-*/
-static int rbuVfsClose(sqlite3_file *pFile){
- rbu_file *p = (rbu_file*)pFile;
- int rc;
- int i;
-
- /* Free the contents of the apShm[] array. And the array itself. */
- for(i=0; i<p->nShm; i++){
- sqlite3_free(p->apShm[i]);
- }
- sqlite3_free(p->apShm);
- p->apShm = 0;
- sqlite3_free(p->zDel);
-
- if( p->openFlags & SQLITE_OPEN_MAIN_DB ){
- rbu_file **pp;
- sqlite3_mutex_enter(p->pRbuVfs->mutex);
- for(pp=&p->pRbuVfs->pMain; *pp!=p; pp=&((*pp)->pMainNext));
- *pp = p->pMainNext;
- sqlite3_mutex_leave(p->pRbuVfs->mutex);
- rbuUnlockShm(p);
- p->pReal->pMethods->xShmUnmap(p->pReal, 0);
- }
-
- /* Close the underlying file handle */
- rc = p->pReal->pMethods->xClose(p->pReal);
- return rc;
-}
-
-
-/*
-** Read and return an unsigned 32-bit big-endian integer from the buffer
-** passed as the only argument.
-*/
-static u32 rbuGetU32(u8 *aBuf){
- return ((u32)aBuf[0] << 24)
- + ((u32)aBuf[1] << 16)
- + ((u32)aBuf[2] << 8)
- + ((u32)aBuf[3]);
-}
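-
-/* Illustrative note, not part of the original sources: for example the bytes
-** {0x00, 0x01, 0x02, 0x03} decode to (0<<24)+(1<<16)+(2<<8)+3 == 66051. */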
-
-/*
-** Read data from an rbuVfs-file.
-*/
-static int rbuVfsRead(
- sqlite3_file *pFile,
- void *zBuf,
- int iAmt,
- sqlite_int64 iOfst
-){
- rbu_file *p = (rbu_file*)pFile;
- sqlite3rbu *pRbu = p->pRbu;
- int rc;
-
- if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){
- assert( p->openFlags & SQLITE_OPEN_WAL );
- rc = rbuCaptureWalRead(p->pRbu, iOfst, iAmt);
- }else{
- if( pRbu && pRbu->eStage==RBU_STAGE_OAL
- && (p->openFlags & SQLITE_OPEN_WAL)
- && iOfst>=pRbu->iOalSz
- ){
- rc = SQLITE_OK;
- memset(zBuf, 0, iAmt);
- }else{
- rc = p->pReal->pMethods->xRead(p->pReal, zBuf, iAmt, iOfst);
- }
- if( rc==SQLITE_OK && iOfst==0 && (p->openFlags & SQLITE_OPEN_MAIN_DB) ){
- /* These look like magic numbers. But they are stable, as they are part
- ** of the definition of the SQLite file format, which may not change. */
- u8 *pBuf = (u8*)zBuf;
- p->iCookie = rbuGetU32(&pBuf[24]);
- p->iWriteVer = pBuf[19];
- }
- }
- return rc;
-}
-
-/*
-** Write data to an rbuVfs-file.
-*/
-static int rbuVfsWrite(
- sqlite3_file *pFile,
- const void *zBuf,
- int iAmt,
- sqlite_int64 iOfst
-){
- rbu_file *p = (rbu_file*)pFile;
- sqlite3rbu *pRbu = p->pRbu;
- int rc;
-
- if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){
- assert( p->openFlags & SQLITE_OPEN_MAIN_DB );
- rc = rbuCaptureDbWrite(p->pRbu, iOfst);
- }else{
- if( pRbu && pRbu->eStage==RBU_STAGE_OAL
- && (p->openFlags & SQLITE_OPEN_WAL)
- && iOfst>=pRbu->iOalSz
- ){
- pRbu->iOalSz = iAmt + iOfst;
- }
- rc = p->pReal->pMethods->xWrite(p->pReal, zBuf, iAmt, iOfst);
- if( rc==SQLITE_OK && iOfst==0 && (p->openFlags & SQLITE_OPEN_MAIN_DB) ){
- /* These look like magic numbers. But they are stable, as they are part
- ** of the definition of the SQLite file format, which may not change. */
- u8 *pBuf = (u8*)zBuf;
- p->iCookie = rbuGetU32(&pBuf[24]);
- p->iWriteVer = pBuf[19];
- }
- }
- return rc;
-}
-
-/*
-** Truncate an rbuVfs-file.
-*/
-static int rbuVfsTruncate(sqlite3_file *pFile, sqlite_int64 size){
- rbu_file *p = (rbu_file*)pFile;
- return p->pReal->pMethods->xTruncate(p->pReal, size);
-}
-
-/*
-** Sync an rbuVfs-file.
-*/
-static int rbuVfsSync(sqlite3_file *pFile, int flags){
- rbu_file *p = (rbu_file *)pFile;
- if( p->pRbu && p->pRbu->eStage==RBU_STAGE_CAPTURE ){
- if( p->openFlags & SQLITE_OPEN_MAIN_DB ){
- return SQLITE_INTERNAL;
- }
- return SQLITE_OK;
- }
- return p->pReal->pMethods->xSync(p->pReal, flags);
-}
-
-/*
-** Return the current file-size of an rbuVfs-file.
-*/
-static int rbuVfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
- rbu_file *p = (rbu_file *)pFile;
- return p->pReal->pMethods->xFileSize(p->pReal, pSize);
-}
-
-/*
-** Lock an rbuVfs-file.
-*/
-static int rbuVfsLock(sqlite3_file *pFile, int eLock){
- rbu_file *p = (rbu_file*)pFile;
- sqlite3rbu *pRbu = p->pRbu;
- int rc = SQLITE_OK;
-
- assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
- if( pRbu && eLock==SQLITE_LOCK_EXCLUSIVE && pRbu->eStage!=RBU_STAGE_DONE ){
-    /* Do not allow EXCLUSIVE locks. Preventing SQLite from taking this lock
-    ** stops it from checkpointing the database from within sqlite3_close(). */
- rc = SQLITE_BUSY;
- }else{
- rc = p->pReal->pMethods->xLock(p->pReal, eLock);
- }
-
- return rc;
-}
-
-/*
-** Unlock an rbuVfs-file.
-*/
-static int rbuVfsUnlock(sqlite3_file *pFile, int eLock){
- rbu_file *p = (rbu_file *)pFile;
- return p->pReal->pMethods->xUnlock(p->pReal, eLock);
-}
-
-/*
-** Check if another file-handle holds a RESERVED lock on an rbuVfs-file.
-*/
-static int rbuVfsCheckReservedLock(sqlite3_file *pFile, int *pResOut){
- rbu_file *p = (rbu_file *)pFile;
- return p->pReal->pMethods->xCheckReservedLock(p->pReal, pResOut);
-}
-
-/*
-** File control method. For custom operations on an rbuVfs-file.
-*/
-static int rbuVfsFileControl(sqlite3_file *pFile, int op, void *pArg){
- rbu_file *p = (rbu_file *)pFile;
- int (*xControl)(sqlite3_file*,int,void*) = p->pReal->pMethods->xFileControl;
- int rc;
-
- assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB)
- || p->openFlags & (SQLITE_OPEN_TRANSIENT_DB|SQLITE_OPEN_TEMP_JOURNAL)
- );
- if( op==SQLITE_FCNTL_RBU ){
- sqlite3rbu *pRbu = (sqlite3rbu*)pArg;
-
- /* First try to find another RBU vfs lower down in the vfs stack. If
- ** one is found, this vfs will operate in pass-through mode. The lower
- ** level vfs will do the special RBU handling. */
- rc = xControl(p->pReal, op, pArg);
-
- if( rc==SQLITE_NOTFOUND ){
- /* Now search for a zipvfs instance lower down in the VFS stack. If
- ** one is found, this is an error. */
- void *dummy = 0;
- rc = xControl(p->pReal, SQLITE_FCNTL_ZIPVFS, &dummy);
- if( rc==SQLITE_OK ){
- rc = SQLITE_ERROR;
- pRbu->zErrmsg = sqlite3_mprintf("rbu/zipvfs setup error");
- }else if( rc==SQLITE_NOTFOUND ){
- pRbu->pTargetFd = p;
- p->pRbu = pRbu;
- if( p->pWalFd ) p->pWalFd->pRbu = pRbu;
- rc = SQLITE_OK;
- }
- }
- return rc;
- }
-
- rc = xControl(p->pReal, op, pArg);
- if( rc==SQLITE_OK && op==SQLITE_FCNTL_VFSNAME ){
- rbu_vfs *pRbuVfs = p->pRbuVfs;
- char *zIn = *(char**)pArg;
- char *zOut = sqlite3_mprintf("rbu(%s)/%z", pRbuVfs->base.zName, zIn);
- *(char**)pArg = zOut;
- if( zOut==0 ) rc = SQLITE_NOMEM;
- }
-
- return rc;
-}
-
-/*
-** Return the sector-size in bytes for an rbuVfs-file.
-*/
-static int rbuVfsSectorSize(sqlite3_file *pFile){
- rbu_file *p = (rbu_file *)pFile;
- return p->pReal->pMethods->xSectorSize(p->pReal);
-}
-
-/*
-** Return the device characteristic flags supported by an rbuVfs-file.
-*/
-static int rbuVfsDeviceCharacteristics(sqlite3_file *pFile){
- rbu_file *p = (rbu_file *)pFile;
- return p->pReal->pMethods->xDeviceCharacteristics(p->pReal);
-}
-
-/*
-** Take or release a shared-memory lock.
-*/
-static int rbuVfsShmLock(sqlite3_file *pFile, int ofst, int n, int flags){
- rbu_file *p = (rbu_file*)pFile;
- sqlite3rbu *pRbu = p->pRbu;
- int rc = SQLITE_OK;
-
-#ifdef SQLITE_AMALGAMATION
- assert( WAL_CKPT_LOCK==1 );
-#endif
-
- assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
- if( pRbu && (pRbu->eStage==RBU_STAGE_OAL || pRbu->eStage==RBU_STAGE_MOVE) ){
- /* Magic number 1 is the WAL_CKPT_LOCK lock. Preventing SQLite from
- ** taking this lock also prevents any checkpoints from occurring.
- ** todo: really, it's not clear why this might occur, as
- ** wal_autocheckpoint ought to be turned off. */
- if( ofst==WAL_LOCK_CKPT && n==1 ) rc = SQLITE_BUSY;
- }else{
- int bCapture = 0;
- if( n==1 && (flags & SQLITE_SHM_EXCLUSIVE)
- && pRbu && pRbu->eStage==RBU_STAGE_CAPTURE
- && (ofst==WAL_LOCK_WRITE || ofst==WAL_LOCK_CKPT || ofst==WAL_LOCK_READ0)
- ){
- bCapture = 1;
- }
-
- if( bCapture==0 || 0==(flags & SQLITE_SHM_UNLOCK) ){
- rc = p->pReal->pMethods->xShmLock(p->pReal, ofst, n, flags);
- if( bCapture && rc==SQLITE_OK ){
- pRbu->mLock |= (1 << ofst);
- }
- }
- }
-
- return rc;
-}
-
-/*
-** Obtain a pointer to a mapping of a single 32KiB page of the *-shm file.
-*/
-static int rbuVfsShmMap(
- sqlite3_file *pFile,
- int iRegion,
- int szRegion,
- int isWrite,
- void volatile **pp
-){
- rbu_file *p = (rbu_file*)pFile;
- int rc = SQLITE_OK;
- int eStage = (p->pRbu ? p->pRbu->eStage : 0);
-
- /* If not in RBU_STAGE_OAL, allow this call to pass through. Or, if this
- ** rbu is in the RBU_STAGE_OAL state, use heap memory for *-shm space
- ** instead of a file on disk. */
- assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
- if( eStage==RBU_STAGE_OAL || eStage==RBU_STAGE_MOVE ){
- if( iRegion<=p->nShm ){
- int nByte = (iRegion+1) * sizeof(char*);
- char **apNew = (char**)sqlite3_realloc(p->apShm, nByte);
- if( apNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- memset(&apNew[p->nShm], 0, sizeof(char*) * (1 + iRegion - p->nShm));
- p->apShm = apNew;
- p->nShm = iRegion+1;
- }
- }
-
- if( rc==SQLITE_OK && p->apShm[iRegion]==0 ){
- char *pNew = (char*)sqlite3_malloc(szRegion);
- if( pNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- memset(pNew, 0, szRegion);
- p->apShm[iRegion] = pNew;
- }
- }
-
- if( rc==SQLITE_OK ){
- *pp = p->apShm[iRegion];
- }else{
- *pp = 0;
- }
- }else{
- assert( p->apShm==0 );
- rc = p->pReal->pMethods->xShmMap(p->pReal, iRegion, szRegion, isWrite, pp);
- }
-
- return rc;
-}
-
-/*
-** Memory barrier.
-*/
-static void rbuVfsShmBarrier(sqlite3_file *pFile){
- rbu_file *p = (rbu_file *)pFile;
- p->pReal->pMethods->xShmBarrier(p->pReal);
-}
-
-/*
-** The xShmUnmap method.
-*/
-static int rbuVfsShmUnmap(sqlite3_file *pFile, int delFlag){
- rbu_file *p = (rbu_file*)pFile;
- int rc = SQLITE_OK;
- int eStage = (p->pRbu ? p->pRbu->eStage : 0);
-
- assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) );
- if( eStage==RBU_STAGE_OAL || eStage==RBU_STAGE_MOVE ){
- /* no-op */
- }else{
- /* Release the checkpointer and writer locks */
- rbuUnlockShm(p);
- rc = p->pReal->pMethods->xShmUnmap(p->pReal, delFlag);
- }
- return rc;
-}
-
-/*
-** Given that zWal points to a buffer containing a wal file name passed to
-** either the xOpen() or xAccess() VFS method, return a pointer to the
-** file-handle opened by the same database connection on the corresponding
-** database file.
-*/
-static rbu_file *rbuFindMaindb(rbu_vfs *pRbuVfs, const char *zWal){
- rbu_file *pDb;
- sqlite3_mutex_enter(pRbuVfs->mutex);
- for(pDb=pRbuVfs->pMain; pDb && pDb->zWal!=zWal; pDb=pDb->pMainNext);
- sqlite3_mutex_leave(pRbuVfs->mutex);
- return pDb;
-}
-
-/*
-** Open an rbu file handle.
-*/
-static int rbuVfsOpen(
- sqlite3_vfs *pVfs,
- const char *zName,
- sqlite3_file *pFile,
- int flags,
- int *pOutFlags
-){
- static sqlite3_io_methods rbuvfs_io_methods = {
- 2, /* iVersion */
- rbuVfsClose, /* xClose */
- rbuVfsRead, /* xRead */
- rbuVfsWrite, /* xWrite */
- rbuVfsTruncate, /* xTruncate */
- rbuVfsSync, /* xSync */
- rbuVfsFileSize, /* xFileSize */
- rbuVfsLock, /* xLock */
- rbuVfsUnlock, /* xUnlock */
- rbuVfsCheckReservedLock, /* xCheckReservedLock */
- rbuVfsFileControl, /* xFileControl */
- rbuVfsSectorSize, /* xSectorSize */
- rbuVfsDeviceCharacteristics, /* xDeviceCharacteristics */
- rbuVfsShmMap, /* xShmMap */
- rbuVfsShmLock, /* xShmLock */
- rbuVfsShmBarrier, /* xShmBarrier */
- rbuVfsShmUnmap, /* xShmUnmap */
- 0, 0 /* xFetch, xUnfetch */
- };
- rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs;
- sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs;
- rbu_file *pFd = (rbu_file *)pFile;
- int rc = SQLITE_OK;
- const char *zOpen = zName;
-
- memset(pFd, 0, sizeof(rbu_file));
- pFd->pReal = (sqlite3_file*)&pFd[1];
- pFd->pRbuVfs = pRbuVfs;
- pFd->openFlags = flags;
- if( zName ){
- if( flags & SQLITE_OPEN_MAIN_DB ){
- /* A main database has just been opened. The following block sets
- ** (pFd->zWal) to point to a buffer owned by SQLite that contains
- ** the name of the *-wal file this db connection will use. SQLite
- ** happens to pass a pointer to this buffer when using xAccess()
- ** or xOpen() to operate on the *-wal file. */
- int n = strlen(zName);
- const char *z = &zName[n];
- if( flags & SQLITE_OPEN_URI ){
- int odd = 0;
- while( 1 ){
- if( z[0]==0 ){
- odd = 1 - odd;
- if( odd && z[1]==0 ) break;
- }
- z++;
- }
- z += 2;
- }else{
- while( *z==0 ) z++;
- }
- z += (n + 8 + 1);
- pFd->zWal = z;
- }
- else if( flags & SQLITE_OPEN_WAL ){
- rbu_file *pDb = rbuFindMaindb(pRbuVfs, zName);
- if( pDb ){
- if( pDb->pRbu && pDb->pRbu->eStage==RBU_STAGE_OAL ){
-          /* This call is to open a *-wal file. Instead, open the *-oal. This
- ** code ensures that the string passed to xOpen() is terminated by a
- ** pair of '\0' bytes in case the VFS attempts to extract a URI
- ** parameter from it. */
- int nCopy = strlen(zName);
- char *zCopy = sqlite3_malloc(nCopy+2);
- if( zCopy ){
- memcpy(zCopy, zName, nCopy);
- zCopy[nCopy-3] = 'o';
- zCopy[nCopy] = '\0';
- zCopy[nCopy+1] = '\0';
- zOpen = (const char*)(pFd->zDel = zCopy);
- }else{
- rc = SQLITE_NOMEM;
- }
- pFd->pRbu = pDb->pRbu;
- }
- pDb->pWalFd = pFd;
- }
- }
- }
-
- if( rc==SQLITE_OK ){
- rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, flags, pOutFlags);
- }
- if( pFd->pReal->pMethods ){
- /* The xOpen() operation has succeeded. Set the sqlite3_file.pMethods
- ** pointer and, if the file is a main database file, link it into the
- ** mutex protected linked list of all such files. */
- pFile->pMethods = &rbuvfs_io_methods;
- if( flags & SQLITE_OPEN_MAIN_DB ){
- sqlite3_mutex_enter(pRbuVfs->mutex);
- pFd->pMainNext = pRbuVfs->pMain;
- pRbuVfs->pMain = pFd;
- sqlite3_mutex_leave(pRbuVfs->mutex);
- }
- }else{
- sqlite3_free(pFd->zDel);
- }
-
- return rc;
-}
-
-/*
-** Delete the file located at zPath.
-*/
-static int rbuVfsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xDelete(pRealVfs, zPath, dirSync);
-}
-
-/*
-** Test for access permissions. Return true if the requested permission
-** is available, or false otherwise.
-*/
-static int rbuVfsAccess(
- sqlite3_vfs *pVfs,
- const char *zPath,
- int flags,
- int *pResOut
-){
- rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs;
- sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs;
- int rc;
-
- rc = pRealVfs->xAccess(pRealVfs, zPath, flags, pResOut);
-
- /* If this call is to check if a *-wal file associated with an RBU target
- ** database connection exists, and the RBU update is in RBU_STAGE_OAL,
- ** the following special handling is activated:
- **
- ** a) if the *-wal file does exist, return SQLITE_CANTOPEN. This
- ** ensures that the RBU extension never tries to update a database
- ** in wal mode, even if the first page of the database file has
- ** been damaged.
- **
- ** b) if the *-wal file does not exist, claim that it does anyway,
- ** causing SQLite to call xOpen() to open it. This call will also
- ** be intercepted (see the rbuVfsOpen() function) and the *-oal
- ** file opened instead.
- */
- if( rc==SQLITE_OK && flags==SQLITE_ACCESS_EXISTS ){
- rbu_file *pDb = rbuFindMaindb(pRbuVfs, zPath);
- if( pDb && pDb->pRbu && pDb->pRbu->eStage==RBU_STAGE_OAL ){
- if( *pResOut ){
- rc = SQLITE_CANTOPEN;
- }else{
- *pResOut = 1;
- }
- }
- }
-
- return rc;
-}
-
-/*
-** Populate buffer zOut with the full canonical pathname corresponding
-** to the pathname in zPath. zOut is guaranteed to point to a buffer
-** of at least (DEVSYM_MAX_PATHNAME+1) bytes.
-*/
-static int rbuVfsFullPathname(
- sqlite3_vfs *pVfs,
- const char *zPath,
- int nOut,
- char *zOut
-){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xFullPathname(pRealVfs, zPath, nOut, zOut);
-}
-
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-/*
-** Open the dynamic library located at zPath and return a handle.
-*/
-static void *rbuVfsDlOpen(sqlite3_vfs *pVfs, const char *zPath){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xDlOpen(pRealVfs, zPath);
-}
-
-/*
-** Populate the buffer zErrMsg (size nByte bytes) with a human readable
-** utf-8 string describing the most recent error associated with
-** dynamic libraries.
-*/
-static void rbuVfsDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- pRealVfs->xDlError(pRealVfs, nByte, zErrMsg);
-}
-
-/*
-** Return a pointer to the symbol zSym in the dynamic library handle pArg.
-*/
-static void (*rbuVfsDlSym(
- sqlite3_vfs *pVfs,
- void *pArg,
- const char *zSym
-))(void){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xDlSym(pRealVfs, pArg, zSym);
-}
-
-/*
-** Close the dynamic library handle pHandle.
-*/
-static void rbuVfsDlClose(sqlite3_vfs *pVfs, void *pHandle){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- pRealVfs->xDlClose(pRealVfs, pHandle);
-}
-#endif /* SQLITE_OMIT_LOAD_EXTENSION */
-
-/*
-** Populate the buffer pointed to by zBufOut with nByte bytes of
-** random data.
-*/
-static int rbuVfsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xRandomness(pRealVfs, nByte, zBufOut);
-}
-
-/*
-** Sleep for nMicro microseconds. Return the number of microseconds
-** actually slept.
-*/
-static int rbuVfsSleep(sqlite3_vfs *pVfs, int nMicro){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xSleep(pRealVfs, nMicro);
-}
-
-/*
-** Return the current time as a Julian Day number in *pTimeOut.
-*/
-static int rbuVfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){
- sqlite3_vfs *pRealVfs = ((rbu_vfs*)pVfs)->pRealVfs;
- return pRealVfs->xCurrentTime(pRealVfs, pTimeOut);
-}
-
-/*
-** No-op.
-*/
-static int rbuVfsGetLastError(sqlite3_vfs *pVfs, int a, char *b){
- return 0;
-}
-
-/*
-** Deregister and destroy an RBU vfs created by an earlier call to
-** sqlite3rbu_create_vfs().
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3rbu_destroy_vfs(const char *zName){
- sqlite3_vfs *pVfs = sqlite3_vfs_find(zName);
- if( pVfs && pVfs->xOpen==rbuVfsOpen ){
- sqlite3_mutex_free(((rbu_vfs*)pVfs)->mutex);
- sqlite3_vfs_unregister(pVfs);
- sqlite3_free(pVfs);
- }
-}
-
-/*
-** Create an RBU VFS named zName that accesses the underlying file-system
-** via existing VFS zParent. The new object is registered as a non-default
-** VFS with SQLite before returning.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const char *zParent){
-
- /* Template for VFS */
- static sqlite3_vfs vfs_template = {
- 1, /* iVersion */
- 0, /* szOsFile */
- 0, /* mxPathname */
- 0, /* pNext */
- 0, /* zName */
- 0, /* pAppData */
- rbuVfsOpen, /* xOpen */
- rbuVfsDelete, /* xDelete */
- rbuVfsAccess, /* xAccess */
- rbuVfsFullPathname, /* xFullPathname */
-
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
- rbuVfsDlOpen, /* xDlOpen */
- rbuVfsDlError, /* xDlError */
- rbuVfsDlSym, /* xDlSym */
- rbuVfsDlClose, /* xDlClose */
-#else
- 0, 0, 0, 0,
-#endif
-
- rbuVfsRandomness, /* xRandomness */
- rbuVfsSleep, /* xSleep */
- rbuVfsCurrentTime, /* xCurrentTime */
- rbuVfsGetLastError, /* xGetLastError */
- 0, /* xCurrentTimeInt64 (version 2) */
- 0, 0, 0 /* Unimplemented version 3 methods */
- };
-
- rbu_vfs *pNew = 0; /* Newly allocated VFS */
- int nName;
- int rc = SQLITE_OK;
-
- int nByte;
- nName = strlen(zName);
- nByte = sizeof(rbu_vfs) + nName + 1;
- pNew = (rbu_vfs*)sqlite3_malloc(nByte);
- if( pNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- sqlite3_vfs *pParent; /* Parent VFS */
- memset(pNew, 0, nByte);
- pParent = sqlite3_vfs_find(zParent);
- if( pParent==0 ){
- rc = SQLITE_NOTFOUND;
- }else{
- char *zSpace;
- memcpy(&pNew->base, &vfs_template, sizeof(sqlite3_vfs));
- pNew->base.mxPathname = pParent->mxPathname;
- pNew->base.szOsFile = sizeof(rbu_file) + pParent->szOsFile;
- pNew->pRealVfs = pParent;
- pNew->base.zName = (const char*)(zSpace = (char*)&pNew[1]);
- memcpy(zSpace, zName, nName);
-
- /* Allocate the mutex and register the new VFS (not as the default) */
- pNew->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
- if( pNew->mutex==0 ){
- rc = SQLITE_NOMEM;
- }else{
- rc = sqlite3_vfs_register(&pNew->base, 0);
- }
- }
-
- if( rc!=SQLITE_OK ){
- sqlite3_mutex_free(pNew->mutex);
- sqlite3_free(pNew);
- }
- }
-
- return rc;
-}
-
-
-/**************************************************************************/
-
-#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RBU) */
-
-/************** End of sqlite3rbu.c ******************************************/
-/************** Begin file dbstat.c ******************************************/
-/*
-** 2010 July 12
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains an implementation of the "dbstat" virtual table.
-**
-** The dbstat virtual table is used to extract low-level formatting
-** information from an SQLite database in order to implement the
-** "sqlite3_analyzer" utility. See the ../tool/spaceanal.tcl script
-** for an example implementation.
-**
-** Additional information is available on the "dbstat.html" page of the
-** official SQLite documentation.
-*/
-
-/* #include "sqliteInt.h" ** Requires access to internal data structures ** */
-#if (defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST)) \
- && !defined(SQLITE_OMIT_VIRTUALTABLE)
-
-/*
-** Page paths:
-**
-** The value of the 'path' column describes the path taken from the
-** root-node of the b-tree structure to each page. The value of the
-** root-node path is '/'.
-**
-** The value of the path for the left-most child page of the root of
-** a b-tree is '/000/'. (Btrees store content ordered from left to right
-** so the pages to the left have smaller keys than the pages to the right.)
-** The next to left-most child of the root page is
-** '/001', and so on, each sibling page identified by a 3-digit hex
-** value. The children of the 451st left-most sibling have paths such
-** as '/1c2/000/', '/1c2/001/' etc.
-**
-** Overflow pages are specified by appending a '+' character and a
-** six-digit hexadecimal value to the path to the cell they are linked
-** from. For example, the three overflow pages in a chain linked from
-** the left-most cell of the 451st child of the root page are identified
-** by the paths:
-**
-** '/1c2/000+000000' // First page in overflow chain
-** '/1c2/000+000001' // Second page in overflow chain
-** '/1c2/000+000002' // Third page in overflow chain
-**
-** If the paths are sorted using the BINARY collation sequence, then
-** the overflow pages associated with a cell will appear earlier in the
-** sort-order than its child page:
-**
-** '/1c2/000/' // Left-most child of 451st child of root
-*/
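-
-/* Illustrative note, not part of the original sources: once the module is
-** registered (see sqlite3DbstatRegister() below), the table can be queried
-** directly. A hedged example listing the pages of a hypothetical table "t1"
-** in path order:
-**
-**   SELECT path, pageno, pagetype, ncell, payload, unused
-**   FROM dbstat WHERE name='t1' ORDER BY path;
-*/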
-#define VTAB_SCHEMA \
- "CREATE TABLE xx( " \
- " name STRING, /* Name of table or index */" \
- " path INTEGER, /* Path to page from root */" \
- " pageno INTEGER, /* Page number */" \
- " pagetype STRING, /* 'internal', 'leaf' or 'overflow' */" \
- " ncell INTEGER, /* Cells on page (0 for overflow) */" \
- " payload INTEGER, /* Bytes of payload on this page */" \
- " unused INTEGER, /* Bytes of unused space on this page */" \
- " mx_payload INTEGER, /* Largest payload size of all cells */" \
- " pgoffset INTEGER, /* Offset of page in file */" \
- " pgsize INTEGER, /* Size of the page */" \
- " schema TEXT HIDDEN /* Database schema being analyzed */" \
- ");"
-
-
-typedef struct StatTable StatTable;
-typedef struct StatCursor StatCursor;
-typedef struct StatPage StatPage;
-typedef struct StatCell StatCell;
-
-struct StatCell {
- int nLocal; /* Bytes of local payload */
- u32 iChildPg; /* Child node (or 0 if this is a leaf) */
- int nOvfl; /* Entries in aOvfl[] */
- u32 *aOvfl; /* Array of overflow page numbers */
- int nLastOvfl; /* Bytes of payload on final overflow page */
- int iOvfl; /* Iterates through aOvfl[] */
-};
-
-struct StatPage {
- u32 iPgno;
- DbPage *pPg;
- int iCell;
-
- char *zPath; /* Path to this page */
-
- /* Variables populated by statDecodePage(): */
- u8 flags; /* Copy of flags byte */
- int nCell; /* Number of cells on page */
- int nUnused; /* Number of unused bytes on page */
- StatCell *aCell; /* Array of parsed cells */
- u32 iRightChildPg; /* Right-child page number (or 0) */
- int nMxPayload; /* Largest payload of any cell on this page */
-};
-
-struct StatCursor {
- sqlite3_vtab_cursor base;
- sqlite3_stmt *pStmt; /* Iterates through set of root pages */
- int isEof; /* After pStmt has returned SQLITE_DONE */
- int iDb; /* Schema used for this query */
-
- StatPage aPage[32];
- int iPage; /* Current entry in aPage[] */
-
- /* Values to return. */
- char *zName; /* Value of 'name' column */
- char *zPath; /* Value of 'path' column */
- u32 iPageno; /* Value of 'pageno' column */
- char *zPagetype; /* Value of 'pagetype' column */
- int nCell; /* Value of 'ncell' column */
- int nPayload; /* Value of 'payload' column */
- int nUnused; /* Value of 'unused' column */
- int nMxPayload; /* Value of 'mx_payload' column */
- i64 iOffset; /* Value of 'pgOffset' column */
- int szPage; /* Value of 'pgSize' column */
-};
-
-struct StatTable {
- sqlite3_vtab base;
- sqlite3 *db;
- int iDb; /* Index of database to analyze */
-};
-
-#ifndef get2byte
-# define get2byte(x) ((x)[0]<<8 | (x)[1])
-#endif
-
-/*
-** Connect to or create a statvfs virtual table.
-*/
-static int statConnect(
- sqlite3 *db,
- void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVtab,
- char **pzErr
-){
- StatTable *pTab = 0;
- int rc = SQLITE_OK;
- int iDb;
-
- if( argc>=4 ){
- iDb = sqlite3FindDbName(db, argv[3]);
- if( iDb<0 ){
- *pzErr = sqlite3_mprintf("no such database: %s", argv[3]);
- return SQLITE_ERROR;
- }
- }else{
- iDb = 0;
- }
- rc = sqlite3_declare_vtab(db, VTAB_SCHEMA);
- if( rc==SQLITE_OK ){
- pTab = (StatTable *)sqlite3_malloc64(sizeof(StatTable));
- if( pTab==0 ) rc = SQLITE_NOMEM;
- }
-
- assert( rc==SQLITE_OK || pTab==0 );
- if( rc==SQLITE_OK ){
- memset(pTab, 0, sizeof(StatTable));
- pTab->db = db;
- pTab->iDb = iDb;
- }
-
- *ppVtab = (sqlite3_vtab*)pTab;
- return rc;
-}
-
-/*
-** Disconnect from or destroy a statvfs virtual table.
-*/
-static int statDisconnect(sqlite3_vtab *pVtab){
- sqlite3_free(pVtab);
- return SQLITE_OK;
-}
-
-/*
-** There is no "best-index". This virtual table always does a linear
-** scan. However, a schema=? constraint should cause this table to
-** operate on a different database schema, so check for it.
-**
-** idxNum is normally 0, but will be 1 if a schema=? constraint exists.
-*/
-static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
- int i;
-
- pIdxInfo->estimatedCost = 1.0e6; /* Initial cost estimate */
-
- /* Look for a valid schema=? constraint. If found, change the idxNum to
- ** 1 and request the value of that constraint be sent to xFilter. And
- ** lower the cost estimate to encourage the constrained version to be
- ** used.
- */
- for(i=0; i<pIdxInfo->nConstraint; i++){
- if( pIdxInfo->aConstraint[i].usable==0 ) continue;
- if( pIdxInfo->aConstraint[i].op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
- if( pIdxInfo->aConstraint[i].iColumn!=10 ) continue;
- pIdxInfo->idxNum = 1;
- pIdxInfo->estimatedCost = 1.0;
- pIdxInfo->aConstraintUsage[i].argvIndex = 1;
- pIdxInfo->aConstraintUsage[i].omit = 1;
- break;
- }
-
-
- /* Records are always returned in ascending order of (name, path).
- ** If this will satisfy the client, set the orderByConsumed flag so that
- ** SQLite does not do an external sort.
- */
- if( ( pIdxInfo->nOrderBy==1
- && pIdxInfo->aOrderBy[0].iColumn==0
- && pIdxInfo->aOrderBy[0].desc==0
- ) ||
- ( pIdxInfo->nOrderBy==2
- && pIdxInfo->aOrderBy[0].iColumn==0
- && pIdxInfo->aOrderBy[0].desc==0
- && pIdxInfo->aOrderBy[1].iColumn==1
- && pIdxInfo->aOrderBy[1].desc==0
- )
- ){
- pIdxInfo->orderByConsumed = 1;
- }
-
- return SQLITE_OK;
-}
-
-/*
-** Open a new statvfs cursor.
-*/
-static int statOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
- StatTable *pTab = (StatTable *)pVTab;
- StatCursor *pCsr;
-
- pCsr = (StatCursor *)sqlite3_malloc64(sizeof(StatCursor));
- if( pCsr==0 ){
- return SQLITE_NOMEM;
- }else{
- memset(pCsr, 0, sizeof(StatCursor));
- pCsr->base.pVtab = pVTab;
- pCsr->iDb = pTab->iDb;
- }
-
- *ppCursor = (sqlite3_vtab_cursor *)pCsr;
- return SQLITE_OK;
-}
-
-static void statClearPage(StatPage *p){
- int i;
- if( p->aCell ){
- for(i=0; i<p->nCell; i++){
- sqlite3_free(p->aCell[i].aOvfl);
- }
- sqlite3_free(p->aCell);
- }
- sqlite3PagerUnref(p->pPg);
- sqlite3_free(p->zPath);
- memset(p, 0, sizeof(StatPage));
-}
-
-static void statResetCsr(StatCursor *pCsr){
- int i;
- sqlite3_reset(pCsr->pStmt);
- for(i=0; i<ArraySize(pCsr->aPage); i++){
- statClearPage(&pCsr->aPage[i]);
- }
- pCsr->iPage = 0;
- sqlite3_free(pCsr->zPath);
- pCsr->zPath = 0;
- pCsr->isEof = 0;
-}
-
-/*
-** Close a statvfs cursor.
-*/
-static int statClose(sqlite3_vtab_cursor *pCursor){
- StatCursor *pCsr = (StatCursor *)pCursor;
- statResetCsr(pCsr);
- sqlite3_finalize(pCsr->pStmt);
- sqlite3_free(pCsr);
- return SQLITE_OK;
-}
-
-static void getLocalPayload(
- int nUsable, /* Usable bytes per page */
- u8 flags, /* Page flags */
- int nTotal, /* Total record (payload) size */
- int *pnLocal /* OUT: Bytes stored locally */
-){
- int nLocal;
- int nMinLocal;
- int nMaxLocal;
-
- if( flags==0x0D ){ /* Table leaf node */
- nMinLocal = (nUsable - 12) * 32 / 255 - 23;
- nMaxLocal = nUsable - 35;
- }else{ /* Index interior and leaf nodes */
- nMinLocal = (nUsable - 12) * 32 / 255 - 23;
- nMaxLocal = (nUsable - 12) * 64 / 255 - 23;
- }
-
- nLocal = nMinLocal + (nTotal - nMinLocal) % (nUsable - 4);
- if( nLocal>nMaxLocal ) nLocal = nMinLocal;
- *pnLocal = nLocal;
-}
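-
-/* Illustrative note, not part of the original sources: a worked example of
-** the calculation above, assuming 4096 usable bytes per page and a table
-** leaf cell (flags==0x0D) carrying a 5000 byte record:
-**
-**   nMinLocal = (4096-12)*32/255 - 23          = 489
-**   nMaxLocal =  4096-35                       = 4061
-**   nLocal    =  489 + (5000-489) % (4096-4)   = 908  (<= nMaxLocal, kept)
-*/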
-
-static int statDecodePage(Btree *pBt, StatPage *p){
- int nUnused;
- int iOff;
- int nHdr;
- int isLeaf;
- int szPage;
-
- u8 *aData = sqlite3PagerGetData(p->pPg);
- u8 *aHdr = &aData[p->iPgno==1 ? 100 : 0];
-
- p->flags = aHdr[0];
- p->nCell = get2byte(&aHdr[3]);
- p->nMxPayload = 0;
-
- isLeaf = (p->flags==0x0A || p->flags==0x0D);
- nHdr = 12 - isLeaf*4 + (p->iPgno==1)*100;
-
- nUnused = get2byte(&aHdr[5]) - nHdr - 2*p->nCell;
- nUnused += (int)aHdr[7];
- iOff = get2byte(&aHdr[1]);
- while( iOff ){
- nUnused += get2byte(&aData[iOff+2]);
- iOff = get2byte(&aData[iOff]);
- }
- p->nUnused = nUnused;
- p->iRightChildPg = isLeaf ? 0 : sqlite3Get4byte(&aHdr[8]);
- szPage = sqlite3BtreeGetPageSize(pBt);
-
- if( p->nCell ){
- int i; /* Used to iterate through cells */
- int nUsable; /* Usable bytes per page */
-
- sqlite3BtreeEnter(pBt);
- nUsable = szPage - sqlite3BtreeGetReserveNoMutex(pBt);
- sqlite3BtreeLeave(pBt);
- p->aCell = sqlite3_malloc64((p->nCell+1) * sizeof(StatCell));
- if( p->aCell==0 ) return SQLITE_NOMEM;
- memset(p->aCell, 0, (p->nCell+1) * sizeof(StatCell));
-
- for(i=0; i<p->nCell; i++){
- StatCell *pCell = &p->aCell[i];
-
- iOff = get2byte(&aData[nHdr+i*2]);
- if( !isLeaf ){
- pCell->iChildPg = sqlite3Get4byte(&aData[iOff]);
- iOff += 4;
- }
- if( p->flags==0x05 ){
- /* A table interior node. nPayload==0. */
- }else{
- u32 nPayload; /* Bytes of payload total (local+overflow) */
- int nLocal; /* Bytes of payload stored locally */
- iOff += getVarint32(&aData[iOff], nPayload);
- if( p->flags==0x0D ){
- u64 dummy;
- iOff += sqlite3GetVarint(&aData[iOff], &dummy);
- }
- if( nPayload>(u32)p->nMxPayload ) p->nMxPayload = nPayload;
- getLocalPayload(nUsable, p->flags, nPayload, &nLocal);
- pCell->nLocal = nLocal;
- assert( nLocal>=0 );
- assert( nPayload>=(u32)nLocal );
- assert( nLocal<=(nUsable-35) );
- if( nPayload>(u32)nLocal ){
- int j;
- int nOvfl = ((nPayload - nLocal) + nUsable-4 - 1) / (nUsable - 4);
- pCell->nLastOvfl = (nPayload-nLocal) - (nOvfl-1) * (nUsable-4);
- pCell->nOvfl = nOvfl;
- pCell->aOvfl = sqlite3_malloc64(sizeof(u32)*nOvfl);
- if( pCell->aOvfl==0 ) return SQLITE_NOMEM;
- pCell->aOvfl[0] = sqlite3Get4byte(&aData[iOff+nLocal]);
- for(j=1; j<nOvfl; j++){
- int rc;
- u32 iPrev = pCell->aOvfl[j-1];
- DbPage *pPg = 0;
- rc = sqlite3PagerGet(sqlite3BtreePager(pBt), iPrev, &pPg);
- if( rc!=SQLITE_OK ){
- assert( pPg==0 );
- return rc;
- }
- pCell->aOvfl[j] = sqlite3Get4byte(sqlite3PagerGetData(pPg));
- sqlite3PagerUnref(pPg);
- }
- }
- }
- }
- }
-
- return SQLITE_OK;
-}
-
-/*
-** Populate the pCsr->iOffset and pCsr->szPage member variables, based on
-** the current value of pCsr->iPageno.
-*/
-static void statSizeAndOffset(StatCursor *pCsr){
- StatTable *pTab = (StatTable *)((sqlite3_vtab_cursor *)pCsr)->pVtab;
- Btree *pBt = pTab->db->aDb[pTab->iDb].pBt;
- Pager *pPager = sqlite3BtreePager(pBt);
- sqlite3_file *fd;
- sqlite3_int64 x[2];
-
- /* The default page size and offset */
- pCsr->szPage = sqlite3BtreeGetPageSize(pBt);
- pCsr->iOffset = (i64)pCsr->szPage * (pCsr->iPageno - 1);
-
- /* If connected to a ZIPVFS backend, override the page size and
- ** offset with actual values obtained from ZIPVFS.
- */
- fd = sqlite3PagerFile(pPager);
- x[0] = pCsr->iPageno;
- if( fd->pMethods!=0 && sqlite3OsFileControl(fd, 230440, &x)==SQLITE_OK ){
- pCsr->iOffset = x[0];
- pCsr->szPage = (int)x[1];
- }
-}
-
-/*
-** Move a statvfs cursor to the next entry in the file.
-*/
-static int statNext(sqlite3_vtab_cursor *pCursor){
- int rc;
- int nPayload;
- char *z;
- StatCursor *pCsr = (StatCursor *)pCursor;
- StatTable *pTab = (StatTable *)pCursor->pVtab;
- Btree *pBt = pTab->db->aDb[pCsr->iDb].pBt;
- Pager *pPager = sqlite3BtreePager(pBt);
-
- sqlite3_free(pCsr->zPath);
- pCsr->zPath = 0;
-
-statNextRestart:
- if( pCsr->aPage[0].pPg==0 ){
- rc = sqlite3_step(pCsr->pStmt);
- if( rc==SQLITE_ROW ){
- int nPage;
- u32 iRoot = (u32)sqlite3_column_int64(pCsr->pStmt, 1);
- sqlite3PagerPagecount(pPager, &nPage);
- if( nPage==0 ){
- pCsr->isEof = 1;
- return sqlite3_reset(pCsr->pStmt);
- }
- rc = sqlite3PagerGet(pPager, iRoot, &pCsr->aPage[0].pPg);
- pCsr->aPage[0].iPgno = iRoot;
- pCsr->aPage[0].iCell = 0;
- pCsr->aPage[0].zPath = z = sqlite3_mprintf("/");
- pCsr->iPage = 0;
- if( z==0 ) rc = SQLITE_NOMEM;
- }else{
- pCsr->isEof = 1;
- return sqlite3_reset(pCsr->pStmt);
- }
- }else{
-
- /* Page p itself has already been visited. */
- StatPage *p = &pCsr->aPage[pCsr->iPage];
-
- while( p->iCell<p->nCell ){
- StatCell *pCell = &p->aCell[p->iCell];
- if( pCell->iOvfl<pCell->nOvfl ){
- int nUsable;
- sqlite3BtreeEnter(pBt);
- nUsable = sqlite3BtreeGetPageSize(pBt) -
- sqlite3BtreeGetReserveNoMutex(pBt);
- sqlite3BtreeLeave(pBt);
- pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
- pCsr->iPageno = pCell->aOvfl[pCell->iOvfl];
- pCsr->zPagetype = "overflow";
- pCsr->nCell = 0;
- pCsr->nMxPayload = 0;
- pCsr->zPath = z = sqlite3_mprintf(
- "%s%.3x+%.6x", p->zPath, p->iCell, pCell->iOvfl
- );
- if( pCell->iOvfl<pCell->nOvfl-1 ){
- pCsr->nUnused = 0;
- pCsr->nPayload = nUsable - 4;
- }else{
- pCsr->nPayload = pCell->nLastOvfl;
- pCsr->nUnused = nUsable - 4 - pCsr->nPayload;
- }
- pCell->iOvfl++;
- statSizeAndOffset(pCsr);
- return z==0 ? SQLITE_NOMEM : SQLITE_OK;
- }
- if( p->iRightChildPg ) break;
- p->iCell++;
- }
-
- if( !p->iRightChildPg || p->iCell>p->nCell ){
- statClearPage(p);
- if( pCsr->iPage==0 ) return statNext(pCursor);
- pCsr->iPage--;
- goto statNextRestart; /* Tail recursion */
- }
- pCsr->iPage++;
- assert( p==&pCsr->aPage[pCsr->iPage-1] );
-
- if( p->iCell==p->nCell ){
- p[1].iPgno = p->iRightChildPg;
- }else{
- p[1].iPgno = p->aCell[p->iCell].iChildPg;
- }
- rc = sqlite3PagerGet(pPager, p[1].iPgno, &p[1].pPg);
- p[1].iCell = 0;
- p[1].zPath = z = sqlite3_mprintf("%s%.3x/", p->zPath, p->iCell);
- p->iCell++;
- if( z==0 ) rc = SQLITE_NOMEM;
- }
-
-
- /* Populate the StatCursor fields with the values to be returned
- ** by the xColumn() and xRowid() methods.
- */
- if( rc==SQLITE_OK ){
- int i;
- StatPage *p = &pCsr->aPage[pCsr->iPage];
- pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
- pCsr->iPageno = p->iPgno;
-
- rc = statDecodePage(pBt, p);
- if( rc==SQLITE_OK ){
- statSizeAndOffset(pCsr);
-
- switch( p->flags ){
- case 0x05: /* table internal */
- case 0x02: /* index internal */
- pCsr->zPagetype = "internal";
- break;
- case 0x0D: /* table leaf */
- case 0x0A: /* index leaf */
- pCsr->zPagetype = "leaf";
- break;
- default:
- pCsr->zPagetype = "corrupted";
- break;
- }
- pCsr->nCell = p->nCell;
- pCsr->nUnused = p->nUnused;
- pCsr->nMxPayload = p->nMxPayload;
- pCsr->zPath = z = sqlite3_mprintf("%s", p->zPath);
- if( z==0 ) rc = SQLITE_NOMEM;
- nPayload = 0;
- for(i=0; i<p->nCell; i++){
- nPayload += p->aCell[i].nLocal;
- }
- pCsr->nPayload = nPayload;
- }
- }
-
- return rc;
-}
-
-static int statEof(sqlite3_vtab_cursor *pCursor){
- StatCursor *pCsr = (StatCursor *)pCursor;
- return pCsr->isEof;
-}
-
-static int statFilter(
- sqlite3_vtab_cursor *pCursor,
- int idxNum, const char *idxStr,
- int argc, sqlite3_value **argv
-){
- StatCursor *pCsr = (StatCursor *)pCursor;
- StatTable *pTab = (StatTable*)(pCursor->pVtab);
- char *zSql;
- int rc = SQLITE_OK;
- char *zMaster;
-
- if( idxNum==1 ){
- const char *zDbase = (const char*)sqlite3_value_text(argv[0]);
- pCsr->iDb = sqlite3FindDbName(pTab->db, zDbase);
- if( pCsr->iDb<0 ){
- sqlite3_free(pCursor->pVtab->zErrMsg);
- pCursor->pVtab->zErrMsg = sqlite3_mprintf("no such schema: %s", zDbase);
- return pCursor->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
- }
- }else{
- pCsr->iDb = pTab->iDb;
- }
- statResetCsr(pCsr);
- sqlite3_finalize(pCsr->pStmt);
- pCsr->pStmt = 0;
- zMaster = pCsr->iDb==1 ? "sqlite_temp_master" : "sqlite_master";
- zSql = sqlite3_mprintf(
- "SELECT 'sqlite_master' AS name, 1 AS rootpage, 'table' AS type"
- " UNION ALL "
- "SELECT name, rootpage, type"
- " FROM \"%w\".%s WHERE rootpage!=0"
- " ORDER BY name", pTab->db->aDb[pCsr->iDb].zName, zMaster);
- if( zSql==0 ){
- return SQLITE_NOMEM;
- }else{
- rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pStmt, 0);
- sqlite3_free(zSql);
- }
-
- if( rc==SQLITE_OK ){
- rc = statNext(pCursor);
- }
- return rc;
-}
-
-static int statColumn(
- sqlite3_vtab_cursor *pCursor,
- sqlite3_context *ctx,
- int i
-){
- StatCursor *pCsr = (StatCursor *)pCursor;
- switch( i ){
- case 0: /* name */
- sqlite3_result_text(ctx, pCsr->zName, -1, SQLITE_TRANSIENT);
- break;
- case 1: /* path */
- sqlite3_result_text(ctx, pCsr->zPath, -1, SQLITE_TRANSIENT);
- break;
- case 2: /* pageno */
- sqlite3_result_int64(ctx, pCsr->iPageno);
- break;
- case 3: /* pagetype */
- sqlite3_result_text(ctx, pCsr->zPagetype, -1, SQLITE_STATIC);
- break;
- case 4: /* ncell */
- sqlite3_result_int(ctx, pCsr->nCell);
- break;
- case 5: /* payload */
- sqlite3_result_int(ctx, pCsr->nPayload);
- break;
- case 6: /* unused */
- sqlite3_result_int(ctx, pCsr->nUnused);
- break;
- case 7: /* mx_payload */
- sqlite3_result_int(ctx, pCsr->nMxPayload);
- break;
- case 8: /* pgoffset */
- sqlite3_result_int64(ctx, pCsr->iOffset);
- break;
- case 9: /* pgsize */
- sqlite3_result_int(ctx, pCsr->szPage);
- break;
- default: { /* schema */
- sqlite3 *db = sqlite3_context_db_handle(ctx);
- int iDb = pCsr->iDb;
- sqlite3_result_text(ctx, db->aDb[iDb].zName, -1, SQLITE_STATIC);
- break;
- }
- }
- return SQLITE_OK;
-}
-
-static int statRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
- StatCursor *pCsr = (StatCursor *)pCursor;
- *pRowid = pCsr->iPageno;
- return SQLITE_OK;
-}
-
-/*
-** Invoke this routine to register the "dbstat" virtual table module
-*/
-SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){
- static sqlite3_module dbstat_module = {
- 0, /* iVersion */
- statConnect, /* xCreate */
- statConnect, /* xConnect */
- statBestIndex, /* xBestIndex */
- statDisconnect, /* xDisconnect */
- statDisconnect, /* xDestroy */
- statOpen, /* xOpen - open a cursor */
- statClose, /* xClose - close a cursor */
- statFilter, /* xFilter - configure scan constraints */
- statNext, /* xNext - advance a cursor */
- statEof, /* xEof - check for end of scan */
- statColumn, /* xColumn - read data */
- statRowid, /* xRowid - read data */
- 0, /* xUpdate */
- 0, /* xBegin */
- 0, /* xSync */
- 0, /* xCommit */
- 0, /* xRollback */
- 0, /* xFindMethod */
- 0, /* xRename */
- };
- return sqlite3_create_module(db, "dbstat", &dbstat_module, 0);
-}
-#elif defined(SQLITE_ENABLE_DBSTAT_VTAB)
-SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ return SQLITE_OK; }
-#endif /* SQLITE_ENABLE_DBSTAT_VTAB */
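-
-/* Usage sketch (illustrative only; assumes a build in which the module
-** registered above is available, e.g. with SQLITE_ENABLE_DBSTAT_VTAB):
-**
-**     SELECT name, pageno, pagetype, ncell, payload
-**       FROM dbstat
-**      ORDER BY name, path;
-*/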
-
-/************** End of dbstat.c **********************************************/
-/************** Begin file json1.c *******************************************/
-/*
-** 2015-08-12
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This SQLite extension implements JSON functions. The interface is
-** modeled after MySQL JSON functions:
-**
-** https://dev.mysql.com/doc/refman/5.7/en/json.html
-**
-** For the time being, all JSON is stored as pure text. (We might add
-** a JSONB type in the future which stores a binary encoding of JSON in
-** a BLOB, but there is no support for JSONB in the current implementation.
-** This implementation parses JSON text at 250 MB/s, so it is hard to see
-** how JSONB might improve on that.)
-*/
-#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1)
-#if !defined(_SQLITEINT_H_)
-/* #include "sqlite3ext.h" */
-#endif
-SQLITE_EXTENSION_INIT1
-/* #include <assert.h> */
-/* #include <string.h> */
-/* #include <stdlib.h> */
-/* #include <stdarg.h> */
-
-#define UNUSED_PARAM(X) (void)(X)
-
-#ifndef LARGEST_INT64
-# define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32))
-# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64)
-#endif
-
-/*
-** Versions of isspace(), isalnum() and isdigit() to which it is safe
-** to pass signed char values.
-*/
-#ifdef sqlite3Isdigit
- /* Use the SQLite core versions if this routine is part of the
- ** SQLite amalgamation */
-# define safe_isdigit(x) sqlite3Isdigit(x)
-# define safe_isalnum(x) sqlite3Isalnum(x)
-#else
- /* Use the standard library for separate compilation */
-#include <ctype.h> /* amalgamator: keep */
-# define safe_isdigit(x) isdigit((unsigned char)(x))
-# define safe_isalnum(x) isalnum((unsigned char)(x))
-#endif
-
-/*
-** Growing our own isspace() routine this way is twice as fast as
-** the library isspace() function, resulting in a 7% overall performance
-** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
-*/
-static const char jsonIsSpace[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-#define safe_isspace(x) (jsonIsSpace[(unsigned char)x])
-
-#ifndef SQLITE_AMALGAMATION
- /* Unsigned integer types. These are already defined in the sqliteInt.h,
- ** but the definitions need to be repeated for separate compilation. */
- typedef sqlite3_uint64 u64;
- typedef unsigned int u32;
- typedef unsigned char u8;
-#endif
-
-/* Objects */
-typedef struct JsonString JsonString;
-typedef struct JsonNode JsonNode;
-typedef struct JsonParse JsonParse;
-
-/* An instance of this object represents a JSON string
-** under construction. Really, this is a generic string accumulator
-** that can be and is used to create strings other than JSON.
-*/
-struct JsonString {
- sqlite3_context *pCtx; /* Function context - put error messages here */
- char *zBuf; /* Append JSON content here */
- u64 nAlloc; /* Bytes of storage available in zBuf[] */
- u64 nUsed; /* Bytes of zBuf[] currently used */
- u8 bStatic; /* True if zBuf is static space */
- u8 bErr; /* True if an error has been encountered */
- char zSpace[100]; /* Initial static space */
-};
-
-/* JSON type values
-*/
-#define JSON_NULL 0
-#define JSON_TRUE 1
-#define JSON_FALSE 2
-#define JSON_INT 3
-#define JSON_REAL 4
-#define JSON_STRING 5
-#define JSON_ARRAY 6
-#define JSON_OBJECT 7
-
-/* The "subtype" set for JSON values */
-#define JSON_SUBTYPE 74 /* Ascii for "J" */
-
-/*
-** Names of the various JSON types:
-*/
-static const char * const jsonType[] = {
- "null", "true", "false", "integer", "real", "text", "array", "object"
-};
-
-/* Bit values for the JsonNode.jnFlag field
-*/
-#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
-#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
-#define JNODE_REMOVE 0x04 /* Do not output */
-#define JNODE_REPLACE 0x08 /* Replace with JsonNode.iVal */
-#define JNODE_APPEND 0x10 /* More ARRAY/OBJECT entries at u.iAppend */
-#define JNODE_LABEL 0x20 /* Is a label of an object */
-
-
-/* A single node of parsed JSON
-*/
-struct JsonNode {
- u8 eType; /* One of the JSON_ type values */
- u8 jnFlags; /* JNODE flags */
- u8 iVal; /* Replacement value when JNODE_REPLACE */
- u32 n; /* Bytes of content, or number of sub-nodes */
- union {
- const char *zJContent; /* Content for INT, REAL, and STRING */
- u32 iAppend; /* More terms for ARRAY and OBJECT */
- u32 iKey; /* Key for ARRAY objects in json_tree() */
- } u;
-};
-
-/* A completely parsed JSON string
-*/
-struct JsonParse {
- u32 nNode; /* Number of slots of aNode[] used */
- u32 nAlloc; /* Number of slots of aNode[] allocated */
- JsonNode *aNode; /* Array of nodes containing the parse */
- const char *zJson; /* Original JSON string */
- u32 *aUp; /* Index of parent of each node */
- u8 oom; /* Set to true if out of memory */
- u8 nErr; /* Number of errors seen */
-};
-
-/**************************************************************************
-** Utility routines for dealing with JsonString objects
-**************************************************************************/
-
-/* Set the JsonString object to an empty string
-*/
-static void jsonZero(JsonString *p){
- p->zBuf = p->zSpace;
- p->nAlloc = sizeof(p->zSpace);
- p->nUsed = 0;
- p->bStatic = 1;
-}
-
-/* Initialize the JsonString object
-*/
-static void jsonInit(JsonString *p, sqlite3_context *pCtx){
- p->pCtx = pCtx;
- p->bErr = 0;
- jsonZero(p);
-}
-
-
-/* Free all allocated memory and reset the JsonString object back to its
-** initial state.
-*/
-static void jsonReset(JsonString *p){
- if( !p->bStatic ) sqlite3_free(p->zBuf);
- jsonZero(p);
-}
-
-
-/* Report an out-of-memory (OOM) condition
-*/
-static void jsonOom(JsonString *p){
- p->bErr = 1;
- sqlite3_result_error_nomem(p->pCtx);
- jsonReset(p);
-}
-
-/* Enlarge pJson->zBuf so that it can hold at least N more bytes.
-** Return zero on success. Return non-zero on an OOM error
-*/
-static int jsonGrow(JsonString *p, u32 N){
- u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
- char *zNew;
- if( p->bStatic ){
- if( p->bErr ) return 1;
- zNew = sqlite3_malloc64(nTotal);
- if( zNew==0 ){
- jsonOom(p);
- return SQLITE_NOMEM;
- }
- memcpy(zNew, p->zBuf, (size_t)p->nUsed);
- p->zBuf = zNew;
- p->bStatic = 0;
- }else{
- zNew = sqlite3_realloc64(p->zBuf, nTotal);
- if( zNew==0 ){
- jsonOom(p);
- return SQLITE_NOMEM;
- }
- p->zBuf = zNew;
- }
- p->nAlloc = nTotal;
- return SQLITE_OK;
-}
-
-/* Append N bytes from zIn onto the end of the JsonString string.
-*/
-static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
- if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return;
- memcpy(p->zBuf+p->nUsed, zIn, N);
- p->nUsed += N;
-}
-
-/* Append formatted text (not to exceed N bytes) to the JsonString.
-*/
-static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
- va_list ap;
- if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
- va_start(ap, zFormat);
- sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
- va_end(ap);
- p->nUsed += (int)strlen(p->zBuf+p->nUsed);
-}
-
-/* Append a single character
-*/
-static void jsonAppendChar(JsonString *p, char c){
- if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return;
- p->zBuf[p->nUsed++] = c;
-}
-
-/* Append a comma separator to the output buffer, if the previous
-** character is not '[' or '{'.
-*/
-static void jsonAppendSeparator(JsonString *p){
- char c;
- if( p->nUsed==0 ) return;
- c = p->zBuf[p->nUsed-1];
- if( c!='[' && c!='{' ) jsonAppendChar(p, ',');
-}
-
-/* Append the N-byte string in zIn to the end of the JsonString string
-** under construction. Enclose the string in "..." and escape
-** any double-quotes or backslash characters contained within the
-** string.
-*/
-static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( (N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0 ) return;
- p->zBuf[p->nUsed++] = '"';
- for(i=0; i<N; i++){
- char c = zIn[i];
- if( c=='"' || c=='\\' ){
- if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
- p->zBuf[p->nUsed++] = '\\';
- }
- p->zBuf[p->nUsed++] = c;
- }
- p->zBuf[p->nUsed++] = '"';
- assert( p->nUsed<p->nAlloc );
-}
-
-/*
-** Append a function parameter value to the JSON string under
-** construction.
-*/
-static void jsonAppendValue(
- JsonString *p, /* Append to this JSON string */
- sqlite3_value *pValue /* Value to append */
-){
- switch( sqlite3_value_type(pValue) ){
- case SQLITE_NULL: {
- jsonAppendRaw(p, "null", 4);
- break;
- }
- case SQLITE_INTEGER:
- case SQLITE_FLOAT: {
- const char *z = (const char*)sqlite3_value_text(pValue);
- u32 n = (u32)sqlite3_value_bytes(pValue);
- jsonAppendRaw(p, z, n);
- break;
- }
- case SQLITE_TEXT: {
- const char *z = (const char*)sqlite3_value_text(pValue);
- u32 n = (u32)sqlite3_value_bytes(pValue);
- if( sqlite3_value_subtype(pValue)==JSON_SUBTYPE ){
- jsonAppendRaw(p, z, n);
- }else{
- jsonAppendString(p, z, n);
- }
- break;
- }
- default: {
- if( p->bErr==0 ){
- sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
- p->bErr = 1;
- jsonReset(p);
- }
- break;
- }
- }
-}
-
-
-/* Make the JSON in p the result of the SQL function.
-*/
-static void jsonResult(JsonString *p){
- if( p->bErr==0 ){
- sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
- p->bStatic ? SQLITE_TRANSIENT : sqlite3_free,
- SQLITE_UTF8);
- jsonZero(p);
- }
- assert( p->bStatic );
-}
-
-/**************************************************************************
-** Utility routines for dealing with JsonNode and JsonParse objects
-**************************************************************************/
-
-/*
-** Return the number of consecutive JsonNode slots needed to represent
-** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
-** OBJECT types, the number might be larger.
-**
-** Appended elements are not counted. The value returned is the number
-** by which the JsonNode counter should increment in order to go to the
-** next peer value.
-*/
-static u32 jsonNodeSize(JsonNode *pNode){
- return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
-}
-
-/*
-** Reclaim all memory allocated by a JsonParse object. But do not
-** delete the JsonParse object itself.
-*/
-static void jsonParseReset(JsonParse *pParse){
- sqlite3_free(pParse->aNode);
- pParse->aNode = 0;
- pParse->nNode = 0;
- pParse->nAlloc = 0;
- sqlite3_free(pParse->aUp);
- pParse->aUp = 0;
-}
-
-/*
-** Convert the JsonNode pNode into a pure JSON string and
-** append that string to pOut.  Substructure is also included.  Values
-** flagged with JNODE_REPLACE are taken from the aReplace[] array.
-*/
-static void jsonRenderNode(
- JsonNode *pNode, /* The node to render */
- JsonString *pOut, /* Write JSON here */
- sqlite3_value **aReplace /* Replacement values */
-){
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- jsonAppendRaw(pOut, "null", 4);
- break;
- }
- case JSON_TRUE: {
- jsonAppendRaw(pOut, "true", 4);
- break;
- }
- case JSON_FALSE: {
- jsonAppendRaw(pOut, "false", 5);
- break;
- }
- case JSON_STRING: {
- if( pNode->jnFlags & JNODE_RAW ){
- jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
- break;
- }
- /* Fall through into the next case */
- }
- case JSON_REAL:
- case JSON_INT: {
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- break;
- }
- case JSON_ARRAY: {
- u32 j = 1;
- jsonAppendChar(pOut, '[');
- for(;;){
- while( j<=pNode->n ){
- if( pNode[j].jnFlags & (JNODE_REMOVE|JNODE_REPLACE) ){
- if( pNode[j].jnFlags & JNODE_REPLACE ){
- jsonAppendSeparator(pOut);
- jsonAppendValue(pOut, aReplace[pNode[j].iVal]);
- }
- }else{
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- }
- j += jsonNodeSize(&pNode[j]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, ']');
- break;
- }
- case JSON_OBJECT: {
- u32 j = 1;
- jsonAppendChar(pOut, '{');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- jsonAppendChar(pOut, ':');
- if( pNode[j+1].jnFlags & JNODE_REPLACE ){
- jsonAppendValue(pOut, aReplace[pNode[j+1].iVal]);
- }else{
- jsonRenderNode(&pNode[j+1], pOut, aReplace);
- }
- }
- j += 1 + jsonNodeSize(&pNode[j+1]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, '}');
- break;
- }
- }
-}
-
-/*
-** Return a JsonNode and all its descendants as a JSON string.
-*/
-static void jsonReturnJson(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- JsonString s;
- jsonInit(&s, pCtx);
- jsonRenderNode(pNode, &s, aReplace);
- jsonResult(&s);
- sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
-}
-
-/*
-** Make the JsonNode the return value of the function.
-*/
-static void jsonReturn(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- sqlite3_result_null(pCtx);
- break;
- }
- case JSON_TRUE: {
- sqlite3_result_int(pCtx, 1);
- break;
- }
- case JSON_FALSE: {
- sqlite3_result_int(pCtx, 0);
- break;
- }
- case JSON_INT: {
- sqlite3_int64 i = 0;
- const char *z = pNode->u.zJContent;
- if( z[0]=='-' ){ z++; }
- while( z[0]>='0' && z[0]<='9' ){
- unsigned v = *(z++) - '0';
- if( i>=LARGEST_INT64/10 ){
- if( i>LARGEST_INT64/10 ) goto int_as_real;
- if( z[0]>='0' && z[0]<='9' ) goto int_as_real;
- if( v==9 ) goto int_as_real;
- if( v==8 ){
- if( pNode->u.zJContent[0]=='-' ){
- sqlite3_result_int64(pCtx, SMALLEST_INT64);
- goto int_done;
- }else{
- goto int_as_real;
- }
- }
- }
- i = i*10 + v;
- }
- if( pNode->u.zJContent[0]=='-' ){ i = -i; }
- sqlite3_result_int64(pCtx, i);
- int_done:
- break;
- int_as_real: /* fall through to real */;
- }
- case JSON_REAL: {
- double r;
-#ifdef SQLITE_AMALGAMATION
- const char *z = pNode->u.zJContent;
- sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
-#else
- r = strtod(pNode->u.zJContent, 0);
-#endif
- sqlite3_result_double(pCtx, r);
- break;
- }
- case JSON_STRING: {
-#if 0 /* Never happens because JNODE_RAW is only set by json_set(),
- ** json_insert() and json_replace() and those routines do not
- ** call jsonReturn() */
- if( pNode->jnFlags & JNODE_RAW ){
- sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
- SQLITE_TRANSIENT);
- }else
-#endif
- assert( (pNode->jnFlags & JNODE_RAW)==0 );
- if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
- /* JSON formatted without any backslash-escapes */
- sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
- SQLITE_TRANSIENT);
- }else{
- /* Translate JSON formatted string into raw text */
- u32 i;
- u32 n = pNode->n;
- const char *z = pNode->u.zJContent;
- char *zOut;
- u32 j;
- zOut = sqlite3_malloc( n+1 );
- if( zOut==0 ){
- sqlite3_result_error_nomem(pCtx);
- break;
- }
- for(i=1, j=0; i<n-1; i++){
- char c = z[i];
- if( c!='\\' ){
- zOut[j++] = c;
- }else{
- c = z[++i];
- if( c=='u' ){
- u32 v = 0, k;
- for(k=0; k<4 && i<n-2; i++, k++){
- c = z[i+1];
- if( c>='0' && c<='9' ) v = v*16 + c - '0';
- else if( c>='A' && c<='F' ) v = v*16 + c - 'A' + 10;
- else if( c>='a' && c<='f' ) v = v*16 + c - 'a' + 10;
- else break;
- }
- if( v==0 ) break;
- if( v<=0x7f ){
- zOut[j++] = (char)v;
- }else if( v<=0x7ff ){
- zOut[j++] = (char)(0xc0 | (v>>6));
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- zOut[j++] = (char)(0xe0 | (v>>12));
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }
- }else{
- if( c=='b' ){
- c = '\b';
- }else if( c=='f' ){
- c = '\f';
- }else if( c=='n' ){
- c = '\n';
- }else if( c=='r' ){
- c = '\r';
- }else if( c=='t' ){
- c = '\t';
- }
- zOut[j++] = c;
- }
- }
- }
- zOut[j] = 0;
- sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
- }
- break;
- }
- case JSON_ARRAY:
- case JSON_OBJECT: {
- jsonReturnJson(pNode, pCtx, aReplace);
- break;
- }
- }
-}
-
-/* Forward reference */
-static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define JSON_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define JSON_NOINLINE __declspec(noinline)
-#else
-# define JSON_NOINLINE
-#endif
-
-
-static JSON_NOINLINE int jsonParseAddNodeExpand(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- u32 nNew;
- JsonNode *pNew;
- assert( pParse->nNode>=pParse->nAlloc );
- if( pParse->oom ) return -1;
- nNew = pParse->nAlloc*2 + 10;
- pNew = sqlite3_realloc(pParse->aNode, sizeof(JsonNode)*nNew);
- if( pNew==0 ){
- pParse->oom = 1;
- return -1;
- }
- pParse->nAlloc = nNew;
- pParse->aNode = pNew;
- assert( pParse->nNode<pParse->nAlloc );
- return jsonParseAddNode(pParse, eType, n, zContent);
-}
-
-/*
-** Create a new JsonNode instance based on the arguments and append that
-** instance to the JsonParse. Return the index in pParse->aNode[] of the
-** new node, or -1 if a memory allocation fails.
-*/
-static int jsonParseAddNode(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- JsonNode *p;
- if( pParse->nNode>=pParse->nAlloc ){
- return jsonParseAddNodeExpand(pParse, eType, n, zContent);
- }
- p = &pParse->aNode[pParse->nNode];
- p->eType = (u8)eType;
- p->jnFlags = 0;
- p->iVal = 0;
- p->n = n;
- p->u.zJContent = zContent;
- return pParse->nNode++;
-}
-
-/*
-** Parse a single JSON value which begins at pParse->zJson[i]. Return the
-** index of the first character past the end of the value parsed.
-**
-** Return negative for a syntax error. Special cases: return -2 if the
-** first non-whitespace character is '}' and return -3 if the first
-** non-whitespace character is ']'.
-*/
-static int jsonParseValue(JsonParse *pParse, u32 i){
- char c;
- u32 j;
- int iThis;
- int x;
- JsonNode *pNode;
- while( safe_isspace(pParse->zJson[i]) ){ i++; }
- if( (c = pParse->zJson[i])=='{' ){
- /* Parse object */
- iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- if( iThis<0 ) return -1;
- for(j=i+1;;j++){
- while( safe_isspace(pParse->zJson[j]) ){ j++; }
- x = jsonParseValue(pParse, j);
- if( x<0 ){
- if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1;
- return -1;
- }
- if( pParse->oom ) return -1;
- pNode = &pParse->aNode[pParse->nNode-1];
- if( pNode->eType!=JSON_STRING ) return -1;
- pNode->jnFlags |= JNODE_LABEL;
- j = x;
- while( safe_isspace(pParse->zJson[j]) ){ j++; }
- if( pParse->zJson[j]!=':' ) return -1;
- j++;
- x = jsonParseValue(pParse, j);
- if( x<0 ) return -1;
- j = x;
- while( safe_isspace(pParse->zJson[j]) ){ j++; }
- c = pParse->zJson[j];
- if( c==',' ) continue;
- if( c!='}' ) return -1;
- break;
- }
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
- return j+1;
- }else if( c=='[' ){
- /* Parse array */
- iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- if( iThis<0 ) return -1;
- for(j=i+1;;j++){
- while( safe_isspace(pParse->zJson[j]) ){ j++; }
- x = jsonParseValue(pParse, j);
- if( x<0 ){
- if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1;
- return -1;
- }
- j = x;
- while( safe_isspace(pParse->zJson[j]) ){ j++; }
- c = pParse->zJson[j];
- if( c==',' ) continue;
- if( c!=']' ) return -1;
- break;
- }
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
- return j+1;
- }else if( c=='"' ){
- /* Parse string */
- u8 jnFlags = 0;
- j = i+1;
- for(;;){
- c = pParse->zJson[j];
- if( c==0 ) return -1;
- if( c=='\\' ){
- c = pParse->zJson[++j];
- if( c==0 ) return -1;
- jnFlags = JNODE_ESCAPE;
- }else if( c=='"' ){
- break;
- }
- j++;
- }
- jsonParseAddNode(pParse, JSON_STRING, j+1-i, &pParse->zJson[i]);
- if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags;
- return j+1;
- }else if( c=='n'
- && strncmp(pParse->zJson+i,"null",4)==0
- && !safe_isalnum(pParse->zJson[i+4]) ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return i+4;
- }else if( c=='t'
- && strncmp(pParse->zJson+i,"true",4)==0
- && !safe_isalnum(pParse->zJson[i+4]) ){
- jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
- return i+4;
- }else if( c=='f'
- && strncmp(pParse->zJson+i,"false",5)==0
- && !safe_isalnum(pParse->zJson[i+5]) ){
- jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
- return i+5;
- }else if( c=='-' || (c>='0' && c<='9') ){
- /* Parse number */
- u8 seenDP = 0;
- u8 seenE = 0;
- j = i+1;
- for(;; j++){
- c = pParse->zJson[j];
- if( c>='0' && c<='9' ) continue;
- if( c=='.' ){
- if( pParse->zJson[j-1]=='-' ) return -1;
- if( seenDP ) return -1;
- seenDP = 1;
- continue;
- }
- if( c=='e' || c=='E' ){
- if( pParse->zJson[j-1]<'0' ) return -1;
- if( seenE ) return -1;
- seenDP = seenE = 1;
- c = pParse->zJson[j+1];
- if( c=='+' || c=='-' ){
- j++;
- c = pParse->zJson[j+1];
- }
- if( c<'0' || c>'9' ) return -1;
- continue;
- }
- break;
- }
- if( pParse->zJson[j-1]<'0' ) return -1;
- jsonParseAddNode(pParse, seenDP ? JSON_REAL : JSON_INT,
- j - i, &pParse->zJson[i]);
- return j;
- }else if( c=='}' ){
- return -2; /* End of {...} */
- }else if( c==']' ){
- return -3; /* End of [...] */
- }else if( c==0 ){
- return 0; /* End of file */
- }else{
- return -1; /* Syntax error */
- }
-}
-
-/*
-** Parse a complete JSON string. Return 0 on success or non-zero if there
-** are any errors. If an error occurs, free all memory associated with
-** pParse.
-**
-** pParse is uninitialized when this routine is called.
-*/
-static int jsonParse(
- JsonParse *pParse, /* Initialize and fill this JsonParse object */
- sqlite3_context *pCtx, /* Report errors here */
- const char *zJson /* Input JSON text to be parsed */
-){
- int i;
- memset(pParse, 0, sizeof(*pParse));
- if( zJson==0 ) return 1;
- pParse->zJson = zJson;
- i = jsonParseValue(pParse, 0);
- if( pParse->oom ) i = -1;
- if( i>0 ){
- while( safe_isspace(zJson[i]) ) i++;
- if( zJson[i] ) i = -1;
- }
- if( i<=0 ){
- if( pCtx!=0 ){
- if( pParse->oom ){
- sqlite3_result_error_nomem(pCtx);
- }else{
- sqlite3_result_error(pCtx, "malformed JSON", -1);
- }
- }
- jsonParseReset(pParse);
- return 1;
- }
- return 0;
-}
-
-/* Mark node i of pParse as being a child of iParent. Call recursively
-** to fill in all the descendants of node i.
-*/
-static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
- JsonNode *pNode = &pParse->aNode[i];
- u32 j;
- pParse->aUp[i] = iParent;
- switch( pNode->eType ){
- case JSON_ARRAY: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
- jsonParseFillInParentage(pParse, i+j, i);
- }
- break;
- }
- case JSON_OBJECT: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
- pParse->aUp[i+j] = i;
- jsonParseFillInParentage(pParse, i+j+1, i);
- }
- break;
- }
- default: {
- break;
- }
- }
-}
-
-/*
-** Compute the parentage of all nodes in a completed parse.
-*/
-static int jsonParseFindParents(JsonParse *pParse){
- u32 *aUp;
- assert( pParse->aUp==0 );
- aUp = pParse->aUp = sqlite3_malloc( sizeof(u32)*pParse->nNode );
- if( aUp==0 ){
- pParse->oom = 1;
- return SQLITE_NOMEM;
- }
- jsonParseFillInParentage(pParse, 0, 0);
- return SQLITE_OK;
-}
-
-/*
-** Compare the OBJECT label at pNode against zKey,nKey. Return true on
-** a match.
-*/
-static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->n!=nKey ) return 0;
- return strncmp(pNode->u.zJContent, zKey, nKey)==0;
- }else{
- if( pNode->n!=nKey+2 ) return 0;
- return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
- }
-}
-
-/* forward declaration */
-static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
-
-/*
-** Search along zPath to find the node specified. Return a pointer
-** to that node, or NULL if zPath is malformed or if there is no such
-** node.
-**
-** If pApnd!=0, then try to append new nodes to complete zPath if it is
-** possible to do so and if no existing node corresponds to zPath. If
-** new nodes are appended *pApnd is set to 1.
-*/
-static JsonNode *jsonLookupStep(
- JsonParse *pParse, /* The JSON to search */
- u32 iRoot, /* Begin the search at this node */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- const char **pzErr /* Make *pzErr point to any syntax error in zPath */
-){
- u32 i, j, nKey;
- const char *zKey;
- JsonNode *pRoot = &pParse->aNode[iRoot];
- if( zPath[0]==0 ) return pRoot;
- if( zPath[0]=='.' ){
- if( pRoot->eType!=JSON_OBJECT ) return 0;
- zPath++;
- if( zPath[0]=='"' ){
- zKey = zPath + 1;
- for(i=1; zPath[i] && zPath[i]!='"'; i++){}
- nKey = i-1;
- if( zPath[i] ){
- i++;
- }else{
- *pzErr = zPath;
- return 0;
- }
- }else{
- zKey = zPath;
- for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
- nKey = i;
- }
- if( nKey==0 ){
- *pzErr = zPath;
- return 0;
- }
- j = 1;
- for(;;){
- while( j<=pRoot->n ){
- if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
- return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
- }
- j++;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( pApnd ){
- u32 iStart, iLabel;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- iLabel = jsonParseAddNode(pParse, JSON_STRING, i, zPath);
- zPath += i;
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
- }
- return pNode;
- }
- }else if( zPath[0]=='[' && safe_isdigit(zPath[1]) ){
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- i = 0;
- j = 1;
- while( safe_isdigit(zPath[j]) ){
- i = i*10 + zPath[j] - '0';
- j++;
- }
- if( zPath[j]!=']' ){
- *pzErr = zPath;
- return 0;
- }
- zPath += j + 1;
- j = 1;
- for(;;){
- while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){
- if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( j<=pRoot->n ){
- return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
- }
- if( i==0 && pApnd ){
- u32 iStart;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- }
- return pNode;
- }
- }else{
- *pzErr = zPath;
- }
- return 0;
-}
-
-/*
-** Append content to pParse that will complete zPath. Return a pointer
-** to the inserted node, or return NULL if the append fails.
-*/
-static JsonNode *jsonLookupAppend(
- JsonParse *pParse, /* Append content to the JSON parse */
- const char *zPath, /* Description of content to append */
- int *pApnd, /* Set this flag to 1 */
- const char **pzErr /* Make this point to any syntax error */
-){
- *pApnd = 1;
- if( zPath[0]==0 ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
- }
- if( zPath[0]=='.' ){
- jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- }else if( strncmp(zPath,"[0]",3)==0 ){
- jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- }else{
- return 0;
- }
- if( pParse->oom ) return 0;
- return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
-}
-
-/*
-** Return the text of a syntax error message on a JSON path. Space is
-** obtained from sqlite3_malloc().
-*/
-static char *jsonPathSyntaxError(const char *zErr){
- return sqlite3_mprintf("JSON path error near '%q'", zErr);
-}
-
-/*
-** Do a node lookup using zPath. Return a pointer to the node on success.
-** Return NULL if not found or if there is an error.
-**
-** On an error, write an error message into pCtx and increment the
-** pParse->nErr counter.
-**
-** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
-** nodes are appended.
-*/
-static JsonNode *jsonLookup(
- JsonParse *pParse, /* The JSON to search */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- sqlite3_context *pCtx /* Report errors here, if not NULL */
-){
- const char *zErr = 0;
- JsonNode *pNode = 0;
- char *zMsg;
-
- if( zPath==0 ) return 0;
- if( zPath[0]!='$' ){
- zErr = zPath;
- goto lookup_err;
- }
- zPath++;
- pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
- if( zErr==0 ) return pNode;
-
-lookup_err:
- pParse->nErr++;
- assert( zErr!=0 && pCtx!=0 );
- zMsg = jsonPathSyntaxError(zErr);
- if( zMsg ){
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
- }else{
- sqlite3_result_error_nomem(pCtx);
- }
- return 0;
-}
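-
-/* Path syntax sketch (illustrative): paths accepted by jsonLookup() begin
-** with '$' and are built from object and array steps, for example:
-**
-**     $                      the whole document
-**     $.store.book           member "book" of member "store"
-**     $.store.book[2]        third element of that array
-**     $."key.with.dots"      double-quoted label containing '.' or '['
-*/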
-
-
-/*
-** Report the wrong number of arguments for json_insert(), json_replace()
-** or json_set().
-*/
-static void jsonWrongNumArgs(
- sqlite3_context *pCtx,
- const char *zFuncName
-){
- char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
- zFuncName);
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
-}
-
-
-/****************************************************************************
-** SQL functions used for testing and debugging
-****************************************************************************/
-
-#ifdef SQLITE_DEBUG
-/*
-** The json_parse(JSON) function returns a string which describes
-** a parse of the JSON provided. Or it returns NULL if JSON is not
-** well-formed.
-*/
-static void jsonParseFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonString s; /* Output string - not real JSON */
- JsonParse x; /* The parse */
- u32 i;
-
- assert( argc==1 );
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- jsonParseFindParents(&x);
- jsonInit(&s, ctx);
- for(i=0; i<x.nNode; i++){
- const char *zType;
- if( x.aNode[i].jnFlags & JNODE_LABEL ){
- assert( x.aNode[i].eType==JSON_STRING );
- zType = "label";
- }else{
- zType = jsonType[x.aNode[i].eType];
- }
- jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d",
- i, zType, x.aNode[i].n, x.aUp[i]);
- if( x.aNode[i].u.zJContent!=0 ){
- jsonAppendRaw(&s, " ", 1);
- jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n);
- }
- jsonAppendRaw(&s, "\n", 1);
- }
- jsonParseReset(&x);
- jsonResult(&s);
-}
-
-/*
-** The json_test1(JSON) function returns true (1) if the input is JSON
-** text generated by another json function.  It returns false (0) if the
-** input is not known to be JSON.
-*/
-static void jsonTest1Func(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- UNUSED_PARAM(argc);
- sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE);
-}
-#endif /* SQLITE_DEBUG */
-
-/****************************************************************************
-** SQL function implementations
-****************************************************************************/
-
-/*
-** Implementation of the json_array(VALUE,...) function. Return a JSON
-** array that contains all values given in arguments. Or if any argument
-** is a BLOB, throw an error.
-*/
-static void jsonArrayFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- int i;
- JsonString jx;
-
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '[');
- for(i=0; i<argc; i++){
- jsonAppendSeparator(&jx);
- jsonAppendValue(&jx, argv[i]);
- }
- jsonAppendChar(&jx, ']');
- jsonResult(&jx);
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
-}
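-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_array(1, 2, '3', 4);        -- '[1,2,"3",4]'
-*/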
-
-
-/*
-** json_array_length(JSON)
-** json_array_length(JSON, PATH)
-**
-** Return the number of elements in the JSON array identified by PATH, or
-** in the top-level JSON array if no PATH argument is given.
-** Return 0 if the value identified is not a well-formed JSON array.
-*/
-static void jsonArrayLengthFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- sqlite3_int64 n = 0;
- u32 i;
- JsonNode *pNode;
-
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- if( argc==2 ){
- const char *zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- }else{
- pNode = x.aNode;
- }
- if( pNode==0 ){
- x.nErr = 1;
- }else if( pNode->eType==JSON_ARRAY ){
- assert( (pNode->jnFlags & JNODE_APPEND)==0 );
- for(i=1; i<=pNode->n; n++){
- i += jsonNodeSize(&pNode[i]);
- }
- }
- if( x.nErr==0 ) sqlite3_result_int64(ctx, n);
- jsonParseReset(&x);
-}
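-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_array_length('[1,2,3,4]');                  -- 4
-**     SELECT json_array_length('{"one":[1,2,3]}', '$.one');   -- 3
-*/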
-
-/*
-** json_extract(JSON, PATH, ...)
-**
-** Return the element described by PATH. Return NULL if there is no
-** PATH element. If there are multiple PATHs, then return a JSON array
-** with the result from each path. Throw an error if the JSON or any PATH
-** is malformed.
-*/
-static void jsonExtractFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- JsonString jx;
- int i;
-
- if( argc<2 ) return;
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '[');
- for(i=1; i<argc; i++){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) break;
- if( argc>2 ){
- jsonAppendSeparator(&jx);
- if( pNode ){
- jsonRenderNode(pNode, &jx, 0);
- }else{
- jsonAppendRaw(&jx, "null", 4);
- }
- }else if( pNode ){
- jsonReturn(pNode, ctx, 0);
- }
- }
- if( argc>2 && i==argc ){
- jsonAppendChar(&jx, ']');
- jsonResult(&jx);
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
- }
- jsonReset(&jx);
- jsonParseReset(&x);
-}
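-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c[2].f');  -- 7
-**     SELECT json_extract('{"a":2,"c":[4,5]}', '$.c', '$.a');        -- '[[4,5],2]'
-*/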
-
-/*
-** Implementation of the json_object(NAME,VALUE,...) function. Return a JSON
-** object that contains all name/value pairs given as arguments.  Or if any name
-** is not a string or if any value is a BLOB, throw an error.
-*/
-static void jsonObjectFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- int i;
- JsonString jx;
- const char *z;
- u32 n;
-
- if( argc&1 ){
- sqlite3_result_error(ctx, "json_object() requires an even number "
- "of arguments", -1);
- return;
- }
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '{');
- for(i=0; i<argc; i+=2){
- if( sqlite3_value_type(argv[i])!=SQLITE_TEXT ){
- sqlite3_result_error(ctx, "json_object() labels must be TEXT", -1);
- jsonReset(&jx);
- return;
- }
- jsonAppendSeparator(&jx);
- z = (const char*)sqlite3_value_text(argv[i]);
- n = (u32)sqlite3_value_bytes(argv[i]);
- jsonAppendString(&jx, z, n);
- jsonAppendChar(&jx, ':');
- jsonAppendValue(&jx, argv[i+1]);
- }
- jsonAppendChar(&jx, '}');
- jsonResult(&jx);
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
-}
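-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_object('a', 2, 'c', 'three');   -- '{"a":2,"c":"three"}'
-*/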
-
-
-/*
-** json_remove(JSON, PATH, ...)
-**
-** Remove the named elements from JSON and return the result.  Malformed
-** JSON or PATH arguments result in an error.
-*/
-static void jsonRemoveFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
- if( argc<1 ) return;
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i++){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- if( zPath==0 ) goto remove_done;
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto remove_done;
- if( pNode ) pNode->jnFlags |= JNODE_REMOVE;
- }
- if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){
- jsonReturnJson(x.aNode, ctx, 0);
- }
-remove_done:
- jsonParseReset(&x);
-}
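-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_remove('[0,1,2,3,4]', '$[2]');      -- '[0,1,3,4]'
-**     SELECT json_remove('{"x":25,"y":42}', '$.y');   -- '{"x":25}'
-*/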
-
-/*
-** json_replace(JSON, PATH, VALUE, ...)
-**
-** Replace the value at PATH with VALUE. If PATH does not already exist,
-** this routine is a no-op. If JSON or PATH is malformed, throw an error.
-*/
-static void jsonReplaceFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
- if( argc<1 ) return;
- if( (argc&1)==0 ) {
- jsonWrongNumArgs(ctx, "replace");
- return;
- }
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto replace_err;
- if( pNode ){
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- pNode->iVal = (u8)(i+1);
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- sqlite3_result_value(ctx, argv[x.aNode[0].iVal]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-replace_err:
- jsonParseReset(&x);
-}
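-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_replace('{"a":2,"c":4}', '$.a', 99);  -- '{"a":99,"c":4}'
-**     SELECT json_replace('{"a":2,"c":4}', '$.e', 99);  -- '{"a":2,"c":4}'  (no such path)
-*/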
-
-/*
-** json_set(JSON, PATH, VALUE, ...)
-**
-** Set the value at PATH to VALUE. Create the PATH if it does not already
-** exist. Overwrite existing values that do exist.
-** If JSON or PATH is malformed, throw an error.
-**
-** json_insert(JSON, PATH, VALUE, ...)
-**
-** Create PATH and initialize it to VALUE. If PATH already exists, this
-** routine is a no-op. If JSON or PATH is malformed, throw an error.
-*/
-static void jsonSetFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
- int bApnd;
- int bIsSet = *(int*)sqlite3_user_data(ctx);
-
- if( argc<1 ) return;
- if( (argc&1)==0 ) {
- jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
- return;
- }
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- bApnd = 0;
- pNode = jsonLookup(&x, zPath, &bApnd, ctx);
- if( x.oom ){
- sqlite3_result_error_nomem(ctx);
- goto jsonSetDone;
- }else if( x.nErr ){
- goto jsonSetDone;
- }else if( pNode && (bApnd || bIsSet) ){
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- pNode->iVal = (u8)(i+1);
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- sqlite3_result_value(ctx, argv[x.aNode[0].iVal]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-jsonSetDone:
- jsonParseReset(&x);
-}
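-
-/* Usage sketch (illustrative values) contrasting the two entry points:
-**
-**     SELECT json_insert('{"a":2,"c":4}', '$.a', 99);   -- '{"a":2,"c":4}'   (path exists: no-op)
-**     SELECT json_set('{"a":2,"c":4}', '$.a', 99);      -- '{"a":99,"c":4}'
-**     SELECT json_set('{"a":2,"c":4}', '$.e', 99);      -- '{"a":2,"c":4,"e":99}'
-*/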
-
-/*
-** json_type(JSON)
-** json_type(JSON, PATH)
-**
-** Return the "type" of the element at PATH, or of the top-level element
-** if no PATH argument is given.  Throw an error if either the JSON or
-** PATH inputs are not well-formed.
-*/
-static void jsonTypeFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- const char *zPath;
- JsonNode *pNode;
-
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- if( argc==2 ){
- zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- }else{
- pNode = x.aNode;
- }
- if( pNode ){
- sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
- }
- jsonParseReset(&x);
-}
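-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_type('{"a":[2,3.5,true,null]}');            -- 'object'
-**     SELECT json_type('{"a":[2,3.5,true,null]}', '$.a[1]');  -- 'real'
-*/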
-
-/*
-** json_valid(JSON)
-**
-** Return 1 if JSON is a well-formed JSON string according to RFC-7159.
-** Return 0 otherwise.
-*/
-static void jsonValidFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- int rc = 0;
-
- UNUSED_PARAM(argc);
- if( jsonParse(&x, 0, (const char*)sqlite3_value_text(argv[0]))==0 ){
- rc = 1;
- }
- jsonParseReset(&x);
- sqlite3_result_int(ctx, rc);
-}
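-
-/* Usage sketch (illustrative values):
-**
-**     SELECT json_valid('{"x":35}');   -- 1
-**     SELECT json_valid('{"x":35');    -- 0
-*/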
-
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-/****************************************************************************
-** The json_each virtual table
-****************************************************************************/
-typedef struct JsonEachCursor JsonEachCursor;
-struct JsonEachCursor {
- sqlite3_vtab_cursor base; /* Base class - must be first */
- u32 iRowid; /* The rowid */
- u32 iBegin; /* The first node of the scan */
- u32 i; /* Index in sParse.aNode[] of current row */
- u32 iEnd; /* EOF when i equals or exceeds this value */
- u8 eType; /* Type of top-level element */
- u8 bRecursive; /* True for json_tree(). False for json_each() */
- char *zJson; /* Input JSON */
- char *zRoot; /* Path by which to filter zJson */
- JsonParse sParse; /* Parse of the input JSON */
-};
-
-/* Constructor for the json_each virtual table */
-static int jsonEachConnect(
- sqlite3 *db,
- void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVtab,
- char **pzErr
-){
- sqlite3_vtab *pNew;
- int rc;
-
-/* Column numbers */
-#define JEACH_KEY 0
-#define JEACH_VALUE 1
-#define JEACH_TYPE 2
-#define JEACH_ATOM 3
-#define JEACH_ID 4
-#define JEACH_PARENT 5
-#define JEACH_FULLKEY 6
-#define JEACH_PATH 7
-#define JEACH_JSON 8
-#define JEACH_ROOT 9
-
- UNUSED_PARAM(pzErr);
- UNUSED_PARAM(argv);
- UNUSED_PARAM(argc);
- UNUSED_PARAM(pAux);
- rc = sqlite3_declare_vtab(db,
- "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
- "json HIDDEN,root HIDDEN)");
- if( rc==SQLITE_OK ){
- pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
- if( pNew==0 ) return SQLITE_NOMEM;
- memset(pNew, 0, sizeof(*pNew));
- }
- return rc;
-}
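-
-/* Usage sketch (illustrative values): the virtual table declared above is
-** typically used as a table-valued function, for example:
-**
-**     SELECT key, value, type FROM json_each('{"a":2,"c":[4,5]}');
-**     -- a | 2     | integer
-**     -- c | [4,5] | array
-*/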
-
-/* destructor for json_each virtual table */
-static int jsonEachDisconnect(sqlite3_vtab *pVtab){
- sqlite3_free(pVtab);
- return SQLITE_OK;
-}
-
-/* constructor for a JsonEachCursor object for json_each(). */
-static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
- JsonEachCursor *pCur;
-
- UNUSED_PARAM(p);
- pCur = sqlite3_malloc( sizeof(*pCur) );
- if( pCur==0 ) return SQLITE_NOMEM;
- memset(pCur, 0, sizeof(*pCur));
- *ppCursor = &pCur->base;
- return SQLITE_OK;
-}
-
-/* constructor for a JsonEachCursor object for json_tree(). */
-static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
- int rc = jsonEachOpenEach(p, ppCursor);
- if( rc==SQLITE_OK ){
- JsonEachCursor *pCur = (JsonEachCursor*)*ppCursor;
- pCur->bRecursive = 1;
- }
- return rc;
-}
-
-/* Reset a JsonEachCursor back to its original state. Free any memory
-** held. */
-static void jsonEachCursorReset(JsonEachCursor *p){
- sqlite3_free(p->zJson);
- sqlite3_free(p->zRoot);
- jsonParseReset(&p->sParse);
- p->iRowid = 0;
- p->i = 0;
- p->iEnd = 0;
- p->eType = 0;
- p->zJson = 0;
- p->zRoot = 0;
-}
-
-/* Destructor for a jsonEachCursor object */
-static int jsonEachClose(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- jsonEachCursorReset(p);
- sqlite3_free(cur);
- return SQLITE_OK;
-}
-
-/* Return TRUE if the jsonEachCursor object has been advanced off the end
-** of the JSON object */
-static int jsonEachEof(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- return p->i >= p->iEnd;
-}
-
-/* Advance the cursor to the next element, for both json_each() and
-** json_tree() */
-static int jsonEachNext(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- if( p->bRecursive ){
- if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
- p->i++;
- p->iRowid++;
- if( p->i<p->iEnd ){
- u32 iUp = p->sParse.aUp[p->i];
- JsonNode *pUp = &p->sParse.aNode[iUp];
- p->eType = pUp->eType;
- if( pUp->eType==JSON_ARRAY ){
- if( iUp==p->i-1 ){
- pUp->u.iKey = 0;
- }else{
- pUp->u.iKey++;
- }
- }
- }
- }else{
- switch( p->eType ){
- case JSON_ARRAY: {
- p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
- p->iRowid++;
- break;
- }
- case JSON_OBJECT: {
- p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
- p->iRowid++;
- break;
- }
- default: {
- p->i = p->iEnd;
- break;
- }
- }
- }
- return SQLITE_OK;
-}
-
-/* Append the name of the path for element i to pStr
-*/
-static void jsonEachComputePath(
- JsonEachCursor *p, /* The cursor */
- JsonString *pStr, /* Write the path here */
- u32 i /* Path to this element */
-){
- JsonNode *pNode, *pUp;
- u32 iUp;
- if( i==0 ){
- jsonAppendChar(pStr, '$');
- return;
- }
- iUp = p->sParse.aUp[i];
- jsonEachComputePath(p, pStr, iUp);
- pNode = &p->sParse.aNode[i];
- pUp = &p->sParse.aNode[iUp];
- if( pUp->eType==JSON_ARRAY ){
- jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
- }else{
- assert( pUp->eType==JSON_OBJECT );
- if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
- assert( pNode->eType==JSON_STRING );
- assert( pNode->jnFlags & JNODE_LABEL );
- jsonPrintf(pNode->n+1, pStr, ".%.*s", pNode->n-2, pNode->u.zJContent+1);
- }
-}
-
-/* Return the value of a column */
-static int jsonEachColumn(
- sqlite3_vtab_cursor *cur, /* The cursor */
- sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
- int i /* Which column to return */
-){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- JsonNode *pThis = &p->sParse.aNode[p->i];
- switch( i ){
- case JEACH_KEY: {
- if( p->i==0 ) break;
- if( p->eType==JSON_OBJECT ){
- jsonReturn(pThis, ctx, 0);
- }else if( p->eType==JSON_ARRAY ){
- u32 iKey;
- if( p->bRecursive ){
- if( p->iRowid==0 ) break;
- iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
- }else{
- iKey = p->iRowid;
- }
- sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
- }
- break;
- }
- case JEACH_VALUE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- jsonReturn(pThis, ctx, 0);
- break;
- }
- case JEACH_TYPE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
- break;
- }
- case JEACH_ATOM: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- if( pThis->eType>=JSON_ARRAY ) break;
- jsonReturn(pThis, ctx, 0);
- break;
- }
- case JEACH_ID: {
- sqlite3_result_int64(ctx,
- (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
- break;
- }
- case JEACH_PARENT: {
- if( p->i>p->iBegin && p->bRecursive ){
- sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
- }
- break;
- }
- case JEACH_FULLKEY: {
- JsonString x;
- jsonInit(&x, ctx);
- if( p->bRecursive ){
- jsonEachComputePath(p, &x, p->i);
- }else{
- if( p->zRoot ){
- jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
- }else{
- jsonAppendChar(&x, '$');
- }
- if( p->eType==JSON_ARRAY ){
- jsonPrintf(30, &x, "[%d]", p->iRowid);
- }else{
- jsonPrintf(pThis->n, &x, ".%.*s", pThis->n-2, pThis->u.zJContent+1);
- }
- }
- jsonResult(&x);
- break;
- }
- case JEACH_PATH: {
- if( p->bRecursive ){
- JsonString x;
- jsonInit(&x, ctx);
- jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
- jsonResult(&x);
- break;
- }
- /* For json_each() path and root are the same so fall through
- ** into the root case */
- }
- case JEACH_ROOT: {
- const char *zRoot = p->zRoot;
- if( zRoot==0 ) zRoot = "$";
- sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
- break;
- }
- case JEACH_JSON: {
- assert( i==JEACH_JSON );
- sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
- break;
- }
- }
- return SQLITE_OK;
-}
-
-/* Return the current rowid value */
-static int jsonEachRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- *pRowid = p->iRowid;
- return SQLITE_OK;
-}
-
-/* The query strategy is to look for an equality constraint on the json
-** column. Without such a constraint, the table cannot operate. idxNum is
-** 1 if the constraint is found, 3 if the constraint and zRoot are found,
-** and 0 otherwise.
-*/
-static int jsonEachBestIndex(
- sqlite3_vtab *tab,
- sqlite3_index_info *pIdxInfo
-){
- int i;
- int jsonIdx = -1;
- int rootIdx = -1;
- const struct sqlite3_index_constraint *pConstraint;
-
- UNUSED_PARAM(tab);
- pConstraint = pIdxInfo->aConstraint;
- for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
- if( pConstraint->usable==0 ) continue;
- if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
- switch( pConstraint->iColumn ){
- case JEACH_JSON: jsonIdx = i; break;
- case JEACH_ROOT: rootIdx = i; break;
- default: /* no-op */ break;
- }
- }
- if( jsonIdx<0 ){
- pIdxInfo->idxNum = 0;
- pIdxInfo->estimatedCost = 1e99;
- }else{
- pIdxInfo->estimatedCost = 1.0;
- pIdxInfo->aConstraintUsage[jsonIdx].argvIndex = 1;
- pIdxInfo->aConstraintUsage[jsonIdx].omit = 1;
- if( rootIdx<0 ){
- pIdxInfo->idxNum = 1;
- }else{
- pIdxInfo->aConstraintUsage[rootIdx].argvIndex = 2;
- pIdxInfo->aConstraintUsage[rootIdx].omit = 1;
- pIdxInfo->idxNum = 3;
- }
- }
- return SQLITE_OK;
-}
-
-/* Start a search on a new JSON string */
-static int jsonEachFilter(
- sqlite3_vtab_cursor *cur,
- int idxNum, const char *idxStr,
- int argc, sqlite3_value **argv
-){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- const char *z;
- const char *zRoot = 0;
- sqlite3_int64 n;
-
- UNUSED_PARAM(idxStr);
- UNUSED_PARAM(argc);
- jsonEachCursorReset(p);
- if( idxNum==0 ) return SQLITE_OK;
- z = (const char*)sqlite3_value_text(argv[0]);
- if( z==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[0]);
- p->zJson = sqlite3_malloc64( n+1 );
- if( p->zJson==0 ) return SQLITE_NOMEM;
- memcpy(p->zJson, z, (size_t)n+1);
- if( jsonParse(&p->sParse, 0, p->zJson) ){
- int rc = SQLITE_NOMEM;
- if( p->sParse.oom==0 ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
- if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
- }
- jsonEachCursorReset(p);
- return rc;
- }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
- jsonEachCursorReset(p);
- return SQLITE_NOMEM;
- }else{
- JsonNode *pNode = 0;
- if( idxNum==3 ){
- const char *zErr = 0;
- zRoot = (const char*)sqlite3_value_text(argv[1]);
- if( zRoot==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[1]);
- p->zRoot = sqlite3_malloc64( n+1 );
- if( p->zRoot==0 ) return SQLITE_NOMEM;
- memcpy(p->zRoot, zRoot, (size_t)n+1);
- if( zRoot[0]!='$' ){
- zErr = zRoot;
- }else{
- pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
- }
- if( zErr ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
- jsonEachCursorReset(p);
- return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
- }else if( pNode==0 ){
- return SQLITE_OK;
- }
- }else{
- pNode = p->sParse.aNode;
- }
- p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
- p->eType = pNode->eType;
- if( p->eType>=JSON_ARRAY ){
- pNode->u.iKey = 0;
- p->iEnd = p->i + pNode->n + 1;
- if( p->bRecursive ){
- p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
- if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
- p->i--;
- }
- }else{
- p->i++;
- }
- }else{
- p->iEnd = p->i+1;
- }
- }
- return SQLITE_OK;
-}
-
-/* The methods of the json_each virtual table */
-static sqlite3_module jsonEachModule = {
- 0, /* iVersion */
- 0, /* xCreate */
- jsonEachConnect, /* xConnect */
- jsonEachBestIndex, /* xBestIndex */
- jsonEachDisconnect, /* xDisconnect */
- 0, /* xDestroy */
- jsonEachOpenEach, /* xOpen - open a cursor */
- jsonEachClose, /* xClose - close a cursor */
- jsonEachFilter, /* xFilter - configure scan constraints */
- jsonEachNext, /* xNext - advance a cursor */
- jsonEachEof, /* xEof - check for end of scan */
- jsonEachColumn, /* xColumn - read data */
- jsonEachRowid, /* xRowid - read data */
- 0, /* xUpdate */
- 0, /* xBegin */
- 0, /* xSync */
- 0, /* xCommit */
- 0, /* xRollback */
- 0, /* xFindMethod */
- 0, /* xRename */
- 0, /* xSavepoint */
- 0, /* xRelease */
- 0 /* xRollbackTo */
-};
-
-/* The methods of the json_tree virtual table. */
-static sqlite3_module jsonTreeModule = {
- 0, /* iVersion */
- 0, /* xCreate */
- jsonEachConnect, /* xConnect */
- jsonEachBestIndex, /* xBestIndex */
- jsonEachDisconnect, /* xDisconnect */
- 0, /* xDestroy */
- jsonEachOpenTree, /* xOpen - open a cursor */
- jsonEachClose, /* xClose - close a cursor */
- jsonEachFilter, /* xFilter - configure scan constraints */
- jsonEachNext, /* xNext - advance a cursor */
- jsonEachEof, /* xEof - check for end of scan */
- jsonEachColumn, /* xColumn - read data */
- jsonEachRowid, /* xRowid - read data */
- 0, /* xUpdate */
- 0, /* xBegin */
- 0, /* xSync */
- 0, /* xCommit */
- 0, /* xRollback */
- 0, /* xFindMethod */
- 0, /* xRename */
- 0, /* xSavepoint */
- 0, /* xRelease */
- 0 /* xRollbackTo */
-};
-#endif /* SQLITE_OMIT_VIRTUALTABLE */
-
-/****************************************************************************
-** The following routines are the only publicly visible identifiers in this
-** file.  Call the following routines in order to register the various SQL
-** functions and the virtual tables implemented by this file.
-****************************************************************************/
-
-SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){
- int rc = SQLITE_OK;
- unsigned int i;
- static const struct {
- const char *zName;
- int nArg;
- int flag;
- void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
- } aFunc[] = {
- { "json", 1, 0, jsonRemoveFunc },
- { "json_array", -1, 0, jsonArrayFunc },
- { "json_array_length", 1, 0, jsonArrayLengthFunc },
- { "json_array_length", 2, 0, jsonArrayLengthFunc },
- { "json_extract", -1, 0, jsonExtractFunc },
- { "json_insert", -1, 0, jsonSetFunc },
- { "json_object", -1, 0, jsonObjectFunc },
- { "json_remove", -1, 0, jsonRemoveFunc },
- { "json_replace", -1, 0, jsonReplaceFunc },
- { "json_set", -1, 1, jsonSetFunc },
- { "json_type", 1, 0, jsonTypeFunc },
- { "json_type", 2, 0, jsonTypeFunc },
- { "json_valid", 1, 0, jsonValidFunc },
-
-#if SQLITE_DEBUG
- /* DEBUG and TESTING functions */
- { "json_parse", 1, 0, jsonParseFunc },
- { "json_test1", 1, 0, jsonTest1Func },
-#endif
- };
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- static const struct {
- const char *zName;
- sqlite3_module *pModule;
- } aMod[] = {
- { "json_each", &jsonEachModule },
- { "json_tree", &jsonTreeModule },
- };
-#endif
- for(i=0; i<sizeof(aFunc)/sizeof(aFunc[0]) && rc==SQLITE_OK; i++){
- rc = sqlite3_create_function(db, aFunc[i].zName, aFunc[i].nArg,
- SQLITE_UTF8 | SQLITE_DETERMINISTIC,
- (void*)&aFunc[i].flag,
- aFunc[i].xFunc, 0, 0);
- }
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- for(i=0; i<sizeof(aMod)/sizeof(aMod[0]) && rc==SQLITE_OK; i++){
- rc = sqlite3_create_module(db, aMod[i].zName, aMod[i].pModule, 0);
- }
-#endif
- return rc;
-}
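-
-/* Illustrative sketch (not part of the original source): once the routines
-** above have registered the JSON1 SQL functions and virtual tables, an
-** application can exercise them through ordinary SQL. The JSON literals
-** below are hypothetical and serve only as an example; the block is kept
-** inside "#if 0" so that it is never compiled. */
-#if 0
-static int json1UsageExample(sqlite3 *db){
-  /* json_extract() and json_each are used like any other function/table */
-  return sqlite3_exec(db,
-      "SELECT json_extract('{\"a\":[1,2,3]}', '$.a[1]');"
-      "SELECT key, value FROM json_each('{\"x\":1,\"y\":2}');",
-      0, 0, 0);
-}
-#endif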
-
-
-#ifndef SQLITE_CORE
-#ifdef _WIN32
-__declspec(dllexport)
-#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_json_init(
- sqlite3 *db,
- char **pzErrMsg,
- const sqlite3_api_routines *pApi
-){
- SQLITE_EXTENSION_INIT2(pApi);
- (void)pzErrMsg; /* Unused parameter */
- return sqlite3Json1Init(db);
-}
-#endif
-#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1) */
-
-/************** End of json1.c ***********************************************/
-/************** Begin file fts5.c ********************************************/
-
-
-#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5)
-
-#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
-# define NDEBUG 1
-#endif
-#if defined(NDEBUG) && defined(SQLITE_DEBUG)
-# undef NDEBUG
-#endif
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** Interfaces to extend FTS5. Using the interfaces defined in this file,
-** FTS5 may be extended with:
-**
-** * custom tokenizers, and
-** * custom auxiliary functions.
-*/
-
-
-#ifndef _FTS5_H
-#define _FTS5_H
-
-/* #include "sqlite3.h" */
-
-#if 0
-extern "C" {
-#endif
-
-/*************************************************************************
-** CUSTOM AUXILIARY FUNCTIONS
-**
-** Virtual table implementations may overload SQL functions by implementing
-** the sqlite3_module.xFindFunction() method.
-*/
-
-typedef struct Fts5ExtensionApi Fts5ExtensionApi;
-typedef struct Fts5Context Fts5Context;
-typedef struct Fts5PhraseIter Fts5PhraseIter;
-
-typedef void (*fts5_extension_function)(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-);
-
-struct Fts5PhraseIter {
- const unsigned char *a;
- const unsigned char *b;
-};
-
-/*
-** EXTENSION API FUNCTIONS
-**
-** xUserData(pFts):
-** Return a copy of the context pointer the extension function was
-** registered with.
-**
-** xColumnTotalSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the FTS5 table. Or, if iCol is
-** non-negative but less than the number of columns in the table, return
-** the total number of tokens in column iCol, considering all rows in
-** the FTS5 table.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnCount(pFts):
-** Return the number of columns in the table.
-**
-** xColumnSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the current row. Or, if iCol is
-** non-negative but less than the number of columns in the table, set
-** *pnToken to the number of tokens in column iCol of the current row.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
-** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
-** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
-** if an error occurs, an SQLite error code is returned and the final values
-** of (*pz) and (*pn) are undefined.
-**
-** xPhraseCount:
-** Returns the number of phrases in the current query expression.
-**
-** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
-**
-** xInstCount:
-** Set *pnInst to the total number of occurrences of all phrases within
-** the query within the current row. Return SQLITE_OK if successful, or
-** an error code (i.e. SQLITE_NOMEM) if an error occurs.
-**
-** xInst:
-** Query for the details of phrase match iIdx within the current row.
-** Phrase matches are numbered starting from zero, so the iIdx argument
-** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
-**
-** Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
-** if an error occurs.
-**
-** xRowid:
-** Returns the rowid of the current row.
-**
-** xTokenize:
-** Tokenize text using the tokenizer belonging to the FTS5 table.
-**
-** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
-** This API function is used to query the FTS table for phrase iPhrase
-** of the current query. Specifically, a query equivalent to:
-**
-** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
-**
-** with $p set to a phrase equivalent to the phrase iPhrase of the
-** current query is executed. For each row visited, the callback function
-** passed as the fourth argument is invoked. The context and API objects
-** passed to the callback function may be used to access the properties of
-** each matched row. Invoking Api.xUserData() returns a copy of the pointer
-** passed as the third argument to xQueryPhrase() (i.e. pUserData).
-**
-** If the callback function returns any value other than SQLITE_OK, the
-** query is abandoned and the xQueryPhrase function returns immediately.
-** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
-** Otherwise, the error code is propagated upwards.
-**
-** If the query runs to completion without incident, SQLITE_OK is returned.
-** Or, if some error occurs before the query completes or is aborted by
-** the callback, an SQLite error code is returned.
-**
-**
-** xSetAuxdata(pFts5, pAux, xDelete)
-**
-** Save the pointer passed as the second argument as the extension functions
-** "auxiliary data". The pointer may then be retrieved by the current or any
-** future invocation of the same fts5 extension function made as part of
-** the same MATCH query using the xGetAuxdata() API.
-**
-** Each extension function is allocated a single auxiliary data slot for
-** each FTS query (MATCH expression). If the extension function is invoked
-** more than once for a single FTS query, then all invocations share a
-** single auxiliary data context.
-**
-** If there is already an auxiliary data pointer when this function is
-** invoked, then it is replaced by the new pointer. If an xDelete callback
-** was specified along with the original pointer, it is invoked at this
-** point.
-**
-** The xDelete callback, if one is specified, is also invoked on the
-** auxiliary data pointer after the FTS5 query has finished.
-**
-** If an error (e.g. an OOM condition) occurs within this function, the
-** auxiliary data is set to NULL and an error code returned. If the
-** xDelete parameter was not NULL, it is invoked on the auxiliary data
-** pointer before returning.
-**
-**
-** xGetAuxdata(pFts5, bClear)
-**
-** Returns the current auxiliary data pointer for the fts5 extension
-** function. See the xSetAuxdata() method for details.
-**
-** If the bClear argument is non-zero, then the auxiliary data is cleared
-** (set to NULL) before this function returns. In this case the xDelete
-** callback, if any, is not invoked.
-**
-**
-** xRowCount(pFts5, pnRow)
-**
-** This function is used to retrieve the total number of rows in the table.
-** In other words, the same value that would be returned by:
-**
-** SELECT count(*) FROM ftstable;
-**
-** xPhraseFirst()
-** This function is used, along with type Fts5PhraseIter and the xPhraseNext
-** method, to iterate through all instances of a single query phrase within
-** the current row. This is the same information as is accessible via the
-** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
-** to use, this API may be faster under some circumstances. To iterate
-** through instances of phrase iPhrase, use the following code:
-**
-** Fts5PhraseIter iter;
-** int iCol, iOff;
-** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
-** iOff>=0;
-** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
-** ){
-** // An instance of phrase iPhrase at offset iOff of column iCol
-** }
-**
-** The Fts5PhraseIter structure is defined above. Applications should not
-** modify this structure directly - it should only be used as shown above
-** with the xPhraseFirst() and xPhraseNext() API methods.
-**
-** xPhraseNext()
-** See xPhraseFirst above.
-*/
-struct Fts5ExtensionApi {
- int iVersion; /* Currently always set to 1 */
-
- void *(*xUserData)(Fts5Context*);
-
- int (*xColumnCount)(Fts5Context*);
- int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
- int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
-
- int (*xTokenize)(Fts5Context*,
- const char *pText, int nText, /* Text to tokenize */
- void *pCtx, /* Context passed to xToken() */
- int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
- );
-
- int (*xPhraseCount)(Fts5Context*);
- int (*xPhraseSize)(Fts5Context*, int iPhrase);
-
- int (*xInstCount)(Fts5Context*, int *pnInst);
- int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
-
- sqlite3_int64 (*xRowid)(Fts5Context*);
- int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
- int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
-
- int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
- int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
- );
- int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
- void *(*xGetAuxdata)(Fts5Context*, int bClear);
-
- void (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
- void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
-};
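-
-/* Illustrative sketch (not part of the original source): a minimal auxiliary
-** function that sums the token counts of all columns in the current row
-** using only the xColumnCount() and xColumnSize() methods documented above.
-** Kept inside "#if 0" so that it is never compiled. */
-#if 0
-static void fts5TokenCountAux(
-  const Fts5ExtensionApi *pApi,   /* API offered by current FTS version */
-  Fts5Context *pFts,              /* First arg to pass to pApi functions */
-  sqlite3_context *pCtx,          /* Context for returning result/error */
-  int nVal, sqlite3_value **apVal /* Trailing arguments (unused here) */
-){
-  int i, nTotal = 0;
-  int nCol = pApi->xColumnCount(pFts);
-  (void)nVal; (void)apVal;
-  for(i=0; i<nCol; i++){
-    int nTok = 0;
-    int rc = pApi->xColumnSize(pFts, i, &nTok);
-    if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pCtx, rc); return; }
-    nTotal += nTok;
-  }
-  sqlite3_result_int(pCtx, nTotal);
-}
-#endif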
-
-/*
-** CUSTOM AUXILIARY FUNCTIONS
-*************************************************************************/
-
-/*************************************************************************
-** CUSTOM TOKENIZERS
-**
-** Applications may also register custom tokenizer types. A tokenizer
-** is registered by providing fts5 with a populated instance of the
-** following structure. All structure methods must be defined, setting
-** any member of the fts5_tokenizer struct to NULL leads to undefined
-** behaviour. The structure methods are expected to function as follows:
-**
-** xCreate:
-** This function is used to allocate and initialize a tokenizer instance.
-** A tokenizer instance is required to actually tokenize text.
-**
-** The first argument passed to this function is a copy of the (void*)
-** pointer provided by the application when the fts5_tokenizer object
-** was registered with FTS5 (the third argument to xCreateTokenizer()).
-** The second and third arguments are an array of nul-terminated strings
-** containing the tokenizer arguments, if any, specified following the
-** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
-** to create the FTS5 table.
-**
-** The final argument is an output variable. If successful, (*ppOut)
-** should be set to point to the new tokenizer handle and SQLITE_OK
-** returned. If an error occurs, some value other than SQLITE_OK should
-** be returned. In this case, fts5 assumes that the final value of *ppOut
-** is undefined.
-**
-** xDelete:
-** This function is invoked to delete a tokenizer handle previously
-** allocated using xCreate(). Fts5 guarantees that this function will
-** be invoked exactly once for each successful call to xCreate().
-**
-** xTokenize:
-** This function is expected to tokenize the nText byte string indicated
-** by argument pText. pText may or may not be nul-terminated. The first
-** argument passed to this function is a pointer to an Fts5Tokenizer object
-** returned by an earlier call to xCreate().
-**
-** The second argument indicates the reason that FTS5 is requesting
-** tokenization of the supplied text. This is always one of the following
-** four values:
-**
-** <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
-** or removed from the FTS table. The tokenizer is being invoked to
-** determine the set of tokens to add to (or delete from) the
-** FTS index.
-**
-** <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed
-** against the FTS index. The tokenizer is being called to tokenize
-** a bareword or quoted string specified as part of the query.
-**
-** <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
-** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
-** followed by a "*" character, indicating that the last token
-** returned by the tokenizer will be treated as a token prefix.
-**
-** <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to
-** satisfy an fts5_api.xTokenize() request made by an auxiliary
-** function. Or an fts5_api.xColumnSize() request made by the same
-** on a columnsize=0 database.
-** </ul>
-**
-** For each token in the input string, the supplied callback xToken() must
-** be invoked. The first argument to it should be a copy of the pointer
-** passed as the second argument to xTokenize(). The third and fourth
-** arguments are a pointer to a buffer containing the token text, and the
-** size of the token in bytes. The 5th and 6th arguments are the byte offsets
-** of the first byte of and first byte immediately following the text from
-** which the token is derived within the input.
-**
-** The second argument passed to the xToken() callback ("tflags") should
-** normally be set to 0. The exception is if the tokenizer supports
-** synonyms. In this case see the discussion below for details.
-**
-** FTS5 assumes the xToken() callback is invoked for each token in the
-** order that they occur within the input text.
-**
-** If an xToken() callback returns any value other than SQLITE_OK, then
-** the tokenization should be abandoned and the xTokenize() method should
-** immediately return a copy of the xToken() return value. Or, if the
-** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
-** if an error occurs with the xTokenize() implementation itself, it
-** may abandon the tokenization and return any error code other than
-** SQLITE_OK or SQLITE_DONE.
-**
-** SYNONYM SUPPORT
-**
-** Custom tokenizers may also support synonyms. Consider a case in which a
-** user wishes to query for a phrase such as "first place". Using the
-** built-in tokenizers, the FTS5 query 'first + place' will match instances
-** of "first place" within the document set, but not alternative forms
-** such as "1st place". In some applications, it would be better to match
-** all instances of "first place" or "1st place" regardless of which form
-** the user specified in the MATCH query text.
-**
-** There are several ways to approach this in FTS5:
-**
-** <ol><li> By mapping all synonyms to a single token. In the above
-** example, this means that the tokenizer returns the
-** same token for inputs "first" and "1st". Say that token is in
-** fact "first", so that when the user inserts the document "I won
-** 1st place" entries are added to the index for tokens "i", "won",
-** "first" and "place". If the user then queries for '1st + place',
-** the tokenizer substitutes "first" for "1st" and the query works
-** as expected.
-**
-** <li> By querying the index for all synonyms of each query term separately.
-** In this case, when tokenizing query text, the tokenizer may
-** provide multiple synonyms for a single term within the document.
-** FTS5 then queries the index for each synonym individually. For
-** example, faced with the query:
-**
-** <codeblock>
-** ... MATCH 'first place'</codeblock>
-**
-** the tokenizer offers both "1st" and "first" as synonyms for the
-** first token in the MATCH query and FTS5 effectively runs a query
-** similar to:
-**
-** <codeblock>
-** ... MATCH '(first OR 1st) place'</codeblock>
-**
-** except that, for the purposes of auxiliary functions, the query
-** still appears to contain just two phrases - "(first OR 1st)"
-** being treated as a single phrase.
-**
-** <li> By adding multiple synonyms for a single term to the FTS index.
-** Using this method, when tokenizing document text, the tokenizer
-** provides multiple synonyms for each token. So that when a
-** document such as "I won first place" is tokenized, entries are
-** added to the FTS index for "i", "won", "first", "1st" and
-** "place".
-**
-** This way, even if the tokenizer does not provide synonyms
-** when tokenizing query text (it should not - to do so would be
-** inefficient), it doesn't matter if the user queries for
-** 'first + place' or '1st + place', as there are entries in the
-** FTS index corresponding to both forms of the first token.
-** </ol>
-**
-** Whether it is parsing document or query text, any call to xToken that
-** specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit
-** is considered to supply a synonym for the previous token. For example,
-** when parsing the document "I won first place", a tokenizer that supports
-** synonyms would call xToken() 5 times, as follows:
-**
-** <codeblock>
-** xToken(pCtx, 0, "i", 1, 0, 1);
-** xToken(pCtx, 0, "won", 3, 2, 5);
-** xToken(pCtx, 0, "first", 5, 6, 11);
-** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
-** xToken(pCtx, 0, "place", 5, 12, 17);
-**</codeblock>
-**
-** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
-** xToken() is called. Multiple synonyms may be specified for a single token
-** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
-** There is no limit to the number of synonyms that may be provided for a
-** single token.
-**
-** In many cases, method (1) above is the best approach. It does not add
-** extra data to the FTS index or require FTS5 to query for multiple terms,
-** so it is efficient in terms of disk space and query speed. However, it
-** does not support prefix queries very well. If, as suggested above, the
-** token "first" is subsituted for "1st" by the tokenizer, then the query:
-**
-** <codeblock>
-** ... MATCH '1s*'</codeblock>
-**
-** will not match documents that contain the token "1st" (as the tokenizer
-** will probably not map "1s" to any prefix of "first").
-**
-** For full prefix support, method (3) may be preferred. In this case,
-** because the index contains entries for both "first" and "1st", prefix
-** queries such as 'fi*' or '1s*' will match correctly. However, because
-** extra entries are added to the FTS index, this method uses more space
-** within the database.
-**
-** Method (2) offers a midpoint between (1) and (3). Using this method,
-** a query such as '1s*' will match documents that contain the literal
-** token "1st", but not "first" (assuming the tokenizer is not able to
-** provide synonyms for prefixes). However, a non-prefix query like '1st'
-** will match against "1st" and "first". This method does not require
-** extra disk space, as no extra entries are added to the FTS index.
-** On the other hand, it may require more CPU cycles to run MATCH queries,
-** as separate queries of the FTS index are required for each synonym.
-**
-** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (3)) or query
-** text (method (2)), not both. Doing so will not cause any errors, but is
-** inefficient.
-*/
-typedef struct Fts5Tokenizer Fts5Tokenizer;
-typedef struct fts5_tokenizer fts5_tokenizer;
-struct fts5_tokenizer {
- int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
- void (*xDelete)(Fts5Tokenizer*);
- int (*xTokenize)(Fts5Tokenizer*,
- void *pCtx,
- int flags, /* Mask of FTS5_TOKENIZE_* flags */
- const char *pText, int nText,
- int (*xToken)(
- void *pCtx, /* Copy of 2nd argument to xTokenize() */
- int tflags, /* Mask of FTS5_TOKEN_* flags */
- const char *pToken, /* Pointer to buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Byte offset of token within input text */
- int iEnd /* Byte offset of end of token within input text */
- )
- );
-};
-
-/* Flags that may be passed as the third argument to xTokenize() */
-#define FTS5_TOKENIZE_QUERY 0x0001
-#define FTS5_TOKENIZE_PREFIX 0x0002
-#define FTS5_TOKENIZE_DOCUMENT 0x0004
-#define FTS5_TOKENIZE_AUX 0x0008
-
-/* Flags that may be passed by the tokenizer implementation back to FTS5
-** as the third argument to the supplied xToken callback. */
-#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
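-
-/* Illustrative sketch (not part of the original source): a minimal xTokenize
-** implementation that splits the input on ASCII space characters and reports
-** each token through the xToken() callback, following the protocol described
-** above. Unicode handling and case folding are deliberately omitted. Kept
-** inside "#if 0" so that it is never compiled. */
-#if 0
-static int simpleSpaceTokenize(
-  Fts5Tokenizer *pTokenizer,       /* Tokenizer handle (no state needed here) */
-  void *pCtx,                      /* Copy of 2nd argument to xTokenize() */
-  int flags,                       /* Mask of FTS5_TOKENIZE_* flags (unused) */
-  const char *pText, int nText,
-  int (*xToken)(void*, int, const char*, int, int, int)
-){
-  int i = 0;
-  (void)pTokenizer; (void)flags;
-  while( i<nText ){
-    int iStart;
-    while( i<nText && pText[i]==' ' ) i++;       /* Skip leading spaces */
-    iStart = i;
-    while( i<nText && pText[i]!=' ' ) i++;       /* Find end of the token */
-    if( i>iStart ){
-      int rc = xToken(pCtx, 0, &pText[iStart], i-iStart, iStart, i);
-      if( rc!=SQLITE_OK ) return rc;
-    }
-  }
-  return SQLITE_OK;
-}
-#endif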
-
-/*
-** END OF CUSTOM TOKENIZERS
-*************************************************************************/
-
-/*************************************************************************
-** FTS5 EXTENSION REGISTRATION API
-*/
-typedef struct fts5_api fts5_api;
-struct fts5_api {
- int iVersion; /* Currently always set to 2 */
-
- /* Create a new tokenizer */
- int (*xCreateTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_tokenizer *pTokenizer,
- void (*xDestroy)(void*)
- );
-
- /* Find an existing tokenizer */
- int (*xFindTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void **ppContext,
- fts5_tokenizer *pTokenizer
- );
-
- /* Create a new auxiliary function */
- int (*xCreateFunction)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_extension_function xFunction,
- void (*xDestroy)(void*)
- );
-};
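-
-/* Illustrative sketch (not part of the original source): registering a custom
-** tokenizer through the fts5_api object declared above. How the fts5_api
-** pointer is obtained is outside the scope of this header and is assumed to
-** be available in pApi. The names "spacetok", spaceTokenizerCreate and
-** spaceTokenizerDelete are hypothetical; simpleSpaceTokenize is the sketch
-** shown earlier. Kept inside "#if 0" so that it is never compiled. */
-#if 0
-static int registerSpaceTokenizer(fts5_api *pApi){
-  static fts5_tokenizer spaceTokenizer = {
-    spaceTokenizerCreate,    /* xCreate   (hypothetical) */
-    spaceTokenizerDelete,    /* xDelete   (hypothetical) */
-    simpleSpaceTokenize      /* xTokenize (sketched above) */
-  };
-  return pApi->xCreateTokenizer(pApi, "spacetok", 0, &spaceTokenizer, 0);
-}
-#endif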
-
-/*
-** END OF REGISTRATION API
-*************************************************************************/
-
-#if 0
-} /* end of the 'extern "C"' block */
-#endif
-
-#endif /* _FTS5_H */
-
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-*/
-#ifndef _FTS5INT_H
-#define _FTS5INT_H
-
-/* #include "sqlite3ext.h" */
-SQLITE_EXTENSION_INIT1
-
-/* #include <string.h> */
-/* #include <assert.h> */
-
-#ifndef SQLITE_AMALGAMATION
-
-typedef unsigned char u8;
-typedef unsigned int u32;
-typedef unsigned short u16;
-typedef sqlite3_int64 i64;
-typedef sqlite3_uint64 u64;
-
-#define ArraySize(x) (sizeof(x) / sizeof(x[0]))
-
-#define testcase(x)
-#define ALWAYS(x) 1
-#define NEVER(x) 0
-
-#define MIN(x,y) (((x) < (y)) ? (x) : (y))
-#define MAX(x,y) (((x) > (y)) ? (x) : (y))
-
-/*
-** Constants for the largest and smallest possible 64-bit signed integers.
-*/
-# define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
-# define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
-
-#endif
-
-
-/*
-** Maximum number of prefix indexes on single FTS5 table. This must be
-** less than 32. If it is set to anything larger than that, an #error
-** directive in fts5_index.c will cause the build to fail.
-*/
-#define FTS5_MAX_PREFIX_INDEXES 31
-
-#define FTS5_DEFAULT_NEARDIST 10
-#define FTS5_DEFAULT_RANK "bm25"
-
-/* Name of rank and rowid columns */
-#define FTS5_RANK_NAME "rank"
-#define FTS5_ROWID_NAME "rowid"
-
-#ifdef SQLITE_DEBUG
-# define FTS5_CORRUPT sqlite3Fts5Corrupt()
-static int sqlite3Fts5Corrupt(void);
-#else
-# define FTS5_CORRUPT SQLITE_CORRUPT_VTAB
-#endif
-
-/*
-** The assert_nc() macro is similar to the assert() macro, except that it
-** is used for assert() conditions that are true only if it can be
-** guaranteed that the database is not corrupt.
-*/
-#ifdef SQLITE_DEBUG
-SQLITE_API extern int sqlite3_fts5_may_be_corrupt;
-# define assert_nc(x) assert(sqlite3_fts5_may_be_corrupt || (x))
-#else
-# define assert_nc(x) assert(x)
-#endif
-
-typedef struct Fts5Global Fts5Global;
-typedef struct Fts5Colset Fts5Colset;
-
-/* If a NEAR() clump or phrase may only match a specific set of columns,
-** then an object of the following type is used to record the set of columns.
-** Each entry in the aiCol[] array is a column that may be matched.
-**
-** This object is used by fts5_expr.c and fts5_index.c.
-*/
-struct Fts5Colset {
- int nCol;
- int aiCol[1];
-};
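-
-/* Illustrative sketch (not part of the original source): Fts5Colset uses the
-** trailing-array idiom - aiCol[] is declared with a single element, but
-** allocations are sized for nCol entries. A hypothetical allocator might
-** look like this. Kept inside "#if 0" so that it is never compiled. */
-#if 0
-static Fts5Colset *exampleColsetAlloc(int nCol){
-  Fts5Colset *p = (Fts5Colset*)sqlite3_malloc(
-      sizeof(Fts5Colset) + (nCol-1)*sizeof(int)
-  );
-  if( p ) p->nCol = nCol;          /* Caller fills in p->aiCol[0..nCol-1] */
-  return p;
-}
-#endif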
-
-
-
-/**************************************************************************
-** Interface to code in fts5_config.c. fts5_config.c contains code
-** to parse the arguments passed to the CREATE VIRTUAL TABLE statement.
-*/
-
-typedef struct Fts5Config Fts5Config;
-
-/*
-** An instance of the following structure encodes all information that can
-** be gleaned from the CREATE VIRTUAL TABLE statement, as well as all
-** information loaded from the %_config table.
-**
-** nAutomerge:
-** The minimum number of segments that an auto-merge operation should
-** attempt to merge together. A value of 1 sets the object to use the
-** compile time default. Zero disables auto-merge altogether.
-**
-** zContent:
-**
-** zContentRowid:
-** The value of the content_rowid= option, if one was specified. Or
-** the string "rowid" otherwise. This text is not quoted - if it is
-** used as part of an SQL statement it needs to be quoted appropriately.
-**
-** zContentExprlist:
-**
-** pzErrmsg:
-** This exists in order to allow the fts5_index.c module to return a
-** decent error message if it encounters a file-format version it does
-** not understand.
-**
-** bColumnsize:
-** True if the %_docsize table is created.
-**
-** bPrefixIndex:
-** This is only used for debugging. If set to false, any prefix indexes
-** are ignored. This value is configured using:
-**
-** INSERT INTO tbl(tbl, rank) VALUES('prefix-index', $bPrefixIndex);
-**
-*/
-struct Fts5Config {
- sqlite3 *db; /* Database handle */
- char *zDb; /* Database holding FTS index (e.g. "main") */
- char *zName; /* Name of FTS index */
- int nCol; /* Number of columns */
- char **azCol; /* Column names */
- u8 *abUnindexed; /* True for unindexed columns */
- int nPrefix; /* Number of prefix indexes */
- int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */
- int eContent; /* An FTS5_CONTENT value */
- char *zContent; /* content table */
- char *zContentRowid; /* "content_rowid=" option value */
- int bColumnsize; /* "columnsize=" option value (dflt==1) */
- char *zContentExprlist;
- Fts5Tokenizer *pTok;
- fts5_tokenizer *pTokApi;
-
- /* Values loaded from the %_config table */
- int iCookie; /* Incremented when %_config is modified */
- int pgsz; /* Approximate page size used in %_data */
- int nAutomerge; /* 'automerge' setting */
- int nCrisisMerge; /* Maximum allowed segments per level */
- char *zRank; /* Name of rank function */
- char *zRankArgs; /* Arguments to rank function */
-
- /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */
- char **pzErrmsg;
-
-#ifdef SQLITE_DEBUG
- int bPrefixIndex; /* True to use prefix-indexes */
-#endif
-};
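-
-/* Illustrative example (not part of the original source): a CREATE VIRTUAL
-** TABLE statement exercising some of the options recorded in Fts5Config
-** above - a column list with an unindexed column, prefix indexes and the
-** columnsize option. The table and column names are hypothetical.
-**
-**   CREATE VIRTUAL TABLE doc USING fts5(
-**     title, body, url UNINDEXED,
-**     prefix='2 3', columnsize=0
-**   );
-*/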
-
-/* Current expected value of %_config table 'version' field */
-#define FTS5_CURRENT_VERSION 4
-
-#define FTS5_CONTENT_NORMAL 0
-#define FTS5_CONTENT_NONE 1
-#define FTS5_CONTENT_EXTERNAL 2
-
-
-
-
-static int sqlite3Fts5ConfigParse(
- Fts5Global*, sqlite3*, int, const char **, Fts5Config**, char**
-);
-static void sqlite3Fts5ConfigFree(Fts5Config*);
-
-static int sqlite3Fts5ConfigDeclareVtab(Fts5Config *pConfig);
-
-static int sqlite3Fts5Tokenize(
- Fts5Config *pConfig, /* FTS5 Configuration object */
- int flags, /* FTS5_TOKENIZE_* flags */
- const char *pText, int nText, /* Text to tokenize */
- void *pCtx, /* Context passed to xToken() */
- int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
-);
-
-static void sqlite3Fts5Dequote(char *z);
-
-/* Load the contents of the %_config table */
-static int sqlite3Fts5ConfigLoad(Fts5Config*, int);
-
-/* Set the value of a single config attribute */
-static int sqlite3Fts5ConfigSetValue(Fts5Config*, const char*, sqlite3_value*, int*);
-
-static int sqlite3Fts5ConfigParseRank(const char*, char**, char**);
-
-/*
-** End of interface to code in fts5_config.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_buffer.c.
-*/
-
-/*
-** Buffer object for the incremental building of string data.
-*/
-typedef struct Fts5Buffer Fts5Buffer;
-struct Fts5Buffer {
- u8 *p;
- int n;
- int nSpace;
-};
-
-static int sqlite3Fts5BufferGrow(int*, Fts5Buffer*, int);
-static void sqlite3Fts5BufferAppendVarint(int*, Fts5Buffer*, i64);
-static void sqlite3Fts5BufferAppendBlob(int*, Fts5Buffer*, int, const u8*);
-static void sqlite3Fts5BufferAppendString(int *, Fts5Buffer*, const char*);
-static void sqlite3Fts5BufferFree(Fts5Buffer*);
-static void sqlite3Fts5BufferZero(Fts5Buffer*);
-static void sqlite3Fts5BufferSet(int*, Fts5Buffer*, int, const u8*);
-static void sqlite3Fts5BufferAppendPrintf(int *, Fts5Buffer*, char *zFmt, ...);
-static void sqlite3Fts5BufferAppend32(int*, Fts5Buffer*, int);
-
-static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...);
-
-#define fts5BufferZero(x) sqlite3Fts5BufferZero(x)
-#define fts5BufferGrow(a,b,c) sqlite3Fts5BufferGrow(a,b,c)
-#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,c)
-#define fts5BufferFree(a) sqlite3Fts5BufferFree(a)
-#define fts5BufferAppendBlob(a,b,c,d) sqlite3Fts5BufferAppendBlob(a,b,c,d)
-#define fts5BufferSet(a,b,c,d) sqlite3Fts5BufferSet(a,b,c,d)
-#define fts5BufferAppend32(a,b,c) sqlite3Fts5BufferAppend32(a,b,c)
-
-/* Write and decode big-endian 32-bit integer values */
-static void sqlite3Fts5Put32(u8*, int);
-static int sqlite3Fts5Get32(const u8*);
-
-#define FTS5_POS2COLUMN(iPos) (int)(iPos >> 32)
-#define FTS5_POS2OFFSET(iPos) (int)(iPos & 0xFFFFFFFF)
-
-typedef struct Fts5PoslistReader Fts5PoslistReader;
-struct Fts5PoslistReader {
- /* Variables used only by sqlite3Fts5PoslistIterXXX() functions. */
- const u8 *a; /* Position list to iterate through */
- int n; /* Size of buffer at a[] in bytes */
- int i; /* Current offset in a[] */
-
- u8 bFlag; /* For client use (any custom purpose) */
-
- /* Output variables */
- u8 bEof; /* Set to true at EOF */
- i64 iPos; /* (iCol<<32) + iPos */
-};
-static int sqlite3Fts5PoslistReaderInit(
- const u8 *a, int n, /* Poslist buffer to iterate through */
- Fts5PoslistReader *pIter /* Iterator object to initialize */
-);
-static int sqlite3Fts5PoslistReaderNext(Fts5PoslistReader*);
-
-typedef struct Fts5PoslistWriter Fts5PoslistWriter;
-struct Fts5PoslistWriter {
- i64 iPrev;
-};
-static int sqlite3Fts5PoslistWriterAppend(Fts5Buffer*, Fts5PoslistWriter*, i64);
-
-static int sqlite3Fts5PoslistNext64(
- const u8 *a, int n, /* Buffer containing poslist */
- int *pi, /* IN/OUT: Offset within a[] */
- i64 *piOff /* IN/OUT: Current offset */
-);
-
-/* Malloc utility */
-static void *sqlite3Fts5MallocZero(int *pRc, int nByte);
-static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn);
-
-/* Character set tests (like isspace(), isalpha() etc.) */
-static int sqlite3Fts5IsBareword(char t);
-
-/*
-** End of interface to code in fts5_buffer.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_index.c. fts5_index.c contains code
-** to access the data stored in the %_data table.
-*/
-
-typedef struct Fts5Index Fts5Index;
-typedef struct Fts5IndexIter Fts5IndexIter;
-
-/*
-** Values used as part of the flags argument passed to IndexQuery().
-*/
-#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
-#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
-#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
-#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
-
-/*
-** Create/destroy an Fts5Index object.
-*/
-static int sqlite3Fts5IndexOpen(Fts5Config *pConfig, int bCreate, Fts5Index**, char**);
-static int sqlite3Fts5IndexClose(Fts5Index *p);
-
-/*
-** for(
-** sqlite3Fts5IndexQuery(p, "token", 5, 0, 0, &pIter);
-** 0==sqlite3Fts5IterEof(pIter);
-** sqlite3Fts5IterNext(pIter)
-** ){
-** i64 iRowid = sqlite3Fts5IterRowid(pIter);
-** }
-*/
-
-/*
-** Open a new iterator to iterate though all rowids that match the
-** specified token or token prefix.
-*/
-static int sqlite3Fts5IndexQuery(
- Fts5Index *p, /* FTS index to query */
- const char *pToken, int nToken, /* Token (or prefix) to query for */
- int flags, /* Mask of FTS5INDEX_QUERY_X flags */
- Fts5Colset *pColset, /* Match these columns only */
- Fts5IndexIter **ppIter /* OUT: New iterator object */
-);
-
-/*
-** The various operations on open token or token prefix iterators opened
-** using sqlite3Fts5IndexQuery().
-*/
-static int sqlite3Fts5IterEof(Fts5IndexIter*);
-static int sqlite3Fts5IterNext(Fts5IndexIter*);
-static int sqlite3Fts5IterNextFrom(Fts5IndexIter*, i64 iMatch);
-static i64 sqlite3Fts5IterRowid(Fts5IndexIter*);
-static int sqlite3Fts5IterPoslist(Fts5IndexIter*,Fts5Colset*, const u8**, int*, i64*);
-static int sqlite3Fts5IterPoslistBuffer(Fts5IndexIter *pIter, Fts5Buffer *pBuf);
-
-/*
-** Close an iterator opened by sqlite3Fts5IndexQuery().
-*/
-static void sqlite3Fts5IterClose(Fts5IndexIter*);
-
-/*
-** This interface is used by the fts5vocab module.
-*/
-static const char *sqlite3Fts5IterTerm(Fts5IndexIter*, int*);
-static int sqlite3Fts5IterNextScan(Fts5IndexIter*);
-
-
-/*
-** Insert or remove data to or from the index. Each time a document is
-** added to or removed from the index, this function is called one or more
-** times.
-**
-** For an insert, it must be called once for each token in the new document.
-** If the operation is a delete, it must be called (at least) once for each
-** unique token in the document with an iCol value less than zero. The iPos
-** argument is ignored for a delete.
-*/
-static int sqlite3Fts5IndexWrite(
- Fts5Index *p, /* Index to write to */
- int iCol, /* Column token appears in (-ve -> delete) */
- int iPos, /* Position of token within column */
- const char *pToken, int nToken /* Token to add or remove to or from index */
-);
-
-/*
-** Indicate that subsequent calls to sqlite3Fts5IndexWrite() pertain to
-** document iDocid.
-*/
-static int sqlite3Fts5IndexBeginWrite(
- Fts5Index *p, /* Index to write to */
- int bDelete, /* True if current operation is a delete */
- i64 iDocid /* Docid to add or remove data from */
-);
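-
-/* Illustrative sketch (not part of the original source): adding a two-token
-** document to the index using the two interfaces declared above. The docid
-** and token values are hypothetical. Kept inside "#if 0" so that it is never
-** compiled. */
-#if 0
-static int exampleIndexInsert(Fts5Index *p, i64 iDocid){
-  int rc = sqlite3Fts5IndexBeginWrite(p, 0, iDocid);    /* 0 == not a delete */
-  if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexWrite(p, 0, 0, "hello", 5);
-  if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexWrite(p, 0, 1, "world", 5);
-  return rc;
-}
-#endif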
-
-/*
-** Flush any data stored in the in-memory hash tables to the database.
-** If the bCommit flag is true, also close any open blob handles.
-*/
-static int sqlite3Fts5IndexSync(Fts5Index *p, int bCommit);
-
-/*
-** Discard any data stored in the in-memory hash tables. Do not write it
-** to the database. Additionally, assume that the contents of the %_data
-** table may have changed on disk. So any in-memory caches of %_data
-** records must be invalidated.
-*/
-static int sqlite3Fts5IndexRollback(Fts5Index *p);
-
-/*
-** Get or set the "averages" values.
-*/
-static int sqlite3Fts5IndexGetAverages(Fts5Index *p, i64 *pnRow, i64 *anSize);
-static int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8*, int);
-
-/*
-** Functions called by the storage module as part of integrity-check.
-*/
-static u64 sqlite3Fts5IndexCksum(Fts5Config*,i64,int,int,const char*,int);
-static int sqlite3Fts5IndexIntegrityCheck(Fts5Index*, u64 cksum);
-
-/*
-** Called during virtual module initialization to register UDF
-** fts5_decode() with SQLite
-*/
-static int sqlite3Fts5IndexInit(sqlite3*);
-
-static int sqlite3Fts5IndexSetCookie(Fts5Index*, int);
-
-/*
-** Return the total number of entries read from the %_data table by
-** this connection since it was created.
-*/
-static int sqlite3Fts5IndexReads(Fts5Index *p);
-
-static int sqlite3Fts5IndexReinit(Fts5Index *p);
-static int sqlite3Fts5IndexOptimize(Fts5Index *p);
-static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge);
-
-static int sqlite3Fts5IndexLoadConfig(Fts5Index *p);
-
-/*
-** End of interface to code in fts5_index.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_varint.c.
-*/
-static int sqlite3Fts5GetVarint32(const unsigned char *p, u32 *v);
-static int sqlite3Fts5GetVarintLen(u32 iVal);
-static u8 sqlite3Fts5GetVarint(const unsigned char*, u64*);
-static int sqlite3Fts5PutVarint(unsigned char *p, u64 v);
-
-#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&b)
-#define fts5GetVarint sqlite3Fts5GetVarint
-
-#define fts5FastGetVarint32(a, iOff, nVal) { \
- nVal = (a)[iOff++]; \
- if( nVal & 0x80 ){ \
- iOff--; \
- iOff += fts5GetVarint32(&(a)[iOff], nVal); \
- } \
-}
-
-
-/*
-** End of interface to code in fts5_varint.c.
-**************************************************************************/
-
-
-/**************************************************************************
-** Interface to code in fts5.c.
-*/
-
-static int sqlite3Fts5GetTokenizer(
- Fts5Global*,
- const char **azArg,
- int nArg,
- Fts5Tokenizer**,
- fts5_tokenizer**,
- char **pzErr
-);
-
-static Fts5Index *sqlite3Fts5IndexFromCsrid(Fts5Global*, i64, int*);
-
-/*
-** End of interface to code in fts5.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_hash.c.
-*/
-typedef struct Fts5Hash Fts5Hash;
-
-/*
-** Create a hash table, free a hash table.
-*/
-static int sqlite3Fts5HashNew(Fts5Hash**, int *pnSize);
-static void sqlite3Fts5HashFree(Fts5Hash*);
-
-static int sqlite3Fts5HashWrite(
- Fts5Hash*,
- i64 iRowid, /* Rowid for this entry */
- int iCol, /* Column token appears in (-ve -> delete) */
- int iPos, /* Position of token within column */
- char bByte,
- const char *pToken, int nToken /* Token to add or remove to or from index */
-);
-
-/*
-** Empty (but do not delete) a hash table.
-*/
-static void sqlite3Fts5HashClear(Fts5Hash*);
-
-static int sqlite3Fts5HashQuery(
- Fts5Hash*, /* Hash table to query */
- const char *pTerm, int nTerm, /* Query term */
- const u8 **ppDoclist, /* OUT: Pointer to doclist for pTerm */
- int *pnDoclist /* OUT: Size of doclist in bytes */
-);
-
-static int sqlite3Fts5HashScanInit(
- Fts5Hash*, /* Hash table to query */
- const char *pTerm, int nTerm /* Query prefix */
-);
-static void sqlite3Fts5HashScanNext(Fts5Hash*);
-static int sqlite3Fts5HashScanEof(Fts5Hash*);
-static void sqlite3Fts5HashScanEntry(Fts5Hash *,
- const char **pzTerm, /* OUT: term (nul-terminated) */
- const u8 **ppDoclist, /* OUT: pointer to doclist */
- int *pnDoclist /* OUT: size of doclist in bytes */
-);
-
-
-/*
-** End of interface to code in fts5_hash.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_storage.c. fts5_storage.c contains code to
-** access the data stored in the %_content and %_docsize tables.
-*/
-
-#define FTS5_STMT_SCAN_ASC 0 /* SELECT rowid, * FROM ... ORDER BY 1 ASC */
-#define FTS5_STMT_SCAN_DESC 1 /* SELECT rowid, * FROM ... ORDER BY 1 DESC */
-#define FTS5_STMT_LOOKUP 2 /* SELECT rowid, * FROM ... WHERE rowid=? */
-
-typedef struct Fts5Storage Fts5Storage;
-
-static int sqlite3Fts5StorageOpen(Fts5Config*, Fts5Index*, int, Fts5Storage**, char**);
-static int sqlite3Fts5StorageClose(Fts5Storage *p);
-static int sqlite3Fts5StorageRename(Fts5Storage*, const char *zName);
-
-static int sqlite3Fts5DropAll(Fts5Config*);
-static int sqlite3Fts5CreateTable(Fts5Config*, const char*, const char*, int, char **);
-
-static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64);
-static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*);
-static int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64);
-
-static int sqlite3Fts5StorageIntegrity(Fts5Storage *p);
-
-static int sqlite3Fts5StorageStmt(Fts5Storage *p, int eStmt, sqlite3_stmt**, char**);
-static void sqlite3Fts5StorageStmtRelease(Fts5Storage *p, int eStmt, sqlite3_stmt*);
-
-static int sqlite3Fts5StorageDocsize(Fts5Storage *p, i64 iRowid, int *aCol);
-static int sqlite3Fts5StorageSize(Fts5Storage *p, int iCol, i64 *pnAvg);
-static int sqlite3Fts5StorageRowCount(Fts5Storage *p, i64 *pnRow);
-
-static int sqlite3Fts5StorageSync(Fts5Storage *p, int bCommit);
-static int sqlite3Fts5StorageRollback(Fts5Storage *p);
-
-static int sqlite3Fts5StorageConfigValue(
- Fts5Storage *p, const char*, sqlite3_value*, int
-);
-
-static int sqlite3Fts5StorageSpecialDelete(Fts5Storage *p, i64 iDel, sqlite3_value**);
-
-static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p);
-static int sqlite3Fts5StorageRebuild(Fts5Storage *p);
-static int sqlite3Fts5StorageOptimize(Fts5Storage *p);
-static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge);
-
-/*
-** End of interface to code in fts5_storage.c.
-**************************************************************************/
-
-
-/**************************************************************************
-** Interface to code in fts5_expr.c.
-*/
-typedef struct Fts5Expr Fts5Expr;
-typedef struct Fts5ExprNode Fts5ExprNode;
-typedef struct Fts5Parse Fts5Parse;
-typedef struct Fts5Token Fts5Token;
-typedef struct Fts5ExprPhrase Fts5ExprPhrase;
-typedef struct Fts5ExprNearset Fts5ExprNearset;
-
-struct Fts5Token {
- const char *p; /* Token text (not NULL terminated) */
- int n; /* Size of buffer p in bytes */
-};
-
-/* Parse a MATCH expression. */
-static int sqlite3Fts5ExprNew(
- Fts5Config *pConfig,
- const char *zExpr,
- Fts5Expr **ppNew,
- char **pzErr
-);
-
-/*
-** for(rc = sqlite3Fts5ExprFirst(pExpr, pIdx, bDesc);
-** rc==SQLITE_OK && 0==sqlite3Fts5ExprEof(pExpr);
-** rc = sqlite3Fts5ExprNext(pExpr)
-** ){
-** // The document with rowid iRowid matches the expression!
-** i64 iRowid = sqlite3Fts5ExprRowid(pExpr);
-** }
-*/
-static int sqlite3Fts5ExprFirst(Fts5Expr*, Fts5Index *pIdx, i64 iMin, int bDesc);
-static int sqlite3Fts5ExprNext(Fts5Expr*, i64 iMax);
-static int sqlite3Fts5ExprEof(Fts5Expr*);
-static i64 sqlite3Fts5ExprRowid(Fts5Expr*);
-
-static void sqlite3Fts5ExprFree(Fts5Expr*);
-
-/* Called during startup to register a UDF with SQLite */
-static int sqlite3Fts5ExprInit(Fts5Global*, sqlite3*);
-
-static int sqlite3Fts5ExprPhraseCount(Fts5Expr*);
-static int sqlite3Fts5ExprPhraseSize(Fts5Expr*, int iPhrase);
-static int sqlite3Fts5ExprPoslist(Fts5Expr*, int, const u8 **);
-
-static int sqlite3Fts5ExprClonePhrase(Fts5Config*, Fts5Expr*, int, Fts5Expr**);
-
-/*******************************************
-** The fts5_expr.c API above this point is used by the other hand-written
-** C code in this module. The interfaces below this point are called by
-** the parser code in fts5parse.y. */
-
-static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...);
-
-static Fts5ExprNode *sqlite3Fts5ParseNode(
- Fts5Parse *pParse,
- int eType,
- Fts5ExprNode *pLeft,
- Fts5ExprNode *pRight,
- Fts5ExprNearset *pNear
-);
-
-static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
- Fts5Parse *pParse,
- Fts5ExprPhrase *pPhrase,
- Fts5Token *pToken,
- int bPrefix
-);
-
-static Fts5ExprNearset *sqlite3Fts5ParseNearset(
- Fts5Parse*,
- Fts5ExprNearset*,
- Fts5ExprPhrase*
-);
-
-static Fts5Colset *sqlite3Fts5ParseColset(
- Fts5Parse*,
- Fts5Colset*,
- Fts5Token *
-);
-
-static void sqlite3Fts5ParsePhraseFree(Fts5ExprPhrase*);
-static void sqlite3Fts5ParseNearsetFree(Fts5ExprNearset*);
-static void sqlite3Fts5ParseNodeFree(Fts5ExprNode*);
-
-static void sqlite3Fts5ParseSetDistance(Fts5Parse*, Fts5ExprNearset*, Fts5Token*);
-static void sqlite3Fts5ParseSetColset(Fts5Parse*, Fts5ExprNearset*, Fts5Colset*);
-static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p);
-static void sqlite3Fts5ParseNear(Fts5Parse *pParse, Fts5Token*);
-
-/*
-** End of interface to code in fts5_expr.c.
-**************************************************************************/
-
-
-
-/**************************************************************************
-** Interface to code in fts5_aux.c.
-*/
-
-static int sqlite3Fts5AuxInit(fts5_api*);
-/*
-** End of interface to code in fts5_aux.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_tokenizer.c.
-*/
-
-static int sqlite3Fts5TokenizerInit(fts5_api*);
-/*
-** End of interface to code in fts5_tokenizer.c.
-**************************************************************************/
-
-/**************************************************************************
-** Interface to code in fts5_vocab.c.
-*/
-
-static int sqlite3Fts5VocabInit(Fts5Global*, sqlite3*);
-
-/*
-** End of interface to code in fts5_vocab.c.
-**************************************************************************/
-
-
-/**************************************************************************
-** Interface to automatically generated code in fts5_unicode2.c.
-*/
-static int sqlite3Fts5UnicodeIsalnum(int c);
-static int sqlite3Fts5UnicodeIsdiacritic(int c);
-static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic);
-/*
-** End of interface to code in fts5_unicode2.c.
-**************************************************************************/
-
-#endif
-
-#define FTS5_OR 1
-#define FTS5_AND 2
-#define FTS5_NOT 3
-#define FTS5_TERM 4
-#define FTS5_COLON 5
-#define FTS5_LP 6
-#define FTS5_RP 7
-#define FTS5_LCP 8
-#define FTS5_RCP 9
-#define FTS5_STRING 10
-#define FTS5_COMMA 11
-#define FTS5_PLUS 12
-#define FTS5_STAR 13
-
-/* Driver template for the LEMON parser generator.
-** The author disclaims copyright to this source code.
-**
-** This version of "lempar.c" is modified, slightly, for use by SQLite.
-** The only modifications are the addition of a couple of NEVER()
-** macros to disable tests that are needed in the case of a general
-** LALR(1) grammar but which are always false in the
-** specific grammar used by SQLite.
-*/
-/* First off, code is included that follows the "include" declaration
-** in the input grammar file. */
-/* #include <stdio.h> */
-
-
-/*
-** Disable all error recovery processing in the parser push-down
-** automaton.
-*/
-#define fts5YYNOERRORRECOVERY 1
-
-/*
-** Make fts5yytestcase() the same as testcase()
-*/
-#define fts5yytestcase(X) testcase(X)
-
-/* Next are all token values, in a form suitable for use by makeheaders.
-** This section will be null unless lemon is run with the -m switch.
-*/
-/*
-** These constants (all generated automatically by the parser generator)
-** specify the various kinds of tokens (terminals) that the parser
-** understands.
-**
-** Each symbol here is a terminal symbol in the grammar.
-*/
-/* Make sure the INTERFACE macro is defined.
-*/
-#ifndef INTERFACE
-# define INTERFACE 1
-#endif
-/* The next thing included is a series of defines which control
-** various aspects of the generated parser.
-** fts5YYCODETYPE is the data type used for storing terminal
-** and nonterminal numbers. "unsigned char" is
-** used if there are fewer than 250 terminals
-** and nonterminals. "int" is used otherwise.
-** fts5YYNOCODE is a number of type fts5YYCODETYPE which corresponds
-** to no legal terminal or nonterminal number. This
-** number is used to fill in empty slots of the hash
-** table.
-** fts5YYFALLBACK If defined, this indicates that one or more tokens
-** have fall-back values which should be used if the
-** original value of the token will not parse.
-** fts5YYACTIONTYPE is the data type used for storing terminal
-** and nonterminal numbers. "unsigned char" is
-** used if there are fewer than 250 rules and
-** states combined. "int" is used otherwise.
-** sqlite3Fts5ParserFTS5TOKENTYPE is the data type used for minor tokens given
-** directly to the parser from the tokenizer.
-** fts5YYMINORTYPE is the data type used for all minor tokens.
-** This is typically a union of many types, one of
-** which is sqlite3Fts5ParserFTS5TOKENTYPE. The entry in the union
-** for base tokens is called "fts5yy0".
-** fts5YYSTACKDEPTH is the maximum depth of the parser's stack. If
-** zero the stack is dynamically sized using realloc()
-** sqlite3Fts5ParserARG_SDECL A static variable declaration for the %extra_argument
-** sqlite3Fts5ParserARG_PDECL A parameter declaration for the %extra_argument
-** sqlite3Fts5ParserARG_STORE Code to store %extra_argument into fts5yypParser
-** sqlite3Fts5ParserARG_FETCH Code to extract %extra_argument from fts5yypParser
-** fts5YYERRORSYMBOL is the code number of the error symbol. If not
-** defined, then do no error processing.
-** fts5YYNSTATE the combined number of states.
-** fts5YYNRULE the number of rules in the grammar
-** fts5YY_MAX_SHIFT Maximum value for shift actions
-** fts5YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
-** fts5YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
-** fts5YY_MIN_REDUCE Minimum value for reduce actions
-** fts5YY_ERROR_ACTION The fts5yy_action[] code for syntax error
-** fts5YY_ACCEPT_ACTION The fts5yy_action[] code for accept
-** fts5YY_NO_ACTION The fts5yy_action[] code for no-op
-*/
-#define fts5YYCODETYPE unsigned char
-#define fts5YYNOCODE 27
-#define fts5YYACTIONTYPE unsigned char
-#define sqlite3Fts5ParserFTS5TOKENTYPE Fts5Token
-typedef union {
- int fts5yyinit;
- sqlite3Fts5ParserFTS5TOKENTYPE fts5yy0;
- Fts5Colset* fts5yy3;
- Fts5ExprPhrase* fts5yy11;
- Fts5ExprNode* fts5yy18;
- int fts5yy20;
- Fts5ExprNearset* fts5yy26;
-} fts5YYMINORTYPE;
-#ifndef fts5YYSTACKDEPTH
-#define fts5YYSTACKDEPTH 100
-#endif
-#define sqlite3Fts5ParserARG_SDECL Fts5Parse *pParse;
-#define sqlite3Fts5ParserARG_PDECL ,Fts5Parse *pParse
-#define sqlite3Fts5ParserARG_FETCH Fts5Parse *pParse = fts5yypParser->pParse
-#define sqlite3Fts5ParserARG_STORE fts5yypParser->pParse = pParse
-#define fts5YYNSTATE 26
-#define fts5YYNRULE 24
-#define fts5YY_MAX_SHIFT 25
-#define fts5YY_MIN_SHIFTREDUCE 40
-#define fts5YY_MAX_SHIFTREDUCE 63
-#define fts5YY_MIN_REDUCE 64
-#define fts5YY_MAX_REDUCE 87
-#define fts5YY_ERROR_ACTION 88
-#define fts5YY_ACCEPT_ACTION 89
-#define fts5YY_NO_ACTION 90
-
-/* The fts5yyzerominor constant is used to initialize instances of
-** fts5YYMINORTYPE objects to zero. */
-static const fts5YYMINORTYPE fts5yyzerominor = { 0 };
-
-/* Define the fts5yytestcase() macro to be a no-op if it is not already defined
-** otherwise.
-**
-** Applications can choose to define fts5yytestcase() in the %include section
-** to a macro that can assist in verifying code coverage. For production
-** code the fts5yytestcase() macro should be turned off. But it is useful
-** for testing.
-*/
-#ifndef fts5yytestcase
-# define fts5yytestcase(X)
-#endif
-
-
-/* Next are the tables used to determine what action to take based on the
-** current state and lookahead token. These tables are used to implement
-** functions that take a state number and lookahead value and return an
-** action integer.
-**
-** Suppose the action integer is N. Then the action is determined as
-** follows
-**
-** 0 <= N <= fts5YY_MAX_SHIFT Shift N. That is, push the lookahead
-** token onto the stack and goto state N.
-**
-** N between fts5YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
-** and fts5YY_MAX_SHIFTREDUCE reduce by rule N-fts5YY_MIN_SHIFTREDUCE.
-**
-** N between fts5YY_MIN_REDUCE Reduce by rule N-fts5YY_MIN_REDUCE
-** and fts5YY_MAX_REDUCE
-**
-** N == fts5YY_ERROR_ACTION A syntax error has occurred.
-**
-** N == fts5YY_ACCEPT_ACTION The parser accepts its input.
-**
-** N == fts5YY_NO_ACTION No such action. Denotes unused
-** slots in the fts5yy_action[] table.
-**
-** The action table is constructed as a single large table named fts5yy_action[].
-** Given state S and lookahead X, the action is computed as
-**
-** fts5yy_action[ fts5yy_shift_ofst[S] + X ]
-**
-** If the index value fts5yy_shift_ofst[S]+X is out of range or if the value
-** fts5yy_lookahead[fts5yy_shift_ofst[S]+X] is not equal to X or if fts5yy_shift_ofst[S]
-** is equal to fts5YY_SHIFT_USE_DFLT, it means that the action is not in the table
-** and that fts5yy_default[S] should be used instead.
-**
-** The formula above is for computing the action when the lookahead is
-** a terminal symbol. If the lookahead is a non-terminal (as occurs after
-** a reduce action) then the fts5yy_reduce_ofst[] array is used in place of
-** the fts5yy_shift_ofst[] array and fts5YY_REDUCE_USE_DFLT is used in place of
-** fts5YY_SHIFT_USE_DFLT.
-**
-** The following are the tables generated in this section:
-**
-** fts5yy_action[] A single table containing all actions.
-** fts5yy_lookahead[] A table containing the lookahead for each entry in
-** fts5yy_action. Used to detect hash collisions.
-** fts5yy_shift_ofst[] For each state, the offset into fts5yy_action for
-** shifting terminals.
-** fts5yy_reduce_ofst[] For each state, the offset into fts5yy_action for
-** shifting non-terminals after a reduce.
-** fts5yy_default[] Default action for each state.
-*/
-#define fts5YY_ACTTAB_COUNT (78)
-static const fts5YYACTIONTYPE fts5yy_action[] = {
- /* 0 */ 89, 15, 46, 5, 48, 24, 12, 19, 23, 14,
- /* 10 */ 46, 5, 48, 24, 20, 21, 23, 43, 46, 5,
- /* 20 */ 48, 24, 6, 18, 23, 17, 46, 5, 48, 24,
- /* 30 */ 75, 7, 23, 25, 46, 5, 48, 24, 62, 47,
- /* 40 */ 23, 48, 24, 7, 11, 23, 9, 3, 4, 2,
- /* 50 */ 62, 50, 52, 44, 64, 3, 4, 2, 49, 4,
- /* 60 */ 2, 1, 23, 11, 16, 9, 12, 2, 10, 61,
- /* 70 */ 53, 59, 62, 60, 22, 13, 55, 8,
-};
-static const fts5YYCODETYPE fts5yy_lookahead[] = {
- /* 0 */ 15, 16, 17, 18, 19, 20, 10, 11, 23, 16,
- /* 10 */ 17, 18, 19, 20, 23, 24, 23, 16, 17, 18,
- /* 20 */ 19, 20, 22, 23, 23, 16, 17, 18, 19, 20,
- /* 30 */ 5, 6, 23, 16, 17, 18, 19, 20, 13, 17,
- /* 40 */ 23, 19, 20, 6, 8, 23, 10, 1, 2, 3,
- /* 50 */ 13, 9, 10, 7, 0, 1, 2, 3, 19, 2,
- /* 60 */ 3, 6, 23, 8, 21, 10, 10, 3, 10, 25,
- /* 70 */ 10, 10, 13, 25, 12, 10, 7, 5,
-};
-#define fts5YY_SHIFT_USE_DFLT (-5)
-#define fts5YY_SHIFT_COUNT (25)
-#define fts5YY_SHIFT_MIN (-4)
-#define fts5YY_SHIFT_MAX (72)
-static const signed char fts5yy_shift_ofst[] = {
- /* 0 */ 55, 55, 55, 55, 55, 36, -4, 56, 58, 25,
- /* 10 */ 37, 60, 59, 59, 46, 54, 42, 57, 62, 61,
- /* 20 */ 62, 69, 65, 62, 72, 64,
-};
-#define fts5YY_REDUCE_USE_DFLT (-16)
-#define fts5YY_REDUCE_COUNT (13)
-#define fts5YY_REDUCE_MIN (-15)
-#define fts5YY_REDUCE_MAX (48)
-static const signed char fts5yy_reduce_ofst[] = {
- /* 0 */ -15, -7, 1, 9, 17, 22, -9, 0, 39, 44,
- /* 10 */ 44, 43, 44, 48,
-};
-static const fts5YYACTIONTYPE fts5yy_default[] = {
- /* 0 */ 88, 88, 88, 88, 88, 69, 82, 88, 88, 87,
- /* 10 */ 87, 88, 87, 87, 88, 88, 88, 66, 80, 88,
- /* 20 */ 81, 88, 88, 78, 88, 65,
-};
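-
-/* Illustrative sketch (not part of the original source): the shift-action
-** lookup described in the comment preceding these tables, for a terminal
-** lookahead token. The real lookup in this parser also handles fallback
-** tokens and a separate table pair for non-terminals after a reduce. Kept
-** inside "#if 0" so that it is never compiled. */
-#if 0
-static int exampleShiftAction(int stateno, fts5YYCODETYPE iLookAhead){
-  int i = fts5yy_shift_ofst[stateno];
-  if( i==fts5YY_SHIFT_USE_DFLT ) return fts5yy_default[stateno];
-  i += iLookAhead;
-  if( i<0 || i>=fts5YY_ACTTAB_COUNT || fts5yy_lookahead[i]!=iLookAhead ){
-    return fts5yy_default[stateno];    /* Out of range or hash collision */
-  }
-  return fts5yy_action[i];
-}
-#endif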
-
-/* The next table maps tokens into fallback tokens. If a construct
-** like the following:
-**
-** %fallback ID X Y Z.
-**
-** appears in the grammar, then ID becomes a fallback token for X, Y,
-** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
-** but it does not parse, the type of the token is changed to ID and
-** the parse is retried before an error is thrown.
-*/
-#ifdef fts5YYFALLBACK
-static const fts5YYCODETYPE fts5yyFallback[] = {
-};
-#endif /* fts5YYFALLBACK */
-
-/* The following structure represents a single element of the
-** parser's stack. Information stored includes:
-**
-** + The state number for the parser at this level of the stack.
-**
-** + The value of the token stored at this level of the stack.
-** (In other words, the "major" token.)
-**
-** + The semantic value stored at this level of the stack. This is
-** the information used by the action routines in the grammar.
-** It is sometimes called the "minor" token.
-**
-** After the "shift" half of a SHIFTREDUCE action, the stateno field
-** actually contains the reduce action for the second half of the
-** SHIFTREDUCE.
-*/
-struct fts5yyStackEntry {
- fts5YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */
- fts5YYCODETYPE major; /* The major token value. This is the code
- ** number for the token at this stack level */
- fts5YYMINORTYPE minor; /* The user-supplied minor token value. This
- ** is the value of the token */
-};
-typedef struct fts5yyStackEntry fts5yyStackEntry;
-
-/* The state of the parser is completely contained in an instance of
-** the following structure */
-struct fts5yyParser {
- int fts5yyidx; /* Index of top element in stack */
-#ifdef fts5YYTRACKMAXSTACKDEPTH
- int fts5yyidxMax; /* Maximum value of fts5yyidx */
-#endif
- int fts5yyerrcnt; /* Shifts remaining before leaving error-recovery mode */
- sqlite3Fts5ParserARG_SDECL /* A place to hold %extra_argument */
-#if fts5YYSTACKDEPTH<=0
- int fts5yystksz; /* Current size of the stack */
- fts5yyStackEntry *fts5yystack; /* The parser's stack */
-#else
- fts5yyStackEntry fts5yystack[fts5YYSTACKDEPTH]; /* The parser's stack */
-#endif
-};
-typedef struct fts5yyParser fts5yyParser;
-
-#ifndef NDEBUG
-/* #include <stdio.h> */
-static FILE *fts5yyTraceFILE = 0;
-static char *fts5yyTracePrompt = 0;
-#endif /* NDEBUG */
-
-#ifndef NDEBUG
-/*
-** Turn parser tracing on by giving a stream to which to write the trace
-** and a prompt to preface each trace message. Tracing is turned off
-** by making either argument NULL
-**
-** Inputs:
-** <ul>
-** <li> A FILE* to which trace output should be written.
-** If NULL, then tracing is turned off.
-** <li> A prefix string written at the beginning of every
-** line of trace output. If NULL, then tracing is
-** turned off.
-** </ul>
-**
-** Outputs:
-** None.
-*/
-static void sqlite3Fts5ParserTrace(FILE *TraceFILE, char *zTracePrompt){
- fts5yyTraceFILE = TraceFILE;
- fts5yyTracePrompt = zTracePrompt;
- if( fts5yyTraceFILE==0 ) fts5yyTracePrompt = 0;
- else if( fts5yyTracePrompt==0 ) fts5yyTraceFILE = 0;
-}
-#endif /* NDEBUG */
-
-#ifndef NDEBUG
-/* For tracing shifts, the names of all terminals and nonterminals
-** are required. The following table supplies these names */
-static const char *const fts5yyTokenName[] = {
- "$", "OR", "AND", "NOT",
- "TERM", "COLON", "LP", "RP",
- "LCP", "RCP", "STRING", "COMMA",
- "PLUS", "STAR", "error", "input",
- "expr", "cnearset", "exprlist", "nearset",
- "colset", "colsetlist", "nearphrases", "phrase",
- "neardist_opt", "star_opt",
-};
-#endif /* NDEBUG */
-
-#ifndef NDEBUG
-/* For tracing reduce actions, the names of all rules are required.
-*/
-static const char *const fts5yyRuleName[] = {
- /* 0 */ "input ::= expr",
- /* 1 */ "expr ::= expr AND expr",
- /* 2 */ "expr ::= expr OR expr",
- /* 3 */ "expr ::= expr NOT expr",
- /* 4 */ "expr ::= LP expr RP",
- /* 5 */ "expr ::= exprlist",
- /* 6 */ "exprlist ::= cnearset",
- /* 7 */ "exprlist ::= exprlist cnearset",
- /* 8 */ "cnearset ::= nearset",
- /* 9 */ "cnearset ::= colset COLON nearset",
- /* 10 */ "colset ::= LCP colsetlist RCP",
- /* 11 */ "colset ::= STRING",
- /* 12 */ "colsetlist ::= colsetlist STRING",
- /* 13 */ "colsetlist ::= STRING",
- /* 14 */ "nearset ::= phrase",
- /* 15 */ "nearset ::= STRING LP nearphrases neardist_opt RP",
- /* 16 */ "nearphrases ::= phrase",
- /* 17 */ "nearphrases ::= nearphrases phrase",
- /* 18 */ "neardist_opt ::=",
- /* 19 */ "neardist_opt ::= COMMA STRING",
- /* 20 */ "phrase ::= phrase PLUS STRING star_opt",
- /* 21 */ "phrase ::= STRING star_opt",
- /* 22 */ "star_opt ::= STAR",
- /* 23 */ "star_opt ::=",
-};
-#endif /* NDEBUG */
-
-
-#if fts5YYSTACKDEPTH<=0
-/*
-** Try to increase the size of the parser stack.
-*/
-static void fts5yyGrowStack(fts5yyParser *p){
- int newSize;
- fts5yyStackEntry *pNew;
-
- newSize = p->fts5yystksz*2 + 100;
- pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0]));
- if( pNew ){
- p->fts5yystack = pNew;
- p->fts5yystksz = newSize;
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sStack grows to %d entries!\n",
- fts5yyTracePrompt, p->fts5yystksz);
- }
-#endif
- }
-}
-#endif
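/* Standalone sketch, not part of the sqlite3.c hunk above: the growth
** schedule produced by the newSize = oldSize*2 + 100 formula used in
** fts5yyGrowStack(), starting from an initial stack size of zero. */
#include <stdio.h>

int main(void){
  int sz = 0;
  int i;
  for(i=1; i<=4; i++){
    sz = sz*2 + 100;                           /* same formula as fts5yyGrowStack() */
    printf("grow %d -> %d entries\n", i, sz);  /* 100, 300, 700, 1500 */
  }
  return 0;
}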
-
-/*
-** This function allocates a new parser.
-** The only argument is a pointer to a function which works like
-** malloc.
-**
-** Inputs:
-** A pointer to the function used to allocate memory.
-**
-** Outputs:
-** A pointer to a parser. This pointer is used in subsequent calls
-** to sqlite3Fts5Parser and sqlite3Fts5ParserFree.
-*/
-static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(u64)){
- fts5yyParser *pParser;
- pParser = (fts5yyParser*)(*mallocProc)( (u64)sizeof(fts5yyParser) );
- if( pParser ){
- pParser->fts5yyidx = -1;
-#ifdef fts5YYTRACKMAXSTACKDEPTH
- pParser->fts5yyidxMax = 0;
-#endif
-#if fts5YYSTACKDEPTH<=0
- pParser->fts5yystack = NULL;
- pParser->fts5yystksz = 0;
- fts5yyGrowStack(pParser);
-#endif
- }
- return pParser;
-}
-
-/* The following function deletes the value associated with a
-** symbol. The symbol can be either a terminal or nonterminal.
-** "fts5yymajor" is the symbol code, and "fts5yypminor" is a pointer to
-** the value.
-*/
-static void fts5yy_destructor(
- fts5yyParser *fts5yypParser, /* The parser */
- fts5YYCODETYPE fts5yymajor, /* Type code for object to destroy */
- fts5YYMINORTYPE *fts5yypminor /* The object to be destroyed */
-){
- sqlite3Fts5ParserARG_FETCH;
- switch( fts5yymajor ){
- /* Here are inserted the actions which take place when a
- ** terminal or non-terminal is destroyed. This can happen
- ** when the symbol is popped from the stack during a
- ** reduce or during error processing or when a parser is
- ** being destroyed before it is finished parsing.
- **
- ** Note: during a reduce, the only symbols destroyed are those
- ** which appear on the RHS of the rule, but which are not used
- ** inside the C code.
- */
- case 15: /* input */
-{
- (void)pParse;
-}
- break;
- case 16: /* expr */
- case 17: /* cnearset */
- case 18: /* exprlist */
-{
- sqlite3Fts5ParseNodeFree((fts5yypminor->fts5yy18));
-}
- break;
- case 19: /* nearset */
- case 22: /* nearphrases */
-{
- sqlite3Fts5ParseNearsetFree((fts5yypminor->fts5yy26));
-}
- break;
- case 20: /* colset */
- case 21: /* colsetlist */
-{
- sqlite3_free((fts5yypminor->fts5yy3));
-}
- break;
- case 23: /* phrase */
-{
- sqlite3Fts5ParsePhraseFree((fts5yypminor->fts5yy11));
-}
- break;
- default: break; /* If no destructor action specified: do nothing */
- }
-}
-
-/*
-** Pop the parser's stack once.
-**
-** If there is a destructor routine associated with the token which
-** is popped from the stack, then call it.
-**
-** Return the major token number for the symbol popped.
-*/
-static int fts5yy_pop_parser_stack(fts5yyParser *pParser){
- fts5YYCODETYPE fts5yymajor;
- fts5yyStackEntry *fts5yytos = &pParser->fts5yystack[pParser->fts5yyidx];
-
- /* There is no mechanism by which the parser stack can be popped below
- ** empty in SQLite. */
- assert( pParser->fts5yyidx>=0 );
-#ifndef NDEBUG
- if( fts5yyTraceFILE && pParser->fts5yyidx>=0 ){
- fprintf(fts5yyTraceFILE,"%sPopping %s\n",
- fts5yyTracePrompt,
- fts5yyTokenName[fts5yytos->major]);
- }
-#endif
- fts5yymajor = fts5yytos->major;
- fts5yy_destructor(pParser, fts5yymajor, &fts5yytos->minor);
- pParser->fts5yyidx--;
- return fts5yymajor;
-}
-
-/*
-** Deallocate and destroy a parser. Destructors are called for
-** all stack elements before shutting the parser down.
-**
-** Inputs:
-** <ul>
-** <li> A pointer to the parser. This should be a pointer
-** obtained from sqlite3Fts5ParserAlloc.
-** <li> A pointer to a function used to reclaim memory obtained
-** from malloc.
-** </ul>
-*/
-static void sqlite3Fts5ParserFree(
- void *p, /* The parser to be deleted */
- void (*freeProc)(void*) /* Function used to reclaim memory */
-){
- fts5yyParser *pParser = (fts5yyParser*)p;
- /* In SQLite, we never try to destroy a parser that was not successfully
- ** created in the first place. */
- if( NEVER(pParser==0) ) return;
- while( pParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(pParser);
-#if fts5YYSTACKDEPTH<=0
- free(pParser->fts5yystack);
-#endif
- (*freeProc)((void*)pParser);
-}
-
-/*
-** Return the peak depth of the stack for a parser.
-*/
-#ifdef fts5YYTRACKMAXSTACKDEPTH
-static int sqlite3Fts5ParserStackPeak(void *p){
- fts5yyParser *pParser = (fts5yyParser*)p;
- return pParser->fts5yyidxMax;
-}
-#endif
-
-/*
-** Find the appropriate action for a parser given the terminal
-** look-ahead token iLookAhead.
-**
-** If the look-ahead token is fts5YYNOCODE, then check to see if the action is
-** independent of the look-ahead. If it is, return the action, otherwise
-** return fts5YY_NO_ACTION.
-*/
-static int fts5yy_find_shift_action(
- fts5yyParser *pParser, /* The parser */
- fts5YYCODETYPE iLookAhead /* The look-ahead token */
-){
- int i;
- int stateno = pParser->fts5yystack[pParser->fts5yyidx].stateno;
-
- if( stateno>=fts5YY_MIN_REDUCE ) return stateno;
- assert( stateno <= fts5YY_SHIFT_COUNT );
- i = fts5yy_shift_ofst[stateno];
- if( i==fts5YY_SHIFT_USE_DFLT ) return fts5yy_default[stateno];
- assert( iLookAhead!=fts5YYNOCODE );
- i += iLookAhead;
- if( i<0 || i>=fts5YY_ACTTAB_COUNT || fts5yy_lookahead[i]!=iLookAhead ){
- if( iLookAhead>0 ){
-#ifdef fts5YYFALLBACK
- fts5YYCODETYPE iFallback; /* Fallback token */
- if( iLookAhead<sizeof(fts5yyFallback)/sizeof(fts5yyFallback[0])
- && (iFallback = fts5yyFallback[iLookAhead])!=0 ){
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE, "%sFALLBACK %s => %s\n",
- fts5yyTracePrompt, fts5yyTokenName[iLookAhead], fts5yyTokenName[iFallback]);
- }
-#endif
- return fts5yy_find_shift_action(pParser, iFallback);
- }
-#endif
-#ifdef fts5YYWILDCARD
- {
- int j = i - iLookAhead + fts5YYWILDCARD;
- if(
-#if fts5YY_SHIFT_MIN+fts5YYWILDCARD<0
- j>=0 &&
-#endif
-#if fts5YY_SHIFT_MAX+fts5YYWILDCARD>=fts5YY_ACTTAB_COUNT
- j<fts5YY_ACTTAB_COUNT &&
-#endif
- fts5yy_lookahead[j]==fts5YYWILDCARD
- ){
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE, "%sWILDCARD %s => %s\n",
- fts5yyTracePrompt, fts5yyTokenName[iLookAhead], fts5yyTokenName[fts5YYWILDCARD]);
- }
-#endif /* NDEBUG */
- return fts5yy_action[j];
- }
- }
-#endif /* fts5YYWILDCARD */
- }
- return fts5yy_default[stateno];
- }else{
- return fts5yy_action[i];
- }
-}
-
-/*
-** Find the appropriate action for a parser given the non-terminal
-** look-ahead token iLookAhead.
-**
-** If the look-ahead token is fts5YYNOCODE, then check to see if the action is
-** independent of the look-ahead. If it is, return the action, otherwise
-** return fts5YY_NO_ACTION.
-*/
-static int fts5yy_find_reduce_action(
- int stateno, /* Current state number */
- fts5YYCODETYPE iLookAhead /* The look-ahead token */
-){
- int i;
-#ifdef fts5YYERRORSYMBOL
- if( stateno>fts5YY_REDUCE_COUNT ){
- return fts5yy_default[stateno];
- }
-#else
- assert( stateno<=fts5YY_REDUCE_COUNT );
-#endif
- i = fts5yy_reduce_ofst[stateno];
- assert( i!=fts5YY_REDUCE_USE_DFLT );
- assert( iLookAhead!=fts5YYNOCODE );
- i += iLookAhead;
-#ifdef fts5YYERRORSYMBOL
- if( i<0 || i>=fts5YY_ACTTAB_COUNT || fts5yy_lookahead[i]!=iLookAhead ){
- return fts5yy_default[stateno];
- }
-#else
- assert( i>=0 && i<fts5YY_ACTTAB_COUNT );
- assert( fts5yy_lookahead[i]==iLookAhead );
-#endif
- return fts5yy_action[i];
-}
-
-/*
-** The following routine is called if the stack overflows.
-*/
-static void fts5yyStackOverflow(fts5yyParser *fts5yypParser, fts5YYMINORTYPE *fts5yypMinor){
- sqlite3Fts5ParserARG_FETCH;
- fts5yypParser->fts5yyidx--;
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sStack Overflow!\n",fts5yyTracePrompt);
- }
-#endif
- while( fts5yypParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(fts5yypParser);
- /* Here code is inserted which will execute if the parser
- ** stack ever overflows */
-
- assert( 0 );
- sqlite3Fts5ParserARG_STORE; /* Suppress warning about unused %extra_argument var */
-}
-
-/*
-** Print tracing information for a SHIFT action
-*/
-#ifndef NDEBUG
-static void fts5yyTraceShift(fts5yyParser *fts5yypParser, int fts5yyNewState){
- if( fts5yyTraceFILE ){
- int i;
- if( fts5yyNewState<fts5YYNSTATE ){
- fprintf(fts5yyTraceFILE,"%sShift %d\n",fts5yyTracePrompt,fts5yyNewState);
- fprintf(fts5yyTraceFILE,"%sStack:",fts5yyTracePrompt);
- for(i=1; i<=fts5yypParser->fts5yyidx; i++)
- fprintf(fts5yyTraceFILE," %s",fts5yyTokenName[fts5yypParser->fts5yystack[i].major]);
- fprintf(fts5yyTraceFILE,"\n");
- }else{
- fprintf(fts5yyTraceFILE,"%sShift *\n",fts5yyTracePrompt);
- }
- }
-}
-#else
-# define fts5yyTraceShift(X,Y)
-#endif
-
-/*
-** Perform a shift action.
-*/
-static void fts5yy_shift(
- fts5yyParser *fts5yypParser, /* The parser to be shifted */
- int fts5yyNewState, /* The new state to shift in */
- int fts5yyMajor, /* The major token to shift in */
- fts5YYMINORTYPE *fts5yypMinor /* Pointer to the minor token to shift in */
-){
- fts5yyStackEntry *fts5yytos;
- fts5yypParser->fts5yyidx++;
-#ifdef fts5YYTRACKMAXSTACKDEPTH
- if( fts5yypParser->fts5yyidx>fts5yypParser->fts5yyidxMax ){
- fts5yypParser->fts5yyidxMax = fts5yypParser->fts5yyidx;
- }
-#endif
-#if fts5YYSTACKDEPTH>0
- if( fts5yypParser->fts5yyidx>=fts5YYSTACKDEPTH ){
- fts5yyStackOverflow(fts5yypParser, fts5yypMinor);
- return;
- }
-#else
- if( fts5yypParser->fts5yyidx>=fts5yypParser->fts5yystksz ){
- fts5yyGrowStack(fts5yypParser);
- if( fts5yypParser->fts5yyidx>=fts5yypParser->fts5yystksz ){
- fts5yyStackOverflow(fts5yypParser, fts5yypMinor);
- return;
- }
- }
-#endif
- fts5yytos = &fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx];
- fts5yytos->stateno = (fts5YYACTIONTYPE)fts5yyNewState;
- fts5yytos->major = (fts5YYCODETYPE)fts5yyMajor;
- fts5yytos->minor = *fts5yypMinor;
- fts5yyTraceShift(fts5yypParser, fts5yyNewState);
-}
-
-/* The following table contains information about every rule that
-** is used during the reduce.
-*/
-static const struct {
- fts5YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
- unsigned char nrhs; /* Number of right-hand side symbols in the rule */
-} fts5yyRuleInfo[] = {
- { 15, 1 },
- { 16, 3 },
- { 16, 3 },
- { 16, 3 },
- { 16, 3 },
- { 16, 1 },
- { 18, 1 },
- { 18, 2 },
- { 17, 1 },
- { 17, 3 },
- { 20, 3 },
- { 20, 1 },
- { 21, 2 },
- { 21, 1 },
- { 19, 1 },
- { 19, 5 },
- { 22, 1 },
- { 22, 2 },
- { 24, 0 },
- { 24, 2 },
- { 23, 4 },
- { 23, 2 },
- { 25, 1 },
- { 25, 0 },
-};
-
-static void fts5yy_accept(fts5yyParser*); /* Forward Declaration */
-
-/*
-** Perform a reduce action and the shift that must immediately
-** follow the reduce.
-*/
-static void fts5yy_reduce(
- fts5yyParser *fts5yypParser, /* The parser */
- int fts5yyruleno /* Number of the rule by which to reduce */
-){
- int fts5yygoto; /* The next state */
- int fts5yyact; /* The next action */
- fts5YYMINORTYPE fts5yygotominor; /* The LHS of the rule reduced */
- fts5yyStackEntry *fts5yymsp; /* The top of the parser's stack */
- int fts5yysize; /* Amount to pop the stack */
- sqlite3Fts5ParserARG_FETCH;
- fts5yymsp = &fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx];
-#ifndef NDEBUG
- if( fts5yyTraceFILE && fts5yyruleno>=0
- && fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ){
- fts5yysize = fts5yyRuleInfo[fts5yyruleno].nrhs;
- fprintf(fts5yyTraceFILE, "%sReduce [%s] -> state %d.\n", fts5yyTracePrompt,
- fts5yyRuleName[fts5yyruleno], fts5yymsp[-fts5yysize].stateno);
- }
-#endif /* NDEBUG */
-
- /* Silence complaints from purify about fts5yygotominor being uninitialized
- ** in some cases when it is copied into the stack after the following
- ** switch. fts5yygotominor is uninitialized when a rule reduces that does
- ** not set the value of its left-hand side nonterminal. Leaving the
- ** value of the nonterminal uninitialized is utterly harmless as long
- ** as the value is never used. So really the only thing this code
- ** accomplishes is to quieten purify.
- **
- ** 2007-01-16: The wireshark project (www.wireshark.org) reports that
- ** without this code, their parser segfaults. I'm not sure what their
- ** parser is doing to make this happen. This is the second bug report
- ** from wireshark this week. Clearly they are stressing Lemon in ways
- ** that it has not been previously stressed... (SQLite ticket #2172)
- */
- /*memset(&fts5yygotominor, 0, sizeof(fts5yygotominor));*/
- fts5yygotominor = fts5yyzerominor;
-
-
- switch( fts5yyruleno ){
- /* Beginning here are the reduction cases. A typical example
- ** follows:
- ** case 0:
- ** #line <lineno> <grammarfile>
- ** { ... } // User supplied code
- ** #line <lineno> <thisfile>
- ** break;
- */
- case 0: /* input ::= expr */
-{ sqlite3Fts5ParseFinished(pParse, fts5yymsp[0].minor.fts5yy18); }
- break;
- case 1: /* expr ::= expr AND expr */
-{
- fts5yygotominor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_AND, fts5yymsp[-2].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
-}
- break;
- case 2: /* expr ::= expr OR expr */
-{
- fts5yygotominor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_OR, fts5yymsp[-2].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
-}
- break;
- case 3: /* expr ::= expr NOT expr */
-{
- fts5yygotominor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_NOT, fts5yymsp[-2].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
-}
- break;
- case 4: /* expr ::= LP expr RP */
-{fts5yygotominor.fts5yy18 = fts5yymsp[-1].minor.fts5yy18;}
- break;
- case 5: /* expr ::= exprlist */
- case 6: /* exprlist ::= cnearset */ fts5yytestcase(fts5yyruleno==6);
-{fts5yygotominor.fts5yy18 = fts5yymsp[0].minor.fts5yy18;}
- break;
- case 7: /* exprlist ::= exprlist cnearset */
-{
- fts5yygotominor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_AND, fts5yymsp[-1].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
-}
- break;
- case 8: /* cnearset ::= nearset */
-{
- fts5yygotominor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy26);
-}
- break;
- case 9: /* cnearset ::= colset COLON nearset */
-{
- sqlite3Fts5ParseSetColset(pParse, fts5yymsp[0].minor.fts5yy26, fts5yymsp[-2].minor.fts5yy3);
- fts5yygotominor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy26);
-}
- break;
- case 10: /* colset ::= LCP colsetlist RCP */
-{ fts5yygotominor.fts5yy3 = fts5yymsp[-1].minor.fts5yy3; }
- break;
- case 11: /* colset ::= STRING */
-{
- fts5yygotominor.fts5yy3 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
-}
- break;
- case 12: /* colsetlist ::= colsetlist STRING */
-{
- fts5yygotominor.fts5yy3 = sqlite3Fts5ParseColset(pParse, fts5yymsp[-1].minor.fts5yy3, &fts5yymsp[0].minor.fts5yy0); }
- break;
- case 13: /* colsetlist ::= STRING */
-{
- fts5yygotominor.fts5yy3 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
-}
- break;
- case 14: /* nearset ::= phrase */
-{ fts5yygotominor.fts5yy26 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy11); }
- break;
- case 15: /* nearset ::= STRING LP nearphrases neardist_opt RP */
-{
- sqlite3Fts5ParseNear(pParse, &fts5yymsp[-4].minor.fts5yy0);
- sqlite3Fts5ParseSetDistance(pParse, fts5yymsp[-2].minor.fts5yy26, &fts5yymsp[-1].minor.fts5yy0);
- fts5yygotominor.fts5yy26 = fts5yymsp[-2].minor.fts5yy26;
-}
- break;
- case 16: /* nearphrases ::= phrase */
-{
- fts5yygotominor.fts5yy26 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy11);
-}
- break;
- case 17: /* nearphrases ::= nearphrases phrase */
-{
- fts5yygotominor.fts5yy26 = sqlite3Fts5ParseNearset(pParse, fts5yymsp[-1].minor.fts5yy26, fts5yymsp[0].minor.fts5yy11);
-}
- break;
- case 18: /* neardist_opt ::= */
-{ fts5yygotominor.fts5yy0.p = 0; fts5yygotominor.fts5yy0.n = 0; }
- break;
- case 19: /* neardist_opt ::= COMMA STRING */
-{ fts5yygotominor.fts5yy0 = fts5yymsp[0].minor.fts5yy0; }
- break;
- case 20: /* phrase ::= phrase PLUS STRING star_opt */
-{
- fts5yygotominor.fts5yy11 = sqlite3Fts5ParseTerm(pParse, fts5yymsp[-3].minor.fts5yy11, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy20);
-}
- break;
- case 21: /* phrase ::= STRING star_opt */
-{
- fts5yygotominor.fts5yy11 = sqlite3Fts5ParseTerm(pParse, 0, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy20);
-}
- break;
- case 22: /* star_opt ::= STAR */
-{ fts5yygotominor.fts5yy20 = 1; }
- break;
- case 23: /* star_opt ::= */
-{ fts5yygotominor.fts5yy20 = 0; }
- break;
- default:
- break;
- };
- assert( fts5yyruleno>=0 && fts5yyruleno<sizeof(fts5yyRuleInfo)/sizeof(fts5yyRuleInfo[0]) );
- fts5yygoto = fts5yyRuleInfo[fts5yyruleno].lhs;
- fts5yysize = fts5yyRuleInfo[fts5yyruleno].nrhs;
- fts5yypParser->fts5yyidx -= fts5yysize;
- fts5yyact = fts5yy_find_reduce_action(fts5yymsp[-fts5yysize].stateno,(fts5YYCODETYPE)fts5yygoto);
- if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){
- if( fts5yyact>fts5YY_MAX_SHIFT ) fts5yyact += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE;
- /* If the reduce action popped at least
- ** one element off the stack, then we can push the new element back
- ** onto the stack here, and skip the stack overflow test in fts5yy_shift().
- ** That gives a significant speed improvement. */
- if( fts5yysize ){
- fts5yypParser->fts5yyidx++;
- fts5yymsp -= fts5yysize-1;
- fts5yymsp->stateno = (fts5YYACTIONTYPE)fts5yyact;
- fts5yymsp->major = (fts5YYCODETYPE)fts5yygoto;
- fts5yymsp->minor = fts5yygotominor;
- fts5yyTraceShift(fts5yypParser, fts5yyact);
- }else{
- fts5yy_shift(fts5yypParser,fts5yyact,fts5yygoto,&fts5yygotominor);
- }
- }else{
- assert( fts5yyact == fts5YY_ACCEPT_ACTION );
- fts5yy_accept(fts5yypParser);
- }
-}
-
-/*
-** The following code executes when the parse fails
-*/
-#ifndef fts5YYNOERRORRECOVERY
-static void fts5yy_parse_failed(
- fts5yyParser *fts5yypParser /* The parser */
-){
- sqlite3Fts5ParserARG_FETCH;
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sFail!\n",fts5yyTracePrompt);
- }
-#endif
- while( fts5yypParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(fts5yypParser);
- /* Here code is inserted which will be executed whenever the
- ** parser fails */
- sqlite3Fts5ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */
-}
-#endif /* fts5YYNOERRORRECOVERY */
-
-/*
-** The following code executes when a syntax error first occurs.
-*/
-static void fts5yy_syntax_error(
- fts5yyParser *fts5yypParser, /* The parser */
- int fts5yymajor, /* The major type of the error token */
- fts5YYMINORTYPE fts5yyminor /* The minor type of the error token */
-){
- sqlite3Fts5ParserARG_FETCH;
-#define FTS5TOKEN (fts5yyminor.fts5yy0)
-
- sqlite3Fts5ParseError(
- pParse, "fts5: syntax error near \"%.*s\"",FTS5TOKEN.n,FTS5TOKEN.p
- );
- sqlite3Fts5ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */
-}
-
-/*
-** The following is executed when the parser accepts
-*/
-static void fts5yy_accept(
- fts5yyParser *fts5yypParser /* The parser */
-){
- sqlite3Fts5ParserARG_FETCH;
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sAccept!\n",fts5yyTracePrompt);
- }
-#endif
- while( fts5yypParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(fts5yypParser);
- /* Here code is inserted which will be executed whenever the
- ** parser accepts */
- sqlite3Fts5ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */
-}
-
-/* The main parser program.
-** The first argument is a pointer to a structure obtained from
-** "sqlite3Fts5ParserAlloc" which describes the current state of the parser.
-** The second argument is the major token number. The third is
-** the minor token. The fourth optional argument is whatever the
-** user wants (and specified in the grammar) and is available for
-** use by the action routines.
-**
-** Inputs:
-** <ul>
-** <li> A pointer to the parser (an opaque structure.)
-** <li> The major token number.
-** <li> The minor token number.
-** <li> An optional argument of a grammar-specified type.
-** </ul>
-**
-** Outputs:
-** None.
-*/
-static void sqlite3Fts5Parser(
- void *fts5yyp, /* The parser */
- int fts5yymajor, /* The major token code number */
- sqlite3Fts5ParserFTS5TOKENTYPE fts5yyminor /* The value for the token */
- sqlite3Fts5ParserARG_PDECL /* Optional %extra_argument parameter */
-){
- fts5YYMINORTYPE fts5yyminorunion;
- int fts5yyact; /* The parser action. */
-#if !defined(fts5YYERRORSYMBOL) && !defined(fts5YYNOERRORRECOVERY)
- int fts5yyendofinput; /* True if we are at the end of input */
-#endif
-#ifdef fts5YYERRORSYMBOL
- int fts5yyerrorhit = 0; /* True if fts5yymajor has invoked an error */
-#endif
- fts5yyParser *fts5yypParser; /* The parser */
-
- /* (re)initialize the parser, if necessary */
- fts5yypParser = (fts5yyParser*)fts5yyp;
- if( fts5yypParser->fts5yyidx<0 ){
-#if fts5YYSTACKDEPTH<=0
- if( fts5yypParser->fts5yystksz <=0 ){
- /*memset(&fts5yyminorunion, 0, sizeof(fts5yyminorunion));*/
- fts5yyminorunion = fts5yyzerominor;
- fts5yyStackOverflow(fts5yypParser, &fts5yyminorunion);
- return;
- }
-#endif
- fts5yypParser->fts5yyidx = 0;
- fts5yypParser->fts5yyerrcnt = -1;
- fts5yypParser->fts5yystack[0].stateno = 0;
- fts5yypParser->fts5yystack[0].major = 0;
- }
- fts5yyminorunion.fts5yy0 = fts5yyminor;
-#if !defined(fts5YYERRORSYMBOL) && !defined(fts5YYNOERRORRECOVERY)
- fts5yyendofinput = (fts5yymajor==0);
-#endif
- sqlite3Fts5ParserARG_STORE;
-
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sInput %s\n",fts5yyTracePrompt,fts5yyTokenName[fts5yymajor]);
- }
-#endif
-
- do{
- fts5yyact = fts5yy_find_shift_action(fts5yypParser,(fts5YYCODETYPE)fts5yymajor);
- if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){
- if( fts5yyact > fts5YY_MAX_SHIFT ) fts5yyact += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE;
- fts5yy_shift(fts5yypParser,fts5yyact,fts5yymajor,&fts5yyminorunion);
- fts5yypParser->fts5yyerrcnt--;
- fts5yymajor = fts5YYNOCODE;
- }else if( fts5yyact <= fts5YY_MAX_REDUCE ){
- fts5yy_reduce(fts5yypParser,fts5yyact-fts5YY_MIN_REDUCE);
- }else{
- assert( fts5yyact == fts5YY_ERROR_ACTION );
-#ifdef fts5YYERRORSYMBOL
- int fts5yymx;
-#endif
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sSyntax Error!\n",fts5yyTracePrompt);
- }
-#endif
-#ifdef fts5YYERRORSYMBOL
- /* A syntax error has occurred.
- ** The response to an error depends upon whether or not the
- ** grammar defines an error token "ERROR".
- **
- ** This is what we do if the grammar does define ERROR:
- **
- ** * Call the %syntax_error function.
- **
- ** * Begin popping the stack until we enter a state where
- ** it is legal to shift the error symbol, then shift
- ** the error symbol.
- **
- ** * Set the error count to three.
- **
- ** * Begin accepting and shifting new tokens. No new error
- ** processing will occur until three tokens have been
- ** shifted successfully.
- **
- */
- if( fts5yypParser->fts5yyerrcnt<0 ){
- fts5yy_syntax_error(fts5yypParser,fts5yymajor,fts5yyminorunion);
- }
- fts5yymx = fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx].major;
- if( fts5yymx==fts5YYERRORSYMBOL || fts5yyerrorhit ){
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sDiscard input token %s\n",
- fts5yyTracePrompt,fts5yyTokenName[fts5yymajor]);
- }
-#endif
- fts5yy_destructor(fts5yypParser, (fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
- fts5yymajor = fts5YYNOCODE;
- }else{
- while(
- fts5yypParser->fts5yyidx >= 0 &&
- fts5yymx != fts5YYERRORSYMBOL &&
- (fts5yyact = fts5yy_find_reduce_action(
- fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx].stateno,
- fts5YYERRORSYMBOL)) >= fts5YY_MIN_REDUCE
- ){
- fts5yy_pop_parser_stack(fts5yypParser);
- }
- if( fts5yypParser->fts5yyidx < 0 || fts5yymajor==0 ){
- fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
- fts5yy_parse_failed(fts5yypParser);
- fts5yymajor = fts5YYNOCODE;
- }else if( fts5yymx!=fts5YYERRORSYMBOL ){
- fts5YYMINORTYPE u2;
- u2.fts5YYERRSYMDT = 0;
- fts5yy_shift(fts5yypParser,fts5yyact,fts5YYERRORSYMBOL,&u2);
- }
- }
- fts5yypParser->fts5yyerrcnt = 3;
- fts5yyerrorhit = 1;
-#elif defined(fts5YYNOERRORRECOVERY)
- /* If the fts5YYNOERRORRECOVERY macro is defined, then do not attempt to
- ** do any kind of error recovery. Instead, simply invoke the syntax
- ** error routine and continue going as if nothing had happened.
- **
- ** Applications can set this macro (for example inside %include) if
- ** they intend to abandon the parse upon the first syntax error seen.
- */
- fts5yy_syntax_error(fts5yypParser,fts5yymajor,fts5yyminorunion);
- fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
- fts5yymajor = fts5YYNOCODE;
-
-#else /* fts5YYERRORSYMBOL is not defined */
- /* This is what we do if the grammar does not define ERROR:
- **
- ** * Report an error message, and throw away the input token.
- **
- ** * If the input token is $, then fail the parse.
- **
- ** As before, subsequent error messages are suppressed until
- ** three input tokens have been successfully shifted.
- */
- if( fts5yypParser->fts5yyerrcnt<=0 ){
- fts5yy_syntax_error(fts5yypParser,fts5yymajor,fts5yyminorunion);
- }
- fts5yypParser->fts5yyerrcnt = 3;
- fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion);
- if( fts5yyendofinput ){
- fts5yy_parse_failed(fts5yypParser);
- }
- fts5yymajor = fts5YYNOCODE;
-#endif
- }
- }while( fts5yymajor!=fts5YYNOCODE && fts5yypParser->fts5yyidx>=0 );
-#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE,"%sReturn\n",fts5yyTracePrompt);
- }
-#endif
- return;
-}
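/* Standalone sketch, not part of the sqlite3.c hunk above: the calling
** convention of a Lemon-generated parser such as sqlite3Fts5Parser(). The
** real caller obtains a handle from the Alloc routine, feeds one
** (major, minor) token pair per call, and finally passes major==0 to signal
** end of input. The parser type and token codes below are stand-ins used
** purely for illustration. */
#include <stdio.h>

typedef struct ToyParser { int nTok; } ToyParser;

static void ToyParse(ToyParser *p, int major, const char *minor){
  if( major==0 ){
    printf("end of input after %d tokens\n", p->nTok);
  }else{
    p->nTok++;
    printf("token code %d: \"%s\"\n", major, minor);
  }
}

int main(void){
  ToyParser parser = {0};
  ToyParse(&parser, 10, "hello");   /* e.g. a STRING token */
  ToyParse(&parser, 13, "*");       /* e.g. a STAR token */
  ToyParse(&parser, 0, "");         /* major==0 marks end of input */
  return 0;
}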
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-*/
-
-
-#include <math.h> /* amalgamator: keep */
-
-/*
-** Object used to iterate through all "coalesced phrase instances" in
-** a single column of the current row. If the phrase instances in the
-** column being considered do not overlap, this object simply iterates
-** through them. Or, if they do overlap (share one or more tokens in
-** common), each set of overlapping instances is treated as a single
-** match. See documentation for the highlight() auxiliary function for
-** details.
-**
-** Usage is:
-**
-** for(rc = fts5CInstIterInit(pApi, pFts, iCol, &iter);
-** rc==SQLITE_OK && iter.iStart>=0;
-** rc = fts5CInstIterNext(&iter)
-** ){
-** printf("instance starts at %d, ends at %d\n", iter.iStart, iter.iEnd);
-** }
-**
-*/
-typedef struct CInstIter CInstIter;
-struct CInstIter {
- const Fts5ExtensionApi *pApi; /* API offered by current FTS version */
- Fts5Context *pFts; /* First arg to pass to pApi functions */
- int iCol; /* Column to search */
- int iInst; /* Next phrase instance index */
- int nInst; /* Total number of phrase instances */
-
- /* Output variables */
- int iStart; /* First token in coalesced phrase instance */
- int iEnd; /* Last token in coalesced phrase instance */
-};
-
-/*
-** Advance the iterator to the next coalesced phrase instance. Return
-** an SQLite error code if an error occurs, or SQLITE_OK otherwise.
-*/
-static int fts5CInstIterNext(CInstIter *pIter){
- int rc = SQLITE_OK;
- pIter->iStart = -1;
- pIter->iEnd = -1;
-
- while( rc==SQLITE_OK && pIter->iInst<pIter->nInst ){
- int ip; int ic; int io;
- rc = pIter->pApi->xInst(pIter->pFts, pIter->iInst, &ip, &ic, &io);
- if( rc==SQLITE_OK ){
- if( ic==pIter->iCol ){
- int iEnd = io - 1 + pIter->pApi->xPhraseSize(pIter->pFts, ip);
- if( pIter->iStart<0 ){
- pIter->iStart = io;
- pIter->iEnd = iEnd;
- }else if( io<=pIter->iEnd ){
- if( iEnd>pIter->iEnd ) pIter->iEnd = iEnd;
- }else{
- break;
- }
- }
- pIter->iInst++;
- }
- }
-
- return rc;
-}
-
-/*
-** Initialize the iterator object indicated by the final parameter to
-** iterate through coalesced phrase instances in column iCol.
-*/
-static int fts5CInstIterInit(
- const Fts5ExtensionApi *pApi,
- Fts5Context *pFts,
- int iCol,
- CInstIter *pIter
-){
- int rc;
-
- memset(pIter, 0, sizeof(CInstIter));
- pIter->pApi = pApi;
- pIter->pFts = pFts;
- pIter->iCol = iCol;
- rc = pApi->xInstCount(pFts, &pIter->nInst);
-
- if( rc==SQLITE_OK ){
- rc = fts5CInstIterNext(pIter);
- }
-
- return rc;
-}
-
-
-
-/*************************************************************************
-** Start of highlight() implementation.
-*/
-typedef struct HighlightContext HighlightContext;
-struct HighlightContext {
- CInstIter iter; /* Coalesced Instance Iterator */
- int iPos; /* Current token offset in zIn[] */
- int iRangeStart; /* First token to include */
- int iRangeEnd; /* If non-zero, last token to include */
- const char *zOpen; /* Opening highlight */
- const char *zClose; /* Closing highlight */
- const char *zIn; /* Input text */
- int nIn; /* Size of input text in bytes */
- int iOff; /* Current offset within zIn[] */
- char *zOut; /* Output value */
-};
-
-/*
-** Append text to the HighlightContext output string - p->zOut. Argument
-** z points to a buffer containing n bytes of text to append. If n is
-** negative, everything up until the first '\0' is appended to the output.
-**
-** If *pRc is set to any value other than SQLITE_OK when this function is
-** called, it is a no-op. If an error (i.e. an OOM condition) is encountered,
-** *pRc is set to an error code before returning.
-*/
-static void fts5HighlightAppend(
- int *pRc,
- HighlightContext *p,
- const char *z, int n
-){
- if( *pRc==SQLITE_OK ){
- if( n<0 ) n = strlen(z);
- p->zOut = sqlite3_mprintf("%z%.*s", p->zOut, n, z);
- if( p->zOut==0 ) *pRc = SQLITE_NOMEM;
- }
-}
-
-/*
-** Tokenizer callback used by implementation of highlight() function.
-*/
-static int fts5HighlightCb(
- void *pContext, /* Pointer to HighlightContext object */
- int tflags, /* Mask of FTS5_TOKEN_* flags */
- const char *pToken, /* Buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStartOff, /* Start offset of token */
- int iEndOff /* End offset of token */
-){
- HighlightContext *p = (HighlightContext*)pContext;
- int rc = SQLITE_OK;
- int iPos;
-
- if( tflags & FTS5_TOKEN_COLOCATED ) return SQLITE_OK;
- iPos = p->iPos++;
-
- if( p->iRangeEnd>0 ){
- if( iPos<p->iRangeStart || iPos>p->iRangeEnd ) return SQLITE_OK;
- if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff;
- }
-
- if( iPos==p->iter.iStart ){
- fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff);
- fts5HighlightAppend(&rc, p, p->zOpen, -1);
- p->iOff = iStartOff;
- }
-
- if( iPos==p->iter.iEnd ){
- if( p->iRangeEnd && p->iter.iStart<p->iRangeStart ){
- fts5HighlightAppend(&rc, p, p->zOpen, -1);
- }
- fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
- fts5HighlightAppend(&rc, p, p->zClose, -1);
- p->iOff = iEndOff;
- if( rc==SQLITE_OK ){
- rc = fts5CInstIterNext(&p->iter);
- }
- }
-
- if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){
- fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
- p->iOff = iEndOff;
- if( iPos<p->iter.iEnd ){
- fts5HighlightAppend(&rc, p, p->zClose, -1);
- }
- }
-
- return rc;
-}
-
-/*
-** Implementation of highlight() function.
-*/
-static void fts5HighlightFunction(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-){
- HighlightContext ctx;
- int rc;
- int iCol;
-
- if( nVal!=3 ){
- const char *zErr = "wrong number of arguments to function highlight()";
- sqlite3_result_error(pCtx, zErr, -1);
- return;
- }
-
- iCol = sqlite3_value_int(apVal[0]);
- memset(&ctx, 0, sizeof(HighlightContext));
- ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]);
- ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
- rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn);
-
- if( ctx.zIn ){
- if( rc==SQLITE_OK ){
- rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter);
- }
-
- if( rc==SQLITE_OK ){
- rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
- }
- fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
-
- if( rc==SQLITE_OK ){
- sqlite3_result_text(pCtx, (const char*)ctx.zOut, -1, SQLITE_TRANSIENT);
- }
- sqlite3_free(ctx.zOut);
- }
- if( rc!=SQLITE_OK ){
- sqlite3_result_error_code(pCtx, rc);
- }
-}
-/*
-** End of highlight() implementation.
-**************************************************************************/
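/* Standalone sketch, not part of the sqlite3.c hunk above: how the
** highlight() auxiliary function implemented by fts5HighlightFunction() is
** used from SQL. The table and column names ("doc", "body") are invented for
** this example, and the build must include FTS5. Error checking is omitted. */
#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *pUnused, int nCol, char **azVal, char **azCol){
  (void)pUnused; (void)nCol; (void)azCol;
  printf("%s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE VIRTUAL TABLE doc USING fts5(body);", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO doc VALUES('the quick brown fox');", 0, 0, 0);
  /* Wrap every match for 'quick' in [ and ]. Column index 0 refers to "body". */
  sqlite3_exec(db,
      "SELECT highlight(doc, 0, '[', ']') FROM doc WHERE doc MATCH 'quick';",
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}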
-
-/*
-** Implementation of snippet() function.
-*/
-static void fts5SnippetFunction(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-){
- HighlightContext ctx;
- int rc = SQLITE_OK; /* Return code */
- int iCol; /* 1st argument to snippet() */
- const char *zEllips; /* 4th argument to snippet() */
- int nToken; /* 5th argument to snippet() */
- int nInst = 0; /* Number of instance matches this row */
- int i; /* Used to iterate through instances */
- int nPhrase; /* Number of phrases in query */
- unsigned char *aSeen; /* Array of "seen instance" flags */
- int iBestCol; /* Column containing best snippet */
- int iBestStart = 0; /* First token of best snippet */
- int iBestLast; /* Last token of best snippet */
- int nBestScore = 0; /* Score of best snippet */
- int nColSize = 0; /* Total size of iBestCol in tokens */
-
- if( nVal!=5 ){
- const char *zErr = "wrong number of arguments to function snippet()";
- sqlite3_result_error(pCtx, zErr, -1);
- return;
- }
-
- memset(&ctx, 0, sizeof(HighlightContext));
- iCol = sqlite3_value_int(apVal[0]);
- ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]);
- ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
- zEllips = (const char*)sqlite3_value_text(apVal[3]);
- nToken = sqlite3_value_int(apVal[4]);
- iBestLast = nToken-1;
-
- iBestCol = (iCol>=0 ? iCol : 0);
- nPhrase = pApi->xPhraseCount(pFts);
- aSeen = sqlite3_malloc(nPhrase);
- if( aSeen==0 ){
- rc = SQLITE_NOMEM;
- }
-
- if( rc==SQLITE_OK ){
- rc = pApi->xInstCount(pFts, &nInst);
- }
- for(i=0; rc==SQLITE_OK && i<nInst; i++){
- int ip, iSnippetCol, iStart;
- memset(aSeen, 0, nPhrase);
- rc = pApi->xInst(pFts, i, &ip, &iSnippetCol, &iStart);
- if( rc==SQLITE_OK && (iCol<0 || iSnippetCol==iCol) ){
- int nScore = 1000;
- int iLast = iStart - 1 + pApi->xPhraseSize(pFts, ip);
- int j;
- aSeen[ip] = 1;
-
- for(j=i+1; rc==SQLITE_OK && j<nInst; j++){
- int ic; int io; int iFinal;
- rc = pApi->xInst(pFts, j, &ip, &ic, &io);
- iFinal = io + pApi->xPhraseSize(pFts, ip) - 1;
- if( rc==SQLITE_OK && ic==iSnippetCol && iLast<iStart+nToken ){
- nScore += aSeen[ip] ? 1000 : 1;
- aSeen[ip] = 1;
- if( iFinal>iLast ) iLast = iFinal;
- }
- }
-
- if( rc==SQLITE_OK && nScore>nBestScore ){
- iBestCol = iSnippetCol;
- iBestStart = iStart;
- iBestLast = iLast;
- nBestScore = nScore;
- }
- }
- }
-
- if( rc==SQLITE_OK ){
- rc = pApi->xColumnSize(pFts, iBestCol, &nColSize);
- }
- if( rc==SQLITE_OK ){
- rc = pApi->xColumnText(pFts, iBestCol, &ctx.zIn, &ctx.nIn);
- }
- if( ctx.zIn ){
- if( rc==SQLITE_OK ){
- rc = fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter);
- }
-
- if( (iBestStart+nToken-1)>iBestLast ){
- iBestStart -= (iBestStart+nToken-1-iBestLast) / 2;
- }
- if( iBestStart+nToken>nColSize ){
- iBestStart = nColSize - nToken;
- }
- if( iBestStart<0 ) iBestStart = 0;
-
- ctx.iRangeStart = iBestStart;
- ctx.iRangeEnd = iBestStart + nToken - 1;
-
- if( iBestStart>0 ){
- fts5HighlightAppend(&rc, &ctx, zEllips, -1);
- }
- if( rc==SQLITE_OK ){
- rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
- }
- if( ctx.iRangeEnd>=(nColSize-1) ){
- fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
- }else{
- fts5HighlightAppend(&rc, &ctx, zEllips, -1);
- }
-
- if( rc==SQLITE_OK ){
- sqlite3_result_text(pCtx, (const char*)ctx.zOut, -1, SQLITE_TRANSIENT);
- }else{
- sqlite3_result_error_code(pCtx, rc);
- }
- sqlite3_free(ctx.zOut);
- }
- sqlite3_free(aSeen);
-}
-
-/************************************************************************/
-
-/*
-** The first time the bm25() function is called for a query, an instance
-** of the following structure is allocated and populated.
-*/
-typedef struct Fts5Bm25Data Fts5Bm25Data;
-struct Fts5Bm25Data {
- int nPhrase; /* Number of phrases in query */
- double avgdl; /* Average number of tokens in each row */
- double *aIDF; /* IDF for each phrase */
- double *aFreq; /* Array used to calculate phrase freq. */
-};
-
-/*
-** Callback used by fts5Bm25GetData() to count the number of rows in the
-** table matched by each individual phrase within the query.
-*/
-static int fts5CountCb(
- const Fts5ExtensionApi *pApi,
- Fts5Context *pFts,
- void *pUserData /* Pointer to sqlite3_int64 variable */
-){
- sqlite3_int64 *pn = (sqlite3_int64*)pUserData;
- (*pn)++;
- return SQLITE_OK;
-}
-
-/*
-** Set *ppData to point to the Fts5Bm25Data object for the current query.
-** If the object has not already been allocated, allocate and populate it
-** now.
-*/
-static int fts5Bm25GetData(
- const Fts5ExtensionApi *pApi,
- Fts5Context *pFts,
- Fts5Bm25Data **ppData /* OUT: bm25-data object for this query */
-){
- int rc = SQLITE_OK; /* Return code */
- Fts5Bm25Data *p; /* Object to return */
-
- p = pApi->xGetAuxdata(pFts, 0);
- if( p==0 ){
- int nPhrase; /* Number of phrases in query */
- sqlite3_int64 nRow = 0; /* Number of rows in table */
- sqlite3_int64 nToken = 0; /* Number of tokens in table */
- int nByte; /* Bytes of space to allocate */
- int i;
-
- /* Allocate the Fts5Bm25Data object */
- nPhrase = pApi->xPhraseCount(pFts);
- nByte = sizeof(Fts5Bm25Data) + nPhrase*2*sizeof(double);
- p = (Fts5Bm25Data*)sqlite3_malloc(nByte);
- if( p==0 ){
- rc = SQLITE_NOMEM;
- }else{
- memset(p, 0, nByte);
- p->nPhrase = nPhrase;
- p->aIDF = (double*)&p[1];
- p->aFreq = &p->aIDF[nPhrase];
- }
-
- /* Calculate the average document length for this FTS5 table */
- if( rc==SQLITE_OK ) rc = pApi->xRowCount(pFts, &nRow);
- if( rc==SQLITE_OK ) rc = pApi->xColumnTotalSize(pFts, -1, &nToken);
- if( rc==SQLITE_OK ) p->avgdl = (double)nToken / (double)nRow;
-
- /* Calculate an IDF for each phrase in the query */
- for(i=0; rc==SQLITE_OK && i<nPhrase; i++){
- sqlite3_int64 nHit = 0;
- rc = pApi->xQueryPhrase(pFts, i, (void*)&nHit, fts5CountCb);
- if( rc==SQLITE_OK ){
- /* Calculate the IDF (Inverse Document Frequency) for phrase i.
- ** This is done using the standard BM25 formula as found on wikipedia:
- **
- ** IDF = log( (N - nHit + 0.5) / (nHit + 0.5) )
- **
- ** where "N" is the total number of documents in the set and nHit
- ** is the number that contain at least one instance of the phrase
- ** under consideration.
- **
- ** The problem with this is that if (N < 2*nHit), the IDF is
- ** negative, which is undesirable. So the minimum allowable IDF is
- ** (1e-6) - roughly the same as a term that appears in just over
- ** half of a set of 5,000,000 documents. */
- double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) );
- if( idf<=0.0 ) idf = 1e-6;
- p->aIDF[i] = idf;
- }
- }
-
- if( rc!=SQLITE_OK ){
- sqlite3_free(p);
- }else{
- rc = pApi->xSetAuxdata(pFts, p, sqlite3_free);
- }
- if( rc!=SQLITE_OK ) p = 0;
- }
- *ppData = p;
- return rc;
-}
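/* Standalone sketch, not part of the sqlite3.c hunk above: the IDF formula
** and clamp applied in fts5Bm25GetData(), with made-up row counts. For
** nRow=1000 and nHit=700 the raw value log(300.5/700.5) is negative (about
** -0.846), so it is replaced by the 1e-6 floor. */
#include <math.h>
#include <stdio.h>

int main(void){
  double nRow = 1000.0;            /* hypothetical rows in the table */
  double nHit = 700.0;             /* hypothetical rows matching the phrase */
  double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) );
  if( idf<=0.0 ) idf = 1e-6;       /* same clamp as fts5Bm25GetData() */
  printf("%g\n", idf);             /* prints 1e-06 */
  return 0;
}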
-
-/*
-** Implementation of bm25() function.
-*/
-static void fts5Bm25Function(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-){
- const double k1 = 1.2; /* Constant "k1" from BM25 formula */
- const double b = 0.75; /* Constant "b" from BM25 formula */
- int rc = SQLITE_OK; /* Error code */
- double score = 0.0; /* SQL function return value */
- Fts5Bm25Data *pData; /* Values allocated/calculated once only */
- int i; /* Iterator variable */
- int nInst = 0; /* Value returned by xInstCount() */
- double D = 0.0; /* Total number of tokens in row */
- double *aFreq = 0; /* Array of phrase freq. for current row */
-
- /* Calculate the phrase frequency (symbol "f(qi,D)" in the documentation)
- ** for each phrase in the query for the current row. */
- rc = fts5Bm25GetData(pApi, pFts, &pData);
- if( rc==SQLITE_OK ){
- aFreq = pData->aFreq;
- memset(aFreq, 0, sizeof(double) * pData->nPhrase);
- rc = pApi->xInstCount(pFts, &nInst);
- }
- for(i=0; rc==SQLITE_OK && i<nInst; i++){
- int ip; int ic; int io;
- rc = pApi->xInst(pFts, i, &ip, &ic, &io);
- if( rc==SQLITE_OK ){
- double w = (nVal > ic) ? sqlite3_value_double(apVal[ic]) : 1.0;
- aFreq[ip] += w;
- }
- }
-
- /* Figure out the total size of the current row in tokens. */
- if( rc==SQLITE_OK ){
- int nTok;
- rc = pApi->xColumnSize(pFts, -1, &nTok);
- D = (double)nTok;
- }
-
- /* Determine the BM25 score for the current row. */
- for(i=0; rc==SQLITE_OK && i<pData->nPhrase; i++){
- score += pData->aIDF[i] * (
- ( aFreq[i] * (k1 + 1.0) ) /
- ( aFreq[i] + k1 * (1 - b + b * D / pData->avgdl) )
- );
- }
-
- /* If no error has occurred, return the calculated score. Otherwise,
- ** throw an SQL exception. */
- if( rc==SQLITE_OK ){
- sqlite3_result_double(pCtx, -1.0 * score);
- }else{
- sqlite3_result_error_code(pCtx, rc);
- }
-}
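/* Standalone sketch, not part of the sqlite3.c hunk above: the per-phrase
** BM25 term accumulated in the loop above, isolated so the arithmetic can be
** checked by hand. With idf=1.0, freq=2.0 and D equal to avgdl, the term is
** (2*2.2)/(2 + 1.2*1.0) = 1.375. All input values are hypothetical. */
#include <stdio.h>

static double bm25_term(double idf, double freq, double D, double avgdl){
  const double k1 = 1.2;           /* same constants as fts5Bm25Function() */
  const double b = 0.75;
  return idf * ( (freq * (k1 + 1.0)) / (freq + k1 * (1.0 - b + b*D/avgdl)) );
}

int main(void){
  printf("%f\n", bm25_term(1.0, 2.0, 100.0, 100.0));  /* prints 1.375000 */
  return 0;
}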
-
-static int sqlite3Fts5AuxInit(fts5_api *pApi){
- struct Builtin {
- const char *zFunc; /* Function name (nul-terminated) */
- void *pUserData; /* User-data pointer */
- fts5_extension_function xFunc;/* Callback function */
- void (*xDestroy)(void*); /* Destructor function */
- } aBuiltin [] = {
- { "snippet", 0, fts5SnippetFunction, 0 },
- { "highlight", 0, fts5HighlightFunction, 0 },
- { "bm25", 0, fts5Bm25Function, 0 },
- };
- int rc = SQLITE_OK; /* Return code */
- int i; /* To iterate through builtin functions */
-
- for(i=0; rc==SQLITE_OK && i<sizeof(aBuiltin)/sizeof(aBuiltin[0]); i++){
- rc = pApi->xCreateFunction(pApi,
- aBuiltin[i].zFunc,
- aBuiltin[i].pUserData,
- aBuiltin[i].xFunc,
- aBuiltin[i].xDestroy
- );
- }
-
- return rc;
-}
-
-
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-*/
-
-
-
-
-static int sqlite3Fts5BufferGrow(int *pRc, Fts5Buffer *pBuf, int nByte){
-
- if( (pBuf->n + nByte) > pBuf->nSpace ){
- u8 *pNew;
- int nNew = pBuf->nSpace ? pBuf->nSpace*2 : 64;
-
- /* A no-op if an error has already occurred */
- if( *pRc ) return 1;
-
- while( nNew<(pBuf->n + nByte) ){
- nNew = nNew * 2;
- }
- pNew = sqlite3_realloc(pBuf->p, nNew);
- if( pNew==0 ){
- *pRc = SQLITE_NOMEM;
- return 1;
- }else{
- pBuf->nSpace = nNew;
- pBuf->p = pNew;
- }
- }
- return 0;
-}
-
-/*
-** Encode value iVal as an SQLite varint and append it to the buffer object
-** pBuf. If an OOM error occurs, set the error code in *pRc.
-*/
-static void sqlite3Fts5BufferAppendVarint(int *pRc, Fts5Buffer *pBuf, i64 iVal){
- if( sqlite3Fts5BufferGrow(pRc, pBuf, 9) ) return;
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iVal);
-}
-
-static void sqlite3Fts5Put32(u8 *aBuf, int iVal){
- aBuf[0] = (iVal>>24) & 0x00FF;
- aBuf[1] = (iVal>>16) & 0x00FF;
- aBuf[2] = (iVal>> 8) & 0x00FF;
- aBuf[3] = (iVal>> 0) & 0x00FF;
-}
-
-static int sqlite3Fts5Get32(const u8 *aBuf){
- return (aBuf[0] << 24) + (aBuf[1] << 16) + (aBuf[2] << 8) + aBuf[3];
-}
-
-static void sqlite3Fts5BufferAppend32(int *pRc, Fts5Buffer *pBuf, int iVal){
- if( sqlite3Fts5BufferGrow(pRc, pBuf, 4) ) return;
- sqlite3Fts5Put32(&pBuf->p[pBuf->n], iVal);
- pBuf->n += 4;
-}
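/* Standalone sketch, not part of the sqlite3.c hunk above: the big-endian
** 32-bit round trip performed by sqlite3Fts5Put32()/sqlite3Fts5Get32(). */
#include <stdio.h>
#include <stdint.h>

static void put32(uint8_t *a, int iVal){
  a[0] = (iVal>>24) & 0xFF;
  a[1] = (iVal>>16) & 0xFF;
  a[2] = (iVal>> 8) & 0xFF;
  a[3] = (iVal>> 0) & 0xFF;
}

static int get32(const uint8_t *a){
  return (a[0]<<24) + (a[1]<<16) + (a[2]<<8) + a[3];
}

int main(void){
  uint8_t buf[4];
  put32(buf, 0x01020304);
  printf("%02x %02x %02x %02x -> %d\n",
         buf[0], buf[1], buf[2], buf[3], get32(buf));  /* 01 02 03 04 -> 16909060 */
  return 0;
}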
-
-/*
-** Append buffer nData/pData to buffer pBuf. If an OOM error occurs, set
-** the error code in p. If an error has already occurred when this function
-** is called, it is a no-op.
-*/
-static void sqlite3Fts5BufferAppendBlob(
- int *pRc,
- Fts5Buffer *pBuf,
- int nData,
- const u8 *pData
-){
- assert( *pRc || nData>=0 );
- if( sqlite3Fts5BufferGrow(pRc, pBuf, nData) ) return;
- memcpy(&pBuf->p[pBuf->n], pData, nData);
- pBuf->n += nData;
-}
-
-/*
-** Append the nul-terminated string zStr to the buffer pBuf. This function
-** ensures that the byte following the buffer data is set to 0x00, even
-** though this byte is not included in the pBuf->n count.
-*/
-static void sqlite3Fts5BufferAppendString(
- int *pRc,
- Fts5Buffer *pBuf,
- const char *zStr
-){
- int nStr = strlen(zStr);
- sqlite3Fts5BufferAppendBlob(pRc, pBuf, nStr+1, (const u8*)zStr);
- pBuf->n--;
-}
-
-/*
-** Argument zFmt is a printf() style format string. This function performs
-** the printf() style processing, then appends the results to buffer pBuf.
-**
-** Like sqlite3Fts5BufferAppendString(), this function ensures that the byte
-** following the buffer data is set to 0x00, even though this byte is not
-** included in the pBuf->n count.
-*/
-static void sqlite3Fts5BufferAppendPrintf(
- int *pRc,
- Fts5Buffer *pBuf,
- char *zFmt, ...
-){
- if( *pRc==SQLITE_OK ){
- char *zTmp;
- va_list ap;
- va_start(ap, zFmt);
- zTmp = sqlite3_vmprintf(zFmt, ap);
- va_end(ap);
-
- if( zTmp==0 ){
- *pRc = SQLITE_NOMEM;
- }else{
- sqlite3Fts5BufferAppendString(pRc, pBuf, zTmp);
- sqlite3_free(zTmp);
- }
- }
-}
-
-static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...){
- char *zRet = 0;
- if( *pRc==SQLITE_OK ){
- va_list ap;
- va_start(ap, zFmt);
- zRet = sqlite3_vmprintf(zFmt, ap);
- va_end(ap);
- if( zRet==0 ){
- *pRc = SQLITE_NOMEM;
- }
- }
- return zRet;
-}
-
-
-/*
-** Free any buffer allocated by pBuf. Zero the structure before returning.
-*/
-static void sqlite3Fts5BufferFree(Fts5Buffer *pBuf){
- sqlite3_free(pBuf->p);
- memset(pBuf, 0, sizeof(Fts5Buffer));
-}
-
-/*
-** Zero the contents of the buffer object. But do not free the associated
-** memory allocation.
-*/
-static void sqlite3Fts5BufferZero(Fts5Buffer *pBuf){
- pBuf->n = 0;
-}
-
-/*
-** Set the buffer to contain nData/pData. If an OOM error occurs, leave
-** the error code in *pRc. If an error has already occurred when this function
-** is called, it is a no-op.
-*/
-static void sqlite3Fts5BufferSet(
- int *pRc,
- Fts5Buffer *pBuf,
- int nData,
- const u8 *pData
-){
- pBuf->n = 0;
- sqlite3Fts5BufferAppendBlob(pRc, pBuf, nData, pData);
-}
-
-static int sqlite3Fts5PoslistNext64(
- const u8 *a, int n, /* Buffer containing poslist */
- int *pi, /* IN/OUT: Offset within a[] */
- i64 *piOff /* IN/OUT: Current offset */
-){
- int i = *pi;
- if( i>=n ){
- /* EOF */
- *piOff = -1;
- return 1;
- }else{
- i64 iOff = *piOff;
- int iVal;
- fts5FastGetVarint32(a, i, iVal);
- if( iVal==1 ){
- fts5FastGetVarint32(a, i, iVal);
- iOff = ((i64)iVal) << 32;
- fts5FastGetVarint32(a, i, iVal);
- }
- *piOff = iOff + (iVal-2);
- *pi = i;
- return 0;
- }
-}
-
-
-/*
-** Advance the iterator object passed as the only argument. Return true
-** if the iterator reaches EOF, or false otherwise.
-*/
-static int sqlite3Fts5PoslistReaderNext(Fts5PoslistReader *pIter){
- if( sqlite3Fts5PoslistNext64(pIter->a, pIter->n, &pIter->i, &pIter->iPos) ){
- pIter->bEof = 1;
- }
- return pIter->bEof;
-}
-
-static int sqlite3Fts5PoslistReaderInit(
- const u8 *a, int n, /* Poslist buffer to iterate through */
- Fts5PoslistReader *pIter /* Iterator object to initialize */
-){
- memset(pIter, 0, sizeof(*pIter));
- pIter->a = a;
- pIter->n = n;
- sqlite3Fts5PoslistReaderNext(pIter);
- return pIter->bEof;
-}
-
-static int sqlite3Fts5PoslistWriterAppend(
- Fts5Buffer *pBuf,
- Fts5PoslistWriter *pWriter,
- i64 iPos
-){
- static const i64 colmask = ((i64)(0x7FFFFFFF)) << 32;
- int rc = SQLITE_OK;
- if( 0==sqlite3Fts5BufferGrow(&rc, pBuf, 5+5+5) ){
- if( (iPos & colmask) != (pWriter->iPrev & colmask) ){
- pBuf->p[pBuf->n++] = 1;
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos>>32));
- pWriter->iPrev = (iPos & colmask);
- }
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos-pWriter->iPrev)+2);
- pWriter->iPrev = iPos;
- }
- return rc;
-}
-
-static void *sqlite3Fts5MallocZero(int *pRc, int nByte){
- void *pRet = 0;
- if( *pRc==SQLITE_OK ){
- pRet = sqlite3_malloc(nByte);
- if( pRet==0 && nByte>0 ){
- *pRc = SQLITE_NOMEM;
- }else{
- memset(pRet, 0, nByte);
- }
- }
- return pRet;
-}
-
-/*
-** Return a nul-terminated copy of the string indicated by pIn. If nIn
-** is non-negative, then it is the length of the string in bytes. Otherwise,
-** the length of the string is determined using strlen().
-**
-** It is the responsibility of the caller to eventually free the returned
-** buffer using sqlite3_free(). If an OOM error occurs, NULL is returned.
-*/
-static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn){
- char *zRet = 0;
- if( *pRc==SQLITE_OK ){
- if( nIn<0 ){
- nIn = strlen(pIn);
- }
- zRet = (char*)sqlite3_malloc(nIn+1);
- if( zRet ){
- memcpy(zRet, pIn, nIn);
- zRet[nIn] = '\0';
- }else{
- *pRc = SQLITE_NOMEM;
- }
- }
- return zRet;
-}
-
-
-/*
-** Return true if character 't' may be part of an FTS5 bareword, or false
-** otherwise. Characters that may be part of barewords:
-**
-** * All non-ASCII characters,
-** * The 52 upper and lower case ASCII characters, and
-** * The 10 decimal digit ASCII characters.
-** * The underscore character "_" (0x5F).
-** * The unicode "substitute" character (0x1A).
-*/
-static int sqlite3Fts5IsBareword(char t){
- u8 aBareword[128] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00 .. 0x0F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, /* 0x10 .. 0x1F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20 .. 0x2F */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 0x30 .. 0x3F */
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x40 .. 0x4F */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 0x50 .. 0x5F */
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x60 .. 0x6F */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 /* 0x70 .. 0x7F */
- };
-
- return (t & 0x80) || aBareword[(int)t];
-}
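/* Standalone sketch, not part of the sqlite3.c hunk above: an equivalent
** formulation of the bareword test in sqlite3Fts5IsBareword() using <ctype.h>,
** handy for cross-checking individual characters against the table above. */
#include <stdio.h>
#include <ctype.h>

static int is_bareword(char t){
  unsigned char c = (unsigned char)t;
  if( c & 0x80 ) return 1;                   /* all non-ASCII bytes qualify */
  return isalnum(c) || c=='_' || c==0x1A;    /* A-Z a-z 0-9 "_" and SUB (0x1A) */
}

int main(void){
  printf("%d %d %d %d %d\n",
      is_bareword('a'), is_bareword('7'), is_bareword('_'),
      is_bareword(' '), is_bareword('-'));   /* prints: 1 1 1 0 0 */
  return 0;
}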
-
-
-
-/*
-** 2014 Jun 09
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This is an SQLite module implementing full-text search.
-*/
-
-
-
-
-#define FTS5_DEFAULT_PAGE_SIZE 4050
-#define FTS5_DEFAULT_AUTOMERGE 4
-#define FTS5_DEFAULT_CRISISMERGE 16
-
-/* Maximum allowed page size */
-#define FTS5_MAX_PAGE_SIZE (128*1024)
-
-static int fts5_iswhitespace(char x){
- return (x==' ');
-}
-
-static int fts5_isopenquote(char x){
- return (x=='"' || x=='\'' || x=='[' || x=='`');
-}
-
-/*
-** Argument pIn points to a character that is part of a nul-terminated
-** string. Return a pointer to the first character following *pIn in
-** the string that is not a white-space character.
-*/
-static const char *fts5ConfigSkipWhitespace(const char *pIn){
- const char *p = pIn;
- if( p ){
- while( fts5_iswhitespace(*p) ){ p++; }
- }
- return p;
-}
-
-/*
-** Argument pIn points to a character that is part of a nul-terminated
-** string. Return a pointer to the first character following *pIn in
-** the string that is not a "bareword" character.
-*/
-static const char *fts5ConfigSkipBareword(const char *pIn){
- const char *p = pIn;
- while ( sqlite3Fts5IsBareword(*p) ) p++;
- if( p==pIn ) p = 0;
- return p;
-}
-
-static int fts5_isdigit(char a){
- return (a>='0' && a<='9');
-}
-
-
-
-static const char *fts5ConfigSkipLiteral(const char *pIn){
- const char *p = pIn;
- switch( *p ){
- case 'n': case 'N':
- if( sqlite3_strnicmp("null", p, 4)==0 ){
- p = &p[4];
- }else{
- p = 0;
- }
- break;
-
- case 'x': case 'X':
- p++;
- if( *p=='\'' ){
- p++;
- while( (*p>='a' && *p<='f')
- || (*p>='A' && *p<='F')
- || (*p>='0' && *p<='9')
- ){
- p++;
- }
- if( *p=='\'' && 0==((p-pIn)%2) ){
- p++;
- }else{
- p = 0;
- }
- }else{
- p = 0;
- }
- break;
-
- case '\'':
- p++;
- while( p ){
- if( *p=='\'' ){
- p++;
- if( *p!='\'' ) break;
- }
- p++;
- if( *p==0 ) p = 0;
- }
- break;
-
- default:
- /* maybe a number */
- if( *p=='+' || *p=='-' ) p++;
- while( fts5_isdigit(*p) ) p++;
-
- /* At this point, if the literal was an integer, the parse is
- ** finished. Or, if it is a floating point value, it may continue
- ** with either a decimal point or an 'E' character. */
- if( *p=='.' && fts5_isdigit(p[1]) ){
- p += 2;
- while( fts5_isdigit(*p) ) p++;
- }
- if( p==pIn ) p = 0;
-
- break;
- }
-
- return p;
-}
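-
-/*
-** Illustrative note (hypothetical values): literals accepted by the
-** function above include, for example,
-**
-**   NULL    x'53514C'    'it''s'    42    -7.25
-**
-** A NULL return from this and the other fts5ConfigSkip*() helpers signals
-** a parse error to the caller.
-*/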
-
-/*
-** The first character of the string pointed to by argument z is guaranteed
-** to be an open-quote character (see function fts5_isopenquote()).
-**
-** This function searches for the corresponding close-quote character within
-** the string and, if found, dequotes the string in place and adds a new
-** nul-terminator byte.
-**
-** If the close-quote is found, the value returned is the byte offset of
-** the character immediately following it. Or, if the close-quote is not
-** found, -1 is returned. If -1 is returned, the buffer is left in an
-** undefined state.
-*/
-static int fts5Dequote(char *z){
- char q;
- int iIn = 1;
- int iOut = 0;
- q = z[0];
-
- /* Set stack variable q to the close-quote character */
- assert( q=='[' || q=='\'' || q=='"' || q=='`' );
- if( q=='[' ) q = ']';
-
- while( ALWAYS(z[iIn]) ){
- if( z[iIn]==q ){
- if( z[iIn+1]!=q ){
- /* Character iIn was the close quote. */
- iIn++;
- break;
- }else{
- /* Character iIn and iIn+1 form an escaped quote character. Skip
- ** the input cursor past both and copy a single quote character
- ** to the output buffer. */
- iIn += 2;
- z[iOut++] = q;
- }
- }else{
- z[iOut++] = z[iIn++];
- }
- }
-
- z[iOut] = '\0';
- return iIn;
-}
-
-/*
-** Convert an SQL-style quoted string into a normal string by removing
-** the quote characters. The conversion is done in-place. If the
-** input does not begin with a quote character, then this routine
-** is a no-op.
-**
-** Examples:
-**
-** "abc" becomes abc
-** 'xyz' becomes xyz
-** [pqr] becomes pqr
-** `mno` becomes mno
-*/
-static void sqlite3Fts5Dequote(char *z){
- char quote; /* Quote character (if any ) */
-
- assert( 0==fts5_iswhitespace(z[0]) );
- quote = z[0];
- if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){
- fts5Dequote(z);
- }
-}
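-
-/*
-** Illustrative sketch (not part of the original comments above): dequoting
-** is performed in place and a doubled quote collapses to a single one. For
-** example, given a writable buffer:
-**
-**   char z[] = "'it''s'";
-**   sqlite3Fts5Dequote(z);     /* z now contains: it's */
-**
-** fts5Dequote() itself returns the offset of the first character past the
-** original close-quote - 7 for the buffer above.
-*/
-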
-
-/*
-** Parse a "special" CREATE VIRTUAL TABLE directive and update
-** configuration object pConfig as appropriate.
-**
-** If successful, object pConfig is updated and SQLITE_OK returned. If
-** an error occurs, an SQLite error code is returned and an error message
-** may be left in *pzErr. It is the responsibility of the caller to
-** eventually free any such error message using sqlite3_free().
-*/
-static int fts5ConfigParseSpecial(
- Fts5Global *pGlobal,
- Fts5Config *pConfig, /* Configuration object to update */
- const char *zCmd, /* Special command to parse */
- const char *zArg, /* Argument to parse */
- char **pzErr /* OUT: Error message */
-){
- int rc = SQLITE_OK;
- int nCmd = strlen(zCmd);
- if( sqlite3_strnicmp("prefix", zCmd, nCmd)==0 ){
- const int nByte = sizeof(int) * FTS5_MAX_PREFIX_INDEXES;
- const char *p;
- if( pConfig->aPrefix ){
- *pzErr = sqlite3_mprintf("multiple prefix=... directives");
- rc = SQLITE_ERROR;
- }else{
- pConfig->aPrefix = sqlite3Fts5MallocZero(&rc, nByte);
- }
- p = zArg;
- while( rc==SQLITE_OK && p[0] ){
- int nPre = 0;
- while( p[0]==' ' ) p++;
- while( p[0]>='0' && p[0]<='9' && nPre<1000 ){
- nPre = nPre*10 + (p[0] - '0');
- p++;
- }
- while( p[0]==' ' ) p++;
- if( p[0]==',' ){
- p++;
- }else if( p[0] ){
- *pzErr = sqlite3_mprintf("malformed prefix=... directive");
- rc = SQLITE_ERROR;
- }
- if( rc==SQLITE_OK && (nPre==0 || nPre>=1000) ){
- *pzErr = sqlite3_mprintf("prefix length out of range: %d", nPre);
- rc = SQLITE_ERROR;
- }
- pConfig->aPrefix[pConfig->nPrefix] = nPre;
- pConfig->nPrefix++;
- }
- return rc;
- }
-
- if( sqlite3_strnicmp("tokenize", zCmd, nCmd)==0 ){
- const char *p = (const char*)zArg;
- int nArg = strlen(zArg) + 1;
- char **azArg = sqlite3Fts5MallocZero(&rc, sizeof(char*) * nArg);
- char *pDel = sqlite3Fts5MallocZero(&rc, nArg * 2);
- char *pSpace = pDel;
-
- if( azArg && pSpace ){
- if( pConfig->pTok ){
- *pzErr = sqlite3_mprintf("multiple tokenize=... directives");
- rc = SQLITE_ERROR;
- }else{
- for(nArg=0; p && *p; nArg++){
- const char *p2 = fts5ConfigSkipWhitespace(p);
- if( *p2=='\'' ){
- p = fts5ConfigSkipLiteral(p2);
- }else{
- p = fts5ConfigSkipBareword(p2);
- }
- if( p ){
- memcpy(pSpace, p2, p-p2);
- azArg[nArg] = pSpace;
- sqlite3Fts5Dequote(pSpace);
- pSpace += (p - p2) + 1;
- p = fts5ConfigSkipWhitespace(p);
- }
- }
- if( p==0 ){
- *pzErr = sqlite3_mprintf("parse error in tokenize directive");
- rc = SQLITE_ERROR;
- }else{
- rc = sqlite3Fts5GetTokenizer(pGlobal,
- (const char**)azArg, nArg, &pConfig->pTok, &pConfig->pTokApi,
- pzErr
- );
- }
- }
- }
-
- sqlite3_free(azArg);
- sqlite3_free(pDel);
- return rc;
- }
-
- if( sqlite3_strnicmp("content", zCmd, nCmd)==0 ){
- if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){
- *pzErr = sqlite3_mprintf("multiple content=... directives");
- rc = SQLITE_ERROR;
- }else{
- if( zArg[0] ){
- pConfig->eContent = FTS5_CONTENT_EXTERNAL;
- pConfig->zContent = sqlite3Fts5Mprintf(&rc, "%Q.%Q", pConfig->zDb,zArg);
- }else{
- pConfig->eContent = FTS5_CONTENT_NONE;
- }
- }
- return rc;
- }
-
- if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){
- if( pConfig->zContentRowid ){
- *pzErr = sqlite3_mprintf("multiple content_rowid=... directives");
- rc = SQLITE_ERROR;
- }else{
- pConfig->zContentRowid = sqlite3Fts5Strndup(&rc, zArg, -1);
- }
- return rc;
- }
-
- if( sqlite3_strnicmp("columnsize", zCmd, nCmd)==0 ){
- if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
- *pzErr = sqlite3_mprintf("malformed columnsize=... directive");
- rc = SQLITE_ERROR;
- }else{
- pConfig->bColumnsize = (zArg[0]=='1');
- }
- return rc;
- }
-
- *pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd);
- return SQLITE_ERROR;
-}
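-
-/*
-** For illustration (table, column and option values here are made up), the
-** directives handled above appear as trailing CREATE VIRTUAL TABLE
-** arguments, for example:
-**
-**   CREATE VIRTUAL TABLE ft USING fts5(
-**     a, b,
-**     prefix = '2,3',            -- two prefix indexes, lengths 2 and 3
-**     tokenize = 'porter ascii', -- tokenizer name plus its arguments
-**     content = 'tbl',           -- external content table
-**     content_rowid = 'id',      -- rowid column of the content table
-**     columnsize = '0'           -- do not record per-row column sizes
-**   );
-**
-** Each name=value pair is passed to fts5ConfigParseSpecial() as zCmd/zArg
-** after dequoting.
-*/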
-
-/*
-** Allocate an instance of the default tokenizer ("unicode61") at
-** Fts5Config.pTok. Return SQLITE_OK if successful, or an SQLite error
-** code if an error occurs.
-*/
-static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){
- assert( pConfig->pTok==0 && pConfig->pTokApi==0 );
- return sqlite3Fts5GetTokenizer(
- pGlobal, 0, 0, &pConfig->pTok, &pConfig->pTokApi, 0
- );
-}
-
-/*
-** Gobble up the first bareword or quoted word from the input buffer zIn.
-** Return a pointer to the character immediately following the last in
-** the gobbled word if successful, or a NULL pointer otherwise (failed
-** to find close-quote character).
-**
-** Before returning, set pzOut to point to a new buffer containing a
-** nul-terminated, dequoted copy of the gobbled word. If the word was
-** quoted, *pbQuoted is also set to 1 before returning.
-**
-** If *pRc is other than SQLITE_OK when this function is called, it is
-** a no-op (NULL is returned). Otherwise, if an OOM occurs within this
-** function, *pRc is set to SQLITE_NOMEM before returning. *pRc is *not*
-** set if a parse error (failed to find close quote) occurs.
-*/
-static const char *fts5ConfigGobbleWord(
- int *pRc, /* IN/OUT: Error code */
- const char *zIn, /* Buffer to gobble string/bareword from */
- char **pzOut, /* OUT: malloc'd buffer containing str/bw */
- int *pbQuoted /* OUT: Set to true if dequoting required */
-){
- const char *zRet = 0;
-
- int nIn = strlen(zIn);
- char *zOut = sqlite3_malloc(nIn+1);
-
- assert( *pRc==SQLITE_OK );
- *pbQuoted = 0;
- *pzOut = 0;
-
- if( zOut==0 ){
- *pRc = SQLITE_NOMEM;
- }else{
- memcpy(zOut, zIn, nIn+1);
- if( fts5_isopenquote(zOut[0]) ){
- int ii = fts5Dequote(zOut);
- zRet = &zIn[ii];
- *pbQuoted = 1;
- }else{
- zRet = fts5ConfigSkipBareword(zIn);
- zOut[zRet-zIn] = '\0';
- }
- }
-
- if( zRet==0 ){
- sqlite3_free(zOut);
- }else{
- *pzOut = zOut;
- }
-
- return zRet;
-}
-
-static int fts5ConfigParseColumn(
- Fts5Config *p,
- char *zCol,
- char *zArg,
- char **pzErr
-){
- int rc = SQLITE_OK;
- if( 0==sqlite3_stricmp(zCol, FTS5_RANK_NAME)
- || 0==sqlite3_stricmp(zCol, FTS5_ROWID_NAME)
- ){
- *pzErr = sqlite3_mprintf("reserved fts5 column name: %s", zCol);
- rc = SQLITE_ERROR;
- }else if( zArg ){
- if( 0==sqlite3_stricmp(zArg, "unindexed") ){
- p->abUnindexed[p->nCol] = 1;
- }else{
- *pzErr = sqlite3_mprintf("unrecognized column option: %s", zArg);
- rc = SQLITE_ERROR;
- }
- }
-
- p->azCol[p->nCol++] = zCol;
- return rc;
-}
-
-/*
-** Populate the Fts5Config.zContentExprlist string.
-*/
-static int fts5ConfigMakeExprlist(Fts5Config *p){
- int i;
- int rc = SQLITE_OK;
- Fts5Buffer buf = {0, 0, 0};
-
- sqlite3Fts5BufferAppendPrintf(&rc, &buf, "T.%Q", p->zContentRowid);
- if( p->eContent!=FTS5_CONTENT_NONE ){
- for(i=0; i<p->nCol; i++){
- if( p->eContent==FTS5_CONTENT_EXTERNAL ){
- sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.%Q", p->azCol[i]);
- }else{
- sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.c%d", i);
- }
- }
- }
-
- assert( p->zContentExprlist==0 );
- p->zContentExprlist = (char*)buf.p;
- return rc;
-}
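-
-/*
-** For illustration (hypothetical column names): for a three-column table
-** the loop above builds an expression list such as
-**
-**   T.'rowid', T.c0, T.c1, T.c2      -- normal (internal content) table
-**   T.'id', T.'a', T.'b', T.'c'      -- content= (external content) table
-**
-** The %Q conversions add the single quotes, and the first entry always
-** names the rowid column.
-*/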
-
-/*
-** Arguments nArg/azArg contain the string arguments passed to the xCreate
-** or xConnect method of the virtual table. This function attempts to
-** allocate an instance of Fts5Config containing the results of parsing
-** those arguments.
-**
-** If successful, SQLITE_OK is returned and *ppOut is set to point to the
-** new Fts5Config object. If an error occurs, an SQLite error code is
-** returned, *ppOut is set to NULL and an error message may be left in
-** *pzErr. It is the responsibility of the caller to eventually free any
-** such error message using sqlite3_free().
-*/
-static int sqlite3Fts5ConfigParse(
- Fts5Global *pGlobal,
- sqlite3 *db,
- int nArg, /* Number of arguments */
- const char **azArg, /* Array of nArg CREATE VIRTUAL TABLE args */
- Fts5Config **ppOut, /* OUT: Results of parse */
- char **pzErr /* OUT: Error message */
-){
- int rc = SQLITE_OK; /* Return code */
- Fts5Config *pRet; /* New object to return */
- int i;
- int nByte;
-
- *ppOut = pRet = (Fts5Config*)sqlite3_malloc(sizeof(Fts5Config));
- if( pRet==0 ) return SQLITE_NOMEM;
- memset(pRet, 0, sizeof(Fts5Config));
- pRet->db = db;
- pRet->iCookie = -1;
-
- nByte = nArg * (sizeof(char*) + sizeof(u8));
- pRet->azCol = (char**)sqlite3Fts5MallocZero(&rc, nByte);
- pRet->abUnindexed = (u8*)&pRet->azCol[nArg];
- pRet->zDb = sqlite3Fts5Strndup(&rc, azArg[1], -1);
- pRet->zName = sqlite3Fts5Strndup(&rc, azArg[2], -1);
- pRet->bColumnsize = 1;
-#ifdef SQLITE_DEBUG
- pRet->bPrefixIndex = 1;
-#endif
- if( rc==SQLITE_OK && sqlite3_stricmp(pRet->zName, FTS5_RANK_NAME)==0 ){
- *pzErr = sqlite3_mprintf("reserved fts5 table name: %s", pRet->zName);
- rc = SQLITE_ERROR;
- }
-
- for(i=3; rc==SQLITE_OK && i<nArg; i++){
- const char *zOrig = azArg[i];
- const char *z;
- char *zOne = 0;
- char *zTwo = 0;
- int bOption = 0;
- int bMustBeCol = 0;
-
- z = fts5ConfigGobbleWord(&rc, zOrig, &zOne, &bMustBeCol);
- z = fts5ConfigSkipWhitespace(z);
- if( z && *z=='=' ){
- bOption = 1;
- z++;
- if( bMustBeCol ) z = 0;
- }
- z = fts5ConfigSkipWhitespace(z);
- if( z && z[0] ){
- int bDummy;
- z = fts5ConfigGobbleWord(&rc, z, &zTwo, &bDummy);
- if( z && z[0] ) z = 0;
- }
-
- if( rc==SQLITE_OK ){
- if( z==0 ){
- *pzErr = sqlite3_mprintf("parse error in \"%s\"", zOrig);
- rc = SQLITE_ERROR;
- }else{
- if( bOption ){
- rc = fts5ConfigParseSpecial(pGlobal, pRet, zOne, zTwo?zTwo:"", pzErr);
- }else{
- rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr);
- zOne = 0;
- }
- }
- }
-
- sqlite3_free(zOne);
- sqlite3_free(zTwo);
- }
-
- /* If a tokenize= option was successfully parsed, the tokenizer has
- ** already been allocated. Otherwise, allocate an instance of the default
- ** tokenizer (unicode61) now. */
- if( rc==SQLITE_OK && pRet->pTok==0 ){
- rc = fts5ConfigDefaultTokenizer(pGlobal, pRet);
- }
-
- /* If no zContent option was specified, fill in the default values. */
- if( rc==SQLITE_OK && pRet->zContent==0 ){
- const char *zTail = 0;
- assert( pRet->eContent==FTS5_CONTENT_NORMAL
- || pRet->eContent==FTS5_CONTENT_NONE
- );
- if( pRet->eContent==FTS5_CONTENT_NORMAL ){
- zTail = "content";
- }else if( pRet->bColumnsize ){
- zTail = "docsize";
- }
-
- if( zTail ){
- pRet->zContent = sqlite3Fts5Mprintf(
- &rc, "%Q.'%q_%s'", pRet->zDb, pRet->zName, zTail
- );
- }
- }
-
- if( rc==SQLITE_OK && pRet->zContentRowid==0 ){
- pRet->zContentRowid = sqlite3Fts5Strndup(&rc, "rowid", -1);
- }
-
- /* Formulate the zContentExprlist text */
- if( rc==SQLITE_OK ){
- rc = fts5ConfigMakeExprlist(pRet);
- }
-
- if( rc!=SQLITE_OK ){
- sqlite3Fts5ConfigFree(pRet);
- *ppOut = 0;
- }
- return rc;
-}
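-
-/*
-** Illustrative note (hypothetical declaration): for
-**
-**   CREATE VIRTUAL TABLE main.ft USING fts5(a, b, prefix='2');
-**
-** the xCreate/xConnect arguments parsed above are roughly:
-**
-**   azArg[0]  "fts5"        (module name)
-**   azArg[1]  "main"        (database name  -> Fts5Config.zDb)
-**   azArg[2]  "ft"          (table name     -> Fts5Config.zName)
-**   azArg[3]  "a"           (column)
-**   azArg[4]  "b"           (column)
-**   azArg[5]  "prefix='2'"  (option, handled by fts5ConfigParseSpecial())
-*/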
-
-/*
-** Free the configuration object passed as the only argument.
-*/
-static void sqlite3Fts5ConfigFree(Fts5Config *pConfig){
- if( pConfig ){
- int i;
- if( pConfig->pTok ){
- pConfig->pTokApi->xDelete(pConfig->pTok);
- }
- sqlite3_free(pConfig->zDb);
- sqlite3_free(pConfig->zName);
- for(i=0; i<pConfig->nCol; i++){
- sqlite3_free(pConfig->azCol[i]);
- }
- sqlite3_free(pConfig->azCol);
- sqlite3_free(pConfig->aPrefix);
- sqlite3_free(pConfig->zRank);
- sqlite3_free(pConfig->zRankArgs);
- sqlite3_free(pConfig->zContent);
- sqlite3_free(pConfig->zContentRowid);
- sqlite3_free(pConfig->zContentExprlist);
- sqlite3_free(pConfig);
- }
-}
-
-/*
-** Call sqlite3_declare_vtab() based on the contents of the configuration
-** object passed as the only argument. Return SQLITE_OK if successful, or
-** an SQLite error code if an error occurs.
-*/
-static int sqlite3Fts5ConfigDeclareVtab(Fts5Config *pConfig){
- int i;
- int rc = SQLITE_OK;
- char *zSql;
-
- zSql = sqlite3Fts5Mprintf(&rc, "CREATE TABLE x(");
- for(i=0; zSql && i<pConfig->nCol; i++){
- const char *zSep = (i==0?"":", ");
- zSql = sqlite3Fts5Mprintf(&rc, "%z%s%Q", zSql, zSep, pConfig->azCol[i]);
- }
- zSql = sqlite3Fts5Mprintf(&rc, "%z, %Q HIDDEN, %s HIDDEN)",
- zSql, pConfig->zName, FTS5_RANK_NAME
- );
-
- assert( zSql || rc==SQLITE_NOMEM );
- if( zSql ){
- rc = sqlite3_declare_vtab(pConfig->db, zSql);
- sqlite3_free(zSql);
- }
-
- return rc;
-}
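-
-/*
-** For example (illustrative), a table created as fts5(a, b) and named "ft"
-** would be declared to the SQLite core roughly as:
-**
-**   CREATE TABLE x('a', 'b', 'ft' HIDDEN, rank HIDDEN)
-**
-** The hidden column that shares the table's name is the one used in
-** "ft MATCH ..." constraints; the final hidden column is FTS5_RANK_NAME.
-*/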
-
-/*
-** Tokenize the text passed via the second and third arguments.
-**
-** The callback is invoked once for each token in the input text. The
-** arguments passed to it are, in order:
-**
-**   void *pCtx          // Copy of the pCtx argument to sqlite3Fts5Tokenize()
-**   int tflags          // Mask of FTS5_TOKEN_* flags
-**   const char *pToken  // Pointer to buffer containing token
-**   int nToken          // Size of token in bytes
-**   int iStart          // Byte offset of start of token within input text
-**   int iEnd            // Byte offset of end of token within input text
-**
-** If the callback returns a non-zero value the tokenization is abandoned
-** and no further callbacks are issued.
-**
-** This function returns SQLITE_OK if successful or an SQLite error code
-** if an error occurs. If the tokenization was abandoned early because
-** the callback returned SQLITE_DONE, this is not an error and this function
-** still returns SQLITE_OK. Or, if the tokenization was abandoned early
-** because the callback returned another non-zero value, it is assumed
-** to be an SQLite error code and returned to the caller.
-*/
-static int sqlite3Fts5Tokenize(
- Fts5Config *pConfig, /* FTS5 Configuration object */
- int flags, /* FTS5_TOKENIZE_* flags */
- const char *pText, int nText, /* Text to tokenize */
- void *pCtx, /* Context passed to xToken() */
- int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
-){
- if( pText==0 ) return SQLITE_OK;
- return pConfig->pTokApi->xTokenize(
- pConfig->pTok, pCtx, flags, pText, nText, xToken
- );
-}
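-
-/*
-** A minimal sketch of an xToken callback matching the signature above
-** (illustrative only - the function name and usage are hypothetical). It
-** simply counts the tokens in the input text via the context pointer.
-*/
-#if 0
-static int exampleCountToken(
-  void *pCtx,                      /* Points to an int counter */
-  int tflags,                      /* Mask of FTS5_TOKEN_* flags (unused) */
-  const char *pToken, int nToken,  /* Token text and its size in bytes */
-  int iStart, int iEnd             /* Byte offsets within the input text */
-){
-  (*(int*)pCtx)++;
-  return SQLITE_OK;                /* Non-zero would abandon tokenization */
-}
-/* Usage sketch:
-**   int n = 0;
-**   rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT,
-**                            zText, (int)strlen(zText), &n, exampleCountToken);
-*/
-#endif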
-
-/*
-** Argument pIn points to the first character in what is expected to be
-** a comma-separated list of SQL literals followed by a ')' character.
-** If it actually is this, return a pointer to the ')'. Otherwise, return
-** NULL to indicate a parse error.
-*/
-static const char *fts5ConfigSkipArgs(const char *pIn){
- const char *p = pIn;
-
- while( 1 ){
- p = fts5ConfigSkipWhitespace(p);
- p = fts5ConfigSkipLiteral(p);
- p = fts5ConfigSkipWhitespace(p);
- if( p==0 || *p==')' ) break;
- if( *p!=',' ){
- p = 0;
- break;
- }
- p++;
- }
-
- return p;
-}
-
-/*
-** Parameter zIn contains a rank() function specification. The format of
-** this is:
-**
-** + Bareword (function name)
-** + Open parenthesis - "("
-** + Zero or more SQL literals in a comma separated list
-** + Close parenthesis - ")"
-*/
-static int sqlite3Fts5ConfigParseRank(
- const char *zIn, /* Input string */
- char **pzRank, /* OUT: Rank function name */
- char **pzRankArgs /* OUT: Rank function arguments */
-){
- const char *p = zIn;
- const char *pRank;
- char *zRank = 0;
- char *zRankArgs = 0;
- int rc = SQLITE_OK;
-
- *pzRank = 0;
- *pzRankArgs = 0;
-
- p = fts5ConfigSkipWhitespace(p);
- pRank = p;
- p = fts5ConfigSkipBareword(p);
-
- if( p ){
- zRank = sqlite3Fts5MallocZero(&rc, 1 + p - pRank);
- if( zRank ) memcpy(zRank, pRank, p-pRank);
- }else{
- rc = SQLITE_ERROR;
- }
-
- if( rc==SQLITE_OK ){
- p = fts5ConfigSkipWhitespace(p);
- if( *p!='(' ) rc = SQLITE_ERROR;
- p++;
- }
- if( rc==SQLITE_OK ){
- const char *pArgs;
- p = fts5ConfigSkipWhitespace(p);
- pArgs = p;
- if( *p!=')' ){
- p = fts5ConfigSkipArgs(p);
- if( p==0 ){
- rc = SQLITE_ERROR;
- }else{
- zRankArgs = sqlite3Fts5MallocZero(&rc, 1 + p - pArgs);
- if( zRankArgs ) memcpy(zRankArgs, pArgs, p-pArgs);
- }
- }
- }
-
- if( rc!=SQLITE_OK ){
- sqlite3_free(zRank);
- assert( zRankArgs==0 );
- }else{
- *pzRank = zRank;
- *pzRankArgs = zRankArgs;
- }
- return rc;
-}
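-
-/*
-** Worked example (hypothetical input): passing the specification
-**
-**   "bm25(10.0, 5.0)"
-**
-** through sqlite3Fts5ConfigParseRank() yields *pzRank = "bm25" and
-** *pzRankArgs = "10.0, 5.0". A specification with an empty argument list,
-** such as "bm25()", leaves *pzRankArgs set to NULL.
-*/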
-
-static int sqlite3Fts5ConfigSetValue(
- Fts5Config *pConfig,
- const char *zKey,
- sqlite3_value *pVal,
- int *pbBadkey
-){
- int rc = SQLITE_OK;
-
- if( 0==sqlite3_stricmp(zKey, "pgsz") ){
- int pgsz = 0;
- if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
- pgsz = sqlite3_value_int(pVal);
- }
- if( pgsz<=0 || pgsz>FTS5_MAX_PAGE_SIZE ){
- *pbBadkey = 1;
- }else{
- pConfig->pgsz = pgsz;
- }
- }
-
- else if( 0==sqlite3_stricmp(zKey, "automerge") ){
- int nAutomerge = -1;
- if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
- nAutomerge = sqlite3_value_int(pVal);
- }
- if( nAutomerge<0 || nAutomerge>64 ){
- *pbBadkey = 1;
- }else{
- if( nAutomerge==1 ) nAutomerge = FTS5_DEFAULT_AUTOMERGE;
- pConfig->nAutomerge = nAutomerge;
- }
- }
-
- else if( 0==sqlite3_stricmp(zKey, "crisismerge") ){
- int nCrisisMerge = -1;
- if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
- nCrisisMerge = sqlite3_value_int(pVal);
- }
- if( nCrisisMerge<0 ){
- *pbBadkey = 1;
- }else{
- if( nCrisisMerge<=1 ) nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
- pConfig->nCrisisMerge = nCrisisMerge;
- }
- }
-
- else if( 0==sqlite3_stricmp(zKey, "rank") ){
- const char *zIn = (const char*)sqlite3_value_text(pVal);
- char *zRank;
- char *zRankArgs;
- rc = sqlite3Fts5ConfigParseRank(zIn, &zRank, &zRankArgs);
- if( rc==SQLITE_OK ){
- sqlite3_free(pConfig->zRank);
- sqlite3_free(pConfig->zRankArgs);
- pConfig->zRank = zRank;
- pConfig->zRankArgs = zRankArgs;
- }else if( rc==SQLITE_ERROR ){
- rc = SQLITE_OK;
- *pbBadkey = 1;
- }
- }else{
- *pbBadkey = 1;
- }
- return rc;
-}
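-
-/*
-** Summary of the keys accepted above, as implemented by this function:
-**
-**   "pgsz"         integer, 1 .. FTS5_MAX_PAGE_SIZE
-**   "automerge"    integer, 0 .. 64 (the value 1 selects the default)
-**   "crisismerge"  integer, >= 0 (values 0 and 1 select the default)
-**   "rank"         text, a rank specification such as 'bm25(10.0)'
-**
-** Any other key, or an out-of-range value, sets *pbBadkey.
-*/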
-
-/*
-** Load the contents of the %_config table into memory.
-*/
-static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){
- const char *zSelect = "SELECT k, v FROM %Q.'%q_config'";
- char *zSql;
- sqlite3_stmt *p = 0;
- int rc = SQLITE_OK;
- int iVersion = 0;
-
- /* Set default values */
- pConfig->pgsz = FTS5_DEFAULT_PAGE_SIZE;
- pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE;
- pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
-
- zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);
- if( zSql ){
- rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p, 0);
- sqlite3_free(zSql);
- }
-
- assert( rc==SQLITE_OK || p==0 );
- if( rc==SQLITE_OK ){
- while( SQLITE_ROW==sqlite3_step(p) ){
- const char *zK = (const char*)sqlite3_column_text(p, 0);
- sqlite3_value *pVal = sqlite3_column_value(p, 1);
- if( 0==sqlite3_stricmp(zK, "version") ){
- iVersion = sqlite3_value_int(pVal);
- }else{
- int bDummy = 0;
- sqlite3Fts5ConfigSetValue(pConfig, zK, pVal, &bDummy);
- }
- }
- rc = sqlite3_finalize(p);
- }
-
- if( rc==SQLITE_OK && iVersion!=FTS5_CURRENT_VERSION ){
- rc = SQLITE_ERROR;
- if( pConfig->pzErrmsg ){
- assert( 0==*pConfig->pzErrmsg );
- *pConfig->pzErrmsg = sqlite3_mprintf(
- "invalid fts5 file format (found %d, expected %d) - run 'rebuild'",
- iVersion, FTS5_CURRENT_VERSION
- );
- }
- }
-
- if( rc==SQLITE_OK ){
- pConfig->iCookie = iCookie;
- }
- return rc;
-}
-
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-*/
-
-
-
-
-/*
-** All token types in the generated fts5parse.h file are greater than 0.
-*/
-#define FTS5_EOF 0
-
-#define FTS5_LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
-
-typedef struct Fts5ExprTerm Fts5ExprTerm;
-
-/*
-** Functions generated by lemon from fts5parse.y.
-*/
-static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(u64));
-static void sqlite3Fts5ParserFree(void*, void (*freeProc)(void*));
-static void sqlite3Fts5Parser(void*, int, Fts5Token, Fts5Parse*);
-#ifndef NDEBUG
-/* #include <stdio.h> */
-static void sqlite3Fts5ParserTrace(FILE*, char*);
-#endif
-
-
-struct Fts5Expr {
- Fts5Index *pIndex;
- Fts5ExprNode *pRoot;
- int bDesc; /* Iterate in descending rowid order */
- int nPhrase; /* Number of phrases in expression */
- Fts5ExprPhrase **apExprPhrase; /* Pointers to phrase objects */
-};
-
-/*
-** eType:
-** Expression node type. Always one of:
-**
-** FTS5_AND (nChild, apChild valid)
-** FTS5_OR (nChild, apChild valid)
-** FTS5_NOT (nChild, apChild valid)
-** FTS5_STRING (pNear valid)
-** FTS5_TERM (pNear valid)
-*/
-struct Fts5ExprNode {
- int eType; /* Node type */
- int bEof; /* True at EOF */
- int bNomatch; /* True if entry is not a match */
-
- i64 iRowid; /* Current rowid */
- Fts5ExprNearset *pNear; /* For FTS5_STRING - cluster of phrases */
-
- /* Child nodes. For a NOT node, this array always contains 2 entries. For
- ** AND or OR nodes, it contains 2 or more entries. */
- int nChild; /* Number of child nodes */
- Fts5ExprNode *apChild[1]; /* Array of child nodes */
-};
-
-#define Fts5NodeIsString(p) ((p)->eType==FTS5_TERM || (p)->eType==FTS5_STRING)
-
-/*
-** An instance of the following structure represents a single search term
-** or term prefix.
-*/
-struct Fts5ExprTerm {
- int bPrefix; /* True for a prefix term */
- char *zTerm; /* nul-terminated term */
- Fts5IndexIter *pIter; /* Iterator for this term */
- Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */
-};
-
-/*
-** A phrase. One or more terms that must appear in a contiguous sequence
-** within a document for it to match.
-*/
-struct Fts5ExprPhrase {
- Fts5ExprNode *pNode; /* FTS5_STRING node this phrase is part of */
- Fts5Buffer poslist; /* Current position list */
- int nTerm; /* Number of entries in aTerm[] */
- Fts5ExprTerm aTerm[1]; /* Terms that make up this phrase */
-};
-
-/*
-** One or more phrases that must appear within a certain token distance of
-** each other within each matching document.
-*/
-struct Fts5ExprNearset {
- int nNear; /* NEAR parameter */
- Fts5Colset *pColset; /* Columns to search (NULL -> all columns) */
- int nPhrase; /* Number of entries in aPhrase[] array */
- Fts5ExprPhrase *apPhrase[1]; /* Array of phrase pointers */
-};
-
-
-/*
-** Parse context.
-*/
-struct Fts5Parse {
- Fts5Config *pConfig;
- char *zErr;
- int rc;
- int nPhrase; /* Size of apPhrase array */
- Fts5ExprPhrase **apPhrase; /* Array of all phrases */
- Fts5ExprNode *pExpr; /* Result of a successful parse */
-};
-
-static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){
- va_list ap;
- va_start(ap, zFmt);
- if( pParse->rc==SQLITE_OK ){
- pParse->zErr = sqlite3_vmprintf(zFmt, ap);
- pParse->rc = SQLITE_ERROR;
- }
- va_end(ap);
-}
-
-static int fts5ExprIsspace(char t){
- return t==' ' || t=='\t' || t=='\n' || t=='\r';
-}
-
-/*
-** Read the first token from the nul-terminated string at *pz.
-*/
-static int fts5ExprGetToken(
- Fts5Parse *pParse,
- const char **pz, /* IN/OUT: Pointer into buffer */
- Fts5Token *pToken
-){
- const char *z = *pz;
- int tok;
-
- /* Skip past any whitespace */
- while( fts5ExprIsspace(*z) ) z++;
-
- pToken->p = z;
- pToken->n = 1;
- switch( *z ){
- case '(': tok = FTS5_LP; break;
- case ')': tok = FTS5_RP; break;
- case '{': tok = FTS5_LCP; break;
- case '}': tok = FTS5_RCP; break;
- case ':': tok = FTS5_COLON; break;
- case ',': tok = FTS5_COMMA; break;
- case '+': tok = FTS5_PLUS; break;
- case '*': tok = FTS5_STAR; break;
- case '\0': tok = FTS5_EOF; break;
-
- case '"': {
- const char *z2;
- tok = FTS5_STRING;
-
- for(z2=&z[1]; 1; z2++){
- if( z2[0]=='"' ){
- z2++;
- if( z2[0]!='"' ) break;
- }
- if( z2[0]=='\0' ){
- sqlite3Fts5ParseError(pParse, "unterminated string");
- return FTS5_EOF;
- }
- }
- pToken->n = (z2 - z);
- break;
- }
-
- default: {
- const char *z2;
- if( sqlite3Fts5IsBareword(z[0])==0 ){
- sqlite3Fts5ParseError(pParse, "fts5: syntax error near \"%.1s\"", z);
- return FTS5_EOF;
- }
- tok = FTS5_STRING;
- for(z2=&z[1]; sqlite3Fts5IsBareword(*z2); z2++);
- pToken->n = (z2 - z);
- if( pToken->n==2 && memcmp(pToken->p, "OR", 2)==0 ) tok = FTS5_OR;
- if( pToken->n==3 && memcmp(pToken->p, "NOT", 3)==0 ) tok = FTS5_NOT;
- if( pToken->n==3 && memcmp(pToken->p, "AND", 3)==0 ) tok = FTS5_AND;
- break;
- }
- }
-
- *pz = &pToken->p[pToken->n];
- return tok;
-}
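-
-/*
-** Illustrative example (hypothetical query text): repeated calls to
-** fts5ExprGetToken() on the input
-**
-**   a AND "b c"*
-**
-** produce the token stream
-**
-**   FTS5_STRING (a), FTS5_AND, FTS5_STRING ("b c"), FTS5_STAR, FTS5_EOF
-**
-** Quoted strings keep their quote characters at this stage; they are
-** dequoted later, when the phrase itself is parsed.
-*/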
-
-static void *fts5ParseAlloc(u64 t){ return sqlite3_malloc((int)t); }
-static void fts5ParseFree(void *p){ sqlite3_free(p); }
-
-static int sqlite3Fts5ExprNew(
- Fts5Config *pConfig, /* FTS5 Configuration */
- const char *zExpr, /* Expression text */
- Fts5Expr **ppNew,
- char **pzErr
-){
- Fts5Parse sParse;
- Fts5Token token;
- const char *z = zExpr;
- int t; /* Next token type */
- void *pEngine;
- Fts5Expr *pNew;
-
- *ppNew = 0;
- *pzErr = 0;
- memset(&sParse, 0, sizeof(sParse));
- pEngine = sqlite3Fts5ParserAlloc(fts5ParseAlloc);
- if( pEngine==0 ){ return SQLITE_NOMEM; }
- sParse.pConfig = pConfig;
-
- do {
- t = fts5ExprGetToken(&sParse, &z, &token);
- sqlite3Fts5Parser(pEngine, t, token, &sParse);
- }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF );
- sqlite3Fts5ParserFree(pEngine, fts5ParseFree);
-
- assert( sParse.rc!=SQLITE_OK || sParse.zErr==0 );
- if( sParse.rc==SQLITE_OK ){
- *ppNew = pNew = sqlite3_malloc(sizeof(Fts5Expr));
- if( pNew==0 ){
- sParse.rc = SQLITE_NOMEM;
- sqlite3Fts5ParseNodeFree(sParse.pExpr);
- }else{
- pNew->pRoot = sParse.pExpr;
- pNew->pIndex = 0;
- pNew->apExprPhrase = sParse.apPhrase;
- pNew->nPhrase = sParse.nPhrase;
- sParse.apPhrase = 0;
- }
- }
-
- sqlite3_free(sParse.apPhrase);
- *pzErr = sParse.zErr;
- return sParse.rc;
-}
-
-/*
-** Free the expression node object passed as the only argument.
-*/
-static void sqlite3Fts5ParseNodeFree(Fts5ExprNode *p){
- if( p ){
- int i;
- for(i=0; i<p->nChild; i++){
- sqlite3Fts5ParseNodeFree(p->apChild[i]);
- }
- sqlite3Fts5ParseNearsetFree(p->pNear);
- sqlite3_free(p);
- }
-}
-
-/*
-** Free the expression object passed as the only argument.
-*/
-static void sqlite3Fts5ExprFree(Fts5Expr *p){
- if( p ){
- sqlite3Fts5ParseNodeFree(p->pRoot);
- sqlite3_free(p->apExprPhrase);
- sqlite3_free(p);
- }
-}
-
-/*
-** Argument pTerm must be a synonym iterator. Return the current rowid
-** that it points to.
-*/
-static i64 fts5ExprSynonymRowid(Fts5ExprTerm *pTerm, int bDesc, int *pbEof){
- i64 iRet = 0;
- int bRetValid = 0;
- Fts5ExprTerm *p;
-
- assert( pTerm->pSynonym );
- assert( bDesc==0 || bDesc==1 );
- for(p=pTerm; p; p=p->pSynonym){
- if( 0==sqlite3Fts5IterEof(p->pIter) ){
- i64 iRowid = sqlite3Fts5IterRowid(p->pIter);
- if( bRetValid==0 || (bDesc!=(iRowid<iRet)) ){
- iRet = iRowid;
- bRetValid = 1;
- }
- }
- }
-
- if( pbEof && bRetValid==0 ) *pbEof = 1;
- return iRet;
-}
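-
-/*
-** For example (illustrative): if the synonym iterators currently point to
-** rowids 3 and 7, the function above returns 3 when bDesc==0 (ascending
-** iteration) and 7 when bDesc==1 - whichever rowid comes first in the
-** iteration order. If every synonym iterator is at EOF, *pbEof is set.
-*/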
-
-/*
-** Argument pTerm must be a synonym iterator.
-*/
-static int fts5ExprSynonymPoslist(
- Fts5ExprTerm *pTerm,
- Fts5Colset *pColset,
- i64 iRowid,
- int *pbDel, /* OUT: Caller should sqlite3_free(*pa) */
- u8 **pa, int *pn
-){
- Fts5PoslistReader aStatic[4];
- Fts5PoslistReader *aIter = aStatic;
- int nIter = 0;
- int nAlloc = 4;
- int rc = SQLITE_OK;
- Fts5ExprTerm *p;
-
- assert( pTerm->pSynonym );
- for(p=pTerm; p; p=p->pSynonym){
- Fts5IndexIter *pIter = p->pIter;
- if( sqlite3Fts5IterEof(pIter)==0 && sqlite3Fts5IterRowid(pIter)==iRowid ){
- const u8 *a;
- int n;
- i64 dummy;
- rc = sqlite3Fts5IterPoslist(pIter, pColset, &a, &n, &dummy);
- if( rc!=SQLITE_OK ) goto synonym_poslist_out;
- if( nIter==nAlloc ){
- int nByte = sizeof(Fts5PoslistReader) * nAlloc * 2;
- Fts5PoslistReader *aNew = (Fts5PoslistReader*)sqlite3_malloc(nByte);
- if( aNew==0 ){
- rc = SQLITE_NOMEM;
- goto synonym_poslist_out;
- }
- memcpy(aNew, aIter, sizeof(Fts5PoslistReader) * nIter);
- nAlloc = nAlloc*2;
- if( aIter!=aStatic ) sqlite3_free(aIter);
- aIter = aNew;
- }
- sqlite3Fts5PoslistReaderInit(a, n, &aIter[nIter]);
- assert( aIter[nIter].bEof==0 );
- nIter++;
- }
- }
-
- assert( *pbDel==0 );
- if( nIter==1 ){
- *pa = (u8*)aIter[0].a;
- *pn = aIter[0].n;
- }else{
- Fts5PoslistWriter writer = {0};
- Fts5Buffer buf = {0,0,0};
- i64 iPrev = -1;
- while( 1 ){
- int i;
- i64 iMin = FTS5_LARGEST_INT64;
- for(i=0; i<nIter; i++){
- if( aIter[i].bEof==0 ){
- if( aIter[i].iPos==iPrev ){
- if( sqlite3Fts5PoslistReaderNext(&aIter[i]) ) continue;
- }
- if( aIter[i].iPos<iMin ){
- iMin = aIter[i].iPos;
- }
- }
- }
- if( iMin==FTS5_LARGEST_INT64 || rc!=SQLITE_OK ) break;
- rc = sqlite3Fts5PoslistWriterAppend(&buf, &writer, iMin);
- iPrev = iMin;
- }
- if( rc ){
- sqlite3_free(buf.p);
- }else{
- *pa = buf.p;
- *pn = buf.n;
- *pbDel = 1;
- }
- }
-
- synonym_poslist_out:
- if( aIter!=aStatic ) sqlite3_free(aIter);
- return rc;
-}
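-
-/*
-** Illustrative note: when more than one synonym matches the row, the merge
-** loop above produces the sorted union of their position lists with
-** duplicates removed. For example, {1, 5} and {3, 5} merge to {1, 3, 5},
-** written to a new buffer that the caller must free (*pbDel is set to 1).
-*/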
-
-
-/*
-** All individual term iterators in pPhrase are guaranteed to be valid and
-** pointing to the same rowid when this function is called. This function
-** checks if the current rowid really is a match, and if so populates
-** the pPhrase->poslist buffer accordingly. Output parameter *pbMatch
-** is set to true if this is really a match, or false otherwise.
-**
-** SQLITE_OK is returned if no error occurs, or an SQLite error code
-** otherwise. It is not considered an error if the current rowid is
-** not a match.
-*/
-static int fts5ExprPhraseIsMatch(
- Fts5ExprNode *pNode, /* Node pPhrase belongs to */
- Fts5Colset *pColset, /* Restrict matches to these columns */
- Fts5ExprPhrase *pPhrase, /* Phrase object to initialize */
- int *pbMatch /* OUT: Set to true if really a match */
-){
- Fts5PoslistWriter writer = {0};
- Fts5PoslistReader aStatic[4];
- Fts5PoslistReader *aIter = aStatic;
- int i;
- int rc = SQLITE_OK;
-
- fts5BufferZero(&pPhrase->poslist);
-
- /* If the aStatic[] array is not large enough, allocate a large array
- ** using sqlite3_malloc(). This approach could be improved upon. */
- if( pPhrase->nTerm>(sizeof(aStatic) / sizeof(aStatic[0])) ){
- int nByte = sizeof(Fts5PoslistReader) * pPhrase->nTerm;
- aIter = (Fts5PoslistReader*)sqlite3_malloc(nByte);
- if( !aIter ) return SQLITE_NOMEM;
- }
- memset(aIter, 0, sizeof(Fts5PoslistReader) * pPhrase->nTerm);
-
- /* Initialize a term iterator for each term in the phrase */
- for(i=0; i<pPhrase->nTerm; i++){
- Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
- i64 dummy;
- int n = 0;
- int bFlag = 0;
- const u8 *a = 0;
- if( pTerm->pSynonym ){
- rc = fts5ExprSynonymPoslist(
- pTerm, pColset, pNode->iRowid, &bFlag, (u8**)&a, &n
- );
- }else{
- rc = sqlite3Fts5IterPoslist(pTerm->pIter, pColset, &a, &n, &dummy);
- }
- if( rc!=SQLITE_OK ) goto ismatch_out;
- sqlite3Fts5PoslistReaderInit(a, n, &aIter[i]);
- aIter[i].bFlag = bFlag;
- if( aIter[i].bEof ) goto ismatch_out;
- }
-
- while( 1 ){
- int bMatch;
- i64 iPos = aIter[0].iPos;
- do {
- bMatch = 1;
- for(i=0; i<pPhrase->nTerm; i++){
- Fts5PoslistReader *pPos = &aIter[i];
- i64 iAdj = iPos + i;
- if( pPos->iPos!=iAdj ){
- bMatch = 0;
- while( pPos->iPos<iAdj ){
- if( sqlite3Fts5PoslistReaderNext(pPos) ) goto ismatch_out;
- }
- if( pPos->iPos>iAdj ) iPos = pPos->iPos-i;
- }
- }
- }while( bMatch==0 );
-
- /* Append position iPos to the output */
- rc = sqlite3Fts5PoslistWriterAppend(&pPhrase->poslist, &writer, iPos);
- if( rc!=SQLITE_OK ) goto ismatch_out;
-
- for(i=0; i<pPhrase->nTerm; i++){
- if( sqlite3Fts5PoslistReaderNext(&aIter[i]) ) goto ismatch_out;
- }
- }
-
- ismatch_out:
- *pbMatch = (pPhrase->poslist.n>0);
- for(i=0; i<pPhrase->nTerm; i++){
- if( aIter[i].bFlag ) sqlite3_free((u8*)aIter[i].a);
- }
- if( aIter!=aStatic ) sqlite3_free(aIter);
- return rc;
-}
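-
-/*
-** Worked example (hypothetical position lists): for a two-term phrase,
-** term 0 must appear at some position iPos and term 1 at iPos+1. If the
-** position lists for the current row are
-**
-**   term 0 -> {5, 20}      term 1 -> {6, 9}
-**
-** the loop above converges on iPos==5 (5 and 6 are adjacent) and appends
-** 5 to pPhrase->poslist. No other alignment exists, so *pbMatch is set
-** with a single entry in the output position list.
-*/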
-
-typedef struct Fts5LookaheadReader Fts5LookaheadReader;
-struct Fts5LookaheadReader {
- const u8 *a; /* Buffer containing position list */
- int n; /* Size of buffer a[] in bytes */
- int i; /* Current offset in position list */
- i64 iPos; /* Current position */
- i64 iLookahead; /* Next position */
-};
-
-#define FTS5_LOOKAHEAD_EOF (((i64)1) << 62)
-
-static int fts5LookaheadReaderNext(Fts5LookaheadReader *p){
- p->iPos = p->iLookahead;
- if( sqlite3Fts5PoslistNext64(p->a, p->n, &p->i, &p->iLookahead) ){
- p->iLookahead = FTS5_LOOKAHEAD_EOF;
- }
- return (p->iPos==FTS5_LOOKAHEAD_EOF);
-}
-
-static int fts5LookaheadReaderInit(
- const u8 *a, int n, /* Buffer to read position list from */
- Fts5LookaheadReader *p /* Iterator object to initialize */
-){
- memset(p, 0, sizeof(Fts5LookaheadReader));
- p->a = a;
- p->n = n;
- fts5LookaheadReaderNext(p);
- return fts5LookaheadReaderNext(p);
-}
-
-#if 0
-static int fts5LookaheadReaderEof(Fts5LookaheadReader *p){
- return (p->iPos==FTS5_LOOKAHEAD_EOF);
-}
-#endif
-
-typedef struct Fts5NearTrimmer Fts5NearTrimmer;
-struct Fts5NearTrimmer {
- Fts5LookaheadReader reader; /* Input iterator */
- Fts5PoslistWriter writer; /* Writer context */
- Fts5Buffer *pOut; /* Output poslist */
-};
-
-/*
-** The near-set object passed as the first argument contains more than
-** one phrase. All phrases currently point to the same row. The
-** Fts5ExprPhrase.poslist buffers are populated accordingly. This function
-** tests if the current row contains instances of each phrase sufficiently
-** close together to meet the NEAR constraint. Non-zero is returned if it
-** does, or zero otherwise.
-**
-** If in/out parameter (*pRc) is set to other than SQLITE_OK when this
-** function is called, it is a no-op. Or, if an error (e.g. SQLITE_NOMEM)
-** occurs within this function, (*pRc) is set accordingly before returning.
-** The return value is undefined in both these cases.
-**
-** If no error occurs and non-zero (a match) is returned, the position-list
-** of each phrase object is edited to contain only those entries that
-** meet the constraint before returning.
-*/
-static int fts5ExprNearIsMatch(int *pRc, Fts5ExprNearset *pNear){
- Fts5NearTrimmer aStatic[4];
- Fts5NearTrimmer *a = aStatic;
- Fts5ExprPhrase **apPhrase = pNear->apPhrase;
-
- int i;
- int rc = *pRc;
- int bMatch;
-
- assert( pNear->nPhrase>1 );
-
- /* If the aStatic[] array is not large enough, allocate a large array
- ** using sqlite3_malloc(). This approach could be improved upon. */
- if( pNear->nPhrase>(sizeof(aStatic) / sizeof(aStatic[0])) ){
- int nByte = sizeof(Fts5NearTrimmer) * pNear->nPhrase;
- a = (Fts5NearTrimmer*)sqlite3Fts5MallocZero(&rc, nByte);
- }else{
- memset(aStatic, 0, sizeof(aStatic));
- }
- if( rc!=SQLITE_OK ){
- *pRc = rc;
- return 0;
- }
-
- /* Initialize a lookahead iterator for each phrase. After passing the
- ** buffer and buffer size to the lookahead-reader init function, zero
- ** the phrase poslist buffer. The new poslist for the phrase (containing
- ** the same entries as the original with some entries removed on account
- ** of the NEAR constraint) is written over the original even as it is
- ** being read. This is safe as the entries for the new poslist are a
- ** subset of the old, so it is not possible for data yet to be read to
- ** be overwritten. */
- for(i=0; i<pNear->nPhrase; i++){
- Fts5Buffer *pPoslist = &apPhrase[i]->poslist;
- fts5LookaheadReaderInit(pPoslist->p, pPoslist->n, &a[i].reader);
- pPoslist->n = 0;
- a[i].pOut = pPoslist;
- }
-
- while( 1 ){
- int iAdv;
- i64 iMin;
- i64 iMax;
-
- /* This block advances the phrase iterators until they point to a set of
- ** entries that together comprise a match. */
- iMax = a[0].reader.iPos;
- do {
- bMatch = 1;
- for(i=0; i<pNear->nPhrase; i++){
- Fts5LookaheadReader *pPos = &a[i].reader;
- iMin = iMax - pNear->apPhrase[i]->nTerm - pNear->nNear;
- if( pPos->iPos<iMin || pPos->iPos>iMax ){
- bMatch = 0;
- while( pPos->iPos<iMin ){
- if( fts5LookaheadReaderNext(pPos) ) goto ismatch_out;
- }
- if( pPos->iPos>iMax ) iMax = pPos->iPos;
- }
- }
- }while( bMatch==0 );
-
- /* Add an entry to each output position list */
- for(i=0; i<pNear->nPhrase; i++){
- i64 iPos = a[i].reader.iPos;
- Fts5PoslistWriter *pWriter = &a[i].writer;
- if( a[i].pOut->n==0 || iPos!=pWriter->iPrev ){
- sqlite3Fts5PoslistWriterAppend(a[i].pOut, pWriter, iPos);
- }
- }
-
- iAdv = 0;
- iMin = a[0].reader.iLookahead;
- for(i=0; i<pNear->nPhrase; i++){
- if( a[i].reader.iLookahead < iMin ){
- iMin = a[i].reader.iLookahead;
- iAdv = i;
- }
- }
- if( fts5LookaheadReaderNext(&a[iAdv].reader) ) goto ismatch_out;
- }
-
- ismatch_out: {
- int bRet = a[0].pOut->n>0;
- *pRc = rc;
- if( a!=aStatic ) sqlite3_free(a);
- return bRet;
- }
-}
-
-/*
-** Advance the first term iterator in the first phrase of pNear. Set output
-** variable *pbEof to true if it reaches EOF or if an error occurs.
-**
-** Return SQLITE_OK if successful, or an SQLite error code if an error
-** occurs.
-*/
-static int fts5ExprNearAdvanceFirst(
- Fts5Expr *pExpr, /* Expression pPhrase belongs to */
- Fts5ExprNode *pNode, /* FTS5_STRING or FTS5_TERM node */
- int bFromValid,
- i64 iFrom
-){
- Fts5ExprTerm *pTerm = &pNode->pNear->apPhrase[0]->aTerm[0];
- int rc = SQLITE_OK;
-
- if( pTerm->pSynonym ){
- int bEof = 1;
- Fts5ExprTerm *p;
-
- /* Find the earliest rowid (in the iteration order) any synonym points to. */
- i64 iRowid = fts5ExprSynonymRowid(pTerm, pExpr->bDesc, 0);
-
- /* Advance each iterator that currently points to iRowid. Or, if iFrom
- ** is valid - each iterator that points to a rowid before iFrom. */
- for(p=pTerm; p; p=p->pSynonym){
- if( sqlite3Fts5IterEof(p->pIter)==0 ){
- i64 ii = sqlite3Fts5IterRowid(p->pIter);
- if( ii==iRowid
- || (bFromValid && ii!=iFrom && (ii>iFrom)==pExpr->bDesc)
- ){
- if( bFromValid ){
- rc = sqlite3Fts5IterNextFrom(p->pIter, iFrom);
- }else{
- rc = sqlite3Fts5IterNext(p->pIter);
- }
- if( rc!=SQLITE_OK ) break;
- if( sqlite3Fts5IterEof(p->pIter)==0 ){
- bEof = 0;
- }
- }else{
- bEof = 0;
- }
- }
- }
-
- /* Set the EOF flag if either all synonym iterators are at EOF or an
- ** error has occurred. */
- pNode->bEof = (rc || bEof);
- }else{
- Fts5IndexIter *pIter = pTerm->pIter;
-
- assert( Fts5NodeIsString(pNode) );
- if( bFromValid ){
- rc = sqlite3Fts5IterNextFrom(pIter, iFrom);
- }else{
- rc = sqlite3Fts5IterNext(pIter);
- }
-
- pNode->bEof = (rc || sqlite3Fts5IterEof(pIter));
- }
-
- return rc;
-}
-
-/*
-** Advance iterator pIter until it points to a value equal to or later
-** (in the iteration order) than the initial value of *piLast. If this
-** means the iterator points to a value later than *piLast, update *piLast
-** to that value.
-**
-** If the iterator reaches EOF, set *pbEof to true before returning. If
-** an error occurs, set *pRc to an error code. If either *pbEof or *pRc
-** are set, return a non-zero value. Otherwise, return zero.
-*/
-static int fts5ExprAdvanceto(
- Fts5IndexIter *pIter, /* Iterator to advance */
- int bDesc, /* True if iterator is "rowid DESC" */
- i64 *piLast, /* IN/OUT: Most recent rowid seen so far */
- int *pRc, /* OUT: Error code */
- int *pbEof /* OUT: Set to true if EOF */
-){
- i64 iLast = *piLast;
- i64 iRowid;
-
- iRowid = sqlite3Fts5IterRowid(pIter);
- if( (bDesc==0 && iLast>iRowid) || (bDesc && iLast<iRowid) ){
- int rc = sqlite3Fts5IterNextFrom(pIter, iLast);
- if( rc || sqlite3Fts5IterEof(pIter) ){
- *pRc = rc;
- *pbEof = 1;
- return 1;
- }
- iRowid = sqlite3Fts5IterRowid(pIter);
- assert( (bDesc==0 && iRowid>=iLast) || (bDesc==1 && iRowid<=iLast) );
- }
- *piLast = iRowid;
-
- return 0;
-}
-
-static int fts5ExprSynonymAdvanceto(
- Fts5ExprTerm *pTerm, /* Term iterator to advance */
- int bDesc, /* True if iterator is "rowid DESC" */
- i64 *piLast, /* IN/OUT: Most recent rowid seen so far */
- int *pRc /* OUT: Error code */
-){
- int rc = SQLITE_OK;
- i64 iLast = *piLast;
- Fts5ExprTerm *p;
- int bEof = 0;
-
- for(p=pTerm; rc==SQLITE_OK && p; p=p->pSynonym){
- if( sqlite3Fts5IterEof(p->pIter)==0 ){
- i64 iRowid = sqlite3Fts5IterRowid(p->pIter);
- if( (bDesc==0 && iLast>iRowid) || (bDesc && iLast<iRowid) ){
- rc = sqlite3Fts5IterNextFrom(p->pIter, iLast);
- }
- }
- }
-
- if( rc!=SQLITE_OK ){
- *pRc = rc;
- bEof = 1;
- }else{
- *piLast = fts5ExprSynonymRowid(pTerm, bDesc, &bEof);
- }
- return bEof;
-}
-
-
-static int fts5ExprNearTest(
- int *pRc,
- Fts5Expr *pExpr, /* Expression that pNear is a part of */
- Fts5ExprNode *pNode /* The "NEAR" node (FTS5_STRING) */
-){
- Fts5ExprNearset *pNear = pNode->pNear;
- int rc = *pRc;
- int i;
-
- /* Check that each phrase in the nearset matches the current row.
- ** Populate the pPhrase->poslist buffers at the same time. If any
- ** phrase is not a match, break out of the loop early. */
- for(i=0; rc==SQLITE_OK && i<pNear->nPhrase; i++){
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
- if( pPhrase->nTerm>1 || pPhrase->aTerm[0].pSynonym || pNear->pColset ){
- int bMatch = 0;
- rc = fts5ExprPhraseIsMatch(pNode, pNear->pColset, pPhrase, &bMatch);
- if( bMatch==0 ) break;
- }else{
- rc = sqlite3Fts5IterPoslistBuffer(
- pPhrase->aTerm[0].pIter, &pPhrase->poslist
- );
- }
- }
-
- *pRc = rc;
- if( i==pNear->nPhrase && (i==1 || fts5ExprNearIsMatch(pRc, pNear)) ){
- return 1;
- }
-
- return 0;
-}
-
-static int fts5ExprTokenTest(
- Fts5Expr *pExpr, /* Expression that pNear is a part of */
- Fts5ExprNode *pNode /* The "NEAR" node (FTS5_TERM) */
-){
- /* As this "NEAR" object is actually a single phrase that consists
- ** of a single term only, grab pointers into the poslist managed by the
- ** fts5_index.c iterator object. This is much faster than synthesizing
- ** a new poslist the way we have to for more complicated phrase or NEAR
- ** expressions. */
- Fts5ExprNearset *pNear = pNode->pNear;
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
- Fts5IndexIter *pIter = pPhrase->aTerm[0].pIter;
- Fts5Colset *pColset = pNear->pColset;
- int rc;
-
- assert( pNode->eType==FTS5_TERM );
- assert( pNear->nPhrase==1 && pPhrase->nTerm==1 );
- assert( pPhrase->aTerm[0].pSynonym==0 );
-
- rc = sqlite3Fts5IterPoslist(pIter, pColset,
- (const u8**)&pPhrase->poslist.p, &pPhrase->poslist.n, &pNode->iRowid
- );
- pNode->bNomatch = (pPhrase->poslist.n==0);
- return rc;
-}
-
-/*
-** All individual term iterators in pNear are guaranteed to be valid when
-** this function is called. This function checks if all term iterators
-** point to the same rowid, and if not, advances them until they do.
-** If an EOF is reached before this happens, *pbEof is set to true before
-** returning.
-**
-** SQLITE_OK is returned if no error occurs, or an SQLite error code
-** otherwise. It is not considered an error if an iterator reaches
-** EOF.
-*/
-static int fts5ExprNearNextMatch(
- Fts5Expr *pExpr, /* Expression pPhrase belongs to */
- Fts5ExprNode *pNode
-){
- Fts5ExprNearset *pNear = pNode->pNear;
- Fts5ExprPhrase *pLeft = pNear->apPhrase[0];
- int rc = SQLITE_OK;
- i64 iLast; /* Most recent rowid any iterator points to */
- int i, j; /* Phrase and token index, respectively */
- int bMatch; /* True if all terms are at the same rowid */
- const int bDesc = pExpr->bDesc;
-
- /* Check that this node should not be FTS5_TERM */
- assert( pNear->nPhrase>1
- || pNear->apPhrase[0]->nTerm>1
- || pNear->apPhrase[0]->aTerm[0].pSynonym
- );
-
- /* Initialize iLast, the furthest-advanced rowid any iterator points to. If the
- ** iterator skips through rowids in the default ascending order, this means
- ** the maximum rowid. Or, if the iterator is "ORDER BY rowid DESC", then it
- ** means the minimum rowid. */
- if( pLeft->aTerm[0].pSynonym ){
- iLast = fts5ExprSynonymRowid(&pLeft->aTerm[0], bDesc, 0);
- }else{
- iLast = sqlite3Fts5IterRowid(pLeft->aTerm[0].pIter);
- }
-
- do {
- bMatch = 1;
- for(i=0; i<pNear->nPhrase; i++){
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
- for(j=0; j<pPhrase->nTerm; j++){
- Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
- if( pTerm->pSynonym ){
- i64 iRowid = fts5ExprSynonymRowid(pTerm, bDesc, 0);
- if( iRowid==iLast ) continue;
- bMatch = 0;
- if( fts5ExprSynonymAdvanceto(pTerm, bDesc, &iLast, &rc) ){
- pNode->bEof = 1;
- return rc;
- }
- }else{
- Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter;
- i64 iRowid = sqlite3Fts5IterRowid(pIter);
- if( iRowid==iLast ) continue;
- bMatch = 0;
- if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){
- return rc;
- }
- }
- }
- }
- }while( bMatch==0 );
-
- pNode->iRowid = iLast;
- pNode->bNomatch = (0==fts5ExprNearTest(&rc, pExpr, pNode));
-
- return rc;
-}
-
-/*
-** Initialize all term iterators in the pNear object. If any term is found
-** to match no documents at all, return immediately without initializing any
-** further iterators.
-*/
-static int fts5ExprNearInitAll(
- Fts5Expr *pExpr,
- Fts5ExprNode *pNode
-){
- Fts5ExprNearset *pNear = pNode->pNear;
- int i, j;
- int rc = SQLITE_OK;
-
- for(i=0; rc==SQLITE_OK && i<pNear->nPhrase; i++){
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
- for(j=0; j<pPhrase->nTerm; j++){
- Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
- Fts5ExprTerm *p;
- int bEof = 1;
-
- for(p=pTerm; p && rc==SQLITE_OK; p=p->pSynonym){
- if( p->pIter ){
- sqlite3Fts5IterClose(p->pIter);
- p->pIter = 0;
- }
- rc = sqlite3Fts5IndexQuery(
- pExpr->pIndex, p->zTerm, strlen(p->zTerm),
- (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
- (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
- pNear->pColset,
- &p->pIter
- );
- assert( rc==SQLITE_OK || p->pIter==0 );
- if( p->pIter && 0==sqlite3Fts5IterEof(p->pIter) ){
- bEof = 0;
- }
- }
-
- if( bEof ){
- pNode->bEof = 1;
- return rc;
- }
- }
- }
-
- return rc;
-}
-
-/* fts5ExprNodeNext() calls fts5ExprNodeNextMatch(). And vice-versa. */
-static int fts5ExprNodeNextMatch(Fts5Expr*, Fts5ExprNode*);
-
-
-/*
-** If pExpr is an ASC iterator, this function returns a value with the
-** same sign as:
-**
-** (iLhs - iRhs)
-**
-** Otherwise, if this is a DESC iterator, the opposite is returned:
-**
-** (iRhs - iLhs)
-*/
-static int fts5RowidCmp(
- Fts5Expr *pExpr,
- i64 iLhs,
- i64 iRhs
-){
- assert( pExpr->bDesc==0 || pExpr->bDesc==1 );
- if( pExpr->bDesc==0 ){
- if( iLhs<iRhs ) return -1;
- return (iLhs > iRhs);
- }else{
- if( iLhs>iRhs ) return -1;
- return (iLhs < iRhs);
- }
-}
-
-static void fts5ExprSetEof(Fts5ExprNode *pNode){
- int i;
- pNode->bEof = 1;
- for(i=0; i<pNode->nChild; i++){
- fts5ExprSetEof(pNode->apChild[i]);
- }
-}
-
-static void fts5ExprNodeZeroPoslist(Fts5ExprNode *pNode){
- if( pNode->eType==FTS5_STRING || pNode->eType==FTS5_TERM ){
- Fts5ExprNearset *pNear = pNode->pNear;
- int i;
- for(i=0; i<pNear->nPhrase; i++){
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
- pPhrase->poslist.n = 0;
- }
- }else{
- int i;
- for(i=0; i<pNode->nChild; i++){
- fts5ExprNodeZeroPoslist(pNode->apChild[i]);
- }
- }
-}
-
-
-static int fts5ExprNodeNext(Fts5Expr*, Fts5ExprNode*, int, i64);
-
-/*
-** Argument pNode is an FTS5_AND node.
-*/
-static int fts5ExprAndNextRowid(
- Fts5Expr *pExpr, /* Expression pPhrase belongs to */
- Fts5ExprNode *pAnd /* FTS5_AND node to advance */
-){
- int iChild;
- i64 iLast = pAnd->iRowid;
- int rc = SQLITE_OK;
- int bMatch;
-
- assert( pAnd->bEof==0 );
- do {
- pAnd->bNomatch = 0;
- bMatch = 1;
- for(iChild=0; iChild<pAnd->nChild; iChild++){
- Fts5ExprNode *pChild = pAnd->apChild[iChild];
- if( 0 && pChild->eType==FTS5_STRING ){
- /* TODO */
- }else{
- int cmp = fts5RowidCmp(pExpr, iLast, pChild->iRowid);
- if( cmp>0 ){
- /* Advance pChild until it points to iLast or past it in the iteration order */
- rc = fts5ExprNodeNext(pExpr, pChild, 1, iLast);
- if( rc!=SQLITE_OK ) return rc;
- }
- }
-
- /* If the child node is now at EOF, so is the parent AND node. Otherwise,
- ** the child node is guaranteed to have advanced at least as far as
- ** rowid iLast. So if it is not at exactly iLast, pChild->iRowid is the
- ** new furthest-advanced rowid seen so far. */
- assert( pChild->bEof || fts5RowidCmp(pExpr, iLast, pChild->iRowid)<=0 );
- if( pChild->bEof ){
- fts5ExprSetEof(pAnd);
- bMatch = 1;
- break;
- }else if( iLast!=pChild->iRowid ){
- bMatch = 0;
- iLast = pChild->iRowid;
- }
-
- if( pChild->bNomatch ){
- pAnd->bNomatch = 1;
- }
- }
- }while( bMatch==0 );
-
- if( pAnd->bNomatch && pAnd!=pExpr->pRoot ){
- fts5ExprNodeZeroPoslist(pAnd);
- }
- pAnd->iRowid = iLast;
- return SQLITE_OK;
-}
-
-
-/*
-** Compare the values currently indicated by the two nodes as follows:
-**
-** res = (*p1) - (*p2)
-**
-** Nodes that point to values that come later in the iteration order are
-** considered to be larger. Nodes at EOF are the largest of all.
-**
-** This means that if the iteration order is ASC, then numerically larger
-** rowids are considered larger. Or if it is the default DESC, numerically
-** smaller rowids are larger.
-*/
-static int fts5NodeCompare(
- Fts5Expr *pExpr,
- Fts5ExprNode *p1,
- Fts5ExprNode *p2
-){
- if( p2->bEof ) return -1;
- if( p1->bEof ) return +1;
- return fts5RowidCmp(pExpr, p1->iRowid, p2->iRowid);
-}
-
-/*
-** Advance node iterator pNode, part of expression pExpr. If argument
-** bFromValid is zero, then pNode is advanced exactly once. Or, if argument
-** bFromValid is non-zero, then pNode is advanced until it is at or past
-** rowid value iFrom. Whether "past" means "less than" or "greater than"
-** depends on whether this is an ASC or DESC iterator.
-*/
-static int fts5ExprNodeNext(
- Fts5Expr *pExpr,
- Fts5ExprNode *pNode,
- int bFromValid,
- i64 iFrom
-){
- int rc = SQLITE_OK;
-
- if( pNode->bEof==0 ){
- switch( pNode->eType ){
- case FTS5_STRING: {
- rc = fts5ExprNearAdvanceFirst(pExpr, pNode, bFromValid, iFrom);
- break;
- };
-
- case FTS5_TERM: {
- Fts5IndexIter *pIter = pNode->pNear->apPhrase[0]->aTerm[0].pIter;
- if( bFromValid ){
- rc = sqlite3Fts5IterNextFrom(pIter, iFrom);
- }else{
- rc = sqlite3Fts5IterNext(pIter);
- }
- if( rc==SQLITE_OK && sqlite3Fts5IterEof(pIter)==0 ){
- assert( rc==SQLITE_OK );
- rc = fts5ExprTokenTest(pExpr, pNode);
- }else{
- pNode->bEof = 1;
- }
- return rc;
- };
-
- case FTS5_AND: {
- Fts5ExprNode *pLeft = pNode->apChild[0];
- rc = fts5ExprNodeNext(pExpr, pLeft, bFromValid, iFrom);
- break;
- }
-
- case FTS5_OR: {
- int i;
- i64 iLast = pNode->iRowid;
-
- for(i=0; rc==SQLITE_OK && i<pNode->nChild; i++){
- Fts5ExprNode *p1 = pNode->apChild[i];
- assert( p1->bEof || fts5RowidCmp(pExpr, p1->iRowid, iLast)>=0 );
- if( p1->bEof==0 ){
- if( (p1->iRowid==iLast)
- || (bFromValid && fts5RowidCmp(pExpr, p1->iRowid, iFrom)<0)
- ){
- rc = fts5ExprNodeNext(pExpr, p1, bFromValid, iFrom);
- }
- }
- }
-
- break;
- }
-
- default: assert( pNode->eType==FTS5_NOT ); {
- assert( pNode->nChild==2 );
- rc = fts5ExprNodeNext(pExpr, pNode->apChild[0], bFromValid, iFrom);
- break;
- }
- }
-
- if( rc==SQLITE_OK ){
- rc = fts5ExprNodeNextMatch(pExpr, pNode);
- }
- }
-
- /* Assert that if bFromValid was true, either:
- **
- ** a) an error occurred, or
- ** b) the node is now at EOF, or
- ** c) the node is now at or past rowid iFrom.
- */
- assert( bFromValid==0
- || rc!=SQLITE_OK /* a */
- || pNode->bEof /* b */
- || pNode->iRowid==iFrom || pExpr->bDesc==(pNode->iRowid<iFrom) /* c */
- );
-
- return rc;
-}
-
-
-/*
-** If pNode currently points to a match, this function returns SQLITE_OK
-** without modifying it. Otherwise, pNode is advanced until it does point
-** to a match or EOF is reached.
-*/
-static int fts5ExprNodeNextMatch(
- Fts5Expr *pExpr, /* Expression of which pNode is a part */
- Fts5ExprNode *pNode /* Expression node to test */
-){
- int rc = SQLITE_OK;
- if( pNode->bEof==0 ){
- switch( pNode->eType ){
-
- case FTS5_STRING: {
- /* Advance the iterators until they all point to the same rowid */
- rc = fts5ExprNearNextMatch(pExpr, pNode);
- break;
- }
-
- case FTS5_TERM: {
- rc = fts5ExprTokenTest(pExpr, pNode);
- break;
- }
-
- case FTS5_AND: {
- rc = fts5ExprAndNextRowid(pExpr, pNode);
- break;
- }
-
- case FTS5_OR: {
- Fts5ExprNode *pNext = pNode->apChild[0];
- int i;
-
- for(i=1; i<pNode->nChild; i++){
- Fts5ExprNode *pChild = pNode->apChild[i];
- int cmp = fts5NodeCompare(pExpr, pNext, pChild);
- if( cmp>0 || (cmp==0 && pChild->bNomatch==0) ){
- pNext = pChild;
- }
- }
- pNode->iRowid = pNext->iRowid;
- pNode->bEof = pNext->bEof;
- pNode->bNomatch = pNext->bNomatch;
- break;
- }
-
- default: assert( pNode->eType==FTS5_NOT ); {
- Fts5ExprNode *p1 = pNode->apChild[0];
- Fts5ExprNode *p2 = pNode->apChild[1];
- assert( pNode->nChild==2 );
-
- while( rc==SQLITE_OK && p1->bEof==0 ){
- int cmp = fts5NodeCompare(pExpr, p1, p2);
- if( cmp>0 ){
- rc = fts5ExprNodeNext(pExpr, p2, 1, p1->iRowid);
- cmp = fts5NodeCompare(pExpr, p1, p2);
- }
- assert( rc!=SQLITE_OK || cmp<=0 );
- if( cmp || p2->bNomatch ) break;
- rc = fts5ExprNodeNext(pExpr, p1, 0, 0);
- }
- pNode->bEof = p1->bEof;
- pNode->iRowid = p1->iRowid;
- break;
- }
- }
- }
- return rc;
-}
-
-
-/*
-** Set node pNode, which is part of expression pExpr, to point to the first
-** match. If there are no matches, set the Node.bEof flag to indicate EOF.
-**
-** Return an SQLite error code if an error occurs, or SQLITE_OK otherwise.
-** It is not an error if there are no matches.
-*/
-static int fts5ExprNodeFirst(Fts5Expr *pExpr, Fts5ExprNode *pNode){
- int rc = SQLITE_OK;
- pNode->bEof = 0;
-
- if( Fts5NodeIsString(pNode) ){
- /* Initialize all term iterators in the NEAR object. */
- rc = fts5ExprNearInitAll(pExpr, pNode);
- }else{
- int i;
- for(i=0; i<pNode->nChild && rc==SQLITE_OK; i++){
- rc = fts5ExprNodeFirst(pExpr, pNode->apChild[i]);
- }
- pNode->iRowid = pNode->apChild[0]->iRowid;
- }
-
- if( rc==SQLITE_OK ){
- rc = fts5ExprNodeNextMatch(pExpr, pNode);
- }
- return rc;
-}
-
-
-/*
-** Begin iterating through the set of documents in index pIdx matched by
-** the MATCH expression passed as the first argument. If the "bDesc"
-** parameter is passed a non-zero value, iteration is in descending rowid
-** order. Or, if it is zero, in ascending order.
-**
-** If iterating in ascending rowid order (bDesc==0), the first document
-** visited is that with the smallest rowid that is larger than or equal
-** to parameter iFirst. Or, if iterating in descending order (bDesc==1),
-** then the first document visited must have a rowid smaller than or
-** equal to iFirst.
-**
-** Return SQLITE_OK if successful, or an SQLite error code otherwise. It
-** is not considered an error if the query does not match any documents.
-*/
-static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){
- Fts5ExprNode *pRoot = p->pRoot;
- int rc = SQLITE_OK;
- if( pRoot ){
- p->pIndex = pIdx;
- p->bDesc = bDesc;
- rc = fts5ExprNodeFirst(p, pRoot);
-
- /* If not at EOF but the current rowid occurs earlier than iFirst in
- ** the iteration order, move to document iFirst or later. */
- if( pRoot->bEof==0 && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 ){
- rc = fts5ExprNodeNext(p, pRoot, 1, iFirst);
- }
-
- /* If the iterator is not at a real match, skip forward until it is. */
- while( pRoot->bNomatch && rc==SQLITE_OK && pRoot->bEof==0 ){
- rc = fts5ExprNodeNext(p, pRoot, 0, 0);
- }
- }
- return rc;
-}
-
-/*
-** Move to the next document
-**
-** Return SQLITE_OK if successful, or an SQLite error code otherwise. It
-** is not considered an error if the query does not match any documents.
-*/
-static int sqlite3Fts5ExprNext(Fts5Expr *p, i64 iLast){
- int rc;
- Fts5ExprNode *pRoot = p->pRoot;
- do {
- rc = fts5ExprNodeNext(p, pRoot, 0, 0);
- }while( pRoot->bNomatch && pRoot->bEof==0 && rc==SQLITE_OK );
- if( fts5RowidCmp(p, pRoot->iRowid, iLast)>0 ){
- pRoot->bEof = 1;
- }
- return rc;
-}
-
-static int sqlite3Fts5ExprEof(Fts5Expr *p){
- return (p->pRoot==0 || p->pRoot->bEof);
-}
-
-static i64 sqlite3Fts5ExprRowid(Fts5Expr *p){
- return p->pRoot->iRowid;
-}
-
-static int fts5ParseStringFromToken(Fts5Token *pToken, char **pz){
- int rc = SQLITE_OK;
- *pz = sqlite3Fts5Strndup(&rc, pToken->p, pToken->n);
- return rc;
-}
-
-/*
-** Free the phrase object passed as the only argument.
-*/
-static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){
- if( pPhrase ){
- int i;
- for(i=0; i<pPhrase->nTerm; i++){
- Fts5ExprTerm *pSyn;
- Fts5ExprTerm *pNext;
- Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
- sqlite3_free(pTerm->zTerm);
- sqlite3Fts5IterClose(pTerm->pIter);
-
- for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){
- pNext = pSyn->pSynonym;
- sqlite3Fts5IterClose(pSyn->pIter);
- sqlite3_free(pSyn);
- }
- }
- if( pPhrase->poslist.nSpace>0 ) fts5BufferFree(&pPhrase->poslist);
- sqlite3_free(pPhrase);
- }
-}
-
-/*
-** If argument pNear is NULL, then a new Fts5ExprNearset object is allocated
-** and populated with pPhrase. Or, if pNear is not NULL, phrase pPhrase is
-** appended to it and the results returned.
-**
-** If an OOM error occurs, both the pNear and pPhrase objects are freed and
-** NULL returned.
-*/
-static Fts5ExprNearset *sqlite3Fts5ParseNearset(
- Fts5Parse *pParse, /* Parse context */
- Fts5ExprNearset *pNear, /* Existing nearset, or NULL */
- Fts5ExprPhrase *pPhrase /* Recently parsed phrase */
-){
- const int SZALLOC = 8;
- Fts5ExprNearset *pRet = 0;
-
- if( pParse->rc==SQLITE_OK ){
- if( pPhrase==0 ){
- return pNear;
- }
- if( pNear==0 ){
- int nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*);
- pRet = sqlite3_malloc(nByte);
- if( pRet==0 ){
- pParse->rc = SQLITE_NOMEM;
- }else{
- memset(pRet, 0, nByte);
- }
- }else if( (pNear->nPhrase % SZALLOC)==0 ){
- int nNew = pNear->nPhrase + SZALLOC;
- int nByte = sizeof(Fts5ExprNearset) + nNew * sizeof(Fts5ExprPhrase*);
-
- pRet = (Fts5ExprNearset*)sqlite3_realloc(pNear, nByte);
- if( pRet==0 ){
- pParse->rc = SQLITE_NOMEM;
- }
- }else{
- pRet = pNear;
- }
- }
-
- if( pRet==0 ){
- assert( pParse->rc!=SQLITE_OK );
- sqlite3Fts5ParseNearsetFree(pNear);
- sqlite3Fts5ParsePhraseFree(pPhrase);
- }else{
- pRet->apPhrase[pRet->nPhrase++] = pPhrase;
- }
- return pRet;
-}
-
-typedef struct TokenCtx TokenCtx;
-struct TokenCtx {
- Fts5ExprPhrase *pPhrase;
- int rc;
-};
-
-/*
-** Callback for tokenizing terms used by ParseTerm().
-*/
-static int fts5ParseTokenize(
- void *pContext, /* Pointer to Fts5InsertCtx object */
- int tflags, /* Mask of FTS5_TOKEN_* flags */
- const char *pToken, /* Buffer containing token */
- int nToken, /* Size of token in bytes */
- int iUnused1, /* Start offset of token */
- int iUnused2 /* End offset of token */
-){
- int rc = SQLITE_OK;
- const int SZALLOC = 8;
- TokenCtx *pCtx = (TokenCtx*)pContext;
- Fts5ExprPhrase *pPhrase = pCtx->pPhrase;
-
- /* If an error has already occurred, this is a no-op */
- if( pCtx->rc!=SQLITE_OK ) return pCtx->rc;
-
- assert( pPhrase==0 || pPhrase->nTerm>0 );
- if( pPhrase && (tflags & FTS5_TOKEN_COLOCATED) ){
- Fts5ExprTerm *pSyn;
- int nByte = sizeof(Fts5ExprTerm) + nToken+1;
- pSyn = (Fts5ExprTerm*)sqlite3_malloc(nByte);
- if( pSyn==0 ){
- rc = SQLITE_NOMEM;
- }else{
- memset(pSyn, 0, nByte);
- pSyn->zTerm = (char*)&pSyn[1];
- memcpy(pSyn->zTerm, pToken, nToken);
- pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym;
- pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn;
- }
- }else{
- Fts5ExprTerm *pTerm;
- if( pPhrase==0 || (pPhrase->nTerm % SZALLOC)==0 ){
- Fts5ExprPhrase *pNew;
- int nNew = SZALLOC + (pPhrase ? pPhrase->nTerm : 0);
-
- pNew = (Fts5ExprPhrase*)sqlite3_realloc(pPhrase,
- sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm) * nNew
- );
- if( pNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- if( pPhrase==0 ) memset(pNew, 0, sizeof(Fts5ExprPhrase));
- pCtx->pPhrase = pPhrase = pNew;
- pNew->nTerm = nNew - SZALLOC;
- }
- }
-
- if( rc==SQLITE_OK ){
- pTerm = &pPhrase->aTerm[pPhrase->nTerm++];
- memset(pTerm, 0, sizeof(Fts5ExprTerm));
- pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
- }
- }
-
- pCtx->rc = rc;
- return rc;
-}
-
-
-/*
-** Free the phrase object passed as the only argument.
-*/
-static void sqlite3Fts5ParsePhraseFree(Fts5ExprPhrase *pPhrase){
- fts5ExprPhraseFree(pPhrase);
-}
-
-/*
-** Free the phrase object passed as the second argument.
-*/
-static void sqlite3Fts5ParseNearsetFree(Fts5ExprNearset *pNear){
- if( pNear ){
- int i;
- for(i=0; i<pNear->nPhrase; i++){
- fts5ExprPhraseFree(pNear->apPhrase[i]);
- }
- sqlite3_free(pNear->pColset);
- sqlite3_free(pNear);
- }
-}
-
-static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p){
- assert( pParse->pExpr==0 );
- pParse->pExpr = p;
-}
-
-/*
-** This function is called by the parser to process a string token. The
-** string may or may not be quoted. In any case it is tokenized and a
-** phrase object consisting of all tokens is returned.
-*/
-static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
- Fts5Parse *pParse, /* Parse context */
- Fts5ExprPhrase *pAppend, /* Phrase to append to */
- Fts5Token *pToken, /* String to tokenize */
- int bPrefix /* True if there is a trailing "*" */
-){
- Fts5Config *pConfig = pParse->pConfig;
- TokenCtx sCtx; /* Context object passed to callback */
- int rc; /* Tokenize return code */
- char *z = 0;
-
- memset(&sCtx, 0, sizeof(TokenCtx));
- sCtx.pPhrase = pAppend;
-
- rc = fts5ParseStringFromToken(pToken, &z);
- if( rc==SQLITE_OK ){
- int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_QUERY : 0);
- int n;
- sqlite3Fts5Dequote(z);
- n = strlen(z);
- rc = sqlite3Fts5Tokenize(pConfig, flags, z, n, &sCtx, fts5ParseTokenize);
- }
- sqlite3_free(z);
- if( rc || (rc = sCtx.rc) ){
- pParse->rc = rc;
- fts5ExprPhraseFree(sCtx.pPhrase);
- sCtx.pPhrase = 0;
- }else if( sCtx.pPhrase ){
-
- if( pAppend==0 ){
- if( (pParse->nPhrase % 8)==0 ){
- int nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
- Fts5ExprPhrase **apNew;
- apNew = (Fts5ExprPhrase**)sqlite3_realloc(pParse->apPhrase, nByte);
- if( apNew==0 ){
- pParse->rc = SQLITE_NOMEM;
- fts5ExprPhraseFree(sCtx.pPhrase);
- return 0;
- }
- pParse->apPhrase = apNew;
- }
- pParse->nPhrase++;
- }
-
- pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase;
- assert( sCtx.pPhrase->nTerm>0 );
- sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = bPrefix;
- }
-
- return sCtx.pPhrase;
-}
-
-/*
-** Create a new FTS5 expression by cloning phrase iPhrase of the
-** expression passed as the second argument.
-*/
-static int sqlite3Fts5ExprClonePhrase(
- Fts5Config *pConfig,
- Fts5Expr *pExpr,
- int iPhrase,
- Fts5Expr **ppNew
-){
- int rc = SQLITE_OK; /* Return code */
- Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */
- int i; /* Used to iterate through phrase terms */
-
- Fts5Expr *pNew = 0; /* Expression to return via *ppNew */
-
- TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */
-
-
- pOrig = pExpr->apExprPhrase[iPhrase];
-
- pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
- if( rc==SQLITE_OK ){
- pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc,
- sizeof(Fts5ExprPhrase*));
- }
- if( rc==SQLITE_OK ){
- pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc,
- sizeof(Fts5ExprNode));
- }
- if( rc==SQLITE_OK ){
- pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc,
- sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*));
- }
-
- for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
- int tflags = 0;
- Fts5ExprTerm *p;
- for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
- const char *zTerm = p->zTerm;
- rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, strlen(zTerm), 0, 0);
- tflags = FTS5_TOKEN_COLOCATED;
- }
- if( rc==SQLITE_OK ){
- sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
- }
- }
-
- if( rc==SQLITE_OK ){
- /* All the allocations succeeded. Put the expression object together. */
- pNew->pIndex = pExpr->pIndex;
- pNew->nPhrase = 1;
- pNew->apExprPhrase[0] = sCtx.pPhrase;
- pNew->pRoot->pNear->apPhrase[0] = sCtx.pPhrase;
- pNew->pRoot->pNear->nPhrase = 1;
- sCtx.pPhrase->pNode = pNew->pRoot;
-
- if( pOrig->nTerm==1 && pOrig->aTerm[0].pSynonym==0 ){
- pNew->pRoot->eType = FTS5_TERM;
- }else{
- pNew->pRoot->eType = FTS5_STRING;
- }
- }else{
- sqlite3Fts5ExprFree(pNew);
- fts5ExprPhraseFree(sCtx.pPhrase);
- pNew = 0;
- }
-
- *ppNew = pNew;
- return rc;
-}
-
-
-/*
-** Token pTok has appeared in a MATCH expression where the NEAR operator
-** is expected. If token pTok does not contain "NEAR", store an error
-** in the pParse object.
-*/
-static void sqlite3Fts5ParseNear(Fts5Parse *pParse, Fts5Token *pTok){
- if( pTok->n!=4 || memcmp("NEAR", pTok->p, 4) ){
- sqlite3Fts5ParseError(
- pParse, "fts5: syntax error near \"%.*s\"", pTok->n, pTok->p
- );
- }
-}
-
-static void sqlite3Fts5ParseSetDistance(
- Fts5Parse *pParse,
- Fts5ExprNearset *pNear,
- Fts5Token *p
-){
- int nNear = 0;
- int i;
- if( p->n ){
- for(i=0; i<p->n; i++){
- char c = (char)p->p[i];
- if( c<'0' || c>'9' ){
- sqlite3Fts5ParseError(
- pParse, "expected integer, got \"%.*s\"", p->n, p->p
- );
- return;
- }
- nNear = nNear * 10 + (p->p[i] - '0');
- }
- }else{
- nNear = FTS5_DEFAULT_NEARDIST;
- }
- pNear->nNear = nNear;
-}
-
-/*
-** The second argument passed to this function may be NULL, or it may be
-** an existing Fts5Colset object. This function returns a pointer to
-** a new colset object containing the contents of (p) with the new column
-** number iCol added in sorted position.
-**
-** If an OOM error occurs, store an error code in pParse and return NULL.
-** The old colset object (if any) is not freed in this case.
-*/
-static Fts5Colset *fts5ParseColset(
- Fts5Parse *pParse, /* Store SQLITE_NOMEM here if required */
- Fts5Colset *p, /* Existing colset object */
- int iCol /* New column to add to colset object */
-){
- int nCol = p ? p->nCol : 0; /* Num. columns already in colset object */
- Fts5Colset *pNew; /* New colset object to return */
-
- assert( pParse->rc==SQLITE_OK );
- assert( iCol>=0 && iCol<pParse->pConfig->nCol );
-
- pNew = sqlite3_realloc(p, sizeof(Fts5Colset) + sizeof(int)*nCol);
- if( pNew==0 ){
- pParse->rc = SQLITE_NOMEM;
- }else{
- int *aiCol = pNew->aiCol;
- int i, j;
- for(i=0; i<nCol; i++){
- if( aiCol[i]==iCol ) return pNew;
- if( aiCol[i]>iCol ) break;
- }
- for(j=nCol; j>i; j--){
- aiCol[j] = aiCol[j-1];
- }
- aiCol[i] = iCol;
- pNew->nCol = nCol+1;
-
-#ifndef NDEBUG
- /* Check that the array is in order and contains no duplicate entries. */
- for(i=1; i<pNew->nCol; i++) assert( pNew->aiCol[i]>pNew->aiCol[i-1] );
-#endif
- }
-
- return pNew;
-}
-
-static Fts5Colset *sqlite3Fts5ParseColset(
- Fts5Parse *pParse, /* Store SQLITE_NOMEM here if required */
- Fts5Colset *pColset, /* Existing colset object */
- Fts5Token *p
-){
- Fts5Colset *pRet = 0;
- int iCol;
- char *z; /* Dequoted copy of token p */
-
- z = sqlite3Fts5Strndup(&pParse->rc, p->p, p->n);
- if( pParse->rc==SQLITE_OK ){
- Fts5Config *pConfig = pParse->pConfig;
- sqlite3Fts5Dequote(z);
- for(iCol=0; iCol<pConfig->nCol; iCol++){
- if( 0==sqlite3_stricmp(pConfig->azCol[iCol], z) ) break;
- }
- if( iCol==pConfig->nCol ){
- sqlite3Fts5ParseError(pParse, "no such column: %s", z);
- }else{
- pRet = fts5ParseColset(pParse, pColset, iCol);
- }
- sqlite3_free(z);
- }
-
- if( pRet==0 ){
- assert( pParse->rc!=SQLITE_OK );
- sqlite3_free(pColset);
- }
-
- return pRet;
-}
-
-static void sqlite3Fts5ParseSetColset(
- Fts5Parse *pParse,
- Fts5ExprNearset *pNear,
- Fts5Colset *pColset
-){
- if( pNear ){
- pNear->pColset = pColset;
- }else{
- sqlite3_free(pColset);
- }
-}
-
-static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){
- if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){
- int nByte = sizeof(Fts5ExprNode*) * pSub->nChild;
- memcpy(&p->apChild[p->nChild], pSub->apChild, nByte);
- p->nChild += pSub->nChild;
- sqlite3_free(pSub);
- }else{
- p->apChild[p->nChild++] = pSub;
- }
-}
-
-/*
-** Allocate and return a new expression object. If anything goes wrong (i.e.
-** OOM error), leave an error code in pParse and return NULL.
-*/
-static Fts5ExprNode *sqlite3Fts5ParseNode(
- Fts5Parse *pParse, /* Parse context */
- int eType, /* FTS5_STRING, AND, OR or NOT */
- Fts5ExprNode *pLeft, /* Left hand child expression */
- Fts5ExprNode *pRight, /* Right hand child expression */
- Fts5ExprNearset *pNear /* For STRING expressions, the near cluster */
-){
- Fts5ExprNode *pRet = 0;
-
- if( pParse->rc==SQLITE_OK ){
- int nChild = 0; /* Number of children of returned node */
- int nByte; /* Bytes of space to allocate for this node */
-
- assert( (eType!=FTS5_STRING && !pNear)
- || (eType==FTS5_STRING && !pLeft && !pRight)
- );
- if( eType==FTS5_STRING && pNear==0 ) return 0;
- if( eType!=FTS5_STRING && pLeft==0 ) return pRight;
- if( eType!=FTS5_STRING && pRight==0 ) return pLeft;
-
- if( eType==FTS5_NOT ){
- nChild = 2;
- }else if( eType==FTS5_AND || eType==FTS5_OR ){
- nChild = 2;
- if( pLeft->eType==eType ) nChild += pLeft->nChild-1;
- if( pRight->eType==eType ) nChild += pRight->nChild-1;
- }
-
- nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1);
- pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
-
- if( pRet ){
- pRet->eType = eType;
- pRet->pNear = pNear;
- if( eType==FTS5_STRING ){
- int iPhrase;
- for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
- pNear->apPhrase[iPhrase]->pNode = pRet;
- }
- if( pNear->nPhrase==1
- && pNear->apPhrase[0]->nTerm==1
- && pNear->apPhrase[0]->aTerm[0].pSynonym==0
- ){
- pRet->eType = FTS5_TERM;
- }
- }else{
- fts5ExprAddChildren(pRet, pLeft);
- fts5ExprAddChildren(pRet, pRight);
- }
- }
- }
-
- if( pRet==0 ){
- assert( pParse->rc!=SQLITE_OK );
- sqlite3Fts5ParseNodeFree(pLeft);
- sqlite3Fts5ParseNodeFree(pRight);
- sqlite3Fts5ParseNearsetFree(pNear);
- }
- return pRet;
-}
-
-static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
- int nByte = 0;
- Fts5ExprTerm *p;
- char *zQuoted;
-
- /* Determine the maximum amount of space required. */
- for(p=pTerm; p; p=p->pSynonym){
-    nByte += strlen(p->zTerm) * 2 + 3 + 2;
- }
- zQuoted = sqlite3_malloc(nByte);
-
- if( zQuoted ){
- int i = 0;
- for(p=pTerm; p; p=p->pSynonym){
- char *zIn = p->zTerm;
- zQuoted[i++] = '"';
- while( *zIn ){
- if( *zIn=='"' ) zQuoted[i++] = '"';
- zQuoted[i++] = *zIn++;
- }
- zQuoted[i++] = '"';
- if( p->pSynonym ) zQuoted[i++] = '|';
- }
- if( pTerm->bPrefix ){
- zQuoted[i++] = ' ';
- zQuoted[i++] = '*';
- }
- zQuoted[i++] = '\0';
- }
- return zQuoted;
-}
-
-static char *fts5PrintfAppend(char *zApp, const char *zFmt, ...){
- char *zNew;
- va_list ap;
- va_start(ap, zFmt);
- zNew = sqlite3_vmprintf(zFmt, ap);
- va_end(ap);
- if( zApp && zNew ){
- char *zNew2 = sqlite3_mprintf("%s%s", zApp, zNew);
- sqlite3_free(zNew);
- zNew = zNew2;
- }
- sqlite3_free(zApp);
- return zNew;
-}
-
-/*
-** Compose a tcl-readable representation of expression pExpr. Return a
-** pointer to a buffer containing that representation. It is the
-** responsibility of the caller to at some point free the buffer using
-** sqlite3_free().
-*/
-static char *fts5ExprPrintTcl(
- Fts5Config *pConfig,
- const char *zNearsetCmd,
- Fts5ExprNode *pExpr
-){
- char *zRet = 0;
- if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){
- Fts5ExprNearset *pNear = pExpr->pNear;
- int i;
- int iTerm;
-
- zRet = fts5PrintfAppend(zRet, "%s ", zNearsetCmd);
- if( zRet==0 ) return 0;
- if( pNear->pColset ){
- int *aiCol = pNear->pColset->aiCol;
- int nCol = pNear->pColset->nCol;
- if( nCol==1 ){
- zRet = fts5PrintfAppend(zRet, "-col %d ", aiCol[0]);
- }else{
- zRet = fts5PrintfAppend(zRet, "-col {%d", aiCol[0]);
- for(i=1; i<pNear->pColset->nCol; i++){
- zRet = fts5PrintfAppend(zRet, " %d", aiCol[i]);
- }
- zRet = fts5PrintfAppend(zRet, "} ");
- }
- if( zRet==0 ) return 0;
- }
-
- if( pNear->nPhrase>1 ){
- zRet = fts5PrintfAppend(zRet, "-near %d ", pNear->nNear);
- if( zRet==0 ) return 0;
- }
-
- zRet = fts5PrintfAppend(zRet, "--");
- if( zRet==0 ) return 0;
-
- for(i=0; i<pNear->nPhrase; i++){
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
-
- zRet = fts5PrintfAppend(zRet, " {");
- for(iTerm=0; zRet && iTerm<pPhrase->nTerm; iTerm++){
- char *zTerm = pPhrase->aTerm[iTerm].zTerm;
- zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm);
- }
-
- if( zRet ) zRet = fts5PrintfAppend(zRet, "}");
- if( zRet==0 ) return 0;
- }
-
- }else{
- char const *zOp = 0;
- int i;
- switch( pExpr->eType ){
- case FTS5_AND: zOp = "AND"; break;
- case FTS5_NOT: zOp = "NOT"; break;
- default:
- assert( pExpr->eType==FTS5_OR );
- zOp = "OR";
- break;
- }
-
- zRet = sqlite3_mprintf("%s", zOp);
- for(i=0; zRet && i<pExpr->nChild; i++){
- char *z = fts5ExprPrintTcl(pConfig, zNearsetCmd, pExpr->apChild[i]);
- if( !z ){
- sqlite3_free(zRet);
- zRet = 0;
- }else{
- zRet = fts5PrintfAppend(zRet, " [%z]", z);
- }
- }
- }
-
- return zRet;
-}
-
-static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){
- char *zRet = 0;
- if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){
- Fts5ExprNearset *pNear = pExpr->pNear;
- int i;
- int iTerm;
-
- if( pNear->pColset ){
- int iCol = pNear->pColset->aiCol[0];
- zRet = fts5PrintfAppend(zRet, "%s : ", pConfig->azCol[iCol]);
- if( zRet==0 ) return 0;
- }
-
- if( pNear->nPhrase>1 ){
- zRet = fts5PrintfAppend(zRet, "NEAR(");
- if( zRet==0 ) return 0;
- }
-
- for(i=0; i<pNear->nPhrase; i++){
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
- if( i!=0 ){
- zRet = fts5PrintfAppend(zRet, " ");
- if( zRet==0 ) return 0;
- }
- for(iTerm=0; iTerm<pPhrase->nTerm; iTerm++){
- char *zTerm = fts5ExprTermPrint(&pPhrase->aTerm[iTerm]);
- if( zTerm ){
- zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" + ", zTerm);
- sqlite3_free(zTerm);
- }
- if( zTerm==0 || zRet==0 ){
- sqlite3_free(zRet);
- return 0;
- }
- }
- }
-
- if( pNear->nPhrase>1 ){
- zRet = fts5PrintfAppend(zRet, ", %d)", pNear->nNear);
- if( zRet==0 ) return 0;
- }
-
- }else{
- char const *zOp = 0;
- int i;
-
- switch( pExpr->eType ){
- case FTS5_AND: zOp = " AND "; break;
- case FTS5_NOT: zOp = " NOT "; break;
- default:
- assert( pExpr->eType==FTS5_OR );
- zOp = " OR ";
- break;
- }
-
- for(i=0; i<pExpr->nChild; i++){
- char *z = fts5ExprPrint(pConfig, pExpr->apChild[i]);
- if( z==0 ){
- sqlite3_free(zRet);
- zRet = 0;
- }else{
- int e = pExpr->apChild[i]->eType;
- int b = (e!=FTS5_STRING && e!=FTS5_TERM);
- zRet = fts5PrintfAppend(zRet, "%s%s%z%s",
- (i==0 ? "" : zOp),
- (b?"(":""), z, (b?")":"")
- );
- }
- if( zRet==0 ) break;
- }
- }
-
- return zRet;
-}
-
-/*
-** The implementation of user-defined scalar functions fts5_expr() (bTcl==0)
-** and fts5_expr_tcl() (bTcl!=0).
-*/
-static void fts5ExprFunction(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal, /* Function arguments */
- int bTcl
-){
- Fts5Global *pGlobal = (Fts5Global*)sqlite3_user_data(pCtx);
- sqlite3 *db = sqlite3_context_db_handle(pCtx);
- const char *zExpr = 0;
- char *zErr = 0;
- Fts5Expr *pExpr = 0;
- int rc;
- int i;
-
- const char **azConfig; /* Array of arguments for Fts5Config */
- const char *zNearsetCmd = "nearset";
- int nConfig; /* Size of azConfig[] */
- Fts5Config *pConfig = 0;
- int iArg = 1;
-
- if( nArg<1 ){
- zErr = sqlite3_mprintf("wrong number of arguments to function %s",
- bTcl ? "fts5_expr_tcl" : "fts5_expr"
- );
- sqlite3_result_error(pCtx, zErr, -1);
- sqlite3_free(zErr);
- return;
- }
-
- if( bTcl && nArg>1 ){
- zNearsetCmd = (const char*)sqlite3_value_text(apVal[1]);
- iArg = 2;
- }
-
- nConfig = 3 + (nArg-iArg);
- azConfig = (const char**)sqlite3_malloc(sizeof(char*) * nConfig);
- if( azConfig==0 ){
- sqlite3_result_error_nomem(pCtx);
- return;
- }
- azConfig[0] = 0;
- azConfig[1] = "main";
- azConfig[2] = "tbl";
- for(i=3; iArg<nArg; iArg++){
- azConfig[i++] = (const char*)sqlite3_value_text(apVal[iArg]);
- }
-
- zExpr = (const char*)sqlite3_value_text(apVal[0]);
-
- rc = sqlite3Fts5ConfigParse(pGlobal, db, nConfig, azConfig, &pConfig, &zErr);
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5ExprNew(pConfig, zExpr, &pExpr, &zErr);
- }
- if( rc==SQLITE_OK ){
- char *zText;
- if( pExpr->pRoot==0 ){
- zText = sqlite3_mprintf("");
- }else if( bTcl ){
- zText = fts5ExprPrintTcl(pConfig, zNearsetCmd, pExpr->pRoot);
- }else{
- zText = fts5ExprPrint(pConfig, pExpr->pRoot);
- }
- if( zText==0 ){
- rc = SQLITE_NOMEM;
- }else{
- sqlite3_result_text(pCtx, zText, -1, SQLITE_TRANSIENT);
- sqlite3_free(zText);
- }
- }
-
- if( rc!=SQLITE_OK ){
- if( zErr ){
- sqlite3_result_error(pCtx, zErr, -1);
- sqlite3_free(zErr);
- }else{
- sqlite3_result_error_code(pCtx, rc);
- }
- }
- sqlite3_free((void *)azConfig);
- sqlite3Fts5ConfigFree(pConfig);
- sqlite3Fts5ExprFree(pExpr);
-}
-
-static void fts5ExprFunctionHr(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal /* Function arguments */
-){
- fts5ExprFunction(pCtx, nArg, apVal, 0);
-}
-static void fts5ExprFunctionTcl(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal /* Function arguments */
-){
- fts5ExprFunction(pCtx, nArg, apVal, 1);
-}
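-
-/*
-** A standalone sketch of exercising the fts5_expr() scalar function
-** implemented above through the public API. It assumes a build with FTS5
-** enabled (so that the function is registered on every connection); the
-** query text is invented for illustration, and the single result column
-** is a normalized text rendering of the parsed MATCH expression.
-**
-**   #include <stdio.h>
-**   #include "sqlite3.h"
-**
-**   int main(void){
-**     sqlite3 *db = 0;
-**     sqlite3_stmt *pStmt = 0;
-**     sqlite3_open(":memory:", &db);
-**     sqlite3_prepare_v2(db,
-**         "SELECT fts5_expr('a AND (b OR c)')", -1, &pStmt, 0);
-**     if( sqlite3_step(pStmt)==SQLITE_ROW ){
-**       printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
-**     }
-**     sqlite3_finalize(pStmt);
-**     sqlite3_close(db);
-**     return 0;
-**   }
-*/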
-
-/*
-** The implementation of an SQLite user-defined-function that accepts a
-** single integer as an argument. If the integer is an alpha-numeric
-** unicode code point, 1 is returned. Otherwise 0.
-*/
-static void fts5ExprIsAlnum(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal /* Function arguments */
-){
- int iCode;
- if( nArg!=1 ){
- sqlite3_result_error(pCtx,
- "wrong number of arguments to function fts5_isalnum", -1
- );
- return;
- }
- iCode = sqlite3_value_int(apVal[0]);
- sqlite3_result_int(pCtx, sqlite3Fts5UnicodeIsalnum(iCode));
-}
-
-static void fts5ExprFold(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal /* Function arguments */
-){
- if( nArg!=1 && nArg!=2 ){
- sqlite3_result_error(pCtx,
- "wrong number of arguments to function fts5_fold", -1
- );
- }else{
- int iCode;
- int bRemoveDiacritics = 0;
- iCode = sqlite3_value_int(apVal[0]);
- if( nArg==2 ) bRemoveDiacritics = sqlite3_value_int(apVal[1]);
- sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics));
- }
-}
-
-/*
-** This is called during initialization to register the fts5_expr() scalar
-** UDF with the SQLite handle passed as the only argument.
-*/
-static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){
- struct Fts5ExprFunc {
- const char *z;
- void (*x)(sqlite3_context*,int,sqlite3_value**);
- } aFunc[] = {
- { "fts5_expr", fts5ExprFunctionHr },
- { "fts5_expr_tcl", fts5ExprFunctionTcl },
- { "fts5_isalnum", fts5ExprIsAlnum },
- { "fts5_fold", fts5ExprFold },
- };
- int i;
- int rc = SQLITE_OK;
- void *pCtx = (void*)pGlobal;
-
- for(i=0; rc==SQLITE_OK && i<(sizeof(aFunc) / sizeof(aFunc[0])); i++){
- struct Fts5ExprFunc *p = &aFunc[i];
- rc = sqlite3_create_function(db, p->z, -1, SQLITE_UTF8, pCtx, p->x, 0, 0);
- }
-
- /* Avoid a warning indicating that sqlite3Fts5ParserTrace() is unused */
-#ifndef NDEBUG
- (void)sqlite3Fts5ParserTrace;
-#endif
-
- return rc;
-}
-
-/*
-** Return the number of phrases in expression pExpr.
-*/
-static int sqlite3Fts5ExprPhraseCount(Fts5Expr *pExpr){
- return (pExpr ? pExpr->nPhrase : 0);
-}
-
-/*
-** Return the number of terms in the iPhrase'th phrase in pExpr.
-*/
-static int sqlite3Fts5ExprPhraseSize(Fts5Expr *pExpr, int iPhrase){
- if( iPhrase<0 || iPhrase>=pExpr->nPhrase ) return 0;
- return pExpr->apExprPhrase[iPhrase]->nTerm;
-}
-
-/*
-** This function is used to access the current position list for phrase
-** iPhrase.
-*/
-static int sqlite3Fts5ExprPoslist(Fts5Expr *pExpr, int iPhrase, const u8 **pa){
- int nRet;
- Fts5ExprPhrase *pPhrase = pExpr->apExprPhrase[iPhrase];
- Fts5ExprNode *pNode = pPhrase->pNode;
- if( pNode->bEof==0 && pNode->iRowid==pExpr->pRoot->iRowid ){
- *pa = pPhrase->poslist.p;
- nRet = pPhrase->poslist.n;
- }else{
- *pa = 0;
- nRet = 0;
- }
- return nRet;
-}
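-
-/*
-** A minimal sketch (name hypothetical) of the three phrase accessors
-** above. It assumes pExpr is currently positioned on a matching row; an
-** empty position list means the corresponding phrase did not match the
-** current row.
-*/
-static void fts5ExprPhraseSketch(Fts5Expr *pExpr){
-  int i;
-  for(i=0; i<sqlite3Fts5ExprPhraseCount(pExpr); i++){
-    const u8 *aPos = 0;           /* Position list for phrase i */
-    int nPos = sqlite3Fts5ExprPoslist(pExpr, i, &aPos);
-    int nTerm = sqlite3Fts5ExprPhraseSize(pExpr, i);
-    /* ... decode the nPos bytes at aPos[] for this nTerm-term phrase ... */
-    (void)nPos; (void)nTerm; (void)aPos;
-  }
-}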
-
-/*
-** 2014 August 11
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-*/
-
-
-
-
-typedef struct Fts5HashEntry Fts5HashEntry;
-
-/*
-** This file contains the implementation of an in-memory hash table used
-** to accumulate "term -> doclist" content before it is flushed to a level-0
-** segment.
-*/
-
-
-struct Fts5Hash {
- int *pnByte; /* Pointer to bytes counter */
- int nEntry; /* Number of entries currently in hash */
- int nSlot; /* Size of aSlot[] array */
- Fts5HashEntry *pScan; /* Current ordered scan item */
- Fts5HashEntry **aSlot; /* Array of hash slots */
-};
-
-/*
-** Each entry in the hash table is represented by an object of the
-** following type. Each object, its key (zKey[]) and its current data
-** are stored in a single memory allocation. The position list data
-** immediately follows the key data in memory.
-**
-** The data that follows the key is in a similar, but not identical format
-** to the doclist data stored in the database. It is:
-**
-** * Rowid, as a varint
-** * Position list, without 0x00 terminator.
-** * Size of previous position list and rowid, as a 4 byte
-** big-endian integer.
-**
-** iRowidOff:
-** Offset of last rowid written to data area. Relative to first byte of
-** structure.
-**
-** nData:
-** Bytes of data written since iRowidOff.
-*/
-struct Fts5HashEntry {
- Fts5HashEntry *pHashNext; /* Next hash entry with same hash-key */
- Fts5HashEntry *pScanNext; /* Next entry in sorted order */
-
- int nAlloc; /* Total size of allocation */
- int iSzPoslist; /* Offset of space for 4-byte poslist size */
- int nData; /* Total bytes of data (incl. structure) */
- u8 bDel; /* Set delete-flag @ iSzPoslist */
-
- int iCol; /* Column of last value written */
- int iPos; /* Position of last value written */
- i64 iRowid; /* Rowid of last value written */
- char zKey[8]; /* Nul-terminated entry key */
-};
-
-/*
-** Size of Fts5HashEntry without the zKey[] array.
-*/
-#define FTS5_HASHENTRYSIZE (sizeof(Fts5HashEntry)-8)
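-
-/*
-** A minimal sketch (name hypothetical) of navigating the layout described
-** above: the nul-terminated key begins at byte offset FTS5_HASHENTRYSIZE
-** within the allocation, and the accumulated doclist follows the key's
-** nul terminator. The caller is assumed to have finalized any pending
-** poslist-size field first (see fts5HashAddPoslistSize() below).
-*/
-static const u8 *fts5HashEntryDoclistSketch(Fts5HashEntry *p, int *pnDoclist){
-  int nKey = (int)strlen(p->zKey);
-  *pnDoclist = p->nData - (FTS5_HASHENTRYSIZE + nKey + 1);
-  return (const u8*)&p->zKey[nKey+1];
-}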
-
-
-
-/*
-** Allocate a new hash table.
-*/
-static int sqlite3Fts5HashNew(Fts5Hash **ppNew, int *pnByte){
- int rc = SQLITE_OK;
- Fts5Hash *pNew;
-
- *ppNew = pNew = (Fts5Hash*)sqlite3_malloc(sizeof(Fts5Hash));
- if( pNew==0 ){
- rc = SQLITE_NOMEM;
- }else{
- int nByte;
- memset(pNew, 0, sizeof(Fts5Hash));
- pNew->pnByte = pnByte;
-
- pNew->nSlot = 1024;
- nByte = sizeof(Fts5HashEntry*) * pNew->nSlot;
- pNew->aSlot = (Fts5HashEntry**)sqlite3_malloc(nByte);
- if( pNew->aSlot==0 ){
- sqlite3_free(pNew);
- *ppNew = 0;
- rc = SQLITE_NOMEM;
- }else{
- memset(pNew->aSlot, 0, nByte);
- }
- }
- return rc;
-}
-
-/*
-** Free a hash table object.
-*/
-static void sqlite3Fts5HashFree(Fts5Hash *pHash){
- if( pHash ){
- sqlite3Fts5HashClear(pHash);
- sqlite3_free(pHash->aSlot);
- sqlite3_free(pHash);
- }
-}
-
-/*
-** Empty (but do not delete) a hash table.
-*/
-static void sqlite3Fts5HashClear(Fts5Hash *pHash){
- int i;
- for(i=0; i<pHash->nSlot; i++){
- Fts5HashEntry *pNext;
- Fts5HashEntry *pSlot;
- for(pSlot=pHash->aSlot[i]; pSlot; pSlot=pNext){
- pNext = pSlot->pHashNext;
- sqlite3_free(pSlot);
- }
- }
- memset(pHash->aSlot, 0, pHash->nSlot * sizeof(Fts5HashEntry*));
- pHash->nEntry = 0;
-}
-
-static unsigned int fts5HashKey(int nSlot, const u8 *p, int n){
- int i;
- unsigned int h = 13;
- for(i=n-1; i>=0; i--){
- h = (h << 3) ^ h ^ p[i];
- }
- return (h % nSlot);
-}
-
-static unsigned int fts5HashKey2(int nSlot, u8 b, const u8 *p, int n){
- int i;
- unsigned int h = 13;
- for(i=n-1; i>=0; i--){
- h = (h << 3) ^ h ^ p[i];
- }
- h = (h << 3) ^ h ^ b;
- return (h % nSlot);
-}
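-
-/*
-** A debug-only sketch (name hypothetical) of the relationship between the
-** two hash functions above: hashing a whole key with fts5HashKey() lands
-** in the same slot as hashing its first byte separately via
-** fts5HashKey2(). The write path below relies on this equivalence.
-*/
-#ifdef SQLITE_DEBUG
-static void fts5HashKeySketchCheck(int nSlot, const u8 *z, int n){
-  assert( n>=1 );
-  assert( fts5HashKey2(nSlot, z[0], &z[1], n-1)==fts5HashKey(nSlot, z, n) );
-}
-#endif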
-
-/*
-** Resize the hash table by doubling the number of slots.
-*/
-static int fts5HashResize(Fts5Hash *pHash){
- int nNew = pHash->nSlot*2;
- int i;
- Fts5HashEntry **apNew;
- Fts5HashEntry **apOld = pHash->aSlot;
-
- apNew = (Fts5HashEntry**)sqlite3_malloc(nNew*sizeof(Fts5HashEntry*));
- if( !apNew ) return SQLITE_NOMEM;
- memset(apNew, 0, nNew*sizeof(Fts5HashEntry*));
-
- for(i=0; i<pHash->nSlot; i++){
- while( apOld[i] ){
- int iHash;
- Fts5HashEntry *p = apOld[i];
- apOld[i] = p->pHashNext;
- iHash = fts5HashKey(nNew, (u8*)p->zKey, strlen(p->zKey));
- p->pHashNext = apNew[iHash];
- apNew[iHash] = p;
- }
- }
-
- sqlite3_free(apOld);
- pHash->nSlot = nNew;
- pHash->aSlot = apNew;
- return SQLITE_OK;
-}
-
-static void fts5HashAddPoslistSize(Fts5HashEntry *p){
- if( p->iSzPoslist ){
- u8 *pPtr = (u8*)p;
- int nSz = (p->nData - p->iSzPoslist - 1); /* Size in bytes */
- int nPos = nSz*2 + p->bDel; /* Value of nPos field */
-
- assert( p->bDel==0 || p->bDel==1 );
- if( nPos<=127 ){
- pPtr[p->iSzPoslist] = nPos;
- }else{
- int nByte = sqlite3Fts5GetVarintLen((u32)nPos);
- memmove(&pPtr[p->iSzPoslist + nByte], &pPtr[p->iSzPoslist + 1], nSz);
- sqlite3Fts5PutVarint(&pPtr[p->iSzPoslist], nPos);
- p->nData += (nByte-1);
- }
- p->bDel = 0;
- p->iSzPoslist = 0;
- }
-}
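-
-/*
-** Worked example of the encoding performed above: if 4 bytes of position
-** list data have been written and the delete flag is clear, nPos is
-** 4*2+0 == 8 and fits in the single byte reserved at iSzPoslist. A reader
-** recovers the byte count as (nPos>>1) and the delete flag as (nPos&0x01).
-** Only when nPos is 128 or greater does the varint need more than one
-** byte, in which case the memmove() above shifts the poslist data along
-** to make room for it.
-*/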
-
-static int sqlite3Fts5HashWrite(
- Fts5Hash *pHash,
- i64 iRowid, /* Rowid for this entry */
- int iCol, /* Column token appears in (-ve -> delete) */
- int iPos, /* Position of token within column */
- char bByte, /* First byte of token */
- const char *pToken, int nToken /* Token to add or remove to or from index */
-){
- unsigned int iHash;
- Fts5HashEntry *p;
- u8 *pPtr;
- int nIncr = 0; /* Amount to increment (*pHash->pnByte) by */
-
- /* Attempt to locate an existing hash entry */
- iHash = fts5HashKey2(pHash->nSlot, (u8)bByte, (const u8*)pToken, nToken);
- for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
- if( p->zKey[0]==bByte
- && memcmp(&p->zKey[1], pToken, nToken)==0
- && p->zKey[nToken+1]==0
- ){
- break;
- }
- }
-
- /* If an existing hash entry cannot be found, create a new one. */
- if( p==0 ){
- int nByte = FTS5_HASHENTRYSIZE + (nToken+1) + 1 + 64;
- if( nByte<128 ) nByte = 128;
-
- if( (pHash->nEntry*2)>=pHash->nSlot ){
- int rc = fts5HashResize(pHash);
- if( rc!=SQLITE_OK ) return rc;
- iHash = fts5HashKey2(pHash->nSlot, (u8)bByte, (const u8*)pToken, nToken);
- }
-
- p = (Fts5HashEntry*)sqlite3_malloc(nByte);
- if( !p ) return SQLITE_NOMEM;
- memset(p, 0, FTS5_HASHENTRYSIZE);
- p->nAlloc = nByte;
- p->zKey[0] = bByte;
- memcpy(&p->zKey[1], pToken, nToken);
- assert( iHash==fts5HashKey(pHash->nSlot, (u8*)p->zKey, nToken+1) );
- p->zKey[nToken+1] = '\0';
- p->nData = nToken+1 + 1 + FTS5_HASHENTRYSIZE;
- p->nData += sqlite3Fts5PutVarint(&((u8*)p)[p->nData], iRowid);
- p->iSzPoslist = p->nData;
- p->nData += 1;
- p->iRowid = iRowid;
- p->pHashNext = pHash->aSlot[iHash];
- pHash->aSlot[iHash] = p;
- pHash->nEntry++;
- nIncr += p->nData;
- }
-
- /* Check there is enough space to append a new entry. Worst case scenario
- ** is:
- **
- ** + 9 bytes for a new rowid,
- ** + 4 byte reserved for the "poslist size" varint.
- ** + 1 byte for a "new column" byte,
- ** + 3 bytes for a new column number (16-bit max) as a varint,
- ** + 5 bytes for the new position offset (32-bit max).
- */
- if( (p->nAlloc - p->nData) < (9 + 4 + 1 + 3 + 5) ){
- int nNew = p->nAlloc * 2;
- Fts5HashEntry *pNew;
- Fts5HashEntry **pp;
- pNew = (Fts5HashEntry*)sqlite3_realloc(p, nNew);
- if( pNew==0 ) return SQLITE_NOMEM;
- pNew->nAlloc = nNew;
- for(pp=&pHash->aSlot[iHash]; *pp!=p; pp=&(*pp)->pHashNext);
- *pp = pNew;
- p = pNew;
- }
- pPtr = (u8*)p;
- nIncr -= p->nData;
-
- /* If this is a new rowid, append the 4-byte size field for the previous
- ** entry, and the new rowid for this entry. */
- if( iRowid!=p->iRowid ){
- fts5HashAddPoslistSize(p);
- p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iRowid - p->iRowid);
- p->iSzPoslist = p->nData;
- p->nData += 1;
- p->iCol = 0;
- p->iPos = 0;
- p->iRowid = iRowid;
- }
-
- if( iCol>=0 ){
- /* Append a new column value, if necessary */
- assert( iCol>=p->iCol );
- if( iCol!=p->iCol ){
- pPtr[p->nData++] = 0x01;
- p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iCol);
- p->iCol = iCol;
- p->iPos = 0;
- }
-
- /* Append the new position offset */
- p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iPos - p->iPos + 2);
- p->iPos = iPos;
- }else{
- /* This is a delete. Set the delete flag. */
- p->bDel = 1;
- }
- nIncr += p->nData;
-
- *pHash->pnByte += nIncr;
- return SQLITE_OK;
-}
-
-
-/*
-** Arguments pLeft and pRight point to linked-lists of hash-entry objects,
-** each sorted in key order. This function merges the two lists into a
-** single list and returns a pointer to its first element.
-*/
-static Fts5HashEntry *fts5HashEntryMerge(
- Fts5HashEntry *pLeft,
- Fts5HashEntry *pRight
-){
- Fts5HashEntry *p1 = pLeft;
- Fts5HashEntry *p2 = pRight;
- Fts5HashEntry *pRet = 0;
- Fts5HashEntry **ppOut = &pRet;
-
- while( p1 || p2 ){
- if( p1==0 ){
- *ppOut = p2;
- p2 = 0;
- }else if( p2==0 ){
- *ppOut = p1;
- p1 = 0;
- }else{
- int i = 0;
- while( p1->zKey[i]==p2->zKey[i] ) i++;
-
- if( ((u8)p1->zKey[i])>((u8)p2->zKey[i]) ){
- /* p2 is smaller */
- *ppOut = p2;
- ppOut = &p2->pScanNext;
- p2 = p2->pScanNext;
- }else{
- /* p1 is smaller */
- *ppOut = p1;
- ppOut = &p1->pScanNext;
- p1 = p1->pScanNext;
- }
- *ppOut = 0;
- }
- }
-
- return pRet;
-}
-
-/*
-** Extract all tokens from hash table iHash and link them into a list
-** in sorted order. The hash table is cleared before returning. It is
-** the responsibility of the caller to free the elements of the returned
-** list.
-*/
-static int fts5HashEntrySort(
- Fts5Hash *pHash,
- const char *pTerm, int nTerm, /* Query prefix, if any */
- Fts5HashEntry **ppSorted
-){
- const int nMergeSlot = 32;
- Fts5HashEntry **ap;
- Fts5HashEntry *pList;
- int iSlot;
- int i;
-
- *ppSorted = 0;
- ap = sqlite3_malloc(sizeof(Fts5HashEntry*) * nMergeSlot);
- if( !ap ) return SQLITE_NOMEM;
- memset(ap, 0, sizeof(Fts5HashEntry*) * nMergeSlot);
-
- for(iSlot=0; iSlot<pHash->nSlot; iSlot++){
- Fts5HashEntry *pIter;
- for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){
- if( pTerm==0 || 0==memcmp(pIter->zKey, pTerm, nTerm) ){
- Fts5HashEntry *pEntry = pIter;
- pEntry->pScanNext = 0;
- for(i=0; ap[i]; i++){
- pEntry = fts5HashEntryMerge(pEntry, ap[i]);
- ap[i] = 0;
- }
- ap[i] = pEntry;
- }
- }
- }
-
- pList = 0;
- for(i=0; i<nMergeSlot; i++){
- pList = fts5HashEntryMerge(pList, ap[i]);
- }
-
- pHash->nEntry = 0;
- sqlite3_free(ap);
- *ppSorted = pList;
- return SQLITE_OK;
-}
-
-/*
-** Query the hash table for a doclist associated with term pTerm/nTerm.
-*/
-static int sqlite3Fts5HashQuery(
- Fts5Hash *pHash, /* Hash table to query */
- const char *pTerm, int nTerm, /* Query term */
- const u8 **ppDoclist, /* OUT: Pointer to doclist for pTerm */
- int *pnDoclist /* OUT: Size of doclist in bytes */
-){
- unsigned int iHash = fts5HashKey(pHash->nSlot, (const u8*)pTerm, nTerm);
- Fts5HashEntry *p;
-
- for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
- if( memcmp(p->zKey, pTerm, nTerm)==0 && p->zKey[nTerm]==0 ) break;
- }
-
- if( p ){
- fts5HashAddPoslistSize(p);
- *ppDoclist = (const u8*)&p->zKey[nTerm+1];
- *pnDoclist = p->nData - (FTS5_HASHENTRYSIZE + nTerm + 1);
- }else{
- *ppDoclist = 0;
- *pnDoclist = 0;
- }
-
- return SQLITE_OK;
-}
-
-static int sqlite3Fts5HashScanInit(
- Fts5Hash *p, /* Hash table to query */
- const char *pTerm, int nTerm /* Query prefix */
-){
- return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan);
-}
-
-static void sqlite3Fts5HashScanNext(Fts5Hash *p){
- assert( !sqlite3Fts5HashScanEof(p) );
- p->pScan = p->pScan->pScanNext;
-}
-
-static int sqlite3Fts5HashScanEof(Fts5Hash *p){
- return (p->pScan==0);
-}
-
-static void sqlite3Fts5HashScanEntry(
- Fts5Hash *pHash,
- const char **pzTerm, /* OUT: term (nul-terminated) */
- const u8 **ppDoclist, /* OUT: pointer to doclist */
- int *pnDoclist /* OUT: size of doclist in bytes */
-){
- Fts5HashEntry *p;
- if( (p = pHash->pScan) ){
- int nTerm = strlen(p->zKey);
- fts5HashAddPoslistSize(p);
- *pzTerm = p->zKey;
- *ppDoclist = (const u8*)&p->zKey[nTerm+1];
- *pnDoclist = p->nData - (FTS5_HASHENTRYSIZE + nTerm + 1);
- }else{
- *pzTerm = 0;
- *ppDoclist = 0;
- *pnDoclist = 0;
- }
-}
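-
-/*
-** A minimal sketch (name hypothetical) of driving the scan interface
-** above: sort the current contents of the hash table and visit each key
-** and its accumulated doclist in key order. Passing a NULL/zero prefix
-** scans every entry; note that each key begins with the bByte value that
-** was passed to sqlite3Fts5HashWrite().
-*/
-static int fts5HashScanSketch(Fts5Hash *pHash){
-  int rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
-  while( rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
-    const char *zTerm = 0;        /* Nul-terminated key */
-    const u8 *aDoclist = 0;       /* Doclist in the format described above */
-    int nDoclist = 0;             /* Size of aDoclist[] in bytes */
-    sqlite3Fts5HashScanEntry(pHash, &zTerm, &aDoclist, &nDoclist);
-    /* ... consume (zTerm, aDoclist, nDoclist) here ... */
-    sqlite3Fts5HashScanNext(pHash);
-  }
-  return rc;
-}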
-
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** Low level access to the FTS index stored in the database file. The
-** routines in this file implement all read and write access to the
-** %_data table. Other parts of the system access this functionality via
-** the interface defined in fts5Int.h.
-*/
-
-
-
-/*
-** Overview:
-**
-** The %_data table contains all the FTS indexes for an FTS5 virtual table.
-** As well as the main term index, there may be up to 31 prefix indexes.
-** The format is similar to FTS3/4, except that:
-**
-** * all segment b-tree leaf data is stored in fixed size page records
-** (e.g. 1000 bytes). A single doclist may span multiple pages. Care is
-** taken to ensure it is possible to iterate in either direction through
-** the entries in a doclist, or to seek to a specific entry within a
-** doclist, without loading it into memory.
-**
-** * large doclists that span many pages have associated "doclist index"
-** records that contain a copy of the first rowid on each page spanned by
-** the doclist. This is used to speed up seek operations, and merges of
-** large doclists with very small doclists.
-**
-** * extra fields in the "structure record" record the state of ongoing
-** incremental merge operations.
-**
-*/
-
-
-#define FTS5_OPT_WORK_UNIT 1000 /* Number of leaf pages per optimize step */
-#define FTS5_WORK_UNIT 64 /* Number of leaf pages in unit of work */
-
-#define FTS5_MIN_DLIDX_SIZE 4 /* Add dlidx if this many empty pages */
-
-#define FTS5_MAIN_PREFIX '0'
-
-#if FTS5_MAX_PREFIX_INDEXES > 31
-# error "FTS5_MAX_PREFIX_INDEXES is too large"
-#endif
-
-/*
-** Details:
-**
-** The %_data table managed by this module,
-**
-** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB);
-**
-** , contains the following 5 types of records. See the comments surrounding
-** the FTS5_*_ROWID macros below for a description of how %_data rowids are
-** assigned to each of them.
-**
-** 1. Structure Records:
-**
-** The set of segments that make up an index - the index structure - are
-** recorded in a single record within the %_data table. The record consists
-** of a single 32-bit configuration cookie value followed by a list of
-** SQLite varints. If the FTS table features more than one index (because
-** there are one or more prefix indexes), it is guaranteed that all share
-** the same cookie value.
-**
-** Immediately following the configuration cookie, the record begins with
-** three varints:
-**
-** + number of levels,
-** + total number of segments on all levels,
-** + value of write counter.
-**
-** Then, for each level from 0 to nMax:
-**
-** + number of input segments in ongoing merge.
-** + total number of segments in level.
-** + for each segment from oldest to newest:
-** + segment id (always > 0)
-** + first leaf page number (often 1, always greater than 0)
-** + final leaf page number
-**
-** 2. The Averages Record:
-**
-** A single record within the %_data table. The data is a list of varints.
-** The first value is the number of rows in the index. Then, for each column
-** from left to right, the total number of tokens in the column for all
-** rows of the table.
-**
-** 3. Segment leaves:
-**
-** TERM/DOCLIST FORMAT:
-**
-** Most of each segment leaf is taken up by term/doclist data. The
-** general format of term/doclist, starting with the first term
-** on the leaf page, is:
-**
-** varint : size of first term
-** blob: first term data
-** doclist: first doclist
-** zero-or-more {
-** varint: number of bytes in common with previous term
-** varint: number of bytes of new term data (nNew)
-** blob: nNew bytes of new term data
-** doclist: next doclist
-** }
-**
-** doclist format:
-**
-** varint: first rowid
-** poslist: first poslist
-** zero-or-more {
-** varint: rowid delta (always > 0)
-** poslist: next poslist
-** }
-**
-** poslist format:
-**
-** varint: size of poslist in bytes multiplied by 2, not including
-** this field. Plus 1 if this entry carries the "delete" flag.
-** collist: collist for column 0
-** zero-or-more {
-** 0x01 byte
-** varint: column number (I)
-** collist: collist for column I
-** }
-**
-** collist format:
-**
-** varint: first offset + 2
-** zero-or-more {
-** varint: offset delta + 2
-** }
-**
-** PAGE FORMAT
-**
-** Each leaf page begins with a 4-byte header containing 2 16-bit
-** unsigned integer fields in big-endian format. They are:
-**
-** * The byte offset of the first rowid on the page, if it exists
-** and occurs before the first term (otherwise 0).
-**
-** * The byte offset of the start of the page footer. If the page
-** footer is 0 bytes in size, then this field is the same as the
-** size of the leaf page in bytes.
-**
-** The page footer consists of a single varint for each term located
-** on the page. Each varint is the byte offset of the current term
-** within the page, delta-compressed against the previous value. In
-** other words, the first varint in the footer is the byte offset of
-** the first term, the second is the byte offset of the second less that
-** of the first, and so on.
-**
-** The term/doclist format described above is accurate if the entire
-** term/doclist data fits on a single leaf page. If this is not the case,
-** the format is changed in two ways:
-**
-** + if the first rowid on a page occurs before the first term, it
-** is stored as a literal value:
-**
-** varint: first rowid
-**
-** + the first term on each page is stored in the same way as the
-** very first term of the segment:
-**
-** varint : size of first term
-** blob: first term data
-**
-** 5. Segment doclist indexes:
-**
-** Doclist indexes are themselves b-trees, however they usually consist of
-** a single leaf record only. The format of each doclist index leaf page
-** is:
-**
-** * Flags byte. Bits are:
-** 0x01: Clear if leaf is also the root page, otherwise set.
-**
-** * Page number of fts index leaf page. As a varint.
-**
-** * First rowid on page indicated by previous field. As a varint.
-**
-** * A list of varints, one for each subsequent termless page. A
-** positive delta if the termless page contains at least one rowid,
-** or an 0x00 byte otherwise.
-**
-** Internal doclist index nodes are:
-**
-** * Flags byte. Bits are:
-** 0x01: Clear for root page, otherwise set.
-**
-** * Page number of first child page. As a varint.
-**
-** * Copy of first rowid on page indicated by previous field. As a varint.
-**
-** * A list of delta-encoded varints - the first rowid on each subsequent
-** child page.
-**
-*/
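-
-/*
-** A small worked example of the poslist format above (the bytes are
-** constructed from the description, not taken from a real database). A
-** token occurring at offset 0 of column 0 and at offset 1 of column 2 is
-** encoded as the five bytes:
-**
-**     0x08 0x02 0x01 0x02 0x03
-**
-**     0x08    size varint - 4 bytes of poslist data * 2, delete flag clear
-**     0x02    collist for column 0 - first offset 0, stored as 0+2
-**     0x01    column-change marker
-**     0x02    column number 2
-**     0x03    collist for column 2 - first offset 1, stored as 1+2
-*/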
-
-/*
-** Rowids for the averages and structure records in the %_data table.
-*/
-#define FTS5_AVERAGES_ROWID 1 /* Rowid used for the averages record */
-#define FTS5_STRUCTURE_ROWID 10 /* The structure record */
-
-/*
-** Macros determining the rowids used by segment leaves and dlidx leaves
-** and nodes. All nodes and leaves are stored in the %_data table with large
-** positive rowids.
-**
-** Each segment has a unique non-zero 16-bit id.
-**
-** The rowid for each segment leaf is found by passing the segment id and
-** the leaf page number to the FTS5_SEGMENT_ROWID macro. Leaves are numbered
-** sequentially starting from 1.
-*/
-#define FTS5_DATA_ID_B 16 /* Max seg id number 65535 */
-#define FTS5_DATA_DLI_B 1 /* Doclist-index flag (1 bit) */
-#define FTS5_DATA_HEIGHT_B 5 /* Max dlidx tree height of 32 */
-#define FTS5_DATA_PAGE_B 31 /* Max page number of 2147483648 */
-
-#define fts5_dri(segid, dlidx, height, pgno) ( \
- ((i64)(segid) << (FTS5_DATA_PAGE_B+FTS5_DATA_HEIGHT_B+FTS5_DATA_DLI_B)) + \
- ((i64)(dlidx) << (FTS5_DATA_PAGE_B + FTS5_DATA_HEIGHT_B)) + \
- ((i64)(height) << (FTS5_DATA_PAGE_B)) + \
- ((i64)(pgno)) \
-)
-
-#define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno)
-#define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno)
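-
-/*
-** A debug-only sketch (name hypothetical) checking the rowid packing
-** performed by the macros above, using invented segment and page numbers.
-** With the bit widths defined above the segment id is shifted left by
-** 37 (31+5+1) bits, so FTS5_SEGMENT_ROWID(1, 1) is (1<<37)+1, i.e.
-** 137438953473.
-*/
-#ifdef SQLITE_DEBUG
-static void fts5DataRowidSketch(void){
-  assert( FTS5_SEGMENT_ROWID(1, 1)==(((i64)1<<37) + 1) );
-  assert( FTS5_DLIDX_ROWID(1, 0, 1)==(((i64)1<<37) + ((i64)1<<36) + 1) );
-}
-#endif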
-
-/*
-** Maximum segments permitted in a single index
-*/
-#define FTS5_MAX_SEGMENT 2000
-
-#ifdef SQLITE_DEBUG
-static int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; }
-#endif
-
-
-/*
-** Each time a blob is read from the %_data table, it is padded with this
-** many zero bytes. This makes it easier to decode the various record formats
-** without overreading if the records are corrupt.
-*/
-#define FTS5_DATA_ZERO_PADDING 8
-#define FTS5_DATA_PADDING 20
-
-typedef struct Fts5Data Fts5Data;
-typedef struct Fts5DlidxIter Fts5DlidxIter;
-typedef struct Fts5DlidxLvl Fts5DlidxLvl;
-typedef struct Fts5DlidxWriter Fts5DlidxWriter;
-typedef struct Fts5PageWriter Fts5PageWriter;
-typedef struct Fts5SegIter Fts5SegIter;
-typedef struct Fts5DoclistIter Fts5DoclistIter;
-typedef struct Fts5SegWriter Fts5SegWriter;
-typedef struct Fts5Structure Fts5Structure;
-typedef struct Fts5StructureLevel Fts5StructureLevel;
-typedef struct Fts5StructureSegment Fts5StructureSegment;
-
-struct Fts5Data {
- u8 *p; /* Pointer to buffer containing record */
- int nn; /* Size of record in bytes */
- int szLeaf; /* Size of leaf without page-index */
-};
-
-/*
-** One object per %_data table.
-*/
-struct Fts5Index {
- Fts5Config *pConfig; /* Virtual table configuration */
- char *zDataTbl; /* Name of %_data table */
- int nWorkUnit; /* Leaf pages in a "unit" of work */
-
- /*
- ** Variables related to the accumulation of tokens and doclists within the
- ** in-memory hash tables before they are flushed to disk.
- */
- Fts5Hash *pHash; /* Hash table for in-memory data */
- int nMaxPendingData; /* Max pending data before flush to disk */
- int nPendingData; /* Current bytes of pending data */
- i64 iWriteRowid; /* Rowid for current doc being written */
- int bDelete; /* Current write is a delete */
-
- /* Error state. */
- int rc; /* Current error code */
-
- /* State used by the fts5DataXXX() functions. */
- sqlite3_blob *pReader; /* RO incr-blob open on %_data table */
- sqlite3_stmt *pWriter; /* "INSERT ... %_data VALUES(?,?)" */
- sqlite3_stmt *pDeleter; /* "DELETE FROM %_data ... id>=? AND id<=?" */
- sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */
-  sqlite3_stmt *pIdxDeleter;      /* "DELETE FROM %_idx WHERE segid=?" */
- sqlite3_stmt *pIdxSelect;
- int nRead; /* Total number of blocks read */
-};
-
-struct Fts5DoclistIter {
- u8 *aEof; /* Pointer to 1 byte past end of doclist */
-
- /* Output variables. aPoslist==0 at EOF */
- i64 iRowid;
- u8 *aPoslist;
- int nPoslist;
- int nSize;
-};
-
-/*
-** The contents of the "structure" record for each index are represented
-** using an Fts5Structure record in memory, which uses instances of the
-** other Fts5StructureXXX types as components.
-*/
-struct Fts5StructureSegment {
- int iSegid; /* Segment id */
- int pgnoFirst; /* First leaf page number in segment */
- int pgnoLast; /* Last leaf page number in segment */
-};
-struct Fts5StructureLevel {
- int nMerge; /* Number of segments in incr-merge */
- int nSeg; /* Total number of segments on level */
- Fts5StructureSegment *aSeg; /* Array of segments. aSeg[0] is oldest. */
-};
-struct Fts5Structure {
- int nRef; /* Object reference count */
- u64 nWriteCounter; /* Total leaves written to level 0 */
- int nSegment; /* Total segments in this structure */
- int nLevel; /* Number of levels in this index */
- Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */
-};
-
-/*
-** An object of type Fts5SegWriter is used to write to segments.
-*/
-struct Fts5PageWriter {
- int pgno; /* Page number for this page */
- int iPrevPgidx; /* Previous value written into pgidx */
- Fts5Buffer buf; /* Buffer containing leaf data */
- Fts5Buffer pgidx; /* Buffer containing page-index */
- Fts5Buffer term; /* Buffer containing previous term on page */
-};
-struct Fts5DlidxWriter {
- int pgno; /* Page number for this page */
- int bPrevValid; /* True if iPrev is valid */
- i64 iPrev; /* Previous rowid value written to page */
- Fts5Buffer buf; /* Buffer containing page data */
-};
-struct Fts5SegWriter {
- int iSegid; /* Segid to write to */
- Fts5PageWriter writer; /* PageWriter object */
- i64 iPrevRowid; /* Previous rowid written to current leaf */
- u8 bFirstRowidInDoclist; /* True if next rowid is first in doclist */
- u8 bFirstRowidInPage; /* True if next rowid is first in page */
- /* TODO1: Can use (writer.pgidx.n==0) instead of bFirstTermInPage */
- u8 bFirstTermInPage; /* True if next term will be first in leaf */
- int nLeafWritten; /* Number of leaf pages written */
- int nEmpty; /* Number of contiguous term-less nodes */
-
- int nDlidx; /* Allocated size of aDlidx[] array */
- Fts5DlidxWriter *aDlidx; /* Array of Fts5DlidxWriter objects */
-
- /* Values to insert into the %_idx table */
- Fts5Buffer btterm; /* Next term to insert into %_idx table */
- int iBtPage; /* Page number corresponding to btterm */
-};
-
-/*
-** Object for iterating through the merged results of one or more segments,
-** visiting each term/rowid pair in the merged data.
-**
-** nSeg is always a power of two greater than or equal to the number of
-** segments that this object is merging data from. Both the aSeg[] and
-** aFirst[] arrays are sized at nSeg entries. The aSeg[] array is padded
-** with zeroed objects - these are handled as if they were iterators opened
-** on empty segments.
-**
-** The results of comparing segments aSeg[N] and aSeg[N+1], where N is an
-** even number, is stored in aFirst[(nSeg+N)/2]. The "result" of the
-** comparison in this context is the index of the iterator that currently
-** points to the smaller term/rowid combination. Iterators at EOF are
-** considered to be greater than all other iterators.
-**
-** aFirst[1] contains the index in aSeg[] of the iterator that points to
-** the smallest key overall. aFirst[0] is unused.
-*/
-
-typedef struct Fts5CResult Fts5CResult;
-struct Fts5CResult {
- u16 iFirst; /* aSeg[] index of firstest iterator */
- u8 bTermEq; /* True if the terms are equal */
-};
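-
-/*
-** A small worked example of the aFirst[] indexing described above, for
-** nSeg==4:
-**
-**     aFirst[2] = result of comparing aSeg[0] with aSeg[1]   ((4+0)/2)
-**     aFirst[3] = result of comparing aSeg[2] with aSeg[3]   ((4+2)/2)
-**     aFirst[1] = result of comparing those two winners - the aSeg[]
-**                 index of the smallest term/rowid overall
-**     aFirst[0]   unused
-*/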
-
-/*
-** Object for iterating through a single segment, visiting each term/rowid
-** pair in the segment.
-**
-** pSeg:
-** The segment to iterate through.
-**
-** iLeafPgno:
-** Current leaf page number within segment.
-**
-** iLeafOffset:
-** Byte offset within the current leaf that is the first byte of the
-**   position list data (one byte past the position-list size field).
-** rowid field of the current entry. Usually this is the size field of the
-** position list data. The exception is if the rowid for the current entry
-** is the last thing on the leaf page.
-**
-** pLeaf:
-** Buffer containing current leaf page data. Set to NULL at EOF.
-**
-** iTermLeafPgno, iTermLeafOffset:
-** Leaf page number containing the last term read from the segment. And
-** the offset immediately following the term data.
-**
-** flags:
-** Mask of FTS5_SEGITER_XXX values. Interpreted as follows:
-**
-** FTS5_SEGITER_ONETERM:
-** If set, set the iterator to point to EOF after the current doclist
-** has been exhausted. Do not proceed to the next term in the segment.
-**
-** FTS5_SEGITER_REVERSE:
-** This flag is only ever set if FTS5_SEGITER_ONETERM is also set. If
-** it is set, iterate through rowid in descending order instead of the
-** default ascending order.
-**
-** iRowidOffset/nRowidOffset/aRowidOffset:
-** These are used if the FTS5_SEGITER_REVERSE flag is set.
-**
-** For each rowid on the page corresponding to the current term, the
-** corresponding aRowidOffset[] entry is set to the byte offset of the
-** start of the "position-list-size" field within the page.
-**
-** iTermIdx:
-** Index of current term on iTermLeafPgno.
-*/
-struct Fts5SegIter {
- Fts5StructureSegment *pSeg; /* Segment to iterate through */
- int flags; /* Mask of configuration flags */
- int iLeafPgno; /* Current leaf page number */
- Fts5Data *pLeaf; /* Current leaf data */
- Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */
- int iLeafOffset; /* Byte offset within current leaf */
-
- /* The page and offset from which the current term was read. The offset
- ** is the offset of the first rowid in the current doclist. */
- int iTermLeafPgno;
- int iTermLeafOffset;
-
- int iPgidxOff; /* Next offset in pgidx */
- int iEndofDoclist;
-
- /* The following are only used if the FTS5_SEGITER_REVERSE flag is set. */
- int iRowidOffset; /* Current entry in aRowidOffset[] */
- int nRowidOffset; /* Allocated size of aRowidOffset[] array */
- int *aRowidOffset; /* Array of offset to rowid fields */
-
- Fts5DlidxIter *pDlidx; /* If there is a doclist-index */
-
- /* Variables populated based on current entry. */
- Fts5Buffer term; /* Current term */
- i64 iRowid; /* Current rowid */
- int nPos; /* Number of bytes in current position list */
- int bDel; /* True if the delete flag is set */
-};
-
-/*
-** Argument is a pointer to an Fts5Data structure that contains a
-** leaf page.
-*/
-#define ASSERT_SZLEAF_OK(x) assert( \
- (x)->szLeaf==(x)->nn || (x)->szLeaf==fts5GetU16(&(x)->p[2]) \
-)
-
-#define FTS5_SEGITER_ONETERM 0x01
-#define FTS5_SEGITER_REVERSE 0x02
-
-
-/*
-** Argument is a pointer to an Fts5Data structure that contains a leaf
-** page. This macro evaluates to true if the leaf contains no terms, or
-** false if it contains at least one term.
-*/
-#define fts5LeafIsTermless(x) ((x)->szLeaf >= (x)->nn)
-
-#define fts5LeafTermOff(x, i) (fts5GetU16(&(x)->p[(x)->szLeaf + (i)*2]))
-
-#define fts5LeafFirstRowidOff(x) (fts5GetU16((x)->p))
-
-/*
-** poslist:
-** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered.
-** There is no way to tell if this is populated or not.
-*/
-struct Fts5IndexIter {
- Fts5Index *pIndex; /* Index that owns this iterator */
- Fts5Structure *pStruct; /* Database structure for this iterator */
- Fts5Buffer poslist; /* Buffer containing current poslist */
-
- int nSeg; /* Size of aSeg[] array */
- int bRev; /* True to iterate in reverse order */
- u8 bSkipEmpty; /* True to skip deleted entries */
- u8 bEof; /* True at EOF */
- u8 bFiltered; /* True if column-filter already applied */
-
- i64 iSwitchRowid; /* Firstest rowid of other than aFirst[1] */
- Fts5CResult *aFirst; /* Current merge state (see above) */
- Fts5SegIter aSeg[1]; /* Array of segment iterators */
-};
-
-
-/*
-** An instance of the following type is used to iterate through the contents
-** of a doclist-index record.
-**
-** pData:
-** Record containing the doclist-index data.
-**
-** bEof:
-** Set to true once iterator has reached EOF.
-**
-** iOff:
-** Set to the current offset within record pData.
-*/
-struct Fts5DlidxLvl {
- Fts5Data *pData; /* Data for current page of this level */
- int iOff; /* Current offset into pData */
- int bEof; /* At EOF already */
- int iFirstOff; /* Used by reverse iterators */
-
- /* Output variables */
- int iLeafPgno; /* Page number of current leaf page */
- i64 iRowid; /* First rowid on leaf iLeafPgno */
-};
-struct Fts5DlidxIter {
- int nLvl;
- int iSegid;
- Fts5DlidxLvl aLvl[1];
-};
-
-static void fts5PutU16(u8 *aOut, u16 iVal){
- aOut[0] = (iVal>>8);
- aOut[1] = (iVal&0xFF);
-}
-
-static u16 fts5GetU16(const u8 *aIn){
- return ((u16)aIn[0] << 8) + aIn[1];
-}
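-
-/*
-** Worked example of the 4-byte leaf page header read with fts5GetU16()
-** (the bytes are invented for illustration). A leaf page beginning:
-**
-**     0x00 0x04 0x03 0xE8 ...
-**
-** stores its first rowid at byte offset 4 (i.e. a rowid occurs before the
-** first term on the page), and its page footer begins at byte offset
-** 0x03E8 == 1000, which is also the value recorded in Fts5Data.szLeaf.
-*/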
-
-/*
-** Allocate and return a buffer at least nByte bytes in size.
-**
-** If an OOM error is encountered, return NULL and set the error code in
-** the Fts5Index handle passed as the first argument.
-*/
-static void *fts5IdxMalloc(Fts5Index *p, int nByte){
- return sqlite3Fts5MallocZero(&p->rc, nByte);
-}
-
-/*
-** Compare the contents of the pLeft buffer with the pRight/nRight blob.
-**
-** Return -ve if pLeft is smaller than pRight, 0 if they are equal or
-** +ve if pRight is smaller than pLeft. In other words:
-**
-** res = *pLeft - *pRight
-*/
-#ifdef SQLITE_DEBUG
-static int fts5BufferCompareBlob(
- Fts5Buffer *pLeft, /* Left hand side of comparison */
- const u8 *pRight, int nRight /* Right hand side of comparison */
-){
- int nCmp = MIN(pLeft->n, nRight);
- int res = memcmp(pLeft->p, pRight, nCmp);
- return (res==0 ? (pLeft->n - nRight) : res);
-}
-#endif
-
-/*
-** Compare the contents of the two buffers using memcmp(). If one buffer
-** is a prefix of the other, it is considered the lesser.
-**
-** Return -ve if pLeft is smaller than pRight, 0 if they are equal or
-** +ve if pRight is smaller than pLeft. In other words:
-**
-** res = *pLeft - *pRight
-*/
-static int fts5BufferCompare(Fts5Buffer *pLeft, Fts5Buffer *pRight){
- int nCmp = MIN(pLeft->n, pRight->n);
- int res = memcmp(pLeft->p, pRight->p, nCmp);
- return (res==0 ? (pLeft->n - pRight->n) : res);
-}
-
-#ifdef SQLITE_DEBUG
-static int fts5BlobCompare(
- const u8 *pLeft, int nLeft,
- const u8 *pRight, int nRight
-){
- int nCmp = MIN(nLeft, nRight);
- int res = memcmp(pLeft, pRight, nCmp);
- return (res==0 ? (nLeft - nRight) : res);
-}
-#endif
-
-static int fts5LeafFirstTermOff(Fts5Data *pLeaf){
- int ret;
- fts5GetVarint32(&pLeaf->p[pLeaf->szLeaf], ret);
- return ret;
-}
-
-/*
-** Close the read-only blob handle, if it is open.
-*/
-static void fts5CloseReader(Fts5Index *p){
- if( p->pReader ){
- sqlite3_blob *pReader = p->pReader;
- p->pReader = 0;
- sqlite3_blob_close(pReader);
- }
-}
-
-
-/*
-** Retrieve a record from the %_data table.
-**
-** If an error occurs, NULL is returned and an error left in the
-** Fts5Index object.
-*/
-static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){
- Fts5Data *pRet = 0;
- if( p->rc==SQLITE_OK ){
- int rc = SQLITE_OK;
-
- if( p->pReader ){
- /* This call may return SQLITE_ABORT if there has been a savepoint
- ** rollback since it was last used. In this case a new blob handle
- ** is required. */
- sqlite3_blob *pBlob = p->pReader;
- p->pReader = 0;
- rc = sqlite3_blob_reopen(pBlob, iRowid);
- assert( p->pReader==0 );
- p->pReader = pBlob;
- if( rc!=SQLITE_OK ){
- fts5CloseReader(p);
- }
- if( rc==SQLITE_ABORT ) rc = SQLITE_OK;
- }
-
- /* If the blob handle is not open at this point, open it and seek
- ** to the requested entry. */
- if( p->pReader==0 && rc==SQLITE_OK ){
- Fts5Config *pConfig = p->pConfig;
- rc = sqlite3_blob_open(pConfig->db,
- pConfig->zDb, p->zDataTbl, "block", iRowid, 0, &p->pReader
- );
- }
-
- /* If either of the sqlite3_blob_open() or sqlite3_blob_reopen() calls
- ** above returned SQLITE_ERROR, return SQLITE_CORRUPT_VTAB instead.
- ** All the reasons those functions might return SQLITE_ERROR - missing
- ** table, missing row, non-blob/text in block column - indicate
- ** backing store corruption. */
- if( rc==SQLITE_ERROR ) rc = FTS5_CORRUPT;
-
- if( rc==SQLITE_OK ){
- u8 *aOut = 0; /* Read blob data into this buffer */
- int nByte = sqlite3_blob_bytes(p->pReader);
- int nAlloc = sizeof(Fts5Data) + nByte + FTS5_DATA_PADDING;
- pRet = (Fts5Data*)sqlite3_malloc(nAlloc);
- if( pRet ){
- pRet->nn = nByte;
- aOut = pRet->p = (u8*)&pRet[1];
- }else{
- rc = SQLITE_NOMEM;
- }
-
- if( rc==SQLITE_OK ){
- rc = sqlite3_blob_read(p->pReader, aOut, nByte, 0);
- }
- if( rc!=SQLITE_OK ){
- sqlite3_free(pRet);
- pRet = 0;
- }else{
- /* TODO1: Fix this */
- pRet->szLeaf = fts5GetU16(&pRet->p[2]);
- }
- }
- p->rc = rc;
- p->nRead++;
- }
-
- assert( (pRet==0)==(p->rc!=SQLITE_OK) );
- return pRet;
-}
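-
-/*
-** In the Fts5Data object returned by fts5DataRead(), p points to the record
-** content, nn is its total size in bytes and szLeaf is the 16-bit value
-** stored at byte offset 2. For leaf pages, szLeaf is the size of the leaf
-** data proper, and the bytes between offsets szLeaf and nn hold the page
-** index consulted by fts5SegIterNextPage() and fts5LeafSeek() below. The
-** "TODO1" note above refers to the fact that szLeaf is currently read for
-** every record, not only for leaf pages.
-*/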
-
-/*
-** Release a reference to data record returned by an earlier call to
-** fts5DataRead().
-*/
-static void fts5DataRelease(Fts5Data *pData){
- sqlite3_free(pData);
-}
-
-static int fts5IndexPrepareStmt(
- Fts5Index *p,
- sqlite3_stmt **ppStmt,
- char *zSql
-){
- if( p->rc==SQLITE_OK ){
- if( zSql ){
- p->rc = sqlite3_prepare_v2(p->pConfig->db, zSql, -1, ppStmt, 0);
- }else{
- p->rc = SQLITE_NOMEM;
- }
- }
- sqlite3_free(zSql);
- return p->rc;
-}
-
-
-/*
-** INSERT OR REPLACE a record into the %_data table.
-*/
-static void fts5DataWrite(Fts5Index *p, i64 iRowid, const u8 *pData, int nData){
- if( p->rc!=SQLITE_OK ) return;
-
- if( p->pWriter==0 ){
- Fts5Config *pConfig = p->pConfig;
- fts5IndexPrepareStmt(p, &p->pWriter, sqlite3_mprintf(
- "REPLACE INTO '%q'.'%q_data'(id, block) VALUES(?,?)",
- pConfig->zDb, pConfig->zName
- ));
- if( p->rc ) return;
- }
-
- sqlite3_bind_int64(p->pWriter, 1, iRowid);
- sqlite3_bind_blob(p->pWriter, 2, pData, nData, SQLITE_STATIC);
- sqlite3_step(p->pWriter);
- p->rc = sqlite3_reset(p->pWriter);
-}
-
-/*
-** Execute the following SQL:
-**
-** DELETE FROM %_data WHERE id BETWEEN $iFirst AND $iLast
-*/
-static void fts5DataDelete(Fts5Index *p, i64 iFirst, i64 iLast){
- if( p->rc!=SQLITE_OK ) return;
-
- if( p->pDeleter==0 ){
- int rc;
- Fts5Config *pConfig = p->pConfig;
- char *zSql = sqlite3_mprintf(
- "DELETE FROM '%q'.'%q_data' WHERE id>=? AND id<=?",
- pConfig->zDb, pConfig->zName
- );
- if( zSql==0 ){
- rc = SQLITE_NOMEM;
- }else{
- rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p->pDeleter, 0);
- sqlite3_free(zSql);
- }
- if( rc!=SQLITE_OK ){
- p->rc = rc;
- return;
- }
- }
-
- sqlite3_bind_int64(p->pDeleter, 1, iFirst);
- sqlite3_bind_int64(p->pDeleter, 2, iLast);
- sqlite3_step(p->pDeleter);
- p->rc = sqlite3_reset(p->pDeleter);
-}
-
-/*
-** Remove all records associated with segment iSegid.
-*/
-static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){
- i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0);
- i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1;
- fts5DataDelete(p, iFirst, iLast);
- if( p->pIdxDeleter==0 ){
- Fts5Config *pConfig = p->pConfig;
- fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf(
- "DELETE FROM '%q'.'%q_idx' WHERE segid=?",
- pConfig->zDb, pConfig->zName
- ));
- }
- if( p->rc==SQLITE_OK ){
- sqlite3_bind_int(p->pIdxDeleter, 1, iSegid);
- sqlite3_step(p->pIdxDeleter);
- p->rc = sqlite3_reset(p->pIdxDeleter);
- }
-}
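-
-/*
-** All %_data rowids belonging to segment iSegid lie in the contiguous range
-** [FTS5_SEGMENT_ROWID(iSegid, 0), FTS5_SEGMENT_ROWID(iSegid+1, 0)-1], which
-** is why the single ranged DELETE issued by fts5DataRemoveSegment() above is
-** sufficient. The segment's rows in the %_idx table are removed by the
-** second statement prepared in the same function.
-*/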
-
-/*
-** Release a reference to an Fts5Structure object returned by an earlier
-** call to fts5StructureRead() or fts5StructureDecode().
-*/
-static void fts5StructureRelease(Fts5Structure *pStruct){
- if( pStruct && 0>=(--pStruct->nRef) ){
- int i;
- assert( pStruct->nRef==0 );
- for(i=0; i<pStruct->nLevel; i++){
- sqlite3_free(pStruct->aLevel[i].aSeg);
- }
- sqlite3_free(pStruct);
- }
-}
-
-static void fts5StructureRef(Fts5Structure *pStruct){
- pStruct->nRef++;
-}
-
-/*
-** Deserialize and return the structure record currently stored in serialized
-** form within buffer pData/nData.
-**
-** The Fts5Structure.aLevel[] and each Fts5StructureLevel.aSeg[] array
-** are over-allocated by one slot. This allows the structure contents
-** to be more easily edited.
-**
-** If an error occurs, *ppOut is set to NULL and an SQLite error code
-** returned. Otherwise, *ppOut is set to point to the new object and
-** SQLITE_OK returned.
-*/
-static int fts5StructureDecode(
- const u8 *pData, /* Buffer containing serialized structure */
- int nData, /* Size of buffer pData in bytes */
- int *piCookie, /* Configuration cookie value */
- Fts5Structure **ppOut /* OUT: Deserialized object */
-){
- int rc = SQLITE_OK;
- int i = 0;
- int iLvl;
- int nLevel = 0;
- int nSegment = 0;
- int nByte; /* Bytes of space to allocate at pRet */
- Fts5Structure *pRet = 0; /* Structure object to return */
-
- /* Grab the cookie value */
- if( piCookie ) *piCookie = sqlite3Fts5Get32(pData);
- i = 4;
-
- /* Read the total number of levels and segments from the start of the
- ** structure record. */
- i += fts5GetVarint32(&pData[i], nLevel);
- i += fts5GetVarint32(&pData[i], nSegment);
- nByte = (
- sizeof(Fts5Structure) + /* Main structure */
- sizeof(Fts5StructureLevel) * (nLevel-1) /* aLevel[] array */
- );
- pRet = (Fts5Structure*)sqlite3Fts5MallocZero(&rc, nByte);
-
- if( pRet ){
- pRet->nRef = 1;
- pRet->nLevel = nLevel;
- pRet->nSegment = nSegment;
- i += sqlite3Fts5GetVarint(&pData[i], &pRet->nWriteCounter);
-
- for(iLvl=0; rc==SQLITE_OK && iLvl<nLevel; iLvl++){
- Fts5StructureLevel *pLvl = &pRet->aLevel[iLvl];
- int nTotal;
- int iSeg;
-
- i += fts5GetVarint32(&pData[i], pLvl->nMerge);
- i += fts5GetVarint32(&pData[i], nTotal);
- assert( nTotal>=pLvl->nMerge );
- pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&rc,
- nTotal * sizeof(Fts5StructureSegment)
- );
-
- if( rc==SQLITE_OK ){
- pLvl->nSeg = nTotal;
- for(iSeg=0; iSeg<nTotal; iSeg++){
- i += fts5GetVarint32(&pData[i], pLvl->aSeg[iSeg].iSegid);
- i += fts5GetVarint32(&pData[i], pLvl->aSeg[iSeg].pgnoFirst);
- i += fts5GetVarint32(&pData[i], pLvl->aSeg[iSeg].pgnoLast);
- }
- }else{
- fts5StructureRelease(pRet);
- pRet = 0;
- }
- }
- }
-
- *ppOut = pRet;
- return rc;
-}
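-
-/*
-** For reference, the structure record deserialized by fts5StructureDecode()
-** above (and serialized by fts5StructureWrite() below) is laid out as:
-**
-**     + 4-byte big-endian configuration cookie
-**     + varint: number of levels (nLevel)
-**     + varint: total number of segments (nSegment)
-**     + varint: write counter
-**     + for each level:
-**         + varint: number of input segments in any ongoing merge (nMerge)
-**         + varint: number of segments on the level (nSeg)
-**         + for each segment: varints iSegid, pgnoFirst and pgnoLast
-*/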
-
-/*
-** Add a new, empty, level to the end of the structure object *ppStruct,
-** extending the allocation as required. If an OOM error occurs, set *pRc
-** to SQLITE_NOMEM and leave *ppStruct unchanged.
-*/
-static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){
- if( *pRc==SQLITE_OK ){
- Fts5Structure *pStruct = *ppStruct;
- int nLevel = pStruct->nLevel;
- int nByte = (
- sizeof(Fts5Structure) + /* Main structure */
- sizeof(Fts5StructureLevel) * (nLevel+1) /* aLevel[] array */
- );
-
- pStruct = sqlite3_realloc(pStruct, nByte);
- if( pStruct ){
- memset(&pStruct->aLevel[nLevel], 0, sizeof(Fts5StructureLevel));
- pStruct->nLevel++;
- *ppStruct = pStruct;
- }else{
- *pRc = SQLITE_NOMEM;
- }
- }
-}
-
-/*
-** Extend level iLvl so that there is room for at least nExtra more
-** segments.
-*/
-static void fts5StructureExtendLevel(
- int *pRc,
- Fts5Structure *pStruct,
- int iLvl,
- int nExtra,
- int bInsert
-){
- if( *pRc==SQLITE_OK ){
- Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
- Fts5StructureSegment *aNew;
- int nByte;
-
- nByte = (pLvl->nSeg + nExtra) * sizeof(Fts5StructureSegment);
- aNew = sqlite3_realloc(pLvl->aSeg, nByte);
- if( aNew ){
- if( bInsert==0 ){
- memset(&aNew[pLvl->nSeg], 0, sizeof(Fts5StructureSegment) * nExtra);
- }else{
- int nMove = pLvl->nSeg * sizeof(Fts5StructureSegment);
- memmove(&aNew[nExtra], aNew, nMove);
- memset(aNew, 0, sizeof(Fts5StructureSegment) * nExtra);
- }
- pLvl->aSeg = aNew;
- }else{
- *pRc = SQLITE_NOMEM;
- }
- }
-}
-
-/*
-** Read, deserialize and return the structure record.
-**
-** The Fts5Structure.aLevel[] and each Fts5StructureLevel.aSeg[] array
-** are over-allocated as described for function fts5StructureDecode()
-** above.
-**
-** If an error occurs, NULL is returned and an error code left in the
-** Fts5Index handle. If an error has already occurred when this function
-** is called, it is a no-op.
-*/
-static Fts5Structure *fts5StructureRead(Fts5Index *p){
- Fts5Config *pConfig = p->pConfig;
- Fts5Structure *pRet = 0; /* Object to return */
- int iCookie; /* Configuration cookie */
- Fts5Data *pData;
-
- pData = fts5DataRead(p, FTS5_STRUCTURE_ROWID);
- if( p->rc ) return 0;
- /* TODO: Do we need this if the leaf-index is appended? Probably... */
- memset(&pData->p[pData->nn], 0, FTS5_DATA_PADDING);
- p->rc = fts5StructureDecode(pData->p, pData->nn, &iCookie, &pRet);
- if( p->rc==SQLITE_OK && pConfig->iCookie!=iCookie ){
- p->rc = sqlite3Fts5ConfigLoad(pConfig, iCookie);
- }
-
- fts5DataRelease(pData);
- if( p->rc!=SQLITE_OK ){
- fts5StructureRelease(pRet);
- pRet = 0;
- }
- return pRet;
-}
-
-/*
-** Return the total number of segments in index structure pStruct. This
-** function is only ever used as part of assert() conditions.
-*/
-#ifdef SQLITE_DEBUG
-static int fts5StructureCountSegments(Fts5Structure *pStruct){
- int nSegment = 0; /* Total number of segments */
- if( pStruct ){
- int iLvl; /* Used to iterate through levels */
- for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
- nSegment += pStruct->aLevel[iLvl].nSeg;
- }
- }
-
- return nSegment;
-}
-#endif
-
-/*
-** Serialize and store the "structure" record.
-**
-** If an error occurs, leave an error code in the Fts5Index object. If an
-** error has already occurred, this function is a no-op.
-*/
-static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
- if( p->rc==SQLITE_OK ){
- Fts5Buffer buf; /* Buffer to serialize record into */
- int iLvl; /* Used to iterate through levels */
- int iCookie; /* Cookie value to store */
-
- assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
- memset(&buf, 0, sizeof(Fts5Buffer));
-
- /* Append the current configuration cookie */
- iCookie = p->pConfig->iCookie;
- if( iCookie<0 ) iCookie = 0;
- fts5BufferAppend32(&p->rc, &buf, iCookie);
-
- fts5BufferAppendVarint(&p->rc, &buf, pStruct->nLevel);
- fts5BufferAppendVarint(&p->rc, &buf, pStruct->nSegment);
- fts5BufferAppendVarint(&p->rc, &buf, (i64)pStruct->nWriteCounter);
-
- for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
- int iSeg; /* Used to iterate through segments */
- Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->nMerge);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->nSeg);
- assert( pLvl->nMerge<=pLvl->nSeg );
-
- for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast);
- }
- }
-
- fts5DataWrite(p, FTS5_STRUCTURE_ROWID, buf.p, buf.n);
- fts5BufferFree(&buf);
- }
-}
-
-#if 0
-static void fts5DebugStructure(int*,Fts5Buffer*,Fts5Structure*);
-static void fts5PrintStructure(const char *zCaption, Fts5Structure *pStruct){
- int rc = SQLITE_OK;
- Fts5Buffer buf;
- memset(&buf, 0, sizeof(buf));
- fts5DebugStructure(&rc, &buf, pStruct);
- fprintf(stdout, "%s: %s\n", zCaption, buf.p);
- fflush(stdout);
- fts5BufferFree(&buf);
-}
-#else
-# define fts5PrintStructure(x,y)
-#endif
-
-static int fts5SegmentSize(Fts5StructureSegment *pSeg){
- return 1 + pSeg->pgnoLast - pSeg->pgnoFirst;
-}
-
-/*
-** Promote segments from levels numerically greater than iPromote to level
-** iPromote of structure pStruct, provided each is no larger than szPromote
-** pages. Promotion stops at the first segment that is too large, or at the
-** first level with an ongoing merge, and is a no-op if level iPromote
-** itself has a merge in progress. If an OOM error occurs, an error code is
-** left in the Fts5Index handle passed as the first argument.
-*/
-static void fts5StructurePromoteTo(
- Fts5Index *p,
- int iPromote,
- int szPromote,
- Fts5Structure *pStruct
-){
- int il, is;
- Fts5StructureLevel *pOut = &pStruct->aLevel[iPromote];
-
- if( pOut->nMerge==0 ){
- for(il=iPromote+1; il<pStruct->nLevel; il++){
- Fts5StructureLevel *pLvl = &pStruct->aLevel[il];
- if( pLvl->nMerge ) return;
- for(is=pLvl->nSeg-1; is>=0; is--){
- int sz = fts5SegmentSize(&pLvl->aSeg[is]);
- if( sz>szPromote ) return;
- fts5StructureExtendLevel(&p->rc, pStruct, iPromote, 1, 1);
- if( p->rc ) return;
- memcpy(pOut->aSeg, &pLvl->aSeg[is], sizeof(Fts5StructureSegment));
- pOut->nSeg++;
- pLvl->nSeg--;
- }
- }
- }
-}
-
-/*
-** A new segment has just been written to level iLvl of index structure
-** pStruct. This function determines if any segments should be promoted
-** as a result. Segments are promoted in two scenarios:
-**
-** a) If the segment just written is smaller than one or more segments
-** within the previous populated level, it is promoted to the previous
-** populated level.
-**
-** b) If the segment just written is larger than the newest segment on
-** the next populated level, then that segment, and any other adjacent
-** segments that are also smaller than the one just written, are
-** promoted.
-**
-** If one or more segments are promoted, the structure object is updated
-** to reflect this.
-*/
-static void fts5StructurePromote(
- Fts5Index *p, /* FTS5 backend object */
- int iLvl, /* Index level just updated */
- Fts5Structure *pStruct /* Index structure */
-){
- if( p->rc==SQLITE_OK ){
- int iTst;
- int iPromote = -1;
- int szPromote = 0; /* Promote anything this size or smaller */
- Fts5StructureSegment *pSeg; /* Segment just written */
- int szSeg; /* Size of segment just written */
- int nSeg = pStruct->aLevel[iLvl].nSeg;
-
- if( nSeg==0 ) return;
- pSeg = &pStruct->aLevel[iLvl].aSeg[pStruct->aLevel[iLvl].nSeg-1];
- szSeg = (1 + pSeg->pgnoLast - pSeg->pgnoFirst);
-
- /* Check for condition (a) */
- for(iTst=iLvl-1; iTst>=0 && pStruct->aLevel[iTst].nSeg==0; iTst--);
- if( iTst>=0 ){
- int i;
- int szMax = 0;
- Fts5StructureLevel *pTst = &pStruct->aLevel[iTst];
- assert( pTst->nMerge==0 );
- for(i=0; i<pTst->nSeg; i++){
- int sz = pTst->aSeg[i].pgnoLast - pTst->aSeg[i].pgnoFirst + 1;
- if( sz>szMax ) szMax = sz;
- }
- if( szMax>=szSeg ){
- /* Condition (a) is true. Promote the newest segment on level
- ** iLvl to level iTst. */
- iPromote = iTst;
- szPromote = szMax;
- }
- }
-
- /* If condition (a) is not met, assume (b) is true. StructurePromoteTo()
- ** is a no-op if it is not. */
- if( iPromote<0 ){
- iPromote = iLvl;
- szPromote = szSeg;
- }
- fts5StructurePromoteTo(p, iPromote, szPromote, pStruct);
- }
-}
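-
-/*
-** For example: if a 3-page segment has just been written to level 2 while
-** level 1 already holds a 10-page segment, condition (a) applies and the new
-** segment is promoted to level 1 (iPromote==1, szPromote==10). Otherwise,
-** condition (b) may promote segments no larger than the new one from
-** higher-numbered levels up to the level just written.
-*/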
-
-
-/*
-** Advance the iterator passed as the only argument. If the end of the
-** doclist-index page is reached, return non-zero.
-*/
-static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){
- Fts5Data *pData = pLvl->pData;
-
- if( pLvl->iOff==0 ){
- assert( pLvl->bEof==0 );
- pLvl->iOff = 1;
- pLvl->iOff += fts5GetVarint32(&pData->p[1], pLvl->iLeafPgno);
- pLvl->iOff += fts5GetVarint(&pData->p[pLvl->iOff], (u64*)&pLvl->iRowid);
- pLvl->iFirstOff = pLvl->iOff;
- }else{
- int iOff;
- for(iOff=pLvl->iOff; iOff<pData->nn; iOff++){
- if( pData->p[iOff] ) break;
- }
-
- if( iOff<pData->nn ){
- i64 iVal;
- pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1;
- iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal);
- pLvl->iRowid += iVal;
- pLvl->iOff = iOff;
- }else{
- pLvl->bEof = 1;
- }
- }
-
- return pLvl->bEof;
-}
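-
-/*
-** As read by fts5DlidxLvlNext() above (and in reverse by fts5DlidxLvlPrev()
-** below), a doclist-index page consists of a flags byte, a varint holding
-** the first leaf page number and a varint holding the first rowid on that
-** leaf. Each subsequent entry is either a 0x00 byte, meaning that the next
-** leaf page contributes no rowid, or a varint delta added to the previous
-** rowid for the next leaf page. Bit 0x0001 of the flags byte is tested by
-** fts5DlidxIterInit() below - when it is clear the page appears to be the
-** topmost level of the doclist-index.
-*/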
-
-/*
-** Advance the iterator passed as the only argument.
-*/
-static int fts5DlidxIterNextR(Fts5Index *p, Fts5DlidxIter *pIter, int iLvl){
- Fts5DlidxLvl *pLvl = &pIter->aLvl[iLvl];
-
- assert( iLvl<pIter->nLvl );
- if( fts5DlidxLvlNext(pLvl) ){
- if( (iLvl+1) < pIter->nLvl ){
- fts5DlidxIterNextR(p, pIter, iLvl+1);
- if( pLvl[1].bEof==0 ){
- fts5DataRelease(pLvl->pData);
- memset(pLvl, 0, sizeof(Fts5DlidxLvl));
- pLvl->pData = fts5DataRead(p,
- FTS5_DLIDX_ROWID(pIter->iSegid, iLvl, pLvl[1].iLeafPgno)
- );
- if( pLvl->pData ) fts5DlidxLvlNext(pLvl);
- }
- }
- }
-
- return pIter->aLvl[0].bEof;
-}
-static int fts5DlidxIterNext(Fts5Index *p, Fts5DlidxIter *pIter){
- return fts5DlidxIterNextR(p, pIter, 0);
-}
-
-/*
-** Each aLvl[] entry of the iterator passed as the only argument has had its
-** pData field set to a doclist-index record by fts5DlidxIterInit(). This
-** function sets up the rest of the iterator so that it points to the first
-** rowid in the doclist-index.
-*/
-static int fts5DlidxIterFirst(Fts5DlidxIter *pIter){
- int i;
- for(i=0; i<pIter->nLvl; i++){
- fts5DlidxLvlNext(&pIter->aLvl[i]);
- }
- return pIter->aLvl[0].bEof;
-}
-
-
-static int fts5DlidxIterEof(Fts5Index *p, Fts5DlidxIter *pIter){
- return p->rc!=SQLITE_OK || pIter->aLvl[0].bEof;
-}
-
-static void fts5DlidxIterLast(Fts5Index *p, Fts5DlidxIter *pIter){
- int i;
-
- /* Advance each level to the last entry on the last page */
- for(i=pIter->nLvl-1; p->rc==SQLITE_OK && i>=0; i--){
- Fts5DlidxLvl *pLvl = &pIter->aLvl[i];
- while( fts5DlidxLvlNext(pLvl)==0 );
- pLvl->bEof = 0;
-
- if( i>0 ){
- Fts5DlidxLvl *pChild = &pLvl[-1];
- fts5DataRelease(pChild->pData);
- memset(pChild, 0, sizeof(Fts5DlidxLvl));
- pChild->pData = fts5DataRead(p,
- FTS5_DLIDX_ROWID(pIter->iSegid, i-1, pLvl->iLeafPgno)
- );
- }
- }
-}
-
-/*
-** Move the iterator passed as the only argument to the previous entry.
-*/
-static int fts5DlidxLvlPrev(Fts5DlidxLvl *pLvl){
- int iOff = pLvl->iOff;
-
- assert( pLvl->bEof==0 );
- if( iOff<=pLvl->iFirstOff ){
- pLvl->bEof = 1;
- }else{
- u8 *a = pLvl->pData->p;
- i64 iVal;
- int iLimit;
- int ii;
- int nZero = 0;
-
- /* Currently iOff points to the first byte of a varint. This block
- ** decrements iOff until it points to the first byte of the previous
- ** varint, taking care not to read any memory locations that occur
- ** before the start of the buffer. */
- iLimit = (iOff>9 ? iOff-9 : 0);
- for(iOff--; iOff>iLimit; iOff--){
- if( (a[iOff-1] & 0x80)==0 ) break;
- }
-
- fts5GetVarint(&a[iOff], (u64*)&iVal);
- pLvl->iRowid -= iVal;
- pLvl->iLeafPgno--;
-
- /* Skip backwards past any 0x00 varints. */
- for(ii=iOff-1; ii>=pLvl->iFirstOff && a[ii]==0x00; ii--){
- nZero++;
- }
- if( ii>=pLvl->iFirstOff && (a[ii] & 0x80) ){
- /* The byte immediately before the last 0x00 byte has the 0x80 bit
- ** set. So the last 0x00 is only a varint 0 if there are 8 more 0x80
- ** bytes before a[ii]. */
- int bZero = 0; /* True if last 0x00 counts */
- if( (ii-8)>=pLvl->iFirstOff ){
- int j;
- for(j=1; j<=8 && (a[ii-j] & 0x80); j++);
- bZero = (j>8);
- }
- if( bZero==0 ) nZero--;
- }
- pLvl->iLeafPgno -= nZero;
- pLvl->iOff = iOff - nZero;
- }
-
- return pLvl->bEof;
-}
-
-static int fts5DlidxIterPrevR(Fts5Index *p, Fts5DlidxIter *pIter, int iLvl){
- Fts5DlidxLvl *pLvl = &pIter->aLvl[iLvl];
-
- assert( iLvl<pIter->nLvl );
- if( fts5DlidxLvlPrev(pLvl) ){
- if( (iLvl+1) < pIter->nLvl ){
- fts5DlidxIterPrevR(p, pIter, iLvl+1);
- if( pLvl[1].bEof==0 ){
- fts5DataRelease(pLvl->pData);
- memset(pLvl, 0, sizeof(Fts5DlidxLvl));
- pLvl->pData = fts5DataRead(p,
- FTS5_DLIDX_ROWID(pIter->iSegid, iLvl, pLvl[1].iLeafPgno)
- );
- if( pLvl->pData ){
- while( fts5DlidxLvlNext(pLvl)==0 );
- pLvl->bEof = 0;
- }
- }
- }
- }
-
- return pIter->aLvl[0].bEof;
-}
-static int fts5DlidxIterPrev(Fts5Index *p, Fts5DlidxIter *pIter){
- return fts5DlidxIterPrevR(p, pIter, 0);
-}
-
-/*
-** Free a doclist-index iterator object allocated by fts5DlidxIterInit().
-*/
-static void fts5DlidxIterFree(Fts5DlidxIter *pIter){
- if( pIter ){
- int i;
- for(i=0; i<pIter->nLvl; i++){
- fts5DataRelease(pIter->aLvl[i].pData);
- }
- sqlite3_free(pIter);
- }
-}
-
-static Fts5DlidxIter *fts5DlidxIterInit(
- Fts5Index *p, /* Fts5 Backend to iterate within */
- int bRev, /* True to iterate in reverse (descending rowid) order */
- int iSegid, /* Segment id */
- int iLeafPg /* Leaf page number to load dlidx for */
-){
- Fts5DlidxIter *pIter = 0;
- int i;
- int bDone = 0;
-
- for(i=0; p->rc==SQLITE_OK && bDone==0; i++){
- int nByte = sizeof(Fts5DlidxIter) + i * sizeof(Fts5DlidxLvl);
- Fts5DlidxIter *pNew;
-
- pNew = (Fts5DlidxIter*)sqlite3_realloc(pIter, nByte);
- if( pNew==0 ){
- p->rc = SQLITE_NOMEM;
- }else{
- i64 iRowid = FTS5_DLIDX_ROWID(iSegid, i, iLeafPg);
- Fts5DlidxLvl *pLvl = &pNew->aLvl[i];
- pIter = pNew;
- memset(pLvl, 0, sizeof(Fts5DlidxLvl));
- pLvl->pData = fts5DataRead(p, iRowid);
- if( pLvl->pData && (pLvl->pData->p[0] & 0x0001)==0 ){
- bDone = 1;
- }
- pIter->nLvl = i+1;
- }
- }
-
- if( p->rc==SQLITE_OK ){
- pIter->iSegid = iSegid;
- if( bRev==0 ){
- fts5DlidxIterFirst(pIter);
- }else{
- fts5DlidxIterLast(p, pIter);
- }
- }
-
- if( p->rc!=SQLITE_OK ){
- fts5DlidxIterFree(pIter);
- pIter = 0;
- }
-
- return pIter;
-}
-
-static i64 fts5DlidxIterRowid(Fts5DlidxIter *pIter){
- return pIter->aLvl[0].iRowid;
-}
-static int fts5DlidxIterPgno(Fts5DlidxIter *pIter){
- return pIter->aLvl[0].iLeafPgno;
-}
-
-/*
-** Load the next leaf page into the segment iterator.
-*/
-static void fts5SegIterNextPage(
- Fts5Index *p, /* FTS5 backend object */
- Fts5SegIter *pIter /* Iterator to advance to next page */
-){
- Fts5Data *pLeaf;
- Fts5StructureSegment *pSeg = pIter->pSeg;
- fts5DataRelease(pIter->pLeaf);
- pIter->iLeafPgno++;
- if( pIter->pNextLeaf ){
- pIter->pLeaf = pIter->pNextLeaf;
- pIter->pNextLeaf = 0;
- }else if( pIter->iLeafPgno<=pSeg->pgnoLast ){
- pIter->pLeaf = fts5DataRead(p,
- FTS5_SEGMENT_ROWID(pSeg->iSegid, pIter->iLeafPgno)
- );
- }else{
- pIter->pLeaf = 0;
- }
- pLeaf = pIter->pLeaf;
-
- if( pLeaf ){
- pIter->iPgidxOff = pLeaf->szLeaf;
- if( fts5LeafIsTermless(pLeaf) ){
- pIter->iEndofDoclist = pLeaf->nn+1;
- }else{
- pIter->iPgidxOff += fts5GetVarint32(&pLeaf->p[pIter->iPgidxOff],
- pIter->iEndofDoclist
- );
- }
- }
-}
-
-/*
-** Argument p points to a buffer containing a varint to be interpreted as a
-** position list size field. Read the varint and return the number of bytes
-** read. Before returning, set *pnSz to the number of bytes in the position
-** list, and *pbDel to true if the delete flag is set, or false otherwise.
-*/
-static int fts5GetPoslistSize(const u8 *p, int *pnSz, int *pbDel){
- int nSz;
- int n = 0;
- fts5FastGetVarint32(p, n, nSz);
- assert_nc( nSz>=0 );
- *pnSz = nSz/2;
- *pbDel = nSz & 0x0001;
- return n;
-}
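-
-/*
-** In other words, the size field decoded above packs the delete flag into
-** bit 0 and the position-list size (in bytes) into the remaining bits, so
-** the varint value is (nSz*2 + bDel). For example, a 30-byte position list
-** with the delete flag clear is stored as varint 60; with the flag set, as
-** varint 61.
-*/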
-
-/*
-** Fts5SegIter.iLeafOffset currently points to the first byte of a
-** position-list size field. Read the value of the field and store it
-** in the following variables:
-**
-** Fts5SegIter.nPos
-** Fts5SegIter.bDel
-**
-** Leave Fts5SegIter.iLeafOffset pointing to the first byte of the
-** position list content (if any).
-*/
-static void fts5SegIterLoadNPos(Fts5Index *p, Fts5SegIter *pIter){
- if( p->rc==SQLITE_OK ){
- int iOff = pIter->iLeafOffset; /* Offset to read at */
- int nSz;
- ASSERT_SZLEAF_OK(pIter->pLeaf);
- fts5FastGetVarint32(pIter->pLeaf->p, iOff, nSz);
- pIter->bDel = (nSz & 0x0001);
- pIter->nPos = nSz>>1;
- pIter->iLeafOffset = iOff;
- }
-}
-
-static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){
- u8 *a = pIter->pLeaf->p; /* Buffer to read data from */
- int iOff = pIter->iLeafOffset;
-
- ASSERT_SZLEAF_OK(pIter->pLeaf);
- if( iOff>=pIter->pLeaf->szLeaf ){
- fts5SegIterNextPage(p, pIter);
- if( pIter->pLeaf==0 ){
- if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT;
- return;
- }
- iOff = 4;
- a = pIter->pLeaf->p;
- }
- iOff += sqlite3Fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid);
- pIter->iLeafOffset = iOff;
-}
-
-/*
-** Fts5SegIter.iLeafOffset currently points to the first byte of the
-** "nSuffix" field of a term. Function parameter nKeep contains the value
-** of the "nPrefix" field (if there was one - it is passed 0 if this is
-** the first term in the segment).
-**
-** This function populates:
-**
-** Fts5SegIter.term
-** Fts5SegIter.rowid
-**
-** accordingly and leaves Fts5SegIter.iLeafOffset pointing to the start of
-** the first position list - the one belonging to document
-** Fts5SegIter.iRowid.
-*/
-static void fts5SegIterLoadTerm(Fts5Index *p, Fts5SegIter *pIter, int nKeep){
- u8 *a = pIter->pLeaf->p; /* Buffer to read data from */
- int iOff = pIter->iLeafOffset; /* Offset to read at */
- int nNew; /* Bytes of new data */
-
- iOff += fts5GetVarint32(&a[iOff], nNew);
- pIter->term.n = nKeep;
- fts5BufferAppendBlob(&p->rc, &pIter->term, nNew, &a[iOff]);
- iOff += nNew;
- pIter->iTermLeafOffset = iOff;
- pIter->iTermLeafPgno = pIter->iLeafPgno;
- pIter->iLeafOffset = iOff;
-
- if( pIter->iPgidxOff>=pIter->pLeaf->nn ){
- pIter->iEndofDoclist = pIter->pLeaf->nn+1;
- }else{
- int nExtra;
- pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], nExtra);
- pIter->iEndofDoclist += nExtra;
- }
-
- fts5SegIterLoadRowid(p, pIter);
-}
-
-/*
-** Initialize the iterator object pIter to iterate through the entries in
-** segment pSeg. The iterator is left pointing to the first entry when
-** this function returns.
-**
-** If an error occurs, Fts5Index.rc is set to an appropriate error code. If
-** an error has already occurred when this function is called, it is a no-op.
-*/
-static void fts5SegIterInit(
- Fts5Index *p, /* FTS index object */
- Fts5StructureSegment *pSeg, /* Description of segment */
- Fts5SegIter *pIter /* Object to populate */
-){
- if( pSeg->pgnoFirst==0 ){
- /* This happens if the segment is being used as an input to an incremental
- ** merge and all data has already been "trimmed". See function
- ** fts5TrimSegments() for details. In this case leave the iterator empty.
- ** The caller will see the (pIter->pLeaf==0) and assume the iterator is
- ** at EOF already. */
- assert( pIter->pLeaf==0 );
- return;
- }
-
- if( p->rc==SQLITE_OK ){
- memset(pIter, 0, sizeof(*pIter));
- pIter->pSeg = pSeg;
- pIter->iLeafPgno = pSeg->pgnoFirst-1;
- fts5SegIterNextPage(p, pIter);
- }
-
- if( p->rc==SQLITE_OK ){
- pIter->iLeafOffset = 4;
- assert_nc( pIter->pLeaf->nn>4 );
- assert( fts5LeafFirstTermOff(pIter->pLeaf)==4 );
- pIter->iPgidxOff = pIter->pLeaf->szLeaf+1;
- fts5SegIterLoadTerm(p, pIter, 0);
- fts5SegIterLoadNPos(p, pIter);
- }
-}
-
-/*
-** This function is only ever called on iterators created by calls to
-** Fts5IndexQuery() with the FTS5INDEX_QUERY_DESC flag set.
-**
-** The iterator is in an unusual state when this function is called: the
-** Fts5SegIter.iLeafOffset variable is set to the offset of the start of
-** the position-list size field for the first relevant rowid on the page.
-** Fts5SegIter.rowid is set, but nPos and bDel are not.
-**
-** This function advances the iterator so that it points to the last
-** relevant rowid on the page and, if necessary, initializes the
-** aRowidOffset[] and iRowidOffset variables. At this point the iterator
-** is in its regular state - Fts5SegIter.iLeafOffset points to the first
-** byte of the position list content associated with said rowid.
-*/
-static void fts5SegIterReverseInitPage(Fts5Index *p, Fts5SegIter *pIter){
- int n = pIter->pLeaf->szLeaf;
- int i = pIter->iLeafOffset;
- u8 *a = pIter->pLeaf->p;
- int iRowidOffset = 0;
-
- if( n>pIter->iEndofDoclist ){
- n = pIter->iEndofDoclist;
- }
-
- ASSERT_SZLEAF_OK(pIter->pLeaf);
- while( 1 ){
- i64 iDelta = 0;
- int nPos;
- int bDummy;
-
- i += fts5GetPoslistSize(&a[i], &nPos, &bDummy);
- i += nPos;
- if( i>=n ) break;
- i += fts5GetVarint(&a[i], (u64*)&iDelta);
- pIter->iRowid += iDelta;
-
- if( iRowidOffset>=pIter->nRowidOffset ){
- int nNew = pIter->nRowidOffset + 8;
- int *aNew = (int*)sqlite3_realloc(pIter->aRowidOffset, nNew*sizeof(int));
- if( aNew==0 ){
- p->rc = SQLITE_NOMEM;
- break;
- }
- pIter->aRowidOffset = aNew;
- pIter->nRowidOffset = nNew;
- }
-
- pIter->aRowidOffset[iRowidOffset++] = pIter->iLeafOffset;
- pIter->iLeafOffset = i;
- }
- pIter->iRowidOffset = iRowidOffset;
- fts5SegIterLoadNPos(p, pIter);
-}
-
-/*
-** This function is called when a reverse (descending rowid) iterator has
-** visited all rowids on its current leaf page. It moves the iterator to
-** the closest preceding page of the same doclist that contains a rowid
-** and leaves it pointing to the last relevant rowid on that page. If no
-** such page exists, the iterator is left at EOF (pIter->pLeaf==0).
-*/
-static void fts5SegIterReverseNewPage(Fts5Index *p, Fts5SegIter *pIter){
- assert( pIter->flags & FTS5_SEGITER_REVERSE );
- assert( pIter->flags & FTS5_SEGITER_ONETERM );
-
- fts5DataRelease(pIter->pLeaf);
- pIter->pLeaf = 0;
- while( p->rc==SQLITE_OK && pIter->iLeafPgno>pIter->iTermLeafPgno ){
- Fts5Data *pNew;
- pIter->iLeafPgno--;
- pNew = fts5DataRead(p, FTS5_SEGMENT_ROWID(
- pIter->pSeg->iSegid, pIter->iLeafPgno
- ));
- if( pNew ){
- /* iTermLeafOffset may be equal to szLeaf if the term is the last
- ** thing on the page - i.e. the first rowid is on the following page.
- ** In this case leave pIter->pLeaf==0 - this iterator is at EOF. */
- if( pIter->iLeafPgno==pIter->iTermLeafPgno
- && pIter->iTermLeafOffset<pNew->szLeaf
- ){
- pIter->pLeaf = pNew;
- pIter->iLeafOffset = pIter->iTermLeafOffset;
- }else{
- int iRowidOff;
- iRowidOff = fts5LeafFirstRowidOff(pNew);
- if( iRowidOff ){
- pIter->pLeaf = pNew;
- pIter->iLeafOffset = iRowidOff;
- }
- }
-
- if( pIter->pLeaf ){
- u8 *a = &pIter->pLeaf->p[pIter->iLeafOffset];
- pIter->iLeafOffset += fts5GetVarint(a, (u64*)&pIter->iRowid);
- break;
- }else{
- fts5DataRelease(pNew);
- }
- }
- }
-
- if( pIter->pLeaf ){
- pIter->iEndofDoclist = pIter->pLeaf->nn+1;
- fts5SegIterReverseInitPage(p, pIter);
- }
-}
-
-/*
-** Return true if the iterator passed as the second argument currently
-** points to a delete marker. A delete marker is an entry with a zero-byte
-** (empty) position list.
-*/
-static int fts5MultiIterIsEmpty(Fts5Index *p, Fts5IndexIter *pIter){
- Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst];
- return (p->rc==SQLITE_OK && pSeg->pLeaf && pSeg->nPos==0);
-}
-
-/*
-** Advance iterator pIter to the next entry.
-**
-** If an error occurs, Fts5Index.rc is set to an appropriate error code. It
-** is not considered an error if the iterator reaches EOF. If an error has
-** already occurred when this function is called, it is a no-op.
-*/
-static void fts5SegIterNext(
- Fts5Index *p, /* FTS5 backend object */
- Fts5SegIter *pIter, /* Iterator to advance */
- int *pbNewTerm /* OUT: Set for new term */
-){
- assert( pbNewTerm==0 || *pbNewTerm==0 );
- if( p->rc==SQLITE_OK ){
- if( pIter->flags & FTS5_SEGITER_REVERSE ){
- assert( pIter->pNextLeaf==0 );
- if( pIter->iRowidOffset>0 ){
- u8 *a = pIter->pLeaf->p;
- int iOff;
- int nPos;
- int bDummy;
- i64 iDelta;
-
- pIter->iRowidOffset--;
- pIter->iLeafOffset = iOff = pIter->aRowidOffset[pIter->iRowidOffset];
- iOff += fts5GetPoslistSize(&a[iOff], &nPos, &bDummy);
- iOff += nPos;
- fts5GetVarint(&a[iOff], (u64*)&iDelta);
- pIter->iRowid -= iDelta;
- fts5SegIterLoadNPos(p, pIter);
- }else{
- fts5SegIterReverseNewPage(p, pIter);
- }
- }else{
- Fts5Data *pLeaf = pIter->pLeaf;
- int iOff;
- int bNewTerm = 0;
- int nKeep = 0;
-
- /* Search for the end of the position list within the current page. */
- u8 *a = pLeaf->p;
- int n = pLeaf->szLeaf;
-
- ASSERT_SZLEAF_OK(pLeaf);
- iOff = pIter->iLeafOffset + pIter->nPos;
-
- if( iOff<n ){
- /* The next entry is on the current page. */
- assert_nc( iOff<=pIter->iEndofDoclist );
- if( iOff>=pIter->iEndofDoclist ){
- bNewTerm = 1;
- if( iOff!=fts5LeafFirstTermOff(pLeaf) ){
- iOff += fts5GetVarint32(&a[iOff], nKeep);
- }
- }else{
- u64 iDelta;
- iOff += sqlite3Fts5GetVarint(&a[iOff], &iDelta);
- pIter->iRowid += iDelta;
- assert_nc( iDelta>0 );
- }
- pIter->iLeafOffset = iOff;
-
- }else if( pIter->pSeg==0 ){
- const u8 *pList = 0;
- const char *zTerm = 0;
- int nList = 0;
- if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){
- sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
- }
- if( pList==0 ){
- fts5DataRelease(pIter->pLeaf);
- pIter->pLeaf = 0;
- }else{
- pIter->pLeaf->p = (u8*)pList;
- pIter->pLeaf->nn = nList;
- pIter->pLeaf->szLeaf = nList;
- pIter->iEndofDoclist = nList+1;
- sqlite3Fts5BufferSet(&p->rc, &pIter->term, strlen(zTerm), (u8*)zTerm);
- pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
- if( pbNewTerm ) *pbNewTerm = 1;
- }
- }else{
- iOff = 0;
- /* Next entry is not on the current page */
- while( iOff==0 ){
- fts5SegIterNextPage(p, pIter);
- pLeaf = pIter->pLeaf;
- if( pLeaf==0 ) break;
- ASSERT_SZLEAF_OK(pLeaf);
- if( (iOff = fts5LeafFirstRowidOff(pLeaf)) && iOff<pLeaf->szLeaf ){
- iOff += sqlite3Fts5GetVarint(&pLeaf->p[iOff], (u64*)&pIter->iRowid);
- pIter->iLeafOffset = iOff;
-
- if( pLeaf->nn>pLeaf->szLeaf ){
- pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
- &pLeaf->p[pLeaf->szLeaf], pIter->iEndofDoclist
- );
- }
-
- }
- else if( pLeaf->nn>pLeaf->szLeaf ){
- pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
- &pLeaf->p[pLeaf->szLeaf], iOff
- );
- pIter->iLeafOffset = iOff;
- pIter->iEndofDoclist = iOff;
- bNewTerm = 1;
- }
- if( iOff>=pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- return;
- }
- }
- }
-
- /* Check if the iterator is now at EOF. If so, return early. */
- if( pIter->pLeaf ){
- if( bNewTerm ){
- if( pIter->flags & FTS5_SEGITER_ONETERM ){
- fts5DataRelease(pIter->pLeaf);
- pIter->pLeaf = 0;
- }else{
- fts5SegIterLoadTerm(p, pIter, nKeep);
- fts5SegIterLoadNPos(p, pIter);
- if( pbNewTerm ) *pbNewTerm = 1;
- }
- }else{
- fts5SegIterLoadNPos(p, pIter);
- }
- }
- }
- }
-}
-
-#define SWAPVAL(T, a, b) { T tmp; tmp=a; a=b; b=tmp; }
-
-/*
-** Iterator pIter currently points to the first rowid in a doclist. This
-** function sets the iterator up so that it iterates in reverse order through
-** the doclist.
-*/
-static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){
- Fts5DlidxIter *pDlidx = pIter->pDlidx;
- Fts5Data *pLast = 0;
- int pgnoLast = 0;
-
- if( pDlidx ){
- int iSegid = pIter->pSeg->iSegid;
- pgnoLast = fts5DlidxIterPgno(pDlidx);
- pLast = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast));
- }else{
- Fts5Data *pLeaf = pIter->pLeaf; /* Current leaf data */
-
- /* Currently, Fts5SegIter.iLeafOffset points to the first byte of
- ** position-list content for the current rowid. Back it up so that it
- ** points to the start of the position-list size field. */
- pIter->iLeafOffset -= sqlite3Fts5GetVarintLen(pIter->nPos*2+pIter->bDel);
-
- /* If this condition is true then the largest rowid for the current
- ** term may not be stored on the current page. So search forward to
- ** see where said rowid really is. */
- if( pIter->iEndofDoclist>=pLeaf->szLeaf ){
- int pgno;
- Fts5StructureSegment *pSeg = pIter->pSeg;
-
- /* The last rowid in the doclist may not be on the current page. Search
- ** forward to find the page containing the last rowid. */
- for(pgno=pIter->iLeafPgno+1; !p->rc && pgno<=pSeg->pgnoLast; pgno++){
- i64 iAbs = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno);
- Fts5Data *pNew = fts5DataRead(p, iAbs);
- if( pNew ){
- int iRowid, bTermless;
- iRowid = fts5LeafFirstRowidOff(pNew);
- bTermless = fts5LeafIsTermless(pNew);
- if( iRowid ){
- SWAPVAL(Fts5Data*, pNew, pLast);
- pgnoLast = pgno;
- }
- fts5DataRelease(pNew);
- if( bTermless==0 ) break;
- }
- }
- }
- }
-
- /* If pLast is NULL at this point, then the last rowid for this doclist
- ** lies on the page currently indicated by the iterator. In this case
- ** pIter->iLeafOffset is already set to point to the position-list size
- ** field associated with the first relevant rowid on the page.
- **
- ** Or, if pLast is non-NULL, then it is the page that contains the last
- ** rowid. In this case configure the iterator so that it points to the
- ** first rowid on this page.
- */
- if( pLast ){
- int iOff;
- fts5DataRelease(pIter->pLeaf);
- pIter->pLeaf = pLast;
- pIter->iLeafPgno = pgnoLast;
- iOff = fts5LeafFirstRowidOff(pLast);
- iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid);
- pIter->iLeafOffset = iOff;
-
- if( fts5LeafIsTermless(pLast) ){
- pIter->iEndofDoclist = pLast->nn+1;
- }else{
- pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast);
- }
-
- }
-
- fts5SegIterReverseInitPage(p, pIter);
-}
-
-/*
-** Iterator pIter currently points to the first rowid of a doclist.
-** There is a doclist-index associated with the final term on the current
-** page. If the current term is the last term on the page, load the
-** doclist-index from disk and initialize an iterator at (pIter->pDlidx).
-*/
-static void fts5SegIterLoadDlidx(Fts5Index *p, Fts5SegIter *pIter){
- int iSeg = pIter->pSeg->iSegid;
- int bRev = (pIter->flags & FTS5_SEGITER_REVERSE);
- Fts5Data *pLeaf = pIter->pLeaf; /* Current leaf data */
-
- assert( pIter->flags & FTS5_SEGITER_ONETERM );
- assert( pIter->pDlidx==0 );
-
- /* Check if the current doclist ends on this page. If it does, return
- ** early without loading the doclist-index (as it belongs to a different
- ** term). */
- if( pIter->iTermLeafPgno==pIter->iLeafPgno
- && pIter->iEndofDoclist<pLeaf->szLeaf
- ){
- return;
- }
-
- pIter->pDlidx = fts5DlidxIterInit(p, bRev, iSeg, pIter->iTermLeafPgno);
-}
-
-#define fts5IndexSkipVarint(a, iOff) { \
- int iEnd = iOff+9; \
- while( (a[iOff++] & 0x80) && iOff<iEnd ); \
-}
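-
-/*
-** The macro above advances iOff past a single varint in buffer a[] without
-** decoding its value. The iEnd bound limits the scan to 9 bytes, the
-** maximum length of an SQLite varint.
-*/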
-
-/*
-** The iterator object passed as the second argument currently contains
-** no valid values except for the Fts5SegIter.pLeaf member variable. This
-** function searches the leaf page for a term matching (pTerm/nTerm).
-**
-** If the specified term is found on the page, then the iterator is left
-** pointing to it. If argument bGe is zero and the term is not found,
-** the iterator is left pointing at EOF.
-**
-** If bGe is non-zero and the specified term is not found, then the
-** iterator is left pointing to the smallest term in the segment that
-** is larger than the specified term, even if this term is not on the
-** current page.
-*/
-static void fts5LeafSeek(
- Fts5Index *p, /* Leave any error code here */
- int bGe, /* True for a >= search */
- Fts5SegIter *pIter, /* Iterator to seek */
- const u8 *pTerm, int nTerm /* Term to search for */
-){
- int iOff;
- const u8 *a = pIter->pLeaf->p;
- int szLeaf = pIter->pLeaf->szLeaf;
- int n = pIter->pLeaf->nn;
-
- int nMatch = 0;
- int nKeep = 0;
- int nNew = 0;
- int iTermOff;
- int iPgidx; /* Current offset in pgidx */
- int bEndOfPage = 0;
-
- assert( p->rc==SQLITE_OK );
-
- iPgidx = szLeaf;
- iPgidx += fts5GetVarint32(&a[iPgidx], iTermOff);
- iOff = iTermOff;
-
- while( 1 ){
-
- /* Figure out how many new bytes are in this term */
- fts5FastGetVarint32(a, iOff, nNew);
- if( nKeep<nMatch ){
- goto search_failed;
- }
-
- assert( nKeep>=nMatch );
- if( nKeep==nMatch ){
- int nCmp;
- int i;
- nCmp = MIN(nNew, nTerm-nMatch);
- for(i=0; i<nCmp; i++){
- if( a[iOff+i]!=pTerm[nMatch+i] ) break;
- }
- nMatch += i;
-
- if( nTerm==nMatch ){
- if( i==nNew ){
- goto search_success;
- }else{
- goto search_failed;
- }
- }else if( i<nNew && a[iOff+i]>pTerm[nMatch] ){
- goto search_failed;
- }
- }
-
- if( iPgidx>=n ){
- bEndOfPage = 1;
- break;
- }
-
- iPgidx += fts5GetVarint32(&a[iPgidx], nKeep);
- iTermOff += nKeep;
- iOff = iTermOff;
-
- /* Read the nKeep field of the next term. */
- fts5FastGetVarint32(a, iOff, nKeep);
- }
-
- search_failed:
- if( bGe==0 ){
- fts5DataRelease(pIter->pLeaf);
- pIter->pLeaf = 0;
- return;
- }else if( bEndOfPage ){
- do {
- fts5SegIterNextPage(p, pIter);
- if( pIter->pLeaf==0 ) return;
- a = pIter->pLeaf->p;
- if( fts5LeafIsTermless(pIter->pLeaf)==0 ){
- fts5GetVarint32(&pIter->pLeaf->p[pIter->pLeaf->szLeaf], iOff);
- if( iOff<4 || iOff>=pIter->pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- }else{
- nKeep = 0;
- iOff += fts5GetVarint32(&a[iOff], nNew);
- break;
- }
- }
- }while( 1 );
- }
-
- search_success:
-
- pIter->iLeafOffset = iOff + nNew;
- pIter->iTermLeafOffset = pIter->iLeafOffset;
- pIter->iTermLeafPgno = pIter->iLeafPgno;
-
- fts5BufferSet(&p->rc, &pIter->term, nKeep, pTerm);
- fts5BufferAppendBlob(&p->rc, &pIter->term, nNew, &a[iOff]);
-
- if( iPgidx>=n ){
- pIter->iEndofDoclist = pIter->pLeaf->nn+1;
- }else{
- int nExtra;
- iPgidx += fts5GetVarint32(&a[iPgidx], nExtra);
- pIter->iEndofDoclist = iTermOff + nExtra;
- }
- pIter->iPgidxOff = iPgidx;
-
- fts5SegIterLoadRowid(p, pIter);
- fts5SegIterLoadNPos(p, pIter);
-}
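-
-/*
-** The "pgidx" array read by fts5LeafSeek() above occupies the region of a
-** leaf page between offsets szLeaf and nn. Its first varint is the absolute
-** offset of the first term stored on the page; each subsequent varint gives
-** the offset of the next term as a delta from the previous one.
-** fts5LeafFirstTermOff() and fts5SegIterLoadTerm() read the same array.
-*/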
-
-/*
-** Initialize the object pIter to point to term pTerm/nTerm within segment
-** pSeg. If there is no such term in the index, the iterator is set to EOF.
-**
-** If an error occurs, Fts5Index.rc is set to an appropriate error code. If
-** an error has already occurred when this function is called, it is a no-op.
-*/
-static void fts5SegIterSeekInit(
- Fts5Index *p, /* FTS5 backend */
- Fts5Buffer *pBuf, /* Buffer to use for loading pages */
- const u8 *pTerm, int nTerm, /* Term to seek to */
- int flags, /* Mask of FTS5INDEX_XXX flags */
- Fts5StructureSegment *pSeg, /* Description of segment */
- Fts5SegIter *pIter /* Object to populate */
-){
- int iPg = 1;
- int bGe = (flags & FTS5INDEX_QUERY_SCAN);
- int bDlidx = 0; /* True if there is a doclist-index */
-
- static int nCall = 0;
- nCall++;
-
- assert( bGe==0 || (flags & FTS5INDEX_QUERY_DESC)==0 );
- assert( pTerm && nTerm );
- memset(pIter, 0, sizeof(*pIter));
- pIter->pSeg = pSeg;
-
- /* This block sets stack variable iPg to the leaf page number that may
- ** contain term (pTerm/nTerm), if it is present in the segment. */
- if( p->pIdxSelect==0 ){
- Fts5Config *pConfig = p->pConfig;
- fts5IndexPrepareStmt(p, &p->pIdxSelect, sqlite3_mprintf(
- "SELECT pgno FROM '%q'.'%q_idx' WHERE "
- "segid=? AND term<=? ORDER BY term DESC LIMIT 1",
- pConfig->zDb, pConfig->zName
- ));
- }
- if( p->rc ) return;
- sqlite3_bind_int(p->pIdxSelect, 1, pSeg->iSegid);
- sqlite3_bind_blob(p->pIdxSelect, 2, pTerm, nTerm, SQLITE_STATIC);
- if( SQLITE_ROW==sqlite3_step(p->pIdxSelect) ){
- i64 val = sqlite3_column_int(p->pIdxSelect, 0);
- iPg = (int)(val>>1);
- bDlidx = (val & 0x0001);
- }
- p->rc = sqlite3_reset(p->pIdxSelect);
-
- if( iPg<pSeg->pgnoFirst ){
- iPg = pSeg->pgnoFirst;
- bDlidx = 0;
- }
-
- pIter->iLeafPgno = iPg - 1;
- fts5SegIterNextPage(p, pIter);
-
- if( pIter->pLeaf ){
- fts5LeafSeek(p, bGe, pIter, pTerm, nTerm);
- }
-
- if( p->rc==SQLITE_OK && bGe==0 ){
- pIter->flags |= FTS5_SEGITER_ONETERM;
- if( pIter->pLeaf ){
- if( flags & FTS5INDEX_QUERY_DESC ){
- pIter->flags |= FTS5_SEGITER_REVERSE;
- }
- if( bDlidx ){
- fts5SegIterLoadDlidx(p, pIter);
- }
- if( flags & FTS5INDEX_QUERY_DESC ){
- fts5SegIterReverse(p, pIter);
- }
- }
- }
-
- /* Either:
- **
- ** 1) an error has occurred, or
- ** 2) the iterator points to EOF, or
- ** 3) the iterator points to an entry with term (pTerm/nTerm), or
- ** 4) the FTS5INDEX_QUERY_SCAN flag was set and the iterator points
- ** to an entry with a term greater than or equal to (pTerm/nTerm).
- */
- assert( p->rc!=SQLITE_OK /* 1 */
- || pIter->pLeaf==0 /* 2 */
- || fts5BufferCompareBlob(&pIter->term, pTerm, nTerm)==0 /* 3 */
- || (bGe && fts5BufferCompareBlob(&pIter->term, pTerm, nTerm)>0) /* 4 */
- );
-}
-
-/*
-** Initialize the object pIter to point to term pTerm/nTerm within the
-** in-memory hash table. If there is no such term in the hash-table, the
-** iterator is set to EOF.
-**
-** If an error occurs, Fts5Index.rc is set to an appropriate error code. If
-** an error has already occurred when this function is called, it is a no-op.
-*/
-static void fts5SegIterHashInit(
- Fts5Index *p, /* FTS5 backend */
- const u8 *pTerm, int nTerm, /* Term to seek to */
- int flags, /* Mask of FTS5INDEX_XXX flags */
- Fts5SegIter *pIter /* Object to populate */
-){
- const u8 *pList = 0;
- int nList = 0;
- const u8 *z = 0;
- int n = 0;
-
- assert( p->pHash );
- assert( p->rc==SQLITE_OK );
-
- if( pTerm==0 || (flags & FTS5INDEX_QUERY_SCAN) ){
- p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm);
- sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList);
- n = (z ? strlen((const char*)z) : 0);
- }else{
- pIter->flags |= FTS5_SEGITER_ONETERM;
- sqlite3Fts5HashQuery(p->pHash, (const char*)pTerm, nTerm, &pList, &nList);
- z = pTerm;
- n = nTerm;
- }
-
- if( pList ){
- Fts5Data *pLeaf;
- sqlite3Fts5BufferSet(&p->rc, &pIter->term, n, z);
- pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data));
- if( pLeaf==0 ) return;
- pLeaf->p = (u8*)pList;
- pLeaf->nn = pLeaf->szLeaf = nList;
- pIter->pLeaf = pLeaf;
- pIter->iLeafOffset = fts5GetVarint(pLeaf->p, (u64*)&pIter->iRowid);
- pIter->iEndofDoclist = pLeaf->nn+1;
-
- if( flags & FTS5INDEX_QUERY_DESC ){
- pIter->flags |= FTS5_SEGITER_REVERSE;
- fts5SegIterReverseInitPage(p, pIter);
- }else{
- fts5SegIterLoadNPos(p, pIter);
- }
- }
-}
-
-/*
-** Zero the iterator passed as the only argument.
-*/
-static void fts5SegIterClear(Fts5SegIter *pIter){
- fts5BufferFree(&pIter->term);
- fts5DataRelease(pIter->pLeaf);
- fts5DataRelease(pIter->pNextLeaf);
- fts5DlidxIterFree(pIter->pDlidx);
- sqlite3_free(pIter->aRowidOffset);
- memset(pIter, 0, sizeof(Fts5SegIter));
-}
-
-#ifdef SQLITE_DEBUG
-
-/*
-** This function is used as part of the big assert() procedure implemented by
-** fts5AssertMultiIterSetup(). It ensures that the result currently stored
-** in *pRes is the correct result of comparing the current positions of the
-** two iterators.
-*/
-static void fts5AssertComparisonResult(
- Fts5IndexIter *pIter,
- Fts5SegIter *p1,
- Fts5SegIter *p2,
- Fts5CResult *pRes
-){
- int i1 = p1 - pIter->aSeg;
- int i2 = p2 - pIter->aSeg;
-
- if( p1->pLeaf || p2->pLeaf ){
- if( p1->pLeaf==0 ){
- assert( pRes->iFirst==i2 );
- }else if( p2->pLeaf==0 ){
- assert( pRes->iFirst==i1 );
- }else{
- int nMin = MIN(p1->term.n, p2->term.n);
- int res = memcmp(p1->term.p, p2->term.p, nMin);
- if( res==0 ) res = p1->term.n - p2->term.n;
-
- if( res==0 ){
- assert( pRes->bTermEq==1 );
- assert( p1->iRowid!=p2->iRowid );
- res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : 1;
- }else{
- assert( pRes->bTermEq==0 );
- }
-
- if( res<0 ){
- assert( pRes->iFirst==i1 );
- }else{
- assert( pRes->iFirst==i2 );
- }
- }
- }
-}
-
-/*
-** This function is a no-op unless SQLITE_DEBUG is defined when this module
-** is compiled. In that case, this function is essentially an assert()
-** statement used to verify that the contents of the pIter->aFirst[] array
-** are correct.
-*/
-static void fts5AssertMultiIterSetup(Fts5Index *p, Fts5IndexIter *pIter){
- if( p->rc==SQLITE_OK ){
- Fts5SegIter *pFirst = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- int i;
-
- assert( (pFirst->pLeaf==0)==pIter->bEof );
-
- /* Check that pIter->iSwitchRowid is set correctly. */
- for(i=0; i<pIter->nSeg; i++){
- Fts5SegIter *p1 = &pIter->aSeg[i];
- assert( p1==pFirst
- || p1->pLeaf==0
- || fts5BufferCompare(&pFirst->term, &p1->term)
- || p1->iRowid==pIter->iSwitchRowid
- || (p1->iRowid<pIter->iSwitchRowid)==pIter->bRev
- );
- }
-
- for(i=0; i<pIter->nSeg; i+=2){
- Fts5SegIter *p1 = &pIter->aSeg[i];
- Fts5SegIter *p2 = &pIter->aSeg[i+1];
- Fts5CResult *pRes = &pIter->aFirst[(pIter->nSeg + i) / 2];
- fts5AssertComparisonResult(pIter, p1, p2, pRes);
- }
-
- for(i=1; i<(pIter->nSeg / 2); i+=2){
- Fts5SegIter *p1 = &pIter->aSeg[ pIter->aFirst[i*2].iFirst ];
- Fts5SegIter *p2 = &pIter->aSeg[ pIter->aFirst[i*2+1].iFirst ];
- Fts5CResult *pRes = &pIter->aFirst[i];
- fts5AssertComparisonResult(pIter, p1, p2, pRes);
- }
- }
-}
-#else
-# define fts5AssertMultiIterSetup(x,y)
-#endif
-
-/*
-** Do the comparison necessary to populate pIter->aFirst[iOut].
-**
-** If the returned value is non-zero, then it is the index of an entry
-** in the pIter->aSeg[] array that is (a) not at EOF, and (b) pointing
-** to a key that is a duplicate of another, higher priority,
-** segment-iterator in the pIter->aSeg[] array.
-*/
-static int fts5MultiIterDoCompare(Fts5IndexIter *pIter, int iOut){
- int i1; /* Index of left-hand Fts5SegIter */
- int i2; /* Index of right-hand Fts5SegIter */
- int iRes;
- Fts5SegIter *p1; /* Left-hand Fts5SegIter */
- Fts5SegIter *p2; /* Right-hand Fts5SegIter */
- Fts5CResult *pRes = &pIter->aFirst[iOut];
-
- assert( iOut<pIter->nSeg && iOut>0 );
- assert( pIter->bRev==0 || pIter->bRev==1 );
-
- if( iOut>=(pIter->nSeg/2) ){
- i1 = (iOut - pIter->nSeg/2) * 2;
- i2 = i1 + 1;
- }else{
- i1 = pIter->aFirst[iOut*2].iFirst;
- i2 = pIter->aFirst[iOut*2+1].iFirst;
- }
- p1 = &pIter->aSeg[i1];
- p2 = &pIter->aSeg[i2];
-
- pRes->bTermEq = 0;
- if( p1->pLeaf==0 ){ /* If p1 is at EOF */
- iRes = i2;
- }else if( p2->pLeaf==0 ){ /* If p2 is at EOF */
- iRes = i1;
- }else{
- int res = fts5BufferCompare(&p1->term, &p2->term);
- if( res==0 ){
- assert( i2>i1 );
- assert( i2!=0 );
- pRes->bTermEq = 1;
- if( p1->iRowid==p2->iRowid ){
- p1->bDel = p2->bDel;
- return i2;
- }
- res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1;
- }
- assert( res!=0 );
- if( res<0 ){
- iRes = i1;
- }else{
- iRes = i2;
- }
- }
-
- pRes->iFirst = iRes;
- return 0;
-}
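-
-/*
-** The aFirst[] array manipulated above is a tournament tree over the aSeg[]
-** sub-iterators (nSeg is always a power of two - see fts5MultiIterAlloc()
-** below). Entries aFirst[nSeg/2] through aFirst[nSeg-1] record the result of
-** comparing adjacent pairs (aSeg[0],aSeg[1]), (aSeg[2],aSeg[3]) and so on,
-** while each aFirst[i] with i<nSeg/2 compares the winners recorded in
-** aFirst[2*i] and aFirst[2*i+1]. aFirst[1] therefore identifies the
-** sub-iterator currently pointing to the smallest entry in iteration order;
-** aFirst[0] is unused.
-*/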
-
-/*
-** Move the seg-iter so that it points to the first rowid on page iLeafPgno.
-** It is an error if leaf iLeafPgno does not exist or contains no rowids.
-*/
-static void fts5SegIterGotoPage(
- Fts5Index *p, /* FTS5 backend object */
- Fts5SegIter *pIter, /* Iterator to advance */
- int iLeafPgno
-){
- assert( iLeafPgno>pIter->iLeafPgno );
-
- if( iLeafPgno>pIter->pSeg->pgnoLast ){
- p->rc = FTS5_CORRUPT;
- }else{
- fts5DataRelease(pIter->pNextLeaf);
- pIter->pNextLeaf = 0;
- pIter->iLeafPgno = iLeafPgno-1;
- fts5SegIterNextPage(p, pIter);
- assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno );
-
- if( p->rc==SQLITE_OK ){
- int iOff;
- u8 *a = pIter->pLeaf->p;
- int n = pIter->pLeaf->szLeaf;
-
- iOff = fts5LeafFirstRowidOff(pIter->pLeaf);
- if( iOff<4 || iOff>=n ){
- p->rc = FTS5_CORRUPT;
- }else{
- iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid);
- pIter->iLeafOffset = iOff;
- fts5SegIterLoadNPos(p, pIter);
- }
- }
- }
-}
-
-/*
-** Advance the iterator passed as the second argument until it is at or
-** past rowid iFrom. Regardless of the value of iFrom, the iterator is
-** always advanced at least once.
-*/
-static void fts5SegIterNextFrom(
- Fts5Index *p, /* FTS5 backend object */
- Fts5SegIter *pIter, /* Iterator to advance */
- i64 iMatch /* Advance iterator at least this far */
-){
- int bRev = (pIter->flags & FTS5_SEGITER_REVERSE);
- Fts5DlidxIter *pDlidx = pIter->pDlidx;
- int iLeafPgno = pIter->iLeafPgno;
- int bMove = 1;
-
- assert( pIter->flags & FTS5_SEGITER_ONETERM );
- assert( pIter->pDlidx );
- assert( pIter->pLeaf );
-
- if( bRev==0 ){
- while( !fts5DlidxIterEof(p, pDlidx) && iMatch>fts5DlidxIterRowid(pDlidx) ){
- iLeafPgno = fts5DlidxIterPgno(pDlidx);
- fts5DlidxIterNext(p, pDlidx);
- }
- assert_nc( iLeafPgno>=pIter->iLeafPgno || p->rc );
- if( iLeafPgno>pIter->iLeafPgno ){
- fts5SegIterGotoPage(p, pIter, iLeafPgno);
- bMove = 0;
- }
- }else{
- assert( pIter->pNextLeaf==0 );
- assert( iMatch<pIter->iRowid );
- while( !fts5DlidxIterEof(p, pDlidx) && iMatch<fts5DlidxIterRowid(pDlidx) ){
- fts5DlidxIterPrev(p, pDlidx);
- }
- iLeafPgno = fts5DlidxIterPgno(pDlidx);
-
- assert( fts5DlidxIterEof(p, pDlidx) || iLeafPgno<=pIter->iLeafPgno );
-
- if( iLeafPgno<pIter->iLeafPgno ){
- pIter->iLeafPgno = iLeafPgno+1;
- fts5SegIterReverseNewPage(p, pIter);
- bMove = 0;
- }
- }
-
- do{
- if( bMove ) fts5SegIterNext(p, pIter, 0);
- if( pIter->pLeaf==0 ) break;
- if( bRev==0 && pIter->iRowid>=iMatch ) break;
- if( bRev!=0 && pIter->iRowid<=iMatch ) break;
- bMove = 1;
- }while( p->rc==SQLITE_OK );
-}
-
-
-/*
-** Free the iterator object passed as the second argument.
-*/
-static void fts5MultiIterFree(Fts5Index *p, Fts5IndexIter *pIter){
- if( pIter ){
- int i;
- for(i=0; i<pIter->nSeg; i++){
- fts5SegIterClear(&pIter->aSeg[i]);
- }
- fts5StructureRelease(pIter->pStruct);
- fts5BufferFree(&pIter->poslist);
- sqlite3_free(pIter);
- }
-}
-
-static void fts5MultiIterAdvanced(
- Fts5Index *p, /* FTS5 backend to iterate within */
- Fts5IndexIter *pIter, /* Iterator to update aFirst[] array for */
- int iChanged, /* Index of sub-iterator just advanced */
- int iMinset /* Minimum entry in aFirst[] to set */
-){
- int i;
- for(i=(pIter->nSeg+iChanged)/2; i>=iMinset && p->rc==SQLITE_OK; i=i/2){
- int iEq;
- if( (iEq = fts5MultiIterDoCompare(pIter, i)) ){
- fts5SegIterNext(p, &pIter->aSeg[iEq], 0);
- i = pIter->nSeg + iEq;
- }
- }
-}
-
-/*
-** Sub-iterator iChanged of iterator pIter has just been advanced. It still
-** points to the same term though - just a different rowid. This function
-** attempts to update the contents of the pIter->aFirst[] accordingly.
-** If it does so successfully, 0 is returned. Otherwise 1.
-**
-** If non-zero is returned, the caller should call fts5MultiIterAdvanced()
-** on the iterator instead. That function does the same as this one, except
-** that it deals with more complicated cases as well.
-*/
-static int fts5MultiIterAdvanceRowid(
- Fts5Index *p, /* FTS5 backend to iterate within */
- Fts5IndexIter *pIter, /* Iterator to update aFirst[] array for */
- int iChanged /* Index of sub-iterator just advanced */
-){
- Fts5SegIter *pNew = &pIter->aSeg[iChanged];
-
- if( pNew->iRowid==pIter->iSwitchRowid
- || (pNew->iRowid<pIter->iSwitchRowid)==pIter->bRev
- ){
- int i;
- Fts5SegIter *pOther = &pIter->aSeg[iChanged ^ 0x0001];
- pIter->iSwitchRowid = pIter->bRev ? SMALLEST_INT64 : LARGEST_INT64;
- for(i=(pIter->nSeg+iChanged)/2; 1; i=i/2){
- Fts5CResult *pRes = &pIter->aFirst[i];
-
- assert( pNew->pLeaf );
- assert( pRes->bTermEq==0 || pOther->pLeaf );
-
- if( pRes->bTermEq ){
- if( pNew->iRowid==pOther->iRowid ){
- return 1;
- }else if( (pOther->iRowid>pNew->iRowid)==pIter->bRev ){
- pIter->iSwitchRowid = pOther->iRowid;
- pNew = pOther;
- }else if( (pOther->iRowid>pIter->iSwitchRowid)==pIter->bRev ){
- pIter->iSwitchRowid = pOther->iRowid;
- }
- }
- pRes->iFirst = (pNew - pIter->aSeg);
- if( i==1 ) break;
-
- pOther = &pIter->aSeg[ pIter->aFirst[i ^ 0x0001].iFirst ];
- }
- }
-
- return 0;
-}
-
-/*
-** Set the pIter->bEof variable based on the state of the sub-iterators.
-*/
-static void fts5MultiIterSetEof(Fts5IndexIter *pIter){
- Fts5SegIter *pSeg = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- pIter->bEof = pSeg->pLeaf==0;
- pIter->iSwitchRowid = pSeg->iRowid;
-}
-
-/*
-** Move the iterator to the next entry.
-**
-** If an error occurs, an error code is left in Fts5Index.rc. It is not
-** considered an error if the iterator reaches EOF, or if it is already at
-** EOF when this function is called.
-*/
-static void fts5MultiIterNext(
- Fts5Index *p,
- Fts5IndexIter *pIter,
- int bFrom, /* True if argument iFrom is valid */
- i64 iFrom /* Advance at least as far as this */
-){
- if( p->rc==SQLITE_OK ){
- int bUseFrom = bFrom;
- do {
- int iFirst = pIter->aFirst[1].iFirst;
- int bNewTerm = 0;
- Fts5SegIter *pSeg = &pIter->aSeg[iFirst];
- assert( p->rc==SQLITE_OK );
- if( bUseFrom && pSeg->pDlidx ){
- fts5SegIterNextFrom(p, pSeg, iFrom);
- }else{
- fts5SegIterNext(p, pSeg, &bNewTerm);
- }
-
- if( pSeg->pLeaf==0 || bNewTerm
- || fts5MultiIterAdvanceRowid(p, pIter, iFirst)
- ){
- fts5MultiIterAdvanced(p, pIter, iFirst, 1);
- fts5MultiIterSetEof(pIter);
- }
- fts5AssertMultiIterSetup(p, pIter);
-
- bUseFrom = 0;
- }while( pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter) );
- }
-}
-
-static Fts5IndexIter *fts5MultiIterAlloc(
- Fts5Index *p, /* FTS5 backend to iterate within */
- int nSeg
-){
- Fts5IndexIter *pNew;
- int nSlot; /* Power of two >= nSeg */
-
- for(nSlot=2; nSlot<nSeg; nSlot=nSlot*2);
- pNew = fts5IdxMalloc(p,
- sizeof(Fts5IndexIter) + /* pNew */
- sizeof(Fts5SegIter) * (nSlot-1) + /* pNew->aSeg[] */
- sizeof(Fts5CResult) * nSlot /* pNew->aFirst[] */
- );
- if( pNew ){
- pNew->nSeg = nSlot;
- pNew->aFirst = (Fts5CResult*)&pNew->aSeg[nSlot];
- pNew->pIndex = p;
- }
- return pNew;
-}
-
-/*
-** Allocate a new Fts5IndexIter object.
-**
-** The new object will be used to iterate through data in structure pStruct.
-** If iLevel is -ve, then all data in all segments is merged. Or, if iLevel
-** is zero or greater, data from the first nSegment segments on level iLevel
-** is merged.
-**
-** The iterator initially points to the first term/rowid entry in the
-** iterated data.
-*/
-static void fts5MultiIterNew(
- Fts5Index *p, /* FTS5 backend to iterate within */
- Fts5Structure *pStruct, /* Structure of specific index */
- int bSkipEmpty, /* True to ignore delete-keys */
- int flags, /* FTS5INDEX_QUERY_XXX flags */
- const u8 *pTerm, int nTerm, /* Term to seek to (or NULL/0) */
- int iLevel, /* Level to iterate (-1 for all) */
- int nSegment, /* Number of segments to merge (iLevel>=0) */
- Fts5IndexIter **ppOut /* New object */
-){
- int nSeg = 0; /* Number of segment-iters in use */
- int iIter = 0; /* Index of next unused aSeg[] slot */
- int iSeg; /* Used to iterate through segments */
- Fts5Buffer buf = {0,0,0}; /* Buffer used by fts5SegIterSeekInit() */
- Fts5StructureLevel *pLvl;
- Fts5IndexIter *pNew;
-
- assert( (pTerm==0 && nTerm==0) || iLevel<0 );
-
- /* Allocate space for the new multi-seg-iterator. */
- if( p->rc==SQLITE_OK ){
- if( iLevel<0 ){
- assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
- nSeg = pStruct->nSegment;
- nSeg += (p->pHash ? 1 : 0);
- }else{
- nSeg = MIN(pStruct->aLevel[iLevel].nSeg, nSegment);
- }
- }
- *ppOut = pNew = fts5MultiIterAlloc(p, nSeg);
- if( pNew==0 ) return;
- pNew->bRev = (0!=(flags & FTS5INDEX_QUERY_DESC));
- pNew->bSkipEmpty = bSkipEmpty;
- pNew->pStruct = pStruct;
- fts5StructureRef(pStruct);
-
- /* Initialize each of the component segment iterators. */
- if( iLevel<0 ){
- Fts5StructureLevel *pEnd = &pStruct->aLevel[pStruct->nLevel];
- if( p->pHash ){
- /* Add a segment iterator for the current contents of the hash table. */
- Fts5SegIter *pIter = &pNew->aSeg[iIter++];
- fts5SegIterHashInit(p, pTerm, nTerm, flags, pIter);
- }
- for(pLvl=&pStruct->aLevel[0]; pLvl<pEnd; pLvl++){
- for(iSeg=pLvl->nSeg-1; iSeg>=0; iSeg--){
- Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
- Fts5SegIter *pIter = &pNew->aSeg[iIter++];
- if( pTerm==0 ){
- fts5SegIterInit(p, pSeg, pIter);
- }else{
- fts5SegIterSeekInit(p, &buf, pTerm, nTerm, flags, pSeg, pIter);
- }
- }
- }
- }else{
- pLvl = &pStruct->aLevel[iLevel];
- for(iSeg=nSeg-1; iSeg>=0; iSeg--){
- fts5SegIterInit(p, &pLvl->aSeg[iSeg], &pNew->aSeg[iIter++]);
- }
- }
- assert( iIter==nSeg );
-
-  /* If the above was successful, each component iterator now points
- ** to the first entry in its segment. In this case initialize the
- ** aFirst[] array. Or, if an error has occurred, free the iterator
- ** object and set the output variable to NULL. */
- if( p->rc==SQLITE_OK ){
- for(iIter=pNew->nSeg-1; iIter>0; iIter--){
- int iEq;
- if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){
- fts5SegIterNext(p, &pNew->aSeg[iEq], 0);
- fts5MultiIterAdvanced(p, pNew, iEq, iIter);
- }
- }
- fts5MultiIterSetEof(pNew);
- fts5AssertMultiIterSetup(p, pNew);
-
- if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){
- fts5MultiIterNext(p, pNew, 0, 0);
- }
- }else{
- fts5MultiIterFree(p, pNew);
- *ppOut = 0;
- }
- fts5BufferFree(&buf);
-}
-
-/*
-** Create an Fts5IndexIter that iterates through the doclist provided
-** as the second argument.
-*/
-static void fts5MultiIterNew2(
- Fts5Index *p, /* FTS5 backend to iterate within */
- Fts5Data *pData, /* Doclist to iterate through */
- int bDesc, /* True for descending rowid order */
- Fts5IndexIter **ppOut /* New object */
-){
- Fts5IndexIter *pNew;
- pNew = fts5MultiIterAlloc(p, 2);
- if( pNew ){
- Fts5SegIter *pIter = &pNew->aSeg[1];
-
- pNew->bFiltered = 1;
- pIter->flags = FTS5_SEGITER_ONETERM;
- if( pData->szLeaf>0 ){
- pIter->pLeaf = pData;
- pIter->iLeafOffset = fts5GetVarint(pData->p, (u64*)&pIter->iRowid);
- pIter->iEndofDoclist = pData->nn;
- pNew->aFirst[1].iFirst = 1;
- if( bDesc ){
- pNew->bRev = 1;
- pIter->flags |= FTS5_SEGITER_REVERSE;
- fts5SegIterReverseInitPage(p, pIter);
- }else{
- fts5SegIterLoadNPos(p, pIter);
- }
- pData = 0;
- }else{
- pNew->bEof = 1;
- }
-
- *ppOut = pNew;
- }
-
- fts5DataRelease(pData);
-}
-
-/*
-** Return true if the iterator is at EOF or if an error has occurred.
-** False otherwise.
-*/
-static int fts5MultiIterEof(Fts5Index *p, Fts5IndexIter *pIter){
- assert( p->rc
- || (pIter->aSeg[ pIter->aFirst[1].iFirst ].pLeaf==0)==pIter->bEof
- );
- return (p->rc || pIter->bEof);
-}
-
-/*
-** Return the rowid of the entry that the iterator currently points
-** to. If the iterator points to EOF when this function is called the
-** results are undefined.
-*/
-static i64 fts5MultiIterRowid(Fts5IndexIter *pIter){
- assert( pIter->aSeg[ pIter->aFirst[1].iFirst ].pLeaf );
- return pIter->aSeg[ pIter->aFirst[1].iFirst ].iRowid;
-}
-
-/*
-** Move the iterator to the next entry at or following iMatch.
-*/
-static void fts5MultiIterNextFrom(
- Fts5Index *p,
- Fts5IndexIter *pIter,
- i64 iMatch
-){
- while( 1 ){
- i64 iRowid;
- fts5MultiIterNext(p, pIter, 1, iMatch);
- if( fts5MultiIterEof(p, pIter) ) break;
- iRowid = fts5MultiIterRowid(pIter);
- if( pIter->bRev==0 && iRowid>=iMatch ) break;
- if( pIter->bRev!=0 && iRowid<=iMatch ) break;
- }
-}
-
-/*
-** Return a pointer to a buffer containing the term associated with the
-** entry that the iterator currently points to.
-*/
-static const u8 *fts5MultiIterTerm(Fts5IndexIter *pIter, int *pn){
- Fts5SegIter *p = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- *pn = p->term.n;
- return p->term.p;
-}
-
-static void fts5ChunkIterate(
- Fts5Index *p, /* Index object */
- Fts5SegIter *pSeg, /* Poslist of this iterator */
- void *pCtx, /* Context pointer for xChunk callback */
- void (*xChunk)(Fts5Index*, void*, const u8*, int)
-){
- int nRem = pSeg->nPos; /* Number of bytes still to come */
- Fts5Data *pData = 0;
- u8 *pChunk = &pSeg->pLeaf->p[pSeg->iLeafOffset];
- int nChunk = MIN(nRem, pSeg->pLeaf->szLeaf - pSeg->iLeafOffset);
- int pgno = pSeg->iLeafPgno;
- int pgnoSave = 0;
-
- if( (pSeg->flags & FTS5_SEGITER_REVERSE)==0 ){
- pgnoSave = pgno+1;
- }
-
- while( 1 ){
- xChunk(p, pCtx, pChunk, nChunk);
- nRem -= nChunk;
- fts5DataRelease(pData);
- if( nRem<=0 ){
- break;
- }else{
- pgno++;
- pData = fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno));
- if( pData==0 ) break;
- pChunk = &pData->p[4];
- nChunk = MIN(nRem, pData->szLeaf - 4);
- if( pgno==pgnoSave ){
- assert( pSeg->pNextLeaf==0 );
- pSeg->pNextLeaf = pData;
- pData = 0;
- }
- }
- }
-}
-
-
-
-/*
-** Allocate a new segment-id for the structure pStruct. The new segment
-** id must be between 1 and 65535 inclusive, and must not be used by
-** any currently existing segment. If a free segment id cannot be found,
-** SQLITE_FULL is returned.
-**
-** If an error has already occurred, this function is a no-op. 0 is
-** returned in this case.
-*/
-static int fts5AllocateSegid(Fts5Index *p, Fts5Structure *pStruct){
- int iSegid = 0;
-
- if( p->rc==SQLITE_OK ){
- if( pStruct->nSegment>=FTS5_MAX_SEGMENT ){
- p->rc = SQLITE_FULL;
- }else{
- while( iSegid==0 ){
- int iLvl, iSeg;
- sqlite3_randomness(sizeof(u32), (void*)&iSegid);
- iSegid = iSegid & ((1 << FTS5_DATA_ID_B)-1);
- for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
- for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
- if( iSegid==pStruct->aLevel[iLvl].aSeg[iSeg].iSegid ){
- iSegid = 0;
- }
- }
- }
- }
- }
- }
-
- return iSegid;
-}
-
-/*
-** Discard all data currently cached in the hash-tables.
-*/
-static void fts5IndexDiscardData(Fts5Index *p){
- assert( p->pHash || p->nPendingData==0 );
- if( p->pHash ){
- sqlite3Fts5HashClear(p->pHash);
- p->nPendingData = 0;
- }
-}
-
-/*
-** Return the size of the prefix, in bytes, that buffer (nNew/pNew) shares
-** with buffer (nOld/pOld).
-*/
-static int fts5PrefixCompress(
- int nOld, const u8 *pOld,
- int nNew, const u8 *pNew
-){
- int i;
- assert( fts5BlobCompare(pOld, nOld, pNew, nNew)<0 );
- for(i=0; i<nOld; i++){
- if( pOld[i]!=pNew[i] ) break;
- }
- return i;
-}
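-
-/* Illustrative example (hypothetical terms): comparing pOld="abc" with
-** pNew="abd" returns 2, the length of the shared prefix. The caller
-** (fts5WriteAppendTerm() below) then writes the prefix length (2) and the
-** suffix length (1) as varints followed by the single suffix byte "d",
-** instead of repeating the whole term.
-*/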
-
-static void fts5WriteDlidxClear(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- int bFlush /* If true, write dlidx to disk */
-){
- int i;
- assert( bFlush==0 || (pWriter->nDlidx>0 && pWriter->aDlidx[0].buf.n>0) );
- for(i=0; i<pWriter->nDlidx; i++){
- Fts5DlidxWriter *pDlidx = &pWriter->aDlidx[i];
- if( pDlidx->buf.n==0 ) break;
- if( bFlush ){
- assert( pDlidx->pgno!=0 );
- fts5DataWrite(p,
- FTS5_DLIDX_ROWID(pWriter->iSegid, i, pDlidx->pgno),
- pDlidx->buf.p, pDlidx->buf.n
- );
- }
- sqlite3Fts5BufferZero(&pDlidx->buf);
- pDlidx->bPrevValid = 0;
- }
-}
-
-/*
-** Grow the pWriter->aDlidx[] array to at least nLvl elements in size.
-** Any new array elements are zeroed before returning.
-*/
-static int fts5WriteDlidxGrow(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- int nLvl
-){
- if( p->rc==SQLITE_OK && nLvl>=pWriter->nDlidx ){
- Fts5DlidxWriter *aDlidx = (Fts5DlidxWriter*)sqlite3_realloc(
- pWriter->aDlidx, sizeof(Fts5DlidxWriter) * nLvl
- );
- if( aDlidx==0 ){
- p->rc = SQLITE_NOMEM;
- }else{
- int nByte = sizeof(Fts5DlidxWriter) * (nLvl - pWriter->nDlidx);
- memset(&aDlidx[pWriter->nDlidx], 0, nByte);
- pWriter->aDlidx = aDlidx;
- pWriter->nDlidx = nLvl;
- }
- }
- return p->rc;
-}
-
-/*
-** If the current doclist-index accumulating in pWriter->aDlidx[] is large
-** enough, flush it to disk and return 1. Otherwise discard it and return
-** zero.
-*/
-static int fts5WriteFlushDlidx(Fts5Index *p, Fts5SegWriter *pWriter){
- int bFlag = 0;
-
- /* If there were FTS5_MIN_DLIDX_SIZE or more empty leaf pages written
- ** to the database, also write the doclist-index to disk. */
- if( pWriter->aDlidx[0].buf.n>0 && pWriter->nEmpty>=FTS5_MIN_DLIDX_SIZE ){
- bFlag = 1;
- }
- fts5WriteDlidxClear(p, pWriter, bFlag);
- pWriter->nEmpty = 0;
- return bFlag;
-}
-
-/*
-** This function is called whenever processing of the doclist for the
-** last term on leaf page (pWriter->iBtPage) is completed.
-**
-** The doclist-index for that term is currently stored in-memory within the
-** Fts5SegWriter.aDlidx[] array. If it is large enough, this function
-** writes it out to disk. Or, if it is too small to bother with, discards
-** it.
-**
-** Fts5SegWriter.btterm currently contains the first term on page iBtPage.
-*/
-static void fts5WriteFlushBtree(Fts5Index *p, Fts5SegWriter *pWriter){
- int bFlag;
-
- assert( pWriter->iBtPage || pWriter->nEmpty==0 );
- if( pWriter->iBtPage==0 ) return;
- bFlag = fts5WriteFlushDlidx(p, pWriter);
-
- if( p->rc==SQLITE_OK ){
- const char *z = (pWriter->btterm.n>0?(const char*)pWriter->btterm.p:"");
- /* The following was already done in fts5WriteInit(): */
- /* sqlite3_bind_int(p->pIdxWriter, 1, pWriter->iSegid); */
- sqlite3_bind_blob(p->pIdxWriter, 2, z, pWriter->btterm.n, SQLITE_STATIC);
- sqlite3_bind_int64(p->pIdxWriter, 3, bFlag + ((i64)pWriter->iBtPage<<1));
- sqlite3_step(p->pIdxWriter);
- p->rc = sqlite3_reset(p->pIdxWriter);
- }
- pWriter->iBtPage = 0;
-}
-
-/*
-** This is called once for each leaf page that contains at least one term,
-** except for the leftmost leaf in the segment. Argument (nTerm/pTerm) is the
-** split-key - a term that
-** is larger than all terms written to earlier leaves, and equal to or
-** smaller than the first term on the new leaf.
-**
-** If an error occurs, an error code is left in Fts5Index.rc. If an error
-** has already occurred when this function is called, it is a no-op.
-*/
-static void fts5WriteBtreeTerm(
- Fts5Index *p, /* FTS5 backend object */
- Fts5SegWriter *pWriter, /* Writer object */
- int nTerm, const u8 *pTerm /* First term on new page */
-){
- fts5WriteFlushBtree(p, pWriter);
- fts5BufferSet(&p->rc, &pWriter->btterm, nTerm, pTerm);
- pWriter->iBtPage = pWriter->writer.pgno;
-}
-
-/*
-** This function is called when flushing a leaf page that contains no
-** terms at all to disk.
-*/
-static void fts5WriteBtreeNoTerm(
- Fts5Index *p, /* FTS5 backend object */
- Fts5SegWriter *pWriter /* Writer object */
-){
- /* If there were no rowids on the leaf page either and the doclist-index
- ** has already been started, append an 0x00 byte to it. */
- if( pWriter->bFirstRowidInPage && pWriter->aDlidx[0].buf.n>0 ){
- Fts5DlidxWriter *pDlidx = &pWriter->aDlidx[0];
- assert( pDlidx->bPrevValid );
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, 0);
- }
-
- /* Increment the "number of sequential leaves without a term" counter. */
- pWriter->nEmpty++;
-}
-
-static i64 fts5DlidxExtractFirstRowid(Fts5Buffer *pBuf){
- i64 iRowid;
- int iOff;
-
- iOff = 1 + fts5GetVarint(&pBuf->p[1], (u64*)&iRowid);
- fts5GetVarint(&pBuf->p[iOff], (u64*)&iRowid);
- return iRowid;
-}
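-
-/* Sketch of the doclist-index page consumed above, as assembled by
-** fts5WriteDlidxAppend() below (byte values hypothetical): a flags byte,
-** then a page-number varint, then the first rowid as a varint, followed by
-** rowid deltas. For the bytes {0x00, 0x05, 0x14, ...},
-** fts5DlidxExtractFirstRowid() skips the flags byte, skips the page number
-** (5) and returns the first rowid, 20 (0x14).
-*/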
-
-/*
-** Rowid iRowid has just been appended to the current leaf page. It is the
-** first on the page. This function appends an appropriate entry to the current
-** doclist-index.
-*/
-static void fts5WriteDlidxAppend(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- i64 iRowid
-){
- int i;
- int bDone = 0;
-
- for(i=0; p->rc==SQLITE_OK && bDone==0; i++){
- i64 iVal;
- Fts5DlidxWriter *pDlidx = &pWriter->aDlidx[i];
-
- if( pDlidx->buf.n>=p->pConfig->pgsz ){
- /* The current doclist-index page is full. Write it to disk and push
- ** a copy of iRowid (which will become the first rowid on the next
- ** doclist-index leaf page) up into the next level of the b-tree
- ** hierarchy. If the node being flushed is currently the root node,
- ** also push its first rowid upwards. */
- pDlidx->buf.p[0] = 0x01; /* Not the root node */
- fts5DataWrite(p,
- FTS5_DLIDX_ROWID(pWriter->iSegid, i, pDlidx->pgno),
- pDlidx->buf.p, pDlidx->buf.n
- );
- fts5WriteDlidxGrow(p, pWriter, i+2);
- pDlidx = &pWriter->aDlidx[i];
- if( p->rc==SQLITE_OK && pDlidx[1].buf.n==0 ){
- i64 iFirst = fts5DlidxExtractFirstRowid(&pDlidx->buf);
-
- /* This was the root node. Push its first rowid up to the new root. */
- pDlidx[1].pgno = pDlidx->pgno;
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx[1].buf, 0);
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx[1].buf, pDlidx->pgno);
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx[1].buf, iFirst);
- pDlidx[1].bPrevValid = 1;
- pDlidx[1].iPrev = iFirst;
- }
-
- sqlite3Fts5BufferZero(&pDlidx->buf);
- pDlidx->bPrevValid = 0;
- pDlidx->pgno++;
- }else{
- bDone = 1;
- }
-
- if( pDlidx->bPrevValid ){
- iVal = iRowid - pDlidx->iPrev;
- }else{
- i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno);
- assert( pDlidx->buf.n==0 );
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, !bDone);
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, iPgno);
- iVal = iRowid;
- }
-
- sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, iVal);
- pDlidx->bPrevValid = 1;
- pDlidx->iPrev = iRowid;
- }
-}
-
-static void fts5WriteFlushLeaf(Fts5Index *p, Fts5SegWriter *pWriter){
- static const u8 zero[] = { 0x00, 0x00, 0x00, 0x00 };
- Fts5PageWriter *pPage = &pWriter->writer;
- i64 iRowid;
-
- assert( (pPage->pgidx.n==0)==(pWriter->bFirstTermInPage) );
-
- /* Set the szLeaf header field. */
- assert( 0==fts5GetU16(&pPage->buf.p[2]) );
- fts5PutU16(&pPage->buf.p[2], pPage->buf.n);
-
- if( pWriter->bFirstTermInPage ){
- /* No term was written to this page. */
- assert( pPage->pgidx.n==0 );
- fts5WriteBtreeNoTerm(p, pWriter);
- }else{
-    /* Append the pgidx to the page buffer. */
- fts5BufferAppendBlob(&p->rc, &pPage->buf, pPage->pgidx.n, pPage->pgidx.p);
- }
-
- /* Write the page out to disk */
- iRowid = FTS5_SEGMENT_ROWID(pWriter->iSegid, pPage->pgno);
- fts5DataWrite(p, iRowid, pPage->buf.p, pPage->buf.n);
-
- /* Initialize the next page. */
- fts5BufferZero(&pPage->buf);
- fts5BufferZero(&pPage->pgidx);
- fts5BufferAppendBlob(&p->rc, &pPage->buf, 4, zero);
- pPage->iPrevPgidx = 0;
- pPage->pgno++;
-
- /* Increase the leaves written counter */
- pWriter->nLeafWritten++;
-
- /* The new leaf holds no terms or rowids */
- pWriter->bFirstTermInPage = 1;
- pWriter->bFirstRowidInPage = 1;
-}
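-
-/* Rough sketch of the leaf page image flushed above: a 4-byte header in
-** which bytes 0-1 hold the offset of the first rowid on the page (0 if
-** there is none; set by fts5WriteAppendRowid() below) and bytes 2-3 hold
-** szLeaf, the size of the term/rowid area. The term and rowid data follows
-** the header, and the page-index (pgidx) is appended after it, so the total
-** page size is szLeaf plus the size of the pgidx.
-*/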
-
-/*
-** Append term pTerm/nTerm to the segment being written by the writer passed
-** as the second argument.
-**
-** If an error occurs, set the Fts5Index.rc error code. If an error has
-** already occurred, this function is a no-op.
-*/
-static void fts5WriteAppendTerm(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- int nTerm, const u8 *pTerm
-){
- int nPrefix; /* Bytes of prefix compression for term */
- Fts5PageWriter *pPage = &pWriter->writer;
- Fts5Buffer *pPgidx = &pWriter->writer.pgidx;
-
- assert( p->rc==SQLITE_OK );
- assert( pPage->buf.n>=4 );
- assert( pPage->buf.n>4 || pWriter->bFirstTermInPage );
-
- /* If the current leaf page is full, flush it to disk. */
- if( (pPage->buf.n + pPgidx->n + nTerm + 2)>=p->pConfig->pgsz ){
- if( pPage->buf.n>4 ){
- fts5WriteFlushLeaf(p, pWriter);
- }
- fts5BufferGrow(&p->rc, &pPage->buf, nTerm+FTS5_DATA_PADDING);
- }
-
- /* TODO1: Updating pgidx here. */
- pPgidx->n += sqlite3Fts5PutVarint(
- &pPgidx->p[pPgidx->n], pPage->buf.n - pPage->iPrevPgidx
- );
- pPage->iPrevPgidx = pPage->buf.n;
-#if 0
- fts5PutU16(&pPgidx->p[pPgidx->n], pPage->buf.n);
- pPgidx->n += 2;
-#endif
-
- if( pWriter->bFirstTermInPage ){
- nPrefix = 0;
- if( pPage->pgno!=1 ){
- /* This is the first term on a leaf that is not the leftmost leaf in
- ** the segment b-tree. In this case it is necessary to add a term to
- ** the b-tree hierarchy that is (a) larger than the largest term
- ** already written to the segment and (b) smaller than or equal to
- ** this term. In other words, a prefix of (pTerm/nTerm) that is one
- ** byte longer than the longest prefix (pTerm/nTerm) shares with the
- ** previous term.
- **
- ** Usually, the previous term is available in pPage->term. The exception
- ** is if this is the first term written in an incremental-merge step.
- ** In this case the previous term is not available, so just write a
- ** copy of (pTerm/nTerm) into the parent node. This is slightly
- ** inefficient, but still correct. */
- int n = nTerm;
- if( pPage->term.n ){
- n = 1 + fts5PrefixCompress(pPage->term.n, pPage->term.p, nTerm, pTerm);
- }
- fts5WriteBtreeTerm(p, pWriter, n, pTerm);
- pPage = &pWriter->writer;
- }
- }else{
- nPrefix = fts5PrefixCompress(pPage->term.n, pPage->term.p, nTerm, pTerm);
- fts5BufferAppendVarint(&p->rc, &pPage->buf, nPrefix);
- }
-
- /* Append the number of bytes of new data, then the term data itself
- ** to the page. */
- fts5BufferAppendVarint(&p->rc, &pPage->buf, nTerm - nPrefix);
- fts5BufferAppendBlob(&p->rc, &pPage->buf, nTerm - nPrefix, &pTerm[nPrefix]);
-
- /* Update the Fts5PageWriter.term field. */
- fts5BufferSet(&p->rc, &pPage->term, nTerm, pTerm);
- pWriter->bFirstTermInPage = 0;
-
- pWriter->bFirstRowidInPage = 0;
- pWriter->bFirstRowidInDoclist = 1;
-
- assert( p->rc || (pWriter->nDlidx>0 && pWriter->aDlidx[0].buf.n==0) );
- pWriter->aDlidx[0].pgno = pPage->pgno;
-}
-
-/*
-** Append a rowid and position-list size field to the writer's output.
-*/
-static void fts5WriteAppendRowid(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- i64 iRowid,
- int nPos
-){
- if( p->rc==SQLITE_OK ){
- Fts5PageWriter *pPage = &pWriter->writer;
-
- if( (pPage->buf.n + pPage->pgidx.n)>=p->pConfig->pgsz ){
- fts5WriteFlushLeaf(p, pWriter);
- }
-
- /* If this is to be the first rowid written to the page, set the
- ** rowid-pointer in the page-header. Also append a value to the dlidx
- ** buffer, in case a doclist-index is required. */
- if( pWriter->bFirstRowidInPage ){
- fts5PutU16(pPage->buf.p, pPage->buf.n);
- fts5WriteDlidxAppend(p, pWriter, iRowid);
- }
-
- /* Write the rowid. */
- if( pWriter->bFirstRowidInDoclist || pWriter->bFirstRowidInPage ){
- fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid);
- }else{
- assert( p->rc || iRowid>pWriter->iPrevRowid );
- fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid - pWriter->iPrevRowid);
- }
- pWriter->iPrevRowid = iRowid;
- pWriter->bFirstRowidInDoclist = 0;
- pWriter->bFirstRowidInPage = 0;
-
- fts5BufferAppendVarint(&p->rc, &pPage->buf, nPos);
- }
-}
-
-static void fts5WriteAppendPoslistData(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- const u8 *aData,
- int nData
-){
- Fts5PageWriter *pPage = &pWriter->writer;
- const u8 *a = aData;
- int n = nData;
-
- assert( p->pConfig->pgsz>0 );
- while( p->rc==SQLITE_OK
- && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz
- ){
- int nReq = p->pConfig->pgsz - pPage->buf.n - pPage->pgidx.n;
- int nCopy = 0;
- while( nCopy<nReq ){
- i64 dummy;
- nCopy += fts5GetVarint(&a[nCopy], (u64*)&dummy);
- }
- fts5BufferAppendBlob(&p->rc, &pPage->buf, nCopy, a);
- a += nCopy;
- n -= nCopy;
- fts5WriteFlushLeaf(p, pWriter);
- }
- if( n>0 ){
- fts5BufferAppendBlob(&p->rc, &pPage->buf, n, a);
- }
-}
-
-/*
-** Flush any data cached by the writer object to the database. Free any
-** allocations associated with the writer.
-*/
-static void fts5WriteFinish(
- Fts5Index *p,
- Fts5SegWriter *pWriter, /* Writer object */
- int *pnLeaf /* OUT: Number of leaf pages in b-tree */
-){
- int i;
- Fts5PageWriter *pLeaf = &pWriter->writer;
- if( p->rc==SQLITE_OK ){
- assert( pLeaf->pgno>=1 );
- if( pLeaf->buf.n>4 ){
- fts5WriteFlushLeaf(p, pWriter);
- }
- *pnLeaf = pLeaf->pgno-1;
- fts5WriteFlushBtree(p, pWriter);
- }
- fts5BufferFree(&pLeaf->term);
- fts5BufferFree(&pLeaf->buf);
- fts5BufferFree(&pLeaf->pgidx);
- fts5BufferFree(&pWriter->btterm);
-
- for(i=0; i<pWriter->nDlidx; i++){
- sqlite3Fts5BufferFree(&pWriter->aDlidx[i].buf);
- }
- sqlite3_free(pWriter->aDlidx);
-}
-
-static void fts5WriteInit(
- Fts5Index *p,
- Fts5SegWriter *pWriter,
- int iSegid
-){
- const int nBuffer = p->pConfig->pgsz + FTS5_DATA_PADDING;
-
- memset(pWriter, 0, sizeof(Fts5SegWriter));
- pWriter->iSegid = iSegid;
-
- fts5WriteDlidxGrow(p, pWriter, 1);
- pWriter->writer.pgno = 1;
- pWriter->bFirstTermInPage = 1;
- pWriter->iBtPage = 1;
-
- /* Grow the two buffers to pgsz + padding bytes in size. */
- fts5BufferGrow(&p->rc, &pWriter->writer.pgidx, nBuffer);
- fts5BufferGrow(&p->rc, &pWriter->writer.buf, nBuffer);
-
- if( p->pIdxWriter==0 ){
- Fts5Config *pConfig = p->pConfig;
- fts5IndexPrepareStmt(p, &p->pIdxWriter, sqlite3_mprintf(
- "INSERT INTO '%q'.'%q_idx'(segid,term,pgno) VALUES(?,?,?)",
- pConfig->zDb, pConfig->zName
- ));
- }
-
- if( p->rc==SQLITE_OK ){
- /* Initialize the 4-byte leaf-page header to 0x00. */
- memset(pWriter->writer.buf.p, 0, 4);
- pWriter->writer.buf.n = 4;
-
- /* Bind the current output segment id to the index-writer. This is an
- ** optimization over binding the same value over and over as rows are
- ** inserted into %_idx by the current writer. */
- sqlite3_bind_int(p->pIdxWriter, 1, pWriter->iSegid);
- }
-}
-
-/*
-** Iterator pIter was used to iterate through the input segments of an
-** incremental merge operation. This function is called if the incremental
-** merge step has finished but the input has not been completely exhausted.
-*/
-static void fts5TrimSegments(Fts5Index *p, Fts5IndexIter *pIter){
- int i;
- Fts5Buffer buf;
- memset(&buf, 0, sizeof(Fts5Buffer));
- for(i=0; i<pIter->nSeg; i++){
- Fts5SegIter *pSeg = &pIter->aSeg[i];
- if( pSeg->pSeg==0 ){
- /* no-op */
- }else if( pSeg->pLeaf==0 ){
-      /* All keys from this input segment have been transferred to the output.
- ** Set both the first and last page-numbers to 0 to indicate that the
- ** segment is now empty. */
- pSeg->pSeg->pgnoLast = 0;
- pSeg->pSeg->pgnoFirst = 0;
- }else{
- int iOff = pSeg->iTermLeafOffset; /* Offset on new first leaf page */
- i64 iLeafRowid;
- Fts5Data *pData;
- int iId = pSeg->pSeg->iSegid;
- u8 aHdr[4] = {0x00, 0x00, 0x00, 0x00};
-
- iLeafRowid = FTS5_SEGMENT_ROWID(iId, pSeg->iTermLeafPgno);
- pData = fts5DataRead(p, iLeafRowid);
- if( pData ){
- fts5BufferZero(&buf);
- fts5BufferGrow(&p->rc, &buf, pData->nn);
- fts5BufferAppendBlob(&p->rc, &buf, sizeof(aHdr), aHdr);
- fts5BufferAppendVarint(&p->rc, &buf, pSeg->term.n);
- fts5BufferAppendBlob(&p->rc, &buf, pSeg->term.n, pSeg->term.p);
- fts5BufferAppendBlob(&p->rc, &buf, pData->szLeaf-iOff, &pData->p[iOff]);
- if( p->rc==SQLITE_OK ){
- /* Set the szLeaf field */
- fts5PutU16(&buf.p[2], buf.n);
- }
-
- /* Set up the new page-index array */
- fts5BufferAppendVarint(&p->rc, &buf, 4);
- if( pSeg->iLeafPgno==pSeg->iTermLeafPgno
- && pSeg->iEndofDoclist<pData->szLeaf
- ){
- int nDiff = pData->szLeaf - pSeg->iEndofDoclist;
- fts5BufferAppendVarint(&p->rc, &buf, buf.n - 1 - nDiff - 4);
- fts5BufferAppendBlob(&p->rc, &buf,
- pData->nn - pSeg->iPgidxOff, &pData->p[pSeg->iPgidxOff]
- );
- }
-
- fts5DataRelease(pData);
- pSeg->pSeg->pgnoFirst = pSeg->iTermLeafPgno;
- fts5DataDelete(p, FTS5_SEGMENT_ROWID(iId, 1), iLeafRowid);
- fts5DataWrite(p, iLeafRowid, buf.p, buf.n);
- }
- }
- }
- fts5BufferFree(&buf);
-}
-
-static void fts5MergeChunkCallback(
- Fts5Index *p,
- void *pCtx,
- const u8 *pChunk, int nChunk
-){
- Fts5SegWriter *pWriter = (Fts5SegWriter*)pCtx;
- fts5WriteAppendPoslistData(p, pWriter, pChunk, nChunk);
-}
-
-/*
-** Merge input segments from level iLvl of structure *ppStruct into a
-** segment on level iLvl+1, continuing any incremental merge already
-** underway on that level. If pnRem is not NULL, stop once roughly *pnRem
-** output leaf pages have been written and decrement *pnRem by the number
-** actually written.
-*/
-static void fts5IndexMergeLevel(
- Fts5Index *p, /* FTS5 backend object */
-  Fts5Structure **ppStruct,       /* IN/OUT: Structure of index */
- int iLvl, /* Level to read input from */
- int *pnRem /* Write up to this many output leaves */
-){
- Fts5Structure *pStruct = *ppStruct;
- Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
- Fts5StructureLevel *pLvlOut;
- Fts5IndexIter *pIter = 0; /* Iterator to read input data */
- int nRem = pnRem ? *pnRem : 0; /* Output leaf pages left to write */
- int nInput; /* Number of input segments */
- Fts5SegWriter writer; /* Writer object */
- Fts5StructureSegment *pSeg; /* Output segment */
- Fts5Buffer term;
- int bOldest; /* True if the output segment is the oldest */
-
- assert( iLvl<pStruct->nLevel );
- assert( pLvl->nMerge<=pLvl->nSeg );
-
- memset(&writer, 0, sizeof(Fts5SegWriter));
- memset(&term, 0, sizeof(Fts5Buffer));
- if( pLvl->nMerge ){
- pLvlOut = &pStruct->aLevel[iLvl+1];
- assert( pLvlOut->nSeg>0 );
- nInput = pLvl->nMerge;
- pSeg = &pLvlOut->aSeg[pLvlOut->nSeg-1];
-
- fts5WriteInit(p, &writer, pSeg->iSegid);
- writer.writer.pgno = pSeg->pgnoLast+1;
- writer.iBtPage = 0;
- }else{
- int iSegid = fts5AllocateSegid(p, pStruct);
-
- /* Extend the Fts5Structure object as required to ensure the output
- ** segment exists. */
- if( iLvl==pStruct->nLevel-1 ){
- fts5StructureAddLevel(&p->rc, ppStruct);
- pStruct = *ppStruct;
- }
- fts5StructureExtendLevel(&p->rc, pStruct, iLvl+1, 1, 0);
- if( p->rc ) return;
- pLvl = &pStruct->aLevel[iLvl];
- pLvlOut = &pStruct->aLevel[iLvl+1];
-
- fts5WriteInit(p, &writer, iSegid);
-
- /* Add the new segment to the output level */
- pSeg = &pLvlOut->aSeg[pLvlOut->nSeg];
- pLvlOut->nSeg++;
- pSeg->pgnoFirst = 1;
- pSeg->iSegid = iSegid;
- pStruct->nSegment++;
-
- /* Read input from all segments in the input level */
- nInput = pLvl->nSeg;
- }
- bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2);
-
- assert( iLvl>=0 );
- for(fts5MultiIterNew(p, pStruct, 0, 0, 0, 0, iLvl, nInput, &pIter);
- fts5MultiIterEof(p, pIter)==0;
- fts5MultiIterNext(p, pIter, 0, 0)
- ){
- Fts5SegIter *pSegIter = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- int nPos; /* position-list size field value */
- int nTerm;
- const u8 *pTerm;
-
- /* Check for key annihilation. */
- if( pSegIter->nPos==0 && (bOldest || pSegIter->bDel==0) ) continue;
-
- pTerm = fts5MultiIterTerm(pIter, &nTerm);
- if( nTerm!=term.n || memcmp(pTerm, term.p, nTerm) ){
- if( pnRem && writer.nLeafWritten>nRem ){
- break;
- }
-
- /* This is a new term. Append a term to the output segment. */
- fts5WriteAppendTerm(p, &writer, nTerm, pTerm);
- fts5BufferSet(&p->rc, &term, nTerm, pTerm);
- }
-
- /* Append the rowid to the output */
- /* WRITEPOSLISTSIZE */
- nPos = pSegIter->nPos*2 + pSegIter->bDel;
- fts5WriteAppendRowid(p, &writer, fts5MultiIterRowid(pIter), nPos);
-
- /* Append the position-list data to the output */
- fts5ChunkIterate(p, pSegIter, (void*)&writer, fts5MergeChunkCallback);
- }
-
-  /* Flush the last leaf page to disk and set the output segment's
-  ** last leaf page number. */
- fts5WriteFinish(p, &writer, &pSeg->pgnoLast);
-
- if( fts5MultiIterEof(p, pIter) ){
- int i;
-
- /* Remove the redundant segments from the %_data table */
- for(i=0; i<nInput; i++){
- fts5DataRemoveSegment(p, pLvl->aSeg[i].iSegid);
- }
-
- /* Remove the redundant segments from the input level */
- if( pLvl->nSeg!=nInput ){
- int nMove = (pLvl->nSeg - nInput) * sizeof(Fts5StructureSegment);
- memmove(pLvl->aSeg, &pLvl->aSeg[nInput], nMove);
- }
- pStruct->nSegment -= nInput;
- pLvl->nSeg -= nInput;
- pLvl->nMerge = 0;
- if( pSeg->pgnoLast==0 ){
- pLvlOut->nSeg--;
- pStruct->nSegment--;
- }
- }else{
- assert( pSeg->pgnoLast>0 );
- fts5TrimSegments(p, pIter);
- pLvl->nMerge = nInput;
- }
-
- fts5MultiIterFree(p, pIter);
- fts5BufferFree(&term);
- if( pnRem ) *pnRem -= writer.nLeafWritten;
-}
-
-/*
-** Do up to nPg pages of automerge work on the index.
-*/
-static void fts5IndexMerge(
- Fts5Index *p, /* FTS5 backend object */
- Fts5Structure **ppStruct, /* IN/OUT: Current structure of index */
- int nPg /* Pages of work to do */
-){
- int nRem = nPg;
- Fts5Structure *pStruct = *ppStruct;
- while( nRem>0 && p->rc==SQLITE_OK ){
- int iLvl; /* To iterate through levels */
- int iBestLvl = 0; /* Level offering the most input segments */
- int nBest = 0; /* Number of input segments on best level */
-
- /* Set iBestLvl to the level to read input segments from. */
- assert( pStruct->nLevel>0 );
- for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
- Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
- if( pLvl->nMerge ){
- if( pLvl->nMerge>nBest ){
- iBestLvl = iLvl;
- nBest = pLvl->nMerge;
- }
- break;
- }
- if( pLvl->nSeg>nBest ){
- nBest = pLvl->nSeg;
- iBestLvl = iLvl;
- }
- }
-
- /* If nBest is still 0, then the index must be empty. */
-#ifdef SQLITE_DEBUG
- for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){
- assert( pStruct->aLevel[iLvl].nSeg==0 );
- }
-#endif
-
- if( nBest<p->pConfig->nAutomerge
- && pStruct->aLevel[iBestLvl].nMerge==0
- ){
- break;
- }
- fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
- if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){
- fts5StructurePromote(p, iBestLvl+1, pStruct);
- }
- }
- *ppStruct = pStruct;
-}
-
-/*
-** A total of nLeaf leaf pages of data has just been flushed to a level-0
-** segment. This function updates the write-counter accordingly and, if
-** necessary, performs incremental merge work.
-**
-** If an error occurs, set the Fts5Index.rc error code. If an error has
-** already occurred, this function is a no-op.
-*/
-static void fts5IndexAutomerge(
- Fts5Index *p, /* FTS5 backend object */
- Fts5Structure **ppStruct, /* IN/OUT: Current structure of index */
- int nLeaf /* Number of output leaves just written */
-){
- if( p->rc==SQLITE_OK && p->pConfig->nAutomerge>0 ){
- Fts5Structure *pStruct = *ppStruct;
- u64 nWrite; /* Initial value of write-counter */
- int nWork; /* Number of work-quanta to perform */
- int nRem; /* Number of leaf pages left to write */
-
- /* Update the write-counter. While doing so, set nWork. */
- nWrite = pStruct->nWriteCounter;
- nWork = (int)(((nWrite + nLeaf) / p->nWorkUnit) - (nWrite / p->nWorkUnit));
- pStruct->nWriteCounter += nLeaf;
- nRem = (int)(p->nWorkUnit * nWork * pStruct->nLevel);
-
- fts5IndexMerge(p, ppStruct, nRem);
- }
-}
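-
-/* Worked example of the write-counter arithmetic above (values
-** hypothetical): assuming p->nWorkUnit==64, nWrite==120 and nLeaf==20,
-** nWork = (140/64) - (120/64) = 2 - 1 = 1, so nRem = 64 * 1 * nLevel leaf
-** pages of merge work are requested from fts5IndexMerge().
-*/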
-
-static void fts5IndexCrisismerge(
- Fts5Index *p, /* FTS5 backend object */
- Fts5Structure **ppStruct /* IN/OUT: Current structure of index */
-){
- const int nCrisis = p->pConfig->nCrisisMerge;
- Fts5Structure *pStruct = *ppStruct;
- int iLvl = 0;
-
- assert( p->rc!=SQLITE_OK || pStruct->nLevel>0 );
- while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){
- fts5IndexMergeLevel(p, &pStruct, iLvl, 0);
- assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) );
- fts5StructurePromote(p, iLvl+1, pStruct);
- iLvl++;
- }
- *ppStruct = pStruct;
-}
-
-static int fts5IndexReturn(Fts5Index *p){
- int rc = p->rc;
- p->rc = SQLITE_OK;
- return rc;
-}
-
-typedef struct Fts5FlushCtx Fts5FlushCtx;
-struct Fts5FlushCtx {
- Fts5Index *pIdx;
- Fts5SegWriter writer;
-};
-
-/*
-** Buffer aBuf[] contains a list of varints, all small enough to fit
-** in a 32-bit integer. Return the size of the largest prefix of this
-** list nMax bytes or less in size.
-*/
-static int fts5PoslistPrefix(const u8 *aBuf, int nMax){
- int ret;
- u32 dummy;
- ret = fts5GetVarint32(aBuf, dummy);
- if( ret<nMax ){
- while( 1 ){
- int i = fts5GetVarint32(&aBuf[ret], dummy);
- if( (ret + i) > nMax ) break;
- ret += i;
- }
- }
- return ret;
-}
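-
-/* Illustrative example (hypothetical buffer): for aBuf = {0x03, 0x81, 0x02,
-** 0x05} (the varints 3, 130 and 5) and nMax==3, the first varint occupies
-** one byte and the second two more, while including the third would exceed
-** nMax, so 3 is returned.
-*/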
-
-#define fts5BufferSafeAppendBlob(pBuf, pBlob, nBlob) { \
- assert( (pBuf)->nSpace>=((pBuf)->n+nBlob) ); \
- memcpy(&(pBuf)->p[(pBuf)->n], pBlob, nBlob); \
- (pBuf)->n += nBlob; \
-}
-
-#define fts5BufferSafeAppendVarint(pBuf, iVal) { \
- (pBuf)->n += sqlite3Fts5PutVarint(&(pBuf)->p[(pBuf)->n], (iVal)); \
- assert( (pBuf)->nSpace>=(pBuf)->n ); \
-}
-
-/*
-** Flush the contents of the in-memory hash table to a new level-0
-** segment on disk. Also update the corresponding structure record.
-**
-** If an error occurs, set the Fts5Index.rc error code. If an error has
-** already occurred, this function is a no-op.
-*/
-static void fts5FlushOneHash(Fts5Index *p){
- Fts5Hash *pHash = p->pHash;
- Fts5Structure *pStruct;
- int iSegid;
- int pgnoLast = 0; /* Last leaf page number in segment */
-
- /* Obtain a reference to the index structure and allocate a new segment-id
- ** for the new level-0 segment. */
- pStruct = fts5StructureRead(p);
- iSegid = fts5AllocateSegid(p, pStruct);
-
- if( iSegid ){
- const int pgsz = p->pConfig->pgsz;
-
- Fts5StructureSegment *pSeg; /* New segment within pStruct */
- Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
- Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
-
- Fts5SegWriter writer;
- fts5WriteInit(p, &writer, iSegid);
-
- pBuf = &writer.writer.buf;
- pPgidx = &writer.writer.pgidx;
-
- /* fts5WriteInit() should have initialized the buffers to (most likely)
- ** the maximum space required. */
- assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
- assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
-
- /* Begin scanning through hash table entries. This loop runs once for each
- ** term/doclist currently stored within the hash table. */
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
- }
- while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
- const char *zTerm; /* Buffer containing term */
- const u8 *pDoclist; /* Pointer to doclist for this term */
- int nDoclist; /* Size of doclist in bytes */
-
- /* Write the term for this entry to disk. */
- sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
- fts5WriteAppendTerm(p, &writer, strlen(zTerm), (const u8*)zTerm);
-
- assert( writer.bFirstRowidInPage==0 );
- if( pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
- /* The entire doclist will fit on the current leaf. */
- fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
- }else{
- i64 iRowid = 0;
- i64 iDelta = 0;
- int iOff = 0;
-
- /* The entire doclist will not fit on this leaf. The following
- ** loop iterates through the poslists that make up the current
- ** doclist. */
- while( p->rc==SQLITE_OK && iOff<nDoclist ){
- int nPos;
- int nCopy;
- int bDummy;
- iOff += fts5GetVarint(&pDoclist[iOff], (u64*)&iDelta);
- nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy);
- nCopy += nPos;
- iRowid += iDelta;
-
- if( writer.bFirstRowidInPage ){
- fts5PutU16(&pBuf->p[0], pBuf->n); /* first rowid on page */
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
- writer.bFirstRowidInPage = 0;
- fts5WriteDlidxAppend(p, &writer, iRowid);
- }else{
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iDelta);
- }
- assert( pBuf->n<=pBuf->nSpace );
-
- if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
- /* The entire poslist will fit on the current leaf. So copy
- ** it in one go. */
- fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
- }else{
- /* The entire poslist will not fit on this leaf. So it needs
- ** to be broken into sections. The only qualification being
- ** that each varint must be stored contiguously. */
- const u8 *pPoslist = &pDoclist[iOff];
- int iPos = 0;
- while( p->rc==SQLITE_OK ){
- int nSpace = pgsz - pBuf->n - pPgidx->n;
- int n = 0;
- if( (nCopy - iPos)<=nSpace ){
- n = nCopy - iPos;
- }else{
- n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
- }
- assert( n>0 );
- fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
- iPos += n;
- if( (pBuf->n + pPgidx->n)>=pgsz ){
- fts5WriteFlushLeaf(p, &writer);
- }
- if( iPos>=nCopy ) break;
- }
- }
- iOff += nCopy;
- }
- }
-
- /* TODO2: Doclist terminator written here. */
- /* pBuf->p[pBuf->n++] = '\0'; */
- assert( pBuf->n<=pBuf->nSpace );
- sqlite3Fts5HashScanNext(pHash);
- }
- sqlite3Fts5HashClear(pHash);
- fts5WriteFinish(p, &writer, &pgnoLast);
-
- /* Update the Fts5Structure. It is written back to the database by the
- ** fts5StructureRelease() call below. */
- if( pStruct->nLevel==0 ){
- fts5StructureAddLevel(&p->rc, &pStruct);
- }
- fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
- if( p->rc==SQLITE_OK ){
- pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
- pSeg->iSegid = iSegid;
- pSeg->pgnoFirst = 1;
- pSeg->pgnoLast = pgnoLast;
- pStruct->nSegment++;
- }
- fts5StructurePromote(p, 0, pStruct);
- }
-
- fts5IndexAutomerge(p, &pStruct, pgnoLast);
- fts5IndexCrisismerge(p, &pStruct);
- fts5StructureWrite(p, pStruct);
- fts5StructureRelease(pStruct);
-}
-
-/*
-** Flush any data stored in the in-memory hash tables to the database.
-*/
-static void fts5IndexFlush(Fts5Index *p){
- /* Unless it is empty, flush the hash table to disk */
- if( p->nPendingData ){
- assert( p->pHash );
- p->nPendingData = 0;
- fts5FlushOneHash(p);
- }
-}
-
-
-static int sqlite3Fts5IndexOptimize(Fts5Index *p){
- Fts5Structure *pStruct;
- Fts5Structure *pNew = 0;
- int nSeg = 0;
-
- assert( p->rc==SQLITE_OK );
- fts5IndexFlush(p);
- pStruct = fts5StructureRead(p);
-
- if( pStruct ){
- assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
- nSeg = pStruct->nSegment;
- if( nSeg>1 ){
- int nByte = sizeof(Fts5Structure);
- nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel);
- pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte);
- }
- }
- if( pNew ){
- Fts5StructureLevel *pLvl;
- int nByte = nSeg * sizeof(Fts5StructureSegment);
- pNew->nLevel = pStruct->nLevel+1;
- pNew->nRef = 1;
- pNew->nWriteCounter = pStruct->nWriteCounter;
- pLvl = &pNew->aLevel[pStruct->nLevel];
- pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte);
- if( pLvl->aSeg ){
- int iLvl, iSeg;
- int iSegOut = 0;
- for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
- for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
- pLvl->aSeg[iSegOut] = pStruct->aLevel[iLvl].aSeg[iSeg];
- iSegOut++;
- }
- }
- pNew->nSegment = pLvl->nSeg = nSeg;
- }else{
- sqlite3_free(pNew);
- pNew = 0;
- }
- }
-
- if( pNew ){
- int iLvl = pNew->nLevel-1;
- while( p->rc==SQLITE_OK && pNew->aLevel[iLvl].nSeg>0 ){
- int nRem = FTS5_OPT_WORK_UNIT;
- fts5IndexMergeLevel(p, &pNew, iLvl, &nRem);
- }
-
- fts5StructureWrite(p, pNew);
- fts5StructureRelease(pNew);
- }
-
- fts5StructureRelease(pStruct);
- return fts5IndexReturn(p);
-}
-
-static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
- Fts5Structure *pStruct;
-
- pStruct = fts5StructureRead(p);
- if( pStruct && pStruct->nLevel ){
- fts5IndexMerge(p, &pStruct, nMerge);
- fts5StructureWrite(p, pStruct);
- }
- fts5StructureRelease(pStruct);
-
- return fts5IndexReturn(p);
-}
-
-static void fts5PoslistCallback(
- Fts5Index *p,
- void *pContext,
- const u8 *pChunk, int nChunk
-){
- assert_nc( nChunk>=0 );
- if( nChunk>0 ){
- fts5BufferSafeAppendBlob((Fts5Buffer*)pContext, pChunk, nChunk);
- }
-}
-
-typedef struct PoslistCallbackCtx PoslistCallbackCtx;
-struct PoslistCallbackCtx {
- Fts5Buffer *pBuf; /* Append to this buffer */
- Fts5Colset *pColset; /* Restrict matches to this column */
-  int eState;                     /* 0==skip column, 1==copy column, 2==column header split across chunks */
-};
-
-/*
-** TODO: Make this more efficient!
-*/
-static int fts5IndexColsetTest(Fts5Colset *pColset, int iCol){
- int i;
- for(i=0; i<pColset->nCol; i++){
- if( pColset->aiCol[i]==iCol ) return 1;
- }
- return 0;
-}
-
-static void fts5PoslistFilterCallback(
- Fts5Index *p,
- void *pContext,
- const u8 *pChunk, int nChunk
-){
- PoslistCallbackCtx *pCtx = (PoslistCallbackCtx*)pContext;
- assert_nc( nChunk>=0 );
- if( nChunk>0 ){
- /* Search through to find the first varint with value 1. This is the
-    ** start of the next column's hits. */
- int i = 0;
- int iStart = 0;
-
- if( pCtx->eState==2 ){
- int iCol;
- fts5FastGetVarint32(pChunk, i, iCol);
- if( fts5IndexColsetTest(pCtx->pColset, iCol) ){
- pCtx->eState = 1;
- fts5BufferSafeAppendVarint(pCtx->pBuf, 1);
- }else{
- pCtx->eState = 0;
- }
- }
-
- do {
- while( i<nChunk && pChunk[i]!=0x01 ){
- while( pChunk[i] & 0x80 ) i++;
- i++;
- }
- if( pCtx->eState ){
- fts5BufferSafeAppendBlob(pCtx->pBuf, &pChunk[iStart], i-iStart);
- }
- if( i<nChunk ){
- int iCol;
- iStart = i;
- i++;
- if( i>=nChunk ){
- pCtx->eState = 2;
- }else{
- fts5FastGetVarint32(pChunk, i, iCol);
- pCtx->eState = fts5IndexColsetTest(pCtx->pColset, iCol);
- if( pCtx->eState ){
- fts5BufferSafeAppendBlob(pCtx->pBuf, &pChunk[iStart], i-iStart);
- iStart = i;
- }
- }
- }
- }while( i<nChunk );
- }
-}
-
-/*
-** Segment iterator pSeg currently points to a valid entry (not EOF). This
-** function appends the position list data for the current entry to
-** buffer pBuf. It does not make a copy of the position-list size
-** field.
-*/
-static void fts5SegiterPoslist(
- Fts5Index *p,
- Fts5SegIter *pSeg,
- Fts5Colset *pColset,
- Fts5Buffer *pBuf
-){
- if( 0==fts5BufferGrow(&p->rc, pBuf, pSeg->nPos) ){
- if( pColset==0 ){
- fts5ChunkIterate(p, pSeg, (void*)pBuf, fts5PoslistCallback);
- }else{
- PoslistCallbackCtx sCtx;
- sCtx.pBuf = pBuf;
- sCtx.pColset = pColset;
- sCtx.eState = pColset ? fts5IndexColsetTest(pColset, 0) : 1;
- assert( sCtx.eState==0 || sCtx.eState==1 );
- fts5ChunkIterate(p, pSeg, (void*)&sCtx, fts5PoslistFilterCallback);
- }
- }
-}
-
-/*
-** IN/OUT parameter (*pa) points to a position list n bytes in size. If
-** the position list contains entries for column iCol, then (*pa) is set
-** to point to the sub-position-list for that column and the number of
-** bytes in it is returned. Or, if the argument position list does not
-** contain any entries for column iCol, return 0.
-*/
-static int fts5IndexExtractCol(
- const u8 **pa, /* IN/OUT: Pointer to poslist */
- int n, /* IN: Size of poslist in bytes */
- int iCol /* Column to extract from poslist */
-){
- int iCurrent = 0; /* Anything before the first 0x01 is col 0 */
- const u8 *p = *pa;
- const u8 *pEnd = &p[n]; /* One byte past end of position list */
- u8 prev = 0;
-
- while( iCol!=iCurrent ){
- /* Advance pointer p until it points to pEnd or an 0x01 byte that is
- ** not part of a varint */
- while( (prev & 0x80) || *p!=0x01 ){
- prev = *p++;
- if( p==pEnd ) return 0;
- }
- *pa = p++;
- p += fts5GetVarint32(p, iCurrent);
- }
-
- /* Advance pointer p until it points to pEnd or an 0x01 byte that is
- ** not part of a varint */
- assert( (prev & 0x80)==0 );
- while( p<pEnd && ((prev & 0x80) || *p!=0x01) ){
- prev = *p++;
- }
- return p - (*pa);
-}
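-
-/* Sketch of the position-list layout walked above (inferred from the code):
-** positions for column 0 come first; each subsequent column is introduced
-** by a 0x01 byte followed by a varint holding the column number, then that
-** column's positions. fts5IndexExtractCol() scans for those 0x01 delimiters
-** and returns the byte range belonging to column iCol, or 0 if the column
-** is absent.
-*/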
-
-
-/*
-** Iterator pMulti currently points to a valid entry (not EOF). This
-** function appends the following to buffer pBuf:
-**
-** * The varint iDelta, and
-**   * the position list that pMulti currently points to, including the size field.
-**
-** If argument pColset is not NULL, then the position list is filtered according
-** to pColset before being appended to the buffer. If this means there are
-** no entries in the position list, nothing is appended to the buffer (not
-** even iDelta).
-**
-** If an error occurs, an error code is left in p->rc.
-*/
-static int fts5AppendPoslist(
- Fts5Index *p,
- i64 iDelta,
- Fts5IndexIter *pMulti,
- Fts5Colset *pColset,
- Fts5Buffer *pBuf
-){
- if( p->rc==SQLITE_OK ){
- Fts5SegIter *pSeg = &pMulti->aSeg[ pMulti->aFirst[1].iFirst ];
- assert( fts5MultiIterEof(p, pMulti)==0 );
- assert( pSeg->nPos>0 );
- if( 0==fts5BufferGrow(&p->rc, pBuf, pSeg->nPos+9+9) ){
- int iSv1;
- int iSv2;
- int iData;
-
- /* Append iDelta */
- iSv1 = pBuf->n;
- fts5BufferSafeAppendVarint(pBuf, iDelta);
-
- /* WRITEPOSLISTSIZE */
- iSv2 = pBuf->n;
- fts5BufferSafeAppendVarint(pBuf, pSeg->nPos*2);
- iData = pBuf->n;
-
- if( pSeg->iLeafOffset+pSeg->nPos<=pSeg->pLeaf->szLeaf
- && (pColset==0 || pColset->nCol==1)
- ){
- const u8 *pPos = &pSeg->pLeaf->p[pSeg->iLeafOffset];
- int nPos;
- if( pColset ){
- nPos = fts5IndexExtractCol(&pPos, pSeg->nPos, pColset->aiCol[0]);
- }else{
- nPos = pSeg->nPos;
- }
- fts5BufferSafeAppendBlob(pBuf, pPos, nPos);
- }else{
- fts5SegiterPoslist(p, pSeg, pColset, pBuf);
- }
-
- if( pColset ){
- int nActual = pBuf->n - iData;
- if( nActual!=pSeg->nPos ){
- if( nActual==0 ){
- pBuf->n = iSv1;
- return 1;
- }else{
- int nReq = sqlite3Fts5GetVarintLen((u32)(nActual*2));
- while( iSv2<(iData-nReq) ){ pBuf->p[iSv2++] = 0x80; }
- sqlite3Fts5PutVarint(&pBuf->p[iSv2], nActual*2);
- }
- }
- }
- }
- }
-
- return 0;
-}
-
-static void fts5DoclistIterNext(Fts5DoclistIter *pIter){
- u8 *p = pIter->aPoslist + pIter->nSize + pIter->nPoslist;
-
- assert( pIter->aPoslist );
- if( p>=pIter->aEof ){
- pIter->aPoslist = 0;
- }else{
- i64 iDelta;
-
- p += fts5GetVarint(p, (u64*)&iDelta);
- pIter->iRowid += iDelta;
-
- /* Read position list size */
- if( p[0] & 0x80 ){
- int nPos;
- pIter->nSize = fts5GetVarint32(p, nPos);
- pIter->nPoslist = (nPos>>1);
- }else{
- pIter->nPoslist = ((int)(p[0])) >> 1;
- pIter->nSize = 1;
- }
-
- pIter->aPoslist = p;
- }
-}
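-
-/* Sketch of the position-list size field decoded above (values
-** hypothetical): writers store nPos*2 + bDel (see the WRITEPOSLISTSIZE
-** comments in fts5IndexMergeLevel() and fts5AppendPoslist() above), so a
-** 7-byte position list is stored as the varint 14 (or 15 when the delete
-** flag is set on disk); the reader above recovers the size via (nPos>>1).
-*/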
-
-static void fts5DoclistIterInit(
- Fts5Buffer *pBuf,
- Fts5DoclistIter *pIter
-){
- memset(pIter, 0, sizeof(*pIter));
- pIter->aPoslist = pBuf->p;
- pIter->aEof = &pBuf->p[pBuf->n];
- fts5DoclistIterNext(pIter);
-}
-
-#if 0
-/*
-** Append a docid, delta-encoded against the previous docid, to buffer pBuf.
-**
-** This function assumes that space within the buffer has already been
-** allocated.
-*/
-static void fts5MergeAppendDocid(
- Fts5Buffer *pBuf, /* Buffer to write to */
- i64 *piLastRowid, /* IN/OUT: Previous rowid written (if any) */
- i64 iRowid /* Rowid to append */
-){
- assert( pBuf->n!=0 || (*piLastRowid)==0 );
- fts5BufferSafeAppendVarint(pBuf, iRowid - *piLastRowid);
- *piLastRowid = iRowid;
-}
-#endif
-
-#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \
- assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \
- fts5BufferSafeAppendVarint((pBuf), (iRowid) - (iLastRowid)); \
- (iLastRowid) = (iRowid); \
-}
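-
-/* Illustrative example (hypothetical rowids): appending rowids 10, 15 and
-** 40 through this macro writes the varints 10, 5 and 25; each docid is
-** stored as a delta against the previous one, with iLastRowid tracking the
-** running value.
-*/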
-
-/*
-** Buffers p1 and p2 contain doclists. This function merges the content
-** of the two doclists together and sets buffer p1 to the result before
-** returning.
-**
-** If an error occurs, an error code is left in p->rc. If an error has
-** already occurred, this function is a no-op.
-*/
-static void fts5MergePrefixLists(
- Fts5Index *p, /* FTS5 backend object */
- Fts5Buffer *p1, /* First list to merge */
- Fts5Buffer *p2 /* Second list to merge */
-){
- if( p2->n ){
- i64 iLastRowid = 0;
- Fts5DoclistIter i1;
- Fts5DoclistIter i2;
- Fts5Buffer out;
- Fts5Buffer tmp;
- memset(&out, 0, sizeof(out));
- memset(&tmp, 0, sizeof(tmp));
-
- sqlite3Fts5BufferGrow(&p->rc, &out, p1->n + p2->n);
- fts5DoclistIterInit(p1, &i1);
- fts5DoclistIterInit(p2, &i2);
- while( p->rc==SQLITE_OK && (i1.aPoslist!=0 || i2.aPoslist!=0) ){
- if( i2.aPoslist==0 || (i1.aPoslist && i1.iRowid<i2.iRowid) ){
- /* Copy entry from i1 */
- fts5MergeAppendDocid(&out, iLastRowid, i1.iRowid);
- fts5BufferSafeAppendBlob(&out, i1.aPoslist, i1.nPoslist+i1.nSize);
- fts5DoclistIterNext(&i1);
- }
- else if( i1.aPoslist==0 || i2.iRowid!=i1.iRowid ){
- /* Copy entry from i2 */
- fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid);
- fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.nPoslist+i2.nSize);
- fts5DoclistIterNext(&i2);
- }
- else{
- i64 iPos1 = 0;
- i64 iPos2 = 0;
- int iOff1 = 0;
- int iOff2 = 0;
- u8 *a1 = &i1.aPoslist[i1.nSize];
- u8 *a2 = &i2.aPoslist[i2.nSize];
-
- Fts5PoslistWriter writer;
- memset(&writer, 0, sizeof(writer));
-
- /* Merge the two position lists. */
- fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid);
- fts5BufferZero(&tmp);
-
- sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1);
- sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2);
-
- while( p->rc==SQLITE_OK && (iPos1>=0 || iPos2>=0) ){
- i64 iNew;
- if( iPos2<0 || (iPos1>=0 && iPos1<iPos2) ){
- iNew = iPos1;
- sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1);
- }else{
- iNew = iPos2;
- sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2);
- if( iPos1==iPos2 ){
- sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1,&iPos1);
- }
- }
- p->rc = sqlite3Fts5PoslistWriterAppend(&tmp, &writer, iNew);
- }
-
- /* WRITEPOSLISTSIZE */
- fts5BufferSafeAppendVarint(&out, tmp.n * 2);
- fts5BufferSafeAppendBlob(&out, tmp.p, tmp.n);
- fts5DoclistIterNext(&i1);
- fts5DoclistIterNext(&i2);
- }
- }
-
- fts5BufferSet(&p->rc, p1, out.n, out.p);
- fts5BufferFree(&tmp);
- fts5BufferFree(&out);
- }
-}
-
-static void fts5BufferSwap(Fts5Buffer *p1, Fts5Buffer *p2){
- Fts5Buffer tmp = *p1;
- *p1 = *p2;
- *p2 = tmp;
-}
-
-static void fts5SetupPrefixIter(
- Fts5Index *p, /* Index to read from */
- int bDesc, /* True for "ORDER BY rowid DESC" */
- const u8 *pToken, /* Buffer containing prefix to match */
- int nToken, /* Size of buffer pToken in bytes */
- Fts5Colset *pColset, /* Restrict matches to these columns */
- Fts5IndexIter **ppIter /* OUT: New iterator */
-){
- Fts5Structure *pStruct;
- Fts5Buffer *aBuf;
- const int nBuf = 32;
-
- aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf);
- pStruct = fts5StructureRead(p);
-
- if( aBuf && pStruct ){
- const int flags = FTS5INDEX_QUERY_SCAN;
- int i;
- i64 iLastRowid = 0;
- Fts5IndexIter *p1 = 0; /* Iterator used to gather data from index */
- Fts5Data *pData;
- Fts5Buffer doclist;
-
- memset(&doclist, 0, sizeof(doclist));
- for(fts5MultiIterNew(p, pStruct, 1, flags, pToken, nToken, -1, 0, &p1);
- fts5MultiIterEof(p, p1)==0;
- fts5MultiIterNext(p, p1, 0, 0)
- ){
- i64 iRowid = fts5MultiIterRowid(p1);
- int nTerm;
- const u8 *pTerm = fts5MultiIterTerm(p1, &nTerm);
- assert_nc( memcmp(pToken, pTerm, MIN(nToken, nTerm))<=0 );
- if( nTerm<nToken || memcmp(pToken, pTerm, nToken) ) break;
-
- if( doclist.n>0 && iRowid<=iLastRowid ){
- for(i=0; p->rc==SQLITE_OK && doclist.n; i++){
- assert( i<nBuf );
- if( aBuf[i].n==0 ){
- fts5BufferSwap(&doclist, &aBuf[i]);
- fts5BufferZero(&doclist);
- }else{
- fts5MergePrefixLists(p, &doclist, &aBuf[i]);
- fts5BufferZero(&aBuf[i]);
- }
- }
- iLastRowid = 0;
- }
-
- if( !fts5AppendPoslist(p, iRowid-iLastRowid, p1, pColset, &doclist) ){
- iLastRowid = iRowid;
- }
- }
-
- for(i=0; i<nBuf; i++){
- if( p->rc==SQLITE_OK ){
- fts5MergePrefixLists(p, &doclist, &aBuf[i]);
- }
- fts5BufferFree(&aBuf[i]);
- }
- fts5MultiIterFree(p, p1);
-
- pData = fts5IdxMalloc(p, sizeof(Fts5Data) + doclist.n);
- if( pData ){
- pData->p = (u8*)&pData[1];
- pData->nn = pData->szLeaf = doclist.n;
- memcpy(pData->p, doclist.p, doclist.n);
- fts5MultiIterNew2(p, pData, bDesc, ppIter);
- }
- fts5BufferFree(&doclist);
- }
-
- fts5StructureRelease(pStruct);
- sqlite3_free(aBuf);
-}
-
-
-/*
-** Indicate that all subsequent calls to sqlite3Fts5IndexWrite() pertain
-** to the document with rowid iRowid.
-*/
-static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){
- assert( p->rc==SQLITE_OK );
-
- /* Allocate the hash table if it has not already been allocated */
- if( p->pHash==0 ){
- p->rc = sqlite3Fts5HashNew(&p->pHash, &p->nPendingData);
- }
-
- /* Flush the hash table to disk if required */
- if( iRowid<p->iWriteRowid
- || (iRowid==p->iWriteRowid && p->bDelete==0)
- || (p->nPendingData > p->nMaxPendingData)
- ){
- fts5IndexFlush(p);
- }
-
- p->iWriteRowid = iRowid;
- p->bDelete = bDelete;
- return fts5IndexReturn(p);
-}
-
-/*
-** Commit data to disk.
-*/
-static int sqlite3Fts5IndexSync(Fts5Index *p, int bCommit){
- assert( p->rc==SQLITE_OK );
- fts5IndexFlush(p);
- if( bCommit ) fts5CloseReader(p);
- return fts5IndexReturn(p);
-}
-
-/*
-** Discard any data stored in the in-memory hash tables. Do not write it
-** to the database. Additionally, assume that the contents of the %_data
-** table may have changed on disk. So any in-memory caches of %_data
-** records must be invalidated.
-*/
-static int sqlite3Fts5IndexRollback(Fts5Index *p){
- fts5CloseReader(p);
- fts5IndexDiscardData(p);
- assert( p->rc==SQLITE_OK );
- return SQLITE_OK;
-}
-
-/*
-** The %_data table is completely empty when this function is called. This
-** function populates it with the initial structure objects for each index,
-** and the initial version of the "averages" record (a zero-byte blob).
-*/
-static int sqlite3Fts5IndexReinit(Fts5Index *p){
- Fts5Structure s;
- memset(&s, 0, sizeof(Fts5Structure));
- fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0);
- fts5StructureWrite(p, &s);
- return fts5IndexReturn(p);
-}
-
-/*
-** Open a new Fts5Index handle. If the bCreate argument is true, create
-** and initialize the underlying %_data table.
-**
-** If successful, set *pp to point to the new object and return SQLITE_OK.
-** Otherwise, set *pp to NULL and return an SQLite error code.
-*/
-static int sqlite3Fts5IndexOpen(
- Fts5Config *pConfig,
- int bCreate,
- Fts5Index **pp,
- char **pzErr
-){
- int rc = SQLITE_OK;
- Fts5Index *p; /* New object */
-
- *pp = p = (Fts5Index*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Index));
- if( rc==SQLITE_OK ){
- p->pConfig = pConfig;
- p->nWorkUnit = FTS5_WORK_UNIT;
- p->nMaxPendingData = 1024*1024;
- p->zDataTbl = sqlite3Fts5Mprintf(&rc, "%s_data", pConfig->zName);
- if( p->zDataTbl && bCreate ){
- rc = sqlite3Fts5CreateTable(
- pConfig, "data", "id INTEGER PRIMARY KEY, block BLOB", 0, pzErr
- );
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5CreateTable(pConfig, "idx",
- "segid, term, pgno, PRIMARY KEY(segid, term)",
- 1, pzErr
- );
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5IndexReinit(p);
- }
- }
- }
-
- assert( rc!=SQLITE_OK || p->rc==SQLITE_OK );
- if( rc ){
- sqlite3Fts5IndexClose(p);
- *pp = 0;
- }
- return rc;
-}
-
-/*
-** Close a handle opened by an earlier call to sqlite3Fts5IndexOpen().
-*/
-static int sqlite3Fts5IndexClose(Fts5Index *p){
- int rc = SQLITE_OK;
- if( p ){
- assert( p->pReader==0 );
- sqlite3_finalize(p->pWriter);
- sqlite3_finalize(p->pDeleter);
- sqlite3_finalize(p->pIdxWriter);
- sqlite3_finalize(p->pIdxDeleter);
- sqlite3_finalize(p->pIdxSelect);
- sqlite3Fts5HashFree(p->pHash);
- sqlite3_free(p->zDataTbl);
- sqlite3_free(p);
- }
- return rc;
-}
-
-/*
-** Argument p points to a buffer containing UTF-8 text that is nByte bytes in
-** size. Return the number of bytes in the nChar character prefix of the
-** buffer, or 0 if there are fewer than nChar characters in total.
-*/
-static int fts5IndexCharlenToBytelen(const char *p, int nByte, int nChar){
- int n = 0;
- int i;
- for(i=0; i<nChar; i++){
- if( n>=nByte ) return 0; /* Input contains fewer than nChar chars */
- if( (unsigned char)p[n++]>=0xc0 ){
- while( (p[n] & 0xc0)==0x80 ) n++;
- }
- }
- return n;
-}
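-
-/* Illustrative example (hypothetical input): for the UTF-8 text "héllo"
-** (bytes 0x68 0xC3 0xA9 0x6C 0x6C 0x6F) and nChar==3 the function returns
-** 4: 'h' is one byte, 'é' is two (0xC3 plus the continuation byte 0xA9)
-** and the third character 'l' is one byte.
-*/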
-
-/*
-** pIn is a UTF-8 encoded string, nIn bytes in size. Return the number of
-** unicode characters in the string.
-*/
-static int fts5IndexCharlen(const char *pIn, int nIn){
- int nChar = 0;
- int i = 0;
- while( i<nIn ){
- if( (unsigned char)pIn[i++]>=0xc0 ){
- while( i<nIn && (pIn[i] & 0xc0)==0x80 ) i++;
- }
- nChar++;
- }
- return nChar;
-}
-
-/*
-** Insert data into or remove data from the index. Each time a document is
-** added to or removed from the index, this function is called one or more
-** times.
-**
-** For an insert, it must be called once for each token in the new document.
-** If the operation is a delete, it must be called (at least) once for each
-** unique token in the document with an iCol value less than zero. The iPos
-** argument is ignored for a delete.
-*/
-static int sqlite3Fts5IndexWrite(
- Fts5Index *p, /* Index to write to */
- int iCol, /* Column token appears in (-ve -> delete) */
- int iPos, /* Position of token within column */
- const char *pToken, int nToken /* Token to add or remove to or from index */
-){
- int i; /* Used to iterate through indexes */
- int rc = SQLITE_OK; /* Return code */
- Fts5Config *pConfig = p->pConfig;
-
- assert( p->rc==SQLITE_OK );
- assert( (iCol<0)==p->bDelete );
-
- /* Add the entry to the main terms index. */
- rc = sqlite3Fts5HashWrite(
- p->pHash, p->iWriteRowid, iCol, iPos, FTS5_MAIN_PREFIX, pToken, nToken
- );
-
- for(i=0; i<pConfig->nPrefix && rc==SQLITE_OK; i++){
- int nByte = fts5IndexCharlenToBytelen(pToken, nToken, pConfig->aPrefix[i]);
- if( nByte ){
- rc = sqlite3Fts5HashWrite(p->pHash,
- p->iWriteRowid, iCol, iPos, FTS5_MAIN_PREFIX+i+1, pToken, nByte
- );
- }
- }
-
- return rc;
-}
-
-/*
-** Open a new iterator to iterate through all rowids that match the
-** specified token or token prefix.
-*/
-static int sqlite3Fts5IndexQuery(
- Fts5Index *p, /* FTS index to query */
- const char *pToken, int nToken, /* Token (or prefix) to query for */
- int flags, /* Mask of FTS5INDEX_QUERY_X flags */
- Fts5Colset *pColset, /* Match these columns only */
- Fts5IndexIter **ppIter /* OUT: New iterator object */
-){
- Fts5Config *pConfig = p->pConfig;
- Fts5IndexIter *pRet = 0;
- int iIdx = 0;
- Fts5Buffer buf = {0, 0, 0};
-
- /* If the QUERY_SCAN flag is set, all other flags must be clear. */
- assert( (flags & FTS5INDEX_QUERY_SCAN)==0
- || (flags & FTS5INDEX_QUERY_SCAN)==FTS5INDEX_QUERY_SCAN
- );
-
- if( sqlite3Fts5BufferGrow(&p->rc, &buf, nToken+1)==0 ){
- memcpy(&buf.p[1], pToken, nToken);
-
-#ifdef SQLITE_DEBUG
- /* If the QUERY_TEST_NOIDX flag was specified, then this must be a
- ** prefix-query. Instead of using a prefix-index (if one exists),
- ** evaluate the prefix query using the main FTS index. This is used
- ** for internal sanity checking by the integrity-check in debug
- ** mode only. */
- if( pConfig->bPrefixIndex==0 || (flags & FTS5INDEX_QUERY_TEST_NOIDX) ){
- assert( flags & FTS5INDEX_QUERY_PREFIX );
- iIdx = 1+pConfig->nPrefix;
- }else
-#endif
- if( flags & FTS5INDEX_QUERY_PREFIX ){
- int nChar = fts5IndexCharlen(pToken, nToken);
- for(iIdx=1; iIdx<=pConfig->nPrefix; iIdx++){
- if( pConfig->aPrefix[iIdx-1]==nChar ) break;
- }
- }
-
- if( iIdx<=pConfig->nPrefix ){
- Fts5Structure *pStruct = fts5StructureRead(p);
- buf.p[0] = FTS5_MAIN_PREFIX + iIdx;
- if( pStruct ){
- fts5MultiIterNew(p, pStruct, 1, flags, buf.p, nToken+1, -1, 0, &pRet);
- fts5StructureRelease(pStruct);
- }
- }else{
- int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0;
- buf.p[0] = FTS5_MAIN_PREFIX;
- fts5SetupPrefixIter(p, bDesc, buf.p, nToken+1, pColset, &pRet);
- }
-
- if( p->rc ){
- sqlite3Fts5IterClose(pRet);
- pRet = 0;
- fts5CloseReader(p);
- }
- *ppIter = pRet;
- sqlite3Fts5BufferFree(&buf);
- }
- return fts5IndexReturn(p);
-}
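A rough standalone illustration of the prefix-index selection loop in sqlite3Fts5IndexQuery() above; the configured prefix lengths and the query-term length are placeholder values.

#include <stdio.h>

int main(void){
  int aPrefix[] = {2, 4};   /* configured prefix-index lengths */
  int nPrefix = 2;
  int nChar = 4;            /* character length of the query term */
  int iIdx;

  /* Same loop shape as above: iIdx values 1..nPrefix identify a prefix
  ** index, while iIdx==nPrefix+1 means no prefix index matched. */
  for(iIdx=1; iIdx<=nPrefix; iIdx++){
    if( aPrefix[iIdx-1]==nChar ) break;
  }
  if( iIdx<=nPrefix ){
    printf("use prefix index %d (key byte FTS5_MAIN_PREFIX+%d)\n", iIdx, iIdx);
  }else{
    printf("no matching prefix index - scan the main index instead\n");
  }
  return 0;
}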
-
-/*
-** Return true if the iterator passed as the only argument is at EOF.
-*/
-static int sqlite3Fts5IterEof(Fts5IndexIter *pIter){
- assert( pIter->pIndex->rc==SQLITE_OK );
- return pIter->bEof;
-}
-
-/*
-** Move to the next matching rowid.
-*/
-static int sqlite3Fts5IterNext(Fts5IndexIter *pIter){
- assert( pIter->pIndex->rc==SQLITE_OK );
- fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
- return fts5IndexReturn(pIter->pIndex);
-}
-
-/*
-** Move to the next matching term/rowid. Used by the fts5vocab module.
-*/
-static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIter){
- Fts5Index *p = pIter->pIndex;
-
- assert( pIter->pIndex->rc==SQLITE_OK );
-
- fts5MultiIterNext(p, pIter, 0, 0);
- if( p->rc==SQLITE_OK ){
- Fts5SegIter *pSeg = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- if( pSeg->pLeaf && pSeg->term.p[0]!=FTS5_MAIN_PREFIX ){
- fts5DataRelease(pSeg->pLeaf);
- pSeg->pLeaf = 0;
- pIter->bEof = 1;
- }
- }
-
- return fts5IndexReturn(pIter->pIndex);
-}
-
-/*
-** Move to the next matching rowid that occurs at or after iMatch. The
-** definition of "at or after" depends on whether this iterator iterates
-** in ascending or descending rowid order.
-*/
-static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIter, i64 iMatch){
- fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
- return fts5IndexReturn(pIter->pIndex);
-}
-
-/*
-** Return the current rowid.
-*/
-static i64 sqlite3Fts5IterRowid(Fts5IndexIter *pIter){
- return fts5MultiIterRowid(pIter);
-}
-
-/*
-** Return the current term.
-*/
-static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIter, int *pn){
- int n;
- const char *z = (const char*)fts5MultiIterTerm(pIter, &n);
- *pn = n-1;
- return &z[1];
-}
-
-
-static int fts5IndexExtractColset(
- Fts5Colset *pColset, /* Colset to filter on */
- const u8 *pPos, int nPos, /* Position list */
- Fts5Buffer *pBuf /* Output buffer */
-){
- int rc = SQLITE_OK;
- int i;
-
- fts5BufferZero(pBuf);
- for(i=0; i<pColset->nCol; i++){
- const u8 *pSub = pPos;
- int nSub = fts5IndexExtractCol(&pSub, nPos, pColset->aiCol[i]);
- if( nSub ){
- fts5BufferAppendBlob(&rc, pBuf, nSub, pSub);
- }
- }
- return rc;
-}
-
-
-/*
-** Return a pointer to a buffer containing a copy of the position list for
-** the current entry. Output variable *pn is set to the size of the buffer
-** in bytes before returning.
-**
-** The returned position list does not include the "number of bytes" varint
-** field that starts the position list on disk.
-*/
-static int sqlite3Fts5IterPoslist(
- Fts5IndexIter *pIter,
- Fts5Colset *pColset, /* Column filter (or NULL) */
- const u8 **pp, /* OUT: Pointer to position-list data */
- int *pn, /* OUT: Size of position-list in bytes */
- i64 *piRowid /* OUT: Current rowid */
-){
- Fts5SegIter *pSeg = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- assert( pIter->pIndex->rc==SQLITE_OK );
- *piRowid = pSeg->iRowid;
- if( pSeg->iLeafOffset+pSeg->nPos<=pSeg->pLeaf->szLeaf ){
- u8 *pPos = &pSeg->pLeaf->p[pSeg->iLeafOffset];
- if( pColset==0 || pIter->bFiltered ){
- *pn = pSeg->nPos;
- *pp = pPos;
- }else if( pColset->nCol==1 ){
- *pp = pPos;
- *pn = fts5IndexExtractCol(pp, pSeg->nPos, pColset->aiCol[0]);
- }else{
- fts5BufferZero(&pIter->poslist);
- fts5IndexExtractColset(pColset, pPos, pSeg->nPos, &pIter->poslist);
- *pp = pIter->poslist.p;
- *pn = pIter->poslist.n;
- }
- }else{
- fts5BufferZero(&pIter->poslist);
- fts5SegiterPoslist(pIter->pIndex, pSeg, pColset, &pIter->poslist);
- *pp = pIter->poslist.p;
- *pn = pIter->poslist.n;
- }
- return fts5IndexReturn(pIter->pIndex);
-}
-
-/*
-** This function is similar to sqlite3Fts5IterPoslist(), except that it
-** copies the position list into the buffer supplied as the second
-** argument.
-*/
-static int sqlite3Fts5IterPoslistBuffer(Fts5IndexIter *pIter, Fts5Buffer *pBuf){
- Fts5Index *p = pIter->pIndex;
- Fts5SegIter *pSeg = &pIter->aSeg[ pIter->aFirst[1].iFirst ];
- assert( p->rc==SQLITE_OK );
- fts5BufferZero(pBuf);
- fts5SegiterPoslist(p, pSeg, 0, pBuf);
- return fts5IndexReturn(p);
-}
-
-/*
-** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery().
-*/
-static void sqlite3Fts5IterClose(Fts5IndexIter *pIter){
- if( pIter ){
- Fts5Index *pIndex = pIter->pIndex;
- fts5MultiIterFree(pIter->pIndex, pIter);
- fts5CloseReader(pIndex);
- }
-}
-
-/*
-** Read and decode the "averages" record from the database.
-**
-** Parameter anSize must point to an array of size nCol, where nCol is
-** the number of user defined columns in the FTS table.
-*/
-static int sqlite3Fts5IndexGetAverages(Fts5Index *p, i64 *pnRow, i64 *anSize){
- int nCol = p->pConfig->nCol;
- Fts5Data *pData;
-
- *pnRow = 0;
- memset(anSize, 0, sizeof(i64) * nCol);
- pData = fts5DataRead(p, FTS5_AVERAGES_ROWID);
- if( p->rc==SQLITE_OK && pData->nn ){
- int i = 0;
- int iCol;
- i += fts5GetVarint(&pData->p[i], (u64*)pnRow);
- for(iCol=0; i<pData->nn && iCol<nCol; iCol++){
- i += fts5GetVarint(&pData->p[i], (u64*)&anSize[iCol]);
- }
- }
-
- fts5DataRelease(pData);
- return fts5IndexReturn(p);
-}
-
-/*
-** Replace the current "averages" record with the contents of the buffer
-** supplied as the second argument.
-*/
-static int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8 *pData, int nData){
- assert( p->rc==SQLITE_OK );
- fts5DataWrite(p, FTS5_AVERAGES_ROWID, pData, nData);
- return fts5IndexReturn(p);
-}
-
-/*
-** Return the total number of blocks this module has read from the %_data
-** table since it was created.
-*/
-static int sqlite3Fts5IndexReads(Fts5Index *p){
- return p->nRead;
-}
-
-/*
-** Set the 32-bit cookie value stored at the start of all structure
-** records to the value passed as the second argument.
-**
-** Return SQLITE_OK if successful, or an SQLite error code if an error
-** occurs.
-*/
-static int sqlite3Fts5IndexSetCookie(Fts5Index *p, int iNew){
- int rc; /* Return code */
- Fts5Config *pConfig = p->pConfig; /* Configuration object */
- u8 aCookie[4]; /* Binary representation of iNew */
- sqlite3_blob *pBlob = 0;
-
- assert( p->rc==SQLITE_OK );
- sqlite3Fts5Put32(aCookie, iNew);
-
- rc = sqlite3_blob_open(pConfig->db, pConfig->zDb, p->zDataTbl,
- "block", FTS5_STRUCTURE_ROWID, 1, &pBlob
- );
- if( rc==SQLITE_OK ){
- sqlite3_blob_write(pBlob, aCookie, 4, 0);
- rc = sqlite3_blob_close(pBlob);
- }
-
- return rc;
-}
-
-static int sqlite3Fts5IndexLoadConfig(Fts5Index *p){
- Fts5Structure *pStruct;
- pStruct = fts5StructureRead(p);
- fts5StructureRelease(pStruct);
- return fts5IndexReturn(p);
-}
-
-
-/*************************************************************************
-**************************************************************************
-** Below this point is the implementation of the integrity-check
-** functionality.
-*/
-
-/*
-** Return a simple checksum value based on the arguments.
-*/
-static u64 fts5IndexEntryCksum(
- i64 iRowid,
- int iCol,
- int iPos,
- int iIdx,
- const char *pTerm,
- int nTerm
-){
- int i;
- u64 ret = iRowid;
- ret += (ret<<3) + iCol;
- ret += (ret<<3) + iPos;
- if( iIdx>=0 ) ret += (ret<<3) + (FTS5_MAIN_PREFIX + iIdx);
- for(i=0; i<nTerm; i++) ret += (ret<<3) + pTerm[i];
- return ret;
-}
-
-#ifdef SQLITE_DEBUG
-/*
-** This function is purely an internal test. It does not contribute to
-** FTS functionality, or even the integrity-check, in any way.
-**
-** Instead, it tests that the same set of pgno/rowid combinations are
-** visited regardless of whether the doclist-index identified by parameters
-** iSegid/iLeaf is iterated in forwards or reverse order.
-*/
-static void fts5TestDlidxReverse(
- Fts5Index *p,
- int iSegid, /* Segment id to load from */
- int iLeaf /* Load doclist-index for this leaf */
-){
- Fts5DlidxIter *pDlidx = 0;
- u64 cksum1 = 13;
- u64 cksum2 = 13;
-
- for(pDlidx=fts5DlidxIterInit(p, 0, iSegid, iLeaf);
- fts5DlidxIterEof(p, pDlidx)==0;
- fts5DlidxIterNext(p, pDlidx)
- ){
- i64 iRowid = fts5DlidxIterRowid(pDlidx);
- int pgno = fts5DlidxIterPgno(pDlidx);
- assert( pgno>iLeaf );
- cksum1 += iRowid + ((i64)pgno<<32);
- }
- fts5DlidxIterFree(pDlidx);
- pDlidx = 0;
-
- for(pDlidx=fts5DlidxIterInit(p, 1, iSegid, iLeaf);
- fts5DlidxIterEof(p, pDlidx)==0;
- fts5DlidxIterPrev(p, pDlidx)
- ){
- i64 iRowid = fts5DlidxIterRowid(pDlidx);
- int pgno = fts5DlidxIterPgno(pDlidx);
- assert( fts5DlidxIterPgno(pDlidx)>iLeaf );
- cksum2 += iRowid + ((i64)pgno<<32);
- }
- fts5DlidxIterFree(pDlidx);
- pDlidx = 0;
-
- if( p->rc==SQLITE_OK && cksum1!=cksum2 ) p->rc = FTS5_CORRUPT;
-}
-
-static int fts5QueryCksum(
- Fts5Index *p, /* Fts5 index object */
- int iIdx,
- const char *z, /* Index key to query for */
- int n, /* Size of index key in bytes */
- int flags, /* Flags for Fts5IndexQuery */
- u64 *pCksum /* IN/OUT: Checksum value */
-){
- u64 cksum = *pCksum;
- Fts5IndexIter *pIdxIter = 0;
- int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIdxIter);
-
- while( rc==SQLITE_OK && 0==sqlite3Fts5IterEof(pIdxIter) ){
- i64 dummy;
- const u8 *pPos;
- int nPos;
- i64 rowid = sqlite3Fts5IterRowid(pIdxIter);
- rc = sqlite3Fts5IterPoslist(pIdxIter, 0, &pPos, &nPos, &dummy);
- if( rc==SQLITE_OK ){
- Fts5PoslistReader sReader;
- for(sqlite3Fts5PoslistReaderInit(pPos, nPos, &sReader);
- sReader.bEof==0;
- sqlite3Fts5PoslistReaderNext(&sReader)
- ){
- int iCol = FTS5_POS2COLUMN(sReader.iPos);
- int iOff = FTS5_POS2OFFSET(sReader.iPos);
- cksum ^= fts5IndexEntryCksum(rowid, iCol, iOff, iIdx, z, n);
- }
- rc = sqlite3Fts5IterNext(pIdxIter);
- }
- }
- sqlite3Fts5IterClose(pIdxIter);
-
- *pCksum = cksum;
- return rc;
-}
-
-
-/*
-** This function is also purely an internal test. It does not contribute to
-** FTS functionality, or even the integrity-check, in any way.
-*/
-static void fts5TestTerm(
- Fts5Index *p,
- Fts5Buffer *pPrev, /* Previous term */
- const char *z, int n, /* Possibly new term to test */
- u64 expected,
- u64 *pCksum
-){
- int rc = p->rc;
- if( pPrev->n==0 ){
- fts5BufferSet(&rc, pPrev, n, (const u8*)z);
- }else
- if( rc==SQLITE_OK && (pPrev->n!=n || memcmp(pPrev->p, z, n)) ){
- u64 cksum3 = *pCksum;
- const char *zTerm = (const char*)&pPrev->p[1]; /* term sans prefix-byte */
- int nTerm = pPrev->n-1; /* Size of zTerm in bytes */
- int iIdx = (pPrev->p[0] - FTS5_MAIN_PREFIX);
- int flags = (iIdx==0 ? 0 : FTS5INDEX_QUERY_PREFIX);
- u64 ck1 = 0;
- u64 ck2 = 0;
-
- /* Check that the results returned for ASC and DESC queries are
- ** the same. If not, call this corruption. */
- rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, flags, &ck1);
- if( rc==SQLITE_OK ){
- int f = flags|FTS5INDEX_QUERY_DESC;
- rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, f, &ck2);
- }
- if( rc==SQLITE_OK && ck1!=ck2 ) rc = FTS5_CORRUPT;
-
-    /* If this is a prefix query, check that the results returned when the
-    ** prefix index is disabled are the same, in both ASC and DESC order.
- **
- ** This check may only be performed if the hash table is empty. This
- ** is because the hash table only supports a single scan query at
- ** a time, and the multi-iter loop from which this function is called
- ** is already performing such a scan. */
- if( p->nPendingData==0 ){
- if( iIdx>0 && rc==SQLITE_OK ){
- int f = flags|FTS5INDEX_QUERY_TEST_NOIDX;
- ck2 = 0;
- rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, f, &ck2);
- if( rc==SQLITE_OK && ck1!=ck2 ) rc = FTS5_CORRUPT;
- }
- if( iIdx>0 && rc==SQLITE_OK ){
- int f = flags|FTS5INDEX_QUERY_TEST_NOIDX|FTS5INDEX_QUERY_DESC;
- ck2 = 0;
- rc = fts5QueryCksum(p, iIdx, zTerm, nTerm, f, &ck2);
- if( rc==SQLITE_OK && ck1!=ck2 ) rc = FTS5_CORRUPT;
- }
- }
-
- cksum3 ^= ck1;
- fts5BufferSet(&rc, pPrev, n, (const u8*)z);
-
- if( rc==SQLITE_OK && cksum3!=expected ){
- rc = FTS5_CORRUPT;
- }
- *pCksum = cksum3;
- }
- p->rc = rc;
-}
-
-#else
-# define fts5TestDlidxReverse(x,y,z)
-# define fts5TestTerm(u,v,w,x,y,z)
-#endif
-
-/*
-** Check that:
-**
-** 1) All leaves of pSeg between iFirst and iLast (inclusive) exist and
-** contain zero terms.
-** 2) All leaves of pSeg between iNoRowid and iLast (inclusive) exist and
-** contain zero rowids.
-*/
-static void fts5IndexIntegrityCheckEmpty(
- Fts5Index *p,
- Fts5StructureSegment *pSeg, /* Segment to check internal consistency */
- int iFirst,
- int iNoRowid,
- int iLast
-){
- int i;
-
- /* Now check that the iter.nEmpty leaves following the current leaf
- ** (a) exist and (b) contain no terms. */
- for(i=iFirst; p->rc==SQLITE_OK && i<=iLast; i++){
- Fts5Data *pLeaf = fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->iSegid, i));
- if( pLeaf ){
- if( !fts5LeafIsTermless(pLeaf) ) p->rc = FTS5_CORRUPT;
- if( i>=iNoRowid && 0!=fts5LeafFirstRowidOff(pLeaf) ) p->rc = FTS5_CORRUPT;
- }
- fts5DataRelease(pLeaf);
- }
-}
-
-static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){
- int iTermOff = 0;
- int ii;
-
- Fts5Buffer buf1 = {0,0,0};
- Fts5Buffer buf2 = {0,0,0};
-
- ii = pLeaf->szLeaf;
- while( ii<pLeaf->nn && p->rc==SQLITE_OK ){
- int res;
- int iOff;
- int nIncr;
-
- ii += fts5GetVarint32(&pLeaf->p[ii], nIncr);
- iTermOff += nIncr;
- iOff = iTermOff;
-
- if( iOff>=pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- }else if( iTermOff==nIncr ){
- int nByte;
- iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte);
- if( (iOff+nByte)>pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- }else{
- fts5BufferSet(&p->rc, &buf1, nByte, &pLeaf->p[iOff]);
- }
- }else{
- int nKeep, nByte;
- iOff += fts5GetVarint32(&pLeaf->p[iOff], nKeep);
- iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte);
- if( nKeep>buf1.n || (iOff+nByte)>pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- }else{
- buf1.n = nKeep;
- fts5BufferAppendBlob(&p->rc, &buf1, nByte, &pLeaf->p[iOff]);
- }
-
- if( p->rc==SQLITE_OK ){
- res = fts5BufferCompare(&buf1, &buf2);
- if( res<=0 ) p->rc = FTS5_CORRUPT;
- }
- }
- fts5BufferSet(&p->rc, &buf2, buf1.n, buf1.p);
- }
-
- fts5BufferFree(&buf1);
- fts5BufferFree(&buf2);
-}
-
-static void fts5IndexIntegrityCheckSegment(
- Fts5Index *p, /* FTS5 backend object */
- Fts5StructureSegment *pSeg /* Segment to check internal consistency */
-){
- Fts5Config *pConfig = p->pConfig;
- sqlite3_stmt *pStmt = 0;
- int rc2;
- int iIdxPrevLeaf = pSeg->pgnoFirst-1;
- int iDlidxPrevLeaf = pSeg->pgnoLast;
-
- if( pSeg->pgnoFirst==0 ) return;
-
- fts5IndexPrepareStmt(p, &pStmt, sqlite3_mprintf(
- "SELECT segid, term, (pgno>>1), (pgno&1) FROM %Q.'%q_idx' WHERE segid=%d",
- pConfig->zDb, pConfig->zName, pSeg->iSegid
- ));
-
- /* Iterate through the b-tree hierarchy. */
- while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
- i64 iRow; /* Rowid for this leaf */
- Fts5Data *pLeaf; /* Data for this leaf */
-
- int nIdxTerm = sqlite3_column_bytes(pStmt, 1);
- const char *zIdxTerm = (const char*)sqlite3_column_text(pStmt, 1);
- int iIdxLeaf = sqlite3_column_int(pStmt, 2);
- int bIdxDlidx = sqlite3_column_int(pStmt, 3);
-
- /* If the leaf in question has already been trimmed from the segment,
- ** ignore this b-tree entry. Otherwise, load it into memory. */
- if( iIdxLeaf<pSeg->pgnoFirst ) continue;
- iRow = FTS5_SEGMENT_ROWID(pSeg->iSegid, iIdxLeaf);
- pLeaf = fts5DataRead(p, iRow);
- if( pLeaf==0 ) break;
-
- /* Check that the leaf contains at least one term, and that it is equal
- ** to or larger than the split-key in zIdxTerm. Also check that if there
- ** is also a rowid pointer within the leaf page header, it points to a
- ** location before the term. */
- if( pLeaf->nn<=pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- }else{
- int iOff; /* Offset of first term on leaf */
- int iRowidOff; /* Offset of first rowid on leaf */
- int nTerm; /* Size of term on leaf in bytes */
- int res; /* Comparison of term and split-key */
-
- iOff = fts5LeafFirstTermOff(pLeaf);
- iRowidOff = fts5LeafFirstRowidOff(pLeaf);
- if( iRowidOff>=iOff ){
- p->rc = FTS5_CORRUPT;
- }else{
- iOff += fts5GetVarint32(&pLeaf->p[iOff], nTerm);
- res = memcmp(&pLeaf->p[iOff], zIdxTerm, MIN(nTerm, nIdxTerm));
- if( res==0 ) res = nTerm - nIdxTerm;
- if( res<0 ) p->rc = FTS5_CORRUPT;
- }
-
- fts5IntegrityCheckPgidx(p, pLeaf);
- }
- fts5DataRelease(pLeaf);
- if( p->rc ) break;
-
-
- /* Now check that the iter.nEmpty leaves following the current leaf
- ** (a) exist and (b) contain no terms. */
- fts5IndexIntegrityCheckEmpty(
- p, pSeg, iIdxPrevLeaf+1, iDlidxPrevLeaf+1, iIdxLeaf-1
- );
- if( p->rc ) break;
-
- /* If there is a doclist-index, check that it looks right. */
- if( bIdxDlidx ){
- Fts5DlidxIter *pDlidx = 0; /* For iterating through doclist index */
- int iPrevLeaf = iIdxLeaf;
- int iSegid = pSeg->iSegid;
- int iPg = 0;
- i64 iKey;
-
- for(pDlidx=fts5DlidxIterInit(p, 0, iSegid, iIdxLeaf);
- fts5DlidxIterEof(p, pDlidx)==0;
- fts5DlidxIterNext(p, pDlidx)
- ){
-
- /* Check any rowid-less pages that occur before the current leaf. */
- for(iPg=iPrevLeaf+1; iPg<fts5DlidxIterPgno(pDlidx); iPg++){
- iKey = FTS5_SEGMENT_ROWID(iSegid, iPg);
- pLeaf = fts5DataRead(p, iKey);
- if( pLeaf ){
- if( fts5LeafFirstRowidOff(pLeaf)!=0 ) p->rc = FTS5_CORRUPT;
- fts5DataRelease(pLeaf);
- }
- }
- iPrevLeaf = fts5DlidxIterPgno(pDlidx);
-
- /* Check that the leaf page indicated by the iterator really does
- ** contain the rowid suggested by the same. */
- iKey = FTS5_SEGMENT_ROWID(iSegid, iPrevLeaf);
- pLeaf = fts5DataRead(p, iKey);
- if( pLeaf ){
- i64 iRowid;
- int iRowidOff = fts5LeafFirstRowidOff(pLeaf);
- ASSERT_SZLEAF_OK(pLeaf);
- if( iRowidOff>=pLeaf->szLeaf ){
- p->rc = FTS5_CORRUPT;
- }else{
- fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid);
- if( iRowid!=fts5DlidxIterRowid(pDlidx) ) p->rc = FTS5_CORRUPT;
- }
- fts5DataRelease(pLeaf);
- }
- }
-
- iDlidxPrevLeaf = iPg;
- fts5DlidxIterFree(pDlidx);
- fts5TestDlidxReverse(p, iSegid, iIdxLeaf);
- }else{
- iDlidxPrevLeaf = pSeg->pgnoLast;
- /* TODO: Check there is no doclist index */
- }
-
- iIdxPrevLeaf = iIdxLeaf;
- }
-
- rc2 = sqlite3_finalize(pStmt);
- if( p->rc==SQLITE_OK ) p->rc = rc2;
-
- /* Page iter.iLeaf must now be the rightmost leaf-page in the segment */
-#if 0
- if( p->rc==SQLITE_OK && iter.iLeaf!=pSeg->pgnoLast ){
- p->rc = FTS5_CORRUPT;
- }
-#endif
-}
-
-
-/*
-** Run internal checks to ensure that the FTS index (a) is internally
-** consistent and (b) contains entries for which the XOR of the checksums
-** as calculated by fts5IndexEntryCksum() is cksum.
-**
-** Return SQLITE_CORRUPT if any of the internal checks fail, or if the
-** checksum does not match. Return SQLITE_OK if all checks pass without
-** error, or some other SQLite error code if another error (e.g. OOM)
-** occurs.
-*/
-static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum){
- u64 cksum2 = 0; /* Checksum based on contents of indexes */
- Fts5Buffer poslist = {0,0,0}; /* Buffer used to hold a poslist */
- Fts5IndexIter *pIter; /* Used to iterate through entire index */
- Fts5Structure *pStruct; /* Index structure */
-
- /* Used by extra internal tests only run if NDEBUG is not defined */
- u64 cksum3 = 0; /* Checksum based on contents of indexes */
- Fts5Buffer term = {0,0,0}; /* Buffer used to hold most recent term */
-
- /* Load the FTS index structure */
- pStruct = fts5StructureRead(p);
-
- /* Check that the internal nodes of each segment match the leaves */
- if( pStruct ){
- int iLvl, iSeg;
- for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
- for(iSeg=0; iSeg<pStruct->aLevel[iLvl].nSeg; iSeg++){
- Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
- fts5IndexIntegrityCheckSegment(p, pSeg);
- }
- }
- }
-
- /* The cksum argument passed to this function is a checksum calculated
- ** based on all expected entries in the FTS index (including prefix index
- ** entries). This block checks that a checksum calculated based on the
- ** actual contents of FTS index is identical.
- **
- ** Two versions of the same checksum are calculated. The first (stack
- ** variable cksum2) based on entries extracted from the full-text index
- ** while doing a linear scan of each individual index in turn.
- **
- ** As each term is visited by the linear scans, a separate query for the
- ** same term is performed. cksum3 is calculated based on the entries
- ** extracted by these queries.
- */
- for(fts5MultiIterNew(p, pStruct, 0, 0, 0, 0, -1, 0, &pIter);
- fts5MultiIterEof(p, pIter)==0;
- fts5MultiIterNext(p, pIter, 0, 0)
- ){
- int n; /* Size of term in bytes */
- i64 iPos = 0; /* Position read from poslist */
- int iOff = 0; /* Offset within poslist */
- i64 iRowid = fts5MultiIterRowid(pIter);
- char *z = (char*)fts5MultiIterTerm(pIter, &n);
-
- /* If this is a new term, query for it. Update cksum3 with the results. */
- fts5TestTerm(p, &term, z, n, cksum2, &cksum3);
-
- poslist.n = 0;
- fts5SegiterPoslist(p, &pIter->aSeg[pIter->aFirst[1].iFirst] , 0, &poslist);
- while( 0==sqlite3Fts5PoslistNext64(poslist.p, poslist.n, &iOff, &iPos) ){
- int iCol = FTS5_POS2COLUMN(iPos);
- int iTokOff = FTS5_POS2OFFSET(iPos);
- cksum2 ^= fts5IndexEntryCksum(iRowid, iCol, iTokOff, -1, z, n);
- }
- }
- fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3);
-
- fts5MultiIterFree(p, pIter);
- if( p->rc==SQLITE_OK && cksum!=cksum2 ) p->rc = FTS5_CORRUPT;
-
- fts5StructureRelease(pStruct);
- fts5BufferFree(&term);
- fts5BufferFree(&poslist);
- return fts5IndexReturn(p);
-}
-
-
-/*
-** Calculate and return a checksum that is the XOR of the index entry
-** checksum of all entries that would be generated by the token specified
-** by the final 5 arguments.
-*/
-static u64 sqlite3Fts5IndexCksum(
- Fts5Config *pConfig, /* Configuration object */
- i64 iRowid, /* Document term appears in */
- int iCol, /* Column term appears in */
- int iPos, /* Position term appears in */
- const char *pTerm, int nTerm /* Term at iPos */
-){
- u64 ret = 0; /* Return value */
- int iIdx; /* For iterating through indexes */
-
- ret = fts5IndexEntryCksum(iRowid, iCol, iPos, 0, pTerm, nTerm);
-
- for(iIdx=0; iIdx<pConfig->nPrefix; iIdx++){
- int nByte = fts5IndexCharlenToBytelen(pTerm, nTerm, pConfig->aPrefix[iIdx]);
- if( nByte ){
- ret ^= fts5IndexEntryCksum(iRowid, iCol, iPos, iIdx+1, pTerm, nByte);
- }
- }
-
- return ret;
-}
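A minimal standalone sketch of the checksum scheme used by the integrity check, mirroring fts5IndexEntryCksum() and sqlite3Fts5IndexCksum() above; the rowid, column, offset, token and prefix lengths are invented, and '0' is assumed as the value of FTS5_MAIN_PREFIX.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Same mixing as fts5IndexEntryCksum() above. iIdx<0 means "no index id". */
static uint64_t entryCksum(
  int64_t iRowid, int iCol, int iPos, int iIdx, const char *pTerm, int nTerm
){
  int i;
  uint64_t ret = (uint64_t)iRowid;
  ret += (ret<<3) + iCol;
  ret += (ret<<3) + iPos;
  if( iIdx>=0 ) ret += (ret<<3) + ('0' + iIdx);  /* '0' assumed for FTS5_MAIN_PREFIX */
  for(i=0; i<nTerm; i++) ret += (ret<<3) + pTerm[i];
  return ret;
}

int main(void){
  const char *zTerm = "linux";
  int aPrefix[] = {2, 3};        /* assumed prefix-index lengths */
  int64_t iRowid = 42;
  int iCol = 1, iPos = 7, i;
  uint64_t cksum;

  /* One entry for the main index ... */
  cksum = entryCksum(iRowid, iCol, iPos, 0, zTerm, (int)strlen(zTerm));
  /* ... XORed with one entry per prefix index (the token is ASCII, so the
  ** character prefix length equals the byte length). */
  for(i=0; i<2; i++){
    cksum ^= entryCksum(iRowid, iCol, iPos, i+1, zTerm, aPrefix[i]);
  }
  printf("expected checksum contribution: %llx\n", (unsigned long long)cksum);
  return 0;
}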
-
-/*************************************************************************
-**************************************************************************
-** Below this point is the implementation of the fts5_decode() scalar
-** function only.
-*/
-
-/*
-** Decode a segment-data rowid from the %_data table. This function is
-** the opposite of macro FTS5_SEGMENT_ROWID().
-*/
-static void fts5DecodeRowid(
- i64 iRowid, /* Rowid from %_data table */
- int *piSegid, /* OUT: Segment id */
- int *pbDlidx, /* OUT: Dlidx flag */
- int *piHeight, /* OUT: Height */
- int *piPgno /* OUT: Page number */
-){
- *piPgno = (int)(iRowid & (((i64)1 << FTS5_DATA_PAGE_B) - 1));
- iRowid >>= FTS5_DATA_PAGE_B;
-
- *piHeight = (int)(iRowid & (((i64)1 << FTS5_DATA_HEIGHT_B) - 1));
- iRowid >>= FTS5_DATA_HEIGHT_B;
-
- *pbDlidx = (int)(iRowid & 0x0001);
- iRowid >>= FTS5_DATA_DLI_B;
-
- *piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1));
-}
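A standalone sketch of the rowid packing that fts5DecodeRowid() above reverses. The field widths below are stand-in values for the FTS5_DATA_*_B constants (defined elsewhere in this file), so treat them as assumptions; the layout itself - pgno in the lowest bits, then height, one dlidx flag bit, then the segment id - follows the decode order shown above.

#include <assert.h>
#include <stdint.h>

/* Example field widths; the real values come from the FTS5_DATA_*_B macros. */
#define DEMO_ID_B      16
#define DEMO_DLI_B      1
#define DEMO_HEIGHT_B   5
#define DEMO_PAGE_B    31

static int64_t demoEncodeRowid(int segid, int dlidx, int height, int pgno){
  int64_t r = segid;                      /* highest field: segment id     */
  r = (r<<DEMO_DLI_B)    | (dlidx & 1);   /* then the dlidx flag bit       */
  r = (r<<DEMO_HEIGHT_B) | height;        /* then the height field         */
  r = (r<<DEMO_PAGE_B)   | pgno;          /* pgno occupies the lowest bits */
  return r;
}

static void demoDecodeRowid(
  int64_t r, int *pSegid, int *pDlidx, int *pHeight, int *pPgno
){
  /* Same unpacking order as fts5DecodeRowid() above. */
  *pPgno   = (int)(r & (((int64_t)1<<DEMO_PAGE_B)-1));   r >>= DEMO_PAGE_B;
  *pHeight = (int)(r & (((int64_t)1<<DEMO_HEIGHT_B)-1)); r >>= DEMO_HEIGHT_B;
  *pDlidx  = (int)(r & 0x0001);                          r >>= DEMO_DLI_B;
  *pSegid  = (int)(r & (((int64_t)1<<DEMO_ID_B)-1));
}

int main(void){
  int segid, dlidx, height, pgno;
  demoDecodeRowid(demoEncodeRowid(7, 1, 0, 3), &segid, &dlidx, &height, &pgno);
  assert( segid==7 && dlidx==1 && height==0 && pgno==3 );
  return 0;
}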
-
-static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
- int iSegid, iHeight, iPgno, bDlidx; /* Rowid components */
- fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno);
-
- if( iSegid==0 ){
- if( iKey==FTS5_AVERAGES_ROWID ){
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{averages} ");
- }else{
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{structure}");
- }
- }
- else{
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}",
- bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno
- );
- }
-}
-
-static void fts5DebugStructure(
- int *pRc, /* IN/OUT: error code */
- Fts5Buffer *pBuf,
- Fts5Structure *p
-){
- int iLvl, iSeg; /* Iterate through levels, segments */
-
- for(iLvl=0; iLvl<p->nLevel; iLvl++){
- Fts5StructureLevel *pLvl = &p->aLevel[iLvl];
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf,
- " {lvl=%d nMerge=%d nSeg=%d", iLvl, pLvl->nMerge, pLvl->nSeg
- );
- for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
- Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}",
- pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast
- );
- }
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}");
- }
-}
-
-/*
-** This is part of the fts5_decode() debugging aid.
-**
-** Arguments pBlob/nBlob contain a serialized Fts5Structure object. This
-** function appends a human-readable representation of the same object
-** to the buffer passed as the second argument.
-*/
-static void fts5DecodeStructure(
- int *pRc, /* IN/OUT: error code */
- Fts5Buffer *pBuf,
- const u8 *pBlob, int nBlob
-){
- int rc; /* Return code */
- Fts5Structure *p = 0; /* Decoded structure object */
-
- rc = fts5StructureDecode(pBlob, nBlob, 0, &p);
- if( rc!=SQLITE_OK ){
- *pRc = rc;
- return;
- }
-
- fts5DebugStructure(pRc, pBuf, p);
- fts5StructureRelease(p);
-}
-
-/*
-** This is part of the fts5_decode() debugging aid.
-**
-** Arguments pBlob/nBlob contain an "averages" record. This function
-** appends a human-readable representation of the record to the buffer passed
-** as the second argument.
-*/
-static void fts5DecodeAverages(
- int *pRc, /* IN/OUT: error code */
- Fts5Buffer *pBuf,
- const u8 *pBlob, int nBlob
-){
- int i = 0;
- const char *zSpace = "";
-
- while( i<nBlob ){
- u64 iVal;
- i += sqlite3Fts5GetVarint(&pBlob[i], &iVal);
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "%s%d", zSpace, (int)iVal);
- zSpace = " ";
- }
-}
-
-/*
-** Buffer (a/n) is assumed to contain a list of serialized varints. Read
-** each varint and append its string representation to buffer pBuf. Return
-** once the input buffer is exhausted.
-**
-** The return value is the number of bytes read from the input buffer.
-*/
-static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
- int iOff = 0;
- while( iOff<n ){
- int iVal;
- iOff += fts5GetVarint32(&a[iOff], iVal);
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %d", iVal);
- }
- return iOff;
-}
-
-/*
-** The start of buffer (a/n) contains the start of a doclist. The doclist
-** may or may not finish within the buffer. This function appends a text
-** representation of the part of the doclist that is present to buffer
-** pBuf.
-**
-** The return value is the number of bytes read from the input buffer.
-*/
-static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
- i64 iDocid = 0;
- int iOff = 0;
-
- if( n>0 ){
- iOff = sqlite3Fts5GetVarint(a, (u64*)&iDocid);
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " id=%lld", iDocid);
- }
- while( iOff<n ){
- int nPos;
- int bDummy;
- iOff += fts5GetPoslistSize(&a[iOff], &nPos, &bDummy);
- iOff += fts5DecodePoslist(pRc, pBuf, &a[iOff], MIN(n-iOff, nPos));
- if( iOff<n ){
- i64 iDelta;
- iOff += sqlite3Fts5GetVarint(&a[iOff], (u64*)&iDelta);
- iDocid += iDelta;
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " id=%lld", iDocid);
- }
- }
-
- return iOff;
-}
-
-/*
-** The implementation of user-defined scalar function fts5_decode().
-*/
-static void fts5DecodeFunction(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args (always 2) */
- sqlite3_value **apVal /* Function arguments */
-){
- i64 iRowid; /* Rowid for record being decoded */
- int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */
- const u8 *aBlob; int n; /* Record to decode */
- u8 *a = 0;
- Fts5Buffer s; /* Build up text to return here */
- int rc = SQLITE_OK; /* Return code */
- int nSpace = 0;
-
- assert( nArg==2 );
- memset(&s, 0, sizeof(Fts5Buffer));
- iRowid = sqlite3_value_int64(apVal[0]);
-
- /* Make a copy of the second argument (a blob) in aBlob[]. The aBlob[]
- ** copy is followed by FTS5_DATA_ZERO_PADDING 0x00 bytes, which prevents
- ** buffer overreads even if the record is corrupt. */
- n = sqlite3_value_bytes(apVal[1]);
- aBlob = sqlite3_value_blob(apVal[1]);
- nSpace = n + FTS5_DATA_ZERO_PADDING;
- a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace);
- if( a==0 ) goto decode_out;
- memcpy(a, aBlob, n);
-
-
- fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno);
-
- fts5DebugRowid(&rc, &s, iRowid);
- if( bDlidx ){
- Fts5Data dlidx;
- Fts5DlidxLvl lvl;
-
- dlidx.p = a;
- dlidx.nn = n;
-
- memset(&lvl, 0, sizeof(Fts5DlidxLvl));
- lvl.pData = &dlidx;
- lvl.iLeafPgno = iPgno;
-
- for(fts5DlidxLvlNext(&lvl); lvl.bEof==0; fts5DlidxLvlNext(&lvl)){
- sqlite3Fts5BufferAppendPrintf(&rc, &s,
- " %d(%lld)", lvl.iLeafPgno, lvl.iRowid
- );
- }
- }else if( iSegid==0 ){
- if( iRowid==FTS5_AVERAGES_ROWID ){
- fts5DecodeAverages(&rc, &s, a, n);
- }else{
- fts5DecodeStructure(&rc, &s, a, n);
- }
- }else{
- Fts5Buffer term; /* Current term read from page */
- int szLeaf; /* Offset of pgidx in a[] */
- int iPgidxOff;
- int iPgidxPrev = 0; /* Previous value read from pgidx */
- int iTermOff = 0;
- int iRowidOff = 0;
- int iOff;
- int nDoclist;
-
- memset(&term, 0, sizeof(Fts5Buffer));
-
- if( n<4 ){
- sqlite3Fts5BufferSet(&rc, &s, 7, (const u8*)"corrupt");
- goto decode_out;
- }else{
- iRowidOff = fts5GetU16(&a[0]);
- iPgidxOff = szLeaf = fts5GetU16(&a[2]);
- if( iPgidxOff<n ){
- fts5GetVarint32(&a[iPgidxOff], iTermOff);
- }
- }
-
- /* Decode the position list tail at the start of the page */
- if( iRowidOff!=0 ){
- iOff = iRowidOff;
- }else if( iTermOff!=0 ){
- iOff = iTermOff;
- }else{
- iOff = szLeaf;
- }
- fts5DecodePoslist(&rc, &s, &a[4], iOff-4);
-
- /* Decode any more doclist data that appears on the page before the
- ** first term. */
- nDoclist = (iTermOff ? iTermOff : szLeaf) - iOff;
- fts5DecodeDoclist(&rc, &s, &a[iOff], nDoclist);
-
- while( iPgidxOff<n ){
- int bFirst = (iPgidxOff==szLeaf); /* True for first term on page */
- int nByte; /* Bytes of data */
- int iEnd;
-
- iPgidxOff += fts5GetVarint32(&a[iPgidxOff], nByte);
- iPgidxPrev += nByte;
- iOff = iPgidxPrev;
-
- if( iPgidxOff<n ){
- fts5GetVarint32(&a[iPgidxOff], nByte);
- iEnd = iPgidxPrev + nByte;
- }else{
- iEnd = szLeaf;
- }
-
- if( bFirst==0 ){
- iOff += fts5GetVarint32(&a[iOff], nByte);
- term.n = nByte;
- }
- iOff += fts5GetVarint32(&a[iOff], nByte);
- fts5BufferAppendBlob(&rc, &term, nByte, &a[iOff]);
- iOff += nByte;
-
- sqlite3Fts5BufferAppendPrintf(
- &rc, &s, " term=%.*s", term.n, (const char*)term.p
- );
- iOff += fts5DecodeDoclist(&rc, &s, &a[iOff], iEnd-iOff);
- }
-
- fts5BufferFree(&term);
- }
-
- decode_out:
- sqlite3_free(a);
- if( rc==SQLITE_OK ){
- sqlite3_result_text(pCtx, (const char*)s.p, s.n, SQLITE_TRANSIENT);
- }else{
- sqlite3_result_error_code(pCtx, rc);
- }
- fts5BufferFree(&s);
-}
-
-/*
-** The implementation of user-defined scalar function fts5_rowid().
-*/
-static void fts5RowidFunction(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args (always 2) */
- sqlite3_value **apVal /* Function arguments */
-){
- const char *zArg;
- if( nArg==0 ){
- sqlite3_result_error(pCtx, "should be: fts5_rowid(subject, ....)", -1);
- }else{
- zArg = (const char*)sqlite3_value_text(apVal[0]);
- if( 0==sqlite3_stricmp(zArg, "segment") ){
- i64 iRowid;
- int segid, pgno;
- if( nArg!=3 ){
- sqlite3_result_error(pCtx,
- "should be: fts5_rowid('segment', segid, pgno))", -1
- );
- }else{
- segid = sqlite3_value_int(apVal[1]);
- pgno = sqlite3_value_int(apVal[2]);
- iRowid = FTS5_SEGMENT_ROWID(segid, pgno);
- sqlite3_result_int64(pCtx, iRowid);
- }
- }else{
- sqlite3_result_error(pCtx,
- "first arg to fts5_rowid() must be 'segment'" , -1
- );
- }
- }
-}
-
-/*
-** This is called as part of registering the FTS5 module with database
-** connection db. It registers several user-defined scalar functions useful
-** with FTS5.
-**
-** If successful, SQLITE_OK is returned. If an error occurs, some other
-** SQLite error code is returned instead.
-*/
-static int sqlite3Fts5IndexInit(sqlite3 *db){
- int rc = sqlite3_create_function(
- db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0
- );
- if( rc==SQLITE_OK ){
- rc = sqlite3_create_function(
- db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0
- );
- }
- return rc;
-}
-
-
-/*
-** 2014 Jun 09
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This is an SQLite module implementing full-text search.
-*/
-
-
-
-/*
-** This variable is set to false (0) when running tests for which the on-disk
-** structures are known not to be corrupt; otherwise it is true. When it is
-** false, extra assert() conditions in the fts5 code are activated - conditions
-** that only hold if it is guaranteed that the fts5 database is not corrupt.
-*/
-SQLITE_API int sqlite3_fts5_may_be_corrupt = 1;
-
-
-typedef struct Fts5Auxdata Fts5Auxdata;
-typedef struct Fts5Auxiliary Fts5Auxiliary;
-typedef struct Fts5Cursor Fts5Cursor;
-typedef struct Fts5Sorter Fts5Sorter;
-typedef struct Fts5Table Fts5Table;
-typedef struct Fts5TokenizerModule Fts5TokenizerModule;
-
-/*
-** NOTES ON TRANSACTIONS:
-**
-** SQLite invokes the following virtual table methods as transactions are
-** opened and closed by the user:
-**
-** xBegin(): Start of a new transaction.
-** xSync(): Initial part of two-phase commit.
-** xCommit(): Final part of two-phase commit.
-** xRollback(): Rollback the transaction.
-**
-** Anything that is required as part of a commit that may fail is performed
-** in the xSync() callback. Current versions of SQLite ignore any errors
-** returned by xCommit().
-**
-** And as sub-transactions are opened/closed:
-**
-** xSavepoint(int S): Open savepoint S.
-** xRelease(int S): Commit and close savepoint S.
-** xRollbackTo(int S): Rollback to start of savepoint S.
-**
-** During a write-transaction the fts5_index.c module may cache some data
-** in-memory. It is flushed to disk whenever xSync(), xRelease() or
-** xSavepoint() is called, and discarded whenever xRollback() or xRollbackTo()
-** is called.
-**
-** Additionally, if SQLITE_DEBUG is defined, an instance of the following
-** structure is used to record the current transaction state. This information
-** is not required, but it is used in the assert() statements executed by
-** function fts5CheckTransactionState() (see below).
-*/
-struct Fts5TransactionState {
- int eState; /* 0==closed, 1==open, 2==synced */
- int iSavepoint; /* Number of open savepoints (0 -> none) */
-};
-
-/*
-** A single object of this type is allocated when the FTS5 module is
-** registered with a database handle. It is used to store pointers to
-** all registered FTS5 extensions - tokenizers and auxiliary functions.
-*/
-struct Fts5Global {
- fts5_api api; /* User visible part of object (see fts5.h) */
- sqlite3 *db; /* Associated database connection */
- i64 iNextId; /* Used to allocate unique cursor ids */
- Fts5Auxiliary *pAux; /* First in list of all aux. functions */
- Fts5TokenizerModule *pTok; /* First in list of all tokenizer modules */
- Fts5TokenizerModule *pDfltTok; /* Default tokenizer module */
- Fts5Cursor *pCsr; /* First in list of all open cursors */
-};
-
-/*
-** Each auxiliary function registered with the FTS5 module is represented
-** by an object of the following type. All such objects are stored as part
-** of the Fts5Global.pAux list.
-*/
-struct Fts5Auxiliary {
- Fts5Global *pGlobal; /* Global context for this function */
- char *zFunc; /* Function name (nul-terminated) */
- void *pUserData; /* User-data pointer */
- fts5_extension_function xFunc; /* Callback function */
- void (*xDestroy)(void*); /* Destructor function */
- Fts5Auxiliary *pNext; /* Next registered auxiliary function */
-};
-
-/*
-** Each tokenizer module registered with the FTS5 module is represented
-** by an object of the following type. All such objects are stored as part
-** of the Fts5Global.pTok list.
-*/
-struct Fts5TokenizerModule {
- char *zName; /* Name of tokenizer */
- void *pUserData; /* User pointer passed to xCreate() */
- fts5_tokenizer x; /* Tokenizer functions */
- void (*xDestroy)(void*); /* Destructor function */
- Fts5TokenizerModule *pNext; /* Next registered tokenizer module */
-};
-
-/*
-** Virtual-table object.
-*/
-struct Fts5Table {
- sqlite3_vtab base; /* Base class used by SQLite core */
- Fts5Config *pConfig; /* Virtual table configuration */
- Fts5Index *pIndex; /* Full-text index */
- Fts5Storage *pStorage; /* Document store */
- Fts5Global *pGlobal; /* Global (connection wide) data */
- Fts5Cursor *pSortCsr; /* Sort data from this cursor */
-#ifdef SQLITE_DEBUG
- struct Fts5TransactionState ts;
-#endif
-};
-
-struct Fts5MatchPhrase {
- Fts5Buffer *pPoslist; /* Pointer to current poslist */
- int nTerm; /* Size of phrase in terms */
-};
-
-/*
-** pStmt:
-** SELECT rowid, <fts> FROM <fts> ORDER BY +rank;
-**
-** aIdx[]:
-** There is one entry in the aIdx[] array for each phrase in the query,
-** the value of which is the offset within aPoslist[] following the last
-** byte of the position list for the corresponding phrase.
-*/
-struct Fts5Sorter {
- sqlite3_stmt *pStmt;
- i64 iRowid; /* Current rowid */
- const u8 *aPoslist; /* Position lists for current row */
- int nIdx; /* Number of entries in aIdx[] */
- int aIdx[1]; /* Offsets into aPoslist for current row */
-};
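A small standalone decoder for the sorter blob layout described above. The sample bytes are invented, and every length is kept below 128 so that its varint encoding is a single byte; with those assumptions the nPhrase-1 leading varints give the poslist sizes, and aIdx[] ends up holding, for each phrase, the offset just past its position list.

#include <stdio.h>

int main(void){
  /* Blob for a 3-phrase query: two one-byte varint lengths (4 and 2),
  ** followed by the three concatenated position lists (4+2+3 bytes). */
  unsigned char aSample[] = { 4, 2,  1,2,3,4,  5,6,  7,8,9 };
  int nBlob = (int)sizeof(aSample);
  int nIdx = 3;                  /* number of phrases */
  int aIdx[3];
  const unsigned char *a = aSample;
  int i, iOff = 0;

  /* Each length here is <128, so each varint is exactly one byte. */
  for(i=0; i<nIdx-1; i++){
    int iVal = *a++;
    iOff += iVal;
    aIdx[i] = iOff;
  }
  aIdx[i] = (int)(&aSample[nBlob] - a);   /* offset past the final poslist */

  /* a now plays the role of Fts5Sorter.aPoslist. Prints 4, 6, 9: phrase 0
  ** occupies a[0..3], phrase 1 a[4..5] and phrase 2 a[6..8]. */
  for(i=0; i<nIdx; i++) printf("aIdx[%d] = %d\n", i, aIdx[i]);
  return 0;
}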
-
-
-/*
-** Virtual-table cursor object.
-**
-** iSpecial:
-** If this is a 'special' query (refer to function fts5SpecialMatch()),
-** then this variable contains the result of the query.
-**
-** iFirstRowid, iLastRowid:
-** These variables are only used for FTS5_PLAN_MATCH cursors. Assuming the
-** cursor iterates in ascending order of rowids, iFirstRowid is the lower
-** limit of rowids to return, and iLastRowid the upper. In other words, the
-** WHERE clause in the user's query might have been:
-**
-** <tbl> MATCH <expr> AND rowid BETWEEN $iFirstRowid AND $iLastRowid
-**
-** If the cursor iterates in descending order of rowid, iFirstRowid
-** is the upper limit (i.e. the "first" rowid visited) and iLastRowid
-** the lower.
-*/
-struct Fts5Cursor {
- sqlite3_vtab_cursor base; /* Base class used by SQLite core */
- Fts5Cursor *pNext; /* Next cursor in Fts5Cursor.pCsr list */
- int *aColumnSize; /* Values for xColumnSize() */
- i64 iCsrId; /* Cursor id */
-
- /* Zero from this point onwards on cursor reset */
- int ePlan; /* FTS5_PLAN_XXX value */
- int bDesc; /* True for "ORDER BY rowid DESC" queries */
- i64 iFirstRowid; /* Return no rowids earlier than this */
- i64 iLastRowid; /* Return no rowids later than this */
- sqlite3_stmt *pStmt; /* Statement used to read %_content */
- Fts5Expr *pExpr; /* Expression for MATCH queries */
- Fts5Sorter *pSorter; /* Sorter for "ORDER BY rank" queries */
- int csrflags; /* Mask of cursor flags (see below) */
- i64 iSpecial; /* Result of special query */
-
- /* "rank" function. Populated on demand from vtab.xColumn(). */
- char *zRank; /* Custom rank function */
- char *zRankArgs; /* Custom rank function args */
- Fts5Auxiliary *pRank; /* Rank callback (or NULL) */
- int nRankArg; /* Number of trailing arguments for rank() */
- sqlite3_value **apRankArg; /* Array of trailing arguments */
- sqlite3_stmt *pRankArgStmt; /* Origin of objects in apRankArg[] */
-
- /* Auxiliary data storage */
- Fts5Auxiliary *pAux; /* Currently executing extension function */
- Fts5Auxdata *pAuxdata; /* First in linked list of saved aux-data */
-
- /* Cache used by auxiliary functions xInst() and xInstCount() */
- Fts5PoslistReader *aInstIter; /* One for each phrase */
- int nInstAlloc; /* Size of aInst[] array (entries / 3) */
- int nInstCount; /* Number of phrase instances */
- int *aInst; /* 3 integers per phrase instance */
-};
-
-/*
-** Bits that make up the "idxNum" parameter passed indirectly by
-** xBestIndex() to xFilter().
-*/
-#define FTS5_BI_MATCH 0x0001 /* <tbl> MATCH ? */
-#define FTS5_BI_RANK 0x0002 /* rank MATCH ? */
-#define FTS5_BI_ROWID_EQ 0x0004 /* rowid == ? */
-#define FTS5_BI_ROWID_LE 0x0008 /* rowid <= ? */
-#define FTS5_BI_ROWID_GE 0x0010 /* rowid >= ? */
-
-#define FTS5_BI_ORDER_RANK 0x0020
-#define FTS5_BI_ORDER_ROWID 0x0040
-#define FTS5_BI_ORDER_DESC 0x0080
-
-/*
-** Values for Fts5Cursor.csrflags
-*/
-#define FTS5CSR_REQUIRE_CONTENT 0x01
-#define FTS5CSR_REQUIRE_DOCSIZE 0x02
-#define FTS5CSR_REQUIRE_INST 0x04
-#define FTS5CSR_EOF 0x08
-#define FTS5CSR_FREE_ZRANK 0x10
-#define FTS5CSR_REQUIRE_RESEEK 0x20
-
-#define BitFlagAllTest(x,y) (((x) & (y))==(y))
-#define BitFlagTest(x,y) (((x) & (y))!=0)
-
-
-/*
-** Macros to Set(), Clear() and Test() cursor flags.
-*/
-#define CsrFlagSet(pCsr, flag) ((pCsr)->csrflags |= (flag))
-#define CsrFlagClear(pCsr, flag) ((pCsr)->csrflags &= ~(flag))
-#define CsrFlagTest(pCsr, flag) ((pCsr)->csrflags & (flag))
-
-struct Fts5Auxdata {
- Fts5Auxiliary *pAux; /* Extension to which this belongs */
- void *pPtr; /* Pointer value */
- void(*xDelete)(void*); /* Destructor */
- Fts5Auxdata *pNext; /* Next object in linked list */
-};
-
-#ifdef SQLITE_DEBUG
-#define FTS5_BEGIN 1
-#define FTS5_SYNC 2
-#define FTS5_COMMIT 3
-#define FTS5_ROLLBACK 4
-#define FTS5_SAVEPOINT 5
-#define FTS5_RELEASE 6
-#define FTS5_ROLLBACKTO 7
-static void fts5CheckTransactionState(Fts5Table *p, int op, int iSavepoint){
- switch( op ){
- case FTS5_BEGIN:
- assert( p->ts.eState==0 );
- p->ts.eState = 1;
- p->ts.iSavepoint = -1;
- break;
-
- case FTS5_SYNC:
- assert( p->ts.eState==1 );
- p->ts.eState = 2;
- break;
-
- case FTS5_COMMIT:
- assert( p->ts.eState==2 );
- p->ts.eState = 0;
- break;
-
- case FTS5_ROLLBACK:
- assert( p->ts.eState==1 || p->ts.eState==2 || p->ts.eState==0 );
- p->ts.eState = 0;
- break;
-
- case FTS5_SAVEPOINT:
- assert( p->ts.eState==1 );
- assert( iSavepoint>=0 );
- assert( iSavepoint>p->ts.iSavepoint );
- p->ts.iSavepoint = iSavepoint;
- break;
-
- case FTS5_RELEASE:
- assert( p->ts.eState==1 );
- assert( iSavepoint>=0 );
- assert( iSavepoint<=p->ts.iSavepoint );
- p->ts.iSavepoint = iSavepoint-1;
- break;
-
- case FTS5_ROLLBACKTO:
- assert( p->ts.eState==1 );
- assert( iSavepoint>=0 );
- assert( iSavepoint<=p->ts.iSavepoint );
- p->ts.iSavepoint = iSavepoint;
- break;
- }
-}
-#else
-# define fts5CheckTransactionState(x,y,z)
-#endif
-
-/*
-** Return true if pTab is a contentless table.
-*/
-static int fts5IsContentless(Fts5Table *pTab){
- return pTab->pConfig->eContent==FTS5_CONTENT_NONE;
-}
-
-/*
-** Delete a virtual table handle allocated by fts5InitVtab().
-*/
-static void fts5FreeVtab(Fts5Table *pTab){
- if( pTab ){
- sqlite3Fts5IndexClose(pTab->pIndex);
- sqlite3Fts5StorageClose(pTab->pStorage);
- sqlite3Fts5ConfigFree(pTab->pConfig);
- sqlite3_free(pTab);
- }
-}
-
-/*
-** The xDisconnect() virtual table method.
-*/
-static int fts5DisconnectMethod(sqlite3_vtab *pVtab){
- fts5FreeVtab((Fts5Table*)pVtab);
- return SQLITE_OK;
-}
-
-/*
-** The xDestroy() virtual table method.
-*/
-static int fts5DestroyMethod(sqlite3_vtab *pVtab){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- int rc = sqlite3Fts5DropAll(pTab->pConfig);
- if( rc==SQLITE_OK ){
- fts5FreeVtab((Fts5Table*)pVtab);
- }
- return rc;
-}
-
-/*
-** This function is the implementation of both the xConnect and xCreate
-** methods of the FTS5 virtual table.
-**
-** The argv[] array contains the following:
-**
-** argv[0] -> module name ("fts5")
-** argv[1] -> database name
-** argv[2] -> table name
-** argv[...] -> "column name" and other module argument fields.
-*/
-static int fts5InitVtab(
- int bCreate, /* True for xCreate, false for xConnect */
- sqlite3 *db, /* The SQLite database connection */
- void *pAux, /* Hash table containing tokenizers */
- int argc, /* Number of elements in argv array */
- const char * const *argv, /* xCreate/xConnect argument array */
- sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */
- char **pzErr /* Write any error message here */
-){
- Fts5Global *pGlobal = (Fts5Global*)pAux;
- const char **azConfig = (const char**)argv;
- int rc = SQLITE_OK; /* Return code */
- Fts5Config *pConfig = 0; /* Results of parsing argc/argv */
- Fts5Table *pTab = 0; /* New virtual table object */
-
- /* Allocate the new vtab object and parse the configuration */
- pTab = (Fts5Table*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Table));
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5ConfigParse(pGlobal, db, argc, azConfig, &pConfig, pzErr);
- assert( (rc==SQLITE_OK && *pzErr==0) || pConfig==0 );
- }
- if( rc==SQLITE_OK ){
- pTab->pConfig = pConfig;
- pTab->pGlobal = pGlobal;
- }
-
- /* Open the index sub-system */
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5IndexOpen(pConfig, bCreate, &pTab->pIndex, pzErr);
- }
-
- /* Open the storage sub-system */
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageOpen(
- pConfig, pTab->pIndex, bCreate, &pTab->pStorage, pzErr
- );
- }
-
- /* Call sqlite3_declare_vtab() */
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5ConfigDeclareVtab(pConfig);
- }
-
- if( rc!=SQLITE_OK ){
- fts5FreeVtab(pTab);
- pTab = 0;
- }else if( bCreate ){
- fts5CheckTransactionState(pTab, FTS5_BEGIN, 0);
- }
- *ppVTab = (sqlite3_vtab*)pTab;
- return rc;
-}
-
-/*
-** The xConnect() and xCreate() methods for the virtual table. All the
-** work is done in function fts5InitVtab().
-*/
-static int fts5ConnectMethod(
- sqlite3 *db, /* Database connection */
- void *pAux, /* Pointer to tokenizer hash table */
- int argc, /* Number of elements in argv array */
- const char * const *argv, /* xCreate/xConnect argument array */
- sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
- char **pzErr /* OUT: sqlite3_malloc'd error message */
-){
- return fts5InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr);
-}
-static int fts5CreateMethod(
- sqlite3 *db, /* Database connection */
- void *pAux, /* Pointer to tokenizer hash table */
- int argc, /* Number of elements in argv array */
- const char * const *argv, /* xCreate/xConnect argument array */
- sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
- char **pzErr /* OUT: sqlite3_malloc'd error message */
-){
- return fts5InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr);
-}
-
-/*
-** The different query plans.
-*/
-#define FTS5_PLAN_MATCH 1 /* (<tbl> MATCH ?) */
-#define FTS5_PLAN_SOURCE 2 /* A source cursor for SORTED_MATCH */
-#define FTS5_PLAN_SPECIAL 3 /* An internal query */
-#define FTS5_PLAN_SORTED_MATCH 4 /* (<tbl> MATCH ? ORDER BY rank) */
-#define FTS5_PLAN_SCAN 5 /* No usable constraint */
-#define FTS5_PLAN_ROWID 6 /* (rowid = ?) */
-
-/*
-** Set the SQLITE_INDEX_SCAN_UNIQUE flag in pIdxInfo->flags, unless this
-** extension is currently being used with a version of SQLite too old to
-** support index-info flags, in which case this function is a no-op.
-*/
-static void fts5SetUniqueFlag(sqlite3_index_info *pIdxInfo){
-#if SQLITE_VERSION_NUMBER>=3008012
- if( sqlite3_libversion_number()>=3008012 ){
- pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
- }
-#endif
-}
-
-/*
-** Implementation of the xBestIndex method for FTS5 tables. Within the
-** WHERE constraint, it searches for the following:
-**
-** 1. A MATCH constraint against the special column.
-** 2. A MATCH constraint against the "rank" column.
-** 3. An == constraint against the rowid column.
-** 4. A < or <= constraint against the rowid column.
-** 5. A > or >= constraint against the rowid column.
-**
-** Within the ORDER BY, either:
-**
-**   6. ORDER BY rank [ASC|DESC]
-**   7. ORDER BY rowid [ASC|DESC]
-**
-** Costs are assigned as follows:
-**
-** a) If an unusable MATCH operator is present in the WHERE clause, the
-** cost is unconditionally set to 1e50 (a really big number).
-**
-**   b) If a MATCH operator is present, the cost depends on the other
-**      constraints also present, as follows:
-**
-** * No other constraints: cost=1000.0
-** * One rowid range constraint: cost=750.0
-** * Both rowid range constraints: cost=500.0
-** * An == rowid constraint: cost=100.0
-**
-**   c) Otherwise, if there is no MATCH:
-**
-** * No other constraints: cost=1000000.0
-** * One rowid range constraint: cost=750000.0
-** * Both rowid range constraints: cost=250000.0
-** * An == rowid constraint: cost=10.0
-**
-** Costs are not modified by the ORDER BY clause.
-*/
-static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
- Fts5Table *pTab = (Fts5Table*)pVTab;
- Fts5Config *pConfig = pTab->pConfig;
- int idxFlags = 0; /* Parameter passed through to xFilter() */
- int bHasMatch;
- int iNext;
- int i;
-
- struct Constraint {
- int op; /* Mask against sqlite3_index_constraint.op */
- int fts5op; /* FTS5 mask for idxFlags */
- int iCol; /* 0==rowid, 1==tbl, 2==rank */
- int omit; /* True to omit this if found */
- int iConsIndex; /* Index in pInfo->aConstraint[] */
- } aConstraint[] = {
- {SQLITE_INDEX_CONSTRAINT_MATCH|SQLITE_INDEX_CONSTRAINT_EQ,
- FTS5_BI_MATCH, 1, 1, -1},
- {SQLITE_INDEX_CONSTRAINT_MATCH|SQLITE_INDEX_CONSTRAINT_EQ,
- FTS5_BI_RANK, 2, 1, -1},
- {SQLITE_INDEX_CONSTRAINT_EQ, FTS5_BI_ROWID_EQ, 0, 0, -1},
- {SQLITE_INDEX_CONSTRAINT_LT|SQLITE_INDEX_CONSTRAINT_LE,
- FTS5_BI_ROWID_LE, 0, 0, -1},
- {SQLITE_INDEX_CONSTRAINT_GT|SQLITE_INDEX_CONSTRAINT_GE,
- FTS5_BI_ROWID_GE, 0, 0, -1},
- };
-
- int aColMap[3];
- aColMap[0] = -1;
- aColMap[1] = pConfig->nCol;
- aColMap[2] = pConfig->nCol+1;
-
- /* Set idxFlags flags for all WHERE clause terms that will be used. */
- for(i=0; i<pInfo->nConstraint; i++){
- struct sqlite3_index_constraint *p = &pInfo->aConstraint[i];
- int j;
- for(j=0; j<sizeof(aConstraint)/sizeof(aConstraint[0]); j++){
- struct Constraint *pC = &aConstraint[j];
- if( p->iColumn==aColMap[pC->iCol] && p->op & pC->op ){
- if( p->usable ){
- pC->iConsIndex = i;
- idxFlags |= pC->fts5op;
- }else if( j==0 ){
- /* As there exists an unusable MATCH constraint this is an
- ** unusable plan. Set a prohibitively high cost. */
- pInfo->estimatedCost = 1e50;
- return SQLITE_OK;
- }
- }
- }
- }
-
- /* Set idxFlags flags for the ORDER BY clause */
- if( pInfo->nOrderBy==1 ){
- int iSort = pInfo->aOrderBy[0].iColumn;
- if( iSort==(pConfig->nCol+1) && BitFlagTest(idxFlags, FTS5_BI_MATCH) ){
- idxFlags |= FTS5_BI_ORDER_RANK;
- }else if( iSort==-1 ){
- idxFlags |= FTS5_BI_ORDER_ROWID;
- }
- if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){
- pInfo->orderByConsumed = 1;
- if( pInfo->aOrderBy[0].desc ){
- idxFlags |= FTS5_BI_ORDER_DESC;
- }
- }
- }
-
- /* Calculate the estimated cost based on the flags set in idxFlags. */
- bHasMatch = BitFlagTest(idxFlags, FTS5_BI_MATCH);
- if( BitFlagTest(idxFlags, FTS5_BI_ROWID_EQ) ){
- pInfo->estimatedCost = bHasMatch ? 100.0 : 10.0;
- if( bHasMatch==0 ) fts5SetUniqueFlag(pInfo);
- }else if( BitFlagAllTest(idxFlags, FTS5_BI_ROWID_LE|FTS5_BI_ROWID_GE) ){
- pInfo->estimatedCost = bHasMatch ? 500.0 : 250000.0;
- }else if( BitFlagTest(idxFlags, FTS5_BI_ROWID_LE|FTS5_BI_ROWID_GE) ){
- pInfo->estimatedCost = bHasMatch ? 750.0 : 750000.0;
- }else{
- pInfo->estimatedCost = bHasMatch ? 1000.0 : 1000000.0;
- }
-
- /* Assign argvIndex values to each constraint in use. */
- iNext = 1;
- for(i=0; i<sizeof(aConstraint)/sizeof(aConstraint[0]); i++){
- struct Constraint *pC = &aConstraint[i];
- if( pC->iConsIndex>=0 ){
- pInfo->aConstraintUsage[pC->iConsIndex].argvIndex = iNext++;
- pInfo->aConstraintUsage[pC->iConsIndex].omit = pC->omit;
- }
- }
-
- pInfo->idxNum = idxFlags;
- return SQLITE_OK;
-}
-
-/*
-** Implementation of xOpen method.
-*/
-static int fts5OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
- Fts5Table *pTab = (Fts5Table*)pVTab;
- Fts5Config *pConfig = pTab->pConfig;
- Fts5Cursor *pCsr; /* New cursor object */
- int nByte; /* Bytes of space to allocate */
- int rc = SQLITE_OK; /* Return code */
-
- nByte = sizeof(Fts5Cursor) + pConfig->nCol * sizeof(int);
- pCsr = (Fts5Cursor*)sqlite3_malloc(nByte);
- if( pCsr ){
- Fts5Global *pGlobal = pTab->pGlobal;
- memset(pCsr, 0, nByte);
- pCsr->aColumnSize = (int*)&pCsr[1];
- pCsr->pNext = pGlobal->pCsr;
- pGlobal->pCsr = pCsr;
- pCsr->iCsrId = ++pGlobal->iNextId;
- }else{
- rc = SQLITE_NOMEM;
- }
- *ppCsr = (sqlite3_vtab_cursor*)pCsr;
- return rc;
-}
-
-static int fts5StmtType(Fts5Cursor *pCsr){
- if( pCsr->ePlan==FTS5_PLAN_SCAN ){
- return (pCsr->bDesc) ? FTS5_STMT_SCAN_DESC : FTS5_STMT_SCAN_ASC;
- }
- return FTS5_STMT_LOOKUP;
-}
-
-/*
-** This function is called after the cursor passed as the only argument
-** is moved to point at a different row. It clears all cached data
-** specific to the previous row stored by the cursor object.
-*/
-static void fts5CsrNewrow(Fts5Cursor *pCsr){
- CsrFlagSet(pCsr,
- FTS5CSR_REQUIRE_CONTENT
- | FTS5CSR_REQUIRE_DOCSIZE
- | FTS5CSR_REQUIRE_INST
- );
-}
-
-static void fts5FreeCursorComponents(Fts5Cursor *pCsr){
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- Fts5Auxdata *pData;
- Fts5Auxdata *pNext;
-
- sqlite3_free(pCsr->aInstIter);
- sqlite3_free(pCsr->aInst);
- if( pCsr->pStmt ){
- int eStmt = fts5StmtType(pCsr);
- sqlite3Fts5StorageStmtRelease(pTab->pStorage, eStmt, pCsr->pStmt);
- }
- if( pCsr->pSorter ){
- Fts5Sorter *pSorter = pCsr->pSorter;
- sqlite3_finalize(pSorter->pStmt);
- sqlite3_free(pSorter);
- }
-
- if( pCsr->ePlan!=FTS5_PLAN_SOURCE ){
- sqlite3Fts5ExprFree(pCsr->pExpr);
- }
-
- for(pData=pCsr->pAuxdata; pData; pData=pNext){
- pNext = pData->pNext;
- if( pData->xDelete ) pData->xDelete(pData->pPtr);
- sqlite3_free(pData);
- }
-
- sqlite3_finalize(pCsr->pRankArgStmt);
- sqlite3_free(pCsr->apRankArg);
-
- if( CsrFlagTest(pCsr, FTS5CSR_FREE_ZRANK) ){
- sqlite3_free(pCsr->zRank);
- sqlite3_free(pCsr->zRankArgs);
- }
-
- memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan - (u8*)pCsr));
-}
-
-
-/*
-** Close the cursor. For additional information see the documentation
-** on the xClose method of the virtual table interface.
-*/
-static int fts5CloseMethod(sqlite3_vtab_cursor *pCursor){
- if( pCursor ){
- Fts5Table *pTab = (Fts5Table*)(pCursor->pVtab);
- Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
- Fts5Cursor **pp;
-
- fts5FreeCursorComponents(pCsr);
- /* Remove the cursor from the Fts5Global.pCsr list */
- for(pp=&pTab->pGlobal->pCsr; (*pp)!=pCsr; pp=&(*pp)->pNext);
- *pp = pCsr->pNext;
-
- sqlite3_free(pCsr);
- }
- return SQLITE_OK;
-}
-
-static int fts5SorterNext(Fts5Cursor *pCsr){
- Fts5Sorter *pSorter = pCsr->pSorter;
- int rc;
-
- rc = sqlite3_step(pSorter->pStmt);
- if( rc==SQLITE_DONE ){
- rc = SQLITE_OK;
- CsrFlagSet(pCsr, FTS5CSR_EOF);
- }else if( rc==SQLITE_ROW ){
- const u8 *a;
- const u8 *aBlob;
- int nBlob;
- int i;
- int iOff = 0;
- rc = SQLITE_OK;
-
- pSorter->iRowid = sqlite3_column_int64(pSorter->pStmt, 0);
- nBlob = sqlite3_column_bytes(pSorter->pStmt, 1);
- aBlob = a = sqlite3_column_blob(pSorter->pStmt, 1);
-
- for(i=0; i<(pSorter->nIdx-1); i++){
- int iVal;
- a += fts5GetVarint32(a, iVal);
- iOff += iVal;
- pSorter->aIdx[i] = iOff;
- }
- pSorter->aIdx[i] = &aBlob[nBlob] - a;
-
- pSorter->aPoslist = a;
- fts5CsrNewrow(pCsr);
- }
-
- return rc;
-}
-
-
-/*
-** Set the FTS5CSR_REQUIRE_RESEEK flag on all FTS5_PLAN_MATCH cursors
-** open on table pTab.
-*/
-static void fts5TripCursors(Fts5Table *pTab){
- Fts5Cursor *pCsr;
- for(pCsr=pTab->pGlobal->pCsr; pCsr; pCsr=pCsr->pNext){
- if( pCsr->ePlan==FTS5_PLAN_MATCH
- && pCsr->base.pVtab==(sqlite3_vtab*)pTab
- ){
- CsrFlagSet(pCsr, FTS5CSR_REQUIRE_RESEEK);
- }
- }
-}
-
-/*
-** If the REQUIRE_RESEEK flag is set on the cursor passed as the first
-** argument, close and reopen all Fts5IndexIter iterators that the cursor
-** is using. Then attempt to move the cursor to a rowid equal to or later
-** (in the cursor's sort order - ASC or DESC) than the current rowid.
-**
-** If the new rowid is not equal to the old, set output parameter *pbSkip
-** to 1 before returning. Otherwise, leave it unchanged.
-**
-** Return SQLITE_OK if successful or if no reseek was required, or an
-** error code if an error occurred.
-*/
-static int fts5CursorReseek(Fts5Cursor *pCsr, int *pbSkip){
- int rc = SQLITE_OK;
- assert( *pbSkip==0 );
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_RESEEK) ){
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- int bDesc = pCsr->bDesc;
- i64 iRowid = sqlite3Fts5ExprRowid(pCsr->pExpr);
-
- rc = sqlite3Fts5ExprFirst(pCsr->pExpr, pTab->pIndex, iRowid, bDesc);
- if( rc==SQLITE_OK && iRowid!=sqlite3Fts5ExprRowid(pCsr->pExpr) ){
- *pbSkip = 1;
- }
-
- CsrFlagClear(pCsr, FTS5CSR_REQUIRE_RESEEK);
- fts5CsrNewrow(pCsr);
- if( sqlite3Fts5ExprEof(pCsr->pExpr) ){
- CsrFlagSet(pCsr, FTS5CSR_EOF);
- }
- }
- return rc;
-}
-
-
-/*
-** Advance the cursor to the next row in the table that matches the
-** search criteria.
-**
-** Return SQLITE_OK if nothing goes wrong. SQLITE_OK is returned
-** even if we reach end-of-file. The fts5EofMethod() will be called
-** subsequently to determine whether or not an EOF was hit.
-*/
-static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
- int rc = SQLITE_OK;
-
- assert( (pCsr->ePlan<3)==
- (pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE)
- );
-
- if( pCsr->ePlan<3 ){
- int bSkip = 0;
- if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc;
- rc = sqlite3Fts5ExprNext(pCsr->pExpr, pCsr->iLastRowid);
- if( sqlite3Fts5ExprEof(pCsr->pExpr) ){
- CsrFlagSet(pCsr, FTS5CSR_EOF);
- }
- fts5CsrNewrow(pCsr);
- }else{
- switch( pCsr->ePlan ){
- case FTS5_PLAN_SPECIAL: {
- CsrFlagSet(pCsr, FTS5CSR_EOF);
- break;
- }
-
- case FTS5_PLAN_SORTED_MATCH: {
- rc = fts5SorterNext(pCsr);
- break;
- }
-
- default:
- rc = sqlite3_step(pCsr->pStmt);
- if( rc!=SQLITE_ROW ){
- CsrFlagSet(pCsr, FTS5CSR_EOF);
- rc = sqlite3_reset(pCsr->pStmt);
- }else{
- rc = SQLITE_OK;
- }
- break;
- }
- }
-
- return rc;
-}
-
-static int fts5CursorFirstSorted(Fts5Table *pTab, Fts5Cursor *pCsr, int bDesc){
- Fts5Config *pConfig = pTab->pConfig;
- Fts5Sorter *pSorter;
- int nPhrase;
- int nByte;
- int rc = SQLITE_OK;
- char *zSql;
- const char *zRank = pCsr->zRank;
- const char *zRankArgs = pCsr->zRankArgs;
-
- nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
- nByte = sizeof(Fts5Sorter) + sizeof(int) * (nPhrase-1);
- pSorter = (Fts5Sorter*)sqlite3_malloc(nByte);
- if( pSorter==0 ) return SQLITE_NOMEM;
- memset(pSorter, 0, nByte);
- pSorter->nIdx = nPhrase;
-
- /* TODO: It would be better to have some system for reusing statement
- ** handles here, rather than preparing a new one for each query. But that
- ** is not possible as SQLite reference counts the virtual table objects.
- ** And since the statement required here reads from this very virtual
- ** table, saving it creates a circular reference.
- **
-  ** If SQLite had a built-in statement cache, this wouldn't be a problem. */
- zSql = sqlite3Fts5Mprintf(&rc,
- "SELECT rowid, rank FROM %Q.%Q ORDER BY %s(%s%s%s) %s",
- pConfig->zDb, pConfig->zName, zRank, pConfig->zName,
- (zRankArgs ? ", " : ""),
- (zRankArgs ? zRankArgs : ""),
- bDesc ? "DESC" : "ASC"
- );
- if( zSql ){
- rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &pSorter->pStmt, 0);
- sqlite3_free(zSql);
- }
-
- pCsr->pSorter = pSorter;
- if( rc==SQLITE_OK ){
- assert( pTab->pSortCsr==0 );
- pTab->pSortCsr = pCsr;
- rc = fts5SorterNext(pCsr);
- pTab->pSortCsr = 0;
- }
-
- if( rc!=SQLITE_OK ){
- sqlite3_finalize(pSorter->pStmt);
- sqlite3_free(pSorter);
- pCsr->pSorter = 0;
- }
-
- return rc;
-}
-
-static int fts5CursorFirst(Fts5Table *pTab, Fts5Cursor *pCsr, int bDesc){
- int rc;
- Fts5Expr *pExpr = pCsr->pExpr;
- rc = sqlite3Fts5ExprFirst(pExpr, pTab->pIndex, pCsr->iFirstRowid, bDesc);
- if( sqlite3Fts5ExprEof(pExpr) ){
- CsrFlagSet(pCsr, FTS5CSR_EOF);
- }
- fts5CsrNewrow(pCsr);
- return rc;
-}
-
-/*
-** Process a "special" query. A special query is identified as one with a
-** MATCH expression that begins with a '*' character. The remainder of
-** the text passed to the MATCH operator is used as the special query
-** parameters.
-*/
-static int fts5SpecialMatch(
- Fts5Table *pTab,
- Fts5Cursor *pCsr,
- const char *zQuery
-){
- int rc = SQLITE_OK; /* Return code */
- const char *z = zQuery; /* Special query text */
- int n; /* Number of bytes in text at z */
-
- while( z[0]==' ' ) z++;
- for(n=0; z[n] && z[n]!=' '; n++);
-
- assert( pTab->base.zErrMsg==0 );
- pCsr->ePlan = FTS5_PLAN_SPECIAL;
-
- if( 0==sqlite3_strnicmp("reads", z, n) ){
- pCsr->iSpecial = sqlite3Fts5IndexReads(pTab->pIndex);
- }
- else if( 0==sqlite3_strnicmp("id", z, n) ){
- pCsr->iSpecial = pCsr->iCsrId;
- }
- else{
- /* An unrecognized directive. Return an error message. */
- pTab->base.zErrMsg = sqlite3_mprintf("unknown special query: %.*s", n, z);
- rc = SQLITE_ERROR;
- }
-
- return rc;
-}
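The special-query path above is only reached when the text passed to MATCH begins with '*'. Below is a minimal sketch of issuing such a query through the public SQLite API; the table name "ft" is a placeholder, and the result is read from the hidden column that shares the table's name (see fts5ColumnMethod() further down).

#include <stdio.h>
#include <sqlite3.h>

/* Sketch: read the internal "reads" counter of a hypothetical fts5 table
** named "ft" via the special query syntax handled by fts5SpecialMatch().
** The value comes back in the hidden column named after the table. */
static void showIndexReads(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT ft FROM ft WHERE ft MATCH '*reads'", -1, &pStmt, 0);
  if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("index reads so far: %lld\n",
           (long long)sqlite3_column_int64(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
}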
-
-/*
-** Search for an auxiliary function named zName that can be used with table
-** pTab. If one is found, return a pointer to the corresponding Fts5Auxiliary
-** structure. Otherwise, if no such function exists, return NULL.
-*/
-static Fts5Auxiliary *fts5FindAuxiliary(Fts5Table *pTab, const char *zName){
- Fts5Auxiliary *pAux;
-
- for(pAux=pTab->pGlobal->pAux; pAux; pAux=pAux->pNext){
- if( sqlite3_stricmp(zName, pAux->zFunc)==0 ) return pAux;
- }
-
- /* No function of the specified name was found. Return 0. */
- return 0;
-}
-
-
-static int fts5FindRankFunction(Fts5Cursor *pCsr){
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- Fts5Config *pConfig = pTab->pConfig;
- int rc = SQLITE_OK;
- Fts5Auxiliary *pAux = 0;
- const char *zRank = pCsr->zRank;
- const char *zRankArgs = pCsr->zRankArgs;
-
- if( zRankArgs ){
- char *zSql = sqlite3Fts5Mprintf(&rc, "SELECT %s", zRankArgs);
- if( zSql ){
- sqlite3_stmt *pStmt = 0;
- rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &pStmt, 0);
- sqlite3_free(zSql);
- assert( rc==SQLITE_OK || pCsr->pRankArgStmt==0 );
- if( rc==SQLITE_OK ){
- if( SQLITE_ROW==sqlite3_step(pStmt) ){
- int nByte;
- pCsr->nRankArg = sqlite3_column_count(pStmt);
- nByte = sizeof(sqlite3_value*)*pCsr->nRankArg;
- pCsr->apRankArg = (sqlite3_value**)sqlite3Fts5MallocZero(&rc, nByte);
- if( rc==SQLITE_OK ){
- int i;
- for(i=0; i<pCsr->nRankArg; i++){
- pCsr->apRankArg[i] = sqlite3_column_value(pStmt, i);
- }
- }
- pCsr->pRankArgStmt = pStmt;
- }else{
- rc = sqlite3_finalize(pStmt);
- assert( rc!=SQLITE_OK );
- }
- }
- }
- }
-
- if( rc==SQLITE_OK ){
- pAux = fts5FindAuxiliary(pTab, zRank);
- if( pAux==0 ){
- assert( pTab->base.zErrMsg==0 );
- pTab->base.zErrMsg = sqlite3_mprintf("no such function: %s", zRank);
- rc = SQLITE_ERROR;
- }
- }
-
- pCsr->pRank = pAux;
- return rc;
-}
-
-
-static int fts5CursorParseRank(
- Fts5Config *pConfig,
- Fts5Cursor *pCsr,
- sqlite3_value *pRank
-){
- int rc = SQLITE_OK;
- if( pRank ){
- const char *z = (const char*)sqlite3_value_text(pRank);
- char *zRank = 0;
- char *zRankArgs = 0;
-
- if( z==0 ){
- if( sqlite3_value_type(pRank)==SQLITE_NULL ) rc = SQLITE_ERROR;
- }else{
- rc = sqlite3Fts5ConfigParseRank(z, &zRank, &zRankArgs);
- }
- if( rc==SQLITE_OK ){
- pCsr->zRank = zRank;
- pCsr->zRankArgs = zRankArgs;
- CsrFlagSet(pCsr, FTS5CSR_FREE_ZRANK);
- }else if( rc==SQLITE_ERROR ){
- pCsr->base.pVtab->zErrMsg = sqlite3_mprintf(
- "parse error in rank function: %s", z
- );
- }
- }else{
- if( pConfig->zRank ){
- pCsr->zRank = (char*)pConfig->zRank;
- pCsr->zRankArgs = (char*)pConfig->zRankArgs;
- }else{
- pCsr->zRank = (char*)FTS5_DEFAULT_RANK;
- pCsr->zRankArgs = 0;
- }
- }
- return rc;
-}
-
-static i64 fts5GetRowidLimit(sqlite3_value *pVal, i64 iDefault){
- if( pVal ){
- int eType = sqlite3_value_numeric_type(pVal);
- if( eType==SQLITE_INTEGER ){
- return sqlite3_value_int64(pVal);
- }
- }
- return iDefault;
-}
-
-/*
-** This is the xFilter interface for the virtual table. See
-** the virtual table xFilter method documentation for additional
-** information.
-**
-** There are three possible query strategies:
-**
-** 1. Full-text search using a MATCH operator.
-** 2. A by-rowid lookup.
-** 3. A full-table scan.
-*/
-static int fts5FilterMethod(
- sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
- int idxNum, /* Strategy index */
- const char *idxStr, /* Unused */
- int nVal, /* Number of elements in apVal */
- sqlite3_value **apVal /* Arguments for the indexing scheme */
-){
- Fts5Table *pTab = (Fts5Table*)(pCursor->pVtab);
- Fts5Config *pConfig = pTab->pConfig;
- Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
- int rc = SQLITE_OK; /* Error code */
- int iVal = 0; /* Counter for apVal[] */
- int bDesc; /* True if ORDER BY [rank|rowid] DESC */
- int bOrderByRank; /* True if ORDER BY rank */
- sqlite3_value *pMatch = 0; /* <tbl> MATCH ? expression (or NULL) */
- sqlite3_value *pRank = 0; /* rank MATCH ? expression (or NULL) */
- sqlite3_value *pRowidEq = 0; /* rowid = ? expression (or NULL) */
- sqlite3_value *pRowidLe = 0; /* rowid <= ? expression (or NULL) */
- sqlite3_value *pRowidGe = 0; /* rowid >= ? expression (or NULL) */
- char **pzErrmsg = pConfig->pzErrmsg;
-
- if( pCsr->ePlan ){
- fts5FreeCursorComponents(pCsr);
- memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan-(u8*)pCsr));
- }
-
- assert( pCsr->pStmt==0 );
- assert( pCsr->pExpr==0 );
- assert( pCsr->csrflags==0 );
- assert( pCsr->pRank==0 );
- assert( pCsr->zRank==0 );
- assert( pCsr->zRankArgs==0 );
-
- assert( pzErrmsg==0 || pzErrmsg==&pTab->base.zErrMsg );
- pConfig->pzErrmsg = &pTab->base.zErrMsg;
-
- /* Decode the arguments passed through to this function.
- **
- ** Note: The following set of if(...) statements must be in the same
- ** order as the corresponding entries in the struct at the top of
- ** fts5BestIndexMethod(). */
- if( BitFlagTest(idxNum, FTS5_BI_MATCH) ) pMatch = apVal[iVal++];
- if( BitFlagTest(idxNum, FTS5_BI_RANK) ) pRank = apVal[iVal++];
- if( BitFlagTest(idxNum, FTS5_BI_ROWID_EQ) ) pRowidEq = apVal[iVal++];
- if( BitFlagTest(idxNum, FTS5_BI_ROWID_LE) ) pRowidLe = apVal[iVal++];
- if( BitFlagTest(idxNum, FTS5_BI_ROWID_GE) ) pRowidGe = apVal[iVal++];
- assert( iVal==nVal );
- bOrderByRank = ((idxNum & FTS5_BI_ORDER_RANK) ? 1 : 0);
- pCsr->bDesc = bDesc = ((idxNum & FTS5_BI_ORDER_DESC) ? 1 : 0);
-
- /* Set the cursor upper and lower rowid limits. Only some strategies
- ** actually use them. This is ok, as the xBestIndex() method leaves the
- ** sqlite3_index_constraint.omit flag clear for range constraints
- ** on the rowid field. */
- if( pRowidEq ){
- pRowidLe = pRowidGe = pRowidEq;
- }
- if( bDesc ){
- pCsr->iFirstRowid = fts5GetRowidLimit(pRowidLe, LARGEST_INT64);
- pCsr->iLastRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64);
- }else{
- pCsr->iLastRowid = fts5GetRowidLimit(pRowidLe, LARGEST_INT64);
- pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64);
- }
-
- if( pTab->pSortCsr ){
- /* If pSortCsr is non-NULL, then this call is being made as part of
- ** processing for a "... MATCH <expr> ORDER BY rank" query (ePlan is
- ** set to FTS5_PLAN_SORTED_MATCH). pSortCsr is the cursor that will
- ** return results to the user for this query. The current cursor
- ** (pCursor) is used to execute the query issued by function
- ** fts5CursorFirstSorted() above. */
- assert( pRowidEq==0 && pRowidLe==0 && pRowidGe==0 && pRank==0 );
- assert( nVal==0 && pMatch==0 && bOrderByRank==0 && bDesc==0 );
- assert( pCsr->iLastRowid==LARGEST_INT64 );
- assert( pCsr->iFirstRowid==SMALLEST_INT64 );
- pCsr->ePlan = FTS5_PLAN_SOURCE;
- pCsr->pExpr = pTab->pSortCsr->pExpr;
- rc = fts5CursorFirst(pTab, pCsr, bDesc);
- }else if( pMatch ){
- const char *zExpr = (const char*)sqlite3_value_text(apVal[0]);
- if( zExpr==0 ) zExpr = "";
-
- rc = fts5CursorParseRank(pConfig, pCsr, pRank);
- if( rc==SQLITE_OK ){
- if( zExpr[0]=='*' ){
- /* The user has issued a query of the form "MATCH '*...'". This
- ** indicates that the MATCH expression is not a full text query,
- ** but a request for an internal parameter. */
- rc = fts5SpecialMatch(pTab, pCsr, &zExpr[1]);
- }else{
- char **pzErr = &pTab->base.zErrMsg;
- rc = sqlite3Fts5ExprNew(pConfig, zExpr, &pCsr->pExpr, pzErr);
- if( rc==SQLITE_OK ){
- if( bOrderByRank ){
- pCsr->ePlan = FTS5_PLAN_SORTED_MATCH;
- rc = fts5CursorFirstSorted(pTab, pCsr, bDesc);
- }else{
- pCsr->ePlan = FTS5_PLAN_MATCH;
- rc = fts5CursorFirst(pTab, pCsr, bDesc);
- }
- }
- }
- }
- }else if( pConfig->zContent==0 ){
- *pConfig->pzErrmsg = sqlite3_mprintf(
- "%s: table does not support scanning", pConfig->zName
- );
- rc = SQLITE_ERROR;
- }else{
- /* This is either a full-table scan (ePlan==FTS5_PLAN_SCAN) or a lookup
- ** by rowid (ePlan==FTS5_PLAN_ROWID). */
- pCsr->ePlan = (pRowidEq ? FTS5_PLAN_ROWID : FTS5_PLAN_SCAN);
- rc = sqlite3Fts5StorageStmt(
- pTab->pStorage, fts5StmtType(pCsr), &pCsr->pStmt, &pTab->base.zErrMsg
- );
- if( rc==SQLITE_OK ){
- if( pCsr->ePlan==FTS5_PLAN_ROWID ){
- sqlite3_bind_value(pCsr->pStmt, 1, apVal[0]);
- }else{
- sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iFirstRowid);
- sqlite3_bind_int64(pCsr->pStmt, 2, pCsr->iLastRowid);
- }
- rc = fts5NextMethod(pCursor);
- }
- }
-
- pConfig->pzErrmsg = pzErrmsg;
- return rc;
-}
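The three strategies listed in the header comment map onto different SQL shapes. The sketch below, again using a placeholder table named "ft", prepares one query of each kind; which branch of fts5FilterMethod() runs is determined by the idxNum bits chosen by fts5BestIndexMethod(). The full-table scan assumes the table stores its own content, since the error path above rejects scans on contentless tables.

#include <sqlite3.h>

/* Sketch: one query per strategy handled by fts5FilterMethod(), against a
** hypothetical table "ft".  Each statement is stepped to completion. */
static int runEachPlan(sqlite3 *db){
  static const char *azSql[] = {
    "SELECT rowid FROM ft WHERE ft MATCH 'sqlite'",  /* 1. full-text query */
    "SELECT rowid FROM ft WHERE rowid = 42",         /* 2. by-rowid lookup */
    "SELECT rowid FROM ft"                           /* 3. full-table scan */
  };
  int i;
  for(i=0; i<3; i++){
    sqlite3_stmt *pStmt = 0;
    int rc = sqlite3_prepare_v2(db, azSql[i], -1, &pStmt, 0);
    if( rc!=SQLITE_OK ) return rc;
    while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* consume the row */ }
    rc = sqlite3_finalize(pStmt);
    if( rc!=SQLITE_OK ) return rc;
  }
  return SQLITE_OK;
}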
-
-/*
-** This is the xEof method of the virtual table. SQLite calls this
-** routine to find out if it has reached the end of a result set.
-*/
-static int fts5EofMethod(sqlite3_vtab_cursor *pCursor){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
- return (CsrFlagTest(pCsr, FTS5CSR_EOF) ? 1 : 0);
-}
-
-/*
-** Return the rowid that the cursor currently points to.
-*/
-static i64 fts5CursorRowid(Fts5Cursor *pCsr){
- assert( pCsr->ePlan==FTS5_PLAN_MATCH
- || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH
- || pCsr->ePlan==FTS5_PLAN_SOURCE
- );
- if( pCsr->pSorter ){
- return pCsr->pSorter->iRowid;
- }else{
- return sqlite3Fts5ExprRowid(pCsr->pExpr);
- }
-}
-
-/*
-** This is the xRowid method. The SQLite core calls this routine to
-** retrieve the rowid for the current row of the result set. fts5
-** exposes %_content.rowid as the rowid for the virtual table. The
-** rowid should be written to *pRowid.
-*/
-static int fts5RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
- int ePlan = pCsr->ePlan;
-
- assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 );
- switch( ePlan ){
- case FTS5_PLAN_SPECIAL:
- *pRowid = 0;
- break;
-
- case FTS5_PLAN_SOURCE:
- case FTS5_PLAN_MATCH:
- case FTS5_PLAN_SORTED_MATCH:
- *pRowid = fts5CursorRowid(pCsr);
- break;
-
- default:
- *pRowid = sqlite3_column_int64(pCsr->pStmt, 0);
- break;
- }
-
- return SQLITE_OK;
-}
-
-/*
-** If the cursor requires seeking (the FTS5CSR_REQUIRE_CONTENT flag is set),
-** seek it.
-** Return SQLITE_OK if no error occurs, or an SQLite error code otherwise.
-**
-** If argument bErrormsg is true and an error occurs, an error message may
-** be left in sqlite3_vtab.zErrMsg.
-*/
-static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){
- int rc = SQLITE_OK;
-
- /* If the cursor does not yet have a statement handle, obtain one now. */
- if( pCsr->pStmt==0 ){
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- int eStmt = fts5StmtType(pCsr);
- rc = sqlite3Fts5StorageStmt(
- pTab->pStorage, eStmt, &pCsr->pStmt, (bErrormsg?&pTab->base.zErrMsg:0)
- );
- assert( rc!=SQLITE_OK || pTab->base.zErrMsg==0 );
- assert( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_CONTENT) );
- }
-
- if( rc==SQLITE_OK && CsrFlagTest(pCsr, FTS5CSR_REQUIRE_CONTENT) ){
- assert( pCsr->pExpr );
- sqlite3_reset(pCsr->pStmt);
- sqlite3_bind_int64(pCsr->pStmt, 1, fts5CursorRowid(pCsr));
- rc = sqlite3_step(pCsr->pStmt);
- if( rc==SQLITE_ROW ){
- rc = SQLITE_OK;
- CsrFlagClear(pCsr, FTS5CSR_REQUIRE_CONTENT);
- }else{
- rc = sqlite3_reset(pCsr->pStmt);
- if( rc==SQLITE_OK ){
- rc = FTS5_CORRUPT;
- }
- }
- }
- return rc;
-}
-
-static void fts5SetVtabError(Fts5Table *p, const char *zFormat, ...){
- va_list ap; /* ... printf arguments */
- va_start(ap, zFormat);
- assert( p->base.zErrMsg==0 );
- p->base.zErrMsg = sqlite3_vmprintf(zFormat, ap);
- va_end(ap);
-}
-
-/*
-** This function is called to handle an FTS INSERT command. In other words,
-** an INSERT statement of the form:
-**
-** INSERT INTO fts(fts) VALUES($pCmd)
-** INSERT INTO fts(fts, rank) VALUES($pCmd, $pVal)
-**
-** Argument zCmd is the text inserted into the "fts" column and pVal is the
-** value inserted into the "rank" column by the INSERT statement. This
-** function returns SQLITE_OK if successful, or an SQLite
-** error code if an error occurs.
-**
-** The commands implemented by this function are documented in the "Special
-** INSERT Directives" section of the documentation. It should be updated if
-** more commands are added to this function.
-*/
-static int fts5SpecialInsert(
- Fts5Table *pTab, /* Fts5 table object */
- const char *zCmd, /* Text inserted into table-name column */
- sqlite3_value *pVal /* Value inserted into rank column */
-){
- Fts5Config *pConfig = pTab->pConfig;
- int rc = SQLITE_OK;
- int bError = 0;
-
- if( 0==sqlite3_stricmp("delete-all", zCmd) ){
- if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
- fts5SetVtabError(pTab,
- "'delete-all' may only be used with a "
- "contentless or external content fts5 table"
- );
- rc = SQLITE_ERROR;
- }else{
- rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage);
- }
- }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){
- if( pConfig->eContent==FTS5_CONTENT_NONE ){
- fts5SetVtabError(pTab,
- "'rebuild' may not be used with a contentless fts5 table"
- );
- rc = SQLITE_ERROR;
- }else{
- rc = sqlite3Fts5StorageRebuild(pTab->pStorage);
- }
- }else if( 0==sqlite3_stricmp("optimize", zCmd) ){
- rc = sqlite3Fts5StorageOptimize(pTab->pStorage);
- }else if( 0==sqlite3_stricmp("merge", zCmd) ){
- int nMerge = sqlite3_value_int(pVal);
- rc = sqlite3Fts5StorageMerge(pTab->pStorage, nMerge);
- }else if( 0==sqlite3_stricmp("integrity-check", zCmd) ){
- rc = sqlite3Fts5StorageIntegrity(pTab->pStorage);
-#ifdef SQLITE_DEBUG
- }else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){
- pConfig->bPrefixIndex = sqlite3_value_int(pVal);
-#endif
- }else{
- rc = sqlite3Fts5IndexLoadConfig(pTab->pIndex);
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5ConfigSetValue(pTab->pConfig, zCmd, pVal, &bError);
- }
- if( rc==SQLITE_OK ){
- if( bError ){
- rc = SQLITE_ERROR;
- }else{
- rc = sqlite3Fts5StorageConfigValue(pTab->pStorage, zCmd, pVal, 0);
- }
- }
- }
- return rc;
-}
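The directives handled above ('delete-all', 'rebuild', 'optimize', 'merge', 'integrity-check', plus the configuration keys passed through to sqlite3Fts5ConfigSetValue()) are issued as INSERTs on the hidden column named after the table. A minimal sketch, assuming a table called "ft":

#include <sqlite3.h>

/* Sketch: run two of the special INSERT directives handled by
** fts5SpecialInsert() against a hypothetical fts5 table "ft". */
static int maintainTable(sqlite3 *db){
  /* Merge all index segments together ("optimize"). */
  int rc = sqlite3_exec(db, "INSERT INTO ft(ft) VALUES('optimize')", 0, 0, 0);
  if( rc==SQLITE_OK ){
    /* Check the full-text index against the stored content. */
    rc = sqlite3_exec(db,
        "INSERT INTO ft(ft) VALUES('integrity-check')", 0, 0, 0);
  }
  return rc;
}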
-
-static int fts5SpecialDelete(
- Fts5Table *pTab,
- sqlite3_value **apVal,
- sqlite3_int64 *piRowid
-){
- int rc = SQLITE_OK;
- int eType1 = sqlite3_value_type(apVal[1]);
- if( eType1==SQLITE_INTEGER ){
- sqlite3_int64 iDel = sqlite3_value_int64(apVal[1]);
- rc = sqlite3Fts5StorageSpecialDelete(pTab->pStorage, iDel, &apVal[2]);
- }
- return rc;
-}
-
-static void fts5StorageInsert(
- int *pRc,
- Fts5Table *pTab,
- sqlite3_value **apVal,
- i64 *piRowid
-){
- int rc = *pRc;
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, piRowid);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *piRowid);
- }
- *pRc = rc;
-}
-
-/*
-** This function is the implementation of the xUpdate callback used by
-** FTS5 virtual tables. It is invoked by SQLite each time a row is to be
-** inserted, updated or deleted.
-**
-** A delete specifies a single argument - the rowid of the row to remove.
-**
-** Update and insert operations pass:
-**
-** 1. The "old" rowid, or NULL.
-** 2. The "new" rowid.
-** 3. Values for each of the nCol matchable columns.
-** 4. Values for the two hidden columns (<tablename> and "rank").
-*/
-static int fts5UpdateMethod(
- sqlite3_vtab *pVtab, /* Virtual table handle */
- int nArg, /* Size of argument array */
- sqlite3_value **apVal, /* Array of arguments */
- sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */
-){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- Fts5Config *pConfig = pTab->pConfig;
- int eType0; /* value_type() of apVal[0] */
- int rc = SQLITE_OK; /* Return code */
-
- /* A transaction must be open when this is called. */
- assert( pTab->ts.eState==1 );
-
- assert( pVtab->zErrMsg==0 );
- assert( nArg==1 || nArg==(2+pConfig->nCol+2) );
- assert( nArg==1
- || sqlite3_value_type(apVal[1])==SQLITE_INTEGER
- || sqlite3_value_type(apVal[1])==SQLITE_NULL
- );
- assert( pTab->pConfig->pzErrmsg==0 );
- pTab->pConfig->pzErrmsg = &pTab->base.zErrMsg;
-
- /* Put any active cursors into REQUIRE_SEEK state. */
- fts5TripCursors(pTab);
-
- eType0 = sqlite3_value_type(apVal[0]);
- if( eType0==SQLITE_NULL
- && sqlite3_value_type(apVal[2+pConfig->nCol])!=SQLITE_NULL
- ){
- /* A "special" INSERT op. These are handled separately. */
- const char *z = (const char*)sqlite3_value_text(apVal[2+pConfig->nCol]);
- if( pConfig->eContent!=FTS5_CONTENT_NORMAL
- && 0==sqlite3_stricmp("delete", z)
- ){
- rc = fts5SpecialDelete(pTab, apVal, pRowid);
- }else{
- rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]);
- }
- }else{
- /* A regular INSERT, UPDATE or DELETE statement. The trick here is that
- ** any conflict on the rowid value must be detected before any
- ** modifications are made to the database file. There are 4 cases:
- **
- ** 1) DELETE
- ** 2) UPDATE (rowid not modified)
- ** 3) UPDATE (rowid modified)
- ** 4) INSERT
- **
- ** Cases 3 and 4 may violate the rowid constraint.
- */
- int eConflict = sqlite3_vtab_on_conflict(pConfig->db);
-
- assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL );
- assert( nArg!=1 || eType0==SQLITE_INTEGER );
-
- /* Filter out attempts to run UPDATE or DELETE on contentless tables.
-    ** This is not supported. */
- if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){
- pTab->base.zErrMsg = sqlite3_mprintf(
- "cannot %s contentless fts5 table: %s",
- (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
- );
- rc = SQLITE_ERROR;
- }
-
- /* Case 1: DELETE */
- else if( nArg==1 ){
- i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */
- rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel);
- }
-
-    /* Case 4: INSERT */
- else if( eType0!=SQLITE_INTEGER ){
- /* If this is a REPLACE, first remove the current entry (if any) */
- if( eConflict==SQLITE_REPLACE
- && sqlite3_value_type(apVal[1])==SQLITE_INTEGER
- ){
- i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */
- rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew);
- }
- fts5StorageInsert(&rc, pTab, apVal, pRowid);
- }
-
-    /* Cases 2 and 3: UPDATE */
- else{
- i64 iOld = sqlite3_value_int64(apVal[0]); /* Old rowid */
- i64 iNew = sqlite3_value_int64(apVal[1]); /* New rowid */
- if( iOld!=iNew ){
- if( eConflict==SQLITE_REPLACE ){
- rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld);
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew);
- }
- fts5StorageInsert(&rc, pTab, apVal, pRowid);
- }else{
- rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, pRowid);
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *pRowid);
- }
- }
- }else{
- rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld);
- fts5StorageInsert(&rc, pTab, apVal, pRowid);
- }
- }
- }
-
- pTab->pConfig->pzErrmsg = 0;
- return rc;
-}
-
-/*
-** Implementation of xSync() method.
-*/
-static int fts5SyncMethod(sqlite3_vtab *pVtab){
- int rc;
- Fts5Table *pTab = (Fts5Table*)pVtab;
- fts5CheckTransactionState(pTab, FTS5_SYNC, 0);
- pTab->pConfig->pzErrmsg = &pTab->base.zErrMsg;
- fts5TripCursors(pTab);
- rc = sqlite3Fts5StorageSync(pTab->pStorage, 1);
- pTab->pConfig->pzErrmsg = 0;
- return rc;
-}
-
-/*
-** Implementation of xBegin() method.
-*/
-static int fts5BeginMethod(sqlite3_vtab *pVtab){
- fts5CheckTransactionState((Fts5Table*)pVtab, FTS5_BEGIN, 0);
- return SQLITE_OK;
-}
-
-/*
-** Implementation of xCommit() method. This is a no-op. The contents of
-** the pending-terms hash-table have already been flushed into the database
-** by fts5SyncMethod().
-*/
-static int fts5CommitMethod(sqlite3_vtab *pVtab){
- fts5CheckTransactionState((Fts5Table*)pVtab, FTS5_COMMIT, 0);
- return SQLITE_OK;
-}
-
-/*
-** Implementation of xRollback(). Discard the contents of the pending-terms
-** hash-table. Any changes made to the database are reverted by SQLite.
-*/
-static int fts5RollbackMethod(sqlite3_vtab *pVtab){
- int rc;
- Fts5Table *pTab = (Fts5Table*)pVtab;
- fts5CheckTransactionState(pTab, FTS5_ROLLBACK, 0);
- rc = sqlite3Fts5StorageRollback(pTab->pStorage);
- return rc;
-}
-
-static void *fts5ApiUserData(Fts5Context *pCtx){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- return pCsr->pAux->pUserData;
-}
-
-static int fts5ApiColumnCount(Fts5Context *pCtx){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- return ((Fts5Table*)(pCsr->base.pVtab))->pConfig->nCol;
-}
-
-static int fts5ApiColumnTotalSize(
- Fts5Context *pCtx,
- int iCol,
- sqlite3_int64 *pnToken
-){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- return sqlite3Fts5StorageSize(pTab->pStorage, iCol, pnToken);
-}
-
-static int fts5ApiRowCount(Fts5Context *pCtx, i64 *pnRow){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- return sqlite3Fts5StorageRowCount(pTab->pStorage, pnRow);
-}
-
-static int fts5ApiTokenize(
- Fts5Context *pCtx,
- const char *pText, int nText,
- void *pUserData,
- int (*xToken)(void*, int, const char*, int, int, int)
-){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- return sqlite3Fts5Tokenize(
- pTab->pConfig, FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken
- );
-}
-
-static int fts5ApiPhraseCount(Fts5Context *pCtx){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- return sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
-}
-
-static int fts5ApiPhraseSize(Fts5Context *pCtx, int iPhrase){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- return sqlite3Fts5ExprPhraseSize(pCsr->pExpr, iPhrase);
-}
-
-static int fts5CsrPoslist(Fts5Cursor *pCsr, int iPhrase, const u8 **pa){
- int n;
- if( pCsr->pSorter ){
- Fts5Sorter *pSorter = pCsr->pSorter;
- int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
- n = pSorter->aIdx[iPhrase] - i1;
- *pa = &pSorter->aPoslist[i1];
- }else{
- n = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
- }
- return n;
-}
-
-/*
-** Ensure that the Fts5Cursor.nInstCount and aInst[] variables are populated
-** correctly for the current row. Return SQLITE_OK if successful, or an
-** SQLite error code otherwise.
-*/
-static int fts5CacheInstArray(Fts5Cursor *pCsr){
- int rc = SQLITE_OK;
- Fts5PoslistReader *aIter; /* One iterator for each phrase */
- int nIter; /* Number of iterators/phrases */
-
- nIter = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
- if( pCsr->aInstIter==0 ){
- int nByte = sizeof(Fts5PoslistReader) * nIter;
- pCsr->aInstIter = (Fts5PoslistReader*)sqlite3Fts5MallocZero(&rc, nByte);
- }
- aIter = pCsr->aInstIter;
-
- if( aIter ){
-    int nInst = 0;            /* Number of instances seen so far */
- int i;
-
- /* Initialize all iterators */
- for(i=0; i<nIter; i++){
- const u8 *a;
- int n = fts5CsrPoslist(pCsr, i, &a);
- sqlite3Fts5PoslistReaderInit(a, n, &aIter[i]);
- }
-
- while( 1 ){
- int *aInst;
- int iBest = -1;
- for(i=0; i<nIter; i++){
- if( (aIter[i].bEof==0)
- && (iBest<0 || aIter[i].iPos<aIter[iBest].iPos)
- ){
- iBest = i;
- }
- }
- if( iBest<0 ) break;
-
- nInst++;
- if( nInst>=pCsr->nInstAlloc ){
- pCsr->nInstAlloc = pCsr->nInstAlloc ? pCsr->nInstAlloc*2 : 32;
- aInst = (int*)sqlite3_realloc(
- pCsr->aInst, pCsr->nInstAlloc*sizeof(int)*3
- );
- if( aInst ){
- pCsr->aInst = aInst;
- }else{
- rc = SQLITE_NOMEM;
- break;
- }
- }
-
- aInst = &pCsr->aInst[3 * (nInst-1)];
- aInst[0] = iBest;
- aInst[1] = FTS5_POS2COLUMN(aIter[iBest].iPos);
- aInst[2] = FTS5_POS2OFFSET(aIter[iBest].iPos);
- sqlite3Fts5PoslistReaderNext(&aIter[iBest]);
- }
-
- pCsr->nInstCount = nInst;
- CsrFlagClear(pCsr, FTS5CSR_REQUIRE_INST);
- }
- return rc;
-}
-
-static int fts5ApiInstCount(Fts5Context *pCtx, int *pnInst){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- int rc = SQLITE_OK;
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
- || SQLITE_OK==(rc = fts5CacheInstArray(pCsr)) ){
- *pnInst = pCsr->nInstCount;
- }
- return rc;
-}
-
-static int fts5ApiInst(
- Fts5Context *pCtx,
- int iIdx,
- int *piPhrase,
- int *piCol,
- int *piOff
-){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- int rc = SQLITE_OK;
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
- || SQLITE_OK==(rc = fts5CacheInstArray(pCsr))
- ){
- if( iIdx<0 || iIdx>=pCsr->nInstCount ){
- rc = SQLITE_RANGE;
- }else{
- *piPhrase = pCsr->aInst[iIdx*3];
- *piCol = pCsr->aInst[iIdx*3 + 1];
- *piOff = pCsr->aInst[iIdx*3 + 2];
- }
- }
- return rc;
-}
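fts5CacheInstArray() above flattens every phrase occurrence on the current row into a (phrase, column, offset) triple, and fts5ApiInstCount()/fts5ApiInst() expose those triples to auxiliary functions. A sketch of an auxiliary function body that walks them is shown below; the registration boilerplate is omitted, and "fts5.h" is assumed to be available for the extension declarations.

#include <sqlite3.h>
#include "fts5.h"   /* Declares Fts5ExtensionApi, Fts5Context, ... */

/* Sketch: an auxiliary function that counts the phrase instances on the
** current row using xInstCount()/xInst() and returns the total. */
static void countInstances(
  const Fts5ExtensionApi *pApi,   /* Extension API object */
  Fts5Context *pFts,              /* First argument for pApi methods */
  sqlite3_context *pCtx,          /* SQL function context */
  int nVal, sqlite3_value **apVal /* Trailing SQL function arguments */
){
  int nInst = 0;
  int rc = pApi->xInstCount(pFts, &nInst);
  int i;
  (void)nVal; (void)apVal;
  for(i=0; rc==SQLITE_OK && i<nInst; i++){
    int iPhrase, iCol, iOff;
    rc = pApi->xInst(pFts, i, &iPhrase, &iCol, &iOff);
    /* (iPhrase, iCol, iOff) identifies one occurrence of one phrase */
  }
  if( rc==SQLITE_OK ){
    sqlite3_result_int(pCtx, nInst);
  }else{
    sqlite3_result_error_code(pCtx, rc);
  }
}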
-
-static sqlite3_int64 fts5ApiRowid(Fts5Context *pCtx){
- return fts5CursorRowid((Fts5Cursor*)pCtx);
-}
-
-static int fts5ApiColumnText(
- Fts5Context *pCtx,
- int iCol,
- const char **pz,
- int *pn
-){
- int rc = SQLITE_OK;
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- if( fts5IsContentless((Fts5Table*)(pCsr->base.pVtab)) ){
- *pz = 0;
- *pn = 0;
- }else{
- rc = fts5SeekCursor(pCsr, 0);
- if( rc==SQLITE_OK ){
- *pz = (const char*)sqlite3_column_text(pCsr->pStmt, iCol+1);
- *pn = sqlite3_column_bytes(pCsr->pStmt, iCol+1);
- }
- }
- return rc;
-}
-
-static int fts5ColumnSizeCb(
- void *pContext, /* Pointer to int */
- int tflags,
- const char *pToken, /* Buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Start offset of token */
- int iEnd /* End offset of token */
-){
- int *pCnt = (int*)pContext;
- if( (tflags & FTS5_TOKEN_COLOCATED)==0 ){
- (*pCnt)++;
- }
- return SQLITE_OK;
-}
-
-static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- Fts5Config *pConfig = pTab->pConfig;
- int rc = SQLITE_OK;
-
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_DOCSIZE) ){
- if( pConfig->bColumnsize ){
- i64 iRowid = fts5CursorRowid(pCsr);
- rc = sqlite3Fts5StorageDocsize(pTab->pStorage, iRowid, pCsr->aColumnSize);
- }else if( pConfig->zContent==0 ){
- int i;
- for(i=0; i<pConfig->nCol; i++){
- if( pConfig->abUnindexed[i]==0 ){
- pCsr->aColumnSize[i] = -1;
- }
- }
- }else{
- int i;
- for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
- if( pConfig->abUnindexed[i]==0 ){
- const char *z; int n;
- void *p = (void*)(&pCsr->aColumnSize[i]);
- pCsr->aColumnSize[i] = 0;
- rc = fts5ApiColumnText(pCtx, i, &z, &n);
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5Tokenize(
- pConfig, FTS5_TOKENIZE_AUX, z, n, p, fts5ColumnSizeCb
- );
- }
- }
- }
- }
- CsrFlagClear(pCsr, FTS5CSR_REQUIRE_DOCSIZE);
- }
- if( iCol<0 ){
- int i;
- *pnToken = 0;
- for(i=0; i<pConfig->nCol; i++){
- *pnToken += pCsr->aColumnSize[i];
- }
- }else if( iCol<pConfig->nCol ){
- *pnToken = pCsr->aColumnSize[iCol];
- }else{
- *pnToken = 0;
- rc = SQLITE_RANGE;
- }
- return rc;
-}
-
-/*
-** Implementation of the xSetAuxdata() method.
-*/
-static int fts5ApiSetAuxdata(
- Fts5Context *pCtx, /* Fts5 context */
- void *pPtr, /* Pointer to save as auxdata */
- void(*xDelete)(void*) /* Destructor for pPtr (or NULL) */
-){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Auxdata *pData;
-
-  /* Search through the cursor's list of Fts5Auxdata objects for one that
- ** corresponds to the currently executing auxiliary function. */
- for(pData=pCsr->pAuxdata; pData; pData=pData->pNext){
- if( pData->pAux==pCsr->pAux ) break;
- }
-
- if( pData ){
- if( pData->xDelete ){
- pData->xDelete(pData->pPtr);
- }
- }else{
- int rc = SQLITE_OK;
- pData = (Fts5Auxdata*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Auxdata));
- if( pData==0 ){
- if( xDelete ) xDelete(pPtr);
- return rc;
- }
- pData->pAux = pCsr->pAux;
- pData->pNext = pCsr->pAuxdata;
- pCsr->pAuxdata = pData;
- }
-
- pData->xDelete = xDelete;
- pData->pPtr = pPtr;
- return SQLITE_OK;
-}
-
-static void *fts5ApiGetAuxdata(Fts5Context *pCtx, int bClear){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Auxdata *pData;
- void *pRet = 0;
-
- for(pData=pCsr->pAuxdata; pData; pData=pData->pNext){
- if( pData->pAux==pCsr->pAux ) break;
- }
-
- if( pData ){
- pRet = pData->pPtr;
- if( bClear ){
- pData->pPtr = 0;
- pData->xDelete = 0;
- }
- }
-
- return pRet;
-}
-
-static void fts5ApiPhraseNext(
- Fts5Context *pCtx,
- Fts5PhraseIter *pIter,
- int *piCol, int *piOff
-){
- if( pIter->a>=pIter->b ){
- *piCol = -1;
- *piOff = -1;
- }else{
- int iVal;
- pIter->a += fts5GetVarint32(pIter->a, iVal);
- if( iVal==1 ){
- pIter->a += fts5GetVarint32(pIter->a, iVal);
- *piCol = iVal;
- *piOff = 0;
- pIter->a += fts5GetVarint32(pIter->a, iVal);
- }
- *piOff += (iVal-2);
- }
-}
-
-static void fts5ApiPhraseFirst(
- Fts5Context *pCtx,
- int iPhrase,
- Fts5PhraseIter *pIter,
- int *piCol, int *piOff
-){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- int n = fts5CsrPoslist(pCsr, iPhrase, &pIter->a);
- pIter->b = &pIter->a[n];
- *piCol = 0;
- *piOff = 0;
- fts5ApiPhraseNext(pCtx, pIter, piCol, piOff);
-}
-
-static int fts5ApiQueryPhrase(Fts5Context*, int, void*,
- int(*)(const Fts5ExtensionApi*, Fts5Context*, void*)
-);
-
-static const Fts5ExtensionApi sFts5Api = {
- 2, /* iVersion */
- fts5ApiUserData,
- fts5ApiColumnCount,
- fts5ApiRowCount,
- fts5ApiColumnTotalSize,
- fts5ApiTokenize,
- fts5ApiPhraseCount,
- fts5ApiPhraseSize,
- fts5ApiInstCount,
- fts5ApiInst,
- fts5ApiRowid,
- fts5ApiColumnText,
- fts5ApiColumnSize,
- fts5ApiQueryPhrase,
- fts5ApiSetAuxdata,
- fts5ApiGetAuxdata,
- fts5ApiPhraseFirst,
- fts5ApiPhraseNext,
-};
-
-
-/*
-** Implementation of API function xQueryPhrase().
-*/
-static int fts5ApiQueryPhrase(
- Fts5Context *pCtx,
- int iPhrase,
- void *pUserData,
- int(*xCallback)(const Fts5ExtensionApi*, Fts5Context*, void*)
-){
- Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
- int rc;
- Fts5Cursor *pNew = 0;
-
- rc = fts5OpenMethod(pCsr->base.pVtab, (sqlite3_vtab_cursor**)&pNew);
- if( rc==SQLITE_OK ){
- Fts5Config *pConf = pTab->pConfig;
- pNew->ePlan = FTS5_PLAN_MATCH;
- pNew->iFirstRowid = SMALLEST_INT64;
- pNew->iLastRowid = LARGEST_INT64;
- pNew->base.pVtab = (sqlite3_vtab*)pTab;
- rc = sqlite3Fts5ExprClonePhrase(pConf, pCsr->pExpr, iPhrase, &pNew->pExpr);
- }
-
- if( rc==SQLITE_OK ){
- for(rc = fts5CursorFirst(pTab, pNew, 0);
- rc==SQLITE_OK && CsrFlagTest(pNew, FTS5CSR_EOF)==0;
- rc = fts5NextMethod((sqlite3_vtab_cursor*)pNew)
- ){
- rc = xCallback(&sFts5Api, (Fts5Context*)pNew, pUserData);
- if( rc!=SQLITE_OK ){
- if( rc==SQLITE_DONE ) rc = SQLITE_OK;
- break;
- }
- }
- }
-
- fts5CloseMethod((sqlite3_vtab_cursor*)pNew);
- return rc;
-}
-
-static void fts5ApiInvoke(
- Fts5Auxiliary *pAux,
- Fts5Cursor *pCsr,
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- assert( pCsr->pAux==0 );
- pCsr->pAux = pAux;
- pAux->xFunc(&sFts5Api, (Fts5Context*)pCsr, context, argc, argv);
- pCsr->pAux = 0;
-}
-
-static Fts5Cursor *fts5CursorFromCsrid(Fts5Global *pGlobal, i64 iCsrId){
- Fts5Cursor *pCsr;
- for(pCsr=pGlobal->pCsr; pCsr; pCsr=pCsr->pNext){
- if( pCsr->iCsrId==iCsrId ) break;
- }
- return pCsr;
-}
-
-static void fts5ApiCallback(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
-
- Fts5Auxiliary *pAux;
- Fts5Cursor *pCsr;
- i64 iCsrId;
-
- assert( argc>=1 );
- pAux = (Fts5Auxiliary*)sqlite3_user_data(context);
- iCsrId = sqlite3_value_int64(argv[0]);
-
- pCsr = fts5CursorFromCsrid(pAux->pGlobal, iCsrId);
- if( pCsr==0 ){
- char *zErr = sqlite3_mprintf("no such cursor: %lld", iCsrId);
- sqlite3_result_error(context, zErr, -1);
- sqlite3_free(zErr);
- }else{
- fts5ApiInvoke(pAux, pCsr, context, argc-1, &argv[1]);
- }
-}
-
-
-/*
-** Given cursor id iCsrId, return a pointer to the corresponding Fts5Index
-** object, or NULL if the cursor id does not exist.
-**
-** If successful, set *pnCol to the number of indexed columns in the
-** table before returning.
-*/
-static Fts5Index *sqlite3Fts5IndexFromCsrid(
- Fts5Global *pGlobal,
- i64 iCsrId,
- int *pnCol
-){
- Fts5Cursor *pCsr;
- Fts5Table *pTab;
-
- pCsr = fts5CursorFromCsrid(pGlobal, iCsrId);
- pTab = (Fts5Table*)pCsr->base.pVtab;
- *pnCol = pTab->pConfig->nCol;
-
- return pTab->pIndex;
-}
-
-/*
-** Return a "position-list blob" corresponding to the current position of
-** cursor pCsr via sqlite3_result_blob(). A position-list blob contains
-** the current position-list for each phrase in the query associated with
-** cursor pCsr.
-**
-** A position-list blob begins with (nPhrase-1) varints, where nPhrase is
-** the number of phrases in the query. Following the varints are the
-** concatenated position lists for each phrase, in order.
-**
-** The first varint (if it exists) contains the size of the position list
-** for phrase 0. The second (same disclaimer) contains the size of position
-** list 1. And so on. There is no size field for the final position list,
-** as it can be derived from the total size of the blob.
-*/
-static int fts5PoslistBlob(sqlite3_context *pCtx, Fts5Cursor *pCsr){
- int i;
- int rc = SQLITE_OK;
- int nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
- Fts5Buffer val;
-
- memset(&val, 0, sizeof(Fts5Buffer));
-
- /* Append the varints */
- for(i=0; i<(nPhrase-1); i++){
- const u8 *dummy;
- int nByte = sqlite3Fts5ExprPoslist(pCsr->pExpr, i, &dummy);
- sqlite3Fts5BufferAppendVarint(&rc, &val, nByte);
- }
-
- /* Append the position lists */
- for(i=0; i<nPhrase; i++){
- const u8 *pPoslist;
- int nPoslist;
- nPoslist = sqlite3Fts5ExprPoslist(pCsr->pExpr, i, &pPoslist);
- sqlite3Fts5BufferAppendBlob(&rc, &val, nPoslist, pPoslist);
- }
-
- sqlite3_result_blob(pCtx, val.p, val.n, sqlite3_free);
- return rc;
-}
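The blob layout described above, (nPhrase-1) size varints followed by the concatenated position lists, is the same format that fts5SorterNext() decodes. Below is a simplified sketch of splitting such a blob back into per-phrase slices; getVarint32() is a cut-down stand-in for fts5GetVarint32() that only handles values fitting in 32 bits.

/* Sketch: decode a position-list blob produced by fts5PoslistBlob().
** getVarint32() is a simplified reader for SQLite's big-endian,
** 7-bits-per-byte varint encoding (small values only). */
static int getVarint32(const unsigned char *a, int *pVal){
  unsigned int v = 0;
  int i = 0;
  do{
    v = (v<<7) | (a[i] & 0x7f);
  }while( (a[i++] & 0x80)!=0 && i<5 );
  *pVal = (int)v;
  return i;                       /* Number of bytes consumed */
}

/* Fill aOff[]/aLen[] (both of size nPhrase) with the byte offset and size
** of each phrase's position list inside blob a[0..n-1]. */
static void splitPoslistBlob(
  const unsigned char *a, int n,  /* Blob and its size in bytes */
  int nPhrase,                    /* Number of phrases in the query */
  int *aOff, int *aLen            /* OUT: offset/size of each list */
){
  const unsigned char *p = a;
  int i, iHdr, iOff = 0;
  for(i=0; i<nPhrase-1; i++){
    p += getVarint32(p, &aLen[i]);
  }
  iHdr = (int)(p - a);            /* Size of the varint header */
  for(i=0; i<nPhrase-1; i++){
    aOff[i] = iHdr + iOff;
    iOff += aLen[i];
  }
  aOff[nPhrase-1] = iHdr + iOff;
  aLen[nPhrase-1] = n - aOff[nPhrase-1];
}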
-
-/*
-** This is the xColumn method, called by SQLite to request a value from
-** the row that the supplied cursor currently points to.
-*/
-static int fts5ColumnMethod(
- sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
- sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
- int iCol /* Index of column to read value from */
-){
- Fts5Table *pTab = (Fts5Table*)(pCursor->pVtab);
- Fts5Config *pConfig = pTab->pConfig;
- Fts5Cursor *pCsr = (Fts5Cursor*)pCursor;
- int rc = SQLITE_OK;
-
- assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 );
-
- if( pCsr->ePlan==FTS5_PLAN_SPECIAL ){
- if( iCol==pConfig->nCol ){
- sqlite3_result_int64(pCtx, pCsr->iSpecial);
- }
- }else
-
- if( iCol==pConfig->nCol ){
- /* User is requesting the value of the special column with the same name
- ** as the table. Return the cursor integer id number. This value is only
- ** useful in that it may be passed as the first argument to an FTS5
- ** auxiliary function. */
- sqlite3_result_int64(pCtx, pCsr->iCsrId);
- }else if( iCol==pConfig->nCol+1 ){
-
- /* The value of the "rank" column. */
- if( pCsr->ePlan==FTS5_PLAN_SOURCE ){
- fts5PoslistBlob(pCtx, pCsr);
- }else if(
- pCsr->ePlan==FTS5_PLAN_MATCH
- || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH
- ){
- if( pCsr->pRank || SQLITE_OK==(rc = fts5FindRankFunction(pCsr)) ){
- fts5ApiInvoke(pCsr->pRank, pCsr, pCtx, pCsr->nRankArg, pCsr->apRankArg);
- }
- }
- }else if( !fts5IsContentless(pTab) ){
- rc = fts5SeekCursor(pCsr, 1);
- if( rc==SQLITE_OK ){
- sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1));
- }
- }
- return rc;
-}
-
-
-/*
-** This routine implements the xFindFunction method for the FTS5
-** virtual table.
-*/
-static int fts5FindFunctionMethod(
- sqlite3_vtab *pVtab, /* Virtual table handle */
- int nArg, /* Number of SQL function arguments */
- const char *zName, /* Name of SQL function */
- void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */
- void **ppArg /* OUT: User data for *pxFunc */
-){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- Fts5Auxiliary *pAux;
-
- pAux = fts5FindAuxiliary(pTab, zName);
- if( pAux ){
- *pxFunc = fts5ApiCallback;
- *ppArg = (void*)pAux;
- return 1;
- }
-
- /* No function of the specified name was found. Return 0. */
- return 0;
-}
-
-/*
-** Implementation of FTS5 xRename method. Rename an fts5 table.
-*/
-static int fts5RenameMethod(
- sqlite3_vtab *pVtab, /* Virtual table handle */
- const char *zName /* New name of table */
-){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- return sqlite3Fts5StorageRename(pTab->pStorage, zName);
-}
-
-/*
-** The xSavepoint() method.
-**
-** Flush the contents of the pending-terms table to disk.
-*/
-static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint);
- fts5TripCursors(pTab);
- return sqlite3Fts5StorageSync(pTab->pStorage, 0);
-}
-
-/*
-** The xRelease() method.
-**
-** This is a no-op.
-*/
-static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- fts5CheckTransactionState(pTab, FTS5_RELEASE, iSavepoint);
- fts5TripCursors(pTab);
- return sqlite3Fts5StorageSync(pTab->pStorage, 0);
-}
-
-/*
-** The xRollbackTo() method.
-**
-** Discard the contents of the pending terms table.
-*/
-static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
- Fts5Table *pTab = (Fts5Table*)pVtab;
- fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint);
- fts5TripCursors(pTab);
- return sqlite3Fts5StorageRollback(pTab->pStorage);
-}
-
-/*
-** Register a new auxiliary function with global context pGlobal.
-*/
-static int fts5CreateAux(
- fts5_api *pApi, /* Global context (one per db handle) */
- const char *zName, /* Name of new function */
- void *pUserData, /* User data for aux. function */
- fts5_extension_function xFunc, /* Aux. function implementation */
- void(*xDestroy)(void*) /* Destructor for pUserData */
-){
- Fts5Global *pGlobal = (Fts5Global*)pApi;
- int rc = sqlite3_overload_function(pGlobal->db, zName, -1);
- if( rc==SQLITE_OK ){
- Fts5Auxiliary *pAux;
- int nName; /* Size of zName in bytes, including \0 */
- int nByte; /* Bytes of space to allocate */
-
- nName = (int)strlen(zName) + 1;
- nByte = sizeof(Fts5Auxiliary) + nName;
- pAux = (Fts5Auxiliary*)sqlite3_malloc(nByte);
- if( pAux ){
- memset(pAux, 0, nByte);
- pAux->zFunc = (char*)&pAux[1];
- memcpy(pAux->zFunc, zName, nName);
- pAux->pGlobal = pGlobal;
- pAux->pUserData = pUserData;
- pAux->xFunc = xFunc;
- pAux->xDestroy = xDestroy;
- pAux->pNext = pGlobal->pAux;
- pGlobal->pAux = pAux;
- }else{
- rc = SQLITE_NOMEM;
- }
- }
-
- return rc;
-}
-
-/*
-** Register a new tokenizer. This is the implementation of the
-** fts5_api.xCreateTokenizer() method.
-*/
-static int fts5CreateTokenizer(
- fts5_api *pApi, /* Global context (one per db handle) */
- const char *zName, /* Name of new function */
- void *pUserData, /* User data for aux. function */
- fts5_tokenizer *pTokenizer, /* Tokenizer implementation */
- void(*xDestroy)(void*) /* Destructor for pUserData */
-){
- Fts5Global *pGlobal = (Fts5Global*)pApi;
- Fts5TokenizerModule *pNew;
- int nName; /* Size of zName and its \0 terminator */
- int nByte; /* Bytes of space to allocate */
- int rc = SQLITE_OK;
-
- nName = (int)strlen(zName) + 1;
- nByte = sizeof(Fts5TokenizerModule) + nName;
- pNew = (Fts5TokenizerModule*)sqlite3_malloc(nByte);
- if( pNew ){
- memset(pNew, 0, nByte);
- pNew->zName = (char*)&pNew[1];
- memcpy(pNew->zName, zName, nName);
- pNew->pUserData = pUserData;
- pNew->x = *pTokenizer;
- pNew->xDestroy = xDestroy;
- pNew->pNext = pGlobal->pTok;
- pGlobal->pTok = pNew;
- if( pNew->pNext==0 ){
- pGlobal->pDfltTok = pNew;
- }
- }else{
- rc = SQLITE_NOMEM;
- }
-
- return rc;
-}
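fts5CreateTokenizer() above copies the caller's fts5_tokenizer struct and links it into the global list, so a static struct is sufficient on the caller's side. A sketch of such a caller follows: a toy tokenizer that splits on ASCII space characters only, registered under the assumed name "ws". It requires an fts5_api pointer, one way of obtaining which is sketched after fts5Fts5Func() further down; "fts5.h" is assumed to provide the extension declarations.

#include <sqlite3.h>
#include "fts5.h"   /* Declares fts5_api, fts5_tokenizer, Fts5Tokenizer */

/* Sketch: a toy tokenizer that treats ASCII spaces as the only separator. */
static int wsCreate(void *pCtx, const char **azArg, int nArg,
                    Fts5Tokenizer **ppOut){
  (void)pCtx; (void)azArg; (void)nArg;
  *ppOut = (Fts5Tokenizer*)sqlite3_malloc(8);   /* Dummy non-NULL handle */
  return *ppOut ? SQLITE_OK : SQLITE_NOMEM;
}
static void wsDelete(Fts5Tokenizer *p){ sqlite3_free(p); }

static int wsTokenize(
  Fts5Tokenizer *p, void *pCtx, int flags, const char *pText, int nText,
  int (*xToken)(void*, int, const char*, int, int, int)
){
  int i = 0;
  int rc = SQLITE_OK;
  (void)p; (void)flags;
  while( i<nText && rc==SQLITE_OK ){
    int iStart;
    while( i<nText && pText[i]==' ' ) i++;      /* Skip separators */
    iStart = i;
    while( i<nText && pText[i]!=' ' ) i++;      /* Consume one token */
    if( i>iStart ){
      rc = xToken(pCtx, 0, &pText[iStart], i-iStart, iStart, i);
    }
  }
  return rc;
}

/* Register the tokenizer as "ws"; pApi must already have been obtained. */
static int registerWsTokenizer(fts5_api *pApi){
  static fts5_tokenizer tok = { wsCreate, wsDelete, wsTokenize };
  return pApi->xCreateTokenizer(pApi, "ws", 0, &tok, 0);
}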
-
-static Fts5TokenizerModule *fts5LocateTokenizer(
- Fts5Global *pGlobal,
- const char *zName
-){
- Fts5TokenizerModule *pMod = 0;
-
- if( zName==0 ){
- pMod = pGlobal->pDfltTok;
- }else{
- for(pMod=pGlobal->pTok; pMod; pMod=pMod->pNext){
- if( sqlite3_stricmp(zName, pMod->zName)==0 ) break;
- }
- }
-
- return pMod;
-}
-
-/*
-** Find a tokenizer. This is the implementation of the
-** fts5_api.xFindTokenizer() method.
-*/
-static int fts5FindTokenizer(
- fts5_api *pApi, /* Global context (one per db handle) */
- const char *zName, /* Name of new function */
- void **ppUserData,
- fts5_tokenizer *pTokenizer /* Populate this object */
-){
- int rc = SQLITE_OK;
- Fts5TokenizerModule *pMod;
-
- pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName);
- if( pMod ){
- *pTokenizer = pMod->x;
- *ppUserData = pMod->pUserData;
- }else{
- memset(pTokenizer, 0, sizeof(fts5_tokenizer));
- rc = SQLITE_ERROR;
- }
-
- return rc;
-}
-
-static int sqlite3Fts5GetTokenizer(
- Fts5Global *pGlobal,
- const char **azArg,
- int nArg,
- Fts5Tokenizer **ppTok,
- fts5_tokenizer **ppTokApi,
- char **pzErr
-){
- Fts5TokenizerModule *pMod;
- int rc = SQLITE_OK;
-
- pMod = fts5LocateTokenizer(pGlobal, nArg==0 ? 0 : azArg[0]);
- if( pMod==0 ){
- assert( nArg>0 );
- rc = SQLITE_ERROR;
- *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
- }else{
- rc = pMod->x.xCreate(pMod->pUserData, &azArg[1], (nArg?nArg-1:0), ppTok);
- *ppTokApi = &pMod->x;
- if( rc!=SQLITE_OK && pzErr ){
- *pzErr = sqlite3_mprintf("error in tokenizer constructor");
- }
- }
-
- if( rc!=SQLITE_OK ){
- *ppTokApi = 0;
- *ppTok = 0;
- }
-
- return rc;
-}
-
-static void fts5ModuleDestroy(void *pCtx){
- Fts5TokenizerModule *pTok, *pNextTok;
- Fts5Auxiliary *pAux, *pNextAux;
- Fts5Global *pGlobal = (Fts5Global*)pCtx;
-
- for(pAux=pGlobal->pAux; pAux; pAux=pNextAux){
- pNextAux = pAux->pNext;
- if( pAux->xDestroy ) pAux->xDestroy(pAux->pUserData);
- sqlite3_free(pAux);
- }
-
- for(pTok=pGlobal->pTok; pTok; pTok=pNextTok){
- pNextTok = pTok->pNext;
- if( pTok->xDestroy ) pTok->xDestroy(pTok->pUserData);
- sqlite3_free(pTok);
- }
-
- sqlite3_free(pGlobal);
-}
-
-static void fts5Fts5Func(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal /* Function arguments */
-){
- Fts5Global *pGlobal = (Fts5Global*)sqlite3_user_data(pCtx);
- char buf[8];
- assert( nArg==0 );
- assert( sizeof(buf)>=sizeof(pGlobal) );
- memcpy(buf, (void*)&pGlobal, sizeof(pGlobal));
- sqlite3_result_blob(pCtx, buf, sizeof(pGlobal), SQLITE_TRANSIENT);
-}
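In this build the fts5() SQL function takes no arguments and returns the address of the Fts5Global object as a blob. Assuming, as in the fts5 sources of this period, that the fts5_api struct sits at the start of that object, an extension can recover the API pointer with a memcpy. A hedged sketch follows, including registration of a placeholder auxiliary function named "demo".

#include <string.h>
#include <sqlite3.h>
#include "fts5.h"   /* Declares fts5_api and the extension types */

/* Sketch: a trivial auxiliary function that returns the current rowid. */
static void demoFunc(
  const Fts5ExtensionApi *pApi, Fts5Context *pFts,
  sqlite3_context *pCtx, int nVal, sqlite3_value **apVal
){
  (void)nVal; (void)apVal;
  sqlite3_result_int64(pCtx, pApi->xRowid(pFts));
}

/* Recover the fts5_api pointer from "SELECT fts5()" and register demoFunc.
** The blob-to-pointer memcpy mirrors what fts5Fts5Func() writes above. */
static int registerDemo(sqlite3 *db){
  fts5_api *pApi = 0;
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT fts5()", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW
     && sqlite3_column_bytes(pStmt, 0)==sizeof(fts5_api*)
    ){
      memcpy(&pApi, sqlite3_column_blob(pStmt, 0), sizeof(fts5_api*));
    }
    rc = sqlite3_finalize(pStmt);
  }
  if( rc==SQLITE_OK && pApi ){
    rc = pApi->xCreateFunction(pApi, "demo", 0, demoFunc, 0);
  }
  return rc;
}

Once registered, such a function is invoked as "SELECT demo(ft) FROM ft WHERE ft MATCH ...", with the hidden column value supplying the cursor id that fts5ApiCallback() resolves.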
-
-/*
-** Implementation of fts5_source_id() function.
-*/
-static void fts5SourceIdFunc(
- sqlite3_context *pCtx, /* Function call context */
- int nArg, /* Number of args */
- sqlite3_value **apVal /* Function arguments */
-){
- assert( nArg==0 );
- sqlite3_result_text(pCtx, "fts5: 2015-11-02 18:31:45 bda77dda9697c463c3d0704014d51627fceee328", -1, SQLITE_TRANSIENT);
-}
-
-static int fts5Init(sqlite3 *db){
- static const sqlite3_module fts5Mod = {
- /* iVersion */ 2,
- /* xCreate */ fts5CreateMethod,
- /* xConnect */ fts5ConnectMethod,
- /* xBestIndex */ fts5BestIndexMethod,
- /* xDisconnect */ fts5DisconnectMethod,
- /* xDestroy */ fts5DestroyMethod,
- /* xOpen */ fts5OpenMethod,
- /* xClose */ fts5CloseMethod,
- /* xFilter */ fts5FilterMethod,
- /* xNext */ fts5NextMethod,
- /* xEof */ fts5EofMethod,
- /* xColumn */ fts5ColumnMethod,
- /* xRowid */ fts5RowidMethod,
- /* xUpdate */ fts5UpdateMethod,
- /* xBegin */ fts5BeginMethod,
- /* xSync */ fts5SyncMethod,
- /* xCommit */ fts5CommitMethod,
- /* xRollback */ fts5RollbackMethod,
- /* xFindFunction */ fts5FindFunctionMethod,
- /* xRename */ fts5RenameMethod,
- /* xSavepoint */ fts5SavepointMethod,
- /* xRelease */ fts5ReleaseMethod,
- /* xRollbackTo */ fts5RollbackToMethod,
- };
-
- int rc;
- Fts5Global *pGlobal = 0;
-
- pGlobal = (Fts5Global*)sqlite3_malloc(sizeof(Fts5Global));
- if( pGlobal==0 ){
- rc = SQLITE_NOMEM;
- }else{
- void *p = (void*)pGlobal;
- memset(pGlobal, 0, sizeof(Fts5Global));
- pGlobal->db = db;
- pGlobal->api.iVersion = 2;
- pGlobal->api.xCreateFunction = fts5CreateAux;
- pGlobal->api.xCreateTokenizer = fts5CreateTokenizer;
- pGlobal->api.xFindTokenizer = fts5FindTokenizer;
- rc = sqlite3_create_module_v2(db, "fts5", &fts5Mod, p, fts5ModuleDestroy);
- if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexInit(db);
- if( rc==SQLITE_OK ) rc = sqlite3Fts5ExprInit(pGlobal, db);
- if( rc==SQLITE_OK ) rc = sqlite3Fts5AuxInit(&pGlobal->api);
- if( rc==SQLITE_OK ) rc = sqlite3Fts5TokenizerInit(&pGlobal->api);
- if( rc==SQLITE_OK ) rc = sqlite3Fts5VocabInit(pGlobal, db);
- if( rc==SQLITE_OK ){
- rc = sqlite3_create_function(
- db, "fts5", 0, SQLITE_UTF8, p, fts5Fts5Func, 0, 0
- );
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3_create_function(
- db, "fts5_source_id", 0, SQLITE_UTF8, p, fts5SourceIdFunc, 0, 0
- );
- }
- }
- return rc;
-}
-
-/*
-** The following functions are used to register the module with SQLite. If
-** this module is being built as part of the SQLite core (SQLITE_CORE is
-** defined), then sqlite3_open() will call sqlite3Fts5Init() directly.
-**
-** Or, if this module is being built as a loadable extension,
-** sqlite3Fts5Init() is omitted and the two standard entry points
-** sqlite3_fts_init() and sqlite3_fts5_init() are defined instead.
-*/
-#ifndef SQLITE_CORE
-#ifdef _WIN32
-__declspec(dllexport)
-#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_fts_init(
- sqlite3 *db,
- char **pzErrMsg,
- const sqlite3_api_routines *pApi
-){
- SQLITE_EXTENSION_INIT2(pApi);
- (void)pzErrMsg; /* Unused parameter */
- return fts5Init(db);
-}
-
-#ifdef _WIN32
-__declspec(dllexport)
-#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_fts5_init(
- sqlite3 *db,
- char **pzErrMsg,
- const sqlite3_api_routines *pApi
-){
- SQLITE_EXTENSION_INIT2(pApi);
- (void)pzErrMsg; /* Unused parameter */
- return fts5Init(db);
-}
-#else
-SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3 *db){
- return fts5Init(db);
-}
-#endif
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-*/
-
-
-
-
-struct Fts5Storage {
- Fts5Config *pConfig;
- Fts5Index *pIndex;
- int bTotalsValid; /* True if nTotalRow/aTotalSize[] are valid */
- i64 nTotalRow; /* Total number of rows in FTS table */
- i64 *aTotalSize; /* Total sizes of each column */
- sqlite3_stmt *aStmt[11];
-};
-
-
-#if FTS5_STMT_SCAN_ASC!=0
-# error "FTS5_STMT_SCAN_ASC mismatch"
-#endif
-#if FTS5_STMT_SCAN_DESC!=1
-# error "FTS5_STMT_SCAN_DESC mismatch"
-#endif
-#if FTS5_STMT_LOOKUP!=2
-# error "FTS5_STMT_LOOKUP mismatch"
-#endif
-
-#define FTS5_STMT_INSERT_CONTENT 3
-#define FTS5_STMT_REPLACE_CONTENT 4
-#define FTS5_STMT_DELETE_CONTENT 5
-#define FTS5_STMT_REPLACE_DOCSIZE 6
-#define FTS5_STMT_DELETE_DOCSIZE 7
-#define FTS5_STMT_LOOKUP_DOCSIZE 8
-#define FTS5_STMT_REPLACE_CONFIG 9
-#define FTS5_STMT_SCAN 10
-
-/*
-** If the statement identified by argument eStmt has not already been
-** prepared, prepare and cache it now. Set *ppStmt to point to it.
-** Return SQLITE_OK if successful, or an SQLite error code if an error
-** occurs.
-*/
-static int fts5StorageGetStmt(
- Fts5Storage *p, /* Storage handle */
- int eStmt, /* FTS5_STMT_XXX constant */
- sqlite3_stmt **ppStmt, /* OUT: Prepared statement handle */
- char **pzErrMsg /* OUT: Error message (if any) */
-){
- int rc = SQLITE_OK;
-
- /* If there is no %_docsize table, there should be no requests for
- ** statements to operate on it. */
- assert( p->pConfig->bColumnsize || (
- eStmt!=FTS5_STMT_REPLACE_DOCSIZE
- && eStmt!=FTS5_STMT_DELETE_DOCSIZE
- && eStmt!=FTS5_STMT_LOOKUP_DOCSIZE
- ));
-
- assert( eStmt>=0 && eStmt<ArraySize(p->aStmt) );
- if( p->aStmt[eStmt]==0 ){
- const char *azStmt[] = {
- "SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC",
- "SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? ORDER BY T.%Q DESC",
- "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP */
-
- "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */
- "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */
- "DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */
- "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */
- "DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */
-
- "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */
-
- "REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */
- "SELECT %s FROM %s AS T", /* SCAN */
- };
- Fts5Config *pC = p->pConfig;
- char *zSql = 0;
-
- switch( eStmt ){
- case FTS5_STMT_SCAN:
- zSql = sqlite3_mprintf(azStmt[eStmt],
- pC->zContentExprlist, pC->zContent
- );
- break;
-
- case FTS5_STMT_SCAN_ASC:
- case FTS5_STMT_SCAN_DESC:
- zSql = sqlite3_mprintf(azStmt[eStmt], pC->zContentExprlist,
- pC->zContent, pC->zContentRowid, pC->zContentRowid,
- pC->zContentRowid
- );
- break;
-
- case FTS5_STMT_LOOKUP:
- zSql = sqlite3_mprintf(azStmt[eStmt],
- pC->zContentExprlist, pC->zContent, pC->zContentRowid
- );
- break;
-
- case FTS5_STMT_INSERT_CONTENT:
- case FTS5_STMT_REPLACE_CONTENT: {
- int nCol = pC->nCol + 1;
- char *zBind;
- int i;
-
- zBind = sqlite3_malloc(1 + nCol*2);
- if( zBind ){
- for(i=0; i<nCol; i++){
- zBind[i*2] = '?';
- zBind[i*2 + 1] = ',';
- }
- zBind[i*2-1] = '\0';
- zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, zBind);
- sqlite3_free(zBind);
- }
- break;
- }
-
- default:
- zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName);
- break;
- }
-
- if( zSql==0 ){
- rc = SQLITE_NOMEM;
- }else{
- rc = sqlite3_prepare_v2(pC->db, zSql, -1, &p->aStmt[eStmt], 0);
- sqlite3_free(zSql);
- if( rc!=SQLITE_OK && pzErrMsg ){
- *pzErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pC->db));
- }
- }
- }
-
- *ppStmt = p->aStmt[eStmt];
- return rc;
-}
-
-
-static int fts5ExecPrintf(
- sqlite3 *db,
- char **pzErr,
- const char *zFormat,
- ...
-){
- int rc;
- va_list ap; /* ... printf arguments */
- char *zSql;
-
- va_start(ap, zFormat);
- zSql = sqlite3_vmprintf(zFormat, ap);
-
- if( zSql==0 ){
- rc = SQLITE_NOMEM;
- }else{
- rc = sqlite3_exec(db, zSql, 0, 0, pzErr);
- sqlite3_free(zSql);
- }
-
- va_end(ap);
- return rc;
-}
-
-/*
-** Drop all shadow tables. Return SQLITE_OK if successful or an SQLite error
-** code otherwise.
-*/
-static int sqlite3Fts5DropAll(Fts5Config *pConfig){
- int rc = fts5ExecPrintf(pConfig->db, 0,
- "DROP TABLE IF EXISTS %Q.'%q_data';"
- "DROP TABLE IF EXISTS %Q.'%q_idx';"
- "DROP TABLE IF EXISTS %Q.'%q_config';",
- pConfig->zDb, pConfig->zName,
- pConfig->zDb, pConfig->zName,
- pConfig->zDb, pConfig->zName
- );
- if( rc==SQLITE_OK && pConfig->bColumnsize ){
- rc = fts5ExecPrintf(pConfig->db, 0,
- "DROP TABLE IF EXISTS %Q.'%q_docsize';",
- pConfig->zDb, pConfig->zName
- );
- }
- if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){
- rc = fts5ExecPrintf(pConfig->db, 0,
- "DROP TABLE IF EXISTS %Q.'%q_content';",
- pConfig->zDb, pConfig->zName
- );
- }
- return rc;
-}
-
-static void fts5StorageRenameOne(
- Fts5Config *pConfig, /* Current FTS5 configuration */
- int *pRc, /* IN/OUT: Error code */
- const char *zTail, /* Tail of table name e.g. "data", "config" */
- const char *zName /* New name of FTS5 table */
-){
- if( *pRc==SQLITE_OK ){
- *pRc = fts5ExecPrintf(pConfig->db, 0,
- "ALTER TABLE %Q.'%q_%s' RENAME TO '%q_%s';",
- pConfig->zDb, pConfig->zName, zTail, zName, zTail
- );
- }
-}
-
-static int sqlite3Fts5StorageRename(Fts5Storage *pStorage, const char *zName){
- Fts5Config *pConfig = pStorage->pConfig;
- int rc = sqlite3Fts5StorageSync(pStorage, 1);
-
- fts5StorageRenameOne(pConfig, &rc, "data", zName);
- fts5StorageRenameOne(pConfig, &rc, "idx", zName);
- fts5StorageRenameOne(pConfig, &rc, "config", zName);
- if( pConfig->bColumnsize ){
- fts5StorageRenameOne(pConfig, &rc, "docsize", zName);
- }
- if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
- fts5StorageRenameOne(pConfig, &rc, "content", zName);
- }
- return rc;
-}
-
-/*
-** Create the shadow table named zPost, with definition zDefn. Return
-** SQLITE_OK if successful, or an SQLite error code otherwise.
-*/
-static int sqlite3Fts5CreateTable(
- Fts5Config *pConfig, /* FTS5 configuration */
- const char *zPost, /* Shadow table to create (e.g. "content") */
- const char *zDefn, /* Columns etc. for shadow table */
- int bWithout, /* True for without rowid */
- char **pzErr /* OUT: Error message */
-){
- int rc;
- char *zErr = 0;
-
- rc = fts5ExecPrintf(pConfig->db, &zErr, "CREATE TABLE %Q.'%q_%q'(%s)%s",
- pConfig->zDb, pConfig->zName, zPost, zDefn, bWithout?" WITHOUT ROWID":""
- );
- if( zErr ){
- *pzErr = sqlite3_mprintf(
- "fts5: error creating shadow table %q_%s: %s",
- pConfig->zName, zPost, zErr
- );
- sqlite3_free(zErr);
- }
-
- return rc;
-}
-
-/*
-** Open a new Fts5Storage handle. If the bCreate argument is true, create
-** and initialize the underlying shadow tables.
-**
-** If successful, set *pp to point to the new object and return SQLITE_OK.
-** Otherwise, set *pp to NULL and return an SQLite error code.
-*/
-static int sqlite3Fts5StorageOpen(
- Fts5Config *pConfig,
- Fts5Index *pIndex,
- int bCreate,
- Fts5Storage **pp,
- char **pzErr /* OUT: Error message */
-){
- int rc = SQLITE_OK;
- Fts5Storage *p; /* New object */
- int nByte; /* Bytes of space to allocate */
-
- nByte = sizeof(Fts5Storage) /* Fts5Storage object */
- + pConfig->nCol * sizeof(i64); /* Fts5Storage.aTotalSize[] */
- *pp = p = (Fts5Storage*)sqlite3_malloc(nByte);
- if( !p ) return SQLITE_NOMEM;
-
- memset(p, 0, nByte);
- p->aTotalSize = (i64*)&p[1];
- p->pConfig = pConfig;
- p->pIndex = pIndex;
-
- if( bCreate ){
- if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
- int nDefn = 32 + pConfig->nCol*10;
-      char *zDefn = sqlite3_malloc(nDefn);
- if( zDefn==0 ){
- rc = SQLITE_NOMEM;
- }else{
- int i;
- int iOff;
- sqlite3_snprintf(nDefn, zDefn, "id INTEGER PRIMARY KEY");
- iOff = strlen(zDefn);
- for(i=0; i<pConfig->nCol; i++){
- sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i);
- iOff += strlen(&zDefn[iOff]);
- }
- rc = sqlite3Fts5CreateTable(pConfig, "content", zDefn, 0, pzErr);
- }
- sqlite3_free(zDefn);
- }
-
- if( rc==SQLITE_OK && pConfig->bColumnsize ){
- rc = sqlite3Fts5CreateTable(
- pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr
- );
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5CreateTable(
- pConfig, "config", "k PRIMARY KEY, v", 1, pzErr
- );
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageConfigValue(p, "version", 0, FTS5_CURRENT_VERSION);
- }
- }
-
- if( rc ){
- sqlite3Fts5StorageClose(p);
- *pp = 0;
- }
- return rc;
-}
-
-/*
-** Close a handle opened by an earlier call to sqlite3Fts5StorageOpen().
-*/
-static int sqlite3Fts5StorageClose(Fts5Storage *p){
- int rc = SQLITE_OK;
- if( p ){
- int i;
-
- /* Finalize all SQL statements */
- for(i=0; i<ArraySize(p->aStmt); i++){
- sqlite3_finalize(p->aStmt[i]);
- }
-
- sqlite3_free(p);
- }
- return rc;
-}
-
-typedef struct Fts5InsertCtx Fts5InsertCtx;
-struct Fts5InsertCtx {
- Fts5Storage *pStorage;
- int iCol;
- int szCol; /* Size of column value in tokens */
-};
-
-/*
-** Tokenization callback used when inserting tokens into the FTS index.
-*/
-static int fts5StorageInsertCallback(
- void *pContext, /* Pointer to Fts5InsertCtx object */
- int tflags,
- const char *pToken, /* Buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Start offset of token */
- int iEnd /* End offset of token */
-){
- Fts5InsertCtx *pCtx = (Fts5InsertCtx*)pContext;
- Fts5Index *pIdx = pCtx->pStorage->pIndex;
- if( (tflags & FTS5_TOKEN_COLOCATED)==0 || pCtx->szCol==0 ){
- pCtx->szCol++;
- }
- return sqlite3Fts5IndexWrite(pIdx, pCtx->iCol, pCtx->szCol-1, pToken, nToken);
-}
-
-/*
-** If a row with rowid iDel is present in the %_content table, add the
-** delete-markers to the FTS index necessary to delete it. Do not actually
-** remove the %_content row at this time though.
-*/
-static int fts5StorageDeleteFromIndex(Fts5Storage *p, i64 iDel){
- Fts5Config *pConfig = p->pConfig;
- sqlite3_stmt *pSeek; /* SELECT to read row iDel from %_data */
- int rc; /* Return code */
-
- rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP, &pSeek, 0);
- if( rc==SQLITE_OK ){
- int rc2;
- sqlite3_bind_int64(pSeek, 1, iDel);
- if( sqlite3_step(pSeek)==SQLITE_ROW ){
- int iCol;
- Fts5InsertCtx ctx;
- ctx.pStorage = p;
- ctx.iCol = -1;
- rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
- for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){
- if( pConfig->abUnindexed[iCol-1] ) continue;
- ctx.szCol = 0;
- rc = sqlite3Fts5Tokenize(pConfig,
- FTS5_TOKENIZE_DOCUMENT,
- (const char*)sqlite3_column_text(pSeek, iCol),
- sqlite3_column_bytes(pSeek, iCol),
- (void*)&ctx,
- fts5StorageInsertCallback
- );
- p->aTotalSize[iCol-1] -= (i64)ctx.szCol;
- }
- p->nTotalRow--;
- }
- rc2 = sqlite3_reset(pSeek);
- if( rc==SQLITE_OK ) rc = rc2;
- }
-
- return rc;
-}
-
-
-/*
-** Insert a record into the %_docsize table. Specifically, do:
-**
-** INSERT OR REPLACE INTO %_docsize(id, sz) VALUES(iRowid, pBuf);
-**
-** If there is no %_docsize table (as happens if the columnsize=0 option
-** is specified when the FTS5 table is created), this function is a no-op.
-*/
-static int fts5StorageInsertDocsize(
- Fts5Storage *p, /* Storage module to write to */
- i64 iRowid, /* id value */
- Fts5Buffer *pBuf /* sz value */
-){
- int rc = SQLITE_OK;
- if( p->pConfig->bColumnsize ){
- sqlite3_stmt *pReplace = 0;
- rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0);
- if( rc==SQLITE_OK ){
- sqlite3_bind_int64(pReplace, 1, iRowid);
- sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC);
- sqlite3_step(pReplace);
- rc = sqlite3_reset(pReplace);
- }
- }
- return rc;
-}
-
-/*
-** Load the contents of the "averages" record from disk into the
-** p->nTotalRow and p->aTotalSize[] variables. If successful, and if
-** argument bCache is true, set the p->bTotalsValid flag to indicate
-** that the contents of aTotalSize[] and nTotalRow are valid until
-** further notice.
-**
-** Return SQLITE_OK if successful, or an SQLite error code if an error
-** occurs.
-*/
-static int fts5StorageLoadTotals(Fts5Storage *p, int bCache){
- int rc = SQLITE_OK;
- if( p->bTotalsValid==0 ){
- rc = sqlite3Fts5IndexGetAverages(p->pIndex, &p->nTotalRow, p->aTotalSize);
- p->bTotalsValid = bCache;
- }
- return rc;
-}
-
-/*
-** Store the current contents of the p->nTotalRow and p->aTotalSize[]
-** variables in the "averages" record on disk.
-**
-** Return SQLITE_OK if successful, or an SQLite error code if an error
-** occurs.
-*/
-static int fts5StorageSaveTotals(Fts5Storage *p){
- int nCol = p->pConfig->nCol;
- int i;
- Fts5Buffer buf;
- int rc = SQLITE_OK;
- memset(&buf, 0, sizeof(buf));
-
- sqlite3Fts5BufferAppendVarint(&rc, &buf, p->nTotalRow);
- for(i=0; i<nCol; i++){
- sqlite3Fts5BufferAppendVarint(&rc, &buf, p->aTotalSize[i]);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5IndexSetAverages(p->pIndex, buf.p, buf.n);
- }
- sqlite3_free(buf.p);
-
- return rc;
-}
-
-/*
-** Remove a row from the FTS table.
-*/
-static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel){
- Fts5Config *pConfig = p->pConfig;
- int rc;
- sqlite3_stmt *pDel = 0;
-
- rc = fts5StorageLoadTotals(p, 1);
-
- /* Delete the index records */
- if( rc==SQLITE_OK ){
- rc = fts5StorageDeleteFromIndex(p, iDel);
- }
-
- /* Delete the %_docsize record */
- if( rc==SQLITE_OK && pConfig->bColumnsize ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_DOCSIZE, &pDel, 0);
- if( rc==SQLITE_OK ){
- sqlite3_bind_int64(pDel, 1, iDel);
- sqlite3_step(pDel);
- rc = sqlite3_reset(pDel);
- }
- }
-
- /* Delete the %_content record */
- if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_CONTENT, &pDel, 0);
- }
- if( rc==SQLITE_OK ){
- sqlite3_bind_int64(pDel, 1, iDel);
- sqlite3_step(pDel);
- rc = sqlite3_reset(pDel);
- }
-
- /* Write the averages record */
- if( rc==SQLITE_OK ){
- rc = fts5StorageSaveTotals(p);
- }
-
- return rc;
-}
-
-static int sqlite3Fts5StorageSpecialDelete(
- Fts5Storage *p,
- i64 iDel,
- sqlite3_value **apVal
-){
- Fts5Config *pConfig = p->pConfig;
- int rc;
- sqlite3_stmt *pDel = 0;
-
- assert( pConfig->eContent!=FTS5_CONTENT_NORMAL );
- rc = fts5StorageLoadTotals(p, 1);
-
- /* Delete the index records */
- if( rc==SQLITE_OK ){
- int iCol;
- Fts5InsertCtx ctx;
- ctx.pStorage = p;
- ctx.iCol = -1;
-
- rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
- for(iCol=0; rc==SQLITE_OK && iCol<pConfig->nCol; iCol++){
- if( pConfig->abUnindexed[iCol] ) continue;
- ctx.szCol = 0;
- rc = sqlite3Fts5Tokenize(pConfig,
- FTS5_TOKENIZE_DOCUMENT,
- (const char*)sqlite3_value_text(apVal[iCol]),
- sqlite3_value_bytes(apVal[iCol]),
- (void*)&ctx,
- fts5StorageInsertCallback
- );
- p->aTotalSize[iCol] -= (i64)ctx.szCol;
- }
- p->nTotalRow--;
- }
-
- /* Delete the %_docsize record */
- if( pConfig->bColumnsize ){
- if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_DOCSIZE, &pDel, 0);
- }
- if( rc==SQLITE_OK ){
- sqlite3_bind_int64(pDel, 1, iDel);
- sqlite3_step(pDel);
- rc = sqlite3_reset(pDel);
- }
- }
-
- /* Write the averages record */
- if( rc==SQLITE_OK ){
- rc = fts5StorageSaveTotals(p);
- }
-
- return rc;
-}
-
-/*
-** Delete all entries in the FTS5 index.
-*/
-static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p){
- Fts5Config *pConfig = p->pConfig;
- int rc;
-
- /* Delete the contents of the %_data and %_docsize tables. */
- rc = fts5ExecPrintf(pConfig->db, 0,
- "DELETE FROM %Q.'%q_data';"
- "DELETE FROM %Q.'%q_idx';",
- pConfig->zDb, pConfig->zName,
- pConfig->zDb, pConfig->zName
- );
- if( rc==SQLITE_OK && pConfig->bColumnsize ){
- rc = fts5ExecPrintf(pConfig->db, 0,
- "DELETE FROM %Q.'%q_docsize';",
- pConfig->zDb, pConfig->zName
- );
- }
-
- /* Reinitialize the %_data table. This call creates the initial structure
- ** and averages records. */
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5IndexReinit(p->pIndex);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5StorageConfigValue(p, "version", 0, FTS5_CURRENT_VERSION);
- }
- return rc;
-}
-
-static int sqlite3Fts5StorageRebuild(Fts5Storage *p){
- Fts5Buffer buf = {0,0,0};
- Fts5Config *pConfig = p->pConfig;
- sqlite3_stmt *pScan = 0;
- Fts5InsertCtx ctx;
- int rc;
-
- memset(&ctx, 0, sizeof(Fts5InsertCtx));
- ctx.pStorage = p;
- rc = sqlite3Fts5StorageDeleteAll(p);
- if( rc==SQLITE_OK ){
- rc = fts5StorageLoadTotals(p, 1);
- }
-
- if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
- }
-
- while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){
- i64 iRowid = sqlite3_column_int64(pScan, 0);
-
- sqlite3Fts5BufferZero(&buf);
- rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 0, iRowid);
- for(ctx.iCol=0; rc==SQLITE_OK && ctx.iCol<pConfig->nCol; ctx.iCol++){
- ctx.szCol = 0;
- if( pConfig->abUnindexed[ctx.iCol]==0 ){
- rc = sqlite3Fts5Tokenize(pConfig,
- FTS5_TOKENIZE_DOCUMENT,
- (const char*)sqlite3_column_text(pScan, ctx.iCol+1),
- sqlite3_column_bytes(pScan, ctx.iCol+1),
- (void*)&ctx,
- fts5StorageInsertCallback
- );
- }
- sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol);
- p->aTotalSize[ctx.iCol] += (i64)ctx.szCol;
- }
- p->nTotalRow++;
-
- if( rc==SQLITE_OK ){
- rc = fts5StorageInsertDocsize(p, iRowid, &buf);
- }
- }
- sqlite3_free(buf.p);
-
- /* Write the averages record */
- if( rc==SQLITE_OK ){
- rc = fts5StorageSaveTotals(p);
- }
- return rc;
-}
-
-static int sqlite3Fts5StorageOptimize(Fts5Storage *p){
- return sqlite3Fts5IndexOptimize(p->pIndex);
-}
-
-static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge){
- return sqlite3Fts5IndexMerge(p->pIndex, nMerge);
-}
-
-/*
-** Allocate a new rowid. This is used for "external content" tables when
-** a NULL value is inserted into the rowid column. The new rowid is allocated
-** by inserting a dummy row into the %_docsize table. The dummy will be
-** overwritten later.
-**
-** If the %_docsize table does not exist, SQLITE_MISMATCH is returned. In
-** this case the user is required to provide a rowid explicitly.
-*/
-static int fts5StorageNewRowid(Fts5Storage *p, i64 *piRowid){
- int rc = SQLITE_MISMATCH;
- if( p->pConfig->bColumnsize ){
- sqlite3_stmt *pReplace = 0;
- rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0);
- if( rc==SQLITE_OK ){
- sqlite3_bind_null(pReplace, 1);
- sqlite3_bind_null(pReplace, 2);
- sqlite3_step(pReplace);
- rc = sqlite3_reset(pReplace);
- }
- if( rc==SQLITE_OK ){
- *piRowid = sqlite3_last_insert_rowid(p->pConfig->db);
- }
- }
- return rc;
-}
-
-/*
-** Insert a new row into the FTS content table.
-*/
-static int sqlite3Fts5StorageContentInsert(
- Fts5Storage *p,
- sqlite3_value **apVal,
- i64 *piRowid
-){
- Fts5Config *pConfig = p->pConfig;
- int rc = SQLITE_OK;
-
- /* Insert the new row into the %_content table. */
- if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){
- if( sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){
- *piRowid = sqlite3_value_int64(apVal[1]);
- }else{
- rc = fts5StorageNewRowid(p, piRowid);
- }
- }else{
- sqlite3_stmt *pInsert = 0; /* Statement to write %_content table */
- int i; /* Counter variable */
-#if 0
- if( eConflict==SQLITE_REPLACE ){
- eStmt = FTS5_STMT_REPLACE_CONTENT;
- rc = fts5StorageDeleteFromIndex(p, sqlite3_value_int64(apVal[1]));
- }else{
- eStmt = FTS5_STMT_INSERT_CONTENT;
- }
-#endif
- if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT, &pInsert, 0);
- }
- for(i=1; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){
- rc = sqlite3_bind_value(pInsert, i, apVal[i]);
- }
- if( rc==SQLITE_OK ){
- sqlite3_step(pInsert);
- rc = sqlite3_reset(pInsert);
- }
- *piRowid = sqlite3_last_insert_rowid(pConfig->db);
- }
-
- return rc;
-}
-
-/*
-** Insert new entries into the FTS index and %_docsize table.
-*/
-static int sqlite3Fts5StorageIndexInsert(
- Fts5Storage *p,
- sqlite3_value **apVal,
- i64 iRowid
-){
- Fts5Config *pConfig = p->pConfig;
- int rc = SQLITE_OK; /* Return code */
- Fts5InsertCtx ctx; /* Tokenization callback context object */
- Fts5Buffer buf; /* Buffer used to build up %_docsize blob */
-
- memset(&buf, 0, sizeof(Fts5Buffer));
- ctx.pStorage = p;
- rc = fts5StorageLoadTotals(p, 1);
-
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 0, iRowid);
- }
- for(ctx.iCol=0; rc==SQLITE_OK && ctx.iCol<pConfig->nCol; ctx.iCol++){
- ctx.szCol = 0;
- if( pConfig->abUnindexed[ctx.iCol]==0 ){
- rc = sqlite3Fts5Tokenize(pConfig,
- FTS5_TOKENIZE_DOCUMENT,
- (const char*)sqlite3_value_text(apVal[ctx.iCol+2]),
- sqlite3_value_bytes(apVal[ctx.iCol+2]),
- (void*)&ctx,
- fts5StorageInsertCallback
- );
- }
- sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol);
- p->aTotalSize[ctx.iCol] += (i64)ctx.szCol;
- }
- p->nTotalRow++;
-
- /* Write the %_docsize record */
- if( rc==SQLITE_OK ){
- rc = fts5StorageInsertDocsize(p, iRowid, &buf);
- }
- sqlite3_free(buf.p);
-
- /* Write the averages record */
- if( rc==SQLITE_OK ){
- rc = fts5StorageSaveTotals(p);
- }
-
- return rc;
-}
-
-static int fts5StorageCount(Fts5Storage *p, const char *zSuffix, i64 *pnRow){
- Fts5Config *pConfig = p->pConfig;
- char *zSql;
- int rc;
-
- zSql = sqlite3_mprintf("SELECT count(*) FROM %Q.'%q_%s'",
- pConfig->zDb, pConfig->zName, zSuffix
- );
- if( zSql==0 ){
- rc = SQLITE_NOMEM;
- }else{
- sqlite3_stmt *pCnt = 0;
- rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &pCnt, 0);
- if( rc==SQLITE_OK ){
- if( SQLITE_ROW==sqlite3_step(pCnt) ){
- *pnRow = sqlite3_column_int64(pCnt, 0);
- }
- rc = sqlite3_finalize(pCnt);
- }
- }
-
- sqlite3_free(zSql);
- return rc;
-}
-
-/*
-** Context object used by sqlite3Fts5StorageIntegrity().
-*/
-typedef struct Fts5IntegrityCtx Fts5IntegrityCtx;
-struct Fts5IntegrityCtx {
- i64 iRowid;
- int iCol;
- int szCol;
- u64 cksum;
- Fts5Config *pConfig;
-};
-
-/*
-** Tokenization callback used by integrity check.
-*/
-static int fts5StorageIntegrityCallback(
- void *pContext, /* Pointer to Fts5InsertCtx object */
- int tflags,
- const char *pToken, /* Buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Start offset of token */
- int iEnd /* End offset of token */
-){
- Fts5IntegrityCtx *pCtx = (Fts5IntegrityCtx*)pContext;
- if( (tflags & FTS5_TOKEN_COLOCATED)==0 || pCtx->szCol==0 ){
- pCtx->szCol++;
- }
- pCtx->cksum ^= sqlite3Fts5IndexCksum(
- pCtx->pConfig, pCtx->iRowid, pCtx->iCol, pCtx->szCol-1, pToken, nToken
- );
- return SQLITE_OK;
-}
-
-/*
-** Check that the contents of the FTS index match that of the %_content
-** table. Return SQLITE_OK if they do, or SQLITE_CORRUPT if not. Return
-** some other SQLite error code if an error occurs while attempting to
-** determine this.
-*/
-static int sqlite3Fts5StorageIntegrity(Fts5Storage *p){
- Fts5Config *pConfig = p->pConfig;
- int rc; /* Return code */
- int *aColSize; /* Array of size pConfig->nCol */
- i64 *aTotalSize; /* Array of size pConfig->nCol */
- Fts5IntegrityCtx ctx;
- sqlite3_stmt *pScan;
-
- memset(&ctx, 0, sizeof(Fts5IntegrityCtx));
- ctx.pConfig = p->pConfig;
- aTotalSize = (i64*)sqlite3_malloc(pConfig->nCol * (sizeof(int)+sizeof(i64)));
- if( !aTotalSize ) return SQLITE_NOMEM;
- aColSize = (int*)&aTotalSize[pConfig->nCol];
- memset(aTotalSize, 0, sizeof(i64) * pConfig->nCol);
-
- /* Generate the expected index checksum based on the contents of the
- ** %_content table. This block stores the checksum in ctx.cksum. */
- rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
- if( rc==SQLITE_OK ){
- int rc2;
- while( SQLITE_ROW==sqlite3_step(pScan) ){
- int i;
- ctx.iRowid = sqlite3_column_int64(pScan, 0);
- ctx.szCol = 0;
- if( pConfig->bColumnsize ){
- rc = sqlite3Fts5StorageDocsize(p, ctx.iRowid, aColSize);
- }
- for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
- if( pConfig->abUnindexed[i] ) continue;
- ctx.iCol = i;
- ctx.szCol = 0;
- rc = sqlite3Fts5Tokenize(pConfig,
- FTS5_TOKENIZE_DOCUMENT,
- (const char*)sqlite3_column_text(pScan, i+1),
- sqlite3_column_bytes(pScan, i+1),
- (void*)&ctx,
- fts5StorageIntegrityCallback
- );
- if( pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){
- rc = FTS5_CORRUPT;
- }
- aTotalSize[i] += ctx.szCol;
- }
- if( rc!=SQLITE_OK ) break;
- }
- rc2 = sqlite3_reset(pScan);
- if( rc==SQLITE_OK ) rc = rc2;
- }
-
- /* Test that the "totals" (sometimes called "averages") record looks Ok */
- if( rc==SQLITE_OK ){
- int i;
- rc = fts5StorageLoadTotals(p, 0);
- for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
- if( p->aTotalSize[i]!=aTotalSize[i] ) rc = FTS5_CORRUPT;
- }
- }
-
- /* Check that the %_docsize and %_content tables contain the expected
- ** number of rows. */
- if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){
- i64 nRow;
- rc = fts5StorageCount(p, "content", &nRow);
- if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT;
- }
- if( rc==SQLITE_OK && pConfig->bColumnsize ){
- i64 nRow;
- rc = fts5StorageCount(p, "docsize", &nRow);
- if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT;
- }
-
- /* Pass the expected checksum down to the FTS index module. It will
- ** verify, amongst other things, that it matches the checksum generated by
- ** inspecting the index itself. */
- if( rc==SQLITE_OK ){
- rc = sqlite3Fts5IndexIntegrityCheck(p->pIndex, ctx.cksum);
- }
-
- sqlite3_free(aTotalSize);
- return rc;
-}
-
-/*
-** Obtain an SQLite statement handle that may be used to read data from the
-** %_content table.
-*/
-static int sqlite3Fts5StorageStmt(
- Fts5Storage *p,
- int eStmt,
- sqlite3_stmt **pp,
- char **pzErrMsg
-){
- int rc;
- assert( eStmt==FTS5_STMT_SCAN_ASC
- || eStmt==FTS5_STMT_SCAN_DESC
- || eStmt==FTS5_STMT_LOOKUP
- );
- rc = fts5StorageGetStmt(p, eStmt, pp, pzErrMsg);
- if( rc==SQLITE_OK ){
- assert( p->aStmt[eStmt]==*pp );
- p->aStmt[eStmt] = 0;
- }
- return rc;
-}
-
-/*
-** Release an SQLite statement handle obtained via an earlier call to
-** sqlite3Fts5StorageStmt(). The eStmt parameter passed to this function
-** must match that passed to the sqlite3Fts5StorageStmt() call.
-*/
-static void sqlite3Fts5StorageStmtRelease(
- Fts5Storage *p,
- int eStmt,
- sqlite3_stmt *pStmt
-){
- assert( eStmt==FTS5_STMT_SCAN_ASC
- || eStmt==FTS5_STMT_SCAN_DESC
- || eStmt==FTS5_STMT_LOOKUP
- );
- if( p->aStmt[eStmt]==0 ){
- sqlite3_reset(pStmt);
- p->aStmt[eStmt] = pStmt;
- }else{
- sqlite3_finalize(pStmt);
- }
-}
-
-static int fts5StorageDecodeSizeArray(
- int *aCol, int nCol, /* Array to populate */
- const u8 *aBlob, int nBlob /* Record to read varints from */
-){
- int i;
- int iOff = 0;
- for(i=0; i<nCol; i++){
- if( iOff>=nBlob ) return 1;
- iOff += fts5GetVarint32(&aBlob[iOff], aCol[i]);
- }
- return (iOff!=nBlob);
-}
-
-/*
-** Argument aCol points to an array of integers containing one entry for
-** each table column. This function reads the %_docsize record for the
-** specified rowid and populates aCol[] with the results.
-**
-** An SQLite error code is returned if an error occurs, or SQLITE_OK
-** otherwise.
-*/
-static int sqlite3Fts5StorageDocsize(Fts5Storage *p, i64 iRowid, int *aCol){
- int nCol = p->pConfig->nCol; /* Number of user columns in table */
- sqlite3_stmt *pLookup = 0; /* Statement to query %_docsize */
- int rc; /* Return Code */
-
- assert( p->pConfig->bColumnsize );
- rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0);
- if( rc==SQLITE_OK ){
- int bCorrupt = 1;
- sqlite3_bind_int64(pLookup, 1, iRowid);
- if( SQLITE_ROW==sqlite3_step(pLookup) ){
- const u8 *aBlob = sqlite3_column_blob(pLookup, 0);
- int nBlob = sqlite3_column_bytes(pLookup, 0);
- if( 0==fts5StorageDecodeSizeArray(aCol, nCol, aBlob, nBlob) ){
- bCorrupt = 0;
- }
- }
- rc = sqlite3_reset(pLookup);
- if( bCorrupt && rc==SQLITE_OK ){
- rc = FTS5_CORRUPT;
- }
- }
-
- return rc;
-}
-
-static int sqlite3Fts5StorageSize(Fts5Storage *p, int iCol, i64 *pnToken){
- int rc = fts5StorageLoadTotals(p, 0);
- if( rc==SQLITE_OK ){
- *pnToken = 0;
- if( iCol<0 ){
- int i;
- for(i=0; i<p->pConfig->nCol; i++){
- *pnToken += p->aTotalSize[i];
- }
- }else if( iCol<p->pConfig->nCol ){
- *pnToken = p->aTotalSize[iCol];
- }else{
- rc = SQLITE_RANGE;
- }
- }
- return rc;
-}
-
-static int sqlite3Fts5StorageRowCount(Fts5Storage *p, i64 *pnRow){
- int rc = fts5StorageLoadTotals(p, 0);
- if( rc==SQLITE_OK ){
- *pnRow = p->nTotalRow;
- }
- return rc;
-}
-
-/*
-** Flush any data currently held in-memory to disk.
-*/
-static int sqlite3Fts5StorageSync(Fts5Storage *p, int bCommit){
- if( bCommit && p->bTotalsValid ){
- int rc = fts5StorageSaveTotals(p);
- p->bTotalsValid = 0;
- if( rc!=SQLITE_OK ) return rc;
- }
- return sqlite3Fts5IndexSync(p->pIndex, bCommit);
-}
-
-static int sqlite3Fts5StorageRollback(Fts5Storage *p){
- p->bTotalsValid = 0;
- return sqlite3Fts5IndexRollback(p->pIndex);
-}
-
-static int sqlite3Fts5StorageConfigValue(
- Fts5Storage *p,
- const char *z,
- sqlite3_value *pVal,
- int iVal
-){
- sqlite3_stmt *pReplace = 0;
- int rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_CONFIG, &pReplace, 0);
- if( rc==SQLITE_OK ){
- sqlite3_bind_text(pReplace, 1, z, -1, SQLITE_STATIC);
- if( pVal ){
- sqlite3_bind_value(pReplace, 2, pVal);
- }else{
- sqlite3_bind_int(pReplace, 2, iVal);
- }
- sqlite3_step(pReplace);
- rc = sqlite3_reset(pReplace);
- }
- if( rc==SQLITE_OK && pVal ){
- int iNew = p->pConfig->iCookie + 1;
- rc = sqlite3Fts5IndexSetCookie(p->pIndex, iNew);
- if( rc==SQLITE_OK ){
- p->pConfig->iCookie = iNew;
- }
- }
- return rc;
-}
-
-
-
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-*/
-
-
-
-/**************************************************************************
-** Start of ascii tokenizer implementation.
-*/
-
-/*
-** For tokenizers with no "unicode" modifier, the set of token characters
-** is the same as the set of ASCII range alphanumeric characters.
-*/
-static unsigned char aAsciiTokenChar[128] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00..0x0F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10..0x1F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20..0x2F */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 0x30..0x3F */
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x40..0x4F */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 0x50..0x5F */
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x60..0x6F */
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 0x70..0x7F */
-};
-
-typedef struct AsciiTokenizer AsciiTokenizer;
-struct AsciiTokenizer {
- unsigned char aTokenChar[128];
-};
-
-static void fts5AsciiAddExceptions(
- AsciiTokenizer *p,
- const char *zArg,
- int bTokenChars
-){
- int i;
- for(i=0; zArg[i]; i++){
- if( (zArg[i] & 0x80)==0 ){
- p->aTokenChar[(int)zArg[i]] = (unsigned char)bTokenChars;
- }
- }
-}
-
-/*
-** Delete an "ascii" tokenizer.
-*/
-static void fts5AsciiDelete(Fts5Tokenizer *p){
- sqlite3_free(p);
-}
-
-/*
-** Create an "ascii" tokenizer.
-*/
-static int fts5AsciiCreate(
- void *pCtx,
- const char **azArg, int nArg,
- Fts5Tokenizer **ppOut
-){
- int rc = SQLITE_OK;
- AsciiTokenizer *p = 0;
- if( nArg%2 ){
- rc = SQLITE_ERROR;
- }else{
- p = sqlite3_malloc(sizeof(AsciiTokenizer));
- if( p==0 ){
- rc = SQLITE_NOMEM;
- }else{
- int i;
- memset(p, 0, sizeof(AsciiTokenizer));
- memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar));
- for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
- const char *zArg = azArg[i+1];
- if( 0==sqlite3_stricmp(azArg[i], "tokenchars") ){
- fts5AsciiAddExceptions(p, zArg, 1);
- }else
- if( 0==sqlite3_stricmp(azArg[i], "separators") ){
- fts5AsciiAddExceptions(p, zArg, 0);
- }else{
- rc = SQLITE_ERROR;
- }
- }
- if( rc!=SQLITE_OK ){
- fts5AsciiDelete((Fts5Tokenizer*)p);
- p = 0;
- }
- }
- }
-
- *ppOut = (Fts5Tokenizer*)p;
- return rc;
-}
-
-
-static void asciiFold(char *aOut, const char *aIn, int nByte){
- int i;
- for(i=0; i<nByte; i++){
- char c = aIn[i];
- if( c>='A' && c<='Z' ) c += 32;
- aOut[i] = c;
- }
-}
-
-/*
-** Tokenize some text using the ascii tokenizer.
-*/
-static int fts5AsciiTokenize(
- Fts5Tokenizer *pTokenizer,
- void *pCtx,
- int flags,
- const char *pText, int nText,
- int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd)
-){
- AsciiTokenizer *p = (AsciiTokenizer*)pTokenizer;
- int rc = SQLITE_OK;
- int ie;
- int is = 0;
-
- char aFold[64];
- int nFold = sizeof(aFold);
- char *pFold = aFold;
- unsigned char *a = p->aTokenChar;
-
- while( is<nText && rc==SQLITE_OK ){
- int nByte;
-
- /* Skip any leading divider characters. */
- while( is<nText && ((pText[is]&0x80)==0 && a[(int)pText[is]]==0) ){
- is++;
- }
- if( is==nText ) break;
-
- /* Count the token characters */
- ie = is+1;
- while( ie<nText && ((pText[ie]&0x80) || a[(int)pText[ie]] ) ){
- ie++;
- }
-
- /* Fold to lower case */
- nByte = ie-is;
- if( nByte>nFold ){
- if( pFold!=aFold ) sqlite3_free(pFold);
- pFold = sqlite3_malloc(nByte*2);
- if( pFold==0 ){
- rc = SQLITE_NOMEM;
- break;
- }
- nFold = nByte*2;
- }
- asciiFold(pFold, &pText[is], nByte);
-
- /* Invoke the token callback */
- rc = xToken(pCtx, 0, pFold, nByte, is, ie);
- is = ie+1;
- }
-
- if( pFold!=aFold ) sqlite3_free(pFold);
- if( rc==SQLITE_DONE ) rc = SQLITE_OK;
- return rc;
-}
-
-/**************************************************************************
-** Start of unicode61 tokenizer implementation.
-*/
-
-
-/*
-** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied
-** from the sqlite3 source file utf.c. If this file is compiled as part
-** of the amalgamation, they are not required.
-*/
-#ifndef SQLITE_AMALGAMATION
-
-static const unsigned char sqlite3Utf8Trans1[] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00,
-};
-
-#define READ_UTF8(zIn, zTerm, c) \
- c = *(zIn++); \
- if( c>=0xc0 ){ \
- c = sqlite3Utf8Trans1[c-0xc0]; \
- while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \
- c = (c<<6) + (0x3f & *(zIn++)); \
- } \
- if( c<0x80 \
- || (c&0xFFFFF800)==0xD800 \
- || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \
- }
-
-
-#define WRITE_UTF8(zOut, c) { \
- if( c<0x00080 ){ \
- *zOut++ = (unsigned char)(c&0xFF); \
- } \
- else if( c<0x00800 ){ \
- *zOut++ = 0xC0 + (unsigned char)((c>>6)&0x1F); \
- *zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
- } \
- else if( c<0x10000 ){ \
- *zOut++ = 0xE0 + (unsigned char)((c>>12)&0x0F); \
- *zOut++ = 0x80 + (unsigned char)((c>>6) & 0x3F); \
- *zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
- }else{ \
- *zOut++ = 0xF0 + (unsigned char)((c>>18) & 0x07); \
- *zOut++ = 0x80 + (unsigned char)((c>>12) & 0x3F); \
- *zOut++ = 0x80 + (unsigned char)((c>>6) & 0x3F); \
- *zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
- } \
-}
-
-#endif /* ifndef SQLITE_AMALGAMATION */
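
READ_UTF8 and WRITE_UTF8 are used directly by the unicode61 tokenizer below. As a minimal, hedged sketch of how READ_UTF8 is typically driven (the helper name is hypothetical and not part of the removed source):

/* Illustrative only: decode the first codepoint of a UTF-8 buffer with the
** READ_UTF8 macro defined above. The helper name is hypothetical. */
static int fts5ExampleFirstCodepoint(const char *z, int n){
  const unsigned char *zIn = (const unsigned char*)z;
  const unsigned char *zTerm = &zIn[n];
  int c = 0;
  if( zIn<zTerm ){
    READ_UTF8(zIn, zTerm, c);  /* surrogates and over-long forms map to 0xFFFD */
  }
  return c;
}
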
-
-typedef struct Unicode61Tokenizer Unicode61Tokenizer;
-struct Unicode61Tokenizer {
- unsigned char aTokenChar[128]; /* ASCII range token characters */
- char *aFold; /* Buffer to fold text into */
- int nFold; /* Size of aFold[] in bytes */
- int bRemoveDiacritic; /* True if remove_diacritics=1 is set */
- int nException;
- int *aiException;
-};
-
-static int fts5UnicodeAddExceptions(
- Unicode61Tokenizer *p, /* Tokenizer object */
- const char *z, /* Characters to treat as exceptions */
- int bTokenChars /* 1 for 'tokenchars', 0 for 'separators' */
-){
- int rc = SQLITE_OK;
- int n = strlen(z);
- int *aNew;
-
- if( n>0 ){
- aNew = (int*)sqlite3_realloc(p->aiException, (n+p->nException)*sizeof(int));
- if( aNew ){
- int nNew = p->nException;
- const unsigned char *zCsr = (const unsigned char*)z;
- const unsigned char *zTerm = (const unsigned char*)&z[n];
- while( zCsr<zTerm ){
- int iCode;
- int bToken;
- READ_UTF8(zCsr, zTerm, iCode);
- if( iCode<128 ){
- p->aTokenChar[iCode] = bTokenChars;
- }else{
- bToken = sqlite3Fts5UnicodeIsalnum(iCode);
- assert( (bToken==0 || bToken==1) );
- assert( (bTokenChars==0 || bTokenChars==1) );
- if( bToken!=bTokenChars && sqlite3Fts5UnicodeIsdiacritic(iCode)==0 ){
- int i;
- for(i=0; i<nNew; i++){
- if( aNew[i]>iCode ) break;
- }
- memmove(&aNew[i+1], &aNew[i], (nNew-i)*sizeof(int));
- aNew[i] = iCode;
- nNew++;
- }
- }
- }
- p->aiException = aNew;
- p->nException = nNew;
- }else{
- rc = SQLITE_NOMEM;
- }
- }
-
- return rc;
-}
-
-/*
-** Return true if the p->aiException[] array contains the value iCode.
-*/
-static int fts5UnicodeIsException(Unicode61Tokenizer *p, int iCode){
- if( p->nException>0 ){
- int *a = p->aiException;
- int iLo = 0;
- int iHi = p->nException-1;
-
- while( iHi>=iLo ){
- int iTest = (iHi + iLo) / 2;
- if( iCode==a[iTest] ){
- return 1;
- }else if( iCode>a[iTest] ){
- iLo = iTest+1;
- }else{
- iHi = iTest-1;
- }
- }
- }
-
- return 0;
-}
-
-/*
-** Delete a "unicode61" tokenizer.
-*/
-static void fts5UnicodeDelete(Fts5Tokenizer *pTok){
- if( pTok ){
- Unicode61Tokenizer *p = (Unicode61Tokenizer*)pTok;
- sqlite3_free(p->aiException);
- sqlite3_free(p->aFold);
- sqlite3_free(p);
- }
- return;
-}
-
-/*
-** Create a "unicode61" tokenizer.
-*/
-static int fts5UnicodeCreate(
- void *pCtx,
- const char **azArg, int nArg,
- Fts5Tokenizer **ppOut
-){
- int rc = SQLITE_OK; /* Return code */
- Unicode61Tokenizer *p = 0; /* New tokenizer object */
-
- if( nArg%2 ){
- rc = SQLITE_ERROR;
- }else{
- p = (Unicode61Tokenizer*)sqlite3_malloc(sizeof(Unicode61Tokenizer));
- if( p ){
- int i;
- memset(p, 0, sizeof(Unicode61Tokenizer));
- memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar));
- p->bRemoveDiacritic = 1;
- p->nFold = 64;
- p->aFold = sqlite3_malloc(p->nFold * sizeof(char));
- if( p->aFold==0 ){
- rc = SQLITE_NOMEM;
- }
- for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
- const char *zArg = azArg[i+1];
- if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
- if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){
- rc = SQLITE_ERROR;
- }
- p->bRemoveDiacritic = (zArg[0]=='1');
- }else
- if( 0==sqlite3_stricmp(azArg[i], "tokenchars") ){
- rc = fts5UnicodeAddExceptions(p, zArg, 1);
- }else
- if( 0==sqlite3_stricmp(azArg[i], "separators") ){
- rc = fts5UnicodeAddExceptions(p, zArg, 0);
- }else{
- rc = SQLITE_ERROR;
- }
- }
- }else{
- rc = SQLITE_NOMEM;
- }
- if( rc!=SQLITE_OK ){
- fts5UnicodeDelete((Fts5Tokenizer*)p);
- p = 0;
- }
- *ppOut = (Fts5Tokenizer*)p;
- }
- return rc;
-}
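
The azArg[] name/value pairs consumed by the loop above are the options that follow the tokenizer name in an FTS5 tokenize= argument. The sketch below is illustrative only (the helper name is hypothetical); it constructs a unicode61 instance with options equivalent to tokenize = 'unicode61 remove_diacritics 0 tokenchars _'.

/* Illustrative only: build a unicode61 tokenizer directly, passing the same
** option pairs the create routine above would receive. */
static int fts5ExampleUnicode61(Fts5Tokenizer **ppTok){
  static const char *azArg[] = { "remove_diacritics", "0", "tokenchars", "_" };
  return fts5UnicodeCreate(0, azArg, 4, ppTok);
}
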
-
-/*
-** Return true if, for the purposes of tokenizing with the tokenizer
-** passed as the first argument, codepoint iCode is considered a token
-** character (not a separator).
-*/
-static int fts5UnicodeIsAlnum(Unicode61Tokenizer *p, int iCode){
- assert( (sqlite3Fts5UnicodeIsalnum(iCode) & 0xFFFFFFFE)==0 );
- return sqlite3Fts5UnicodeIsalnum(iCode) ^ fts5UnicodeIsException(p, iCode);
-}
-
-static int fts5UnicodeTokenize(
- Fts5Tokenizer *pTokenizer,
- void *pCtx,
- int flags,
- const char *pText, int nText,
- int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd)
-){
- Unicode61Tokenizer *p = (Unicode61Tokenizer*)pTokenizer;
- int rc = SQLITE_OK;
- unsigned char *a = p->aTokenChar;
-
- unsigned char *zTerm = (unsigned char*)&pText[nText];
- unsigned char *zCsr = (unsigned char *)pText;
-
- /* Output buffer */
- char *aFold = p->aFold;
- int nFold = p->nFold;
- const char *pEnd = &aFold[nFold-6];
-
- /* Each iteration of this loop gobbles up a contiguous run of separators,
- ** then the next token. */
- while( rc==SQLITE_OK ){
- int iCode; /* non-ASCII codepoint read from input */
- char *zOut = aFold;
- int is;
- int ie;
-
- /* Skip any separator characters. */
- while( 1 ){
- if( zCsr>=zTerm ) goto tokenize_done;
- if( *zCsr & 0x80 ) {
- /* A character outside of the ascii range. Skip past it if it is
- ** a separator character, or break out of the loop if it is not. */
- is = zCsr - (unsigned char*)pText;
- READ_UTF8(zCsr, zTerm, iCode);
- if( fts5UnicodeIsAlnum(p, iCode) ){
- goto non_ascii_tokenchar;
- }
- }else{
- if( a[*zCsr] ){
- is = zCsr - (unsigned char*)pText;
- goto ascii_tokenchar;
- }
- zCsr++;
- }
- }
-
- /* Run through the tokenchars. Fold them into the output buffer along
- ** the way. */
- while( zCsr<zTerm ){
-
- /* Grow the output buffer so that there is sufficient space to fit the
- ** largest possible utf-8 character. */
- if( zOut>pEnd ){
- aFold = sqlite3_malloc(nFold*2);
- if( aFold==0 ){
- rc = SQLITE_NOMEM;
- goto tokenize_done;
- }
- zOut = &aFold[zOut - p->aFold];
- memcpy(aFold, p->aFold, nFold);
- sqlite3_free(p->aFold);
- p->aFold = aFold;
- p->nFold = nFold = nFold*2;
- pEnd = &aFold[nFold-6];
- }
-
- if( *zCsr & 0x80 ){
- /* A non-ascii-range character. Fold it into the output buffer if
- ** it is a token character, or break out of the loop if it is not. */
- READ_UTF8(zCsr, zTerm, iCode);
- if( fts5UnicodeIsAlnum(p,iCode)||sqlite3Fts5UnicodeIsdiacritic(iCode) ){
- non_ascii_tokenchar:
- iCode = sqlite3Fts5UnicodeFold(iCode, p->bRemoveDiacritic);
- if( iCode ) WRITE_UTF8(zOut, iCode);
- }else{
- break;
- }
- }else if( a[*zCsr]==0 ){
- /* An ascii-range separator character. End of token. */
- break;
- }else{
- ascii_tokenchar:
- if( *zCsr>='A' && *zCsr<='Z' ){
- *zOut++ = *zCsr + 32;
- }else{
- *zOut++ = *zCsr;
- }
- zCsr++;
- }
- ie = zCsr - (unsigned char*)pText;
- }
-
- /* Invoke the token callback */
- rc = xToken(pCtx, 0, aFold, zOut-aFold, is, ie);
- }
-
- tokenize_done:
- if( rc==SQLITE_DONE ) rc = SQLITE_OK;
- return rc;
-}
-
-/**************************************************************************
-** Start of porter stemmer implementation.
-*/
-
-/* Any tokens larger than this (in bytes) are passed through without
-** stemming. */
-#define FTS5_PORTER_MAX_TOKEN 64
-
-typedef struct PorterTokenizer PorterTokenizer;
-struct PorterTokenizer {
- fts5_tokenizer tokenizer; /* Parent tokenizer module */
- Fts5Tokenizer *pTokenizer; /* Parent tokenizer instance */
- char aBuf[FTS5_PORTER_MAX_TOKEN + 64];
-};
-
-/*
-** Delete a "porter" tokenizer.
-*/
-static void fts5PorterDelete(Fts5Tokenizer *pTok){
- if( pTok ){
- PorterTokenizer *p = (PorterTokenizer*)pTok;
- if( p->pTokenizer ){
- p->tokenizer.xDelete(p->pTokenizer);
- }
- sqlite3_free(p);
- }
-}
-
-/*
-** Create a "porter" tokenizer.
-*/
-static int fts5PorterCreate(
- void *pCtx,
- const char **azArg, int nArg,
- Fts5Tokenizer **ppOut
-){
- fts5_api *pApi = (fts5_api*)pCtx;
- int rc = SQLITE_OK;
- PorterTokenizer *pRet;
- void *pUserdata = 0;
- const char *zBase = "unicode61";
-
- if( nArg>0 ){
- zBase = azArg[0];
- }
-
- pRet = (PorterTokenizer*)sqlite3_malloc(sizeof(PorterTokenizer));
- if( pRet ){
- memset(pRet, 0, sizeof(PorterTokenizer));
- rc = pApi->xFindTokenizer(pApi, zBase, &pUserdata, &pRet->tokenizer);
- }else{
- rc = SQLITE_NOMEM;
- }
- if( rc==SQLITE_OK ){
- int nArg2 = (nArg>0 ? nArg-1 : 0);
- const char **azArg2 = (nArg2 ? &azArg[1] : 0);
- rc = pRet->tokenizer.xCreate(pUserdata, azArg2, nArg2, &pRet->pTokenizer);
- }
-
- if( rc!=SQLITE_OK ){
- fts5PorterDelete((Fts5Tokenizer*)pRet);
- pRet = 0;
- }
- *ppOut = (Fts5Tokenizer*)pRet;
- return rc;
-}
-
-typedef struct PorterContext PorterContext;
-struct PorterContext {
- void *pCtx;
- int (*xToken)(void*, int, const char*, int, int, int);
- char *aBuf;
-};
-
-typedef struct PorterRule PorterRule;
-struct PorterRule {
- const char *zSuffix;
- int nSuffix;
- int (*xCond)(char *zStem, int nStem);
- const char *zOutput;
- int nOutput;
-};
-
-#if 0
-static int fts5PorterApply(char *aBuf, int *pnBuf, PorterRule *aRule){
- int ret = -1;
- int nBuf = *pnBuf;
- PorterRule *p;
-
- for(p=aRule; p->zSuffix; p++){
- assert( strlen(p->zSuffix)==p->nSuffix );
- assert( strlen(p->zOutput)==p->nOutput );
- if( nBuf<p->nSuffix ) continue;
- if( 0==memcmp(&aBuf[nBuf - p->nSuffix], p->zSuffix, p->nSuffix) ) break;
- }
-
- if( p->zSuffix ){
- int nStem = nBuf - p->nSuffix;
- if( p->xCond==0 || p->xCond(aBuf, nStem) ){
- memcpy(&aBuf[nStem], p->zOutput, p->nOutput);
- *pnBuf = nStem + p->nOutput;
- ret = p - aRule;
- }
- }
-
- return ret;
-}
-#endif
-
-static int fts5PorterIsVowel(char c, int bYIsVowel){
- return (
- c=='a' || c=='e' || c=='i' || c=='o' || c=='u' || (bYIsVowel && c=='y')
- );
-}
-
-static int fts5PorterGobbleVC(char *zStem, int nStem, int bPrevCons){
- int i;
- int bCons = bPrevCons;
-
- /* Scan for a vowel */
- for(i=0; i<nStem; i++){
- if( 0==(bCons = !fts5PorterIsVowel(zStem[i], bCons)) ) break;
- }
-
- /* Scan for a consonant */
- for(i++; i<nStem; i++){
- if( (bCons = !fts5PorterIsVowel(zStem[i], bCons)) ) return i+1;
- }
- return 0;
-}
-
-/* porter rule condition: (m > 0) */
-static int fts5Porter_MGt0(char *zStem, int nStem){
- return !!fts5PorterGobbleVC(zStem, nStem, 0);
-}
-
-/* porter rule condition: (m > 1) */
-static int fts5Porter_MGt1(char *zStem, int nStem){
- int n;
- n = fts5PorterGobbleVC(zStem, nStem, 0);
- if( n && fts5PorterGobbleVC(&zStem[n], nStem-n, 1) ){
- return 1;
- }
- return 0;
-}
-
-/* porter rule condition: (m = 1) */
-static int fts5Porter_MEq1(char *zStem, int nStem){
- int n;
- n = fts5PorterGobbleVC(zStem, nStem, 0);
- if( n && 0==fts5PorterGobbleVC(&zStem[n], nStem-n, 1) ){
- return 1;
- }
- return 0;
-}
-
-/* porter rule condition: (*o) */
-static int fts5Porter_Ostar(char *zStem, int nStem){
- if( zStem[nStem-1]=='w' || zStem[nStem-1]=='x' || zStem[nStem-1]=='y' ){
- return 0;
- }else{
- int i;
- int mask = 0;
- int bCons = 0;
- for(i=0; i<nStem; i++){
- bCons = !fts5PorterIsVowel(zStem[i], bCons);
- assert( bCons==0 || bCons==1 );
- mask = (mask << 1) + bCons;
- }
- return ((mask & 0x0007)==0x0005);
- }
-}
-
-/* porter rule condition: (m > 1 and (*S or *T)) */
-static int fts5Porter_MGt1_and_S_or_T(char *zStem, int nStem){
- assert( nStem>0 );
- return (zStem[nStem-1]=='s' || zStem[nStem-1]=='t')
- && fts5Porter_MGt1(zStem, nStem);
-}
-
-/* porter rule condition: (*v*) */
-static int fts5Porter_Vowel(char *zStem, int nStem){
- int i;
- for(i=0; i<nStem; i++){
- if( fts5PorterIsVowel(zStem[i], i>0) ){
- return 1;
- }
- }
- return 0;
-}
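
The xCond helpers above evaluate Porter's "measure" m, treating a stem as [C](VC)^m[V]; the generated step functions that follow only rewrite a suffix when the stem left behind satisfies the rule's condition. A small hedged sketch, not part of the removed source, of how the conditions behave on classic examples:

#include <assert.h>
/* Illustrative only: Porter measure checks on literal stems. Step 4 below
** strips "ate" from "activate" because the remaining stem "activ" has m>1,
** while "tree" has m==0 and is left alone by the m>0 rules. */
static void fts5ExamplePorterMeasure(void){
  assert( fts5Porter_MGt1("activ", 5) );     /* "activ": two V..C groups, m>1 */
  assert( fts5Porter_MGt0("tree", 4)==0 );   /* "tree": no VC group, m==0     */
}
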
-
-
-/**************************************************************************
-***************************************************************************
-** GENERATED CODE STARTS HERE (mkportersteps.tcl)
-*/
-
-static int fts5PorterStep4(char *aBuf, int *pnBuf){
- int ret = 0;
- int nBuf = *pnBuf;
- switch( aBuf[nBuf-2] ){
-
- case 'a':
- if( nBuf>2 && 0==memcmp("al", &aBuf[nBuf-2], 2) ){
- if( fts5Porter_MGt1(aBuf, nBuf-2) ){
- *pnBuf = nBuf - 2;
- }
- }
- break;
-
- case 'c':
- if( nBuf>4 && 0==memcmp("ance", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt1(aBuf, nBuf-4) ){
- *pnBuf = nBuf - 4;
- }
- }else if( nBuf>4 && 0==memcmp("ence", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt1(aBuf, nBuf-4) ){
- *pnBuf = nBuf - 4;
- }
- }
- break;
-
- case 'e':
- if( nBuf>2 && 0==memcmp("er", &aBuf[nBuf-2], 2) ){
- if( fts5Porter_MGt1(aBuf, nBuf-2) ){
- *pnBuf = nBuf - 2;
- }
- }
- break;
-
- case 'i':
- if( nBuf>2 && 0==memcmp("ic", &aBuf[nBuf-2], 2) ){
- if( fts5Porter_MGt1(aBuf, nBuf-2) ){
- *pnBuf = nBuf - 2;
- }
- }
- break;
-
- case 'l':
- if( nBuf>4 && 0==memcmp("able", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt1(aBuf, nBuf-4) ){
- *pnBuf = nBuf - 4;
- }
- }else if( nBuf>4 && 0==memcmp("ible", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt1(aBuf, nBuf-4) ){
- *pnBuf = nBuf - 4;
- }
- }
- break;
-
- case 'n':
- if( nBuf>3 && 0==memcmp("ant", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }else if( nBuf>5 && 0==memcmp("ement", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt1(aBuf, nBuf-5) ){
- *pnBuf = nBuf - 5;
- }
- }else if( nBuf>4 && 0==memcmp("ment", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt1(aBuf, nBuf-4) ){
- *pnBuf = nBuf - 4;
- }
- }else if( nBuf>3 && 0==memcmp("ent", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- case 'o':
- if( nBuf>3 && 0==memcmp("ion", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1_and_S_or_T(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }else if( nBuf>2 && 0==memcmp("ou", &aBuf[nBuf-2], 2) ){
- if( fts5Porter_MGt1(aBuf, nBuf-2) ){
- *pnBuf = nBuf - 2;
- }
- }
- break;
-
- case 's':
- if( nBuf>3 && 0==memcmp("ism", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- case 't':
- if( nBuf>3 && 0==memcmp("ate", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }else if( nBuf>3 && 0==memcmp("iti", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- case 'u':
- if( nBuf>3 && 0==memcmp("ous", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- case 'v':
- if( nBuf>3 && 0==memcmp("ive", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- case 'z':
- if( nBuf>3 && 0==memcmp("ize", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt1(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- }
- return ret;
-}
-
-
-static int fts5PorterStep1B2(char *aBuf, int *pnBuf){
- int ret = 0;
- int nBuf = *pnBuf;
- switch( aBuf[nBuf-2] ){
-
- case 'a':
- if( nBuf>2 && 0==memcmp("at", &aBuf[nBuf-2], 2) ){
- memcpy(&aBuf[nBuf-2], "ate", 3);
- *pnBuf = nBuf - 2 + 3;
- ret = 1;
- }
- break;
-
- case 'b':
- if( nBuf>2 && 0==memcmp("bl", &aBuf[nBuf-2], 2) ){
- memcpy(&aBuf[nBuf-2], "ble", 3);
- *pnBuf = nBuf - 2 + 3;
- ret = 1;
- }
- break;
-
- case 'i':
- if( nBuf>2 && 0==memcmp("iz", &aBuf[nBuf-2], 2) ){
- memcpy(&aBuf[nBuf-2], "ize", 3);
- *pnBuf = nBuf - 2 + 3;
- ret = 1;
- }
- break;
-
- }
- return ret;
-}
-
-
-static int fts5PorterStep2(char *aBuf, int *pnBuf){
- int ret = 0;
- int nBuf = *pnBuf;
- switch( aBuf[nBuf-2] ){
-
- case 'a':
- if( nBuf>7 && 0==memcmp("ational", &aBuf[nBuf-7], 7) ){
- if( fts5Porter_MGt0(aBuf, nBuf-7) ){
- memcpy(&aBuf[nBuf-7], "ate", 3);
- *pnBuf = nBuf - 7 + 3;
- }
- }else if( nBuf>6 && 0==memcmp("tional", &aBuf[nBuf-6], 6) ){
- if( fts5Porter_MGt0(aBuf, nBuf-6) ){
- memcpy(&aBuf[nBuf-6], "tion", 4);
- *pnBuf = nBuf - 6 + 4;
- }
- }
- break;
-
- case 'c':
- if( nBuf>4 && 0==memcmp("enci", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "ence", 4);
- *pnBuf = nBuf - 4 + 4;
- }
- }else if( nBuf>4 && 0==memcmp("anci", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "ance", 4);
- *pnBuf = nBuf - 4 + 4;
- }
- }
- break;
-
- case 'e':
- if( nBuf>4 && 0==memcmp("izer", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "ize", 3);
- *pnBuf = nBuf - 4 + 3;
- }
- }
- break;
-
- case 'g':
- if( nBuf>4 && 0==memcmp("logi", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "log", 3);
- *pnBuf = nBuf - 4 + 3;
- }
- }
- break;
-
- case 'l':
- if( nBuf>3 && 0==memcmp("bli", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt0(aBuf, nBuf-3) ){
- memcpy(&aBuf[nBuf-3], "ble", 3);
- *pnBuf = nBuf - 3 + 3;
- }
- }else if( nBuf>4 && 0==memcmp("alli", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "al", 2);
- *pnBuf = nBuf - 4 + 2;
- }
- }else if( nBuf>5 && 0==memcmp("entli", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "ent", 3);
- *pnBuf = nBuf - 5 + 3;
- }
- }else if( nBuf>3 && 0==memcmp("eli", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt0(aBuf, nBuf-3) ){
- memcpy(&aBuf[nBuf-3], "e", 1);
- *pnBuf = nBuf - 3 + 1;
- }
- }else if( nBuf>5 && 0==memcmp("ousli", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "ous", 3);
- *pnBuf = nBuf - 5 + 3;
- }
- }
- break;
-
- case 'o':
- if( nBuf>7 && 0==memcmp("ization", &aBuf[nBuf-7], 7) ){
- if( fts5Porter_MGt0(aBuf, nBuf-7) ){
- memcpy(&aBuf[nBuf-7], "ize", 3);
- *pnBuf = nBuf - 7 + 3;
- }
- }else if( nBuf>5 && 0==memcmp("ation", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "ate", 3);
- *pnBuf = nBuf - 5 + 3;
- }
- }else if( nBuf>4 && 0==memcmp("ator", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "ate", 3);
- *pnBuf = nBuf - 4 + 3;
- }
- }
- break;
-
- case 's':
- if( nBuf>5 && 0==memcmp("alism", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "al", 2);
- *pnBuf = nBuf - 5 + 2;
- }
- }else if( nBuf>7 && 0==memcmp("iveness", &aBuf[nBuf-7], 7) ){
- if( fts5Porter_MGt0(aBuf, nBuf-7) ){
- memcpy(&aBuf[nBuf-7], "ive", 3);
- *pnBuf = nBuf - 7 + 3;
- }
- }else if( nBuf>7 && 0==memcmp("fulness", &aBuf[nBuf-7], 7) ){
- if( fts5Porter_MGt0(aBuf, nBuf-7) ){
- memcpy(&aBuf[nBuf-7], "ful", 3);
- *pnBuf = nBuf - 7 + 3;
- }
- }else if( nBuf>7 && 0==memcmp("ousness", &aBuf[nBuf-7], 7) ){
- if( fts5Porter_MGt0(aBuf, nBuf-7) ){
- memcpy(&aBuf[nBuf-7], "ous", 3);
- *pnBuf = nBuf - 7 + 3;
- }
- }
- break;
-
- case 't':
- if( nBuf>5 && 0==memcmp("aliti", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "al", 2);
- *pnBuf = nBuf - 5 + 2;
- }
- }else if( nBuf>5 && 0==memcmp("iviti", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "ive", 3);
- *pnBuf = nBuf - 5 + 3;
- }
- }else if( nBuf>6 && 0==memcmp("biliti", &aBuf[nBuf-6], 6) ){
- if( fts5Porter_MGt0(aBuf, nBuf-6) ){
- memcpy(&aBuf[nBuf-6], "ble", 3);
- *pnBuf = nBuf - 6 + 3;
- }
- }
- break;
-
- }
- return ret;
-}
-
-
-static int fts5PorterStep3(char *aBuf, int *pnBuf){
- int ret = 0;
- int nBuf = *pnBuf;
- switch( aBuf[nBuf-2] ){
-
- case 'a':
- if( nBuf>4 && 0==memcmp("ical", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- memcpy(&aBuf[nBuf-4], "ic", 2);
- *pnBuf = nBuf - 4 + 2;
- }
- }
- break;
-
- case 's':
- if( nBuf>4 && 0==memcmp("ness", &aBuf[nBuf-4], 4) ){
- if( fts5Porter_MGt0(aBuf, nBuf-4) ){
- *pnBuf = nBuf - 4;
- }
- }
- break;
-
- case 't':
- if( nBuf>5 && 0==memcmp("icate", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "ic", 2);
- *pnBuf = nBuf - 5 + 2;
- }
- }else if( nBuf>5 && 0==memcmp("iciti", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "ic", 2);
- *pnBuf = nBuf - 5 + 2;
- }
- }
- break;
-
- case 'u':
- if( nBuf>3 && 0==memcmp("ful", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt0(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- }
- }
- break;
-
- case 'v':
- if( nBuf>5 && 0==memcmp("ative", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- *pnBuf = nBuf - 5;
- }
- }
- break;
-
- case 'z':
- if( nBuf>5 && 0==memcmp("alize", &aBuf[nBuf-5], 5) ){
- if( fts5Porter_MGt0(aBuf, nBuf-5) ){
- memcpy(&aBuf[nBuf-5], "al", 2);
- *pnBuf = nBuf - 5 + 2;
- }
- }
- break;
-
- }
- return ret;
-}
-
-
-static int fts5PorterStep1B(char *aBuf, int *pnBuf){
- int ret = 0;
- int nBuf = *pnBuf;
- switch( aBuf[nBuf-2] ){
-
- case 'e':
- if( nBuf>3 && 0==memcmp("eed", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_MGt0(aBuf, nBuf-3) ){
- memcpy(&aBuf[nBuf-3], "ee", 2);
- *pnBuf = nBuf - 3 + 2;
- }
- }else if( nBuf>2 && 0==memcmp("ed", &aBuf[nBuf-2], 2) ){
- if( fts5Porter_Vowel(aBuf, nBuf-2) ){
- *pnBuf = nBuf - 2;
- ret = 1;
- }
- }
- break;
-
- case 'n':
- if( nBuf>3 && 0==memcmp("ing", &aBuf[nBuf-3], 3) ){
- if( fts5Porter_Vowel(aBuf, nBuf-3) ){
- *pnBuf = nBuf - 3;
- ret = 1;
- }
- }
- break;
-
- }
- return ret;
-}
-
-/*
-** GENERATED CODE ENDS HERE (mkportersteps.tcl)
-***************************************************************************
-**************************************************************************/
-
-static void fts5PorterStep1A(char *aBuf, int *pnBuf){
- int nBuf = *pnBuf;
- if( aBuf[nBuf-1]=='s' ){
- if( aBuf[nBuf-2]=='e' ){
- if( (nBuf>4 && aBuf[nBuf-4]=='s' && aBuf[nBuf-3]=='s')
- || (nBuf>3 && aBuf[nBuf-3]=='i' )
- ){
- *pnBuf = nBuf-2;
- }else{
- *pnBuf = nBuf-1;
- }
- }
- else if( aBuf[nBuf-2]!='s' ){
- *pnBuf = nBuf-1;
- }
- }
-}
-
-static int fts5PorterCb(
- void *pCtx,
- int tflags,
- const char *pToken,
- int nToken,
- int iStart,
- int iEnd
-){
- PorterContext *p = (PorterContext*)pCtx;
-
- char *aBuf;
- int nBuf;
-
- if( nToken>FTS5_PORTER_MAX_TOKEN || nToken<3 ) goto pass_through;
- aBuf = p->aBuf;
- nBuf = nToken;
- memcpy(aBuf, pToken, nBuf);
-
- /* Step 1. */
- fts5PorterStep1A(aBuf, &nBuf);
- if( fts5PorterStep1B(aBuf, &nBuf) ){
- if( fts5PorterStep1B2(aBuf, &nBuf)==0 ){
- char c = aBuf[nBuf-1];
- if( fts5PorterIsVowel(c, 0)==0
- && c!='l' && c!='s' && c!='z' && c==aBuf[nBuf-2]
- ){
- nBuf--;
- }else if( fts5Porter_MEq1(aBuf, nBuf) && fts5Porter_Ostar(aBuf, nBuf) ){
- aBuf[nBuf++] = 'e';
- }
- }
- }
-
- /* Step 1C. */
- if( aBuf[nBuf-1]=='y' && fts5Porter_Vowel(aBuf, nBuf-1) ){
- aBuf[nBuf-1] = 'i';
- }
-
- /* Steps 2 through 4. */
- fts5PorterStep2(aBuf, &nBuf);
- fts5PorterStep3(aBuf, &nBuf);
- fts5PorterStep4(aBuf, &nBuf);
-
- /* Step 5a. */
- assert( nBuf>0 );
- if( aBuf[nBuf-1]=='e' ){
- if( fts5Porter_MGt1(aBuf, nBuf-1)
- || (fts5Porter_MEq1(aBuf, nBuf-1) && !fts5Porter_Ostar(aBuf, nBuf-1))
- ){
- nBuf--;
- }
- }
-
- /* Step 5b. */
- if( nBuf>1 && aBuf[nBuf-1]=='l'
- && aBuf[nBuf-2]=='l' && fts5Porter_MGt1(aBuf, nBuf-1)
- ){
- nBuf--;
- }
-
- return p->xToken(p->pCtx, tflags, aBuf, nBuf, iStart, iEnd);
-
- pass_through:
- return p->xToken(p->pCtx, tflags, pToken, nToken, iStart, iEnd);
-}
-
-/*
-** Tokenize using the porter tokenizer.
-*/
-static int fts5PorterTokenize(
- Fts5Tokenizer *pTokenizer,
- void *pCtx,
- int flags,
- const char *pText, int nText,
- int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd)
-){
- PorterTokenizer *p = (PorterTokenizer*)pTokenizer;
- PorterContext sCtx;
- sCtx.xToken = xToken;
- sCtx.pCtx = pCtx;
- sCtx.aBuf = p->aBuf;
- return p->tokenizer.xTokenize(
- p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb
- );
-}
-
-/*
-** Register all built-in tokenizers with FTS5.
-*/
-static int sqlite3Fts5TokenizerInit(fts5_api *pApi){
- struct BuiltinTokenizer {
- const char *zName;
- fts5_tokenizer x;
- } aBuiltin[] = {
- { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}},
- { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }},
- { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }},
- };
-
- int rc = SQLITE_OK; /* Return code */
- int i; /* To iterate through builtin functions */
-
- for(i=0; rc==SQLITE_OK && i<sizeof(aBuiltin)/sizeof(aBuiltin[0]); i++){
- rc = pApi->xCreateTokenizer(pApi,
- aBuiltin[i].zName,
- (void*)pApi,
- &aBuiltin[i].x,
- 0
- );
- }
-
- return rc;
-}
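
sqlite3Fts5TokenizerInit() registers the three built-in tokenizers through fts5_api.xCreateTokenizer. The hedged sketch below (not part of the removed source; the helper and the extra tokenizer name are hypothetical) shows the same entry point being used to register an additional name:

/* Illustrative only: register the ascii implementation above under an extra,
** hypothetical name using the same xCreateTokenizer call as the built-ins. */
static int fts5ExampleRegisterAlias(fts5_api *pApi){
  fts5_tokenizer tok = { fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize };
  return pApi->xCreateTokenizer(pApi, "ascii_alias", (void*)pApi, &tok, 0);
}
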
-
-
-
-/*
-** 2012 May 25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-*/
-
-/*
-** DO NOT EDIT THIS MACHINE GENERATED FILE.
-*/
-
-
-/* #include <assert.h> */
-
-/*
-** Return true if the argument corresponds to a unicode codepoint
-** classified as either a letter or a number. Otherwise false.
-**
-** The results are undefined if the value passed to this function
-** is less than zero.
-*/
-static int sqlite3Fts5UnicodeIsalnum(int c){
- /* Each unsigned integer in the following array corresponds to a contiguous
- ** range of unicode codepoints that are not either letters or numbers (i.e.
- ** codepoints for which this function should return 0).
- **
- ** The most significant 22 bits in each 32-bit value contain the first
- ** codepoint in the range. The least significant 10 bits are used to store
- ** the size of the range (always at least 1). In other words, the value
- ** ((C<<10) + N) represents a range of N codepoints starting with codepoint
- ** C. It is not possible to represent a range larger than 1023 codepoints
- ** using this format.
- */
- static const unsigned int aEntry[] = {
- 0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07,
- 0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01,
- 0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401,
- 0x000BBC81, 0x000DD401, 0x000DF801, 0x000E1002, 0x000E1C01,
- 0x000FD801, 0x00120808, 0x00156806, 0x00162402, 0x00163C01,
- 0x00164437, 0x0017CC02, 0x00180005, 0x00181816, 0x00187802,
- 0x00192C15, 0x0019A804, 0x0019C001, 0x001B5001, 0x001B580F,
- 0x001B9C07, 0x001BF402, 0x001C000E, 0x001C3C01, 0x001C4401,
- 0x001CC01B, 0x001E980B, 0x001FAC09, 0x001FD804, 0x00205804,
- 0x00206C09, 0x00209403, 0x0020A405, 0x0020C00F, 0x00216403,
- 0x00217801, 0x0023901B, 0x00240004, 0x0024E803, 0x0024F812,
- 0x00254407, 0x00258804, 0x0025C001, 0x00260403, 0x0026F001,
- 0x0026F807, 0x00271C02, 0x00272C03, 0x00275C01, 0x00278802,
- 0x0027C802, 0x0027E802, 0x00280403, 0x0028F001, 0x0028F805,
- 0x00291C02, 0x00292C03, 0x00294401, 0x0029C002, 0x0029D401,
- 0x002A0403, 0x002AF001, 0x002AF808, 0x002B1C03, 0x002B2C03,
- 0x002B8802, 0x002BC002, 0x002C0403, 0x002CF001, 0x002CF807,
- 0x002D1C02, 0x002D2C03, 0x002D5802, 0x002D8802, 0x002DC001,
- 0x002E0801, 0x002EF805, 0x002F1803, 0x002F2804, 0x002F5C01,
- 0x002FCC08, 0x00300403, 0x0030F807, 0x00311803, 0x00312804,
- 0x00315402, 0x00318802, 0x0031FC01, 0x00320802, 0x0032F001,
- 0x0032F807, 0x00331803, 0x00332804, 0x00335402, 0x00338802,
- 0x00340802, 0x0034F807, 0x00351803, 0x00352804, 0x00355C01,
- 0x00358802, 0x0035E401, 0x00360802, 0x00372801, 0x00373C06,
- 0x00375801, 0x00376008, 0x0037C803, 0x0038C401, 0x0038D007,
- 0x0038FC01, 0x00391C09, 0x00396802, 0x003AC401, 0x003AD006,
- 0x003AEC02, 0x003B2006, 0x003C041F, 0x003CD00C, 0x003DC417,
- 0x003E340B, 0x003E6424, 0x003EF80F, 0x003F380D, 0x0040AC14,
- 0x00412806, 0x00415804, 0x00417803, 0x00418803, 0x00419C07,
- 0x0041C404, 0x0042080C, 0x00423C01, 0x00426806, 0x0043EC01,
- 0x004D740C, 0x004E400A, 0x00500001, 0x0059B402, 0x005A0001,
- 0x005A6C02, 0x005BAC03, 0x005C4803, 0x005CC805, 0x005D4802,
- 0x005DC802, 0x005ED023, 0x005F6004, 0x005F7401, 0x0060000F,
- 0x0062A401, 0x0064800C, 0x0064C00C, 0x00650001, 0x00651002,
- 0x0066C011, 0x00672002, 0x00677822, 0x00685C05, 0x00687802,
- 0x0069540A, 0x0069801D, 0x0069FC01, 0x006A8007, 0x006AA006,
- 0x006C0005, 0x006CD011, 0x006D6823, 0x006E0003, 0x006E840D,
- 0x006F980E, 0x006FF004, 0x00709014, 0x0070EC05, 0x0071F802,
- 0x00730008, 0x00734019, 0x0073B401, 0x0073C803, 0x00770027,
- 0x0077F004, 0x007EF401, 0x007EFC03, 0x007F3403, 0x007F7403,
- 0x007FB403, 0x007FF402, 0x00800065, 0x0081A806, 0x0081E805,
- 0x00822805, 0x0082801A, 0x00834021, 0x00840002, 0x00840C04,
- 0x00842002, 0x00845001, 0x00845803, 0x00847806, 0x00849401,
- 0x00849C01, 0x0084A401, 0x0084B801, 0x0084E802, 0x00850005,
- 0x00852804, 0x00853C01, 0x00864264, 0x00900027, 0x0091000B,
- 0x0092704E, 0x00940200, 0x009C0475, 0x009E53B9, 0x00AD400A,
- 0x00B39406, 0x00B3BC03, 0x00B3E404, 0x00B3F802, 0x00B5C001,
- 0x00B5FC01, 0x00B7804F, 0x00B8C00C, 0x00BA001A, 0x00BA6C59,
- 0x00BC00D6, 0x00BFC00C, 0x00C00005, 0x00C02019, 0x00C0A807,
- 0x00C0D802, 0x00C0F403, 0x00C26404, 0x00C28001, 0x00C3EC01,
- 0x00C64002, 0x00C6580A, 0x00C70024, 0x00C8001F, 0x00C8A81E,
- 0x00C94001, 0x00C98020, 0x00CA2827, 0x00CB003F, 0x00CC0100,
- 0x01370040, 0x02924037, 0x0293F802, 0x02983403, 0x0299BC10,
- 0x029A7C01, 0x029BC008, 0x029C0017, 0x029C8002, 0x029E2402,
- 0x02A00801, 0x02A01801, 0x02A02C01, 0x02A08C09, 0x02A0D804,
- 0x02A1D004, 0x02A20002, 0x02A2D011, 0x02A33802, 0x02A38012,
- 0x02A3E003, 0x02A4980A, 0x02A51C0D, 0x02A57C01, 0x02A60004,
- 0x02A6CC1B, 0x02A77802, 0x02A8A40E, 0x02A90C01, 0x02A93002,
- 0x02A97004, 0x02A9DC03, 0x02A9EC01, 0x02AAC001, 0x02AAC803,
- 0x02AADC02, 0x02AAF802, 0x02AB0401, 0x02AB7802, 0x02ABAC07,
- 0x02ABD402, 0x02AF8C0B, 0x03600001, 0x036DFC02, 0x036FFC02,
- 0x037FFC01, 0x03EC7801, 0x03ECA401, 0x03EEC810, 0x03F4F802,
- 0x03F7F002, 0x03F8001A, 0x03F88007, 0x03F8C023, 0x03F95013,
- 0x03F9A004, 0x03FBFC01, 0x03FC040F, 0x03FC6807, 0x03FCEC06,
- 0x03FD6C0B, 0x03FF8007, 0x03FFA007, 0x03FFE405, 0x04040003,
- 0x0404DC09, 0x0405E411, 0x0406400C, 0x0407402E, 0x040E7C01,
- 0x040F4001, 0x04215C01, 0x04247C01, 0x0424FC01, 0x04280403,
- 0x04281402, 0x04283004, 0x0428E003, 0x0428FC01, 0x04294009,
- 0x0429FC01, 0x042CE407, 0x04400003, 0x0440E016, 0x04420003,
- 0x0442C012, 0x04440003, 0x04449C0E, 0x04450004, 0x04460003,
- 0x0446CC0E, 0x04471404, 0x045AAC0D, 0x0491C004, 0x05BD442E,
- 0x05BE3C04, 0x074000F6, 0x07440027, 0x0744A4B5, 0x07480046,
- 0x074C0057, 0x075B0401, 0x075B6C01, 0x075BEC01, 0x075C5401,
- 0x075CD401, 0x075D3C01, 0x075DBC01, 0x075E2401, 0x075EA401,
- 0x075F0C01, 0x07BBC002, 0x07C0002C, 0x07C0C064, 0x07C2800F,
- 0x07C2C40E, 0x07C3040F, 0x07C3440F, 0x07C4401F, 0x07C4C03C,
- 0x07C5C02B, 0x07C7981D, 0x07C8402B, 0x07C90009, 0x07C94002,
- 0x07CC0021, 0x07CCC006, 0x07CCDC46, 0x07CE0014, 0x07CE8025,
- 0x07CF1805, 0x07CF8011, 0x07D0003F, 0x07D10001, 0x07D108B6,
- 0x07D3E404, 0x07D4003E, 0x07D50004, 0x07D54018, 0x07D7EC46,
- 0x07D9140B, 0x07DA0046, 0x07DC0074, 0x38000401, 0x38008060,
- 0x380400F0,
- };
- static const unsigned int aAscii[4] = {
- 0xFFFFFFFF, 0xFC00FFFF, 0xF8000001, 0xF8000001,
- };
-
- if( c<128 ){
- return ( (aAscii[c >> 5] & (1 << (c & 0x001F)))==0 );
- }else if( c<(1<<22) ){
- unsigned int key = (((unsigned int)c)<<10) | 0x000003FF;
- int iRes = 0;
- int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
- int iLo = 0;
- while( iHi>=iLo ){
- int iTest = (iHi + iLo) / 2;
- if( key >= aEntry[iTest] ){
- iRes = iTest;
- iLo = iTest+1;
- }else{
- iHi = iTest-1;
- }
- }
- assert( aEntry[0]<key );
- assert( key>=aEntry[iRes] );
- return (((unsigned int)c) >= ((aEntry[iRes]>>10) + (aEntry[iRes]&0x3FF)));
- }
- return 1;
-}
-
-
-/*
-** If the argument is a codepoint corresponding to a lowercase letter
-** in the ASCII range with a diacritic added, return the codepoint
-** of the ASCII letter only. For example, if passed 235 - "LATIN
-** SMALL LETTER E WITH DIAERESIS" - return 101 ("LATIN SMALL LETTER
-** E"). The results of passing a codepoint that corresponds to an
-** uppercase letter are undefined.
-*/
-static int fts5_remove_diacritic(int c){
- unsigned short aDia[] = {
- 0, 1797, 1848, 1859, 1891, 1928, 1940, 1995,
- 2024, 2040, 2060, 2110, 2168, 2206, 2264, 2286,
- 2344, 2383, 2472, 2488, 2516, 2596, 2668, 2732,
- 2782, 2842, 2894, 2954, 2984, 3000, 3028, 3336,
- 3456, 3696, 3712, 3728, 3744, 3896, 3912, 3928,
- 3968, 4008, 4040, 4106, 4138, 4170, 4202, 4234,
- 4266, 4296, 4312, 4344, 4408, 4424, 4472, 4504,
- 6148, 6198, 6264, 6280, 6360, 6429, 6505, 6529,
- 61448, 61468, 61534, 61592, 61642, 61688, 61704, 61726,
- 61784, 61800, 61836, 61880, 61914, 61948, 61998, 62122,
- 62154, 62200, 62218, 62302, 62364, 62442, 62478, 62536,
- 62554, 62584, 62604, 62640, 62648, 62656, 62664, 62730,
- 62924, 63050, 63082, 63274, 63390,
- };
- char aChar[] = {
- '\0', 'a', 'c', 'e', 'i', 'n', 'o', 'u', 'y', 'y', 'a', 'c',
- 'd', 'e', 'e', 'g', 'h', 'i', 'j', 'k', 'l', 'n', 'o', 'r',
- 's', 't', 'u', 'u', 'w', 'y', 'z', 'o', 'u', 'a', 'i', 'o',
- 'u', 'g', 'k', 'o', 'j', 'g', 'n', 'a', 'e', 'i', 'o', 'r',
- 'u', 's', 't', 'h', 'a', 'e', 'o', 'y', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', 'a', 'b', 'd', 'd', 'e', 'f', 'g', 'h',
- 'h', 'i', 'k', 'l', 'l', 'm', 'n', 'p', 'r', 'r', 's', 't',
- 'u', 'v', 'w', 'w', 'x', 'y', 'z', 'h', 't', 'w', 'y', 'a',
- 'e', 'i', 'o', 'u', 'y',
- };
-
- unsigned int key = (((unsigned int)c)<<3) | 0x00000007;
- int iRes = 0;
- int iHi = sizeof(aDia)/sizeof(aDia[0]) - 1;
- int iLo = 0;
- while( iHi>=iLo ){
- int iTest = (iHi + iLo) / 2;
- if( key >= aDia[iTest] ){
- iRes = iTest;
- iLo = iTest+1;
- }else{
- iHi = iTest-1;
- }
- }
- assert( key>=aDia[iRes] );
- return ((c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : (int)aChar[iRes]);
-}
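
As an illustration of the lookup just above, the following minimal standalone sketch repeats the same key construction and binary search for codepoint 235 (U+00EB, "e" with diaeresis), using only the first five aDia[]/aChar[] entries copied from the tables; the truncated tables and the main() wrapper are editorial assumptions, not part of the original file.

#include <assert.h>
#include <stdio.h>

int main(void){
  static const unsigned short aDia[] = { 0, 1797, 1848, 1859, 1891 };
  static const char aChar[] = { '\0', 'a', 'c', 'e', 'i' };
  int c = 235;                                /* U+00EB, "e" with diaeresis */
  unsigned int key = ((unsigned int)c << 3) | 0x00000007;
  int iRes = 0;
  int iHi = (int)(sizeof(aDia)/sizeof(aDia[0])) - 1;
  int iLo = 0;
  while( iHi>=iLo ){                          /* same search as in the code above */
    int iTest = (iHi + iLo) / 2;
    if( key >= aDia[iTest] ){
      iRes = iTest;
      iLo = iTest+1;
    }else{
      iHi = iTest-1;
    }
  }
  /* aDia[3]==1859 encodes base 1859>>3 == 232 and range 1859&7 == 3, i.e.
  ** codepoints 232..235, so 235 maps to aChar[3] == 'e' (codepoint 101). */
  assert( iRes==3 );
  assert( (c > (aDia[iRes]>>3) + (aDia[iRes]&0x07) ? c : (int)aChar[iRes])=='e' );
  printf("%d -> %d\n", c, (int)aChar[iRes]);  /* prints "235 -> 101" */
  return 0;
}
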
-
-
-/*
-** Return true if the argument interpreted as a unicode codepoint
-** is a diacritical modifier character.
-*/
-static int sqlite3Fts5UnicodeIsdiacritic(int c){
- unsigned int mask0 = 0x08029FDF;
- unsigned int mask1 = 0x000361F8;
- if( c<768 || c>817 ) return 0;
- return (c < 768+32) ?
- (mask0 & (1 << (c-768))) :
- (mask1 & (1 << (c-768-32)));
-}
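
A quick illustrative check of the bitmask test above, assuming U+0301 COMBINING ACUTE ACCENT (codepoint 769) as the input; the mask value is copied from the function and the rest is an editorial sketch.

#include <assert.h>

int main(void){
  const unsigned int mask0 = 0x08029FDF;      /* same mask as above */
  int c = 769;                                /* U+0301 COMBINING ACUTE ACCENT */
  assert( c>=768 && c<768+32 );               /* falls in the first 32-codepoint window */
  assert( (mask0 & (1u << (c-768)))!=0 );     /* reported as a diacritic */
  return 0;
}
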
-
-
-/*
-** Interpret the argument as a unicode codepoint. If the codepoint
-** is an upper case character that has a lower case equivalent,
-** return the codepoint corresponding to the lower case version.
-** Otherwise, return a copy of the argument.
-**
-** The results are undefined if the value passed to this function
-** is less than zero.
-*/
-static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic){
- /* Each entry in the following array defines a rule for folding a range
- ** of codepoints to lower case. The rule applies to a range of nRange
- ** codepoints starting at codepoint iCode.
- **
- ** If the least significant bit in flags is clear, then the rule applies
- ** to all nRange codepoints (i.e. all nRange codepoints are upper case and
- ** need to be folded). Or, if it is set, then the rule only applies to
-** every second codepoint in the range, starting with codepoint iCode.
- **
- ** The 7 most significant bits in flags are an index into the aiOff[]
- ** array. If a specific codepoint C does require folding, then its lower
- ** case equivalent is ((C + aiOff[flags>>1]) & 0xFFFF).
- **
- ** The contents of this array are generated by parsing the CaseFolding.txt
- ** file distributed as part of the "Unicode Character Database". See
- ** http://www.unicode.org for details.
- */
- static const struct TableEntry {
- unsigned short iCode;
- unsigned char flags;
- unsigned char nRange;
- } aEntry[] = {
- {65, 14, 26}, {181, 64, 1}, {192, 14, 23},
- {216, 14, 7}, {256, 1, 48}, {306, 1, 6},
- {313, 1, 16}, {330, 1, 46}, {376, 116, 1},
- {377, 1, 6}, {383, 104, 1}, {385, 50, 1},
- {386, 1, 4}, {390, 44, 1}, {391, 0, 1},
- {393, 42, 2}, {395, 0, 1}, {398, 32, 1},
- {399, 38, 1}, {400, 40, 1}, {401, 0, 1},
- {403, 42, 1}, {404, 46, 1}, {406, 52, 1},
- {407, 48, 1}, {408, 0, 1}, {412, 52, 1},
- {413, 54, 1}, {415, 56, 1}, {416, 1, 6},
- {422, 60, 1}, {423, 0, 1}, {425, 60, 1},
- {428, 0, 1}, {430, 60, 1}, {431, 0, 1},
- {433, 58, 2}, {435, 1, 4}, {439, 62, 1},
- {440, 0, 1}, {444, 0, 1}, {452, 2, 1},
- {453, 0, 1}, {455, 2, 1}, {456, 0, 1},
- {458, 2, 1}, {459, 1, 18}, {478, 1, 18},
- {497, 2, 1}, {498, 1, 4}, {502, 122, 1},
- {503, 134, 1}, {504, 1, 40}, {544, 110, 1},
- {546, 1, 18}, {570, 70, 1}, {571, 0, 1},
- {573, 108, 1}, {574, 68, 1}, {577, 0, 1},
- {579, 106, 1}, {580, 28, 1}, {581, 30, 1},
- {582, 1, 10}, {837, 36, 1}, {880, 1, 4},
- {886, 0, 1}, {902, 18, 1}, {904, 16, 3},
- {908, 26, 1}, {910, 24, 2}, {913, 14, 17},
- {931, 14, 9}, {962, 0, 1}, {975, 4, 1},
- {976, 140, 1}, {977, 142, 1}, {981, 146, 1},
- {982, 144, 1}, {984, 1, 24}, {1008, 136, 1},
- {1009, 138, 1}, {1012, 130, 1}, {1013, 128, 1},
- {1015, 0, 1}, {1017, 152, 1}, {1018, 0, 1},
- {1021, 110, 3}, {1024, 34, 16}, {1040, 14, 32},
- {1120, 1, 34}, {1162, 1, 54}, {1216, 6, 1},
- {1217, 1, 14}, {1232, 1, 88}, {1329, 22, 38},
- {4256, 66, 38}, {4295, 66, 1}, {4301, 66, 1},
- {7680, 1, 150}, {7835, 132, 1}, {7838, 96, 1},
- {7840, 1, 96}, {7944, 150, 8}, {7960, 150, 6},
- {7976, 150, 8}, {7992, 150, 8}, {8008, 150, 6},
- {8025, 151, 8}, {8040, 150, 8}, {8072, 150, 8},
- {8088, 150, 8}, {8104, 150, 8}, {8120, 150, 2},
- {8122, 126, 2}, {8124, 148, 1}, {8126, 100, 1},
- {8136, 124, 4}, {8140, 148, 1}, {8152, 150, 2},
- {8154, 120, 2}, {8168, 150, 2}, {8170, 118, 2},
- {8172, 152, 1}, {8184, 112, 2}, {8186, 114, 2},
- {8188, 148, 1}, {8486, 98, 1}, {8490, 92, 1},
- {8491, 94, 1}, {8498, 12, 1}, {8544, 8, 16},
- {8579, 0, 1}, {9398, 10, 26}, {11264, 22, 47},
- {11360, 0, 1}, {11362, 88, 1}, {11363, 102, 1},
- {11364, 90, 1}, {11367, 1, 6}, {11373, 84, 1},
- {11374, 86, 1}, {11375, 80, 1}, {11376, 82, 1},
- {11378, 0, 1}, {11381, 0, 1}, {11390, 78, 2},
- {11392, 1, 100}, {11499, 1, 4}, {11506, 0, 1},
- {42560, 1, 46}, {42624, 1, 24}, {42786, 1, 14},
- {42802, 1, 62}, {42873, 1, 4}, {42877, 76, 1},
- {42878, 1, 10}, {42891, 0, 1}, {42893, 74, 1},
- {42896, 1, 4}, {42912, 1, 10}, {42922, 72, 1},
- {65313, 14, 26},
- };
- static const unsigned short aiOff[] = {
- 1, 2, 8, 15, 16, 26, 28, 32,
- 37, 38, 40, 48, 63, 64, 69, 71,
- 79, 80, 116, 202, 203, 205, 206, 207,
- 209, 210, 211, 213, 214, 217, 218, 219,
- 775, 7264, 10792, 10795, 23228, 23256, 30204, 54721,
- 54753, 54754, 54756, 54787, 54793, 54809, 57153, 57274,
- 57921, 58019, 58363, 61722, 65268, 65341, 65373, 65406,
- 65408, 65410, 65415, 65424, 65436, 65439, 65450, 65462,
- 65472, 65476, 65478, 65480, 65482, 65488, 65506, 65511,
- 65514, 65521, 65527, 65528, 65529,
- };
-
- int ret = c;
-
- assert( sizeof(unsigned short)==2 && sizeof(unsigned char)==1 );
-
- if( c<128 ){
- if( c>='A' && c<='Z' ) ret = c + ('a' - 'A');
- }else if( c<65536 ){
- const struct TableEntry *p;
- int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
- int iLo = 0;
- int iRes = -1;
-
- assert( c>aEntry[0].iCode );
- while( iHi>=iLo ){
- int iTest = (iHi + iLo) / 2;
- int cmp = (c - aEntry[iTest].iCode);
- if( cmp>=0 ){
- iRes = iTest;
- iLo = iTest+1;
- }else{
- iHi = iTest-1;
- }
- }
-
- assert( iRes>=0 && c>=aEntry[iRes].iCode );
- p = &aEntry[iRes];
- if( c<(p->iCode + p->nRange) && 0==(0x01 & p->flags & (p->iCode ^ c)) ){
- ret = (c + (aiOff[p->flags>>1])) & 0x0000FFFF;
- assert( ret>0 );
- }
-
- if( bRemoveDiacritic ) ret = fts5_remove_diacritic(ret);
- }
-
- else if( c>=66560 && c<66600 ){
- ret = c + 40;
- }
-
- return ret;
-}
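
To make the rule encoding described in the comment above concrete, here is a small self-contained check of the two cases, using the entries {65, 14, 26} and {256, 1, 48} and the offsets aiOff[7]==32 and aiOff[0]==1 copied from the tables; it is an illustrative sketch of the arithmetic, not a test of the function itself.

#include <assert.h>

int main(void){
  /* 'A' (65): entry {65, 14, 26}. flags==14 has its low bit clear, so the
  ** rule covers all 26 codepoints 65..90; the offset is aiOff[14>>1]==32. */
  assert( ((65 + 32) & 0xFFFF)=='a' );

  /* U+0100 (256): entry {256, 1, 48}. flags==1 has its low bit set, so only
  ** codepoints with the same parity as iCode fold; the offset is aiOff[0]==1. */
  assert( (0x01 & 1 & (256 ^ 256))==0 );      /* 256 itself passes the parity test */
  assert( (0x01 & 1 & (256 ^ 257))!=0 );      /* 257 (already lower case) does not */
  assert( ((256 + 1) & 0xFFFF)==257 );        /* U+0100 folds to U+0101 */
  return 0;
}
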
-
-/*
-** 2015 May 30
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** Routines for varint serialization and deserialization.
-*/
-
-
-
-/*
-** This is a copy of the sqlite3GetVarint32() routine from the SQLite core.
-** Except that this version does handle the single-byte case, which the core
-** version expects to have been handled before it is called.
-*/
-static int sqlite3Fts5GetVarint32(const unsigned char *p, u32 *v){
- u32 a,b;
-
- /* The 1-byte case. Overwhelmingly the most common. */
- a = *p;
- /* a: p0 (unmasked) */
- if (!(a&0x80))
- {
- /* Values between 0 and 127 */
- *v = a;
- return 1;
- }
-
- /* The 2-byte case */
- p++;
- b = *p;
- /* b: p1 (unmasked) */
- if (!(b&0x80))
- {
- /* Values between 128 and 16383 */
- a &= 0x7f;
- a = a<<7;
- *v = a | b;
- return 2;
- }
-
- /* The 3-byte case */
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<14 | p2 (unmasked) */
- if (!(a&0x80))
- {
- /* Values between 16384 and 2097151 */
- a &= (0x7f<<14)|(0x7f);
- b &= 0x7f;
- b = b<<7;
- *v = a | b;
- return 3;
- }
-
- /* A 32-bit varint is used to store size information in btrees.
-** Objects are rarely larger than the 2MiB limit of a 3-byte varint.
- ** A 3-byte varint is sufficient, for example, to record the size
- ** of a 1048569-byte BLOB or string.
- **
-** We only unroll the first 1-, 2-, and 3-byte cases. The very
- ** rare larger cases can be handled by the slower 64-bit varint
- ** routine.
- */
- {
- u64 v64;
- u8 n;
- p -= 2;
- n = sqlite3Fts5GetVarint(p, &v64);
- *v = (u32)v64;
- assert( n>3 && n<=9 );
- return n;
- }
-}
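
For reference, a standalone sketch of the three-byte decode path above; the sample byte sequence is an editorial assumption chosen so that the result is easy to verify by hand.

#include <assert.h>

int main(void){
  /* Bytes 0x81 0x80 0x00 carry the 7-bit payloads 1, 0 and 0; per the
  ** three-byte path above they decode to (1<<14) | (0<<7) | 0 == 16384. */
  const unsigned char p[3] = { 0x81, 0x80, 0x00 };
  unsigned int v = ((unsigned int)(p[0] & 0x7f) << 14)
                 | ((unsigned int)(p[1] & 0x7f) << 7)
                 |  (unsigned int)(p[2] & 0x7f);
  assert( v==16384 );
  return 0;
}
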
-
-
-/*
-** Bitmasks used by sqlite3GetVarint(). These precomputed constants
-** are defined here rather than simply putting the constant expressions
-** inline in order to work around bugs in the RVT compiler.
-**
-** SLOT_2_0 A mask for (0x7f<<14) | 0x7f
-**
-** SLOT_4_2_0  A mask for  (0xf<<28) | SLOT_2_0
-*/
-#define SLOT_2_0 0x001fc07f
-#define SLOT_4_2_0 0xf01fc07f
-
-/*
-** Read a 64-bit variable-length integer from memory starting at p[0].
-** Return the number of bytes read. The value is stored in *v.
-*/
-static u8 sqlite3Fts5GetVarint(const unsigned char *p, u64 *v){
- u32 a,b,s;
-
- a = *p;
- /* a: p0 (unmasked) */
- if (!(a&0x80))
- {
- *v = a;
- return 1;
- }
-
- p++;
- b = *p;
- /* b: p1 (unmasked) */
- if (!(b&0x80))
- {
- a &= 0x7f;
- a = a<<7;
- a |= b;
- *v = a;
- return 2;
- }
-
- /* Verify that constants are precomputed correctly */
- assert( SLOT_2_0 == ((0x7f<<14) | (0x7f)) );
- assert( SLOT_4_2_0 == ((0xfU<<28) | (0x7f<<14) | (0x7f)) );
-
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<14 | p2 (unmasked) */
- if (!(a&0x80))
- {
- a &= SLOT_2_0;
- b &= 0x7f;
- b = b<<7;
- a |= b;
- *v = a;
- return 3;
- }
-
- /* CSE1 from below */
- a &= SLOT_2_0;
- p++;
- b = b<<14;
- b |= *p;
- /* b: p1<<14 | p3 (unmasked) */
- if (!(b&0x80))
- {
- b &= SLOT_2_0;
- /* moved CSE1 up */
- /* a &= (0x7f<<14)|(0x7f); */
- a = a<<7;
- a |= b;
- *v = a;
- return 4;
- }
-
- /* a: p0<<14 | p2 (masked) */
- /* b: p1<<14 | p3 (unmasked) */
- /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
- /* moved CSE1 up */
- /* a &= (0x7f<<14)|(0x7f); */
- b &= SLOT_2_0;
- s = a;
- /* s: p0<<14 | p2 (masked) */
-
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<28 | p2<<14 | p4 (unmasked) */
- if (!(a&0x80))
- {
- /* we can skip these cause they were (effectively) done above in calc'ing s */
- /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */
- /* b &= (0x7f<<14)|(0x7f); */
- b = b<<7;
- a |= b;
- s = s>>18;
- *v = ((u64)s)<<32 | a;
- return 5;
- }
-
- /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
- s = s<<7;
- s |= b;
- /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */
-
- p++;
- b = b<<14;
- b |= *p;
- /* b: p1<<28 | p3<<14 | p5 (unmasked) */
- if (!(b&0x80))
- {
- /* we can skip this cause it was (effectively) done above in calc'ing s */
- /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */
- a &= SLOT_2_0;
- a = a<<7;
- a |= b;
- s = s>>18;
- *v = ((u64)s)<<32 | a;
- return 6;
- }
-
- p++;
- a = a<<14;
- a |= *p;
- /* a: p2<<28 | p4<<14 | p6 (unmasked) */
- if (!(a&0x80))
- {
- a &= SLOT_4_2_0;
- b &= SLOT_2_0;
- b = b<<7;
- a |= b;
- s = s>>11;
- *v = ((u64)s)<<32 | a;
- return 7;
- }
-
- /* CSE2 from below */
- a &= SLOT_2_0;
- p++;
- b = b<<14;
- b |= *p;
- /* b: p3<<28 | p5<<14 | p7 (unmasked) */
- if (!(b&0x80))
- {
- b &= SLOT_4_2_0;
- /* moved CSE2 up */
- /* a &= (0x7f<<14)|(0x7f); */
- a = a<<7;
- a |= b;
- s = s>>4;
- *v = ((u64)s)<<32 | a;
- return 8;
- }
-
- p++;
- a = a<<15;
- a |= *p;
- /* a: p4<<29 | p6<<15 | p8 (unmasked) */
-
- /* moved CSE2 up */
- /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */
- b &= SLOT_2_0;
- b = b<<8;
- a |= b;
-
- s = s<<4;
- b = p[-4];
- b &= 0x7f;
- b = b>>3;
- s |= b;
-
- *v = ((u64)s)<<32 | a;
-
- return 9;
-}
-
-/*
-** The variable-length integer encoding is as follows:
-**
-** KEY:
-** A = 0xxxxxxx 7 bits of data and one flag bit
-** B = 1xxxxxxx 7 bits of data and one flag bit
-** C = xxxxxxxx 8 bits of data
-**
-** 7 bits - A
-** 14 bits - BA
-** 21 bits - BBA
-** 28 bits - BBBA
-** 35 bits - BBBBA
-** 42 bits - BBBBBA
-** 49 bits - BBBBBBA
-** 56 bits - BBBBBBBA
-** 64 bits - BBBBBBBBC
-*/
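
As a worked example of the "BA" layout described above, the following sketch encodes the value 300 into two bytes and decodes it again; the helper name putVarint2 and the two-byte-only scope are editorial assumptions, not the SQLite routines themselves.

#include <assert.h>
#include <stdio.h>

static int putVarint2(unsigned char *p, unsigned int v){
  /* values 128..16383 use the "BA" layout: B carries the high 7 bits with
  ** the flag bit set, A carries the low 7 bits with the flag bit clear */
  p[0] = (unsigned char)(((v>>7) & 0x7f) | 0x80);
  p[1] = (unsigned char)(v & 0x7f);
  return 2;
}

int main(void){
  unsigned char buf[2];
  putVarint2(buf, 300);
  printf("%02X %02X\n", buf[0], buf[1]);      /* prints "82 2C" */
  /* decoding: ((0x82 & 0x7f)<<7) | 0x2C == 256 + 44 == 300 */
  assert( (((buf[0] & 0x7f)<<7) | buf[1])==300 );
  return 0;
}
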
-
-#ifdef SQLITE_NOINLINE
-# define FTS5_NOINLINE SQLITE_NOINLINE
-#else
-# define FTS5_NOINLINE
-#endif
-
-/*
-** Write a 64-bit variable-length integer to memory starting at p[0].
-** The length of the data written will be between 1 and 9 bytes. The number
-** of bytes written is returned.
-**
-** A variable-length integer consists of the lower 7 bits of each byte
-** for all bytes that have the 8th bit set and one byte with the 8th
-** bit clear. Except, if we get to the 9th byte, it stores the full
-** 8 bits and is the last byte.
-*/
-static int FTS5_NOINLINE fts5PutVarint64(unsigned char *p, u64 v){
- int i, j, n;
- u8 buf[10];
- if( v & (((u64)0xff000000)<<32) ){
- p[8] = (u8)v;
- v >>= 8;
- for(i=7; i>=0; i--){
- p[i] = (u8)((v & 0x7f) | 0x80);
- v >>= 7;
- }
- return 9;
- }
- n = 0;
- do{
- buf[n++] = (u8)((v & 0x7f) | 0x80);
- v >>= 7;
- }while( v!=0 );
- buf[0] &= 0x7f;
- assert( n<=9 );
- for(i=0, j=n-1; j>=0; j--, i++){
- p[i] = buf[j];
- }
- return n;
-}
-
-static int sqlite3Fts5PutVarint(unsigned char *p, u64 v){
- if( v<=0x7f ){
- p[0] = v&0x7f;
- return 1;
- }
- if( v<=0x3fff ){
- p[0] = ((v>>7)&0x7f)|0x80;
- p[1] = v&0x7f;
- return 2;
- }
- return fts5PutVarint64(p,v);
-}
-
-
-static int sqlite3Fts5GetVarintLen(u32 iVal){
- if( iVal<(1 << 7 ) ) return 1;
- if( iVal<(1 << 14) ) return 2;
- if( iVal<(1 << 21) ) return 3;
- if( iVal<(1 << 28) ) return 4;
- return 5;
-}
-
-
-/*
-** 2015 May 08
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This is an SQLite virtual table module implementing direct access to an
-** existing FTS5 index. The module may create several different types of
-** tables:
-**
-** col:
-** CREATE TABLE vocab(term, col, doc, cnt, PRIMARY KEY(term, col));
-**
-** One row for each term/column combination. The value of $doc is set to
-** the number of fts5 rows that contain at least one instance of term
-** $term within column $col. Field $cnt is set to the total number of
-** instances of term $term in column $col (in any row of the fts5 table).
-**
-** row:
-** CREATE TABLE vocab(term, doc, cnt, PRIMARY KEY(term));
-**
-** One row for each term in the database. The value of $doc is set to
-** the number of fts5 rows that contain at least one instance of term
-** $term. Field $cnt is set to the total number of instances of term
-** $term in the database.
-*/
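
A hedged usage sketch of the fts5vocab module described above: it assumes an SQLite build with FTS5 compiled in and uses illustrative names ("docs", "docs_v") and sample data that are not part of the original file.

#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  int i;
  (void)pArg;
  for(i=0; i<nCol; i++){
    printf("%s=%s  ", azCol[i], azVal[i] ? azVal[i] : "NULL");
  }
  printf("\n");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  char *zErr = 0;
  int rc = sqlite3_open(":memory:", &db);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db,
      "CREATE VIRTUAL TABLE docs USING fts5(body);"
      "INSERT INTO docs(body) VALUES('one two two three three three');"
      /* 'row' type: one vocab row per term, with columns (term, doc, cnt) */
      "CREATE VIRTUAL TABLE docs_v USING fts5vocab('docs', 'row');"
      "SELECT term, doc, cnt FROM docs_v;",
      print_row, 0, &zErr
    );
  }
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr ? zErr : "unable to open database");
  }
  sqlite3_free(zErr);
  sqlite3_close(db);
  return rc;
}
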
-
-
-
-
-typedef struct Fts5VocabTable Fts5VocabTable;
-typedef struct Fts5VocabCursor Fts5VocabCursor;
-
-struct Fts5VocabTable {
- sqlite3_vtab base;
- char *zFts5Tbl; /* Name of fts5 table */
- char *zFts5Db; /* Db containing fts5 table */
- sqlite3 *db; /* Database handle */
- Fts5Global *pGlobal; /* FTS5 global object for this database */
- int eType; /* FTS5_VOCAB_COL or ROW */
-};
-
-struct Fts5VocabCursor {
- sqlite3_vtab_cursor base;
- sqlite3_stmt *pStmt; /* Statement holding lock on pIndex */
- Fts5Index *pIndex; /* Associated FTS5 index */
-
- int bEof; /* True if this cursor is at EOF */
- Fts5IndexIter *pIter; /* Term/rowid iterator object */
-
- /* These are used by 'col' tables only */
- int nCol;
- int iCol;
- i64 *aCnt;
- i64 *aDoc;
-
- /* Output values */
- i64 rowid; /* This table's current rowid value */
- Fts5Buffer term; /* Current value of 'term' column */
-  i64 aVal[3];                    /* Values for the columns to the right of 'term' */
-};
-
-#define FTS5_VOCAB_COL 0
-#define FTS5_VOCAB_ROW 1
-
-#define FTS5_VOCAB_COL_SCHEMA "term, col, doc, cnt"
-#define FTS5_VOCAB_ROW_SCHEMA "term, doc, cnt"
-
-/*
-** Translate a string containing an fts5vocab table type to an
-** FTS5_VOCAB_XXX constant. If successful, set *peType to the output
-** value and return SQLITE_OK. Otherwise, set *pzErr to an error message
-** and return SQLITE_ERROR.
-*/
-static int fts5VocabTableType(const char *zType, char **pzErr, int *peType){
- int rc = SQLITE_OK;
- char *zCopy = sqlite3Fts5Strndup(&rc, zType, -1);
- if( rc==SQLITE_OK ){
- sqlite3Fts5Dequote(zCopy);
- if( sqlite3_stricmp(zCopy, "col")==0 ){
- *peType = FTS5_VOCAB_COL;
- }else
-
- if( sqlite3_stricmp(zCopy, "row")==0 ){
- *peType = FTS5_VOCAB_ROW;
- }else
- {
- *pzErr = sqlite3_mprintf("fts5vocab: unknown table type: %Q", zCopy);
- rc = SQLITE_ERROR;
- }
- sqlite3_free(zCopy);
- }
-
- return rc;
-}
-
-
-/*
-** The xDisconnect() virtual table method.
-*/
-static int fts5VocabDisconnectMethod(sqlite3_vtab *pVtab){
- Fts5VocabTable *pTab = (Fts5VocabTable*)pVtab;
- sqlite3_free(pTab);
- return SQLITE_OK;
-}
-
-/*
-** The xDestroy() virtual table method.
-*/
-static int fts5VocabDestroyMethod(sqlite3_vtab *pVtab){
- Fts5VocabTable *pTab = (Fts5VocabTable*)pVtab;
- sqlite3_free(pTab);
- return SQLITE_OK;
-}
-
-/*
-** This function is the implementation of both the xConnect and xCreate
-** methods of the fts5vocab virtual table.
-**
-** The argv[] array contains the following:
-**
-** argv[0] -> module name ("fts5vocab")
-** argv[1] -> database name
-** argv[2] -> table name
-**
-** then:
-**
-** argv[3] -> name of fts5 table
-** argv[4] -> type of fts5vocab table
-**
-** or, for tables in the TEMP schema only:
-**
-**   argv[3] -> name of the database containing the fts5 table
-** argv[4] -> name of fts5 table
-** argv[5] -> type of fts5vocab table
-*/
-static int fts5VocabInitVtab(
- sqlite3 *db, /* The SQLite database connection */
- void *pAux, /* Pointer to Fts5Global object */
- int argc, /* Number of elements in argv array */
- const char * const *argv, /* xCreate/xConnect argument array */
- sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */
- char **pzErr /* Write any error message here */
-){
- const char *azSchema[] = {
- "CREATE TABlE vocab(" FTS5_VOCAB_COL_SCHEMA ")",
- "CREATE TABlE vocab(" FTS5_VOCAB_ROW_SCHEMA ")"
- };
-
- Fts5VocabTable *pRet = 0;
- int rc = SQLITE_OK; /* Return code */
- int bDb;
-
- bDb = (argc==6 && strlen(argv[1])==4 && memcmp("temp", argv[1], 4)==0);
-
- if( argc!=5 && bDb==0 ){
- *pzErr = sqlite3_mprintf("wrong number of vtable arguments");
- rc = SQLITE_ERROR;
- }else{
- int nByte; /* Bytes of space to allocate */
- const char *zDb = bDb ? argv[3] : argv[1];
- const char *zTab = bDb ? argv[4] : argv[3];
- const char *zType = bDb ? argv[5] : argv[4];
- int nDb = strlen(zDb)+1;
- int nTab = strlen(zTab)+1;
- int eType;
-
- rc = fts5VocabTableType(zType, pzErr, &eType);
- if( rc==SQLITE_OK ){
- assert( eType>=0 && eType<sizeof(azSchema)/sizeof(azSchema[0]) );
- rc = sqlite3_declare_vtab(db, azSchema[eType]);
- }
-
- nByte = sizeof(Fts5VocabTable) + nDb + nTab;
- pRet = sqlite3Fts5MallocZero(&rc, nByte);
- if( pRet ){
- pRet->pGlobal = (Fts5Global*)pAux;
- pRet->eType = eType;
- pRet->db = db;
- pRet->zFts5Tbl = (char*)&pRet[1];
- pRet->zFts5Db = &pRet->zFts5Tbl[nTab];
- memcpy(pRet->zFts5Tbl, zTab, nTab);
- memcpy(pRet->zFts5Db, zDb, nDb);
- sqlite3Fts5Dequote(pRet->zFts5Tbl);
- sqlite3Fts5Dequote(pRet->zFts5Db);
- }
- }
-
- *ppVTab = (sqlite3_vtab*)pRet;
- return rc;
-}
-
-
-/*
-** The xConnect() and xCreate() methods for the virtual table. All the
-** work is done in function fts5VocabInitVtab().
-*/
-static int fts5VocabConnectMethod(
- sqlite3 *db, /* Database connection */
- void *pAux, /* Pointer to tokenizer hash table */
- int argc, /* Number of elements in argv array */
- const char * const *argv, /* xCreate/xConnect argument array */
- sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
- char **pzErr /* OUT: sqlite3_malloc'd error message */
-){
- return fts5VocabInitVtab(db, pAux, argc, argv, ppVtab, pzErr);
-}
-static int fts5VocabCreateMethod(
- sqlite3 *db, /* Database connection */
- void *pAux, /* Pointer to tokenizer hash table */
- int argc, /* Number of elements in argv array */
- const char * const *argv, /* xCreate/xConnect argument array */
- sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */
- char **pzErr /* OUT: sqlite3_malloc'd error message */
-){
- return fts5VocabInitVtab(db, pAux, argc, argv, ppVtab, pzErr);
-}
-
-/*
-** Implementation of the xBestIndex method.
-*/
-static int fts5VocabBestIndexMethod(
- sqlite3_vtab *pVTab,
- sqlite3_index_info *pInfo
-){
- return SQLITE_OK;
-}
-
-/*
-** Implementation of xOpen method.
-*/
-static int fts5VocabOpenMethod(
- sqlite3_vtab *pVTab,
- sqlite3_vtab_cursor **ppCsr
-){
- Fts5VocabTable *pTab = (Fts5VocabTable*)pVTab;
- Fts5Index *pIndex = 0;
- int nCol = 0;
- Fts5VocabCursor *pCsr = 0;
- int rc = SQLITE_OK;
- sqlite3_stmt *pStmt = 0;
- char *zSql = 0;
- int nByte;
-
- zSql = sqlite3Fts5Mprintf(&rc,
- "SELECT t.%Q FROM %Q.%Q AS t WHERE t.%Q MATCH '*id'",
- pTab->zFts5Tbl, pTab->zFts5Db, pTab->zFts5Tbl, pTab->zFts5Tbl
- );
- if( zSql ){
- rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pStmt, 0);
- }
- sqlite3_free(zSql);
- assert( rc==SQLITE_OK || pStmt==0 );
- if( rc==SQLITE_ERROR ) rc = SQLITE_OK;
-
- if( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){
- i64 iId = sqlite3_column_int64(pStmt, 0);
- pIndex = sqlite3Fts5IndexFromCsrid(pTab->pGlobal, iId, &nCol);
- }
-
- if( rc==SQLITE_OK && pIndex==0 ){
- rc = sqlite3_finalize(pStmt);
- pStmt = 0;
- if( rc==SQLITE_OK ){
- pVTab->zErrMsg = sqlite3_mprintf(
- "no such fts5 table: %s.%s", pTab->zFts5Db, pTab->zFts5Tbl
- );
- rc = SQLITE_ERROR;
- }
- }
-
- nByte = nCol * sizeof(i64) * 2 + sizeof(Fts5VocabCursor);
- pCsr = (Fts5VocabCursor*)sqlite3Fts5MallocZero(&rc, nByte);
- if( pCsr ){
- pCsr->pIndex = pIndex;
- pCsr->pStmt = pStmt;
- pCsr->nCol = nCol;
- pCsr->aCnt = (i64*)&pCsr[1];
- pCsr->aDoc = &pCsr->aCnt[nCol];
- }else{
- sqlite3_finalize(pStmt);
- }
-
- *ppCsr = (sqlite3_vtab_cursor*)pCsr;
- return rc;
-}
-
-static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){
- pCsr->rowid = 0;
- sqlite3Fts5IterClose(pCsr->pIter);
- pCsr->pIter = 0;
-}
-
-/*
-** Close the cursor. For additional information see the documentation
-** on the xClose method of the virtual table interface.
-*/
-static int fts5VocabCloseMethod(sqlite3_vtab_cursor *pCursor){
- Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
- fts5VocabResetCursor(pCsr);
- sqlite3Fts5BufferFree(&pCsr->term);
- sqlite3_finalize(pCsr->pStmt);
- sqlite3_free(pCsr);
- return SQLITE_OK;
-}
-
-
-/*
-** Advance the cursor to the next row in the table.
-*/
-static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){
- Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
- Fts5VocabTable *pTab = (Fts5VocabTable*)pCursor->pVtab;
- int rc = SQLITE_OK;
-
- pCsr->rowid++;
-
- if( pTab->eType==FTS5_VOCAB_COL ){
- for(pCsr->iCol++; pCsr->iCol<pCsr->nCol; pCsr->iCol++){
- if( pCsr->aCnt[pCsr->iCol] ) break;
- }
- }
-
- if( pTab->eType==FTS5_VOCAB_ROW || pCsr->iCol>=pCsr->nCol ){
- if( sqlite3Fts5IterEof(pCsr->pIter) ){
- pCsr->bEof = 1;
- }else{
- const char *zTerm;
- int nTerm;
-
- zTerm = sqlite3Fts5IterTerm(pCsr->pIter, &nTerm);
- sqlite3Fts5BufferSet(&rc, &pCsr->term, nTerm, (const u8*)zTerm);
- memset(pCsr->aVal, 0, sizeof(pCsr->aVal));
- memset(pCsr->aCnt, 0, pCsr->nCol * sizeof(i64));
- memset(pCsr->aDoc, 0, pCsr->nCol * sizeof(i64));
- pCsr->iCol = 0;
-
- assert( pTab->eType==FTS5_VOCAB_COL || pTab->eType==FTS5_VOCAB_ROW );
- while( rc==SQLITE_OK ){
- i64 dummy;
- const u8 *pPos; int nPos; /* Position list */
- i64 iPos = 0; /* 64-bit position read from poslist */
- int iOff = 0; /* Current offset within position list */
-
- rc = sqlite3Fts5IterPoslist(pCsr->pIter, 0, &pPos, &nPos, &dummy);
- if( rc==SQLITE_OK ){
- if( pTab->eType==FTS5_VOCAB_ROW ){
- while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){
- pCsr->aVal[1]++;
- }
- pCsr->aVal[0]++;
- }else{
- int iCol = -1;
- while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){
- int ii = FTS5_POS2COLUMN(iPos);
- pCsr->aCnt[ii]++;
- if( iCol!=ii ){
- pCsr->aDoc[ii]++;
- iCol = ii;
- }
- }
- }
- rc = sqlite3Fts5IterNextScan(pCsr->pIter);
- }
- if( rc==SQLITE_OK ){
- zTerm = sqlite3Fts5IterTerm(pCsr->pIter, &nTerm);
- if( nTerm!=pCsr->term.n || memcmp(zTerm, pCsr->term.p, nTerm) ) break;
- if( sqlite3Fts5IterEof(pCsr->pIter) ) break;
- }
- }
- }
- }
-
- if( pCsr->bEof==0 && pTab->eType==FTS5_VOCAB_COL ){
- while( pCsr->aCnt[pCsr->iCol]==0 ) pCsr->iCol++;
- pCsr->aVal[0] = pCsr->iCol;
- pCsr->aVal[1] = pCsr->aDoc[pCsr->iCol];
- pCsr->aVal[2] = pCsr->aCnt[pCsr->iCol];
- }
- return rc;
-}
-
-/*
-** This is the xFilter implementation for the virtual table.
-*/
-static int fts5VocabFilterMethod(
- sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
- int idxNum, /* Strategy index */
- const char *idxStr, /* Unused */
- int nVal, /* Number of elements in apVal */
- sqlite3_value **apVal /* Arguments for the indexing scheme */
-){
- Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
- int rc;
- const int flags = FTS5INDEX_QUERY_SCAN;
-
- fts5VocabResetCursor(pCsr);
- rc = sqlite3Fts5IndexQuery(pCsr->pIndex, 0, 0, flags, 0, &pCsr->pIter);
- if( rc==SQLITE_OK ){
- rc = fts5VocabNextMethod(pCursor);
- }
-
- return rc;
-}
-
-/*
-** This is the xEof method of the virtual table. SQLite calls this
-** routine to find out if it has reached the end of a result set.
-*/
-static int fts5VocabEofMethod(sqlite3_vtab_cursor *pCursor){
- Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
- return pCsr->bEof;
-}
-
-static int fts5VocabColumnMethod(
- sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
- sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
- int iCol /* Index of column to read value from */
-){
- Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
- switch( iCol ){
- case 0: /* term */
- sqlite3_result_text(
- pCtx, (const char*)pCsr->term.p, pCsr->term.n, SQLITE_TRANSIENT
- );
- break;
-
- default:
- assert( iCol<4 && iCol>0 );
- sqlite3_result_int64(pCtx, pCsr->aVal[iCol-1]);
- break;
- }
- return SQLITE_OK;
-}
-
-/*
-** This is the xRowid method. The SQLite core calls this routine to
-** retrieve the rowid for the current row of the result set. The
-** rowid should be written to *pRowid.
-*/
-static int fts5VocabRowidMethod(
- sqlite3_vtab_cursor *pCursor,
- sqlite_int64 *pRowid
-){
- Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor;
- *pRowid = pCsr->rowid;
- return SQLITE_OK;
-}
-
-static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
- static const sqlite3_module fts5Vocab = {
- /* iVersion */ 2,
- /* xCreate */ fts5VocabCreateMethod,
- /* xConnect */ fts5VocabConnectMethod,
- /* xBestIndex */ fts5VocabBestIndexMethod,
- /* xDisconnect */ fts5VocabDisconnectMethod,
- /* xDestroy */ fts5VocabDestroyMethod,
- /* xOpen */ fts5VocabOpenMethod,
- /* xClose */ fts5VocabCloseMethod,
- /* xFilter */ fts5VocabFilterMethod,
- /* xNext */ fts5VocabNextMethod,
- /* xEof */ fts5VocabEofMethod,
- /* xColumn */ fts5VocabColumnMethod,
- /* xRowid */ fts5VocabRowidMethod,
- /* xUpdate */ 0,
- /* xBegin */ 0,
- /* xSync */ 0,
- /* xCommit */ 0,
- /* xRollback */ 0,
- /* xFindFunction */ 0,
- /* xRename */ 0,
- /* xSavepoint */ 0,
- /* xRelease */ 0,
- /* xRollbackTo */ 0,
- };
- void *p = (void*)pGlobal;
-
- return sqlite3_create_module_v2(db, "fts5vocab", &fts5Vocab, p, 0);
-}
-
-
-
-
-
-#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */
-
-/************** End of fts5.c ************************************************/
diff --git a/3rdparty/sqlite3/sqlite3.h b/3rdparty/sqlite3/sqlite3.h
index 7cca0ac..59b9570 100644
--- a/3rdparty/sqlite3/sqlite3.h
+++ b/3rdparty/sqlite3/sqlite3.h
@@ -23,7 +23,7 @@
**
** The official C-language API documentation for SQLite is derived
** from comments in this file. This file is the authoritative source
-** on how SQLite interfaces are supposed to operate.
+** on how SQLite interfaces are suppose to operate.
**
** The name of this file under configuration management is "sqlite.h.in".
** The makefile makes some minor changes to this file (such as inserting
@@ -43,25 +43,21 @@ extern "C" {
/*
-** Provide the ability to override linkage features of the interface.
+** Add the ability to override 'extern'
*/
#ifndef SQLITE_EXTERN
# define SQLITE_EXTERN extern
#endif
+
#ifndef SQLITE_API
# define SQLITE_API
#endif
-#ifndef SQLITE_CDECL
-# define SQLITE_CDECL
-#endif
-#ifndef SQLITE_STDCALL
-# define SQLITE_STDCALL
-#endif
+
/*
** These no-op macros are used in front of interfaces to mark those
** interfaces as either deprecated or experimental. New applications
-** should not use deprecated interfaces - they are supported for backwards
+** should not use deprecated interfaces - they are support for backwards
** compatibility only. Application writers should be aware that
** experimental interfaces are subject to change in point releases.
**
@@ -111,9 +107,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.9.2"
-#define SQLITE_VERSION_NUMBER 3009002
-#define SQLITE_SOURCE_ID "2015-11-02 18:31:45 bda77dda9697c463c3d0704014d51627fceee328"
+#define SQLITE_VERSION "3.8.2"
+#define SQLITE_VERSION_NUMBER 3008002
+#define SQLITE_SOURCE_ID "2013-12-06 14:53:30 27392118af4c38c5203a04b8013e1afdb1cebd0d"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -124,7 +120,7 @@ extern "C" {
** but are associated with the library instead of the header file. ^(Cautious
** programmers might include assert() statements in their application to
** verify that values returned by these interfaces match the macros in
-** the header, and thus ensure that the application is
+** the header, and thus insure that the application is
** compiled with matching library and header files.
**
** <blockquote><pre>
@@ -146,9 +142,9 @@ extern "C" {
** See also: [sqlite_version()] and [sqlite_source_id()].
*/
SQLITE_API SQLITE_EXTERN const char sqlite3_version[];
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
/*
** CAPI3REF: Run-Time Library Compilation Options Diagnostics
@@ -173,8 +169,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
** [sqlite_compileoption_get()] and the [compile_options pragma].
*/
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
#endif
/*
@@ -205,7 +201,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but
** can be fully or partially disabled using a call to [sqlite3_config()]
** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD],
-** or [SQLITE_CONFIG_SERIALIZED]. ^(The return value of the
+** or [SQLITE_CONFIG_MUTEX]. ^(The return value of the
** sqlite3_threadsafe() function shows only the compile-time setting of
** thread safety, not any run-time changes to that setting made by
** sqlite3_config(). In other words, the return value from sqlite3_threadsafe()
@@ -213,7 +209,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
**
** See the [threading mode] documentation for additional information.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void);
+SQLITE_API int sqlite3_threadsafe(void);
/*
** CAPI3REF: Database Connection Handle
@@ -270,11 +266,10 @@ typedef sqlite_uint64 sqlite3_uint64;
/*
** CAPI3REF: Closing A Database Connection
-** DESTRUCTOR: sqlite3
**
** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors
** for the [sqlite3] object.
-** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if
+** ^Calls to sqlite3_close() and sqlite3_close_v2() return SQLITE_OK if
** the [sqlite3] object is successfully destroyed and all associated
** resources are deallocated.
**
@@ -282,7 +277,7 @@ typedef sqlite_uint64 sqlite3_uint64;
** statements or unfinished sqlite3_backup objects then sqlite3_close()
** will leave the database connection open and return [SQLITE_BUSY].
** ^If sqlite3_close_v2() is called with unfinalized prepared statements
-** and/or unfinished sqlite3_backups, then the database connection becomes
+** and unfinished sqlite3_backups, then the database connection becomes
** an unusable "zombie" which will automatically be deallocated when the
** last prepared statement is finalized or the last sqlite3_backup is
** finished. The sqlite3_close_v2() interface is intended for use with
@@ -295,7 +290,7 @@ typedef sqlite_uint64 sqlite3_uint64;
** with the [sqlite3] object prior to attempting to close the object. ^If
** sqlite3_close_v2() is called on a [database connection] that still has
** outstanding [prepared statements], [BLOB handles], and/or
-** [sqlite3_backup] objects then it returns [SQLITE_OK] and the deallocation
+** [sqlite3_backup] objects then it returns SQLITE_OK but the deallocation
** of resources is deferred until all [prepared statements], [BLOB handles],
** and [sqlite3_backup] objects are also destroyed.
**
@@ -310,8 +305,8 @@ typedef sqlite_uint64 sqlite3_uint64;
** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
** argument is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3*);
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3*);
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
/*
** The type for a callback function.
@@ -322,7 +317,6 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
/*
** CAPI3REF: One-Step Query Execution Interface
-** METHOD: sqlite3
**
** The sqlite3_exec() interface is a convenience wrapper around
** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()],
@@ -374,7 +368,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** Restrictions:
**
** <ul>
-** <li> The application must ensure that the 1st parameter to sqlite3_exec()
+** <li> The application must insure that the 1st parameter to sqlite3_exec()
** is a valid and open [database connection].
** <li> The application must not close the [database connection] specified by
** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
@@ -382,7 +376,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** </ul>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
+SQLITE_API int sqlite3_exec(
sqlite3*, /* An open database */
const char *sql, /* SQL to be evaluated */
int (*callback)(void*,int,char**,char**), /* Callback function */
@@ -392,14 +386,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
/*
** CAPI3REF: Result Codes
-** KEYWORDS: {result code definitions}
+** KEYWORDS: SQLITE_OK {error code} {error codes}
+** KEYWORDS: {result code} {result codes}
**
** Many SQLite functions return an integer result code from the set shown
** here in order to indicate success or failure.
**
** New error codes may be added in future versions of SQLite.
**
-** See also: [extended result code definitions]
+** See also: [SQLITE_IOERR_READ | extended result codes],
+** [sqlite3_vtab_on_conflict()] [SQLITE_ROLLBACK | result codes].
*/
#define SQLITE_OK 0 /* Successful result */
/* beginning-of-error-codes */
@@ -437,19 +433,26 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
/*
** CAPI3REF: Extended Result Codes
-** KEYWORDS: {extended result code definitions}
+** KEYWORDS: {extended error code} {extended error codes}
+** KEYWORDS: {extended result code} {extended result codes}
**
-** In its default configuration, SQLite API routines return one of 30 integer
-** [result codes]. However, experience has shown that many of
+** In its default configuration, SQLite API routines return one of 26 integer
+** [SQLITE_OK | result codes]. However, experience has shown that many of
** these result codes are too coarse-grained. They do not provide as
** much information about problems as programmers might like. In an effort to
** address this, newer versions of SQLite (version 3.3.8 and later) include
** support for additional result codes that provide more detailed information
-** about errors. These [extended result codes] are enabled or disabled
+** about errors. The extended result codes are enabled or disabled
** on a per database connection basis using the
-** [sqlite3_extended_result_codes()] API. Or, the extended code for
-** the most recent error can be obtained using
-** [sqlite3_extended_errcode()].
+** [sqlite3_extended_result_codes()] API.
+**
+** Some of the available extended result codes are listed here.
+** One may expect the number of extended result codes will increase
+** over time. Software that uses extended result codes should expect
+** to see new result codes in future releases of SQLite.
+**
+** The SQLITE_OK result code will never be extended. It will always
+** be exactly zero.
*/
#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
@@ -477,7 +480,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8))
#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8))
-#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8))
@@ -489,7 +491,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8))
#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8))
-#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8))
#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8))
#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8))
#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8))
@@ -504,7 +505,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
-#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
/*
** CAPI3REF: Flags For File Open Operations
@@ -558,11 +558,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
** after reboot following a crash or power loss, the only bytes in a
** file that were written at the application level might have changed
** and that adjacent bytes, even bytes within the same sector are
-** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
-** flag indicate that a file cannot be deleted when open. The
-** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
-** read-only media and cannot be changed even by processes with
-** elevated privileges.
+** guaranteed to be unchanged.
*/
#define SQLITE_IOCAP_ATOMIC 0x00000001
#define SQLITE_IOCAP_ATOMIC512 0x00000002
@@ -577,7 +573,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
#define SQLITE_IOCAP_SEQUENTIAL 0x00000400
#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800
#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000
-#define SQLITE_IOCAP_IMMUTABLE 0x00002000
/*
** CAPI3REF: File Locking Levels
@@ -684,7 +679,7 @@ struct sqlite3_file {
** locking strategy (for example to use dot-file locks), to inquire
** about the status of a lock, or to break stale locks. The SQLite
** core reserves all opcodes less than 100 for its own use.
-** A [file control opcodes | list of opcodes] less than 100 is available.
+** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available.
** Applications that define a custom xFileControl method should use opcodes
** greater than 100 to avoid conflicts. VFS implementations should
** return [SQLITE_NOTFOUND] for file control opcodes that they do not
@@ -757,22 +752,19 @@ struct sqlite3_io_methods {
/*
** CAPI3REF: Standard File Control Opcodes
-** KEYWORDS: {file control opcodes} {file control opcode}
**
** These integer constants are opcodes for the xFileControl method
** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()]
** interface.
**
-** <ul>
-** <li>[[SQLITE_FCNTL_LOCKSTATE]]
** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This
** opcode causes the xFileControl method to write the current state of
** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED],
** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE])
** into an integer that the pArg argument points to. This capability
-** is used during testing and is only available when the SQLITE_TEST
-** compile-time option is used.
-**
+** is used during testing and only needs to be supported when SQLITE_TEST
+** is defined.
+** <ul>
** <li>[[SQLITE_FCNTL_SIZE_HINT]]
** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
** layer a hint of how large the database file will grow to be during the
@@ -797,29 +789,15 @@ struct sqlite3_io_methods {
** additional information.
**
** <li>[[SQLITE_FCNTL_SYNC_OMITTED]]
-** No longer in use.
-**
-** <li>[[SQLITE_FCNTL_SYNC]]
-** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
-** sent to the VFS immediately before the xSync method is invoked on a
-** database file descriptor. Or, if the xSync method is not invoked
-** because the user has configured SQLite with
-** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place
-** of the xSync method. In most cases, the pointer argument passed with
-** this file-control is NULL. However, if the database file is being synced
-** as part of a multi-database commit, the argument points to a nul-terminated
-** string containing the transactions master-journal file name. VFSes that
-** do not need this signal should silently ignore this opcode. Applications
-** should not call [sqlite3_file_control()] with this opcode as doing so may
-** disrupt the operation of the specialized VFSes that do require it.
-**
-** <li>[[SQLITE_FCNTL_COMMIT_PHASETWO]]
-** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite
-** and sent to the VFS after a transaction has been committed immediately
-** but before the database is unlocked. VFSes that do not need this signal
-** should silently ignore this opcode. Applications should not call
-** [sqlite3_file_control()] with this opcode as doing so may disrupt the
-** operation of the specialized VFSes that do require it.
+** ^(The [SQLITE_FCNTL_SYNC_OMITTED] opcode is generated internally by
+** SQLite and sent to all VFSes in place of a call to the xSync method
+** when the database connection has [PRAGMA synchronous] set to OFF.)^
+** Some specialized VFSes need this signal in order to operate correctly
+** when [PRAGMA synchronous | PRAGMA synchronous=OFF] is set, but most
+** VFSes do not need this signal and should silently ignore this opcode.
+** Applications should not call [sqlite3_file_control()] with this
+** opcode as doing so may disrupt the operation of the specialized VFSes
+** that do require it.
**
** <li>[[SQLITE_FCNTL_WIN32_AV_RETRY]]
** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
@@ -897,9 +875,7 @@ struct sqlite3_io_methods {
** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA]
** file control returns [SQLITE_OK], then the parser assumes that the
** VFS has handled the PRAGMA itself and the parser generates a no-op
-** prepared statement if result string is NULL, or that returns a copy
-** of the result string if the string is non-NULL.
-** ^If the [SQLITE_FCNTL_PRAGMA] file control returns
+** prepared statement. ^If the [SQLITE_FCNTL_PRAGMA] file control returns
** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means
** that the VFS encountered an error while handling the [PRAGMA] and the
** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA]
@@ -945,39 +921,12 @@ struct sqlite3_io_methods {
** SQLite stack may generate instances of this file control if
** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled.
**
-** <li>[[SQLITE_FCNTL_HAS_MOVED]]
-** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a
-** pointer to an integer and it writes a boolean into that integer depending
-** on whether or not the file has been renamed, moved, or deleted since it
-** was first opened.
-**
-** <li>[[SQLITE_FCNTL_WIN32_SET_HANDLE]]
-** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
-** opcode causes the xFileControl method to swap the file handle with the one
-** pointed to by the pArg argument. This capability is used during testing
-** and only needs to be supported when SQLITE_TEST is defined.
-**
-** <li>[[SQLITE_FCNTL_WAL_BLOCK]]
-** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might
-** be advantageous to block on the next WAL lock if the lock is not immediately
-** available. The WAL subsystem issues this signal during rare
-** circumstances in order to fix a problem with priority inversion.
-** Applications should <em>not</em> use this file-control.
-**
-** <li>[[SQLITE_FCNTL_ZIPVFS]]
-** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other
-** VFS should return SQLITE_NOTFOUND for this opcode.
-**
-** <li>[[SQLITE_FCNTL_RBU]]
-** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by
-** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for
-** this opcode.
** </ul>
*/
#define SQLITE_FCNTL_LOCKSTATE 1
-#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2
-#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3
-#define SQLITE_FCNTL_LAST_ERRNO 4
+#define SQLITE_GET_LOCKPROXYFILE 2
+#define SQLITE_SET_LOCKPROXYFILE 3
+#define SQLITE_LAST_ERRNO 4
#define SQLITE_FCNTL_SIZE_HINT 5
#define SQLITE_FCNTL_CHUNK_SIZE 6
#define SQLITE_FCNTL_FILE_POINTER 7
@@ -992,19 +941,6 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_TEMPFILENAME 16
#define SQLITE_FCNTL_MMAP_SIZE 18
#define SQLITE_FCNTL_TRACE 19
-#define SQLITE_FCNTL_HAS_MOVED 20
-#define SQLITE_FCNTL_SYNC 21
-#define SQLITE_FCNTL_COMMIT_PHASETWO 22
-#define SQLITE_FCNTL_WIN32_SET_HANDLE 23
-#define SQLITE_FCNTL_WAL_BLOCK 24
-#define SQLITE_FCNTL_ZIPVFS 25
-#define SQLITE_FCNTL_RBU 26
-
-/* deprecated names */
-#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
-#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE
-#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO
-
/*
** CAPI3REF: Mutex Handle
@@ -1256,7 +1192,7 @@ struct sqlite3_vfs {
** </ul>
**
** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as
-** was given on the corresponding lock.
+** was given no the corresponding lock.
**
** The xShmLock method can transition between unlocked and SHARED or
** between unlocked and EXCLUSIVE. It cannot transition between SHARED
@@ -1353,10 +1289,10 @@ struct sqlite3_vfs {
** must return [SQLITE_OK] on success and some other [error code] upon
** failure.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
+SQLITE_API int sqlite3_initialize(void);
+SQLITE_API int sqlite3_shutdown(void);
+SQLITE_API int sqlite3_os_init(void);
+SQLITE_API int sqlite3_os_end(void);
/*
** CAPI3REF: Configuring The SQLite Library
@@ -1367,11 +1303,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
** applications and so this routine is usually not necessary. It is
** provided to support rare applications with unusual needs.
**
-** <b>The sqlite3_config() interface is not threadsafe. The application
-** must ensure that no other SQLite interfaces are invoked by other
-** threads while sqlite3_config() is running.</b>
-**
-** The sqlite3_config() interface
+** The sqlite3_config() interface is not threadsafe. The application
+** must insure that no other SQLite interfaces are invoked by other
+** threads while sqlite3_config() is running. Furthermore, sqlite3_config()
** may only be invoked prior to library initialization using
** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()].
** ^If sqlite3_config() is called after [sqlite3_initialize()] and before
@@ -1389,11 +1323,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
** ^If the option is unknown or SQLite is unable to set the option
** then this routine returns a non-zero [error code].
*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
+SQLITE_API int sqlite3_config(int, ...);
/*
** CAPI3REF: Configure database connections
-** METHOD: sqlite3
**
** The sqlite3_db_config() interface is used to make configuration
** changes to a [database connection]. The interface is similar to
@@ -1408,7 +1341,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
** the call is considered successful.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Memory Allocation Routines
@@ -1542,33 +1475,31 @@ struct sqlite3_mem_methods {
** SQLITE_CONFIG_SERIALIZED configuration option.</dd>
**
** [[SQLITE_CONFIG_MALLOC]] <dt>SQLITE_CONFIG_MALLOC</dt>
-** <dd> ^(The SQLITE_CONFIG_MALLOC option takes a single argument which is
-** a pointer to an instance of the [sqlite3_mem_methods] structure.
-** The argument specifies
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mem_methods] structure. The argument specifies
** alternative low-level memory allocation routines to be used in place of
** the memory allocation routines built into SQLite.)^ ^SQLite makes
** its own private copy of the content of the [sqlite3_mem_methods] structure
** before the [sqlite3_config()] call returns.</dd>
**
** [[SQLITE_CONFIG_GETMALLOC]] <dt>SQLITE_CONFIG_GETMALLOC</dt>
-** <dd> ^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which
-** is a pointer to an instance of the [sqlite3_mem_methods] structure.
-** The [sqlite3_mem_methods]
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mem_methods] structure. The [sqlite3_mem_methods]
** structure is filled with the currently defined memory allocation routines.)^
** This option can be used to overload the default memory allocation
** routines with a wrapper that simulations memory allocation failure or
** tracks memory usage, for example. </dd>
**
** [[SQLITE_CONFIG_MEMSTATUS]] <dt>SQLITE_CONFIG_MEMSTATUS</dt>
-** <dd> ^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int,
-** interpreted as a boolean, which enables or disables the collection of
-** memory allocation statistics. ^(When memory allocation statistics are
-** disabled, the following SQLite interfaces become non-operational:
+** <dd> ^This option takes single argument of type int, interpreted as a
+** boolean, which enables or disables the collection of memory allocation
+** statistics. ^(When memory allocation statistics are disabled, the
+** following SQLite interfaces become non-operational:
** <ul>
** <li> [sqlite3_memory_used()]
** <li> [sqlite3_memory_highwater()]
** <li> [sqlite3_soft_heap_limit64()]
-** <li> [sqlite3_status64()]
+** <li> [sqlite3_status()]
** </ul>)^
** ^Memory allocation statistics are enabled by default unless SQLite is
** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory
@@ -1576,67 +1507,53 @@ struct sqlite3_mem_methods {
** </dd>
**
** [[SQLITE_CONFIG_SCRATCH]] <dt>SQLITE_CONFIG_SCRATCH</dt>
-** <dd> ^The SQLITE_CONFIG_SCRATCH option specifies a static memory buffer
-** that SQLite can use for scratch memory. ^(There are three arguments
-** to SQLITE_CONFIG_SCRATCH: A pointer an 8-byte
+** <dd> ^This option specifies a static memory buffer that SQLite can use for
+** scratch memory. There are three arguments: A pointer an 8-byte
** aligned memory buffer from which the scratch allocations will be
** drawn, the size of each scratch allocation (sz),
-** and the maximum number of scratch allocations (N).)^
+** and the maximum number of scratch allocations (N). The sz
+** argument must be a multiple of 16.
** The first argument must be a pointer to an 8-byte aligned buffer
** of at least sz*N bytes of memory.
-** ^SQLite will not use more than one scratch buffers per thread.
-** ^SQLite will never request a scratch buffer that is more than 6
-** times the database page size.
-** ^If SQLite needs needs additional
+** ^SQLite will use no more than two scratch buffers per thread. So
+** N should be set to twice the expected maximum number of threads.
+** ^SQLite will never require a scratch buffer that is more than 6
+** times the database page size. ^If SQLite needs needs additional
** scratch memory beyond what is provided by this configuration option, then
-** [sqlite3_malloc()] will be used to obtain the memory needed.<p>
-** ^When the application provides any amount of scratch memory using
-** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large
-** [sqlite3_malloc|heap allocations].
-** This can help [Robson proof|prevent memory allocation failures] due to heap
-** fragmentation in low-memory embedded systems.
-** </dd>
+** [sqlite3_malloc()] will be used to obtain the memory needed.</dd>
**
** [[SQLITE_CONFIG_PAGECACHE]] <dt>SQLITE_CONFIG_PAGECACHE</dt>
-** <dd> ^The SQLITE_CONFIG_PAGECACHE option specifies a static memory buffer
-** that SQLite can use for the database page cache with the default page
-** cache implementation.
+** <dd> ^This option specifies a static memory buffer that SQLite can use for
+** the database page cache with the default page cache implementation.
** This configuration should not be used if an application-define page
-** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2]
-** configuration option.
-** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
-** 8-byte aligned
+** cache implementation is loaded using the SQLITE_CONFIG_PCACHE2 option.
+** There are three arguments to this option: A pointer to 8-byte aligned
** memory, the size of each page buffer (sz), and the number of pages (N).
** The sz argument should be the size of the largest database page
-** (a power of two between 512 and 65536) plus some extra bytes for each
-** page header. ^The number of extra bytes needed by the page header
-** can be determined using the [SQLITE_CONFIG_PCACHE_HDRSZ] option
-** to [sqlite3_config()].
-** ^It is harmless, apart from the wasted memory,
-** for the sz parameter to be larger than necessary. The first
-** argument should pointer to an 8-byte aligned block of memory that
-** is at least sz*N bytes of memory, otherwise subsequent behavior is
-** undefined.
+** (a power of two between 512 and 32768) plus a little extra for each
+** page header. ^The page header size is 20 to 40 bytes depending on
+** the host architecture. ^It is harmless, apart from the wasted memory,
+** to make sz a little too large. The first
+** argument should point to an allocation of at least sz*N bytes of memory.
** ^SQLite will use the memory provided by the first argument to satisfy its
** memory needs for the first N pages that it adds to cache. ^If additional
** page cache memory is needed beyond what is provided by this option, then
-** SQLite goes to [sqlite3_malloc()] for the additional storage space.</dd>
+** SQLite goes to [sqlite3_malloc()] for the additional storage space.
+** The pointer in the first argument must
+** be aligned to an 8-byte boundary or subsequent behavior of SQLite
+** will be undefined.</dd>
**
** [[SQLITE_CONFIG_HEAP]] <dt>SQLITE_CONFIG_HEAP</dt>
-** <dd> ^The SQLITE_CONFIG_HEAP option specifies a static memory buffer
-** that SQLite will use for all of its dynamic memory allocation needs
-** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and
-** [SQLITE_CONFIG_PAGECACHE].
-** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled
-** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns
-** [SQLITE_ERROR] if invoked otherwise.
-** ^There are three arguments to SQLITE_CONFIG_HEAP:
-** An 8-byte aligned pointer to the memory,
+** <dd> ^This option specifies a static memory buffer that SQLite will use
+** for all of its dynamic memory allocation needs beyond those provided
+** for by [SQLITE_CONFIG_SCRATCH] and [SQLITE_CONFIG_PAGECACHE].
+** There are three arguments: An 8-byte aligned pointer to the memory,
** the number of bytes in the memory buffer, and the minimum allocation size.
** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts
** to using its default memory allocator (the system malloc() implementation),
** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the
-** memory pointer is not NULL then the alternative memory
+** memory pointer is not NULL and either [SQLITE_ENABLE_MEMSYS3] or
+** [SQLITE_ENABLE_MEMSYS5] are defined, then the alternative memory
** allocator is engaged to handle all of SQLite's memory allocation needs.
** The first pointer (the memory pointer) must be aligned to an 8-byte
** boundary or subsequent behavior of SQLite will be undefined.
@@ -1644,11 +1561,11 @@ struct sqlite3_mem_methods {
** for the minimum allocation size are 2**5 through 2**8.</dd>
**
** [[SQLITE_CONFIG_MUTEX]] <dt>SQLITE_CONFIG_MUTEX</dt>
-** <dd> ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a
-** pointer to an instance of the [sqlite3_mutex_methods] structure.
-** The argument specifies alternative low-level mutex routines to be used
-** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of
-** the content of the [sqlite3_mutex_methods] structure before the call to
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mutex_methods] structure. The argument specifies
+** alternative low-level mutex routines to be used in place of
+** the mutex routines built into SQLite.)^ ^SQLite makes a copy of the
+** content of the [sqlite3_mutex_methods] structure before the call to
** [sqlite3_config()] returns. ^If SQLite is compiled with
** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
** the entire mutexing subsystem is omitted from the build and hence calls to
@@ -1656,8 +1573,8 @@ struct sqlite3_mem_methods {
** return [SQLITE_ERROR].</dd>
**
** [[SQLITE_CONFIG_GETMUTEX]] <dt>SQLITE_CONFIG_GETMUTEX</dt>
-** <dd> ^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which
-** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The
+** <dd> ^(This option takes a single argument which is a pointer to an
+** instance of the [sqlite3_mutex_methods] structure. The
** [sqlite3_mutex_methods]
** structure is filled with the currently defined mutex routines.)^
** This option can be used to overload the default mutex allocation
@@ -1669,25 +1586,25 @@ struct sqlite3_mem_methods {
** return [SQLITE_ERROR].</dd>
**
** [[SQLITE_CONFIG_LOOKASIDE]] <dt>SQLITE_CONFIG_LOOKASIDE</dt>
-** <dd> ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine
-** the default size of lookaside memory on each [database connection].
-** The first argument is the
+** <dd> ^(This option takes two arguments that determine the default
+** memory allocation for the lookaside memory allocator on each
+** [database connection]. The first argument is the
** size of each lookaside buffer slot and the second is the number of
-** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE
-** sets the <i>default</i> lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
-** option to [sqlite3_db_config()] can be used to change the lookaside
+** slots allocated to each database connection.)^ ^(This option sets the
+** <i>default</i> lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
+** verb to [sqlite3_db_config()] can be used to change the lookaside
** configuration on individual connections.)^ </dd>
**
** [[SQLITE_CONFIG_PCACHE2]] <dt>SQLITE_CONFIG_PCACHE2</dt>
-** <dd> ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
-** a pointer to an [sqlite3_pcache_methods2] object. This object specifies
-** the interface to a custom page cache implementation.)^
-** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.</dd>
+** <dd> ^(This option takes a single argument which is a pointer to
+** an [sqlite3_pcache_methods2] object. This object specifies the interface
+** to a custom page cache implementation.)^ ^SQLite makes a copy of the
+** object and uses it for page cache memory allocations.</dd>
**
** [[SQLITE_CONFIG_GETPCACHE2]] <dt>SQLITE_CONFIG_GETPCACHE2</dt>
-** <dd> ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which
-** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies
-** the current page cache implementation into that object.)^ </dd>
+** <dd> ^(This option takes a single argument which is a pointer to an
+** [sqlite3_pcache_methods2] object. SQLite copies the current
+** page cache implementation into that object.)^ </dd>
**
** [[SQLITE_CONFIG_LOG]] <dt>SQLITE_CONFIG_LOG</dt>
** <dd> The SQLITE_CONFIG_LOG option is used to configure the SQLite
@@ -1710,11 +1627,10 @@ struct sqlite3_mem_methods {
** function must be threadsafe. </dd>
**
** [[SQLITE_CONFIG_URI]] <dt>SQLITE_CONFIG_URI
-** <dd>^(The SQLITE_CONFIG_URI option takes a single argument of type int.
-** If non-zero, then URI handling is globally enabled. If the parameter is zero,
-** then URI handling is globally disabled.)^ ^If URI handling is globally
-** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()],
-** [sqlite3_open16()] or
+** <dd>^(This option takes a single argument of type int. If non-zero, then
+** URI handling is globally enabled. If the parameter is zero, then URI handling
+** is globally disabled.)^ ^If URI handling is globally enabled, all filenames
+** passed to [sqlite3_open()], [sqlite3_open_v2()], [sqlite3_open16()] or
** specified as part of [ATTACH] commands are interpreted as URIs, regardless
** of whether or not the [SQLITE_OPEN_URI] flag is set when the database
** connection is opened. ^If it is globally disabled, filenames are
@@ -1724,10 +1640,9 @@ struct sqlite3_mem_methods {
** [SQLITE_USE_URI] symbol defined.)^
**
** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]] <dt>SQLITE_CONFIG_COVERING_INDEX_SCAN
-** <dd>^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer
-** argument which is interpreted as a boolean in order to enable or disable
-** the use of covering indices for full table scans in the query optimizer.
-** ^The default setting is determined
+** <dd>^This option takes a single integer argument which is interpreted as
+** a boolean in order to enable or disable the use of covering indices for
+** full table scans in the query optimizer. ^The default setting is determined
** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on"
** if that compile-time option is omitted.
** The ability to disable the use of covering indices for full table scans
@@ -1767,37 +1682,18 @@ struct sqlite3_mem_methods {
** ^The default setting can be overridden by each database connection using
** either the [PRAGMA mmap_size] command, or by using the
** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size
-** will be silently truncated if necessary so that it does not exceed the
-** compile-time maximum mmap size set by the
+** cannot be changed at run-time. Nor may the maximum allowed mmap size
+** exceed the compile-time maximum mmap size set by the
** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^
** ^If either argument to this option is negative, then that argument is
** changed to its compile-time default.
**
** [[SQLITE_CONFIG_WIN32_HEAPSIZE]]
** <dt>SQLITE_CONFIG_WIN32_HEAPSIZE
-** <dd>^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is
-** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro
-** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
+** <dd>^This option is only available if SQLite is compiled for Windows
+** with the [SQLITE_WIN32_MALLOC] pre-processor macro defined.
+** SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
** that specifies the maximum size of the created heap.
-**
-** [[SQLITE_CONFIG_PCACHE_HDRSZ]]
-** <dt>SQLITE_CONFIG_PCACHE_HDRSZ
-** <dd>^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which
-** is a pointer to an integer and writes into that integer the number of extra
-** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE].
-** The amount of extra space required can change depending on the compiler,
-** target platform, and SQLite version.
-**
-** [[SQLITE_CONFIG_PMASZ]]
-** <dt>SQLITE_CONFIG_PMASZ
-** <dd>^The SQLITE_CONFIG_PMASZ option takes a single parameter which
-** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded
-** sorter to that integer. The default minimum PMA Size is set by the
-** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched
-** to help with sort operations when multithreaded sorting
-** is enabled (using the [PRAGMA threads] command) and the amount of content
-** to be sorted exceeds the page size times the minimum of the
-** [PRAGMA cache_size] setting and this value.
** </dl>
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@@ -1823,8 +1719,6 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */
-#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */
-#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */
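As a rough usage sketch (not part of the header; the 4096-byte page size, the 40-byte per-page header allowance, and the 200-slot count are illustration values), the SQLITE_CONFIG_PAGECACHE option described above might be supplied like this before the library is initialized:

  #include "sqlite3.h"

  /* Static, 8-byte aligned buffer large enough for 200 page-cache slots of
  ** sz = 4096+40 bytes each (sz*N bytes total), per the rules above. */
  static sqlite3_uint64 aPageCache[((4096+40)*200 + 7)/8];

  int setup_page_cache(void){
    int rc = sqlite3_config(SQLITE_CONFIG_PAGECACHE,
                            (void*)aPageCache, 4096+40, 200);
    if( rc!=SQLITE_OK ) return rc;   /* must run before any other SQLite use */
    return sqlite3_initialize();
  }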
/*
** CAPI3REF: Database Connection Configuration Options
@@ -1891,17 +1785,15 @@ struct sqlite3_mem_methods {
/*
** CAPI3REF: Enable Or Disable Extended Result Codes
-** METHOD: sqlite3
**
** ^The sqlite3_extended_result_codes() routine enables or disables the
** [extended result codes] feature of SQLite. ^The extended result
** codes are disabled by default for historical compatibility.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff);
+SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff);
/*
** CAPI3REF: Last Insert Rowid
-** METHOD: sqlite3
**
** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables)
** has a unique 64-bit signed
@@ -1949,51 +1841,52 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff)
** unpredictable and might not equal either the old or the new
** last insert [rowid].
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
+SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
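A minimal sketch of the interface above, assuming a connection db and a table t1(x) that already exist; it performs an INSERT through sqlite3_exec() and then reads back the rowid of the new row:

  sqlite3_int64 insert_and_get_rowid(sqlite3 *db){
    /* Errors from sqlite3_exec() are ignored here for brevity. */
    sqlite3_exec(db, "INSERT INTO t1(x) VALUES(42)", 0, 0, 0);
    return sqlite3_last_insert_rowid(db);  /* rowid of the row just added */
  }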
/*
** CAPI3REF: Count The Number Of Rows Modified
-** METHOD: sqlite3
**
-** ^This function returns the number of rows modified, inserted or
-** deleted by the most recently completed INSERT, UPDATE or DELETE
-** statement on the database connection specified by the only parameter.
-** ^Executing any other type of SQL statement does not modify the value
-** returned by this function.
-**
-** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are
-** considered - auxiliary changes caused by [CREATE TRIGGER | triggers],
-** [foreign key actions] or [REPLACE] constraint resolution are not counted.
-**
-** Changes to a view that are intercepted by
-** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value
-** returned by sqlite3_changes() immediately after an INSERT, UPDATE or
-** DELETE statement run on a view is always zero. Only changes made to real
-** tables are counted.
-**
-** Things are more complicated if the sqlite3_changes() function is
-** executed while a trigger program is running. This may happen if the
-** program uses the [changes() SQL function], or if some other callback
-** function invokes sqlite3_changes() directly. Essentially:
-**
-** <ul>
-** <li> ^(Before entering a trigger program the value returned by
-** sqlite3_changes() function is saved. After the trigger program
-** has finished, the original value is restored.)^
-**
-** <li> ^(Within a trigger program each INSERT, UPDATE and DELETE
-** statement sets the value returned by sqlite3_changes()
-** upon completion as normal. Of course, this value will not include
-** any changes performed by sub-triggers, as the sqlite3_changes()
-** value will be saved and restored after each sub-trigger has run.)^
-** </ul>
-**
-** ^This means that if the changes() SQL function (or similar) is used
-** by the first INSERT, UPDATE or DELETE statement within a trigger, it
-** returns the value as set when the calling statement began executing.
-** ^If it is used by the second or subsequent such statement within a trigger
-** program, the value returned reflects the number of rows modified by the
-** previous INSERT, UPDATE or DELETE statement within the same trigger.
+** ^This function returns the number of database rows that were changed
+** or inserted or deleted by the most recently completed SQL statement
+** on the [database connection] specified by the first parameter.
+** ^(Only changes that are directly specified by the [INSERT], [UPDATE],
+** or [DELETE] statement are counted. Auxiliary changes caused by
+** triggers or [foreign key actions] are not counted.)^ Use the
+** [sqlite3_total_changes()] function to find the total number of changes
+** including changes caused by triggers and foreign key actions.
+**
+** ^Changes to a view that are simulated by an [INSTEAD OF trigger]
+** are not counted. Only real table changes are counted.
+**
+** ^(A "row change" is a change to a single row of a single table
+** caused by an INSERT, DELETE, or UPDATE statement. Rows that
+** are changed as side effects of [REPLACE] constraint resolution,
+** rollback, ABORT processing, [DROP TABLE], or by any other
+** mechanisms do not count as direct row changes.)^
+**
+** A "trigger context" is a scope of execution that begins and
+** ends with the script of a [CREATE TRIGGER | trigger].
+** Most SQL statements are
+** evaluated outside of any trigger. This is the "top level"
+** trigger context. If a trigger fires from the top level, a
+** new trigger context is entered for the duration of that one
+** trigger. Subtriggers create subcontexts for their duration.
+**
+** ^Calling [sqlite3_exec()] or [sqlite3_step()] recursively does
+** not create a new trigger context.
+**
+** ^This function returns the number of direct row changes in the
+** most recent INSERT, UPDATE, or DELETE statement within the same
+** trigger context.
+**
+** ^Thus, when called from the top level, this function returns the
+** number of changes in the most recent INSERT, UPDATE, or DELETE
+** that also occurred at the top level. ^(Within the body of a trigger,
+** the sqlite3_changes() interface can be called to find the number of
+** changes in the most recently completed INSERT, UPDATE, or DELETE
+** statement within the body of the same trigger.
+** However, the number returned does not include changes
+** caused by subtriggers since those have their own context.)^
**
** See also the [sqlite3_total_changes()] interface, the
** [count_changes pragma], and the [changes() SQL function].
@@ -2002,23 +1895,25 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
** while [sqlite3_changes()] is running then the value returned
** is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
+SQLITE_API int sqlite3_changes(sqlite3*);
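A small sketch of sqlite3_changes(), again assuming a table t1(x); the value returned reflects only the rows touched directly by the UPDATE, per the rules above:

  int update_and_count(sqlite3 *db){
    sqlite3_exec(db, "UPDATE t1 SET x=x+1 WHERE x<10", 0, 0, 0);
    return sqlite3_changes(db);  /* direct row changes of the UPDATE only */
  }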
/*
** CAPI3REF: Total Number Of Rows Modified
-** METHOD: sqlite3
**
-** ^This function returns the total number of rows inserted, modified or
-** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed
-** since the database connection was opened, including those executed as
-** part of trigger programs. ^Executing any other type of SQL statement
-** does not affect the value returned by sqlite3_total_changes().
-**
-** ^Changes made as part of [foreign key actions] are included in the
-** count, but those made as part of REPLACE constraint resolution are
-** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
-** are not counted.
-**
+** ^This function returns the number of row changes caused by [INSERT],
+** [UPDATE] or [DELETE] statements since the [database connection] was opened.
+** ^(The count returned by sqlite3_total_changes() includes all changes
+** from all [CREATE TRIGGER | trigger] contexts and changes made by
+** [foreign key actions]. However,
+** the count does not include changes used to implement [REPLACE] constraints,
+** do rollbacks or ABORT processing, or [DROP TABLE] processing. The
+** count does not include rows of views that fire an [INSTEAD OF trigger],
+** though if the INSTEAD OF trigger makes changes of its own, those changes
+** are counted.)^
+** ^The sqlite3_total_changes() function counts the changes as soon as
+** the statement that makes them is completed (when the statement handle
+** is passed to [sqlite3_reset()] or [sqlite3_finalize()]).
+**
** See also the [sqlite3_changes()] interface, the
** [count_changes pragma], and the [total_changes() SQL function].
**
@@ -2026,11 +1921,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
** while [sqlite3_total_changes()] is running then the value
** returned is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
+SQLITE_API int sqlite3_total_changes(sqlite3*);
/*
** CAPI3REF: Interrupt A Long-Running Query
-** METHOD: sqlite3
**
** ^This function causes any pending database operation to abort and
** return at its earliest opportunity. This routine is typically
@@ -2066,7 +1960,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
** If the database connection closes while [sqlite3_interrupt()]
** is running then bad things will likely happen.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
+SQLITE_API void sqlite3_interrupt(sqlite3*);
/*
** CAPI3REF: Determine If An SQL Statement Is Complete
@@ -2101,41 +1995,33 @@ SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
** The input to [sqlite3_complete16()] must be a zero-terminated
** UTF-16 string in native byte order.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *sql);
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
+SQLITE_API int sqlite3_complete(const char *sql);
+SQLITE_API int sqlite3_complete16(const void *sql);
/*
** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
-** KEYWORDS: {busy-handler callback} {busy handler}
-** METHOD: sqlite3
-**
-** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X
-** that might be invoked with argument P whenever
-** an attempt is made to access a database table associated with
-** [database connection] D when another thread
-** or process has the table locked.
-** The sqlite3_busy_handler() interface is used to implement
-** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout].
-**
-** ^If the busy callback is NULL, then [SQLITE_BUSY]
+**
+** ^This routine sets a callback function that might be invoked whenever
+** an attempt is made to open a database table that another thread
+** or process has locked.
+**
+** ^If the busy callback is NULL, then [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]
** is returned immediately upon encountering the lock. ^If the busy callback
** is not NULL, then the callback might be invoked with two arguments.
**
** ^The first argument to the busy handler is a copy of the void* pointer which
** is the third argument to sqlite3_busy_handler(). ^The second argument to
** the busy handler callback is the number of times that the busy handler has
-** been invoked previously for the same locking event. ^If the
+** been invoked for this locking event. ^If the
** busy callback returns 0, then no additional attempts are made to
-** access the database and [SQLITE_BUSY] is returned
-** to the application.
+** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned.
** ^If the callback returns non-zero, then another attempt
-** is made to access the database and the cycle repeats.
+** is made to open the database for reading and the cycle repeats.
**
** The presence of a busy handler does not guarantee that it will be invoked
** when there is lock contention. ^If SQLite determines that invoking the busy
** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY]
-** to the application instead of invoking the
-** busy handler.
+** or [SQLITE_IOERR_BLOCKED] instead of invoking the busy handler.
** Consider a scenario where one process is holding a read lock that
** it is trying to promote to a reserved lock and
** a second process is holding a reserved lock that it is trying
@@ -2149,48 +2035,57 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
**
** ^The default busy callback is NULL.
**
+** ^The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED]
+** when SQLite is in the middle of a large transaction where all the
+** changes will not fit into the in-memory cache. SQLite will
+** already hold a RESERVED lock on the database file, but it needs
+** to promote this lock to EXCLUSIVE so that it can spill cache
+** pages into the database file without harm to concurrent
+** readers. ^If it is unable to promote the lock, then the in-memory
+** cache will be left in an inconsistent state and so the error
+** code is promoted from the relatively benign [SQLITE_BUSY] to
+** the more severe [SQLITE_IOERR_BLOCKED]. ^This error code promotion
+** forces an automatic rollback of the changes. See the
+** <a href="/cvstrac/wiki?p=CorruptionFollowingBusyError">
+** CorruptionFollowingBusyError</a> wiki page for a discussion of why
+** this is important.
+**
** ^(There can only be a single busy handler defined for each
** [database connection]. Setting a new busy handler clears any
** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()]
-** or evaluating [PRAGMA busy_timeout=N] will change the
-** busy handler and thus clear any previously set busy handler.
+** will also set or clear the busy handler.
**
** The busy callback should not take any actions which modify the
-** database connection that invoked the busy handler. In other words,
-** the busy handler is not reentrant. Any such actions
+** database connection that invoked the busy handler. Any such actions
** result in undefined behavior.
**
** A busy handler must not close the database connection
** or [prepared statement] that invoked the busy handler.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*);
+SQLITE_API int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*);
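A hedged sketch of a busy handler with the callback shape described above: it retries up to ten times, sleeping 50 ms between attempts via sqlite3_sleep(); both the retry limit and the delay are arbitrary illustration values:

  static int my_busy_handler(void *pArg, int nPrior){
    (void)pArg;
    if( nPrior>=10 ) return 0;   /* give up: the caller sees SQLITE_BUSY */
    sqlite3_sleep(50);           /* wait a little, then ask for a retry */
    return 1;
  }

  /* Registration: sqlite3_busy_handler(db, my_busy_handler, 0); */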
/*
** CAPI3REF: Set A Busy Timeout
-** METHOD: sqlite3
**
** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps
** for a specified amount of time when a table is locked. ^The handler
** will sleep multiple times until at least "ms" milliseconds of sleeping
** have accumulated. ^After at least "ms" milliseconds of sleeping,
** the handler returns 0 which causes [sqlite3_step()] to return
-** [SQLITE_BUSY].
+** [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED].
**
** ^Calling this routine with an argument less than or equal to zero
** turns off all busy handlers.
**
** ^(There can only be a single busy handler for a particular
-** [database connection] at any given moment. If another busy handler
+** [database connection] at any given moment. If another busy handler

** was defined (using [sqlite3_busy_handler()]) prior to calling
** this routine, that other busy handler is cleared.)^
-**
-** See also: [PRAGMA busy_timeout]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
+SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
/*
** CAPI3REF: Convenience Routines For Running Queries
-** METHOD: sqlite3
**
** This is a legacy interface that is preserved for backwards compatibility.
** Use of this interface is not recommended.
@@ -2261,7 +2156,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
** reflected in subsequent calls to [sqlite3_errcode()] or
** [sqlite3_errmsg()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
+SQLITE_API int sqlite3_get_table(
sqlite3 *db, /* An open database */
const char *zSql, /* SQL to be evaluated */
char ***pazResult, /* Results of the query */
@@ -2269,17 +2164,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
int *pnColumn, /* Number of result columns written here */
char **pzErrmsg /* Error msg written here */
);
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
+SQLITE_API void sqlite3_free_table(char **result);
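A sketch of the legacy sqlite3_get_table()/sqlite3_free_table() pairing documented above; the query and table name are assumptions, and the extra leading row of the result array holds the column names:

  #include <stdio.h>

  int dump_table(sqlite3 *db){
    char **azResult = 0;
    char *zErr = 0;
    int nRow = 0, nCol = 0, i;
    int rc = sqlite3_get_table(db, "SELECT * FROM t1",
                               &azResult, &nRow, &nCol, &zErr);
    if( rc==SQLITE_OK ){
      for(i=0; i<(nRow+1)*nCol; i++){   /* first nCol entries are headers */
        printf("%s\n", azResult[i] ? azResult[i] : "NULL");
      }
    }
    sqlite3_free_table(azResult);       /* always release the result array */
    sqlite3_free(zErr);
    return rc;
  }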
/*
** CAPI3REF: Formatted String Printing Functions
**
** These routines are work-alikes of the "printf()" family of functions
** from the standard C library.
-** These routines understand most of the common K&R formatting options,
-** plus some additional non-standard formats, detailed below.
-** Note that some of the more obscure formatting options from recent
-** C-library standards are omitted from this implementation.
**
** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their
** results into memory obtained from [sqlite3_malloc()].
@@ -2312,7 +2203,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
** These routines all implement some additional formatting
** options that are useful for constructing SQL statements.
** All of the usual printf() formatting options apply. In addition, there
-** is are "%q", "%Q", "%w" and "%z" options.
+** is are "%q", "%Q", and "%z" options.
**
** ^(The %q option works like %s in that it substitutes a nul-terminated
** string from the argument list. But %q also doubles every '\'' character.
@@ -2365,20 +2256,14 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
** The code above will render a correct SQL statement in the zSQL
** variable even if the zText variable is a NULL pointer.
**
-** ^(The "%w" formatting option is like "%q" except that it expects to
-** be contained within double-quotes instead of single quotes, and it
-** escapes the double-quote character instead of the single-quote
-** character.)^ The "%w" formatting option is intended for safely inserting
-** table and column names into a constructed SQL statement.
-**
** ^(The "%z" formatting option works like "%s" but with the
** addition that after the string has been read and copied into
** the result, [sqlite3_free()] is called on the input string.)^
*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char*,...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char*, va_list);
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int,char*,const char*, ...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list);
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
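A sketch of the %q substitution discussed above, assuming a caller-supplied name that may contain single quotes; the returned string must be released with sqlite3_free():

  char *make_insert_sql(const char *zName){
    /* %q doubles any ' characters in zName, keeping the literal well formed */
    return sqlite3_mprintf("INSERT INTO t1(name) VALUES('%q')", zName);
  }

  /* char *zSql = make_insert_sql(userInput); ... sqlite3_free(zSql); */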
/*
** CAPI3REF: Memory Allocation Subsystem
@@ -2395,10 +2280,6 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
** a NULL pointer.
**
-** ^The sqlite3_malloc64(N) routine works just like
-** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead
-** of a signed 32-bit integer.
-**
** ^Calling sqlite3_free() with a pointer previously returned
** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
** that it might be reused. ^The sqlite3_free() routine is
@@ -2410,38 +2291,24 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** might result if sqlite3_free() is called with a non-NULL pointer that
** was not obtained from sqlite3_malloc() or sqlite3_realloc().
**
-** ^The sqlite3_realloc(X,N) interface attempts to resize a
-** prior memory allocation X to be at least N bytes.
-** ^If the X parameter to sqlite3_realloc(X,N)
+** ^(The sqlite3_realloc() interface attempts to resize a
+** prior memory allocation to be at least N bytes, where N is the
+** second parameter. The memory allocation to be resized is the first
+** parameter.)^ ^ If the first parameter to sqlite3_realloc()
** is a NULL pointer then its behavior is identical to calling
-** sqlite3_malloc(N).
-** ^If the N parameter to sqlite3_realloc(X,N) is zero or
+** sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc().
+** ^If the second parameter to sqlite3_realloc() is zero or
** negative then the behavior is exactly the same as calling
-** sqlite3_free(X).
-** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation
-** of at least N bytes in size or NULL if insufficient memory is available.
+** sqlite3_free(P) where P is the first parameter to sqlite3_realloc().
+** ^sqlite3_realloc() returns a pointer to a memory allocation
+** of at least N bytes in size or NULL if sufficient memory is unavailable.
** ^If M is the size of the prior allocation, then min(N,M) bytes
** of the prior allocation are copied into the beginning of buffer returned
-** by sqlite3_realloc(X,N) and the prior allocation is freed.
-** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
-** prior allocation is not freed.
-**
-** ^The sqlite3_realloc64(X,N) interfaces works the same as
-** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
-** of a 32-bit signed integer.
-**
-** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
-** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
-** sqlite3_msize(X) returns the size of that memory allocation in bytes.
-** ^The value returned by sqlite3_msize(X) might be larger than the number
-** of bytes requested when X was allocated. ^If X is a NULL pointer then
-** sqlite3_msize(X) returns zero. If X points to something that is not
-** the beginning of memory allocation, or if it points to a formerly
-** valid memory allocation that has now been freed, then the behavior
-** of sqlite3_msize(X) is undefined and possibly harmful.
-**
-** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
-** sqlite3_malloc64(), and sqlite3_realloc64()
+** by sqlite3_realloc() and the prior allocation is freed.
+** ^If sqlite3_realloc() returns NULL, then the prior allocation
+** is not freed.
+**
+** ^The memory returned by sqlite3_malloc() and sqlite3_realloc()
** is always aligned to at least an 8 byte boundary, or to a
** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
** option is used.
@@ -2468,12 +2335,9 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** a block of memory after it has been released using
** [sqlite3_free()] or [sqlite3_realloc()].
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void*, int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void*, sqlite3_uint64);
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void*);
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void sqlite3_free(void*);
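A sketch of the realloc contract spelled out above: when sqlite3_realloc() fails it returns NULL and leaves the prior allocation intact, so the caller must still free it:

  void *grow_buffer(void *pOld, int nNew){
    void *pNew = sqlite3_realloc(pOld, nNew);
    if( pNew==0 ){
      sqlite3_free(pOld);   /* prior allocation was not freed on failure */
    }
    return pNew;
  }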
/*
** CAPI3REF: Memory Allocator Statistics
@@ -2498,8 +2362,8 @@ SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
** by [sqlite3_memory_highwater(1)] is the high-water mark
** prior to the reset.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
/*
** CAPI3REF: Pseudo-Random Number Generator
@@ -2511,22 +2375,18 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
** applications to access the same PRNG for other purposes.
**
** ^A call to this routine stores N bytes of randomness into buffer P.
-** ^The P parameter can be a NULL pointer.
-**
-** ^If this routine has not been previously called or if the previous
-** call had N less than one or a NULL pointer for P, then the PRNG is
-** seeded using randomness obtained from the xRandomness method of
-** the default [sqlite3_vfs] object.
-** ^If the previous call to this routine had an N of 1 or more and a
-** non-NULL P then the pseudo-randomness is generated
+**
+** ^The first time this routine is invoked (either internally or by
+** the application) the PRNG is seeded using randomness obtained
+** from the xRandomness method of the default [sqlite3_vfs] object.
+** ^On all subsequent invocations, the pseudo-randomness is generated
** internally and without recourse to the [sqlite3_vfs] xRandomness
** method.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
+SQLITE_API void sqlite3_randomness(int N, void *P);
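A one-line sketch: fill a small caller-provided buffer with bytes from the PRNG described above:

  void make_nonce(unsigned char aOut[16]){
    sqlite3_randomness(16, aOut);   /* 16 bytes of pseudo-randomness */
  }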
/*
** CAPI3REF: Compile-Time Authorization Callbacks
-** METHOD: sqlite3
**
** ^This routine registers an authorizer callback with a particular
** [database connection], supplied in the first argument.
@@ -2605,7 +2465,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
** as stated in the previous paragraph, sqlite3_step() invokes
** sqlite3_prepare_v2() to reprepare a statement after a schema change.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
+SQLITE_API int sqlite3_set_authorizer(
sqlite3*,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pUserData
@@ -2620,8 +2480,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
** [sqlite3_set_authorizer | authorizer documentation] for additional
** information.
**
-** Note that SQLITE_IGNORE is also used as a [conflict resolution mode]
-** returned from the [sqlite3_vtab_on_conflict()] interface.
+** Note that SQLITE_IGNORE is also used as a [SQLITE_ROLLBACK | return code]
+** from the [sqlite3_vtab_on_conflict()] interface.
*/
#define SQLITE_DENY 1 /* Abort the SQL statement with an error */
#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */
@@ -2679,11 +2539,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
#define SQLITE_FUNCTION 31 /* NULL Function Name */
#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */
#define SQLITE_COPY 0 /* No longer used */
-#define SQLITE_RECURSIVE 33 /* NULL NULL */
/*
** CAPI3REF: Tracing And Profiling Functions
-** METHOD: sqlite3
**
** These routines register callback functions that can be used for
** tracing and profiling the execution of SQL statements.
@@ -2710,13 +2568,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
** sqlite3_profile() function is considered experimental and is
** subject to change in future versions of SQLite.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
-SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
+SQLITE_API void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
+SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*,
void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
/*
** CAPI3REF: Query Progress Callbacks
-** METHOD: sqlite3
**
** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
** function X to be invoked periodically during long running calls to
@@ -2746,11 +2603,10 @@ SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
** database connections for the meaning of "modify" in this paragraph.
**
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
+SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
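A sketch of a progress handler: the callback runs roughly every 1000 virtual-machine instructions and aborts the running statement once a hypothetical cancellation flag is set:

  static volatile int g_cancel = 0;    /* set from elsewhere to cancel */

  static int progress_cb(void *pArg){
    (void)pArg;
    return g_cancel;                   /* non-zero interrupts the statement */
  }

  /* Registration: sqlite3_progress_handler(db, 1000, progress_cb, 0); */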
/*
** CAPI3REF: Opening A New Database Connection
-** CONSTRUCTOR: sqlite3
**
** ^These routines open an SQLite database file as specified by the
** filename argument. ^The filename argument is interpreted as UTF-8 for
@@ -2765,9 +2621,9 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** an English language description of the error following a failure of any
** of the sqlite3_open() routines.
**
-** ^The default encoding will be UTF-8 for databases created using
-** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases
-** created using sqlite3_open16() will be UTF-16 in the native byte order.
+** ^The default encoding for the database will be UTF-8 if
+** sqlite3_open() or sqlite3_open_v2() is called and
+** UTF-16 in the native byte order if sqlite3_open16() is used.
**
** Whether or not an error occurs when it is opened, resources
** associated with the [database connection] handle should be released by
@@ -2855,14 +2711,13 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** then it is interpreted as an absolute path. ^If the path does not begin
** with a '/' (meaning that the authority section is omitted from the URI)
** then the path is interpreted as a relative path.
-** ^(On windows, the first component of an absolute path
-** is a drive specification (e.g. "C:").)^
+** ^On windows, the first component of an absolute path
+** is a drive specification (e.g. "C:").
**
** [[core URI query parameters]]
** The query component of a URI may contain parameters that are interpreted
** either by SQLite itself, or by a [VFS | custom VFS implementation].
-** SQLite and its built-in [VFSes] interpret the
-** following query parameters:
+** SQLite interprets the following three query parameters:
**
** <ul>
** <li> <b>vfs</b>: ^The "vfs" parameter may be used to specify the name of
@@ -2896,28 +2751,6 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** ^If sqlite3_open_v2() is used and the "cache" parameter is present in
** a URI filename, its value overrides any behavior requested by setting
** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
-**
-** <li> <b>psow</b>: ^The psow parameter indicates whether or not the
-** [powersafe overwrite] property does or does not apply to the
-** storage media on which the database file resides.
-**
-** <li> <b>nolock</b>: ^The nolock parameter is a boolean query parameter
-** which if set disables file locking in rollback journal modes. This
-** is useful for accessing a database on a filesystem that does not
-** support locking. Caution: Database corruption might result if two
-** or more processes write to the same database and any one of those
-** processes uses nolock=1.
-**
-** <li> <b>immutable</b>: ^The immutable parameter is a boolean query
-** parameter that indicates that the database file is stored on
-** read-only media. ^When immutable is set, SQLite assumes that the
-** database file cannot be changed, even by a process with higher
-** privilege, and so the database is opened read-only and all locking
-** and change detection is disabled. Caution: Setting the immutable
-** property on a database file that does in fact change can result
-** in incorrect query results and/or [SQLITE_CORRUPT] errors.
-** See also: [SQLITE_IOCAP_IMMUTABLE].
-**
** </ul>
**
** ^Specifying an unknown parameter in the query component of a URI is not an
@@ -2947,9 +2780,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
** Open file "data.db" in the current directory for read-only access.
** Regardless of whether or not shared-cache mode is enabled by
** default, use a private cache.
-** <tr><td> file:/home/fred/data.db?vfs=unix-dotfile <td>
-** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
-** that uses dot-files in place of posix advisory locking.
+** <tr><td> file:/home/fred/data.db?vfs=unix-nolock <td>
+** Open file "/home/fred/data.db". Use the special VFS "unix-nolock".
** <tr><td> file:data.db?mode=readonly <td>
** An error. "readonly" is not a valid option for the "mode" parameter.
** </table>
@@ -2975,15 +2807,15 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
**
** See also: [sqlite3_temp_directory]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
+SQLITE_API int sqlite3_open(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
+SQLITE_API int sqlite3_open16(
const void *filename, /* Database filename (UTF-16) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
+SQLITE_API int sqlite3_open_v2(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb, /* OUT: SQLite db handle */
int flags, /* Flags */
@@ -3029,22 +2861,19 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
** VFS method, then the behavior of this routine is undefined and probably
** undesirable.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam);
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
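A sketch of opening a database through a URI filename with the mode and cache query parameters described above; passing SQLITE_OPEN_URI makes the URI interpretation explicit regardless of the global setting, and the filename is an illustration value:

  int open_readonly_uri(sqlite3 **ppDb){
    return sqlite3_open_v2("file:data.db?mode=ro&cache=private", ppDb,
                           SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, 0);
  }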
/*
** CAPI3REF: Error Codes And Messages
-** METHOD: sqlite3
-**
-** ^If the most recent sqlite3_* API call associated with
-** [database connection] D failed, then the sqlite3_errcode(D) interface
-** returns the numeric [result code] or [extended result code] for that
-** API call.
-** If the most recent API call was successful,
-** then the return value from sqlite3_errcode() is undefined.
-** ^The sqlite3_extended_errcode()
+**
+** ^The sqlite3_errcode() interface returns the numeric [result code] or
+** [extended result code] for the most recent failed sqlite3_* API call
+** associated with a [database connection]. If a prior API call failed
+** but the most recent API call succeeded, the return value from
+** sqlite3_errcode() is undefined. ^The sqlite3_extended_errcode()
** interface is the same except that it always returns the
** [extended result code] even when extended result codes are
** disabled.
@@ -3075,41 +2904,40 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const cha
** was invoked incorrectly by the application. In that case, the
** error code and message may or may not be set.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db);
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3*);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int);
+SQLITE_API int sqlite3_errcode(sqlite3 *db);
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db);
+SQLITE_API const char *sqlite3_errmsg(sqlite3*);
+SQLITE_API const void *sqlite3_errmsg16(sqlite3*);
+SQLITE_API const char *sqlite3_errstr(int);
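A small error-reporting sketch using the routines above; it assumes a failed call has just occurred on the connection:

  #include <stdio.h>

  void report_error(sqlite3 *db){
    fprintf(stderr, "sqlite error %d: %s\n",
            sqlite3_extended_errcode(db), sqlite3_errmsg(db));
  }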
/*
-** CAPI3REF: Prepared Statement Object
+** CAPI3REF: SQL Statement Object
** KEYWORDS: {prepared statement} {prepared statements}
**
-** An instance of this object represents a single SQL statement that
-** has been compiled into binary form and is ready to be evaluated.
-**
-** Think of each SQL statement as a separate computer program. The
-** original SQL text is source code. A prepared statement object
-** is the compiled object code. All SQL must be converted into a
-** prepared statement before it can be run.
+** An instance of this object represents a single SQL statement.
+** This object is variously known as a "prepared statement" or a
+** "compiled SQL statement" or simply as a "statement".
**
-** The life-cycle of a prepared statement object usually goes like this:
+** The life of a statement object goes something like this:
**
** <ol>
-** <li> Create the prepared statement object using [sqlite3_prepare_v2()].
-** <li> Bind values to [parameters] using the sqlite3_bind_*()
+** <li> Create the object using [sqlite3_prepare_v2()] or a related
+** function.
+** <li> Bind values to [host parameters] using the sqlite3_bind_*()
** interfaces.
** <li> Run the SQL by calling [sqlite3_step()] one or more times.
-** <li> Reset the prepared statement using [sqlite3_reset()] then go back
+** <li> Reset the statement using [sqlite3_reset()] then go back
** to step 2. Do this zero or more times.
** <li> Destroy the object using [sqlite3_finalize()].
** </ol>
+**
+** Refer to documentation on individual methods above for additional
+** information.
*/
typedef struct sqlite3_stmt sqlite3_stmt;
/*
** CAPI3REF: Run-time Limits
-** METHOD: sqlite3
**
** ^(This interface allows the size of various constructs to be limited
** on a connection by connection basis. The first parameter is the
@@ -3147,7 +2975,7 @@ typedef struct sqlite3_stmt sqlite3_stmt;
**
** New run-time limit categories may be added in future releases.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
+SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
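A sketch of lowering one run-time limit on a single connection; SQLITE_LIMIT_SQL_LENGTH is one of the categories listed below, and the 100000-byte cap is an illustration value:

  int cap_sql_length(sqlite3 *db){
    /* Returns the previous value of the limit, per the description above. */
    return sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);
  }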
/*
** CAPI3REF: Run-Time Limit Categories
@@ -3199,10 +3027,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
**
** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(<dt>SQLITE_LIMIT_TRIGGER_DEPTH</dt>
** <dd>The maximum depth of recursion for triggers.</dd>)^
-**
-** [[SQLITE_LIMIT_WORKER_THREADS]] ^(<dt>SQLITE_LIMIT_WORKER_THREADS</dt>
-** <dd>The maximum number of auxiliary worker threads that a single
-** [prepared statement] may start.</dd>)^
** </dl>
*/
#define SQLITE_LIMIT_LENGTH 0
@@ -3216,13 +3040,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8
#define SQLITE_LIMIT_VARIABLE_NUMBER 9
#define SQLITE_LIMIT_TRIGGER_DEPTH 10
-#define SQLITE_LIMIT_WORKER_THREADS 11
/*
** CAPI3REF: Compiling An SQL Statement
** KEYWORDS: {SQL statement compiler}
-** METHOD: sqlite3
-** CONSTRUCTOR: sqlite3_stmt
**
** To execute an SQL query, it must first be compiled into a byte-code
** program using one of these routines.
@@ -3236,14 +3057,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2()
** use UTF-16.
**
-** ^If the nByte argument is negative, then zSql is read up to the
-** first zero terminator. ^If nByte is positive, then it is the
-** number of bytes read from zSql. ^If nByte is zero, then no prepared
-** statement is generated.
-** If the caller knows that the supplied string is nul-terminated, then
-** there is a small performance advantage to passing an nByte parameter that
-** is the number of bytes in the input string <i>including</i>
-** the nul-terminator.
+** ^If the nByte argument is less than zero, then zSql is read up to the
+** first zero terminator. ^If nByte is non-negative, then it is the maximum
+** number of bytes read from zSql. ^When nByte is non-negative, the
+** zSql string ends at either the first '\000' or '\u0000' character or
+** the nByte-th byte, whichever comes first. If the caller knows
+** that the supplied string is nul-terminated, then there is a small
+** performance advantage to be gained by passing an nByte parameter that
+** is equal to the number of bytes in the input string <i>including</i>
+** the nul-terminator bytes as this saves SQLite from having to
+** make a copy of the input string.
**
** ^If pzTail is not NULL then *pzTail is made to point to the first byte
** past the end of the first SQL statement in zSql. These routines only
@@ -3299,28 +3122,28 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
** </li>
** </ol>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
+SQLITE_API int sqlite3_prepare(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
+SQLITE_API int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
+SQLITE_API int sqlite3_prepare16(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const void **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
+SQLITE_API int sqlite3_prepare16_v2(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
@@ -3330,17 +3153,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
/*
** CAPI3REF: Retrieving Statement SQL
-** METHOD: sqlite3_stmt
**
** ^This interface can be used to retrieve a saved copy of the original
** SQL text used to create a [prepared statement] if that statement was
** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If An SQL Statement Writes The Database
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if
** and only if the [prepared statement] X makes no direct changes to
@@ -3368,16 +3189,14 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
** change the configuration of a database connection, they do not make
** changes to the content of the database files on disk.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the
** [prepared statement] S has been stepped at least once using
-** [sqlite3_step(S)] but has neither run to completion (returned
-** [SQLITE_DONE] from [sqlite3_step(S)]) nor
+** [sqlite3_step(S)] but has not run to completion and/or has not
** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S)
** interface returns false if S is a NULL pointer. If S is not a
** NULL pointer and is not a pointer to a valid [prepared statement]
@@ -3389,7 +3208,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
** for example, in diagnostic routines to search for prepared
** statements that are holding a transaction open.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*);
/*
** CAPI3REF: Dynamically Typed Value Object
@@ -3404,9 +3223,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
** Some interfaces require a protected sqlite3_value. Other interfaces
** will accept either a protected or an unprotected sqlite3_value.
** Every interface that accepts sqlite3_value arguments specifies
-** whether or not it requires a protected sqlite3_value. The
-** [sqlite3_value_dup()] interface can be used to construct a new
-** protected sqlite3_value from an unprotected sqlite3_value.
+** whether or not it requires a protected sqlite3_value.
**
** The terms "protected" and "unprotected" refer to whether or not
** a mutex is held. An internal mutex is held for a protected
@@ -3450,7 +3267,6 @@ typedef struct sqlite3_context sqlite3_context;
** CAPI3REF: Binding Values To Prepared Statements
** KEYWORDS: {host parameter} {host parameters} {host parameter name}
** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding}
-** METHOD: sqlite3_stmt
**
** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
** literals may be replaced by a [parameter] that matches one of following
@@ -3497,18 +3313,18 @@ typedef struct sqlite3_context sqlite3_context;
** If the fourth parameter to sqlite3_bind_blob() is negative, then
** the behavior is undefined.
** If a non-negative fourth parameter is provided to sqlite3_bind_text()
-** or sqlite3_bind_text16() or sqlite3_bind_text64() then
-** that parameter must be the byte offset
+** or sqlite3_bind_text16() then that parameter must be the byte offset
** where the NUL terminator would occur assuming the string were NUL
** terminated. If any NUL characters occur at byte offsets less than
** the value of the fourth parameter then the resulting string value will
** contain embedded NULs. The result of expressions involving strings
** with embedded NULs is undefined.
**
-** ^The fifth argument to the BLOB and string binding interfaces
-** is a destructor used to dispose of the BLOB or
+** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and
+** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or
** string after SQLite has finished with it. ^The destructor is called
-** to dispose of the BLOB or string even if the call to bind API fails.
+** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(),
+** sqlite3_bind_text(), or sqlite3_bind_text16() fails.
** ^If the fifth argument is
** the special value [SQLITE_STATIC], then SQLite assumes that the
** information is in static, unmanaged space and does not need to be freed.
@@ -3516,14 +3332,6 @@ typedef struct sqlite3_context sqlite3_context;
** SQLite makes its own private copy of the data immediately, before
** the sqlite3_bind_*() routine returns.
**
-** ^The sixth argument to sqlite3_bind_text64() must be one of
-** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]
-** to specify the encoding of the text in the third parameter. If
-** the sixth argument to sqlite3_bind_text64() is not one of the
-** allowed values shown above, or if the text encoding is different
-** from the encoding specified by the sixth parameter, then the behavior
-** is undefined.
-**
** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that
** is filled with zeroes. ^A zeroblob uses a fixed amount of memory
** (just an integer to hold its size) while it is being processed.
@@ -3544,33 +3352,24 @@ typedef struct sqlite3_context sqlite3_context;
**
** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an
** [error code] if anything goes wrong.
-** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB
-** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or
-** [SQLITE_MAX_LENGTH].
** ^[SQLITE_RANGE] is returned if the parameter
** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails.
**
** See also: [sqlite3_bind_parameter_count()],
** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
- void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt*, int, double);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt*, int, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt*, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
- void(*)(void*), unsigned char encoding);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
+SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double);
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int);
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int);
+SQLITE_API int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
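Taken together, the binding interfaces above are normally called right after preparing a statement and before the first sqlite3_step(). A minimal usage sketch (the users table, its columns, and the insert_user() helper are illustrative only; SQLITE_TRANSIENT asks SQLite to make its own private copy of the text):

#include <sqlite3.h>

/* Bind one text and one integer parameter, then run the statement once. */
static int insert_user(sqlite3 *db, const char *zName, int age){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO users(name, age) VALUES(?1, ?2)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int(pStmt, 2, age);
  rc = sqlite3_step(pStmt);              /* SQLITE_DONE on success */
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}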
/*
** CAPI3REF: Number Of SQL Parameters
-** METHOD: sqlite3_stmt
**
** ^This routine can be used to find the number of [SQL parameters]
** in a [prepared statement]. SQL parameters are tokens of the
@@ -3587,11 +3386,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite
** [sqlite3_bind_parameter_name()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*);
/*
** CAPI3REF: Name Of A Host Parameter
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_bind_parameter_name(P,N) interface returns
** the name of the N-th [SQL parameter] in the [prepared statement] P.
@@ -3615,11 +3413,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*, int);
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
/*
** CAPI3REF: Index Of A Parameter With A Given Name
-** METHOD: sqlite3_stmt
**
** ^Return the index of an SQL parameter given its name. ^The
** index value returned is suitable for use as the second
@@ -3630,23 +3427,21 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*,
**
** See also: [sqlite3_bind_blob|sqlite3_bind()],
** [sqlite3_bind_parameter_count()], and
-** [sqlite3_bind_parameter_name()].
+** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
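When a statement uses named parameters, the index can be recovered at run time. A small sketch, assuming the statement contains a parameter literally spelled ":name" (the helper itself is hypothetical):

/* Bind a UTF-8 string to the ":name" parameter, if present.
** Returns the parameter index used, or 0 if the statement has no ":name". */
static int bind_named_text(sqlite3_stmt *pStmt, const char *zValue){
  int idx = sqlite3_bind_parameter_index(pStmt, ":name");
  if( idx>0 ){
    sqlite3_bind_text(pStmt, idx, zValue, -1, SQLITE_TRANSIENT);
  }
  return idx;
}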
/*
** CAPI3REF: Reset All Bindings On A Prepared Statement
-** METHOD: sqlite3_stmt
**
** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset
** the [sqlite3_bind_blob | bindings] on a [prepared statement].
** ^Use this routine to reset all host parameters to NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
/*
** CAPI3REF: Number Of Columns In A Result Set
-** METHOD: sqlite3_stmt
**
** ^Return the number of columns in the result set returned by the
** [prepared statement]. ^This routine returns 0 if pStmt is an SQL
@@ -3654,11 +3449,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
**
** See also: [sqlite3_data_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Column Names In A Result Set
-** METHOD: sqlite3_stmt
**
** ^These routines return the name assigned to a particular column
** in the result set of a [SELECT] statement. ^The sqlite3_column_name()
@@ -3683,12 +3477,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
** then the name of the column is unspecified and may change from
** one release of SQLite to the next.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt*, int N);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N);
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N);
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N);
/*
** CAPI3REF: Source Of Data In A Query Result
-** METHOD: sqlite3_stmt
**
** ^These routines provide a means to determine the database, table, and
** table column that is the origin of a particular result column in
@@ -3732,16 +3525,15 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N
** for the same [prepared statement] and result column
** at the same time then the results are undefined.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
/*
** CAPI3REF: Declared Datatype Of A Query Result
-** METHOD: sqlite3_stmt
**
** ^(The first parameter is a [prepared statement].
** If this statement is a [SELECT] statement and the Nth column of the
@@ -3769,12 +3561,11 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*
** is associated with individual values, not with the containers
** used to hold those values.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
/*
** CAPI3REF: Evaluate An SQL Statement
-** METHOD: sqlite3_stmt
**
** After a [prepared statement] has been prepared using either
** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy
@@ -3850,11 +3641,10 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,in
** then the more specific [error codes] are returned directly
** by sqlite3_step(). The use of the "v2" interface is recommended.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
/*
** CAPI3REF: Number of columns in a result set
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_data_count(P) interface returns the number of columns in the
** current row of the result set of [prepared statement] P.
@@ -3871,7 +3661,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
**
** See also: [sqlite3_column_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Fundamental Datatypes
@@ -3908,7 +3698,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Result Values From A Query
** KEYWORDS: {column access functions}
-** METHOD: sqlite3_stmt
+**
+** These routines form the "result set" interface.
**
** ^These routines return information about a single column of the current
** result row of a query. ^In every case the first argument is a pointer
@@ -3969,14 +3760,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** even empty strings, are always zero-terminated. ^The return
** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer.
**
-** <b>Warning:</b> ^The object returned by [sqlite3_column_value()] is an
-** [unprotected sqlite3_value] object. In a multithreaded environment,
-** an unprotected sqlite3_value object may only be used safely with
-** [sqlite3_bind_value()] and [sqlite3_result_value()].
+** ^The object returned by [sqlite3_column_value()] is an
+** [unprotected sqlite3_value] object. An unprotected sqlite3_value object
+** may only be used with [sqlite3_bind_value()] and [sqlite3_result_value()].
** If the [unprotected sqlite3_value] object returned by
** [sqlite3_column_value()] is used in any other way, including calls
** to routines like [sqlite3_value_int()], [sqlite3_value_text()],
-** or [sqlite3_value_bytes()], the behavior is not threadsafe.
+** or [sqlite3_value_bytes()], then the behavior is undefined.
**
** These routines attempt to convert the value where appropriate. ^For
** example, if the internal representation is FLOAT and a text result
@@ -4007,6 +3797,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** </table>
** </blockquote>)^
**
+** The table above makes reference to standard C library functions atoi()
+** and atof(). SQLite does not really use these functions. It has its
+** own equivalent internal routines. The atoi() and atof() names are
+** used in the table for brevity and because they are familiar to most
+** C programmers.
+**
** Note that when type conversions occur, pointers returned by prior
** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or
** sqlite3_column_text16() may be invalidated.
@@ -4031,7 +3827,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** of conversion are done in place when it is possible, but sometimes they
** are not possible and in those cases prior pointers are invalidated.
**
-** The safest policy is to invoke these routines
+** The safest and easiest to remember policy is to invoke these routines
** in one of the following ways:
**
** <ul>
@@ -4051,7 +3847,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** ^The pointers returned are valid until a type conversion occurs as
** described above, or until [sqlite3_step()] or [sqlite3_reset()] or
** [sqlite3_finalize()] is called. ^The memory space used to hold strings
-** and BLOBs is freed automatically. Do <em>not</em> pass the pointers returned
+** and BLOBs is freed automatically. Do <b>not</b> pass the pointers returned
** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
** [sqlite3_free()].
**
@@ -4061,20 +3857,19 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** pointer. Subsequent calls to [sqlite3_errcode()] will return
** [SQLITE_NOMEM].)^
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt*, int iCol);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt*, int iCol);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
+SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
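A typical read loop combines sqlite3_step() with the column accessors above. Sketch with illustrative SQL and table names, treating a NULL column value explicitly:

#include <stdio.h>
#include <sqlite3.h>

static int print_users(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT id, name FROM users", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
    int id = sqlite3_column_int(pStmt, 0);
    /* sqlite3_column_text() returns a NULL pointer for an SQL NULL */
    const unsigned char *zName = sqlite3_column_text(pStmt, 1);
    printf("%d %s\n", id, zName ? (const char*)zName : "(null)");
  }
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}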
/*
** CAPI3REF: Destroy A Prepared Statement Object
-** DESTRUCTOR: sqlite3_stmt
**
** ^The sqlite3_finalize() function is called to delete a [prepared statement].
** ^If the most recent evaluation of the statement encountered no errors
@@ -4098,11 +3893,10 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int
** statement after it has been finalized can result in undefined and
** undesirable behavior such as segfaults and heap corruption.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Reset A Prepared Statement Object
-** METHOD: sqlite3_stmt
**
** The sqlite3_reset() function is called to reset a [prepared statement]
** object back to its initial state, ready to be re-executed.
@@ -4125,14 +3919,13 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
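Because sqlite3_reset() does not touch the bindings, a statement can be executed repeatedly by resetting it between runs and rebinding (or clearing) the parameters. Sketch with a hypothetical single-parameter INSERT already prepared by the caller:

/* Run the prepared one-parameter statement once per name in the array. */
static int insert_many(sqlite3_stmt *pStmt, const char **azName, int n){
  int i, rc;
  for(i=0; i<n; i++){
    sqlite3_bind_text(pStmt, 1, azName[i], -1, SQLITE_TRANSIENT);
    rc = sqlite3_step(pStmt);
    if( rc!=SQLITE_DONE ) return rc;
    sqlite3_clear_bindings(pStmt);  /* set all parameters back to NULL */
    sqlite3_reset(pStmt);           /* make the statement runnable again */
  }
  return SQLITE_OK;
}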
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
** KEYWORDS: {application-defined SQL function}
** KEYWORDS: {application-defined SQL functions}
-** METHOD: sqlite3
**
** ^These functions (collectively known as "function creation routines")
** are used to add SQL functions or aggregates or to redefine the behavior
@@ -4164,24 +3957,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
**
** ^The fourth parameter, eTextRep, specifies what
** [SQLITE_UTF8 | text encoding] this SQL function prefers for
-** its parameters. The application should set this parameter to
-** [SQLITE_UTF16LE] if the function implementation invokes
-** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the
-** implementation invokes [sqlite3_value_text16be()] on an input, or
-** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8]
-** otherwise. ^The same SQL function may be registered multiple times using
-** different preferred text encodings, with different implementations for
-** each encoding.
+** its parameters. Every SQL function implementation must be able to work
+** with UTF-8, UTF-16le, or UTF-16be. But some implementations may be
+** more efficient with one encoding than another. ^An application may
+** invoke sqlite3_create_function() or sqlite3_create_function16() multiple
+** times with the same function but with different values of eTextRep.
** ^When multiple implementations of the same function are available, SQLite
** will pick the one that involves the least amount of data conversion.
-**
-** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC]
-** to signal that the function will always return the same result given
-** the same inputs within a single SQL statement. Most SQL functions are
-** deterministic. The built-in [random()] SQL function is an example of a
-** function that is not deterministic. The SQLite query planner is able to
-** perform additional optimizations on deterministic functions, so use
-** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
+** If there is only a single implementation which does not care what text
+** encoding is used, then the fourth argument should be [SQLITE_ANY].
**
** ^(The fifth parameter is an arbitrary pointer. The implementation of the
** function can gain access to this pointer using [sqlite3_user_data()].)^
@@ -4225,7 +4009,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
** close the database connection nor finalize or reset the prepared
** statement in which the function is running.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4235,7 +4019,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
+SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
const void *zFunctionName,
int nArg,
@@ -4245,7 +4029,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
+SQLITE_API int sqlite3_create_function_v2(
sqlite3 *db,
const char *zFunctionName,
int nArg,
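As a registration sketch, a hypothetical scalar function half(X) could be added through the UTF-8 entry point; its callback, half_func(), is shown later alongside the sqlite3_result_* interfaces:

/* Forward declaration; the body appears with the result-setting interfaces. */
static void half_func(sqlite3_context*, int, sqlite3_value**);

static int register_half(sqlite3 *db){
  /* one argument, UTF-8 preferred, no user data, scalar (no xStep/xFinal) */
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                 half_func, 0, 0);
}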
@@ -4263,50 +4047,39 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
** These constants define integer codes that represent the various
** text encodings supported by SQLite.
*/
-#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */
-#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */
-#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */
+#define SQLITE_UTF8 1
+#define SQLITE_UTF16LE 2
+#define SQLITE_UTF16BE 3
#define SQLITE_UTF16 4 /* Use native byte order */
-#define SQLITE_ANY 5 /* Deprecated */
+#define SQLITE_ANY 5 /* sqlite3_create_function only */
#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */
/*
-** CAPI3REF: Function Flags
-**
-** These constants may be ORed together with the
-** [SQLITE_UTF8 | preferred text encoding] as the fourth argument
-** to [sqlite3_create_function()], [sqlite3_create_function16()], or
-** [sqlite3_create_function_v2()].
-*/
-#define SQLITE_DETERMINISTIC 0x800
-
-/*
** CAPI3REF: Deprecated Functions
** DEPRECATED
**
** These functions are [deprecated]. In order to maintain
** backwards compatibility with older code, these functions continue
** to be supported. However, new applications should avoid
-** the use of these functions. To encourage programmers to avoid
-** these functions, we will not explain what they do.
+** the use of these functions. To help encourage people to avoid
+** using these functions, we are not going to tell you what they do.
*/
#ifndef SQLITE_OMIT_DEPRECATED
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_global_recover(void);
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_thread_cleanup(void);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
+SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
void*,sqlite3_int64);
#endif
/*
-** CAPI3REF: Obtaining SQL Values
-** METHOD: sqlite3_value
+** CAPI3REF: Obtaining SQL Function Parameter Values
**
** The C-language implementation of SQL functions and aggregates uses
** this set of interface routines to access the parameter values on
-** the function or aggregate.
+** the function or aggregate.
**
** The xFunc (for scalar functions) or xStep (for aggregates) parameters
** to [sqlite3_create_function()] and [sqlite3_create_function16()]
@@ -4321,7 +4094,7 @@ SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(voi
** object results in undefined behavior.
**
** ^These routines work just like the corresponding [column access functions]
-** except that these routines take a single [protected sqlite3_value] object
+** except that these routines take a single [protected sqlite3_value] object
** pointer instead of a [sqlite3_stmt*] pointer and an integer column number.
**
** ^The sqlite3_value_text16() interface extracts a UTF-16 string
@@ -4346,55 +4119,21 @@ SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(voi
** These routines must be called from the same thread as
** the SQL function that supplied the [sqlite3_value*] parameters.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value*);
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value*);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value*);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
-
-/*
-** CAPI3REF: Finding The Subtype Of SQL Values
-** METHOD: sqlite3_value
-**
-** The sqlite3_value_subtype(V) function returns the subtype for
-** an [application-defined SQL function] argument V. The subtype
-** information can be used to pass a limited amount of context from
-** one SQL function to another. Use the [sqlite3_result_subtype()]
-** routine to set the subtype for the return value of an SQL function.
-**
-** SQLite makes no use of subtype itself. It merely passes the subtype
-** from the result of one [application-defined SQL function] into the
-** input of another.
-*/
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
-
-/*
-** CAPI3REF: Copy And Free SQL Values
-** METHOD: sqlite3_value
-**
-** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
-** object D and returns a pointer to that copy. ^The [sqlite3_value] returned
-** is a [protected sqlite3_value] object even if the input is not.
-** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
-** memory allocation fails.
-**
-** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
-** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
-** then sqlite3_value_free(V) is a harmless no-op.
-*/
-SQLITE_API SQLITE_EXPERIMENTAL sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
-SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
+SQLITE_API double sqlite3_value_double(sqlite3_value*);
+SQLITE_API int sqlite3_value_int(sqlite3_value*);
+SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
+SQLITE_API int sqlite3_value_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
/*
** CAPI3REF: Obtain Aggregate Function Context
-** METHOD: sqlite3_context
**
** Implementations of aggregate SQL functions use this
** routine to allocate memory for storing their state.
@@ -4435,11 +4174,10 @@ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_value_free(sqlite3_va
** This routine must be called from the same thread in which
** the aggregate SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int nBytes);
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
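An aggregate keeps its running state in the memory returned by sqlite3_aggregate_context(); the zero-initialization on first use removes the need for a separate setup step. Sketch of a hypothetical sumsq(X) aggregate (sum of squares):

typedef struct SumSqCtx SumSqCtx;
struct SumSqCtx { double total; };

static void sumsq_step(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
  (void)argc;
  if( p && sqlite3_value_type(argv[0])!=SQLITE_NULL ){
    double v = sqlite3_value_double(argv[0]);
    p->total += v*v;
  }
}

static void sumsq_final(sqlite3_context *ctx){
  /* Asking for 0 bytes returns the existing allocation, or NULL if xStep
  ** was never called (e.g. an aggregate over zero rows). */
  SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, p ? p->total : 0.0);
}

/* Registered with:
** sqlite3_create_function(db, "sumsq", 1, SQLITE_UTF8, 0, 0,
**                         sumsq_step, sumsq_final); */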
/*
** CAPI3REF: User Data For Functions
-** METHOD: sqlite3_context
**
** ^The sqlite3_user_data() interface returns a copy of
** the pointer that was the pUserData parameter (the 5th parameter)
@@ -4450,11 +4188,10 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int
** This routine must be called from the same thread in which
** the application-defined function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
+SQLITE_API void *sqlite3_user_data(sqlite3_context*);
/*
** CAPI3REF: Database Connection For Functions
-** METHOD: sqlite3_context
**
** ^The sqlite3_context_db_handle() interface returns a copy of
** the pointer to the [database connection] (the 1st parameter)
@@ -4462,11 +4199,10 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
** and [sqlite3_create_function16()] routines that originally
** registered the application defined function.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
/*
** CAPI3REF: Function Auxiliary Data
-** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
** associate metadata with argument values. If the same value is passed to
@@ -4515,8 +4251,8 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
** These routines must be called from the same thread in which
** the SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context*, int N);
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
+SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
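Auxiliary data is most useful for caching an expensive per-argument conversion, such as a compiled pattern, so that it survives across rows of the same statement. Hedged sketch in which compile_pattern() and free_pattern() are hypothetical stand-ins for the real work:

/* Hypothetical helpers standing in for an expensive compilation step. */
extern void *compile_pattern(const char *zPattern);
extern void free_pattern(void *pCompiled);

static void match_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  void *pCompiled = sqlite3_get_auxdata(ctx, 0);
  (void)argc;
  if( pCompiled==0 ){
    pCompiled = compile_pattern((const char*)sqlite3_value_text(argv[0]));
    /* SQLite invokes free_pattern() when it discards the metadata. */
    sqlite3_set_auxdata(ctx, 0, pCompiled, free_pattern);
  }
  /* ... apply pCompiled to sqlite3_value_text(argv[1]) here ... */
  sqlite3_result_int(ctx, pCompiled!=0);
}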
/*
@@ -4539,7 +4275,6 @@ typedef void (*sqlite3_destructor_type)(void*);
/*
** CAPI3REF: Setting The Result Of An SQL Function
-** METHOD: sqlite3_context
**
** These routines are used by the xFunc or xFinal callbacks that
** implement SQL functions and aggregates. See
@@ -4555,9 +4290,9 @@ typedef void (*sqlite3_destructor_type)(void*);
** to by the second parameter and which is N bytes long where N is the
** third parameter.
**
-** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
-** interfaces set the result of the application-defined function to be
-** a BLOB containing all zero bytes and N bytes in size.
+** ^The sqlite3_result_zeroblob() interfaces set the result of
+** the application-defined function to be a BLOB containing all zero
+** bytes and N bytes in size, where N is the value of the 2nd parameter.
**
** ^The sqlite3_result_double() interface sets the result from
** an application-defined function to be a floating point value specified
@@ -4606,10 +4341,6 @@ typedef void (*sqlite3_destructor_type)(void*);
** set the return value of the application-defined function to be
** a text string which is represented as UTF-8, UTF-16 native byte order,
** UTF-16 little endian, or UTF-16 big endian, respectively.
-** ^The sqlite3_result_text64() interface sets the return value of an
-** application-defined function to be a text string in an encoding
-** specified by the fifth (and last) parameter, which must be one
-** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
** ^SQLite takes the text result from the application from
** the 2nd parameter of the sqlite3_result_text* interfaces.
** ^If the 3rd parameter to the sqlite3_result_text* interfaces
@@ -4639,7 +4370,7 @@ typedef void (*sqlite3_destructor_type)(void*);
** from [sqlite3_malloc()] before it returns.
**
** ^The sqlite3_result_value() interface sets the result of
-** the application-defined function to be a copy of the
+** the application-defined function to be a copy the
** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The
** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
** so that the [sqlite3_value] specified in the parameter may change or
@@ -4652,46 +4383,25 @@ typedef void (*sqlite3_destructor_type)(void*);
** than the one containing the application-defined function that received
** the [sqlite3_context] pointer, the results are undefined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(sqlite3_context*,const void*,
- sqlite3_uint64,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context*, double);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context*, const char*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context*, const void*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
- void(*)(void*), unsigned char encoding);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
-
-
-/*
-** CAPI3REF: Setting The Subtype Of An SQL Function
-** METHOD: sqlite3_context
-**
-** The sqlite3_result_subtype(C,T) function causes the subtype of
-** the result from the [application-defined SQL function] with
-** [sqlite3_context] C to be the value T. Only the lower 8 bits
-** of the subtype T are preserved in current versions of SQLite;
-** higher order bits are discarded.
-** The number of subtype bytes preserved by SQLite might increase
-** in future releases of SQLite.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned int);
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
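With the result interfaces in hand, the hypothetical half(X) function registered earlier reduces to a few lines: propagate SQL NULL, otherwise return half of the numeric value as a double.

static void half_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;  /* registered with nArg==1 */
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);
  }else{
    sqlite3_result_double(ctx, 0.5 * sqlite3_value_double(argv[0]));
  }
}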
/*
** CAPI3REF: Define New Collating Sequences
-** METHOD: sqlite3
**
** ^These functions add, remove, or modify a [collation] associated
** with the [database connection] specified as the first argument.
@@ -4769,14 +4479,14 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned
**
** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
+SQLITE_API int sqlite3_create_collation(
sqlite3*,
const char *zName,
int eTextRep,
void *pArg,
int(*xCompare)(void*,int,const void*,int,const void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
+SQLITE_API int sqlite3_create_collation_v2(
sqlite3*,
const char *zName,
int eTextRep,
@@ -4784,7 +4494,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
int(*xCompare)(void*,int,const void*,int,const void*),
void(*xDestroy)(void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
+SQLITE_API int sqlite3_create_collation16(
sqlite3*,
const void *zName,
int eTextRep,
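A comparison callback receives the user pointer plus two length/pointer pairs and returns a value that is negative, zero, or positive. Sketch of a hypothetical case-insensitive ASCII collation named "nocase_ascii":

#include <ctype.h>

static int nocase_ascii_cmp(void *pUnused, int n1, const void *p1,
                            int n2, const void *p2){
  const unsigned char *a = (const unsigned char*)p1;
  const unsigned char *b = (const unsigned char*)p2;
  int i, n = n1<n2 ? n1 : n2;
  (void)pUnused;
  for(i=0; i<n; i++){
    int d = tolower(a[i]) - tolower(b[i]);
    if( d ) return d;
  }
  return n1 - n2;   /* shorter string sorts first on a common prefix */
}

static int register_nocase_ascii(sqlite3 *db){
  return sqlite3_create_collation(db, "nocase_ascii", SQLITE_UTF8, 0,
                                  nocase_ascii_cmp);
}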
@@ -4794,7 +4504,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
/*
** CAPI3REF: Collation Needed Callbacks
-** METHOD: sqlite3
**
** ^To avoid having to register all collation sequences before a database
** can be used, a single callback function may be registered with the
@@ -4819,12 +4528,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
** [sqlite3_create_collation_v2()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
+SQLITE_API int sqlite3_collation_needed(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const char*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
+SQLITE_API int sqlite3_collation_needed16(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const void*)
@@ -4838,11 +4547,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_key(
+SQLITE_API int sqlite3_key(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
+SQLITE_API int sqlite3_key_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The key */
@@ -4856,11 +4565,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey(
+SQLITE_API int sqlite3_rekey(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The new key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
+SQLITE_API int sqlite3_rekey_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The new key */
@@ -4870,7 +4579,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
** Specify the activation key for a SEE database. Unless
** activated, none of the SEE routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
+SQLITE_API void sqlite3_activate_see(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -4880,7 +4589,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
** Specify the activation key for a CEROD database. Unless
** activated, none of the CEROD routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
+SQLITE_API void sqlite3_activate_cerod(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -4902,7 +4611,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
** all, then the behavior of sqlite3_sleep() may deviate from the description
** in the previous paragraphs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
+SQLITE_API int sqlite3_sleep(int);
/*
** CAPI3REF: Name Of The Folder Holding Temporary Files
@@ -4914,13 +4623,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
** is a NULL pointer, then SQLite performs a search for an appropriate
** temporary file directory.
**
-** Applications are strongly discouraged from using this global variable.
-** It is required to set a temporary folder on Windows Runtime (WinRT).
-** But for all other platforms, it is highly recommended that applications
-** neither read nor write this variable. This global variable is a relic
-** that exists for backwards compatibility of legacy applications and should
-** be avoided in new projects.
-**
** It is not safe to read or modify this variable in more than one
** thread at a time. It is not safe to read or modify this variable
** if a [database connection] is being used at the same time in a separate
@@ -4939,11 +4641,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
** Hence, if this variable is modified directly, either it should be
** made NULL or made to point to memory obtained from [sqlite3_malloc]
** or else the use of the [temp_store_directory pragma] should be avoided.
-** Except when requested by the [temp_store_directory pragma], SQLite
-** does not free the memory that sqlite3_temp_directory points to. If
-** the application wants that memory to be freed, it must do
-** so itself, taking care to only do so after all [database connection]
-** objects have been destroyed.
**
** <b>Note to Windows Runtime users:</b> The temporary directory must be set
** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various
@@ -5002,7 +4699,6 @@ SQLITE_API SQLITE_EXTERN char *sqlite3_data_directory;
/*
** CAPI3REF: Test For Auto-Commit Mode
** KEYWORDS: {autocommit mode}
-** METHOD: sqlite3
**
** ^The sqlite3_get_autocommit() interface returns non-zero or
** zero if the given database connection is or is not in autocommit mode,
@@ -5021,11 +4717,10 @@ SQLITE_API SQLITE_EXTERN char *sqlite3_data_directory;
** connection while this routine is running, then the return value
** is undefined.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
+SQLITE_API int sqlite3_get_autocommit(sqlite3*);
/*
** CAPI3REF: Find The Database Handle Of A Prepared Statement
-** METHOD: sqlite3_stmt
**
** ^The sqlite3_db_handle interface returns the [database connection] handle
** to which a [prepared statement] belongs. ^The [database connection]
@@ -5034,11 +4729,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
** create the statement in the first place.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
/*
** CAPI3REF: Return The Filename For A Database Connection
-** METHOD: sqlite3
**
** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename
** associated with database N of connection D. ^The main database file
@@ -5051,21 +4745,19 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
** will be an absolute pathname, even if the filename used
** to open the database originally was a URI or relative pathname.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Determine if a database is read-only
-** METHOD: sqlite3
**
** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N
** of connection D is read-only, 0 if it is read/write, or -1 if N is not
** the name of a database on connection D.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Find the next prepared statement
-** METHOD: sqlite3
**
** ^This interface returns a pointer to the next [prepared statement] after
** pStmt associated with the [database connection] pDb. ^If pStmt is NULL
@@ -5077,11 +4769,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbNa
** [sqlite3_next_stmt(D,S)] must refer to an open database
** connection and in particular must not be a NULL pointer.
*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
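One common use of sqlite3_next_stmt() is to sweep up statements that were never finalized before closing a connection. Minimal sketch:

/* Finalize every prepared statement still associated with db. */
static void finalize_all(sqlite3 *db){
  sqlite3_stmt *pStmt;
  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    sqlite3_finalize(pStmt);
  }
}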
/*
** CAPI3REF: Commit And Rollback Notification Callbacks
-** METHOD: sqlite3
**
** ^The sqlite3_commit_hook() interface registers a callback
** function to be invoked whenever a transaction is [COMMIT | committed].
@@ -5126,12 +4817,11 @@ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_
**
** See also the [sqlite3_update_hook()] interface.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
/*
** CAPI3REF: Data Change Notification Callbacks
-** METHOD: sqlite3
**
** ^The sqlite3_update_hook() interface registers a callback function
** with the [database connection] identified by the first argument
@@ -5178,7 +4868,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *),
** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()]
** interfaces.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
+SQLITE_API void *sqlite3_update_hook(
sqlite3*,
void(*)(void *,int ,char const *,char const *,sqlite3_int64),
void*
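The callback receives the operation code, the database and table names, and the rowid of the affected row. A logging sketch (registration shown in the trailing comment):

#include <stdio.h>

static void on_change(void *pArg, int op, const char *zDb,
                      const char *zTbl, sqlite3_int64 rowid){
  (void)pArg;
  fprintf(stderr, "%s on %s.%s rowid=%lld\n",
          op==SQLITE_INSERT ? "INSERT" :
          op==SQLITE_DELETE ? "DELETE" : "UPDATE",
          zDb, zTbl, (long long)rowid);
}

/* Registration: sqlite3_update_hook(db, on_change, 0); */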
@@ -5208,17 +4898,12 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
** future releases of SQLite. Applications that care about shared
** cache setting should set it explicitly.
**
-** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0
-** and will always return SQLITE_MISUSE. On those systems,
-** shared cache mode should be enabled per-database connection via
-** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE].
-**
** This interface is threadsafe on processors where writing a
** 32-bit integer is atomic.
**
** See Also: [SQLite Shared-Cache Mode]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
+SQLITE_API int sqlite3_enable_shared_cache(int);
/*
** CAPI3REF: Attempt To Free Heap Memory
@@ -5234,11 +4919,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
**
** See also: [sqlite3_db_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
+SQLITE_API int sqlite3_release_memory(int);
/*
** CAPI3REF: Free Memory Used By A Database Connection
-** METHOD: sqlite3
**
** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
** memory as possible from database connection D. Unlike the
@@ -5248,7 +4932,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
**
** See also: [sqlite3_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
/*
** CAPI3REF: Impose A Limit On Heap Size
@@ -5300,7 +4984,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
** The circumstances under which SQLite will enforce the soft heap limit may
** change in future releases of SQLite.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 N);
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
/*
** CAPI3REF: Deprecated Soft Heap Limit Interface
@@ -5311,34 +4995,26 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64
** only. All new applications should use the
** [sqlite3_soft_heap_limit64()] interface rather than this one.
*/
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
/*
** CAPI3REF: Extract Metadata About A Column Of A Table
-** METHOD: sqlite3
-**
-** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns
-** information about column C of table T in database D
-** on [database connection] X.)^ ^The sqlite3_table_column_metadata()
-** interface returns SQLITE_OK and fills in the non-NULL pointers in
-** the final five arguments with appropriate values if the specified
-** column exists. ^The sqlite3_table_column_metadata() interface returns
-** SQLITE_ERROR and if the specified column does not exist.
-** ^If the column-name parameter to sqlite3_table_column_metadata() is a
-** NULL pointer, then this routine simply checks for the existance of the
-** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it
-** does not.
+**
+** ^This routine returns metadata about a specific column of a specific
+** database table accessible using the [database connection] handle
+** passed as the first function argument.
**
** ^The column is identified by the second, third and fourth parameters to
-** this function. ^(The second parameter is either the name of the database
+** this function. ^The second parameter is either the name of the database
** (i.e. "main", "temp", or an attached database) containing the specified
-** table or NULL.)^ ^If it is NULL, then all attached databases are searched
+** table or NULL. ^If it is NULL, then all attached databases are searched
** for the table using the same algorithm used by the database engine to
** resolve unqualified table references.
**
** ^The third and fourth parameters to this function are the table and column
-** name of the desired column, respectively.
+** name of the desired column, respectively. Neither of these parameters
+** may be NULL.
**
** ^Metadata is returned by writing to the memory locations passed as the 5th
** and subsequent parameters to this function. ^Any of these arguments may be
@@ -5357,17 +5033,16 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
** </blockquote>)^
**
** ^The memory pointed to by the character pointers returned for the
-** declaration type and collation sequence is valid until the next
+** declaration type and collation sequence is valid only until the next
** call to any SQLite API function.
**
** ^If the specified table is actually a view, an [error code] is returned.
**
-** ^If the specified column is "rowid", "oid" or "_rowid_" and the table
-** is not a [WITHOUT ROWID] table and an
+** ^If the specified column is "rowid", "oid" or "_rowid_" and an
** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output
** parameters are set for the explicitly declared column. ^(If there is no
-** [INTEGER PRIMARY KEY] column, then the outputs
-** for the [rowid] are set as follows:
+** explicitly declared [INTEGER PRIMARY KEY] column, then the output
+** parameters are set as follows:
**
** <pre>
** data type: "INTEGER"
@@ -5377,11 +5052,15 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
** auto increment: 0
** </pre>)^
**
-** ^This function causes all database schemas to be read from disk and
-** parsed, if that has not already been done, and returns an error if
-** any errors are encountered while loading the schema.
+** ^(This function may load one or more schemas from database files. If an
+** error occurs during this process, or if the requested table or column
+** cannot be found, an [error code] is returned and an error message left
+** in the [database connection] (to be retrieved using sqlite3_errmsg()).)^
+**
+** ^This API is only available if the library was compiled with the
+** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
+SQLITE_API int sqlite3_table_column_metadata(
sqlite3 *db, /* Connection handle */
const char *zDbName, /* Database name or NULL */
const char *zTableName, /* Table name */
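A caller typically passes NULL for any output it does not need. Sketch that only asks whether a column participates in the primary key (the helper name is illustrative; as noted above, the interface requires SQLITE_ENABLE_COLUMN_METADATA):

/* Returns 1 if zCol is part of the PRIMARY KEY of main.zTable,
** 0 if not, or -1 on error (e.g. no such table or column). */
static int column_is_pk(sqlite3 *db, const char *zTable, const char *zCol){
  int isPk = 0;
  int rc = sqlite3_table_column_metadata(db, "main", zTable, zCol,
                                         0, 0,          /* decltype, collseq */
                                         0, &isPk, 0);  /* notnull, pk, autoinc */
  return rc==SQLITE_OK ? isPk : -1;
}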
@@ -5395,7 +5074,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
/*
** CAPI3REF: Load An Extension
-** METHOD: sqlite3
**
** ^This interface loads an SQLite extension library from the named file.
**
@@ -5428,7 +5106,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
**
** See also the [load_extension() SQL function].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
+SQLITE_API int sqlite3_load_extension(
sqlite3 *db, /* Load the extension into this database connection */
const char *zFile, /* Name of the shared library containing extension */
const char *zProc, /* Entry point. Derived from zFile if 0 */
@@ -5437,7 +5115,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
/*
** CAPI3REF: Enable Or Disable Extension Loading
-** METHOD: sqlite3
**
** ^So as not to open security holes in older applications that are
** unprepared to deal with [extension loading], and as a means of disabling
@@ -5449,7 +5126,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
** to turn extension loading on and call it with onoff==0 to turn
** it back off again.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
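Extension loading is usually enabled only around the load itself and switched off again immediately, as the text above recommends. Sketch with an illustrative shared-library name:

static int load_one_extension(sqlite3 *db){
  char *zErrMsg = 0;
  int rc;
  sqlite3_enable_load_extension(db, 1);
  rc = sqlite3_load_extension(db, "./my_extension.so", 0, &zErrMsg);
  sqlite3_enable_load_extension(db, 0);
  if( rc!=SQLITE_OK ){
    /* zErrMsg, if set, was obtained from sqlite3_malloc() and must be freed. */
    sqlite3_free(zErrMsg);
  }
  return rc;
}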
/*
** CAPI3REF: Automatically Load Statically Linked Extensions
@@ -5487,7 +5164,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
** See also: [sqlite3_reset_auto_extension()]
** and [sqlite3_cancel_auto_extension()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
+SQLITE_API int sqlite3_auto_extension(void (*xEntryPoint)(void));
/*
** CAPI3REF: Cancel Automatic Extension Loading
@@ -5499,7 +5176,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
** unregistered and it returns 0 if X was not on the list of initialization
** routines.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(void));
+SQLITE_API int sqlite3_cancel_auto_extension(void (*xEntryPoint)(void));
/*
** CAPI3REF: Reset Automatic Extension Loading
@@ -5507,7 +5184,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(
** ^This interface disables all automatic extensions previously
** registered using [sqlite3_auto_extension()].
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void);
+SQLITE_API void sqlite3_reset_auto_extension(void);
/*
** The interface to the virtual-table mechanism is currently considered
@@ -5634,31 +5311,13 @@ struct sqlite3_module {
** ^The estimatedRows value is an estimate of the number of rows that
** will be returned by the strategy.
**
-** The xBestIndex method may optionally populate the idxFlags field with a
-** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
-** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
-** assumes that the strategy may visit at most one row.
-**
-** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
-** SQLite also assumes that if a call to the xUpdate() method is made as
-** part of the same statement to delete or update a virtual table row and the
-** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
-** any database changes. In other words, if the xUpdate() returns
-** SQLITE_CONSTRAINT, the database contents must be exactly as they were
-** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
-** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
-** the xUpdate method are automatically rolled back by SQLite.
-**
** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
** structure for SQLite version 3.8.2. If a virtual table extension is
** used with an SQLite version earlier than 3.8.2, the results of attempting
** to read or write the estimatedRows field are undefined (but are likely
** to include crashing the application). The estimatedRows field should
** therefore only be used if [sqlite3_libversion_number()] returns a
-** value greater than or equal to 3008002. Similarly, the idxFlags field
-** was added for version 3.9.0. It may therefore only be used if
-** sqlite3_libversion_number() returns a value greater than or equal to
-** 3009000.
+** value greater than or equal to 3008002.
*/
struct sqlite3_index_info {
/* Inputs */
@@ -5686,16 +5345,9 @@ struct sqlite3_index_info {
double estimatedCost; /* Estimated cost of using this index */
/* Fields below are only available in SQLite 3.8.2 and later */
sqlite3_int64 estimatedRows; /* Estimated number of rows returned */
- /* Fields below are only available in SQLite 3.9.0 and later */
- int idxFlags; /* Mask of SQLITE_INDEX_SCAN_* flags */
};
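The version gating described above amounts to a run-time check before touching estimatedRows. Sketch of the relevant fragment of a hypothetical xBestIndex implementation (constraint handling omitted):

static int my_best_index(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
  (void)pVTab;
  pInfo->estimatedCost = 1000.0;            /* available in all versions */
  if( sqlite3_libversion_number()>=3008002 ){
    pInfo->estimatedRows = 25;              /* only touch on 3.8.2 or later */
  }
  return SQLITE_OK;
}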
/*
-** CAPI3REF: Virtual Table Scan Flags
-*/
-#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */
-
-/*
** CAPI3REF: Virtual Table Constraint Operator Codes
**
** These macros define the allowed values for the
@@ -5712,7 +5364,6 @@ struct sqlite3_index_info {
/*
** CAPI3REF: Register A Virtual Table Implementation
-** METHOD: sqlite3
**
** ^These routines are used to register a new [virtual table module] name.
** ^Module names must be registered before
@@ -5736,13 +5387,13 @@ struct sqlite3_index_info {
** interface is equivalent to sqlite3_create_module_v2() with a NULL
** destructor.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
+SQLITE_API int sqlite3_create_module(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
void *pClientData /* Client data for xCreate/xConnect */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
+SQLITE_API int sqlite3_create_module_v2(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
@@ -5770,7 +5421,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
*/
struct sqlite3_vtab {
const sqlite3_module *pModule; /* The module for this virtual table */
- int nRef; /* Number of open cursors */
+ int nRef; /* NO LONGER USED */
char *zErrMsg; /* Error message from sqlite3_mprintf() */
/* Virtual table implementations will typically add additional fields */
};
@@ -5805,11 +5456,10 @@ struct sqlite3_vtab_cursor {
** to declare the format (the names and datatypes of the columns) of
** the virtual tables they implement.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
+SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
/*
** CAPI3REF: Overload A Function For A Virtual Table
-** METHOD: sqlite3
**
** ^(Virtual tables can provide alternative implementations of functions
** using the [xFindFunction] method of the [virtual table module].
@@ -5824,7 +5474,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
** purpose is to be a placeholder function that can be overloaded
** by a [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
+SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
/*
** The interface to the virtual-table mechanism defined above (back up
@@ -5852,8 +5502,6 @@ typedef struct sqlite3_blob sqlite3_blob;
/*
** CAPI3REF: Open A BLOB For Incremental I/O
-** METHOD: sqlite3
-** CONSTRUCTOR: sqlite3_blob
**
** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located
** in row iRow, column zColumn, table zTable in database zDb;
@@ -5863,42 +5511,26 @@ typedef struct sqlite3_blob sqlite3_blob;
** SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
** </pre>)^
**
-** ^(Parameter zDb is not the filename that contains the database, but
-** rather the symbolic name of the database. For attached databases, this is
-** the name that appears after the AS keyword in the [ATTACH] statement.
-** For the main database file, the database name is "main". For TEMP
-** tables, the database name is "temp".)^
-**
** ^If the flags parameter is non-zero, then the BLOB is opened for read
-** and write access. ^If the flags parameter is zero, the BLOB is opened for
-** read-only access.
-**
-** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored
-** in *ppBlob. Otherwise an [error code] is returned and, unless the error
-** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
-** the API is not misused, it is always safe to call [sqlite3_blob_close()]
-** on *ppBlob after this function it returns.
-**
-** This function fails with SQLITE_ERROR if any of the following are true:
-** <ul>
-** <li> ^(Database zDb does not exist)^,
-** <li> ^(Table zTable does not exist within database zDb)^,
-** <li> ^(Table zTable is a WITHOUT ROWID table)^,
-** <li> ^(Column zColumn does not exist)^,
-** <li> ^(Row iRow is not present in the table)^,
-** <li> ^(The specified column of row iRow contains a value that is not
-** a TEXT or BLOB value)^,
-** <li> ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
-** constraint and the blob is being opened for read/write access)^,
-** <li> ^([foreign key constraints | Foreign key constraints] are enabled,
-** column zColumn is part of a [child key] definition and the blob is
-** being opened for read/write access)^.
-** </ul>
-**
-** ^Unless it returns SQLITE_MISUSE, this function sets the
-** [database connection] error code and message accessible via
-** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
-**
+** and write access. ^If it is zero, the BLOB is opened for read access.
+** ^It is not possible to open a column that is part of an index or primary
+** key for writing. ^If [foreign key constraints] are enabled, it is
+** not possible to open a column that is part of a [child key] for writing.
+**
+** ^Note that the database name is not the filename that contains
+** the database but rather the symbolic name of the database that
+** appears after the AS keyword when the database is connected using [ATTACH].
+** ^For the main database file, the database name is "main".
+** ^For TEMP tables, the database name is "temp".
+**
+** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is written
+** to *ppBlob. Otherwise an [error code] is returned and *ppBlob is set
+** to be a null pointer.)^
+** ^This function sets the [database connection] error code and message
+** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()] and related
+** functions. ^Note that the *ppBlob variable is always initialized in a
+** way that makes it safe to invoke [sqlite3_blob_close()] on *ppBlob
+** regardless of the success or failure of this routine.
**
** ^(If the row that a BLOB handle points to is modified by an
** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
@@ -5916,14 +5548,18 @@ typedef struct sqlite3_blob sqlite3_blob;
** interface. Use the [UPDATE] SQL command to change the size of a
** blob.
**
+** ^The [sqlite3_blob_open()] interface will fail for a [WITHOUT ROWID]
+** table. Incremental BLOB I/O is not possible on [WITHOUT ROWID] tables.
+**
** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces
-** and the built-in [zeroblob] SQL function may be used to create a
-** zero-filled blob to read or write using the incremental-blob interface.
+** and the built-in [zeroblob] SQL function can be used, if desired,
+** to create an empty, zero-filled blob in which to read or write using
+** this interface.
**
** To avoid a resource leak, every open [BLOB handle] should eventually
** be released by a call to [sqlite3_blob_close()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
+SQLITE_API int sqlite3_blob_open(
sqlite3*,
const char *zDb,
const char *zTable,
@@ -5935,7 +5571,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
/*
** CAPI3REF: Move a BLOB Handle to a New Row
-** METHOD: sqlite3_blob
**
** ^This function is used to move an existing blob handle so that it points
** to a different row of the same database table. ^The new row is identified
@@ -5956,34 +5591,34 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
**
** ^This function sets the database handle error code and message.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
/*
** CAPI3REF: Close A BLOB Handle
-** DESTRUCTOR: sqlite3_blob
**
-** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed
-** unconditionally. Even if this routine returns an error code, the
-** handle is still closed.)^
+** ^Closes an open [BLOB handle].
**
-** ^If the blob handle being closed was opened for read-write access, and if
-** the database is in auto-commit mode and there are no other open read-write
-** blob handles or active write statements, the current transaction is
-** committed. ^If an error occurs while committing the transaction, an error
-** code is returned and the transaction rolled back.
+** ^Closing a BLOB shall cause the current transaction to commit
+** if there are no other BLOBs, no pending prepared statements, and the
+** database connection is in [autocommit mode].
+** ^If any writes were made to the BLOB, they might be held in cache
+** until the close operation if they will fit.
**
-** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
-** with a null pointer (such as would be returned by a failed call to
-** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
-** is passed a valid open blob handle, the values returned by the
-** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
+** ^(Closing the BLOB often forces the changes
+** out to disk and so if any I/O errors occur, they will likely occur
+** at the time when the BLOB is closed. Any errors that occur during
+** closing are reported as a non-zero return value.)^
+**
+** ^(The BLOB is closed unconditionally. Even if this routine returns
+** an error code, the BLOB is still closed.)^
+**
+** ^Calling this routine with a null pointer (such as would be returned
+** by a failed call to [sqlite3_blob_open()]) is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
/*
** CAPI3REF: Return The Size Of An Open BLOB
-** METHOD: sqlite3_blob
**
** ^Returns the size in bytes of the BLOB accessible via the
** successfully opened [BLOB handle] in its only argument. ^The
@@ -5995,11 +5630,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
** been closed by [sqlite3_blob_close()]. Passing any other pointer in
** to this routine results in undefined and probably undesirable behavior.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
/*
** CAPI3REF: Read Data From A BLOB Incrementally
-** METHOD: sqlite3_blob
**
** ^(This function is used to read data from an open [BLOB handle] into a
** caller-supplied buffer. N bytes of data are copied into buffer Z
@@ -6024,33 +5658,26 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
**
** See also: [sqlite3_blob_write()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
/*
** CAPI3REF: Write Data Into A BLOB Incrementally
-** METHOD: sqlite3_blob
-**
-** ^(This function is used to write data into an open [BLOB handle] from a
-** caller-supplied buffer. N bytes of data are copied from the buffer Z
-** into the open BLOB, starting at offset iOffset.)^
**
-** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
-** Otherwise, an [error code] or an [extended error code] is returned.)^
-** ^Unless SQLITE_MISUSE is returned, this function sets the
-** [database connection] error code and message accessible via
-** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
+** ^This function is used to write data into an open [BLOB handle] from a
+** caller-supplied buffer. ^N bytes of data are copied from the buffer Z
+** into the open BLOB, starting at offset iOffset.
**
** ^If the [BLOB handle] passed as the first argument was not opened for
** writing (the flags parameter to [sqlite3_blob_open()] was zero),
** this function returns [SQLITE_READONLY].
**
-** This function may only modify the contents of the BLOB; it is
+** ^This function may only modify the contents of the BLOB; it is
** not possible to increase the size of a BLOB using this API.
** ^If offset iOffset is less than N bytes from the end of the BLOB,
-** [SQLITE_ERROR] is returned and no data is written. The size of the
-** BLOB (and hence the maximum value of N+iOffset) can be determined
-** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less
-** than zero [SQLITE_ERROR] is returned and no data is written.
+** [SQLITE_ERROR] is returned and no data is written. ^If N is
+** less than zero [SQLITE_ERROR] is returned and no data is written.
+** The size of the BLOB (and hence the maximum value of N+iOffset)
+** can be determined using the [sqlite3_blob_bytes()] interface.
**
** ^An attempt to write to an expired [BLOB handle] fails with an
** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred
@@ -6059,6 +5686,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N,
** have been overwritten by the statement that expired the BLOB handle
** or by other independent statements.
**
+** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+**
** This routine only works on a [BLOB handle] which has been created
** by a prior successful call to [sqlite3_blob_open()] and which has not
** been closed by [sqlite3_blob_close()]. Passing any other pointer in
@@ -6066,7 +5696,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N,
**
** See also: [sqlite3_blob_read()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
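/*
** Illustrative sketch (not part of this header): one possible way to use the
** incremental BLOB I/O interfaces declared above.  The table name "photos",
** column "img" and the read/write flag used here are assumptions made for
** this example only.
*/
#include <stdio.h>
#include "sqlite3.h"

static int copy_blob_prefix(sqlite3 *db, sqlite3_int64 iRow){
  sqlite3_blob *pBlob = 0;
  int rc, nBytes;
  unsigned char buf[32];

  /* Open column "img" of row iRow in table "photos" for read/write access. */
  rc = sqlite3_blob_open(db, "main", "photos", "img", iRow, 1, &pBlob);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "blob_open: %s\n", sqlite3_errmsg(db));
    return rc;
  }

  nBytes = sqlite3_blob_bytes(pBlob);          /* total size of the BLOB */

  /* Read up to 32 bytes from the start of the BLOB ... */
  if( nBytes>=(int)sizeof(buf) ){
    rc = sqlite3_blob_read(pBlob, buf, sizeof(buf), 0);
    /* ... and, if that worked, write them back at the same offset.  A write
    ** cannot grow the BLOB; use zeroblob()/UPDATE to size it beforehand. */
    if( rc==SQLITE_OK ){
      rc = sqlite3_blob_write(pBlob, buf, sizeof(buf), 0);
    }
  }

  /* The handle is closed unconditionally, even if an error occurred above. */
  sqlite3_blob_close(pBlob);
  return rc;
}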
/*
** CAPI3REF: Virtual File System Objects
@@ -6097,9 +5727,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z,
** ^(If the default VFS is unregistered, another VFS is chosen as
** the default. The choice for the new VFS is arbitrary.)^
*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfsName);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
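/*
** Illustrative sketch (assumption, not part of this header): look up the
** current default VFS and re-register it.  A real application would normally
** wrap the returned sqlite3_vfs object in its own implementation first.
*/
#include "sqlite3.h"

static int remember_default_vfs(sqlite3_vfs **ppOut){
  sqlite3_vfs *pDefault = sqlite3_vfs_find(0);   /* 0 => current default VFS */
  if( pDefault==0 ) return SQLITE_ERROR;
  *ppOut = pDefault;
  /* makeDflt==1 keeps it the default; calling it here is a no-op and only
  ** demonstrates the interface. */
  return sqlite3_vfs_register(pDefault, 1);
}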
/*
** CAPI3REF: Mutexes
@@ -6111,51 +5741,45 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** The SQLite source code contains multiple implementations
** of these mutex routines. An appropriate implementation
-** is selected automatically at compile-time. The following
+** is selected automatically at compile-time. ^(The following
** implementations are available in the SQLite core:
**
** <ul>
** <li> SQLITE_MUTEX_PTHREADS
** <li> SQLITE_MUTEX_W32
** <li> SQLITE_MUTEX_NOOP
-** </ul>
+** </ul>)^
**
-** The SQLITE_MUTEX_NOOP implementation is a set of routines
+** ^The SQLITE_MUTEX_NOOP implementation is a set of routines
** that does no real locking and is appropriate for use in
-** a single-threaded application. The SQLITE_MUTEX_PTHREADS and
+** a single-threaded application. ^The SQLITE_MUTEX_PTHREADS and
** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix
** and Windows.
**
-** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
+** ^(If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex
** implementation is included with the library. In this case the
** application must supply a custom mutex implementation using the
** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function
** before calling sqlite3_initialize() or any other public sqlite3_
-** function that calls sqlite3_initialize().
+** function that calls sqlite3_initialize().)^
**
** ^The sqlite3_mutex_alloc() routine allocates a new
-** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc()
-** routine returns NULL if it is unable to allocate the requested
-** mutex. The argument to sqlite3_mutex_alloc() must one of these
-** integer constants:
+** mutex and returns a pointer to it. ^If it returns NULL
+** that means that a mutex could not be allocated. ^SQLite
+** will unwind its stack and return an error. ^(The argument
+** to sqlite3_mutex_alloc() is one of these integer constants:
**
** <ul>
** <li> SQLITE_MUTEX_FAST
** <li> SQLITE_MUTEX_RECURSIVE
** <li> SQLITE_MUTEX_STATIC_MASTER
** <li> SQLITE_MUTEX_STATIC_MEM
-** <li> SQLITE_MUTEX_STATIC_OPEN
+** <li> SQLITE_MUTEX_STATIC_MEM2
** <li> SQLITE_MUTEX_STATIC_PRNG
** <li> SQLITE_MUTEX_STATIC_LRU
-** <li> SQLITE_MUTEX_STATIC_PMEM
-** <li> SQLITE_MUTEX_STATIC_APP1
-** <li> SQLITE_MUTEX_STATIC_APP2
-** <li> SQLITE_MUTEX_STATIC_APP3
-** <li> SQLITE_MUTEX_STATIC_VFS1
-** <li> SQLITE_MUTEX_STATIC_VFS2
-** <li> SQLITE_MUTEX_STATIC_VFS3
-** </ul>
+** <li> SQLITE_MUTEX_STATIC_LRU2
+** </ul>)^
**
** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE)
** cause sqlite3_mutex_alloc() to create
@@ -6163,14 +5787,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
** The mutex implementation does not need to make a distinction
** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
-** not want to. SQLite will only request a recursive mutex in
-** cases where it really needs one. If a faster non-recursive mutex
+** not want to. ^SQLite will only request a recursive mutex in
+** cases where it really needs one. ^If a faster non-recursive mutex
** implementation is available on the host platform, the mutex subsystem
** might return such a mutex in response to SQLITE_MUTEX_FAST.
**
** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other
** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return
-** a pointer to a static preexisting mutex. ^Nine static mutexes are
+** a pointer to a static preexisting mutex. ^Six static mutexes are
** used by the current version of SQLite. Future versions of SQLite
** may add additional static mutexes. Static mutexes are for internal
** use by SQLite only. Applications that use SQLite mutexes should
@@ -6179,13 +5803,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
-** returns a different mutex on every call. ^For the static
+** returns a different mutex on every call. ^But for the static
** mutex types, the same mutex is returned on every call that has
** the same type number.
**
** ^The sqlite3_mutex_free() routine deallocates a previously
-** allocated dynamic mutex. Attempting to deallocate a static
-** mutex results in undefined behavior.
+** allocated dynamic mutex. ^SQLite is careful to deallocate every
+** dynamic mutex that it allocates. The dynamic mutexes must not be in
+** use when they are deallocated. Attempting to deallocate a static
+** mutex results in undefined behavior. ^SQLite never deallocates
+** a static mutex.
**
** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
** to enter a mutex. ^If another thread is already within the mutex,
@@ -6193,21 +5820,23 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK]
** upon successful entry. ^(Mutexes created using
** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread.
-** In such cases, the
+** In such cases, the
** mutex must be exited an equal number of times before another thread
-** can enter.)^ If the same thread tries to enter any mutex other
-** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined.
+** can enter.)^ ^(If the same thread tries to enter any other
+** kind of mutex more than once, the behavior is undefined.
+** SQLite will never exhibit
+** such behavior in its own use of mutexes.)^
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. The SQLite core only ever uses
+** sqlite3_mutex_try() as an optimization so this is acceptable behavior.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
-** previously entered by the same thread. The behavior
+** previously entered by the same thread. ^(The behavior
** is undefined if the mutex is not currently entered by the
-** calling thread or is not currently allocated.
+** calling thread or is not currently allocated. SQLite will
+** never do either.)^
**
** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or
** sqlite3_mutex_leave() is a NULL pointer, then all three routines
@@ -6215,11 +5844,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int);
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*);
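/*
** Illustrative sketch (assumption, not part of this header): allocate a
** recursive mutex, use it to protect a critical section, then free it.
** Only dynamic mutexes (SQLITE_MUTEX_FAST / SQLITE_MUTEX_RECURSIVE) may be
** freed; static mutexes must never be passed to sqlite3_mutex_free().
*/
#include "sqlite3.h"

static int guarded_work(void (*xWork)(void)){
  sqlite3_mutex *pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
  if( pMutex==0 ) return SQLITE_NOMEM;   /* allocation may fail */
  sqlite3_mutex_enter(pMutex);           /* blocks until the mutex is held */
  xWork();
  sqlite3_mutex_leave(pMutex);
  sqlite3_mutex_free(pMutex);            /* must not be in use at this point */
  return SQLITE_OK;
}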
/*
** CAPI3REF: Mutex Methods Object
@@ -6228,9 +5857,9 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
** used to allocate and use mutexes.
**
** Usually, the default mutex implementations provided by SQLite are
-** sufficient, however the application has the option of substituting a custom
+** sufficient, however the user has the option of substituting a custom
** implementation for specialized deployments or systems for which SQLite
-** does not provide a suitable implementation. In this case, the application
+** does not provide a suitable implementation. In this case, the user
** creates and populates an instance of this structure to pass
** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
** Additionally, an instance of this structure can be used as an
@@ -6271,13 +5900,13 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
** (i.e. it is acceptable to provide an implementation that segfaults if
** it is passed a NULL pointer).
**
-** The xMutexInit() method must be threadsafe. It must be harmless to
+** The xMutexInit() method must be threadsafe. ^It must be harmless to
** invoke xMutexInit() multiple times within the same process and without
** intervening calls to xMutexEnd(). Second and subsequent calls to
** xMutexInit() must be no-ops.
**
-** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
-** and its associates). Similarly, xMutexAlloc() must not use SQLite memory
+** ^xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
+** and its associates). ^Similarly, xMutexAlloc() must not use SQLite memory
** allocation for a static mutex. ^However xMutexAlloc() may use SQLite
** memory allocation for a fast or recursive mutex.
**
@@ -6303,34 +5932,34 @@ struct sqlite3_mutex_methods {
** CAPI3REF: Mutex Verification Routines
**
** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines
-** are intended for use inside assert() statements. The SQLite core
+** are intended for use inside assert() statements. ^The SQLite core
** never uses these routines except inside an assert() and applications
-** are advised to follow the lead of the core. The SQLite core only
+** are advised to follow the lead of the core. ^The SQLite core only
** provides implementations for these routines when it is compiled
-** with the SQLITE_DEBUG flag. External mutex implementations
+** with the SQLITE_DEBUG flag. ^External mutex implementations
** are only required to provide these routines if SQLITE_DEBUG is
** defined and if NDEBUG is not defined.
**
-** These routines should return true if the mutex in their argument
+** ^These routines should return true if the mutex in their argument
** is held or not held, respectively, by the calling thread.
**
-** The implementation is not required to provide versions of these
+** ^The implementation is not required to provide versions of these
** routines that actually work. If the implementation does not provide working
** versions of these routines, it should at least provide stubs that always
** return true so that one does not get spurious assertion failures.
**
-** If the argument to sqlite3_mutex_held() is a NULL pointer then
+** ^If the argument to sqlite3_mutex_held() is a NULL pointer then
** the routine should return 1. This seems counter-intuitive since
** clearly the mutex cannot be held if it does not exist. But
** the reason the mutex does not exist is because the build is not
** using mutexes. And we do not want the assert() containing the
** call to sqlite3_mutex_held() to fail, so a non-zero return is
-** the appropriate thing to do. The sqlite3_mutex_notheld()
+** the appropriate thing to do. ^The sqlite3_mutex_notheld()
** interface should also return 1 when given a NULL pointer.
*/
#ifndef NDEBUG
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
#endif
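/*
** Illustrative sketch (assumption, not part of this header): the held/notheld
** routines are intended for assert() statements only, for example to document
** a locking protocol.  Note that sqlite3_mutex_held(NULL) returns 1 by
** convention, so the assert also passes when the connection has no mutex.
*/
#include <assert.h>
#include "sqlite3.h"

static void requires_db_mutex(sqlite3 *db){
  assert( sqlite3_mutex_held(sqlite3_db_mutex(db)) );
  /* ... code that assumes the connection mutex is already held ... */
}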
/*
@@ -6353,16 +5982,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
-#define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */
-#define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */
-#define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */
-#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */
-#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */
-#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */
/*
** CAPI3REF: Retrieve the mutex for a database connection
-** METHOD: sqlite3
**
** ^This interface returns a pointer to the [sqlite3_mutex] object that
** serializes access to the [database connection] given in the argument
@@ -6370,11 +5992,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
** ^If the [threading mode] is Single-thread or Multi-thread then this
** routine returns a NULL pointer.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
/*
** CAPI3REF: Low-Level Control Of Database Files
-** METHOD: sqlite3
**
** ^The [sqlite3_file_control()] interface makes a direct call to the
** xFileControl method for the [sqlite3_io_methods] object associated
@@ -6405,7 +6026,7 @@ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
**
** See also: [SQLITE_FCNTL_LOCKSTATE]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
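/*
** Illustrative sketch (assumption, not part of this header): query the
** current lock state of the main database file via the xFileControl
** mechanism.  The SQLITE_FCNTL_LOCKSTATE opcode writes an integer lock level
** into the supplied int; it is primarily intended for testing.
*/
#include "sqlite3.h"

static int query_lock_state(sqlite3 *db, int *pLockState){
  /* zDbName "main" refers to the main database of this connection. */
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_LOCKSTATE, pLockState);
}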
/*
** CAPI3REF: Testing Interface
@@ -6424,7 +6045,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName
** Unlike most of the SQLite API, this function is not guaranteed to
** operate consistently from one release to the next.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
+SQLITE_API int sqlite3_test_control(int op, ...);
/*
** CAPI3REF: Testing Interface Operation Codes
@@ -6452,19 +6073,14 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ISKEYWORD 16
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
-#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
+#define SQLITE_TESTCTRL_EXPLAIN_STMT 19
#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
-#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
-#define SQLITE_TESTCTRL_BYTEORDER 22
-#define SQLITE_TESTCTRL_ISINIT 23
-#define SQLITE_TESTCTRL_SORTER_MMAP 24
-#define SQLITE_TESTCTRL_IMPOSTER 25
-#define SQLITE_TESTCTRL_LAST 25
+#define SQLITE_TESTCTRL_LAST 20
/*
** CAPI3REF: SQLite Runtime Status
**
-** ^These interfaces are used to retrieve runtime status information
+** ^This interface is used to retrieve runtime status information
** about the performance of SQLite, and optionally to reset various
** highwater marks. ^The first argument is an integer code for
** the specific parameter to measure. ^(Recognized integer codes
@@ -6478,22 +6094,19 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
** ^(Other parameters record only the highwater mark and not the current
** value. For these latter parameters nothing is written into *pCurrent.)^
**
-** ^The sqlite3_status() and sqlite3_status64() routines return
-** SQLITE_OK on success and a non-zero [error code] on failure.
+** ^The sqlite3_status() routine returns SQLITE_OK on success and a
+** non-zero [error code] on failure.
**
-** If either the current value or the highwater mark is too large to
-** be represented by a 32-bit integer, then the values returned by
-** sqlite3_status() are undefined.
+** This routine is threadsafe but is not atomic. This routine can be
+** called while other threads are running the same or different SQLite
+** interfaces. However the values returned in *pCurrent and
+** *pHighwater reflect the status of SQLite at different points in time
+** and it is possible that another thread might change the parameter
+** in between the times when *pCurrent and *pHighwater are written.
**
** See also: [sqlite3_db_status()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
- int op,
- sqlite3_int64 *pCurrent,
- sqlite3_int64 *pHighwater,
- int resetFlag
-);
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
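/*
** Illustrative sketch (assumption, not part of this header): read the current
** and highwater amount of heap memory used by SQLite, without resetting the
** highwater mark (resetFlag==0).
*/
#include <stdio.h>
#include "sqlite3.h"

static void print_memory_status(void){
  int cur = 0, hi = 0;
  if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0)==SQLITE_OK ){
    printf("memory used: %d bytes now, %d bytes peak\n", cur, hi);
  }
}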
/*
@@ -6591,7 +6204,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
/*
** CAPI3REF: Database Connection Status
-** METHOD: sqlite3
**
** ^This interface is used to retrieve runtime status information
** about a single [database connection]. ^The first argument is the
@@ -6612,7 +6224,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
**
** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
+SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
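/*
** Illustrative sketch (assumption, not part of this header): report the
** approximate pager-cache memory used by one database connection.  The
** highwater value for this parameter is documented to always be zero.
*/
#include <stdio.h>
#include "sqlite3.h"

static void print_cache_used(sqlite3 *db){
  int cur = 0, hi = 0;
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &cur, &hi, 0)==SQLITE_OK ){
    printf("pager caches: %d bytes\n", cur);
  }
}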
/*
** CAPI3REF: Status Parameters for database connections
@@ -6654,12 +6266,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
** the current value is always zero.)^
**
** [[SQLITE_DBSTATUS_CACHE_USED]] ^(<dt>SQLITE_DBSTATUS_CACHE_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap
+** <dd>This parameter returns the approximate number of bytes of heap
** memory used by all pager caches associated with the database connection.)^
** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
**
** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap
+** <dd>This parameter returns the approximate number of bytes of heap
** memory used to store the schema for all databases associated
** with the connection - main, temp, and any [ATTACH]-ed databases.)^
** ^The full amount of memory used by the schemas is reported, even if the
@@ -6668,7 +6280,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
**
** [[SQLITE_DBSTATUS_STMT_USED]] ^(<dt>SQLITE_DBSTATUS_STMT_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap
+** <dd>This parameter returns the approximate number of bytes of heap
** and lookaside memory used by all prepared statements associated with
** the database connection.)^
** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
@@ -6720,7 +6332,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
/*
** CAPI3REF: Prepared Statement Status
-** METHOD: sqlite3_stmt
**
** ^(Each prepared statement maintains various
** [SQLITE_STMTSTATUS counters] that measure the number
@@ -6742,7 +6353,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
**
** See also: [sqlite3_status()] and [sqlite3_db_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
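/*
** Illustrative sketch (assumption, not part of this header): inspect one of
** the per-statement counters after running a query, here the number of
** full-scan steps, which can hint that an index is missing.
*/
#include <stdio.h>
#include "sqlite3.h"

static void report_fullscan_steps(sqlite3_stmt *pStmt){
  /* Final argument 0 => read the counter without resetting it. */
  int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
  printf("full-scan steps: %d\n", nScan);
}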
/*
** CAPI3REF: Status Parameters for prepared statements
@@ -7069,10 +6680,6 @@ typedef struct sqlite3_backup sqlite3_backup;
** must be different or else sqlite3_backup_init(D,N,S,M) will fail with
** an error.
**
-** ^A call to sqlite3_backup_init() will fail, returning SQLITE_ERROR, if
-** there is already a read or read-write transaction open on the
-** destination database.
-**
** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is
** returned and an error code and error message are stored in the
** destination [database connection] D.
@@ -7165,20 +6772,20 @@ typedef struct sqlite3_backup sqlite3_backup;
** is not a permanent error and does not affect the return value of
** sqlite3_backup_finish().
**
-** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
+** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
** <b>sqlite3_backup_remaining() and sqlite3_backup_pagecount()</b>
**
-** ^The sqlite3_backup_remaining() routine returns the number of pages still
-** to be backed up at the conclusion of the most recent sqlite3_backup_step().
-** ^The sqlite3_backup_pagecount() routine returns the total number of pages
-** in the source database at the conclusion of the most recent
-** sqlite3_backup_step().
-** ^(The values returned by these functions are only updated by
-** sqlite3_backup_step(). If the source database is modified in a way that
-** changes the size of the source database or the number of pages remaining,
-** those changes are not reflected in the output of sqlite3_backup_pagecount()
-** and sqlite3_backup_remaining() until after the next
-** sqlite3_backup_step().)^
+** ^Each call to sqlite3_backup_step() sets two values inside
+** the [sqlite3_backup] object: the number of pages still to be backed
+** up and the total number of pages in the source database file.
+** The sqlite3_backup_remaining() and sqlite3_backup_pagecount() interfaces
+** retrieve these two values, respectively.
+**
+** ^The values returned by these functions are only updated by
+** sqlite3_backup_step(). ^If the source database is modified during a backup
+** operation, then the values are not updated to account for any extra
+** pages that need to be updated or the size of the source database file
+** changing.
**
** <b>Concurrent Usage of Database Handles</b>
**
@@ -7211,20 +6818,19 @@ typedef struct sqlite3_backup sqlite3_backup;
** same time as another thread is invoking sqlite3_backup_step() it is
** possible that they return invalid values.
*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
sqlite3 *pDest, /* Destination database handle */
const char *zDestName, /* Destination database name */
sqlite3 *pSource, /* Source database handle */
const char *zSourceName /* Source database name */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
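/*
** Illustrative sketch (assumption, not part of this header): copy the main
** database of pSrc into the main database of pDest using the backup
** interfaces declared above, a few pages at a time.
*/
#include "sqlite3.h"

static int backup_db(sqlite3 *pSrc, sqlite3 *pDest){
  int rc;
  sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSrc, "main");
  if( p==0 ) return sqlite3_errcode(pDest);   /* error info is in pDest */

  do{
    rc = sqlite3_backup_step(p, 5);           /* copy up to 5 pages per call */
    /* SQLITE_BUSY/SQLITE_LOCKED are transient; a real application might
    ** sleep here (e.g. sqlite3_sleep(250)) before retrying. */
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );

  /* finish() releases all resources and reports any error from the copy. */
  return sqlite3_backup_finish(p);
}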
/*
** CAPI3REF: Unlock Notification
-** METHOD: sqlite3
**
** ^When running in shared-cache mode, a database operation may fail with
** an [SQLITE_LOCKED] error if the required locks on the shared-cache or
@@ -7337,7 +6943,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
** the special "DROP TABLE/INDEX" case, the extended error code is just
** SQLITE_LOCKED.)^
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
+SQLITE_API int sqlite3_unlock_notify(
sqlite3 *pBlocked, /* Waiting connection */
void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
void *pNotifyArg /* Argument to pass to xNotify */
@@ -7352,8 +6958,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
** strings in a case-independent fashion, using the same definition of "case
** independence" that SQLite uses internally when comparing identifiers.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *);
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
+SQLITE_API int sqlite3_stricmp(const char *, const char *);
+SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
/*
** CAPI3REF: String Globbing
@@ -7368,7 +6974,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
** Note that this routine returns zero on a match and non-zero if the strings
** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr);
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
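/*
** Illustrative sketch (assumption, not part of this header): both comparison
** helpers and the GLOB matcher return zero on a match, so each test below
** reads "matches" when the result is 0.
*/
#include <assert.h>
#include "sqlite3.h"

static void string_helper_examples(void){
  assert( sqlite3_stricmp("Main", "MAIN")==0 );          /* case-independent */
  assert( sqlite3_strnicmp("temporary", "TEMP", 4)==0 ); /* first 4 chars     */
  assert( sqlite3_strglob("*.db", "backup.db")==0 );     /* GLOB-style match  */
  assert( sqlite3_strglob("*.db", "backup.sql")!=0 );
}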
/*
** CAPI3REF: Error Logging Interface
@@ -7391,17 +6997,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zSt
** a few hundred characters, it will be truncated to the length of the
** buffer.
*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...);
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...);
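/*
** Illustrative sketch (assumption, not part of this header): route the error
** log to stderr and emit one message.  sqlite3_config() must be called before
** the library is initialized; the message text here is arbitrary.
*/
#include <stdio.h>
#include "sqlite3.h"

static void log_callback(void *pArg, int iErrCode, const char *zMsg){
  (void)pArg;
  fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
}

static void setup_logging(void){
  sqlite3_config(SQLITE_CONFIG_LOG, log_callback, (void*)0);
  sqlite3_log(SQLITE_ERROR, "example message: %s", "logging configured");
}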
/*
** CAPI3REF: Write-Ahead Log Commit Hook
-** METHOD: sqlite3
**
** ^The [sqlite3_wal_hook()] function is used to register a callback that
-** is invoked each time data is committed to a database in wal mode.
+** will be invoked each time a database connection commits data to a
+** [write-ahead log] (i.e. whenever a transaction is committed in
+** [journal_mode | journal_mode=WAL mode]).
**
-** ^(The callback is invoked by SQLite after the commit has taken place and
-** the associated write-lock on the database released)^, so the implementation
+** ^The callback is invoked by SQLite after the commit has taken place and
+** the associated write-lock on the database released, so the implementation
** may read, write or [checkpoint] the database as required.
**
** ^The first parameter passed to the callback function when it is invoked
@@ -7427,7 +7034,7 @@ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...)
** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
** overwrite any prior [sqlite3_wal_hook()] settings.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
+SQLITE_API void *sqlite3_wal_hook(
sqlite3*,
int(*)(void *,sqlite3*,const char*,int),
void*
@@ -7435,7 +7042,6 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
/*
** CAPI3REF: Configure an auto-checkpoint
-** METHOD: sqlite3
**
** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around
** [sqlite3_wal_hook()] that causes any database on [database connection] D
@@ -7453,132 +7059,103 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
** ^The [wal_autocheckpoint pragma] can be used to invoke this interface
** from SQL.
**
-** ^Checkpoints initiated by this mechanism are
-** [sqlite3_wal_checkpoint_v2|PASSIVE].
-**
** ^Every new [database connection] defaults to having the auto-checkpoint
** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT]
** pages. The use of this interface
** is only necessary if the default setting is found to be suboptimal
** for a particular application.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
/*
** CAPI3REF: Checkpoint a database
-** METHOD: sqlite3
**
-** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to
-** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^
+** ^The [sqlite3_wal_checkpoint(D,X)] interface causes database named X
+** on [database connection] D to be [checkpointed]. ^If X is NULL or an
+** empty string, then a checkpoint is run on all databases of
+** connection D. ^If the database connection D is not in
+** [WAL | write-ahead log mode] then this interface is a harmless no-op.
**
-** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the
-** [write-ahead log] for database X on [database connection] D to be
-** transferred into the database file and for the write-ahead log to
-** be reset. See the [checkpointing] documentation for additional
-** information.
+** ^The [wal_checkpoint pragma] can be used to invoke this interface
+** from SQL. ^The [sqlite3_wal_autocheckpoint()] interface and the
+** [wal_autocheckpoint pragma] can be used to cause this interface to be
+** run whenever the WAL reaches a certain size threshold.
**
-** This interface used to be the only way to cause a checkpoint to
-** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()]
-** interface was added. This interface is retained for backwards
-** compatibility and as a convenience for applications that need to manually
-** start a callback but which do not need the full power (and corresponding
-** complication) of [sqlite3_wal_checkpoint_v2()].
+** See also: [sqlite3_wal_checkpoint_v2()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
/*
** CAPI3REF: Checkpoint a database
-** METHOD: sqlite3
**
-** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint
-** operation on database X of [database connection] D in mode M. Status
-** information is written back into integers pointed to by L and C.)^
-** ^(The M parameter must be a valid [checkpoint mode]:)^
+** Run a checkpoint operation on WAL database zDb attached to database
+** handle db. The specific operation is determined by the value of the
+** eMode parameter:
**
** <dl>
** <dt>SQLITE_CHECKPOINT_PASSIVE<dd>
-** ^Checkpoint as many frames as possible without waiting for any database
-** readers or writers to finish, then sync the database file if all frames
-** in the log were checkpointed. ^The [busy-handler callback]
-** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode.
-** ^On the other hand, passive mode might leave the checkpoint unfinished
-** if there are concurrent readers or writers.
+** Checkpoint as many frames as possible without waiting for any database
+** readers or writers to finish. Sync the db file if all frames in the log
+** are checkpointed. This mode is the same as calling
+** sqlite3_wal_checkpoint(). The busy-handler callback is never invoked.
**
** <dt>SQLITE_CHECKPOINT_FULL<dd>
-** ^This mode blocks (it invokes the
-** [sqlite3_busy_handler|busy-handler callback]) until there is no
+** This mode blocks (calls the busy-handler callback) until there is no
** database writer and all readers are reading from the most recent database
-** snapshot. ^It then checkpoints all frames in the log file and syncs the
-** database file. ^This mode blocks new database writers while it is pending,
-** but new database readers are allowed to continue unimpeded.
+** snapshot. It then checkpoints all frames in the log file and syncs the
+** database file. This call blocks database writers while it is running,
+** but not database readers.
**
** <dt>SQLITE_CHECKPOINT_RESTART<dd>
-** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition
-** that after checkpointing the log file it blocks (calls the
-** [busy-handler callback])
-** until all readers are reading from the database file only. ^This ensures
-** that the next writer will restart the log file from the beginning.
-** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new
-** database writer attempts while it is pending, but does not impede readers.
-**
-** <dt>SQLITE_CHECKPOINT_TRUNCATE<dd>
-** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the
-** addition that it also truncates the log file to zero bytes just prior
-** to a successful return.
+** This mode works the same way as SQLITE_CHECKPOINT_FULL, except after
+** checkpointing the log file it blocks (calls the busy-handler callback)
+** until all readers are reading from the database file only. This ensures
+** that the next client to write to the database file restarts the log file
+** from the beginning. This call blocks database writers while it is running,
+** but not database readers.
** </dl>
**
-** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in
-** the log file or to -1 if the checkpoint could not run because
-** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not
-** NULL,then *pnCkpt is set to the total number of checkpointed frames in the
-** log file (including any that were already checkpointed before the function
-** was called) or to -1 if the checkpoint could not run due to an error or
-** because the database is not in WAL mode. ^Note that upon successful
-** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been
-** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero.
-**
-** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If
+** If pnLog is not NULL, then *pnLog is set to the total number of frames in
+** the log file before returning. If pnCkpt is not NULL, then *pnCkpt is set to
+** the total number of checkpointed frames (including any that were already
+** checkpointed when this function is called). *pnLog and *pnCkpt may be
+** populated even if sqlite3_wal_checkpoint_v2() returns other than SQLITE_OK.
+** If no values are available because of an error, they are both set to -1
+** before returning to communicate this to the caller.
+**
+** All calls obtain an exclusive "checkpoint" lock on the database file. If
** any other process is running a checkpoint operation at the same time, the
-** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a
+** lock cannot be obtained and SQLITE_BUSY is returned. Even if there is a
** busy-handler configured, it will not be invoked in this case.
**
-** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the
-** exclusive "writer" lock on the database file. ^If the writer lock cannot be
-** obtained immediately, and a busy-handler is configured, it is invoked and
-** the writer lock retried until either the busy-handler returns 0 or the lock
-** is successfully obtained. ^The busy-handler is also invoked while waiting for
-** database readers as described above. ^If the busy-handler returns 0 before
+** The SQLITE_CHECKPOINT_FULL and RESTART modes also obtain the exclusive
+** "writer" lock on the database file. If the writer lock cannot be obtained
+** immediately, and a busy-handler is configured, it is invoked and the writer
+** lock retried until either the busy-handler returns 0 or the lock is
+** successfully obtained. The busy-handler is also invoked while waiting for
+** database readers as described above. If the busy-handler returns 0 before
** the writer lock is obtained or while waiting for database readers, the
** checkpoint operation proceeds from that point in the same way as
** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible
-** without blocking any further. ^SQLITE_BUSY is returned in this case.
+** without blocking any further. SQLITE_BUSY is returned in this case.
**
-** ^If parameter zDb is NULL or points to a zero length string, then the
-** specified operation is attempted on all WAL databases [attached] to
-** [database connection] db. In this case the
-** values written to output parameters *pnLog and *pnCkpt are undefined. ^If
+** If parameter zDb is NULL or points to a zero length string, then the
+** specified operation is attempted on all WAL databases. In this case the
+** values written to output parameters *pnLog and *pnCkpt are undefined. If
** an SQLITE_BUSY error is encountered when processing one or more of the
** attached WAL databases, the operation is still attempted on any remaining
-** attached databases and SQLITE_BUSY is returned at the end. ^If any other
+** attached databases and SQLITE_BUSY is returned to the caller. If any other
** error occurs while processing an attached database, processing is abandoned
-** and the error code is returned to the caller immediately. ^If no error
+** and the error code returned to the caller immediately. If no error
** (SQLITE_BUSY or otherwise) is encountered while processing the attached
** databases, SQLITE_OK is returned.
**
-** ^If database zDb is the name of an attached database that is not in WAL
-** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. ^If
+** If database zDb is the name of an attached database that is not in WAL
+** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. If
** zDb is not NULL (or a zero length string) and is not the name of any
** attached database, SQLITE_ERROR is returned to the caller.
-**
-** ^Unless it returns SQLITE_MISUSE,
-** the sqlite3_wal_checkpoint_v2() interface
-** sets the error information that is queried by
-** [sqlite3_errcode()] and [sqlite3_errmsg()].
-**
-** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
-** from SQL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
+SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of attached database (or NULL) */
int eMode, /* SQLITE_CHECKPOINT_* value */
@@ -7587,18 +7164,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
);
/*
-** CAPI3REF: Checkpoint Mode Values
-** KEYWORDS: {checkpoint mode}
+** CAPI3REF: Checkpoint operation parameters
**
-** These constants define all valid values for the "checkpoint mode" passed
-** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface.
-** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the
-** meaning of each of these checkpoint modes.
+** These constants can be used as the 3rd parameter to
+** [sqlite3_wal_checkpoint_v2()]. See the [sqlite3_wal_checkpoint_v2()]
+** documentation for additional information about the meaning and use of
+** each of these values.
*/
-#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */
-#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */
-#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */
-#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */
+#define SQLITE_CHECKPOINT_PASSIVE 0
+#define SQLITE_CHECKPOINT_FULL 1
+#define SQLITE_CHECKPOINT_RESTART 2
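/*
** Illustrative sketch (assumption, not part of this header): run a passive
** checkpoint on the "main" database and report how much of the log was
** transferred into the database file.
*/
#include <stdio.h>
#include "sqlite3.h"

static int passive_checkpoint(sqlite3 *db){
  int nLog = -1, nCkpt = -1;
  int rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_PASSIVE,
                                     &nLog, &nCkpt);
  if( rc==SQLITE_OK ){
    printf("%d of %d frames checkpointed\n", nCkpt, nLog);
  }
  return rc;
}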
/*
** CAPI3REF: Virtual Table Interface Configuration
@@ -7614,7 +7189,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options
** may be added in the future.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Virtual Table Configuration Options
@@ -7667,11 +7242,10 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
** of the SQL statement that triggered the call to the [xUpdate] method of the
** [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
/*
** CAPI3REF: Conflict resolution modes
-** KEYWORDS: {conflict resolution mode}
**
** These constants are returned by [sqlite3_vtab_on_conflict()] to
** inform a [virtual table] implementation what the [ON CONFLICT] mode
@@ -7687,108 +7261,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
/* #define SQLITE_ABORT 4 // Also an error code */
#define SQLITE_REPLACE 5
-/*
-** CAPI3REF: Prepared Statement Scan Status Opcodes
-** KEYWORDS: {scanstatus options}
-**
-** The following constants can be used for the T parameter to the
-** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a
-** different metric for sqlite3_stmt_scanstatus() to return.
-**
-** When the value returned to V is a string, space to hold that string is
-** managed by the prepared statement S and will be automatically freed when
-** S is finalized.
-**
-** <dl>
-** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt>
-** <dd>^The [sqlite3_int64] variable pointed to by the T parameter will be
-** set to the total number of times that the X-th loop has run.</dd>
-**
-** [[SQLITE_SCANSTAT_NVISIT]] <dt>SQLITE_SCANSTAT_NVISIT</dt>
-** <dd>^The [sqlite3_int64] variable pointed to by the T parameter will be set
-** to the total number of rows examined by all iterations of the X-th loop.</dd>
-**
-** [[SQLITE_SCANSTAT_EST]] <dt>SQLITE_SCANSTAT_EST</dt>
-** <dd>^The "double" variable pointed to by the T parameter will be set to the
-** query planner's estimate for the average number of rows output from each
-** iteration of the X-th loop. If the query planner's estimates were accurate,
-** then this value will approximate the quotient NVISIT/NLOOP and the
-** product of this value for all prior loops with the same SELECTID will
-** be the NLOOP value for the current loop.
-**
-** [[SQLITE_SCANSTAT_NAME]] <dt>SQLITE_SCANSTAT_NAME</dt>
-** <dd>^The "const char *" variable pointed to by the T parameter will be set
-** to a zero-terminated UTF-8 string containing the name of the index or table
-** used for the X-th loop.
-**
-** [[SQLITE_SCANSTAT_EXPLAIN]] <dt>SQLITE_SCANSTAT_EXPLAIN</dt>
-** <dd>^The "const char *" variable pointed to by the T parameter will be set
-** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
-** description for the X-th loop.
-**
-** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt>
-** <dd>^The "int" variable pointed to by the T parameter will be set to the
-** "select-id" for the X-th loop. The select-id identifies which query or
-** subquery the loop is part of. The main query has a select-id of zero.
-** The select-id is the same value as is output in the first column
-** of an [EXPLAIN QUERY PLAN] query.
-** </dl>
-*/
-#define SQLITE_SCANSTAT_NLOOP 0
-#define SQLITE_SCANSTAT_NVISIT 1
-#define SQLITE_SCANSTAT_EST 2
-#define SQLITE_SCANSTAT_NAME 3
-#define SQLITE_SCANSTAT_EXPLAIN 4
-#define SQLITE_SCANSTAT_SELECTID 5
-
-/*
-** CAPI3REF: Prepared Statement Scan Status
-** METHOD: sqlite3_stmt
-**
-** This interface returns information about the predicted and measured
-** performance for pStmt. Advanced applications can use this
-** interface to compare the predicted and the measured performance and
-** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
-**
-** Since this interface is expected to be rarely used, it is only
-** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS]
-** compile-time option.
-**
-** The "iScanStatusOp" parameter determines which status information to return.
-** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
-** of this interface is undefined.
-** ^The requested measurement is written into a variable pointed to by
-** the "pOut" parameter.
-** Parameter "idx" identifies the specific loop to retrieve statistics for.
-** Loops are numbered starting from zero. ^If idx is out of range - less than
-** zero or greater than or equal to the total number of loops used to implement
-** the statement - a non-zero value is returned and the variable that pOut
-** points to is unchanged.
-**
-** ^Statistics might not be available for all loops in all statements. ^In cases
-** where there exist loops with no available statistics, this function behaves
-** as if the loop did not exist - it returns non-zero and leaves the variable
-** that pOut points to unchanged.
-**
-** See also: [sqlite3_stmt_scanstatus_reset()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
- sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
- int idx, /* Index of loop to report on */
- int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
- void *pOut /* Result written here */
-);
-
-/*
-** CAPI3REF: Zero Scan-Status Counters
-** METHOD: sqlite3_stmt
-**
-** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
-**
-** This API is only available if the library is built with pre-processor
-** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
/*
@@ -7826,16 +7298,6 @@ extern "C" {
#endif
typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry;
-typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
-
-/* The double-precision datatype used by RTree depends on the
-** SQLITE_RTREE_INT_ONLY compile-time option.
-*/
-#ifdef SQLITE_RTREE_INT_ONLY
- typedef sqlite3_int64 sqlite3_rtree_dbl;
-#else
- typedef double sqlite3_rtree_dbl;
-#endif
/*
** Register a geometry callback named zGeom that can be used as part of an
@@ -7843,10 +7305,14 @@ typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
**
** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zGeom(... params ...)
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
+SQLITE_API int sqlite3_rtree_geometry_callback(
sqlite3 *db,
const char *zGeom,
- int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
+#ifdef SQLITE_RTREE_INT_ONLY
+ int (*xGeom)(sqlite3_rtree_geometry*, int n, sqlite3_int64 *a, int *pRes),
+#else
+ int (*xGeom)(sqlite3_rtree_geometry*, int n, double *a, int *pRes),
+#endif
void *pContext
);
@@ -7858,62 +7324,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
struct sqlite3_rtree_geometry {
void *pContext; /* Copy of pContext passed to s_r_g_c() */
int nParam; /* Size of array aParam[] */
- sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */
+ double *aParam; /* Parameters passed to SQL geom function */
void *pUser; /* Callback implementation user data */
void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */
};
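/*
** Illustrative sketch (assumption, not part of this header): a minimal
** geometry callback for a 2-D R-Tree built without SQLITE_RTREE_INT_ONLY.
** It treats its four parameters as xmin, xmax, ymin, ymax of a query
** rectangle and reports whether the entry's bounding box overlaps it.  The
** function name "within_box" is an example choice.
*/
#include "sqlite3.h"

static int withinBoxGeom(sqlite3_rtree_geometry *p, int nCoord,
                         double *aCoord, int *pRes){
  if( p->nParam!=4 || nCoord!=4 ) return SQLITE_ERROR;
  /* aCoord[] holds xmin, xmax, ymin, ymax of the node or entry under test */
  *pRes = aCoord[0]<=p->aParam[1] && aCoord[1]>=p->aParam[0]
       && aCoord[2]<=p->aParam[3] && aCoord[3]>=p->aParam[2];
  return SQLITE_OK;
}

static int register_within_box(sqlite3 *db){
  /* Usable afterwards as: ... WHERE col MATCH within_box(xmin,xmax,ymin,ymax) */
  return sqlite3_rtree_geometry_callback(db, "within_box", withinBoxGeom, 0);
}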
-/*
-** Register a 2nd-generation geometry callback named zScore that can be
-** used as part of an R-Tree geometry query as follows:
-**
-** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zQueryFunc(... params ...)
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
- sqlite3 *db,
- const char *zQueryFunc,
- int (*xQueryFunc)(sqlite3_rtree_query_info*),
- void *pContext,
- void (*xDestructor)(void*)
-);
-
-
-/*
-** A pointer to a structure of the following type is passed as the
-** argument to scored geometry callback registered using
-** sqlite3_rtree_query_callback().
-**
-** Note that the first 5 fields of this structure are identical to
-** sqlite3_rtree_geometry. This structure is a subclass of
-** sqlite3_rtree_geometry.
-*/
-struct sqlite3_rtree_query_info {
- void *pContext; /* pContext from when function registered */
- int nParam; /* Number of function parameters */
- sqlite3_rtree_dbl *aParam; /* value of function parameters */
- void *pUser; /* callback can use this, if desired */
- void (*xDelUser)(void*); /* function to free pUser */
- sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */
- unsigned int *anQueue; /* Number of pending entries in the queue */
- int nCoord; /* Number of coordinates */
- int iLevel; /* Level of current node or entry */
- int mxLevel; /* The largest iLevel value in the tree */
- sqlite3_int64 iRowid; /* Rowid for current entry */
- sqlite3_rtree_dbl rParentScore; /* Score of parent node */
- int eParentWithin; /* Visibility of parent node */
- int eWithin; /* OUT: Visibility */
- sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
- /* The following fields are only available in 3.8.11 and later */
- sqlite3_value **apSqlParam; /* Original SQL values of parameters */
-};
-
-/*
-** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin.
-*/
-#define NOT_WITHIN 0 /* Object completely outside of query region */
-#define PARTLY_WITHIN 1 /* Object partially overlaps query region */
-#define FULLY_WITHIN 2 /* Object fully contained within query region */
-
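A hedged sketch of the 2nd-generation flavour; the name demoQueryFunc and the depth-based score are invented for illustration, and a real callback would normally inspect aCoord and aParam before deciding eWithin:

static int demoQueryFunc(sqlite3_rtree_query_info *pInfo){
  /* Accept every node/entry and rank entries by their level in the tree. */
  pInfo->eWithin = FULLY_WITHIN;
  pInfo->rScore = (sqlite3_rtree_dbl)pInfo->iLevel;
  return SQLITE_OK;
}
/* Registration, per the declaration above:
**   sqlite3_rtree_query_callback(db, "demo_query", demoQueryFunc, 0, 0);
*/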
#ifdef __cplusplus
} /* end of the 'extern "C"' block */
@@ -7921,523 +7336,3 @@ struct sqlite3_rtree_query_info {
#endif /* ifndef _SQLITE3RTREE_H_ */
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** Interfaces to extend FTS5. Using the interfaces defined in this file,
-** FTS5 may be extended with:
-**
-** * custom tokenizers, and
-** * custom auxiliary functions.
-*/
-
-
-#ifndef _FTS5_H
-#define _FTS5_H
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*************************************************************************
-** CUSTOM AUXILIARY FUNCTIONS
-**
-** Virtual table implementations may overload SQL functions by implementing
-** the sqlite3_module.xFindFunction() method.
-*/
-
-typedef struct Fts5ExtensionApi Fts5ExtensionApi;
-typedef struct Fts5Context Fts5Context;
-typedef struct Fts5PhraseIter Fts5PhraseIter;
-
-typedef void (*fts5_extension_function)(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-);
-
-struct Fts5PhraseIter {
- const unsigned char *a;
- const unsigned char *b;
-};
-
-/*
-** EXTENSION API FUNCTIONS
-**
-** xUserData(pFts):
-** Return a copy of the context pointer the extension function was
-** registered with.
-**
-** xColumnTotalSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the FTS5 table. Or, if iCol is
-** non-negative but less than the number of columns in the table, set
-** *pnToken to the total number of tokens in column iCol, considering
-** all rows in the FTS5 table.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnCount(pFts):
-** Return the number of columns in the table.
-**
-** xColumnSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the current row. Or, if iCol is
-** non-negative but less than the number of columns in the table, set
-** *pnToken to the number of tokens in column iCol of the current row.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
-** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
-** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
-** if an error occurs, an SQLite error code is returned and the final values
-** of (*pz) and (*pn) are undefined.
-**
-** xPhraseCount:
-** Returns the number of phrases in the current query expression.
-**
-** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
-**
-** xInstCount:
-** Set *pnInst to the total number of occurrences of all phrases within
-** the query within the current row. Return SQLITE_OK if successful, or
-** an error code (i.e. SQLITE_NOMEM) if an error occurs.
-**
-** xInst:
-** Query for the details of phrase match iIdx within the current row.
-** Phrase matches are numbered starting from zero, so the iIdx argument
-** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
-**
-** Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
-** if an error occurs.
-**
-** xRowid:
-** Returns the rowid of the current row.
-**
-** xTokenize:
-** Tokenize text using the tokenizer belonging to the FTS5 table.
-**
-** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
-** This API function is used to query the FTS table for phrase iPhrase
-** of the current query. Specifically, a query equivalent to:
-**
-** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
-**
-** with $p set to a phrase equivalent to the phrase iPhrase of the
-** current query is executed. For each row visited, the callback function
-** passed as the fourth argument is invoked. The context and API objects
-** passed to the callback function may be used to access the properties of
-** each matched row. Invoking Api.xUserData() returns a copy of the pointer
-** passed as the third argument (pUserData) to xQueryPhrase().
-**
-** If the callback function returns any value other than SQLITE_OK, the
-** query is abandoned and the xQueryPhrase function returns immediately.
-** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
-** Otherwise, the error code is propagated upwards.
-**
-** If the query runs to completion without incident, SQLITE_OK is returned.
-** Or, if some error occurs before the query completes or is aborted by
-** the callback, an SQLite error code is returned.
-**
-**
-** xSetAuxdata(pFts5, pAux, xDelete)
-**
-** Save the pointer passed as the second argument as the extension function's
-** "auxiliary data". The pointer may then be retrieved by the current or any
-** future invocation of the same fts5 extension function made as part of
-** the same MATCH query using the xGetAuxdata() API.
-**
-** Each extension function is allocated a single auxiliary data slot for
-** each FTS query (MATCH expression). If the extension function is invoked
-** more than once for a single FTS query, then all invocations share a
-** single auxiliary data context.
-**
-** If there is already an auxiliary data pointer when this function is
-** invoked, then it is replaced by the new pointer. If an xDelete callback
-** was specified along with the original pointer, it is invoked at this
-** point.
-**
-** The xDelete callback, if one is specified, is also invoked on the
-** auxiliary data pointer after the FTS5 query has finished.
-**
-** If an error (e.g. an OOM condition) occurs within this function, the
-** auxiliary data is set to NULL and an error code is returned. If the
-** xDelete parameter was not NULL, it is invoked on the auxiliary data
-** pointer before returning.
-**
-**
-** xGetAuxdata(pFts5, bClear)
-**
-** Returns the current auxiliary data pointer for the fts5 extension
-** function. See the xSetAuxdata() method for details.
-**
-** If the bClear argument is non-zero, then the auxiliary data is cleared
-** (set to NULL) before this function returns. In this case the xDelete
-** callback, if any, is not invoked.
-**
-**
-** xRowCount(pFts5, pnRow)
-**
-** This function is used to retrieve the total number of rows in the table.
-** In other words, the same value that would be returned by:
-**
-** SELECT count(*) FROM ftstable;
-**
-** xPhraseFirst()
-** This function is used, along with type Fts5PhraseIter and the xPhraseNext
-** method, to iterate through all instances of a single query phrase within
-** the current row. This is the same information as is accessible via the
-** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
-** to use, this API may be faster under some circumstances. To iterate
-** through instances of phrase iPhrase, use the following code:
-**
-** Fts5PhraseIter iter;
-** int iCol, iOff;
-** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
-** iOff>=0;
-** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
-** ){
-** // An instance of phrase iPhrase at offset iOff of column iCol
-** }
-**
-** The Fts5PhraseIter structure is defined above. Applications should not
-** modify this structure directly - it should only be used as shown above
-** with the xPhraseFirst() and xPhraseNext() API methods.
-**
-** xPhraseNext()
-** See xPhraseFirst above.
-*/
-struct Fts5ExtensionApi {
- int iVersion; /* Currently always set to 1 */
-
- void *(*xUserData)(Fts5Context*);
-
- int (*xColumnCount)(Fts5Context*);
- int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
- int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
-
- int (*xTokenize)(Fts5Context*,
- const char *pText, int nText, /* Text to tokenize */
- void *pCtx, /* Context passed to xToken() */
- int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
- );
-
- int (*xPhraseCount)(Fts5Context*);
- int (*xPhraseSize)(Fts5Context*, int iPhrase);
-
- int (*xInstCount)(Fts5Context*, int *pnInst);
- int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
-
- sqlite3_int64 (*xRowid)(Fts5Context*);
- int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
- int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
-
- int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
- int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
- );
- int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
- void *(*xGetAuxdata)(Fts5Context*, int bClear);
-
- void (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
- void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
-};
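As a rough example of how these methods compose, the following sketch (the name matchcount is invented here) implements an auxiliary function that reports the number of phrase instances in the current row via xInstCount:

static void matchCountFunc(
  const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
  Fts5Context *pFts,            /* First arg to pass to pApi functions */
  sqlite3_context *pCtx,        /* Context for returning result/error */
  int nVal,                     /* Number of values in apVal[] array */
  sqlite3_value **apVal         /* Array of trailing arguments */
){
  int nInst = 0;
  int rc = pApi->xInstCount(pFts, &nInst);
  if( rc==SQLITE_OK ){
    sqlite3_result_int(pCtx, nInst);     /* return the instance count */
  }else{
    sqlite3_result_error_code(pCtx, rc); /* propagate the error */
  }
}

Once registered through the fts5_api structure shown further below, it could be invoked as SELECT matchcount(tbl) FROM tbl WHERE tbl MATCH 'first place'.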
-
-/*
-** CUSTOM AUXILIARY FUNCTIONS
-*************************************************************************/
-
-/*************************************************************************
-** CUSTOM TOKENIZERS
-**
-** Applications may also register custom tokenizer types. A tokenizer
-** is registered by providing fts5 with a populated instance of the
-** following structure. All structure methods must be defined; setting
-** any member of the fts5_tokenizer struct to NULL leads to undefined
-** behaviour. The structure methods are expected to function as follows:
-**
-** xCreate:
-** This function is used to allocate and initialize a tokenizer instance.
-** A tokenizer instance is required to actually tokenize text.
-**
-** The first argument passed to this function is a copy of the (void*)
-** pointer provided by the application when the fts5_tokenizer object
-** was registered with FTS5 (the third argument to xCreateTokenizer()).
-** The second and third arguments are an array of nul-terminated strings
-** containing the tokenizer arguments, if any, specified following the
-** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
-** to create the FTS5 table.
-**
-** The final argument is an output variable. If successful, (*ppOut)
-** should be set to point to the new tokenizer handle and SQLITE_OK
-** returned. If an error occurs, some value other than SQLITE_OK should
-** be returned. In this case, fts5 assumes that the final value of *ppOut
-** is undefined.
-**
-** xDelete:
-** This function is invoked to delete a tokenizer handle previously
-** allocated using xCreate(). Fts5 guarantees that this function will
-** be invoked exactly once for each successful call to xCreate().
-**
-** xTokenize:
-** This function is expected to tokenize the nText byte string indicated
-** by argument pText. pText may or may not be nul-terminated. The first
-** argument passed to this function is a pointer to an Fts5Tokenizer object
-** returned by an earlier call to xCreate().
-**
-** The second argument indicates the reason that FTS5 is requesting
-** tokenization of the supplied text. This is always one of the following
-** four values:
-**
-** <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
-** or removed from the FTS table. The tokenizer is being invoked to
-** determine the set of tokens to add to (or delete from) the
-** FTS index.
-**
-** <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed
-** against the FTS index. The tokenizer is being called to tokenize
-** a bareword or quoted string specified as part of the query.
-**
-** <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
-** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
-** followed by a "*" character, indicating that the last token
-** returned by the tokenizer will be treated as a token prefix.
-**
-** <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to
-** satisfy an fts5_api.xTokenize() request made by an auxiliary
-** function, or an fts5_api.xColumnSize() request made by the same
-** function on a columnsize=0 database.
-** </ul>
-**
-** For each token in the input string, the supplied callback xToken() must
-** be invoked. The first argument to it should be a copy of the pointer
-** passed as the second argument to xTokenize(). The third and fourth
-** arguments are a pointer to a buffer containing the token text, and the
-** size of the token in bytes. The 5th and 6th arguments are the byte offsets
-** of the first byte of and first byte immediately following the text from
-** which the token is derived within the input.
-**
-** The second argument passed to the xToken() callback ("tflags") should
-** normally be set to 0. The exception is if the tokenizer supports
-** synonyms. In this case see the discussion below for details.
-**
-** FTS5 assumes the xToken() callback is invoked for each token in the
-** order that they occur within the input text.
-**
-** If an xToken() callback returns any value other than SQLITE_OK, then
-** the tokenization should be abandoned and the xTokenize() method should
-** immediately return a copy of the xToken() return value. Or, if the
-** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
-** if an error occurs with the xTokenize() implementation itself, it
-** may abandon the tokenization and return any error code other than
-** SQLITE_OK or SQLITE_DONE.
-**
-** SYNONYM SUPPORT
-**
-** Custom tokenizers may also support synonyms. Consider a case in which a
-** user wishes to query for a phrase such as "first place". Using the
-** built-in tokenizers, the FTS5 query 'first + place' will match instances
-** of "first place" within the document set, but not alternative forms
-** such as "1st place". In some applications, it would be better to match
-** all instances of "first place" or "1st place" regardless of which form
-** the user specified in the MATCH query text.
-**
-** There are several ways to approach this in FTS5:
-**
-** <ol><li> By mapping all synonyms to a single token. In the above
-** example, this means that the tokenizer returns the
-** same token for inputs "first" and "1st". Say that token is in
-** fact "first", so that when the user inserts the document "I won
-** 1st place" entries are added to the index for tokens "i", "won",
-** "first" and "place". If the user then queries for '1st + place',
-** the tokenizer substitutes "first" for "1st" and the query works
-** as expected.
-**
-** <li> By querying the index for all synonyms of each query term
-** separately. In this case, when tokenizing query text, the tokenizer
-** may provide multiple synonyms for a single term within the query.
-** FTS5 then queries the index for each synonym individually. For
-** example, faced with the query:
-**
-** <codeblock>
-** ... MATCH 'first place'</codeblock>
-**
-** the tokenizer offers both "1st" and "first" as synonyms for the
-** first token in the MATCH query and FTS5 effectively runs a query
-** similar to:
-**
-** <codeblock>
-** ... MATCH '(first OR 1st) place'</codeblock>
-**
-** except that, for the purposes of auxiliary functions, the query
-** still appears to contain just two phrases - "(first OR 1st)"
-** being treated as a single phrase.
-**
-** <li> By adding multiple synonyms for a single term to the FTS index.
-** Using this method, when tokenizing document text, the tokenizer
-** provides multiple synonyms for each token. In this way, when a
-** document such as "I won first place" is tokenized, entries are
-** added to the FTS index for "i", "won", "first", "1st" and
-** "place".
-**
-** This way, even if the tokenizer does not provide synonyms
-** when tokenizing query text (it should not - to do so would be
-** inefficient), it doesn't matter if the user queries for
-** 'first + place' or '1st + place', as there are entries in the
-** FTS index corresponding to both forms of the first token.
-** </ol>
-**
-** Whether it is parsing document or query text, any call to xToken that
-** specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit set
-** is considered to supply a synonym for the previous token. For example,
-** when parsing the document "I won first place", a tokenizer that supports
-** synonyms would call xToken() 5 times, as follows:
-**
-** <codeblock>
-** xToken(pCtx, 0, "i", 1, 0, 1);
-** xToken(pCtx, 0, "won", 3, 2, 5);
-** xToken(pCtx, 0, "first", 5, 6, 11);
-** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
-** xToken(pCtx, 0, "place", 5, 12, 17);
-**</codeblock>
-**
-** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
-** xToken() is called. Multiple synonyms may be specified for a single token
-** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
-** There is no limit to the number of synonyms that may be provided for a
-** single token.
-**
-** In many cases, method (1) above is the best approach. It does not add
-** extra data to the FTS index or require FTS5 to query for multiple terms,
-** so it is efficient in terms of disk space and query speed. However, it
-** does not support prefix queries very well. If, as suggested above, the
-** token "first" is subsituted for "1st" by the tokenizer, then the query:
-**
-** <codeblock>
-** ... MATCH '1s*'</codeblock>
-**
-** will not match documents that contain the token "1st" (as the tokenizer
-** will probably not map "1s" to any prefix of "first").
-**
-** For full prefix support, method (3) may be preferred. In this case,
-** because the index contains entries for both "first" and "1st", prefix
-** queries such as 'fi*' or '1s*' will match correctly. However, because
-** extra entries are added to the FTS index, this method uses more space
-** within the database.
-**
-** Method (2) offers a midpoint between (1) and (3). Using this method,
-** a query such as '1s*' will match documents that contain the literal
-** token "1st", but not "first" (assuming the tokenizer is not able to
-** provide synonyms for prefixes). However, a non-prefix query like '1st'
-** will match against "1st" and "first". This method does not require
-** extra disk space, as no extra entries are added to the FTS index.
-** On the other hand, it may require more CPU cycles to run MATCH queries,
-** as separate queries of the FTS index are required for each synonym.
-**
-** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (3)) or query
-** text (method (2)), not both. Doing so will not cause any errors, but is
-** inefficient.
-*/
-typedef struct Fts5Tokenizer Fts5Tokenizer;
-typedef struct fts5_tokenizer fts5_tokenizer;
-struct fts5_tokenizer {
- int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
- void (*xDelete)(Fts5Tokenizer*);
- int (*xTokenize)(Fts5Tokenizer*,
- void *pCtx,
- int flags, /* Mask of FTS5_TOKENIZE_* flags */
- const char *pText, int nText,
- int (*xToken)(
- void *pCtx, /* Copy of 2nd argument to xTokenize() */
- int tflags, /* Mask of FTS5_TOKEN_* flags */
- const char *pToken, /* Pointer to buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Byte offset of token within input text */
- int iEnd /* Byte offset of end of token within input text */
- )
- );
-};
-
-/* Flags that may be passed as the third argument to xTokenize() */
-#define FTS5_TOKENIZE_QUERY 0x0001
-#define FTS5_TOKENIZE_PREFIX 0x0002
-#define FTS5_TOKENIZE_DOCUMENT 0x0004
-#define FTS5_TOKENIZE_AUX 0x0008
-
-/* Flags that may be passed by the tokenizer implementation back to FTS5
-** as the third argument to the supplied xToken callback. */
-#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
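To make the contract concrete, here is a minimal sketch of a tokenizer that splits on ASCII space characters only; the ws* names are invented, the instance holds no real state, and a production tokenizer would handle Unicode, punctuation and case folding:

static int wsCreate(void *pUnused, const char **azArg, int nArg, Fts5Tokenizer **ppOut){
  *ppOut = (Fts5Tokenizer*)sqlite3_malloc(8);   /* opaque, stateless handle */
  return *ppOut ? SQLITE_OK : SQLITE_NOMEM;
}
static void wsDelete(Fts5Tokenizer *pTok){
  sqlite3_free(pTok);
}
static int wsTokenize(
  Fts5Tokenizer *pTok, void *pCtx, int flags,
  const char *pText, int nText,
  int (*xToken)(void*, int, const char*, int, int, int)
){
  int i = 0;
  while( i<nText ){
    int iStart;
    while( i<nText && pText[i]==' ' ) i++;        /* skip separators */
    iStart = i;
    while( i<nText && pText[i]!=' ' ) i++;        /* scan one token */
    if( i>iStart ){
      int rc = xToken(pCtx, 0, &pText[iStart], i-iStart, iStart, i);
      if( rc!=SQLITE_OK ) return rc;              /* abandon tokenization */
    }
  }
  return SQLITE_OK;
}
static fts5_tokenizer wsTokenizer = { wsCreate, wsDelete, wsTokenize };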
-
-/*
-** END OF CUSTOM TOKENIZERS
-*************************************************************************/
-
-/*************************************************************************
-** FTS5 EXTENSION REGISTRATION API
-*/
-typedef struct fts5_api fts5_api;
-struct fts5_api {
- int iVersion; /* Currently always set to 2 */
-
- /* Create a new tokenizer */
- int (*xCreateTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_tokenizer *pTokenizer,
- void (*xDestroy)(void*)
- );
-
- /* Find an existing tokenizer */
- int (*xFindTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void **ppContext,
- fts5_tokenizer *pTokenizer
- );
-
- /* Create a new auxiliary function */
- int (*xCreateFunction)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_extension_function xFunction,
- void (*xDestroy)(void*)
- );
-};
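A sketch of how the registration entry points fit together, assuming an fts5_api pointer has already been obtained for the database connection (the lookup mechanism itself is not part of this header) and reusing the hypothetical wsTokenizer and matchCountFunc sketches from above:

static int registerDemoFts5Extensions(fts5_api *pApi){
  int rc = pApi->xCreateTokenizer(pApi, "ws", 0, &wsTokenizer, 0);
  if( rc==SQLITE_OK ){
    rc = pApi->xCreateFunction(pApi, "matchcount", 0, matchCountFunc, 0);
  }
  return rc;
}
/* A table could then opt into the tokenizer with, for example:
**   CREATE VIRTUAL TABLE docs USING fts5(body, tokenize='ws');
*/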
-
-/*
-** END OF REGISTRATION API
-*************************************************************************/
-
-#ifdef __cplusplus
-} /* end of the 'extern "C"' block */
-#endif
-
-#endif /* _FTS5_H */
-
-
diff --git a/3rdparty/sqlite3/sqlite3ext.h b/3rdparty/sqlite3/sqlite3ext.h
index 017ea30..ecf93f6 100644
--- a/3rdparty/sqlite3/sqlite3ext.h
+++ b/3rdparty/sqlite3/sqlite3ext.h
@@ -28,7 +28,7 @@ typedef struct sqlite3_api_routines sqlite3_api_routines;
** WARNING: In order to maintain backwards compatibility, add new
** interfaces to the end of this structure only. If you insert new
** interfaces in the middle of this structure, then older different
-** versions of SQLite will not be able to load each other's shared
+** versions of SQLite will not be able to load each others' shared
** libraries!
*/
struct sqlite3_api_routines {
@@ -250,36 +250,11 @@ struct sqlite3_api_routines {
const char *(*uri_parameter)(const char*,const char*);
char *(*vsnprintf)(int,char*,const char*,va_list);
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
- /* Version 3.8.7 and later */
- int (*auto_extension)(void(*)(void));
- int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
- void(*)(void*));
- int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
- void(*)(void*),unsigned char);
- int (*cancel_auto_extension)(void(*)(void));
- int (*load_extension)(sqlite3*,const char*,const char*,char**);
- void *(*malloc64)(sqlite3_uint64);
- sqlite3_uint64 (*msize)(void*);
- void *(*realloc64)(void*,sqlite3_uint64);
- void (*reset_auto_extension)(void);
- void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
- void(*)(void*));
- void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
- void(*)(void*), unsigned char);
- int (*strglob)(const char*,const char*);
- /* Version 3.8.11 and later */
- sqlite3_value *(*value_dup)(const sqlite3_value*);
- void (*value_free)(sqlite3_value*);
- int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
- int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
- /* Version 3.9.0 and later */
- unsigned int (*value_subtype)(sqlite3_value*);
- void (*result_subtype)(sqlite3_context*,unsigned int);
};
/*
** The following macros redefine the API routines so that they are
-** redirected through the global sqlite3_api structure.
+** redirected throught the global sqlite3_api structure.
**
** This header file is also used by the loadext.c source file
** (part of the main SQLite library - not an extension) so that
@@ -288,7 +263,7 @@ struct sqlite3_api_routines {
** the API. So the redefinition macros are only valid if the
 ** SQLITE_CORE macro is undefined.
*/
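A minimal sketch of the loadable-extension side of this arrangement; the entry point name follows the conventional sqlite3_extension_init default, and SQLITE_EXTENSION_INIT2 is the companion macro defined further on in this header:

#include "sqlite3ext.h"
SQLITE_EXTENSION_INIT1        /* file-scope sqlite3_api pointer used by the macros above */

int sqlite3_extension_init(
  sqlite3 *db,
  char **pzErrMsg,
  const sqlite3_api_routines *pApi
){
  SQLITE_EXTENSION_INIT2(pApi);   /* route redirected sqlite3_* calls through pApi */
  /* ... register user functions, collations or virtual tables here ... */
  return SQLITE_OK;
}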
-#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+#ifndef SQLITE_CORE
#define sqlite3_aggregate_context sqlite3_api->aggregate_context
#ifndef SQLITE_OMIT_DEPRECATED
#define sqlite3_aggregate_count sqlite3_api->aggregate_count
@@ -415,7 +390,6 @@ struct sqlite3_api_routines {
#define sqlite3_value_text16le sqlite3_api->value_text16le
#define sqlite3_value_type sqlite3_api->value_type
#define sqlite3_vmprintf sqlite3_api->vmprintf
-#define sqlite3_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_overload_function sqlite3_api->overload_function
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
@@ -493,30 +467,9 @@ struct sqlite3_api_routines {
#define sqlite3_uri_parameter sqlite3_api->uri_parameter
#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
-/* Version 3.8.7 and later */
-#define sqlite3_auto_extension sqlite3_api->auto_extension
-#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
-#define sqlite3_bind_text64 sqlite3_api->bind_text64
-#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
-#define sqlite3_load_extension sqlite3_api->load_extension
-#define sqlite3_malloc64 sqlite3_api->malloc64
-#define sqlite3_msize sqlite3_api->msize
-#define sqlite3_realloc64 sqlite3_api->realloc64
-#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
-#define sqlite3_result_blob64 sqlite3_api->result_blob64
-#define sqlite3_result_text64 sqlite3_api->result_text64
-#define sqlite3_strglob sqlite3_api->strglob
-/* Version 3.8.11 and later */
-#define sqlite3_value_dup sqlite3_api->value_dup
-#define sqlite3_value_free sqlite3_api->value_free
-#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
-#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
-/* Version 3.9.0 and later */
-#define sqlite3_value_subtype sqlite3_api->value_subtype
-#define sqlite3_result_subtype sqlite3_api->result_subtype
-#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
+#endif /* SQLITE_CORE */
-#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
+#ifndef SQLITE_CORE
 /* This is the case when the file really is being compiled as a loadable
** extension */
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;